/* ctl.c, revision 278906 */
1/*- 2 * Copyright (c) 2003-2009 Silicon Graphics International Corp. 3 * Copyright (c) 2012 The FreeBSD Foundation 4 * All rights reserved. 5 * 6 * Portions of this software were developed by Edward Tomasz Napierala 7 * under sponsorship from the FreeBSD Foundation. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions, and the following disclaimer, 14 * without modification. 15 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 16 * substantially similar to the "NO WARRANTY" disclaimer below 17 * ("Disclaimer") and any redistribution must be conditioned upon 18 * including a substantially similar Disclaimer requirement for further 19 * binary redistribution. 20 * 21 * NO WARRANTY 22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR 25 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 26 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 30 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 31 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 32 * POSSIBILITY OF SUCH DAMAGES. 33 * 34 * $Id$ 35 */ 36/* 37 * CAM Target Layer, a SCSI device emulation subsystem. 
38 * 39 * Author: Ken Merry <ken@FreeBSD.org> 40 */ 41 42#define _CTL_C 43 44#include <sys/cdefs.h> 45__FBSDID("$FreeBSD: stable/10/sys/cam/ctl/ctl.c 278906 2015-02-17 17:34:45Z mav $"); 46 47#include <sys/param.h> 48#include <sys/systm.h> 49#include <sys/ctype.h> 50#include <sys/kernel.h> 51#include <sys/types.h> 52#include <sys/kthread.h> 53#include <sys/bio.h> 54#include <sys/fcntl.h> 55#include <sys/lock.h> 56#include <sys/module.h> 57#include <sys/mutex.h> 58#include <sys/condvar.h> 59#include <sys/malloc.h> 60#include <sys/conf.h> 61#include <sys/ioccom.h> 62#include <sys/queue.h> 63#include <sys/sbuf.h> 64#include <sys/smp.h> 65#include <sys/endian.h> 66#include <sys/sysctl.h> 67#include <vm/uma.h> 68 69#include <cam/cam.h> 70#include <cam/scsi/scsi_all.h> 71#include <cam/scsi/scsi_da.h> 72#include <cam/ctl/ctl_io.h> 73#include <cam/ctl/ctl.h> 74#include <cam/ctl/ctl_frontend.h> 75#include <cam/ctl/ctl_frontend_internal.h> 76#include <cam/ctl/ctl_util.h> 77#include <cam/ctl/ctl_backend.h> 78#include <cam/ctl/ctl_ioctl.h> 79#include <cam/ctl/ctl_ha.h> 80#include <cam/ctl/ctl_private.h> 81#include <cam/ctl/ctl_debug.h> 82#include <cam/ctl/ctl_scsi_all.h> 83#include <cam/ctl/ctl_error.h> 84 85struct ctl_softc *control_softc = NULL; 86 87/* 88 * Size and alignment macros needed for Copan-specific HA hardware. These 89 * can go away when the HA code is re-written, and uses busdma for any 90 * hardware. 91 */ 92#define CTL_ALIGN_8B(target, source, type) \ 93 if (((uint32_t)source & 0x7) != 0) \ 94 target = (type)(source + (0x8 - ((uint32_t)source & 0x7)));\ 95 else \ 96 target = (type)source; 97 98#define CTL_SIZE_8B(target, size) \ 99 if ((size & 0x7) != 0) \ 100 target = size + (0x8 - (size & 0x7)); \ 101 else \ 102 target = size; 103 104#define CTL_ALIGN_8B_MARGIN 16 105 106/* 107 * Template mode pages. 108 */ 109 110/* 111 * Note that these are default values only. The actual values will be 112 * filled in when the user does a mode sense. 
113 */ 114const static struct copan_debugconf_subpage debugconf_page_default = { 115 DBGCNF_PAGE_CODE | SMPH_SPF, /* page_code */ 116 DBGCNF_SUBPAGE_CODE, /* subpage */ 117 {(sizeof(struct copan_debugconf_subpage) - 4) >> 8, 118 (sizeof(struct copan_debugconf_subpage) - 4) >> 0}, /* page_length */ 119 DBGCNF_VERSION, /* page_version */ 120 {CTL_TIME_IO_DEFAULT_SECS>>8, 121 CTL_TIME_IO_DEFAULT_SECS>>0}, /* ctl_time_io_secs */ 122}; 123 124const static struct copan_debugconf_subpage debugconf_page_changeable = { 125 DBGCNF_PAGE_CODE | SMPH_SPF, /* page_code */ 126 DBGCNF_SUBPAGE_CODE, /* subpage */ 127 {(sizeof(struct copan_debugconf_subpage) - 4) >> 8, 128 (sizeof(struct copan_debugconf_subpage) - 4) >> 0}, /* page_length */ 129 0, /* page_version */ 130 {0xff,0xff}, /* ctl_time_io_secs */ 131}; 132 133const static struct scsi_da_rw_recovery_page rw_er_page_default = { 134 /*page_code*/SMS_RW_ERROR_RECOVERY_PAGE, 135 /*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2, 136 /*byte3*/SMS_RWER_AWRE|SMS_RWER_ARRE, 137 /*read_retry_count*/0, 138 /*correction_span*/0, 139 /*head_offset_count*/0, 140 /*data_strobe_offset_cnt*/0, 141 /*byte8*/SMS_RWER_LBPERE, 142 /*write_retry_count*/0, 143 /*reserved2*/0, 144 /*recovery_time_limit*/{0, 0}, 145}; 146 147const static struct scsi_da_rw_recovery_page rw_er_page_changeable = { 148 /*page_code*/SMS_RW_ERROR_RECOVERY_PAGE, 149 /*page_length*/sizeof(struct scsi_da_rw_recovery_page) - 2, 150 /*byte3*/0, 151 /*read_retry_count*/0, 152 /*correction_span*/0, 153 /*head_offset_count*/0, 154 /*data_strobe_offset_cnt*/0, 155 /*byte8*/0, 156 /*write_retry_count*/0, 157 /*reserved2*/0, 158 /*recovery_time_limit*/{0, 0}, 159}; 160 161const static struct scsi_format_page format_page_default = { 162 /*page_code*/SMS_FORMAT_DEVICE_PAGE, 163 /*page_length*/sizeof(struct scsi_format_page) - 2, 164 /*tracks_per_zone*/ {0, 0}, 165 /*alt_sectors_per_zone*/ {0, 0}, 166 /*alt_tracks_per_zone*/ {0, 0}, 167 /*alt_tracks_per_lun*/ {0, 0}, 168 
/*sectors_per_track*/ {(CTL_DEFAULT_SECTORS_PER_TRACK >> 8) & 0xff, 169 CTL_DEFAULT_SECTORS_PER_TRACK & 0xff}, 170 /*bytes_per_sector*/ {0, 0}, 171 /*interleave*/ {0, 0}, 172 /*track_skew*/ {0, 0}, 173 /*cylinder_skew*/ {0, 0}, 174 /*flags*/ SFP_HSEC, 175 /*reserved*/ {0, 0, 0} 176}; 177 178const static struct scsi_format_page format_page_changeable = { 179 /*page_code*/SMS_FORMAT_DEVICE_PAGE, 180 /*page_length*/sizeof(struct scsi_format_page) - 2, 181 /*tracks_per_zone*/ {0, 0}, 182 /*alt_sectors_per_zone*/ {0, 0}, 183 /*alt_tracks_per_zone*/ {0, 0}, 184 /*alt_tracks_per_lun*/ {0, 0}, 185 /*sectors_per_track*/ {0, 0}, 186 /*bytes_per_sector*/ {0, 0}, 187 /*interleave*/ {0, 0}, 188 /*track_skew*/ {0, 0}, 189 /*cylinder_skew*/ {0, 0}, 190 /*flags*/ 0, 191 /*reserved*/ {0, 0, 0} 192}; 193 194const static struct scsi_rigid_disk_page rigid_disk_page_default = { 195 /*page_code*/SMS_RIGID_DISK_PAGE, 196 /*page_length*/sizeof(struct scsi_rigid_disk_page) - 2, 197 /*cylinders*/ {0, 0, 0}, 198 /*heads*/ CTL_DEFAULT_HEADS, 199 /*start_write_precomp*/ {0, 0, 0}, 200 /*start_reduced_current*/ {0, 0, 0}, 201 /*step_rate*/ {0, 0}, 202 /*landing_zone_cylinder*/ {0, 0, 0}, 203 /*rpl*/ SRDP_RPL_DISABLED, 204 /*rotational_offset*/ 0, 205 /*reserved1*/ 0, 206 /*rotation_rate*/ {(CTL_DEFAULT_ROTATION_RATE >> 8) & 0xff, 207 CTL_DEFAULT_ROTATION_RATE & 0xff}, 208 /*reserved2*/ {0, 0} 209}; 210 211const static struct scsi_rigid_disk_page rigid_disk_page_changeable = { 212 /*page_code*/SMS_RIGID_DISK_PAGE, 213 /*page_length*/sizeof(struct scsi_rigid_disk_page) - 2, 214 /*cylinders*/ {0, 0, 0}, 215 /*heads*/ 0, 216 /*start_write_precomp*/ {0, 0, 0}, 217 /*start_reduced_current*/ {0, 0, 0}, 218 /*step_rate*/ {0, 0}, 219 /*landing_zone_cylinder*/ {0, 0, 0}, 220 /*rpl*/ 0, 221 /*rotational_offset*/ 0, 222 /*reserved1*/ 0, 223 /*rotation_rate*/ {0, 0}, 224 /*reserved2*/ {0, 0} 225}; 226 227const static struct scsi_caching_page caching_page_default = { 228 /*page_code*/SMS_CACHING_PAGE, 229 
/*page_length*/sizeof(struct scsi_caching_page) - 2, 230 /*flags1*/ SCP_DISC | SCP_WCE, 231 /*ret_priority*/ 0, 232 /*disable_pf_transfer_len*/ {0xff, 0xff}, 233 /*min_prefetch*/ {0, 0}, 234 /*max_prefetch*/ {0xff, 0xff}, 235 /*max_pf_ceiling*/ {0xff, 0xff}, 236 /*flags2*/ 0, 237 /*cache_segments*/ 0, 238 /*cache_seg_size*/ {0, 0}, 239 /*reserved*/ 0, 240 /*non_cache_seg_size*/ {0, 0, 0} 241}; 242 243const static struct scsi_caching_page caching_page_changeable = { 244 /*page_code*/SMS_CACHING_PAGE, 245 /*page_length*/sizeof(struct scsi_caching_page) - 2, 246 /*flags1*/ SCP_WCE | SCP_RCD, 247 /*ret_priority*/ 0, 248 /*disable_pf_transfer_len*/ {0, 0}, 249 /*min_prefetch*/ {0, 0}, 250 /*max_prefetch*/ {0, 0}, 251 /*max_pf_ceiling*/ {0, 0}, 252 /*flags2*/ 0, 253 /*cache_segments*/ 0, 254 /*cache_seg_size*/ {0, 0}, 255 /*reserved*/ 0, 256 /*non_cache_seg_size*/ {0, 0, 0} 257}; 258 259const static struct scsi_control_page control_page_default = { 260 /*page_code*/SMS_CONTROL_MODE_PAGE, 261 /*page_length*/sizeof(struct scsi_control_page) - 2, 262 /*rlec*/0, 263 /*queue_flags*/SCP_QUEUE_ALG_RESTRICTED, 264 /*eca_and_aen*/0, 265 /*flags4*/SCP_TAS, 266 /*aen_holdoff_period*/{0, 0}, 267 /*busy_timeout_period*/{0, 0}, 268 /*extended_selftest_completion_time*/{0, 0} 269}; 270 271const static struct scsi_control_page control_page_changeable = { 272 /*page_code*/SMS_CONTROL_MODE_PAGE, 273 /*page_length*/sizeof(struct scsi_control_page) - 2, 274 /*rlec*/SCP_DSENSE, 275 /*queue_flags*/SCP_QUEUE_ALG_MASK, 276 /*eca_and_aen*/SCP_SWP, 277 /*flags4*/0, 278 /*aen_holdoff_period*/{0, 0}, 279 /*busy_timeout_period*/{0, 0}, 280 /*extended_selftest_completion_time*/{0, 0} 281}; 282 283const static struct scsi_info_exceptions_page ie_page_default = { 284 /*page_code*/SMS_INFO_EXCEPTIONS_PAGE, 285 /*page_length*/sizeof(struct scsi_info_exceptions_page) - 2, 286 /*info_flags*/SIEP_FLAGS_DEXCPT, 287 /*mrie*/0, 288 /*interval_timer*/{0, 0, 0, 0}, 289 /*report_count*/{0, 0, 0, 0} 290}; 291 
292const static struct scsi_info_exceptions_page ie_page_changeable = { 293 /*page_code*/SMS_INFO_EXCEPTIONS_PAGE, 294 /*page_length*/sizeof(struct scsi_info_exceptions_page) - 2, 295 /*info_flags*/0, 296 /*mrie*/0, 297 /*interval_timer*/{0, 0, 0, 0}, 298 /*report_count*/{0, 0, 0, 0} 299}; 300 301#define CTL_LBPM_LEN (sizeof(struct ctl_logical_block_provisioning_page) - 4) 302 303const static struct ctl_logical_block_provisioning_page lbp_page_default = {{ 304 /*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF, 305 /*subpage_code*/0x02, 306 /*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN}, 307 /*flags*/0, 308 /*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 309 /*descr*/{}}, 310 {{/*flags*/0, 311 /*resource*/0x01, 312 /*reserved*/{0, 0}, 313 /*count*/{0, 0, 0, 0}}, 314 {/*flags*/0, 315 /*resource*/0x02, 316 /*reserved*/{0, 0}, 317 /*count*/{0, 0, 0, 0}}, 318 {/*flags*/0, 319 /*resource*/0xf1, 320 /*reserved*/{0, 0}, 321 /*count*/{0, 0, 0, 0}}, 322 {/*flags*/0, 323 /*resource*/0xf2, 324 /*reserved*/{0, 0}, 325 /*count*/{0, 0, 0, 0}} 326 } 327}; 328 329const static struct ctl_logical_block_provisioning_page lbp_page_changeable = {{ 330 /*page_code*/SMS_INFO_EXCEPTIONS_PAGE | SMPH_SPF, 331 /*subpage_code*/0x02, 332 /*page_length*/{CTL_LBPM_LEN >> 8, CTL_LBPM_LEN}, 333 /*flags*/0, 334 /*reserved*/{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 335 /*descr*/{}}, 336 {{/*flags*/0, 337 /*resource*/0, 338 /*reserved*/{0, 0}, 339 /*count*/{0, 0, 0, 0}}, 340 {/*flags*/0, 341 /*resource*/0, 342 /*reserved*/{0, 0}, 343 /*count*/{0, 0, 0, 0}}, 344 {/*flags*/0, 345 /*resource*/0, 346 /*reserved*/{0, 0}, 347 /*count*/{0, 0, 0, 0}}, 348 {/*flags*/0, 349 /*resource*/0, 350 /*reserved*/{0, 0}, 351 /*count*/{0, 0, 0, 0}} 352 } 353}; 354 355/* 356 * XXX KDM move these into the softc. 
357 */ 358static int rcv_sync_msg; 359static uint8_t ctl_pause_rtr; 360 361SYSCTL_NODE(_kern_cam, OID_AUTO, ctl, CTLFLAG_RD, 0, "CAM Target Layer"); 362static int worker_threads = -1; 363TUNABLE_INT("kern.cam.ctl.worker_threads", &worker_threads); 364SYSCTL_INT(_kern_cam_ctl, OID_AUTO, worker_threads, CTLFLAG_RDTUN, 365 &worker_threads, 1, "Number of worker threads"); 366static int ctl_debug = CTL_DEBUG_NONE; 367TUNABLE_INT("kern.cam.ctl.debug", &ctl_debug); 368SYSCTL_INT(_kern_cam_ctl, OID_AUTO, debug, CTLFLAG_RWTUN, 369 &ctl_debug, 0, "Enabled debug flags"); 370 371/* 372 * Supported pages (0x00), Serial number (0x80), Device ID (0x83), 373 * Extended INQUIRY Data (0x86), Mode Page Policy (0x87), 374 * SCSI Ports (0x88), Third-party Copy (0x8F), Block limits (0xB0), 375 * Block Device Characteristics (0xB1) and Logical Block Provisioning (0xB2) 376 */ 377#define SCSI_EVPD_NUM_SUPPORTED_PAGES 10 378 379static void ctl_isc_event_handler(ctl_ha_channel chanel, ctl_ha_event event, 380 int param); 381static void ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest); 382static int ctl_init(void); 383void ctl_shutdown(void); 384static int ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td); 385static int ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td); 386static void ctl_ioctl_online(void *arg); 387static void ctl_ioctl_offline(void *arg); 388static int ctl_ioctl_lun_enable(void *arg, struct ctl_id targ_id, int lun_id); 389static int ctl_ioctl_lun_disable(void *arg, struct ctl_id targ_id, int lun_id); 390static int ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio); 391static int ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio); 392static int ctl_ioctl_submit_wait(union ctl_io *io); 393static void ctl_ioctl_datamove(union ctl_io *io); 394static void ctl_ioctl_done(union ctl_io *io); 395static void ctl_ioctl_hard_startstop_callback(void *arg, 396 struct cfi_metatask *metatask); 397static void ctl_ioctl_bbrread_callback(void 
*arg,struct cfi_metatask *metatask); 398static int ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num, 399 struct ctl_ooa *ooa_hdr, 400 struct ctl_ooa_entry *kern_entries); 401static int ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, 402 struct thread *td); 403static uint32_t ctl_map_lun(struct ctl_softc *softc, int port_num, uint32_t lun); 404static uint32_t ctl_map_lun_back(struct ctl_softc *softc, int port_num, uint32_t lun); 405static int ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *lun, 406 struct ctl_be_lun *be_lun, struct ctl_id target_id); 407static int ctl_free_lun(struct ctl_lun *lun); 408static void ctl_create_lun(struct ctl_be_lun *be_lun); 409/** 410static void ctl_failover_change_pages(struct ctl_softc *softc, 411 struct ctl_scsiio *ctsio, int master); 412**/ 413 414static int ctl_do_mode_select(union ctl_io *io); 415static int ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, 416 uint64_t res_key, uint64_t sa_res_key, 417 uint8_t type, uint32_t residx, 418 struct ctl_scsiio *ctsio, 419 struct scsi_per_res_out *cdb, 420 struct scsi_per_res_out_parms* param); 421static void ctl_pro_preempt_other(struct ctl_lun *lun, 422 union ctl_ha_msg *msg); 423static void ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg); 424static int ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len); 425static int ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len); 426static int ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len); 427static int ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len); 428static int ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len); 429static int ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, 430 int alloc_len); 431static int ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, 432 int alloc_len); 433static int ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len); 434static int ctl_inquiry_evpd_lbp(struct 
ctl_scsiio *ctsio, int alloc_len); 435static int ctl_inquiry_evpd(struct ctl_scsiio *ctsio); 436static int ctl_inquiry_std(struct ctl_scsiio *ctsio); 437static int ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len); 438static ctl_action ctl_extent_check(union ctl_io *io1, union ctl_io *io2, 439 bool seq); 440static ctl_action ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2); 441static ctl_action ctl_check_for_blockage(struct ctl_lun *lun, 442 union ctl_io *pending_io, union ctl_io *ooa_io); 443static ctl_action ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io, 444 union ctl_io *starting_io); 445static int ctl_check_blocked(struct ctl_lun *lun); 446static int ctl_scsiio_lun_check(struct ctl_lun *lun, 447 const struct ctl_cmd_entry *entry, 448 struct ctl_scsiio *ctsio); 449//static int ctl_check_rtr(union ctl_io *pending_io, struct ctl_softc *softc); 450static void ctl_failover(void); 451static void ctl_clear_ua(struct ctl_softc *ctl_softc, uint32_t initidx, 452 ctl_ua_type ua_type); 453static int ctl_scsiio_precheck(struct ctl_softc *ctl_softc, 454 struct ctl_scsiio *ctsio); 455static int ctl_scsiio(struct ctl_scsiio *ctsio); 456 457static int ctl_bus_reset(struct ctl_softc *ctl_softc, union ctl_io *io); 458static int ctl_target_reset(struct ctl_softc *ctl_softc, union ctl_io *io, 459 ctl_ua_type ua_type); 460static int ctl_lun_reset(struct ctl_lun *lun, union ctl_io *io, 461 ctl_ua_type ua_type); 462static int ctl_abort_task(union ctl_io *io); 463static int ctl_abort_task_set(union ctl_io *io); 464static int ctl_i_t_nexus_reset(union ctl_io *io); 465static void ctl_run_task(union ctl_io *io); 466#ifdef CTL_IO_DELAY 467static void ctl_datamove_timer_wakeup(void *arg); 468static void ctl_done_timer_wakeup(void *arg); 469#endif /* CTL_IO_DELAY */ 470 471static void ctl_send_datamove_done(union ctl_io *io, int have_lock); 472static void ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq); 473static int 
ctl_datamove_remote_dm_write_cb(union ctl_io *io); 474static void ctl_datamove_remote_write(union ctl_io *io); 475static int ctl_datamove_remote_dm_read_cb(union ctl_io *io); 476static void ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq); 477static int ctl_datamove_remote_sgl_setup(union ctl_io *io); 478static int ctl_datamove_remote_xfer(union ctl_io *io, unsigned command, 479 ctl_ha_dt_cb callback); 480static void ctl_datamove_remote_read(union ctl_io *io); 481static void ctl_datamove_remote(union ctl_io *io); 482static int ctl_process_done(union ctl_io *io); 483static void ctl_lun_thread(void *arg); 484static void ctl_thresh_thread(void *arg); 485static void ctl_work_thread(void *arg); 486static void ctl_enqueue_incoming(union ctl_io *io); 487static void ctl_enqueue_rtr(union ctl_io *io); 488static void ctl_enqueue_done(union ctl_io *io); 489static void ctl_enqueue_isc(union ctl_io *io); 490static const struct ctl_cmd_entry * 491 ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa); 492static const struct ctl_cmd_entry * 493 ctl_validate_command(struct ctl_scsiio *ctsio); 494static int ctl_cmd_applicable(uint8_t lun_type, 495 const struct ctl_cmd_entry *entry); 496 497/* 498 * Load the serialization table. This isn't very pretty, but is probably 499 * the easiest way to do it. 500 */ 501#include "ctl_ser_table.c" 502 503/* 504 * We only need to define open, close and ioctl routines for this driver. 
505 */ 506static struct cdevsw ctl_cdevsw = { 507 .d_version = D_VERSION, 508 .d_flags = 0, 509 .d_open = ctl_open, 510 .d_close = ctl_close, 511 .d_ioctl = ctl_ioctl, 512 .d_name = "ctl", 513}; 514 515 516MALLOC_DEFINE(M_CTL, "ctlmem", "Memory used for CTL"); 517MALLOC_DEFINE(M_CTLIO, "ctlio", "Memory used for CTL requests"); 518 519static int ctl_module_event_handler(module_t, int /*modeventtype_t*/, void *); 520 521static moduledata_t ctl_moduledata = { 522 "ctl", 523 ctl_module_event_handler, 524 NULL 525}; 526 527DECLARE_MODULE(ctl, ctl_moduledata, SI_SUB_CONFIGURE, SI_ORDER_THIRD); 528MODULE_VERSION(ctl, 1); 529 530static struct ctl_frontend ioctl_frontend = 531{ 532 .name = "ioctl", 533}; 534 535static void 536ctl_isc_handler_finish_xfer(struct ctl_softc *ctl_softc, 537 union ctl_ha_msg *msg_info) 538{ 539 struct ctl_scsiio *ctsio; 540 541 if (msg_info->hdr.original_sc == NULL) { 542 printf("%s: original_sc == NULL!\n", __func__); 543 /* XXX KDM now what? */ 544 return; 545 } 546 547 ctsio = &msg_info->hdr.original_sc->scsiio; 548 ctsio->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 549 ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO; 550 ctsio->io_hdr.status = msg_info->hdr.status; 551 ctsio->scsi_status = msg_info->scsi.scsi_status; 552 ctsio->sense_len = msg_info->scsi.sense_len; 553 ctsio->sense_residual = msg_info->scsi.sense_residual; 554 ctsio->residual = msg_info->scsi.residual; 555 memcpy(&ctsio->sense_data, &msg_info->scsi.sense_data, 556 sizeof(ctsio->sense_data)); 557 memcpy(&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes, 558 &msg_info->scsi.lbalen, sizeof(msg_info->scsi.lbalen)); 559 ctl_enqueue_isc((union ctl_io *)ctsio); 560} 561 562static void 563ctl_isc_handler_finish_ser_only(struct ctl_softc *ctl_softc, 564 union ctl_ha_msg *msg_info) 565{ 566 struct ctl_scsiio *ctsio; 567 568 if (msg_info->hdr.serializing_sc == NULL) { 569 printf("%s: serializing_sc == NULL!\n", __func__); 570 /* XXX KDM now what? 
*/ 571 return; 572 } 573 574 ctsio = &msg_info->hdr.serializing_sc->scsiio; 575#if 0 576 /* 577 * Attempt to catch the situation where an I/O has 578 * been freed, and we're using it again. 579 */ 580 if (ctsio->io_hdr.io_type == 0xff) { 581 union ctl_io *tmp_io; 582 tmp_io = (union ctl_io *)ctsio; 583 printf("%s: %p use after free!\n", __func__, 584 ctsio); 585 printf("%s: type %d msg %d cdb %x iptl: " 586 "%d:%d:%d:%d tag 0x%04x " 587 "flag %#x status %x\n", 588 __func__, 589 tmp_io->io_hdr.io_type, 590 tmp_io->io_hdr.msg_type, 591 tmp_io->scsiio.cdb[0], 592 tmp_io->io_hdr.nexus.initid.id, 593 tmp_io->io_hdr.nexus.targ_port, 594 tmp_io->io_hdr.nexus.targ_target.id, 595 tmp_io->io_hdr.nexus.targ_lun, 596 (tmp_io->io_hdr.io_type == 597 CTL_IO_TASK) ? 598 tmp_io->taskio.tag_num : 599 tmp_io->scsiio.tag_num, 600 tmp_io->io_hdr.flags, 601 tmp_io->io_hdr.status); 602 } 603#endif 604 ctsio->io_hdr.msg_type = CTL_MSG_FINISH_IO; 605 ctl_enqueue_isc((union ctl_io *)ctsio); 606} 607 608/* 609 * ISC (Inter Shelf Communication) event handler. Events from the HA 610 * subsystem come in here. 
611 */ 612static void 613ctl_isc_event_handler(ctl_ha_channel channel, ctl_ha_event event, int param) 614{ 615 struct ctl_softc *softc; 616 union ctl_io *io; 617 struct ctl_prio *presio; 618 ctl_ha_status isc_status; 619 620 softc = control_softc; 621 io = NULL; 622 623 624#if 0 625 printf("CTL: Isc Msg event %d\n", event); 626#endif 627 if (event == CTL_HA_EVT_MSG_RECV) { 628 union ctl_ha_msg msg_info; 629 630 isc_status = ctl_ha_msg_recv(CTL_HA_CHAN_CTL, &msg_info, 631 sizeof(msg_info), /*wait*/ 0); 632#if 0 633 printf("CTL: msg_type %d\n", msg_info.msg_type); 634#endif 635 if (isc_status != 0) { 636 printf("Error receiving message, status = %d\n", 637 isc_status); 638 return; 639 } 640 641 switch (msg_info.hdr.msg_type) { 642 case CTL_MSG_SERIALIZE: 643#if 0 644 printf("Serialize\n"); 645#endif 646 io = ctl_alloc_io_nowait(softc->othersc_pool); 647 if (io == NULL) { 648 printf("ctl_isc_event_handler: can't allocate " 649 "ctl_io!\n"); 650 /* Bad Juju */ 651 /* Need to set busy and send msg back */ 652 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; 653 msg_info.hdr.status = CTL_SCSI_ERROR; 654 msg_info.scsi.scsi_status = SCSI_STATUS_BUSY; 655 msg_info.scsi.sense_len = 0; 656 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 657 sizeof(msg_info), 0) > CTL_HA_STATUS_SUCCESS){ 658 } 659 goto bailout; 660 } 661 ctl_zero_io(io); 662 // populate ctsio from msg_info 663 io->io_hdr.io_type = CTL_IO_SCSI; 664 io->io_hdr.msg_type = CTL_MSG_SERIALIZE; 665 io->io_hdr.original_sc = msg_info.hdr.original_sc; 666#if 0 667 printf("pOrig %x\n", (int)msg_info.original_sc); 668#endif 669 io->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC | 670 CTL_FLAG_IO_ACTIVE; 671 /* 672 * If we're in serialization-only mode, we don't 673 * want to go through full done processing. Thus 674 * the COPY flag. 675 * 676 * XXX KDM add another flag that is more specific. 
677 */ 678 if (softc->ha_mode == CTL_HA_MODE_SER_ONLY) 679 io->io_hdr.flags |= CTL_FLAG_INT_COPY; 680 io->io_hdr.nexus = msg_info.hdr.nexus; 681#if 0 682 printf("targ %d, port %d, iid %d, lun %d\n", 683 io->io_hdr.nexus.targ_target.id, 684 io->io_hdr.nexus.targ_port, 685 io->io_hdr.nexus.initid.id, 686 io->io_hdr.nexus.targ_lun); 687#endif 688 io->scsiio.tag_num = msg_info.scsi.tag_num; 689 io->scsiio.tag_type = msg_info.scsi.tag_type; 690 memcpy(io->scsiio.cdb, msg_info.scsi.cdb, 691 CTL_MAX_CDBLEN); 692 if (softc->ha_mode == CTL_HA_MODE_XFER) { 693 const struct ctl_cmd_entry *entry; 694 695 entry = ctl_get_cmd_entry(&io->scsiio, NULL); 696 io->io_hdr.flags &= ~CTL_FLAG_DATA_MASK; 697 io->io_hdr.flags |= 698 entry->flags & CTL_FLAG_DATA_MASK; 699 } 700 ctl_enqueue_isc(io); 701 break; 702 703 /* Performed on the Originating SC, XFER mode only */ 704 case CTL_MSG_DATAMOVE: { 705 struct ctl_sg_entry *sgl; 706 int i, j; 707 708 io = msg_info.hdr.original_sc; 709 if (io == NULL) { 710 printf("%s: original_sc == NULL!\n", __func__); 711 /* XXX KDM do something here */ 712 break; 713 } 714 io->io_hdr.msg_type = CTL_MSG_DATAMOVE; 715 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 716 /* 717 * Keep track of this, we need to send it back over 718 * when the datamove is complete. 719 */ 720 io->io_hdr.serializing_sc = msg_info.hdr.serializing_sc; 721 722 if (msg_info.dt.sg_sequence == 0) { 723 /* 724 * XXX KDM we use the preallocated S/G list 725 * here, but we'll need to change this to 726 * dynamic allocation if we need larger S/G 727 * lists. 728 */ 729 if (msg_info.dt.kern_sg_entries > 730 sizeof(io->io_hdr.remote_sglist) / 731 sizeof(io->io_hdr.remote_sglist[0])) { 732 printf("%s: number of S/G entries " 733 "needed %u > allocated num %zd\n", 734 __func__, 735 msg_info.dt.kern_sg_entries, 736 sizeof(io->io_hdr.remote_sglist)/ 737 sizeof(io->io_hdr.remote_sglist[0])); 738 739 /* 740 * XXX KDM send a message back to 741 * the other side to shut down the 742 * DMA. 
The error will come back 743 * through via the normal channel. 744 */ 745 break; 746 } 747 sgl = io->io_hdr.remote_sglist; 748 memset(sgl, 0, 749 sizeof(io->io_hdr.remote_sglist)); 750 751 io->scsiio.kern_data_ptr = (uint8_t *)sgl; 752 753 io->scsiio.kern_sg_entries = 754 msg_info.dt.kern_sg_entries; 755 io->scsiio.rem_sg_entries = 756 msg_info.dt.kern_sg_entries; 757 io->scsiio.kern_data_len = 758 msg_info.dt.kern_data_len; 759 io->scsiio.kern_total_len = 760 msg_info.dt.kern_total_len; 761 io->scsiio.kern_data_resid = 762 msg_info.dt.kern_data_resid; 763 io->scsiio.kern_rel_offset = 764 msg_info.dt.kern_rel_offset; 765 /* 766 * Clear out per-DMA flags. 767 */ 768 io->io_hdr.flags &= ~CTL_FLAG_RDMA_MASK; 769 /* 770 * Add per-DMA flags that are set for this 771 * particular DMA request. 772 */ 773 io->io_hdr.flags |= msg_info.dt.flags & 774 CTL_FLAG_RDMA_MASK; 775 } else 776 sgl = (struct ctl_sg_entry *) 777 io->scsiio.kern_data_ptr; 778 779 for (i = msg_info.dt.sent_sg_entries, j = 0; 780 i < (msg_info.dt.sent_sg_entries + 781 msg_info.dt.cur_sg_entries); i++, j++) { 782 sgl[i].addr = msg_info.dt.sg_list[j].addr; 783 sgl[i].len = msg_info.dt.sg_list[j].len; 784 785#if 0 786 printf("%s: L: %p,%d -> %p,%d j=%d, i=%d\n", 787 __func__, 788 msg_info.dt.sg_list[j].addr, 789 msg_info.dt.sg_list[j].len, 790 sgl[i].addr, sgl[i].len, j, i); 791#endif 792 } 793#if 0 794 memcpy(&sgl[msg_info.dt.sent_sg_entries], 795 msg_info.dt.sg_list, 796 sizeof(*sgl) * msg_info.dt.cur_sg_entries); 797#endif 798 799 /* 800 * If this is the last piece of the I/O, we've got 801 * the full S/G list. Queue processing in the thread. 802 * Otherwise wait for the next piece. 803 */ 804 if (msg_info.dt.sg_last != 0) 805 ctl_enqueue_isc(io); 806 break; 807 } 808 /* Performed on the Serializing (primary) SC, XFER mode only */ 809 case CTL_MSG_DATAMOVE_DONE: { 810 if (msg_info.hdr.serializing_sc == NULL) { 811 printf("%s: serializing_sc == NULL!\n", 812 __func__); 813 /* XXX KDM now what? 
*/ 814 break; 815 } 816 /* 817 * We grab the sense information here in case 818 * there was a failure, so we can return status 819 * back to the initiator. 820 */ 821 io = msg_info.hdr.serializing_sc; 822 io->io_hdr.msg_type = CTL_MSG_DATAMOVE_DONE; 823 io->io_hdr.status = msg_info.hdr.status; 824 io->scsiio.scsi_status = msg_info.scsi.scsi_status; 825 io->scsiio.sense_len = msg_info.scsi.sense_len; 826 io->scsiio.sense_residual =msg_info.scsi.sense_residual; 827 io->io_hdr.port_status = msg_info.scsi.fetd_status; 828 io->scsiio.residual = msg_info.scsi.residual; 829 memcpy(&io->scsiio.sense_data,&msg_info.scsi.sense_data, 830 sizeof(io->scsiio.sense_data)); 831 ctl_enqueue_isc(io); 832 break; 833 } 834 835 /* Preformed on Originating SC, SER_ONLY mode */ 836 case CTL_MSG_R2R: 837 io = msg_info.hdr.original_sc; 838 if (io == NULL) { 839 printf("%s: Major Bummer\n", __func__); 840 return; 841 } else { 842#if 0 843 printf("pOrig %x\n",(int) ctsio); 844#endif 845 } 846 io->io_hdr.msg_type = CTL_MSG_R2R; 847 io->io_hdr.serializing_sc = msg_info.hdr.serializing_sc; 848 ctl_enqueue_isc(io); 849 break; 850 851 /* 852 * Performed on Serializing(i.e. primary SC) SC in SER_ONLY 853 * mode. 854 * Performed on the Originating (i.e. secondary) SC in XFER 855 * mode 856 */ 857 case CTL_MSG_FINISH_IO: 858 if (softc->ha_mode == CTL_HA_MODE_XFER) 859 ctl_isc_handler_finish_xfer(softc, 860 &msg_info); 861 else 862 ctl_isc_handler_finish_ser_only(softc, 863 &msg_info); 864 break; 865 866 /* Preformed on Originating SC */ 867 case CTL_MSG_BAD_JUJU: 868 io = msg_info.hdr.original_sc; 869 if (io == NULL) { 870 printf("%s: Bad JUJU!, original_sc is NULL!\n", 871 __func__); 872 break; 873 } 874 ctl_copy_sense_data(&msg_info, io); 875 /* 876 * IO should have already been cleaned up on other 877 * SC so clear this flag so we won't send a message 878 * back to finish the IO there. 
879 */ 880 io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC; 881 io->io_hdr.flags |= CTL_FLAG_IO_ACTIVE; 882 883 /* io = msg_info.hdr.serializing_sc; */ 884 io->io_hdr.msg_type = CTL_MSG_BAD_JUJU; 885 ctl_enqueue_isc(io); 886 break; 887 888 /* Handle resets sent from the other side */ 889 case CTL_MSG_MANAGE_TASKS: { 890 struct ctl_taskio *taskio; 891 taskio = (struct ctl_taskio *)ctl_alloc_io_nowait( 892 softc->othersc_pool); 893 if (taskio == NULL) { 894 printf("ctl_isc_event_handler: can't allocate " 895 "ctl_io!\n"); 896 /* Bad Juju */ 897 /* should I just call the proper reset func 898 here??? */ 899 goto bailout; 900 } 901 ctl_zero_io((union ctl_io *)taskio); 902 taskio->io_hdr.io_type = CTL_IO_TASK; 903 taskio->io_hdr.flags |= CTL_FLAG_FROM_OTHER_SC; 904 taskio->io_hdr.nexus = msg_info.hdr.nexus; 905 taskio->task_action = msg_info.task.task_action; 906 taskio->tag_num = msg_info.task.tag_num; 907 taskio->tag_type = msg_info.task.tag_type; 908#ifdef CTL_TIME_IO 909 taskio->io_hdr.start_time = time_uptime; 910 getbintime(&taskio->io_hdr.start_bt); 911#if 0 912 cs_prof_gettime(&taskio->io_hdr.start_ticks); 913#endif 914#endif /* CTL_TIME_IO */ 915 ctl_run_task((union ctl_io *)taskio); 916 break; 917 } 918 /* Persistent Reserve action which needs attention */ 919 case CTL_MSG_PERS_ACTION: 920 presio = (struct ctl_prio *)ctl_alloc_io_nowait( 921 softc->othersc_pool); 922 if (presio == NULL) { 923 printf("ctl_isc_event_handler: can't allocate " 924 "ctl_io!\n"); 925 /* Bad Juju */ 926 /* Need to set busy and send msg back */ 927 goto bailout; 928 } 929 ctl_zero_io((union ctl_io *)presio); 930 presio->io_hdr.msg_type = CTL_MSG_PERS_ACTION; 931 presio->pr_msg = msg_info.pr; 932 ctl_enqueue_isc((union ctl_io *)presio); 933 break; 934 case CTL_MSG_SYNC_FE: 935 rcv_sync_msg = 1; 936 break; 937 default: 938 printf("How did I get here?\n"); 939 } 940 } else if (event == CTL_HA_EVT_MSG_SENT) { 941 if (param != CTL_HA_STATUS_SUCCESS) { 942 printf("Bad status from 
ctl_ha_msg_send status %d\n", 943 param); 944 } 945 return; 946 } else if (event == CTL_HA_EVT_DISCONNECT) { 947 printf("CTL: Got a disconnect from Isc\n"); 948 return; 949 } else { 950 printf("ctl_isc_event_handler: Unknown event %d\n", event); 951 return; 952 } 953 954bailout: 955 return; 956} 957 958static void 959ctl_copy_sense_data(union ctl_ha_msg *src, union ctl_io *dest) 960{ 961 struct scsi_sense_data *sense; 962 963 sense = &dest->scsiio.sense_data; 964 bcopy(&src->scsi.sense_data, sense, sizeof(*sense)); 965 dest->scsiio.scsi_status = src->scsi.scsi_status; 966 dest->scsiio.sense_len = src->scsi.sense_len; 967 dest->io_hdr.status = src->hdr.status; 968} 969 970static void 971ctl_est_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua) 972{ 973 ctl_ua_type *pu; 974 975 mtx_assert(&lun->lun_lock, MA_OWNED); 976 pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT]; 977 if (pu == NULL) 978 return; 979 pu[initidx % CTL_MAX_INIT_PER_PORT] |= ua; 980} 981 982static void 983ctl_est_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua) 984{ 985 int i, j; 986 987 mtx_assert(&lun->lun_lock, MA_OWNED); 988 for (i = 0; i < CTL_MAX_PORTS; i++) { 989 if (lun->pending_ua[i] == NULL) 990 continue; 991 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 992 if (i * CTL_MAX_INIT_PER_PORT + j == except) 993 continue; 994 lun->pending_ua[i][j] |= ua; 995 } 996 } 997} 998 999static void 1000ctl_clr_ua(struct ctl_lun *lun, uint32_t initidx, ctl_ua_type ua) 1001{ 1002 ctl_ua_type *pu; 1003 1004 mtx_assert(&lun->lun_lock, MA_OWNED); 1005 pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT]; 1006 if (pu == NULL) 1007 return; 1008 pu[initidx % CTL_MAX_INIT_PER_PORT] &= ~ua; 1009} 1010 1011static void 1012ctl_clr_ua_all(struct ctl_lun *lun, uint32_t except, ctl_ua_type ua) 1013{ 1014 int i, j; 1015 1016 mtx_assert(&lun->lun_lock, MA_OWNED); 1017 for (i = 0; i < CTL_MAX_PORTS; i++) { 1018 if (lun->pending_ua[i] == NULL) 1019 continue; 1020 for (j = 0; j < 
CTL_MAX_INIT_PER_PORT; j++) { 1021 if (i * CTL_MAX_INIT_PER_PORT + j == except) 1022 continue; 1023 lun->pending_ua[i][j] &= ~ua; 1024 } 1025 } 1026} 1027 1028static int 1029ctl_ha_state_sysctl(SYSCTL_HANDLER_ARGS) 1030{ 1031 struct ctl_softc *softc = (struct ctl_softc *)arg1; 1032 struct ctl_lun *lun; 1033 int error, value; 1034 1035 if (softc->flags & CTL_FLAG_ACTIVE_SHELF) 1036 value = 0; 1037 else 1038 value = 1; 1039 1040 error = sysctl_handle_int(oidp, &value, 0, req); 1041 if ((error != 0) || (req->newptr == NULL)) 1042 return (error); 1043 1044 mtx_lock(&softc->ctl_lock); 1045 if (value == 0) 1046 softc->flags |= CTL_FLAG_ACTIVE_SHELF; 1047 else 1048 softc->flags &= ~CTL_FLAG_ACTIVE_SHELF; 1049 STAILQ_FOREACH(lun, &softc->lun_list, links) { 1050 mtx_lock(&lun->lun_lock); 1051 ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE); 1052 mtx_unlock(&lun->lun_lock); 1053 } 1054 mtx_unlock(&softc->ctl_lock); 1055 return (0); 1056} 1057 1058static int 1059ctl_init(void) 1060{ 1061 struct ctl_softc *softc; 1062 void *other_pool; 1063 struct ctl_port *port; 1064 int i, error, retval; 1065 //int isc_retval; 1066 1067 retval = 0; 1068 ctl_pause_rtr = 0; 1069 rcv_sync_msg = 0; 1070 1071 control_softc = malloc(sizeof(*control_softc), M_DEVBUF, 1072 M_WAITOK | M_ZERO); 1073 softc = control_softc; 1074 1075 softc->dev = make_dev(&ctl_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, 1076 "cam/ctl"); 1077 1078 softc->dev->si_drv1 = softc; 1079 1080 /* 1081 * By default, return a "bad LUN" peripheral qualifier for unknown 1082 * LUNs. The user can override this default using the tunable or 1083 * sysctl. See the comment in ctl_inquiry_std() for more details. 
1084 */ 1085 softc->inquiry_pq_no_lun = 1; 1086 TUNABLE_INT_FETCH("kern.cam.ctl.inquiry_pq_no_lun", 1087 &softc->inquiry_pq_no_lun); 1088 sysctl_ctx_init(&softc->sysctl_ctx); 1089 softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx, 1090 SYSCTL_STATIC_CHILDREN(_kern_cam), OID_AUTO, "ctl", 1091 CTLFLAG_RD, 0, "CAM Target Layer"); 1092 1093 if (softc->sysctl_tree == NULL) { 1094 printf("%s: unable to allocate sysctl tree\n", __func__); 1095 destroy_dev(softc->dev); 1096 free(control_softc, M_DEVBUF); 1097 control_softc = NULL; 1098 return (ENOMEM); 1099 } 1100 1101 SYSCTL_ADD_INT(&softc->sysctl_ctx, 1102 SYSCTL_CHILDREN(softc->sysctl_tree), OID_AUTO, 1103 "inquiry_pq_no_lun", CTLFLAG_RW, 1104 &softc->inquiry_pq_no_lun, 0, 1105 "Report no lun possible for invalid LUNs"); 1106 1107 mtx_init(&softc->ctl_lock, "CTL mutex", NULL, MTX_DEF); 1108 softc->io_zone = uma_zcreate("CTL IO", sizeof(union ctl_io), 1109 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0); 1110 softc->open_count = 0; 1111 1112 /* 1113 * Default to actually sending a SYNCHRONIZE CACHE command down to 1114 * the drive. 1115 */ 1116 softc->flags = CTL_FLAG_REAL_SYNC; 1117 1118 /* 1119 * In Copan's HA scheme, the "master" and "slave" roles are 1120 * figured out through the slot the controller is in. Although it 1121 * is an active/active system, someone has to be in charge. 1122 */ 1123 SYSCTL_ADD_INT(&softc->sysctl_ctx, SYSCTL_CHILDREN(softc->sysctl_tree), 1124 OID_AUTO, "ha_id", CTLFLAG_RDTUN, &softc->ha_id, 0, 1125 "HA head ID (0 - no HA)"); 1126 if (softc->ha_id == 0) { 1127 softc->flags |= CTL_FLAG_ACTIVE_SHELF; 1128 softc->is_single = 1; 1129 softc->port_offset = 0; 1130 } else 1131 softc->port_offset = (softc->ha_id - 1) * CTL_MAX_PORTS; 1132 softc->persis_offset = softc->port_offset * CTL_MAX_INIT_PER_PORT; 1133 1134 /* 1135 * XXX KDM need to figure out where we want to get our target ID 1136 * and WWID. Is it different on each port? 
1137 */ 1138 softc->target.id = 0; 1139 softc->target.wwid[0] = 0x12345678; 1140 softc->target.wwid[1] = 0x87654321; 1141 STAILQ_INIT(&softc->lun_list); 1142 STAILQ_INIT(&softc->pending_lun_queue); 1143 STAILQ_INIT(&softc->fe_list); 1144 STAILQ_INIT(&softc->port_list); 1145 STAILQ_INIT(&softc->be_list); 1146 ctl_tpc_init(softc); 1147 1148 if (ctl_pool_create(softc, "othersc", CTL_POOL_ENTRIES_OTHER_SC, 1149 &other_pool) != 0) 1150 { 1151 printf("ctl: can't allocate %d entry other SC pool, " 1152 "exiting\n", CTL_POOL_ENTRIES_OTHER_SC); 1153 return (ENOMEM); 1154 } 1155 softc->othersc_pool = other_pool; 1156 1157 if (worker_threads <= 0) 1158 worker_threads = max(1, mp_ncpus / 4); 1159 if (worker_threads > CTL_MAX_THREADS) 1160 worker_threads = CTL_MAX_THREADS; 1161 1162 for (i = 0; i < worker_threads; i++) { 1163 struct ctl_thread *thr = &softc->threads[i]; 1164 1165 mtx_init(&thr->queue_lock, "CTL queue mutex", NULL, MTX_DEF); 1166 thr->ctl_softc = softc; 1167 STAILQ_INIT(&thr->incoming_queue); 1168 STAILQ_INIT(&thr->rtr_queue); 1169 STAILQ_INIT(&thr->done_queue); 1170 STAILQ_INIT(&thr->isc_queue); 1171 1172 error = kproc_kthread_add(ctl_work_thread, thr, 1173 &softc->ctl_proc, &thr->thread, 0, 0, "ctl", "work%d", i); 1174 if (error != 0) { 1175 printf("error creating CTL work thread!\n"); 1176 ctl_pool_free(other_pool); 1177 return (error); 1178 } 1179 } 1180 error = kproc_kthread_add(ctl_lun_thread, softc, 1181 &softc->ctl_proc, NULL, 0, 0, "ctl", "lun"); 1182 if (error != 0) { 1183 printf("error creating CTL lun thread!\n"); 1184 ctl_pool_free(other_pool); 1185 return (error); 1186 } 1187 error = kproc_kthread_add(ctl_thresh_thread, softc, 1188 &softc->ctl_proc, NULL, 0, 0, "ctl", "thresh"); 1189 if (error != 0) { 1190 printf("error creating CTL threshold thread!\n"); 1191 ctl_pool_free(other_pool); 1192 return (error); 1193 } 1194 if (bootverbose) 1195 printf("ctl: CAM Target Layer loaded\n"); 1196 1197 /* 1198 * Initialize the ioctl front end. 
1199 */ 1200 ctl_frontend_register(&ioctl_frontend); 1201 port = &softc->ioctl_info.port; 1202 port->frontend = &ioctl_frontend; 1203 sprintf(softc->ioctl_info.port_name, "ioctl"); 1204 port->port_type = CTL_PORT_IOCTL; 1205 port->num_requested_ctl_io = 100; 1206 port->port_name = softc->ioctl_info.port_name; 1207 port->port_online = ctl_ioctl_online; 1208 port->port_offline = ctl_ioctl_offline; 1209 port->onoff_arg = &softc->ioctl_info; 1210 port->lun_enable = ctl_ioctl_lun_enable; 1211 port->lun_disable = ctl_ioctl_lun_disable; 1212 port->targ_lun_arg = &softc->ioctl_info; 1213 port->fe_datamove = ctl_ioctl_datamove; 1214 port->fe_done = ctl_ioctl_done; 1215 port->max_targets = 15; 1216 port->max_target_id = 15; 1217 1218 if (ctl_port_register(&softc->ioctl_info.port) != 0) { 1219 printf("ctl: ioctl front end registration failed, will " 1220 "continue anyway\n"); 1221 } 1222 1223 SYSCTL_ADD_PROC(&softc->sysctl_ctx,SYSCTL_CHILDREN(softc->sysctl_tree), 1224 OID_AUTO, "ha_state", CTLTYPE_INT | CTLFLAG_RWTUN, 1225 softc, 0, ctl_ha_state_sysctl, "I", "HA state for this head"); 1226 1227#ifdef CTL_IO_DELAY 1228 if (sizeof(struct callout) > CTL_TIMER_BYTES) { 1229 printf("sizeof(struct callout) %zd > CTL_TIMER_BYTES %zd\n", 1230 sizeof(struct callout), CTL_TIMER_BYTES); 1231 return (EINVAL); 1232 } 1233#endif /* CTL_IO_DELAY */ 1234 1235 return (0); 1236} 1237 1238void 1239ctl_shutdown(void) 1240{ 1241 struct ctl_softc *softc; 1242 struct ctl_lun *lun, *next_lun; 1243 1244 softc = (struct ctl_softc *)control_softc; 1245 1246 if (ctl_port_deregister(&softc->ioctl_info.port) != 0) 1247 printf("ctl: ioctl front end deregistration failed\n"); 1248 1249 mtx_lock(&softc->ctl_lock); 1250 1251 /* 1252 * Free up each LUN. 
1253 */ 1254 for (lun = STAILQ_FIRST(&softc->lun_list); lun != NULL; lun = next_lun){ 1255 next_lun = STAILQ_NEXT(lun, links); 1256 ctl_free_lun(lun); 1257 } 1258 1259 mtx_unlock(&softc->ctl_lock); 1260 1261 ctl_frontend_deregister(&ioctl_frontend); 1262 1263#if 0 1264 ctl_shutdown_thread(softc->work_thread); 1265 mtx_destroy(&softc->queue_lock); 1266#endif 1267 1268 ctl_tpc_shutdown(softc); 1269 uma_zdestroy(softc->io_zone); 1270 mtx_destroy(&softc->ctl_lock); 1271 1272 destroy_dev(softc->dev); 1273 1274 sysctl_ctx_free(&softc->sysctl_ctx); 1275 1276 free(control_softc, M_DEVBUF); 1277 control_softc = NULL; 1278 1279 if (bootverbose) 1280 printf("ctl: CAM Target Layer unloaded\n"); 1281} 1282 1283static int 1284ctl_module_event_handler(module_t mod, int what, void *arg) 1285{ 1286 1287 switch (what) { 1288 case MOD_LOAD: 1289 return (ctl_init()); 1290 case MOD_UNLOAD: 1291 return (EBUSY); 1292 default: 1293 return (EOPNOTSUPP); 1294 } 1295} 1296 1297/* 1298 * XXX KDM should we do some access checks here? Bump a reference count to 1299 * prevent a CTL module from being unloaded while someone has it open? 
 */
/*
 * cdev open handler for /dev/cam/ctl.  Always succeeds; no access checks
 * or reference counting are done here (see the XXX above).
 */
static int
ctl_open(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	return (0);
}

/*
 * cdev close handler for /dev/cam/ctl.  Nothing to tear down; always
 * succeeds.
 */
static int
ctl_close(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	return (0);
}

/*
 * Bring online every registered port whose type matches the port_type
 * bitmask.  In HA (non-single) mode, first exchange a CTL_MSG_SYNC_FE
 * message with the peer controller so that both sides enable their
 * frontends together.  Always returns 0.
 */
int
ctl_port_enable(ctl_port_type port_type)
{
	struct ctl_softc *softc = control_softc;
	struct ctl_port *port;

	if (softc->is_single == 0) {
		union ctl_ha_msg msg_info;
		int isc_retval;

#if 0
		printf("%s: HA mode, synchronizing frontend enable\n",
		        __func__);
#endif
		msg_info.hdr.msg_type = CTL_MSG_SYNC_FE;
		if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info,
		        sizeof(msg_info), 1 )) > CTL_HA_STATUS_SUCCESS) {
			printf("Sync msg send error retval %d\n", isc_retval);
		}
		/*
		 * If we have not already seen the peer's sync message
		 * (rcv_sync_msg), block waiting for it.  NOTE(review): the
		 * receive status is assigned to isc_retval but never
		 * checked -- presumably a failed receive is tolerated here;
		 * confirm against the HA protocol expectations.
		 */
		if (!rcv_sync_msg) {
			isc_retval=ctl_ha_msg_recv(CTL_HA_CHAN_CTL, &msg_info,
			        sizeof(msg_info), 1);
		}
		/*
		 * With the #if 0 block compiled out, the brace below closes
		 * the is_single == 0 case; the single-mode "else" branch is
		 * disabled along with its printf.
		 */
#if 0
	        printf("CTL:Frontend Enable\n");
	} else {
	        printf("%s: single mode, skipping frontend synchronization\n",
	                __func__);
#endif
	}

	STAILQ_FOREACH(port, &softc->port_list, links) {
		if (port_type & port->port_type)
		{
#if 0
			printf("port %d\n", port->targ_port);
#endif
			ctl_port_online(port);
		}
	}

	return (0);
}

/*
 * Take offline every registered port whose type matches the port_type
 * bitmask.  No HA synchronization is done on the disable path.  Always
 * returns 0.
 */
int
ctl_port_disable(ctl_port_type port_type)
{
	struct ctl_softc *softc;
	struct ctl_port *port;

	softc = control_softc;

	STAILQ_FOREACH(port, &softc->port_list, links) {
		if (port_type & port->port_type)
			ctl_port_offline(port);
	}

	return (0);
}

/*
 * Returns 0 for success, 1 for failure.
 * Currently the only failure mode is if there aren't enough entries
 * allocated.  So, in case of a failure, look at num_entries_dropped,
 * reallocate and try again.
1378 */ 1379int 1380ctl_port_list(struct ctl_port_entry *entries, int num_entries_alloced, 1381 int *num_entries_filled, int *num_entries_dropped, 1382 ctl_port_type port_type, int no_virtual) 1383{ 1384 struct ctl_softc *softc; 1385 struct ctl_port *port; 1386 int entries_dropped, entries_filled; 1387 int retval; 1388 int i; 1389 1390 softc = control_softc; 1391 1392 retval = 0; 1393 entries_filled = 0; 1394 entries_dropped = 0; 1395 1396 i = 0; 1397 mtx_lock(&softc->ctl_lock); 1398 STAILQ_FOREACH(port, &softc->port_list, links) { 1399 struct ctl_port_entry *entry; 1400 1401 if ((port->port_type & port_type) == 0) 1402 continue; 1403 1404 if ((no_virtual != 0) 1405 && (port->virtual_port != 0)) 1406 continue; 1407 1408 if (entries_filled >= num_entries_alloced) { 1409 entries_dropped++; 1410 continue; 1411 } 1412 entry = &entries[i]; 1413 1414 entry->port_type = port->port_type; 1415 strlcpy(entry->port_name, port->port_name, 1416 sizeof(entry->port_name)); 1417 entry->physical_port = port->physical_port; 1418 entry->virtual_port = port->virtual_port; 1419 entry->wwnn = port->wwnn; 1420 entry->wwpn = port->wwpn; 1421 1422 i++; 1423 entries_filled++; 1424 } 1425 1426 mtx_unlock(&softc->ctl_lock); 1427 1428 if (entries_dropped > 0) 1429 retval = 1; 1430 1431 *num_entries_dropped = entries_dropped; 1432 *num_entries_filled = entries_filled; 1433 1434 return (retval); 1435} 1436 1437static void 1438ctl_ioctl_online(void *arg) 1439{ 1440 struct ctl_ioctl_info *ioctl_info; 1441 1442 ioctl_info = (struct ctl_ioctl_info *)arg; 1443 1444 ioctl_info->flags |= CTL_IOCTL_FLAG_ENABLED; 1445} 1446 1447static void 1448ctl_ioctl_offline(void *arg) 1449{ 1450 struct ctl_ioctl_info *ioctl_info; 1451 1452 ioctl_info = (struct ctl_ioctl_info *)arg; 1453 1454 ioctl_info->flags &= ~CTL_IOCTL_FLAG_ENABLED; 1455} 1456 1457/* 1458 * Remove an initiator by port number and initiator ID. 1459 * Returns 0 for success, -1 for failure. 
1460 */ 1461int 1462ctl_remove_initiator(struct ctl_port *port, int iid) 1463{ 1464 struct ctl_softc *softc = control_softc; 1465 1466 mtx_assert(&softc->ctl_lock, MA_NOTOWNED); 1467 1468 if (iid > CTL_MAX_INIT_PER_PORT) { 1469 printf("%s: initiator ID %u > maximun %u!\n", 1470 __func__, iid, CTL_MAX_INIT_PER_PORT); 1471 return (-1); 1472 } 1473 1474 mtx_lock(&softc->ctl_lock); 1475 port->wwpn_iid[iid].in_use--; 1476 port->wwpn_iid[iid].last_use = time_uptime; 1477 mtx_unlock(&softc->ctl_lock); 1478 1479 return (0); 1480} 1481 1482/* 1483 * Add an initiator to the initiator map. 1484 * Returns iid for success, < 0 for failure. 1485 */ 1486int 1487ctl_add_initiator(struct ctl_port *port, int iid, uint64_t wwpn, char *name) 1488{ 1489 struct ctl_softc *softc = control_softc; 1490 time_t best_time; 1491 int i, best; 1492 1493 mtx_assert(&softc->ctl_lock, MA_NOTOWNED); 1494 1495 if (iid >= CTL_MAX_INIT_PER_PORT) { 1496 printf("%s: WWPN %#jx initiator ID %u > maximum %u!\n", 1497 __func__, wwpn, iid, CTL_MAX_INIT_PER_PORT); 1498 free(name, M_CTL); 1499 return (-1); 1500 } 1501 1502 mtx_lock(&softc->ctl_lock); 1503 1504 if (iid < 0 && (wwpn != 0 || name != NULL)) { 1505 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 1506 if (wwpn != 0 && wwpn == port->wwpn_iid[i].wwpn) { 1507 iid = i; 1508 break; 1509 } 1510 if (name != NULL && port->wwpn_iid[i].name != NULL && 1511 strcmp(name, port->wwpn_iid[i].name) == 0) { 1512 iid = i; 1513 break; 1514 } 1515 } 1516 } 1517 1518 if (iid < 0) { 1519 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 1520 if (port->wwpn_iid[i].in_use == 0 && 1521 port->wwpn_iid[i].wwpn == 0 && 1522 port->wwpn_iid[i].name == NULL) { 1523 iid = i; 1524 break; 1525 } 1526 } 1527 } 1528 1529 if (iid < 0) { 1530 best = -1; 1531 best_time = INT32_MAX; 1532 for (i = 0; i < CTL_MAX_INIT_PER_PORT; i++) { 1533 if (port->wwpn_iid[i].in_use == 0) { 1534 if (port->wwpn_iid[i].last_use < best_time) { 1535 best = i; 1536 best_time = port->wwpn_iid[i].last_use; 1537 } 1538 
} 1539 } 1540 iid = best; 1541 } 1542 1543 if (iid < 0) { 1544 mtx_unlock(&softc->ctl_lock); 1545 free(name, M_CTL); 1546 return (-2); 1547 } 1548 1549 if (port->wwpn_iid[iid].in_use > 0 && (wwpn != 0 || name != NULL)) { 1550 /* 1551 * This is not an error yet. 1552 */ 1553 if (wwpn != 0 && wwpn == port->wwpn_iid[iid].wwpn) { 1554#if 0 1555 printf("%s: port %d iid %u WWPN %#jx arrived" 1556 " again\n", __func__, port->targ_port, 1557 iid, (uintmax_t)wwpn); 1558#endif 1559 goto take; 1560 } 1561 if (name != NULL && port->wwpn_iid[iid].name != NULL && 1562 strcmp(name, port->wwpn_iid[iid].name) == 0) { 1563#if 0 1564 printf("%s: port %d iid %u name '%s' arrived" 1565 " again\n", __func__, port->targ_port, 1566 iid, name); 1567#endif 1568 goto take; 1569 } 1570 1571 /* 1572 * This is an error, but what do we do about it? The 1573 * driver is telling us we have a new WWPN for this 1574 * initiator ID, so we pretty much need to use it. 1575 */ 1576 printf("%s: port %d iid %u WWPN %#jx '%s' arrived," 1577 " but WWPN %#jx '%s' is still at that address\n", 1578 __func__, port->targ_port, iid, wwpn, name, 1579 (uintmax_t)port->wwpn_iid[iid].wwpn, 1580 port->wwpn_iid[iid].name); 1581 1582 /* 1583 * XXX KDM clear have_ca and ua_pending on each LUN for 1584 * this initiator. 
1585 */ 1586 } 1587take: 1588 free(port->wwpn_iid[iid].name, M_CTL); 1589 port->wwpn_iid[iid].name = name; 1590 port->wwpn_iid[iid].wwpn = wwpn; 1591 port->wwpn_iid[iid].in_use++; 1592 mtx_unlock(&softc->ctl_lock); 1593 1594 return (iid); 1595} 1596 1597static int 1598ctl_create_iid(struct ctl_port *port, int iid, uint8_t *buf) 1599{ 1600 int len; 1601 1602 switch (port->port_type) { 1603 case CTL_PORT_FC: 1604 { 1605 struct scsi_transportid_fcp *id = 1606 (struct scsi_transportid_fcp *)buf; 1607 if (port->wwpn_iid[iid].wwpn == 0) 1608 return (0); 1609 memset(id, 0, sizeof(*id)); 1610 id->format_protocol = SCSI_PROTO_FC; 1611 scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->n_port_name); 1612 return (sizeof(*id)); 1613 } 1614 case CTL_PORT_ISCSI: 1615 { 1616 struct scsi_transportid_iscsi_port *id = 1617 (struct scsi_transportid_iscsi_port *)buf; 1618 if (port->wwpn_iid[iid].name == NULL) 1619 return (0); 1620 memset(id, 0, 256); 1621 id->format_protocol = SCSI_TRN_ISCSI_FORMAT_PORT | 1622 SCSI_PROTO_ISCSI; 1623 len = strlcpy(id->iscsi_name, port->wwpn_iid[iid].name, 252) + 1; 1624 len = roundup2(min(len, 252), 4); 1625 scsi_ulto2b(len, id->additional_length); 1626 return (sizeof(*id) + len); 1627 } 1628 case CTL_PORT_SAS: 1629 { 1630 struct scsi_transportid_sas *id = 1631 (struct scsi_transportid_sas *)buf; 1632 if (port->wwpn_iid[iid].wwpn == 0) 1633 return (0); 1634 memset(id, 0, sizeof(*id)); 1635 id->format_protocol = SCSI_PROTO_SAS; 1636 scsi_u64to8b(port->wwpn_iid[iid].wwpn, id->sas_address); 1637 return (sizeof(*id)); 1638 } 1639 default: 1640 { 1641 struct scsi_transportid_spi *id = 1642 (struct scsi_transportid_spi *)buf; 1643 memset(id, 0, sizeof(*id)); 1644 id->format_protocol = SCSI_PROTO_SPI; 1645 scsi_ulto2b(iid, id->scsi_addr); 1646 scsi_ulto2b(port->targ_port, id->rel_trgt_port_id); 1647 return (sizeof(*id)); 1648 } 1649 } 1650} 1651 1652static int 1653ctl_ioctl_lun_enable(void *arg, struct ctl_id targ_id, int lun_id) 1654{ 1655 return (0); 1656} 1657 
1658static int 1659ctl_ioctl_lun_disable(void *arg, struct ctl_id targ_id, int lun_id) 1660{ 1661 return (0); 1662} 1663 1664/* 1665 * Data movement routine for the CTL ioctl frontend port. 1666 */ 1667static int 1668ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio) 1669{ 1670 struct ctl_sg_entry *ext_sglist, *kern_sglist; 1671 struct ctl_sg_entry ext_entry, kern_entry; 1672 int ext_sglen, ext_sg_entries, kern_sg_entries; 1673 int ext_sg_start, ext_offset; 1674 int len_to_copy, len_copied; 1675 int kern_watermark, ext_watermark; 1676 int ext_sglist_malloced; 1677 int i, j; 1678 1679 ext_sglist_malloced = 0; 1680 ext_sg_start = 0; 1681 ext_offset = 0; 1682 1683 CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove\n")); 1684 1685 /* 1686 * If this flag is set, fake the data transfer. 1687 */ 1688 if (ctsio->io_hdr.flags & CTL_FLAG_NO_DATAMOVE) { 1689 ctsio->ext_data_filled = ctsio->ext_data_len; 1690 goto bailout; 1691 } 1692 1693 /* 1694 * To simplify things here, if we have a single buffer, stick it in 1695 * a S/G entry and just make it a single entry S/G list. 
1696 */ 1697 if (ctsio->io_hdr.flags & CTL_FLAG_EDPTR_SGLIST) { 1698 int len_seen; 1699 1700 ext_sglen = ctsio->ext_sg_entries * sizeof(*ext_sglist); 1701 1702 ext_sglist = (struct ctl_sg_entry *)malloc(ext_sglen, M_CTL, 1703 M_WAITOK); 1704 ext_sglist_malloced = 1; 1705 if (copyin(ctsio->ext_data_ptr, ext_sglist, 1706 ext_sglen) != 0) { 1707 ctl_set_internal_failure(ctsio, 1708 /*sks_valid*/ 0, 1709 /*retry_count*/ 0); 1710 goto bailout; 1711 } 1712 ext_sg_entries = ctsio->ext_sg_entries; 1713 len_seen = 0; 1714 for (i = 0; i < ext_sg_entries; i++) { 1715 if ((len_seen + ext_sglist[i].len) >= 1716 ctsio->ext_data_filled) { 1717 ext_sg_start = i; 1718 ext_offset = ctsio->ext_data_filled - len_seen; 1719 break; 1720 } 1721 len_seen += ext_sglist[i].len; 1722 } 1723 } else { 1724 ext_sglist = &ext_entry; 1725 ext_sglist->addr = ctsio->ext_data_ptr; 1726 ext_sglist->len = ctsio->ext_data_len; 1727 ext_sg_entries = 1; 1728 ext_sg_start = 0; 1729 ext_offset = ctsio->ext_data_filled; 1730 } 1731 1732 if (ctsio->kern_sg_entries > 0) { 1733 kern_sglist = (struct ctl_sg_entry *)ctsio->kern_data_ptr; 1734 kern_sg_entries = ctsio->kern_sg_entries; 1735 } else { 1736 kern_sglist = &kern_entry; 1737 kern_sglist->addr = ctsio->kern_data_ptr; 1738 kern_sglist->len = ctsio->kern_data_len; 1739 kern_sg_entries = 1; 1740 } 1741 1742 1743 kern_watermark = 0; 1744 ext_watermark = ext_offset; 1745 len_copied = 0; 1746 for (i = ext_sg_start, j = 0; 1747 i < ext_sg_entries && j < kern_sg_entries;) { 1748 uint8_t *ext_ptr, *kern_ptr; 1749 1750 len_to_copy = MIN(ext_sglist[i].len - ext_watermark, 1751 kern_sglist[j].len - kern_watermark); 1752 1753 ext_ptr = (uint8_t *)ext_sglist[i].addr; 1754 ext_ptr = ext_ptr + ext_watermark; 1755 if (ctsio->io_hdr.flags & CTL_FLAG_BUS_ADDR) { 1756 /* 1757 * XXX KDM fix this! 
1758 */ 1759 panic("need to implement bus address support"); 1760#if 0 1761 kern_ptr = bus_to_virt(kern_sglist[j].addr); 1762#endif 1763 } else 1764 kern_ptr = (uint8_t *)kern_sglist[j].addr; 1765 kern_ptr = kern_ptr + kern_watermark; 1766 1767 kern_watermark += len_to_copy; 1768 ext_watermark += len_to_copy; 1769 1770 if ((ctsio->io_hdr.flags & CTL_FLAG_DATA_MASK) == 1771 CTL_FLAG_DATA_IN) { 1772 CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: copying %d " 1773 "bytes to user\n", len_to_copy)); 1774 CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: from %p " 1775 "to %p\n", kern_ptr, ext_ptr)); 1776 if (copyout(kern_ptr, ext_ptr, len_to_copy) != 0) { 1777 ctl_set_internal_failure(ctsio, 1778 /*sks_valid*/ 0, 1779 /*retry_count*/ 0); 1780 goto bailout; 1781 } 1782 } else { 1783 CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: copying %d " 1784 "bytes from user\n", len_to_copy)); 1785 CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: from %p " 1786 "to %p\n", ext_ptr, kern_ptr)); 1787 if (copyin(ext_ptr, kern_ptr, len_to_copy)!= 0){ 1788 ctl_set_internal_failure(ctsio, 1789 /*sks_valid*/ 0, 1790 /*retry_count*/0); 1791 goto bailout; 1792 } 1793 } 1794 1795 len_copied += len_to_copy; 1796 1797 if (ext_sglist[i].len == ext_watermark) { 1798 i++; 1799 ext_watermark = 0; 1800 } 1801 1802 if (kern_sglist[j].len == kern_watermark) { 1803 j++; 1804 kern_watermark = 0; 1805 } 1806 } 1807 1808 ctsio->ext_data_filled += len_copied; 1809 1810 CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: ext_sg_entries: %d, " 1811 "kern_sg_entries: %d\n", ext_sg_entries, 1812 kern_sg_entries)); 1813 CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: ext_data_len = %d, " 1814 "kern_data_len = %d\n", ctsio->ext_data_len, 1815 ctsio->kern_data_len)); 1816 1817 1818 /* XXX KDM set residual?? 
*/ 1819bailout: 1820 1821 if (ext_sglist_malloced != 0) 1822 free(ext_sglist, M_CTL); 1823 1824 return (CTL_RETVAL_COMPLETE); 1825} 1826 1827/* 1828 * Serialize a command that went down the "wrong" side, and so was sent to 1829 * this controller for execution. The logic is a little different than the 1830 * standard case in ctl_scsiio_precheck(). Errors in this case need to get 1831 * sent back to the other side, but in the success case, we execute the 1832 * command on this side (XFER mode) or tell the other side to execute it 1833 * (SER_ONLY mode). 1834 */ 1835static int 1836ctl_serialize_other_sc_cmd(struct ctl_scsiio *ctsio) 1837{ 1838 struct ctl_softc *softc; 1839 union ctl_ha_msg msg_info; 1840 struct ctl_lun *lun; 1841 int retval = 0; 1842 uint32_t targ_lun; 1843 1844 softc = control_softc; 1845 1846 targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun; 1847 lun = softc->ctl_luns[targ_lun]; 1848 if (lun==NULL) 1849 { 1850 /* 1851 * Why isn't LUN defined? The other side wouldn't 1852 * send a cmd if the LUN is undefined. 
1853 */ 1854 printf("%s: Bad JUJU!, LUN is NULL!\n", __func__); 1855 1856 /* "Logical unit not supported" */ 1857 ctl_set_sense_data(&msg_info.scsi.sense_data, 1858 lun, 1859 /*sense_format*/SSD_TYPE_NONE, 1860 /*current_error*/ 1, 1861 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 1862 /*asc*/ 0x25, 1863 /*ascq*/ 0x00, 1864 SSD_ELEM_NONE); 1865 1866 msg_info.scsi.sense_len = SSD_FULL_SIZE; 1867 msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND; 1868 msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 1869 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 1870 msg_info.hdr.serializing_sc = NULL; 1871 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; 1872 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 1873 sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) { 1874 } 1875 return(1); 1876 1877 } 1878 1879 mtx_lock(&lun->lun_lock); 1880 TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 1881 1882 switch (ctl_check_ooa(lun, (union ctl_io *)ctsio, 1883 (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr, ctl_ooaq, 1884 ooa_links))) { 1885 case CTL_ACTION_BLOCK: 1886 ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED; 1887 TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr, 1888 blocked_links); 1889 break; 1890 case CTL_ACTION_PASS: 1891 case CTL_ACTION_SKIP: 1892 if (softc->ha_mode == CTL_HA_MODE_XFER) { 1893 ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 1894 ctl_enqueue_rtr((union ctl_io *)ctsio); 1895 } else { 1896 1897 /* send msg back to other side */ 1898 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 1899 msg_info.hdr.serializing_sc = (union ctl_io *)ctsio; 1900 msg_info.hdr.msg_type = CTL_MSG_R2R; 1901#if 0 1902 printf("2. 
pOrig %x\n", (int)msg_info.hdr.original_sc); 1903#endif 1904 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 1905 sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) { 1906 } 1907 } 1908 break; 1909 case CTL_ACTION_OVERLAP: 1910 /* OVERLAPPED COMMANDS ATTEMPTED */ 1911 ctl_set_sense_data(&msg_info.scsi.sense_data, 1912 lun, 1913 /*sense_format*/SSD_TYPE_NONE, 1914 /*current_error*/ 1, 1915 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 1916 /*asc*/ 0x4E, 1917 /*ascq*/ 0x00, 1918 SSD_ELEM_NONE); 1919 1920 msg_info.scsi.sense_len = SSD_FULL_SIZE; 1921 msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND; 1922 msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 1923 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 1924 msg_info.hdr.serializing_sc = NULL; 1925 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; 1926#if 0 1927 printf("BAD JUJU:Major Bummer Overlap\n"); 1928#endif 1929 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 1930 retval = 1; 1931 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 1932 sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) { 1933 } 1934 break; 1935 case CTL_ACTION_OVERLAP_TAG: 1936 /* TAGGED OVERLAPPED COMMANDS (NN = QUEUE TAG) */ 1937 ctl_set_sense_data(&msg_info.scsi.sense_data, 1938 lun, 1939 /*sense_format*/SSD_TYPE_NONE, 1940 /*current_error*/ 1, 1941 /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, 1942 /*asc*/ 0x4D, 1943 /*ascq*/ ctsio->tag_num & 0xff, 1944 SSD_ELEM_NONE); 1945 1946 msg_info.scsi.sense_len = SSD_FULL_SIZE; 1947 msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND; 1948 msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 1949 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 1950 msg_info.hdr.serializing_sc = NULL; 1951 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; 1952#if 0 1953 printf("BAD JUJU:Major Bummer Overlap Tag\n"); 1954#endif 1955 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 1956 retval = 1; 1957 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 1958 sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) { 1959 } 1960 
break; 1961 case CTL_ACTION_ERROR: 1962 default: 1963 /* "Internal target failure" */ 1964 ctl_set_sense_data(&msg_info.scsi.sense_data, 1965 lun, 1966 /*sense_format*/SSD_TYPE_NONE, 1967 /*current_error*/ 1, 1968 /*sense_key*/ SSD_KEY_HARDWARE_ERROR, 1969 /*asc*/ 0x44, 1970 /*ascq*/ 0x00, 1971 SSD_ELEM_NONE); 1972 1973 msg_info.scsi.sense_len = SSD_FULL_SIZE; 1974 msg_info.scsi.scsi_status = SCSI_STATUS_CHECK_COND; 1975 msg_info.hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE; 1976 msg_info.hdr.original_sc = ctsio->io_hdr.original_sc; 1977 msg_info.hdr.serializing_sc = NULL; 1978 msg_info.hdr.msg_type = CTL_MSG_BAD_JUJU; 1979#if 0 1980 printf("BAD JUJU:Major Bummer HW Error\n"); 1981#endif 1982 TAILQ_REMOVE(&lun->ooa_queue, &ctsio->io_hdr, ooa_links); 1983 retval = 1; 1984 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_info, 1985 sizeof(msg_info), 0 ) > CTL_HA_STATUS_SUCCESS) { 1986 } 1987 break; 1988 } 1989 mtx_unlock(&lun->lun_lock); 1990 return (retval); 1991} 1992 1993static int 1994ctl_ioctl_submit_wait(union ctl_io *io) 1995{ 1996 struct ctl_fe_ioctl_params params; 1997 ctl_fe_ioctl_state last_state; 1998 int done, retval; 1999 2000 retval = 0; 2001 2002 bzero(¶ms, sizeof(params)); 2003 2004 mtx_init(¶ms.ioctl_mtx, "ctliocmtx", NULL, MTX_DEF); 2005 cv_init(¶ms.sem, "ctlioccv"); 2006 params.state = CTL_IOCTL_INPROG; 2007 last_state = params.state; 2008 2009 io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = ¶ms; 2010 2011 CTL_DEBUG_PRINT(("ctl_ioctl_submit_wait\n")); 2012 2013 /* This shouldn't happen */ 2014 if ((retval = ctl_queue(io)) != CTL_RETVAL_COMPLETE) 2015 return (retval); 2016 2017 done = 0; 2018 2019 do { 2020 mtx_lock(¶ms.ioctl_mtx); 2021 /* 2022 * Check the state here, and don't sleep if the state has 2023 * already changed (i.e. wakeup has already occured, but we 2024 * weren't waiting yet). 2025 */ 2026 if (params.state == last_state) { 2027 /* XXX KDM cv_wait_sig instead? 
*/ 2028 cv_wait(¶ms.sem, ¶ms.ioctl_mtx); 2029 } 2030 last_state = params.state; 2031 2032 switch (params.state) { 2033 case CTL_IOCTL_INPROG: 2034 /* Why did we wake up? */ 2035 /* XXX KDM error here? */ 2036 mtx_unlock(¶ms.ioctl_mtx); 2037 break; 2038 case CTL_IOCTL_DATAMOVE: 2039 CTL_DEBUG_PRINT(("got CTL_IOCTL_DATAMOVE\n")); 2040 2041 /* 2042 * change last_state back to INPROG to avoid 2043 * deadlock on subsequent data moves. 2044 */ 2045 params.state = last_state = CTL_IOCTL_INPROG; 2046 2047 mtx_unlock(¶ms.ioctl_mtx); 2048 ctl_ioctl_do_datamove(&io->scsiio); 2049 /* 2050 * Note that in some cases, most notably writes, 2051 * this will queue the I/O and call us back later. 2052 * In other cases, generally reads, this routine 2053 * will immediately call back and wake us up, 2054 * probably using our own context. 2055 */ 2056 io->scsiio.be_move_done(io); 2057 break; 2058 case CTL_IOCTL_DONE: 2059 mtx_unlock(¶ms.ioctl_mtx); 2060 CTL_DEBUG_PRINT(("got CTL_IOCTL_DONE\n")); 2061 done = 1; 2062 break; 2063 default: 2064 mtx_unlock(¶ms.ioctl_mtx); 2065 /* XXX KDM error here? 
*/ 2066 break; 2067 } 2068 } while (done == 0); 2069 2070 mtx_destroy(¶ms.ioctl_mtx); 2071 cv_destroy(¶ms.sem); 2072 2073 return (CTL_RETVAL_COMPLETE); 2074} 2075 2076static void 2077ctl_ioctl_datamove(union ctl_io *io) 2078{ 2079 struct ctl_fe_ioctl_params *params; 2080 2081 params = (struct ctl_fe_ioctl_params *) 2082 io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr; 2083 2084 mtx_lock(¶ms->ioctl_mtx); 2085 params->state = CTL_IOCTL_DATAMOVE; 2086 cv_broadcast(¶ms->sem); 2087 mtx_unlock(¶ms->ioctl_mtx); 2088} 2089 2090static void 2091ctl_ioctl_done(union ctl_io *io) 2092{ 2093 struct ctl_fe_ioctl_params *params; 2094 2095 params = (struct ctl_fe_ioctl_params *) 2096 io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr; 2097 2098 mtx_lock(¶ms->ioctl_mtx); 2099 params->state = CTL_IOCTL_DONE; 2100 cv_broadcast(¶ms->sem); 2101 mtx_unlock(¶ms->ioctl_mtx); 2102} 2103 2104static void 2105ctl_ioctl_hard_startstop_callback(void *arg, struct cfi_metatask *metatask) 2106{ 2107 struct ctl_fe_ioctl_startstop_info *sd_info; 2108 2109 sd_info = (struct ctl_fe_ioctl_startstop_info *)arg; 2110 2111 sd_info->hs_info.status = metatask->status; 2112 sd_info->hs_info.total_luns = metatask->taskinfo.startstop.total_luns; 2113 sd_info->hs_info.luns_complete = 2114 metatask->taskinfo.startstop.luns_complete; 2115 sd_info->hs_info.luns_failed = metatask->taskinfo.startstop.luns_failed; 2116 2117 cv_broadcast(&sd_info->sem); 2118} 2119 2120static void 2121ctl_ioctl_bbrread_callback(void *arg, struct cfi_metatask *metatask) 2122{ 2123 struct ctl_fe_ioctl_bbrread_info *fe_bbr_info; 2124 2125 fe_bbr_info = (struct ctl_fe_ioctl_bbrread_info *)arg; 2126 2127 mtx_lock(fe_bbr_info->lock); 2128 fe_bbr_info->bbr_info->status = metatask->status; 2129 fe_bbr_info->bbr_info->bbr_status = metatask->taskinfo.bbrread.status; 2130 fe_bbr_info->wakeup_done = 1; 2131 mtx_unlock(fe_bbr_info->lock); 2132 2133 cv_broadcast(&fe_bbr_info->sem); 2134} 2135 2136/* 2137 * Returns 0 for success, errno for failure. 
 */
/*
 * Walk a LUN's order-of-arrival queue under lun_lock and describe each
 * queued command in kern_entries, up to ooa_hdr->alloc_num entries.
 * *cur_fill_num is incremented for every command on the queue (even those
 * that don't fit), so the caller learns the total count.  Always returns
 * 0 at present.
 */
static int
ctl_ioctl_fill_ooa(struct ctl_lun *lun, uint32_t *cur_fill_num,
		   struct ctl_ooa *ooa_hdr, struct ctl_ooa_entry *kern_entries)
{
	union ctl_io *io;
	int retval;

	retval = 0;

	mtx_lock(&lun->lun_lock);
	for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); (io != NULL);
	     (*cur_fill_num)++, io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr,
	     ooa_links)) {
		struct ctl_ooa_entry *entry;

		/*
		 * If we've got more than we can fit, just count the
		 * remaining entries.
		 */
		if (*cur_fill_num >= ooa_hdr->alloc_num)
			continue;

		entry = &kern_entries[*cur_fill_num];

		entry->tag_num = io->scsiio.tag_num;
		entry->lun_num = lun->lun;
#ifdef CTL_TIME_IO
		entry->start_bt = io->io_hdr.start_bt;
#endif
		bcopy(io->scsiio.cdb, entry->cdb, io->scsiio.cdb_len);
		entry->cdb_len = io->scsiio.cdb_len;
		/* Translate the io_hdr flags into OOA command flags. */
		if (io->io_hdr.flags & CTL_FLAG_BLOCKED)
			entry->cmd_flags |= CTL_OOACMD_FLAG_BLOCKED;

		if (io->io_hdr.flags & CTL_FLAG_DMA_INPROG)
			entry->cmd_flags |= CTL_OOACMD_FLAG_DMA;

		if (io->io_hdr.flags & CTL_FLAG_ABORT)
			entry->cmd_flags |= CTL_OOACMD_FLAG_ABORT;

		if (io->io_hdr.flags & CTL_FLAG_IS_WAS_ON_RTR)
			entry->cmd_flags |= CTL_OOACMD_FLAG_RTR;

		if (io->io_hdr.flags & CTL_FLAG_DMA_QUEUED)
			entry->cmd_flags |= CTL_OOACMD_FLAG_DMA_QUEUED;
	}
	mtx_unlock(&lun->lun_lock);

	return (retval);
}

/*
 * Allocate a zeroed kernel buffer of 'len' bytes (M_CTL, may sleep) and
 * copy in that many bytes from user_addr.  On copyin failure, writes a
 * message into error_str, frees the buffer, and returns NULL.  The caller
 * owns and must free the returned buffer.
 */
static void *
ctl_copyin_alloc(void *user_addr, int len, char *error_str,
		 size_t error_str_len)
{
	void *kptr;

	kptr = malloc(len, M_CTL, M_WAITOK | M_ZERO);

	if (copyin(user_addr, kptr, len) != 0) {
		snprintf(error_str, error_str_len, "Error copying %d bytes "
			 "from user address %p to kernel address %p", len,
			 user_addr, kptr);
		free(kptr, M_CTL);
		return (NULL);
	}

	return (kptr);
}

/*
 * Free the kernel-side copies (kname/kvalue) of a backend argument array
 * previously built by ctl_copyin_args(), then the array itself.
 */
static void
2210ctl_free_args(int num_args, struct ctl_be_arg *args) 2211{ 2212 int i; 2213 2214 if (args == NULL) 2215 return; 2216 2217 for (i = 0; i < num_args; i++) { 2218 free(args[i].kname, M_CTL); 2219 free(args[i].kvalue, M_CTL); 2220 } 2221 2222 free(args, M_CTL); 2223} 2224 2225static struct ctl_be_arg * 2226ctl_copyin_args(int num_args, struct ctl_be_arg *uargs, 2227 char *error_str, size_t error_str_len) 2228{ 2229 struct ctl_be_arg *args; 2230 int i; 2231 2232 args = ctl_copyin_alloc(uargs, num_args * sizeof(*args), 2233 error_str, error_str_len); 2234 2235 if (args == NULL) 2236 goto bailout; 2237 2238 for (i = 0; i < num_args; i++) { 2239 args[i].kname = NULL; 2240 args[i].kvalue = NULL; 2241 } 2242 2243 for (i = 0; i < num_args; i++) { 2244 uint8_t *tmpptr; 2245 2246 args[i].kname = ctl_copyin_alloc(args[i].name, 2247 args[i].namelen, error_str, error_str_len); 2248 if (args[i].kname == NULL) 2249 goto bailout; 2250 2251 if (args[i].kname[args[i].namelen - 1] != '\0') { 2252 snprintf(error_str, error_str_len, "Argument %d " 2253 "name is not NUL-terminated", i); 2254 goto bailout; 2255 } 2256 2257 if (args[i].flags & CTL_BEARG_RD) { 2258 tmpptr = ctl_copyin_alloc(args[i].value, 2259 args[i].vallen, error_str, error_str_len); 2260 if (tmpptr == NULL) 2261 goto bailout; 2262 if ((args[i].flags & CTL_BEARG_ASCII) 2263 && (tmpptr[args[i].vallen - 1] != '\0')) { 2264 snprintf(error_str, error_str_len, "Argument " 2265 "%d value is not NUL-terminated", i); 2266 goto bailout; 2267 } 2268 args[i].kvalue = tmpptr; 2269 } else { 2270 args[i].kvalue = malloc(args[i].vallen, 2271 M_CTL, M_WAITOK | M_ZERO); 2272 } 2273 } 2274 2275 return (args); 2276bailout: 2277 2278 ctl_free_args(num_args, args); 2279 2280 return (NULL); 2281} 2282 2283static void 2284ctl_copyout_args(int num_args, struct ctl_be_arg *args) 2285{ 2286 int i; 2287 2288 for (i = 0; i < num_args; i++) { 2289 if (args[i].flags & CTL_BEARG_WR) 2290 copyout(args[i].kvalue, args[i].value, args[i].vallen); 2291 } 
2292} 2293 2294/* 2295 * Escape characters that are illegal or not recommended in XML. 2296 */ 2297int 2298ctl_sbuf_printf_esc(struct sbuf *sb, char *str, int size) 2299{ 2300 char *end = str + size; 2301 int retval; 2302 2303 retval = 0; 2304 2305 for (; *str && str < end; str++) { 2306 switch (*str) { 2307 case '&': 2308 retval = sbuf_printf(sb, "&"); 2309 break; 2310 case '>': 2311 retval = sbuf_printf(sb, ">"); 2312 break; 2313 case '<': 2314 retval = sbuf_printf(sb, "<"); 2315 break; 2316 default: 2317 retval = sbuf_putc(sb, *str); 2318 break; 2319 } 2320 2321 if (retval != 0) 2322 break; 2323 2324 } 2325 2326 return (retval); 2327} 2328 2329static void 2330ctl_id_sbuf(struct ctl_devid *id, struct sbuf *sb) 2331{ 2332 struct scsi_vpd_id_descriptor *desc; 2333 int i; 2334 2335 if (id == NULL || id->len < 4) 2336 return; 2337 desc = (struct scsi_vpd_id_descriptor *)id->data; 2338 switch (desc->id_type & SVPD_ID_TYPE_MASK) { 2339 case SVPD_ID_TYPE_T10: 2340 sbuf_printf(sb, "t10."); 2341 break; 2342 case SVPD_ID_TYPE_EUI64: 2343 sbuf_printf(sb, "eui."); 2344 break; 2345 case SVPD_ID_TYPE_NAA: 2346 sbuf_printf(sb, "naa."); 2347 break; 2348 case SVPD_ID_TYPE_SCSI_NAME: 2349 break; 2350 } 2351 switch (desc->proto_codeset & SVPD_ID_CODESET_MASK) { 2352 case SVPD_ID_CODESET_BINARY: 2353 for (i = 0; i < desc->length; i++) 2354 sbuf_printf(sb, "%02x", desc->identifier[i]); 2355 break; 2356 case SVPD_ID_CODESET_ASCII: 2357 sbuf_printf(sb, "%.*s", (int)desc->length, 2358 (char *)desc->identifier); 2359 break; 2360 case SVPD_ID_CODESET_UTF8: 2361 sbuf_printf(sb, "%s", (char *)desc->identifier); 2362 break; 2363 } 2364} 2365 2366static int 2367ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, 2368 struct thread *td) 2369{ 2370 struct ctl_softc *softc; 2371 int retval; 2372 2373 softc = control_softc; 2374 2375 retval = 0; 2376 2377 switch (cmd) { 2378 case CTL_IO: { 2379 union ctl_io *io; 2380 void *pool_tmp; 2381 2382 /* 2383 * If we haven't been "enabled", 
don't allow any SCSI I/O 2384 * to this FETD. 2385 */ 2386 if ((softc->ioctl_info.flags & CTL_IOCTL_FLAG_ENABLED) == 0) { 2387 retval = EPERM; 2388 break; 2389 } 2390 2391 io = ctl_alloc_io(softc->ioctl_info.port.ctl_pool_ref); 2392 2393 /* 2394 * Need to save the pool reference so it doesn't get 2395 * spammed by the user's ctl_io. 2396 */ 2397 pool_tmp = io->io_hdr.pool; 2398 memcpy(io, (void *)addr, sizeof(*io)); 2399 io->io_hdr.pool = pool_tmp; 2400 2401 /* 2402 * No status yet, so make sure the status is set properly. 2403 */ 2404 io->io_hdr.status = CTL_STATUS_NONE; 2405 2406 /* 2407 * The user sets the initiator ID, target and LUN IDs. 2408 */ 2409 io->io_hdr.nexus.targ_port = softc->ioctl_info.port.targ_port; 2410 io->io_hdr.flags |= CTL_FLAG_USER_REQ; 2411 if ((io->io_hdr.io_type == CTL_IO_SCSI) 2412 && (io->scsiio.tag_type != CTL_TAG_UNTAGGED)) 2413 io->scsiio.tag_num = softc->ioctl_info.cur_tag_num++; 2414 2415 retval = ctl_ioctl_submit_wait(io); 2416 2417 if (retval != 0) { 2418 ctl_free_io(io); 2419 break; 2420 } 2421 2422 memcpy((void *)addr, io, sizeof(*io)); 2423 2424 /* return this to our pool */ 2425 ctl_free_io(io); 2426 2427 break; 2428 } 2429 case CTL_ENABLE_PORT: 2430 case CTL_DISABLE_PORT: 2431 case CTL_SET_PORT_WWNS: { 2432 struct ctl_port *port; 2433 struct ctl_port_entry *entry; 2434 2435 entry = (struct ctl_port_entry *)addr; 2436 2437 mtx_lock(&softc->ctl_lock); 2438 STAILQ_FOREACH(port, &softc->port_list, links) { 2439 int action, done; 2440 2441 action = 0; 2442 done = 0; 2443 2444 if ((entry->port_type == CTL_PORT_NONE) 2445 && (entry->targ_port == port->targ_port)) { 2446 /* 2447 * If the user only wants to enable or 2448 * disable or set WWNs on a specific port, 2449 * do the operation and we're done. 2450 */ 2451 action = 1; 2452 done = 1; 2453 } else if (entry->port_type & port->port_type) { 2454 /* 2455 * Compare the user's type mask with the 2456 * particular frontend type to see if we 2457 * have a match. 
2458 */ 2459 action = 1; 2460 done = 0; 2461 2462 /* 2463 * Make sure the user isn't trying to set 2464 * WWNs on multiple ports at the same time. 2465 */ 2466 if (cmd == CTL_SET_PORT_WWNS) { 2467 printf("%s: Can't set WWNs on " 2468 "multiple ports\n", __func__); 2469 retval = EINVAL; 2470 break; 2471 } 2472 } 2473 if (action != 0) { 2474 /* 2475 * XXX KDM we have to drop the lock here, 2476 * because the online/offline operations 2477 * can potentially block. We need to 2478 * reference count the frontends so they 2479 * can't go away, 2480 */ 2481 mtx_unlock(&softc->ctl_lock); 2482 2483 if (cmd == CTL_ENABLE_PORT) { 2484 struct ctl_lun *lun; 2485 2486 STAILQ_FOREACH(lun, &softc->lun_list, 2487 links) { 2488 port->lun_enable(port->targ_lun_arg, 2489 lun->target, 2490 lun->lun); 2491 } 2492 2493 ctl_port_online(port); 2494 } else if (cmd == CTL_DISABLE_PORT) { 2495 struct ctl_lun *lun; 2496 2497 ctl_port_offline(port); 2498 2499 STAILQ_FOREACH(lun, &softc->lun_list, 2500 links) { 2501 port->lun_disable( 2502 port->targ_lun_arg, 2503 lun->target, 2504 lun->lun); 2505 } 2506 } 2507 2508 mtx_lock(&softc->ctl_lock); 2509 2510 if (cmd == CTL_SET_PORT_WWNS) 2511 ctl_port_set_wwns(port, 2512 (entry->flags & CTL_PORT_WWNN_VALID) ? 2513 1 : 0, entry->wwnn, 2514 (entry->flags & CTL_PORT_WWPN_VALID) ? 
2515 1 : 0, entry->wwpn); 2516 } 2517 if (done != 0) 2518 break; 2519 } 2520 mtx_unlock(&softc->ctl_lock); 2521 break; 2522 } 2523 case CTL_GET_PORT_LIST: { 2524 struct ctl_port *port; 2525 struct ctl_port_list *list; 2526 int i; 2527 2528 list = (struct ctl_port_list *)addr; 2529 2530 if (list->alloc_len != (list->alloc_num * 2531 sizeof(struct ctl_port_entry))) { 2532 printf("%s: CTL_GET_PORT_LIST: alloc_len %u != " 2533 "alloc_num %u * sizeof(struct ctl_port_entry) " 2534 "%zu\n", __func__, list->alloc_len, 2535 list->alloc_num, sizeof(struct ctl_port_entry)); 2536 retval = EINVAL; 2537 break; 2538 } 2539 list->fill_len = 0; 2540 list->fill_num = 0; 2541 list->dropped_num = 0; 2542 i = 0; 2543 mtx_lock(&softc->ctl_lock); 2544 STAILQ_FOREACH(port, &softc->port_list, links) { 2545 struct ctl_port_entry entry, *list_entry; 2546 2547 if (list->fill_num >= list->alloc_num) { 2548 list->dropped_num++; 2549 continue; 2550 } 2551 2552 entry.port_type = port->port_type; 2553 strlcpy(entry.port_name, port->port_name, 2554 sizeof(entry.port_name)); 2555 entry.targ_port = port->targ_port; 2556 entry.physical_port = port->physical_port; 2557 entry.virtual_port = port->virtual_port; 2558 entry.wwnn = port->wwnn; 2559 entry.wwpn = port->wwpn; 2560 if (port->status & CTL_PORT_STATUS_ONLINE) 2561 entry.online = 1; 2562 else 2563 entry.online = 0; 2564 2565 list_entry = &list->entries[i]; 2566 2567 retval = copyout(&entry, list_entry, sizeof(entry)); 2568 if (retval != 0) { 2569 printf("%s: CTL_GET_PORT_LIST: copyout " 2570 "returned %d\n", __func__, retval); 2571 break; 2572 } 2573 i++; 2574 list->fill_num++; 2575 list->fill_len += sizeof(entry); 2576 } 2577 mtx_unlock(&softc->ctl_lock); 2578 2579 /* 2580 * If this is non-zero, we had a copyout fault, so there's 2581 * probably no point in attempting to set the status inside 2582 * the structure. 
2583 */ 2584 if (retval != 0) 2585 break; 2586 2587 if (list->dropped_num > 0) 2588 list->status = CTL_PORT_LIST_NEED_MORE_SPACE; 2589 else 2590 list->status = CTL_PORT_LIST_OK; 2591 break; 2592 } 2593 case CTL_DUMP_OOA: { 2594 struct ctl_lun *lun; 2595 union ctl_io *io; 2596 char printbuf[128]; 2597 struct sbuf sb; 2598 2599 mtx_lock(&softc->ctl_lock); 2600 printf("Dumping OOA queues:\n"); 2601 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2602 mtx_lock(&lun->lun_lock); 2603 for (io = (union ctl_io *)TAILQ_FIRST( 2604 &lun->ooa_queue); io != NULL; 2605 io = (union ctl_io *)TAILQ_NEXT(&io->io_hdr, 2606 ooa_links)) { 2607 sbuf_new(&sb, printbuf, sizeof(printbuf), 2608 SBUF_FIXEDLEN); 2609 sbuf_printf(&sb, "LUN %jd tag 0x%04x%s%s%s%s: ", 2610 (intmax_t)lun->lun, 2611 io->scsiio.tag_num, 2612 (io->io_hdr.flags & 2613 CTL_FLAG_BLOCKED) ? "" : " BLOCKED", 2614 (io->io_hdr.flags & 2615 CTL_FLAG_DMA_INPROG) ? " DMA" : "", 2616 (io->io_hdr.flags & 2617 CTL_FLAG_ABORT) ? " ABORT" : "", 2618 (io->io_hdr.flags & 2619 CTL_FLAG_IS_WAS_ON_RTR) ? 
" RTR" : ""); 2620 ctl_scsi_command_string(&io->scsiio, NULL, &sb); 2621 sbuf_finish(&sb); 2622 printf("%s\n", sbuf_data(&sb)); 2623 } 2624 mtx_unlock(&lun->lun_lock); 2625 } 2626 printf("OOA queues dump done\n"); 2627 mtx_unlock(&softc->ctl_lock); 2628 break; 2629 } 2630 case CTL_GET_OOA: { 2631 struct ctl_lun *lun; 2632 struct ctl_ooa *ooa_hdr; 2633 struct ctl_ooa_entry *entries; 2634 uint32_t cur_fill_num; 2635 2636 ooa_hdr = (struct ctl_ooa *)addr; 2637 2638 if ((ooa_hdr->alloc_len == 0) 2639 || (ooa_hdr->alloc_num == 0)) { 2640 printf("%s: CTL_GET_OOA: alloc len %u and alloc num %u " 2641 "must be non-zero\n", __func__, 2642 ooa_hdr->alloc_len, ooa_hdr->alloc_num); 2643 retval = EINVAL; 2644 break; 2645 } 2646 2647 if (ooa_hdr->alloc_len != (ooa_hdr->alloc_num * 2648 sizeof(struct ctl_ooa_entry))) { 2649 printf("%s: CTL_GET_OOA: alloc len %u must be alloc " 2650 "num %d * sizeof(struct ctl_ooa_entry) %zd\n", 2651 __func__, ooa_hdr->alloc_len, 2652 ooa_hdr->alloc_num,sizeof(struct ctl_ooa_entry)); 2653 retval = EINVAL; 2654 break; 2655 } 2656 2657 entries = malloc(ooa_hdr->alloc_len, M_CTL, M_WAITOK | M_ZERO); 2658 if (entries == NULL) { 2659 printf("%s: could not allocate %d bytes for OOA " 2660 "dump\n", __func__, ooa_hdr->alloc_len); 2661 retval = ENOMEM; 2662 break; 2663 } 2664 2665 mtx_lock(&softc->ctl_lock); 2666 if (((ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) == 0) 2667 && ((ooa_hdr->lun_num >= CTL_MAX_LUNS) 2668 || (softc->ctl_luns[ooa_hdr->lun_num] == NULL))) { 2669 mtx_unlock(&softc->ctl_lock); 2670 free(entries, M_CTL); 2671 printf("%s: CTL_GET_OOA: invalid LUN %ju\n", 2672 __func__, (uintmax_t)ooa_hdr->lun_num); 2673 retval = EINVAL; 2674 break; 2675 } 2676 2677 cur_fill_num = 0; 2678 2679 if (ooa_hdr->flags & CTL_OOA_FLAG_ALL_LUNS) { 2680 STAILQ_FOREACH(lun, &softc->lun_list, links) { 2681 retval = ctl_ioctl_fill_ooa(lun, &cur_fill_num, 2682 ooa_hdr, entries); 2683 if (retval != 0) 2684 break; 2685 } 2686 if (retval != 0) { 2687 
mtx_unlock(&softc->ctl_lock); 2688 free(entries, M_CTL); 2689 break; 2690 } 2691 } else { 2692 lun = softc->ctl_luns[ooa_hdr->lun_num]; 2693 2694 retval = ctl_ioctl_fill_ooa(lun, &cur_fill_num,ooa_hdr, 2695 entries); 2696 } 2697 mtx_unlock(&softc->ctl_lock); 2698 2699 ooa_hdr->fill_num = min(cur_fill_num, ooa_hdr->alloc_num); 2700 ooa_hdr->fill_len = ooa_hdr->fill_num * 2701 sizeof(struct ctl_ooa_entry); 2702 retval = copyout(entries, ooa_hdr->entries, ooa_hdr->fill_len); 2703 if (retval != 0) { 2704 printf("%s: error copying out %d bytes for OOA dump\n", 2705 __func__, ooa_hdr->fill_len); 2706 } 2707 2708 getbintime(&ooa_hdr->cur_bt); 2709 2710 if (cur_fill_num > ooa_hdr->alloc_num) { 2711 ooa_hdr->dropped_num = cur_fill_num -ooa_hdr->alloc_num; 2712 ooa_hdr->status = CTL_OOA_NEED_MORE_SPACE; 2713 } else { 2714 ooa_hdr->dropped_num = 0; 2715 ooa_hdr->status = CTL_OOA_OK; 2716 } 2717 2718 free(entries, M_CTL); 2719 break; 2720 } 2721 case CTL_CHECK_OOA: { 2722 union ctl_io *io; 2723 struct ctl_lun *lun; 2724 struct ctl_ooa_info *ooa_info; 2725 2726 2727 ooa_info = (struct ctl_ooa_info *)addr; 2728 2729 if (ooa_info->lun_id >= CTL_MAX_LUNS) { 2730 ooa_info->status = CTL_OOA_INVALID_LUN; 2731 break; 2732 } 2733 mtx_lock(&softc->ctl_lock); 2734 lun = softc->ctl_luns[ooa_info->lun_id]; 2735 if (lun == NULL) { 2736 mtx_unlock(&softc->ctl_lock); 2737 ooa_info->status = CTL_OOA_INVALID_LUN; 2738 break; 2739 } 2740 mtx_lock(&lun->lun_lock); 2741 mtx_unlock(&softc->ctl_lock); 2742 ooa_info->num_entries = 0; 2743 for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); 2744 io != NULL; io = (union ctl_io *)TAILQ_NEXT( 2745 &io->io_hdr, ooa_links)) { 2746 ooa_info->num_entries++; 2747 } 2748 mtx_unlock(&lun->lun_lock); 2749 2750 ooa_info->status = CTL_OOA_SUCCESS; 2751 2752 break; 2753 } 2754 case CTL_HARD_START: 2755 case CTL_HARD_STOP: { 2756 struct ctl_fe_ioctl_startstop_info ss_info; 2757 struct cfi_metatask *metatask; 2758 struct mtx hs_mtx; 2759 2760 mtx_init(&hs_mtx, 
"HS Mutex", NULL, MTX_DEF); 2761 2762 cv_init(&ss_info.sem, "hard start/stop cv" ); 2763 2764 metatask = cfi_alloc_metatask(/*can_wait*/ 1); 2765 if (metatask == NULL) { 2766 retval = ENOMEM; 2767 mtx_destroy(&hs_mtx); 2768 break; 2769 } 2770 2771 if (cmd == CTL_HARD_START) 2772 metatask->tasktype = CFI_TASK_STARTUP; 2773 else 2774 metatask->tasktype = CFI_TASK_SHUTDOWN; 2775 2776 metatask->callback = ctl_ioctl_hard_startstop_callback; 2777 metatask->callback_arg = &ss_info; 2778 2779 cfi_action(metatask); 2780 2781 /* Wait for the callback */ 2782 mtx_lock(&hs_mtx); 2783 cv_wait_sig(&ss_info.sem, &hs_mtx); 2784 mtx_unlock(&hs_mtx); 2785 2786 /* 2787 * All information has been copied from the metatask by the 2788 * time cv_broadcast() is called, so we free the metatask here. 2789 */ 2790 cfi_free_metatask(metatask); 2791 2792 memcpy((void *)addr, &ss_info.hs_info, sizeof(ss_info.hs_info)); 2793 2794 mtx_destroy(&hs_mtx); 2795 break; 2796 } 2797 case CTL_BBRREAD: { 2798 struct ctl_bbrread_info *bbr_info; 2799 struct ctl_fe_ioctl_bbrread_info fe_bbr_info; 2800 struct mtx bbr_mtx; 2801 struct cfi_metatask *metatask; 2802 2803 bbr_info = (struct ctl_bbrread_info *)addr; 2804 2805 bzero(&fe_bbr_info, sizeof(fe_bbr_info)); 2806 2807 bzero(&bbr_mtx, sizeof(bbr_mtx)); 2808 mtx_init(&bbr_mtx, "BBR Mutex", NULL, MTX_DEF); 2809 2810 fe_bbr_info.bbr_info = bbr_info; 2811 fe_bbr_info.lock = &bbr_mtx; 2812 2813 cv_init(&fe_bbr_info.sem, "BBR read cv"); 2814 metatask = cfi_alloc_metatask(/*can_wait*/ 1); 2815 2816 if (metatask == NULL) { 2817 mtx_destroy(&bbr_mtx); 2818 cv_destroy(&fe_bbr_info.sem); 2819 retval = ENOMEM; 2820 break; 2821 } 2822 metatask->tasktype = CFI_TASK_BBRREAD; 2823 metatask->callback = ctl_ioctl_bbrread_callback; 2824 metatask->callback_arg = &fe_bbr_info; 2825 metatask->taskinfo.bbrread.lun_num = bbr_info->lun_num; 2826 metatask->taskinfo.bbrread.lba = bbr_info->lba; 2827 metatask->taskinfo.bbrread.len = bbr_info->len; 2828 2829 cfi_action(metatask); 2830 
2831 mtx_lock(&bbr_mtx); 2832 while (fe_bbr_info.wakeup_done == 0) 2833 cv_wait_sig(&fe_bbr_info.sem, &bbr_mtx); 2834 mtx_unlock(&bbr_mtx); 2835 2836 bbr_info->status = metatask->status; 2837 bbr_info->bbr_status = metatask->taskinfo.bbrread.status; 2838 bbr_info->scsi_status = metatask->taskinfo.bbrread.scsi_status; 2839 memcpy(&bbr_info->sense_data, 2840 &metatask->taskinfo.bbrread.sense_data, 2841 MIN(sizeof(bbr_info->sense_data), 2842 sizeof(metatask->taskinfo.bbrread.sense_data))); 2843 2844 cfi_free_metatask(metatask); 2845 2846 mtx_destroy(&bbr_mtx); 2847 cv_destroy(&fe_bbr_info.sem); 2848 2849 break; 2850 } 2851 case CTL_DELAY_IO: { 2852 struct ctl_io_delay_info *delay_info; 2853#ifdef CTL_IO_DELAY 2854 struct ctl_lun *lun; 2855#endif /* CTL_IO_DELAY */ 2856 2857 delay_info = (struct ctl_io_delay_info *)addr; 2858 2859#ifdef CTL_IO_DELAY 2860 mtx_lock(&softc->ctl_lock); 2861 2862 if ((delay_info->lun_id >= CTL_MAX_LUNS) 2863 || (softc->ctl_luns[delay_info->lun_id] == NULL)) { 2864 delay_info->status = CTL_DELAY_STATUS_INVALID_LUN; 2865 } else { 2866 lun = softc->ctl_luns[delay_info->lun_id]; 2867 mtx_lock(&lun->lun_lock); 2868 2869 delay_info->status = CTL_DELAY_STATUS_OK; 2870 2871 switch (delay_info->delay_type) { 2872 case CTL_DELAY_TYPE_CONT: 2873 break; 2874 case CTL_DELAY_TYPE_ONESHOT: 2875 break; 2876 default: 2877 delay_info->status = 2878 CTL_DELAY_STATUS_INVALID_TYPE; 2879 break; 2880 } 2881 2882 switch (delay_info->delay_loc) { 2883 case CTL_DELAY_LOC_DATAMOVE: 2884 lun->delay_info.datamove_type = 2885 delay_info->delay_type; 2886 lun->delay_info.datamove_delay = 2887 delay_info->delay_secs; 2888 break; 2889 case CTL_DELAY_LOC_DONE: 2890 lun->delay_info.done_type = 2891 delay_info->delay_type; 2892 lun->delay_info.done_delay = 2893 delay_info->delay_secs; 2894 break; 2895 default: 2896 delay_info->status = 2897 CTL_DELAY_STATUS_INVALID_LOC; 2898 break; 2899 } 2900 mtx_unlock(&lun->lun_lock); 2901 } 2902 2903 mtx_unlock(&softc->ctl_lock); 
2904#else 2905 delay_info->status = CTL_DELAY_STATUS_NOT_IMPLEMENTED; 2906#endif /* CTL_IO_DELAY */ 2907 break; 2908 } 2909 case CTL_REALSYNC_SET: { 2910 int *syncstate; 2911 2912 syncstate = (int *)addr; 2913 2914 mtx_lock(&softc->ctl_lock); 2915 switch (*syncstate) { 2916 case 0: 2917 softc->flags &= ~CTL_FLAG_REAL_SYNC; 2918 break; 2919 case 1: 2920 softc->flags |= CTL_FLAG_REAL_SYNC; 2921 break; 2922 default: 2923 retval = EINVAL; 2924 break; 2925 } 2926 mtx_unlock(&softc->ctl_lock); 2927 break; 2928 } 2929 case CTL_REALSYNC_GET: { 2930 int *syncstate; 2931 2932 syncstate = (int*)addr; 2933 2934 mtx_lock(&softc->ctl_lock); 2935 if (softc->flags & CTL_FLAG_REAL_SYNC) 2936 *syncstate = 1; 2937 else 2938 *syncstate = 0; 2939 mtx_unlock(&softc->ctl_lock); 2940 2941 break; 2942 } 2943 case CTL_SETSYNC: 2944 case CTL_GETSYNC: { 2945 struct ctl_sync_info *sync_info; 2946 struct ctl_lun *lun; 2947 2948 sync_info = (struct ctl_sync_info *)addr; 2949 2950 mtx_lock(&softc->ctl_lock); 2951 lun = softc->ctl_luns[sync_info->lun_id]; 2952 if (lun == NULL) { 2953 mtx_unlock(&softc->ctl_lock); 2954 sync_info->status = CTL_GS_SYNC_NO_LUN; 2955 } 2956 /* 2957 * Get or set the sync interval. We're not bounds checking 2958 * in the set case, hopefully the user won't do something 2959 * silly. 2960 */ 2961 mtx_lock(&lun->lun_lock); 2962 mtx_unlock(&softc->ctl_lock); 2963 if (cmd == CTL_GETSYNC) 2964 sync_info->sync_interval = lun->sync_interval; 2965 else 2966 lun->sync_interval = sync_info->sync_interval; 2967 mtx_unlock(&lun->lun_lock); 2968 2969 sync_info->status = CTL_GS_SYNC_OK; 2970 2971 break; 2972 } 2973 case CTL_GETSTATS: { 2974 struct ctl_stats *stats; 2975 struct ctl_lun *lun; 2976 int i; 2977 2978 stats = (struct ctl_stats *)addr; 2979 2980 if ((sizeof(struct ctl_lun_io_stats) * softc->num_luns) > 2981 stats->alloc_len) { 2982 stats->status = CTL_SS_NEED_MORE_SPACE; 2983 stats->num_luns = softc->num_luns; 2984 break; 2985 } 2986 /* 2987 * XXX KDM no locking here. 
If the LUN list changes, 2988 * things can blow up. 2989 */ 2990 for (i = 0, lun = STAILQ_FIRST(&softc->lun_list); lun != NULL; 2991 i++, lun = STAILQ_NEXT(lun, links)) { 2992 retval = copyout(&lun->stats, &stats->lun_stats[i], 2993 sizeof(lun->stats)); 2994 if (retval != 0) 2995 break; 2996 } 2997 stats->num_luns = softc->num_luns; 2998 stats->fill_len = sizeof(struct ctl_lun_io_stats) * 2999 softc->num_luns; 3000 stats->status = CTL_SS_OK; 3001#ifdef CTL_TIME_IO 3002 stats->flags = CTL_STATS_FLAG_TIME_VALID; 3003#else 3004 stats->flags = CTL_STATS_FLAG_NONE; 3005#endif 3006 getnanouptime(&stats->timestamp); 3007 break; 3008 } 3009 case CTL_ERROR_INJECT: { 3010 struct ctl_error_desc *err_desc, *new_err_desc; 3011 struct ctl_lun *lun; 3012 3013 err_desc = (struct ctl_error_desc *)addr; 3014 3015 new_err_desc = malloc(sizeof(*new_err_desc), M_CTL, 3016 M_WAITOK | M_ZERO); 3017 bcopy(err_desc, new_err_desc, sizeof(*new_err_desc)); 3018 3019 mtx_lock(&softc->ctl_lock); 3020 lun = softc->ctl_luns[err_desc->lun_id]; 3021 if (lun == NULL) { 3022 mtx_unlock(&softc->ctl_lock); 3023 free(new_err_desc, M_CTL); 3024 printf("%s: CTL_ERROR_INJECT: invalid LUN %ju\n", 3025 __func__, (uintmax_t)err_desc->lun_id); 3026 retval = EINVAL; 3027 break; 3028 } 3029 mtx_lock(&lun->lun_lock); 3030 mtx_unlock(&softc->ctl_lock); 3031 3032 /* 3033 * We could do some checking here to verify the validity 3034 * of the request, but given the complexity of error 3035 * injection requests, the checking logic would be fairly 3036 * complex. 3037 * 3038 * For now, if the request is invalid, it just won't get 3039 * executed and might get deleted. 3040 */ 3041 STAILQ_INSERT_TAIL(&lun->error_list, new_err_desc, links); 3042 3043 /* 3044 * XXX KDM check to make sure the serial number is unique, 3045 * in case we somehow manage to wrap. That shouldn't 3046 * happen for a very long time, but it's the right thing to 3047 * do. 
3048 */ 3049 new_err_desc->serial = lun->error_serial; 3050 err_desc->serial = lun->error_serial; 3051 lun->error_serial++; 3052 3053 mtx_unlock(&lun->lun_lock); 3054 break; 3055 } 3056 case CTL_ERROR_INJECT_DELETE: { 3057 struct ctl_error_desc *delete_desc, *desc, *desc2; 3058 struct ctl_lun *lun; 3059 int delete_done; 3060 3061 delete_desc = (struct ctl_error_desc *)addr; 3062 delete_done = 0; 3063 3064 mtx_lock(&softc->ctl_lock); 3065 lun = softc->ctl_luns[delete_desc->lun_id]; 3066 if (lun == NULL) { 3067 mtx_unlock(&softc->ctl_lock); 3068 printf("%s: CTL_ERROR_INJECT_DELETE: invalid LUN %ju\n", 3069 __func__, (uintmax_t)delete_desc->lun_id); 3070 retval = EINVAL; 3071 break; 3072 } 3073 mtx_lock(&lun->lun_lock); 3074 mtx_unlock(&softc->ctl_lock); 3075 STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) { 3076 if (desc->serial != delete_desc->serial) 3077 continue; 3078 3079 STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, 3080 links); 3081 free(desc, M_CTL); 3082 delete_done = 1; 3083 } 3084 mtx_unlock(&lun->lun_lock); 3085 if (delete_done == 0) { 3086 printf("%s: CTL_ERROR_INJECT_DELETE: can't find " 3087 "error serial %ju on LUN %u\n", __func__, 3088 delete_desc->serial, delete_desc->lun_id); 3089 retval = EINVAL; 3090 break; 3091 } 3092 break; 3093 } 3094 case CTL_DUMP_STRUCTS: { 3095 int i, j, k; 3096 struct ctl_port *port; 3097 struct ctl_frontend *fe; 3098 3099 mtx_lock(&softc->ctl_lock); 3100 printf("CTL Persistent Reservation information start:\n"); 3101 for (i = 0; i < CTL_MAX_LUNS; i++) { 3102 struct ctl_lun *lun; 3103 3104 lun = softc->ctl_luns[i]; 3105 3106 if ((lun == NULL) 3107 || ((lun->flags & CTL_LUN_DISABLED) != 0)) 3108 continue; 3109 3110 for (j = 0; j < (CTL_MAX_PORTS * 2); j++) { 3111 if (lun->pr_keys[j] == NULL) 3112 continue; 3113 for (k = 0; k < CTL_MAX_INIT_PER_PORT; k++){ 3114 if (lun->pr_keys[j][k] == 0) 3115 continue; 3116 printf(" LUN %d port %d iid %d key " 3117 "%#jx\n", i, j, k, 3118 (uintmax_t)lun->pr_keys[j][k]); 
3119 } 3120 } 3121 } 3122 printf("CTL Persistent Reservation information end\n"); 3123 printf("CTL Ports:\n"); 3124 STAILQ_FOREACH(port, &softc->port_list, links) { 3125 printf(" Port %d '%s' Frontend '%s' Type %u pp %d vp %d WWNN " 3126 "%#jx WWPN %#jx\n", port->targ_port, port->port_name, 3127 port->frontend->name, port->port_type, 3128 port->physical_port, port->virtual_port, 3129 (uintmax_t)port->wwnn, (uintmax_t)port->wwpn); 3130 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 3131 if (port->wwpn_iid[j].in_use == 0 && 3132 port->wwpn_iid[j].wwpn == 0 && 3133 port->wwpn_iid[j].name == NULL) 3134 continue; 3135 3136 printf(" iid %u use %d WWPN %#jx '%s'\n", 3137 j, port->wwpn_iid[j].in_use, 3138 (uintmax_t)port->wwpn_iid[j].wwpn, 3139 port->wwpn_iid[j].name); 3140 } 3141 } 3142 printf("CTL Port information end\n"); 3143 mtx_unlock(&softc->ctl_lock); 3144 /* 3145 * XXX KDM calling this without a lock. We'd likely want 3146 * to drop the lock before calling the frontend's dump 3147 * routine anyway. 
3148 */ 3149 printf("CTL Frontends:\n"); 3150 STAILQ_FOREACH(fe, &softc->fe_list, links) { 3151 printf(" Frontend '%s'\n", fe->name); 3152 if (fe->fe_dump != NULL) 3153 fe->fe_dump(); 3154 } 3155 printf("CTL Frontend information end\n"); 3156 break; 3157 } 3158 case CTL_LUN_REQ: { 3159 struct ctl_lun_req *lun_req; 3160 struct ctl_backend_driver *backend; 3161 3162 lun_req = (struct ctl_lun_req *)addr; 3163 3164 backend = ctl_backend_find(lun_req->backend); 3165 if (backend == NULL) { 3166 lun_req->status = CTL_LUN_ERROR; 3167 snprintf(lun_req->error_str, 3168 sizeof(lun_req->error_str), 3169 "Backend \"%s\" not found.", 3170 lun_req->backend); 3171 break; 3172 } 3173 if (lun_req->num_be_args > 0) { 3174 lun_req->kern_be_args = ctl_copyin_args( 3175 lun_req->num_be_args, 3176 lun_req->be_args, 3177 lun_req->error_str, 3178 sizeof(lun_req->error_str)); 3179 if (lun_req->kern_be_args == NULL) { 3180 lun_req->status = CTL_LUN_ERROR; 3181 break; 3182 } 3183 } 3184 3185 retval = backend->ioctl(dev, cmd, addr, flag, td); 3186 3187 if (lun_req->num_be_args > 0) { 3188 ctl_copyout_args(lun_req->num_be_args, 3189 lun_req->kern_be_args); 3190 ctl_free_args(lun_req->num_be_args, 3191 lun_req->kern_be_args); 3192 } 3193 break; 3194 } 3195 case CTL_LUN_LIST: { 3196 struct sbuf *sb; 3197 struct ctl_lun *lun; 3198 struct ctl_lun_list *list; 3199 struct ctl_option *opt; 3200 3201 list = (struct ctl_lun_list *)addr; 3202 3203 /* 3204 * Allocate a fixed length sbuf here, based on the length 3205 * of the user's buffer. We could allocate an auto-extending 3206 * buffer, and then tell the user how much larger our 3207 * amount of data is than his buffer, but that presents 3208 * some problems: 3209 * 3210 * 1. The sbuf(9) routines use a blocking malloc, and so 3211 * we can't hold a lock while calling them with an 3212 * auto-extending buffer. 3213 * 3214 * 2. 
There is not currently a LUN reference counting 3215 * mechanism, outside of outstanding transactions on 3216 * the LUN's OOA queue. So a LUN could go away on us 3217 * while we're getting the LUN number, backend-specific 3218 * information, etc. Thus, given the way things 3219 * currently work, we need to hold the CTL lock while 3220 * grabbing LUN information. 3221 * 3222 * So, from the user's standpoint, the best thing to do is 3223 * allocate what he thinks is a reasonable buffer length, 3224 * and then if he gets a CTL_LUN_LIST_NEED_MORE_SPACE error, 3225 * double the buffer length and try again. (And repeat 3226 * that until he succeeds.) 3227 */ 3228 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); 3229 if (sb == NULL) { 3230 list->status = CTL_LUN_LIST_ERROR; 3231 snprintf(list->error_str, sizeof(list->error_str), 3232 "Unable to allocate %d bytes for LUN list", 3233 list->alloc_len); 3234 break; 3235 } 3236 3237 sbuf_printf(sb, "<ctllunlist>\n"); 3238 3239 mtx_lock(&softc->ctl_lock); 3240 STAILQ_FOREACH(lun, &softc->lun_list, links) { 3241 mtx_lock(&lun->lun_lock); 3242 retval = sbuf_printf(sb, "<lun id=\"%ju\">\n", 3243 (uintmax_t)lun->lun); 3244 3245 /* 3246 * Bail out as soon as we see that we've overfilled 3247 * the buffer. 3248 */ 3249 if (retval != 0) 3250 break; 3251 3252 retval = sbuf_printf(sb, "\t<backend_type>%s" 3253 "</backend_type>\n", 3254 (lun->backend == NULL) ? "none" : 3255 lun->backend->name); 3256 3257 if (retval != 0) 3258 break; 3259 3260 retval = sbuf_printf(sb, "\t<lun_type>%d</lun_type>\n", 3261 lun->be_lun->lun_type); 3262 3263 if (retval != 0) 3264 break; 3265 3266 if (lun->backend == NULL) { 3267 retval = sbuf_printf(sb, "</lun>\n"); 3268 if (retval != 0) 3269 break; 3270 continue; 3271 } 3272 3273 retval = sbuf_printf(sb, "\t<size>%ju</size>\n", 3274 (lun->be_lun->maxlba > 0) ? 
3275 lun->be_lun->maxlba + 1 : 0); 3276 3277 if (retval != 0) 3278 break; 3279 3280 retval = sbuf_printf(sb, "\t<blocksize>%u</blocksize>\n", 3281 lun->be_lun->blocksize); 3282 3283 if (retval != 0) 3284 break; 3285 3286 retval = sbuf_printf(sb, "\t<serial_number>"); 3287 3288 if (retval != 0) 3289 break; 3290 3291 retval = ctl_sbuf_printf_esc(sb, 3292 lun->be_lun->serial_num, 3293 sizeof(lun->be_lun->serial_num)); 3294 3295 if (retval != 0) 3296 break; 3297 3298 retval = sbuf_printf(sb, "</serial_number>\n"); 3299 3300 if (retval != 0) 3301 break; 3302 3303 retval = sbuf_printf(sb, "\t<device_id>"); 3304 3305 if (retval != 0) 3306 break; 3307 3308 retval = ctl_sbuf_printf_esc(sb, 3309 lun->be_lun->device_id, 3310 sizeof(lun->be_lun->device_id)); 3311 3312 if (retval != 0) 3313 break; 3314 3315 retval = sbuf_printf(sb, "</device_id>\n"); 3316 3317 if (retval != 0) 3318 break; 3319 3320 if (lun->backend->lun_info != NULL) { 3321 retval = lun->backend->lun_info(lun->be_lun->be_lun, sb); 3322 if (retval != 0) 3323 break; 3324 } 3325 STAILQ_FOREACH(opt, &lun->be_lun->options, links) { 3326 retval = sbuf_printf(sb, "\t<%s>%s</%s>\n", 3327 opt->name, opt->value, opt->name); 3328 if (retval != 0) 3329 break; 3330 } 3331 3332 retval = sbuf_printf(sb, "</lun>\n"); 3333 3334 if (retval != 0) 3335 break; 3336 mtx_unlock(&lun->lun_lock); 3337 } 3338 if (lun != NULL) 3339 mtx_unlock(&lun->lun_lock); 3340 mtx_unlock(&softc->ctl_lock); 3341 3342 if ((retval != 0) 3343 || ((retval = sbuf_printf(sb, "</ctllunlist>\n")) != 0)) { 3344 retval = 0; 3345 sbuf_delete(sb); 3346 list->status = CTL_LUN_LIST_NEED_MORE_SPACE; 3347 snprintf(list->error_str, sizeof(list->error_str), 3348 "Out of space, %d bytes is too small", 3349 list->alloc_len); 3350 break; 3351 } 3352 3353 sbuf_finish(sb); 3354 3355 retval = copyout(sbuf_data(sb), list->lun_xml, 3356 sbuf_len(sb) + 1); 3357 3358 list->fill_len = sbuf_len(sb) + 1; 3359 list->status = CTL_LUN_LIST_OK; 3360 sbuf_delete(sb); 3361 break; 3362 } 
3363 case CTL_ISCSI: { 3364 struct ctl_iscsi *ci; 3365 struct ctl_frontend *fe; 3366 3367 ci = (struct ctl_iscsi *)addr; 3368 3369 fe = ctl_frontend_find("iscsi"); 3370 if (fe == NULL) { 3371 ci->status = CTL_ISCSI_ERROR; 3372 snprintf(ci->error_str, sizeof(ci->error_str), 3373 "Frontend \"iscsi\" not found."); 3374 break; 3375 } 3376 3377 retval = fe->ioctl(dev, cmd, addr, flag, td); 3378 break; 3379 } 3380 case CTL_PORT_REQ: { 3381 struct ctl_req *req; 3382 struct ctl_frontend *fe; 3383 3384 req = (struct ctl_req *)addr; 3385 3386 fe = ctl_frontend_find(req->driver); 3387 if (fe == NULL) { 3388 req->status = CTL_LUN_ERROR; 3389 snprintf(req->error_str, sizeof(req->error_str), 3390 "Frontend \"%s\" not found.", req->driver); 3391 break; 3392 } 3393 if (req->num_args > 0) { 3394 req->kern_args = ctl_copyin_args(req->num_args, 3395 req->args, req->error_str, sizeof(req->error_str)); 3396 if (req->kern_args == NULL) { 3397 req->status = CTL_LUN_ERROR; 3398 break; 3399 } 3400 } 3401 3402 retval = fe->ioctl(dev, cmd, addr, flag, td); 3403 3404 if (req->num_args > 0) { 3405 ctl_copyout_args(req->num_args, req->kern_args); 3406 ctl_free_args(req->num_args, req->kern_args); 3407 } 3408 break; 3409 } 3410 case CTL_PORT_LIST: { 3411 struct sbuf *sb; 3412 struct ctl_port *port; 3413 struct ctl_lun_list *list; 3414 struct ctl_option *opt; 3415 int j; 3416 3417 list = (struct ctl_lun_list *)addr; 3418 3419 sb = sbuf_new(NULL, NULL, list->alloc_len, SBUF_FIXEDLEN); 3420 if (sb == NULL) { 3421 list->status = CTL_LUN_LIST_ERROR; 3422 snprintf(list->error_str, sizeof(list->error_str), 3423 "Unable to allocate %d bytes for LUN list", 3424 list->alloc_len); 3425 break; 3426 } 3427 3428 sbuf_printf(sb, "<ctlportlist>\n"); 3429 3430 mtx_lock(&softc->ctl_lock); 3431 STAILQ_FOREACH(port, &softc->port_list, links) { 3432 retval = sbuf_printf(sb, "<targ_port id=\"%ju\">\n", 3433 (uintmax_t)port->targ_port); 3434 3435 /* 3436 * Bail out as soon as we see that we've overfilled 3437 * the 
buffer. 3438 */ 3439 if (retval != 0) 3440 break; 3441 3442 retval = sbuf_printf(sb, "\t<frontend_type>%s" 3443 "</frontend_type>\n", port->frontend->name); 3444 if (retval != 0) 3445 break; 3446 3447 retval = sbuf_printf(sb, "\t<port_type>%d</port_type>\n", 3448 port->port_type); 3449 if (retval != 0) 3450 break; 3451 3452 retval = sbuf_printf(sb, "\t<online>%s</online>\n", 3453 (port->status & CTL_PORT_STATUS_ONLINE) ? "YES" : "NO"); 3454 if (retval != 0) 3455 break; 3456 3457 retval = sbuf_printf(sb, "\t<port_name>%s</port_name>\n", 3458 port->port_name); 3459 if (retval != 0) 3460 break; 3461 3462 retval = sbuf_printf(sb, "\t<physical_port>%d</physical_port>\n", 3463 port->physical_port); 3464 if (retval != 0) 3465 break; 3466 3467 retval = sbuf_printf(sb, "\t<virtual_port>%d</virtual_port>\n", 3468 port->virtual_port); 3469 if (retval != 0) 3470 break; 3471 3472 if (port->target_devid != NULL) { 3473 sbuf_printf(sb, "\t<target>"); 3474 ctl_id_sbuf(port->target_devid, sb); 3475 sbuf_printf(sb, "</target>\n"); 3476 } 3477 3478 if (port->port_devid != NULL) { 3479 sbuf_printf(sb, "\t<port>"); 3480 ctl_id_sbuf(port->port_devid, sb); 3481 sbuf_printf(sb, "</port>\n"); 3482 } 3483 3484 if (port->port_info != NULL) { 3485 retval = port->port_info(port->onoff_arg, sb); 3486 if (retval != 0) 3487 break; 3488 } 3489 STAILQ_FOREACH(opt, &port->options, links) { 3490 retval = sbuf_printf(sb, "\t<%s>%s</%s>\n", 3491 opt->name, opt->value, opt->name); 3492 if (retval != 0) 3493 break; 3494 } 3495 3496 for (j = 0; j < CTL_MAX_INIT_PER_PORT; j++) { 3497 if (port->wwpn_iid[j].in_use == 0 || 3498 (port->wwpn_iid[j].wwpn == 0 && 3499 port->wwpn_iid[j].name == NULL)) 3500 continue; 3501 3502 if (port->wwpn_iid[j].name != NULL) 3503 retval = sbuf_printf(sb, 3504 "\t<initiator id=\"%u\">%s</initiator>\n", 3505 j, port->wwpn_iid[j].name); 3506 else 3507 retval = sbuf_printf(sb, 3508 "\t<initiator id=\"%u\">naa.%08jx</initiator>\n", 3509 j, port->wwpn_iid[j].wwpn); 3510 if (retval != 
0) 3511 break; 3512 } 3513 if (retval != 0) 3514 break; 3515 3516 retval = sbuf_printf(sb, "</targ_port>\n"); 3517 if (retval != 0) 3518 break; 3519 } 3520 mtx_unlock(&softc->ctl_lock); 3521 3522 if ((retval != 0) 3523 || ((retval = sbuf_printf(sb, "</ctlportlist>\n")) != 0)) { 3524 retval = 0; 3525 sbuf_delete(sb); 3526 list->status = CTL_LUN_LIST_NEED_MORE_SPACE; 3527 snprintf(list->error_str, sizeof(list->error_str), 3528 "Out of space, %d bytes is too small", 3529 list->alloc_len); 3530 break; 3531 } 3532 3533 sbuf_finish(sb); 3534 3535 retval = copyout(sbuf_data(sb), list->lun_xml, 3536 sbuf_len(sb) + 1); 3537 3538 list->fill_len = sbuf_len(sb) + 1; 3539 list->status = CTL_LUN_LIST_OK; 3540 sbuf_delete(sb); 3541 break; 3542 } 3543 default: { 3544 /* XXX KDM should we fix this? */ 3545#if 0 3546 struct ctl_backend_driver *backend; 3547 unsigned int type; 3548 int found; 3549 3550 found = 0; 3551 3552 /* 3553 * We encode the backend type as the ioctl type for backend 3554 * ioctls. So parse it out here, and then search for a 3555 * backend of this type. 
3556 */ 3557 type = _IOC_TYPE(cmd); 3558 3559 STAILQ_FOREACH(backend, &softc->be_list, links) { 3560 if (backend->type == type) { 3561 found = 1; 3562 break; 3563 } 3564 } 3565 if (found == 0) { 3566 printf("ctl: unknown ioctl command %#lx or backend " 3567 "%d\n", cmd, type); 3568 retval = EINVAL; 3569 break; 3570 } 3571 retval = backend->ioctl(dev, cmd, addr, flag, td); 3572#endif 3573 retval = ENOTTY; 3574 break; 3575 } 3576 } 3577 return (retval); 3578} 3579 3580uint32_t 3581ctl_get_initindex(struct ctl_nexus *nexus) 3582{ 3583 if (nexus->targ_port < CTL_MAX_PORTS) 3584 return (nexus->initid.id + 3585 (nexus->targ_port * CTL_MAX_INIT_PER_PORT)); 3586 else 3587 return (nexus->initid.id + 3588 ((nexus->targ_port - CTL_MAX_PORTS) * 3589 CTL_MAX_INIT_PER_PORT)); 3590} 3591 3592uint32_t 3593ctl_get_resindex(struct ctl_nexus *nexus) 3594{ 3595 return (nexus->initid.id + (nexus->targ_port * CTL_MAX_INIT_PER_PORT)); 3596} 3597 3598uint32_t 3599ctl_port_idx(int port_num) 3600{ 3601 if (port_num < CTL_MAX_PORTS) 3602 return(port_num); 3603 else 3604 return(port_num - CTL_MAX_PORTS); 3605} 3606 3607static uint32_t 3608ctl_map_lun(struct ctl_softc *softc, int port_num, uint32_t lun_id) 3609{ 3610 struct ctl_port *port; 3611 3612 port = softc->ctl_ports[ctl_port_idx(port_num)]; 3613 if (port == NULL) 3614 return (UINT32_MAX); 3615 if (port->lun_map == NULL) 3616 return (lun_id); 3617 return (port->lun_map(port->targ_lun_arg, lun_id)); 3618} 3619 3620static uint32_t 3621ctl_map_lun_back(struct ctl_softc *softc, int port_num, uint32_t lun_id) 3622{ 3623 struct ctl_port *port; 3624 uint32_t i; 3625 3626 port = softc->ctl_ports[ctl_port_idx(port_num)]; 3627 if (port->lun_map == NULL) 3628 return (lun_id); 3629 for (i = 0; i < CTL_MAX_LUNS; i++) { 3630 if (port->lun_map(port->targ_lun_arg, i) == lun_id) 3631 return (i); 3632 } 3633 return (UINT32_MAX); 3634} 3635 3636/* 3637 * Note: This only works for bitmask sizes that are at least 32 bits, and 3638 * that are a power of 2. 
3639 */ 3640int 3641ctl_ffz(uint32_t *mask, uint32_t size) 3642{ 3643 uint32_t num_chunks, num_pieces; 3644 int i, j; 3645 3646 num_chunks = (size >> 5); 3647 if (num_chunks == 0) 3648 num_chunks++; 3649 num_pieces = MIN((sizeof(uint32_t) * 8), size); 3650 3651 for (i = 0; i < num_chunks; i++) { 3652 for (j = 0; j < num_pieces; j++) { 3653 if ((mask[i] & (1 << j)) == 0) 3654 return ((i << 5) + j); 3655 } 3656 } 3657 3658 return (-1); 3659} 3660 3661int 3662ctl_set_mask(uint32_t *mask, uint32_t bit) 3663{ 3664 uint32_t chunk, piece; 3665 3666 chunk = bit >> 5; 3667 piece = bit % (sizeof(uint32_t) * 8); 3668 3669 if ((mask[chunk] & (1 << piece)) != 0) 3670 return (-1); 3671 else 3672 mask[chunk] |= (1 << piece); 3673 3674 return (0); 3675} 3676 3677int 3678ctl_clear_mask(uint32_t *mask, uint32_t bit) 3679{ 3680 uint32_t chunk, piece; 3681 3682 chunk = bit >> 5; 3683 piece = bit % (sizeof(uint32_t) * 8); 3684 3685 if ((mask[chunk] & (1 << piece)) == 0) 3686 return (-1); 3687 else 3688 mask[chunk] &= ~(1 << piece); 3689 3690 return (0); 3691} 3692 3693int 3694ctl_is_set(uint32_t *mask, uint32_t bit) 3695{ 3696 uint32_t chunk, piece; 3697 3698 chunk = bit >> 5; 3699 piece = bit % (sizeof(uint32_t) * 8); 3700 3701 if ((mask[chunk] & (1 << piece)) == 0) 3702 return (0); 3703 else 3704 return (1); 3705} 3706 3707static uint64_t 3708ctl_get_prkey(struct ctl_lun *lun, uint32_t residx) 3709{ 3710 uint64_t *t; 3711 3712 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3713 if (t == NULL) 3714 return (0); 3715 return (t[residx % CTL_MAX_INIT_PER_PORT]); 3716} 3717 3718static void 3719ctl_clr_prkey(struct ctl_lun *lun, uint32_t residx) 3720{ 3721 uint64_t *t; 3722 3723 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3724 if (t == NULL) 3725 return; 3726 t[residx % CTL_MAX_INIT_PER_PORT] = 0; 3727} 3728 3729static void 3730ctl_alloc_prkey(struct ctl_lun *lun, uint32_t residx) 3731{ 3732 uint64_t *p; 3733 u_int i; 3734 3735 i = residx/CTL_MAX_INIT_PER_PORT; 3736 if (lun->pr_keys[i] 
!= NULL) 3737 return; 3738 mtx_unlock(&lun->lun_lock); 3739 p = malloc(sizeof(uint64_t) * CTL_MAX_INIT_PER_PORT, M_CTL, 3740 M_WAITOK | M_ZERO); 3741 mtx_lock(&lun->lun_lock); 3742 if (lun->pr_keys[i] == NULL) 3743 lun->pr_keys[i] = p; 3744 else 3745 free(p, M_CTL); 3746} 3747 3748static void 3749ctl_set_prkey(struct ctl_lun *lun, uint32_t residx, uint64_t key) 3750{ 3751 uint64_t *t; 3752 3753 t = lun->pr_keys[residx/CTL_MAX_INIT_PER_PORT]; 3754 KASSERT(t != NULL, ("prkey %d is not allocated", residx)); 3755 t[residx % CTL_MAX_INIT_PER_PORT] = key; 3756} 3757 3758/* 3759 * ctl_softc, pool_name, total_ctl_io are passed in. 3760 * npool is passed out. 3761 */ 3762int 3763ctl_pool_create(struct ctl_softc *ctl_softc, const char *pool_name, 3764 uint32_t total_ctl_io, void **npool) 3765{ 3766#ifdef IO_POOLS 3767 struct ctl_io_pool *pool; 3768 3769 pool = (struct ctl_io_pool *)malloc(sizeof(*pool), M_CTL, 3770 M_NOWAIT | M_ZERO); 3771 if (pool == NULL) 3772 return (ENOMEM); 3773 3774 snprintf(pool->name, sizeof(pool->name), "CTL IO %s", pool_name); 3775 pool->ctl_softc = ctl_softc; 3776 pool->zone = uma_zsecond_create(pool->name, NULL, 3777 NULL, NULL, NULL, ctl_softc->io_zone); 3778 /* uma_prealloc(pool->zone, total_ctl_io); */ 3779 3780 *npool = pool; 3781#else 3782 *npool = ctl_softc->io_zone; 3783#endif 3784 return (0); 3785} 3786 3787void 3788ctl_pool_free(struct ctl_io_pool *pool) 3789{ 3790 3791 if (pool == NULL) 3792 return; 3793 3794#ifdef IO_POOLS 3795 uma_zdestroy(pool->zone); 3796 free(pool, M_CTL); 3797#endif 3798} 3799 3800union ctl_io * 3801ctl_alloc_io(void *pool_ref) 3802{ 3803 union ctl_io *io; 3804#ifdef IO_POOLS 3805 struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref; 3806 3807 io = uma_zalloc(pool->zone, M_WAITOK); 3808#else 3809 io = uma_zalloc((uma_zone_t)pool_ref, M_WAITOK); 3810#endif 3811 if (io != NULL) 3812 io->io_hdr.pool = pool_ref; 3813 return (io); 3814} 3815 3816union ctl_io * 3817ctl_alloc_io_nowait(void *pool_ref) 3818{ 3819 
	union ctl_io *io;
#ifdef IO_POOLS
	struct ctl_io_pool *pool = (struct ctl_io_pool *)pool_ref;

	io = uma_zalloc(pool->zone, M_NOWAIT);
#else
	io = uma_zalloc((uma_zone_t)pool_ref, M_NOWAIT);
#endif
	/* Remember the owning pool so ctl_free_io() can find it. */
	if (io != NULL)
		io->io_hdr.pool = pool_ref;
	return (io);
}

/*
 * Return a ctl_io to the pool it was allocated from (recorded in
 * io_hdr.pool by ctl_alloc_io()/ctl_alloc_io_nowait()).  NULL is a no-op.
 */
void
ctl_free_io(union ctl_io *io)
{
#ifdef IO_POOLS
	struct ctl_io_pool *pool;
#endif

	if (io == NULL)
		return;

#ifdef IO_POOLS
	pool = (struct ctl_io_pool *)io->io_hdr.pool;
	uma_zfree(pool->zone, io);
#else
	uma_zfree((uma_zone_t)io->io_hdr.pool, io);
#endif
}

/*
 * Zero a ctl_io for reuse while preserving its pool reference, which
 * must survive so the io can still be freed back to the right pool.
 */
void
ctl_zero_io(union ctl_io *io)
{
	void *pool_ref;

	if (io == NULL)
		return;

	/*
	 * May need to preserve linked list pointers at some point too.
	 */
	pool_ref = io->io_hdr.pool;
	memset(io, 0, sizeof(*io));
	io->io_hdr.pool = pool_ref;
}

/*
 * This routine is currently used for internal copies of ctl_ios that need
 * to persist for some reason after we've already returned status to the
 * FETD.  (Thus the flag set.)
 *
 * XXX XXX
 * Note that this makes a blind copy of all fields in the ctl_io, except
 * for the pool reference.  This includes any memory that has been
 * allocated!  That memory will no longer be valid after done has been
 * called, so this would be VERY DANGEROUS for command that actually does
 * any reads or writes.  Right now (11/7/2005), this is only used for immediate
 * start and stop commands, which don't transfer any data, so this is not a
 * problem.  If it is used for anything else, the caller would also need to
 * allocate data buffer space and this routine would need to be modified to
 * copy the data buffer(s) as well.
 */
void
ctl_copy_io(union ctl_io *src, union ctl_io *dest)
{
	void *pool_ref;

	if ((src == NULL)
	 || (dest == NULL))
		return;

	/*
	 * May need to preserve linked list pointers at some point too.
	 */
	pool_ref = dest->io_hdr.pool;

	memcpy(dest, src, MIN(sizeof(*src), sizeof(*dest)));

	/* The destination keeps its own pool reference, not the source's. */
	dest->io_hdr.pool = pool_ref;
	/*
	 * We need to know that this is an internal copy, and doesn't need
	 * to get passed back to the FETD that allocated it.
	 */
	dest->io_hdr.flags |= CTL_FLAG_INT_COPY;
}

/*
 * Parse a number with an optional single-letter unit suffix
 * (b/k/m/g/t/p/e, case-insensitive) into *num.  Returns 0 on success,
 * -1 on an unrecognized suffix or on overflow of the shifted value.
 * NOTE(review): characters after the suffix are ignored, so "10kfoo"
 * parses as 10k.
 */
int
ctl_expand_number(const char *buf, uint64_t *num)
{
	char *endptr;
	uint64_t number;
	unsigned shift;

	number = strtoq(buf, &endptr, 0);

	switch (tolower((unsigned char)*endptr)) {
	case 'e':
		shift = 60;
		break;
	case 'p':
		shift = 50;
		break;
	case 't':
		shift = 40;
		break;
	case 'g':
		shift = 30;
		break;
	case 'm':
		shift = 20;
		break;
	case 'k':
		shift = 10;
		break;
	case 'b':
	case '\0': /* No unit. */
		*num = number;
		return (0);
	default:
		/* Unrecognized unit. */
		return (-1);
	}

	/* Round-trip check: shifting left then right must be lossless. */
	if ((number << shift) >> shift != number) {
		/* Overflow */
		return (-1);
	}
	*num = number << shift;
	return (0);
}


/*
 * This routine could be used in the future to load default and/or saved
 * mode page parameters for a particular lun.
 */
static int
ctl_init_page_index(struct ctl_lun *lun)
{
	int i;
	struct ctl_page_index *page_index;
	const char *value;
	uint64_t ival;

	/* Start from the static template; fill per-LUN data below. */
	memcpy(&lun->mode_pages.index, page_index_template,
	       sizeof(page_index_template));

	for (i = 0; i < CTL_NUM_MODE_PAGES; i++) {

		page_index = &lun->mode_pages.index[i];
		/*
		 * If this is a disk-only mode page, there's no point in
		 * setting it up.  For some pages, we have to have some
		 * basic information about the disk in order to calculate the
		 * mode page data.
		 */
		if ((lun->be_lun->lun_type != T_DIRECT)
		 && (page_index->page_flags & CTL_PAGE_FLAG_DISK_ONLY))
			continue;

		switch (page_index->page_code & SMPH_PC_MASK) {
		case SMS_RW_ERROR_RECOVERY_PAGE: {
			/* Read/write error recovery: template data only. */
			if (page_index->subpage != SMS_SUBPAGE_PAGE_0)
				panic("subpage is incorrect!");
			memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CURRENT],
			       &rw_er_page_default,
			       sizeof(rw_er_page_default));
			memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_CHANGEABLE],
			       &rw_er_page_changeable,
			       sizeof(rw_er_page_changeable));
			memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_DEFAULT],
			       &rw_er_page_default,
			       sizeof(rw_er_page_default));
			memcpy(&lun->mode_pages.rw_er_page[CTL_PAGE_SAVED],
			       &rw_er_page_default,
			       sizeof(rw_er_page_default));
			page_index->page_data =
				(uint8_t *)lun->mode_pages.rw_er_page;
			break;
		}
		case SMS_FORMAT_DEVICE_PAGE: {
			struct scsi_format_page *format_page;

			if (page_index->subpage != SMS_SUBPAGE_PAGE_0)
				panic("subpage is incorrect!");

			/*
			 * Sectors per track are set above.  Bytes per
			 * sector need to be set here on a per-LUN basis.
			 */
			memcpy(&lun->mode_pages.format_page[CTL_PAGE_CURRENT],
			       &format_page_default,
			       sizeof(format_page_default));
			memcpy(&lun->mode_pages.format_page[
			       CTL_PAGE_CHANGEABLE], &format_page_changeable,
			       sizeof(format_page_changeable));
			memcpy(&lun->mode_pages.format_page[CTL_PAGE_DEFAULT],
			       &format_page_default,
			       sizeof(format_page_default));
			memcpy(&lun->mode_pages.format_page[CTL_PAGE_SAVED],
			       &format_page_default,
			       sizeof(format_page_default));

			/* Patch the LUN blocksize into all writable copies. */
			format_page = &lun->mode_pages.format_page[
				CTL_PAGE_CURRENT];
			scsi_ulto2b(lun->be_lun->blocksize,
				    format_page->bytes_per_sector);

			format_page = &lun->mode_pages.format_page[
				CTL_PAGE_DEFAULT];
			scsi_ulto2b(lun->be_lun->blocksize,
				    format_page->bytes_per_sector);

			format_page = &lun->mode_pages.format_page[
				CTL_PAGE_SAVED];
			scsi_ulto2b(lun->be_lun->blocksize,
				    format_page->bytes_per_sector);

			page_index->page_data =
				(uint8_t *)lun->mode_pages.format_page;
			break;
		}
		case SMS_RIGID_DISK_PAGE: {
			struct scsi_rigid_disk_page *rigid_disk_page;
			uint32_t sectors_per_cylinder;
			uint64_t cylinders;
#ifndef	__XSCALE__
			int shift;
#endif /* !__XSCALE__ */

			if (page_index->subpage != SMS_SUBPAGE_PAGE_0)
				panic("invalid subpage value %d",
				      page_index->subpage);

			/*
			 * Rotation rate and sectors per track are set
			 * above.  We calculate the cylinders here based on
			 * capacity.  Due to the number of heads and
			 * sectors per track we're using, smaller arrays
			 * may turn out to have 0 cylinders.  Linux and
			 * FreeBSD don't pay attention to these mode pages
			 * to figure out capacity, but Solaris does.  It
			 * seems to deal with 0 cylinders just fine, and
			 * works out a fake geometry based on the capacity.
			 */
			memcpy(&lun->mode_pages.rigid_disk_page[
			       CTL_PAGE_DEFAULT], &rigid_disk_page_default,
			       sizeof(rigid_disk_page_default));
			memcpy(&lun->mode_pages.rigid_disk_page[
			       CTL_PAGE_CHANGEABLE],&rigid_disk_page_changeable,
			       sizeof(rigid_disk_page_changeable));

			sectors_per_cylinder = CTL_DEFAULT_SECTORS_PER_TRACK *
				CTL_DEFAULT_HEADS;

			/*
			 * The divide method here will be more accurate,
			 * probably, but results in floating point being
			 * used in the kernel on i386 (__udivdi3()).  On the
			 * XScale, though, __udivdi3() is implemented in
			 * software.
			 *
			 * The shift method for cylinder calculation is
			 * accurate if sectors_per_cylinder is a power of
			 * 2.  Otherwise it might be slightly off -- you
			 * might have a bit of a truncation problem.
			 */
#ifdef	__XSCALE__
			cylinders = (lun->be_lun->maxlba + 1) /
				sectors_per_cylinder;
#else
			/* Find the highest set bit of sectors_per_cylinder. */
			for (shift = 31; shift > 0; shift--) {
				if (sectors_per_cylinder & (1 << shift))
					break;
			}
			cylinders = (lun->be_lun->maxlba + 1) >> shift;
#endif

			/*
			 * We've basically got 3 bytes, or 24 bits for the
			 * cylinder size in the mode page.  If we're over,
			 * just round down to 2^24.
			 */
			if (cylinders > 0xffffff)
				cylinders = 0xffffff;

			rigid_disk_page = &lun->mode_pages.rigid_disk_page[
				CTL_PAGE_DEFAULT];
			scsi_ulto3b(cylinders, rigid_disk_page->cylinders);

			/* Optional per-LUN "rpm" option overrides the rate. */
			if ((value = ctl_get_opt(&lun->be_lun->options,
			    "rpm")) != NULL) {
				scsi_ulto2b(strtol(value, NULL, 0),
				    rigid_disk_page->rotation_rate);
			}

			memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_CURRENT],
			       &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT],
			       sizeof(rigid_disk_page_default));
			memcpy(&lun->mode_pages.rigid_disk_page[CTL_PAGE_SAVED],
			       &lun->mode_pages.rigid_disk_page[CTL_PAGE_DEFAULT],
			       sizeof(rigid_disk_page_default));

			page_index->page_data =
				(uint8_t *)lun->mode_pages.rigid_disk_page;
			break;
		}
		case SMS_CACHING_PAGE: {
			struct scsi_caching_page *caching_page;

			if (page_index->subpage != SMS_SUBPAGE_PAGE_0)
				panic("invalid subpage value %d",
				      page_index->subpage);
			memcpy(&lun->mode_pages.caching_page[CTL_PAGE_DEFAULT],
			       &caching_page_default,
			       sizeof(caching_page_default));
			memcpy(&lun->mode_pages.caching_page[
			       CTL_PAGE_CHANGEABLE], &caching_page_changeable,
			       sizeof(caching_page_changeable));
			memcpy(&lun->mode_pages.caching_page[CTL_PAGE_SAVED],
			       &caching_page_default,
			       sizeof(caching_page_default));
			/* Apply "writecache"/"readcache" LUN options to SAVED. */
			caching_page = &lun->mode_pages.caching_page[
			    CTL_PAGE_SAVED];
			value = ctl_get_opt(&lun->be_lun->options, "writecache");
			if (value != NULL && strcmp(value, "off") == 0)
				caching_page->flags1 &= ~SCP_WCE;
			value = ctl_get_opt(&lun->be_lun->options, "readcache");
			if (value != NULL && strcmp(value, "off") == 0)
				caching_page->flags1 |= SCP_RCD;
			/* CURRENT starts out equal to SAVED. */
			memcpy(&lun->mode_pages.caching_page[CTL_PAGE_CURRENT],
			       &lun->mode_pages.caching_page[CTL_PAGE_SAVED],
			       sizeof(caching_page_default));
			page_index->page_data =
				(uint8_t *)lun->mode_pages.caching_page;
			break;
		}
		case SMS_CONTROL_MODE_PAGE: {
			struct scsi_control_page *control_page;

			if (page_index->subpage != SMS_SUBPAGE_PAGE_0)
				panic("invalid subpage value %d",
				      page_index->subpage);

			memcpy(&lun->mode_pages.control_page[CTL_PAGE_DEFAULT],
			       &control_page_default,
			       sizeof(control_page_default));
			memcpy(&lun->mode_pages.control_page[
			    CTL_PAGE_CHANGEABLE], &control_page_changeable,
			       sizeof(control_page_changeable));
			memcpy(&lun->mode_pages.control_page[CTL_PAGE_SAVED],
			       &control_page_default,
			       sizeof(control_page_default));
			/* "reordering" option selects the queue algorithm. */
			control_page = &lun->mode_pages.control_page[
			    CTL_PAGE_SAVED];
			value = ctl_get_opt(&lun->be_lun->options, "reordering");
			if (value != NULL && strcmp(value, "unrestricted") == 0) {
				control_page->queue_flags &= ~SCP_QUEUE_ALG_MASK;
				control_page->queue_flags |= SCP_QUEUE_ALG_UNRESTRICTED;
			}
			memcpy(&lun->mode_pages.control_page[CTL_PAGE_CURRENT],
			       &lun->mode_pages.control_page[CTL_PAGE_SAVED],
			       sizeof(control_page_default));
			page_index->page_data =
				(uint8_t *)lun->mode_pages.control_page;
			break;

		}
		case SMS_INFO_EXCEPTIONS_PAGE: {
			switch (page_index->subpage) {
			case SMS_SUBPAGE_PAGE_0:
				/* Informational exceptions: template only. */
				memcpy(&lun->mode_pages.ie_page[CTL_PAGE_CURRENT],
				       &ie_page_default,
				       sizeof(ie_page_default));
				memcpy(&lun->mode_pages.ie_page[
				       CTL_PAGE_CHANGEABLE], &ie_page_changeable,
				       sizeof(ie_page_changeable));
				memcpy(&lun->mode_pages.ie_page[CTL_PAGE_DEFAULT],
				       &ie_page_default,
				       sizeof(ie_page_default));
				memcpy(&lun->mode_pages.ie_page[CTL_PAGE_SAVED],
				       &ie_page_default,
				       sizeof(ie_page_default));
				page_index->page_data =
					(uint8_t *)lun->mode_pages.ie_page;
				break;
			case 0x02: {
				/* Logical block provisioning subpage. */
				struct ctl_logical_block_provisioning_page *page;

				memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_DEFAULT],
				       &lbp_page_default,
				       sizeof(lbp_page_default));
				memcpy(&lun->mode_pages.lbp_page[
				       CTL_PAGE_CHANGEABLE], &lbp_page_changeable,
				       sizeof(lbp_page_changeable));
				memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_SAVED],
				       &lbp_page_default,
				       sizeof(lbp_page_default));
				/*
				 * Each threshold option is given in bytes and
				 * converted to blocks (falling back to 512 if
				 * the blocksize is unset), then scaled by
				 * CTL_LBP_EXPONENT.
				 */
				page = &lun->mode_pages.lbp_page[CTL_PAGE_SAVED];
				value = ctl_get_opt(&lun->be_lun->options,
				    "avail-threshold");
				if (value != NULL &&
				    ctl_expand_number(value, &ival) == 0) {
					page->descr[0].flags |= SLBPPD_ENABLED |
					    SLBPPD_ARMING_DEC;
					if (lun->be_lun->blocksize)
						ival /= lun->be_lun->blocksize;
					else
						ival /= 512;
					scsi_ulto4b(ival >> CTL_LBP_EXPONENT,
					    page->descr[0].count);
				}
				value = ctl_get_opt(&lun->be_lun->options,
				    "used-threshold");
				if (value != NULL &&
				    ctl_expand_number(value, &ival) == 0) {
					page->descr[1].flags |= SLBPPD_ENABLED |
					    SLBPPD_ARMING_INC;
					if (lun->be_lun->blocksize)
						ival /= lun->be_lun->blocksize;
					else
						ival /= 512;
					scsi_ulto4b(ival >> CTL_LBP_EXPONENT,
					    page->descr[1].count);
				}
				value = ctl_get_opt(&lun->be_lun->options,
				    "pool-avail-threshold");
				if (value != NULL &&
				    ctl_expand_number(value, &ival) == 0) {
					page->descr[2].flags |= SLBPPD_ENABLED |
					    SLBPPD_ARMING_DEC;
					if (lun->be_lun->blocksize)
						ival /= lun->be_lun->blocksize;
					else
						ival /= 512;
					scsi_ulto4b(ival >> CTL_LBP_EXPONENT,
					    page->descr[2].count);
				}
				value = ctl_get_opt(&lun->be_lun->options,
				    "pool-used-threshold");
				if (value != NULL &&
				    ctl_expand_number(value, &ival) == 0) {
					page->descr[3].flags |= SLBPPD_ENABLED |
					    SLBPPD_ARMING_INC;
					if (lun->be_lun->blocksize)
						ival /= lun->be_lun->blocksize;
					else
						ival /= 512;
					scsi_ulto4b(ival >> CTL_LBP_EXPONENT,
					    page->descr[3].count);
				}
				memcpy(&lun->mode_pages.lbp_page[CTL_PAGE_CURRENT],
				       &lun->mode_pages.lbp_page[CTL_PAGE_SAVED],
				       sizeof(lbp_page_default));
				page_index->page_data =
					(uint8_t *)lun->mode_pages.lbp_page;
			}}
			break;
		}
		case SMS_VENDOR_SPECIFIC_PAGE:{
			switch (page_index->subpage) {
			case DBGCNF_SUBPAGE_CODE: {
				struct copan_debugconf_subpage *current_page,
							       *saved_page;

				memcpy(&lun->mode_pages.debugconf_subpage[
				       CTL_PAGE_CURRENT],
				       &debugconf_page_default,
				       sizeof(debugconf_page_default));
				memcpy(&lun->mode_pages.debugconf_subpage[
				       CTL_PAGE_CHANGEABLE],
				       &debugconf_page_changeable,
				       sizeof(debugconf_page_changeable));
				memcpy(&lun->mode_pages.debugconf_subpage[
				       CTL_PAGE_DEFAULT],
				       &debugconf_page_default,
				       sizeof(debugconf_page_default));
				memcpy(&lun->mode_pages.debugconf_subpage[
				       CTL_PAGE_SAVED],
				       &debugconf_page_default,
				       sizeof(debugconf_page_default));
				page_index->page_data =
					(uint8_t *)lun->mode_pages.debugconf_subpage;

				/*
				 * NOTE(review): current_page/saved_page are
				 * computed but never used here.
				 */
				current_page = (struct copan_debugconf_subpage *)
					(page_index->page_data +
					 (page_index->page_len *
					  CTL_PAGE_CURRENT));
				saved_page = (struct copan_debugconf_subpage *)
					(page_index->page_data +
					 (page_index->page_len *
					  CTL_PAGE_SAVED));
				break;
			}
			default:
				panic("invalid subpage value %d",
				      page_index->subpage);
				break;
			}
   			break;
		}
		default:
			panic("invalid page value %d",
			      page_index->page_code & SMPH_PC_MASK);
			break;
    	}
	}

	return (CTL_RETVAL_COMPLETE);
}

/*
 * Build the log page index for a LUN: a list of supported pages
 * (index[0]) and supported page/subpage pairs (index[1]), plus the
 * logical block provisioning log page data (index[2]).
 */
static int
ctl_init_log_page_index(struct ctl_lun *lun)
{
	struct ctl_page_index *page_index;
	int i, j, k, prev;

	memcpy(&lun->log_pages.index, log_page_index_template,
	       sizeof(log_page_index_template));

	prev = -1;
	for (i = 0, j = 0, k = 0; i < CTL_NUM_LOG_PAGES; i++) {

		page_index = &lun->log_pages.index[i];
		/*
		 * If this is a disk-only mode page, there's no point in
		 * setting it up.  For some pages, we have to have some
		 * basic information about the disk in order to calculate the
		 * mode page data.
		 */
		if ((lun->be_lun->lun_type != T_DIRECT)
		 && (page_index->page_flags & CTL_PAGE_FLAG_DISK_ONLY))
			continue;

		/* LBP log page requires backend attribute support. */
		if (page_index->page_code == SLS_LOGICAL_BLOCK_PROVISIONING &&
		    lun->backend->lun_attr == NULL)
			continue;

		/* Record each distinct page code once (entries are sorted). */
		if (page_index->page_code != prev) {
			lun->log_pages.pages_page[j] = page_index->page_code;
			prev = page_index->page_code;
			j++;
		}
		lun->log_pages.subpages_page[k*2] = page_index->page_code;
		lun->log_pages.subpages_page[k*2+1] = page_index->subpage;
		k++;
	}
	lun->log_pages.index[0].page_data = &lun->log_pages.pages_page[0];
	lun->log_pages.index[0].page_len = j;
	lun->log_pages.index[1].page_data = &lun->log_pages.subpages_page[0];
	lun->log_pages.index[1].page_len = k * 2;
	lun->log_pages.index[2].page_data = &lun->log_pages.lbp_page[0];
	lun->log_pages.index[2].page_len = 12*CTL_NUM_LBP_PARAMS;

	return (CTL_RETVAL_COMPLETE);
}

/*
 * Convert a hex string (optionally prefixed with whitespace and "0x")
 * into binary, writing at most buf_size bytes into buf.  Parsing stops
 * at the first non-hex character.  Returns the number of bytes produced.
 */
static int
hex2bin(const char *str, uint8_t *buf, int buf_size)
{
	int i;
	u_char c;

	memset(buf, 0, buf_size);
	while (isspace(str[0]))
		str++;
	if (str[0] == '0' && (str[1] == 'x' || str[1] == 'X'))
		str += 2;
	/* Two hex digits per output byte. */
	buf_size *= 2;
	for (i = 0; str[i] != 0 && i < buf_size; i++) {
		c = str[i];
		if (isdigit(c))
			c -= '0';
		else if (isalpha(c))
			c -= isupper(c) ? 'A' - 10 : 'a' - 10;
		else
			break;
		if (c >= 16)
			break;
		/* Even positions fill the high nibble, odd the low one. */
		if ((i & 1) == 0)
			buf[i / 2] |= (c << 4);
		else
			buf[i / 2] |= c;
	}
	return ((i + 1) / 2);
}

/*
 * LUN allocation.
 *
 * Requirements:
 * - caller allocates and zeros LUN storage, or passes in a NULL LUN if he
 *   wants us to allocate the LUN and he can block.
4413 * - ctl_softc is always set 4414 * - be_lun is set if the LUN has a backend (needed for disk LUNs) 4415 * 4416 * Returns 0 for success, non-zero (errno) for failure. 4417 */ 4418static int 4419ctl_alloc_lun(struct ctl_softc *ctl_softc, struct ctl_lun *ctl_lun, 4420 struct ctl_be_lun *const be_lun, struct ctl_id target_id) 4421{ 4422 struct ctl_lun *nlun, *lun; 4423 struct ctl_port *port; 4424 struct scsi_vpd_id_descriptor *desc; 4425 struct scsi_vpd_id_t10 *t10id; 4426 const char *eui, *naa, *scsiname, *vendor, *value; 4427 int lun_number, i, lun_malloced; 4428 int devidlen, idlen1, idlen2 = 0, len; 4429 4430 if (be_lun == NULL) 4431 return (EINVAL); 4432 4433 /* 4434 * We currently only support Direct Access or Processor LUN types. 4435 */ 4436 switch (be_lun->lun_type) { 4437 case T_DIRECT: 4438 break; 4439 case T_PROCESSOR: 4440 break; 4441 case T_SEQUENTIAL: 4442 case T_CHANGER: 4443 default: 4444 be_lun->lun_config_status(be_lun->be_lun, 4445 CTL_LUN_CONFIG_FAILURE); 4446 break; 4447 } 4448 if (ctl_lun == NULL) { 4449 lun = malloc(sizeof(*lun), M_CTL, M_WAITOK); 4450 lun_malloced = 1; 4451 } else { 4452 lun_malloced = 0; 4453 lun = ctl_lun; 4454 } 4455 4456 memset(lun, 0, sizeof(*lun)); 4457 if (lun_malloced) 4458 lun->flags = CTL_LUN_MALLOCED; 4459 4460 /* Generate LUN ID. 
*/ 4461 devidlen = max(CTL_DEVID_MIN_LEN, 4462 strnlen(be_lun->device_id, CTL_DEVID_LEN)); 4463 idlen1 = sizeof(*t10id) + devidlen; 4464 len = sizeof(struct scsi_vpd_id_descriptor) + idlen1; 4465 scsiname = ctl_get_opt(&be_lun->options, "scsiname"); 4466 if (scsiname != NULL) { 4467 idlen2 = roundup2(strlen(scsiname) + 1, 4); 4468 len += sizeof(struct scsi_vpd_id_descriptor) + idlen2; 4469 } 4470 eui = ctl_get_opt(&be_lun->options, "eui"); 4471 if (eui != NULL) { 4472 len += sizeof(struct scsi_vpd_id_descriptor) + 16; 4473 } 4474 naa = ctl_get_opt(&be_lun->options, "naa"); 4475 if (naa != NULL) { 4476 len += sizeof(struct scsi_vpd_id_descriptor) + 16; 4477 } 4478 lun->lun_devid = malloc(sizeof(struct ctl_devid) + len, 4479 M_CTL, M_WAITOK | M_ZERO); 4480 desc = (struct scsi_vpd_id_descriptor *)lun->lun_devid->data; 4481 desc->proto_codeset = SVPD_ID_CODESET_ASCII; 4482 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | SVPD_ID_TYPE_T10; 4483 desc->length = idlen1; 4484 t10id = (struct scsi_vpd_id_t10 *)&desc->identifier[0]; 4485 memset(t10id->vendor, ' ', sizeof(t10id->vendor)); 4486 if ((vendor = ctl_get_opt(&be_lun->options, "vendor")) == NULL) { 4487 strncpy((char *)t10id->vendor, CTL_VENDOR, sizeof(t10id->vendor)); 4488 } else { 4489 strncpy(t10id->vendor, vendor, 4490 min(sizeof(t10id->vendor), strlen(vendor))); 4491 } 4492 strncpy((char *)t10id->vendor_spec_id, 4493 (char *)be_lun->device_id, devidlen); 4494 if (scsiname != NULL) { 4495 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4496 desc->length); 4497 desc->proto_codeset = SVPD_ID_CODESET_UTF8; 4498 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4499 SVPD_ID_TYPE_SCSI_NAME; 4500 desc->length = idlen2; 4501 strlcpy(desc->identifier, scsiname, idlen2); 4502 } 4503 if (eui != NULL) { 4504 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4505 desc->length); 4506 desc->proto_codeset = SVPD_ID_CODESET_BINARY; 4507 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4508 
SVPD_ID_TYPE_EUI64; 4509 desc->length = hex2bin(eui, desc->identifier, 16); 4510 desc->length = desc->length > 12 ? 16 : 4511 (desc->length > 8 ? 12 : 8); 4512 len -= 16 - desc->length; 4513 } 4514 if (naa != NULL) { 4515 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 4516 desc->length); 4517 desc->proto_codeset = SVPD_ID_CODESET_BINARY; 4518 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_LUN | 4519 SVPD_ID_TYPE_NAA; 4520 desc->length = hex2bin(naa, desc->identifier, 16); 4521 desc->length = desc->length > 8 ? 16 : 8; 4522 len -= 16 - desc->length; 4523 } 4524 lun->lun_devid->len = len; 4525 4526 mtx_lock(&ctl_softc->ctl_lock); 4527 /* 4528 * See if the caller requested a particular LUN number. If so, see 4529 * if it is available. Otherwise, allocate the first available LUN. 4530 */ 4531 if (be_lun->flags & CTL_LUN_FLAG_ID_REQ) { 4532 if ((be_lun->req_lun_id > (CTL_MAX_LUNS - 1)) 4533 || (ctl_is_set(ctl_softc->ctl_lun_mask, be_lun->req_lun_id))) { 4534 mtx_unlock(&ctl_softc->ctl_lock); 4535 if (be_lun->req_lun_id > (CTL_MAX_LUNS - 1)) { 4536 printf("ctl: requested LUN ID %d is higher " 4537 "than CTL_MAX_LUNS - 1 (%d)\n", 4538 be_lun->req_lun_id, CTL_MAX_LUNS - 1); 4539 } else { 4540 /* 4541 * XXX KDM return an error, or just assign 4542 * another LUN ID in this case?? 
4543 */ 4544 printf("ctl: requested LUN ID %d is already " 4545 "in use\n", be_lun->req_lun_id); 4546 } 4547 if (lun->flags & CTL_LUN_MALLOCED) 4548 free(lun, M_CTL); 4549 be_lun->lun_config_status(be_lun->be_lun, 4550 CTL_LUN_CONFIG_FAILURE); 4551 return (ENOSPC); 4552 } 4553 lun_number = be_lun->req_lun_id; 4554 } else { 4555 lun_number = ctl_ffz(ctl_softc->ctl_lun_mask, CTL_MAX_LUNS); 4556 if (lun_number == -1) { 4557 mtx_unlock(&ctl_softc->ctl_lock); 4558 printf("ctl: can't allocate LUN on target %ju, out of " 4559 "LUNs\n", (uintmax_t)target_id.id); 4560 if (lun->flags & CTL_LUN_MALLOCED) 4561 free(lun, M_CTL); 4562 be_lun->lun_config_status(be_lun->be_lun, 4563 CTL_LUN_CONFIG_FAILURE); 4564 return (ENOSPC); 4565 } 4566 } 4567 ctl_set_mask(ctl_softc->ctl_lun_mask, lun_number); 4568 4569 mtx_init(&lun->lun_lock, "CTL LUN", NULL, MTX_DEF); 4570 lun->target = target_id; 4571 lun->lun = lun_number; 4572 lun->be_lun = be_lun; 4573 /* 4574 * The processor LUN is always enabled. Disk LUNs come on line 4575 * disabled, and must be enabled by the backend. 
4576 */ 4577 lun->flags |= CTL_LUN_DISABLED; 4578 lun->backend = be_lun->be; 4579 be_lun->ctl_lun = lun; 4580 be_lun->lun_id = lun_number; 4581 atomic_add_int(&be_lun->be->num_luns, 1); 4582 if (be_lun->flags & CTL_LUN_FLAG_OFFLINE) 4583 lun->flags |= CTL_LUN_OFFLINE; 4584 4585 if (be_lun->flags & CTL_LUN_FLAG_POWERED_OFF) 4586 lun->flags |= CTL_LUN_STOPPED; 4587 4588 if (be_lun->flags & CTL_LUN_FLAG_INOPERABLE) 4589 lun->flags |= CTL_LUN_INOPERABLE; 4590 4591 if (be_lun->flags & CTL_LUN_FLAG_PRIMARY) 4592 lun->flags |= CTL_LUN_PRIMARY_SC; 4593 4594 value = ctl_get_opt(&be_lun->options, "readonly"); 4595 if (value != NULL && strcmp(value, "on") == 0) 4596 lun->flags |= CTL_LUN_READONLY; 4597 4598 lun->serseq = CTL_LUN_SERSEQ_OFF; 4599 if (be_lun->flags & CTL_LUN_FLAG_SERSEQ_READ) 4600 lun->serseq = CTL_LUN_SERSEQ_READ; 4601 value = ctl_get_opt(&be_lun->options, "serseq"); 4602 if (value != NULL && strcmp(value, "on") == 0) 4603 lun->serseq = CTL_LUN_SERSEQ_ON; 4604 else if (value != NULL && strcmp(value, "read") == 0) 4605 lun->serseq = CTL_LUN_SERSEQ_READ; 4606 else if (value != NULL && strcmp(value, "off") == 0) 4607 lun->serseq = CTL_LUN_SERSEQ_OFF; 4608 4609 lun->ctl_softc = ctl_softc; 4610 TAILQ_INIT(&lun->ooa_queue); 4611 TAILQ_INIT(&lun->blocked_queue); 4612 STAILQ_INIT(&lun->error_list); 4613 ctl_tpc_lun_init(lun); 4614 4615 /* 4616 * Initialize the mode and log page index. 4617 */ 4618 ctl_init_page_index(lun); 4619 ctl_init_log_page_index(lun); 4620 4621 /* 4622 * Now, before we insert this lun on the lun list, set the lun 4623 * inventory changed UA for all other luns. 
4624 */ 4625 STAILQ_FOREACH(nlun, &ctl_softc->lun_list, links) { 4626 mtx_lock(&nlun->lun_lock); 4627 ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE); 4628 mtx_unlock(&nlun->lun_lock); 4629 } 4630 4631 STAILQ_INSERT_TAIL(&ctl_softc->lun_list, lun, links); 4632 4633 ctl_softc->ctl_luns[lun_number] = lun; 4634 4635 ctl_softc->num_luns++; 4636 4637 /* Setup statistics gathering */ 4638 lun->stats.device_type = be_lun->lun_type; 4639 lun->stats.lun_number = lun_number; 4640 if (lun->stats.device_type == T_DIRECT) 4641 lun->stats.blocksize = be_lun->blocksize; 4642 else 4643 lun->stats.flags = CTL_LUN_STATS_NO_BLOCKSIZE; 4644 for (i = 0;i < CTL_MAX_PORTS;i++) 4645 lun->stats.ports[i].targ_port = i; 4646 4647 mtx_unlock(&ctl_softc->ctl_lock); 4648 4649 lun->be_lun->lun_config_status(lun->be_lun->be_lun, CTL_LUN_CONFIG_OK); 4650 4651 /* 4652 * Run through each registered FETD and bring it online if it isn't 4653 * already. Enable the target ID if it hasn't been enabled, and 4654 * enable this particular LUN. 4655 */ 4656 STAILQ_FOREACH(port, &ctl_softc->port_list, links) { 4657 int retval; 4658 4659 retval = port->lun_enable(port->targ_lun_arg, target_id,lun_number); 4660 if (retval != 0) { 4661 printf("ctl_alloc_lun: FETD %s port %d returned error " 4662 "%d for lun_enable on target %ju lun %d\n", 4663 port->port_name, port->targ_port, retval, 4664 (uintmax_t)target_id.id, lun_number); 4665 } else 4666 port->status |= CTL_PORT_STATUS_LUN_ONLINE; 4667 } 4668 return (0); 4669} 4670 4671/* 4672 * Delete a LUN. 4673 * Assumptions: 4674 * - LUN has already been marked invalid and any pending I/O has been taken 4675 * care of. 
4676 */ 4677static int 4678ctl_free_lun(struct ctl_lun *lun) 4679{ 4680 struct ctl_softc *softc; 4681#if 0 4682 struct ctl_port *port; 4683#endif 4684 struct ctl_lun *nlun; 4685 int i; 4686 4687 softc = lun->ctl_softc; 4688 4689 mtx_assert(&softc->ctl_lock, MA_OWNED); 4690 4691 STAILQ_REMOVE(&softc->lun_list, lun, ctl_lun, links); 4692 4693 ctl_clear_mask(softc->ctl_lun_mask, lun->lun); 4694 4695 softc->ctl_luns[lun->lun] = NULL; 4696 4697 if (!TAILQ_EMPTY(&lun->ooa_queue)) 4698 panic("Freeing a LUN %p with outstanding I/O!!\n", lun); 4699 4700 softc->num_luns--; 4701 4702 /* 4703 * XXX KDM this scheme only works for a single target/multiple LUN 4704 * setup. It needs to be revamped for a multiple target scheme. 4705 * 4706 * XXX KDM this results in port->lun_disable() getting called twice, 4707 * once when ctl_disable_lun() is called, and a second time here. 4708 * We really need to re-think the LUN disable semantics. There 4709 * should probably be several steps/levels to LUN removal: 4710 * - disable 4711 * - invalidate 4712 * - free 4713 * 4714 * Right now we only have a disable method when communicating to 4715 * the front end ports, at least for individual LUNs. 
4716 */ 4717#if 0 4718 STAILQ_FOREACH(port, &softc->port_list, links) { 4719 int retval; 4720 4721 retval = port->lun_disable(port->targ_lun_arg, lun->target, 4722 lun->lun); 4723 if (retval != 0) { 4724 printf("ctl_free_lun: FETD %s port %d returned error " 4725 "%d for lun_disable on target %ju lun %jd\n", 4726 port->port_name, port->targ_port, retval, 4727 (uintmax_t)lun->target.id, (intmax_t)lun->lun); 4728 } 4729 4730 if (STAILQ_FIRST(&softc->lun_list) == NULL) { 4731 port->status &= ~CTL_PORT_STATUS_LUN_ONLINE; 4732 4733 retval = port->targ_disable(port->targ_lun_arg,lun->target); 4734 if (retval != 0) { 4735 printf("ctl_free_lun: FETD %s port %d " 4736 "returned error %d for targ_disable on " 4737 "target %ju\n", port->port_name, 4738 port->targ_port, retval, 4739 (uintmax_t)lun->target.id); 4740 } else 4741 port->status &= ~CTL_PORT_STATUS_TARG_ONLINE; 4742 4743 if ((port->status & CTL_PORT_STATUS_TARG_ONLINE) != 0) 4744 continue; 4745 4746#if 0 4747 port->port_offline(port->onoff_arg); 4748 port->status &= ~CTL_PORT_STATUS_ONLINE; 4749#endif 4750 } 4751 } 4752#endif 4753 4754 /* 4755 * Tell the backend to free resources, if this LUN has a backend. 
4756 */ 4757 atomic_subtract_int(&lun->be_lun->be->num_luns, 1); 4758 lun->be_lun->lun_shutdown(lun->be_lun->be_lun); 4759 4760 ctl_tpc_lun_shutdown(lun); 4761 mtx_destroy(&lun->lun_lock); 4762 free(lun->lun_devid, M_CTL); 4763 for (i = 0; i < CTL_MAX_PORTS; i++) 4764 free(lun->pending_ua[i], M_CTL); 4765 for (i = 0; i < 2 * CTL_MAX_PORTS; i++) 4766 free(lun->pr_keys[i], M_CTL); 4767 free(lun->write_buffer, M_CTL); 4768 if (lun->flags & CTL_LUN_MALLOCED) 4769 free(lun, M_CTL); 4770 4771 STAILQ_FOREACH(nlun, &softc->lun_list, links) { 4772 mtx_lock(&nlun->lun_lock); 4773 ctl_est_ua_all(nlun, -1, CTL_UA_LUN_CHANGE); 4774 mtx_unlock(&nlun->lun_lock); 4775 } 4776 4777 return (0); 4778} 4779 4780static void 4781ctl_create_lun(struct ctl_be_lun *be_lun) 4782{ 4783 struct ctl_softc *softc; 4784 4785 softc = control_softc; 4786 4787 /* 4788 * ctl_alloc_lun() should handle all potential failure cases. 4789 */ 4790 ctl_alloc_lun(softc, NULL, be_lun, softc->target); 4791} 4792 4793int 4794ctl_add_lun(struct ctl_be_lun *be_lun) 4795{ 4796 struct ctl_softc *softc = control_softc; 4797 4798 mtx_lock(&softc->ctl_lock); 4799 STAILQ_INSERT_TAIL(&softc->pending_lun_queue, be_lun, links); 4800 mtx_unlock(&softc->ctl_lock); 4801 wakeup(&softc->pending_lun_queue); 4802 4803 return (0); 4804} 4805 4806int 4807ctl_enable_lun(struct ctl_be_lun *be_lun) 4808{ 4809 struct ctl_softc *softc; 4810 struct ctl_port *port, *nport; 4811 struct ctl_lun *lun; 4812 int retval; 4813 4814 lun = (struct ctl_lun *)be_lun->ctl_lun; 4815 softc = lun->ctl_softc; 4816 4817 mtx_lock(&softc->ctl_lock); 4818 mtx_lock(&lun->lun_lock); 4819 if ((lun->flags & CTL_LUN_DISABLED) == 0) { 4820 /* 4821 * eh? Why did we get called if the LUN is already 4822 * enabled? 
4823 */ 4824 mtx_unlock(&lun->lun_lock); 4825 mtx_unlock(&softc->ctl_lock); 4826 return (0); 4827 } 4828 lun->flags &= ~CTL_LUN_DISABLED; 4829 mtx_unlock(&lun->lun_lock); 4830 4831 for (port = STAILQ_FIRST(&softc->port_list); port != NULL; port = nport) { 4832 nport = STAILQ_NEXT(port, links); 4833 4834 /* 4835 * Drop the lock while we call the FETD's enable routine. 4836 * This can lead to a callback into CTL (at least in the 4837 * case of the internal initiator frontend. 4838 */ 4839 mtx_unlock(&softc->ctl_lock); 4840 retval = port->lun_enable(port->targ_lun_arg, lun->target,lun->lun); 4841 mtx_lock(&softc->ctl_lock); 4842 if (retval != 0) { 4843 printf("%s: FETD %s port %d returned error " 4844 "%d for lun_enable on target %ju lun %jd\n", 4845 __func__, port->port_name, port->targ_port, retval, 4846 (uintmax_t)lun->target.id, (intmax_t)lun->lun); 4847 } 4848#if 0 4849 else { 4850 /* NOTE: TODO: why does lun enable affect port status? */ 4851 port->status |= CTL_PORT_STATUS_LUN_ONLINE; 4852 } 4853#endif 4854 } 4855 4856 mtx_unlock(&softc->ctl_lock); 4857 4858 return (0); 4859} 4860 4861int 4862ctl_disable_lun(struct ctl_be_lun *be_lun) 4863{ 4864 struct ctl_softc *softc; 4865 struct ctl_port *port; 4866 struct ctl_lun *lun; 4867 int retval; 4868 4869 lun = (struct ctl_lun *)be_lun->ctl_lun; 4870 softc = lun->ctl_softc; 4871 4872 mtx_lock(&softc->ctl_lock); 4873 mtx_lock(&lun->lun_lock); 4874 if (lun->flags & CTL_LUN_DISABLED) { 4875 mtx_unlock(&lun->lun_lock); 4876 mtx_unlock(&softc->ctl_lock); 4877 return (0); 4878 } 4879 lun->flags |= CTL_LUN_DISABLED; 4880 mtx_unlock(&lun->lun_lock); 4881 4882 STAILQ_FOREACH(port, &softc->port_list, links) { 4883 mtx_unlock(&softc->ctl_lock); 4884 /* 4885 * Drop the lock before we call the frontend's disable 4886 * routine, to avoid lock order reversals. 4887 * 4888 * XXX KDM what happens if the frontend list changes while 4889 * we're traversing it? It's unlikely, but should be handled. 
4890 */ 4891 retval = port->lun_disable(port->targ_lun_arg, lun->target, 4892 lun->lun); 4893 mtx_lock(&softc->ctl_lock); 4894 if (retval != 0) { 4895 printf("ctl_alloc_lun: FETD %s port %d returned error " 4896 "%d for lun_disable on target %ju lun %jd\n", 4897 port->port_name, port->targ_port, retval, 4898 (uintmax_t)lun->target.id, (intmax_t)lun->lun); 4899 } 4900 } 4901 4902 mtx_unlock(&softc->ctl_lock); 4903 4904 return (0); 4905} 4906 4907int 4908ctl_start_lun(struct ctl_be_lun *be_lun) 4909{ 4910 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4911 4912 mtx_lock(&lun->lun_lock); 4913 lun->flags &= ~CTL_LUN_STOPPED; 4914 mtx_unlock(&lun->lun_lock); 4915 return (0); 4916} 4917 4918int 4919ctl_stop_lun(struct ctl_be_lun *be_lun) 4920{ 4921 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4922 4923 mtx_lock(&lun->lun_lock); 4924 lun->flags |= CTL_LUN_STOPPED; 4925 mtx_unlock(&lun->lun_lock); 4926 return (0); 4927} 4928 4929int 4930ctl_lun_offline(struct ctl_be_lun *be_lun) 4931{ 4932 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4933 4934 mtx_lock(&lun->lun_lock); 4935 lun->flags |= CTL_LUN_OFFLINE; 4936 mtx_unlock(&lun->lun_lock); 4937 return (0); 4938} 4939 4940int 4941ctl_lun_online(struct ctl_be_lun *be_lun) 4942{ 4943 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4944 4945 mtx_lock(&lun->lun_lock); 4946 lun->flags &= ~CTL_LUN_OFFLINE; 4947 mtx_unlock(&lun->lun_lock); 4948 return (0); 4949} 4950 4951int 4952ctl_invalidate_lun(struct ctl_be_lun *be_lun) 4953{ 4954 struct ctl_softc *softc; 4955 struct ctl_lun *lun; 4956 4957 lun = (struct ctl_lun *)be_lun->ctl_lun; 4958 softc = lun->ctl_softc; 4959 4960 mtx_lock(&lun->lun_lock); 4961 4962 /* 4963 * The LUN needs to be disabled before it can be marked invalid. 4964 */ 4965 if ((lun->flags & CTL_LUN_DISABLED) == 0) { 4966 mtx_unlock(&lun->lun_lock); 4967 return (-1); 4968 } 4969 /* 4970 * Mark the LUN invalid. 
4971 */ 4972 lun->flags |= CTL_LUN_INVALID; 4973 4974 /* 4975 * If there is nothing in the OOA queue, go ahead and free the LUN. 4976 * If we have something in the OOA queue, we'll free it when the 4977 * last I/O completes. 4978 */ 4979 if (TAILQ_EMPTY(&lun->ooa_queue)) { 4980 mtx_unlock(&lun->lun_lock); 4981 mtx_lock(&softc->ctl_lock); 4982 ctl_free_lun(lun); 4983 mtx_unlock(&softc->ctl_lock); 4984 } else 4985 mtx_unlock(&lun->lun_lock); 4986 4987 return (0); 4988} 4989 4990int 4991ctl_lun_inoperable(struct ctl_be_lun *be_lun) 4992{ 4993 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 4994 4995 mtx_lock(&lun->lun_lock); 4996 lun->flags |= CTL_LUN_INOPERABLE; 4997 mtx_unlock(&lun->lun_lock); 4998 return (0); 4999} 5000 5001int 5002ctl_lun_operable(struct ctl_be_lun *be_lun) 5003{ 5004 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 5005 5006 mtx_lock(&lun->lun_lock); 5007 lun->flags &= ~CTL_LUN_INOPERABLE; 5008 mtx_unlock(&lun->lun_lock); 5009 return (0); 5010} 5011 5012void 5013ctl_lun_capacity_changed(struct ctl_be_lun *be_lun) 5014{ 5015 struct ctl_lun *lun = (struct ctl_lun *)be_lun->ctl_lun; 5016 5017 mtx_lock(&lun->lun_lock); 5018 ctl_est_ua_all(lun, -1, CTL_UA_CAPACITY_CHANGED); 5019 mtx_unlock(&lun->lun_lock); 5020} 5021 5022/* 5023 * Backend "memory move is complete" callback for requests that never 5024 * make it down to say RAIDCore's configuration code. 
5025 */ 5026int 5027ctl_config_move_done(union ctl_io *io) 5028{ 5029 int retval; 5030 5031 CTL_DEBUG_PRINT(("ctl_config_move_done\n")); 5032 KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, 5033 ("Config I/O type isn't CTL_IO_SCSI (%d)!", io->io_hdr.io_type)); 5034 5035 if ((io->io_hdr.port_status != 0) && 5036 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 5037 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 5038 /* 5039 * For hardware error sense keys, the sense key 5040 * specific value is defined to be a retry count, 5041 * but we use it to pass back an internal FETD 5042 * error code. XXX KDM Hopefully the FETD is only 5043 * using 16 bits for an error code, since that's 5044 * all the space we have in the sks field. 5045 */ 5046 ctl_set_internal_failure(&io->scsiio, 5047 /*sks_valid*/ 1, 5048 /*retry_count*/ 5049 io->io_hdr.port_status); 5050 } 5051 5052 if (((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) || 5053 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 5054 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS) || 5055 ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0)) { 5056 /* 5057 * XXX KDM just assuming a single pointer here, and not a 5058 * S/G list. If we start using S/G lists for config data, 5059 * we'll need to know how to clean them up here as well. 5060 */ 5061 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 5062 free(io->scsiio.kern_data_ptr, M_CTL); 5063 ctl_done(io); 5064 retval = CTL_RETVAL_COMPLETE; 5065 } else { 5066 /* 5067 * XXX KDM now we need to continue data movement. Some 5068 * options: 5069 * - call ctl_scsiio() again? We don't do this for data 5070 * writes, because for those at least we know ahead of 5071 * time where the write will go and how long it is. For 5072 * config writes, though, that information is largely 5073 * contained within the write itself, thus we need to 5074 * parse out the data again. 5075 * 5076 * - Call some other function once the data is in? 
5077 */ 5078 if (ctl_debug & CTL_DEBUG_CDB_DATA) 5079 ctl_data_print(io); 5080 5081 /* 5082 * XXX KDM call ctl_scsiio() again for now, and check flag 5083 * bits to see whether we're allocated or not. 5084 */ 5085 retval = ctl_scsiio(&io->scsiio); 5086 } 5087 return (retval); 5088} 5089 5090/* 5091 * This gets called by a backend driver when it is done with a 5092 * data_submit method. 5093 */ 5094void 5095ctl_data_submit_done(union ctl_io *io) 5096{ 5097 /* 5098 * If the IO_CONT flag is set, we need to call the supplied 5099 * function to continue processing the I/O, instead of completing 5100 * the I/O just yet. 5101 * 5102 * If there is an error, though, we don't want to keep processing. 5103 * Instead, just send status back to the initiator. 5104 */ 5105 if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) && 5106 (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 && 5107 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 5108 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 5109 io->scsiio.io_cont(io); 5110 return; 5111 } 5112 ctl_done(io); 5113} 5114 5115/* 5116 * This gets called by a backend driver when it is done with a 5117 * configuration write. 5118 */ 5119void 5120ctl_config_write_done(union ctl_io *io) 5121{ 5122 uint8_t *buf; 5123 5124 /* 5125 * If the IO_CONT flag is set, we need to call the supplied 5126 * function to continue processing the I/O, instead of completing 5127 * the I/O just yet. 5128 * 5129 * If there is an error, though, we don't want to keep processing. 5130 * Instead, just send status back to the initiator. 
5131 */ 5132 if ((io->io_hdr.flags & CTL_FLAG_IO_CONT) && 5133 (io->io_hdr.flags & CTL_FLAG_ABORT) == 0 && 5134 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE || 5135 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) { 5136 io->scsiio.io_cont(io); 5137 return; 5138 } 5139 /* 5140 * Since a configuration write can be done for commands that actually 5141 * have data allocated, like write buffer, and commands that have 5142 * no data, like start/stop unit, we need to check here. 5143 */ 5144 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 5145 buf = io->scsiio.kern_data_ptr; 5146 else 5147 buf = NULL; 5148 ctl_done(io); 5149 if (buf) 5150 free(buf, M_CTL); 5151} 5152 5153void 5154ctl_config_read_done(union ctl_io *io) 5155{ 5156 uint8_t *buf; 5157 5158 /* 5159 * If there is some error -- we are done, skip data transfer. 5160 */ 5161 if ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0 || 5162 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE && 5163 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) { 5164 if (io->io_hdr.flags & CTL_FLAG_ALLOCATED) 5165 buf = io->scsiio.kern_data_ptr; 5166 else 5167 buf = NULL; 5168 ctl_done(io); 5169 if (buf) 5170 free(buf, M_CTL); 5171 return; 5172 } 5173 5174 /* 5175 * If the IO_CONT flag is set, we need to call the supplied 5176 * function to continue processing the I/O, instead of completing 5177 * the I/O just yet. 5178 */ 5179 if (io->io_hdr.flags & CTL_FLAG_IO_CONT) { 5180 io->scsiio.io_cont(io); 5181 return; 5182 } 5183 5184 ctl_datamove(io); 5185} 5186 5187/* 5188 * SCSI release command. 
5189 */ 5190int 5191ctl_scsi_release(struct ctl_scsiio *ctsio) 5192{ 5193 int length, longid, thirdparty_id, resv_id; 5194 struct ctl_lun *lun; 5195 uint32_t residx; 5196 5197 length = 0; 5198 resv_id = 0; 5199 5200 CTL_DEBUG_PRINT(("ctl_scsi_release\n")); 5201 5202 residx = ctl_get_resindex(&ctsio->io_hdr.nexus); 5203 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5204 5205 switch (ctsio->cdb[0]) { 5206 case RELEASE_10: { 5207 struct scsi_release_10 *cdb; 5208 5209 cdb = (struct scsi_release_10 *)ctsio->cdb; 5210 5211 if (cdb->byte2 & SR10_LONGID) 5212 longid = 1; 5213 else 5214 thirdparty_id = cdb->thirdparty_id; 5215 5216 resv_id = cdb->resv_id; 5217 length = scsi_2btoul(cdb->length); 5218 break; 5219 } 5220 } 5221 5222 5223 /* 5224 * XXX KDM right now, we only support LUN reservation. We don't 5225 * support 3rd party reservations, or extent reservations, which 5226 * might actually need the parameter list. If we've gotten this 5227 * far, we've got a LUN reservation. Anything else got kicked out 5228 * above. So, according to SPC, ignore the length. 5229 */ 5230 length = 0; 5231 5232 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) 5233 && (length > 0)) { 5234 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK); 5235 ctsio->kern_data_len = length; 5236 ctsio->kern_total_len = length; 5237 ctsio->kern_data_resid = 0; 5238 ctsio->kern_rel_offset = 0; 5239 ctsio->kern_sg_entries = 0; 5240 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5241 ctsio->be_move_done = ctl_config_move_done; 5242 ctl_datamove((union ctl_io *)ctsio); 5243 5244 return (CTL_RETVAL_COMPLETE); 5245 } 5246 5247 if (length > 0) 5248 thirdparty_id = scsi_8btou64(ctsio->kern_data_ptr); 5249 5250 mtx_lock(&lun->lun_lock); 5251 5252 /* 5253 * According to SPC, it is not an error for an intiator to attempt 5254 * to release a reservation on a LUN that isn't reserved, or that 5255 * is reserved by another initiator. 
The reservation can only be 5256 * released, though, by the initiator who made it or by one of 5257 * several reset type events. 5258 */ 5259 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == residx)) 5260 lun->flags &= ~CTL_LUN_RESERVED; 5261 5262 mtx_unlock(&lun->lun_lock); 5263 5264 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5265 free(ctsio->kern_data_ptr, M_CTL); 5266 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5267 } 5268 5269 ctl_set_success(ctsio); 5270 ctl_done((union ctl_io *)ctsio); 5271 return (CTL_RETVAL_COMPLETE); 5272} 5273 5274int 5275ctl_scsi_reserve(struct ctl_scsiio *ctsio) 5276{ 5277 int extent, thirdparty, longid; 5278 int resv_id, length; 5279 uint64_t thirdparty_id; 5280 struct ctl_lun *lun; 5281 uint32_t residx; 5282 5283 extent = 0; 5284 thirdparty = 0; 5285 longid = 0; 5286 resv_id = 0; 5287 length = 0; 5288 thirdparty_id = 0; 5289 5290 CTL_DEBUG_PRINT(("ctl_reserve\n")); 5291 5292 residx = ctl_get_resindex(&ctsio->io_hdr.nexus); 5293 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5294 5295 switch (ctsio->cdb[0]) { 5296 case RESERVE_10: { 5297 struct scsi_reserve_10 *cdb; 5298 5299 cdb = (struct scsi_reserve_10 *)ctsio->cdb; 5300 5301 if (cdb->byte2 & SR10_LONGID) 5302 longid = 1; 5303 else 5304 thirdparty_id = cdb->thirdparty_id; 5305 5306 resv_id = cdb->resv_id; 5307 length = scsi_2btoul(cdb->length); 5308 break; 5309 } 5310 } 5311 5312 /* 5313 * XXX KDM right now, we only support LUN reservation. We don't 5314 * support 3rd party reservations, or extent reservations, which 5315 * might actually need the parameter list. If we've gotten this 5316 * far, we've got a LUN reservation. Anything else got kicked out 5317 * above. So, according to SPC, ignore the length. 
5318 */ 5319 length = 0; 5320 5321 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) 5322 && (length > 0)) { 5323 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK); 5324 ctsio->kern_data_len = length; 5325 ctsio->kern_total_len = length; 5326 ctsio->kern_data_resid = 0; 5327 ctsio->kern_rel_offset = 0; 5328 ctsio->kern_sg_entries = 0; 5329 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5330 ctsio->be_move_done = ctl_config_move_done; 5331 ctl_datamove((union ctl_io *)ctsio); 5332 5333 return (CTL_RETVAL_COMPLETE); 5334 } 5335 5336 if (length > 0) 5337 thirdparty_id = scsi_8btou64(ctsio->kern_data_ptr); 5338 5339 mtx_lock(&lun->lun_lock); 5340 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx != residx)) { 5341 ctl_set_reservation_conflict(ctsio); 5342 goto bailout; 5343 } 5344 5345 lun->flags |= CTL_LUN_RESERVED; 5346 lun->res_idx = residx; 5347 5348 ctl_set_success(ctsio); 5349 5350bailout: 5351 mtx_unlock(&lun->lun_lock); 5352 5353 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5354 free(ctsio->kern_data_ptr, M_CTL); 5355 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5356 } 5357 5358 ctl_done((union ctl_io *)ctsio); 5359 return (CTL_RETVAL_COMPLETE); 5360} 5361 5362int 5363ctl_start_stop(struct ctl_scsiio *ctsio) 5364{ 5365 struct scsi_start_stop_unit *cdb; 5366 struct ctl_lun *lun; 5367 int retval; 5368 5369 CTL_DEBUG_PRINT(("ctl_start_stop\n")); 5370 5371 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5372 retval = 0; 5373 5374 cdb = (struct scsi_start_stop_unit *)ctsio->cdb; 5375 5376 /* 5377 * XXX KDM 5378 * We don't support the immediate bit on a stop unit. In order to 5379 * do that, we would need to code up a way to know that a stop is 5380 * pending, and hold off any new commands until it completes, one 5381 * way or another. Then we could accept or reject those commands 5382 * depending on its status. 
We would almost need to do the reverse 5383 * of what we do below for an immediate start -- return the copy of 5384 * the ctl_io to the FETD with status to send to the host (and to 5385 * free the copy!) and then free the original I/O once the stop 5386 * actually completes. That way, the OOA queue mechanism can work 5387 * to block commands that shouldn't proceed. Another alternative 5388 * would be to put the copy in the queue in place of the original, 5389 * and return the original back to the caller. That could be 5390 * slightly safer.. 5391 */ 5392 if ((cdb->byte2 & SSS_IMMED) 5393 && ((cdb->how & SSS_START) == 0)) { 5394 ctl_set_invalid_field(ctsio, 5395 /*sks_valid*/ 1, 5396 /*command*/ 1, 5397 /*field*/ 1, 5398 /*bit_valid*/ 1, 5399 /*bit*/ 0); 5400 ctl_done((union ctl_io *)ctsio); 5401 return (CTL_RETVAL_COMPLETE); 5402 } 5403 5404 if ((lun->flags & CTL_LUN_PR_RESERVED) 5405 && ((cdb->how & SSS_START)==0)) { 5406 uint32_t residx; 5407 5408 residx = ctl_get_resindex(&ctsio->io_hdr.nexus); 5409 if (ctl_get_prkey(lun, residx) == 0 5410 || (lun->pr_res_idx!=residx && lun->res_type < 4)) { 5411 5412 ctl_set_reservation_conflict(ctsio); 5413 ctl_done((union ctl_io *)ctsio); 5414 return (CTL_RETVAL_COMPLETE); 5415 } 5416 } 5417 5418 /* 5419 * If there is no backend on this device, we can't start or stop 5420 * it. In theory we shouldn't get any start/stop commands in the 5421 * first place at this level if the LUN doesn't have a backend. 5422 * That should get stopped by the command decode code. 5423 */ 5424 if (lun->backend == NULL) { 5425 ctl_set_invalid_opcode(ctsio); 5426 ctl_done((union ctl_io *)ctsio); 5427 return (CTL_RETVAL_COMPLETE); 5428 } 5429 5430 /* 5431 * XXX KDM Copan-specific offline behavior. 5432 * Figure out a reasonable way to port this? 
5433 */ 5434#ifdef NEEDTOPORT 5435 mtx_lock(&lun->lun_lock); 5436 5437 if (((cdb->byte2 & SSS_ONOFFLINE) == 0) 5438 && (lun->flags & CTL_LUN_OFFLINE)) { 5439 /* 5440 * If the LUN is offline, and the on/offline bit isn't set, 5441 * reject the start or stop. Otherwise, let it through. 5442 */ 5443 mtx_unlock(&lun->lun_lock); 5444 ctl_set_lun_not_ready(ctsio); 5445 ctl_done((union ctl_io *)ctsio); 5446 } else { 5447 mtx_unlock(&lun->lun_lock); 5448#endif /* NEEDTOPORT */ 5449 /* 5450 * This could be a start or a stop when we're online, 5451 * or a stop/offline or start/online. A start or stop when 5452 * we're offline is covered in the case above. 5453 */ 5454 /* 5455 * In the non-immediate case, we send the request to 5456 * the backend and return status to the user when 5457 * it is done. 5458 * 5459 * In the immediate case, we allocate a new ctl_io 5460 * to hold a copy of the request, and send that to 5461 * the backend. We then set good status on the 5462 * user's request and return it immediately. 5463 */ 5464 if (cdb->byte2 & SSS_IMMED) { 5465 union ctl_io *new_io; 5466 5467 new_io = ctl_alloc_io(ctsio->io_hdr.pool); 5468 ctl_copy_io((union ctl_io *)ctsio, new_io); 5469 retval = lun->backend->config_write(new_io); 5470 ctl_set_success(ctsio); 5471 ctl_done((union ctl_io *)ctsio); 5472 } else { 5473 retval = lun->backend->config_write( 5474 (union ctl_io *)ctsio); 5475 } 5476#ifdef NEEDTOPORT 5477 } 5478#endif 5479 return (retval); 5480} 5481 5482/* 5483 * We support the SYNCHRONIZE CACHE command (10 and 16 byte versions), but 5484 * we don't really do anything with the LBA and length fields if the user 5485 * passes them in. Instead we'll just flush out the cache for the entire 5486 * LUN. 
5487 */ 5488int 5489ctl_sync_cache(struct ctl_scsiio *ctsio) 5490{ 5491 struct ctl_lun *lun; 5492 struct ctl_softc *softc; 5493 uint64_t starting_lba; 5494 uint32_t block_count; 5495 int retval; 5496 5497 CTL_DEBUG_PRINT(("ctl_sync_cache\n")); 5498 5499 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5500 softc = lun->ctl_softc; 5501 retval = 0; 5502 5503 switch (ctsio->cdb[0]) { 5504 case SYNCHRONIZE_CACHE: { 5505 struct scsi_sync_cache *cdb; 5506 cdb = (struct scsi_sync_cache *)ctsio->cdb; 5507 5508 starting_lba = scsi_4btoul(cdb->begin_lba); 5509 block_count = scsi_2btoul(cdb->lb_count); 5510 break; 5511 } 5512 case SYNCHRONIZE_CACHE_16: { 5513 struct scsi_sync_cache_16 *cdb; 5514 cdb = (struct scsi_sync_cache_16 *)ctsio->cdb; 5515 5516 starting_lba = scsi_8btou64(cdb->begin_lba); 5517 block_count = scsi_4btoul(cdb->lb_count); 5518 break; 5519 } 5520 default: 5521 ctl_set_invalid_opcode(ctsio); 5522 ctl_done((union ctl_io *)ctsio); 5523 goto bailout; 5524 break; /* NOTREACHED */ 5525 } 5526 5527 /* 5528 * We check the LBA and length, but don't do anything with them. 5529 * A SYNCHRONIZE CACHE will cause the entire cache for this lun to 5530 * get flushed. This check will just help satisfy anyone who wants 5531 * to see an error for an out of range LBA. 5532 */ 5533 if ((starting_lba + block_count) > (lun->be_lun->maxlba + 1)) { 5534 ctl_set_lba_out_of_range(ctsio); 5535 ctl_done((union ctl_io *)ctsio); 5536 goto bailout; 5537 } 5538 5539 /* 5540 * If this LUN has no backend, we can't flush the cache anyway. 5541 */ 5542 if (lun->backend == NULL) { 5543 ctl_set_invalid_opcode(ctsio); 5544 ctl_done((union ctl_io *)ctsio); 5545 goto bailout; 5546 } 5547 5548 /* 5549 * Check to see whether we're configured to send the SYNCHRONIZE 5550 * CACHE command directly to the back end. 
5551 */ 5552 mtx_lock(&lun->lun_lock); 5553 if ((softc->flags & CTL_FLAG_REAL_SYNC) 5554 && (++(lun->sync_count) >= lun->sync_interval)) { 5555 lun->sync_count = 0; 5556 mtx_unlock(&lun->lun_lock); 5557 retval = lun->backend->config_write((union ctl_io *)ctsio); 5558 } else { 5559 mtx_unlock(&lun->lun_lock); 5560 ctl_set_success(ctsio); 5561 ctl_done((union ctl_io *)ctsio); 5562 } 5563 5564bailout: 5565 5566 return (retval); 5567} 5568 5569int 5570ctl_format(struct ctl_scsiio *ctsio) 5571{ 5572 struct scsi_format *cdb; 5573 struct ctl_lun *lun; 5574 int length, defect_list_len; 5575 5576 CTL_DEBUG_PRINT(("ctl_format\n")); 5577 5578 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5579 5580 cdb = (struct scsi_format *)ctsio->cdb; 5581 5582 length = 0; 5583 if (cdb->byte2 & SF_FMTDATA) { 5584 if (cdb->byte2 & SF_LONGLIST) 5585 length = sizeof(struct scsi_format_header_long); 5586 else 5587 length = sizeof(struct scsi_format_header_short); 5588 } 5589 5590 if (((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) 5591 && (length > 0)) { 5592 ctsio->kern_data_ptr = malloc(length, M_CTL, M_WAITOK); 5593 ctsio->kern_data_len = length; 5594 ctsio->kern_total_len = length; 5595 ctsio->kern_data_resid = 0; 5596 ctsio->kern_rel_offset = 0; 5597 ctsio->kern_sg_entries = 0; 5598 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5599 ctsio->be_move_done = ctl_config_move_done; 5600 ctl_datamove((union ctl_io *)ctsio); 5601 5602 return (CTL_RETVAL_COMPLETE); 5603 } 5604 5605 defect_list_len = 0; 5606 5607 if (cdb->byte2 & SF_FMTDATA) { 5608 if (cdb->byte2 & SF_LONGLIST) { 5609 struct scsi_format_header_long *header; 5610 5611 header = (struct scsi_format_header_long *) 5612 ctsio->kern_data_ptr; 5613 5614 defect_list_len = scsi_4btoul(header->defect_list_len); 5615 if (defect_list_len != 0) { 5616 ctl_set_invalid_field(ctsio, 5617 /*sks_valid*/ 1, 5618 /*command*/ 0, 5619 /*field*/ 2, 5620 /*bit_valid*/ 0, 5621 /*bit*/ 0); 5622 goto bailout; 5623 } 5624 } else { 5625 
struct scsi_format_header_short *header; 5626 5627 header = (struct scsi_format_header_short *) 5628 ctsio->kern_data_ptr; 5629 5630 defect_list_len = scsi_2btoul(header->defect_list_len); 5631 if (defect_list_len != 0) { 5632 ctl_set_invalid_field(ctsio, 5633 /*sks_valid*/ 1, 5634 /*command*/ 0, 5635 /*field*/ 2, 5636 /*bit_valid*/ 0, 5637 /*bit*/ 0); 5638 goto bailout; 5639 } 5640 } 5641 } 5642 5643 /* 5644 * The format command will clear out the "Medium format corrupted" 5645 * status if set by the configuration code. That status is really 5646 * just a way to notify the host that we have lost the media, and 5647 * get them to issue a command that will basically make them think 5648 * they're blowing away the media. 5649 */ 5650 mtx_lock(&lun->lun_lock); 5651 lun->flags &= ~CTL_LUN_INOPERABLE; 5652 mtx_unlock(&lun->lun_lock); 5653 5654 ctl_set_success(ctsio); 5655bailout: 5656 5657 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 5658 free(ctsio->kern_data_ptr, M_CTL); 5659 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 5660 } 5661 5662 ctl_done((union ctl_io *)ctsio); 5663 return (CTL_RETVAL_COMPLETE); 5664} 5665 5666int 5667ctl_read_buffer(struct ctl_scsiio *ctsio) 5668{ 5669 struct scsi_read_buffer *cdb; 5670 struct ctl_lun *lun; 5671 int buffer_offset, len; 5672 static uint8_t descr[4]; 5673 static uint8_t echo_descr[4] = { 0 }; 5674 5675 CTL_DEBUG_PRINT(("ctl_read_buffer\n")); 5676 5677 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5678 cdb = (struct scsi_read_buffer *)ctsio->cdb; 5679 5680 if ((cdb->byte2 & RWB_MODE) != RWB_MODE_DATA && 5681 (cdb->byte2 & RWB_MODE) != RWB_MODE_ECHO_DESCR && 5682 (cdb->byte2 & RWB_MODE) != RWB_MODE_DESCR) { 5683 ctl_set_invalid_field(ctsio, 5684 /*sks_valid*/ 1, 5685 /*command*/ 1, 5686 /*field*/ 1, 5687 /*bit_valid*/ 1, 5688 /*bit*/ 4); 5689 ctl_done((union ctl_io *)ctsio); 5690 return (CTL_RETVAL_COMPLETE); 5691 } 5692 5693 len = scsi_3btoul(cdb->length); 5694 buffer_offset = scsi_3btoul(cdb->offset); 
5695 5696 if (buffer_offset + len > CTL_WRITE_BUFFER_SIZE) { 5697 ctl_set_invalid_field(ctsio, 5698 /*sks_valid*/ 1, 5699 /*command*/ 1, 5700 /*field*/ 6, 5701 /*bit_valid*/ 0, 5702 /*bit*/ 0); 5703 ctl_done((union ctl_io *)ctsio); 5704 return (CTL_RETVAL_COMPLETE); 5705 } 5706 5707 if ((cdb->byte2 & RWB_MODE) == RWB_MODE_DESCR) { 5708 descr[0] = 0; 5709 scsi_ulto3b(CTL_WRITE_BUFFER_SIZE, &descr[1]); 5710 ctsio->kern_data_ptr = descr; 5711 len = min(len, sizeof(descr)); 5712 } else if ((cdb->byte2 & RWB_MODE) == RWB_MODE_ECHO_DESCR) { 5713 ctsio->kern_data_ptr = echo_descr; 5714 len = min(len, sizeof(echo_descr)); 5715 } else { 5716 if (lun->write_buffer == NULL) { 5717 lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE, 5718 M_CTL, M_WAITOK); 5719 } 5720 ctsio->kern_data_ptr = lun->write_buffer + buffer_offset; 5721 } 5722 ctsio->kern_data_len = len; 5723 ctsio->kern_total_len = len; 5724 ctsio->kern_data_resid = 0; 5725 ctsio->kern_rel_offset = 0; 5726 ctsio->kern_sg_entries = 0; 5727 ctl_set_success(ctsio); 5728 ctsio->be_move_done = ctl_config_move_done; 5729 ctl_datamove((union ctl_io *)ctsio); 5730 return (CTL_RETVAL_COMPLETE); 5731} 5732 5733int 5734ctl_write_buffer(struct ctl_scsiio *ctsio) 5735{ 5736 struct scsi_write_buffer *cdb; 5737 struct ctl_lun *lun; 5738 int buffer_offset, len; 5739 5740 CTL_DEBUG_PRINT(("ctl_write_buffer\n")); 5741 5742 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5743 cdb = (struct scsi_write_buffer *)ctsio->cdb; 5744 5745 if ((cdb->byte2 & RWB_MODE) != RWB_MODE_DATA) { 5746 ctl_set_invalid_field(ctsio, 5747 /*sks_valid*/ 1, 5748 /*command*/ 1, 5749 /*field*/ 1, 5750 /*bit_valid*/ 1, 5751 /*bit*/ 4); 5752 ctl_done((union ctl_io *)ctsio); 5753 return (CTL_RETVAL_COMPLETE); 5754 } 5755 5756 len = scsi_3btoul(cdb->length); 5757 buffer_offset = scsi_3btoul(cdb->offset); 5758 5759 if (buffer_offset + len > CTL_WRITE_BUFFER_SIZE) { 5760 ctl_set_invalid_field(ctsio, 5761 /*sks_valid*/ 1, 5762 /*command*/ 1, 5763 
/*field*/ 6, 5764 /*bit_valid*/ 0, 5765 /*bit*/ 0); 5766 ctl_done((union ctl_io *)ctsio); 5767 return (CTL_RETVAL_COMPLETE); 5768 } 5769 5770 /* 5771 * If we've got a kernel request that hasn't been malloced yet, 5772 * malloc it and tell the caller the data buffer is here. 5773 */ 5774 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5775 if (lun->write_buffer == NULL) { 5776 lun->write_buffer = malloc(CTL_WRITE_BUFFER_SIZE, 5777 M_CTL, M_WAITOK); 5778 } 5779 ctsio->kern_data_ptr = lun->write_buffer + buffer_offset; 5780 ctsio->kern_data_len = len; 5781 ctsio->kern_total_len = len; 5782 ctsio->kern_data_resid = 0; 5783 ctsio->kern_rel_offset = 0; 5784 ctsio->kern_sg_entries = 0; 5785 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5786 ctsio->be_move_done = ctl_config_move_done; 5787 ctl_datamove((union ctl_io *)ctsio); 5788 5789 return (CTL_RETVAL_COMPLETE); 5790 } 5791 5792 ctl_set_success(ctsio); 5793 ctl_done((union ctl_io *)ctsio); 5794 return (CTL_RETVAL_COMPLETE); 5795} 5796 5797int 5798ctl_write_same(struct ctl_scsiio *ctsio) 5799{ 5800 struct ctl_lun *lun; 5801 struct ctl_lba_len_flags *lbalen; 5802 uint64_t lba; 5803 uint32_t num_blocks; 5804 int len, retval; 5805 uint8_t byte2; 5806 5807 retval = CTL_RETVAL_COMPLETE; 5808 5809 CTL_DEBUG_PRINT(("ctl_write_same\n")); 5810 5811 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5812 5813 switch (ctsio->cdb[0]) { 5814 case WRITE_SAME_10: { 5815 struct scsi_write_same_10 *cdb; 5816 5817 cdb = (struct scsi_write_same_10 *)ctsio->cdb; 5818 5819 lba = scsi_4btoul(cdb->addr); 5820 num_blocks = scsi_2btoul(cdb->length); 5821 byte2 = cdb->byte2; 5822 break; 5823 } 5824 case WRITE_SAME_16: { 5825 struct scsi_write_same_16 *cdb; 5826 5827 cdb = (struct scsi_write_same_16 *)ctsio->cdb; 5828 5829 lba = scsi_8btou64(cdb->addr); 5830 num_blocks = scsi_4btoul(cdb->length); 5831 byte2 = cdb->byte2; 5832 break; 5833 } 5834 default: 5835 /* 5836 * We got a command we don't support. 
This shouldn't 5837 * happen, commands should be filtered out above us. 5838 */ 5839 ctl_set_invalid_opcode(ctsio); 5840 ctl_done((union ctl_io *)ctsio); 5841 5842 return (CTL_RETVAL_COMPLETE); 5843 break; /* NOTREACHED */ 5844 } 5845 5846 /* NDOB and ANCHOR flags can be used only together with UNMAP */ 5847 if ((byte2 & SWS_UNMAP) == 0 && 5848 (byte2 & (SWS_NDOB | SWS_ANCHOR)) != 0) { 5849 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 5850 /*command*/ 1, /*field*/ 1, /*bit_valid*/ 1, /*bit*/ 0); 5851 ctl_done((union ctl_io *)ctsio); 5852 return (CTL_RETVAL_COMPLETE); 5853 } 5854 5855 /* 5856 * The first check is to make sure we're in bounds, the second 5857 * check is to catch wrap-around problems. If the lba + num blocks 5858 * is less than the lba, then we've wrapped around and the block 5859 * range is invalid anyway. 5860 */ 5861 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 5862 || ((lba + num_blocks) < lba)) { 5863 ctl_set_lba_out_of_range(ctsio); 5864 ctl_done((union ctl_io *)ctsio); 5865 return (CTL_RETVAL_COMPLETE); 5866 } 5867 5868 /* Zero number of blocks means "to the last logical block" */ 5869 if (num_blocks == 0) { 5870 if ((lun->be_lun->maxlba + 1) - lba > UINT32_MAX) { 5871 ctl_set_invalid_field(ctsio, 5872 /*sks_valid*/ 0, 5873 /*command*/ 1, 5874 /*field*/ 0, 5875 /*bit_valid*/ 0, 5876 /*bit*/ 0); 5877 ctl_done((union ctl_io *)ctsio); 5878 return (CTL_RETVAL_COMPLETE); 5879 } 5880 num_blocks = (lun->be_lun->maxlba + 1) - lba; 5881 } 5882 5883 len = lun->be_lun->blocksize; 5884 5885 /* 5886 * If we've got a kernel request that hasn't been malloced yet, 5887 * malloc it and tell the caller the data buffer is here. 
5888 */ 5889 if ((byte2 & SWS_NDOB) == 0 && 5890 (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5891 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);; 5892 ctsio->kern_data_len = len; 5893 ctsio->kern_total_len = len; 5894 ctsio->kern_data_resid = 0; 5895 ctsio->kern_rel_offset = 0; 5896 ctsio->kern_sg_entries = 0; 5897 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5898 ctsio->be_move_done = ctl_config_move_done; 5899 ctl_datamove((union ctl_io *)ctsio); 5900 5901 return (CTL_RETVAL_COMPLETE); 5902 } 5903 5904 lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5905 lbalen->lba = lba; 5906 lbalen->len = num_blocks; 5907 lbalen->flags = byte2; 5908 retval = lun->backend->config_write((union ctl_io *)ctsio); 5909 5910 return (retval); 5911} 5912 5913int 5914ctl_unmap(struct ctl_scsiio *ctsio) 5915{ 5916 struct ctl_lun *lun; 5917 struct scsi_unmap *cdb; 5918 struct ctl_ptr_len_flags *ptrlen; 5919 struct scsi_unmap_header *hdr; 5920 struct scsi_unmap_desc *buf, *end, *endnz, *range; 5921 uint64_t lba; 5922 uint32_t num_blocks; 5923 int len, retval; 5924 uint8_t byte2; 5925 5926 retval = CTL_RETVAL_COMPLETE; 5927 5928 CTL_DEBUG_PRINT(("ctl_unmap\n")); 5929 5930 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 5931 cdb = (struct scsi_unmap *)ctsio->cdb; 5932 5933 len = scsi_2btoul(cdb->length); 5934 byte2 = cdb->byte2; 5935 5936 /* 5937 * If we've got a kernel request that hasn't been malloced yet, 5938 * malloc it and tell the caller the data buffer is here. 
5939 */ 5940 if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) { 5941 ctsio->kern_data_ptr = malloc(len, M_CTL, M_WAITOK);; 5942 ctsio->kern_data_len = len; 5943 ctsio->kern_total_len = len; 5944 ctsio->kern_data_resid = 0; 5945 ctsio->kern_rel_offset = 0; 5946 ctsio->kern_sg_entries = 0; 5947 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 5948 ctsio->be_move_done = ctl_config_move_done; 5949 ctl_datamove((union ctl_io *)ctsio); 5950 5951 return (CTL_RETVAL_COMPLETE); 5952 } 5953 5954 len = ctsio->kern_total_len - ctsio->kern_data_resid; 5955 hdr = (struct scsi_unmap_header *)ctsio->kern_data_ptr; 5956 if (len < sizeof (*hdr) || 5957 len < (scsi_2btoul(hdr->length) + sizeof(hdr->length)) || 5958 len < (scsi_2btoul(hdr->desc_length) + sizeof (*hdr)) || 5959 scsi_2btoul(hdr->desc_length) % sizeof(*buf) != 0) { 5960 ctl_set_invalid_field(ctsio, 5961 /*sks_valid*/ 0, 5962 /*command*/ 0, 5963 /*field*/ 0, 5964 /*bit_valid*/ 0, 5965 /*bit*/ 0); 5966 goto done; 5967 } 5968 len = scsi_2btoul(hdr->desc_length); 5969 buf = (struct scsi_unmap_desc *)(hdr + 1); 5970 end = buf + len / sizeof(*buf); 5971 5972 endnz = buf; 5973 for (range = buf; range < end; range++) { 5974 lba = scsi_8btou64(range->lba); 5975 num_blocks = scsi_4btoul(range->length); 5976 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 5977 || ((lba + num_blocks) < lba)) { 5978 ctl_set_lba_out_of_range(ctsio); 5979 ctl_done((union ctl_io *)ctsio); 5980 return (CTL_RETVAL_COMPLETE); 5981 } 5982 if (num_blocks != 0) 5983 endnz = range + 1; 5984 } 5985 5986 /* 5987 * Block backend can not handle zero last range. 5988 * Filter it out and return if there is nothing left. 
5989 */ 5990 len = (uint8_t *)endnz - (uint8_t *)buf; 5991 if (len == 0) { 5992 ctl_set_success(ctsio); 5993 goto done; 5994 } 5995 5996 mtx_lock(&lun->lun_lock); 5997 ptrlen = (struct ctl_ptr_len_flags *) 5998 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 5999 ptrlen->ptr = (void *)buf; 6000 ptrlen->len = len; 6001 ptrlen->flags = byte2; 6002 ctl_check_blocked(lun); 6003 mtx_unlock(&lun->lun_lock); 6004 6005 retval = lun->backend->config_write((union ctl_io *)ctsio); 6006 return (retval); 6007 6008done: 6009 if (ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) { 6010 free(ctsio->kern_data_ptr, M_CTL); 6011 ctsio->io_hdr.flags &= ~CTL_FLAG_ALLOCATED; 6012 } 6013 ctl_done((union ctl_io *)ctsio); 6014 return (CTL_RETVAL_COMPLETE); 6015} 6016 6017/* 6018 * Note that this function currently doesn't actually do anything inside 6019 * CTL to enforce things if the DQue bit is turned on. 6020 * 6021 * Also note that this function can't be used in the default case, because 6022 * the DQue bit isn't set in the changeable mask for the control mode page 6023 * anyway. This is just here as an example for how to implement a page 6024 * handler, and a placeholder in case we want to allow the user to turn 6025 * tagged queueing on and off. 6026 * 6027 * The D_SENSE bit handling is functional, however, and will turn 6028 * descriptor sense on and off for a given LUN. 
/*
 * MODE SELECT handler for the control mode page: apply the initiator's
 * changes (D_SENSE, queue algorithm modifier, SWP) to both the current
 * and saved copies of the page, and raise a MODE CHANGE unit attention
 * when anything actually changed.
 */
int
ctl_control_page_handler(struct ctl_scsiio *ctsio,
			 struct ctl_page_index *page_index, uint8_t *page_ptr)
{
	struct scsi_control_page *current_cp, *saved_cp, *user_cp;
	struct ctl_lun *lun;
	int set_ua;		/* nonzero once any field changed */
	uint32_t initidx;

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);
	set_ua = 0;

	/*
	 * user_cp is the page as sent by the initiator; current_cp and
	 * saved_cp are our stored copies inside page_data.
	 */
	user_cp = (struct scsi_control_page *)page_ptr;
	current_cp = (struct scsi_control_page *)
		(page_index->page_data + (page_index->page_len *
		CTL_PAGE_CURRENT));
	saved_cp = (struct scsi_control_page *)
		(page_index->page_data + (page_index->page_len *
		CTL_PAGE_SAVED));

	mtx_lock(&lun->lun_lock);
	if (((current_cp->rlec & SCP_DSENSE) == 0)
	 && ((user_cp->rlec & SCP_DSENSE) != 0)) {
		/*
		 * Descriptor sense is currently turned off and the user
		 * wants to turn it on.
		 */
		current_cp->rlec |= SCP_DSENSE;
		saved_cp->rlec |= SCP_DSENSE;
		lun->flags |= CTL_LUN_SENSE_DESC;
		set_ua = 1;
	} else if (((current_cp->rlec & SCP_DSENSE) != 0)
	 && ((user_cp->rlec & SCP_DSENSE) == 0)) {
		/*
		 * Descriptor sense is currently turned on, and the user
		 * wants to turn it off.
		 */
		current_cp->rlec &= ~SCP_DSENSE;
		saved_cp->rlec &= ~SCP_DSENSE;
		lun->flags &= ~CTL_LUN_SENSE_DESC;
		set_ua = 1;
	}
	/* Queue algorithm modifier field. */
	if ((current_cp->queue_flags & SCP_QUEUE_ALG_MASK) !=
	    (user_cp->queue_flags & SCP_QUEUE_ALG_MASK)) {
		current_cp->queue_flags &= ~SCP_QUEUE_ALG_MASK;
		current_cp->queue_flags |= user_cp->queue_flags & SCP_QUEUE_ALG_MASK;
		saved_cp->queue_flags &= ~SCP_QUEUE_ALG_MASK;
		saved_cp->queue_flags |= user_cp->queue_flags & SCP_QUEUE_ALG_MASK;
		set_ua = 1;
	}
	/* Software write protect (SWP) bit. */
	if ((current_cp->eca_and_aen & SCP_SWP) !=
	    (user_cp->eca_and_aen & SCP_SWP)) {
		current_cp->eca_and_aen &= ~SCP_SWP;
		current_cp->eca_and_aen |= user_cp->eca_and_aen & SCP_SWP;
		saved_cp->eca_and_aen &= ~SCP_SWP;
		saved_cp->eca_and_aen |= user_cp->eca_and_aen & SCP_SWP;
		set_ua = 1;
	}
	/* Tell the other initiators the mode parameters changed. */
	if (set_ua != 0)
		ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE);
	mtx_unlock(&lun->lun_lock);

	return (0);
}
6067 */ 6068 current_cp->rlec &= ~SCP_DSENSE; 6069 saved_cp->rlec &= ~SCP_DSENSE; 6070 lun->flags &= ~CTL_LUN_SENSE_DESC; 6071 set_ua = 1; 6072 } 6073 if ((current_cp->queue_flags & SCP_QUEUE_ALG_MASK) != 6074 (user_cp->queue_flags & SCP_QUEUE_ALG_MASK)) { 6075 current_cp->queue_flags &= ~SCP_QUEUE_ALG_MASK; 6076 current_cp->queue_flags |= user_cp->queue_flags & SCP_QUEUE_ALG_MASK; 6077 saved_cp->queue_flags &= ~SCP_QUEUE_ALG_MASK; 6078 saved_cp->queue_flags |= user_cp->queue_flags & SCP_QUEUE_ALG_MASK; 6079 set_ua = 1; 6080 } 6081 if ((current_cp->eca_and_aen & SCP_SWP) != 6082 (user_cp->eca_and_aen & SCP_SWP)) { 6083 current_cp->eca_and_aen &= ~SCP_SWP; 6084 current_cp->eca_and_aen |= user_cp->eca_and_aen & SCP_SWP; 6085 saved_cp->eca_and_aen &= ~SCP_SWP; 6086 saved_cp->eca_and_aen |= user_cp->eca_and_aen & SCP_SWP; 6087 set_ua = 1; 6088 } 6089 if (set_ua != 0) 6090 ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE); 6091 mtx_unlock(&lun->lun_lock); 6092 6093 return (0); 6094} 6095 6096int 6097ctl_caching_sp_handler(struct ctl_scsiio *ctsio, 6098 struct ctl_page_index *page_index, uint8_t *page_ptr) 6099{ 6100 struct scsi_caching_page *current_cp, *saved_cp, *user_cp; 6101 struct ctl_lun *lun; 6102 int set_ua; 6103 uint32_t initidx; 6104 6105 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6106 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 6107 set_ua = 0; 6108 6109 user_cp = (struct scsi_caching_page *)page_ptr; 6110 current_cp = (struct scsi_caching_page *) 6111 (page_index->page_data + (page_index->page_len * 6112 CTL_PAGE_CURRENT)); 6113 saved_cp = (struct scsi_caching_page *) 6114 (page_index->page_data + (page_index->page_len * 6115 CTL_PAGE_SAVED)); 6116 6117 mtx_lock(&lun->lun_lock); 6118 if ((current_cp->flags1 & (SCP_WCE | SCP_RCD)) != 6119 (user_cp->flags1 & (SCP_WCE | SCP_RCD))) { 6120 current_cp->flags1 &= ~(SCP_WCE | SCP_RCD); 6121 current_cp->flags1 |= user_cp->flags1 & (SCP_WCE | SCP_RCD); 6122 saved_cp->flags1 &= ~(SCP_WCE 
| SCP_RCD); 6123 saved_cp->flags1 |= user_cp->flags1 & (SCP_WCE | SCP_RCD); 6124 set_ua = 1; 6125 } 6126 if (set_ua != 0) 6127 ctl_est_ua_all(lun, initidx, CTL_UA_MODE_CHANGE); 6128 mtx_unlock(&lun->lun_lock); 6129 6130 return (0); 6131} 6132 6133int 6134ctl_debugconf_sp_select_handler(struct ctl_scsiio *ctsio, 6135 struct ctl_page_index *page_index, 6136 uint8_t *page_ptr) 6137{ 6138 uint8_t *c; 6139 int i; 6140 6141 c = ((struct copan_debugconf_subpage *)page_ptr)->ctl_time_io_secs; 6142 ctl_time_io_secs = 6143 (c[0] << 8) | 6144 (c[1] << 0) | 6145 0; 6146 CTL_DEBUG_PRINT(("set ctl_time_io_secs to %d\n", ctl_time_io_secs)); 6147 printf("set ctl_time_io_secs to %d\n", ctl_time_io_secs); 6148 printf("page data:"); 6149 for (i=0; i<8; i++) 6150 printf(" %.2x",page_ptr[i]); 6151 printf("\n"); 6152 return (0); 6153} 6154 6155int 6156ctl_debugconf_sp_sense_handler(struct ctl_scsiio *ctsio, 6157 struct ctl_page_index *page_index, 6158 int pc) 6159{ 6160 struct copan_debugconf_subpage *page; 6161 6162 page = (struct copan_debugconf_subpage *)page_index->page_data + 6163 (page_index->page_len * pc); 6164 6165 switch (pc) { 6166 case SMS_PAGE_CTRL_CHANGEABLE >> 6: 6167 case SMS_PAGE_CTRL_DEFAULT >> 6: 6168 case SMS_PAGE_CTRL_SAVED >> 6: 6169 /* 6170 * We don't update the changable or default bits for this page. 
6171 */ 6172 break; 6173 case SMS_PAGE_CTRL_CURRENT >> 6: 6174 page->ctl_time_io_secs[0] = ctl_time_io_secs >> 8; 6175 page->ctl_time_io_secs[1] = ctl_time_io_secs >> 0; 6176 break; 6177 default: 6178#ifdef NEEDTOPORT 6179 EPRINT(0, "Invalid PC %d!!", pc); 6180#endif /* NEEDTOPORT */ 6181 break; 6182 } 6183 return (0); 6184} 6185 6186 6187static int 6188ctl_do_mode_select(union ctl_io *io) 6189{ 6190 struct scsi_mode_page_header *page_header; 6191 struct ctl_page_index *page_index; 6192 struct ctl_scsiio *ctsio; 6193 int control_dev, page_len; 6194 int page_len_offset, page_len_size; 6195 union ctl_modepage_info *modepage_info; 6196 struct ctl_lun *lun; 6197 int *len_left, *len_used; 6198 int retval, i; 6199 6200 ctsio = &io->scsiio; 6201 page_index = NULL; 6202 page_len = 0; 6203 retval = CTL_RETVAL_COMPLETE; 6204 6205 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 6206 6207 if (lun->be_lun->lun_type != T_DIRECT) 6208 control_dev = 1; 6209 else 6210 control_dev = 0; 6211 6212 modepage_info = (union ctl_modepage_info *) 6213 ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes; 6214 len_left = &modepage_info->header.len_left; 6215 len_used = &modepage_info->header.len_used; 6216 6217do_next_page: 6218 6219 page_header = (struct scsi_mode_page_header *) 6220 (ctsio->kern_data_ptr + *len_used); 6221 6222 if (*len_left == 0) { 6223 free(ctsio->kern_data_ptr, M_CTL); 6224 ctl_set_success(ctsio); 6225 ctl_done((union ctl_io *)ctsio); 6226 return (CTL_RETVAL_COMPLETE); 6227 } else if (*len_left < sizeof(struct scsi_mode_page_header)) { 6228 6229 free(ctsio->kern_data_ptr, M_CTL); 6230 ctl_set_param_len_error(ctsio); 6231 ctl_done((union ctl_io *)ctsio); 6232 return (CTL_RETVAL_COMPLETE); 6233 6234 } else if ((page_header->page_code & SMPH_SPF) 6235 && (*len_left < sizeof(struct scsi_mode_page_header_sp))) { 6236 6237 free(ctsio->kern_data_ptr, M_CTL); 6238 ctl_set_param_len_error(ctsio); 6239 ctl_done((union ctl_io *)ctsio); 6240 return 
(CTL_RETVAL_COMPLETE); 6241 } 6242 6243 6244 /* 6245 * XXX KDM should we do something with the block descriptor? 6246 */ 6247 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6248 6249 if ((control_dev != 0) 6250 && (lun->mode_pages.index[i].page_flags & 6251 CTL_PAGE_FLAG_DISK_ONLY)) 6252 continue; 6253 6254 if ((lun->mode_pages.index[i].page_code & SMPH_PC_MASK) != 6255 (page_header->page_code & SMPH_PC_MASK)) 6256 continue; 6257 6258 /* 6259 * If neither page has a subpage code, then we've got a 6260 * match. 6261 */ 6262 if (((lun->mode_pages.index[i].page_code & SMPH_SPF) == 0) 6263 && ((page_header->page_code & SMPH_SPF) == 0)) { 6264 page_index = &lun->mode_pages.index[i]; 6265 page_len = page_header->page_length; 6266 break; 6267 } 6268 6269 /* 6270 * If both pages have subpages, then the subpage numbers 6271 * have to match. 6272 */ 6273 if ((lun->mode_pages.index[i].page_code & SMPH_SPF) 6274 && (page_header->page_code & SMPH_SPF)) { 6275 struct scsi_mode_page_header_sp *sph; 6276 6277 sph = (struct scsi_mode_page_header_sp *)page_header; 6278 6279 if (lun->mode_pages.index[i].subpage == 6280 sph->subpage) { 6281 page_index = &lun->mode_pages.index[i]; 6282 page_len = scsi_2btoul(sph->page_length); 6283 break; 6284 } 6285 } 6286 } 6287 6288 /* 6289 * If we couldn't find the page, or if we don't have a mode select 6290 * handler for it, send back an error to the user. 
6291 */ 6292 if ((page_index == NULL) 6293 || (page_index->select_handler == NULL)) { 6294 ctl_set_invalid_field(ctsio, 6295 /*sks_valid*/ 1, 6296 /*command*/ 0, 6297 /*field*/ *len_used, 6298 /*bit_valid*/ 0, 6299 /*bit*/ 0); 6300 free(ctsio->kern_data_ptr, M_CTL); 6301 ctl_done((union ctl_io *)ctsio); 6302 return (CTL_RETVAL_COMPLETE); 6303 } 6304 6305 if (page_index->page_code & SMPH_SPF) { 6306 page_len_offset = 2; 6307 page_len_size = 2; 6308 } else { 6309 page_len_size = 1; 6310 page_len_offset = 1; 6311 } 6312 6313 /* 6314 * If the length the initiator gives us isn't the one we specify in 6315 * the mode page header, or if they didn't specify enough data in 6316 * the CDB to avoid truncating this page, kick out the request. 6317 */ 6318 if ((page_len != (page_index->page_len - page_len_offset - 6319 page_len_size)) 6320 || (*len_left < page_index->page_len)) { 6321 6322 6323 ctl_set_invalid_field(ctsio, 6324 /*sks_valid*/ 1, 6325 /*command*/ 0, 6326 /*field*/ *len_used + page_len_offset, 6327 /*bit_valid*/ 0, 6328 /*bit*/ 0); 6329 free(ctsio->kern_data_ptr, M_CTL); 6330 ctl_done((union ctl_io *)ctsio); 6331 return (CTL_RETVAL_COMPLETE); 6332 } 6333 6334 /* 6335 * Run through the mode page, checking to make sure that the bits 6336 * the user changed are actually legal for him to change. 6337 */ 6338 for (i = 0; i < page_index->page_len; i++) { 6339 uint8_t *user_byte, *change_mask, *current_byte; 6340 int bad_bit; 6341 int j; 6342 6343 user_byte = (uint8_t *)page_header + i; 6344 change_mask = page_index->page_data + 6345 (page_index->page_len * CTL_PAGE_CHANGEABLE) + i; 6346 current_byte = page_index->page_data + 6347 (page_index->page_len * CTL_PAGE_CURRENT) + i; 6348 6349 /* 6350 * Check to see whether the user set any bits in this byte 6351 * that he is not allowed to set. 6352 */ 6353 if ((*user_byte & ~(*change_mask)) == 6354 (*current_byte & ~(*change_mask))) 6355 continue; 6356 6357 /* 6358 * Go through bit by bit to determine which one is illegal. 
6359 */ 6360 bad_bit = 0; 6361 for (j = 7; j >= 0; j--) { 6362 if ((((1 << i) & ~(*change_mask)) & *user_byte) != 6363 (((1 << i) & ~(*change_mask)) & *current_byte)) { 6364 bad_bit = i; 6365 break; 6366 } 6367 } 6368 ctl_set_invalid_field(ctsio, 6369 /*sks_valid*/ 1, 6370 /*command*/ 0, 6371 /*field*/ *len_used + i, 6372 /*bit_valid*/ 1, 6373 /*bit*/ bad_bit); 6374 free(ctsio->kern_data_ptr, M_CTL); 6375 ctl_done((union ctl_io *)ctsio); 6376 return (CTL_RETVAL_COMPLETE); 6377 } 6378 6379 /* 6380 * Decrement these before we call the page handler, since we may 6381 * end up getting called back one way or another before the handler 6382 * returns to this context. 6383 */ 6384 *len_left -= page_index->page_len; 6385 *len_used += page_index->page_len; 6386 6387 retval = page_index->select_handler(ctsio, page_index, 6388 (uint8_t *)page_header); 6389 6390 /* 6391 * If the page handler returns CTL_RETVAL_QUEUED, then we need to 6392 * wait until this queued command completes to finish processing 6393 * the mode page. If it returns anything other than 6394 * CTL_RETVAL_COMPLETE (e.g. CTL_RETVAL_ERROR), then it should have 6395 * already set the sense information, freed the data pointer, and 6396 * completed the io for us. 6397 */ 6398 if (retval != CTL_RETVAL_COMPLETE) 6399 goto bailout_no_done; 6400 6401 /* 6402 * If the initiator sent us more than one page, parse the next one. 
/*
 * MODE SELECT(6)/(10) entry point.  First pass: fetch the initiator's
 * parameter list via ctl_datamove().  Second pass (re-entered through
 * io_cont): skip the header and block descriptor, then hand the pages to
 * ctl_do_mode_select() for per-page processing.
 */
int
ctl_mode_select(struct ctl_scsiio *ctsio)
{
	int param_len, pf, sp;
	int header_size, bd_len;
	int len_left, len_used;
	struct ctl_page_index *page_index;
	struct ctl_lun *lun;
	int control_dev, page_len;
	union ctl_modepage_info *modepage_info;
	int retval;

	pf = 0;
	sp = 0;
	page_len = 0;
	len_used = 0;
	len_left = 0;
	retval = 0;
	bd_len = 0;
	page_index = NULL;

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	/* Non-disk LUNs only get the pages not marked disk-only. */
	if (lun->be_lun->lun_type != T_DIRECT)
		control_dev = 1;
	else
		control_dev = 0;

	/*
	 * Decode the parameter list length and header size; PF and SP are
	 * parsed but not otherwise acted upon here.
	 */
	switch (ctsio->cdb[0]) {
	case MODE_SELECT_6: {
		struct scsi_mode_select_6 *cdb;

		cdb = (struct scsi_mode_select_6 *)ctsio->cdb;

		pf = (cdb->byte2 & SMS_PF) ? 1 : 0;
		sp = (cdb->byte2 & SMS_SP) ? 1 : 0;

		param_len = cdb->length;
		header_size = sizeof(struct scsi_mode_header_6);
		break;
	}
	case MODE_SELECT_10: {
		struct scsi_mode_select_10 *cdb;

		cdb = (struct scsi_mode_select_10 *)ctsio->cdb;

		pf = (cdb->byte2 & SMS_PF) ? 1 : 0;
		sp = (cdb->byte2 & SMS_SP) ? 1 : 0;

		param_len = scsi_2btoul(cdb->length);
		header_size = sizeof(struct scsi_mode_header_10);
		break;
	}
	default:
		ctl_set_invalid_opcode(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
		break; /* NOTREACHED */
	}

	/*
	 * From SPC-3:
	 * "A parameter list length of zero indicates that the Data-Out Buffer
	 * shall be empty. This condition shall not be considered as an error."
	 */
	if (param_len == 0) {
		ctl_set_success(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * Since we'll hit this the first time through, prior to
	 * allocation, we don't need to free a data buffer here.
	 */
	if (param_len < header_size) {
		ctl_set_param_len_error(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * Allocate the data buffer and grab the user's data.  In theory,
	 * we shouldn't have to sanity check the parameter list length here
	 * because the maximum size is 64K.  We should be able to malloc
	 * that much without too many problems.
	 */
	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
		ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK);
		ctsio->kern_data_len = param_len;
		ctsio->kern_total_len = param_len;
		ctsio->kern_data_resid = 0;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}

	/* Second pass: read the block descriptor length from the header. */
	switch (ctsio->cdb[0]) {
	case MODE_SELECT_6: {
		struct scsi_mode_header_6 *mh6;

		mh6 = (struct scsi_mode_header_6 *)ctsio->kern_data_ptr;
		bd_len = mh6->blk_desc_len;
		break;
	}
	case MODE_SELECT_10: {
		struct scsi_mode_header_10 *mh10;

		mh10 = (struct scsi_mode_header_10 *)ctsio->kern_data_ptr;
		bd_len = scsi_2btoul(mh10->blk_desc_len);
		break;
	}
	default:
		panic("Invalid CDB type %#x", ctsio->cdb[0]);
		break;
	}

	/* The header plus block descriptor must fit in the list. */
	if (param_len < (header_size + bd_len)) {
		free(ctsio->kern_data_ptr, M_CTL);
		ctl_set_param_len_error(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * Set the IO_CONT flag, so that if this I/O gets passed to
	 * ctl_config_write_done(), it'll get passed back to
	 * ctl_do_mode_select() for further processing, or completion if
	 * we're all done.
	 */
	ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT;
	ctsio->io_cont = ctl_do_mode_select;

	modepage_info = (union ctl_modepage_info *)
		ctsio->io_hdr.ctl_private[CTL_PRIV_MODEPAGE].bytes;

	memset(modepage_info, 0, sizeof(*modepage_info));

	/* Start the page walk just past the header and block descriptor. */
	len_left = param_len - header_size - bd_len;
	len_used = header_size + bd_len;

	modepage_info->header.len_left = len_left;
	modepage_info->header.len_used = len_used;

	return (ctl_do_mode_select((union ctl_io *)ctsio));
}
1; 6619 else 6620 header_len += sizeof(struct scsi_mode_block_descr); 6621 if (cdb->byte2 & SMS10_LLBAA) 6622 llba = 1; 6623 pc = (cdb->page & SMS_PAGE_CTRL_MASK) >> 6; 6624 page_code = cdb->page & SMS_PAGE_CODE; 6625 subpage = cdb->subpage; 6626 alloc_len = scsi_2btoul(cdb->length); 6627 break; 6628 } 6629 default: 6630 ctl_set_invalid_opcode(ctsio); 6631 ctl_done((union ctl_io *)ctsio); 6632 return (CTL_RETVAL_COMPLETE); 6633 break; /* NOTREACHED */ 6634 } 6635 6636 /* 6637 * We have to make a first pass through to calculate the size of 6638 * the pages that match the user's query. Then we allocate enough 6639 * memory to hold it, and actually copy the data into the buffer. 6640 */ 6641 switch (page_code) { 6642 case SMS_ALL_PAGES_PAGE: { 6643 int i; 6644 6645 page_len = 0; 6646 6647 /* 6648 * At the moment, values other than 0 and 0xff here are 6649 * reserved according to SPC-3. 6650 */ 6651 if ((subpage != SMS_SUBPAGE_PAGE_0) 6652 && (subpage != SMS_SUBPAGE_ALL)) { 6653 ctl_set_invalid_field(ctsio, 6654 /*sks_valid*/ 1, 6655 /*command*/ 1, 6656 /*field*/ 3, 6657 /*bit_valid*/ 0, 6658 /*bit*/ 0); 6659 ctl_done((union ctl_io *)ctsio); 6660 return (CTL_RETVAL_COMPLETE); 6661 } 6662 6663 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6664 if ((control_dev != 0) 6665 && (lun->mode_pages.index[i].page_flags & 6666 CTL_PAGE_FLAG_DISK_ONLY)) 6667 continue; 6668 6669 /* 6670 * We don't use this subpage if the user didn't 6671 * request all subpages. 
6672 */ 6673 if ((lun->mode_pages.index[i].subpage != 0) 6674 && (subpage == SMS_SUBPAGE_PAGE_0)) 6675 continue; 6676 6677#if 0 6678 printf("found page %#x len %d\n", 6679 lun->mode_pages.index[i].page_code & 6680 SMPH_PC_MASK, 6681 lun->mode_pages.index[i].page_len); 6682#endif 6683 page_len += lun->mode_pages.index[i].page_len; 6684 } 6685 break; 6686 } 6687 default: { 6688 int i; 6689 6690 page_len = 0; 6691 6692 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6693 /* Look for the right page code */ 6694 if ((lun->mode_pages.index[i].page_code & 6695 SMPH_PC_MASK) != page_code) 6696 continue; 6697 6698 /* Look for the right subpage or the subpage wildcard*/ 6699 if ((lun->mode_pages.index[i].subpage != subpage) 6700 && (subpage != SMS_SUBPAGE_ALL)) 6701 continue; 6702 6703 /* Make sure the page is supported for this dev type */ 6704 if ((control_dev != 0) 6705 && (lun->mode_pages.index[i].page_flags & 6706 CTL_PAGE_FLAG_DISK_ONLY)) 6707 continue; 6708 6709#if 0 6710 printf("found page %#x len %d\n", 6711 lun->mode_pages.index[i].page_code & 6712 SMPH_PC_MASK, 6713 lun->mode_pages.index[i].page_len); 6714#endif 6715 6716 page_len += lun->mode_pages.index[i].page_len; 6717 } 6718 6719 if (page_len == 0) { 6720 ctl_set_invalid_field(ctsio, 6721 /*sks_valid*/ 1, 6722 /*command*/ 1, 6723 /*field*/ 2, 6724 /*bit_valid*/ 1, 6725 /*bit*/ 5); 6726 ctl_done((union ctl_io *)ctsio); 6727 return (CTL_RETVAL_COMPLETE); 6728 } 6729 break; 6730 } 6731 } 6732 6733 total_len = header_len + page_len; 6734#if 0 6735 printf("header_len = %d, page_len = %d, total_len = %d\n", 6736 header_len, page_len, total_len); 6737#endif 6738 6739 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 6740 ctsio->kern_sg_entries = 0; 6741 ctsio->kern_data_resid = 0; 6742 ctsio->kern_rel_offset = 0; 6743 if (total_len < alloc_len) { 6744 ctsio->residual = alloc_len - total_len; 6745 ctsio->kern_data_len = total_len; 6746 ctsio->kern_total_len = total_len; 6747 } else { 6748 ctsio->residual 
= 0; 6749 ctsio->kern_data_len = alloc_len; 6750 ctsio->kern_total_len = alloc_len; 6751 } 6752 6753 switch (ctsio->cdb[0]) { 6754 case MODE_SENSE_6: { 6755 struct scsi_mode_hdr_6 *header; 6756 6757 header = (struct scsi_mode_hdr_6 *)ctsio->kern_data_ptr; 6758 6759 header->datalen = MIN(total_len - 1, 254); 6760 if (control_dev == 0) { 6761 header->dev_specific = 0x10; /* DPOFUA */ 6762 if ((lun->flags & CTL_LUN_READONLY) || 6763 (lun->mode_pages.control_page[CTL_PAGE_CURRENT] 6764 .eca_and_aen & SCP_SWP) != 0) 6765 header->dev_specific |= 0x80; /* WP */ 6766 } 6767 if (dbd) 6768 header->block_descr_len = 0; 6769 else 6770 header->block_descr_len = 6771 sizeof(struct scsi_mode_block_descr); 6772 block_desc = (struct scsi_mode_block_descr *)&header[1]; 6773 break; 6774 } 6775 case MODE_SENSE_10: { 6776 struct scsi_mode_hdr_10 *header; 6777 int datalen; 6778 6779 header = (struct scsi_mode_hdr_10 *)ctsio->kern_data_ptr; 6780 6781 datalen = MIN(total_len - 2, 65533); 6782 scsi_ulto2b(datalen, header->datalen); 6783 if (control_dev == 0) { 6784 header->dev_specific = 0x10; /* DPOFUA */ 6785 if ((lun->flags & CTL_LUN_READONLY) || 6786 (lun->mode_pages.control_page[CTL_PAGE_CURRENT] 6787 .eca_and_aen & SCP_SWP) != 0) 6788 header->dev_specific |= 0x80; /* WP */ 6789 } 6790 if (dbd) 6791 scsi_ulto2b(0, header->block_descr_len); 6792 else 6793 scsi_ulto2b(sizeof(struct scsi_mode_block_descr), 6794 header->block_descr_len); 6795 block_desc = (struct scsi_mode_block_descr *)&header[1]; 6796 break; 6797 } 6798 default: 6799 panic("invalid CDB type %#x", ctsio->cdb[0]); 6800 break; /* NOTREACHED */ 6801 } 6802 6803 /* 6804 * If we've got a disk, use its blocksize in the block 6805 * descriptor. Otherwise, just set it to 0. 
6806 */ 6807 if (dbd == 0) { 6808 if (control_dev == 0) 6809 scsi_ulto3b(lun->be_lun->blocksize, 6810 block_desc->block_len); 6811 else 6812 scsi_ulto3b(0, block_desc->block_len); 6813 } 6814 6815 switch (page_code) { 6816 case SMS_ALL_PAGES_PAGE: { 6817 int i, data_used; 6818 6819 data_used = header_len; 6820 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6821 struct ctl_page_index *page_index; 6822 6823 page_index = &lun->mode_pages.index[i]; 6824 6825 if ((control_dev != 0) 6826 && (page_index->page_flags & 6827 CTL_PAGE_FLAG_DISK_ONLY)) 6828 continue; 6829 6830 /* 6831 * We don't use this subpage if the user didn't 6832 * request all subpages. We already checked (above) 6833 * to make sure the user only specified a subpage 6834 * of 0 or 0xff in the SMS_ALL_PAGES_PAGE case. 6835 */ 6836 if ((page_index->subpage != 0) 6837 && (subpage == SMS_SUBPAGE_PAGE_0)) 6838 continue; 6839 6840 /* 6841 * Call the handler, if it exists, to update the 6842 * page to the latest values. 6843 */ 6844 if (page_index->sense_handler != NULL) 6845 page_index->sense_handler(ctsio, page_index,pc); 6846 6847 memcpy(ctsio->kern_data_ptr + data_used, 6848 page_index->page_data + 6849 (page_index->page_len * pc), 6850 page_index->page_len); 6851 data_used += page_index->page_len; 6852 } 6853 break; 6854 } 6855 default: { 6856 int i, data_used; 6857 6858 data_used = header_len; 6859 6860 for (i = 0; i < CTL_NUM_MODE_PAGES; i++) { 6861 struct ctl_page_index *page_index; 6862 6863 page_index = &lun->mode_pages.index[i]; 6864 6865 /* Look for the right page code */ 6866 if ((page_index->page_code & SMPH_PC_MASK) != page_code) 6867 continue; 6868 6869 /* Look for the right subpage or the subpage wildcard*/ 6870 if ((page_index->subpage != subpage) 6871 && (subpage != SMS_SUBPAGE_ALL)) 6872 continue; 6873 6874 /* Make sure the page is supported for this dev type */ 6875 if ((control_dev != 0) 6876 && (page_index->page_flags & 6877 CTL_PAGE_FLAG_DISK_ONLY)) 6878 continue; 6879 6880 /* 6881 * Call 
			 * the handler, if it exists, to update the
			 * page to the latest values.
			 */
			if (page_index->sense_handler != NULL)
				page_index->sense_handler(ctsio, page_index,pc);

			memcpy(ctsio->kern_data_ptr + data_used,
			       page_index->page_data +
			       (page_index->page_len * pc),
			       page_index->page_len);
			data_used += page_index->page_len;
		}
		break;
	}
	}

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * Sense handler for the Logical Block Provisioning log page.  Rebuilds the
 * page's parameter data from the backend's "blocksavail"/"blocksused" and
 * "poolblocksavail"/"poolblocksused" LUN attributes (each emitted only when
 * the backend provides a lun_attr method and returns a valid value, i.e.
 * not UINT64_MAX), then records the resulting length in the page index.
 * Counts are scaled down by CTL_LBP_EXPONENT before being stored.
 * Always returns 0.
 */
int
ctl_lbp_log_sense_handler(struct ctl_scsiio *ctsio,
			   struct ctl_page_index *page_index,
			   int pc)
{
	struct ctl_lun *lun;
	struct scsi_log_param_header *phdr;
	uint8_t *data;
	uint64_t val;

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	data = page_index->page_data;

	/* Parameter 0x0001: available resource count (per-pool scope). */
	if (lun->backend->lun_attr != NULL &&
	    (val = lun->backend->lun_attr(lun->be_lun->be_lun, "blocksavail"))
	     != UINT64_MAX) {
		phdr = (struct scsi_log_param_header *)data;
		scsi_ulto2b(0x0001, phdr->param_code);
		phdr->param_control = SLP_LBIN | SLP_LP;
		phdr->param_len = 8;
		data = (uint8_t *)(phdr + 1);
		scsi_ulto4b(val >> CTL_LBP_EXPONENT, data);
		data[4] = 0x02; /* per-pool */
		data += phdr->param_len;
	}

	/* Parameter 0x0002: used resource count (per-LUN scope). */
	if (lun->backend->lun_attr != NULL &&
	    (val = lun->backend->lun_attr(lun->be_lun->be_lun, "blocksused"))
	     != UINT64_MAX) {
		phdr = (struct scsi_log_param_header *)data;
		scsi_ulto2b(0x0002, phdr->param_code);
		phdr->param_control = SLP_LBIN | SLP_LP;
		phdr->param_len = 8;
		data = (uint8_t *)(phdr + 1);
		scsi_ulto4b(val >> CTL_LBP_EXPONENT, data);
		data[4] = 0x01; /* per-LUN */
		data += phdr->param_len;
	}

	/* Vendor parameter 0x00f1: pool blocks available (per-pool scope). */
	if (lun->backend->lun_attr != NULL &&
	    (val = lun->backend->lun_attr(lun->be_lun->be_lun, "poolblocksavail"))
	     != UINT64_MAX) {
		phdr = (struct scsi_log_param_header *)data;
		scsi_ulto2b(0x00f1, phdr->param_code);
		phdr->param_control = SLP_LBIN | SLP_LP;
		phdr->param_len = 8;
		data = (uint8_t *)(phdr + 1);
		scsi_ulto4b(val >> CTL_LBP_EXPONENT, data);
		data[4] = 0x02; /* per-pool */
		data += phdr->param_len;
	}

	/* Vendor parameter 0x00f2: pool blocks used (per-pool scope). */
	if (lun->backend->lun_attr != NULL &&
	    (val = lun->backend->lun_attr(lun->be_lun->be_lun, "poolblocksused"))
	     != UINT64_MAX) {
		phdr = (struct scsi_log_param_header *)data;
		scsi_ulto2b(0x00f2, phdr->param_code);
		phdr->param_control = SLP_LBIN | SLP_LP;
		phdr->param_len = 8;
		data = (uint8_t *)(phdr + 1);
		scsi_ulto4b(val >> CTL_LBP_EXPONENT, data);
		data[4] = 0x02; /* per-pool */
		data += phdr->param_len;
	}

	/* Actual page length is however much parameter data we emitted. */
	page_index->page_len = data - page_index->page_data;
	return (0);
}

/*
 * Emulate the SCSI LOG SENSE command: locate the requested page/subpage in
 * the per-LUN log page index, build the log header plus page data into an
 * allocated buffer, and hand it to the frontend via ctl_datamove().
 */
int
ctl_log_sense(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	int i, pc, page_code, subpage;
	int alloc_len, total_len;
	struct ctl_page_index *page_index;
	struct scsi_log_sense *cdb;
	struct scsi_log_header *header;

	CTL_DEBUG_PRINT(("ctl_log_sense\n"));

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	cdb = (struct scsi_log_sense *)ctsio->cdb;
	pc = (cdb->page & SLS_PAGE_CTRL_MASK) >> 6;
	page_code = cdb->page & SLS_PAGE_CODE;
	subpage = cdb->subpage;
	alloc_len = scsi_2btoul(cdb->length);

	page_index = NULL;
	for (i = 0; i < CTL_NUM_LOG_PAGES; i++) {
		page_index = &lun->log_pages.index[i];

		/* Look for the right page code */
		if ((page_index->page_code & SL_PAGE_CODE) != page_code)
			continue;

		/* Look for the right subpage or the subpage wildcard*/
		if (page_index->subpage != subpage)
			continue;

		break;
	}
	/* Page/subpage not in the index: ILLEGAL REQUEST, field 2 of the CDB. */
	if (i >= CTL_NUM_LOG_PAGES) {
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 1,
				      /*field*/ 2,
				      /*bit_valid*/ 0,
				      /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	total_len = sizeof(struct scsi_log_header) + page_index->page_len;

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
	ctsio->kern_sg_entries = 0;
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;
	/* Transfer the smaller of what we built and what the initiator asked for. */
	if (total_len < alloc_len) {
		ctsio->residual = alloc_len - total_len;
		ctsio->kern_data_len = total_len;
		ctsio->kern_total_len = total_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}

	header = (struct scsi_log_header *)ctsio->kern_data_ptr;
	header->page = page_index->page_code;
	if (page_index->subpage) {
		header->page |= SL_SPF;
		header->subpage = page_index->subpage;
	}
	scsi_ulto2b(page_index->page_len, header->datalen);

	/*
	 * Call the handler, if it exists, to update the
	 * page to the latest values.
	 */
	if (page_index->sense_handler != NULL)
		page_index->sense_handler(ctsio, page_index, pc);

	memcpy(header + 1, page_index->page_data, page_index->page_len);

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * Emulate READ CAPACITY (10): return the LUN's last LBA and block size.
 * If the capacity won't fit in 32 bits, 0xffffffff is reported to tell the
 * initiator to use READ CAPACITY (16) instead.
 */
int
ctl_read_capacity(struct ctl_scsiio *ctsio)
{
	struct scsi_read_capacity *cdb;
	struct scsi_read_capacity_data *data;
	struct ctl_lun *lun;
	uint32_t lba;

	CTL_DEBUG_PRINT(("ctl_read_capacity\n"));

	cdb = (struct scsi_read_capacity *)ctsio->cdb;

	lba = scsi_4btoul(cdb->addr);
	/* A non-zero LBA is only valid when PMI is set. */
	if (((cdb->pmi & SRC_PMI) == 0)
	 && (lba != 0)) {
		ctl_set_invalid_field(/*ctsio*/ ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 1,
				      /*field*/ 2,
				      /*bit_valid*/ 0,
				      /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO);
	data = (struct scsi_read_capacity_data *)ctsio->kern_data_ptr;
	ctsio->residual = 0;
	ctsio->kern_data_len = sizeof(*data);
	ctsio->kern_total_len = sizeof(*data);
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;

	/*
	 * If the maximum LBA is greater than 0xfffffffe, the user must
	 * issue a SERVICE ACTION IN (16) command, with the read capacity
	 * service action set.
	 */
	if (lun->be_lun->maxlba > 0xfffffffe)
		scsi_ulto4b(0xffffffff, data->addr);
	else
		scsi_ulto4b(lun->be_lun->maxlba, data->addr);

	/*
	 * XXX KDM this may not be 512 bytes...
	 */
	scsi_ulto4b(lun->be_lun->blocksize, data->length);

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * Emulate READ CAPACITY (16) (SERVICE ACTION IN): 64-bit last LBA, block
 * size, physical-block exponent/offset, and thin-provisioning (LBP) bits.
 */
int
ctl_read_capacity_16(struct ctl_scsiio *ctsio)
{
	struct scsi_read_capacity_16 *cdb;
	struct scsi_read_capacity_data_long *data;
	struct ctl_lun *lun;
	uint64_t lba;
	uint32_t alloc_len;

	CTL_DEBUG_PRINT(("ctl_read_capacity_16\n"));

	cdb = (struct scsi_read_capacity_16 *)ctsio->cdb;

	alloc_len = scsi_4btoul(cdb->alloc_len);
	lba = scsi_8btou64(cdb->addr);

	/* With PMI set, a non-zero LBA is invalid here. */
	if ((cdb->reladr & SRC16_PMI)
	 && (lba != 0)) {
		ctl_set_invalid_field(/*ctsio*/ ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 1,
				      /*field*/ 2,
				      /*bit_valid*/ 0,
				      /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	ctsio->kern_data_ptr = malloc(sizeof(*data), M_CTL, M_WAITOK | M_ZERO);
	data = (struct scsi_read_capacity_data_long *)ctsio->kern_data_ptr;

	if (sizeof(*data) < alloc_len) {
		ctsio->residual = alloc_len - sizeof(*data);
		ctsio->kern_data_len = sizeof(*data);
		ctsio->kern_total_len = sizeof(*data);
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;

	scsi_u64to8b(lun->be_lun->maxlba, data->addr);
	/* XXX KDM this may not be 512 bytes...
	 */
	scsi_ulto4b(lun->be_lun->blocksize, data->length);
	data->prot_lbppbe = lun->be_lun->pblockexp & SRC16_LBPPBE;
	scsi_ulto2b(lun->be_lun->pblockoff & SRC16_LALBA_A, data->lalba_lbp);
	/* Advertise thin provisioning (LBPME) and read-zeros (LBPRZ) if UNMAP is on. */
	if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP)
		data->lalba_lbp[0] |= SRC16_LBPME | SRC16_LBPRZ;

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * Emulate GET LBA STATUS: report mapped/deallocated state for the range
 * starting at the requested LBA.  A single "mapped or unknown" descriptor is
 * pre-filled, then the backend's config_read hook is given a chance to fill
 * in real provisioning data before the buffer is returned.
 */
int
ctl_get_lba_status(struct ctl_scsiio *ctsio)
{
	struct scsi_get_lba_status *cdb;
	struct scsi_get_lba_status_data *data;
	struct ctl_lun *lun;
	struct ctl_lba_len_flags *lbalen;
	uint64_t lba;
	uint32_t alloc_len, total_len;
	int retval;

	CTL_DEBUG_PRINT(("ctl_get_lba_status\n"));

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	cdb = (struct scsi_get_lba_status *)ctsio->cdb;
	lba = scsi_8btou64(cdb->addr);
	alloc_len = scsi_4btoul(cdb->alloc_len);

	if (lba > lun->be_lun->maxlba) {
		ctl_set_lba_out_of_range(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	total_len = sizeof(*data) + sizeof(data->descr[0]);
	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);
	data = (struct scsi_get_lba_status_data *)ctsio->kern_data_ptr;

	if (total_len < alloc_len) {
		ctsio->residual = alloc_len - total_len;
		ctsio->kern_data_len = total_len;
		ctsio->kern_total_len = total_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;

	/* Fill dummy data in case backend can't tell anything. */
	scsi_ulto4b(4 + sizeof(data->descr[0]), data->length);
	scsi_u64to8b(lba, data->descr[0].addr);
	scsi_ulto4b(MIN(UINT32_MAX, lun->be_lun->maxlba + 1 - lba),
	    data->descr[0].length);
	data->descr[0].status = 0;	/* Mapped or unknown. */

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;

	/* Let the backend refine the answer via its config_read path. */
	lbalen = (struct ctl_lba_len_flags *)&ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
	lbalen->lba = lba;
	lbalen->len = total_len;
	lbalen->flags = 0;
	retval = lun->backend->config_read((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * Emulate READ DEFECT DATA (10/12): we keep no defect list, so only a
 * header with a zero defect-list length is ever returned.
 */
int
ctl_read_defect(struct ctl_scsiio *ctsio)
{
	struct scsi_read_defect_data_10 *ccb10;
	struct scsi_read_defect_data_12 *ccb12;
	struct scsi_read_defect_data_hdr_10 *data10;
	struct scsi_read_defect_data_hdr_12 *data12;
	uint32_t alloc_len, data_len;
	uint8_t format;

	CTL_DEBUG_PRINT(("ctl_read_defect\n"));

	if (ctsio->cdb[0] == READ_DEFECT_DATA_10) {
		ccb10 = (struct scsi_read_defect_data_10 *)&ctsio->cdb;
		format = ccb10->format;
		alloc_len = scsi_2btoul(ccb10->alloc_length);
		data_len = sizeof(*data10);
	} else {
		ccb12 = (struct scsi_read_defect_data_12 *)&ctsio->cdb;
		format = ccb12->format;
		alloc_len = scsi_4btoul(ccb12->alloc_length);
		data_len = sizeof(*data12);
	}
	/* Zero allocation length: nothing to transfer, just succeed. */
	if (alloc_len == 0) {
		ctl_set_success(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
	if (data_len < alloc_len) {
		ctsio->residual = alloc_len - data_len;
		ctsio->kern_data_len = data_len;
		ctsio->kern_total_len = data_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;

	/* Echo the requested format back in an otherwise empty header. */
	if (ctsio->cdb[0] == READ_DEFECT_DATA_10) {
		data10 = (struct scsi_read_defect_data_hdr_10 *)
		    ctsio->kern_data_ptr;
		data10->format = format;
		scsi_ulto2b(0, data10->length);
	} else {
		data12 = (struct scsi_read_defect_data_hdr_12 *)
		    ctsio->kern_data_ptr;
		data12->format = format;
		scsi_ulto2b(0, data12->generation);
		scsi_ulto4b(0, data12->length);
	}

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * Emulate REPORT TARGET PORT GROUPS (MAINTENANCE IN): describe each target
 * port group, its asymmetric-access state, and its member ports for this
 * LUN.  Supports both the length-only and extended descriptor formats.
 *
 * NOTE(review): "tagret" in the function name is a long-standing typo for
 * "target"; it is the public symbol used by the command table, so it cannot
 * be renamed here.
 */
int
ctl_report_tagret_port_groups(struct ctl_scsiio *ctsio)
{
	struct scsi_maintenance_in *cdb;
	int retval;
	int alloc_len, ext, total_len = 0, g, p, pc, pg, gs, os;
	int num_target_port_groups, num_target_ports;
	struct ctl_lun *lun;
	struct ctl_softc *softc;
	struct ctl_port *port;
	struct scsi_target_group_data *rtg_ptr;
	struct scsi_target_group_data_extended *rtg_ext_ptr;
	struct scsi_target_port_group_descriptor *tpg_desc;

	CTL_DEBUG_PRINT(("ctl_report_tagret_port_groups\n"));

	cdb = (struct scsi_maintenance_in *)ctsio->cdb;
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	softc = lun->ctl_softc;

	retval = CTL_RETVAL_COMPLETE;

	switch (cdb->byte2 & STG_PDF_MASK) {
	case STG_PDF_LENGTH:
		ext = 0;
		break;
	case STG_PDF_EXTENDED:
		ext = 1;
		break;
	default:
		ctl_set_invalid_field(/*ctsio*/ ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 1,
				      /*field*/ 2,
				      /*bit_valid*/ 1,
				      /*bit*/ 5);
		ctl_done((union ctl_io *)ctsio);
		return(retval);
	}

	if (softc->is_single)
		num_target_port_groups = 1;
	else
		num_target_port_groups = NUM_TARGET_PORT_GROUPS;
	/* Count online ports through which this LUN is visible. */
	num_target_ports = 0;
	mtx_lock(&softc->ctl_lock);
	STAILQ_FOREACH(port, &softc->port_list, links) {
		if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
			continue;
		if (ctl_map_lun_back(softc, port->targ_port, lun->lun) >=
		    CTL_MAX_LUNS)
			continue;
		num_target_ports++;
	}
	mtx_unlock(&softc->ctl_lock);

	if (ext)
		total_len = sizeof(struct scsi_target_group_data_extended);
	else
		total_len = sizeof(struct scsi_target_group_data);
	total_len += sizeof(struct scsi_target_port_group_descriptor) *
		num_target_port_groups +
	    sizeof(struct scsi_target_port_descriptor) *
		num_target_ports * num_target_port_groups;

	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);

	ctsio->kern_sg_entries = 0;

	if (total_len < alloc_len) {
		ctsio->residual = alloc_len - total_len;
		ctsio->kern_data_len = total_len;
		ctsio->kern_total_len = total_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;

	if (ext) {
		rtg_ext_ptr = (struct scsi_target_group_data_extended *)
		    ctsio->kern_data_ptr;
		scsi_ulto4b(total_len - 4, rtg_ext_ptr->length);
		rtg_ext_ptr->format_type = 0x10;
		rtg_ext_ptr->implicit_transition_time = 0;
		tpg_desc = &rtg_ext_ptr->groups[0];
	} else {
		rtg_ptr = (struct scsi_target_group_data *)
		    ctsio->kern_data_ptr;
		scsi_ulto4b(total_len - 4, rtg_ptr->length);
		tpg_desc = &rtg_ptr->groups[0];
	}

	mtx_lock(&softc->ctl_lock);
	/*
	 * gs = access state for this node's own port group,
	 * os = access state for the other (peer) group, chosen from the HA
	 * mode and whether this shelf/LUN is currently primary.
	 */
	pg = softc->port_offset / CTL_MAX_PORTS;
	if (softc->flags & CTL_FLAG_ACTIVE_SHELF) {
		if (softc->ha_mode == CTL_HA_MODE_ACT_STBY) {
			gs = TPG_ASYMMETRIC_ACCESS_OPTIMIZED;
			os = TPG_ASYMMETRIC_ACCESS_STANDBY;
		} else if (lun->flags & CTL_LUN_PRIMARY_SC) {
			gs = TPG_ASYMMETRIC_ACCESS_OPTIMIZED;
			os = TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED;
		} else {
			gs = TPG_ASYMMETRIC_ACCESS_NONOPTIMIZED;
			os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED;
		}
	} else {
		gs = TPG_ASYMMETRIC_ACCESS_STANDBY;
		os = TPG_ASYMMETRIC_ACCESS_OPTIMIZED;
	}
	for (g = 0; g < num_target_port_groups; g++) {
		tpg_desc->pref_state = (g == pg) ? gs : os;
		tpg_desc->support = TPG_AO_SUP | TPG_AN_SUP | TPG_S_SUP;
		scsi_ulto2b(g + 1, tpg_desc->target_port_group);
		tpg_desc->status = TPG_IMPLICIT;
		pc = 0;
		STAILQ_FOREACH(port, &softc->port_list, links) {
			if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
				continue;
			if (ctl_map_lun_back(softc, port->targ_port, lun->lun)
			    >= CTL_MAX_LUNS)
				continue;
			p = port->targ_port % CTL_MAX_PORTS + g * CTL_MAX_PORTS;
			scsi_ulto2b(p, tpg_desc->descriptors[pc].
			    relative_target_port_identifier);
			pc++;
		}
		tpg_desc->target_port_count = pc;
		tpg_desc = (struct scsi_target_port_group_descriptor *)
		    &tpg_desc->descriptors[pc];
	}
	mtx_unlock(&softc->ctl_lock);

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return(retval);
}

/*
 * Emulate REPORT SUPPORTED OPERATION CODES (MAINTENANCE IN): enumerate the
 * commands in ctl_cmd_table applicable to this LUN's type, either as the
 * full list (RSO_OPTIONS_ALL) or as a single opcode/service-action entry.
 */
int
ctl_report_supported_opcodes(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct scsi_report_supported_opcodes *cdb;
	const struct ctl_cmd_entry *entry, *sentry;
	struct scsi_report_supported_opcodes_all *all;
	struct scsi_report_supported_opcodes_descr *descr;
	struct scsi_report_supported_opcodes_one *one;
	int retval;
	int alloc_len, total_len;
	int opcode, service_action, i, j, num;

	CTL_DEBUG_PRINT(("ctl_report_supported_opcodes\n"));

	cdb = (struct scsi_report_supported_opcodes *)ctsio->cdb;
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	retval
	    = CTL_RETVAL_COMPLETE;

	opcode = cdb->requested_opcode;
	service_action = scsi_2btoul(cdb->requested_service_action);
	/* First pass: size the response for the requested reporting option. */
	switch (cdb->options & RSO_OPTIONS_MASK) {
	case RSO_OPTIONS_ALL:
		num = 0;
		for (i = 0; i < 256; i++) {
			entry = &ctl_cmd_table[i];
			/* CTL_CMD_FLAG_SA5: entry is a 32-way service-action table. */
			if (entry->flags & CTL_CMD_FLAG_SA5) {
				for (j = 0; j < 32; j++) {
					sentry = &((const struct ctl_cmd_entry *)
					    entry->execute)[j];
					if (ctl_cmd_applicable(
					    lun->be_lun->lun_type, sentry))
						num++;
				}
			} else {
				if (ctl_cmd_applicable(lun->be_lun->lun_type,
				    entry))
					num++;
			}
		}
		total_len = sizeof(struct scsi_report_supported_opcodes_all) +
		    num * sizeof(struct scsi_report_supported_opcodes_descr);
		break;
	case RSO_OPTIONS_OC:
		/* Opcode-only lookup is invalid for service-action opcodes. */
		if (ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) {
			ctl_set_invalid_field(/*ctsio*/ ctsio,
					      /*sks_valid*/ 1,
					      /*command*/ 1,
					      /*field*/ 2,
					      /*bit_valid*/ 1,
					      /*bit*/ 2);
			ctl_done((union ctl_io *)ctsio);
			return (CTL_RETVAL_COMPLETE);
		}
		total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32;
		break;
	case RSO_OPTIONS_OC_SA:
		if ((ctl_cmd_table[opcode].flags & CTL_CMD_FLAG_SA5) == 0 ||
		    service_action >= 32) {
			ctl_set_invalid_field(/*ctsio*/ ctsio,
					      /*sks_valid*/ 1,
					      /*command*/ 1,
					      /*field*/ 2,
					      /*bit_valid*/ 1,
					      /*bit*/ 2);
			ctl_done((union ctl_io *)ctsio);
			return (CTL_RETVAL_COMPLETE);
		}
		total_len = sizeof(struct scsi_report_supported_opcodes_one) + 32;
		break;
	default:
		ctl_set_invalid_field(/*ctsio*/ ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 1,
				      /*field*/ 2,
				      /*bit_valid*/ 1,
				      /*bit*/ 2);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);

	ctsio->kern_sg_entries = 0;

	if (total_len < alloc_len) {
		ctsio->residual = alloc_len - total_len;
		ctsio->kern_data_len = total_len;
		ctsio->kern_total_len = total_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;

	/* Second pass: fill in the descriptors. */
	switch (cdb->options & RSO_OPTIONS_MASK) {
	case RSO_OPTIONS_ALL:
		all = (struct scsi_report_supported_opcodes_all *)
		    ctsio->kern_data_ptr;
		num = 0;
		for (i = 0; i < 256; i++) {
			entry = &ctl_cmd_table[i];
			if (entry->flags & CTL_CMD_FLAG_SA5) {
				for (j = 0; j < 32; j++) {
					sentry = &((const struct ctl_cmd_entry *)
					    entry->execute)[j];
					if (!ctl_cmd_applicable(
					    lun->be_lun->lun_type, sentry))
						continue;
					descr = &all->descr[num++];
					descr->opcode = i;
					scsi_ulto2b(j, descr->service_action);
					descr->flags = RSO_SERVACTV;
					scsi_ulto2b(sentry->length,
					    descr->cdb_length);
				}
			} else {
				if (!ctl_cmd_applicable(lun->be_lun->lun_type,
				    entry))
					continue;
				descr = &all->descr[num++];
				descr->opcode = i;
				scsi_ulto2b(0, descr->service_action);
				descr->flags = 0;
				scsi_ulto2b(entry->length, descr->cdb_length);
			}
		}
		scsi_ulto4b(
		    num * sizeof(struct scsi_report_supported_opcodes_descr),
		    all->length);
		break;
	case RSO_OPTIONS_OC:
		one = (struct scsi_report_supported_opcodes_one *)
		    ctsio->kern_data_ptr;
		entry = &ctl_cmd_table[opcode];
		goto fill_one;
	case RSO_OPTIONS_OC_SA:
		one = (struct scsi_report_supported_opcodes_one *)
		    ctsio->kern_data_ptr;
		entry = &ctl_cmd_table[opcode];
		entry = &((const struct ctl_cmd_entry *)
		    entry->execute)[service_action];
fill_one:
		if (ctl_cmd_applicable(lun->be_lun->lun_type, entry)) {
			one->support = 3;	/* supported per standard */
			scsi_ulto2b(entry->length, one->cdb_length);
			one->cdb_usage[0] = opcode;
			memcpy(&one->cdb_usage[1], entry->usage,
			    entry->length - 1);
		} else
			one->support = 1;	/* not supported */
		break;
	}

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return(retval);
}

/*
 * Emulate REPORT SUPPORTED TASK MANAGEMENT FUNCTIONS (MAINTENANCE IN):
 * return a fixed bitmap of the task management functions CTL implements.
 */
int
ctl_report_supported_tmf(struct ctl_scsiio *ctsio)
{
	struct scsi_report_supported_tmf *cdb;
	struct scsi_report_supported_tmf_data *data;
	int retval;
	int alloc_len, total_len;

	CTL_DEBUG_PRINT(("ctl_report_supported_tmf\n"));

	cdb = (struct scsi_report_supported_tmf *)ctsio->cdb;

	retval = CTL_RETVAL_COMPLETE;

	total_len = sizeof(struct scsi_report_supported_tmf_data);
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);

	ctsio->kern_sg_entries = 0;

	if (total_len < alloc_len) {
		ctsio->residual = alloc_len - total_len;
		ctsio->kern_data_len = total_len;
		ctsio->kern_total_len = total_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;

	data = (struct scsi_report_supported_tmf_data *)ctsio->kern_data_ptr;
	data->byte1 |= RST_ATS | RST_ATSS | RST_CTSS | RST_LURS | RST_TRS;
	data->byte2 |= RST_ITNRS;

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}

/*
 * Emulate REPORT TIMESTAMP: return the current time as a 48-bit count of
 * milliseconds (high 32 bits via scsi_ulto4b, low 16 via scsi_ulto2b),
 * flagged as originating outside the device server.
 */
int
ctl_report_timestamp(struct ctl_scsiio *ctsio)
{
	struct scsi_report_timestamp *cdb;
	struct scsi_report_timestamp_data *data;
	struct timeval tv;
	int64_t timestamp;
	int retval;
	int alloc_len, total_len;

	CTL_DEBUG_PRINT(("ctl_report_timestamp\n"));

	cdb = (struct scsi_report_timestamp *)ctsio->cdb;

	retval = CTL_RETVAL_COMPLETE;

	total_len = sizeof(struct scsi_report_timestamp_data);
	alloc_len = scsi_4btoul(cdb->length);

	ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO);

	ctsio->kern_sg_entries = 0;

	if (total_len < alloc_len) {
		ctsio->residual = alloc_len - total_len;
		ctsio->kern_data_len = total_len;
		ctsio->kern_total_len = total_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;

	data = (struct scsi_report_timestamp_data *)ctsio->kern_data_ptr;
	scsi_ulto2b(sizeof(*data) - 2, data->length);
	data->origin = RTS_ORIG_OUTSIDE;
	getmicrotime(&tv);
	timestamp = (int64_t)tv.tv_sec * 1000 + tv.tv_usec / 1000;
	scsi_ulto4b(timestamp >> 16, data->timestamp);
	scsi_ulto2b(timestamp & 0xffff, &data->timestamp[4]);

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (retval);
}

/*
 * Emulate PERSISTENT RESERVE IN.  The buffer is sized under lun_lock, the
 * lock is dropped for the M_WAITOK allocation, then retaken; if the
 * reservation state changed meanwhile, the code below retries from here.
 */
int
ctl_persistent_reserve_in(struct ctl_scsiio *ctsio)
{
	struct scsi_per_res_in *cdb;
	int alloc_len, total_len = 0;
	/* struct scsi_per_res_in_rsrv in_data; */
	struct ctl_lun *lun;
	struct ctl_softc *softc;
	uint64_t key;

	CTL_DEBUG_PRINT(("ctl_persistent_reserve_in\n"));

	cdb = (struct scsi_per_res_in *)ctsio->cdb;

	alloc_len = scsi_2btoul(cdb->length);

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	softc = lun->ctl_softc;

retry:
	mtx_lock(&lun->lun_lock);
	switch (cdb->action) {
	case SPRI_RK: /* read keys */
		total_len = sizeof(struct scsi_per_res_in_keys) +
			lun->pr_key_count *
			sizeof(struct scsi_per_res_key);
		break;
7730 case SPRI_RR: /* read reservation */ 7731 if (lun->flags & CTL_LUN_PR_RESERVED) 7732 total_len = sizeof(struct scsi_per_res_in_rsrv); 7733 else 7734 total_len = sizeof(struct scsi_per_res_in_header); 7735 break; 7736 case SPRI_RC: /* report capabilities */ 7737 total_len = sizeof(struct scsi_per_res_cap); 7738 break; 7739 case SPRI_RS: /* read full status */ 7740 total_len = sizeof(struct scsi_per_res_in_header) + 7741 (sizeof(struct scsi_per_res_in_full_desc) + 256) * 7742 lun->pr_key_count; 7743 break; 7744 default: 7745 panic("Invalid PR type %x", cdb->action); 7746 } 7747 mtx_unlock(&lun->lun_lock); 7748 7749 ctsio->kern_data_ptr = malloc(total_len, M_CTL, M_WAITOK | M_ZERO); 7750 7751 if (total_len < alloc_len) { 7752 ctsio->residual = alloc_len - total_len; 7753 ctsio->kern_data_len = total_len; 7754 ctsio->kern_total_len = total_len; 7755 } else { 7756 ctsio->residual = 0; 7757 ctsio->kern_data_len = alloc_len; 7758 ctsio->kern_total_len = alloc_len; 7759 } 7760 7761 ctsio->kern_data_resid = 0; 7762 ctsio->kern_rel_offset = 0; 7763 ctsio->kern_sg_entries = 0; 7764 7765 mtx_lock(&lun->lun_lock); 7766 switch (cdb->action) { 7767 case SPRI_RK: { // read keys 7768 struct scsi_per_res_in_keys *res_keys; 7769 int i, key_count; 7770 7771 res_keys = (struct scsi_per_res_in_keys*)ctsio->kern_data_ptr; 7772 7773 /* 7774 * We had to drop the lock to allocate our buffer, which 7775 * leaves time for someone to come in with another 7776 * persistent reservation. (That is unlikely, though, 7777 * since this should be the only persistent reservation 7778 * command active right now.) 
7779 */ 7780 if (total_len != (sizeof(struct scsi_per_res_in_keys) + 7781 (lun->pr_key_count * 7782 sizeof(struct scsi_per_res_key)))){ 7783 mtx_unlock(&lun->lun_lock); 7784 free(ctsio->kern_data_ptr, M_CTL); 7785 printf("%s: reservation length changed, retrying\n", 7786 __func__); 7787 goto retry; 7788 } 7789 7790 scsi_ulto4b(lun->PRGeneration, res_keys->header.generation); 7791 7792 scsi_ulto4b(sizeof(struct scsi_per_res_key) * 7793 lun->pr_key_count, res_keys->header.length); 7794 7795 for (i = 0, key_count = 0; i < 2*CTL_MAX_INITIATORS; i++) { 7796 if ((key = ctl_get_prkey(lun, i)) == 0) 7797 continue; 7798 7799 /* 7800 * We used lun->pr_key_count to calculate the 7801 * size to allocate. If it turns out the number of 7802 * initiators with the registered flag set is 7803 * larger than that (i.e. they haven't been kept in 7804 * sync), we've got a problem. 7805 */ 7806 if (key_count >= lun->pr_key_count) { 7807#ifdef NEEDTOPORT 7808 csevent_log(CSC_CTL | CSC_SHELF_SW | 7809 CTL_PR_ERROR, 7810 csevent_LogType_Fault, 7811 csevent_AlertLevel_Yellow, 7812 csevent_FRU_ShelfController, 7813 csevent_FRU_Firmware, 7814 csevent_FRU_Unknown, 7815 "registered keys %d >= key " 7816 "count %d", key_count, 7817 lun->pr_key_count); 7818#endif 7819 key_count++; 7820 continue; 7821 } 7822 scsi_u64to8b(key, res_keys->keys[key_count].key); 7823 key_count++; 7824 } 7825 break; 7826 } 7827 case SPRI_RR: { // read reservation 7828 struct scsi_per_res_in_rsrv *res; 7829 int tmp_len, header_only; 7830 7831 res = (struct scsi_per_res_in_rsrv *)ctsio->kern_data_ptr; 7832 7833 scsi_ulto4b(lun->PRGeneration, res->header.generation); 7834 7835 if (lun->flags & CTL_LUN_PR_RESERVED) 7836 { 7837 tmp_len = sizeof(struct scsi_per_res_in_rsrv); 7838 scsi_ulto4b(sizeof(struct scsi_per_res_in_rsrv_data), 7839 res->header.length); 7840 header_only = 0; 7841 } else { 7842 tmp_len = sizeof(struct scsi_per_res_in_header); 7843 scsi_ulto4b(0, res->header.length); 7844 header_only = 1; 7845 } 7846 7847 
/* 7848 * We had to drop the lock to allocate our buffer, which 7849 * leaves time for someone to come in with another 7850 * persistent reservation. (That is unlikely, though, 7851 * since this should be the only persistent reservation 7852 * command active right now.) 7853 */ 7854 if (tmp_len != total_len) { 7855 mtx_unlock(&lun->lun_lock); 7856 free(ctsio->kern_data_ptr, M_CTL); 7857 printf("%s: reservation status changed, retrying\n", 7858 __func__); 7859 goto retry; 7860 } 7861 7862 /* 7863 * No reservation held, so we're done. 7864 */ 7865 if (header_only != 0) 7866 break; 7867 7868 /* 7869 * If the registration is an All Registrants type, the key 7870 * is 0, since it doesn't really matter. 7871 */ 7872 if (lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) { 7873 scsi_u64to8b(ctl_get_prkey(lun, lun->pr_res_idx), 7874 res->data.reservation); 7875 } 7876 res->data.scopetype = lun->res_type; 7877 break; 7878 } 7879 case SPRI_RC: //report capabilities 7880 { 7881 struct scsi_per_res_cap *res_cap; 7882 uint16_t type_mask; 7883 7884 res_cap = (struct scsi_per_res_cap *)ctsio->kern_data_ptr; 7885 scsi_ulto2b(sizeof(*res_cap), res_cap->length); 7886 res_cap->flags2 |= SPRI_TMV | SPRI_ALLOW_5; 7887 type_mask = SPRI_TM_WR_EX_AR | 7888 SPRI_TM_EX_AC_RO | 7889 SPRI_TM_WR_EX_RO | 7890 SPRI_TM_EX_AC | 7891 SPRI_TM_WR_EX | 7892 SPRI_TM_EX_AC_AR; 7893 scsi_ulto2b(type_mask, res_cap->type_mask); 7894 break; 7895 } 7896 case SPRI_RS: { // read full status 7897 struct scsi_per_res_in_full *res_status; 7898 struct scsi_per_res_in_full_desc *res_desc; 7899 struct ctl_port *port; 7900 int i, len; 7901 7902 res_status = (struct scsi_per_res_in_full*)ctsio->kern_data_ptr; 7903 7904 /* 7905 * We had to drop the lock to allocate our buffer, which 7906 * leaves time for someone to come in with another 7907 * persistent reservation. (That is unlikely, though, 7908 * since this should be the only persistent reservation 7909 * command active right now.) 
7910 */ 7911 if (total_len < (sizeof(struct scsi_per_res_in_header) + 7912 (sizeof(struct scsi_per_res_in_full_desc) + 256) * 7913 lun->pr_key_count)){ 7914 mtx_unlock(&lun->lun_lock); 7915 free(ctsio->kern_data_ptr, M_CTL); 7916 printf("%s: reservation length changed, retrying\n", 7917 __func__); 7918 goto retry; 7919 } 7920 7921 scsi_ulto4b(lun->PRGeneration, res_status->header.generation); 7922 7923 res_desc = &res_status->desc[0]; 7924 for (i = 0; i < 2*CTL_MAX_INITIATORS; i++) { 7925 if ((key = ctl_get_prkey(lun, i)) == 0) 7926 continue; 7927 7928 scsi_u64to8b(key, res_desc->res_key.key); 7929 if ((lun->flags & CTL_LUN_PR_RESERVED) && 7930 (lun->pr_res_idx == i || 7931 lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS)) { 7932 res_desc->flags = SPRI_FULL_R_HOLDER; 7933 res_desc->scopetype = lun->res_type; 7934 } 7935 scsi_ulto2b(i / CTL_MAX_INIT_PER_PORT, 7936 res_desc->rel_trgt_port_id); 7937 len = 0; 7938 port = softc->ctl_ports[ 7939 ctl_port_idx(i / CTL_MAX_INIT_PER_PORT)]; 7940 if (port != NULL) 7941 len = ctl_create_iid(port, 7942 i % CTL_MAX_INIT_PER_PORT, 7943 res_desc->transport_id); 7944 scsi_ulto4b(len, res_desc->additional_length); 7945 res_desc = (struct scsi_per_res_in_full_desc *) 7946 &res_desc->transport_id[len]; 7947 } 7948 scsi_ulto4b((uint8_t *)res_desc - (uint8_t *)&res_status->desc[0], 7949 res_status->header.length); 7950 break; 7951 } 7952 default: 7953 /* 7954 * This is a bug, because we just checked for this above, 7955 * and should have returned an error. 
7956 */ 7957 panic("Invalid PR type %x", cdb->action); 7958 break; /* NOTREACHED */ 7959 } 7960 mtx_unlock(&lun->lun_lock); 7961 7962 ctl_set_success(ctsio); 7963 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 7964 ctsio->be_move_done = ctl_config_move_done; 7965 ctl_datamove((union ctl_io *)ctsio); 7966 return (CTL_RETVAL_COMPLETE); 7967} 7968 7969static void 7970ctl_est_res_ua(struct ctl_lun *lun, uint32_t residx, ctl_ua_type ua) 7971{ 7972 int off = lun->ctl_softc->persis_offset; 7973 7974 if (residx >= off && residx < off + CTL_MAX_INITIATORS) 7975 ctl_est_ua(lun, residx - off, ua); 7976} 7977 7978/* 7979 * Returns 0 if ctl_persistent_reserve_out() should continue, non-zero if 7980 * it should return. 7981 */ 7982static int 7983ctl_pro_preempt(struct ctl_softc *softc, struct ctl_lun *lun, uint64_t res_key, 7984 uint64_t sa_res_key, uint8_t type, uint32_t residx, 7985 struct ctl_scsiio *ctsio, struct scsi_per_res_out *cdb, 7986 struct scsi_per_res_out_parms* param) 7987{ 7988 union ctl_ha_msg persis_io; 7989 int retval, i; 7990 int isc_retval; 7991 7992 retval = 0; 7993 7994 mtx_lock(&lun->lun_lock); 7995 if (sa_res_key == 0) { 7996 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) { 7997 /* validate scope and type */ 7998 if ((cdb->scope_type & SPR_SCOPE_MASK) != 7999 SPR_LU_SCOPE) { 8000 mtx_unlock(&lun->lun_lock); 8001 ctl_set_invalid_field(/*ctsio*/ ctsio, 8002 /*sks_valid*/ 1, 8003 /*command*/ 1, 8004 /*field*/ 2, 8005 /*bit_valid*/ 1, 8006 /*bit*/ 4); 8007 ctl_done((union ctl_io *)ctsio); 8008 return (1); 8009 } 8010 8011 if (type>8 || type==2 || type==4 || type==0) { 8012 mtx_unlock(&lun->lun_lock); 8013 ctl_set_invalid_field(/*ctsio*/ ctsio, 8014 /*sks_valid*/ 1, 8015 /*command*/ 1, 8016 /*field*/ 2, 8017 /*bit_valid*/ 1, 8018 /*bit*/ 0); 8019 ctl_done((union ctl_io *)ctsio); 8020 return (1); 8021 } 8022 8023 /* 8024 * Unregister everybody else and build UA for 8025 * them 8026 */ 8027 for(i=0; i < 2*CTL_MAX_INITIATORS; i++) { 8028 if (i == residx || 
ctl_get_prkey(lun, i) == 0) 8029 continue; 8030 8031 ctl_clr_prkey(lun, i); 8032 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); 8033 } 8034 lun->pr_key_count = 1; 8035 lun->res_type = type; 8036 if (lun->res_type != SPR_TYPE_WR_EX_AR 8037 && lun->res_type != SPR_TYPE_EX_AC_AR) 8038 lun->pr_res_idx = residx; 8039 8040 /* send msg to other side */ 8041 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8042 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8043 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 8044 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8045 persis_io.pr.pr_info.res_type = type; 8046 memcpy(persis_io.pr.pr_info.sa_res_key, 8047 param->serv_act_res_key, 8048 sizeof(param->serv_act_res_key)); 8049 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 8050 &persis_io, sizeof(persis_io), 0)) > 8051 CTL_HA_STATUS_SUCCESS) { 8052 printf("CTL:Persis Out error returned " 8053 "from ctl_ha_msg_send %d\n", 8054 isc_retval); 8055 } 8056 } else { 8057 /* not all registrants */ 8058 mtx_unlock(&lun->lun_lock); 8059 free(ctsio->kern_data_ptr, M_CTL); 8060 ctl_set_invalid_field(ctsio, 8061 /*sks_valid*/ 1, 8062 /*command*/ 0, 8063 /*field*/ 8, 8064 /*bit_valid*/ 0, 8065 /*bit*/ 0); 8066 ctl_done((union ctl_io *)ctsio); 8067 return (1); 8068 } 8069 } else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS 8070 || !(lun->flags & CTL_LUN_PR_RESERVED)) { 8071 int found = 0; 8072 8073 if (res_key == sa_res_key) { 8074 /* special case */ 8075 /* 8076 * The spec implies this is not good but doesn't 8077 * say what to do. There are two choices either 8078 * generate a res conflict or check condition 8079 * with illegal field in parameter data. Since 8080 * that is what is done when the sa_res_key is 8081 * zero I'll take that approach since this has 8082 * to do with the sa_res_key. 
8083 */ 8084 mtx_unlock(&lun->lun_lock); 8085 free(ctsio->kern_data_ptr, M_CTL); 8086 ctl_set_invalid_field(ctsio, 8087 /*sks_valid*/ 1, 8088 /*command*/ 0, 8089 /*field*/ 8, 8090 /*bit_valid*/ 0, 8091 /*bit*/ 0); 8092 ctl_done((union ctl_io *)ctsio); 8093 return (1); 8094 } 8095 8096 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) { 8097 if (ctl_get_prkey(lun, i) != sa_res_key) 8098 continue; 8099 8100 found = 1; 8101 ctl_clr_prkey(lun, i); 8102 lun->pr_key_count--; 8103 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); 8104 } 8105 if (!found) { 8106 mtx_unlock(&lun->lun_lock); 8107 free(ctsio->kern_data_ptr, M_CTL); 8108 ctl_set_reservation_conflict(ctsio); 8109 ctl_done((union ctl_io *)ctsio); 8110 return (CTL_RETVAL_COMPLETE); 8111 } 8112 /* send msg to other side */ 8113 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8114 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8115 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 8116 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8117 persis_io.pr.pr_info.res_type = type; 8118 memcpy(persis_io.pr.pr_info.sa_res_key, 8119 param->serv_act_res_key, 8120 sizeof(param->serv_act_res_key)); 8121 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 8122 &persis_io, sizeof(persis_io), 0)) > 8123 CTL_HA_STATUS_SUCCESS) { 8124 printf("CTL:Persis Out error returned from " 8125 "ctl_ha_msg_send %d\n", isc_retval); 8126 } 8127 } else { 8128 /* Reserved but not all registrants */ 8129 /* sa_res_key is res holder */ 8130 if (sa_res_key == ctl_get_prkey(lun, lun->pr_res_idx)) { 8131 /* validate scope and type */ 8132 if ((cdb->scope_type & SPR_SCOPE_MASK) != 8133 SPR_LU_SCOPE) { 8134 mtx_unlock(&lun->lun_lock); 8135 ctl_set_invalid_field(/*ctsio*/ ctsio, 8136 /*sks_valid*/ 1, 8137 /*command*/ 1, 8138 /*field*/ 2, 8139 /*bit_valid*/ 1, 8140 /*bit*/ 4); 8141 ctl_done((union ctl_io *)ctsio); 8142 return (1); 8143 } 8144 8145 if (type>8 || type==2 || type==4 || type==0) { 8146 mtx_unlock(&lun->lun_lock); 8147 ctl_set_invalid_field(/*ctsio*/ ctsio, 8148 
/*sks_valid*/ 1, 8149 /*command*/ 1, 8150 /*field*/ 2, 8151 /*bit_valid*/ 1, 8152 /*bit*/ 0); 8153 ctl_done((union ctl_io *)ctsio); 8154 return (1); 8155 } 8156 8157 /* 8158 * Do the following: 8159 * if sa_res_key != res_key remove all 8160 * registrants w/sa_res_key and generate UA 8161 * for these registrants(Registrations 8162 * Preempted) if it wasn't an exclusive 8163 * reservation generate UA(Reservations 8164 * Preempted) for all other registered nexuses 8165 * if the type has changed. Establish the new 8166 * reservation and holder. If res_key and 8167 * sa_res_key are the same do the above 8168 * except don't unregister the res holder. 8169 */ 8170 8171 for(i=0; i < 2*CTL_MAX_INITIATORS; i++) { 8172 if (i == residx || ctl_get_prkey(lun, i) == 0) 8173 continue; 8174 8175 if (sa_res_key == ctl_get_prkey(lun, i)) { 8176 ctl_clr_prkey(lun, i); 8177 lun->pr_key_count--; 8178 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); 8179 } else if (type != lun->res_type 8180 && (lun->res_type == SPR_TYPE_WR_EX_RO 8181 || lun->res_type ==SPR_TYPE_EX_AC_RO)){ 8182 ctl_est_res_ua(lun, i, CTL_UA_RES_RELEASE); 8183 } 8184 } 8185 lun->res_type = type; 8186 if (lun->res_type != SPR_TYPE_WR_EX_AR 8187 && lun->res_type != SPR_TYPE_EX_AC_AR) 8188 lun->pr_res_idx = residx; 8189 else 8190 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 8191 8192 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8193 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8194 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 8195 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8196 persis_io.pr.pr_info.res_type = type; 8197 memcpy(persis_io.pr.pr_info.sa_res_key, 8198 param->serv_act_res_key, 8199 sizeof(param->serv_act_res_key)); 8200 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 8201 &persis_io, sizeof(persis_io), 0)) > 8202 CTL_HA_STATUS_SUCCESS) { 8203 printf("CTL:Persis Out error returned " 8204 "from ctl_ha_msg_send %d\n", 8205 isc_retval); 8206 } 8207 } else { 8208 /* 8209 * sa_res_key is not the res holder just 8210 * 
remove registrants 8211 */ 8212 int found=0; 8213 8214 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) { 8215 if (sa_res_key != ctl_get_prkey(lun, i)) 8216 continue; 8217 8218 found = 1; 8219 ctl_clr_prkey(lun, i); 8220 lun->pr_key_count--; 8221 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); 8222 } 8223 8224 if (!found) { 8225 mtx_unlock(&lun->lun_lock); 8226 free(ctsio->kern_data_ptr, M_CTL); 8227 ctl_set_reservation_conflict(ctsio); 8228 ctl_done((union ctl_io *)ctsio); 8229 return (1); 8230 } 8231 persis_io.hdr.nexus = ctsio->io_hdr.nexus; 8232 persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION; 8233 persis_io.pr.pr_info.action = CTL_PR_PREEMPT; 8234 persis_io.pr.pr_info.residx = lun->pr_res_idx; 8235 persis_io.pr.pr_info.res_type = type; 8236 memcpy(persis_io.pr.pr_info.sa_res_key, 8237 param->serv_act_res_key, 8238 sizeof(param->serv_act_res_key)); 8239 if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, 8240 &persis_io, sizeof(persis_io), 0)) > 8241 CTL_HA_STATUS_SUCCESS) { 8242 printf("CTL:Persis Out error returned " 8243 "from ctl_ha_msg_send %d\n", 8244 isc_retval); 8245 } 8246 } 8247 } 8248 8249 lun->PRGeneration++; 8250 mtx_unlock(&lun->lun_lock); 8251 8252 return (retval); 8253} 8254 8255static void 8256ctl_pro_preempt_other(struct ctl_lun *lun, union ctl_ha_msg *msg) 8257{ 8258 uint64_t sa_res_key; 8259 int i; 8260 8261 sa_res_key = scsi_8btou64(msg->pr.pr_info.sa_res_key); 8262 8263 if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS 8264 || lun->pr_res_idx == CTL_PR_NO_RESERVATION 8265 || sa_res_key != ctl_get_prkey(lun, lun->pr_res_idx)) { 8266 if (sa_res_key == 0) { 8267 /* 8268 * Unregister everybody else and build UA for 8269 * them 8270 */ 8271 for(i=0; i < 2*CTL_MAX_INITIATORS; i++) { 8272 if (i == msg->pr.pr_info.residx || 8273 ctl_get_prkey(lun, i) == 0) 8274 continue; 8275 8276 ctl_clr_prkey(lun, i); 8277 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); 8278 } 8279 8280 lun->pr_key_count = 1; 8281 lun->res_type = msg->pr.pr_info.res_type; 8282 if (lun->res_type != 
SPR_TYPE_WR_EX_AR 8283 && lun->res_type != SPR_TYPE_EX_AC_AR) 8284 lun->pr_res_idx = msg->pr.pr_info.residx; 8285 } else { 8286 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) { 8287 if (sa_res_key == ctl_get_prkey(lun, i)) 8288 continue; 8289 8290 ctl_clr_prkey(lun, i); 8291 lun->pr_key_count--; 8292 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); 8293 } 8294 } 8295 } else { 8296 for (i=0; i < 2*CTL_MAX_INITIATORS; i++) { 8297 if (i == msg->pr.pr_info.residx || 8298 ctl_get_prkey(lun, i) == 0) 8299 continue; 8300 8301 if (sa_res_key == ctl_get_prkey(lun, i)) { 8302 ctl_clr_prkey(lun, i); 8303 lun->pr_key_count--; 8304 ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT); 8305 } else if (msg->pr.pr_info.res_type != lun->res_type 8306 && (lun->res_type == SPR_TYPE_WR_EX_RO 8307 || lun->res_type == SPR_TYPE_EX_AC_RO)) { 8308 ctl_est_res_ua(lun, i, CTL_UA_RES_RELEASE); 8309 } 8310 } 8311 lun->res_type = msg->pr.pr_info.res_type; 8312 if (lun->res_type != SPR_TYPE_WR_EX_AR 8313 && lun->res_type != SPR_TYPE_EX_AC_AR) 8314 lun->pr_res_idx = msg->pr.pr_info.residx; 8315 else 8316 lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS; 8317 } 8318 lun->PRGeneration++; 8319 8320} 8321 8322 8323int 8324ctl_persistent_reserve_out(struct ctl_scsiio *ctsio) 8325{ 8326 int retval; 8327 int isc_retval; 8328 u_int32_t param_len; 8329 struct scsi_per_res_out *cdb; 8330 struct ctl_lun *lun; 8331 struct scsi_per_res_out_parms* param; 8332 struct ctl_softc *softc; 8333 uint32_t residx; 8334 uint64_t res_key, sa_res_key, key; 8335 uint8_t type; 8336 union ctl_ha_msg persis_io; 8337 int i; 8338 8339 CTL_DEBUG_PRINT(("ctl_persistent_reserve_out\n")); 8340 8341 retval = CTL_RETVAL_COMPLETE; 8342 8343 cdb = (struct scsi_per_res_out *)ctsio->cdb; 8344 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8345 softc = lun->ctl_softc; 8346 8347 /* 8348 * We only support whole-LUN scope. The scope & type are ignored for 8349 * register, register and ignore existing key and clear. 
	 * We sometimes ignore scope and type on preempts too!!
	 * Verify reservation type here as well.
	 */
	type = cdb->scope_type & SPR_TYPE_MASK;
	if ((cdb->action == SPRO_RESERVE)
	 || (cdb->action == SPRO_RELEASE)) {
		if ((cdb->scope_type & SPR_SCOPE_MASK) != SPR_LU_SCOPE) {
			ctl_set_invalid_field(/*ctsio*/ ctsio,
					      /*sks_valid*/ 1,
					      /*command*/ 1,
					      /*field*/ 2,
					      /*bit_valid*/ 1,
					      /*bit*/ 4);
			ctl_done((union ctl_io *)ctsio);
			return (CTL_RETVAL_COMPLETE);
		}

		/* Reject reserved/obsolete reservation type codes. */
		if (type>8 || type==2 || type==4 || type==0) {
			ctl_set_invalid_field(/*ctsio*/ ctsio,
					      /*sks_valid*/ 1,
					      /*command*/ 1,
					      /*field*/ 2,
					      /*bit_valid*/ 1,
					      /*bit*/ 0);
			ctl_done((union ctl_io *)ctsio);
			return (CTL_RETVAL_COMPLETE);
		}
	}

	param_len = scsi_4btoul(cdb->length);

	/*
	 * First pass: allocate the parameter-list buffer and start the
	 * DATAMOVE to fetch it; we are re-entered with CTL_FLAG_ALLOCATED
	 * set once the data has arrived.
	 */
	if ((ctsio->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0) {
		ctsio->kern_data_ptr = malloc(param_len, M_CTL, M_WAITOK);
		ctsio->kern_data_len = param_len;
		ctsio->kern_total_len = param_len;
		ctsio->kern_data_resid = 0;
		ctsio->kern_rel_offset = 0;
		ctsio->kern_sg_entries = 0;
		ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
		ctsio->be_move_done = ctl_config_move_done;
		ctl_datamove((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
	}

	param = (struct scsi_per_res_out_parms *)ctsio->kern_data_ptr;

	residx = ctl_get_resindex(&ctsio->io_hdr.nexus);
	res_key = scsi_8btou64(param->res_key.key);
	sa_res_key = scsi_8btou64(param->serv_act_res_key);

	/*
	 * Validate the reservation key here except for SPRO_REG_IGNO
	 * This must be done for all other service actions
	 */
	if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REG_IGNO) {
		mtx_lock(&lun->lun_lock);
		if ((key = ctl_get_prkey(lun, residx)) != 0) {
			if (res_key != key) {
				/*
				 * The current key passed in doesn't match
				 * the one the initiator previously
				 * registered.
				 */
				mtx_unlock(&lun->lun_lock);
				free(ctsio->kern_data_ptr, M_CTL);
				ctl_set_reservation_conflict(ctsio);
				ctl_done((union ctl_io *)ctsio);
				return (CTL_RETVAL_COMPLETE);
			}
		} else if ((cdb->action & SPRO_ACTION_MASK) != SPRO_REGISTER) {
			/*
			 * We are not registered
			 */
			mtx_unlock(&lun->lun_lock);
			free(ctsio->kern_data_ptr, M_CTL);
			ctl_set_reservation_conflict(ctsio);
			ctl_done((union ctl_io *)ctsio);
			return (CTL_RETVAL_COMPLETE);
		} else if (res_key != 0) {
			/*
			 * We are not registered and trying to register but
			 * the register key isn't zero.
			 */
			mtx_unlock(&lun->lun_lock);
			free(ctsio->kern_data_ptr, M_CTL);
			ctl_set_reservation_conflict(ctsio);
			ctl_done((union ctl_io *)ctsio);
			return (CTL_RETVAL_COMPLETE);
		}
		mtx_unlock(&lun->lun_lock);
	}

	switch (cdb->action & SPRO_ACTION_MASK) {
	case SPRO_REGISTER:
	case SPRO_REG_IGNO: {

#if 0
		printf("Registration received\n");
#endif

		/*
		 * We don't support any of these options, as we report in
		 * the read capabilities request (see
		 * ctl_persistent_reserve_in(), above).
		 */
		if ((param->flags & SPR_SPEC_I_PT)
		 || (param->flags & SPR_ALL_TG_PT)
		 || (param->flags & SPR_APTPL)) {
			int bit_ptr;

			if (param->flags & SPR_APTPL)
				bit_ptr = 0;
			else if (param->flags & SPR_ALL_TG_PT)
				bit_ptr = 2;
			else /* SPR_SPEC_I_PT */
				bit_ptr = 3;

			free(ctsio->kern_data_ptr, M_CTL);
			ctl_set_invalid_field(ctsio,
					      /*sks_valid*/ 1,
					      /*command*/ 0,
					      /*field*/ 20,
					      /*bit_valid*/ 1,
					      /*bit*/ bit_ptr);
			ctl_done((union ctl_io *)ctsio);
			return (CTL_RETVAL_COMPLETE);
		}

		mtx_lock(&lun->lun_lock);

		/*
		 * The initiator wants to clear the
		 * key/unregister.
		 */
		if (sa_res_key == 0) {
			/*
			 * Unregistering an already-unregistered nexus is a
			 * successful no-op (for REG_IGNO, or a REGISTER with
			 * res_key == 0).
			 */
			if ((res_key == 0
			  && (cdb->action & SPRO_ACTION_MASK) == SPRO_REGISTER)
			 || ((cdb->action & SPRO_ACTION_MASK) == SPRO_REG_IGNO
			  && ctl_get_prkey(lun, residx) == 0)) {
				mtx_unlock(&lun->lun_lock);
				goto done;
			}

			ctl_clr_prkey(lun, residx);
			lun->pr_key_count--;

			if (residx == lun->pr_res_idx) {
				lun->flags &= ~CTL_LUN_PR_RESERVED;
				lun->pr_res_idx = CTL_PR_NO_RESERVATION;

				if ((lun->res_type == SPR_TYPE_WR_EX_RO
				  || lun->res_type == SPR_TYPE_EX_AC_RO)
				 && lun->pr_key_count) {
					/*
					 * If the reservation is a registrants
					 * only type we need to generate a UA
					 * for other registered inits. The
					 * sense code should be RESERVATIONS
					 * RELEASED
					 */

					for (i = 0; i < CTL_MAX_INITIATORS;i++){
						if (ctl_get_prkey(lun, i +
						    softc->persis_offset) == 0)
							continue;
						ctl_est_ua(lun, i,
						    CTL_UA_RES_RELEASE);
					}
				}
				lun->res_type = 0;
			} else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) {
				if (lun->pr_key_count==0) {
					lun->flags &= ~CTL_LUN_PR_RESERVED;
					lun->res_type = 0;
					lun->pr_res_idx = CTL_PR_NO_RESERVATION;
				}
			}
			/* Mirror the unregister to the other SC. */
			persis_io.hdr.nexus = ctsio->io_hdr.nexus;
			persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
			persis_io.pr.pr_info.action = CTL_PR_UNREG_KEY;
			persis_io.pr.pr_info.residx = residx;
			if ((isc_retval = ctl_ha_msg_send(CTL_HA_CHAN_CTL,
			     &persis_io, sizeof(persis_io), 0 )) >
			     CTL_HA_STATUS_SUCCESS) {
				printf("CTL:Persis Out error returned from "
				       "ctl_ha_msg_send %d\n", isc_retval);
			}
		} else /* sa_res_key != 0 */ {

			/*
			 * If we aren't registered currently then increment
			 * the key count and set the registered flag.
			 */
			ctl_alloc_prkey(lun, residx);
			if (ctl_get_prkey(lun, residx) == 0)
				lun->pr_key_count++;
			ctl_set_prkey(lun, residx, sa_res_key);

			/* Mirror the registration to the other SC. */
			persis_io.hdr.nexus = ctsio->io_hdr.nexus;
			persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
			persis_io.pr.pr_info.action = CTL_PR_REG_KEY;
			persis_io.pr.pr_info.residx = residx;
			memcpy(persis_io.pr.pr_info.sa_res_key,
			       param->serv_act_res_key,
			       sizeof(param->serv_act_res_key));
			if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
			     &persis_io, sizeof(persis_io), 0)) >
			     CTL_HA_STATUS_SUCCESS) {
				printf("CTL:Persis Out error returned from "
				       "ctl_ha_msg_send %d\n", isc_retval);
			}
		}
		lun->PRGeneration++;
		mtx_unlock(&lun->lun_lock);

		break;
	}
	case SPRO_RESERVE:
#if 0
		printf("Reserve executed type %d\n", type);
#endif
		mtx_lock(&lun->lun_lock);
		if (lun->flags & CTL_LUN_PR_RESERVED) {
			/*
			 * if this isn't the reservation holder and it's
			 * not a "all registrants" type or if the type is
			 * different then we have a conflict
			 */
			if ((lun->pr_res_idx != residx
			  && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS)
			 || lun->res_type != type) {
				mtx_unlock(&lun->lun_lock);
				free(ctsio->kern_data_ptr, M_CTL);
				ctl_set_reservation_conflict(ctsio);
				ctl_done((union ctl_io *)ctsio);
				return (CTL_RETVAL_COMPLETE);
			}
			mtx_unlock(&lun->lun_lock);
		} else /* create a reservation */ {
			/*
			 * If it's not an "all registrants" type record
			 * reservation holder
			 */
			if (type != SPR_TYPE_WR_EX_AR
			 && type != SPR_TYPE_EX_AC_AR)
				lun->pr_res_idx = residx; /* Res holder */
			else
				lun->pr_res_idx = CTL_PR_ALL_REGISTRANTS;

			lun->flags |= CTL_LUN_PR_RESERVED;
			lun->res_type = type;

			mtx_unlock(&lun->lun_lock);

			/* send msg to other side */
			persis_io.hdr.nexus = ctsio->io_hdr.nexus;
			persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
			persis_io.pr.pr_info.action = CTL_PR_RESERVE;
			persis_io.pr.pr_info.residx = lun->pr_res_idx;
			persis_io.pr.pr_info.res_type = type;
			if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
			     &persis_io, sizeof(persis_io), 0)) >
			     CTL_HA_STATUS_SUCCESS) {
				printf("CTL:Persis Out error returned from "
				       "ctl_ha_msg_send %d\n", isc_retval);
			}
		}
		break;

	case SPRO_RELEASE:
		mtx_lock(&lun->lun_lock);
		if ((lun->flags & CTL_LUN_PR_RESERVED) == 0) {
			/* No reservation exists return good status */
			mtx_unlock(&lun->lun_lock);
			goto done;
		}
		/*
		 * Is this nexus a reservation holder?
		 */
		if (lun->pr_res_idx != residx
		 && lun->pr_res_idx != CTL_PR_ALL_REGISTRANTS) {
			/*
			 * not a res holder return good status but
			 * do nothing
			 */
			mtx_unlock(&lun->lun_lock);
			goto done;
		}

		/* A RELEASE must name the same type as the reservation. */
		if (lun->res_type != type) {
			mtx_unlock(&lun->lun_lock);
			free(ctsio->kern_data_ptr, M_CTL);
			ctl_set_illegal_pr_release(ctsio);
			ctl_done((union ctl_io *)ctsio);
			return (CTL_RETVAL_COMPLETE);
		}

		/* okay to release */
		lun->flags &= ~CTL_LUN_PR_RESERVED;
		lun->pr_res_idx = CTL_PR_NO_RESERVATION;
		lun->res_type = 0;

		/*
		 * if this isn't an exclusive access
		 * res generate UA for all other
		 * registrants.
		 */
		if (type != SPR_TYPE_EX_AC
		 && type != SPR_TYPE_WR_EX) {
			for (i = 0; i < CTL_MAX_INITIATORS; i++) {
				if (i == residx ||
				    ctl_get_prkey(lun,
				     i + softc->persis_offset) == 0)
					continue;
				ctl_est_ua(lun, i, CTL_UA_RES_RELEASE);
			}
		}
		mtx_unlock(&lun->lun_lock);
		/* Send msg to other side */
		persis_io.hdr.nexus = ctsio->io_hdr.nexus;
		persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
		persis_io.pr.pr_info.action = CTL_PR_RELEASE;
		if ((isc_retval=ctl_ha_msg_send( CTL_HA_CHAN_CTL, &persis_io,
		     sizeof(persis_io), 0)) > CTL_HA_STATUS_SUCCESS) {
			printf("CTL:Persis Out error returned from "
			       "ctl_ha_msg_send %d\n", isc_retval);
		}
		break;

	case SPRO_CLEAR:
		/* send msg to other side */

		mtx_lock(&lun->lun_lock);
		lun->flags &= ~CTL_LUN_PR_RESERVED;
		lun->res_type = 0;
		lun->pr_key_count = 0;
		lun->pr_res_idx = CTL_PR_NO_RESERVATION;

		/* Drop every registration; all others get REG_PREEMPT UAs. */
		ctl_clr_prkey(lun, residx);
		for (i=0; i < 2*CTL_MAX_INITIATORS; i++)
			if (ctl_get_prkey(lun, i) != 0) {
				ctl_clr_prkey(lun, i);
				ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT);
			}
		lun->PRGeneration++;
		mtx_unlock(&lun->lun_lock);
		persis_io.hdr.nexus = ctsio->io_hdr.nexus;
		persis_io.hdr.msg_type = CTL_MSG_PERS_ACTION;
		persis_io.pr.pr_info.action = CTL_PR_CLEAR;
		if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL, &persis_io,
		     sizeof(persis_io), 0)) > CTL_HA_STATUS_SUCCESS) {
			printf("CTL:Persis Out error returned from "
			       "ctl_ha_msg_send %d\n", isc_retval);
		}
		break;

	case SPRO_PREEMPT:
	case SPRO_PRE_ABO: {
		int nretval;

		/* Non-zero means ctl_pro_preempt() already completed the I/O. */
		nretval = ctl_pro_preempt(softc, lun, res_key, sa_res_key, type,
					  residx, ctsio, cdb, param);
		if (nretval != 0)
			return (CTL_RETVAL_COMPLETE);
		break;
	}
	default:
		panic("Invalid PR type %x", cdb->action);
	}

done:
	/* Common success path: free the parameter buffer and complete. */
	free(ctsio->kern_data_ptr, M_CTL);
	ctl_set_success(ctsio);
	ctl_done((union ctl_io *)ctsio);

	return (retval);
}

/*
 * This routine is for handling a message from the other SC pertaining to
 * persistent reserve out. All the error checking will have been done
 * so only performing the action need be done here to keep the two
 * in sync.
 */
static void
ctl_hndl_per_res_out_on_other_sc(union ctl_ha_msg *msg)
{
	struct ctl_lun *lun;
	struct ctl_softc *softc;
	int i;
	uint32_t targ_lun;

	softc = control_softc;

	targ_lun = msg->hdr.nexus.targ_mapped_lun;
	lun = softc->ctl_luns[targ_lun];
	mtx_lock(&lun->lun_lock);
	switch(msg->pr.pr_info.action) {
	case CTL_PR_REG_KEY:
		/* Mirror a registration performed on the other SC. */
		ctl_alloc_prkey(lun, msg->pr.pr_info.residx);
		if (ctl_get_prkey(lun, msg->pr.pr_info.residx) == 0)
			lun->pr_key_count++;
		ctl_set_prkey(lun, msg->pr.pr_info.residx,
		    scsi_8btou64(msg->pr.pr_info.sa_res_key));
		lun->PRGeneration++;
		break;

	case CTL_PR_UNREG_KEY:
		ctl_clr_prkey(lun, msg->pr.pr_info.residx);
		lun->pr_key_count--;

		/* XXX Need to see if the reservation has been released */
		/* if so do we need to generate UA? */
		if (msg->pr.pr_info.residx == lun->pr_res_idx) {
			lun->flags &= ~CTL_LUN_PR_RESERVED;
			lun->pr_res_idx = CTL_PR_NO_RESERVATION;

			if ((lun->res_type == SPR_TYPE_WR_EX_RO
			  || lun->res_type == SPR_TYPE_EX_AC_RO)
			 && lun->pr_key_count) {
				/*
				 * If the reservation is a registrants
				 * only type we need to generate a UA
				 * for other registered inits. The
				 * sense code should be RESERVATIONS
				 * RELEASED
				 */

				for (i = 0; i < CTL_MAX_INITIATORS; i++) {
					if (ctl_get_prkey(lun, i +
					    softc->persis_offset) == 0)
						continue;

					ctl_est_ua(lun, i, CTL_UA_RES_RELEASE);
				}
			}
			lun->res_type = 0;
		} else if (lun->pr_res_idx == CTL_PR_ALL_REGISTRANTS) {
			if (lun->pr_key_count==0) {
				lun->flags &= ~CTL_LUN_PR_RESERVED;
				lun->res_type = 0;
				lun->pr_res_idx = CTL_PR_NO_RESERVATION;
			}
		}
		lun->PRGeneration++;
		break;

	case CTL_PR_RESERVE:
		lun->flags |= CTL_LUN_PR_RESERVED;
		lun->res_type = msg->pr.pr_info.res_type;
		lun->pr_res_idx = msg->pr.pr_info.residx;

		break;

	case CTL_PR_RELEASE:
		/*
		 * if this isn't an exclusive access res generate UA for all
		 * other registrants.
		 */
		if (lun->res_type != SPR_TYPE_EX_AC
		 && lun->res_type != SPR_TYPE_WR_EX) {
			for (i = 0; i < CTL_MAX_INITIATORS; i++)
				if (ctl_get_prkey(lun, i + softc->persis_offset) != 0)
					ctl_est_ua(lun, i, CTL_UA_RES_RELEASE);
		}

		lun->flags &= ~CTL_LUN_PR_RESERVED;
		lun->pr_res_idx = CTL_PR_NO_RESERVATION;
		lun->res_type = 0;
		break;

	case CTL_PR_PREEMPT:
		ctl_pro_preempt_other(lun, msg);
		break;
	case CTL_PR_CLEAR:
		/* Mirror a CLEAR: drop every registration and reservation. */
		lun->flags &= ~CTL_LUN_PR_RESERVED;
		lun->res_type = 0;
		lun->pr_key_count = 0;
		lun->pr_res_idx = CTL_PR_NO_RESERVATION;

		for (i=0; i < 2*CTL_MAX_INITIATORS; i++) {
			if (ctl_get_prkey(lun, i) == 0)
				continue;
			ctl_clr_prkey(lun, i);
			ctl_est_res_ua(lun, i, CTL_UA_REG_PREEMPT);
		}
		lun->PRGeneration++;
		break;
	}

	mtx_unlock(&lun->lun_lock);
}

/*
 * Translate a READ/WRITE-family CDB into an LBA/length/flags request and
 * submit it to the LUN's backend.
 */
int
ctl_read_write(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct ctl_lba_len_flags *lbalen;
	uint64_t lba;
	uint32_t num_blocks;
	int flags, retval;
	int isread;

	lun = (struct
ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 8855 8856 CTL_DEBUG_PRINT(("ctl_read_write: command: %#x\n", ctsio->cdb[0])); 8857 8858 flags = 0; 8859 retval = CTL_RETVAL_COMPLETE; 8860 8861 isread = ctsio->cdb[0] == READ_6 || ctsio->cdb[0] == READ_10 8862 || ctsio->cdb[0] == READ_12 || ctsio->cdb[0] == READ_16; 8863 switch (ctsio->cdb[0]) { 8864 case READ_6: 8865 case WRITE_6: { 8866 struct scsi_rw_6 *cdb; 8867 8868 cdb = (struct scsi_rw_6 *)ctsio->cdb; 8869 8870 lba = scsi_3btoul(cdb->addr); 8871 /* only 5 bits are valid in the most significant address byte */ 8872 lba &= 0x1fffff; 8873 num_blocks = cdb->length; 8874 /* 8875 * This is correct according to SBC-2. 8876 */ 8877 if (num_blocks == 0) 8878 num_blocks = 256; 8879 break; 8880 } 8881 case READ_10: 8882 case WRITE_10: { 8883 struct scsi_rw_10 *cdb; 8884 8885 cdb = (struct scsi_rw_10 *)ctsio->cdb; 8886 if (cdb->byte2 & SRW10_FUA) 8887 flags |= CTL_LLF_FUA; 8888 if (cdb->byte2 & SRW10_DPO) 8889 flags |= CTL_LLF_DPO; 8890 lba = scsi_4btoul(cdb->addr); 8891 num_blocks = scsi_2btoul(cdb->length); 8892 break; 8893 } 8894 case WRITE_VERIFY_10: { 8895 struct scsi_write_verify_10 *cdb; 8896 8897 cdb = (struct scsi_write_verify_10 *)ctsio->cdb; 8898 flags |= CTL_LLF_FUA; 8899 if (cdb->byte2 & SWV_DPO) 8900 flags |= CTL_LLF_DPO; 8901 lba = scsi_4btoul(cdb->addr); 8902 num_blocks = scsi_2btoul(cdb->length); 8903 break; 8904 } 8905 case READ_12: 8906 case WRITE_12: { 8907 struct scsi_rw_12 *cdb; 8908 8909 cdb = (struct scsi_rw_12 *)ctsio->cdb; 8910 if (cdb->byte2 & SRW12_FUA) 8911 flags |= CTL_LLF_FUA; 8912 if (cdb->byte2 & SRW12_DPO) 8913 flags |= CTL_LLF_DPO; 8914 lba = scsi_4btoul(cdb->addr); 8915 num_blocks = scsi_4btoul(cdb->length); 8916 break; 8917 } 8918 case WRITE_VERIFY_12: { 8919 struct scsi_write_verify_12 *cdb; 8920 8921 cdb = (struct scsi_write_verify_12 *)ctsio->cdb; 8922 flags |= CTL_LLF_FUA; 8923 if (cdb->byte2 & SWV_DPO) 8924 flags |= CTL_LLF_DPO; 8925 lba = scsi_4btoul(cdb->addr); 8926 
num_blocks = scsi_4btoul(cdb->length); 8927 break; 8928 } 8929 case READ_16: 8930 case WRITE_16: { 8931 struct scsi_rw_16 *cdb; 8932 8933 cdb = (struct scsi_rw_16 *)ctsio->cdb; 8934 if (cdb->byte2 & SRW12_FUA) 8935 flags |= CTL_LLF_FUA; 8936 if (cdb->byte2 & SRW12_DPO) 8937 flags |= CTL_LLF_DPO; 8938 lba = scsi_8btou64(cdb->addr); 8939 num_blocks = scsi_4btoul(cdb->length); 8940 break; 8941 } 8942 case WRITE_ATOMIC_16: { 8943 struct scsi_rw_16 *cdb; 8944 8945 if (lun->be_lun->atomicblock == 0) { 8946 ctl_set_invalid_opcode(ctsio); 8947 ctl_done((union ctl_io *)ctsio); 8948 return (CTL_RETVAL_COMPLETE); 8949 } 8950 8951 cdb = (struct scsi_rw_16 *)ctsio->cdb; 8952 if (cdb->byte2 & SRW12_FUA) 8953 flags |= CTL_LLF_FUA; 8954 if (cdb->byte2 & SRW12_DPO) 8955 flags |= CTL_LLF_DPO; 8956 lba = scsi_8btou64(cdb->addr); 8957 num_blocks = scsi_4btoul(cdb->length); 8958 if (num_blocks > lun->be_lun->atomicblock) { 8959 ctl_set_invalid_field(ctsio, /*sks_valid*/ 1, 8960 /*command*/ 1, /*field*/ 12, /*bit_valid*/ 0, 8961 /*bit*/ 0); 8962 ctl_done((union ctl_io *)ctsio); 8963 return (CTL_RETVAL_COMPLETE); 8964 } 8965 break; 8966 } 8967 case WRITE_VERIFY_16: { 8968 struct scsi_write_verify_16 *cdb; 8969 8970 cdb = (struct scsi_write_verify_16 *)ctsio->cdb; 8971 flags |= CTL_LLF_FUA; 8972 if (cdb->byte2 & SWV_DPO) 8973 flags |= CTL_LLF_DPO; 8974 lba = scsi_8btou64(cdb->addr); 8975 num_blocks = scsi_4btoul(cdb->length); 8976 break; 8977 } 8978 default: 8979 /* 8980 * We got a command we don't support. This shouldn't 8981 * happen, commands should be filtered out above us. 8982 */ 8983 ctl_set_invalid_opcode(ctsio); 8984 ctl_done((union ctl_io *)ctsio); 8985 8986 return (CTL_RETVAL_COMPLETE); 8987 break; /* NOTREACHED */ 8988 } 8989 8990 /* 8991 * The first check is to make sure we're in bounds, the second 8992 * check is to catch wrap-around problems. If the lba + num blocks 8993 * is less than the lba, then we've wrapped around and the block 8994 * range is invalid anyway. 
8995 */ 8996 if (((lba + num_blocks) > (lun->be_lun->maxlba + 1)) 8997 || ((lba + num_blocks) < lba)) { 8998 ctl_set_lba_out_of_range(ctsio); 8999 ctl_done((union ctl_io *)ctsio); 9000 return (CTL_RETVAL_COMPLETE); 9001 } 9002 9003 /* 9004 * According to SBC-3, a transfer length of 0 is not an error. 9005 * Note that this cannot happen with WRITE(6) or READ(6), since 0 9006 * translates to 256 blocks for those commands. 9007 */ 9008 if (num_blocks == 0) { 9009 ctl_set_success(ctsio); 9010 ctl_done((union ctl_io *)ctsio); 9011 return (CTL_RETVAL_COMPLETE); 9012 } 9013 9014 /* Set FUA and/or DPO if caches are disabled. */ 9015 if (isread) { 9016 if ((lun->mode_pages.caching_page[CTL_PAGE_CURRENT].flags1 & 9017 SCP_RCD) != 0) 9018 flags |= CTL_LLF_FUA | CTL_LLF_DPO; 9019 } else { 9020 if ((lun->mode_pages.caching_page[CTL_PAGE_CURRENT].flags1 & 9021 SCP_WCE) == 0) 9022 flags |= CTL_LLF_FUA; 9023 } 9024 9025 lbalen = (struct ctl_lba_len_flags *) 9026 &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN]; 9027 lbalen->lba = lba; 9028 lbalen->len = num_blocks; 9029 lbalen->flags = (isread ? 
	    CTL_LLF_READ : CTL_LLF_WRITE) | flags;

	ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize;
	ctsio->kern_rel_offset = 0;

	CTL_DEBUG_PRINT(("ctl_read_write: calling data_submit()\n"));

	retval = lun->backend->data_submit((union ctl_io *)ctsio);

	return (retval);
}

/*
 * Continuation handler for COMPARE AND WRITE.  After the compare half of
 * the command completes, the same I/O is flipped from compare mode to
 * write mode and resubmitted to the backend for the write half.
 */
static int
ctl_cnw_cont(union ctl_io *io)
{
	struct ctl_scsiio *ctsio;
	struct ctl_lun *lun;
	struct ctl_lba_len_flags *lbalen;
	int retval;

	ctsio = &io->scsiio;
	/* Reset status and clear the continuation flag for the second pass. */
	ctsio->io_hdr.status = CTL_STATUS_NONE;
	ctsio->io_hdr.flags &= ~CTL_FLAG_IO_CONT;
	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	lbalen = (struct ctl_lba_len_flags *)
	    &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
	/* Same LBA/length as the compare pass; only the operation changes. */
	lbalen->flags &= ~CTL_LLF_COMPARE;
	lbalen->flags |= CTL_LLF_WRITE;

	CTL_DEBUG_PRINT(("ctl_cnw_cont: calling data_submit()\n"));
	retval = lun->backend->data_submit((union ctl_io *)ctsio);
	return (retval);
}

/*
 * Handle the SCSI COMPARE AND WRITE command: decode the CDB, validate the
 * LBA range, and submit the compare half to the backend.  The write half
 * is driven by ctl_cnw_cont() via the I/O continuation mechanism.
 */
int
ctl_cnw(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct ctl_lba_len_flags *lbalen;
	uint64_t lba;
	uint32_t num_blocks;
	int flags, retval;

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	CTL_DEBUG_PRINT(("ctl_cnw: command: %#x\n", ctsio->cdb[0]));

	flags = 0;
	retval = CTL_RETVAL_COMPLETE;

	switch (ctsio->cdb[0]) {
	case COMPARE_AND_WRITE: {
		struct scsi_compare_and_write *cdb;

		cdb = (struct scsi_compare_and_write *)ctsio->cdb;
		if (cdb->byte2 & SRW10_FUA)
			flags |= CTL_LLF_FUA;
		if (cdb->byte2 & SRW10_DPO)
			flags |= CTL_LLF_DPO;
		lba = scsi_8btou64(cdb->addr);
		/* NUMBER OF LOGICAL BLOCKS is a single byte in this CDB. */
		num_blocks = cdb->length;
		break;
	}
	default:
		/*
		 * We got a command we don't support.  This shouldn't
		 * happen, commands should be filtered out above us.
		 */
		ctl_set_invalid_opcode(ctsio);
		ctl_done((union ctl_io *)ctsio);

		return (CTL_RETVAL_COMPLETE);
		break; /* NOTREACHED */
	}

	/*
	 * The first check is to make sure we're in bounds, the second
	 * check is to catch wrap-around problems.  If the lba + num blocks
	 * is less than the lba, then we've wrapped around and the block
	 * range is invalid anyway.
	 */
	if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
	 || ((lba + num_blocks) < lba)) {
		ctl_set_lba_out_of_range(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * According to SBC-3, a transfer length of 0 is not an error.
	 */
	if (num_blocks == 0) {
		ctl_set_success(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/* Set FUA if write cache is disabled. */
	if ((lun->mode_pages.caching_page[CTL_PAGE_CURRENT].flags1 &
	    SCP_WCE) == 0)
		flags |= CTL_LLF_FUA;

	/* Data moves twice: once for the compare, once for the write. */
	ctsio->kern_total_len = 2 * num_blocks * lun->be_lun->blocksize;
	ctsio->kern_rel_offset = 0;

	/*
	 * Set the IO_CONT flag, so that if this I/O gets passed to
	 * ctl_data_submit_done(), it'll get passed back to
	 * ctl_cnw_cont() for further processing.
	 */
	ctsio->io_hdr.flags |= CTL_FLAG_IO_CONT;
	ctsio->io_cont = ctl_cnw_cont;

	lbalen = (struct ctl_lba_len_flags *)
	    &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
	lbalen->lba = lba;
	lbalen->len = num_blocks;
	lbalen->flags = CTL_LLF_COMPARE | flags;

	CTL_DEBUG_PRINT(("ctl_cnw: calling data_submit()\n"));
	retval = lun->backend->data_submit((union ctl_io *)ctsio);
	return (retval);
}

/*
 * Handle the SCSI VERIFY(10/12/16) commands: decode the CDB, validate the
 * LBA range, and ask the backend either to verify media readability
 * (BYTCHK == 0) or to compare the transferred data against the media
 * (BYTCHK == 1).
 */
int
ctl_verify(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	struct ctl_lba_len_flags *lbalen;
	uint64_t lba;
	uint32_t num_blocks;
	int bytchk, flags;
	int retval;

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	CTL_DEBUG_PRINT(("ctl_verify: command: %#x\n", ctsio->cdb[0]));

	bytchk = 0;
	/* Verify always bypasses the cache, hence FUA by default. */
	flags = CTL_LLF_FUA;
	retval = CTL_RETVAL_COMPLETE;

	switch (ctsio->cdb[0]) {
	case VERIFY_10: {
		struct scsi_verify_10 *cdb;

		cdb = (struct scsi_verify_10 *)ctsio->cdb;
		if (cdb->byte2 & SVFY_BYTCHK)
			bytchk = 1;
		if (cdb->byte2 & SVFY_DPO)
			flags |= CTL_LLF_DPO;
		lba = scsi_4btoul(cdb->addr);
		num_blocks = scsi_2btoul(cdb->length);
		break;
	}
	case VERIFY_12: {
		struct scsi_verify_12 *cdb;

		cdb = (struct scsi_verify_12 *)ctsio->cdb;
		if (cdb->byte2 & SVFY_BYTCHK)
			bytchk = 1;
		if (cdb->byte2 & SVFY_DPO)
			flags |= CTL_LLF_DPO;
		lba = scsi_4btoul(cdb->addr);
		num_blocks = scsi_4btoul(cdb->length);
		break;
	}
	case VERIFY_16: {
		struct scsi_rw_16 *cdb;

		cdb = (struct scsi_rw_16 *)ctsio->cdb;
		if (cdb->byte2 & SVFY_BYTCHK)
			bytchk = 1;
		if (cdb->byte2 & SVFY_DPO)
			flags |= CTL_LLF_DPO;
		lba = scsi_8btou64(cdb->addr);
		num_blocks = scsi_4btoul(cdb->length);
		break;
	}
	default:
		/*
		 * We got a command we don't support.
This shouldn't
		 * happen, commands should be filtered out above us.
		 */
		ctl_set_invalid_opcode(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * The first check is to make sure we're in bounds, the second
	 * check is to catch wrap-around problems.  If the lba + num blocks
	 * is less than the lba, then we've wrapped around and the block
	 * range is invalid anyway.
	 */
	if (((lba + num_blocks) > (lun->be_lun->maxlba + 1))
	 || ((lba + num_blocks) < lba)) {
		ctl_set_lba_out_of_range(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	/*
	 * According to SBC-3, a transfer length of 0 is not an error.
	 */
	if (num_blocks == 0) {
		ctl_set_success(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	lbalen = (struct ctl_lba_len_flags *)
	    &ctsio->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
	lbalen->lba = lba;
	lbalen->len = num_blocks;
	if (bytchk) {
		/* BYTCHK set: compare transferred data against the media. */
		lbalen->flags = CTL_LLF_COMPARE | flags;
		ctsio->kern_total_len = num_blocks * lun->be_lun->blocksize;
	} else {
		/* BYTCHK clear: media verify only, no data transfer. */
		lbalen->flags = CTL_LLF_VERIFY | flags;
		ctsio->kern_total_len = 0;
	}
	ctsio->kern_rel_offset = 0;

	CTL_DEBUG_PRINT(("ctl_verify: calling data_submit()\n"));
	retval = lun->backend->data_submit((union ctl_io *)ctsio);
	return (retval);
}

/*
 * Handle the SCSI REPORT LUNS command: build and return the list of LUNs
 * visible through the requesting port, in the LUN addressing formats
 * required by SPC-3/SAM.
 */
int
ctl_report_luns(struct ctl_scsiio *ctsio)
{
	struct ctl_softc *softc = control_softc;
	struct scsi_report_luns *cdb;
	struct scsi_report_luns_data *lun_data;
	struct ctl_lun *lun, *request_lun;
	int num_luns, retval;
	uint32_t alloc_len, lun_datalen;
	int num_filled, well_known;
	uint32_t initidx, targ_lun_id, lun_id;

	retval = CTL_RETVAL_COMPLETE;
	well_known = 0;

	cdb = (struct scsi_report_luns *)ctsio->cdb;

CTL_DEBUG_PRINT(("ctl_report_luns\n")); 9276 9277 mtx_lock(&softc->ctl_lock); 9278 num_luns = softc->num_luns; 9279 mtx_unlock(&softc->ctl_lock); 9280 9281 switch (cdb->select_report) { 9282 case RPL_REPORT_DEFAULT: 9283 case RPL_REPORT_ALL: 9284 break; 9285 case RPL_REPORT_WELLKNOWN: 9286 well_known = 1; 9287 num_luns = 0; 9288 break; 9289 default: 9290 ctl_set_invalid_field(ctsio, 9291 /*sks_valid*/ 1, 9292 /*command*/ 1, 9293 /*field*/ 2, 9294 /*bit_valid*/ 0, 9295 /*bit*/ 0); 9296 ctl_done((union ctl_io *)ctsio); 9297 return (retval); 9298 break; /* NOTREACHED */ 9299 } 9300 9301 alloc_len = scsi_4btoul(cdb->length); 9302 /* 9303 * The initiator has to allocate at least 16 bytes for this request, 9304 * so he can at least get the header and the first LUN. Otherwise 9305 * we reject the request (per SPC-3 rev 14, section 6.21). 9306 */ 9307 if (alloc_len < (sizeof(struct scsi_report_luns_data) + 9308 sizeof(struct scsi_report_luns_lundata))) { 9309 ctl_set_invalid_field(ctsio, 9310 /*sks_valid*/ 1, 9311 /*command*/ 1, 9312 /*field*/ 6, 9313 /*bit_valid*/ 0, 9314 /*bit*/ 0); 9315 ctl_done((union ctl_io *)ctsio); 9316 return (retval); 9317 } 9318 9319 request_lun = (struct ctl_lun *) 9320 ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9321 9322 lun_datalen = sizeof(*lun_data) + 9323 (num_luns * sizeof(struct scsi_report_luns_lundata)); 9324 9325 ctsio->kern_data_ptr = malloc(lun_datalen, M_CTL, M_WAITOK | M_ZERO); 9326 lun_data = (struct scsi_report_luns_data *)ctsio->kern_data_ptr; 9327 ctsio->kern_sg_entries = 0; 9328 9329 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 9330 9331 mtx_lock(&softc->ctl_lock); 9332 for (targ_lun_id = 0, num_filled = 0; targ_lun_id < CTL_MAX_LUNS && num_filled < num_luns; targ_lun_id++) { 9333 lun_id = ctl_map_lun(softc, ctsio->io_hdr.nexus.targ_port, 9334 targ_lun_id); 9335 if (lun_id >= CTL_MAX_LUNS) 9336 continue; 9337 lun = softc->ctl_luns[lun_id]; 9338 if (lun == NULL) 9339 continue; 9340 9341 if (targ_lun_id <= 0xff) { 9342 
/* 9343 * Peripheral addressing method, bus number 0. 9344 */ 9345 lun_data->luns[num_filled].lundata[0] = 9346 RPL_LUNDATA_ATYP_PERIPH; 9347 lun_data->luns[num_filled].lundata[1] = targ_lun_id; 9348 num_filled++; 9349 } else if (targ_lun_id <= 0x3fff) { 9350 /* 9351 * Flat addressing method. 9352 */ 9353 lun_data->luns[num_filled].lundata[0] = 9354 RPL_LUNDATA_ATYP_FLAT | (targ_lun_id >> 8); 9355 lun_data->luns[num_filled].lundata[1] = 9356 (targ_lun_id & 0xff); 9357 num_filled++; 9358 } else if (targ_lun_id <= 0xffffff) { 9359 /* 9360 * Extended flat addressing method. 9361 */ 9362 lun_data->luns[num_filled].lundata[0] = 9363 RPL_LUNDATA_ATYP_EXTLUN | 0x12; 9364 scsi_ulto3b(targ_lun_id, 9365 &lun_data->luns[num_filled].lundata[1]); 9366 num_filled++; 9367 } else { 9368 printf("ctl_report_luns: bogus LUN number %jd, " 9369 "skipping\n", (intmax_t)targ_lun_id); 9370 } 9371 /* 9372 * According to SPC-3, rev 14 section 6.21: 9373 * 9374 * "The execution of a REPORT LUNS command to any valid and 9375 * installed logical unit shall clear the REPORTED LUNS DATA 9376 * HAS CHANGED unit attention condition for all logical 9377 * units of that target with respect to the requesting 9378 * initiator. A valid and installed logical unit is one 9379 * having a PERIPHERAL QUALIFIER of 000b in the standard 9380 * INQUIRY data (see 6.4.2)." 9381 * 9382 * If request_lun is NULL, the LUN this report luns command 9383 * was issued to is either disabled or doesn't exist. In that 9384 * case, we shouldn't clear any pending lun change unit 9385 * attention. 9386 */ 9387 if (request_lun != NULL) { 9388 mtx_lock(&lun->lun_lock); 9389 ctl_clr_ua(lun, initidx, CTL_UA_RES_RELEASE); 9390 mtx_unlock(&lun->lun_lock); 9391 } 9392 } 9393 mtx_unlock(&softc->ctl_lock); 9394 9395 /* 9396 * It's quite possible that we've returned fewer LUNs than we allocated 9397 * space for. Trim it. 
9398 */ 9399 lun_datalen = sizeof(*lun_data) + 9400 (num_filled * sizeof(struct scsi_report_luns_lundata)); 9401 9402 if (lun_datalen < alloc_len) { 9403 ctsio->residual = alloc_len - lun_datalen; 9404 ctsio->kern_data_len = lun_datalen; 9405 ctsio->kern_total_len = lun_datalen; 9406 } else { 9407 ctsio->residual = 0; 9408 ctsio->kern_data_len = alloc_len; 9409 ctsio->kern_total_len = alloc_len; 9410 } 9411 ctsio->kern_data_resid = 0; 9412 ctsio->kern_rel_offset = 0; 9413 ctsio->kern_sg_entries = 0; 9414 9415 /* 9416 * We set this to the actual data length, regardless of how much 9417 * space we actually have to return results. If the user looks at 9418 * this value, he'll know whether or not he allocated enough space 9419 * and reissue the command if necessary. We don't support well 9420 * known logical units, so if the user asks for that, return none. 9421 */ 9422 scsi_ulto4b(lun_datalen - 8, lun_data->length); 9423 9424 /* 9425 * We can only return SCSI_STATUS_CHECK_COND when we can't satisfy 9426 * this request. 9427 */ 9428 ctl_set_success(ctsio); 9429 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9430 ctsio->be_move_done = ctl_config_move_done; 9431 ctl_datamove((union ctl_io *)ctsio); 9432 return (retval); 9433} 9434 9435int 9436ctl_request_sense(struct ctl_scsiio *ctsio) 9437{ 9438 struct scsi_request_sense *cdb; 9439 struct scsi_sense_data *sense_ptr; 9440 struct ctl_softc *ctl_softc; 9441 struct ctl_lun *lun; 9442 uint32_t initidx; 9443 int have_error; 9444 scsi_sense_data_type sense_format; 9445 ctl_ua_type ua_type; 9446 9447 cdb = (struct scsi_request_sense *)ctsio->cdb; 9448 9449 ctl_softc = control_softc; 9450 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9451 9452 CTL_DEBUG_PRINT(("ctl_request_sense\n")); 9453 9454 /* 9455 * Determine which sense format the user wants. 
9456 */ 9457 if (cdb->byte2 & SRS_DESC) 9458 sense_format = SSD_TYPE_DESC; 9459 else 9460 sense_format = SSD_TYPE_FIXED; 9461 9462 ctsio->kern_data_ptr = malloc(sizeof(*sense_ptr), M_CTL, M_WAITOK); 9463 sense_ptr = (struct scsi_sense_data *)ctsio->kern_data_ptr; 9464 ctsio->kern_sg_entries = 0; 9465 9466 /* 9467 * struct scsi_sense_data, which is currently set to 256 bytes, is 9468 * larger than the largest allowed value for the length field in the 9469 * REQUEST SENSE CDB, which is 252 bytes as of SPC-4. 9470 */ 9471 ctsio->residual = 0; 9472 ctsio->kern_data_len = cdb->length; 9473 ctsio->kern_total_len = cdb->length; 9474 9475 ctsio->kern_data_resid = 0; 9476 ctsio->kern_rel_offset = 0; 9477 ctsio->kern_sg_entries = 0; 9478 9479 /* 9480 * If we don't have a LUN, we don't have any pending sense. 9481 */ 9482 if (lun == NULL) 9483 goto no_sense; 9484 9485 have_error = 0; 9486 initidx = ctl_get_initindex(&ctsio->io_hdr.nexus); 9487 /* 9488 * Check for pending sense, and then for pending unit attentions. 9489 * Pending sense gets returned first, then pending unit attentions. 9490 */ 9491 mtx_lock(&lun->lun_lock); 9492#ifdef CTL_WITH_CA 9493 if (ctl_is_set(lun->have_ca, initidx)) { 9494 scsi_sense_data_type stored_format; 9495 9496 /* 9497 * Check to see which sense format was used for the stored 9498 * sense data. 9499 */ 9500 stored_format = scsi_sense_type(&lun->pending_sense[initidx]); 9501 9502 /* 9503 * If the user requested a different sense format than the 9504 * one we stored, then we need to convert it to the other 9505 * format. If we're going from descriptor to fixed format 9506 * sense data, we may lose things in translation, depending 9507 * on what options were used. 9508 * 9509 * If the stored format is SSD_TYPE_NONE (i.e. invalid), 9510 * for some reason we'll just copy it out as-is. 
9511 */ 9512 if ((stored_format == SSD_TYPE_FIXED) 9513 && (sense_format == SSD_TYPE_DESC)) 9514 ctl_sense_to_desc((struct scsi_sense_data_fixed *) 9515 &lun->pending_sense[initidx], 9516 (struct scsi_sense_data_desc *)sense_ptr); 9517 else if ((stored_format == SSD_TYPE_DESC) 9518 && (sense_format == SSD_TYPE_FIXED)) 9519 ctl_sense_to_fixed((struct scsi_sense_data_desc *) 9520 &lun->pending_sense[initidx], 9521 (struct scsi_sense_data_fixed *)sense_ptr); 9522 else 9523 memcpy(sense_ptr, &lun->pending_sense[initidx], 9524 MIN(sizeof(*sense_ptr), 9525 sizeof(lun->pending_sense[initidx]))); 9526 9527 ctl_clear_mask(lun->have_ca, initidx); 9528 have_error = 1; 9529 } else 9530#endif 9531 { 9532 ua_type = ctl_build_ua(lun, initidx, sense_ptr, sense_format); 9533 if (ua_type != CTL_UA_NONE) 9534 have_error = 1; 9535 if (ua_type == CTL_UA_LUN_CHANGE) { 9536 mtx_unlock(&lun->lun_lock); 9537 mtx_lock(&ctl_softc->ctl_lock); 9538 ctl_clear_ua(ctl_softc, initidx, ua_type); 9539 mtx_unlock(&ctl_softc->ctl_lock); 9540 mtx_lock(&lun->lun_lock); 9541 } 9542 9543 } 9544 mtx_unlock(&lun->lun_lock); 9545 9546 /* 9547 * We already have a pending error, return it. 9548 */ 9549 if (have_error != 0) { 9550 /* 9551 * We report the SCSI status as OK, since the status of the 9552 * request sense command itself is OK. 9553 * We report 0 for the sense length, because we aren't doing 9554 * autosense in this case. We're reporting sense as 9555 * parameter data. 9556 */ 9557 ctl_set_success(ctsio); 9558 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9559 ctsio->be_move_done = ctl_config_move_done; 9560 ctl_datamove((union ctl_io *)ctsio); 9561 return (CTL_RETVAL_COMPLETE); 9562 } 9563 9564no_sense: 9565 9566 /* 9567 * No sense information to report, so we report that everything is 9568 * okay. 
9569 */ 9570 ctl_set_sense_data(sense_ptr, 9571 lun, 9572 sense_format, 9573 /*current_error*/ 1, 9574 /*sense_key*/ SSD_KEY_NO_SENSE, 9575 /*asc*/ 0x00, 9576 /*ascq*/ 0x00, 9577 SSD_ELEM_NONE); 9578 9579 /* 9580 * We report 0 for the sense length, because we aren't doing 9581 * autosense in this case. We're reporting sense as parameter data. 9582 */ 9583 ctl_set_success(ctsio); 9584 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9585 ctsio->be_move_done = ctl_config_move_done; 9586 ctl_datamove((union ctl_io *)ctsio); 9587 return (CTL_RETVAL_COMPLETE); 9588} 9589 9590int 9591ctl_tur(struct ctl_scsiio *ctsio) 9592{ 9593 9594 CTL_DEBUG_PRINT(("ctl_tur\n")); 9595 9596 ctl_set_success(ctsio); 9597 ctl_done((union ctl_io *)ctsio); 9598 9599 return (CTL_RETVAL_COMPLETE); 9600} 9601 9602#ifdef notyet 9603static int 9604ctl_cmddt_inquiry(struct ctl_scsiio *ctsio) 9605{ 9606 9607} 9608#endif 9609 9610/* 9611 * SCSI VPD page 0x00, the Supported VPD Pages page. 9612 */ 9613static int 9614ctl_inquiry_evpd_supported(struct ctl_scsiio *ctsio, int alloc_len) 9615{ 9616 struct scsi_vpd_supported_pages *pages; 9617 int sup_page_size; 9618 struct ctl_lun *lun; 9619 int p; 9620 9621 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9622 9623 sup_page_size = sizeof(struct scsi_vpd_supported_pages) * 9624 SCSI_EVPD_NUM_SUPPORTED_PAGES; 9625 ctsio->kern_data_ptr = malloc(sup_page_size, M_CTL, M_WAITOK | M_ZERO); 9626 pages = (struct scsi_vpd_supported_pages *)ctsio->kern_data_ptr; 9627 ctsio->kern_sg_entries = 0; 9628 9629 if (sup_page_size < alloc_len) { 9630 ctsio->residual = alloc_len - sup_page_size; 9631 ctsio->kern_data_len = sup_page_size; 9632 ctsio->kern_total_len = sup_page_size; 9633 } else { 9634 ctsio->residual = 0; 9635 ctsio->kern_data_len = alloc_len; 9636 ctsio->kern_total_len = alloc_len; 9637 } 9638 ctsio->kern_data_resid = 0; 9639 ctsio->kern_rel_offset = 0; 9640 ctsio->kern_sg_entries = 0; 9641 9642 /* 9643 * The control device is always connected. 
The disk device, on the
	 * other hand, may not be online all the time.  Need to change this
	 * to figure out whether the disk device is actually online or not.
	 */
	if (lun != NULL)
		pages->device = (SID_QUAL_LU_CONNECTED << 5) |
				lun->be_lun->lun_type;
	else
		pages->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;

	p = 0;
	/* Supported VPD pages */
	pages->page_list[p++] = SVPD_SUPPORTED_PAGES;
	/* Serial Number */
	pages->page_list[p++] = SVPD_UNIT_SERIAL_NUMBER;
	/* Device Identification */
	pages->page_list[p++] = SVPD_DEVICE_ID;
	/* Extended INQUIRY Data */
	pages->page_list[p++] = SVPD_EXTENDED_INQUIRY_DATA;
	/* Mode Page Policy */
	pages->page_list[p++] = SVPD_MODE_PAGE_POLICY;
	/* SCSI Ports */
	pages->page_list[p++] = SVPD_SCSI_PORTS;
	/* Third-party Copy */
	pages->page_list[p++] = SVPD_SCSI_TPC;
	if (lun != NULL && lun->be_lun->lun_type == T_DIRECT) {
		/* These pages only make sense for direct-access devices. */
		/* Block limits */
		pages->page_list[p++] = SVPD_BLOCK_LIMITS;
		/* Block Device Characteristics */
		pages->page_list[p++] = SVPD_BDC;
		/* Logical Block Provisioning */
		pages->page_list[p++] = SVPD_LBP;
	}
	pages->length = p;

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * SCSI VPD page 0x80, the Unit Serial Number page.
9687 */ 9688static int 9689ctl_inquiry_evpd_serial(struct ctl_scsiio *ctsio, int alloc_len) 9690{ 9691 struct scsi_vpd_unit_serial_number *sn_ptr; 9692 struct ctl_lun *lun; 9693 int data_len; 9694 9695 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9696 9697 data_len = 4 + CTL_SN_LEN; 9698 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9699 sn_ptr = (struct scsi_vpd_unit_serial_number *)ctsio->kern_data_ptr; 9700 if (data_len < alloc_len) { 9701 ctsio->residual = alloc_len - data_len; 9702 ctsio->kern_data_len = data_len; 9703 ctsio->kern_total_len = data_len; 9704 } else { 9705 ctsio->residual = 0; 9706 ctsio->kern_data_len = alloc_len; 9707 ctsio->kern_total_len = alloc_len; 9708 } 9709 ctsio->kern_data_resid = 0; 9710 ctsio->kern_rel_offset = 0; 9711 ctsio->kern_sg_entries = 0; 9712 9713 /* 9714 * The control device is always connected. The disk device, on the 9715 * other hand, may not be online all the time. Need to change this 9716 * to figure out whether the disk device is actually online or not. 9717 */ 9718 if (lun != NULL) 9719 sn_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9720 lun->be_lun->lun_type; 9721 else 9722 sn_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9723 9724 sn_ptr->page_code = SVPD_UNIT_SERIAL_NUMBER; 9725 sn_ptr->length = CTL_SN_LEN; 9726 /* 9727 * If we don't have a LUN, we just leave the serial number as 9728 * all spaces. 9729 */ 9730 if (lun != NULL) { 9731 strncpy((char *)sn_ptr->serial_num, 9732 (char *)lun->be_lun->serial_num, CTL_SN_LEN); 9733 } else 9734 memset(sn_ptr->serial_num, 0x20, CTL_SN_LEN); 9735 9736 ctl_set_success(ctsio); 9737 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9738 ctsio->be_move_done = ctl_config_move_done; 9739 ctl_datamove((union ctl_io *)ctsio); 9740 return (CTL_RETVAL_COMPLETE); 9741} 9742 9743 9744/* 9745 * SCSI VPD page 0x86, the Extended INQUIRY Data page. 
9746 */ 9747static int 9748ctl_inquiry_evpd_eid(struct ctl_scsiio *ctsio, int alloc_len) 9749{ 9750 struct scsi_vpd_extended_inquiry_data *eid_ptr; 9751 struct ctl_lun *lun; 9752 int data_len; 9753 9754 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9755 9756 data_len = sizeof(struct scsi_vpd_extended_inquiry_data); 9757 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9758 eid_ptr = (struct scsi_vpd_extended_inquiry_data *)ctsio->kern_data_ptr; 9759 ctsio->kern_sg_entries = 0; 9760 9761 if (data_len < alloc_len) { 9762 ctsio->residual = alloc_len - data_len; 9763 ctsio->kern_data_len = data_len; 9764 ctsio->kern_total_len = data_len; 9765 } else { 9766 ctsio->residual = 0; 9767 ctsio->kern_data_len = alloc_len; 9768 ctsio->kern_total_len = alloc_len; 9769 } 9770 ctsio->kern_data_resid = 0; 9771 ctsio->kern_rel_offset = 0; 9772 ctsio->kern_sg_entries = 0; 9773 9774 /* 9775 * The control device is always connected. The disk device, on the 9776 * other hand, may not be online all the time. 9777 */ 9778 if (lun != NULL) 9779 eid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9780 lun->be_lun->lun_type; 9781 else 9782 eid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9783 eid_ptr->page_code = SVPD_EXTENDED_INQUIRY_DATA; 9784 scsi_ulto2b(data_len - 4, eid_ptr->page_length); 9785 /* 9786 * We support head of queue, ordered and simple tags. 9787 */ 9788 eid_ptr->flags2 = SVPD_EID_HEADSUP | SVPD_EID_ORDSUP | SVPD_EID_SIMPSUP; 9789 /* 9790 * Volatile cache supported. 9791 */ 9792 eid_ptr->flags3 = SVPD_EID_V_SUP; 9793 9794 /* 9795 * This means that we clear the REPORTED LUNS DATA HAS CHANGED unit 9796 * attention for a particular IT nexus on all LUNs once we report 9797 * it to that nexus once. This bit is required as of SPC-4. 
9798 */ 9799 eid_ptr->flags4 = SVPD_EID_LUICLT; 9800 9801 /* 9802 * XXX KDM in order to correctly answer this, we would need 9803 * information from the SIM to determine how much sense data it 9804 * can send. So this would really be a path inquiry field, most 9805 * likely. This can be set to a maximum of 252 according to SPC-4, 9806 * but the hardware may or may not be able to support that much. 9807 * 0 just means that the maximum sense data length is not reported. 9808 */ 9809 eid_ptr->max_sense_length = 0; 9810 9811 ctl_set_success(ctsio); 9812 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9813 ctsio->be_move_done = ctl_config_move_done; 9814 ctl_datamove((union ctl_io *)ctsio); 9815 return (CTL_RETVAL_COMPLETE); 9816} 9817 9818static int 9819ctl_inquiry_evpd_mpp(struct ctl_scsiio *ctsio, int alloc_len) 9820{ 9821 struct scsi_vpd_mode_page_policy *mpp_ptr; 9822 struct ctl_lun *lun; 9823 int data_len; 9824 9825 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9826 9827 data_len = sizeof(struct scsi_vpd_mode_page_policy) + 9828 sizeof(struct scsi_vpd_mode_page_policy_descr); 9829 9830 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9831 mpp_ptr = (struct scsi_vpd_mode_page_policy *)ctsio->kern_data_ptr; 9832 ctsio->kern_sg_entries = 0; 9833 9834 if (data_len < alloc_len) { 9835 ctsio->residual = alloc_len - data_len; 9836 ctsio->kern_data_len = data_len; 9837 ctsio->kern_total_len = data_len; 9838 } else { 9839 ctsio->residual = 0; 9840 ctsio->kern_data_len = alloc_len; 9841 ctsio->kern_total_len = alloc_len; 9842 } 9843 ctsio->kern_data_resid = 0; 9844 ctsio->kern_rel_offset = 0; 9845 ctsio->kern_sg_entries = 0; 9846 9847 /* 9848 * The control device is always connected. The disk device, on the 9849 * other hand, may not be online all the time. 
9850 */ 9851 if (lun != NULL) 9852 mpp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 9853 lun->be_lun->lun_type; 9854 else 9855 mpp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 9856 mpp_ptr->page_code = SVPD_MODE_PAGE_POLICY; 9857 scsi_ulto2b(data_len - 4, mpp_ptr->page_length); 9858 mpp_ptr->descr[0].page_code = 0x3f; 9859 mpp_ptr->descr[0].subpage_code = 0xff; 9860 mpp_ptr->descr[0].policy = SVPD_MPP_SHARED; 9861 9862 ctl_set_success(ctsio); 9863 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9864 ctsio->be_move_done = ctl_config_move_done; 9865 ctl_datamove((union ctl_io *)ctsio); 9866 return (CTL_RETVAL_COMPLETE); 9867} 9868 9869/* 9870 * SCSI VPD page 0x83, the Device Identification page. 9871 */ 9872static int 9873ctl_inquiry_evpd_devid(struct ctl_scsiio *ctsio, int alloc_len) 9874{ 9875 struct scsi_vpd_device_id *devid_ptr; 9876 struct scsi_vpd_id_descriptor *desc; 9877 struct ctl_softc *softc; 9878 struct ctl_lun *lun; 9879 struct ctl_port *port; 9880 int data_len; 9881 uint8_t proto; 9882 9883 softc = control_softc; 9884 9885 port = softc->ctl_ports[ctl_port_idx(ctsio->io_hdr.nexus.targ_port)]; 9886 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 9887 9888 data_len = sizeof(struct scsi_vpd_device_id) + 9889 sizeof(struct scsi_vpd_id_descriptor) + 9890 sizeof(struct scsi_vpd_id_rel_trgt_port_id) + 9891 sizeof(struct scsi_vpd_id_descriptor) + 9892 sizeof(struct scsi_vpd_id_trgt_port_grp_id); 9893 if (lun && lun->lun_devid) 9894 data_len += lun->lun_devid->len; 9895 if (port->port_devid) 9896 data_len += port->port_devid->len; 9897 if (port->target_devid) 9898 data_len += port->target_devid->len; 9899 9900 ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO); 9901 devid_ptr = (struct scsi_vpd_device_id *)ctsio->kern_data_ptr; 9902 ctsio->kern_sg_entries = 0; 9903 9904 if (data_len < alloc_len) { 9905 ctsio->residual = alloc_len - data_len; 9906 ctsio->kern_data_len = data_len; 9907 ctsio->kern_total_len = data_len; 9908 } 
 else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;

	/*
	 * The control device is always connected.  The disk device, on the
	 * other hand, may not be online all the time.
	 */
	if (lun != NULL)
		devid_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
				     lun->be_lun->lun_type;
	else
		devid_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
	devid_ptr->page_code = SVPD_DEVICE_ID;
	scsi_ulto2b(data_len - 4, devid_ptr->length);

	/* Pick the protocol identifier to embed in each designator. */
	if (port->port_type == CTL_PORT_FC)
		proto = SCSI_PROTO_FC << 4;
	else if (port->port_type == CTL_PORT_ISCSI)
		proto = SCSI_PROTO_ISCSI << 4;
	else
		proto = SCSI_PROTO_SPI << 4;
	desc = (struct scsi_vpd_id_descriptor *)devid_ptr->desc_list;

	/*
	 * We're using a LUN association here.  i.e., this device ID is a
	 * per-LUN identifier.
	 */
	if (lun && lun->lun_devid) {
		memcpy(desc, lun->lun_devid->data, lun->lun_devid->len);
		desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc +
		    lun->lun_devid->len);
	}

	/*
	 * This is for the WWPN which is a port association.
9949 */ 9950 if (port->port_devid) { 9951 memcpy(desc, port->port_devid->data, port->port_devid->len); 9952 desc = (struct scsi_vpd_id_descriptor *)((uint8_t *)desc + 9953 port->port_devid->len); 9954 } 9955 9956 /* 9957 * This is for the Relative Target Port(type 4h) identifier 9958 */ 9959 desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; 9960 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | 9961 SVPD_ID_TYPE_RELTARG; 9962 desc->length = 4; 9963 scsi_ulto2b(ctsio->io_hdr.nexus.targ_port, &desc->identifier[2]); 9964 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 9965 sizeof(struct scsi_vpd_id_rel_trgt_port_id)); 9966 9967 /* 9968 * This is for the Target Port Group(type 5h) identifier 9969 */ 9970 desc->proto_codeset = proto | SVPD_ID_CODESET_BINARY; 9971 desc->id_type = SVPD_ID_PIV | SVPD_ID_ASSOC_PORT | 9972 SVPD_ID_TYPE_TPORTGRP; 9973 desc->length = 4; 9974 scsi_ulto2b(ctsio->io_hdr.nexus.targ_port / CTL_MAX_PORTS + 1, 9975 &desc->identifier[2]); 9976 desc = (struct scsi_vpd_id_descriptor *)(&desc->identifier[0] + 9977 sizeof(struct scsi_vpd_id_trgt_port_grp_id)); 9978 9979 /* 9980 * This is for the Target identifier 9981 */ 9982 if (port->target_devid) { 9983 memcpy(desc, port->target_devid->data, port->target_devid->len); 9984 } 9985 9986 ctl_set_success(ctsio); 9987 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 9988 ctsio->be_move_done = ctl_config_move_done; 9989 ctl_datamove((union ctl_io *)ctsio); 9990 return (CTL_RETVAL_COMPLETE); 9991} 9992 9993static int 9994ctl_inquiry_evpd_scsi_ports(struct ctl_scsiio *ctsio, int alloc_len) 9995{ 9996 struct ctl_softc *softc = control_softc; 9997 struct scsi_vpd_scsi_ports *sp; 9998 struct scsi_vpd_port_designation *pd; 9999 struct scsi_vpd_port_designation_cont *pdc; 10000 struct ctl_lun *lun; 10001 struct ctl_port *port; 10002 int data_len, num_target_ports, iid_len, id_len, g, pg, p; 10003 int num_target_port_groups; 10004 10005 lun = (struct ctl_lun 
 *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	if (softc->is_single)
		num_target_port_groups = 1;
	else
		num_target_port_groups = NUM_TARGET_PORT_GROUPS;
	/*
	 * First pass: count the online ports that can see this LUN and the
	 * total space their initiator and port designators will occupy.
	 */
	num_target_ports = 0;
	iid_len = 0;
	id_len = 0;
	mtx_lock(&softc->ctl_lock);
	STAILQ_FOREACH(port, &softc->port_list, links) {
		if ((port->status & CTL_PORT_STATUS_ONLINE) == 0)
			continue;
		if (lun != NULL &&
		    ctl_map_lun_back(softc, port->targ_port, lun->lun) >=
		    CTL_MAX_LUNS)
			continue;
		num_target_ports++;
		if (port->init_devid)
			iid_len += port->init_devid->len;
		if (port->port_devid)
			id_len += port->port_devid->len;
	}
	mtx_unlock(&softc->ctl_lock);

	data_len = sizeof(struct scsi_vpd_scsi_ports) + num_target_port_groups *
	    num_target_ports * (sizeof(struct scsi_vpd_port_designation) +
	    sizeof(struct scsi_vpd_port_designation_cont)) + iid_len + id_len;
	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
	sp = (struct scsi_vpd_scsi_ports *)ctsio->kern_data_ptr;
	ctsio->kern_sg_entries = 0;

	/* Return at most alloc_len bytes; the rest is residual. */
	if (data_len < alloc_len) {
		ctsio->residual = alloc_len - data_len;
		ctsio->kern_data_len = data_len;
		ctsio->kern_total_len = data_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;

	/*
	 * The control device is always connected.  The disk device, on the
	 * other hand, may not be online all the time.  Need to change this
	 * to figure out whether the disk device is actually online or not.
10054 */ 10055 if (lun != NULL) 10056 sp->device = (SID_QUAL_LU_CONNECTED << 5) | 10057 lun->be_lun->lun_type; 10058 else 10059 sp->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 10060 10061 sp->page_code = SVPD_SCSI_PORTS; 10062 scsi_ulto2b(data_len - sizeof(struct scsi_vpd_scsi_ports), 10063 sp->page_length); 10064 pd = &sp->design[0]; 10065 10066 mtx_lock(&softc->ctl_lock); 10067 pg = softc->port_offset / CTL_MAX_PORTS; 10068 for (g = 0; g < num_target_port_groups; g++) { 10069 STAILQ_FOREACH(port, &softc->port_list, links) { 10070 if ((port->status & CTL_PORT_STATUS_ONLINE) == 0) 10071 continue; 10072 if (lun != NULL && 10073 ctl_map_lun_back(softc, port->targ_port, lun->lun) 10074 >= CTL_MAX_LUNS) 10075 continue; 10076 p = port->targ_port % CTL_MAX_PORTS + g * CTL_MAX_PORTS; 10077 scsi_ulto2b(p, pd->relative_port_id); 10078 if (port->init_devid && g == pg) { 10079 iid_len = port->init_devid->len; 10080 memcpy(pd->initiator_transportid, 10081 port->init_devid->data, port->init_devid->len); 10082 } else 10083 iid_len = 0; 10084 scsi_ulto2b(iid_len, pd->initiator_transportid_length); 10085 pdc = (struct scsi_vpd_port_designation_cont *) 10086 (&pd->initiator_transportid[iid_len]); 10087 if (port->port_devid && g == pg) { 10088 id_len = port->port_devid->len; 10089 memcpy(pdc->target_port_descriptors, 10090 port->port_devid->data, port->port_devid->len); 10091 } else 10092 id_len = 0; 10093 scsi_ulto2b(id_len, pdc->target_port_descriptors_length); 10094 pd = (struct scsi_vpd_port_designation *) 10095 ((uint8_t *)pdc->target_port_descriptors + id_len); 10096 } 10097 } 10098 mtx_unlock(&softc->ctl_lock); 10099 10100 ctl_set_success(ctsio); 10101 ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED; 10102 ctsio->be_move_done = ctl_config_move_done; 10103 ctl_datamove((union ctl_io *)ctsio); 10104 return (CTL_RETVAL_COMPLETE); 10105} 10106 10107static int 10108ctl_inquiry_evpd_block_limits(struct ctl_scsiio *ctsio, int alloc_len) 10109{ 10110 struct scsi_vpd_block_limits 
*bl_ptr; 10111 struct ctl_lun *lun; 10112 int bs; 10113 10114 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 10115 10116 ctsio->kern_data_ptr = malloc(sizeof(*bl_ptr), M_CTL, M_WAITOK | M_ZERO); 10117 bl_ptr = (struct scsi_vpd_block_limits *)ctsio->kern_data_ptr; 10118 ctsio->kern_sg_entries = 0; 10119 10120 if (sizeof(*bl_ptr) < alloc_len) { 10121 ctsio->residual = alloc_len - sizeof(*bl_ptr); 10122 ctsio->kern_data_len = sizeof(*bl_ptr); 10123 ctsio->kern_total_len = sizeof(*bl_ptr); 10124 } else { 10125 ctsio->residual = 0; 10126 ctsio->kern_data_len = alloc_len; 10127 ctsio->kern_total_len = alloc_len; 10128 } 10129 ctsio->kern_data_resid = 0; 10130 ctsio->kern_rel_offset = 0; 10131 ctsio->kern_sg_entries = 0; 10132 10133 /* 10134 * The control device is always connected. The disk device, on the 10135 * other hand, may not be online all the time. Need to change this 10136 * to figure out whether the disk device is actually online or not. 10137 */ 10138 if (lun != NULL) 10139 bl_ptr->device = (SID_QUAL_LU_CONNECTED << 5) | 10140 lun->be_lun->lun_type; 10141 else 10142 bl_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT; 10143 10144 bl_ptr->page_code = SVPD_BLOCK_LIMITS; 10145 scsi_ulto2b(sizeof(*bl_ptr) - 4, bl_ptr->page_length); 10146 bl_ptr->max_cmp_write_len = 0xff; 10147 scsi_ulto4b(0xffffffff, bl_ptr->max_txfer_len); 10148 if (lun != NULL) { 10149 bs = lun->be_lun->blocksize; 10150 scsi_ulto4b(lun->be_lun->opttxferlen, bl_ptr->opt_txfer_len); 10151 if (lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) { 10152 scsi_ulto4b(0xffffffff, bl_ptr->max_unmap_lba_cnt); 10153 scsi_ulto4b(0xffffffff, bl_ptr->max_unmap_blk_cnt); 10154 if (lun->be_lun->ublockexp != 0) { 10155 scsi_ulto4b((1 << lun->be_lun->ublockexp), 10156 bl_ptr->opt_unmap_grain); 10157 scsi_ulto4b(0x80000000 | lun->be_lun->ublockoff, 10158 bl_ptr->unmap_grain_align); 10159 } 10160 } 10161 scsi_ulto4b(lun->be_lun->atomicblock, 10162 bl_ptr->max_atomic_transfer_length); 10163 
	/* No extra alignment or granularity constraints on atomic writes. */
		scsi_ulto4b(0, bl_ptr->atomic_alignment);
		scsi_ulto4b(0, bl_ptr->atomic_transfer_length_granularity);
	}
	scsi_u64to8b(UINT64_MAX, bl_ptr->max_write_same_length);

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * Build the Block Device Characteristics VPD page (rotation rate and
 * form factor, both overridable via LUN options).
 */
static int
ctl_inquiry_evpd_bdc(struct ctl_scsiio *ctsio, int alloc_len)
{
	struct scsi_vpd_block_device_characteristics *bdc_ptr;
	struct ctl_lun *lun;
	const char *value;
	u_int i;

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	/* Fixed-size page; cap the transfer at alloc_len. */
	ctsio->kern_data_ptr = malloc(sizeof(*bdc_ptr), M_CTL, M_WAITOK | M_ZERO);
	bdc_ptr = (struct scsi_vpd_block_device_characteristics *)ctsio->kern_data_ptr;
	ctsio->kern_sg_entries = 0;

	if (sizeof(*bdc_ptr) < alloc_len) {
		ctsio->residual = alloc_len - sizeof(*bdc_ptr);
		ctsio->kern_data_len = sizeof(*bdc_ptr);
		ctsio->kern_total_len = sizeof(*bdc_ptr);
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;

	/*
	 * The control device is always connected.  The disk device, on the
	 * other hand, may not be online all the time.  Need to change this
	 * to figure out whether the disk device is actually online or not.
 */
	if (lun != NULL)
		bdc_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
		    lun->be_lun->lun_type;
	else
		bdc_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
	bdc_ptr->page_code = SVPD_BDC;
	scsi_ulto2b(sizeof(*bdc_ptr) - 4, bdc_ptr->page_length);
	/* "rpm" option overrides the default medium rotation rate. */
	if (lun != NULL &&
	    (value = ctl_get_opt(&lun->be_lun->options, "rpm")) != NULL)
		i = strtol(value, NULL, 0);
	else
		i = CTL_DEFAULT_ROTATION_RATE;
	scsi_ulto2b(i, bdc_ptr->medium_rotation_rate);
	/* "formfactor" option sets the nominal form factor (low nibble). */
	if (lun != NULL &&
	    (value = ctl_get_opt(&lun->be_lun->options, "formfactor")) != NULL)
		i = strtol(value, NULL, 0);
	else
		i = 0;
	bdc_ptr->wab_wac_ff = (i & 0x0f);
	bdc_ptr->flags = SVPD_FUAB | SVPD_VBULS;

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * Build the Logical Block Provisioning VPD page; thin-provisioning
 * fields are only advertised when the backend supports UNMAP.
 */
static int
ctl_inquiry_evpd_lbp(struct ctl_scsiio *ctsio, int alloc_len)
{
	struct scsi_vpd_logical_block_prov *lbp_ptr;
	struct ctl_lun *lun;

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	/* Fixed-size page; cap the transfer at alloc_len. */
	ctsio->kern_data_ptr = malloc(sizeof(*lbp_ptr), M_CTL, M_WAITOK | M_ZERO);
	lbp_ptr = (struct scsi_vpd_logical_block_prov *)ctsio->kern_data_ptr;
	ctsio->kern_sg_entries = 0;

	if (sizeof(*lbp_ptr) < alloc_len) {
		ctsio->residual = alloc_len - sizeof(*lbp_ptr);
		ctsio->kern_data_len = sizeof(*lbp_ptr);
		ctsio->kern_total_len = sizeof(*lbp_ptr);
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;
	ctsio->kern_sg_entries = 0;

	/*
	 * The control device is always connected.  The disk device, on the
	 * other hand, may not be online all the time.  Need to change this
	 * to figure out whether the disk device is actually online or not.
	 */
	if (lun != NULL)
		lbp_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
		    lun->be_lun->lun_type;
	else
		lbp_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;

	lbp_ptr->page_code = SVPD_LBP;
	scsi_ulto2b(sizeof(*lbp_ptr) - 4, lbp_ptr->page_length);
	lbp_ptr->threshold_exponent = CTL_LBP_EXPONENT;
	if (lun != NULL && lun->be_lun->flags & CTL_LUN_FLAG_UNMAP) {
		lbp_ptr->flags = SVPD_LBP_UNMAP | SVPD_LBP_WS16 |
		    SVPD_LBP_WS10 | SVPD_LBP_RZ | SVPD_LBP_ANC_SUP;
		lbp_ptr->prov_type = SVPD_LBP_THIN;
	}

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return (CTL_RETVAL_COMPLETE);
}

/*
 * INQUIRY with the EVPD bit set.
10289 */ 10290static int 10291ctl_inquiry_evpd(struct ctl_scsiio *ctsio) 10292{ 10293 struct ctl_lun *lun; 10294 struct scsi_inquiry *cdb; 10295 int alloc_len, retval; 10296 10297 lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr; 10298 cdb = (struct scsi_inquiry *)ctsio->cdb; 10299 alloc_len = scsi_2btoul(cdb->length); 10300 10301 switch (cdb->page_code) { 10302 case SVPD_SUPPORTED_PAGES: 10303 retval = ctl_inquiry_evpd_supported(ctsio, alloc_len); 10304 break; 10305 case SVPD_UNIT_SERIAL_NUMBER: 10306 retval = ctl_inquiry_evpd_serial(ctsio, alloc_len); 10307 break; 10308 case SVPD_DEVICE_ID: 10309 retval = ctl_inquiry_evpd_devid(ctsio, alloc_len); 10310 break; 10311 case SVPD_EXTENDED_INQUIRY_DATA: 10312 retval = ctl_inquiry_evpd_eid(ctsio, alloc_len); 10313 break; 10314 case SVPD_MODE_PAGE_POLICY: 10315 retval = ctl_inquiry_evpd_mpp(ctsio, alloc_len); 10316 break; 10317 case SVPD_SCSI_PORTS: 10318 retval = ctl_inquiry_evpd_scsi_ports(ctsio, alloc_len); 10319 break; 10320 case SVPD_SCSI_TPC: 10321 retval = ctl_inquiry_evpd_tpc(ctsio, alloc_len); 10322 break; 10323 case SVPD_BLOCK_LIMITS: 10324 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) 10325 goto err; 10326 retval = ctl_inquiry_evpd_block_limits(ctsio, alloc_len); 10327 break; 10328 case SVPD_BDC: 10329 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) 10330 goto err; 10331 retval = ctl_inquiry_evpd_bdc(ctsio, alloc_len); 10332 break; 10333 case SVPD_LBP: 10334 if (lun == NULL || lun->be_lun->lun_type != T_DIRECT) 10335 goto err; 10336 retval = ctl_inquiry_evpd_lbp(ctsio, alloc_len); 10337 break; 10338 default: 10339err: 10340 ctl_set_invalid_field(ctsio, 10341 /*sks_valid*/ 1, 10342 /*command*/ 1, 10343 /*field*/ 2, 10344 /*bit_valid*/ 0, 10345 /*bit*/ 0); 10346 ctl_done((union ctl_io *)ctsio); 10347 retval = CTL_RETVAL_COMPLETE; 10348 break; 10349 } 10350 10351 return (retval); 10352} 10353 10354/* 10355 * Standard INQUIRY data. 
 */
/*
 * Build the standard INQUIRY data: peripheral qualifier/type, version
 * descriptors, and vendor/product/revision strings (overridable via the
 * LUN's "vendor"/"product"/"revision" options).  Several flag fields
 * depend on the front-end port type (parallel SCSI vs. FC/iSCSI/SAS).
 */
static int
ctl_inquiry_std(struct ctl_scsiio *ctsio)
{
	struct scsi_inquiry_data *inq_ptr;
	struct scsi_inquiry *cdb;
	struct ctl_softc *softc;
	struct ctl_lun *lun;
	char *val;
	uint32_t alloc_len, data_len;
	ctl_port_type port_type;

	softc = control_softc;

	/*
	 * Figure out whether we're talking to a Fibre Channel port or not.
	 * We treat the ioctl front end, and any SCSI adapters, as packetized
	 * SCSI front ends.
	 */
	port_type = softc->ctl_ports[
	    ctl_port_idx(ctsio->io_hdr.nexus.targ_port)]->port_type;
	if (port_type == CTL_PORT_IOCTL || port_type == CTL_PORT_INTERNAL)
		port_type = CTL_PORT_SCSI;

	lun = ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	cdb = (struct scsi_inquiry *)ctsio->cdb;
	alloc_len = scsi_2btoul(cdb->length);

	/*
	 * We malloc the full inquiry data size here and fill it
	 * in.  If the user only asks for less, we'll give him
	 * that much.
	 */
	data_len = offsetof(struct scsi_inquiry_data, vendor_specific1);
	ctsio->kern_data_ptr = malloc(data_len, M_CTL, M_WAITOK | M_ZERO);
	inq_ptr = (struct scsi_inquiry_data *)ctsio->kern_data_ptr;
	ctsio->kern_sg_entries = 0;
	ctsio->kern_data_resid = 0;
	ctsio->kern_rel_offset = 0;

	if (data_len < alloc_len) {
		ctsio->residual = alloc_len - data_len;
		ctsio->kern_data_len = data_len;
		ctsio->kern_total_len = data_len;
	} else {
		ctsio->residual = 0;
		ctsio->kern_data_len = alloc_len;
		ctsio->kern_total_len = alloc_len;
	}

	/*
	 * If we have a LUN configured, report it as connected.  Otherwise,
	 * report that it is offline or no device is supported, depending
	 * on the value of inquiry_pq_no_lun.
	 *
	 * According to the spec (SPC-4 r34), the peripheral qualifier
	 * SID_QUAL_LU_OFFLINE (001b) is used in the following scenario:
	 *
	 * "A peripheral device having the specified peripheral device type
	 * is not connected to this logical unit. However, the device
	 * server is capable of supporting the specified peripheral device
	 * type on this logical unit."
	 *
	 * According to the same spec, the peripheral qualifier
	 * SID_QUAL_BAD_LU (011b) is used in this scenario:
	 *
	 * "The device server is not capable of supporting a peripheral
	 * device on this logical unit. For this peripheral qualifier the
	 * peripheral device type shall be set to 1Fh. All other peripheral
	 * device type values are reserved for this peripheral qualifier."
	 *
	 * Given the text, it would seem that we probably want to report that
	 * the LUN is offline here.  There is no LUN connected, but we can
	 * support a LUN at the given LUN number.
	 *
	 * In the real world, though, it sounds like things are a little
	 * different:
	 *
	 * - Linux, when presented with a LUN with the offline peripheral
	 *   qualifier, will create an sg driver instance for it.  So when
	 *   you attach it to CTL, you wind up with a ton of sg driver
	 *   instances.  (One for every LUN that Linux bothered to probe.)
	 *   Linux does this despite the fact that it issues a REPORT LUNs
	 *   to LUN 0 to get the inventory of supported LUNs.
	 *
	 * - There is other anecdotal evidence (from Emulex folks) about
	 *   arrays that use the offline peripheral qualifier for LUNs that
	 *   are on the "passive" path in an active/passive array.
	 *
	 * So the solution is provide a hopefully reasonable default
	 * (return bad/no LUN) and allow the user to change the behavior
	 * with a tunable/sysctl variable.
	 */
	if (lun != NULL)
		inq_ptr->device = (SID_QUAL_LU_CONNECTED << 5) |
		    lun->be_lun->lun_type;
	else if (softc->inquiry_pq_no_lun == 0)
		inq_ptr->device = (SID_QUAL_LU_OFFLINE << 5) | T_DIRECT;
	else
		inq_ptr->device = (SID_QUAL_BAD_LU << 5) | T_NODEVICE;

	/* RMB in byte 2 is 0 */
	inq_ptr->version = SCSI_REV_SPC4;

	/*
	 * According to SAM-3, even if a device only supports a single
	 * level of LUN addressing, it should still set the HISUP bit:
	 *
	 * 4.9.1 Logical unit numbers overview
	 *
	 * All logical unit number formats described in this standard are
	 * hierarchical in structure even when only a single level in that
	 * hierarchy is used.  The HISUP bit shall be set to one in the
	 * standard INQUIRY data (see SPC-2) when any logical unit number
	 * format described in this standard is used.  Non-hierarchical
	 * formats are outside the scope of this standard.
	 *
	 * Therefore we set the HiSup bit here.
	 *
	 * The response format is 2, per SPC-3.
	 */
	inq_ptr->response_format = SID_HiSup | 2;

	inq_ptr->additional_length = data_len -
	    (offsetof(struct scsi_inquiry_data, additional_length) + 1);
	CTL_DEBUG_PRINT(("additional_length = %d\n",
	    inq_ptr->additional_length));

	inq_ptr->spc3_flags = SPC3_SID_3PC | SPC3_SID_TPGS_IMPLICIT;
	/* 16 bit addressing */
	if (port_type == CTL_PORT_SCSI)
		inq_ptr->spc2_flags = SPC2_SID_ADDR16;
	/* XXX set the SID_MultiP bit here if we're actually going to
	   respond on multiple ports */
	inq_ptr->spc2_flags |= SPC2_SID_MultiP;

	/* 16 bit data bus, synchronous transfers */
	if (port_type == CTL_PORT_SCSI)
		inq_ptr->flags = SID_WBus16 | SID_Sync;
	/*
	 * XXX KDM do we want to support tagged queueing on the control
	 * device at all?
	 */
	if ((lun == NULL)
	 || (lun->be_lun->lun_type != T_PROCESSOR))
		inq_ptr->flags |= SID_CmdQue;
	/*
	 * Per SPC-3, unused bytes in ASCII strings are filled with spaces.
	 * We have 8 bytes for the vendor name, and 16 bytes for the device
	 * name and 4 bytes for the revision.
	 */
	if (lun == NULL || (val = ctl_get_opt(&lun->be_lun->options,
	    "vendor")) == NULL) {
		strncpy(inq_ptr->vendor, CTL_VENDOR, sizeof(inq_ptr->vendor));
	} else {
		/* Space-pad first, then copy without the terminating NUL. */
		memset(inq_ptr->vendor, ' ', sizeof(inq_ptr->vendor));
		strncpy(inq_ptr->vendor, val,
		    min(sizeof(inq_ptr->vendor), strlen(val)));
	}
	if (lun == NULL) {
		strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT,
		    sizeof(inq_ptr->product));
	} else if ((val = ctl_get_opt(&lun->be_lun->options, "product")) == NULL) {
		switch (lun->be_lun->lun_type) {
		case T_DIRECT:
			strncpy(inq_ptr->product, CTL_DIRECT_PRODUCT,
			    sizeof(inq_ptr->product));
			break;
		case T_PROCESSOR:
			strncpy(inq_ptr->product, CTL_PROCESSOR_PRODUCT,
			    sizeof(inq_ptr->product));
			break;
		default:
			strncpy(inq_ptr->product, CTL_UNKNOWN_PRODUCT,
			    sizeof(inq_ptr->product));
			break;
		}
	} else {
		memset(inq_ptr->product, ' ', sizeof(inq_ptr->product));
		strncpy(inq_ptr->product, val,
		    min(sizeof(inq_ptr->product), strlen(val)));
	}

	/*
	 * XXX make this a macro somewhere so it automatically gets
	 * incremented when we make changes.
	 */
	if (lun == NULL || (val = ctl_get_opt(&lun->be_lun->options,
	    "revision")) == NULL) {
		strncpy(inq_ptr->revision, "0001", sizeof(inq_ptr->revision));
	} else {
		memset(inq_ptr->revision, ' ', sizeof(inq_ptr->revision));
		strncpy(inq_ptr->revision, val,
		    min(sizeof(inq_ptr->revision), strlen(val)));
	}

	/*
	 * For parallel SCSI, we support double transition and single
	 * transition clocking.  We also support QAS (Quick Arbitration
	 * and Selection) and Information Unit transfers on both the
	 * control and array devices.
	 */
	if (port_type == CTL_PORT_SCSI)
		inq_ptr->spi3data = SID_SPI_CLOCK_DT_ST | SID_SPI_QAS |
		    SID_SPI_IUS;

	/* SAM-5 (no version claimed) */
	scsi_ulto2b(0x00A0, inq_ptr->version1);
	/* SPC-4 (no version claimed) */
	scsi_ulto2b(0x0460, inq_ptr->version2);
	if (port_type == CTL_PORT_FC) {
		/* FCP-2 ANSI INCITS.350:2003 */
		scsi_ulto2b(0x0917, inq_ptr->version3);
	} else if (port_type == CTL_PORT_SCSI) {
		/* SPI-4 ANSI INCITS.362:200x */
		scsi_ulto2b(0x0B56, inq_ptr->version3);
	} else if (port_type == CTL_PORT_ISCSI) {
		/* iSCSI (no version claimed) */
		scsi_ulto2b(0x0960, inq_ptr->version3);
	} else if (port_type == CTL_PORT_SAS) {
		/* SAS (no version claimed) */
		scsi_ulto2b(0x0BE0, inq_ptr->version3);
	}

	if (lun == NULL) {
		/* SBC-4 (no version claimed) */
		scsi_ulto2b(0x0600, inq_ptr->version4);
	} else {
		switch (lun->be_lun->lun_type) {
		case T_DIRECT:
			/* SBC-4 (no version claimed) */
			scsi_ulto2b(0x0600, inq_ptr->version4);
			break;
		case T_PROCESSOR:
		default:
			break;
		}
	}

	ctl_set_success(ctsio);
	ctsio->io_hdr.flags |= CTL_FLAG_ALLOCATED;
	ctsio->be_move_done = ctl_config_move_done;
	ctl_datamove((union ctl_io *)ctsio);
	return
 (CTL_RETVAL_COMPLETE);
}

/*
 * INQUIRY command entry point: dispatch to the EVPD or standard handler.
 * A non-zero page code with EVPD clear is invalid per SPC.
 */
int
ctl_inquiry(struct ctl_scsiio *ctsio)
{
	struct scsi_inquiry *cdb;
	int retval;

	CTL_DEBUG_PRINT(("ctl_inquiry\n"));

	cdb = (struct scsi_inquiry *)ctsio->cdb;
	if (cdb->byte2 & SI_EVPD)
		retval = ctl_inquiry_evpd(ctsio);
	else if (cdb->page_code == 0)
		retval = ctl_inquiry_std(ctsio);
	else {
		/* Page code set without EVPD: INVALID FIELD IN CDB. */
		ctl_set_invalid_field(ctsio,
				      /*sks_valid*/ 1,
				      /*command*/ 1,
				      /*field*/ 2,
				      /*bit_valid*/ 0,
				      /*bit*/ 0);
		ctl_done((union ctl_io *)ctsio);
		return (CTL_RETVAL_COMPLETE);
	}

	return (retval);
}

/*
 * For known CDB types, parse the LBA and length.
 * Returns 0 on success, 1 for non-SCSI I/O or an unrecognized opcode.
 */
static int
ctl_get_lba_len(union ctl_io *io, uint64_t *lba, uint64_t *len)
{
	if (io->io_hdr.io_type != CTL_IO_SCSI)
		return (1);

	switch (io->scsiio.cdb[0]) {
	case COMPARE_AND_WRITE: {
		struct scsi_compare_and_write *cdb;

		cdb = (struct scsi_compare_and_write *)io->scsiio.cdb;

		*lba = scsi_8btou64(cdb->addr);
		*len = cdb->length;
		break;
	}
	case READ_6:
	case WRITE_6: {
		struct scsi_rw_6 *cdb;

		cdb = (struct scsi_rw_6 *)io->scsiio.cdb;

		*lba = scsi_3btoul(cdb->addr);
		/* only 5 bits are valid in the most significant address byte */
		*lba &= 0x1fffff;
		*len = cdb->length;
		break;
	}
	case READ_10:
	case WRITE_10: {
		struct scsi_rw_10 *cdb;

		cdb = (struct scsi_rw_10 *)io->scsiio.cdb;

		*lba = scsi_4btoul(cdb->addr);
		*len = scsi_2btoul(cdb->length);
		break;
	}
	case WRITE_VERIFY_10: {
		struct scsi_write_verify_10 *cdb;

		cdb = (struct scsi_write_verify_10 *)io->scsiio.cdb;

		*lba = scsi_4btoul(cdb->addr);
		*len = scsi_2btoul(cdb->length);
		break;
	}
	case READ_12:
	case WRITE_12: {
		struct scsi_rw_12 *cdb;

		cdb = (struct scsi_rw_12 *)io->scsiio.cdb;

		*lba = scsi_4btoul(cdb->addr);
		*len = scsi_4btoul(cdb->length);
		break;
	}
	case WRITE_VERIFY_12: {
		struct scsi_write_verify_12 *cdb;

		cdb = (struct scsi_write_verify_12 *)io->scsiio.cdb;

		*lba = scsi_4btoul(cdb->addr);
		*len = scsi_4btoul(cdb->length);
		break;
	}
	case READ_16:
	case WRITE_16:
	case WRITE_ATOMIC_16: {
		struct scsi_rw_16 *cdb;

		cdb = (struct scsi_rw_16 *)io->scsiio.cdb;

		*lba = scsi_8btou64(cdb->addr);
		*len = scsi_4btoul(cdb->length);
		break;
	}
	case WRITE_VERIFY_16: {
		struct scsi_write_verify_16 *cdb;

		cdb = (struct scsi_write_verify_16 *)io->scsiio.cdb;

		*lba = scsi_8btou64(cdb->addr);
		*len = scsi_4btoul(cdb->length);
		break;
	}
	case WRITE_SAME_10: {
		struct scsi_write_same_10 *cdb;

		cdb = (struct scsi_write_same_10 *)io->scsiio.cdb;

		*lba = scsi_4btoul(cdb->addr);
		*len = scsi_2btoul(cdb->length);
		break;
	}
	case WRITE_SAME_16: {
		struct scsi_write_same_16 *cdb;

		cdb = (struct scsi_write_same_16 *)io->scsiio.cdb;

		*lba = scsi_8btou64(cdb->addr);
		*len = scsi_4btoul(cdb->length);
		break;
	}
	case VERIFY_10: {
		struct scsi_verify_10 *cdb;

		cdb = (struct scsi_verify_10 *)io->scsiio.cdb;

		*lba = scsi_4btoul(cdb->addr);
		*len = scsi_2btoul(cdb->length);
		break;
	}
	case VERIFY_12: {
		struct scsi_verify_12 *cdb;

		cdb = (struct scsi_verify_12 *)io->scsiio.cdb;

		*lba = scsi_4btoul(cdb->addr);
		*len = scsi_4btoul(cdb->length);
		break;
	}
	case VERIFY_16: {
		struct scsi_verify_16 *cdb;

		cdb = (struct scsi_verify_16 *)io->scsiio.cdb;

		*lba = scsi_8btou64(cdb->addr);
		*len =
 scsi_4btoul(cdb->length);
		break;
	}
	case UNMAP: {
		/* The affected ranges live in the data-out buffer, not the
		 * CDB, so conservatively claim the whole LBA space. */
		*lba = 0;
		*len = UINT64_MAX;
		break;
	}
	case SERVICE_ACTION_IN: {	/* GET LBA STATUS */
		struct scsi_get_lba_status *cdb;

		cdb = (struct scsi_get_lba_status *)io->scsiio.cdb;
		*lba = scsi_8btou64(cdb->addr);
		/* The returned extent is open-ended; cover the maximum. */
		*len = UINT32_MAX;
		break;
	}
	default:
		return (1);
		break; /* NOTREACHED */
	}

	return (0);
}

/*
 * Decide whether two LBA extents overlap.  With seq set, extent 1 is
 * extended by one block so that an extent immediately following it
 * (lba2 == lba1 + len1) also counts as a conflict — used for
 * sequential-access serialization.
 */
static ctl_action
ctl_extent_check_lba(uint64_t lba1, uint64_t len1, uint64_t lba2, uint64_t len2,
    bool seq)
{
	uint64_t endlba1, endlba2;

	endlba1 = lba1 + len1 - (seq ? 0 : 1);
	endlba2 = lba2 + len2 - 1;

	if ((endlba1 < lba2) || (endlba2 < lba1))
		return (CTL_ACTION_PASS);
	else
		return (CTL_ACTION_BLOCK);
}

/*
 * Check an UNMAP command's descriptor list against the extent
 * [lba2, lba2 + len2).  Returns CTL_ACTION_ERROR if io is not an UNMAP
 * (caller falls back to the plain extent check), CTL_ACTION_BLOCK if any
 * range collides or the UNMAP data has not arrived yet, else PASS.
 */
static int
ctl_extent_check_unmap(union ctl_io *io, uint64_t lba2, uint64_t len2)
{
	struct ctl_ptr_len_flags *ptrlen;
	struct scsi_unmap_desc *buf, *end, *range;
	uint64_t lba;
	uint32_t len;

	/* If not UNMAP -- go other way. */
	if (io->io_hdr.io_type != CTL_IO_SCSI ||
	    io->scsiio.cdb[0] != UNMAP)
		return (CTL_ACTION_ERROR);

	/* If UNMAP without data -- block and wait for data. */
	ptrlen = (struct ctl_ptr_len_flags *)
	    &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
	if ((io->io_hdr.flags & CTL_FLAG_ALLOCATED) == 0 ||
	    ptrlen->ptr == NULL)
		return (CTL_ACTION_BLOCK);

	/* UNMAP with data -- check for collision.
 */
	buf = (struct scsi_unmap_desc *)ptrlen->ptr;
	end = buf + ptrlen->len / sizeof(*buf);
	for (range = buf; range < end; range++) {
		lba = scsi_8btou64(range->lba);
		len = scsi_4btoul(range->length);
		/* Half-open interval overlap test. */
		if ((lba < lba2 + len2) && (lba + len > lba2))
			return (CTL_ACTION_BLOCK);
	}
	return (CTL_ACTION_PASS);
}

/*
 * Extent-overlap check between two I/Os.  If io1 is an UNMAP, its
 * descriptor list is checked range by range; otherwise the two LBA/len
 * extents are compared directly.
 */
static ctl_action
ctl_extent_check(union ctl_io *io1, union ctl_io *io2, bool seq)
{
	uint64_t lba1, lba2;
	uint64_t len1, len2;
	int retval;

	if (ctl_get_lba_len(io2, &lba2, &len2) != 0)
		return (CTL_ACTION_ERROR);

	/* CTL_ACTION_ERROR here just means "io1 is not an UNMAP". */
	retval = ctl_extent_check_unmap(io1, lba2, len2);
	if (retval != CTL_ACTION_ERROR)
		return (retval);

	if (ctl_get_lba_len(io1, &lba1, &len1) != 0)
		return (CTL_ACTION_ERROR);

	return (ctl_extent_check_lba(lba1, len1, lba2, len2, seq));
}

/*
 * Sequential-access serialization: block io2 only if it starts exactly
 * where io1 ends (back-to-back sequential I/O must stay ordered).
 */
static ctl_action
ctl_extent_check_seq(union ctl_io *io1, union ctl_io *io2)
{
	uint64_t lba1, lba2;
	uint64_t len1, len2;

	if (ctl_get_lba_len(io1, &lba1, &len1) != 0)
		return (CTL_ACTION_ERROR);
	if (ctl_get_lba_len(io2, &lba2, &len2) != 0)
		return (CTL_ACTION_ERROR);

	if (lba1 + len1 == lba2)
		return (CTL_ACTION_BLOCK);
	return (CTL_ACTION_PASS);
}

/*
 * Decide how pending_io must be serialized against ooa_io (a command
 * ahead of it on the OOA queue): pass, block, skip, or report an
 * overlapped (tagged or untagged) command.
 */
static ctl_action
ctl_check_for_blockage(struct ctl_lun *lun, union ctl_io *pending_io,
    union ctl_io *ooa_io)
{
	const struct ctl_cmd_entry *pending_entry, *ooa_entry;
	ctl_serialize_action *serialize_row;

	/*
	 * The initiator attempted multiple untagged commands at the same
	 * time.  Can't do that.
 */
	if ((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED)
	 && (ooa_io->scsiio.tag_type == CTL_TAG_UNTAGGED)
	 && ((pending_io->io_hdr.nexus.targ_port ==
	      ooa_io->io_hdr.nexus.targ_port)
	  && (pending_io->io_hdr.nexus.initid.id ==
	      ooa_io->io_hdr.nexus.initid.id))
	 && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT |
	      CTL_FLAG_STATUS_SENT)) == 0))
		return (CTL_ACTION_OVERLAP);

	/*
	 * The initiator attempted to send multiple tagged commands with
	 * the same ID.  (It's fine if different initiators have the same
	 * tag ID.)
	 *
	 * Even if all of those conditions are true, we don't kill the I/O
	 * if the command ahead of us has been aborted.  We won't end up
	 * sending it to the FETD, and it's perfectly legal to resend a
	 * command with the same tag number as long as the previous
	 * instance of this tag number has been aborted somehow.
	 */
	if ((pending_io->scsiio.tag_type != CTL_TAG_UNTAGGED)
	 && (ooa_io->scsiio.tag_type != CTL_TAG_UNTAGGED)
	 && (pending_io->scsiio.tag_num == ooa_io->scsiio.tag_num)
	 && ((pending_io->io_hdr.nexus.targ_port ==
	      ooa_io->io_hdr.nexus.targ_port)
	  && (pending_io->io_hdr.nexus.initid.id ==
	      ooa_io->io_hdr.nexus.initid.id))
	 && ((ooa_io->io_hdr.flags & (CTL_FLAG_ABORT |
	      CTL_FLAG_STATUS_SENT)) == 0))
		return (CTL_ACTION_OVERLAP_TAG);

	/*
	 * If we get a head of queue tag, SAM-3 says that we should
	 * immediately execute it.
	 *
	 * What happens if this command would normally block for some other
	 * reason?  e.g. a request sense with a head of queue tag
	 * immediately after a write.  Normally that would block, but this
	 * will result in its getting executed immediately...
	 *
	 * We currently return "pass" instead of "skip", so we'll end up
	 * going through the rest of the queue to check for overlapped tags.
	 *
	 * XXX KDM check for other types of blockage first??
	 */
	if (pending_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE)
		return (CTL_ACTION_PASS);

	/*
	 * Ordered tags have to block until all items ahead of them
	 * have completed.  If we get called with an ordered tag, we always
	 * block, if something else is ahead of us in the queue.
	 */
	if (pending_io->scsiio.tag_type == CTL_TAG_ORDERED)
		return (CTL_ACTION_BLOCK);

	/*
	 * Simple tags get blocked until all head of queue and ordered tags
	 * ahead of them have completed.  I'm lumping untagged commands in
	 * with simple tags here.  XXX KDM is that the right thing to do?
	 */
	if (((pending_io->scsiio.tag_type == CTL_TAG_UNTAGGED)
	  || (pending_io->scsiio.tag_type == CTL_TAG_SIMPLE))
	 && ((ooa_io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE)
	  || (ooa_io->scsiio.tag_type == CTL_TAG_ORDERED)))
		return (CTL_ACTION_BLOCK);

	/* Consult the per-opcode-pair serialization table. */
	pending_entry = ctl_get_cmd_entry(&pending_io->scsiio, NULL);
	ooa_entry = ctl_get_cmd_entry(&ooa_io->scsiio, NULL);

	serialize_row = ctl_serialize_table[ooa_entry->seridx];

	switch (serialize_row[pending_entry->seridx]) {
	case CTL_SER_BLOCK:
		return (CTL_ACTION_BLOCK);
	case CTL_SER_EXTENT:
		return (ctl_extent_check(ooa_io, pending_io,
		    (lun->serseq == CTL_LUN_SERSEQ_ON)));
	case CTL_SER_EXTENTOPT:
		/* Extent check only with a restricted reordering policy. */
		if ((lun->mode_pages.control_page[CTL_PAGE_CURRENT].queue_flags
		    & SCP_QUEUE_ALG_MASK) != SCP_QUEUE_ALG_UNRESTRICTED)
			return (ctl_extent_check(ooa_io, pending_io,
			    (lun->serseq == CTL_LUN_SERSEQ_ON)));
		return (CTL_ACTION_PASS);
	case CTL_SER_EXTENTSEQ:
		if (lun->serseq != CTL_LUN_SERSEQ_OFF)
			return (ctl_extent_check_seq(ooa_io, pending_io));
		return (CTL_ACTION_PASS);
	case CTL_SER_PASS:
		return (CTL_ACTION_PASS);
	case CTL_SER_BLOCKOPT:
		/* Block only with a restricted reordering policy. */
		if ((lun->mode_pages.control_page[CTL_PAGE_CURRENT].queue_flags
		    & SCP_QUEUE_ALG_MASK) != SCP_QUEUE_ALG_UNRESTRICTED)
			return (CTL_ACTION_BLOCK);
		return (CTL_ACTION_PASS);
	case CTL_SER_SKIP:
		return (CTL_ACTION_SKIP);
	default:
		panic("invalid serialization value %d",
		    serialize_row[pending_entry->seridx]);
	}

	return (CTL_ACTION_ERROR);
}

/*
 * Check for blockage or overlaps against the OOA (Order Of Arrival) queue.
 * Assumptions:
 * - pending_io is generally either incoming, or on the blocked queue
 * - starting I/O is the I/O we want to start the check with.
 */
static ctl_action
ctl_check_ooa(struct ctl_lun *lun, union ctl_io *pending_io,
	      union ctl_io *starting_io)
{
	union ctl_io *ooa_io;
	ctl_action action;

	mtx_assert(&lun->lun_lock, MA_OWNED);

	/*
	 * Run back along the OOA queue, starting with the current
	 * blocked I/O and going through every I/O before it on the
	 * queue.  If starting_io is NULL, we'll just end up returning
	 * CTL_ACTION_PASS.
	 */
	for (ooa_io = starting_io; ooa_io != NULL;
	     ooa_io = (union ctl_io *)TAILQ_PREV(&ooa_io->io_hdr, ctl_ooaq,
	     ooa_links)){

		/*
		 * This routine just checks to see whether
		 * cur_blocked is blocked by ooa_io, which is ahead
		 * of it in the queue.  It doesn't queue/dequeue
		 * cur_blocked.
11014 */ 11015 action = ctl_check_for_blockage(lun, pending_io, ooa_io); 11016 switch (action) { 11017 case CTL_ACTION_BLOCK: 11018 case CTL_ACTION_OVERLAP: 11019 case CTL_ACTION_OVERLAP_TAG: 11020 case CTL_ACTION_SKIP: 11021 case CTL_ACTION_ERROR: 11022 return (action); 11023 break; /* NOTREACHED */ 11024 case CTL_ACTION_PASS: 11025 break; 11026 default: 11027 panic("invalid action %d", action); 11028 break; /* NOTREACHED */ 11029 } 11030 } 11031 11032 return (CTL_ACTION_PASS); 11033} 11034 11035/* 11036 * Assumptions: 11037 * - An I/O has just completed, and has been removed from the per-LUN OOA 11038 * queue, so some items on the blocked queue may now be unblocked. 11039 */ 11040static int 11041ctl_check_blocked(struct ctl_lun *lun) 11042{ 11043 union ctl_io *cur_blocked, *next_blocked; 11044 11045 mtx_assert(&lun->lun_lock, MA_OWNED); 11046 11047 /* 11048 * Run forward from the head of the blocked queue, checking each 11049 * entry against the I/Os prior to it on the OOA queue to see if 11050 * there is still any blockage. 11051 * 11052 * We cannot use the TAILQ_FOREACH() macro, because it can't deal 11053 * with our removing a variable on it while it is traversing the 11054 * list. 11055 */ 11056 for (cur_blocked = (union ctl_io *)TAILQ_FIRST(&lun->blocked_queue); 11057 cur_blocked != NULL; cur_blocked = next_blocked) { 11058 union ctl_io *prev_ooa; 11059 ctl_action action; 11060 11061 next_blocked = (union ctl_io *)TAILQ_NEXT(&cur_blocked->io_hdr, 11062 blocked_links); 11063 11064 prev_ooa = (union ctl_io *)TAILQ_PREV(&cur_blocked->io_hdr, 11065 ctl_ooaq, ooa_links); 11066 11067 /* 11068 * If cur_blocked happens to be the first item in the OOA 11069 * queue now, prev_ooa will be NULL, and the action 11070 * returned will just be CTL_ACTION_PASS. 
		 */
		action = ctl_check_for_blockage(lun, pending_io, ooa_io);
		switch (action) {
		case CTL_ACTION_BLOCK:
		case CTL_ACTION_OVERLAP:
		case CTL_ACTION_OVERLAP_TAG:
		case CTL_ACTION_SKIP:
		case CTL_ACTION_ERROR:
			/* First non-PASS verdict wins; stop scanning. */
			return (action);
			break; /* NOTREACHED */
		case CTL_ACTION_PASS:
			break;
		default:
			panic("invalid action %d", action);
			break; /* NOTREACHED */
		}
	}

	return (CTL_ACTION_PASS);
}

/*
 * Walk the blocked queue and restart any I/O that is no longer blocked.
 *
 * Assumptions:
 * - An I/O has just completed, and has been removed from the per-LUN OOA
 *   queue, so some items on the blocked queue may now be unblocked.
 *
 * Caller must hold lun->lun_lock.  Always returns CTL_RETVAL_COMPLETE.
 */
static int
ctl_check_blocked(struct ctl_lun *lun)
{
	union ctl_io *cur_blocked, *next_blocked;

	mtx_assert(&lun->lun_lock, MA_OWNED);

	/*
	 * Run forward from the head of the blocked queue, checking each
	 * entry against the I/Os prior to it on the OOA queue to see if
	 * there is still any blockage.
	 *
	 * We cannot use the TAILQ_FOREACH() macro, because it can't deal
	 * with our removing a variable on it while it is traversing the
	 * list.
	 */
	for (cur_blocked = (union ctl_io *)TAILQ_FIRST(&lun->blocked_queue);
	     cur_blocked != NULL; cur_blocked = next_blocked) {
		union ctl_io *prev_ooa;
		ctl_action action;

		/* Save the next entry now, in case we unlink cur_blocked. */
		next_blocked = (union ctl_io *)TAILQ_NEXT(&cur_blocked->io_hdr,
							  blocked_links);

		prev_ooa = (union ctl_io *)TAILQ_PREV(&cur_blocked->io_hdr,
						      ctl_ooaq, ooa_links);

		/*
		 * If cur_blocked happens to be the first item in the OOA
		 * queue now, prev_ooa will be NULL, and the action
		 * returned will just be CTL_ACTION_PASS.
	 */
	if ((softc->flags & CTL_FLAG_ACTIVE_SHELF) == 0 &&
	    (entry->flags & CTL_CMD_FLAG_OK_ON_SECONDARY) == 0) {
		ctl_set_lun_standby(ctsio);
		retval = 1;
		goto bailout;
	}

	/*
	 * Reject writes to LUNs that are read-only or software
	 * write-protected (SWP bit in the control mode page).
	 */
	if (entry->pattern & CTL_LUN_PAT_WRITE) {
		if (lun->flags & CTL_LUN_READONLY) {
			ctl_set_sense(ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_DATA_PROTECT,
			    /*asc*/ 0x27, /*ascq*/ 0x01, SSD_ELEM_NONE);
			retval = 1;
			goto bailout;
		}
		if ((lun->mode_pages.control_page[CTL_PAGE_CURRENT]
		    .eca_and_aen & SCP_SWP) != 0) {
			ctl_set_sense(ctsio, /*current_error*/ 1,
			    /*sense_key*/ SSD_KEY_DATA_PROTECT,
			    /*asc*/ 0x27, /*ascq*/ 0x02, SSD_ELEM_NONE);
			retval = 1;
			goto bailout;
		}
	}

	/*
	 * Check for a reservation conflict.  If this command isn't allowed
	 * even on reserved LUNs, and if this initiator isn't the one who
	 * reserved us, reject the command with a reservation conflict.
	 */
	residx = ctl_get_resindex(&ctsio->io_hdr.nexus);
	if ((lun->flags & CTL_LUN_RESERVED)
	 && ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_RESV) == 0)) {
		if (lun->res_idx != residx) {
			ctl_set_reservation_conflict(ctsio);
			retval = 1;
			goto bailout;
		}
	}

	/* Persistent reservation checks. */
	if ((lun->flags & CTL_LUN_PR_RESERVED) == 0 ||
	    (entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_RESV)) {
		/* No reservation or command is allowed. */;
	} else if ((entry->flags & CTL_CMD_FLAG_ALLOW_ON_PR_WRESV) &&
	    (lun->res_type == SPR_TYPE_WR_EX ||
	     lun->res_type == SPR_TYPE_WR_EX_RO ||
	     lun->res_type == SPR_TYPE_WR_EX_AR)) {
		/* The command is allowed for Write Exclusive resv. */;
	} else {
		/*
		 * if we aren't registered or it's a res holder type
		 * reservation and this isn't the res holder then set a
		 * conflict.
		 */
		if (ctl_get_prkey(lun, residx) == 0
		 || (residx != lun->pr_res_idx && lun->res_type < 4)) {
			ctl_set_reservation_conflict(ctsio);
			retval = 1;
			goto bailout;
		}

	}

	if ((lun->flags & CTL_LUN_OFFLINE)
	 && ((entry->flags & CTL_CMD_FLAG_OK_ON_OFFLINE) == 0)) {
		ctl_set_lun_not_ready(ctsio);
		retval = 1;
		goto bailout;
	}

	/*
	 * If the LUN is stopped, see if this particular command is allowed
	 * for a stopped lun.  Otherwise, reject it with 0x04,0x02.
	 */
	if ((lun->flags & CTL_LUN_STOPPED)
	 && ((entry->flags & CTL_CMD_FLAG_OK_ON_STOPPED) == 0)) {
		/* "Logical unit not ready, initializing cmd. required" */
		ctl_set_lun_stopped(ctsio);
		retval = 1;
		goto bailout;
	}

	if ((lun->flags & CTL_LUN_INOPERABLE)
	 && ((entry->flags & CTL_CMD_FLAG_OK_ON_INOPERABLE) == 0)) {
		/* "Medium format corrupted" */
		ctl_set_medium_format_corrupted(ctsio);
		retval = 1;
		goto bailout;
	}

bailout:
	/* 0 = command may proceed; 1 = completed here with error status. */
	return (retval);

}

/*
 * Complete an I/O with BUSY status during failover.
 * NOTE(review): have_lock is currently unused in this body; the busy
 * status is set and the I/O completed regardless of its value.
 */
static void
ctl_failover_io(union ctl_io *io, int have_lock)
{
	ctl_set_busy(&io->scsiio);
	ctl_done(io);
}

/*
 * Handle an HA partner failure: for every block-device LUN, clean up or
 * take over I/O depending on whether we were primary for that LUN and on
 * the configured HA mode (serialize-only vs. transfer).
 */
static void
ctl_failover(void)
{
	struct ctl_lun *lun;
	struct ctl_softc *softc;
	union ctl_io *next_io, *pending_io;
	union ctl_io *io;
	int lun_idx;

	softc = control_softc;

	mtx_lock(&softc->ctl_lock);
	/*
	 * Remove any cmds from the other SC from the rtr queue.  These
	 * will obviously only be for LUNs for which we're the primary.
	 * We can't send status or get/send data for these commands.
	 * Since they haven't been executed yet, we can just remove them.
	 * We'll either abort them or delete them below, depending on
	 * which HA mode we're in.
	 */
#ifdef notyet
	mtx_lock(&softc->queue_lock);
	for (io = (union ctl_io *)STAILQ_FIRST(&softc->rtr_queue);
	     io != NULL; io = next_io) {
		next_io = (union ctl_io *)STAILQ_NEXT(&io->io_hdr, links);
		if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)
			STAILQ_REMOVE(&softc->rtr_queue, &io->io_hdr,
				      ctl_io_hdr, links);
	}
	mtx_unlock(&softc->queue_lock);
#endif

	for (lun_idx=0; lun_idx < softc->num_luns; lun_idx++) {
		lun = softc->ctl_luns[lun_idx];
		if (lun==NULL)
			continue;

		/*
		 * Processor LUNs are primary on both sides.
		 * XXX will this always be true?
		 */
		if (lun->be_lun->lun_type == T_PROCESSOR)
			continue;

		if ((lun->flags & CTL_LUN_PRIMARY_SC)
		 && (softc->ha_mode == CTL_HA_MODE_SER_ONLY)) {
			printf("FAILOVER: primary lun %d\n", lun_idx);
			/*
			 * Remove all commands from the other SC. First from the
			 * blocked queue then from the ooa queue. Once we have
			 * removed them. Call ctl_check_blocked to see if there
			 * is anything that can run.
			 */
			for (io = (union ctl_io *)TAILQ_FIRST(
			     &lun->blocked_queue); io != NULL; io = next_io) {

				next_io = (union ctl_io *)TAILQ_NEXT(
				    &io->io_hdr, blocked_links);

				if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) {
					TAILQ_REMOVE(&lun->blocked_queue,
						     &io->io_hdr,blocked_links);
					io->io_hdr.flags &= ~CTL_FLAG_BLOCKED;
					/* Blocked I/Os are on the OOA queue too. */
					TAILQ_REMOVE(&lun->ooa_queue,
						     &io->io_hdr, ooa_links);

					ctl_free_io(io);
				}
			}

			for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue);
			     io != NULL; io = next_io) {

				next_io = (union ctl_io *)TAILQ_NEXT(
				    &io->io_hdr, ooa_links);

				if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) {

					TAILQ_REMOVE(&lun->ooa_queue,
						     &io->io_hdr,
						     ooa_links);

					ctl_free_io(io);
				}
			}
			ctl_check_blocked(lun);
		} else if ((lun->flags & CTL_LUN_PRIMARY_SC)
			&& (softc->ha_mode == CTL_HA_MODE_XFER)) {

			printf("FAILOVER: primary lun %d\n", lun_idx);
			/*
			 * Abort all commands from the other SC.  We can't
			 * send status back for them now.  These should get
			 * cleaned up when they are completed or come out
			 * for a datamove operation.
			 */
			for (io = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue);
			     io != NULL; io = next_io) {
				next_io = (union ctl_io *)TAILQ_NEXT(
				    &io->io_hdr, ooa_links);

				if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)
					io->io_hdr.flags |= CTL_FLAG_ABORT;
			}
		} else if (((lun->flags & CTL_LUN_PRIMARY_SC) == 0)
			&& (softc->ha_mode == CTL_HA_MODE_XFER)) {

			printf("FAILOVER: secondary lun %d\n", lun_idx);

			/* We are taking over as primary for this LUN. */
			lun->flags |= CTL_LUN_PRIMARY_SC;

			/*
			 * We send all I/O that was sent to this controller
			 * and redirected to the other side back with
			 * busy status, and have the initiator retry it.
			 * Figuring out how much data has been transferred,
			 * etc. and picking up where we left off would be
			 * very tricky.
			 *
			 * XXX KDM need to remove I/O from the blocked
			 * queue as well!
			 */
			for (pending_io = (union ctl_io *)TAILQ_FIRST(
			     &lun->ooa_queue); pending_io != NULL;
			     pending_io = next_io) {

				next_io =  (union ctl_io *)TAILQ_NEXT(
				    &pending_io->io_hdr, ooa_links);

				pending_io->io_hdr.flags &=
					~CTL_FLAG_SENT_2OTHER_SC;

				if (pending_io->io_hdr.flags &
				    CTL_FLAG_IO_ACTIVE) {
					/* In flight: let completion see it. */
					pending_io->io_hdr.flags |=
						CTL_FLAG_FAILOVER;
				} else {
					ctl_set_busy(&pending_io->scsiio);
					ctl_done(pending_io);
				}
			}

			ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);
		} else if (((lun->flags & CTL_LUN_PRIMARY_SC) == 0)
			&& (softc->ha_mode == CTL_HA_MODE_SER_ONLY)) {
			printf("FAILOVER: secondary lun %d\n", lun_idx);
			/*
			 * if the first io on the OOA is not on the RtR queue
			 * add it.
			 */
			lun->flags |= CTL_LUN_PRIMARY_SC;

			pending_io = (union ctl_io *)TAILQ_FIRST(
			    &lun->ooa_queue);
			if (pending_io==NULL) {
				printf("Nothing on OOA queue\n");
				continue;
			}

			pending_io->io_hdr.flags &= ~CTL_FLAG_SENT_2OTHER_SC;
			if ((pending_io->io_hdr.flags &
			     CTL_FLAG_IS_WAS_ON_RTR) == 0) {
				pending_io->io_hdr.flags |=
				    CTL_FLAG_IS_WAS_ON_RTR;
				ctl_enqueue_rtr(pending_io);
			}
#if 0
			else
			{
				printf("Tag 0x%04x is running\n",
				      pending_io->scsiio.tag_num);
			}
#endif

			/*
			 * Re-run serialization checks for everything else
			 * on the OOA queue that wasn't already released.
			 */
			next_io = (union ctl_io *)TAILQ_NEXT(
			    &pending_io->io_hdr, ooa_links);
			for (pending_io=next_io; pending_io != NULL;
			     pending_io = next_io) {
				pending_io->io_hdr.flags &=
				    ~CTL_FLAG_SENT_2OTHER_SC;
				next_io = (union ctl_io *)TAILQ_NEXT(
				    &pending_io->io_hdr, ooa_links);
				if (pending_io->io_hdr.flags &
				    CTL_FLAG_IS_WAS_ON_RTR) {
#if 0
					printf("Tag 0x%04x is running\n",
					      pending_io->scsiio.tag_num);
#endif
					continue;
				}

				switch (ctl_check_ooa(lun, pending_io,
				    (union ctl_io *)TAILQ_PREV(
				    &pending_io->io_hdr, ctl_ooaq,
				    ooa_links))) {

				case CTL_ACTION_BLOCK:
					TAILQ_INSERT_TAIL(&lun->blocked_queue,
							  &pending_io->io_hdr,
							  blocked_links);
					pending_io->io_hdr.flags |=
					    CTL_FLAG_BLOCKED;
					break;
				case CTL_ACTION_PASS:
				case CTL_ACTION_SKIP:
					pending_io->io_hdr.flags |=
					    CTL_FLAG_IS_WAS_ON_RTR;
					ctl_enqueue_rtr(pending_io);
					break;
				case CTL_ACTION_OVERLAP:
					ctl_set_overlapped_cmd(
					    (struct ctl_scsiio *)pending_io);
					ctl_done(pending_io);
					break;
				case CTL_ACTION_OVERLAP_TAG:
					ctl_set_overlapped_tag(
					    (struct ctl_scsiio *)pending_io,
					    pending_io->scsiio.tag_num & 0xff);
					ctl_done(pending_io);
					break;
				case CTL_ACTION_ERROR:
				default:
					ctl_set_internal_failure(
					    (struct ctl_scsiio *)pending_io,
					    /*sks_valid*/ 0,
					    /*retry_count*/ 0);
					ctl_done(pending_io);
					break;
				}
			}

			ctl_est_ua_all(lun, -1, CTL_UA_ASYM_ACC_CHANGE);
		} else {
			panic("Unhandled HA mode failover, LUN flags = %#x, "
			      "ha_mode = #%x", lun->flags, softc->ha_mode);
		}
	}
	ctl_pause_rtr = 0;
	mtx_unlock(&softc->ctl_lock);
}

/*
 * Clear the given unit attention type(s) for initiator initidx on every
 * LUN.  Caller must hold softc->ctl_lock; each LUN's lun_lock is taken
 * around the per-LUN update.
 */
static void
ctl_clear_ua(struct ctl_softc *ctl_softc, uint32_t initidx,
	     ctl_ua_type ua_type)
{
	struct ctl_lun *lun;
	ctl_ua_type *pu;

	mtx_assert(&ctl_softc->ctl_lock, MA_OWNED);

	STAILQ_FOREACH(lun, &ctl_softc->lun_list, links) {
		mtx_lock(&lun->lun_lock);
		pu = lun->pending_ua[initidx / CTL_MAX_INIT_PER_PORT];
		if (pu != NULL)
			pu[initidx % CTL_MAX_INIT_PER_PORT] &= ~ua_type;
		mtx_unlock(&lun->lun_lock);
	}
}

/*
 * First-stage processing of an incoming SCSI I/O: look up the LUN,
 * validate the CDB, run LUN-state and reservation checks, report pending
 * unit attentions, then either forward to the HA peer, block the I/O, or
 * queue it to run (RTR queue).
 */
static int
ctl_scsiio_precheck(struct ctl_softc *softc, struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	const struct ctl_cmd_entry *entry;
	uint32_t initidx, targ_lun;
	int retval;

	retval = 0;

	lun = NULL;

	targ_lun = ctsio->io_hdr.nexus.targ_mapped_lun;
	if ((targ_lun < CTL_MAX_LUNS)
	 && ((lun = softc->ctl_luns[targ_lun]) != NULL)) {
		/*
		 * If the LUN is invalid, pretend that it doesn't exist.
		 * It will go away as soon as all pending I/O has been
		 * completed.
		 */
		mtx_lock(&lun->lun_lock);
		if (lun->flags & CTL_LUN_DISABLED) {
			mtx_unlock(&lun->lun_lock);
			lun = NULL;
			ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = NULL;
			ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = NULL;
		} else {
			ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = lun;
			ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr =
				lun->be_lun;
			if (lun->be_lun->lun_type == T_PROCESSOR) {
				ctsio->io_hdr.flags |= CTL_FLAG_CONTROL_DEV;
			}

			/*
			 * Every I/O goes into the OOA queue for a
			 * particular LUN, and stays there until completion.
			 */
			TAILQ_INSERT_TAIL(&lun->ooa_queue, &ctsio->io_hdr,
			    ooa_links);
		}
	} else {
		ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr = NULL;
		ctsio->io_hdr.ctl_private[CTL_PRIV_BACKEND_LUN].ptr = NULL;
	}

	/* Get command entry and return error if it is unsupported. */
	entry = ctl_validate_command(ctsio);
	if (entry == NULL) {
		/* ctl_validate_command() already completed the I/O. */
		if (lun)
			mtx_unlock(&lun->lun_lock);
		return (retval);
	}

	ctsio->io_hdr.flags &= ~CTL_FLAG_DATA_MASK;
	ctsio->io_hdr.flags |= entry->flags & CTL_FLAG_DATA_MASK;

	/*
	 * Check to see whether we can send this command to LUNs that don't
	 * exist.  This should pretty much only be the case for inquiry
	 * and request sense.  Further checks, below, really require having
	 * a LUN, so we can't really check the command anymore.  Just put
	 * it on the rtr queue.
	 */
	if (lun == NULL) {
		if (entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS) {
			ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
			ctl_enqueue_rtr((union ctl_io *)ctsio);
			return (retval);
		}

		ctl_set_unsupported_lun(ctsio);
		ctl_done((union ctl_io *)ctsio);
		CTL_DEBUG_PRINT(("ctl_scsiio_precheck: bailing out due to invalid LUN\n"));
		return (retval);
	} else {
		/*
		 * Make sure we support this particular command on this LUN.
		 * e.g., we don't support writes to the control LUN.
		 */
		if (!ctl_cmd_applicable(lun->be_lun->lun_type, entry)) {
			mtx_unlock(&lun->lun_lock);
			ctl_set_invalid_opcode(ctsio);
			ctl_done((union ctl_io *)ctsio);
			return (retval);
		}
	}

	initidx = ctl_get_initindex(&ctsio->io_hdr.nexus);

#ifdef CTL_WITH_CA
	/*
	 * If we've got a request sense, it'll clear the contingent
	 * allegiance condition.  Otherwise, if we have a CA condition for
	 * this initiator, clear it, because it sent down a command other
	 * than request sense.
	 */
	if ((ctsio->cdb[0] != REQUEST_SENSE)
	 && (ctl_is_set(lun->have_ca, initidx)))
		ctl_clear_mask(lun->have_ca, initidx);
#endif

	/*
	 * If the command has this flag set, it handles its own unit
	 * attention reporting, we shouldn't do anything.  Otherwise we
	 * check for any pending unit attentions, and send them back to the
	 * initiator.  We only do this when a command initially comes in,
	 * not when we pull it off the blocked queue.
	 *
	 * According to SAM-3, section 5.3.2, the order that things get
	 * presented back to the host is basically unit attentions caused
	 * by some sort of reset event, busy status, reservation conflicts
	 * or task set full, and finally any other status.
	 *
	 * One issue here is that some of the unit attentions we report
	 * don't fall into the "reset" category (e.g. "reported luns data
	 * has changed").  So reporting it here, before the reservation
	 * check, may be technically wrong.  I guess the only thing to do
	 * would be to check for and report the reset events here, and then
	 * check for the other unit attention types after we check for a
	 * reservation conflict.
	 *
	 * XXX KDM need to fix this
	 */
	if ((entry->flags & CTL_CMD_FLAG_NO_SENSE) == 0) {
		ctl_ua_type ua_type;
		scsi_sense_data_type sense_format;

		if (lun->flags & CTL_LUN_SENSE_DESC)
			sense_format = SSD_TYPE_DESC;
		else
			sense_format = SSD_TYPE_FIXED;

		/* Report the highest-priority pending UA, if any. */
		ua_type = ctl_build_ua(lun, initidx, &ctsio->sense_data,
		    sense_format);
		if (ua_type != CTL_UA_NONE) {
			mtx_unlock(&lun->lun_lock);
			ctsio->scsi_status = SCSI_STATUS_CHECK_COND;
			ctsio->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
			ctsio->sense_len = SSD_FULL_SIZE;
			ctl_done((union ctl_io *)ctsio);
			return (retval);
		}
	}


	if (ctl_scsiio_lun_check(lun, entry, ctsio) != 0) {
		/* Check failed; status/sense already set by the check. */
		mtx_unlock(&lun->lun_lock);
		ctl_done((union ctl_io *)ctsio);
		return (retval);
	}

	/*
	 * XXX CHD this is where we want to send IO to other side if
	 * this LUN is secondary on this SC. We will need to make a copy
	 * of the IO and flag the IO on this side as SENT_2OTHER and the flag
	 * the copy we send as FROM_OTHER.
	 * We also need to stuff the address of the original IO so we can
	 * find it easily. Something similar will need be done on the other
	 * side so when we are done we can find the copy.
	 */
	if ((lun->flags & CTL_LUN_PRIMARY_SC) == 0) {
		union ctl_ha_msg msg_info;
		int isc_retval;

		ctsio->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC;

		msg_info.hdr.msg_type = CTL_MSG_SERIALIZE;
		msg_info.hdr.original_sc = (union ctl_io *)ctsio;
#if 0
		printf("1. ctsio %p\n", ctsio);
#endif
		msg_info.hdr.serializing_sc = NULL;
		msg_info.hdr.nexus = ctsio->io_hdr.nexus;
		msg_info.scsi.tag_num = ctsio->tag_num;
		msg_info.scsi.tag_type = ctsio->tag_type;
		memcpy(msg_info.scsi.cdb, ctsio->cdb, CTL_MAX_CDBLEN);

		ctsio->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;

		if ((isc_retval=ctl_ha_msg_send(CTL_HA_CHAN_CTL,
		    (void *)&msg_info, sizeof(msg_info), 0)) >
		    CTL_HA_STATUS_SUCCESS) {
			printf("CTL:precheck, ctl_ha_msg_send returned %d\n",
			       isc_retval);
			printf("CTL:opcode is %x\n", ctsio->cdb[0]);
		} else {
#if 0
			printf("CTL:Precheck sent msg, opcode is %x\n",opcode);
#endif
		}

		/*
		 * XXX KDM this I/O is off the incoming queue, but hasn't
		 * been inserted on any other queue.  We may need to come
		 * up with a holding queue while we wait for serialization
		 * so that we have an idea of what we're waiting for from
		 * the other side.
		 */
		mtx_unlock(&lun->lun_lock);
		return (retval);
	}

	/* Run serialization against everything ahead of us on the OOA queue. */
	switch (ctl_check_ooa(lun, (union ctl_io *)ctsio,
	    (union ctl_io *)TAILQ_PREV(&ctsio->io_hdr,
	    ctl_ooaq, ooa_links))) {
	case CTL_ACTION_BLOCK:
		ctsio->io_hdr.flags |= CTL_FLAG_BLOCKED;
		TAILQ_INSERT_TAIL(&lun->blocked_queue, &ctsio->io_hdr,
				  blocked_links);
		mtx_unlock(&lun->lun_lock);
		return (retval);
	case CTL_ACTION_PASS:
	case CTL_ACTION_SKIP:
		ctsio->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR;
		mtx_unlock(&lun->lun_lock);
		ctl_enqueue_rtr((union ctl_io *)ctsio);
		break;
	case CTL_ACTION_OVERLAP:
		mtx_unlock(&lun->lun_lock);
		ctl_set_overlapped_cmd(ctsio);
		ctl_done((union ctl_io *)ctsio);
		break;
	case CTL_ACTION_OVERLAP_TAG:
		mtx_unlock(&lun->lun_lock);
		ctl_set_overlapped_tag(ctsio, ctsio->tag_num & 0xff);
		ctl_done((union ctl_io *)ctsio);
		break;
	case CTL_ACTION_ERROR:
	default:
		mtx_unlock(&lun->lun_lock);
		ctl_set_internal_failure(ctsio,
					 /*sks_valid*/ 0,
					 /*retry_count*/ 0);
		ctl_done((union ctl_io *)ctsio);
		break;
	}
	return (retval);
}

/*
 * Look up the command table entry for the CDB in ctsio.  For opcodes
 * flagged CTL_CMD_FLAG_SA5, entry->execute doubles as a pointer to a
 * sub-table indexed by the CDB's service action byte.  If sa is non-NULL,
 * *sa is set to whether this opcode uses service actions.
 */
const struct ctl_cmd_entry *
ctl_get_cmd_entry(struct ctl_scsiio *ctsio, int *sa)
{
	const struct ctl_cmd_entry *entry;
	int service_action;

	entry = &ctl_cmd_table[ctsio->cdb[0]];
	if (sa)
		*sa = ((entry->flags & CTL_CMD_FLAG_SA5) != 0);
	if (entry->flags & CTL_CMD_FLAG_SA5) {
		service_action = ctsio->cdb[1] & SERVICE_ACTION_MASK;
		entry = &((const struct ctl_cmd_entry *)
		    entry->execute)[service_action];
	}
	return (entry);
}

/*
 * Validate the CDB against the command table: reject unimplemented
 * opcodes/service actions and any CDB bits not allowed by the usage mask.
 * On failure, completes the I/O with the appropriate sense and returns
 * NULL; on success, returns the command entry.
 */
const struct ctl_cmd_entry *
ctl_validate_command(struct ctl_scsiio *ctsio)
{
	const struct ctl_cmd_entry *entry;
	int i, sa;
	uint8_t diff;

	entry = ctl_get_cmd_entry(ctsio, &sa);
	if (entry->execute == NULL) {
		if (sa)
			ctl_set_invalid_field(ctsio,
			    /*sks_valid*/ 1,
			    /*command*/ 1,
			    /*field*/ 1,
			    /*bit_valid*/ 1,
			    /*bit*/ 4);
		else
			ctl_set_invalid_opcode(ctsio);
		ctl_done((union ctl_io *)ctsio);
		return (NULL);
	}
	KASSERT(entry->length > 0,
	    ("Not defined length for command 0x%02x/0x%02x",
	     ctsio->cdb[0], ctsio->cdb[1]));
	for (i = 1; i < entry->length; i++) {
		/* Any bit set in the CDB but not in the usage mask is invalid. */
		diff = ctsio->cdb[i] & ~entry->usage[i - 1];
		if (diff == 0)
			continue;
		ctl_set_invalid_field(ctsio,
		    /*sks_valid*/ 1,
		    /*command*/ 1,
		    /*field*/ i,
		    /*bit_valid*/ 1,
		    /*bit*/ fls(diff) - 1);
		ctl_done((union ctl_io *)ctsio);
		return (NULL);
	}
	return (entry);
}

/*
 * Return non-zero if the command described by entry is allowed on a LUN
 * of the given type (processor or direct-access).
 */
static int
ctl_cmd_applicable(uint8_t lun_type, const struct ctl_cmd_entry *entry)
{

	switch (lun_type) {
	case T_PROCESSOR:
		if (((entry->flags & CTL_CMD_FLAG_OK_ON_PROC) == 0) &&
		    ((entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS) == 0))
			return (0);
		break;
	case T_DIRECT:
		if (((entry->flags & CTL_CMD_FLAG_OK_ON_SLUN) == 0) &&
		    ((entry->flags & CTL_CMD_FLAG_OK_ON_ALL_LUNS) == 0))
			return (0);
		break;
	default:
		return (0);
	}
	return (1);
}

/*
 * Execute a SCSI I/O that has already passed ctl_scsiio_precheck(),
 * unless it has been aborted in the meantime.
 */
static int
ctl_scsiio(struct ctl_scsiio *ctsio)
{
	int retval;
	const struct ctl_cmd_entry *entry;

	retval = CTL_RETVAL_COMPLETE;

	CTL_DEBUG_PRINT(("ctl_scsiio cdb[0]=%02X\n", ctsio->cdb[0]));

	entry = ctl_get_cmd_entry(ctsio, NULL);

	/*
	 * If this I/O has been aborted, just send it straight to
	 * ctl_done() without executing it.
	 */
	if (ctsio->io_hdr.flags & CTL_FLAG_ABORT) {
		ctl_done((union ctl_io *)ctsio);
		goto bailout;
	}

	/*
	 * All the checks should have been handled by ctl_scsiio_precheck().
	 * We should be clear now to just execute the I/O.
	 */
	retval = entry->execute(ctsio);

bailout:
	return (retval);
}

/*
 * Since we only implement one target right now, a bus reset simply resets
 * our single target.
 */
static int
ctl_bus_reset(struct ctl_softc *softc, union ctl_io *io)
{
	return(ctl_target_reset(softc, io, CTL_UA_BUS_RESET));
}

/*
 * Reset every LUN on this target: notify the HA peer (unless the request
 * came from the peer), then run ctl_lun_reset() on each LUN.  Returns the
 * sum of the per-LUN reset results.
 */
static int
ctl_target_reset(struct ctl_softc *softc, union ctl_io *io,
		 ctl_ua_type ua_type)
{
	struct ctl_lun *lun;
	int retval;

	if (!(io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) {
		union ctl_ha_msg msg_info;

		io->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC;
		msg_info.hdr.nexus = io->io_hdr.nexus;
		if (ua_type==CTL_UA_TARG_RESET)
			msg_info.task.task_action = CTL_TASK_TARGET_RESET;
		else
			msg_info.task.task_action = CTL_TASK_BUS_RESET;
		msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS;
		msg_info.hdr.original_sc = NULL;
		msg_info.hdr.serializing_sc = NULL;
		/* NOTE(review): a failed HA send is silently ignored here. */
		if (CTL_HA_STATUS_SUCCESS != ctl_ha_msg_send(CTL_HA_CHAN_CTL,
		    (void *)&msg_info, sizeof(msg_info), 0)) {
		}
	}
	retval = 0;

	mtx_lock(&softc->ctl_lock);
	STAILQ_FOREACH(lun, &softc->lun_list, links)
		retval += ctl_lun_reset(lun, io, ua_type);
	mtx_unlock(&softc->ctl_lock);

	return (retval);
}

/*
 * The LUN should always be set.  The I/O is optional, and is used to
 * distinguish between I/Os sent by this initiator, and by other
 * initiators.  We set unit attention for initiators other than this one.
 * SAM-3 is vague on this point.  It does say that a unit attention should
 * be established for other initiators when a LUN is reset (see section
 * 5.7.3), but it doesn't specifically say that the unit attention should
 * be established for this particular initiator when a LUN is reset.  Here
 * is the relevant text, from SAM-3 rev 8:
 *
 * 5.7.2 When a SCSI initiator port aborts its own tasks
 *
 * When a SCSI initiator port causes its own task(s) to be aborted, no
 * notification that the task(s) have been aborted shall be returned to
 * the SCSI initiator port other than the completion response for the
 * command or task management function action that caused the task(s) to
 * be aborted and notification(s) associated with related effects of the
 * action (e.g., a reset unit attention condition).
 *
 * XXX KDM for now, we're setting unit attention for all initiators.
 */
static int
ctl_lun_reset(struct ctl_lun *lun, union ctl_io *io, ctl_ua_type ua_type)
{
	union ctl_io *xio;
#if 0
	uint32_t initidx;
#endif
#ifdef CTL_WITH_CA
	int i;
#endif

	mtx_lock(&lun->lun_lock);
	/*
	 * Run through the OOA queue and abort each I/O.
	 */
#if 0
	TAILQ_FOREACH((struct ctl_io_hdr *)xio, &lun->ooa_queue, ooa_links) {
#endif
	for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL;
	     xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) {
		xio->io_hdr.flags |= CTL_FLAG_ABORT | CTL_FLAG_ABORT_STATUS;
	}

	/*
	 * This version sets unit attention for every
	 */
#if 0
	initidx = ctl_get_initindex(&io->io_hdr.nexus);
	ctl_est_ua_all(lun, initidx, ua_type);
#else
	ctl_est_ua_all(lun, -1, ua_type);
#endif

	/*
	 * A reset (any kind, really) clears reservations established with
	 * RESERVE/RELEASE.  It does not clear reservations established
	 * with PERSISTENT RESERVE OUT, but we don't support that at the
	 * moment anyway.  See SPC-2, section 5.6.  SPC-3 doesn't address
	 * reservations made with the RESERVE/RELEASE commands, because
	 * those commands are obsolete in SPC-3.
	 */
	lun->flags &= ~CTL_LUN_RESERVED;

#ifdef CTL_WITH_CA
	for (i = 0; i < CTL_MAX_INITIATORS; i++)
		ctl_clear_mask(lun->have_ca, i);
#endif
	mtx_unlock(&lun->lun_lock);

	return (0);
}

/*
 * Abort all I/Os on this LUN matching the given target port and initiator
 * ID (UINT32_MAX acts as a wildcard for either).  If this side is not
 * primary and other_sc is not set, forward the abort to the HA peer.
 */
static void
ctl_abort_tasks_lun(struct ctl_lun *lun, uint32_t targ_port, uint32_t init_id,
    int other_sc)
{
	union ctl_io *xio;

	mtx_assert(&lun->lun_lock, MA_OWNED);

	/*
	 * Run through the OOA queue and attempt to find the given I/O.
	 * The target port, initiator ID, tag type and tag number have to
	 * match the values that we got from the initiator.  If we have an
	 * untagged command to abort, simply abort the first untagged command
	 * we come to.  We only allow one untagged command at a time of course.
12023 */ 12024 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 12025 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 12026 12027 if ((targ_port == UINT32_MAX || 12028 targ_port == xio->io_hdr.nexus.targ_port) && 12029 (init_id == UINT32_MAX || 12030 init_id == xio->io_hdr.nexus.initid.id)) { 12031 if (targ_port != xio->io_hdr.nexus.targ_port || 12032 init_id != xio->io_hdr.nexus.initid.id) 12033 xio->io_hdr.flags |= CTL_FLAG_ABORT_STATUS; 12034 xio->io_hdr.flags |= CTL_FLAG_ABORT; 12035 if (!other_sc && !(lun->flags & CTL_LUN_PRIMARY_SC)) { 12036 union ctl_ha_msg msg_info; 12037 12038 msg_info.hdr.nexus = xio->io_hdr.nexus; 12039 msg_info.task.task_action = CTL_TASK_ABORT_TASK; 12040 msg_info.task.tag_num = xio->scsiio.tag_num; 12041 msg_info.task.tag_type = xio->scsiio.tag_type; 12042 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 12043 msg_info.hdr.original_sc = NULL; 12044 msg_info.hdr.serializing_sc = NULL; 12045 ctl_ha_msg_send(CTL_HA_CHAN_CTL, 12046 (void *)&msg_info, sizeof(msg_info), 0); 12047 } 12048 } 12049 } 12050} 12051 12052static int 12053ctl_abort_task_set(union ctl_io *io) 12054{ 12055 struct ctl_softc *softc = control_softc; 12056 struct ctl_lun *lun; 12057 uint32_t targ_lun; 12058 12059 /* 12060 * Look up the LUN. 
12061 */ 12062 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 12063 mtx_lock(&softc->ctl_lock); 12064 if ((targ_lun < CTL_MAX_LUNS) && (softc->ctl_luns[targ_lun] != NULL)) 12065 lun = softc->ctl_luns[targ_lun]; 12066 else { 12067 mtx_unlock(&softc->ctl_lock); 12068 return (1); 12069 } 12070 12071 mtx_lock(&lun->lun_lock); 12072 mtx_unlock(&softc->ctl_lock); 12073 if (io->taskio.task_action == CTL_TASK_ABORT_TASK_SET) { 12074 ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port, 12075 io->io_hdr.nexus.initid.id, 12076 (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); 12077 } else { /* CTL_TASK_CLEAR_TASK_SET */ 12078 ctl_abort_tasks_lun(lun, UINT32_MAX, UINT32_MAX, 12079 (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); 12080 } 12081 mtx_unlock(&lun->lun_lock); 12082 return (0); 12083} 12084 12085static int 12086ctl_i_t_nexus_reset(union ctl_io *io) 12087{ 12088 struct ctl_softc *softc = control_softc; 12089 struct ctl_lun *lun; 12090 uint32_t initidx, residx; 12091 12092 initidx = ctl_get_initindex(&io->io_hdr.nexus); 12093 residx = ctl_get_resindex(&io->io_hdr.nexus); 12094 mtx_lock(&softc->ctl_lock); 12095 STAILQ_FOREACH(lun, &softc->lun_list, links) { 12096 mtx_lock(&lun->lun_lock); 12097 ctl_abort_tasks_lun(lun, io->io_hdr.nexus.targ_port, 12098 io->io_hdr.nexus.initid.id, 12099 (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) != 0); 12100#ifdef CTL_WITH_CA 12101 ctl_clear_mask(lun->have_ca, initidx); 12102#endif 12103 if ((lun->flags & CTL_LUN_RESERVED) && (lun->res_idx == residx)) 12104 lun->flags &= ~CTL_LUN_RESERVED; 12105 ctl_est_ua(lun, initidx, CTL_UA_I_T_NEXUS_LOSS); 12106 mtx_unlock(&lun->lun_lock); 12107 } 12108 mtx_unlock(&softc->ctl_lock); 12109 return (0); 12110} 12111 12112static int 12113ctl_abort_task(union ctl_io *io) 12114{ 12115 union ctl_io *xio; 12116 struct ctl_lun *lun; 12117 struct ctl_softc *softc; 12118#if 0 12119 struct sbuf sb; 12120 char printbuf[128]; 12121#endif 12122 int found; 12123 uint32_t targ_lun; 12124 12125 softc = 
control_softc; 12126 found = 0; 12127 12128 /* 12129 * Look up the LUN. 12130 */ 12131 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 12132 mtx_lock(&softc->ctl_lock); 12133 if ((targ_lun < CTL_MAX_LUNS) 12134 && (softc->ctl_luns[targ_lun] != NULL)) 12135 lun = softc->ctl_luns[targ_lun]; 12136 else { 12137 mtx_unlock(&softc->ctl_lock); 12138 return (1); 12139 } 12140 12141#if 0 12142 printf("ctl_abort_task: called for lun %lld, tag %d type %d\n", 12143 lun->lun, io->taskio.tag_num, io->taskio.tag_type); 12144#endif 12145 12146 mtx_lock(&lun->lun_lock); 12147 mtx_unlock(&softc->ctl_lock); 12148 /* 12149 * Run through the OOA queue and attempt to find the given I/O. 12150 * The target port, initiator ID, tag type and tag number have to 12151 * match the values that we got from the initiator. If we have an 12152 * untagged command to abort, simply abort the first untagged command 12153 * we come to. We only allow one untagged command at a time of course. 12154 */ 12155#if 0 12156 TAILQ_FOREACH((struct ctl_io_hdr *)xio, &lun->ooa_queue, ooa_links) { 12157#endif 12158 for (xio = (union ctl_io *)TAILQ_FIRST(&lun->ooa_queue); xio != NULL; 12159 xio = (union ctl_io *)TAILQ_NEXT(&xio->io_hdr, ooa_links)) { 12160#if 0 12161 sbuf_new(&sb, printbuf, sizeof(printbuf), SBUF_FIXEDLEN); 12162 12163 sbuf_printf(&sb, "LUN %lld tag %d type %d%s%s%s%s: ", 12164 lun->lun, xio->scsiio.tag_num, 12165 xio->scsiio.tag_type, 12166 (xio->io_hdr.blocked_links.tqe_prev 12167 == NULL) ? "" : " BLOCKED", 12168 (xio->io_hdr.flags & 12169 CTL_FLAG_DMA_INPROG) ? " DMA" : "", 12170 (xio->io_hdr.flags & 12171 CTL_FLAG_ABORT) ? " ABORT" : "", 12172 (xio->io_hdr.flags & 12173 CTL_FLAG_IS_WAS_ON_RTR ? 
" RTR" : "")); 12174 ctl_scsi_command_string(&xio->scsiio, NULL, &sb); 12175 sbuf_finish(&sb); 12176 printf("%s\n", sbuf_data(&sb)); 12177#endif 12178 12179 if ((xio->io_hdr.nexus.targ_port != io->io_hdr.nexus.targ_port) 12180 || (xio->io_hdr.nexus.initid.id != io->io_hdr.nexus.initid.id) 12181 || (xio->io_hdr.flags & CTL_FLAG_ABORT)) 12182 continue; 12183 12184 /* 12185 * If the abort says that the task is untagged, the 12186 * task in the queue must be untagged. Otherwise, 12187 * we just check to see whether the tag numbers 12188 * match. This is because the QLogic firmware 12189 * doesn't pass back the tag type in an abort 12190 * request. 12191 */ 12192#if 0 12193 if (((xio->scsiio.tag_type == CTL_TAG_UNTAGGED) 12194 && (io->taskio.tag_type == CTL_TAG_UNTAGGED)) 12195 || (xio->scsiio.tag_num == io->taskio.tag_num)) { 12196#endif 12197 /* 12198 * XXX KDM we've got problems with FC, because it 12199 * doesn't send down a tag type with aborts. So we 12200 * can only really go by the tag number... 12201 * This may cause problems with parallel SCSI. 12202 * Need to figure that out!! 
12203 */ 12204 if (xio->scsiio.tag_num == io->taskio.tag_num) { 12205 xio->io_hdr.flags |= CTL_FLAG_ABORT; 12206 found = 1; 12207 if ((io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC) == 0 && 12208 !(lun->flags & CTL_LUN_PRIMARY_SC)) { 12209 union ctl_ha_msg msg_info; 12210 12211 io->io_hdr.flags |= CTL_FLAG_SENT_2OTHER_SC; 12212 msg_info.hdr.nexus = io->io_hdr.nexus; 12213 msg_info.task.task_action = CTL_TASK_ABORT_TASK; 12214 msg_info.task.tag_num = io->taskio.tag_num; 12215 msg_info.task.tag_type = io->taskio.tag_type; 12216 msg_info.hdr.msg_type = CTL_MSG_MANAGE_TASKS; 12217 msg_info.hdr.original_sc = NULL; 12218 msg_info.hdr.serializing_sc = NULL; 12219#if 0 12220 printf("Sent Abort to other side\n"); 12221#endif 12222 if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, 12223 (void *)&msg_info, sizeof(msg_info), 0) != 12224 CTL_HA_STATUS_SUCCESS) { 12225 } 12226 } 12227#if 0 12228 printf("ctl_abort_task: found I/O to abort\n"); 12229#endif 12230 } 12231 } 12232 mtx_unlock(&lun->lun_lock); 12233 12234 if (found == 0) { 12235 /* 12236 * This isn't really an error. It's entirely possible for 12237 * the abort and command completion to cross on the wire. 12238 * This is more of an informative/diagnostic error. 
12239 */ 12240#if 0 12241 printf("ctl_abort_task: ABORT sent for nonexistent I/O: " 12242 "%d:%d:%d:%d tag %d type %d\n", 12243 io->io_hdr.nexus.initid.id, 12244 io->io_hdr.nexus.targ_port, 12245 io->io_hdr.nexus.targ_target.id, 12246 io->io_hdr.nexus.targ_lun, io->taskio.tag_num, 12247 io->taskio.tag_type); 12248#endif 12249 } 12250 return (0); 12251} 12252 12253static void 12254ctl_run_task(union ctl_io *io) 12255{ 12256 struct ctl_softc *softc = control_softc; 12257 int retval = 1; 12258 const char *task_desc; 12259 12260 CTL_DEBUG_PRINT(("ctl_run_task\n")); 12261 12262 KASSERT(io->io_hdr.io_type == CTL_IO_TASK, 12263 ("ctl_run_task: Unextected io_type %d\n", 12264 io->io_hdr.io_type)); 12265 12266 task_desc = ctl_scsi_task_string(&io->taskio); 12267 if (task_desc != NULL) { 12268#ifdef NEEDTOPORT 12269 csevent_log(CSC_CTL | CSC_SHELF_SW | 12270 CTL_TASK_REPORT, 12271 csevent_LogType_Trace, 12272 csevent_Severity_Information, 12273 csevent_AlertLevel_Green, 12274 csevent_FRU_Firmware, 12275 csevent_FRU_Unknown, 12276 "CTL: received task: %s",task_desc); 12277#endif 12278 } else { 12279#ifdef NEEDTOPORT 12280 csevent_log(CSC_CTL | CSC_SHELF_SW | 12281 CTL_TASK_REPORT, 12282 csevent_LogType_Trace, 12283 csevent_Severity_Information, 12284 csevent_AlertLevel_Green, 12285 csevent_FRU_Firmware, 12286 csevent_FRU_Unknown, 12287 "CTL: received unknown task " 12288 "type: %d (%#x)", 12289 io->taskio.task_action, 12290 io->taskio.task_action); 12291#endif 12292 } 12293 switch (io->taskio.task_action) { 12294 case CTL_TASK_ABORT_TASK: 12295 retval = ctl_abort_task(io); 12296 break; 12297 case CTL_TASK_ABORT_TASK_SET: 12298 case CTL_TASK_CLEAR_TASK_SET: 12299 retval = ctl_abort_task_set(io); 12300 break; 12301 case CTL_TASK_CLEAR_ACA: 12302 break; 12303 case CTL_TASK_I_T_NEXUS_RESET: 12304 retval = ctl_i_t_nexus_reset(io); 12305 break; 12306 case CTL_TASK_LUN_RESET: { 12307 struct ctl_lun *lun; 12308 uint32_t targ_lun; 12309 12310 targ_lun = 
io->io_hdr.nexus.targ_mapped_lun; 12311 mtx_lock(&softc->ctl_lock); 12312 if ((targ_lun < CTL_MAX_LUNS) 12313 && (softc->ctl_luns[targ_lun] != NULL)) 12314 lun = softc->ctl_luns[targ_lun]; 12315 else { 12316 mtx_unlock(&softc->ctl_lock); 12317 retval = 1; 12318 break; 12319 } 12320 12321 if (!(io->io_hdr.flags & 12322 CTL_FLAG_FROM_OTHER_SC)) { 12323 union ctl_ha_msg msg_info; 12324 12325 io->io_hdr.flags |= 12326 CTL_FLAG_SENT_2OTHER_SC; 12327 msg_info.hdr.msg_type = 12328 CTL_MSG_MANAGE_TASKS; 12329 msg_info.hdr.nexus = io->io_hdr.nexus; 12330 msg_info.task.task_action = 12331 CTL_TASK_LUN_RESET; 12332 msg_info.hdr.original_sc = NULL; 12333 msg_info.hdr.serializing_sc = NULL; 12334 if (CTL_HA_STATUS_SUCCESS != 12335 ctl_ha_msg_send(CTL_HA_CHAN_CTL, 12336 (void *)&msg_info, 12337 sizeof(msg_info), 0)) { 12338 } 12339 } 12340 12341 retval = ctl_lun_reset(lun, io, 12342 CTL_UA_LUN_RESET); 12343 mtx_unlock(&softc->ctl_lock); 12344 break; 12345 } 12346 case CTL_TASK_TARGET_RESET: 12347 retval = ctl_target_reset(softc, io, CTL_UA_TARG_RESET); 12348 break; 12349 case CTL_TASK_BUS_RESET: 12350 retval = ctl_bus_reset(softc, io); 12351 break; 12352 case CTL_TASK_PORT_LOGIN: 12353 break; 12354 case CTL_TASK_PORT_LOGOUT: 12355 break; 12356 default: 12357 printf("ctl_run_task: got unknown task management event %d\n", 12358 io->taskio.task_action); 12359 break; 12360 } 12361 if (retval == 0) 12362 io->io_hdr.status = CTL_SUCCESS; 12363 else 12364 io->io_hdr.status = CTL_ERROR; 12365 ctl_done(io); 12366} 12367 12368/* 12369 * For HA operation. Handle commands that come in from the other 12370 * controller. 
12371 */ 12372static void 12373ctl_handle_isc(union ctl_io *io) 12374{ 12375 int free_io; 12376 struct ctl_lun *lun; 12377 struct ctl_softc *softc; 12378 uint32_t targ_lun; 12379 12380 softc = control_softc; 12381 12382 targ_lun = io->io_hdr.nexus.targ_mapped_lun; 12383 lun = softc->ctl_luns[targ_lun]; 12384 12385 switch (io->io_hdr.msg_type) { 12386 case CTL_MSG_SERIALIZE: 12387 free_io = ctl_serialize_other_sc_cmd(&io->scsiio); 12388 break; 12389 case CTL_MSG_R2R: { 12390 const struct ctl_cmd_entry *entry; 12391 12392 /* 12393 * This is only used in SER_ONLY mode. 12394 */ 12395 free_io = 0; 12396 entry = ctl_get_cmd_entry(&io->scsiio, NULL); 12397 mtx_lock(&lun->lun_lock); 12398 if (ctl_scsiio_lun_check(lun, 12399 entry, (struct ctl_scsiio *)io) != 0) { 12400 mtx_unlock(&lun->lun_lock); 12401 ctl_done(io); 12402 break; 12403 } 12404 io->io_hdr.flags |= CTL_FLAG_IS_WAS_ON_RTR; 12405 mtx_unlock(&lun->lun_lock); 12406 ctl_enqueue_rtr(io); 12407 break; 12408 } 12409 case CTL_MSG_FINISH_IO: 12410 if (softc->ha_mode == CTL_HA_MODE_XFER) { 12411 free_io = 0; 12412 ctl_done(io); 12413 } else { 12414 free_io = 1; 12415 mtx_lock(&lun->lun_lock); 12416 TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, 12417 ooa_links); 12418 ctl_check_blocked(lun); 12419 mtx_unlock(&lun->lun_lock); 12420 } 12421 break; 12422 case CTL_MSG_PERS_ACTION: 12423 ctl_hndl_per_res_out_on_other_sc( 12424 (union ctl_ha_msg *)&io->presio.pr_msg); 12425 free_io = 1; 12426 break; 12427 case CTL_MSG_BAD_JUJU: 12428 free_io = 0; 12429 ctl_done(io); 12430 break; 12431 case CTL_MSG_DATAMOVE: 12432 /* Only used in XFER mode */ 12433 free_io = 0; 12434 ctl_datamove_remote(io); 12435 break; 12436 case CTL_MSG_DATAMOVE_DONE: 12437 /* Only used in XFER mode */ 12438 free_io = 0; 12439 io->scsiio.be_move_done(io); 12440 break; 12441 default: 12442 free_io = 1; 12443 printf("%s: Invalid message type %d\n", 12444 __func__, io->io_hdr.msg_type); 12445 break; 12446 } 12447 if (free_io) 12448 ctl_free_io(io); 12449 12450} 


/*
 * Returns the match type in the case of a match, or CTL_LUN_PAT_NONE if
 * there is no match.
 */
static ctl_lun_error_pattern
ctl_cmd_pattern_match(struct ctl_scsiio *ctsio, struct ctl_error_desc *desc)
{
	const struct ctl_cmd_entry *entry;
	ctl_lun_error_pattern filtered_pattern, pattern;

	pattern = desc->error_pattern;

	/*
	 * XXX KDM we need more data passed into this function to match a
	 * custom pattern, and we actually need to implement custom pattern
	 * matching.
	 */
	if (pattern & CTL_LUN_PAT_CMD)
		return (CTL_LUN_PAT_CMD);

	if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_ANY)
		return (CTL_LUN_PAT_ANY);

	entry = ctl_get_cmd_entry(ctsio, NULL);

	filtered_pattern = entry->pattern & pattern;

	/*
	 * If the user requested specific flags in the pattern (e.g.
	 * CTL_LUN_PAT_RANGE), make sure the command supports all of those
	 * flags.
	 *
	 * If the user did not specify any flags, it doesn't matter whether
	 * or not the command supports the flags.
	 */
	if ((filtered_pattern & ~CTL_LUN_PAT_MASK) !=
	     (pattern & ~CTL_LUN_PAT_MASK))
		return (CTL_LUN_PAT_NONE);

	/*
	 * If the user asked for a range check, see if the requested LBA
	 * range overlaps with this command's LBA range.
	 */
	if (filtered_pattern & CTL_LUN_PAT_RANGE) {
		uint64_t lba1;
		uint64_t len1;
		ctl_action action;
		int retval;

		retval = ctl_get_lba_len((union ctl_io *)ctsio, &lba1, &len1);
		if (retval != 0)
			return (CTL_LUN_PAT_NONE);

		action = ctl_extent_check_lba(lba1, len1, desc->lba_range.lba,
					      desc->lba_range.len, FALSE);
		/*
		 * A "pass" means that the LBA ranges don't overlap, so
		 * this doesn't match the user's range criteria.
		 */
		if (action == CTL_ACTION_PASS)
			return (CTL_LUN_PAT_NONE);
	}

	return (filtered_pattern);
}

/*
 * Walk the LUN's error-injection descriptor list and apply the first-level
 * effect of every descriptor whose pattern matches this I/O (set aborted /
 * medium error / unit attention / custom sense).  One-shot descriptors
 * (those without CTL_LUN_INJ_CONTINUOUS) are removed and freed after
 * firing.  Caller must hold lun->lun_lock.
 */
static void
ctl_inject_error(struct ctl_lun *lun, union ctl_io *io)
{
	struct ctl_error_desc *desc, *desc2;

	mtx_assert(&lun->lun_lock, MA_OWNED);

	STAILQ_FOREACH_SAFE(desc, &lun->error_list, links, desc2) {
		ctl_lun_error_pattern pattern;
		/*
		 * Check to see whether this particular command matches
		 * the pattern in the descriptor.
		 */
		pattern = ctl_cmd_pattern_match(&io->scsiio, desc);
		if ((pattern & CTL_LUN_PAT_MASK) == CTL_LUN_PAT_NONE)
			continue;

		switch (desc->lun_error & CTL_LUN_INJ_TYPE) {
		case CTL_LUN_INJ_ABORTED:
			ctl_set_aborted(&io->scsiio);
			break;
		case CTL_LUN_INJ_MEDIUM_ERR:
			ctl_set_medium_error(&io->scsiio);
			break;
		case CTL_LUN_INJ_UA:
			/* 29h/00h  POWER ON, RESET, OR BUS DEVICE RESET
			 * OCCURRED */
			ctl_set_ua(&io->scsiio, 0x29, 0x00);
			break;
		case CTL_LUN_INJ_CUSTOM:
			/*
			 * We're assuming the user knows what he is doing.
			 * Just copy the sense information without doing
			 * checks.
			 */
			bcopy(&desc->custom_sense, &io->scsiio.sense_data,
			      MIN(sizeof(desc->custom_sense),
				  sizeof(io->scsiio.sense_data)));
			io->scsiio.scsi_status = SCSI_STATUS_CHECK_COND;
			io->scsiio.sense_len = SSD_FULL_SIZE;
			io->io_hdr.status = CTL_SCSI_ERROR | CTL_AUTOSENSE;
			break;
		case CTL_LUN_INJ_NONE:
		default:
			/*
			 * If this is an error injection type we don't know
			 * about, clear the continuous flag (if it is set)
			 * so it will get deleted below.
			 */
			desc->lun_error &= ~CTL_LUN_INJ_CONTINUOUS;
			break;
		}
		/*
		 * By default, each error injection action is a one-shot
		 */
		if (desc->lun_error & CTL_LUN_INJ_CONTINUOUS)
			continue;

		STAILQ_REMOVE(&lun->error_list, desc, ctl_error_desc, links);

		free(desc, M_CTL);
	}
}

#ifdef CTL_IO_DELAY
/*
 * Callout handler that restarts a datamove that was deferred by the
 * per-LUN datamove delay (debug feature).
 */
static void
ctl_datamove_timer_wakeup(void *arg)
{
	union ctl_io *io;

	io = (union ctl_io *)arg;

	ctl_datamove(io);
}
#endif /* CTL_IO_DELAY */

/*
 * Move data between the backend and the frontend for an I/O.  Handles the
 * optional debug delay (CTL_IO_DELAY), fails the move for aborted I/Os,
 * short-circuits zero-length moves, and in HA XFER mode ships the S/G list
 * to the peer controller as CTL_MSG_DATAMOVE messages; otherwise hands the
 * I/O to the owning front end's fe_datamove callback.
 */
void
ctl_datamove(union ctl_io *io)
{
	void (*fe_datamove)(union ctl_io *io);

	mtx_assert(&control_softc->ctl_lock, MA_NOTOWNED);

	CTL_DEBUG_PRINT(("ctl_datamove\n"));

#ifdef CTL_TIME_IO
	/* Log any I/O that has been outstanding longer than the limit. */
	if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) {
		char str[256];
		char path_str[64];
		struct sbuf sb;

		ctl_scsi_path_string(io, path_str, sizeof(path_str));
		sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN);

		sbuf_cat(&sb, path_str);
		switch (io->io_hdr.io_type) {
		case CTL_IO_SCSI:
			ctl_scsi_command_string(&io->scsiio, NULL, &sb);
			sbuf_printf(&sb, "\n");
			sbuf_cat(&sb, path_str);
			sbuf_printf(&sb, "Tag: 0x%04x, type %d\n",
				    io->scsiio.tag_num, io->scsiio.tag_type);
			break;
		case CTL_IO_TASK:
			sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, "
				    "Tag Type: %d\n", io->taskio.task_action,
				    io->taskio.tag_num, io->taskio.tag_type);
			break;
		default:
			printf("Invalid CTL I/O type %d\n", io->io_hdr.io_type);
			panic("Invalid CTL I/O type %d\n", io->io_hdr.io_type);
			break;
		}
		sbuf_cat(&sb, path_str);
		sbuf_printf(&sb, "ctl_datamove: %jd seconds\n",
			    (intmax_t)time_uptime - io->io_hdr.start_time);
		sbuf_finish(&sb);
		printf("%s", sbuf_data(&sb));
	}
#endif /* CTL_TIME_IO */

#ifdef CTL_IO_DELAY
	if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) {
		struct ctl_lun *lun;

		/* NOTE(review): lun is assigned but unused in this branch. */
		lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

		io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE;
	} else {
		struct ctl_lun *lun;

		lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
		if ((lun != NULL)
		 && (lun->delay_info.datamove_delay > 0)) {
			struct callout *callout;

			/* Defer the datamove; the callout re-enters here. */
			callout = (struct callout *)&io->io_hdr.timer_bytes;
			callout_init(callout, /*mpsafe*/ 1);
			io->io_hdr.flags |= CTL_FLAG_DELAY_DONE;
			callout_reset(callout,
				      lun->delay_info.datamove_delay * hz,
				      ctl_datamove_timer_wakeup, io);
			if (lun->delay_info.datamove_type ==
			    CTL_DELAY_TYPE_ONESHOT)
				lun->delay_info.datamove_delay = 0;
			return;
		}
	}
#endif

	/*
	 * This command has been aborted.  Set the port status, so we fail
	 * the data move.
	 */
	if (io->io_hdr.flags & CTL_FLAG_ABORT) {
		printf("ctl_datamove: tag 0x%04x on (%ju:%d:%ju:%d) aborted\n",
		       io->scsiio.tag_num,(uintmax_t)io->io_hdr.nexus.initid.id,
		       io->io_hdr.nexus.targ_port,
		       (uintmax_t)io->io_hdr.nexus.targ_target.id,
		       io->io_hdr.nexus.targ_lun);
		io->io_hdr.port_status = 31337;
		/*
		 * Note that the backend, in this case, will get the
		 * callback in its context.  In other cases it may get
		 * called in the frontend's interrupt thread context.
		 */
		io->scsiio.be_move_done(io);
		return;
	}

	/* Don't confuse frontend with zero length data move. */
	if (io->scsiio.kern_data_len == 0) {
		io->scsiio.be_move_done(io);
		return;
	}

	/*
	 * If we're in XFER mode and this I/O is from the other shelf
	 * controller, we need to send the DMA to the other side to
	 * actually transfer the data to/from the host.  In serialize only
	 * mode the transfer happens below CTL and ctl_datamove() is only
	 * called on the machine that originally received the I/O.
	 */
	if ((control_softc->ha_mode == CTL_HA_MODE_XFER)
	 && (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) {
		union ctl_ha_msg msg;
		uint32_t sg_entries_sent;
		int do_sg_copy;
		int i;

		memset(&msg, 0, sizeof(msg));
		msg.hdr.msg_type = CTL_MSG_DATAMOVE;
		msg.hdr.original_sc = io->io_hdr.original_sc;
		msg.hdr.serializing_sc = io;
		msg.hdr.nexus = io->io_hdr.nexus;
		msg.dt.flags = io->io_hdr.flags;
		/*
		 * We convert everything into a S/G list here.  We can't
		 * pass by reference, only by value between controllers.
		 * So we can't pass a pointer to the S/G list, only as many
		 * S/G entries as we can fit in here.  If it's possible for
		 * us to get more than CTL_HA_MAX_SG_ENTRIES S/G entries,
		 * then we need to break this up into multiple transfers.
		 */
		if (io->scsiio.kern_sg_entries == 0) {
			msg.dt.kern_sg_entries = 1;
			/*
			 * If this is in cached memory, flush the cache
			 * before we send the DMA request to the other
			 * controller.  We want to do this in either the
			 * read or the write case.  The read case is
			 * straightforward.  In the write case, we want to
			 * make sure nothing is in the local cache that
			 * could overwrite the DMAed data.
			 */
			if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) {
				/*
				 * XXX KDM use bus_dmamap_sync() here.
				 */
			}

			/*
			 * Convert to a physical address if this is a
			 * virtual address.
			 */
			if (io->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
				msg.dt.sg_list[0].addr =
					io->scsiio.kern_data_ptr;
			} else {
				/*
				 * XXX KDM use busdma here!
				 */
#if 0
				msg.dt.sg_list[0].addr = (void *)
					vtophys(io->scsiio.kern_data_ptr);
#endif
			}

			msg.dt.sg_list[0].len = io->scsiio.kern_data_len;
			do_sg_copy = 0;
		} else {
			struct ctl_sg_entry *sgl;

			do_sg_copy = 1;
			msg.dt.kern_sg_entries = io->scsiio.kern_sg_entries;
			sgl = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
			if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) {
				/*
				 * XXX KDM use bus_dmamap_sync() here.
				 */
			}
		}

		msg.dt.kern_data_len = io->scsiio.kern_data_len;
		msg.dt.kern_total_len = io->scsiio.kern_total_len;
		msg.dt.kern_data_resid = io->scsiio.kern_data_resid;
		msg.dt.kern_rel_offset = io->scsiio.kern_rel_offset;
		msg.dt.sg_sequence = 0;

		/*
		 * Loop until we've sent all of the S/G entries.  On the
		 * other end, we'll recompose these S/G entries into one
		 * contiguous list before passing it to the backend.
		 */
		for (sg_entries_sent = 0; sg_entries_sent <
		     msg.dt.kern_sg_entries; msg.dt.sg_sequence++) {
			msg.dt.cur_sg_entries = MIN((sizeof(msg.dt.sg_list)/
				sizeof(msg.dt.sg_list[0])),
				msg.dt.kern_sg_entries - sg_entries_sent);

			if (do_sg_copy != 0) {
				struct ctl_sg_entry *sgl;
				int j;

				sgl = (struct ctl_sg_entry *)
					io->scsiio.kern_data_ptr;
				/*
				 * If this is in cached memory, flush the cache
				 * before we send the DMA request to the other
				 * controller.  We want to do this in either
				 * the * read or the write case.  The read
				 * case is straightforward.  In the write
				 * case, we want to make sure nothing is
				 * in the local cache that could overwrite
				 * the DMAed data.
				 */

				/*
				 * NOTE(review): the loop bound below looks
				 * wrong for the second and later batches —
				 * i starts at sg_entries_sent but is compared
				 * against cur_sg_entries (the batch size),
				 * presumably it should be
				 * sg_entries_sent + cur_sg_entries; only
				 * matters when more than one batch is
				 * needed — confirm before relying on
				 * multi-batch HA transfers.
				 */
				for (i = sg_entries_sent, j = 0;
				     i < msg.dt.cur_sg_entries; i++, j++) {
					if ((io->io_hdr.flags &
					     CTL_FLAG_NO_DATASYNC) == 0) {
						/*
						 * XXX KDM use bus_dmamap_sync()
						 */
					}
					if ((io->io_hdr.flags &
					     CTL_FLAG_BUS_ADDR) == 0) {
						/*
						 * XXX KDM use busdma.
						 */
#if 0
						msg.dt.sg_list[j].addr =(void *)
						       vtophys(sgl[i].addr);
#endif
					} else {
						msg.dt.sg_list[j].addr =
							sgl[i].addr;
					}
					msg.dt.sg_list[j].len = sgl[i].len;
				}
			}

			sg_entries_sent += msg.dt.cur_sg_entries;
			if (sg_entries_sent >= msg.dt.kern_sg_entries)
				msg.dt.sg_last = 1;
			else
				msg.dt.sg_last = 0;

			/*
			 * XXX KDM drop and reacquire the lock here?
			 */
			if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
			    sizeof(msg), 0) > CTL_HA_STATUS_SUCCESS) {
				/*
				 * XXX do something here.
				 */
			}

			msg.dt.sent_sg_entries = sg_entries_sent;
		}
		io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
		if (io->io_hdr.flags & CTL_FLAG_FAILOVER)
			ctl_failover_io(io, /*have_lock*/ 0);

	} else {

		/*
		 * Lookup the fe_datamove() function for this particular
		 * front end.
		 */
		fe_datamove =
		    control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove;

		fe_datamove(io);
	}
}

/*
 * Tell the peer controller that a remote datamove has completed by sending
 * a CTL_MSG_DATAMOVE_DONE message carrying this I/O's status, sense data
 * and residuals.  If the I/O is marked for failover it is handed to
 * ctl_failover_io() instead of being sent.
 */
static void
ctl_send_datamove_done(union ctl_io *io, int have_lock)
{
	union ctl_ha_msg msg;
	int isc_status;

	memset(&msg, 0, sizeof(msg));

	msg.hdr.msg_type = CTL_MSG_DATAMOVE_DONE;
	msg.hdr.original_sc = io;
	msg.hdr.serializing_sc = io->io_hdr.serializing_sc;
	msg.hdr.nexus = io->io_hdr.nexus;
	msg.hdr.status = io->io_hdr.status;
	msg.scsi.tag_num = io->scsiio.tag_num;
	msg.scsi.tag_type = io->scsiio.tag_type;
	msg.scsi.scsi_status = io->scsiio.scsi_status;
	memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data,
	       sizeof(io->scsiio.sense_data));
	msg.scsi.sense_len = io->scsiio.sense_len;
	msg.scsi.sense_residual = io->scsiio.sense_residual;
	msg.scsi.fetd_status = io->io_hdr.port_status;
	msg.scsi.residual = io->scsiio.residual;
	io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;

	if (io->io_hdr.flags & CTL_FLAG_FAILOVER) {
		ctl_failover_io(io, /*have_lock*/ have_lock);
		return;
	}

	isc_status = ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg), 0);
	if (isc_status > CTL_HA_STATUS_SUCCESS) {
		/* XXX do something if this fails */
	}

}

/*
 * The DMA to the remote side is done, now we need to tell the other side
 * we're done so it can continue with its data movement.
12905 */ 12906static void 12907ctl_datamove_remote_write_cb(struct ctl_ha_dt_req *rq) 12908{ 12909 union ctl_io *io; 12910 12911 io = rq->context; 12912 12913 if (rq->ret != CTL_HA_STATUS_SUCCESS) { 12914 printf("%s: ISC DMA write failed with error %d", __func__, 12915 rq->ret); 12916 ctl_set_internal_failure(&io->scsiio, 12917 /*sks_valid*/ 1, 12918 /*retry_count*/ rq->ret); 12919 } 12920 12921 ctl_dt_req_free(rq); 12922 12923 /* 12924 * In this case, we had to malloc the memory locally. Free it. 12925 */ 12926 if ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0) { 12927 int i; 12928 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 12929 free(io->io_hdr.local_sglist[i].addr, M_CTL); 12930 } 12931 /* 12932 * The data is in local and remote memory, so now we need to send 12933 * status (good or back) back to the other side. 12934 */ 12935 ctl_send_datamove_done(io, /*have_lock*/ 0); 12936} 12937 12938/* 12939 * We've moved the data from the host/controller into local memory. Now we 12940 * need to push it over to the remote controller's memory. 12941 */ 12942static int 12943ctl_datamove_remote_dm_write_cb(union ctl_io *io) 12944{ 12945 int retval; 12946 12947 retval = 0; 12948 12949 retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_WRITE, 12950 ctl_datamove_remote_write_cb); 12951 12952 return (retval); 12953} 12954 12955static void 12956ctl_datamove_remote_write(union ctl_io *io) 12957{ 12958 int retval; 12959 void (*fe_datamove)(union ctl_io *io); 12960 12961 /* 12962 * - Get the data from the host/HBA into local memory. 12963 * - DMA memory from the local controller to the remote controller. 12964 * - Send status back to the remote controller. 
12965 */ 12966 12967 retval = ctl_datamove_remote_sgl_setup(io); 12968 if (retval != 0) 12969 return; 12970 12971 /* Switch the pointer over so the FETD knows what to do */ 12972 io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist; 12973 12974 /* 12975 * Use a custom move done callback, since we need to send completion 12976 * back to the other controller, not to the backend on this side. 12977 */ 12978 io->scsiio.be_move_done = ctl_datamove_remote_dm_write_cb; 12979 12980 fe_datamove = control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove; 12981 12982 fe_datamove(io); 12983 12984 return; 12985 12986} 12987 12988static int 12989ctl_datamove_remote_dm_read_cb(union ctl_io *io) 12990{ 12991#if 0 12992 char str[256]; 12993 char path_str[64]; 12994 struct sbuf sb; 12995#endif 12996 12997 /* 12998 * In this case, we had to malloc the memory locally. Free it. 12999 */ 13000 if ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0) { 13001 int i; 13002 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 13003 free(io->io_hdr.local_sglist[i].addr, M_CTL); 13004 } 13005 13006#if 0 13007 scsi_path_string(io, path_str, sizeof(path_str)); 13008 sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN); 13009 sbuf_cat(&sb, path_str); 13010 scsi_command_string(&io->scsiio, NULL, &sb); 13011 sbuf_printf(&sb, "\n"); 13012 sbuf_cat(&sb, path_str); 13013 sbuf_printf(&sb, "Tag: 0x%04x, type %d\n", 13014 io->scsiio.tag_num, io->scsiio.tag_type); 13015 sbuf_cat(&sb, path_str); 13016 sbuf_printf(&sb, "%s: flags %#x, status %#x\n", __func__, 13017 io->io_hdr.flags, io->io_hdr.status); 13018 sbuf_finish(&sb); 13019 printk("%s", sbuf_data(&sb)); 13020#endif 13021 13022 13023 /* 13024 * The read is done, now we need to send status (good or bad) back 13025 * to the other side. 
13026 */ 13027 ctl_send_datamove_done(io, /*have_lock*/ 0); 13028 13029 return (0); 13030} 13031 13032static void 13033ctl_datamove_remote_read_cb(struct ctl_ha_dt_req *rq) 13034{ 13035 union ctl_io *io; 13036 void (*fe_datamove)(union ctl_io *io); 13037 13038 io = rq->context; 13039 13040 if (rq->ret != CTL_HA_STATUS_SUCCESS) { 13041 printf("%s: ISC DMA read failed with error %d", __func__, 13042 rq->ret); 13043 ctl_set_internal_failure(&io->scsiio, 13044 /*sks_valid*/ 1, 13045 /*retry_count*/ rq->ret); 13046 } 13047 13048 ctl_dt_req_free(rq); 13049 13050 /* Switch the pointer over so the FETD knows what to do */ 13051 io->scsiio.kern_data_ptr = (uint8_t *)io->io_hdr.local_sglist; 13052 13053 /* 13054 * Use a custom move done callback, since we need to send completion 13055 * back to the other controller, not to the backend on this side. 13056 */ 13057 io->scsiio.be_move_done = ctl_datamove_remote_dm_read_cb; 13058 13059 /* XXX KDM add checks like the ones in ctl_datamove? */ 13060 13061 fe_datamove = control_softc->ctl_ports[ctl_port_idx(io->io_hdr.nexus.targ_port)]->fe_datamove; 13062 13063 fe_datamove(io); 13064} 13065 13066static int 13067ctl_datamove_remote_sgl_setup(union ctl_io *io) 13068{ 13069 struct ctl_sg_entry *local_sglist, *remote_sglist; 13070 struct ctl_sg_entry *local_dma_sglist, *remote_dma_sglist; 13071 struct ctl_softc *softc; 13072 int retval; 13073 int i; 13074 13075 retval = 0; 13076 softc = control_softc; 13077 13078 local_sglist = io->io_hdr.local_sglist; 13079 local_dma_sglist = io->io_hdr.local_dma_sglist; 13080 remote_sglist = io->io_hdr.remote_sglist; 13081 remote_dma_sglist = io->io_hdr.remote_dma_sglist; 13082 13083 if (io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) { 13084 for (i = 0; i < io->scsiio.kern_sg_entries; i++) { 13085 local_sglist[i].len = remote_sglist[i].len; 13086 13087 /* 13088 * XXX Detect the situation where the RS-level I/O 13089 * redirector on the other side has already read the 13090 * data off of the AOR RS on this 
 side, and
			 * transferred it to remote (mirror) memory on the
			 * other side.  Since we already have the data in
			 * memory here, we just need to use it.
			 *
			 * XXX KDM this can probably be removed once we
			 * get the cache device code in and take the
			 * current AOR implementation out.
			 */
#ifdef NEEDTOPORT
			if ((remote_sglist[i].addr >=
			    (void *)vtophys(softc->mirr->addr))
			 && (remote_sglist[i].addr <
			     ((void *)vtophys(softc->mirr->addr) +
			     CacheMirrorOffset))) {
				local_sglist[i].addr = remote_sglist[i].addr -
					CacheMirrorOffset;
				if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
				     CTL_FLAG_DATA_IN)
					io->io_hdr.flags |= CTL_FLAG_REDIR_DONE;
			} else {
				local_sglist[i].addr = remote_sglist[i].addr +
					CacheMirrorOffset;
			}
#endif
#if 0
			printf("%s: local %p, remote %p, len %d\n",
			       __func__, local_sglist[i].addr,
			       remote_sglist[i].addr, local_sglist[i].len);
#endif
		}
	} else {
		uint32_t len_to_go;

		/*
		 * In this case, we don't have automatically allocated
		 * memory for this I/O on this controller.  This typically
		 * happens with internal CTL I/O -- e.g. inquiry, mode
		 * sense, etc.  Anything coming from RAIDCore will have
		 * a mirror area available.
		 */
		len_to_go = io->scsiio.kern_data_len;

		/*
		 * Clear the no datasync flag, we have to use malloced
		 * buffers.
		 */
		io->io_hdr.flags &= ~CTL_FLAG_NO_DATASYNC;

		/*
		 * The difficult thing here is that the size of the various
		 * S/G segments may be different than the size from the
		 * remote controller.  That'll make it harder when DMAing
		 * the data back to the other side.
		 */
		for (i = 0; (i < sizeof(io->io_hdr.remote_sglist) /
		     sizeof(io->io_hdr.remote_sglist[0])) &&
		     (len_to_go > 0); i++) {
			/* Segments are capped at 128KB each. */
			local_sglist[i].len = MIN(len_to_go, 131072);
			/* DMA length is rounded up to an 8-byte multiple. */
			CTL_SIZE_8B(local_dma_sglist[i].len,
				    local_sglist[i].len);
			local_sglist[i].addr =
				malloc(local_dma_sglist[i].len, M_CTL,M_WAITOK);

			local_dma_sglist[i].addr = local_sglist[i].addr;

			/*
			 * NOTE(review): with M_WAITOK, kernel malloc(9)
			 * cannot return NULL, so this recovery path looks
			 * unreachable; kept as defensive cleanup.
			 */
			if (local_sglist[i].addr == NULL) {
				int j;

				printf("malloc failed for %zd bytes!",
				       local_dma_sglist[i].len);
				for (j = 0; j < i; j++) {
					free(local_sglist[j].addr, M_CTL);
				}
				ctl_set_internal_failure(&io->scsiio,
							 /*sks_valid*/ 1,
							 /*retry_count*/ 4857);
				retval = 1;
				goto bailout_error;

			}
			/* XXX KDM do we need a sync here? */

			len_to_go -= local_sglist[i].len;
		}
		/*
		 * Reset the number of S/G entries accordingly.  The
		 * original number of S/G entries is available in
		 * rem_sg_entries.
		 */
		io->scsiio.kern_sg_entries = i;

#if 0
		printf("%s: kern_sg_entries = %d\n", __func__,
		       io->scsiio.kern_sg_entries);
		for (i = 0; i < io->scsiio.kern_sg_entries; i++)
			printf("%s: sg[%d] = %p, %d (DMA: %d)\n", __func__, i,
			       local_sglist[i].addr, local_sglist[i].len,
			       local_dma_sglist[i].len);
#endif
	}


	return (retval);

bailout_error:

	ctl_send_datamove_done(io, /*have_lock*/ 0);

	return (retval);
}

/*
 * Drive the actual HA DMA of the I/O's data between the two controllers,
 * walking the (possibly differently sized) local and remote S/G lists.
 * 'command' is CTL_HA_DT_CMD_READ or CTL_HA_DT_CMD_WRITE; 'callback' runs
 * when the last segment completes.  Returns non-zero on immediate failure.
 */
static int
ctl_datamove_remote_xfer(union ctl_io *io, unsigned command,
			 ctl_ha_dt_cb callback)
{
	struct ctl_ha_dt_req *rq;
	struct ctl_sg_entry *remote_sglist, *local_sglist;
	struct ctl_sg_entry *remote_dma_sglist, *local_dma_sglist;
	uint32_t local_used, remote_used, total_used;
	int retval;
	int i, j;

	retval = 0;

	rq = ctl_dt_req_alloc();

	/*
	 * If we failed to allocate the request, and if the DMA didn't fail
	 * anyway, set busy status.  This is just a resource allocation
	 * failure.
	 */
	if ((rq == NULL)
	 && ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE))
		ctl_set_busy(&io->scsiio);

	if ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE) {

		if (rq != NULL)
			ctl_dt_req_free(rq);

		/*
		 * The data move failed.  We need to return status back
		 * to the other controller.  No point in trying to DMA
		 * data to the remote controller.
13235 */ 13236 13237 ctl_send_datamove_done(io, /*have_lock*/ 0); 13238 13239 retval = 1; 13240 13241 goto bailout; 13242 } 13243 13244 local_sglist = io->io_hdr.local_sglist; 13245 local_dma_sglist = io->io_hdr.local_dma_sglist; 13246 remote_sglist = io->io_hdr.remote_sglist; 13247 remote_dma_sglist = io->io_hdr.remote_dma_sglist; 13248 local_used = 0; 13249 remote_used = 0; 13250 total_used = 0; 13251 13252 if (io->io_hdr.flags & CTL_FLAG_REDIR_DONE) { 13253 rq->ret = CTL_HA_STATUS_SUCCESS; 13254 rq->context = io; 13255 callback(rq); 13256 goto bailout; 13257 } 13258 13259 /* 13260 * Pull/push the data over the wire from/to the other controller. 13261 * This takes into account the possibility that the local and 13262 * remote sglists may not be identical in terms of the size of 13263 * the elements and the number of elements. 13264 * 13265 * One fundamental assumption here is that the length allocated for 13266 * both the local and remote sglists is identical. Otherwise, we've 13267 * essentially got a coding error of some sort. 13268 */ 13269 for (i = 0, j = 0; total_used < io->scsiio.kern_data_len; ) { 13270 int isc_ret; 13271 uint32_t cur_len, dma_length; 13272 uint8_t *tmp_ptr; 13273 13274 rq->id = CTL_HA_DATA_CTL; 13275 rq->command = command; 13276 rq->context = io; 13277 13278 /* 13279 * Both pointers should be aligned. But it is possible 13280 * that the allocation length is not. They should both 13281 * also have enough slack left over at the end, though, 13282 * to round up to the next 8 byte boundary. 13283 */ 13284 cur_len = MIN(local_sglist[i].len - local_used, 13285 remote_sglist[j].len - remote_used); 13286 13287 /* 13288 * In this case, we have a size issue and need to decrease 13289 * the size, except in the case where we actually have less 13290 * than 8 bytes left. In that case, we need to increase 13291 * the DMA length to get the last bit. 
13292 */ 13293 if ((cur_len & 0x7) != 0) { 13294 if (cur_len > 0x7) { 13295 cur_len = cur_len - (cur_len & 0x7); 13296 dma_length = cur_len; 13297 } else { 13298 CTL_SIZE_8B(dma_length, cur_len); 13299 } 13300 13301 } else 13302 dma_length = cur_len; 13303 13304 /* 13305 * If we had to allocate memory for this I/O, instead of using 13306 * the non-cached mirror memory, we'll need to flush the cache 13307 * before trying to DMA to the other controller. 13308 * 13309 * We could end up doing this multiple times for the same 13310 * segment if we have a larger local segment than remote 13311 * segment. That shouldn't be an issue. 13312 */ 13313 if ((io->io_hdr.flags & CTL_FLAG_NO_DATASYNC) == 0) { 13314 /* 13315 * XXX KDM use bus_dmamap_sync() here. 13316 */ 13317 } 13318 13319 rq->size = dma_length; 13320 13321 tmp_ptr = (uint8_t *)local_sglist[i].addr; 13322 tmp_ptr += local_used; 13323 13324 /* Use physical addresses when talking to ISC hardware */ 13325 if ((io->io_hdr.flags & CTL_FLAG_BUS_ADDR) == 0) { 13326 /* XXX KDM use busdma */ 13327#if 0 13328 rq->local = vtophys(tmp_ptr); 13329#endif 13330 } else 13331 rq->local = tmp_ptr; 13332 13333 tmp_ptr = (uint8_t *)remote_sglist[j].addr; 13334 tmp_ptr += remote_used; 13335 rq->remote = tmp_ptr; 13336 13337 rq->callback = NULL; 13338 13339 local_used += cur_len; 13340 if (local_used >= local_sglist[i].len) { 13341 i++; 13342 local_used = 0; 13343 } 13344 13345 remote_used += cur_len; 13346 if (remote_used >= remote_sglist[j].len) { 13347 j++; 13348 remote_used = 0; 13349 } 13350 total_used += cur_len; 13351 13352 if (total_used >= io->scsiio.kern_data_len) 13353 rq->callback = callback; 13354 13355 if ((rq->size & 0x7) != 0) { 13356 printf("%s: warning: size %d is not on 8b boundary\n", 13357 __func__, rq->size); 13358 } 13359 if (((uintptr_t)rq->local & 0x7) != 0) { 13360 printf("%s: warning: local %p not on 8b boundary\n", 13361 __func__, rq->local); 13362 } 13363 if (((uintptr_t)rq->remote & 0x7) != 0) { 13364 
printf("%s: warning: remote %p not on 8b boundary\n", 13365 __func__, rq->local); 13366 } 13367#if 0 13368 printf("%s: %s: local %#x remote %#x size %d\n", __func__, 13369 (command == CTL_HA_DT_CMD_WRITE) ? "WRITE" : "READ", 13370 rq->local, rq->remote, rq->size); 13371#endif 13372 13373 isc_ret = ctl_dt_single(rq); 13374 if (isc_ret == CTL_HA_STATUS_WAIT) 13375 continue; 13376 13377 if (isc_ret == CTL_HA_STATUS_DISCONNECT) { 13378 rq->ret = CTL_HA_STATUS_SUCCESS; 13379 } else { 13380 rq->ret = isc_ret; 13381 } 13382 callback(rq); 13383 goto bailout; 13384 } 13385 13386bailout: 13387 return (retval); 13388 13389} 13390 13391static void 13392ctl_datamove_remote_read(union ctl_io *io) 13393{ 13394 int retval; 13395 int i; 13396 13397 /* 13398 * This will send an error to the other controller in the case of a 13399 * failure. 13400 */ 13401 retval = ctl_datamove_remote_sgl_setup(io); 13402 if (retval != 0) 13403 return; 13404 13405 retval = ctl_datamove_remote_xfer(io, CTL_HA_DT_CMD_READ, 13406 ctl_datamove_remote_read_cb); 13407 if ((retval != 0) 13408 && ((io->io_hdr.flags & CTL_FLAG_AUTO_MIRROR) == 0)) { 13409 /* 13410 * Make sure we free memory if there was an error.. The 13411 * ctl_datamove_remote_xfer() function will send the 13412 * datamove done message, or call the callback with an 13413 * error if there is a problem. 13414 */ 13415 for (i = 0; i < io->scsiio.kern_sg_entries; i++) 13416 free(io->io_hdr.local_sglist[i].addr, M_CTL); 13417 } 13418 13419 return; 13420} 13421 13422/* 13423 * Process a datamove request from the other controller. This is used for 13424 * XFER mode only, not SER_ONLY mode. For writes, we DMA into local memory 13425 * first. Once that is complete, the data gets DMAed into the remote 13426 * controller's memory. For reads, we DMA from the remote controller's 13427 * memory into our memory first, and then move it out to the FETD. 
 */
static void
ctl_datamove_remote(union ctl_io *io)
{
	struct ctl_softc *softc;

	softc = control_softc;

	mtx_assert(&softc->ctl_lock, MA_NOTOWNED);

	/*
	 * Note that we look for an aborted I/O here, but don't do some of
	 * the other checks that ctl_datamove() normally does.
	 * We don't need to run the datamove delay code, since that should
	 * have been done if need be on the other controller.
	 */
	if (io->io_hdr.flags & CTL_FLAG_ABORT) {
		printf("%s: tag 0x%04x on (%d:%d:%d:%d) aborted\n", __func__,
		       io->scsiio.tag_num, io->io_hdr.nexus.initid.id,
		       io->io_hdr.nexus.targ_port,
		       io->io_hdr.nexus.targ_target.id,
		       io->io_hdr.nexus.targ_lun);
		/* Arbitrary sentinel port status marking an aborted xfer. */
		io->io_hdr.port_status = 31338;
		ctl_send_datamove_done(io, /*have_lock*/ 0);
		return;
	}

	if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT) {
		ctl_datamove_remote_write(io);
	} else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN){
		ctl_datamove_remote_read(io);
	} else {
		/*
		 * Neither direction flag is set -- report an internal
		 * failure (CHECK CONDITION) back to the other controller.
		 */
		union ctl_ha_msg msg;
		struct scsi_sense_data *sense;
		uint8_t sks[3];
		int retry_count;

		memset(&msg, 0, sizeof(msg));

		msg.hdr.msg_type = CTL_MSG_BAD_JUJU;
		msg.hdr.status = CTL_SCSI_ERROR;
		msg.scsi.scsi_status = SCSI_STATUS_CHECK_COND;

		/* Arbitrary marker value carried in the sense-key bytes. */
		retry_count = 4243;

		sense = &msg.scsi.sense_data;
		sks[0] = SSD_SCS_VALID;
		sks[1] = (retry_count >> 8) & 0xff;
		sks[2] = retry_count & 0xff;

		/* "Internal target failure" */
		scsi_set_sense_data(sense,
				    /*sense_format*/ SSD_TYPE_NONE,
				    /*current_error*/ 1,
				    /*sense_key*/ SSD_KEY_HARDWARE_ERROR,
				    /*asc*/ 0x44,
				    /*ascq*/ 0x00,
				    /*type*/ SSD_ELEM_SKS,
				    /*size*/ sizeof(sks),
				    /*data*/ sks,
				    SSD_ELEM_NONE);

		io->io_hdr.flags &= ~CTL_FLAG_IO_ACTIVE;
		if (io->io_hdr.flags & CTL_FLAG_FAILOVER) {
			ctl_failover_io(io, /*have_lock*/ 1);
			return;
		}

		if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg, sizeof(msg), 0) >
		    CTL_HA_STATUS_SUCCESS) {
			/* XXX KDM what to do if this fails? */
		}
		return;
	}

}

/*
 * Final completion processing for an I/O: timing/statistics, error
 * injection, OOA-queue removal, unblocking, and handing status to the
 * FETD or the other shelf controller.
 */
static int
ctl_process_done(union ctl_io *io)
{
	struct ctl_lun *lun;
	struct ctl_softc *softc = control_softc;
	void (*fe_done)(union ctl_io *io);
	uint32_t targ_port = ctl_port_idx(io->io_hdr.nexus.targ_port);

	CTL_DEBUG_PRINT(("ctl_process_done\n"));

	fe_done = softc->ctl_ports[targ_port]->fe_done;

#ifdef CTL_TIME_IO
	/* Log commands that took longer than ctl_time_io_secs to finish. */
	if ((time_uptime - io->io_hdr.start_time) > ctl_time_io_secs) {
		char str[256];
		char path_str[64];
		struct sbuf sb;

		ctl_scsi_path_string(io, path_str, sizeof(path_str));
		sbuf_new(&sb, str, sizeof(str), SBUF_FIXEDLEN);

		sbuf_cat(&sb, path_str);
		switch (io->io_hdr.io_type) {
		case CTL_IO_SCSI:
			ctl_scsi_command_string(&io->scsiio, NULL, &sb);
			sbuf_printf(&sb, "\n");
			sbuf_cat(&sb, path_str);
			sbuf_printf(&sb, "Tag: 0x%04x, type %d\n",
				    io->scsiio.tag_num, io->scsiio.tag_type);
			break;
		case CTL_IO_TASK:
			sbuf_printf(&sb, "Task I/O type: %d, Tag: 0x%04x, "
				    "Tag Type: %d\n", io->taskio.task_action,
				    io->taskio.tag_num, io->taskio.tag_type);
			break;
		default:
			printf("Invalid CTL I/O type %d\n", io->io_hdr.io_type);
			panic("Invalid CTL I/O type %d\n", io->io_hdr.io_type);
			break;
		}
		sbuf_cat(&sb, path_str);
		sbuf_printf(&sb, "ctl_process_done: %jd seconds\n",
			    (intmax_t)time_uptime - io->io_hdr.start_time);
		sbuf_finish(&sb);
		printf("%s", sbuf_data(&sb));
	}
#endif /* CTL_TIME_IO */

	switch (io->io_hdr.io_type) {
	case CTL_IO_SCSI:
		break;
	case CTL_IO_TASK:
		if (bootverbose || (ctl_debug &
 CTL_DEBUG_INFO))
			ctl_io_error_print(io, NULL);
		/* Task I/O finishes here; no LUN stats or OOA handling. */
		if (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)
			ctl_free_io(io);
		else
			fe_done(io);
		return (CTL_RETVAL_COMPLETE);
	default:
		panic("ctl_process_done: invalid io type %d\n",
		      io->io_hdr.io_type);
		break; /* NOTREACHED */
	}

	lun = (struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;
	if (lun == NULL) {
		CTL_DEBUG_PRINT(("NULL LUN for lun %d\n",
				 io->io_hdr.nexus.targ_mapped_lun));
		goto bailout;
	}

	mtx_lock(&lun->lun_lock);

	/*
	 * Check to see if we have any errors to inject here.  We only
	 * inject errors for commands that don't already have errors set.
	 */
	if ((STAILQ_FIRST(&lun->error_list) != NULL) &&
	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS) &&
	    ((io->io_hdr.flags & CTL_FLAG_STATUS_SENT) == 0))
		ctl_inject_error(lun, io);

	/*
	 * XXX KDM how do we treat commands that aren't completed
	 * successfully?
	 *
	 * XXX KDM should we also track I/O latency?
	 */
	if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS &&
	    io->io_hdr.io_type == CTL_IO_SCSI) {
#ifdef CTL_TIME_IO
		struct bintime cur_bt;
#endif
		int type;

		/* Bucket the per-port statistics by data direction. */
		if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
		    CTL_FLAG_DATA_IN)
			type = CTL_STATS_READ;
		else if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
		    CTL_FLAG_DATA_OUT)
			type = CTL_STATS_WRITE;
		else
			type = CTL_STATS_NO_IO;

		lun->stats.ports[targ_port].bytes[type] +=
		    io->scsiio.kern_total_len;
		lun->stats.ports[targ_port].operations[type]++;
#ifdef CTL_TIME_IO
		bintime_add(&lun->stats.ports[targ_port].dma_time[type],
		   &io->io_hdr.dma_bt);
		lun->stats.ports[targ_port].num_dmas[type] +=
		    io->io_hdr.num_dmas;
		getbintime(&cur_bt);
		bintime_sub(&cur_bt, &io->io_hdr.start_bt);
		bintime_add(&lun->stats.ports[targ_port].time[type], &cur_bt);
#endif
	}

	/*
	 * Remove this from the OOA queue.
	 */
	TAILQ_REMOVE(&lun->ooa_queue, &io->io_hdr, ooa_links);

	/*
	 * Run through the blocked queue on this LUN and see if anything
	 * has become unblocked, now that this transaction is done.
	 */
	ctl_check_blocked(lun);

	/*
	 * If the LUN has been invalidated, free it if there is nothing
	 * left on its OOA queue.
	 */
	if ((lun->flags & CTL_LUN_INVALID)
	 && TAILQ_EMPTY(&lun->ooa_queue)) {
		/* lun_lock must be dropped before taking ctl_lock. */
		mtx_unlock(&lun->lun_lock);
		mtx_lock(&softc->ctl_lock);
		ctl_free_lun(lun);
		mtx_unlock(&softc->ctl_lock);
	} else
		mtx_unlock(&lun->lun_lock);

bailout:

	/*
	 * If this command has been aborted, make sure we set the status
	 * properly.  The FETD is responsible for freeing the I/O and doing
	 * whatever it needs to do to clean up its state.
	 */
	if (io->io_hdr.flags & CTL_FLAG_ABORT)
		ctl_set_task_aborted(&io->scsiio);

	/*
	 * If enabled, print command error status.
	 * We don't print UAs unless debugging was enabled explicitly.
	 */
	do {
		if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)
			break;
		if (!bootverbose && (ctl_debug & CTL_DEBUG_INFO) == 0)
			break;
		if ((ctl_debug & CTL_DEBUG_INFO) == 0 &&
		    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SCSI_ERROR) &&
		     (io->scsiio.scsi_status == SCSI_STATUS_CHECK_COND)) {
			int error_code, sense_key, asc, ascq;

			scsi_extract_sense_len(&io->scsiio.sense_data,
			    io->scsiio.sense_len, &error_code, &sense_key,
			    &asc, &ascq, /*show_errors*/ 0);
			if (sense_key == SSD_KEY_UNIT_ATTENTION)
				break;
		}

		ctl_io_error_print(io, NULL);
	} while (0);

	/*
	 * Tell the FETD or the other shelf controller we're done with this
	 * command.  Note that only SCSI commands get to this point.  Task
	 * management commands are completed above.
	 *
	 * We only send status to the other controller if we're in XFER
	 * mode.  In SER_ONLY mode, the I/O is done on the controller that
	 * received the I/O (from CTL's perspective), and so the status is
	 * generated there.
	 *
	 * XXX KDM if we hold the lock here, we could cause a deadlock
	 * if the frontend comes back in in this context to queue
	 * something.
	 */
	if ((softc->ha_mode == CTL_HA_MODE_XFER)
	 && (io->io_hdr.flags & CTL_FLAG_FROM_OTHER_SC)) {
		union ctl_ha_msg msg;

		memset(&msg, 0, sizeof(msg));
		msg.hdr.msg_type = CTL_MSG_FINISH_IO;
		msg.hdr.original_sc = io->io_hdr.original_sc;
		msg.hdr.nexus = io->io_hdr.nexus;
		msg.hdr.status = io->io_hdr.status;
		msg.scsi.scsi_status = io->scsiio.scsi_status;
		msg.scsi.tag_num = io->scsiio.tag_num;
		msg.scsi.tag_type = io->scsiio.tag_type;
		msg.scsi.sense_len = io->scsiio.sense_len;
		msg.scsi.sense_residual = io->scsiio.sense_residual;
		msg.scsi.residual = io->scsiio.residual;
		memcpy(&msg.scsi.sense_data, &io->scsiio.sense_data,
		       sizeof(io->scsiio.sense_data));
		/*
		 * We copy this whether or not this is an I/O-related
		 * command.  Otherwise, we'd have to go and check to see
		 * whether it's a read/write command, and it really isn't
		 * worth it.
		 */
		memcpy(&msg.scsi.lbalen,
		       &io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes,
		       sizeof(msg.scsi.lbalen));

		if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg,
				    sizeof(msg), 0) > CTL_HA_STATUS_SUCCESS) {
			/* XXX do something here */
		}

		ctl_free_io(io);
	} else
		fe_done(io);

	return (CTL_RETVAL_COMPLETE);
}

#ifdef CTL_WITH_CA
/*
 * Front end should call this if it doesn't do autosense.  When the request
 * sense comes back in from the initiator, we'll dequeue this and send it.
 */
int
ctl_queue_sense(union ctl_io *io)
{
	struct ctl_lun *lun;
	struct ctl_softc *softc;
	uint32_t initidx, targ_lun;

	softc = control_softc;

	CTL_DEBUG_PRINT(("ctl_queue_sense\n"));

	/*
	 * LUN lookup will likely move to the ctl_work_thread() once we
	 * have our new queueing infrastructure (that doesn't put things on
	 * a per-LUN queue initially).
  That is so that we can handle
	 * things like an INQUIRY to a LUN that we don't have enabled.  We
	 * can't deal with that right now.
	 */
	mtx_lock(&softc->ctl_lock);

	/*
	 * If we don't have a LUN for this, just toss the sense
	 * information.
	 */
	targ_lun = io->io_hdr.nexus.targ_lun;
	targ_lun = ctl_map_lun(softc, io->io_hdr.nexus.targ_port, targ_lun);
	if ((targ_lun < CTL_MAX_LUNS)
	 && (softc->ctl_luns[targ_lun] != NULL))
		lun = softc->ctl_luns[targ_lun];
	else
		goto bailout;

	initidx = ctl_get_initindex(&io->io_hdr.nexus);

	mtx_lock(&lun->lun_lock);
	/*
	 * Already have CA set for this LUN...toss the sense information.
	 */
	if (ctl_is_set(lun->have_ca, initidx)) {
		mtx_unlock(&lun->lun_lock);
		goto bailout;
	}

	memcpy(&lun->pending_sense[initidx], &io->scsiio.sense_data,
	       MIN(sizeof(lun->pending_sense[initidx]),
	       sizeof(io->scsiio.sense_data)));
	ctl_set_mask(lun->have_ca, initidx);
	mtx_unlock(&lun->lun_lock);

bailout:
	mtx_unlock(&softc->ctl_lock);

	/* The sense I/O is consumed here regardless of outcome. */
	ctl_free_io(io);

	return (CTL_RETVAL_COMPLETE);
}
#endif

/*
 * Primary command inlet from frontend ports.  All SCSI and task I/O
 * requests must go through this function.
 */
int
ctl_queue(union ctl_io *io)
{

	CTL_DEBUG_PRINT(("ctl_queue cdb[0]=%02X\n", io->scsiio.cdb[0]));

#ifdef CTL_TIME_IO
	io->io_hdr.start_time = time_uptime;
	getbintime(&io->io_hdr.start_bt);
#endif /* CTL_TIME_IO */

	/* Map FE-specific LUN ID into global one. */
	io->io_hdr.nexus.targ_mapped_lun =
	    ctl_map_lun(control_softc, io->io_hdr.nexus.targ_port,
	                io->io_hdr.nexus.targ_lun);

	switch (io->io_hdr.io_type) {
	case CTL_IO_SCSI:
	case CTL_IO_TASK:
		if (ctl_debug & CTL_DEBUG_CDB)
			ctl_io_print(io);
		ctl_enqueue_incoming(io);
		break;
	default:
		printf("ctl_queue: unknown I/O type %d\n", io->io_hdr.io_type);
		return (EINVAL);
	}

	return (CTL_RETVAL_COMPLETE);
}

#ifdef CTL_IO_DELAY
/* Callout handler: complete an I/O whose "done" was artificially delayed. */
static void
ctl_done_timer_wakeup(void *arg)
{
	union ctl_io *io;

	io = (union ctl_io *)arg;
	ctl_done(io);
}
#endif /* CTL_IO_DELAY */

/*
 * Common completion entry point: notify the serializing controller if
 * needed, optionally apply a configured completion delay, then hand the
 * I/O to the done queue.
 */
void
ctl_done(union ctl_io *io)
{

	/*
	 * Enable this to catch duplicate completion issues.
	 */
#if 0
	if (io->io_hdr.flags & CTL_FLAG_ALREADY_DONE) {
		printf("%s: type %d msg %d cdb %x iptl: "
		       "%d:%d:%d:%d tag 0x%04x "
		       "flag %#x status %x\n",
			__func__,
			io->io_hdr.io_type,
			io->io_hdr.msg_type,
			io->scsiio.cdb[0],
			io->io_hdr.nexus.initid.id,
			io->io_hdr.nexus.targ_port,
			io->io_hdr.nexus.targ_target.id,
			io->io_hdr.nexus.targ_lun,
			(io->io_hdr.io_type ==
			CTL_IO_TASK) ?
			io->taskio.tag_num :
			io->scsiio.tag_num,
			io->io_hdr.flags,
			io->io_hdr.status);
	} else
		io->io_hdr.flags |= CTL_FLAG_ALREADY_DONE;
#endif

	/*
	 * This is an internal copy of an I/O, and should not go through
	 * the normal done processing logic.
	 */
	if (io->io_hdr.flags & CTL_FLAG_INT_COPY)
		return;

	/*
	 * We need to send a msg to the serializing shelf to finish the IO
	 * as well.  We don't send a finish message to the other shelf if
	 * this is a task management command.  Task management commands
	 * aren't serialized in the OOA queue, but rather just executed on
	 * both shelf controllers for commands that originated on that
	 * controller.
	 */
	if ((io->io_hdr.flags & CTL_FLAG_SENT_2OTHER_SC)
	 && (io->io_hdr.io_type != CTL_IO_TASK)) {
		union ctl_ha_msg msg_io;

		msg_io.hdr.msg_type = CTL_MSG_FINISH_IO;
		msg_io.hdr.serializing_sc = io->io_hdr.serializing_sc;
		if (ctl_ha_msg_send(CTL_HA_CHAN_CTL, &msg_io,
		    sizeof(msg_io), 0 ) != CTL_HA_STATUS_SUCCESS) {
		}
		/* continue on to finish IO */
	}
#ifdef CTL_IO_DELAY
	if (io->io_hdr.flags & CTL_FLAG_DELAY_DONE) {
		/* Second pass after the delay fired: just clear the flag. */
		struct ctl_lun *lun;

		lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

		io->io_hdr.flags &= ~CTL_FLAG_DELAY_DONE;
	} else {
		struct ctl_lun *lun;

		lun =(struct ctl_lun *)io->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

		if ((lun != NULL)
		 && (lun->delay_info.done_delay > 0)) {
			struct callout *callout;

			callout = (struct callout *)&io->io_hdr.timer_bytes;
			callout_init(callout, /*mpsafe*/ 1);
			io->io_hdr.flags |= CTL_FLAG_DELAY_DONE;
			callout_reset(callout,
				      lun->delay_info.done_delay * hz,
				      ctl_done_timer_wakeup, io);
			if (lun->delay_info.done_type == CTL_DELAY_TYPE_ONESHOT)
				lun->delay_info.done_delay = 0;
			return;
		}
	}
#endif /* CTL_IO_DELAY */

	ctl_enqueue_done(io);
}

/* Submit an ISC (inter-shelf-controller) SCSI I/O directly to the backend. */
int
ctl_isc(struct ctl_scsiio *ctsio)
{
	struct ctl_lun *lun;
	int retval;

	lun = (struct ctl_lun *)ctsio->io_hdr.ctl_private[CTL_PRIV_LUN].ptr;

	CTL_DEBUG_PRINT(("ctl_isc: command: %02x\n", ctsio->cdb[0]));

	CTL_DEBUG_PRINT(("ctl_isc: calling data_submit()\n"));

	retval = lun->backend->data_submit((union ctl_io *)ctsio);

	return (retval);
}


static void
ctl_work_thread(void *arg)
{
	struct ctl_thread *thr = (struct ctl_thread *)arg;
	struct ctl_softc *softc = thr->ctl_softc;
	union ctl_io *io;
	int retval;

	CTL_DEBUG_PRINT(("ctl_work_thread starting\n"));

	for (;;) {
		retval = 0;

		/*
		 * We handle the queues in this order:
		 * - ISC
		 * - done queue (to free up resources, unblock other commands)
		 * - RtR queue
		 * - incoming queue
		 *
		 * If those queues are empty, we break out of the loop and
		 * go to sleep.
		 */
		mtx_lock(&thr->queue_lock);
		io = (union ctl_io *)STAILQ_FIRST(&thr->isc_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&thr->isc_queue, links);
			mtx_unlock(&thr->queue_lock);
			ctl_handle_isc(io);
			continue;
		}
		io = (union ctl_io *)STAILQ_FIRST(&thr->done_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&thr->done_queue, links);
			/* clear any blocked commands, call fe_done */
			mtx_unlock(&thr->queue_lock);
			retval = ctl_process_done(io);
			continue;
		}
		io = (union ctl_io *)STAILQ_FIRST(&thr->incoming_queue);
		if (io != NULL) {
			STAILQ_REMOVE_HEAD(&thr->incoming_queue, links);
			mtx_unlock(&thr->queue_lock);
			if (io->io_hdr.io_type == CTL_IO_TASK)
				ctl_run_task(io);
			else
				ctl_scsiio_precheck(softc, &io->scsiio);
			continue;
		}
		/* RtR processing can be paused globally (e.g. during HA). */
		if (!ctl_pause_rtr) {
			io = (union ctl_io *)STAILQ_FIRST(&thr->rtr_queue);
			if (io != NULL) {
				STAILQ_REMOVE_HEAD(&thr->rtr_queue, links);
				mtx_unlock(&thr->queue_lock);
				retval = ctl_scsiio(&io->scsiio);
				if (retval != CTL_RETVAL_COMPLETE)
					CTL_DEBUG_PRINT(("ctl_scsiio failed\n"));
				continue;
			}
		}

		/* Sleep until we have something to do.  PDROP releases
		 * queue_lock, which is still held on this path. */
		mtx_sleep(thr, &thr->queue_lock, PDROP | PRIBIO, "-", 0);
	}
}

/* Worker that instantiates LUNs queued by backends. */
static void
ctl_lun_thread(void *arg)
{
	struct ctl_softc *softc = (struct ctl_softc *)arg;
	struct ctl_be_lun *be_lun;
	int retval;

	CTL_DEBUG_PRINT(("ctl_lun_thread starting\n"));

	for (;;) {
		retval = 0;
		mtx_lock(&softc->ctl_lock);
		be_lun = STAILQ_FIRST(&softc->pending_lun_queue);
		if (be_lun != NULL) {
			STAILQ_REMOVE_HEAD(&softc->pending_lun_queue, links);
			mtx_unlock(&softc->ctl_lock);
			ctl_create_lun(be_lun);
			continue;
		}

		/* Sleep until we have something to do. */
		mtx_sleep(&softc->pending_lun_queue, &softc->ctl_lock,
		    PDROP | PRIBIO, "-", 0);
	}
}

/*
 * Periodic thread that polls backend thin-provisioning attributes against
 * the mode-page thresholds and raises/clears the THIN PROVISIONING
 * SOFT THRESHOLD REACHED unit attention accordingly.
 */
static void
ctl_thresh_thread(void *arg)
{
	struct ctl_softc *softc = (struct ctl_softc *)arg;
	struct ctl_lun *lun;
	struct ctl_be_lun *be_lun;
	struct scsi_da_rw_recovery_page *rwpage;
	struct ctl_logical_block_provisioning_page *page;
	const char *attr;
	uint64_t thres, val;
	int i, e;

	CTL_DEBUG_PRINT(("ctl_thresh_thread starting\n"));

	for (;;) {
		mtx_lock(&softc->ctl_lock);
		STAILQ_FOREACH(lun, &softc->lun_list, links) {
			be_lun = lun->be_lun;
			if ((lun->flags & CTL_LUN_DISABLED) ||
			    (lun->flags & CTL_LUN_OFFLINE) ||
			    lun->backend->lun_attr == NULL)
				continue;
			rwpage = &lun->mode_pages.rw_er_page[CTL_PAGE_CURRENT];
			if ((rwpage->byte8 & SMS_RWER_LBPERE) == 0)
				continue;
			e = 0;
			page = &lun->mode_pages.lbp_page[CTL_PAGE_CURRENT];
			for (i = 0; i < CTL_NUM_LBP_THRESH; i++) {
				if ((page->descr[i].flags & SLBPPD_ENABLED) == 0)
					continue;
				thres = scsi_4btoul(page->descr[i].count);
				thres <<= CTL_LBP_EXPONENT;
				/* Map the SCSI resource code to a backend
				 * attribute name. */
				switch (page->descr[i].resource) {
				case 0x01:
					attr = "blocksavail";
					break;
				case 0x02:
					attr = "blocksused";
					break;
				case 0xf1:
					attr = "poolblocksavail";
					break;
				case 0xf2:
					attr = "poolblocksused";
					break;
				default:
					continue;
				}
				/* lun_attr may sleep; drop ctl_lock around
				 * the call. */
				mtx_unlock(&softc->ctl_lock); // XXX
				val = lun->backend->lun_attr(
				    lun->be_lun->be_lun, attr);
				mtx_lock(&softc->ctl_lock);
				if (val == UINT64_MAX)
					continue;
				if ((page->descr[i].flags & SLBPPD_ARMING_MASK)
				    == SLBPPD_ARMING_INC)
					e |= (val >= thres);
				else
					e |= (val <= thres);
			}
			mtx_lock(&lun->lun_lock);
			if (e) {
				/* Re-assert the UA at most once per
				 * CTL_LBP_UA_PERIOD. */
				if (lun->lasttpt == 0 ||
				    time_uptime - lun->lasttpt >= CTL_LBP_UA_PERIOD) {
					lun->lasttpt = time_uptime;
					ctl_est_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES);
				}
			} else {
				lun->lasttpt = 0;
				ctl_clr_ua_all(lun, -1, CTL_UA_THIN_PROV_THRES);
			}
			mtx_unlock(&lun->lun_lock);
		}
		mtx_unlock(&softc->ctl_lock);
		pause("-", CTL_LBP_PERIOD * hz);
	}
}

/*
 * Queue a new I/O to a worker thread chosen by port/initiator, so a given
 * nexus always lands on the same thread.
 */
static void
ctl_enqueue_incoming(union ctl_io *io)
{
	struct ctl_softc *softc = control_softc;
	struct ctl_thread *thr;
	u_int idx;

	idx = (io->io_hdr.nexus.targ_port * 127 +
	       io->io_hdr.nexus.initid.id) % worker_threads;
	thr = &softc->threads[idx];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->incoming_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

/* Queue a ready-to-run I/O to the worker that owns its mapped LUN. */
static void
ctl_enqueue_rtr(union ctl_io *io)
{
	struct ctl_softc *softc = control_softc;
	struct ctl_thread *thr;

	thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->rtr_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

/* Queue a completed I/O for final processing by its LUN's worker. */
static void
ctl_enqueue_done(union ctl_io *io)
{
	struct ctl_softc *softc = control_softc;
	struct ctl_thread *thr;

	thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->done_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

/* Queue an inter-shelf-controller I/O to its LUN's worker. */
static void
ctl_enqueue_isc(union ctl_io *io)
{
	struct ctl_softc *softc = control_softc;
	struct ctl_thread *thr;

	thr = &softc->threads[io->io_hdr.nexus.targ_mapped_lun % worker_threads];
	mtx_lock(&thr->queue_lock);
	STAILQ_INSERT_TAIL(&thr->isc_queue, &io->io_hdr, links);
	mtx_unlock(&thr->queue_lock);
	wakeup(thr);
}

/* Initialization and failover */

void
ctl_init_isc_msg(void)
{
	printf("CTL: Still calling this thing\n");
}

/*
 * Init component
 * 	Initializes component into configuration defined by bootMode
 *	(see hasc-sv.c)
 *	returns hasc_Status:
 * 		OK
 *		ERROR - fatal error
 */
static ctl_ha_comp_status
ctl_isc_init(struct ctl_ha_component *c)
{
	ctl_ha_comp_status ret = CTL_HA_COMP_STATUS_OK;

	c->status = ret;
	return ret;
}

/* Start component
 * 	Starts component in state requested. If component starts successfully,
 *	it must set its own state to the requested state.
 *	When requested state is HASC_STATE_HA, the component may refine it
 * 	by adding _SLAVE or _MASTER flags.
14205 * Currently allowed state transitions are: 14206 * UNKNOWN->HA - initial startup 14207 * UNKNOWN->SINGLE - initial startup when no parter detected 14208 * HA->SINGLE - failover 14209 * returns ctl_ha_comp_status: 14210 * OK - component successfully started in requested state 14211 * FAILED - could not start the requested state, failover may 14212 * be possible 14213 * ERROR - fatal error detected, no future startup possible 14214 */ 14215static ctl_ha_comp_status 14216ctl_isc_start(struct ctl_ha_component *c, ctl_ha_state state) 14217{ 14218 ctl_ha_comp_status ret = CTL_HA_COMP_STATUS_OK; 14219 14220 printf("%s: go\n", __func__); 14221 14222 // UNKNOWN->HA or UNKNOWN->SINGLE (bootstrap) 14223 if (c->state == CTL_HA_STATE_UNKNOWN ) { 14224 control_softc->is_single = 0; 14225 if (ctl_ha_msg_create(CTL_HA_CHAN_CTL, ctl_isc_event_handler) 14226 != CTL_HA_STATUS_SUCCESS) { 14227 printf("ctl_isc_start: ctl_ha_msg_create failed.\n"); 14228 ret = CTL_HA_COMP_STATUS_ERROR; 14229 } 14230 } else if (CTL_HA_STATE_IS_HA(c->state) 14231 && CTL_HA_STATE_IS_SINGLE(state)){ 14232 // HA->SINGLE transition 14233 ctl_failover(); 14234 control_softc->is_single = 1; 14235 } else { 14236 printf("ctl_isc_start:Invalid state transition %X->%X\n", 14237 c->state, state); 14238 ret = CTL_HA_COMP_STATUS_ERROR; 14239 } 14240 if (CTL_HA_STATE_IS_SINGLE(state)) 14241 control_softc->is_single = 1; 14242 14243 c->state = state; 14244 c->status = ret; 14245 return ret; 14246} 14247 14248/* 14249 * Quiesce component 14250 * The component must clear any error conditions (set status to OK) and 14251 * prepare itself to another Start call 14252 * returns ctl_ha_comp_status: 14253 * OK 14254 * ERROR 14255 */ 14256static ctl_ha_comp_status 14257ctl_isc_quiesce(struct ctl_ha_component *c) 14258{ 14259 int ret = CTL_HA_COMP_STATUS_OK; 14260 14261 ctl_pause_rtr = 1; 14262 c->status = ret; 14263 return ret; 14264} 14265 14266struct ctl_ha_component ctl_ha_component_ctlisc = 14267{ 14268 .name = "CTL 
ISC", 14269 .state = CTL_HA_STATE_UNKNOWN, 14270 .init = ctl_isc_init, 14271 .start = ctl_isc_start, 14272 .quiesce = ctl_isc_quiesce 14273}; 14274 14275/* 14276 * vim: ts=8 14277 */ 14278