1/* 2 * Copyright (c) 2015, AVAGO Tech. All rights reserved. Author: Marian Choy 3 * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy 4 * Support: freebsdraid@avagotech.com 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions are 8 * met: 9 * 10 * 1. Redistributions of source code must retain the above copyright notice, 11 * this list of conditions and the following disclaimer. 2. Redistributions 12 * in binary form must reproduce the above copyright notice, this list of 13 * conditions and the following disclaimer in the documentation and/or other 14 * materials provided with the distribution. 3. Neither the name of the 15 * <ORGANIZATION> nor the names of its contributors may be used to endorse or 16 * promote products derived from this software without specific prior written 17 * permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE 23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 30 * 31 */ 32 33#include <sys/cdefs.h> 34__FBSDID("$FreeBSD: stable/11/sys/dev/mrsas/mrsas_cam.c 360305 2020-04-25 13:18:29Z dim $"); 35 36#include "dev/mrsas/mrsas.h" 37 38#include <cam/cam.h> 39#include <cam/cam_ccb.h> 40#include <cam/cam_sim.h> 41#include <cam/cam_xpt_sim.h> 42#include <cam/cam_debug.h> 43#include <cam/cam_periph.h> 44#include <cam/cam_xpt_periph.h> 45 46#include <cam/scsi/scsi_all.h> 47#include <cam/scsi/scsi_message.h> 48#include <sys/taskqueue.h> 49#include <sys/kernel.h> 50 51#include <sys/time.h> /* XXX for pcpu.h */ 52#include <sys/pcpu.h> /* XXX for PCPU_GET */ 53 54#define smp_processor_id() PCPU_GET(cpuid) 55 56/* 57 * Function prototypes 58 */ 59int mrsas_cam_attach(struct mrsas_softc *sc); 60int mrsas_find_io_type(struct cam_sim *sim, union ccb *ccb); 61int mrsas_bus_scan(struct mrsas_softc *sc); 62int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim); 63int 64mrsas_map_request(struct mrsas_softc *sc, 65 struct mrsas_mpt_cmd *cmd, union ccb *ccb); 66int 67mrsas_build_ldio_rw(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd, 68 union ccb *ccb); 69int 70mrsas_build_ldio_nonrw(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd, 71 union ccb *ccb); 72int 73mrsas_build_syspdio(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd, 74 union ccb *ccb, struct cam_sim *sim, u_int8_t fp_possible); 75int 76mrsas_setup_io(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd, 77 union ccb *ccb, u_int32_t device_id, 78 MRSAS_RAID_SCSI_IO_REQUEST * io_request); 79void mrsas_xpt_freeze(struct mrsas_softc *sc); 80void mrsas_xpt_release(struct mrsas_softc *sc); 81void mrsas_cam_detach(struct mrsas_softc *sc); 82void mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd); 83void mrsas_unmap_request(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd); 84void 
mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd); 85void 86mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo, 87 u_int32_t req_desc_hi); 88void 89mrsas_set_pd_lba(MRSAS_RAID_SCSI_IO_REQUEST * io_request, 90 u_int8_t cdb_len, struct IO_REQUEST_INFO *io_info, union ccb *ccb, 91 MR_DRV_RAID_MAP_ALL * local_map_ptr, u_int32_t ref_tag, 92 u_int32_t ld_block_size); 93static void mrsas_freeze_simq(struct mrsas_mpt_cmd *cmd, struct cam_sim *sim); 94static void mrsas_cam_poll(struct cam_sim *sim); 95static void mrsas_action(struct cam_sim *sim, union ccb *ccb); 96static void mrsas_scsiio_timeout(void *data); 97static int mrsas_track_scsiio(struct mrsas_softc *sc, target_id_t id, u_int32_t bus_id); 98static void mrsas_tm_response_code(struct mrsas_softc *sc, 99 MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply); 100static int mrsas_issue_tm(struct mrsas_softc *sc, 101 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc); 102static void 103mrsas_data_load_cb(void *arg, bus_dma_segment_t *segs, 104 int nseg, int error); 105static int32_t 106mrsas_startio(struct mrsas_softc *sc, struct cam_sim *sim, 107 union ccb *ccb); 108 109static boolean_t mrsas_is_prp_possible(struct mrsas_mpt_cmd *cmd, 110 bus_dma_segment_t *segs, int nsegs); 111static void mrsas_build_ieee_sgl(struct mrsas_mpt_cmd *cmd, 112 bus_dma_segment_t *segs, int nseg); 113static void mrsas_build_prp_nvme(struct mrsas_mpt_cmd *cmd, 114 bus_dma_segment_t *segs, int nseg); 115 116struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc); 117MRSAS_REQUEST_DESCRIPTOR_UNION * 118 mrsas_get_request_desc(struct mrsas_softc *sc, u_int16_t index); 119 120extern int mrsas_reset_targets(struct mrsas_softc *sc); 121extern u_int16_t MR_TargetIdToLdGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL * map); 122extern u_int32_t 123MR_LdBlockSizeGet(u_int32_t ldTgtId, MR_DRV_RAID_MAP_ALL * map, 124 struct mrsas_softc *sc); 125extern void mrsas_isr(void *arg); 126extern void mrsas_aen_handler(struct mrsas_softc *sc); 127extern u_int8_t 128MR_BuildRaidContext(struct mrsas_softc *sc, 129 struct IO_REQUEST_INFO *io_info, RAID_CONTEXT * pRAID_Context, 130 MR_DRV_RAID_MAP_ALL * map); 131extern u_int16_t 132MR_LdSpanArrayGet(u_int32_t ld, u_int32_t span, 133 MR_DRV_RAID_MAP_ALL * map); 134extern u_int16_t 135mrsas_get_updated_dev_handle(struct mrsas_softc *sc, 136 PLD_LOAD_BALANCE_INFO lbInfo, struct IO_REQUEST_INFO *io_info); 137extern int mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex); 138extern MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map); 139extern void mrsas_disable_intr(struct mrsas_softc *sc); 140extern void mrsas_enable_intr(struct mrsas_softc *sc); 141void mrsas_prepare_secondRaid1_IO(struct mrsas_softc *sc, 142 struct mrsas_mpt_cmd *cmd); 143 144/* 145 * mrsas_cam_attach: Main entry to CAM subsystem 146 * input: Adapter instance soft state 147 * 148 * This function is called from mrsas_attach() during initialization to perform 149 * SIM allocations and XPT bus registration. If the kernel version is 7.4 or 150 * earlier, it would also initiate a bus scan. 
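 *
 * Both SIMs are allocated against a single shared devq: bus 0 exposes the
 * logical drives (LDs) and bus 1 exposes system physical drives (JBOD),
 * which is how mrsas_action() and mrsas_startio() later distinguish the two
 * target classes.  As a rough sketch (not a verbatim excerpt), the per-bus
 * setup amounts to:
 *
 *	devq = cam_simq_alloc(sc->max_scsi_cmds);
 *	sim  = cam_sim_alloc(mrsas_action, mrsas_cam_poll, "mrsas", sc,
 *	    device_get_unit(sc->mrsas_dev), &sc->sim_lock, depth, depth, devq);
 *	xpt_bus_register(sim, sc->mrsas_dev, bus);
 *	xpt_create_path(&path, NULL, cam_sim_path(sim),
 *	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
 *
 * with each step unwound in reverse order on failure.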
151 */ 152int 153mrsas_cam_attach(struct mrsas_softc *sc) 154{ 155 struct cam_devq *devq; 156 int mrsas_cam_depth; 157 158 mrsas_cam_depth = sc->max_scsi_cmds; 159 160 if ((devq = cam_simq_alloc(mrsas_cam_depth)) == NULL) { 161 device_printf(sc->mrsas_dev, "Cannot allocate SIM queue\n"); 162 return (ENOMEM); 163 } 164 /* 165 * Create SIM for bus 0 and register, also create path 166 */ 167 sc->sim_0 = cam_sim_alloc(mrsas_action, mrsas_cam_poll, "mrsas", sc, 168 device_get_unit(sc->mrsas_dev), &sc->sim_lock, mrsas_cam_depth, 169 mrsas_cam_depth, devq); 170 if (sc->sim_0 == NULL) { 171 cam_simq_free(devq); 172 device_printf(sc->mrsas_dev, "Cannot register SIM\n"); 173 return (ENXIO); 174 } 175 /* Initialize taskqueue for Event Handling */ 176 TASK_INIT(&sc->ev_task, 0, (void *)mrsas_aen_handler, sc); 177 sc->ev_tq = taskqueue_create("mrsas_taskq", M_NOWAIT | M_ZERO, 178 taskqueue_thread_enqueue, &sc->ev_tq); 179 180 /* Run the task queue with lowest priority */ 181 taskqueue_start_threads(&sc->ev_tq, 1, 255, "%s taskq", 182 device_get_nameunit(sc->mrsas_dev)); 183 mtx_lock(&sc->sim_lock); 184 if (xpt_bus_register(sc->sim_0, sc->mrsas_dev, 0) != CAM_SUCCESS) { 185 cam_sim_free(sc->sim_0, TRUE); /* passing true frees the devq */ 186 mtx_unlock(&sc->sim_lock); 187 return (ENXIO); 188 } 189 if (xpt_create_path(&sc->path_0, NULL, cam_sim_path(sc->sim_0), 190 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 191 xpt_bus_deregister(cam_sim_path(sc->sim_0)); 192 cam_sim_free(sc->sim_0, TRUE); /* passing true will free the 193 * devq */ 194 mtx_unlock(&sc->sim_lock); 195 return (ENXIO); 196 } 197 mtx_unlock(&sc->sim_lock); 198 199 /* 200 * Create SIM for bus 1 and register, also create path 201 */ 202 sc->sim_1 = cam_sim_alloc(mrsas_action, mrsas_cam_poll, "mrsas", sc, 203 device_get_unit(sc->mrsas_dev), &sc->sim_lock, mrsas_cam_depth, 204 mrsas_cam_depth, devq); 205 if (sc->sim_1 == NULL) { 206 cam_simq_free(devq); 207 device_printf(sc->mrsas_dev, "Cannot register SIM\n"); 208 return (ENXIO); 209 } 210 mtx_lock(&sc->sim_lock); 211 if (xpt_bus_register(sc->sim_1, sc->mrsas_dev, 1) != CAM_SUCCESS) { 212 cam_sim_free(sc->sim_1, TRUE); /* passing true frees the devq */ 213 mtx_unlock(&sc->sim_lock); 214 return (ENXIO); 215 } 216 if (xpt_create_path(&sc->path_1, NULL, cam_sim_path(sc->sim_1), 217 CAM_TARGET_WILDCARD, 218 CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 219 xpt_bus_deregister(cam_sim_path(sc->sim_1)); 220 cam_sim_free(sc->sim_1, TRUE); 221 mtx_unlock(&sc->sim_lock); 222 return (ENXIO); 223 } 224 mtx_unlock(&sc->sim_lock); 225 226#if (__FreeBSD_version <= 704000) 227 if (mrsas_bus_scan(sc)) { 228 device_printf(sc->mrsas_dev, "Error in bus scan.\n"); 229 return (1); 230 } 231#endif 232 return (0); 233} 234 235/* 236 * mrsas_cam_detach: De-allocates and teardown CAM 237 * input: Adapter instance soft state 238 * 239 * De-registers and frees the paths and SIMs. 
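 *
 * The teardown mirrors mrsas_cam_attach(): both SIMs share one devq, so
 * sim_0 is freed with free_devq set to FALSE and only the final
 * cam_sim_free() of sim_1 passes TRUE to release the shared queue.  The
 * event-handling taskqueue created during attach is freed here as well.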
240 */ 241void 242mrsas_cam_detach(struct mrsas_softc *sc) 243{ 244 if (sc->ev_tq != NULL) 245 taskqueue_free(sc->ev_tq); 246 mtx_lock(&sc->sim_lock); 247 if (sc->path_0) 248 xpt_free_path(sc->path_0); 249 if (sc->sim_0) { 250 xpt_bus_deregister(cam_sim_path(sc->sim_0)); 251 cam_sim_free(sc->sim_0, FALSE); 252 } 253 if (sc->path_1) 254 xpt_free_path(sc->path_1); 255 if (sc->sim_1) { 256 xpt_bus_deregister(cam_sim_path(sc->sim_1)); 257 cam_sim_free(sc->sim_1, TRUE); 258 } 259 mtx_unlock(&sc->sim_lock); 260} 261 262/* 263 * mrsas_action: SIM callback entry point 264 * input: pointer to SIM pointer to CAM Control Block 265 * 266 * This function processes CAM subsystem requests. The type of request is stored 267 * in ccb->ccb_h.func_code. The preprocessor #ifdef is necessary because 268 * ccb->cpi.maxio is not supported for FreeBSD version 7.4 or earlier. 269 */ 270static void 271mrsas_action(struct cam_sim *sim, union ccb *ccb) 272{ 273 struct mrsas_softc *sc = (struct mrsas_softc *)cam_sim_softc(sim); 274 struct ccb_hdr *ccb_h = &(ccb->ccb_h); 275 u_int32_t device_id; 276 277 /* 278 * Check if the system going down 279 * or the adapter is in unrecoverable critical error 280 */ 281 if (sc->remove_in_progress || 282 (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)) { 283 ccb->ccb_h.status |= CAM_DEV_NOT_THERE; 284 xpt_done(ccb); 285 return; 286 } 287 288 switch (ccb->ccb_h.func_code) { 289 case XPT_SCSI_IO: 290 { 291 device_id = ccb_h->target_id; 292 293 /* 294 * bus 0 is LD, bus 1 is for system-PD 295 */ 296 if (cam_sim_bus(sim) == 1 && 297 sc->pd_list[device_id].driveState != MR_PD_STATE_SYSTEM) { 298 ccb->ccb_h.status |= CAM_DEV_NOT_THERE; 299 xpt_done(ccb); 300 } else { 301 if (mrsas_startio(sc, sim, ccb)) { 302 ccb->ccb_h.status |= CAM_REQ_INVALID; 303 xpt_done(ccb); 304 } 305 } 306 break; 307 } 308 case XPT_ABORT: 309 { 310 ccb->ccb_h.status = CAM_UA_ABORT; 311 xpt_done(ccb); 312 break; 313 } 314 case XPT_RESET_BUS: 315 { 316 xpt_done(ccb); 317 break; 318 } 319 case XPT_GET_TRAN_SETTINGS: 320 { 321 ccb->cts.protocol = PROTO_SCSI; 322 ccb->cts.protocol_version = SCSI_REV_2; 323 ccb->cts.transport = XPORT_SPI; 324 ccb->cts.transport_version = 2; 325 ccb->cts.xport_specific.spi.valid = CTS_SPI_VALID_DISC; 326 ccb->cts.xport_specific.spi.flags = CTS_SPI_FLAGS_DISC_ENB; 327 ccb->cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ; 328 ccb->cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB; 329 ccb->ccb_h.status = CAM_REQ_CMP; 330 xpt_done(ccb); 331 break; 332 } 333 case XPT_SET_TRAN_SETTINGS: 334 { 335 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; 336 xpt_done(ccb); 337 break; 338 } 339 case XPT_CALC_GEOMETRY: 340 { 341 cam_calc_geometry(&ccb->ccg, 1); 342 xpt_done(ccb); 343 break; 344 } 345 case XPT_PATH_INQ: 346 { 347 ccb->cpi.version_num = 1; 348 ccb->cpi.hba_inquiry = 0; 349 ccb->cpi.target_sprt = 0; 350#if (__FreeBSD_version >= 902001) 351 ccb->cpi.hba_misc = PIM_UNMAPPED; 352#else 353 ccb->cpi.hba_misc = 0; 354#endif 355 ccb->cpi.hba_eng_cnt = 0; 356 ccb->cpi.max_lun = MRSAS_SCSI_MAX_LUNS; 357 ccb->cpi.unit_number = cam_sim_unit(sim); 358 ccb->cpi.bus_id = cam_sim_bus(sim); 359 ccb->cpi.initiator_id = MRSAS_SCSI_INITIATOR_ID; 360 ccb->cpi.base_transfer_speed = 150000; 361 strlcpy(ccb->cpi.sim_vid, "FreeBSD", SIM_IDLEN); 362 strlcpy(ccb->cpi.hba_vid, "AVAGO", HBA_IDLEN); 363 strlcpy(ccb->cpi.dev_name, cam_sim_name(sim), DEV_IDLEN); 364 ccb->cpi.transport = XPORT_SPI; 365 ccb->cpi.transport_version = 2; 366 ccb->cpi.protocol = PROTO_SCSI; 367 ccb->cpi.protocol_version = SCSI_REV_2; 368 if 
(ccb->cpi.bus_id == 0)
			ccb->cpi.max_target = MRSAS_MAX_PD - 1;
		else
			ccb->cpi.max_target = MRSAS_MAX_LD_IDS - 1;
#if (__FreeBSD_version > 704000)
		ccb->cpi.maxio = sc->max_num_sge * MRSAS_PAGE_SIZE;
#endif
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	default:
	{
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
	}
}

/*
 * mrsas_scsiio_timeout: Callback function for IO timed out
 * input: mpt command context
 *
 * This function executes when the timer armed with the timeout value taken
 * from the CCB header expires.  The driver runs this timer for every DCDB
 * and LDIO coming from the CAM layer, and the callback runs in a no-sleep
 * context.  It sets do_timedout_reset in the adapter context so that
 * OCR/kill-adapter is performed from the ocr_thread context.
 */
static void
mrsas_scsiio_timeout(void *data)
{
	struct mrsas_mpt_cmd *cmd;
	struct mrsas_softc *sc;
	u_int32_t target_id;

	if (!data)
		return;

	cmd = (struct mrsas_mpt_cmd *)data;
	sc = cmd->sc;

	if (cmd->ccb_ptr == NULL) {
		printf("command timeout with NULL ccb\n");
		return;
	}

	/*
	 * The callout below is a dummy entry so that it can be cancelled
	 * from mrsas_cmd_done().  The controller will go through OCR or
	 * kill-adapter, based on the controller's OCR enable/disable
	 * property, from the ocr_thread context.
	 */
#if (__FreeBSD_version >= 1000510)
	callout_reset_sbt(&cmd->cm_callout, SBT_1S * 180, 0,
	    mrsas_scsiio_timeout, cmd, 0);
#else
	callout_reset(&cmd->cm_callout, (180000 * hz) / 1000,
	    mrsas_scsiio_timeout, cmd);
#endif

	if (cmd->ccb_ptr->cpi.bus_id == 0)
		target_id = cmd->ccb_ptr->ccb_h.target_id;
	else
		target_id = (cmd->ccb_ptr->ccb_h.target_id + (MRSAS_MAX_PD - 1));

	/* Save the cmd to be processed for TM, if it is not there in the array */
	if (sc->target_reset_pool[target_id] == NULL) {
		sc->target_reset_pool[target_id] = cmd;
		mrsas_atomic_inc(&sc->target_reset_outstanding);
	}

	return;
}

/*
 * mrsas_startio: SCSI IO entry point
 * input: Adapter instance soft state
 * 	  pointer to CAM Control Block
 *
 * This function is the SCSI IO entry point and it initiates IO processing.
 * It copies the CDB into the command frame, classifies the IO with
 * mrsas_find_io_type(), and calls the matching builder (outlined below).
 * It returns 0 if the command is sent to firmware successfully, otherwise
 * it returns 1.
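 *
 * A rough sketch of the dispatch (illustrative, not a verbatim excerpt):
 *
 *	switch (mrsas_find_io_type(sim, ccb)) {
 *	case READ_WRITE_LDIO:
 *		mrsas_build_ldio_rw(sc, cmd, ccb);
 *		break;
 *	case NON_READ_WRITE_LDIO:
 *		mrsas_build_ldio_nonrw(sc, cmd, ccb);
 *		break;
 *	case READ_WRITE_SYSPDIO:
 *	case NON_READ_WRITE_SYSPDIO:
 *		mrsas_build_syspdio(sc, cmd, ccb, sim, fp_possible);
 *		break;
 *	}
 *
 * where fp_possible is 0 for non-RW JBOD commands on secure-JBOD capable
 * controllers and 1 otherwise, and any build failure releases the MPT
 * command and returns 1.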
453 */ 454static int32_t 455mrsas_startio(struct mrsas_softc *sc, struct cam_sim *sim, 456 union ccb *ccb) 457{ 458 struct mrsas_mpt_cmd *cmd, *r1_cmd = NULL; 459 struct ccb_hdr *ccb_h = &(ccb->ccb_h); 460 struct ccb_scsiio *csio = &(ccb->csio); 461 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc; 462 u_int8_t cmd_type; 463 464 if ((csio->cdb_io.cdb_bytes[0]) == SYNCHRONIZE_CACHE && 465 (!sc->fw_sync_cache_support)) { 466 ccb->ccb_h.status = CAM_REQ_CMP; 467 xpt_done(ccb); 468 return (0); 469 } 470 ccb_h->status |= CAM_SIM_QUEUED; 471 472 if (mrsas_atomic_inc_return(&sc->fw_outstanding) > sc->max_scsi_cmds) { 473 ccb_h->status |= CAM_REQUEUE_REQ; 474 xpt_done(ccb); 475 mrsas_atomic_dec(&sc->fw_outstanding); 476 return (0); 477 } 478 479 cmd = mrsas_get_mpt_cmd(sc); 480 481 if (!cmd) { 482 ccb_h->status |= CAM_REQUEUE_REQ; 483 xpt_done(ccb); 484 mrsas_atomic_dec(&sc->fw_outstanding); 485 return (0); 486 } 487 488 if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 489 if (ccb_h->flags & CAM_DIR_IN) 490 cmd->flags |= MRSAS_DIR_IN; 491 if (ccb_h->flags & CAM_DIR_OUT) 492 cmd->flags |= MRSAS_DIR_OUT; 493 } else 494 cmd->flags = MRSAS_DIR_NONE; /* no data */ 495 496/* For FreeBSD 9.2 and higher */ 497#if (__FreeBSD_version >= 902001) 498 /* 499 * XXX We don't yet support physical addresses here. 500 */ 501 switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) { 502 case CAM_DATA_PADDR: 503 case CAM_DATA_SG_PADDR: 504 device_printf(sc->mrsas_dev, "%s: physical addresses not supported\n", 505 __func__); 506 mrsas_release_mpt_cmd(cmd); 507 ccb_h->status = CAM_REQ_INVALID; 508 ccb_h->status &= ~CAM_SIM_QUEUED; 509 goto done; 510 case CAM_DATA_SG: 511 device_printf(sc->mrsas_dev, "%s: scatter gather is not supported\n", 512 __func__); 513 mrsas_release_mpt_cmd(cmd); 514 ccb_h->status = CAM_REQ_INVALID; 515 goto done; 516 case CAM_DATA_VADDR: 517 if (csio->dxfer_len > (sc->max_num_sge * MRSAS_PAGE_SIZE)) { 518 mrsas_release_mpt_cmd(cmd); 519 ccb_h->status = CAM_REQ_TOO_BIG; 520 goto done; 521 } 522 cmd->length = csio->dxfer_len; 523 if (cmd->length) 524 cmd->data = csio->data_ptr; 525 break; 526 case CAM_DATA_BIO: 527 if (csio->dxfer_len > (sc->max_num_sge * MRSAS_PAGE_SIZE)) { 528 mrsas_release_mpt_cmd(cmd); 529 ccb_h->status = CAM_REQ_TOO_BIG; 530 goto done; 531 } 532 cmd->length = csio->dxfer_len; 533 if (cmd->length) 534 cmd->data = csio->data_ptr; 535 break; 536 default: 537 ccb->ccb_h.status = CAM_REQ_INVALID; 538 goto done; 539 } 540#else 541 if (!(ccb_h->flags & CAM_DATA_PHYS)) { /* Virtual data address */ 542 if (!(ccb_h->flags & CAM_SCATTER_VALID)) { 543 if (csio->dxfer_len > (sc->max_num_sge * MRSAS_PAGE_SIZE)) { 544 mrsas_release_mpt_cmd(cmd); 545 ccb_h->status = CAM_REQ_TOO_BIG; 546 goto done; 547 } 548 cmd->length = csio->dxfer_len; 549 if (cmd->length) 550 cmd->data = csio->data_ptr; 551 } else { 552 mrsas_release_mpt_cmd(cmd); 553 ccb_h->status = CAM_REQ_INVALID; 554 goto done; 555 } 556 } else { /* Data addresses are physical. 
 */
		mrsas_release_mpt_cmd(cmd);
		ccb_h->status = CAM_REQ_INVALID;
		ccb_h->status &= ~CAM_SIM_QUEUED;
		goto done;
	}
#endif
	/* save ccb ptr */
	cmd->ccb_ptr = ccb;

	req_desc = mrsas_get_request_desc(sc, (cmd->index) - 1);
	if (!req_desc) {
		device_printf(sc->mrsas_dev, "Cannot get request_descriptor.\n");
		return (FAIL);
	}
	memset(req_desc, 0, sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION));
	cmd->request_desc = req_desc;

	if (ccb_h->flags & CAM_CDB_POINTER)
		bcopy(csio->cdb_io.cdb_ptr, cmd->io_request->CDB.CDB32, csio->cdb_len);
	else
		bcopy(csio->cdb_io.cdb_bytes, cmd->io_request->CDB.CDB32, csio->cdb_len);
	mtx_lock(&sc->raidmap_lock);

	/* Classify the IO: LD vs. system PD, read/write vs. non-read/write */
	cmd_type = mrsas_find_io_type(sim, ccb);
	switch (cmd_type) {
	case READ_WRITE_LDIO:
		/* Build READ-WRITE IO for Logical Volume */
		if (mrsas_build_ldio_rw(sc, cmd, ccb)) {
			device_printf(sc->mrsas_dev, "Build RW LDIO failed.\n");
			mtx_unlock(&sc->raidmap_lock);
			mrsas_release_mpt_cmd(cmd);
			return (1);
		}
		break;
	case NON_READ_WRITE_LDIO:
		/* Build NON READ-WRITE IO for Logical Volume */
		if (mrsas_build_ldio_nonrw(sc, cmd, ccb)) {
			device_printf(sc->mrsas_dev, "Build NON-RW LDIO failed.\n");
			mtx_unlock(&sc->raidmap_lock);
			mrsas_release_mpt_cmd(cmd);
			return (1);
		}
		break;
	case READ_WRITE_SYSPDIO:
	case NON_READ_WRITE_SYSPDIO:
		if (sc->secure_jbod_support &&
		    (cmd_type == NON_READ_WRITE_SYSPDIO)) {
			/* Build NON-RW IO for JBOD */
			if (mrsas_build_syspdio(sc, cmd, ccb, sim, 0)) {
				device_printf(sc->mrsas_dev,
				    "Build SYSPDIO failed.\n");
				mtx_unlock(&sc->raidmap_lock);
				mrsas_release_mpt_cmd(cmd);
				return (1);
			}
		} else {
			/* Build RW IO for JBOD */
			if (mrsas_build_syspdio(sc, cmd, ccb, sim, 1)) {
				device_printf(sc->mrsas_dev,
				    "Build SYSPDIO failed.\n");
				mtx_unlock(&sc->raidmap_lock);
				mrsas_release_mpt_cmd(cmd);
				return (1);
			}
		}
	}
	mtx_unlock(&sc->raidmap_lock);

	if (cmd->flags == MRSAS_DIR_IN)	/* from device */
		cmd->io_request->Control |= MPI2_SCSIIO_CONTROL_READ;
	else if (cmd->flags == MRSAS_DIR_OUT)	/* to device */
		cmd->io_request->Control |= MPI2_SCSIIO_CONTROL_WRITE;

	cmd->io_request->SGLFlags = MPI2_SGE_FLAGS_64_BIT_ADDRESSING;
	cmd->io_request->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4;
	cmd->io_request->SenseBufferLowAddress = cmd->sense_phys_addr;
	cmd->io_request->SenseBufferLength = MRSAS_SCSI_SENSE_BUFFERSIZE;

	req_desc = cmd->request_desc;
	req_desc->SCSIIO.SMID = cmd->index;

	/*
	 * Start timer for IO timeout.  The timeout value is 180 seconds.
	 */
	cmd->callout_owner = true;
#if (__FreeBSD_version >= 1000510)
	callout_reset_sbt(&cmd->cm_callout, SBT_1S * 180, 0,
	    mrsas_scsiio_timeout, cmd, 0);
#else
	callout_reset(&cmd->cm_callout, (180000 * hz) / 1000,
	    mrsas_scsiio_timeout, cmd);
#endif

	if (mrsas_atomic_read(&sc->fw_outstanding) > sc->io_cmds_highwater)
		sc->io_cmds_highwater++;

	/*
	 * If the IO is a write on a RAID 1/10 LD that is fast-path write
	 * capable, try to get a second command from the pool and construct it.
657 * From FW, it has confirmed that lba values of two PDs corresponds to 658 * single R1/10 LD are always same 659 * 660 */ 661 /* 662 * driver side count always should be less than max_fw_cmds to get 663 * new command 664 */ 665 if (cmd->r1_alt_dev_handle != MR_DEVHANDLE_INVALID) { 666 mrsas_prepare_secondRaid1_IO(sc, cmd); 667 mrsas_fire_cmd(sc, req_desc->addr.u.low, 668 req_desc->addr.u.high); 669 r1_cmd = cmd->peer_cmd; 670 mrsas_fire_cmd(sc, r1_cmd->request_desc->addr.u.low, 671 r1_cmd->request_desc->addr.u.high); 672 } else { 673 mrsas_fire_cmd(sc, req_desc->addr.u.low, 674 req_desc->addr.u.high); 675 } 676 677 return (0); 678 679done: 680 xpt_done(ccb); 681 mrsas_atomic_dec(&sc->fw_outstanding); 682 return (0); 683} 684 685/* 686 * mrsas_find_io_type: Determines if IO is read/write or inquiry 687 * input: pointer to CAM Control Block 688 * 689 * This function determines if the IO is read/write or inquiry. It returns a 1 690 * if the IO is read/write and 0 if it is inquiry. 691 */ 692int 693mrsas_find_io_type(struct cam_sim *sim, union ccb *ccb) 694{ 695 struct ccb_scsiio *csio = &(ccb->csio); 696 697 switch (csio->cdb_io.cdb_bytes[0]) { 698 case READ_10: 699 case WRITE_10: 700 case READ_12: 701 case WRITE_12: 702 case READ_6: 703 case WRITE_6: 704 case READ_16: 705 case WRITE_16: 706 return (cam_sim_bus(sim) ? 707 READ_WRITE_SYSPDIO : READ_WRITE_LDIO); 708 default: 709 return (cam_sim_bus(sim) ? 710 NON_READ_WRITE_SYSPDIO : NON_READ_WRITE_LDIO); 711 } 712} 713 714/* 715 * mrsas_get_mpt_cmd: Get a cmd from free command pool 716 * input: Adapter instance soft state 717 * 718 * This function removes an MPT command from the command free list and 719 * initializes it. 720 */ 721struct mrsas_mpt_cmd * 722mrsas_get_mpt_cmd(struct mrsas_softc *sc) 723{ 724 struct mrsas_mpt_cmd *cmd = NULL; 725 726 mtx_lock(&sc->mpt_cmd_pool_lock); 727 if (!TAILQ_EMPTY(&sc->mrsas_mpt_cmd_list_head)) { 728 cmd = TAILQ_FIRST(&sc->mrsas_mpt_cmd_list_head); 729 TAILQ_REMOVE(&sc->mrsas_mpt_cmd_list_head, cmd, next); 730 } else { 731 goto out; 732 } 733 734 memset((uint8_t *)cmd->io_request, 0, MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE); 735 cmd->data = NULL; 736 cmd->length = 0; 737 cmd->flags = 0; 738 cmd->error_code = 0; 739 cmd->load_balance = 0; 740 cmd->ccb_ptr = NULL; 741out: 742 mtx_unlock(&sc->mpt_cmd_pool_lock); 743 return cmd; 744} 745 746/* 747 * mrsas_release_mpt_cmd: Return a cmd to free command pool 748 * input: Command packet for return to free command pool 749 * 750 * This function returns an MPT command to the free command list. 751 */ 752void 753mrsas_release_mpt_cmd(struct mrsas_mpt_cmd *cmd) 754{ 755 struct mrsas_softc *sc = cmd->sc; 756 757 mtx_lock(&sc->mpt_cmd_pool_lock); 758 cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID; 759 cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX; 760 cmd->peer_cmd = NULL; 761 cmd->cmd_completed = 0; 762 memset((uint8_t *)cmd->io_request, 0, 763 sizeof(MRSAS_RAID_SCSI_IO_REQUEST)); 764 TAILQ_INSERT_HEAD(&(sc->mrsas_mpt_cmd_list_head), cmd, next); 765 mtx_unlock(&sc->mpt_cmd_pool_lock); 766 767 return; 768} 769 770/* 771 * mrsas_get_request_desc: Get request descriptor from array 772 * input: Adapter instance soft state 773 * SMID index 774 * 775 * This function returns a pointer to the request descriptor. 
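 *
 * Callers index the descriptor array by SMID minus one; mrsas_startio(),
 * for instance, calls mrsas_get_request_desc(sc, cmd->index - 1) and then
 * stores cmd->index itself in req_desc->SCSIIO.SMID.  The lookup is simply
 * (illustrative only):
 *
 *	desc = (MRSAS_REQUEST_DESCRIPTOR_UNION *)(sc->req_desc +
 *	    sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * index);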
776 */ 777MRSAS_REQUEST_DESCRIPTOR_UNION * 778mrsas_get_request_desc(struct mrsas_softc *sc, u_int16_t index) 779{ 780 u_int8_t *p; 781 782 KASSERT(index < sc->max_fw_cmds, ("req_desc is out of range")); 783 p = sc->req_desc + sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * index; 784 785 return (MRSAS_REQUEST_DESCRIPTOR_UNION *) p; 786} 787 788 789 790 791/* mrsas_prepare_secondRaid1_IO 792 * It prepares the raid 1 second IO 793 */ 794void 795mrsas_prepare_secondRaid1_IO(struct mrsas_softc *sc, 796 struct mrsas_mpt_cmd *cmd) 797{ 798 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc, *req_desc2 = NULL; 799 struct mrsas_mpt_cmd *r1_cmd; 800 801 r1_cmd = cmd->peer_cmd; 802 req_desc = cmd->request_desc; 803 804 /* 805 * copy the io request frame as well as 8 SGEs data for r1 806 * command 807 */ 808 memcpy(r1_cmd->io_request, cmd->io_request, 809 (sizeof(MRSAS_RAID_SCSI_IO_REQUEST))); 810 memcpy(&r1_cmd->io_request->SGL, &cmd->io_request->SGL, 811 (sc->max_sge_in_main_msg * sizeof(MPI2_SGE_IO_UNION))); 812 813 /* sense buffer is different for r1 command */ 814 r1_cmd->io_request->SenseBufferLowAddress = r1_cmd->sense_phys_addr; 815 r1_cmd->ccb_ptr = cmd->ccb_ptr; 816 817 req_desc2 = mrsas_get_request_desc(sc, r1_cmd->index - 1); 818 req_desc2->addr.Words = 0; 819 r1_cmd->request_desc = req_desc2; 820 req_desc2->SCSIIO.SMID = r1_cmd->index; 821 req_desc2->SCSIIO.RequestFlags = req_desc->SCSIIO.RequestFlags; 822 r1_cmd->request_desc->SCSIIO.DevHandle = cmd->r1_alt_dev_handle; 823 r1_cmd->r1_alt_dev_handle = cmd->io_request->DevHandle; 824 r1_cmd->io_request->DevHandle = cmd->r1_alt_dev_handle; 825 cmd->io_request->RaidContext.raid_context_g35.smid.peerSMID = 826 r1_cmd->index; 827 r1_cmd->io_request->RaidContext.raid_context_g35.smid.peerSMID = 828 cmd->index; 829 /* 830 * MSIxIndex of both commands request descriptors 831 * should be same 832 */ 833 r1_cmd->request_desc->SCSIIO.MSIxIndex = cmd->request_desc->SCSIIO.MSIxIndex; 834 /* span arm is different for r1 cmd */ 835 r1_cmd->io_request->RaidContext.raid_context_g35.spanArm = 836 cmd->io_request->RaidContext.raid_context_g35.spanArm + 1; 837 838} 839 840 841/* 842 * mrsas_build_ldio_rw: Builds an LDIO command 843 * input: Adapter instance soft state 844 * Pointer to command packet 845 * Pointer to CCB 846 * 847 * This function builds the LDIO command packet. It returns 0 if the command is 848 * built successfully, otherwise it returns a 1. 
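 *
 * On pre-Ventura controllers the SGE count is split across two RAID
 * context fields: numSGE holds the low 8 bits and numSGEExt the high
 * bits.  As a worked example (hypothetical count), sge_count = 0x112
 * would be stored as numSGE = 0x12 and numSGEExt = 0x01; Ventura/Aero
 * controllers instead take the full count in raid_context_g35.numSGE.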
 */
int
mrsas_build_ldio_rw(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
    union ccb *ccb)
{
	struct ccb_hdr *ccb_h = &(ccb->ccb_h);
	struct ccb_scsiio *csio = &(ccb->csio);
	u_int32_t device_id;
	MRSAS_RAID_SCSI_IO_REQUEST *io_request;

	device_id = ccb_h->target_id;

	io_request = cmd->io_request;
	io_request->RaidContext.raid_context.VirtualDiskTgtId = device_id;
	io_request->RaidContext.raid_context.status = 0;
	io_request->RaidContext.raid_context.exStatus = 0;

	/* just the cdb len, other flags zero, and ORed-in later for FP */
	io_request->IoFlags = csio->cdb_len;

	if (mrsas_setup_io(sc, cmd, ccb, device_id, io_request) != SUCCESS)
		device_printf(sc->mrsas_dev, "Build ldio or fpio error\n");

	io_request->DataLength = cmd->length;

	if (mrsas_map_request(sc, cmd, ccb) == SUCCESS) {
		if (cmd->sge_count > sc->max_num_sge) {
			device_printf(sc->mrsas_dev, "Error: sge_count (0x%x) exceeds "
			    "max (0x%x) allowed\n", cmd->sge_count, sc->max_num_sge);
			return (FAIL);
		}
		if (sc->is_ventura || sc->is_aero)
			io_request->RaidContext.raid_context_g35.numSGE = cmd->sge_count;
		else {
			/*
			 * numSGE stores the lower 8 bits of sge_count;
			 * numSGEExt stores the higher 8 bits of sge_count.
			 */
			io_request->RaidContext.raid_context.numSGE = cmd->sge_count;
			io_request->RaidContext.raid_context.numSGEExt = (uint8_t)(cmd->sge_count >> 8);
		}

	} else {
		device_printf(sc->mrsas_dev, "Data map/load failed.\n");
		return (FAIL);
	}
	return (0);
}

/* stream detection on read and write IOs */
static void
mrsas_stream_detect(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
    struct IO_REQUEST_INFO *io_info)
{
	u_int32_t device_id = io_info->ldTgtId;
	LD_STREAM_DETECT *current_ld_SD = sc->streamDetectByLD[device_id];
	u_int32_t *track_stream = &current_ld_SD->mruBitMap;
	u_int32_t streamNum, shiftedValues, unshiftedValues;
	u_int32_t indexValueMask, shiftedValuesMask;
	int i;
	boolean_t isReadAhead = false;
	STREAM_DETECT *current_SD;

	/* find possible stream */
	for (i = 0; i < MAX_STREAMS_TRACKED; ++i) {
		streamNum = (*track_stream >> (i * BITS_PER_INDEX_STREAM)) &
		    STREAM_MASK;
		current_SD = &current_ld_SD->streamTrack[streamNum];
		/*
		 * if we found a stream, update the raid context and
		 * also update the mruBitMap
		 */
		if (current_SD->nextSeqLBA &&
		    io_info->ldStartBlock >= current_SD->nextSeqLBA &&
		    (io_info->ldStartBlock <= (current_SD->nextSeqLBA + 32)) &&
		    (current_SD->isRead == io_info->isRead)) {
			if (io_info->ldStartBlock != current_SD->nextSeqLBA &&
			    (!io_info->isRead || !isReadAhead)) {
				/*
				 * Once the API is available we need to change this.
				 * At this point we are not allowing any gap.
				 */
				continue;
			}
			cmd->io_request->RaidContext.raid_context_g35.streamDetected = TRUE;
			current_SD->nextSeqLBA = io_info->ldStartBlock + io_info->numBlocks;
			/*
			 * update the mruBitMap LRU
			 */
			shiftedValuesMask = (1 << i * BITS_PER_INDEX_STREAM) - 1;
			shiftedValues = ((*track_stream & shiftedValuesMask) <<
			    BITS_PER_INDEX_STREAM);
			indexValueMask = STREAM_MASK << i * BITS_PER_INDEX_STREAM;
			unshiftedValues = (*track_stream) &
			    (~(shiftedValuesMask | indexValueMask));
			*track_stream =
			    (unshiftedValues | shiftedValues | streamNum);
			return;
		}
	}
	/*
	 * if we did not find any stream, create a new one from the least
	 * recently used
	 */
	streamNum = (*track_stream >>
	    ((MAX_STREAMS_TRACKED - 1) * BITS_PER_INDEX_STREAM)) & STREAM_MASK;
	current_SD = &current_ld_SD->streamTrack[streamNum];
	current_SD->isRead = io_info->isRead;
	current_SD->nextSeqLBA = io_info->ldStartBlock + io_info->numBlocks;
	*track_stream = (((*track_stream & ZERO_LAST_STREAM) << 4) | streamNum);
	return;
}

/*
 * mrsas_setup_io: Set up data including Fast Path I/O
 * input: Adapter instance soft state
 * 	  Pointer to command packet
 * 	  Pointer to CCB
 *
 * This function sets up the LD read/write request, including the decision
 * between the fast path and the firmware path. It returns 0 if the request
 * is set up successfully, otherwise it returns a 1.
 */
int
mrsas_setup_io(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd,
    union ccb *ccb, u_int32_t device_id,
    MRSAS_RAID_SCSI_IO_REQUEST * io_request)
{
	struct ccb_hdr *ccb_h = &(ccb->ccb_h);
	struct ccb_scsiio *csio = &(ccb->csio);
	struct IO_REQUEST_INFO io_info;
	MR_DRV_RAID_MAP_ALL *map_ptr;
	struct mrsas_mpt_cmd *r1_cmd = NULL;

	MR_LD_RAID *raid;
	u_int8_t fp_possible;
	u_int32_t start_lba_hi, start_lba_lo, ld_block_size, ld;
	u_int32_t datalength = 0;

	io_request->RaidContext.raid_context.VirtualDiskTgtId = device_id;

	start_lba_lo = 0;
	start_lba_hi = 0;
	fp_possible = 0;

	/*
	 * READ_6 (0x08) or WRITE_6 (0x0A) cdb
	 */
	if (csio->cdb_len == 6) {
		datalength = (u_int32_t)csio->cdb_io.cdb_bytes[4];
		start_lba_lo = ((u_int32_t)csio->cdb_io.cdb_bytes[1] << 16) |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[2] << 8) |
		    (u_int32_t)csio->cdb_io.cdb_bytes[3];
		start_lba_lo &= 0x1FFFFF;
	}
	/*
	 * READ_10 (0x28) or WRITE_10 (0x2A) cdb
	 */
	else if (csio->cdb_len == 10) {
		datalength = (u_int32_t)csio->cdb_io.cdb_bytes[8] |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[7] << 8);
		start_lba_lo = ((u_int32_t)csio->cdb_io.cdb_bytes[2] << 24) |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[3] << 16) |
		    (u_int32_t)csio->cdb_io.cdb_bytes[4] << 8 |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[5]);
	}
	/*
	 * READ_12 (0xA8) or WRITE_12 (0xAA) cdb
	 */
	else if (csio->cdb_len == 12) {
		datalength = (u_int32_t)csio->cdb_io.cdb_bytes[6] << 24 |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[7] << 16) |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[8] << 8) |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[9]);
		start_lba_lo = ((u_int32_t)csio->cdb_io.cdb_bytes[2] << 24) |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[3] << 16) |
		    (u_int32_t)csio->cdb_io.cdb_bytes[4] << 8 |
		    ((u_int32_t)csio->cdb_io.cdb_bytes[5]);
	}
	/*
	 * READ_16 (0x88) or WRITE_16 (0x8A) cdb
	 */
	else if (csio->cdb_len == 16) {
		datalength = (u_int32_t)csio->cdb_io.cdb_bytes[10] << 24 |
((u_int32_t)csio->cdb_io.cdb_bytes[11] << 16) | 1033 ((u_int32_t)csio->cdb_io.cdb_bytes[12] << 8) | 1034 ((u_int32_t)csio->cdb_io.cdb_bytes[13]); 1035 start_lba_lo = ((u_int32_t)csio->cdb_io.cdb_bytes[6] << 24) | 1036 ((u_int32_t)csio->cdb_io.cdb_bytes[7] << 16) | 1037 (u_int32_t)csio->cdb_io.cdb_bytes[8] << 8 | 1038 ((u_int32_t)csio->cdb_io.cdb_bytes[9]); 1039 start_lba_hi = ((u_int32_t)csio->cdb_io.cdb_bytes[2] << 24) | 1040 ((u_int32_t)csio->cdb_io.cdb_bytes[3] << 16) | 1041 (u_int32_t)csio->cdb_io.cdb_bytes[4] << 8 | 1042 ((u_int32_t)csio->cdb_io.cdb_bytes[5]); 1043 } 1044 memset(&io_info, 0, sizeof(struct IO_REQUEST_INFO)); 1045 io_info.ldStartBlock = ((u_int64_t)start_lba_hi << 32) | start_lba_lo; 1046 io_info.numBlocks = datalength; 1047 io_info.ldTgtId = device_id; 1048 io_info.r1_alt_dev_handle = MR_DEVHANDLE_INVALID; 1049 1050 io_request->DataLength = cmd->length; 1051 1052 switch (ccb_h->flags & CAM_DIR_MASK) { 1053 case CAM_DIR_IN: 1054 io_info.isRead = 1; 1055 break; 1056 case CAM_DIR_OUT: 1057 io_info.isRead = 0; 1058 break; 1059 case CAM_DIR_NONE: 1060 default: 1061 mrsas_dprint(sc, MRSAS_TRACE, "From %s : DMA Flag is %d \n", __func__, ccb_h->flags & CAM_DIR_MASK); 1062 break; 1063 } 1064 1065 map_ptr = sc->ld_drv_map[(sc->map_id & 1)]; 1066 ld_block_size = MR_LdBlockSizeGet(device_id, map_ptr, sc); 1067 1068 ld = MR_TargetIdToLdGet(device_id, map_ptr); 1069 if ((ld >= MAX_LOGICAL_DRIVES_EXT) || (!sc->fast_path_io)) { 1070 io_request->RaidContext.raid_context.regLockFlags = 0; 1071 fp_possible = 0; 1072 } else { 1073 if (MR_BuildRaidContext(sc, &io_info, &io_request->RaidContext.raid_context, map_ptr)) 1074 fp_possible = io_info.fpOkForIo; 1075 } 1076 1077 raid = MR_LdRaidGet(ld, map_ptr); 1078 /* Store the TM capability value in cmd */ 1079 cmd->tmCapable = raid->capability.tmCapable; 1080 1081 cmd->request_desc->SCSIIO.MSIxIndex = 1082 sc->msix_vectors ? smp_processor_id() % sc->msix_vectors : 0; 1083 1084 if (sc->is_ventura || sc->is_aero) { 1085 if (sc->streamDetectByLD) { 1086 mtx_lock(&sc->stream_lock); 1087 mrsas_stream_detect(sc, cmd, &io_info); 1088 mtx_unlock(&sc->stream_lock); 1089 /* In ventura if stream detected for a read and 1090 * it is read ahead capable make this IO as LDIO */ 1091 if (io_request->RaidContext.raid_context_g35.streamDetected && 1092 io_info.isRead && io_info.raCapable) 1093 fp_possible = FALSE; 1094 } 1095 1096 /* Set raid 1/10 fast path write capable bit in io_info. 1097 * Note - reset peer_cmd and r1_alt_dev_handle if fp_possible 1098 * disabled after this point. Try not to add more check for 1099 * fp_possible toggle after this. 
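 *
 * When the fast-path write is taken, the two commands are cross-linked
 * through cmd->peer_cmd and their SMIDs are exchanged via the
 * raid_context_g35 peerSMID fields in mrsas_prepare_secondRaid1_IO();
 * mrsas_startio() then fires both request descriptors back to back.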
1100 */ 1101 if (fp_possible && 1102 (io_info.r1_alt_dev_handle != MR_DEVHANDLE_INVALID) && 1103 (raid->level == 1) && !io_info.isRead) { 1104 r1_cmd = mrsas_get_mpt_cmd(sc); 1105 if (mrsas_atomic_inc_return(&sc->fw_outstanding) > sc->max_scsi_cmds) { 1106 fp_possible = FALSE; 1107 mrsas_atomic_dec(&sc->fw_outstanding); 1108 } else { 1109 r1_cmd = mrsas_get_mpt_cmd(sc); 1110 if (!r1_cmd) { 1111 fp_possible = FALSE; 1112 mrsas_atomic_dec(&sc->fw_outstanding); 1113 } 1114 else { 1115 cmd->peer_cmd = r1_cmd; 1116 r1_cmd->peer_cmd = cmd; 1117 } 1118 } 1119 } 1120 } 1121 1122 if (fp_possible) { 1123 mrsas_set_pd_lba(io_request, csio->cdb_len, &io_info, ccb, map_ptr, 1124 start_lba_lo, ld_block_size); 1125 io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; 1126 cmd->request_desc->SCSIIO.RequestFlags = 1127 (MPI2_REQ_DESCRIPT_FLAGS_FP_IO << 1128 MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 1129 if (sc->mrsas_gen3_ctrl) { 1130 if (io_request->RaidContext.raid_context.regLockFlags == REGION_TYPE_UNUSED) 1131 cmd->request_desc->SCSIIO.RequestFlags = 1132 (MRSAS_REQ_DESCRIPT_FLAGS_NO_LOCK << 1133 MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 1134 io_request->RaidContext.raid_context.Type = MPI2_TYPE_CUDA; 1135 io_request->RaidContext.raid_context.nseg = 0x1; 1136 io_request->IoFlags |= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH; 1137 io_request->RaidContext.raid_context.regLockFlags |= 1138 (MR_RL_FLAGS_GRANT_DESTINATION_CUDA | 1139 MR_RL_FLAGS_SEQ_NUM_ENABLE); 1140 } else if (sc->is_ventura || sc->is_aero) { 1141 io_request->RaidContext.raid_context_g35.Type = MPI2_TYPE_CUDA; 1142 io_request->RaidContext.raid_context_g35.nseg = 0x1; 1143 io_request->RaidContext.raid_context_g35.routingFlags.bits.sqn = 1; 1144 io_request->IoFlags |= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH; 1145 if (io_request->RaidContext.raid_context_g35.routingFlags.bits.sld) { 1146 io_request->RaidContext.raid_context_g35.RAIDFlags = 1147 (MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS 1148 << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT); 1149 } 1150 } 1151 if ((sc->load_balance_info[device_id].loadBalanceFlag) && 1152 (io_info.isRead)) { 1153 io_info.devHandle = 1154 mrsas_get_updated_dev_handle(sc, 1155 &sc->load_balance_info[device_id], &io_info); 1156 cmd->load_balance = MRSAS_LOAD_BALANCE_FLAG; 1157 cmd->pd_r1_lb = io_info.pd_after_lb; 1158 if (sc->is_ventura || sc->is_aero) 1159 io_request->RaidContext.raid_context_g35.spanArm = io_info.span_arm; 1160 else 1161 io_request->RaidContext.raid_context.spanArm = io_info.span_arm; 1162 } else 1163 cmd->load_balance = 0; 1164 1165 if (sc->is_ventura || sc->is_aero) 1166 cmd->r1_alt_dev_handle = io_info.r1_alt_dev_handle; 1167 else 1168 cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID; 1169 1170 cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle; 1171 io_request->DevHandle = io_info.devHandle; 1172 cmd->pdInterface = io_info.pdInterface; 1173 } else { 1174 /* Not FP IO */ 1175 io_request->RaidContext.raid_context.timeoutValue = map_ptr->raidMap.fpPdIoTimeoutSec; 1176 cmd->request_desc->SCSIIO.RequestFlags = 1177 (MRSAS_REQ_DESCRIPT_FLAGS_LD_IO << 1178 MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 1179 if (sc->mrsas_gen3_ctrl) { 1180 if (io_request->RaidContext.raid_context.regLockFlags == REGION_TYPE_UNUSED) 1181 cmd->request_desc->SCSIIO.RequestFlags = 1182 (MRSAS_REQ_DESCRIPT_FLAGS_NO_LOCK << 1183 MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 1184 io_request->RaidContext.raid_context.Type = MPI2_TYPE_CUDA; 1185 io_request->RaidContext.raid_context.regLockFlags |= 1186 (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 | 1187 
MR_RL_FLAGS_SEQ_NUM_ENABLE); 1188 io_request->RaidContext.raid_context.nseg = 0x1; 1189 } else if (sc->is_ventura || sc->is_aero) { 1190 io_request->RaidContext.raid_context_g35.Type = MPI2_TYPE_CUDA; 1191 io_request->RaidContext.raid_context_g35.routingFlags.bits.sqn = 1; 1192 io_request->RaidContext.raid_context_g35.nseg = 0x1; 1193 } 1194 io_request->Function = MRSAS_MPI2_FUNCTION_LD_IO_REQUEST; 1195 io_request->DevHandle = device_id; 1196 } 1197 return (0); 1198} 1199 1200/* 1201 * mrsas_build_ldio_nonrw: Builds an LDIO command 1202 * input: Adapter instance soft state 1203 * Pointer to command packet 1204 * Pointer to CCB 1205 * 1206 * This function builds the LDIO command packet. It returns 0 if the command is 1207 * built successfully, otherwise it returns a 1. 1208 */ 1209int 1210mrsas_build_ldio_nonrw(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd, 1211 union ccb *ccb) 1212{ 1213 struct ccb_hdr *ccb_h = &(ccb->ccb_h); 1214 u_int32_t device_id, ld; 1215 MR_DRV_RAID_MAP_ALL *map_ptr; 1216 MR_LD_RAID *raid; 1217 RAID_CONTEXT *pRAID_Context; 1218 MRSAS_RAID_SCSI_IO_REQUEST *io_request; 1219 1220 io_request = cmd->io_request; 1221 device_id = ccb_h->target_id; 1222 1223 map_ptr = sc->ld_drv_map[(sc->map_id & 1)]; 1224 ld = MR_TargetIdToLdGet(device_id, map_ptr); 1225 raid = MR_LdRaidGet(ld, map_ptr); 1226 /* get RAID_Context pointer */ 1227 pRAID_Context = &io_request->RaidContext.raid_context; 1228 /* Store the TM capability value in cmd */ 1229 cmd->tmCapable = raid->capability.tmCapable; 1230 1231 /* FW path for LD Non-RW (SCSI management commands) */ 1232 io_request->Function = MRSAS_MPI2_FUNCTION_LD_IO_REQUEST; 1233 io_request->DevHandle = device_id; 1234 cmd->request_desc->SCSIIO.RequestFlags = 1235 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << 1236 MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 1237 1238 io_request->RaidContext.raid_context.VirtualDiskTgtId = device_id; 1239 io_request->LUN[1] = ccb_h->target_lun & 0xF; 1240 io_request->DataLength = cmd->length; 1241 1242 if (mrsas_map_request(sc, cmd, ccb) == SUCCESS) { 1243 if (cmd->sge_count > sc->max_num_sge) { 1244 device_printf(sc->mrsas_dev, "Error: sge_count (0x%x) exceeds" 1245 "max (0x%x) allowed\n", cmd->sge_count, sc->max_num_sge); 1246 return (1); 1247 } 1248 if (sc->is_ventura || sc->is_aero) 1249 io_request->RaidContext.raid_context_g35.numSGE = cmd->sge_count; 1250 else { 1251 /* 1252 * numSGE store lower 8 bit of sge_count. numSGEExt store 1253 * higher 8 bit of sge_count 1254 */ 1255 io_request->RaidContext.raid_context.numSGE = cmd->sge_count; 1256 io_request->RaidContext.raid_context.numSGEExt = (uint8_t)(cmd->sge_count >> 8); 1257 } 1258 } else { 1259 device_printf(sc->mrsas_dev, "Data map/load failed.\n"); 1260 return (1); 1261 } 1262 return (0); 1263} 1264 1265/* 1266 * mrsas_build_syspdio: Builds an DCDB command 1267 * input: Adapter instance soft state 1268 * Pointer to command packet 1269 * Pointer to CCB 1270 * 1271 * This function builds the DCDB inquiry command. It returns 0 if the command 1272 * is built successfully, otherwise it returns a 1. 
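 *
 * The fp_possible argument is chosen by mrsas_startio(): non-read/write
 * JBOD commands are forced down the firmware path (fp_possible = 0) when
 * secure JBOD is supported, while read/write JBOD commands are built as
 * fast-path capable (fp_possible = 1).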
1273 */ 1274int 1275mrsas_build_syspdio(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd, 1276 union ccb *ccb, struct cam_sim *sim, u_int8_t fp_possible) 1277{ 1278 struct ccb_hdr *ccb_h = &(ccb->ccb_h); 1279 u_int32_t device_id; 1280 MR_DRV_RAID_MAP_ALL *local_map_ptr; 1281 MRSAS_RAID_SCSI_IO_REQUEST *io_request; 1282 RAID_CONTEXT *pRAID_Context; 1283 struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync; 1284 1285 io_request = cmd->io_request; 1286 /* get RAID_Context pointer */ 1287 pRAID_Context = &io_request->RaidContext.raid_context; 1288 device_id = ccb_h->target_id; 1289 local_map_ptr = sc->ld_drv_map[(sc->map_id & 1)]; 1290 io_request->RaidContext.raid_context.RAIDFlags = MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD 1291 << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT; 1292 io_request->RaidContext.raid_context.regLockFlags = 0; 1293 io_request->RaidContext.raid_context.regLockRowLBA = 0; 1294 io_request->RaidContext.raid_context.regLockLength = 0; 1295 1296 cmd->pdInterface = sc->target_list[device_id].interface_type; 1297 1298 /* If FW supports PD sequence number */ 1299 if (sc->use_seqnum_jbod_fp && 1300 sc->pd_list[device_id].driveType == 0x00) { 1301 //printf("Using Drv seq num\n"); 1302 pd_sync = (void *)sc->jbodmap_mem[(sc->pd_seq_map_id - 1) & 1]; 1303 cmd->tmCapable = pd_sync->seq[device_id].capability.tmCapable; 1304 /* More than 256 PD/JBOD support for Ventura */ 1305 if (sc->support_morethan256jbod) 1306 io_request->RaidContext.raid_context.VirtualDiskTgtId = 1307 pd_sync->seq[device_id].pdTargetId; 1308 else 1309 io_request->RaidContext.raid_context.VirtualDiskTgtId = 1310 device_id + 255; 1311 io_request->RaidContext.raid_context.configSeqNum = pd_sync->seq[device_id].seqNum; 1312 io_request->DevHandle = pd_sync->seq[device_id].devHandle; 1313 if (sc->is_ventura || sc->is_aero) 1314 io_request->RaidContext.raid_context_g35.routingFlags.bits.sqn = 1; 1315 else 1316 io_request->RaidContext.raid_context.regLockFlags |= 1317 (MR_RL_FLAGS_SEQ_NUM_ENABLE | MR_RL_FLAGS_GRANT_DESTINATION_CUDA); 1318 /* raid_context.Type = MPI2_TYPE_CUDA is valid only, 1319 * if FW support Jbod Sequence number 1320 */ 1321 io_request->RaidContext.raid_context.Type = MPI2_TYPE_CUDA; 1322 io_request->RaidContext.raid_context.nseg = 0x1; 1323 } else if (sc->fast_path_io) { 1324 //printf("Using LD RAID map\n"); 1325 io_request->RaidContext.raid_context.VirtualDiskTgtId = device_id; 1326 io_request->RaidContext.raid_context.configSeqNum = 0; 1327 local_map_ptr = sc->ld_drv_map[(sc->map_id & 1)]; 1328 io_request->DevHandle = 1329 local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl; 1330 } else { 1331 //printf("Using FW PATH\n"); 1332 /* Want to send all IO via FW path */ 1333 io_request->RaidContext.raid_context.VirtualDiskTgtId = device_id; 1334 io_request->RaidContext.raid_context.configSeqNum = 0; 1335 io_request->DevHandle = MR_DEVHANDLE_INVALID; 1336 } 1337 1338 cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle; 1339 cmd->request_desc->SCSIIO.MSIxIndex = 1340 sc->msix_vectors ? 
smp_processor_id() % sc->msix_vectors : 0; 1341 1342 if (!fp_possible) { 1343 /* system pd firmware path */ 1344 io_request->Function = MRSAS_MPI2_FUNCTION_LD_IO_REQUEST; 1345 cmd->request_desc->SCSIIO.RequestFlags = 1346 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << 1347 MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 1348 io_request->RaidContext.raid_context.timeoutValue = 1349 local_map_ptr->raidMap.fpPdIoTimeoutSec; 1350 io_request->RaidContext.raid_context.VirtualDiskTgtId = device_id; 1351 } else { 1352 /* system pd fast path */ 1353 io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; 1354 io_request->RaidContext.raid_context.timeoutValue = local_map_ptr->raidMap.fpPdIoTimeoutSec; 1355 1356 /* 1357 * NOTE - For system pd RW cmds only IoFlags will be FAST_PATH 1358 * Because the NON RW cmds will now go via FW Queue 1359 * and not the Exception queue 1360 */ 1361 if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero) 1362 io_request->IoFlags |= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH; 1363 1364 cmd->request_desc->SCSIIO.RequestFlags = 1365 (MPI2_REQ_DESCRIPT_FLAGS_FP_IO << 1366 MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); 1367 } 1368 1369 io_request->LUN[1] = ccb_h->target_lun & 0xF; 1370 io_request->DataLength = cmd->length; 1371 1372 if (mrsas_map_request(sc, cmd, ccb) == SUCCESS) { 1373 if (cmd->sge_count > sc->max_num_sge) { 1374 device_printf(sc->mrsas_dev, "Error: sge_count (0x%x) exceeds" 1375 "max (0x%x) allowed\n", cmd->sge_count, sc->max_num_sge); 1376 return (1); 1377 } 1378 if (sc->is_ventura || sc->is_aero) 1379 io_request->RaidContext.raid_context_g35.numSGE = cmd->sge_count; 1380 else { 1381 /* 1382 * numSGE store lower 8 bit of sge_count. numSGEExt store 1383 * higher 8 bit of sge_count 1384 */ 1385 io_request->RaidContext.raid_context.numSGE = cmd->sge_count; 1386 io_request->RaidContext.raid_context.numSGEExt = (uint8_t)(cmd->sge_count >> 8); 1387 } 1388 } else { 1389 device_printf(sc->mrsas_dev, "Data map/load failed.\n"); 1390 return (1); 1391 } 1392 return (0); 1393} 1394 1395/* 1396 * mrsas_is_prp_possible: This function will tell whether PRPs should be built or not 1397 * sc: Adapter instance soft state 1398 * cmd: MPT command frame pointer 1399 * nsesg: Number of OS SGEs 1400 * 1401 * This function will check whether IO is qualified to build PRPs 1402 * return: true: if PRP should be built 1403 * false: if IEEE SGLs should be built 1404 */ 1405static boolean_t mrsas_is_prp_possible(struct mrsas_mpt_cmd *cmd, 1406 bus_dma_segment_t *segs, int nsegs) 1407{ 1408 struct mrsas_softc *sc = cmd->sc; 1409 int i; 1410 u_int32_t data_length = 0; 1411 bool build_prp = false; 1412 u_int32_t mr_nvme_pg_size; 1413 1414 mr_nvme_pg_size = max(sc->nvme_page_size, MR_DEFAULT_NVME_PAGE_SIZE); 1415 data_length = cmd->length; 1416 1417 if (data_length > (mr_nvme_pg_size * 5)) 1418 build_prp = true; 1419 else if ((data_length > (mr_nvme_pg_size * 4)) && 1420 (data_length <= (mr_nvme_pg_size * 5))) { 1421 /* check if 1st SG entry size is < residual beyond 4 pages */ 1422 if ((segs[0].ds_len) < (data_length - (mr_nvme_pg_size * 4))) 1423 build_prp = true; 1424 } 1425 1426 /*check for SGE holes here*/ 1427 for (i = 0; i < nsegs; i++) { 1428 /* check for mid SGEs */ 1429 if ((i != 0) && (i != (nsegs - 1))) { 1430 if ((segs[i].ds_addr % mr_nvme_pg_size) || 1431 (segs[i].ds_len % mr_nvme_pg_size)) { 1432 build_prp = false; 1433 mrsas_atomic_inc(&sc->sge_holes); 1434 break; 1435 } 1436 } 1437 1438 /* check for first SGE*/ 1439 if ((nsegs > 1) && (i == 0)) { 1440 if ((segs[i].ds_addr + segs[i].ds_len) % 
mr_nvme_pg_size) { 1441 build_prp = false; 1442 mrsas_atomic_inc(&sc->sge_holes); 1443 break; 1444 } 1445 } 1446 1447 /* check for Last SGE*/ 1448 if ((nsegs > 1) && (i == (nsegs - 1))) { 1449 if (segs[i].ds_addr % mr_nvme_pg_size) { 1450 build_prp = false; 1451 mrsas_atomic_inc(&sc->sge_holes); 1452 break; 1453 } 1454 } 1455 1456 } 1457 1458 return build_prp; 1459} 1460 1461/* 1462 * mrsas_map_request: Map and load data 1463 * input: Adapter instance soft state 1464 * Pointer to command packet 1465 * 1466 * For data from OS, map and load the data buffer into bus space. The SG list 1467 * is built in the callback. If the bus dmamap load is not successful, 1468 * cmd->error_code will contain the error code and a 1 is returned. 1469 */ 1470int 1471mrsas_map_request(struct mrsas_softc *sc, 1472 struct mrsas_mpt_cmd *cmd, union ccb *ccb) 1473{ 1474 u_int32_t retcode = 0; 1475 struct cam_sim *sim; 1476 1477 sim = xpt_path_sim(cmd->ccb_ptr->ccb_h.path); 1478 1479 if (cmd->data != NULL) { 1480 /* Map data buffer into bus space */ 1481 mtx_lock(&sc->io_lock); 1482#if (__FreeBSD_version >= 902001) 1483 retcode = bus_dmamap_load_ccb(sc->data_tag, cmd->data_dmamap, ccb, 1484 mrsas_data_load_cb, cmd, 0); 1485#else 1486 retcode = bus_dmamap_load(sc->data_tag, cmd->data_dmamap, cmd->data, 1487 cmd->length, mrsas_data_load_cb, cmd, BUS_DMA_NOWAIT); 1488#endif 1489 mtx_unlock(&sc->io_lock); 1490 if (retcode) 1491 device_printf(sc->mrsas_dev, "bus_dmamap_load(): retcode = %d\n", retcode); 1492 if (retcode == EINPROGRESS) { 1493 device_printf(sc->mrsas_dev, "request load in progress\n"); 1494 mrsas_freeze_simq(cmd, sim); 1495 } 1496 } 1497 if (cmd->error_code) 1498 return (1); 1499 return (retcode); 1500} 1501 1502/* 1503 * mrsas_unmap_request: Unmap and unload data 1504 * input: Adapter instance soft state 1505 * Pointer to command packet 1506 * 1507 * This function unmaps and unloads data from OS. 
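 *
 * The bus_dmamap_sync() calls here are the POSTREAD/POSTWRITE counterparts
 * of the PREREAD/PREWRITE syncs issued from mrsas_data_load_cb() before the
 * command was fired.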
1508 */ 1509void 1510mrsas_unmap_request(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd) 1511{ 1512 if (cmd->data != NULL) { 1513 if (cmd->flags & MRSAS_DIR_IN) 1514 bus_dmamap_sync(sc->data_tag, cmd->data_dmamap, BUS_DMASYNC_POSTREAD); 1515 if (cmd->flags & MRSAS_DIR_OUT) 1516 bus_dmamap_sync(sc->data_tag, cmd->data_dmamap, BUS_DMASYNC_POSTWRITE); 1517 mtx_lock(&sc->io_lock); 1518 bus_dmamap_unload(sc->data_tag, cmd->data_dmamap); 1519 mtx_unlock(&sc->io_lock); 1520 } 1521} 1522 1523/** 1524 * mrsas_build_ieee_sgl - Prepare IEEE SGLs 1525 * @sc: Adapter soft state 1526 * @segs: OS SGEs pointers 1527 * @nseg: Number of OS SGEs 1528 * @cmd: Fusion command frame 1529 * return: void 1530 */ 1531static void mrsas_build_ieee_sgl(struct mrsas_mpt_cmd *cmd, bus_dma_segment_t *segs, int nseg) 1532{ 1533 struct mrsas_softc *sc = cmd->sc; 1534 MRSAS_RAID_SCSI_IO_REQUEST *io_request; 1535 pMpi25IeeeSgeChain64_t sgl_ptr; 1536 int i = 0, sg_processed = 0; 1537 1538 io_request = cmd->io_request; 1539 sgl_ptr = (pMpi25IeeeSgeChain64_t)&io_request->SGL; 1540 1541 if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero) { 1542 pMpi25IeeeSgeChain64_t sgl_ptr_end = sgl_ptr; 1543 1544 sgl_ptr_end += sc->max_sge_in_main_msg - 1; 1545 sgl_ptr_end->Flags = 0; 1546 } 1547 if (nseg != 0) { 1548 for (i = 0; i < nseg; i++) { 1549 sgl_ptr->Address = segs[i].ds_addr; 1550 sgl_ptr->Length = segs[i].ds_len; 1551 sgl_ptr->Flags = 0; 1552 if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero) { 1553 if (i == nseg - 1) 1554 sgl_ptr->Flags = IEEE_SGE_FLAGS_END_OF_LIST; 1555 } 1556 sgl_ptr++; 1557 sg_processed = i + 1; 1558 if ((sg_processed == (sc->max_sge_in_main_msg - 1)) && 1559 (nseg > sc->max_sge_in_main_msg)) { 1560 pMpi25IeeeSgeChain64_t sg_chain; 1561 1562 if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero) { 1563 if ((cmd->io_request->IoFlags & MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) 1564 != MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) 1565 cmd->io_request->ChainOffset = sc->chain_offset_io_request; 1566 else 1567 cmd->io_request->ChainOffset = 0; 1568 } else 1569 cmd->io_request->ChainOffset = sc->chain_offset_io_request; 1570 sg_chain = sgl_ptr; 1571 if (sc->mrsas_gen3_ctrl || sc->is_ventura || sc->is_aero) 1572 sg_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT; 1573 else 1574 sg_chain->Flags = (IEEE_SGE_FLAGS_CHAIN_ELEMENT | MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR); 1575 sg_chain->Length = (sizeof(MPI2_SGE_IO_UNION) * (nseg - sg_processed)); 1576 sg_chain->Address = cmd->chain_frame_phys_addr; 1577 sgl_ptr = (pMpi25IeeeSgeChain64_t)cmd->chain_frame; 1578 } 1579 } 1580 } 1581} 1582 1583/** 1584 * mrsas_build_prp_nvme - Prepare PRPs(Physical Region Page)- SGLs specific to NVMe drives only 1585 * @sc: Adapter soft state 1586 * @segs: OS SGEs pointers 1587 * @nseg: Number of OS SGEs 1588 * @cmd: Fusion command frame 1589 * return: void 1590 */ 1591static void mrsas_build_prp_nvme(struct mrsas_mpt_cmd *cmd, bus_dma_segment_t *segs, int nseg) 1592{ 1593 struct mrsas_softc *sc = cmd->sc; 1594 int sge_len, offset, num_prp_in_chain = 0; 1595 pMpi25IeeeSgeChain64_t main_chain_element, ptr_first_sgl, sgl_ptr; 1596 u_int64_t *ptr_sgl; 1597 bus_addr_t ptr_sgl_phys; 1598 u_int64_t sge_addr; 1599 u_int32_t page_mask, page_mask_result, i = 0; 1600 u_int32_t first_prp_len; 1601 int data_len = cmd->length; 1602 u_int32_t mr_nvme_pg_size = max(sc->nvme_page_size, 1603 MR_DEFAULT_NVME_PAGE_SIZE); 1604 1605 sgl_ptr = (pMpi25IeeeSgeChain64_t) &cmd->io_request->SGL; 1606 /* 1607 * NVMe has a very convoluted PRP format. 
One PRP is required 1608 * for each page or partial page. We need to split up OS SG 1609 * entries if they are longer than one page or cross a page 1610 * boundary. We also have to insert a PRP list pointer entry as 1611 * the last entry in each physical page of the PRP list. 1612 * 1613 * NOTE: The first PRP "entry" is actually placed in the first 1614 * SGL entry in the main message in IEEE 64 format. The 2nd 1615 * entry in the main message is the chain element, and the rest 1616 * of the PRP entries are built in the contiguous PCIe buffer. 1617 */ 1618 page_mask = mr_nvme_pg_size - 1; 1619 ptr_sgl = (u_int64_t *) cmd->chain_frame; 1620 ptr_sgl_phys = cmd->chain_frame_phys_addr; 1621 memset(ptr_sgl, 0, sc->max_chain_frame_sz); 1622 1623 /* Build chain frame element which holds all PRPs except first*/ 1624 main_chain_element = (pMpi25IeeeSgeChain64_t) 1625 ((u_int8_t *)sgl_ptr + sizeof(MPI25_IEEE_SGE_CHAIN64)); 1626 1627 1628 main_chain_element->Address = cmd->chain_frame_phys_addr; 1629 main_chain_element->NextChainOffset = 0; 1630 main_chain_element->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT | 1631 IEEE_SGE_FLAGS_SYSTEM_ADDR | 1632 MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP; 1633 1634 1635 /* Build first PRP, SGE need not to be PAGE aligned*/ 1636 ptr_first_sgl = sgl_ptr; 1637 sge_addr = segs[i].ds_addr; 1638 sge_len = segs[i].ds_len; 1639 i++; 1640 1641 offset = (u_int32_t) (sge_addr & page_mask); 1642 first_prp_len = mr_nvme_pg_size - offset; 1643 1644 ptr_first_sgl->Address = sge_addr; 1645 ptr_first_sgl->Length = first_prp_len; 1646 1647 data_len -= first_prp_len; 1648 1649 if (sge_len > first_prp_len) { 1650 sge_addr += first_prp_len; 1651 sge_len -= first_prp_len; 1652 } else if (sge_len == first_prp_len) { 1653 sge_addr = segs[i].ds_addr; 1654 sge_len = segs[i].ds_len; 1655 i++; 1656 } 1657 1658 for (;;) { 1659 1660 offset = (u_int32_t) (sge_addr & page_mask); 1661 1662 /* Put PRP pointer due to page boundary*/ 1663 page_mask_result = (uintptr_t)(ptr_sgl + 1) & page_mask; 1664 if (!page_mask_result) { 1665 device_printf(sc->mrsas_dev, "BRCM: Put prp pointer as we are at page boundary" 1666 " ptr_sgl: 0x%p\n", ptr_sgl); 1667 ptr_sgl_phys++; 1668 *ptr_sgl = (uintptr_t)ptr_sgl_phys; 1669 ptr_sgl++; 1670 num_prp_in_chain++; 1671 } 1672 1673 *ptr_sgl = sge_addr; 1674 ptr_sgl++; 1675 ptr_sgl_phys++; 1676 num_prp_in_chain++; 1677 1678 1679 sge_addr += mr_nvme_pg_size; 1680 sge_len -= mr_nvme_pg_size; 1681 data_len -= mr_nvme_pg_size; 1682 1683 if (data_len <= 0) 1684 break; 1685 1686 if (sge_len > 0) 1687 continue; 1688 1689 sge_addr = segs[i].ds_addr; 1690 sge_len = segs[i].ds_len; 1691 i++; 1692 } 1693 1694 main_chain_element->Length = num_prp_in_chain * sizeof(u_int64_t); 1695 mrsas_atomic_inc(&sc->prp_count); 1696 1697} 1698 1699/* 1700 * mrsas_data_load_cb: Callback entry point to build SGLs 1701 * input: Pointer to command packet as argument 1702 * Pointer to segment 1703 * Number of segments Error 1704 * 1705 * This is the callback function of the bus dma map load. 
/*
 * mrsas_data_load_cb:	Callback entry point to build SGLs
 * input:		Pointer to command packet as argument
 *			Pointer to the segments
 *			Number of segments
 *			Error code
 *
 * This is the callback function of the bus dma map load.  It builds
 * the SG list.
 */
static void
mrsas_data_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct mrsas_mpt_cmd *cmd = (struct mrsas_mpt_cmd *)arg;
	struct mrsas_softc *sc = cmd->sc;
	boolean_t build_prp = false;

	if (error) {
		cmd->error_code = error;
		device_printf(sc->mrsas_dev, "mrsas_data_load_cb_prp: error=%d\n", error);
		if (error == EFBIG) {
			cmd->ccb_ptr->ccb_h.status = CAM_REQ_TOO_BIG;
			return;
		}
	}
	if (cmd->flags & MRSAS_DIR_IN)
		bus_dmamap_sync(cmd->sc->data_tag, cmd->data_dmamap,
		    BUS_DMASYNC_PREREAD);
	if (cmd->flags & MRSAS_DIR_OUT)
		bus_dmamap_sync(cmd->sc->data_tag, cmd->data_dmamap,
		    BUS_DMASYNC_PREWRITE);
	if (nseg > sc->max_num_sge) {
		device_printf(sc->mrsas_dev, "SGE count is too large or 0.\n");
		return;
	}

	/* Check whether PRPs should be built or IEEE SGLs */
	if ((cmd->io_request->IoFlags & MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) &&
	    (cmd->pdInterface == NVME_PD))
		build_prp = mrsas_is_prp_possible(cmd, segs, nseg);

	if (build_prp == true)
		mrsas_build_prp_nvme(cmd, segs, nseg);
	else
		mrsas_build_ieee_sgl(cmd, segs, nseg);

	cmd->sge_count = nseg;
}

/*
 * mrsas_freeze_simq:	Freeze SIM queue
 * input:		Pointer to command packet
 *			Pointer to SIM
 *
 * This function freezes the SIM queue and marks the CCB to be requeued.
 */
static void
mrsas_freeze_simq(struct mrsas_mpt_cmd *cmd, struct cam_sim *sim)
{
	union ccb *ccb = (union ccb *)(cmd->ccb_ptr);

	xpt_freeze_simq(sim, 1);
	ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
	ccb->ccb_h.status |= CAM_REQUEUE_REQ;
}

void
mrsas_xpt_freeze(struct mrsas_softc *sc)
{
	xpt_freeze_simq(sc->sim_0, 1);
	xpt_freeze_simq(sc->sim_1, 1);
}

void
mrsas_xpt_release(struct mrsas_softc *sc)
{
	xpt_release_simq(sc->sim_0, 1);
	xpt_release_simq(sc->sim_1, 1);
}

/*
 * mrsas_cmd_done:	Perform remaining command completion
 * input:		Adapter instance soft state
 *			Pointer to command packet
 *
 * This function unmaps the request and releases the MPT command.
 */
void
mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd)
{
	mrsas_unmap_request(sc, cmd);

	mtx_lock(&sc->sim_lock);
	if (cmd->callout_owner) {
		callout_stop(&cmd->cm_callout);
		cmd->callout_owner = false;
	}
	xpt_done(cmd->ccb_ptr);
	cmd->ccb_ptr = NULL;
	mtx_unlock(&sc->sim_lock);
	mrsas_release_mpt_cmd(cmd);
}

/*
 * mrsas_cam_poll:	Polling entry point
 * input:		Pointer to SIM
 *
 * This function drains completed commands from every MSI-x reply queue,
 * or from queue 0 when MSI-x is not in use.
 */
static void
mrsas_cam_poll(struct cam_sim *sim)
{
	int i;
	struct mrsas_softc *sc = (struct mrsas_softc *)cam_sim_softc(sim);

	if (sc->msix_vectors != 0) {
		for (i = 0; i < sc->msix_vectors; i++) {
			mrsas_complete_cmd(sc, i);
		}
	} else {
		mrsas_complete_cmd(sc, 0);
	}
}

/*
 * mrsas_bus_scan:	Perform bus scan
 * input:		Adapter instance soft state
 *
 * This mrsas_bus_scan function is needed for FreeBSD 7.x. Also, it should not
 * be called in FreeBSD 8.x and later versions, where the bus scan is
 * automatic.
 */
int
mrsas_bus_scan(struct mrsas_softc *sc)
{
	union ccb *ccb_0;
	union ccb *ccb_1;

	if ((ccb_0 = xpt_alloc_ccb()) == NULL) {
		return (ENOMEM);
	}
	if ((ccb_1 = xpt_alloc_ccb()) == NULL) {
		xpt_free_ccb(ccb_0);
		return (ENOMEM);
	}
	mtx_lock(&sc->sim_lock);
	if (xpt_create_path(&ccb_0->ccb_h.path, xpt_periph, cam_sim_path(sc->sim_0),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_free_ccb(ccb_0);
		xpt_free_ccb(ccb_1);
		mtx_unlock(&sc->sim_lock);
		return (EIO);
	}
	if (xpt_create_path(&ccb_1->ccb_h.path, xpt_periph, cam_sim_path(sc->sim_1),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_free_ccb(ccb_0);
		xpt_free_ccb(ccb_1);
		mtx_unlock(&sc->sim_lock);
		return (EIO);
	}
	mtx_unlock(&sc->sim_lock);
	xpt_rescan(ccb_0);
	xpt_rescan(ccb_1);

	return (0);
}

/*
 * mrsas_bus_scan_sim:	Perform bus scan per SIM
 * input:		Adapter instance soft state
 *			Pointer to SIM
 *
 * This function will be called from the event handler on LD creation/deletion
 * and JBOD on/off.
 */
int
mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim)
{
	union ccb *ccb;

	if ((ccb = xpt_alloc_ccb()) == NULL) {
		return (ENOMEM);
	}
	mtx_lock(&sc->sim_lock);
	if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, cam_sim_path(sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_free_ccb(ccb);
		mtx_unlock(&sc->sim_lock);
		return (EIO);
	}
	mtx_unlock(&sc->sim_lock);
	xpt_rescan(ccb);

	return (0);
}

/*
 * mrsas_track_scsiio:	Track IOs for a given target in the mpt_cmd_list
 * input:		Adapter instance soft state
 *			Target ID of the target
 *			Bus ID of the target
 *
 * This function checks for any pending IO in the whole mpt_cmd_list pool
 * with the bus_id and target_id passed in arguments. If some IO is found,
 * that means the target reset did not complete successfully.
 *
 * Returns FAIL if IOs are pending to the target device, else returns SUCCESS.
 */
static int
mrsas_track_scsiio(struct mrsas_softc *sc, target_id_t tgt_id, u_int32_t bus_id)
{
	int i;
	struct mrsas_mpt_cmd *mpt_cmd = NULL;

	for (i = 0; i < sc->max_fw_cmds; i++) {
		mpt_cmd = sc->mpt_cmd_list[i];

		/*
		 * Check if the target_id and bus_id are the same as those of
		 * the timed-out IO.
		 */
		if (mpt_cmd->ccb_ptr) {
			/* bus_id = 1 denotes a VD */
			if (bus_id == 1)
				tgt_id =
				    (mpt_cmd->ccb_ptr->ccb_h.target_id - (MRSAS_MAX_PD - 1));

			if (mpt_cmd->ccb_ptr->cpi.bus_id == bus_id &&
			    mpt_cmd->ccb_ptr->ccb_h.target_id == tgt_id) {
				device_printf(sc->mrsas_dev,
				    "IO commands pending to target id %d\n", tgt_id);
				return FAIL;
			}
		}
	}

	return SUCCESS;
}
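/*
 * Illustrative sketch, not driver code: the flat reset-pool index to
 * (bus_id, CAM target id) mapping that mrsas_track_scsiio() above and
 * mrsas_reset_targets() below rely on.  Indices below MRSAS_MAX_PD - 1 are
 * system PDs on bus 0; the remaining indices are VDs on bus 1, offset by
 * MRSAS_MAX_PD - 1.  EX_MAX_PD is a hypothetical stand-in for the driver's
 * MRSAS_MAX_PD constant.
 *
 *	#include <stdio.h>
 *
 *	#define EX_MAX_PD	256	// hypothetical, mirrors MRSAS_MAX_PD
 *
 *	static void
 *	ex_map(int pool_index, int *bus_id, int *cam_target)
 *	{
 *		if (pool_index < (EX_MAX_PD - 1)) {
 *			*bus_id = 0;		// sim_0 carries system PDs
 *			*cam_target = pool_index;
 *		} else {
 *			*bus_id = 1;		// sim_1 carries VDs
 *			*cam_target = pool_index - (EX_MAX_PD - 1);
 *		}
 *	}
 *
 *	int
 *	main(void)
 *	{
 *		int bus, tgt;
 *
 *		ex_map(5, &bus, &tgt);			// a PD slot
 *		printf("pool 5 -> bus %d target %d\n", bus, tgt);
 *		ex_map(EX_MAX_PD - 1 + 3, &bus, &tgt);	// a VD slot
 *		printf("pool %d -> bus %d target %d\n",
 *		    EX_MAX_PD - 1 + 3, bus, tgt);
 *		return (0);
 *	}
 */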
#if TM_DEBUG
/*
 * mrsas_tm_response_code:	Prints the TM response code received from FW
 * input:		Adapter instance soft state
 *			MPI reply returned from firmware
 *
 * Returns nothing.
 */
static void
mrsas_tm_response_code(struct mrsas_softc *sc,
    MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply)
{
	char *desc;

	switch (mpi_reply->ResponseCode) {
	case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
		desc = "task management request completed";
		break;
	case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
		desc = "invalid frame";
		break;
	case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
		desc = "task management request not supported";
		break;
	case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
		desc = "task management request failed";
		break;
	case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
		desc = "task management request succeeded";
		break;
	case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
		desc = "invalid lun";
		break;
	case 0xA:
		desc = "overlapped tag attempted";
		break;
	case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
		desc = "task queued, however not sent to target";
		break;
	default:
		desc = "unknown";
		break;
	}
	device_printf(sc->mrsas_dev, "response_code(%01x): %s\n",
	    mpi_reply->ResponseCode, desc);
	device_printf(sc->mrsas_dev,
	    "TerminationCount/DevHandle/Function/TaskType/IOCStat/IOCLoginfo\n"
	    "0x%x/0x%x/0x%x/0x%x/0x%x/0x%x\n",
	    mpi_reply->TerminationCount, mpi_reply->DevHandle,
	    mpi_reply->Function, mpi_reply->TaskType,
	    mpi_reply->IOCStatus, mpi_reply->IOCLogInfo);
}
#endif

/*
 * mrsas_issue_tm:	Fires the TM command to FW and waits for completion
 * input:		Adapter instance soft state
 *			Request descriptor compiled by mrsas_reset_targets
 *
 * Returns FAIL if the TM command TIMEDOUT from FW, else SUCCESS.
 */
static int
mrsas_issue_tm(struct mrsas_softc *sc,
    MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc)
{
	int sleep_stat;

	mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high);
	sleep_stat = msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO, "tm_sleep", 50*hz);

	if (sleep_stat == EWOULDBLOCK) {
		device_printf(sc->mrsas_dev, "tm cmd TIMEDOUT\n");
		return FAIL;
	}

	return SUCCESS;
}

/*
 * mrsas_reset_targets:	Gathers info to fire a target reset command
 * input:		Adapter instance soft state
 *
 * This function compiles data for a target reset command to be fired to the
 * FW and then traverses the target_reset_pool to find targets with TIMEDOUT
 * IOs.
 *
 * Returns SUCCESS or FAIL.
 */
int mrsas_reset_targets(struct mrsas_softc *sc)
{
	struct mrsas_mpt_cmd *tm_mpt_cmd = NULL;
	struct mrsas_mpt_cmd *tgt_mpt_cmd = NULL;
	MR_TASK_MANAGE_REQUEST *mr_request;
	MPI2_SCSI_TASK_MANAGE_REQUEST *tm_mpi_request;
	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
	int retCode = FAIL, count, i, outstanding;
	u_int32_t MSIxIndex, bus_id;
	target_id_t tgt_id;
#if TM_DEBUG
	MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply;
#endif

	outstanding = mrsas_atomic_read(&sc->fw_outstanding);

	if (!outstanding) {
		device_printf(sc->mrsas_dev, "NO IOs pending...\n");
		mrsas_atomic_set(&sc->target_reset_outstanding, 0);
		retCode = SUCCESS;
		goto return_status;
	} else if (sc->adprecovery != MRSAS_HBA_OPERATIONAL) {
		device_printf(sc->mrsas_dev, "Controller is not operational\n");
		goto return_status;
	} else {
		/* Some more error checks will be added in future */
	}

	/* Get an mpt frame and an index to fire the TM cmd */
	tm_mpt_cmd = mrsas_get_mpt_cmd(sc);
	if (!tm_mpt_cmd) {
		retCode = FAIL;
		goto return_status;
	}

	req_desc = mrsas_get_request_desc(sc, (tm_mpt_cmd->index) - 1);
	if (!req_desc) {
		device_printf(sc->mrsas_dev, "Cannot get request_descriptor for tm.\n");
		retCode = FAIL;
		goto release_mpt;
	}
	memset(req_desc, 0, sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION));

	req_desc->HighPriority.SMID = tm_mpt_cmd->index;
	req_desc->HighPriority.RequestFlags =
	    (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
	    MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
	req_desc->HighPriority.MSIxIndex = 0;
	req_desc->HighPriority.LMID = 0;
	req_desc->HighPriority.Reserved1 = 0;
	tm_mpt_cmd->request_desc = req_desc;

	mr_request = (MR_TASK_MANAGE_REQUEST *) tm_mpt_cmd->io_request;
	memset(mr_request, 0, sizeof(MR_TASK_MANAGE_REQUEST));

	tm_mpi_request = (MPI2_SCSI_TASK_MANAGE_REQUEST *) &mr_request->TmRequest;
	tm_mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	tm_mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
	tm_mpi_request->TaskMID = 0;	/* smid task */
	tm_mpi_request->LUN[1] = 0;

	/* Traverse the tm_mpt pool to get valid entries */
	for (i = 0; i < MRSAS_MAX_TM_TARGETS; i++) {
		if (!sc->target_reset_pool[i]) {
			continue;
		} else {
			tgt_mpt_cmd = sc->target_reset_pool[i];
		}

		tgt_id = i;

		/* See if the target is tm capable or NOT */
		if (!tgt_mpt_cmd->tmCapable) {
			device_printf(sc->mrsas_dev, "Task management NOT SUPPORTED for "
			    "CAM target:%d\n", tgt_id);

			retCode = FAIL;
			goto release_mpt;
		}

		tm_mpi_request->DevHandle = tgt_mpt_cmd->io_request->DevHandle;

		if (i < (MRSAS_MAX_PD - 1)) {
			mr_request->uTmReqReply.tmReqFlags.isTMForPD = 1;
			bus_id = 0;
		} else {
			mr_request->uTmReqReply.tmReqFlags.isTMForLD = 1;
			bus_id = 1;
		}

		device_printf(sc->mrsas_dev, "TM will be fired for "
		    "CAM target:%d and bus_id %d\n", tgt_id, bus_id);

		sc->ocr_chan = (void *)&tm_mpt_cmd;
		retCode = mrsas_issue_tm(sc, req_desc);
		if (retCode == FAIL)
			goto release_mpt;

#if TM_DEBUG
		mpi_reply =
		    (MPI2_SCSI_TASK_MANAGE_REPLY *) &mr_request->uTmReqReply.TMReply;
		mrsas_tm_response_code(sc, mpi_reply);
#endif
		mrsas_atomic_dec(&sc->target_reset_outstanding);
		sc->target_reset_pool[i] = NULL;

		/* Check for pending cmds in the mpt_cmd_pool with the tgt_id */
		mrsas_disable_intr(sc);
		/*
		 * Wait for 1 second for any parallel ISR that is calling the
		 * same mrsas_complete_cmd() to finish.
		 */
		msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO, "mrsas_reset_wakeup",
		    1 * hz);
		count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
		mtx_unlock(&sc->sim_lock);
		for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
			mrsas_complete_cmd(sc, MSIxIndex);
		mtx_lock(&sc->sim_lock);
		retCode = mrsas_track_scsiio(sc, tgt_id, bus_id);
		mrsas_enable_intr(sc);

		if (retCode == FAIL)
			goto release_mpt;
	}

	device_printf(sc->mrsas_dev, "Number of targets outstanding "
	    "after reset: %d\n", mrsas_atomic_read(&sc->target_reset_outstanding));

release_mpt:
	mrsas_release_mpt_cmd(tm_mpt_cmd);
return_status:
	device_printf(sc->mrsas_dev, "target reset %s!!\n",
	    (retCode == SUCCESS) ? "SUCCESS" : "FAIL");

	return retCode;
}