1/*- 2 * Copyright (c) 2009 Yahoo! Inc. 3 * Copyright (c) 2011, 2012 LSI Corp. 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 
 *
 * LSI MPT-Fusion Host Adapter FreeBSD
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/* Communications core for LSI MPT2 */

/* TODO Move headers to mpsvar */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/selinfo.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/bio.h>
#include <sys/malloc.h>
#include <sys/uio.h>
#include <sys/sysctl.h>
#include <sys/endian.h>
#include <sys/queue.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/sbuf.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>

#include <machine/stdarg.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_xpt.h>
#include <cam/cam_debug.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_periph.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#if __FreeBSD_version >= 900026
#include <cam/scsi/smp_all.h>
#endif

#include <dev/mps/mpi/mpi2_type.h>
#include <dev/mps/mpi/mpi2.h>
#include <dev/mps/mpi/mpi2_ioc.h>
#include <dev/mps/mpi/mpi2_sas.h>
#include <dev/mps/mpi/mpi2_cnfg.h>
#include <dev/mps/mpi/mpi2_init.h>
#include <dev/mps/mpi/mpi2_tool.h>
#include <dev/mps/mps_ioctl.h>
#include <dev/mps/mpsvar.h>
#include <dev/mps/mps_table.h>
#include <dev/mps/mps_sas.h>

/* Seconds between discovery-completion polls, and how many to allow. */
#define MPSSAS_DISCOVERY_TIMEOUT	20
#define MPSSAS_MAX_DISCOVERY_TIMEOUTS	10 /* 200 seconds */

/*
 * static array to check SCSI OpCode for EEDP protection bits
 */
#define PRO_R MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP
#define PRO_W MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
#define PRO_V MPI2_SCSIIO_EEDPFLAGS_INSERT_OP
/*
 * Indexed by SCSI opcode; non-zero entries give the EEDP flags to use
 * for the READ/WRITE/VERIFY/WRITE SAME families (10/12/16-byte CDBs).
 */
static uint8_t op_code_prot[256] = {
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, PRO_W, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, PRO_R, 0, PRO_W, 0, 0, 0, PRO_W, PRO_V,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};

MALLOC_DEFINE(M_MPSSAS, "MPSSAS", "MPS SAS memory");

static void mpssas_discovery_timeout(void *data);
static void mpssas_remove_device(struct mps_softc *, struct mps_command *);
static void mpssas_remove_complete(struct mps_softc *, struct mps_command *);
static void mpssas_action(struct cam_sim *sim, union ccb *ccb);
static void mpssas_poll(struct cam_sim *sim);
static void mpssas_scsiio_timeout(void *data);
static void mpssas_abort_complete(struct mps_softc *sc, struct mps_command *cm);
static void mpssas_direct_drive_io(struct mpssas_softc *sassc,
    struct mps_command *cm, union ccb *ccb);
static void mpssas_action_scsiio(struct mpssas_softc *, union ccb *);
static void mpssas_scsiio_complete(struct mps_softc *, struct mps_command *);
static void mpssas_action_resetdev(struct mpssas_softc *, union ccb *);
#if __FreeBSD_version >= 900026
static void mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm);
static void mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb,
    uint64_t sasaddr);
static void mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb);
#endif //FreeBSD_version >= 900026
static void
mpssas_resetdev_complete(struct mps_softc *, struct mps_command *);
static int mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm);
static int mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type);
static void mpssas_async(void *callback_arg, uint32_t code,
    struct cam_path *path, void *arg);
#if (__FreeBSD_version < 901503) || \
    ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006))
static void mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path,
    struct ccb_getdev *cgd);
static void mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb);
#endif
static int mpssas_send_portenable(struct mps_softc *sc);
static void mpssas_portenable_complete(struct mps_softc *sc,
    struct mps_command *cm);

/*
 * Linear search of the target array, starting at index 'start', for the
 * target that currently owns firmware device handle 'handle'.  Returns
 * NULL when no target matches.
 */
struct mpssas_target *
mpssas_find_target_by_handle(struct mpssas_softc *sassc, int start, uint16_t handle)
{
	struct mpssas_target *target;
	int i;

	for (i = start; i < sassc->sc->facts->MaxTargets; i++) {
		target = &sassc->targets[i];
		if (target->handle == handle)
			return (target);
	}

	return (NULL);
}

/* we need to freeze the simq during attach and diag reset, to avoid failing
 * commands before device handles have been found by discovery.  Since
 * discovery involves reading config pages and possibly sending commands,
 * discovery actions may continue even after we receive the end of discovery
 * event, so refcount discovery actions instead of assuming we can unfreeze
 * the simq when we get the event.
172 */ 173void 174mpssas_startup_increment(struct mpssas_softc *sassc) 175{ 176 MPS_FUNCTRACE(sassc->sc); 177 178 if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) { 179 if (sassc->startup_refcount++ == 0) { 180 /* just starting, freeze the simq */ 181 mps_dprint(sassc->sc, MPS_INIT, 182 "%s freezing simq\n", __func__); 183#if (__FreeBSD_version >= 1000039) || \ 184 ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502)) 185 xpt_hold_boot(); 186#endif 187 xpt_freeze_simq(sassc->sim, 1); 188 } 189 mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__, 190 sassc->startup_refcount); 191 } 192} 193 194void 195mpssas_startup_decrement(struct mpssas_softc *sassc) 196{ 197 MPS_FUNCTRACE(sassc->sc); 198 199 if ((sassc->flags & MPSSAS_IN_STARTUP) != 0) { 200 if (--sassc->startup_refcount == 0) { 201 /* finished all discovery-related actions, release 202 * the simq and rescan for the latest topology. 203 */ 204 mps_dprint(sassc->sc, MPS_INIT, 205 "%s releasing simq\n", __func__); 206 sassc->flags &= ~MPSSAS_IN_STARTUP; 207 xpt_release_simq(sassc->sim, 1); 208#if (__FreeBSD_version >= 1000039) || \ 209 ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502)) 210 xpt_release_boot(); 211#else 212 mpssas_rescan_target(sassc->sc, NULL); 213#endif 214 } 215 mps_dprint(sassc->sc, MPS_INIT, "%s refcount %u\n", __func__, 216 sassc->startup_refcount); 217 } 218} 219 220/* LSI's firmware requires us to stop sending commands when we're doing task 221 * management, so refcount the TMs and keep the simq frozen when any are in 222 * use. 
 */
struct mps_command *
mpssas_alloc_tm(struct mps_softc *sc)
{
	struct mps_command *tm;

	MPS_FUNCTRACE(sc);
	tm = mps_alloc_high_priority_command(sc);
	if (tm != NULL) {
		/* The first outstanding TM freezes the simq. */
		if (sc->sassc->tm_count++ == 0) {
			mps_dprint(sc, MPS_RECOVERY,
			    "%s freezing simq\n", __func__);
			xpt_freeze_simq(sc->sassc->sim, 1);
		}
		mps_dprint(sc, MPS_RECOVERY, "%s tm_count %u\n", __func__,
		    sc->sassc->tm_count);
	}
	return tm;
}

void
mpssas_free_tm(struct mps_softc *sc, struct mps_command *tm)
{
	mps_dprint(sc, MPS_TRACE, "%s", __func__);
	if (tm == NULL)
		return;

	/* if there are no TMs in use, we can release the simq.  We use our
	 * own refcount so that it's easier for a diag reset to cleanup and
	 * release the simq.
	 */
	if (--sc->sassc->tm_count == 0) {
		mps_dprint(sc, MPS_RECOVERY, "%s releasing simq\n", __func__);
		xpt_release_simq(sc->sassc->sim, 1);
	}
	mps_dprint(sc, MPS_RECOVERY, "%s tm_count %u\n", __func__,
	    sc->sassc->tm_count);

	mps_free_high_priority_command(sc, tm);
}

/*
 * Schedule a CAM rescan of one target, or of the whole bus when targ is
 * NULL.
 */
void
mpssas_rescan_target(struct mps_softc *sc, struct mpssas_target *targ)
{
	struct mpssas_softc *sassc = sc->sassc;
	path_id_t pathid;
	target_id_t targetid;
	union ccb *ccb;

	MPS_FUNCTRACE(sc);
	pathid = cam_sim_path(sassc->sim);
	if (targ == NULL)
		targetid = CAM_TARGET_WILDCARD;
	else
		targetid = targ - sassc->targets;

	/*
	 * Allocate a CCB and schedule a rescan.
	 */
	ccb = xpt_alloc_ccb_nowait();
	if (ccb == NULL) {
		mps_dprint(sc, MPS_ERROR, "unable to alloc CCB for rescan\n");
		return;
	}

	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid,
	    targetid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		mps_dprint(sc, MPS_ERROR, "unable to create path for rescan\n");
		xpt_free_ccb(ccb);
		return;
	}

	if (targetid == CAM_TARGET_WILDCARD)
		ccb->ccb_h.func_code = XPT_SCAN_BUS;
	else
		ccb->ccb_h.func_code = XPT_SCAN_TGT;

	mps_dprint(sc, MPS_TRACE, "%s targetid %u\n", __func__, targetid);
	xpt_rescan(ccb);
}

/*
 * Debug logging helper: prefixes the caller's printf-style message with
 * a description of 'cm' (CAM path and CDB when a CCB is attached, or a
 * "noperiph" tuple otherwise, plus the SMID).  No-op unless 'level' is
 * enabled in mps_debug.
 */
static void
mpssas_log_command(struct mps_command *cm, u_int level, const char *fmt, ...)
{
	struct sbuf sb;
	va_list ap;
	char str[192];
	char path_str[64];

	if (cm == NULL)
		return;

	/* No need to be in here if debugging isn't enabled */
	if ((cm->cm_sc->mps_debug & level) == 0)
		return;

	sbuf_new(&sb, str, sizeof(str), 0);

	va_start(ap, fmt);

	if (cm->cm_ccb != NULL) {
		xpt_path_string(cm->cm_ccb->csio.ccb_h.path, path_str,
		    sizeof(path_str));
		sbuf_cat(&sb, path_str);
		if (cm->cm_ccb->ccb_h.func_code == XPT_SCSI_IO) {
			scsi_command_string(&cm->cm_ccb->csio, &sb);
			sbuf_printf(&sb, "length %d ",
			    cm->cm_ccb->csio.dxfer_len);
		}
	}
	else {
		sbuf_printf(&sb, "(noperiph:%s%d:%u:%u:%u): ",
		    cam_sim_name(cm->cm_sc->sassc->sim),
		    cam_sim_unit(cm->cm_sc->sassc->sim),
		    cam_sim_bus(cm->cm_sc->sassc->sim),
		    cm->cm_targ ?
cm->cm_targ->tid : 0xFFFFFFFF, 339 cm->cm_lun); 340 } 341 342 sbuf_printf(&sb, "SMID %u ", cm->cm_desc.Default.SMID); 343 sbuf_vprintf(&sb, fmt, ap); 344 sbuf_finish(&sb); 345 mps_dprint_field(cm->cm_sc, level, "%s", sbuf_data(&sb)); 346 347 va_end(ap); 348} 349 350 351static void 352mpssas_remove_volume(struct mps_softc *sc, struct mps_command *tm) 353{ 354 MPI2_SCSI_TASK_MANAGE_REPLY *reply; 355 struct mpssas_target *targ; 356 uint16_t handle; 357 358 MPS_FUNCTRACE(sc); 359 360 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply; 361 handle = (uint16_t)(uintptr_t)tm->cm_complete_data; 362 targ = tm->cm_targ; 363 364 if (reply == NULL) { 365 /* XXX retry the remove after the diag reset completes? */ 366 mps_dprint(sc, MPS_FAULT, 367 "%s NULL reply reseting device 0x%04x\n", __func__, handle); 368 mpssas_free_tm(sc, tm); 369 return; 370 } 371 372 if (reply->IOCStatus != MPI2_IOCSTATUS_SUCCESS) { 373 mps_dprint(sc, MPS_FAULT, 374 "IOCStatus = 0x%x while resetting device 0x%x\n", 375 reply->IOCStatus, handle); 376 mpssas_free_tm(sc, tm); 377 return; 378 } 379 380 mps_dprint(sc, MPS_XINFO, 381 "Reset aborted %u commands\n", reply->TerminationCount); 382 mps_free_reply(sc, tm->cm_reply_data); 383 tm->cm_reply = NULL; /* Ensures the reply won't get re-freed */ 384 385 mps_dprint(sc, MPS_XINFO, 386 "clearing target %u handle 0x%04x\n", targ->tid, handle); 387 388 /* 389 * Don't clear target if remove fails because things will get confusing. 390 * Leave the devname and sasaddr intact so that we know to avoid reusing 391 * this target id if possible, and so we can assign the same target id 392 * to this device if it comes back in the future. 
393 */ 394 if (reply->IOCStatus == MPI2_IOCSTATUS_SUCCESS) { 395 targ = tm->cm_targ; 396 targ->handle = 0x0; 397 targ->encl_handle = 0x0; 398 targ->encl_slot = 0x0; 399 targ->exp_dev_handle = 0x0; 400 targ->phy_num = 0x0; 401 targ->linkrate = 0x0; 402 targ->devinfo = 0x0; 403 targ->flags = 0x0; 404 } 405 406 mpssas_free_tm(sc, tm); 407} 408 409 410/* 411 * No Need to call "MPI2_SAS_OP_REMOVE_DEVICE" For Volume removal. 412 * Otherwise Volume Delete is same as Bare Drive Removal. 413 */ 414void 415mpssas_prepare_volume_remove(struct mpssas_softc *sassc, uint16_t handle) 416{ 417 MPI2_SCSI_TASK_MANAGE_REQUEST *req; 418 struct mps_softc *sc; 419 struct mps_command *cm; 420 struct mpssas_target *targ = NULL; 421 422 MPS_FUNCTRACE(sassc->sc); 423 sc = sassc->sc; 424 425#ifdef WD_SUPPORT 426 /* 427 * If this is a WD controller, determine if the disk should be exposed 428 * to the OS or not. If disk should be exposed, return from this 429 * function without doing anything. 430 */ 431 if (sc->WD_available && (sc->WD_hide_expose == 432 MPS_WD_EXPOSE_ALWAYS)) { 433 return; 434 } 435#endif //WD_SUPPORT 436 437 targ = mpssas_find_target_by_handle(sassc, 0, handle); 438 if (targ == NULL) { 439 /* FIXME: what is the action? */ 440 /* We don't know about this device? 
*/ 441 mps_dprint(sc, MPS_ERROR, 442 "%s %d : invalid handle 0x%x \n", __func__,__LINE__, handle); 443 return; 444 } 445 446 targ->flags |= MPSSAS_TARGET_INREMOVAL; 447 448 cm = mpssas_alloc_tm(sc); 449 if (cm == NULL) { 450 mps_dprint(sc, MPS_ERROR, 451 "%s: command alloc failure\n", __func__); 452 return; 453 } 454 455 mpssas_rescan_target(sc, targ); 456 457 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req; 458 req->DevHandle = targ->handle; 459 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; 460 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET; 461 462 /* SAS Hard Link Reset / SATA Link Reset */ 463 req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET; 464 465 cm->cm_targ = targ; 466 cm->cm_data = NULL; 467 cm->cm_desc.HighPriority.RequestFlags = 468 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY; 469 cm->cm_complete = mpssas_remove_volume; 470 cm->cm_complete_data = (void *)(uintptr_t)handle; 471 mps_map_command(sc, cm); 472} 473 474/* 475 * The MPT2 firmware performs debounce on the link to avoid transient link 476 * errors and false removals. When it does decide that link has been lost 477 * and a device need to go away, it expects that the host will perform a 478 * target reset and then an op remove. The reset has the side-effect of 479 * aborting any outstanding requests for the device, which is required for 480 * the op-remove to succeed. It's not clear if the host should check for 481 * the device coming back alive after the reset. 482 */ 483void 484mpssas_prepare_remove(struct mpssas_softc *sassc, uint16_t handle) 485{ 486 MPI2_SCSI_TASK_MANAGE_REQUEST *req; 487 struct mps_softc *sc; 488 struct mps_command *cm; 489 struct mpssas_target *targ = NULL; 490 491 MPS_FUNCTRACE(sassc->sc); 492 493 sc = sassc->sc; 494 495 targ = mpssas_find_target_by_handle(sassc, 0, handle); 496 if (targ == NULL) { 497 /* FIXME: what is the action? */ 498 /* We don't know about this device? 
		 */
		mps_dprint(sc, MPS_ERROR,
		    "%s : invalid handle 0x%x \n", __func__, handle);
		return;
	}

	targ->flags |= MPSSAS_TARGET_INREMOVAL;

	cm = mpssas_alloc_tm(sc);
	if (cm == NULL) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: command alloc failure\n", __func__);
		return;
	}

	mpssas_rescan_target(sc, targ);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)cm->cm_req;
	memset(req, 0, sizeof(*req));
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	/* SAS Hard Link Reset / SATA Link Reset */
	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	cm->cm_targ = targ;
	cm->cm_data = NULL;
	cm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	cm->cm_complete = mpssas_remove_device;
	cm->cm_complete_data = (void *)(uintptr_t)handle;
	mps_map_command(sc, cm);
}

/*
 * Completion handler for the target-reset TM issued by
 * mpssas_prepare_remove(): reuses the TM frame to send the
 * SAS_IO_UNIT_CONTROL op-remove, then fails any commands still queued
 * to the departing target.
 */
static void
mpssas_remove_device(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SAS_IOUNIT_CONTROL_REQUEST *req;
	struct mpssas_target *targ;
	struct mps_command *next_cm;
	uint16_t handle;

	MPS_FUNCTRACE(sc);

	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: cm_flags = %#x for remove of handle %#04x! "
		    "This should not happen!\n", __func__, tm->cm_flags,
		    handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* XXX retry the remove after the diag reset completes?
		 */
		mps_dprint(sc, MPS_FAULT,
		    "%s NULL reply reseting device 0x%04x\n", __func__, handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (le16toh(reply->IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
		mps_dprint(sc, MPS_FAULT,
		    "IOCStatus = 0x%x while resetting device 0x%x\n",
		    le16toh(reply->IOCStatus), handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	mps_dprint(sc, MPS_XINFO, "Reset aborted %u commands\n",
	    le32toh(reply->TerminationCount));
	mps_free_reply(sc, tm->cm_reply_data);
	tm->cm_reply = NULL;	/* Ensures the reply won't get re-freed */

	/* Reuse the existing command */
	req = (MPI2_SAS_IOUNIT_CONTROL_REQUEST *)tm->cm_req;
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	req->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	req->DevHandle = htole16(handle);
	tm->cm_data = NULL;
	tm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	tm->cm_complete = mpssas_remove_complete;
	tm->cm_complete_data = (void *)(uintptr_t)handle;

	mps_map_command(sc, tm);

	mps_dprint(sc, MPS_XINFO, "clearing target %u handle 0x%04x\n",
	    targ->tid, handle);
	/* Fail any commands still queued to the departing target. */
	TAILQ_FOREACH_SAFE(tm, &targ->commands, cm_link, next_cm) {
		union ccb *ccb;

		mps_dprint(sc, MPS_XINFO, "Completing missed command %p\n", tm);
		ccb = tm->cm_complete_data;
		ccb->ccb_h.status = CAM_DEV_NOT_THERE;
		mpssas_scsiio_complete(sc, tm);
	}
}

/*
 * Completion handler for the SAS_IO_UNIT_CONTROL op-remove: on success,
 * clears the target's bookkeeping and frees its LUN list.
 */
static void
mpssas_remove_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SAS_IOUNIT_CONTROL_REPLY *reply;
	uint16_t handle;
	struct mpssas_target *targ;
	struct mpssas_lun *lun;

	MPS_FUNCTRACE(sc);

	reply = (MPI2_SAS_IOUNIT_CONTROL_REPLY *)tm->cm_reply;
	handle = (uint16_t)(uintptr_t)tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_XINFO,
		    "%s: cm_flags = %#x for remove of handle %#04x! "
		    "This should not happen!\n", __func__, tm->cm_flags,
		    handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		/* most likely a chip reset */
		mps_dprint(sc, MPS_FAULT,
		    "%s NULL reply removing device 0x%04x\n", __func__, handle);
		mpssas_free_tm(sc, tm);
		return;
	}

	mps_dprint(sc, MPS_XINFO,
	    "%s on handle 0x%04x, IOCStatus= 0x%x\n", __func__,
	    handle, le16toh(reply->IOCStatus));

	/*
	 * Don't clear target if remove fails because things will get confusing.
	 * Leave the devname and sasaddr intact so that we know to avoid reusing
	 * this target id if possible, and so we can assign the same target id
	 * to this device if it comes back in the future.
	 */
	if (le16toh(reply->IOCStatus) == MPI2_IOCSTATUS_SUCCESS) {
		targ = tm->cm_targ;
		targ->handle = 0x0;
		targ->encl_handle = 0x0;
		targ->encl_slot = 0x0;
		targ->exp_dev_handle = 0x0;
		targ->phy_num = 0x0;
		targ->linkrate = 0x0;
		targ->devinfo = 0x0;
		targ->flags = 0x0;

		/* Free any LUN structures left over from the device. */
		while(!SLIST_EMPTY(&targ->luns)) {
			lun = SLIST_FIRST(&targ->luns);
			SLIST_REMOVE_HEAD(&targ->luns, lun_link);
			free(lun, M_MPT2);
		}
	}


	mpssas_free_tm(sc, tm);
}

/*
 * Subscribe to the firmware events this driver consumes.  Always
 * returns 0.
 */
static int
mpssas_register_events(struct mps_softc *sc)
{
	u32 events[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];

	/* 16 bytes == MPI2_EVENT_NOTIFY_EVENTMASK_WORDS u32s. */
	bzero(events, 16);
	setbit(events, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
	setbit(events, MPI2_EVENT_SAS_DISCOVERY);
	setbit(events, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
	setbit(events, MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE);
	setbit(events, MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW);
	setbit(events, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
	setbit(events, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
	setbit(events, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
	setbit(events, MPI2_EVENT_IR_VOLUME);
	setbit(events, MPI2_EVENT_IR_PHYSICAL_DISK);
	setbit(events, MPI2_EVENT_IR_OPERATION_STATUS);
	setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);

	mps_register_events(sc, events, mpssas_evt_handler, NULL,
	    &sc->sassc->mpssas_eh);

	return (0);
}

/*
 * Attach the SAS/CAM layer: allocate the per-SAS softc and target
 * array, create the SIM and event taskqueue, register the bus, and
 * enter startup mode (simq frozen) until discovery finishes.
 */
int
mps_attach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;
	cam_status status;
	int unit, error = 0;

	MPS_FUNCTRACE(sc);

	/* M_WAITOK malloc does not fail; the NULL check is belt-and-braces. */
	sassc = malloc(sizeof(struct mpssas_softc), M_MPT2, M_WAITOK|M_ZERO);
	if(!sassc) {
		device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",
		    __func__, __LINE__);
		return (ENOMEM);
	}
	sassc->targets = malloc(sizeof(struct mpssas_target) *
	    sc->facts->MaxTargets, M_MPT2, M_WAITOK|M_ZERO);
	if(!sassc->targets) {
		device_printf(sc->mps_dev, "Cannot allocate memory %s %d\n",
		    __func__, __LINE__);
		free(sassc, M_MPT2);
		return (ENOMEM);
	}
	sc->sassc = sassc;
	sassc->sc = sc;

	if ((sassc->devq = cam_simq_alloc(sc->num_reqs)) == NULL) {
		mps_dprint(sc, MPS_ERROR, "Cannot allocate SIMQ\n");
		error = ENOMEM;
		goto out;
	}

	unit = device_get_unit(sc->mps_dev);
	sassc->sim = cam_sim_alloc(mpssas_action, mpssas_poll, "mps", sassc,
	    unit, &sc->mps_mtx, sc->num_reqs, sc->num_reqs, sassc->devq);
	if (sassc->sim == NULL) {
		mps_dprint(sc, MPS_ERROR, "Cannot allocate SIM\n");
		error = EINVAL;
		goto out;
	}

	TAILQ_INIT(&sassc->ev_queue);

	/* Initialize taskqueue for Event Handling */
	TASK_INIT(&sassc->ev_task, 0, mpssas_firmware_event_work, sc);
	sassc->ev_tq = taskqueue_create("mps_taskq", M_NOWAIT | M_ZERO,
	    taskqueue_thread_enqueue, &sassc->ev_tq);

	/* Run the task queue with lowest priority */
	taskqueue_start_threads(&sassc->ev_tq, 1, 255, "%s taskq",
	    device_get_nameunit(sc->mps_dev));

	mps_lock(sc);

	/*
	 * XXX There should be a bus for every port on the adapter, but since
	 * we're just going to fake the topology for now, we'll pretend that
	 * everything is just a target on a single bus.
	 */
	if ((error = xpt_bus_register(sassc->sim, sc->mps_dev, 0)) != 0) {
		mps_dprint(sc, MPS_ERROR, "Error %d registering SCSI bus\n",
		    error);
		mps_unlock(sc);
		goto out;
	}

	/*
	 * Assume that discovery events will start right away.
	 *
	 * Hold off boot until discovery is complete.
	 */
	sassc->flags |= MPSSAS_IN_STARTUP | MPSSAS_IN_DISCOVERY;
	sc->sassc->startup_refcount = 0;
	mpssas_startup_increment(sassc);

	callout_init(&sassc->discovery_callout, 1 /*mpsafe*/);
	sassc->discovery_timeouts = 0;

	sassc->tm_count = 0;

	/*
	 * Register for async events so we can determine the EEDP
	 * capabilities of devices.
	 */
	status = xpt_create_path(&sassc->path, /*periph*/NULL,
	    cam_sim_path(sc->sassc->sim), CAM_TARGET_WILDCARD,
	    CAM_LUN_WILDCARD);
	if (status != CAM_REQ_CMP) {
		mps_printf(sc, "Error %#x creating sim path\n", status);
		sassc->path = NULL;
	} else {
		int event;

#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
		event = AC_ADVINFO_CHANGED;
#else
		event = AC_FOUND_DEVICE;
#endif
		status = xpt_register_async(event, mpssas_async, sc,
		    sassc->path);
		if (status != CAM_REQ_CMP) {
			mps_dprint(sc, MPS_ERROR,
			    "Error %#x registering async handler for "
			    "AC_ADVINFO_CHANGED events\n", status);
			xpt_free_path(sassc->path);
			sassc->path = NULL;
		}
	}
	if (status != CAM_REQ_CMP) {
		/*
		 * EEDP use is the exception, not the rule.
		 * Warn the user, but do not fail to attach.
		 */
		mps_printf(sc, "EEDP capabilities disabled.\n");
	}

	mps_unlock(sc);

	mpssas_register_events(sc);
out:
	if (error)
		mps_detach_sas(sc);
	return (error);
}

/*
 * Tear down the SAS/CAM layer; the inverse of mps_attach_sas().  Safe
 * to call on a partially-attached softc.  Always returns 0.
 */
int
mps_detach_sas(struct mps_softc *sc)
{
	struct mpssas_softc *sassc;
	struct mpssas_lun *lun, *lun_tmp;
	struct mpssas_target *targ;
	int i;

	MPS_FUNCTRACE(sc);

	if (sc->sassc == NULL)
		return (0);

	sassc = sc->sassc;
	mps_deregister_events(sc, sassc->mpssas_eh);

	/*
	 * Drain and free the event handling taskqueue with the lock
	 * unheld so that any parallel processing tasks drain properly
	 * without deadlocking.
	 */
	if (sassc->ev_tq != NULL)
		taskqueue_free(sassc->ev_tq);

	/* Make sure CAM doesn't wedge if we had to bail out early.
	 */
	mps_lock(sc);

	/* Deregister our async handler */
	if (sassc->path != NULL) {
		xpt_register_async(0, mpssas_async, sc, sassc->path);
		xpt_free_path(sassc->path);
		sassc->path = NULL;
	}

	if (sassc->flags & MPSSAS_IN_STARTUP)
		xpt_release_simq(sassc->sim, 1);

	if (sassc->sim != NULL) {
		xpt_bus_deregister(cam_sim_path(sassc->sim));
		cam_sim_free(sassc->sim, FALSE);
	}

	sassc->flags |= MPSSAS_SHUTDOWN;
	mps_unlock(sc);

	if (sassc->devq != NULL)
		cam_simq_free(sassc->devq);

	/* Free any LUN structures hanging off of each target. */
	for(i=0; i< sc->facts->MaxTargets ;i++) {
		targ = &sassc->targets[i];
		SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
			free(lun, M_MPT2);
		}
	}
	free(sassc->targets, M_MPT2);
	free(sassc, M_MPT2);
	sc->sassc = NULL;

	return (0);
}

/*
 * Discovery completed: cancel any pending discovery-timeout callout.
 */
void
mpssas_discovery_end(struct mpssas_softc *sassc)
{
	struct mps_softc *sc = sassc->sc;

	MPS_FUNCTRACE(sc);

	if (sassc->flags & MPSSAS_DISCOVERY_TIMEOUT_PENDING)
		callout_stop(&sassc->discovery_callout);

}

/*
 * Callout handler: discovery did not finish in time, possibly because
 * interrupts are not working.  Poll the hardware for events and re-arm,
 * giving up after MPSSAS_MAX_DISCOVERY_TIMEOUTS attempts.
 */
static void
mpssas_discovery_timeout(void *data)
{
	struct mpssas_softc *sassc = data;
	struct mps_softc *sc;

	sc = sassc->sc;
	MPS_FUNCTRACE(sc);

	mps_lock(sc);
	mps_dprint(sc, MPS_INFO,
	    "Timeout waiting for discovery, interrupts may not be working!\n");
	sassc->flags &= ~MPSSAS_DISCOVERY_TIMEOUT_PENDING;

	/* Poll the hardware for events in case interrupts aren't working */
	mps_intr_locked(sc);

	mps_dprint(sassc->sc, MPS_INFO,
	    "Finished polling after discovery timeout at %d\n", ticks);

	if ((sassc->flags & MPSSAS_IN_DISCOVERY) == 0) {
		mpssas_discovery_end(sassc);
	} else {
		if (sassc->discovery_timeouts < MPSSAS_MAX_DISCOVERY_TIMEOUTS) {
			sassc->flags |= MPSSAS_DISCOVERY_TIMEOUT_PENDING;
			callout_reset(&sassc->discovery_callout,
			    MPSSAS_DISCOVERY_TIMEOUT * hz,
			    mpssas_discovery_timeout, sassc);
			sassc->discovery_timeouts++;
		} else {
			mps_dprint(sassc->sc, MPS_FAULT,
			    "Discovery timed out, continuing.\n");
			sassc->flags &= ~MPSSAS_IN_DISCOVERY;
			mpssas_discovery_end(sassc);
		}
	}

	mps_unlock(sc);
}

/*
 * CAM action entry point for the mps SIM.  Must be called with the
 * driver mutex held.
 */
static void
mpssas_action(struct cam_sim *sim, union ccb *ccb)
{
	struct mpssas_softc *sassc;

	sassc = cam_sim_softc(sim);

	MPS_FUNCTRACE(sassc->sc);
	mps_dprint(sassc->sc, MPS_TRACE, "ccb func_code 0x%x\n",
	    ccb->ccb_h.func_code);
	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	case XPT_PATH_INQ:
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
		cpi->target_sprt = 0;
#if (__FreeBSD_version >= 1000039) || \
    ((__FreeBSD_version < 1000000) && (__FreeBSD_version >= 902502))
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_NOSCAN;
#else
		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
#endif
		cpi->hba_eng_cnt = 0;
		cpi->max_target = sassc->sc->facts->MaxTargets - 1;
		cpi->max_lun = 255;
		/* The initiator claims the highest target id. */
		cpi->initiator_id = sassc->sc->facts->MaxTargets - 1;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "LSILogic", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 150000;
		cpi->transport = XPORT_SAS;
		cpi->transport_version = 0;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_SPC;
#if __FreeBSD_version >= 800001
		/*
		 * XXX KDM where does this number come from?
		 */
		cpi->maxio = 256 * 1024;
#endif
		cpi->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings *cts;
		struct ccb_trans_settings_sas *sas;
		struct ccb_trans_settings_scsi *scsi;
		struct mpssas_target *targ;

		cts = &ccb->cts;
		sas = &cts->xport_specific.sas;
		scsi = &cts->proto_specific.scsi;

		targ = &sassc->targets[cts->ccb_h.target_id];
		if (targ->handle == 0x0) {
			cts->ccb_h.status = CAM_SEL_TIMEOUT;
			break;
		}

		cts->protocol_version = SCSI_REV_SPC2;
		cts->transport = XPORT_SAS;
		cts->transport_version = 0;

		sas->valid = CTS_SAS_VALID_SPEED;
		/* Map the negotiated link-rate code to a bitrate in kb/s. */
		switch (targ->linkrate) {
		case 0x08:
			sas->bitrate = 150000;
			break;
		case 0x09:
			sas->bitrate = 300000;
			break;
		case 0x0a:
			sas->bitrate = 600000;
			break;
		default:
			sas->valid = 0;
		}

		cts->protocol = PROTO_SCSI;
		scsi->valid = CTS_SCSI_VALID_TQ;
		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;

		cts->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_CALC_GEOMETRY:
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	case XPT_RESET_DEV:
		mps_dprint(sassc->sc, MPS_XINFO, "mpssas_action XPT_RESET_DEV\n");
		mpssas_action_resetdev(sassc, ccb);
		return;
	case XPT_RESET_BUS:
	case XPT_ABORT:
	case XPT_TERM_IO:
		mps_dprint(sassc->sc, MPS_XINFO,
		    "mpssas_action faking success for abort or reset\n");
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	case XPT_SCSI_IO:
		mpssas_action_scsiio(sassc, ccb);
		return;
#if __FreeBSD_version >= 900026
	case XPT_SMP_IO:
		mpssas_action_smpio(sassc, ccb);
		return;
#endif
	default:
		ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
		break;
	}
	xpt_done(ccb);

}

/*
 * Post an async notification (e.g. AC_SENT_BDR) on a freshly-created
 * path for the given target/lun.
 */
static void
mpssas_announce_reset(struct mps_softc *sc, uint32_t ac_code,
    target_id_t target_id, lun_id_t lun_id)
{
	path_id_t path_id = cam_sim_path(sc->sassc->sim);
	struct cam_path *path;

	mps_dprint(sc, MPS_XINFO, "%s code %x target %d lun %d\n", __func__,
	    ac_code, target_id, lun_id);

	/*
	 * Build a transient path just for the async notification; it is
	 * freed again as soon as the event has been posted.
	 */
	if (xpt_create_path(&path, NULL,
		path_id, target_id, lun_id) != CAM_REQ_CMP) {
		mps_dprint(sc, MPS_ERROR, "unable to create path for reset "
			   "notification\n");
		return;
	}

	xpt_async(ac_code, path, NULL);
	xpt_free_path(path);
}

/*
 * Flush every in-flight firmware command after a diag reset.  Each command
 * is completed with a NULL reply so that its completion handler (or a
 * sleeping waiter) can observe the failure and clean up.
 */
static void
mpssas_complete_all_commands(struct mps_softc *sc)
{
	struct mps_command *cm;
	int i;
	int completed;

	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	/* complete all commands with a NULL reply */
	for (i = 1; i < sc->num_reqs; i++) {
		cm = &sc->commands[i];
		cm->cm_reply = NULL;
		completed = 0;

		/* polled commands spin on MPS_CM_FLAGS_COMPLETE */
		if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
			cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;

		if (cm->cm_complete != NULL) {
			mpssas_log_command(cm, MPS_RECOVERY,
			    "completing cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);

			cm->cm_complete(sc, cm);
			completed = 1;
		}

		if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
			mpssas_log_command(cm, MPS_RECOVERY,
			    "waking up cm %p state %x ccb %p for diag reset\n",
			    cm, cm->cm_state, cm->cm_ccb);
			wakeup(cm);
			completed = 1;
		}

		if ((completed == 0) && (cm->cm_state != MPS_CM_STATE_FREE)) {
			/* this should never happen, but if it does, log */
			mpssas_log_command(cm, MPS_RECOVERY,
			    "cm %p state %x flags 0x%x ccb %p during diag "
			    "reset\n", cm, cm->cm_state, cm->cm_flags,
			    cm->cm_ccb);
		}
	}
}

void
mpssas_handle_reinit(struct mps_softc *sc)
{
	int i;

	/* Go back into startup mode and freeze the simq, so that CAM
	 * doesn't send any commands until after we've rediscovered
all 1135 * targets and found the proper device handles for them. 1136 * 1137 * After the reset, portenable will trigger discovery, and after all 1138 * discovery-related activities have finished, the simq will be 1139 * released. 1140 */ 1141 mps_dprint(sc, MPS_INIT, "%s startup\n", __func__); 1142 sc->sassc->flags |= MPSSAS_IN_STARTUP; 1143 sc->sassc->flags |= MPSSAS_IN_DISCOVERY; 1144 mpssas_startup_increment(sc->sassc); 1145 1146 /* notify CAM of a bus reset */ 1147 mpssas_announce_reset(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD, 1148 CAM_LUN_WILDCARD); 1149 1150 /* complete and cleanup after all outstanding commands */ 1151 mpssas_complete_all_commands(sc); 1152 1153 mps_dprint(sc, MPS_INIT, 1154 "%s startup %u tm %u after command completion\n", 1155 __func__, sc->sassc->startup_refcount, sc->sassc->tm_count); 1156 1157 /* zero all the target handles, since they may change after the 1158 * reset, and we have to rediscover all the targets and use the new 1159 * handles. 1160 */ 1161 for (i = 0; i < sc->facts->MaxTargets; i++) { 1162 if (sc->sassc->targets[i].outstanding != 0) 1163 mps_dprint(sc, MPS_INIT, "target %u outstanding %u\n", 1164 i, sc->sassc->targets[i].outstanding); 1165 sc->sassc->targets[i].handle = 0x0; 1166 sc->sassc->targets[i].exp_dev_handle = 0x0; 1167 sc->sassc->targets[i].outstanding = 0; 1168 sc->sassc->targets[i].flags = MPSSAS_TARGET_INDIAGRESET; 1169 } 1170} 1171 1172static void 1173mpssas_tm_timeout(void *data) 1174{ 1175 struct mps_command *tm = data; 1176 struct mps_softc *sc = tm->cm_sc; 1177 1178 mtx_assert(&sc->mps_mtx, MA_OWNED); 1179 1180 mpssas_log_command(tm, MPS_INFO|MPS_RECOVERY, 1181 "task mgmt %p timed out\n", tm); 1182 mps_reinit(sc); 1183} 1184 1185static void 1186mpssas_logical_unit_reset_complete(struct mps_softc *sc, struct mps_command *tm) 1187{ 1188 MPI2_SCSI_TASK_MANAGE_REPLY *reply; 1189 MPI2_SCSI_TASK_MANAGE_REQUEST *req; 1190 unsigned int cm_count = 0; 1191 struct mps_command *cm; 1192 struct mpssas_target *targ; 
1193 1194 callout_stop(&tm->cm_callout); 1195 1196 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req; 1197 reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply; 1198 targ = tm->cm_targ; 1199 1200 /* 1201 * Currently there should be no way we can hit this case. It only 1202 * happens when we have a failure to allocate chain frames, and 1203 * task management commands don't have S/G lists. 1204 * XXXSL So should it be an assertion? 1205 */ 1206 if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) { 1207 mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for LUN reset! " 1208 "This should not happen!\n", __func__, tm->cm_flags); 1209 mpssas_free_tm(sc, tm); 1210 return; 1211 } 1212 1213 if (reply == NULL) { 1214 mpssas_log_command(tm, MPS_RECOVERY, 1215 "NULL reset reply for tm %p\n", tm); 1216 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) { 1217 /* this completion was due to a reset, just cleanup */ 1218 targ->flags &= ~MPSSAS_TARGET_INRESET; 1219 targ->tm = NULL; 1220 mpssas_free_tm(sc, tm); 1221 } 1222 else { 1223 /* we should have gotten a reply. */ 1224 mps_reinit(sc); 1225 } 1226 return; 1227 } 1228 1229 mpssas_log_command(tm, MPS_RECOVERY, 1230 "logical unit reset status 0x%x code 0x%x count %u\n", 1231 le16toh(reply->IOCStatus), le32toh(reply->ResponseCode), 1232 le32toh(reply->TerminationCount)); 1233 1234 /* See if there are any outstanding commands for this LUN. 1235 * This could be made more efficient by using a per-LU data 1236 * structure of some sort. 1237 */ 1238 TAILQ_FOREACH(cm, &targ->commands, cm_link) { 1239 if (cm->cm_lun == tm->cm_lun) 1240 cm_count++; 1241 } 1242 1243 if (cm_count == 0) { 1244 mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO, 1245 "logical unit %u finished recovery after reset\n", 1246 tm->cm_lun, tm); 1247 1248 mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid, 1249 tm->cm_lun); 1250 1251 /* we've finished recovery for this logical unit. 
check and
		 * see if some other logical unit has a timedout command
		 * that needs to be processed.
		 */
		cm = TAILQ_FIRST(&targ->timedout_commands);
		if (cm) {
			mpssas_send_abort(sc, tm, cm);
		}
		else {
			/* no more work: release the TM for this target */
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
	}
	else {
		/* if we still have commands for this LUN, the reset
		 * effectively failed, regardless of the status reported.
		 * Escalate to a target reset.
		 */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "logical unit reset complete for tm %p, but still have %u command(s)\n",
		    tm, cm_count);
		mpssas_send_reset(sc, tm,
		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET);
	}
}

/*
 * Completion handler for a target reset TM.  If the target has no more
 * outstanding commands, recovery for it is complete; otherwise the reset
 * is considered failed and the whole controller is reinitialized.
 */
static void
mpssas_target_reset_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *targ;

	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x for target reset! "
		    "This should not happen!\n", __func__, tm->cm_flags);
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpssas_log_command(tm, MPS_RECOVERY,
		    "NULL reset reply for tm %p\n", tm);
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->flags &= ~MPSSAS_TARGET_INRESET;
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply.
 */
			mps_reinit(sc);
		}
		return;
	}

	mpssas_log_command(tm, MPS_RECOVERY,
	    "target reset status 0x%x code 0x%x count %u\n",
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	targ->flags &= ~MPSSAS_TARGET_INRESET;

	if (targ->outstanding == 0) {
		/* we've finished recovery for this target and all
		 * of its logical units.
		 */
		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
		    "recovery finished after target reset\n");

		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);

		targ->tm = NULL;
		mpssas_free_tm(sc, tm);
	}
	else {
		/* after a target reset, if this target still has
		 * outstanding commands, the reset effectively failed,
		 * regardless of the status reported.  escalate.
		 */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "target reset complete for tm %p, but still have %u command(s)\n",
		    tm, targ->outstanding);
		mps_reinit(sc);
	}
}

#define MPS_RESET_TIMEOUT 30

/*
 * Send a logical unit reset or target reset task management request for
 * the target associated with 'tm'.  Returns 0 on success (the request has
 * been mapped and queued) or -1 when the target has no device handle or
 * the reset type is unknown.
 */
static int
mpssas_send_reset(struct mps_softc *sc, struct mps_command *tm, uint8_t type)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *target;
	int err;

	target = tm->cm_targ;
	if (target->handle == 0) {
		mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n",
		    __func__, target->tid);
		return -1;
	}

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(target->handle);
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = type;

	if (type == MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET) {
		/* XXX Need to handle invalid LUNs */
		MPS_SET_LUN(req->LUN, tm->cm_lun);
		tm->cm_targ->logical_unit_resets++;
		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
		    "sending logical unit reset\n");
		tm->cm_complete =
mpssas_logical_unit_reset_complete;
	}
	else if (type == MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
		/* Target reset method = SAS Hard Link Reset / SATA Link Reset */
		req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
		tm->cm_targ->target_resets++;
		tm->cm_targ->flags |= MPSSAS_TARGET_INRESET;
		mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO,
		    "sending target reset\n");
		tm->cm_complete = mpssas_target_reset_complete;
	}
	else {
		mps_dprint(sc, MPS_ERROR, "unexpected reset type 0x%x\n", type);
		return -1;
	}

	tm->cm_data = NULL;
	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	tm->cm_complete_data = (void *)tm;

	/* arm the TM timeout; expiry forces a controller reinit */
	callout_reset(&tm->cm_callout, MPS_RESET_TIMEOUT * hz,
	    mpssas_tm_timeout, tm);

	err = mps_map_command(sc, tm);
	if (err)
		mpssas_log_command(tm, MPS_RECOVERY,
		    "error %d sending reset type %u\n",
		    err, type);

	return err;
}


/*
 * Completion handler for an ABORT_TASK TM.  Decides whether recovery for
 * the target is complete, whether further timed-out commands still need
 * aborting, or whether to escalate to a logical unit reset.
 */
static void
mpssas_abort_complete(struct mps_softc *sc, struct mps_command *tm)
{
	struct mps_command *cm;
	MPI2_SCSI_TASK_MANAGE_REPLY *reply;
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mpssas_target *targ;

	callout_stop(&tm->cm_callout);

	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	reply = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	targ = tm->cm_targ;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mpssas_log_command(tm, MPS_RECOVERY,
		    "cm_flags = %#x for abort %p TaskMID %u!\n",
		    tm->cm_flags, tm, le16toh(req->TaskMID));
		mpssas_free_tm(sc, tm);
		return;
	}

	if (reply == NULL) {
		mpssas_log_command(tm, MPS_RECOVERY,
		    "NULL abort reply for tm %p TaskMID %u\n",
		    tm, le16toh(req->TaskMID));
		if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) {
			/* this completion was due to a reset, just cleanup */
			targ->tm = NULL;
			mpssas_free_tm(sc, tm);
		}
		else {
			/* we should have gotten a reply. */
			mps_reinit(sc);
		}
		return;
	}

	mpssas_log_command(tm, MPS_RECOVERY,
	    "abort TaskMID %u status 0x%x code 0x%x count %u\n",
	    le16toh(req->TaskMID),
	    le16toh(reply->IOCStatus), le32toh(reply->ResponseCode),
	    le32toh(reply->TerminationCount));

	cm = TAILQ_FIRST(&tm->cm_targ->timedout_commands);
	if (cm == NULL) {
		/* if there are no more timedout commands, we're done with
		 * error recovery for this target.
		 */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "finished recovery after aborting TaskMID %u\n",
		    le16toh(req->TaskMID));

		targ->tm = NULL;
		mpssas_free_tm(sc, tm);
	}
	else if (le16toh(req->TaskMID) != cm->cm_desc.Default.SMID) {
		/* abort success, but we have more timedout commands to abort */
		mpssas_log_command(tm, MPS_RECOVERY,
		    "continuing recovery after aborting TaskMID %u\n",
		    le16toh(req->TaskMID));

		mpssas_send_abort(sc, tm, cm);
	}
	else {
		/* we didn't get a command completion, so the abort
		 * failed as far as we're concerned.  escalate.
1482 */ 1483 mpssas_log_command(tm, MPS_RECOVERY, 1484 "abort failed for TaskMID %u tm %p\n", 1485 le16toh(req->TaskMID), tm); 1486 1487 mpssas_send_reset(sc, tm, 1488 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET); 1489 } 1490} 1491 1492#define MPS_ABORT_TIMEOUT 5 1493 1494static int 1495mpssas_send_abort(struct mps_softc *sc, struct mps_command *tm, struct mps_command *cm) 1496{ 1497 MPI2_SCSI_TASK_MANAGE_REQUEST *req; 1498 struct mpssas_target *targ; 1499 int err; 1500 1501 targ = cm->cm_targ; 1502 if (targ->handle == 0) { 1503 mps_dprint(sc, MPS_ERROR,"%s null devhandle for target_id %d\n", 1504 __func__, cm->cm_ccb->ccb_h.target_id); 1505 return -1; 1506 } 1507 1508 mpssas_log_command(tm, MPS_RECOVERY|MPS_INFO, 1509 "Aborting command %p\n", cm); 1510 1511 req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req; 1512 req->DevHandle = htole16(targ->handle); 1513 req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; 1514 req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK; 1515 1516 /* XXX Need to handle invalid LUNs */ 1517 MPS_SET_LUN(req->LUN, cm->cm_ccb->ccb_h.target_lun); 1518 1519 req->TaskMID = htole16(cm->cm_desc.Default.SMID); 1520 1521 tm->cm_data = NULL; 1522 tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY; 1523 tm->cm_complete = mpssas_abort_complete; 1524 tm->cm_complete_data = (void *)tm; 1525 tm->cm_targ = cm->cm_targ; 1526 tm->cm_lun = cm->cm_lun; 1527 1528 callout_reset(&tm->cm_callout, MPS_ABORT_TIMEOUT * hz, 1529 mpssas_tm_timeout, tm); 1530 1531 targ->aborts++; 1532 1533 err = mps_map_command(sc, tm); 1534 if (err) 1535 mpssas_log_command(tm, MPS_RECOVERY, 1536 "error %d sending abort for cm %p SMID %u\n", 1537 err, cm, req->TaskMID); 1538 return err; 1539} 1540 1541 1542static void 1543mpssas_scsiio_timeout(void *data) 1544{ 1545 struct mps_softc *sc; 1546 struct mps_command *cm; 1547 struct mpssas_target *targ; 1548 1549 cm = (struct mps_command *)data; 1550 sc = cm->cm_sc; 1551 1552 MPS_FUNCTRACE(sc); 1553 
mtx_assert(&sc->mps_mtx, MA_OWNED); 1554 1555 mps_dprint(sc, MPS_XINFO, "Timeout checking cm %p\n", sc); 1556 1557 /* 1558 * Run the interrupt handler to make sure it's not pending. This 1559 * isn't perfect because the command could have already completed 1560 * and been re-used, though this is unlikely. 1561 */ 1562 mps_intr_locked(sc); 1563 if (cm->cm_state == MPS_CM_STATE_FREE) { 1564 mpssas_log_command(cm, MPS_XINFO, 1565 "SCSI command %p almost timed out\n", cm); 1566 return; 1567 } 1568 1569 if (cm->cm_ccb == NULL) { 1570 mps_dprint(sc, MPS_ERROR, "command timeout with NULL ccb\n"); 1571 return; 1572 } 1573 1574 mpssas_log_command(cm, MPS_INFO, "command timeout cm %p ccb %p\n", 1575 cm, cm->cm_ccb); 1576 1577 targ = cm->cm_targ; 1578 targ->timeouts++; 1579 1580 /* XXX first, check the firmware state, to see if it's still 1581 * operational. if not, do a diag reset. 1582 */ 1583 1584 cm->cm_ccb->ccb_h.status = CAM_CMD_TIMEOUT; 1585 cm->cm_state = MPS_CM_STATE_TIMEDOUT; 1586 TAILQ_INSERT_TAIL(&targ->timedout_commands, cm, cm_recovery); 1587 1588 if (targ->tm != NULL) { 1589 /* target already in recovery, just queue up another 1590 * timedout command to be processed later. 1591 */ 1592 mps_dprint(sc, MPS_RECOVERY, 1593 "queued timedout cm %p for processing by tm %p\n", 1594 cm, targ->tm); 1595 } 1596 else if ((targ->tm = mpssas_alloc_tm(sc)) != NULL) { 1597 mps_dprint(sc, MPS_RECOVERY, "timedout cm %p allocated tm %p\n", 1598 cm, targ->tm); 1599 1600 /* start recovery by aborting the first timedout command */ 1601 mpssas_send_abort(sc, targ->tm, cm); 1602 } 1603 else { 1604 /* XXX queue this target up for recovery once a TM becomes 1605 * available. The firmware only has a limited number of 1606 * HighPriority credits for the high priority requests used 1607 * for task management, and we ran out. 
1608 * 1609 * Isilon: don't worry about this for now, since we have 1610 * more credits than disks in an enclosure, and limit 1611 * ourselves to one TM per target for recovery. 1612 */ 1613 mps_dprint(sc, MPS_RECOVERY, 1614 "timedout cm %p failed to allocate a tm\n", cm); 1615 } 1616 1617} 1618 1619static void 1620mpssas_action_scsiio(struct mpssas_softc *sassc, union ccb *ccb) 1621{ 1622 MPI2_SCSI_IO_REQUEST *req; 1623 struct ccb_scsiio *csio; 1624 struct mps_softc *sc; 1625 struct mpssas_target *targ; 1626 struct mpssas_lun *lun; 1627 struct mps_command *cm; 1628 uint8_t i, lba_byte, *ref_tag_addr; 1629 uint16_t eedp_flags; 1630 uint32_t mpi_control; 1631 1632 sc = sassc->sc; 1633 MPS_FUNCTRACE(sc); 1634 mtx_assert(&sc->mps_mtx, MA_OWNED); 1635 1636 csio = &ccb->csio; 1637 targ = &sassc->targets[csio->ccb_h.target_id]; 1638 mps_dprint(sc, MPS_TRACE, "ccb %p target flag %x\n", ccb, targ->flags); 1639 if (targ->handle == 0x0) { 1640 mps_dprint(sc, MPS_ERROR, "%s NULL handle for target %u\n", 1641 __func__, csio->ccb_h.target_id); 1642 csio->ccb_h.status = CAM_SEL_TIMEOUT; 1643 xpt_done(ccb); 1644 return; 1645 } 1646 if (targ->flags & MPS_TARGET_FLAGS_RAID_COMPONENT) { 1647 mps_dprint(sc, MPS_ERROR, "%s Raid component no SCSI IO " 1648 "supported %u\n", __func__, csio->ccb_h.target_id); 1649 csio->ccb_h.status = CAM_TID_INVALID; 1650 xpt_done(ccb); 1651 return; 1652 } 1653 /* 1654 * Sometimes, it is possible to get a command that is not "In 1655 * Progress" and was actually aborted by the upper layer. Check for 1656 * this here and complete the command without error. 1657 */ 1658 if (ccb->ccb_h.status != CAM_REQ_INPROG) { 1659 mps_dprint(sc, MPS_TRACE, "%s Command is not in progress for " 1660 "target %u\n", __func__, csio->ccb_h.target_id); 1661 xpt_done(ccb); 1662 return; 1663 } 1664 /* 1665 * If devinfo is 0 this will be a volume. In that case don't tell CAM 1666 * that the volume has timed out. 
We want volumes to be enumerated 1667 * until they are deleted/removed, not just failed. 1668 */ 1669 if (targ->flags & MPSSAS_TARGET_INREMOVAL) { 1670 if (targ->devinfo == 0) 1671 csio->ccb_h.status = CAM_REQ_CMP; 1672 else 1673 csio->ccb_h.status = CAM_SEL_TIMEOUT; 1674 xpt_done(ccb); 1675 return; 1676 } 1677 1678 if ((sc->mps_flags & MPS_FLAGS_SHUTDOWN) != 0) { 1679 mps_dprint(sc, MPS_INFO, "%s shutting down\n", __func__); 1680 csio->ccb_h.status = CAM_TID_INVALID; 1681 xpt_done(ccb); 1682 return; 1683 } 1684 1685 cm = mps_alloc_command(sc); 1686 if (cm == NULL) { 1687 if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) { 1688 xpt_freeze_simq(sassc->sim, 1); 1689 sassc->flags |= MPSSAS_QUEUE_FROZEN; 1690 } 1691 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 1692 ccb->ccb_h.status |= CAM_REQUEUE_REQ; 1693 xpt_done(ccb); 1694 return; 1695 } 1696 1697 req = (MPI2_SCSI_IO_REQUEST *)cm->cm_req; 1698 bzero(req, sizeof(*req)); 1699 req->DevHandle = htole16(targ->handle); 1700 req->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; 1701 req->MsgFlags = 0; 1702 req->SenseBufferLowAddress = htole32(cm->cm_sense_busaddr); 1703 req->SenseBufferLength = MPS_SENSE_LEN; 1704 req->SGLFlags = 0; 1705 req->ChainOffset = 0; 1706 req->SGLOffset0 = 24; /* 32bit word offset to the SGL */ 1707 req->SGLOffset1= 0; 1708 req->SGLOffset2= 0; 1709 req->SGLOffset3= 0; 1710 req->SkipCount = 0; 1711 req->DataLength = htole32(csio->dxfer_len); 1712 req->BidirectionalDataLength = 0; 1713 req->IoFlags = htole16(csio->cdb_len); 1714 req->EEDPFlags = 0; 1715 1716 /* Note: BiDirectional transfers are not supported */ 1717 switch (csio->ccb_h.flags & CAM_DIR_MASK) { 1718 case CAM_DIR_IN: 1719 mpi_control = MPI2_SCSIIO_CONTROL_READ; 1720 cm->cm_flags |= MPS_CM_FLAGS_DATAIN; 1721 break; 1722 case CAM_DIR_OUT: 1723 mpi_control = MPI2_SCSIIO_CONTROL_WRITE; 1724 cm->cm_flags |= MPS_CM_FLAGS_DATAOUT; 1725 break; 1726 case CAM_DIR_NONE: 1727 default: 1728 mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER; 1729 break; 1730 } 1731 
1732 if (csio->cdb_len == 32) 1733 mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT; 1734 /* 1735 * It looks like the hardware doesn't require an explicit tag 1736 * number for each transaction. SAM Task Management not supported 1737 * at the moment. 1738 */ 1739 switch (csio->tag_action) { 1740 case MSG_HEAD_OF_Q_TAG: 1741 mpi_control |= MPI2_SCSIIO_CONTROL_HEADOFQ; 1742 break; 1743 case MSG_ORDERED_Q_TAG: 1744 mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ; 1745 break; 1746 case MSG_ACA_TASK: 1747 mpi_control |= MPI2_SCSIIO_CONTROL_ACAQ; 1748 break; 1749 case CAM_TAG_ACTION_NONE: 1750 case MSG_SIMPLE_Q_TAG: 1751 default: 1752 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ; 1753 break; 1754 } 1755 mpi_control |= sc->mapping_table[csio->ccb_h.target_id].TLR_bits; 1756 req->Control = htole32(mpi_control); 1757 if (MPS_SET_LUN(req->LUN, csio->ccb_h.target_lun) != 0) { 1758 mps_free_command(sc, cm); 1759 ccb->ccb_h.status = CAM_LUN_INVALID; 1760 xpt_done(ccb); 1761 return; 1762 } 1763 1764 if (csio->ccb_h.flags & CAM_CDB_POINTER) 1765 bcopy(csio->cdb_io.cdb_ptr, &req->CDB.CDB32[0], csio->cdb_len); 1766 else 1767 bcopy(csio->cdb_io.cdb_bytes, &req->CDB.CDB32[0],csio->cdb_len); 1768 req->IoFlags = htole16(csio->cdb_len); 1769 1770 /* 1771 * Check if EEDP is supported and enabled. If it is then check if the 1772 * SCSI opcode could be using EEDP. If so, make sure the LUN exists and 1773 * is formatted for EEDP support. If all of this is true, set CDB up 1774 * for EEDP transfer. 
1775 */ 1776 eedp_flags = op_code_prot[req->CDB.CDB32[0]]; 1777 if (sc->eedp_enabled && eedp_flags) { 1778 SLIST_FOREACH(lun, &targ->luns, lun_link) { 1779 if (lun->lun_id == csio->ccb_h.target_lun) { 1780 break; 1781 } 1782 } 1783 1784 if ((lun != NULL) && (lun->eedp_formatted)) { 1785 req->EEDPBlockSize = htole16(lun->eedp_block_size); 1786 eedp_flags |= (MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG | 1787 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG | 1788 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD); 1789 req->EEDPFlags = htole16(eedp_flags); 1790 1791 /* 1792 * If CDB less than 32, fill in Primary Ref Tag with 1793 * low 4 bytes of LBA. If CDB is 32, tag stuff is 1794 * already there. Also, set protection bit. FreeBSD 1795 * currently does not support CDBs bigger than 16, but 1796 * the code doesn't hurt, and will be here for the 1797 * future. 1798 */ 1799 if (csio->cdb_len != 32) { 1800 lba_byte = (csio->cdb_len == 16) ? 6 : 2; 1801 ref_tag_addr = (uint8_t *)&req->CDB.EEDP32. 1802 PrimaryReferenceTag; 1803 for (i = 0; i < 4; i++) { 1804 *ref_tag_addr = 1805 req->CDB.CDB32[lba_byte + i]; 1806 ref_tag_addr++; 1807 } 1808 req->CDB.EEDP32.PrimaryReferenceTag = 1809 htole32(req->CDB.EEDP32.PrimaryReferenceTag); 1810 req->CDB.EEDP32.PrimaryApplicationTagMask = 1811 0xFFFF; 1812 req->CDB.CDB32[1] = (req->CDB.CDB32[1] & 0x1F) | 1813 0x20; 1814 } else { 1815 eedp_flags |= 1816 MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG; 1817 req->EEDPFlags = htole16(eedp_flags); 1818 req->CDB.CDB32[10] = (req->CDB.CDB32[10] & 1819 0x1F) | 0x20; 1820 } 1821 } 1822 } 1823 1824 cm->cm_length = csio->dxfer_len; 1825 if (cm->cm_length != 0) { 1826 cm->cm_data = ccb; 1827 cm->cm_flags |= MPS_CM_FLAGS_USE_CCB; 1828 } else { 1829 cm->cm_data = NULL; 1830 } 1831 cm->cm_sge = &req->SGL; 1832 cm->cm_sglsize = (32 - 24) * 4; 1833 cm->cm_desc.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO; 1834 cm->cm_desc.SCSIIO.DevHandle = htole16(targ->handle); 1835 cm->cm_complete = mpssas_scsiio_complete; 1836 cm->cm_complete_data = 
ccb;
	cm->cm_targ = targ;
	cm->cm_lun = csio->ccb_h.target_lun;
	cm->cm_ccb = ccb;

	/*
	 * If HBA is a WD and the command is not for a retry, try to build a
	 * direct I/O message. If failed, or the command is for a retry, send
	 * the I/O to the IR volume itself.
	 */
	if (sc->WD_valid_config) {
		if (ccb->ccb_h.status != MPS_WD_RETRY) {
			mpssas_direct_drive_io(sassc, cm, ccb);
		} else {
			ccb->ccb_h.status = CAM_REQ_INPROG;
		}
	}

	/* ccb_h.timeout is in milliseconds; callout_reset wants ticks */
	callout_reset(&cm->cm_callout, (ccb->ccb_h.timeout * hz) / 1000,
	    mpssas_scsiio_timeout, cm);

	targ->issued++;
	targ->outstanding++;
	TAILQ_INSERT_TAIL(&targ->commands, cm, cm_link);
	ccb->ccb_h.status |= CAM_SIM_QUEUED;

	mpssas_log_command(cm, MPS_XINFO, "%s cm %p ccb %p outstanding %u\n",
	    __func__, cm, ccb, targ->outstanding);

	mps_map_command(sc, cm);
	return;
}

/*
 * Log a human-readable description of an MPI2 task management response
 * code.
 */
static void
mps_response_code(struct mps_softc *sc, u8 response_code)
{
	char *desc;

	switch (response_code) {
	case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
		desc = "task management request completed";
		break;
	case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
		desc = "invalid frame";
		break;
	case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
		desc = "task management request not supported";
		break;
	case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
		desc = "task management request failed";
		break;
	case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
		desc = "task management request succeeded";
		break;
	case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
		desc = "invalid lun";
		break;
	case 0xA:	/* overlapped tag; no symbolic constant used here */
		desc = "overlapped tag attempted";
		break;
	case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
		desc = "task queued, however not sent to target";
		break;
	default:
		desc = "unknown";
		break;
	}
	mps_dprint(sc, MPS_XINFO, "response_code(0x%01x): %s\n",
	    response_code, desc);
}
/**
 * mps_sc_failed_io_info - translate a non-successful SCSI_IO request into
 * human-readable log output (IOC status, SCSI status and SCSI state).
 */
static void
mps_sc_failed_io_info(struct mps_softc *sc, struct ccb_scsiio *csio,
    Mpi2SCSIIOReply_t *mpi_reply)
{
	u32 response_info;
	u8 *response_bytes;
	u16 ioc_status = le16toh(mpi_reply->IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	u8 scsi_state = mpi_reply->SCSIState;
	u8 scsi_status = mpi_reply->SCSIStatus;
	char *desc_ioc_state = NULL;
	char *desc_scsi_status = NULL;
	/* the scsi_state description is accumulated in sc->tmp_string */
	char *desc_scsi_state = sc->tmp_string;
	u32 log_info = le32toh(mpi_reply->IOCLogInfo);

	/* NOTE(review): log_info 0x31170000 is deliberately suppressed;
	 * the reason is not documented here -- confirm before changing. */
	if (log_info == 0x31170000)
		return;

	switch (ioc_status) {
	case MPI2_IOCSTATUS_SUCCESS:
		desc_ioc_state = "success";
		break;
	case MPI2_IOCSTATUS_INVALID_FUNCTION:
		desc_ioc_state = "invalid function";
		break;
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
		desc_ioc_state = "scsi recovered error";
		break;
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
		desc_ioc_state = "scsi invalid dev handle";
		break;
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		desc_ioc_state = "scsi device not there";
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
		desc_ioc_state = "scsi data overrun";
		break;
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
		desc_ioc_state = "scsi data underrun";
		break;
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
		desc_ioc_state = "scsi io data error";
		break;
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
		desc_ioc_state = "scsi protocol error";
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
		desc_ioc_state = "scsi task terminated";
		break;
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
		desc_ioc_state = "scsi residual mismatch";
		break;
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
		desc_ioc_state = "scsi task mgmt failed";
		break;
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
		desc_ioc_state = "scsi ioc terminated";
		break;
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		desc_ioc_state = "scsi ext terminated";
		break;
	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
		desc_ioc_state = "eedp guard error";
		break;
	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
		desc_ioc_state = "eedp ref tag error";
		break;
	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
		desc_ioc_state = "eedp app tag error";
		break;
	default:
		desc_ioc_state = "unknown";
		break;
	}

	switch (scsi_status) {
	case MPI2_SCSI_STATUS_GOOD:
		desc_scsi_status = "good";
		break;
	case MPI2_SCSI_STATUS_CHECK_CONDITION:
		desc_scsi_status = "check condition";
		break;
	case MPI2_SCSI_STATUS_CONDITION_MET:
		desc_scsi_status = "condition met";
		break;
	case MPI2_SCSI_STATUS_BUSY:
		desc_scsi_status = "busy";
		break;
	case MPI2_SCSI_STATUS_INTERMEDIATE:
		desc_scsi_status = "intermediate";
		break;
	case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
		desc_scsi_status = "intermediate condmet";
		break;
	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
		desc_scsi_status = "reservation conflict";
		break;
	case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
		desc_scsi_status = "command terminated";
		break;
	case MPI2_SCSI_STATUS_TASK_SET_FULL:
		desc_scsi_status = "task set full";
		break;
	case MPI2_SCSI_STATUS_ACA_ACTIVE:
		desc_scsi_status = "aca active";
		break;
	case MPI2_SCSI_STATUS_TASK_ABORTED:
		desc_scsi_status = "task aborted";
		break;
	default:
		desc_scsi_status = "unknown";
		break;
	}

	desc_scsi_state[0] = '\0';
	if (!scsi_state)
		desc_scsi_state = " ";
	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
		strcat(desc_scsi_state, "response info ");
	if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
		strcat(desc_scsi_state, "state terminated ");
	if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
strcat(desc_scsi_state, "no status "); 2032 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED) 2033 strcat(desc_scsi_state, "autosense failed "); 2034 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) 2035 strcat(desc_scsi_state, "autosense valid "); 2036 2037 mps_dprint(sc, MPS_XINFO, "\thandle(0x%04x), ioc_status(%s)(0x%04x)\n", 2038 le16toh(mpi_reply->DevHandle), desc_ioc_state, ioc_status); 2039 /* We can add more detail about underflow data here 2040 * TO-DO 2041 * */ 2042 mps_dprint(sc, MPS_XINFO, "\tscsi_status(%s)(0x%02x), " 2043 "scsi_state(%s)(0x%02x)\n", desc_scsi_status, scsi_status, 2044 desc_scsi_state, scsi_state); 2045 2046 if (sc->mps_debug & MPS_XINFO && 2047 scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) { 2048 mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : Start :\n"); 2049 scsi_sense_print(csio); 2050 mps_dprint(sc, MPS_XINFO, "-> Sense Buffer Data : End :\n"); 2051 } 2052 2053 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) { 2054 response_info = le32toh(mpi_reply->ResponseInfo); 2055 response_bytes = (u8 *)&response_info; 2056 mps_response_code(sc,response_bytes[0]); 2057 } 2058} 2059 2060static void 2061mpssas_scsiio_complete(struct mps_softc *sc, struct mps_command *cm) 2062{ 2063 MPI2_SCSI_IO_REPLY *rep; 2064 union ccb *ccb; 2065 struct ccb_scsiio *csio; 2066 struct mpssas_softc *sassc; 2067 struct scsi_vpd_supported_page_list *vpd_list = NULL; 2068 u8 *TLR_bits, TLR_on; 2069 int dir = 0, i; 2070 u16 alloc_len; 2071 2072 MPS_FUNCTRACE(sc); 2073 mps_dprint(sc, MPS_TRACE, 2074 "cm %p SMID %u ccb %p reply %p outstanding %u\n", cm, 2075 cm->cm_desc.Default.SMID, cm->cm_ccb, cm->cm_reply, 2076 cm->cm_targ->outstanding); 2077 2078 callout_stop(&cm->cm_callout); 2079 mtx_assert(&sc->mps_mtx, MA_OWNED); 2080 2081 sassc = sc->sassc; 2082 ccb = cm->cm_complete_data; 2083 csio = &ccb->csio; 2084 rep = (MPI2_SCSI_IO_REPLY *)cm->cm_reply; 2085 /* 2086 * XXX KDM if the chain allocation fails, does it matter if we do 2087 * the sync and unload 
here? It is simpler to do it in every case, 2088 * assuming it doesn't cause problems. 2089 */ 2090 if (cm->cm_data != NULL) { 2091 if (cm->cm_flags & MPS_CM_FLAGS_DATAIN) 2092 dir = BUS_DMASYNC_POSTREAD; 2093 else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT) 2094 dir = BUS_DMASYNC_POSTWRITE; 2095 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir); 2096 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap); 2097 } 2098 2099 cm->cm_targ->completed++; 2100 cm->cm_targ->outstanding--; 2101 TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link); 2102 ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED); 2103 2104 if (cm->cm_state == MPS_CM_STATE_TIMEDOUT) { 2105 TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery); 2106 if (cm->cm_reply != NULL) 2107 mpssas_log_command(cm, MPS_RECOVERY, 2108 "completed timedout cm %p ccb %p during recovery " 2109 "ioc %x scsi %x state %x xfer %u\n", 2110 cm, cm->cm_ccb, 2111 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState, 2112 le32toh(rep->TransferCount)); 2113 else 2114 mpssas_log_command(cm, MPS_RECOVERY, 2115 "completed timedout cm %p ccb %p during recovery\n", 2116 cm, cm->cm_ccb); 2117 } else if (cm->cm_targ->tm != NULL) { 2118 if (cm->cm_reply != NULL) 2119 mpssas_log_command(cm, MPS_RECOVERY, 2120 "completed cm %p ccb %p during recovery " 2121 "ioc %x scsi %x state %x xfer %u\n", 2122 cm, cm->cm_ccb, 2123 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState, 2124 le32toh(rep->TransferCount)); 2125 else 2126 mpssas_log_command(cm, MPS_RECOVERY, 2127 "completed cm %p ccb %p during recovery\n", 2128 cm, cm->cm_ccb); 2129 } else if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) { 2130 mpssas_log_command(cm, MPS_RECOVERY, 2131 "reset completed cm %p ccb %p\n", 2132 cm, cm->cm_ccb); 2133 } 2134 2135 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) { 2136 /* 2137 * We ran into an error after we tried to map the command, 2138 * so we're getting a callback without queueing the command 2139 * to the hardware. 
So we set the status here, and it will 2140 * be retained below. We'll go through the "fast path", 2141 * because there can be no reply when we haven't actually 2142 * gone out to the hardware. 2143 */ 2144 ccb->ccb_h.status = CAM_REQUEUE_REQ; 2145 2146 /* 2147 * Currently the only error included in the mask is 2148 * MPS_CM_FLAGS_CHAIN_FAILED, which means we're out of 2149 * chain frames. We need to freeze the queue until we get 2150 * a command that completed without this error, which will 2151 * hopefully have some chain frames attached that we can 2152 * use. If we wanted to get smarter about it, we would 2153 * only unfreeze the queue in this condition when we're 2154 * sure that we're getting some chain frames back. That's 2155 * probably unnecessary. 2156 */ 2157 if ((sassc->flags & MPSSAS_QUEUE_FROZEN) == 0) { 2158 xpt_freeze_simq(sassc->sim, 1); 2159 sassc->flags |= MPSSAS_QUEUE_FROZEN; 2160 mps_dprint(sc, MPS_XINFO, "Error sending command, " 2161 "freezing SIM queue\n"); 2162 } 2163 } 2164 2165 /* Take the fast path to completion */ 2166 if (cm->cm_reply == NULL) { 2167 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) { 2168 if ((sc->mps_flags & MPS_FLAGS_DIAGRESET) != 0) 2169 ccb->ccb_h.status = CAM_SCSI_BUS_RESET; 2170 else { 2171 ccb->ccb_h.status = CAM_REQ_CMP; 2172 ccb->csio.scsi_status = SCSI_STATUS_OK; 2173 } 2174 if (sassc->flags & MPSSAS_QUEUE_FROZEN) { 2175 ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 2176 sassc->flags &= ~MPSSAS_QUEUE_FROZEN; 2177 mps_dprint(sc, MPS_XINFO, 2178 "Unfreezing SIM queue\n"); 2179 } 2180 } 2181 2182 /* 2183 * There are two scenarios where the status won't be 2184 * CAM_REQ_CMP. The first is if MPS_CM_FLAGS_ERROR_MASK is 2185 * set, the second is in the MPS_FLAGS_DIAGRESET above. 2186 */ 2187 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 2188 /* 2189 * Freeze the dev queue so that commands are 2190 * executed in the correct order with after error 2191 * recovery. 
2192 */ 2193 ccb->ccb_h.status |= CAM_DEV_QFRZN; 2194 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1); 2195 } 2196 mps_free_command(sc, cm); 2197 xpt_done(ccb); 2198 return; 2199 } 2200 2201 mpssas_log_command(cm, MPS_XINFO, 2202 "ioc %x scsi %x state %x xfer %u\n", 2203 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState, 2204 le32toh(rep->TransferCount)); 2205 2206 /* 2207 * If this is a Direct Drive I/O, reissue the I/O to the original IR 2208 * Volume if an error occurred (normal I/O retry). Use the original 2209 * CCB, but set a flag that this will be a retry so that it's sent to 2210 * the original volume. Free the command but reuse the CCB. 2211 */ 2212 if (cm->cm_flags & MPS_CM_FLAGS_DD_IO) { 2213 mps_free_command(sc, cm); 2214 ccb->ccb_h.status = MPS_WD_RETRY; 2215 mpssas_action_scsiio(sassc, ccb); 2216 return; 2217 } 2218 2219 switch (le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) { 2220 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN: 2221 csio->resid = cm->cm_length - le32toh(rep->TransferCount); 2222 /* FALLTHROUGH */ 2223 case MPI2_IOCSTATUS_SUCCESS: 2224 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR: 2225 2226 if ((le16toh(rep->IOCStatus) & MPI2_IOCSTATUS_MASK) == 2227 MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR) 2228 mpssas_log_command(cm, MPS_XINFO, "recovered error\n"); 2229 2230 /* Completion failed at the transport level. */ 2231 if (rep->SCSIState & (MPI2_SCSI_STATE_NO_SCSI_STATUS | 2232 MPI2_SCSI_STATE_TERMINATED)) { 2233 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 2234 break; 2235 } 2236 2237 /* In a modern packetized environment, an autosense failure 2238 * implies that there's not much else that can be done to 2239 * recover the command. 2240 */ 2241 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_FAILED) { 2242 ccb->ccb_h.status = CAM_AUTOSENSE_FAIL; 2243 break; 2244 } 2245 2246 /* 2247 * CAM doesn't care about SAS Response Info data, but if this is 2248 * the state check if TLR should be done. If not, clear the 2249 * TLR_bits for the target. 
2250 */ 2251 if ((rep->SCSIState & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) && 2252 ((le32toh(rep->ResponseInfo) & MPI2_SCSI_RI_MASK_REASONCODE) == 2253 MPS_SCSI_RI_INVALID_FRAME)) { 2254 sc->mapping_table[csio->ccb_h.target_id].TLR_bits = 2255 (u8)MPI2_SCSIIO_CONTROL_NO_TLR; 2256 } 2257 2258 /* 2259 * Intentionally override the normal SCSI status reporting 2260 * for these two cases. These are likely to happen in a 2261 * multi-initiator environment, and we want to make sure that 2262 * CAM retries these commands rather than fail them. 2263 */ 2264 if ((rep->SCSIStatus == MPI2_SCSI_STATUS_COMMAND_TERMINATED) || 2265 (rep->SCSIStatus == MPI2_SCSI_STATUS_TASK_ABORTED)) { 2266 ccb->ccb_h.status = CAM_REQ_ABORTED; 2267 break; 2268 } 2269 2270 /* Handle normal status and sense */ 2271 csio->scsi_status = rep->SCSIStatus; 2272 if (rep->SCSIStatus == MPI2_SCSI_STATUS_GOOD) 2273 ccb->ccb_h.status = CAM_REQ_CMP; 2274 else 2275 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; 2276 2277 if (rep->SCSIState & MPI2_SCSI_STATE_AUTOSENSE_VALID) { 2278 int sense_len, returned_sense_len; 2279 2280 returned_sense_len = min(le32toh(rep->SenseCount), 2281 sizeof(struct scsi_sense_data)); 2282 if (returned_sense_len < ccb->csio.sense_len) 2283 ccb->csio.sense_resid = ccb->csio.sense_len - 2284 returned_sense_len; 2285 else 2286 ccb->csio.sense_resid = 0; 2287 2288 sense_len = min(returned_sense_len, 2289 ccb->csio.sense_len - ccb->csio.sense_resid); 2290 bzero(&ccb->csio.sense_data, 2291 sizeof(ccb->csio.sense_data)); 2292 bcopy(cm->cm_sense, &ccb->csio.sense_data, sense_len); 2293 ccb->ccb_h.status |= CAM_AUTOSNS_VALID; 2294 } 2295 2296 /* 2297 * Check if this is an INQUIRY command. If it's a VPD inquiry, 2298 * and it's page code 0 (Supported Page List), and there is 2299 * inquiry data, and this is for a sequential access device, and 2300 * the device is an SSP target, and TLR is supported by the 2301 * controller, turn the TLR_bits value ON if page 0x90 is 2302 * supported. 
2303 */ 2304 if ((csio->cdb_io.cdb_bytes[0] == INQUIRY) && 2305 (csio->cdb_io.cdb_bytes[1] & SI_EVPD) && 2306 (csio->cdb_io.cdb_bytes[2] == SVPD_SUPPORTED_PAGE_LIST) && 2307 ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) && 2308 (csio->data_ptr != NULL) && 2309 ((csio->data_ptr[0] & 0x1f) == T_SEQUENTIAL) && 2310 (sc->control_TLR) && 2311 (sc->mapping_table[csio->ccb_h.target_id].device_info & 2312 MPI2_SAS_DEVICE_INFO_SSP_TARGET)) { 2313 vpd_list = (struct scsi_vpd_supported_page_list *) 2314 csio->data_ptr; 2315 TLR_bits = &sc->mapping_table[csio->ccb_h.target_id]. 2316 TLR_bits; 2317 *TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR; 2318 TLR_on = (u8)MPI2_SCSIIO_CONTROL_TLR_ON; 2319 alloc_len = ((u16)csio->cdb_io.cdb_bytes[3] << 8) + 2320 csio->cdb_io.cdb_bytes[4]; 2321 alloc_len -= csio->resid; 2322 for (i = 0; i < MIN(vpd_list->length, alloc_len); i++) { 2323 if (vpd_list->list[i] == 0x90) { 2324 *TLR_bits = TLR_on; 2325 break; 2326 } 2327 } 2328 } 2329 break; 2330 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE: 2331 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE: 2332 /* 2333 * If devinfo is 0 this will be a volume. In that case don't 2334 * tell CAM that the volume is not there. We want volumes to 2335 * be enumerated until they are deleted/removed, not just 2336 * failed. 2337 */ 2338 if (cm->cm_targ->devinfo == 0) 2339 ccb->ccb_h.status = CAM_REQ_CMP; 2340 else 2341 ccb->ccb_h.status = CAM_DEV_NOT_THERE; 2342 break; 2343 case MPI2_IOCSTATUS_INVALID_SGL: 2344 mps_print_scsiio_cmd(sc, cm); 2345 ccb->ccb_h.status = CAM_UNREC_HBA_ERROR; 2346 break; 2347 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED: 2348 /* 2349 * This is one of the responses that comes back when an I/O 2350 * has been aborted. If it is because of a timeout that we 2351 * initiated, just set the status to CAM_CMD_TIMEOUT. 2352 * Otherwise set it to CAM_REQ_ABORTED. 
The effect on the 2353 * command is the same (it gets retried, subject to the 2354 * retry counter), the only difference is what gets printed 2355 * on the console. 2356 */ 2357 if (cm->cm_state == MPS_CM_STATE_TIMEDOUT) 2358 ccb->ccb_h.status = CAM_CMD_TIMEOUT; 2359 else 2360 ccb->ccb_h.status = CAM_REQ_ABORTED; 2361 break; 2362 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN: 2363 /* resid is ignored for this condition */ 2364 csio->resid = 0; 2365 ccb->ccb_h.status = CAM_DATA_RUN_ERR; 2366 break; 2367 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED: 2368 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED: 2369 /* 2370 * Since these are generally external (i.e. hopefully 2371 * transient transport-related) errors, retry these without 2372 * decrementing the retry count. 2373 */ 2374 ccb->ccb_h.status = CAM_REQUEUE_REQ; 2375 mpssas_log_command(cm, MPS_INFO, 2376 "terminated ioc %x scsi %x state %x xfer %u\n", 2377 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState, 2378 le32toh(rep->TransferCount)); 2379 break; 2380 case MPI2_IOCSTATUS_INVALID_FUNCTION: 2381 case MPI2_IOCSTATUS_INTERNAL_ERROR: 2382 case MPI2_IOCSTATUS_INVALID_VPID: 2383 case MPI2_IOCSTATUS_INVALID_FIELD: 2384 case MPI2_IOCSTATUS_INVALID_STATE: 2385 case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED: 2386 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR: 2387 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR: 2388 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: 2389 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED: 2390 default: 2391 mpssas_log_command(cm, MPS_XINFO, 2392 "completed ioc %x scsi %x state %x xfer %u\n", 2393 le16toh(rep->IOCStatus), rep->SCSIStatus, rep->SCSIState, 2394 le32toh(rep->TransferCount)); 2395 csio->resid = cm->cm_length; 2396 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 2397 break; 2398 } 2399 2400 mps_sc_failed_io_info(sc,csio,rep); 2401 2402 if (sassc->flags & MPSSAS_QUEUE_FROZEN) { 2403 ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 2404 sassc->flags &= ~MPSSAS_QUEUE_FROZEN; 2405 mps_dprint(sc, MPS_XINFO, "Command completed, " 2406 "unfreezing 
SIM queue\n"); 2407 } 2408 2409 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 2410 ccb->ccb_h.status |= CAM_DEV_QFRZN; 2411 xpt_freeze_devq(ccb->ccb_h.path, /*count*/ 1); 2412 } 2413 2414 mps_free_command(sc, cm); 2415 xpt_done(ccb); 2416} 2417 2418/* All Request reached here are Endian safe */ 2419static void 2420mpssas_direct_drive_io(struct mpssas_softc *sassc, struct mps_command *cm, 2421 union ccb *ccb) { 2422 pMpi2SCSIIORequest_t pIO_req; 2423 struct mps_softc *sc = sassc->sc; 2424 uint64_t virtLBA; 2425 uint32_t physLBA, stripe_offset, stripe_unit; 2426 uint32_t io_size, column; 2427 uint8_t *ptrLBA, lba_idx, physLBA_byte, *CDB; 2428 2429 /* 2430 * If this is a valid SCSI command (Read6, Read10, Read16, Write6, 2431 * Write10, or Write16), build a direct I/O message. Otherwise, the I/O 2432 * will be sent to the IR volume itself. Since Read6 and Write6 are a 2433 * bit different than the 10/16 CDBs, handle them separately. 2434 */ 2435 pIO_req = (pMpi2SCSIIORequest_t)cm->cm_req; 2436 CDB = pIO_req->CDB.CDB32; 2437 2438 /* 2439 * Handle 6 byte CDBs. 2440 */ 2441 if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_6) || 2442 (CDB[0] == WRITE_6))) { 2443 /* 2444 * Get the transfer size in blocks. 2445 */ 2446 io_size = (cm->cm_length >> sc->DD_block_exponent); 2447 2448 /* 2449 * Get virtual LBA given in the CDB. 2450 */ 2451 virtLBA = ((uint64_t)(CDB[1] & 0x1F) << 16) | 2452 ((uint64_t)CDB[2] << 8) | (uint64_t)CDB[3]; 2453 2454 /* 2455 * Check that LBA range for I/O does not exceed volume's 2456 * MaxLBA. 2457 */ 2458 if ((virtLBA + (uint64_t)io_size - 1) <= 2459 sc->DD_max_lba) { 2460 /* 2461 * Check if the I/O crosses a stripe boundary. If not, 2462 * translate the virtual LBA to a physical LBA and set 2463 * the DevHandle for the PhysDisk to be used. If it 2464 * does cross a boundry, do normal I/O. 
To get the 2465 * right DevHandle to use, get the map number for the 2466 * column, then use that map number to look up the 2467 * DevHandle of the PhysDisk. 2468 */ 2469 stripe_offset = (uint32_t)virtLBA & 2470 (sc->DD_stripe_size - 1); 2471 if ((stripe_offset + io_size) <= sc->DD_stripe_size) { 2472 physLBA = (uint32_t)virtLBA >> 2473 sc->DD_stripe_exponent; 2474 stripe_unit = physLBA / sc->DD_num_phys_disks; 2475 column = physLBA % sc->DD_num_phys_disks; 2476 pIO_req->DevHandle = 2477 htole16(sc->DD_column_map[column].dev_handle); 2478 /* ???? Is this endian safe*/ 2479 cm->cm_desc.SCSIIO.DevHandle = 2480 pIO_req->DevHandle; 2481 2482 physLBA = (stripe_unit << 2483 sc->DD_stripe_exponent) + stripe_offset; 2484 ptrLBA = &pIO_req->CDB.CDB32[1]; 2485 physLBA_byte = (uint8_t)(physLBA >> 16); 2486 *ptrLBA = physLBA_byte; 2487 ptrLBA = &pIO_req->CDB.CDB32[2]; 2488 physLBA_byte = (uint8_t)(physLBA >> 8); 2489 *ptrLBA = physLBA_byte; 2490 ptrLBA = &pIO_req->CDB.CDB32[3]; 2491 physLBA_byte = (uint8_t)physLBA; 2492 *ptrLBA = physLBA_byte; 2493 2494 /* 2495 * Set flag that Direct Drive I/O is 2496 * being done. 2497 */ 2498 cm->cm_flags |= MPS_CM_FLAGS_DD_IO; 2499 } 2500 } 2501 return; 2502 } 2503 2504 /* 2505 * Handle 10, 12 or 16 byte CDBs. 2506 */ 2507 if ((pIO_req->DevHandle == sc->DD_dev_handle) && ((CDB[0] == READ_10) || 2508 (CDB[0] == WRITE_10) || (CDB[0] == READ_16) || 2509 (CDB[0] == WRITE_16) || (CDB[0] == READ_12) || 2510 (CDB[0] == WRITE_12))) { 2511 /* 2512 * For 16-byte CDB's, verify that the upper 4 bytes of the CDB 2513 * are 0. If not, this is accessing beyond 2TB so handle it in 2514 * the else section. 10-byte and 12-byte CDB's are OK. 2515 * FreeBSD sends very rare 12 byte READ/WRITE, but driver is 2516 * ready to accept 12byte CDB for Direct IOs. 2517 */ 2518 if ((CDB[0] == READ_10 || CDB[0] == WRITE_10) || 2519 (CDB[0] == READ_12 || CDB[0] == WRITE_12) || 2520 !(CDB[2] | CDB[3] | CDB[4] | CDB[5])) { 2521 /* 2522 * Get the transfer size in blocks. 
2523 */ 2524 io_size = (cm->cm_length >> sc->DD_block_exponent); 2525 2526 /* 2527 * Get virtual LBA. Point to correct lower 4 bytes of 2528 * LBA in the CDB depending on command. 2529 */ 2530 lba_idx = ((CDB[0] == READ_12) || 2531 (CDB[0] == WRITE_12) || 2532 (CDB[0] == READ_10) || 2533 (CDB[0] == WRITE_10))? 2 : 6; 2534 virtLBA = ((uint64_t)CDB[lba_idx] << 24) | 2535 ((uint64_t)CDB[lba_idx + 1] << 16) | 2536 ((uint64_t)CDB[lba_idx + 2] << 8) | 2537 (uint64_t)CDB[lba_idx + 3]; 2538 2539 /* 2540 * Check that LBA range for I/O does not exceed volume's 2541 * MaxLBA. 2542 */ 2543 if ((virtLBA + (uint64_t)io_size - 1) <= 2544 sc->DD_max_lba) { 2545 /* 2546 * Check if the I/O crosses a stripe boundary. 2547 * If not, translate the virtual LBA to a 2548 * physical LBA and set the DevHandle for the 2549 * PhysDisk to be used. If it does cross a 2550 * boundry, do normal I/O. To get the right 2551 * DevHandle to use, get the map number for the 2552 * column, then use that map number to look up 2553 * the DevHandle of the PhysDisk. 2554 */ 2555 stripe_offset = (uint32_t)virtLBA & 2556 (sc->DD_stripe_size - 1); 2557 if ((stripe_offset + io_size) <= 2558 sc->DD_stripe_size) { 2559 physLBA = (uint32_t)virtLBA >> 2560 sc->DD_stripe_exponent; 2561 stripe_unit = physLBA / 2562 sc->DD_num_phys_disks; 2563 column = physLBA % 2564 sc->DD_num_phys_disks; 2565 pIO_req->DevHandle = 2566 htole16(sc->DD_column_map[column]. 
2567 dev_handle); 2568 cm->cm_desc.SCSIIO.DevHandle = 2569 pIO_req->DevHandle; 2570 2571 physLBA = (stripe_unit << 2572 sc->DD_stripe_exponent) + 2573 stripe_offset; 2574 ptrLBA = 2575 &pIO_req->CDB.CDB32[lba_idx]; 2576 physLBA_byte = (uint8_t)(physLBA >> 24); 2577 *ptrLBA = physLBA_byte; 2578 ptrLBA = 2579 &pIO_req->CDB.CDB32[lba_idx + 1]; 2580 physLBA_byte = (uint8_t)(physLBA >> 16); 2581 *ptrLBA = physLBA_byte; 2582 ptrLBA = 2583 &pIO_req->CDB.CDB32[lba_idx + 2]; 2584 physLBA_byte = (uint8_t)(physLBA >> 8); 2585 *ptrLBA = physLBA_byte; 2586 ptrLBA = 2587 &pIO_req->CDB.CDB32[lba_idx + 3]; 2588 physLBA_byte = (uint8_t)physLBA; 2589 *ptrLBA = physLBA_byte; 2590 2591 /* 2592 * Set flag that Direct Drive I/O is 2593 * being done. 2594 */ 2595 cm->cm_flags |= MPS_CM_FLAGS_DD_IO; 2596 } 2597 } 2598 } else { 2599 /* 2600 * 16-byte CDB and the upper 4 bytes of the CDB are not 2601 * 0. Get the transfer size in blocks. 2602 */ 2603 io_size = (cm->cm_length >> sc->DD_block_exponent); 2604 2605 /* 2606 * Get virtual LBA. 2607 */ 2608 virtLBA = ((uint64_t)CDB[2] << 54) | 2609 ((uint64_t)CDB[3] << 48) | 2610 ((uint64_t)CDB[4] << 40) | 2611 ((uint64_t)CDB[5] << 32) | 2612 ((uint64_t)CDB[6] << 24) | 2613 ((uint64_t)CDB[7] << 16) | 2614 ((uint64_t)CDB[8] << 8) | 2615 (uint64_t)CDB[9]; 2616 2617 /* 2618 * Check that LBA range for I/O does not exceed volume's 2619 * MaxLBA. 2620 */ 2621 if ((virtLBA + (uint64_t)io_size - 1) <= 2622 sc->DD_max_lba) { 2623 /* 2624 * Check if the I/O crosses a stripe boundary. 2625 * If not, translate the virtual LBA to a 2626 * physical LBA and set the DevHandle for the 2627 * PhysDisk to be used. If it does cross a 2628 * boundry, do normal I/O. To get the right 2629 * DevHandle to use, get the map number for the 2630 * column, then use that map number to look up 2631 * the DevHandle of the PhysDisk. 
2632 */ 2633 stripe_offset = (uint32_t)virtLBA & 2634 (sc->DD_stripe_size - 1); 2635 if ((stripe_offset + io_size) <= 2636 sc->DD_stripe_size) { 2637 physLBA = (uint32_t)(virtLBA >> 2638 sc->DD_stripe_exponent); 2639 stripe_unit = physLBA / 2640 sc->DD_num_phys_disks; 2641 column = physLBA % 2642 sc->DD_num_phys_disks; 2643 pIO_req->DevHandle = 2644 htole16(sc->DD_column_map[column]. 2645 dev_handle); 2646 cm->cm_desc.SCSIIO.DevHandle = 2647 pIO_req->DevHandle; 2648 2649 physLBA = (stripe_unit << 2650 sc->DD_stripe_exponent) + 2651 stripe_offset; 2652 2653 /* 2654 * Set upper 4 bytes of LBA to 0. We 2655 * assume that the phys disks are less 2656 * than 2 TB's in size. Then, set the 2657 * lower 4 bytes. 2658 */ 2659 pIO_req->CDB.CDB32[2] = 0; 2660 pIO_req->CDB.CDB32[3] = 0; 2661 pIO_req->CDB.CDB32[4] = 0; 2662 pIO_req->CDB.CDB32[5] = 0; 2663 ptrLBA = &pIO_req->CDB.CDB32[6]; 2664 physLBA_byte = (uint8_t)(physLBA >> 24); 2665 *ptrLBA = physLBA_byte; 2666 ptrLBA = &pIO_req->CDB.CDB32[7]; 2667 physLBA_byte = (uint8_t)(physLBA >> 16); 2668 *ptrLBA = physLBA_byte; 2669 ptrLBA = &pIO_req->CDB.CDB32[8]; 2670 physLBA_byte = (uint8_t)(physLBA >> 8); 2671 *ptrLBA = physLBA_byte; 2672 ptrLBA = &pIO_req->CDB.CDB32[9]; 2673 physLBA_byte = (uint8_t)physLBA; 2674 *ptrLBA = physLBA_byte; 2675 2676 /* 2677 * Set flag that Direct Drive I/O is 2678 * being done. 2679 */ 2680 cm->cm_flags |= MPS_CM_FLAGS_DD_IO; 2681 } 2682 } 2683 } 2684 } 2685} 2686 2687#if __FreeBSD_version >= 900026 2688static void 2689mpssas_smpio_complete(struct mps_softc *sc, struct mps_command *cm) 2690{ 2691 MPI2_SMP_PASSTHROUGH_REPLY *rpl; 2692 MPI2_SMP_PASSTHROUGH_REQUEST *req; 2693 uint64_t sasaddr; 2694 union ccb *ccb; 2695 2696 ccb = cm->cm_complete_data; 2697 2698 /* 2699 * Currently there should be no way we can hit this case. It only 2700 * happens when we have a failure to allocate chain frames, and SMP 2701 * commands require two S/G elements only. 
	 * That should be handled
	 * in the standard request size.
	 */
	if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		mps_dprint(sc, MPS_ERROR,"%s: cm_flags = %#x on SMP request!\n",
		    __func__, cm->cm_flags);
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		goto bailout;
	}

	rpl = (MPI2_SMP_PASSTHROUGH_REPLY *)cm->cm_reply;
	if (rpl == NULL) {
		mps_dprint(sc, MPS_ERROR, "%s: NULL cm_reply!\n", __func__);
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		goto bailout;
	}

	/* Reassemble the 64-bit SAS address from the request for logging. */
	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	sasaddr = le32toh(req->SASAddress.Low);
	sasaddr |= ((uint64_t)(le32toh(req->SASAddress.High))) << 32;

	if ((le16toh(rpl->IOCStatus) & MPI2_IOCSTATUS_MASK) !=
	    MPI2_IOCSTATUS_SUCCESS ||
	    rpl->SASStatus != MPI2_SASSTATUS_SUCCESS) {
		mps_dprint(sc, MPS_XINFO, "%s: IOCStatus %04x SASStatus %02x\n",
		    __func__, le16toh(rpl->IOCStatus), rpl->SASStatus);
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		goto bailout;
	}

	mps_dprint(sc, MPS_XINFO, "%s: SMP request to SAS address "
	    "%#jx completed successfully\n", __func__,
	    (uintmax_t)sasaddr);

	/* Byte 2 of the SMP response frame is the function result. */
	if (ccb->smpio.smp_response[2] == SMP_FR_ACCEPTED)
		ccb->ccb_h.status = CAM_REQ_CMP;
	else
		ccb->ccb_h.status = CAM_SMP_STATUS_ERROR;

bailout:
	/*
	 * We sync in both directions because we had DMAs in the S/G list
	 * in both directions.
	 */
	bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
	mps_free_command(sc, cm);
	xpt_done(ccb);
}

/*
 * Build and submit an MPI2 SMP passthrough request for an XPT_SMP_IO ccb
 * addressed to the given SAS address.  The request and response buffers
 * are mapped as a two-element uio (one iovec each way); completion is
 * handled by mpssas_smpio_complete().  Note there is no timeout/abort
 * mechanism for SMP passthrough (see the XXX comment below).
 */
static void
mpssas_send_smpcmd(struct mpssas_softc *sassc, union ccb *ccb, uint64_t sasaddr)
{
	struct mps_command *cm;
	uint8_t *request, *response;
	MPI2_SMP_PASSTHROUGH_REQUEST *req;
	struct mps_softc *sc;
	struct sglist *sg;
	int error;

	sc = sassc->sc;
	sg = NULL;
	error = 0;

	/*
	 * XXX We don't yet support physical addresses here.
	 */
	switch ((ccb->ccb_h.flags & CAM_DATA_MASK)) {
	case CAM_DATA_PADDR:
	case CAM_DATA_SG_PADDR:
		mps_dprint(sc, MPS_ERROR,
		    "%s: physical addresses not supported\n", __func__);
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		return;
	case CAM_DATA_SG:
		/*
		 * The chip does not support more than one buffer for the
		 * request or response.
		 */
		if ((ccb->smpio.smp_request_sglist_cnt > 1)
		    || (ccb->smpio.smp_response_sglist_cnt > 1)) {
			mps_dprint(sc, MPS_ERROR,
			    "%s: multiple request or response "
			    "buffer segments not supported for SMP\n",
			    __func__);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			return;
		}

		/*
		 * The CAM_SCATTER_VALID flag was originally implemented
		 * for the XPT_SCSI_IO CCB, which only has one data pointer.
		 * We have two.  So, just take that flag to mean that we
		 * might have S/G lists, and look at the S/G segment count
		 * to figure out whether that is the case for each individual
		 * buffer.
		 */
		if (ccb->smpio.smp_request_sglist_cnt != 0) {
			bus_dma_segment_t *req_sg;

			req_sg = (bus_dma_segment_t *)ccb->smpio.smp_request;
			request = (uint8_t *)(uintptr_t)req_sg[0].ds_addr;
		} else
			request = ccb->smpio.smp_request;

		if (ccb->smpio.smp_response_sglist_cnt != 0) {
			bus_dma_segment_t *rsp_sg;

			rsp_sg = (bus_dma_segment_t *)ccb->smpio.smp_response;
			response = (uint8_t *)(uintptr_t)rsp_sg[0].ds_addr;
		} else
			response = ccb->smpio.smp_response;
		break;
	case CAM_DATA_VADDR:
		request = ccb->smpio.smp_request;
		response = ccb->smpio.smp_response;
		break;
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		return;
	}

	cm = mps_alloc_command(sc);
	if (cm == NULL) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: cannot allocate command\n", __func__);
		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
		xpt_done(ccb);
		return;
	}

	req = (MPI2_SMP_PASSTHROUGH_REQUEST *)cm->cm_req;
	bzero(req, sizeof(*req));
	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;

	/* Allow the chip to use any route to this SAS address. */
	req->PhysicalPort = 0xff;

	req->RequestDataLength = htole16(ccb->smpio.smp_request_len);
	req->SGLFlags =
	    MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE | MPI2_SGLFLAGS_SGL_TYPE_MPI;

	mps_dprint(sc, MPS_XINFO, "%s: sending SMP request to SAS "
	    "address %#jx\n", __func__, (uintmax_t)sasaddr);

	mpi_init_sge(cm, req, &req->SGL);

	/*
	 * Set up a uio to pass into mps_map_command().  This allows us to
	 * do one map command, and one busdma call in there.
	 */
	cm->cm_uio.uio_iov = cm->cm_iovec;
	cm->cm_uio.uio_iovcnt = 2;
	cm->cm_uio.uio_segflg = UIO_SYSSPACE;

	/*
	 * The read/write flag isn't used by busdma, but set it just in
	 * case.  This isn't exactly accurate, either, since we're going in
	 * both directions.
	 */
	cm->cm_uio.uio_rw = UIO_WRITE;

	cm->cm_iovec[0].iov_base = request;
	cm->cm_iovec[0].iov_len = le16toh(req->RequestDataLength);
	cm->cm_iovec[1].iov_base = response;
	cm->cm_iovec[1].iov_len = ccb->smpio.smp_response_len;

	cm->cm_uio.uio_resid = cm->cm_iovec[0].iov_len +
	    cm->cm_iovec[1].iov_len;

	/*
	 * Trigger a warning message in mps_data_cb() for the user if we
	 * wind up exceeding two S/G segments.  The chip expects one
	 * segment for the request and another for the response.
	 */
	cm->cm_max_segs = 2;

	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cm->cm_complete = mpssas_smpio_complete;
	cm->cm_complete_data = ccb;

	/*
	 * Tell the mapping code that we're using a uio, and that this is
	 * an SMP passthrough request.  There is a little special-case
	 * logic there (in mps_data_cb()) to handle the bidirectional
	 * transfer.
	 */
	cm->cm_flags |= MPS_CM_FLAGS_USE_UIO | MPS_CM_FLAGS_SMP_PASS |
	    MPS_CM_FLAGS_DATAIN | MPS_CM_FLAGS_DATAOUT;

	/* The chip data format is little endian. */
	req->SASAddress.High = htole32(sasaddr >> 32);
	req->SASAddress.Low = htole32(sasaddr);

	/*
	 * XXX Note that we don't have a timeout/abort mechanism here.
	 * From the manual, it looks like task management requests only
	 * work for SCSI IO and SATA passthrough requests.  We may need to
	 * have a mechanism to retry requests in the event of a chip reset
	 * at least.  Hopefully the chip will insure that any errors short
	 * of that are relayed back to the driver.
	 */
	error = mps_map_command(sc, cm);
	if ((error != 0) && (error != EINPROGRESS)) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: error %d returned from mps_map_command()\n",
		    __func__, error);
		goto bailout_error;
	}

	return;

bailout_error:
	mps_free_command(sc, cm);
	ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
	xpt_done(ccb);
	return;

}

/*
 * Handle an XPT_SMP_IO ccb: resolve the SAS address of the SMP target
 * (the device itself if it embeds an SMP target, otherwise its parent
 * expander) and hand off to mpssas_send_smpcmd().
 */
static void
mpssas_action_smpio(struct mpssas_softc *sassc, union ccb *ccb)
{
	struct mps_softc *sc;
	struct mpssas_target *targ;
	uint64_t sasaddr = 0;

	sc = sassc->sc;

	/*
	 * Make sure the target exists.
	 */
	targ = &sassc->targets[ccb->ccb_h.target_id];
	if (targ->handle == 0x0) {
		mps_dprint(sc, MPS_ERROR,
		    "%s: target %d does not exist!\n", __func__,
		    ccb->ccb_h.target_id);
		ccb->ccb_h.status = CAM_SEL_TIMEOUT;
		xpt_done(ccb);
		return;
	}

	/*
	 * If this device has an embedded SMP target, we'll talk to it
	 * directly.
	 * figure out what the expander's address is.
	 */
	if ((targ->devinfo & MPI2_SAS_DEVICE_INFO_SMP_TARGET) != 0)
		sasaddr = targ->sasaddr;

	/*
	 * If we don't have a SAS address for the expander yet, try
	 * grabbing it from the page 0x83 information cached in the
	 * transport layer for this target.  LSI expanders report the
	 * expander SAS address as the port-associated SAS address in
	 * Inquiry VPD page 0x83.  Maxim expanders don't report it in page
	 * 0x83.
	 *
	 * XXX KDM disable this for now, but leave it commented out so that
	 * it is obvious that this is another possible way to get the SAS
	 * address.
	 *
	 * The parent handle method below is a little more reliable, and
	 * the other benefit is that it works for devices other than SES
	 * devices.  So you can send a SMP request to a da(4) device and it
	 * will get routed to the expander that device is attached to.
	 * (Assuming the da(4) device doesn't contain an SMP target...)
	 */
#if 0
	if (sasaddr == 0)
		sasaddr = xpt_path_sas_addr(ccb->ccb_h.path);
#endif

	/*
	 * If we still don't have a SAS address for the expander, look for
	 * the parent device of this device, which is probably the expander.
	 */
	if (sasaddr == 0) {
#ifdef OLD_MPS_PROBE
		struct mpssas_target *parent_target;
#endif

		if (targ->parent_handle == 0x0) {
			mps_dprint(sc, MPS_ERROR,
			    "%s: handle %d does not have a valid "
			    "parent handle!\n", __func__, targ->handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;
		}
#ifdef OLD_MPS_PROBE
		parent_target = mpssas_find_target_by_handle(sassc, 0,
		    targ->parent_handle);

		if (parent_target == NULL) {
			mps_dprint(sc, MPS_ERROR,
			    "%s: handle %d does not have a valid "
			    "parent target!\n", __func__, targ->handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;
		}

		if ((parent_target->devinfo &
		    MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_dprint(sc, MPS_ERROR,
			    "%s: handle %d parent %d does not "
			    "have an SMP target!\n", __func__,
			    targ->handle, parent_target->handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;

		}

		sasaddr = parent_target->sasaddr;
#else /* OLD_MPS_PROBE */
		/* Use the parent devinfo/address cached on the target itself. */
		if ((targ->parent_devinfo &
		    MPI2_SAS_DEVICE_INFO_SMP_TARGET) == 0) {
			mps_dprint(sc, MPS_ERROR,
			    "%s: handle %d parent %d does not "
			    "have an SMP target!\n", __func__,
			    targ->handle, targ->parent_handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;

		}
		if (targ->parent_sasaddr == 0x0) {
			mps_dprint(sc, MPS_ERROR,
			    "%s: handle %d parent handle %d does "
			    "not have a valid SAS address!\n",
			    __func__, targ->handle, targ->parent_handle);
			ccb->ccb_h.status = CAM_REQ_INVALID;
			goto bailout;
		}

		sasaddr = targ->parent_sasaddr;
#endif /* OLD_MPS_PROBE */

	}

	if (sasaddr == 0) {
		mps_dprint(sc, MPS_INFO,
		    "%s: unable to find SAS address for handle %d\n",
		    __func__, targ->handle);
		ccb->ccb_h.status = CAM_REQ_INVALID;
		goto bailout;
	}
	mpssas_send_smpcmd(sassc, ccb, sasaddr);

	return;

bailout:
	xpt_done(ccb);

}
#endif //__FreeBSD_version >= 900026

/*
 * Handle an XPT_RESET_DEV ccb: issue an MPI2 Target Reset task management
 * request (with SAS hard link reset / SATA link reset semantics) for the
 * addressed target.  Completion is handled by mpssas_resetdev_complete().
 */
static void
mpssas_action_resetdev(struct mpssas_softc *sassc, union ccb *ccb)
{
	MPI2_SCSI_TASK_MANAGE_REQUEST *req;
	struct mps_softc *sc;
	struct mps_command *tm;
	struct mpssas_target *targ;

	MPS_FUNCTRACE(sassc->sc);
	mtx_assert(&sassc->sc->mps_mtx, MA_OWNED);

	sc = sassc->sc;
	tm = mps_alloc_command(sc);
	if (tm == NULL) {
		mps_dprint(sc, MPS_ERROR,
		    "command alloc failure in mpssas_action_resetdev\n");
		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
		xpt_done(ccb);
		return;
	}

	targ = &sassc->targets[ccb->ccb_h.target_id];
	req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;
	req->DevHandle = htole16(targ->handle);
	req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	req->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;

	/* SAS Hard Link Reset / SATA Link Reset */
	req->MsgFlags = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	tm->cm_data = NULL;
	tm->cm_desc.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	tm->cm_complete = mpssas_resetdev_complete;
	tm->cm_complete_data = ccb;
	tm->cm_targ = targ;
	mps_map_command(sc, tm);
}

/*
 * Completion handler for the target reset issued by
 * mpssas_action_resetdev().  Reports the outcome to CAM and, on success,
 * announces AC_SENT_BDR for the target.
 */
static void
mpssas_resetdev_complete(struct mps_softc *sc, struct mps_command *tm)
{
	MPI2_SCSI_TASK_MANAGE_REPLY *resp;
	union ccb *ccb;

	MPS_FUNCTRACE(sc);
	mtx_assert(&sc->mps_mtx, MA_OWNED);

	resp = (MPI2_SCSI_TASK_MANAGE_REPLY *)tm->cm_reply;
	ccb = tm->cm_complete_data;

	/*
	 * Currently there should be no way we can hit this case.  It only
	 * happens when we have a failure to allocate chain frames, and
	 * task management commands don't have S/G lists.
	 */
	if ((tm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
		MPI2_SCSI_TASK_MANAGE_REQUEST *req;

		req = (MPI2_SCSI_TASK_MANAGE_REQUEST *)tm->cm_req;

		mps_dprint(sc, MPS_ERROR,
		    "%s: cm_flags = %#x for reset of handle %#04x! "
		    "This should not happen!\n", __func__, tm->cm_flags,
		    req->DevHandle);
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		goto bailout;
	}

	mps_dprint(sc, MPS_XINFO,
	    "%s: IOCStatus = 0x%x ResponseCode = 0x%x\n", __func__,
	    le16toh(resp->IOCStatus), le32toh(resp->ResponseCode));

	if (le32toh(resp->ResponseCode) == MPI2_SCSITASKMGMT_RSP_TM_COMPLETE) {
		ccb->ccb_h.status = CAM_REQ_CMP;
		mpssas_announce_reset(sc, AC_SENT_BDR, tm->cm_targ->tid,
		    CAM_LUN_WILDCARD);
	}
	else
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;

bailout:

	mpssas_free_tm(sc, tm);
	xpt_done(ccb);
}

/*
 * CAM SIM poll routine; called with interrupts unavailable (e.g. during
 * a panic/crash dump), so just run the interrupt handler directly.
 */
static void
mpssas_poll(struct cam_sim *sim)
{
	struct mpssas_softc *sassc;

	sassc = cam_sim_softc(sim);

	if (sassc->sc->mps_debug & MPS_TRACE) {
		/* frequent debug messages during a panic just slow
		 * everything down too much.
		 */
		mps_printf(sassc->sc, "%s clearing MPS_TRACE\n", __func__);
		sassc->sc->mps_debug &= ~MPS_TRACE;
	}

	mps_intr_locked(sassc->sc);
}

static void
mpssas_async(void *callback_arg, uint32_t code, struct cam_path *path,
    void *arg)
{
	struct mps_softc *sc;

	sc = (struct mps_softc *)callback_arg;

	switch (code) {
#if (__FreeBSD_version >= 1000006) || \
    ((__FreeBSD_version >= 901503) && (__FreeBSD_version < 1000000))
	case AC_ADVINFO_CHANGED: {
		struct mpssas_target *target;
		struct mpssas_softc *sassc;
		struct scsi_read_capacity_data_long rcap_buf;
		struct ccb_dev_advinfo cdai;
		struct mpssas_lun *lun;
		lun_id_t lunid;
		int found_lun;
		uintptr_t buftype;

		buftype = (uintptr_t)arg;

		found_lun = 0;
		sassc = sc->sassc;

		/*
		 * We're only interested in read capacity data changes.
		 */
		if (buftype != CDAI_TYPE_RCAPLONG)
			break;

		/*
		 * We should have a handle for this, but check to make sure.
3198 */ 3199 target = &sassc->targets[xpt_path_target_id(path)]; 3200 if (target->handle == 0) 3201 break; 3202 3203 lunid = xpt_path_lun_id(path); 3204 3205 SLIST_FOREACH(lun, &target->luns, lun_link) { 3206 if (lun->lun_id == lunid) { 3207 found_lun = 1; 3208 break; 3209 } 3210 } 3211 3212 if (found_lun == 0) { 3213 lun = malloc(sizeof(struct mpssas_lun), M_MPT2, 3214 M_NOWAIT | M_ZERO); 3215 if (lun == NULL) { 3216 mps_dprint(sc, MPS_ERROR, "Unable to alloc " 3217 "LUN for EEDP support.\n"); 3218 break; 3219 } 3220 lun->lun_id = lunid; 3221 SLIST_INSERT_HEAD(&target->luns, lun, lun_link); 3222 } 3223 3224 bzero(&rcap_buf, sizeof(rcap_buf)); 3225 xpt_setup_ccb(&cdai.ccb_h, path, CAM_PRIORITY_NORMAL); 3226 cdai.ccb_h.func_code = XPT_DEV_ADVINFO; 3227 cdai.ccb_h.flags = CAM_DIR_IN; 3228 cdai.buftype = CDAI_TYPE_RCAPLONG; 3229 cdai.flags = 0; 3230 cdai.bufsiz = sizeof(rcap_buf); 3231 cdai.buf = (uint8_t *)&rcap_buf; 3232 xpt_action((union ccb *)&cdai); 3233 if ((cdai.ccb_h.status & CAM_DEV_QFRZN) != 0) 3234 cam_release_devq(cdai.ccb_h.path, 3235 0, 0, 0, FALSE); 3236 3237 if (((cdai.ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) 3238 && (rcap_buf.prot & SRC16_PROT_EN)) { 3239 lun->eedp_formatted = TRUE; 3240 lun->eedp_block_size = scsi_4btoul(rcap_buf.length); 3241 } else { 3242 lun->eedp_formatted = FALSE; 3243 lun->eedp_block_size = 0; 3244 } 3245 break; 3246 } 3247#else 3248 case AC_FOUND_DEVICE: { 3249 struct ccb_getdev *cgd; 3250 3251 cgd = arg; 3252 mpssas_check_eedp(sc, path, cgd); 3253 break; 3254 } 3255#endif 3256 default: 3257 break; 3258 } 3259} 3260 3261#if (__FreeBSD_version < 901503) || \ 3262 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) 3263static void 3264mpssas_check_eedp(struct mps_softc *sc, struct cam_path *path, 3265 struct ccb_getdev *cgd) 3266{ 3267 struct mpssas_softc *sassc = sc->sassc; 3268 struct ccb_scsiio *csio; 3269 struct scsi_read_capacity_16 *scsi_cmd; 3270 struct scsi_read_capacity_eedp *rcap_buf; 3271 
path_id_t pathid; 3272 target_id_t targetid; 3273 lun_id_t lunid; 3274 union ccb *ccb; 3275 struct cam_path *local_path; 3276 struct mpssas_target *target; 3277 struct mpssas_lun *lun; 3278 uint8_t found_lun; 3279 char path_str[64]; 3280 3281 sassc = sc->sassc; 3282 pathid = cam_sim_path(sassc->sim); 3283 targetid = xpt_path_target_id(path); 3284 lunid = xpt_path_lun_id(path); 3285 3286 target = &sassc->targets[targetid]; 3287 if (target->handle == 0x0) 3288 return; 3289 3290 /* 3291 * Determine if the device is EEDP capable. 3292 * 3293 * If this flag is set in the inquiry data, 3294 * the device supports protection information, 3295 * and must support the 16 byte read 3296 * capacity command, otherwise continue without 3297 * sending read cap 16 3298 */ 3299 if ((cgd->inq_data.spc3_flags & SPC3_SID_PROTECT) == 0) 3300 return; 3301 3302 /* 3303 * Issue a READ CAPACITY 16 command. This info 3304 * is used to determine if the LUN is formatted 3305 * for EEDP support. 3306 */ 3307 ccb = xpt_alloc_ccb_nowait(); 3308 if (ccb == NULL) { 3309 mps_dprint(sc, MPS_ERROR, "Unable to alloc CCB " 3310 "for EEDP support.\n"); 3311 return; 3312 } 3313 3314 if (xpt_create_path(&local_path, xpt_periph, 3315 pathid, targetid, lunid) != CAM_REQ_CMP) { 3316 mps_dprint(sc, MPS_ERROR, "Unable to create " 3317 "path for EEDP support\n"); 3318 xpt_free_ccb(ccb); 3319 return; 3320 } 3321 3322 /* 3323 * If LUN is already in list, don't create a new 3324 * one. 
3325 */ 3326 found_lun = FALSE; 3327 SLIST_FOREACH(lun, &target->luns, lun_link) { 3328 if (lun->lun_id == lunid) { 3329 found_lun = TRUE; 3330 break; 3331 } 3332 } 3333 if (!found_lun) { 3334 lun = malloc(sizeof(struct mpssas_lun), M_MPT2, 3335 M_NOWAIT | M_ZERO); 3336 if (lun == NULL) { 3337 mps_dprint(sc, MPS_ERROR, 3338 "Unable to alloc LUN for EEDP support.\n"); 3339 xpt_free_path(local_path); 3340 xpt_free_ccb(ccb); 3341 return; 3342 } 3343 lun->lun_id = lunid; 3344 SLIST_INSERT_HEAD(&target->luns, lun, 3345 lun_link); 3346 } 3347 3348 xpt_path_string(local_path, path_str, sizeof(path_str)); 3349 mps_dprint(sc, MPS_INFO, "Sending read cap: path %s handle %d\n", 3350 path_str, target->handle); 3351 3352 /* 3353 * Issue a READ CAPACITY 16 command for the LUN. 3354 * The mpssas_read_cap_done function will load 3355 * the read cap info into the LUN struct. 3356 */ 3357 rcap_buf = malloc(sizeof(struct scsi_read_capacity_eedp), 3358 M_MPT2, M_NOWAIT | M_ZERO); 3359 if (rcap_buf == NULL) { 3360 mps_dprint(sc, MPS_FAULT, 3361 "Unable to alloc read capacity buffer for EEDP support.\n"); 3362 xpt_free_path(ccb->ccb_h.path); 3363 xpt_free_ccb(ccb); 3364 return; 3365 } 3366 xpt_setup_ccb(&ccb->ccb_h, local_path, CAM_PRIORITY_XPT); 3367 csio = &ccb->csio; 3368 csio->ccb_h.func_code = XPT_SCSI_IO; 3369 csio->ccb_h.flags = CAM_DIR_IN; 3370 csio->ccb_h.retry_count = 4; 3371 csio->ccb_h.cbfcnp = mpssas_read_cap_done; 3372 csio->ccb_h.timeout = 60000; 3373 csio->data_ptr = (uint8_t *)rcap_buf; 3374 csio->dxfer_len = sizeof(struct scsi_read_capacity_eedp); 3375 csio->sense_len = MPS_SENSE_LEN; 3376 csio->cdb_len = sizeof(*scsi_cmd); 3377 csio->tag_action = MSG_SIMPLE_Q_TAG; 3378 3379 scsi_cmd = (struct scsi_read_capacity_16 *)&csio->cdb_io.cdb_bytes; 3380 bzero(scsi_cmd, sizeof(*scsi_cmd)); 3381 scsi_cmd->opcode = 0x9E; 3382 scsi_cmd->service_action = SRC16_SERVICE_ACTION; 3383 ((uint8_t *)scsi_cmd)[13] = sizeof(struct scsi_read_capacity_eedp); 3384 3385 ccb->ccb_h.ppriv_ptr1 = 
sassc; 3386 xpt_action(ccb); 3387} 3388 3389static void 3390mpssas_read_cap_done(struct cam_periph *periph, union ccb *done_ccb) 3391{ 3392 struct mpssas_softc *sassc; 3393 struct mpssas_target *target; 3394 struct mpssas_lun *lun; 3395 struct scsi_read_capacity_eedp *rcap_buf; 3396 3397 if (done_ccb == NULL) 3398 return; 3399 3400 /* Driver need to release devq, it Scsi command is 3401 * generated by driver internally. 3402 * Currently there is a single place where driver 3403 * calls scsi command internally. In future if driver 3404 * calls more scsi command internally, it needs to release 3405 * devq internally, since those command will not go back to 3406 * cam_periph. 3407 */ 3408 if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) ) { 3409 done_ccb->ccb_h.status &= ~CAM_DEV_QFRZN; 3410 xpt_release_devq(done_ccb->ccb_h.path, 3411 /*count*/ 1, /*run_queue*/TRUE); 3412 } 3413 3414 rcap_buf = (struct scsi_read_capacity_eedp *)done_ccb->csio.data_ptr; 3415 3416 /* 3417 * Get the LUN ID for the path and look it up in the LUN list for the 3418 * target. 3419 */ 3420 sassc = (struct mpssas_softc *)done_ccb->ccb_h.ppriv_ptr1; 3421 target = &sassc->targets[done_ccb->ccb_h.target_id]; 3422 SLIST_FOREACH(lun, &target->luns, lun_link) { 3423 if (lun->lun_id != done_ccb->ccb_h.target_lun) 3424 continue; 3425 3426 /* 3427 * Got the LUN in the target's LUN list. Fill it in 3428 * with EEDP info. If the READ CAP 16 command had some 3429 * SCSI error (common if command is not supported), mark 3430 * the lun as not supporting EEDP and set the block size 3431 * to 0. 
3432 */ 3433 if (((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) 3434 || (done_ccb->csio.scsi_status != SCSI_STATUS_OK)) { 3435 lun->eedp_formatted = FALSE; 3436 lun->eedp_block_size = 0; 3437 break; 3438 } 3439 3440 if (rcap_buf->protect & 0x01) { 3441 mps_dprint(sassc->sc, MPS_INFO, "LUN %d for " 3442 "target ID %d is formatted for EEDP " 3443 "support.\n", done_ccb->ccb_h.target_lun, 3444 done_ccb->ccb_h.target_id); 3445 lun->eedp_formatted = TRUE; 3446 lun->eedp_block_size = scsi_4btoul(rcap_buf->length); 3447 } 3448 break; 3449 } 3450 3451 // Finished with this CCB and path. 3452 free(rcap_buf, M_MPT2); 3453 xpt_free_path(done_ccb->ccb_h.path); 3454 xpt_free_ccb(done_ccb); 3455} 3456#endif /* (__FreeBSD_version < 901503) || \ 3457 ((__FreeBSD_version >= 1000000) && (__FreeBSD_version < 1000006)) */ 3458 3459int 3460mpssas_startup(struct mps_softc *sc) 3461{ 3462 3463 /* 3464 * Send the port enable message and set the wait_for_port_enable flag. 3465 * This flag helps to keep the simq frozen until all discovery events 3466 * are processed. 
3467 */ 3468 sc->wait_for_port_enable = 1; 3469 mpssas_send_portenable(sc); 3470 return (0); 3471} 3472 3473static int 3474mpssas_send_portenable(struct mps_softc *sc) 3475{ 3476 MPI2_PORT_ENABLE_REQUEST *request; 3477 struct mps_command *cm; 3478 3479 MPS_FUNCTRACE(sc); 3480 3481 if ((cm = mps_alloc_command(sc)) == NULL) 3482 return (EBUSY); 3483 request = (MPI2_PORT_ENABLE_REQUEST *)cm->cm_req; 3484 request->Function = MPI2_FUNCTION_PORT_ENABLE; 3485 request->MsgFlags = 0; 3486 request->VP_ID = 0; 3487 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; 3488 cm->cm_complete = mpssas_portenable_complete; 3489 cm->cm_data = NULL; 3490 cm->cm_sge = NULL; 3491 3492 mps_map_command(sc, cm); 3493 mps_dprint(sc, MPS_XINFO, 3494 "mps_send_portenable finished cm %p req %p complete %p\n", 3495 cm, cm->cm_req, cm->cm_complete); 3496 return (0); 3497} 3498 3499static void 3500mpssas_portenable_complete(struct mps_softc *sc, struct mps_command *cm) 3501{ 3502 MPI2_PORT_ENABLE_REPLY *reply; 3503 struct mpssas_softc *sassc; 3504 3505 MPS_FUNCTRACE(sc); 3506 sassc = sc->sassc; 3507 3508 /* 3509 * Currently there should be no way we can hit this case. It only 3510 * happens when we have a failure to allocate chain frames, and 3511 * port enable commands don't have S/G lists. 3512 */ 3513 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) { 3514 mps_dprint(sc, MPS_ERROR, "%s: cm_flags = %#x for port enable! 
" 3515 "This should not happen!\n", __func__, cm->cm_flags); 3516 } 3517 3518 reply = (MPI2_PORT_ENABLE_REPLY *)cm->cm_reply; 3519 if (reply == NULL) 3520 mps_dprint(sc, MPS_FAULT, "Portenable NULL reply\n"); 3521 else if (le16toh(reply->IOCStatus & MPI2_IOCSTATUS_MASK) != 3522 MPI2_IOCSTATUS_SUCCESS) 3523 mps_dprint(sc, MPS_FAULT, "Portenable failed\n"); 3524 3525 mps_free_command(sc, cm); 3526 if (sc->mps_ich.ich_arg != NULL) { 3527 mps_dprint(sc, MPS_XINFO, "disestablish config intrhook\n"); 3528 config_intrhook_disestablish(&sc->mps_ich); 3529 sc->mps_ich.ich_arg = NULL; 3530 } 3531 3532 /* 3533 * Get WarpDrive info after discovery is complete but before the scan 3534 * starts. At this point, all devices are ready to be exposed to the 3535 * OS. If devices should be hidden instead, take them out of the 3536 * 'targets' array before the scan. The devinfo for a disk will have 3537 * some info and a volume's will be 0. Use that to remove disks. 3538 */ 3539 mps_wd_config_pages(sc); 3540 3541 /* 3542 * Done waiting for port enable to complete. Decrement the refcount. 3543 * If refcount is 0, discovery is complete and a rescan of the bus can 3544 * take place. Since the simq was explicitly frozen before port 3545 * enable, it must be explicitly released here to keep the 3546 * freeze/release count in sync. 3547 */ 3548 sc->wait_for_port_enable = 0; 3549 sc->port_enable_complete = 1; 3550 wakeup(&sc->port_enable_complete); 3551 mpssas_startup_decrement(sassc); 3552} 3553 3554