mpt_cam.c revision 315822
/*-
 * FreeBSD/CAM specific routines for LSI '909 FC adapters.
 * FreeBSD Version.
 *
 * Copyright (c) 2000, 2001 by Greg Ansley
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2002, 2006 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Support from Chris Ellsworth in order to make SAS adapters work
 * is gratefully acknowledged.
 *
 * Support from LSI-Logic has also gone a great deal toward making this a
 * workable subsystem and is gratefully acknowledged.
 */
/*-
 * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
 * Copyright (c) 2005, WHEEL Sp. z o.o.
 * Copyright (c) 2004, 2005 Justin T. Gibbs
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/dev/mpt/mpt_cam.c 315822 2017-03-23 06:49:36Z mav $");

#include <dev/mpt/mpt.h>
#include <dev/mpt/mpt_cam.h>
#include <dev/mpt/mpt_raid.h>

#include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
#include "dev/mpt/mpilib/mpi_init.h"
#include "dev/mpt/mpilib/mpi_targ.h"
#include "dev/mpt/mpilib/mpi_fc.h"
#include "dev/mpt/mpilib/mpi_sas.h"

#include <sys/callout.h>
#include <sys/kthread.h>
#include <sys/sysctl.h>

static void mpt_poll(struct cam_sim *);
static timeout_t mpt_timeout;
static void mpt_action(struct cam_sim *, union ccb *);
static int
mpt_get_spi_settings(struct mpt_softc *, struct ccb_trans_settings *);
static void mpt_setwidth(struct mpt_softc *, int, int);
static void mpt_setsync(struct mpt_softc *, int, int, int);
static int mpt_update_spi_config(struct mpt_softc *, int);

static mpt_reply_handler_t mpt_scsi_reply_handler;
static mpt_reply_handler_t mpt_scsi_tmf_reply_handler;
static mpt_reply_handler_t mpt_fc_els_reply_handler;
static int mpt_scsi_reply_frame_handler(struct mpt_softc *, request_t *,
    MSG_DEFAULT_REPLY *);
static int mpt_bus_reset(struct mpt_softc *, target_id_t, lun_id_t, int);
static int mpt_fc_reset_link(struct mpt_softc *, int);

static int mpt_spawn_recovery_thread(struct mpt_softc *mpt);
static void mpt_terminate_recovery_thread(struct mpt_softc *mpt);
static void mpt_recovery_thread(void *arg);
static void mpt_recover_commands(struct mpt_softc *mpt);

static int mpt_scsi_send_tmf(struct mpt_softc *, u_int, u_int, u_int,
    u_int, u_int, u_int, int);

static void mpt_fc_post_els(struct mpt_softc *mpt, request_t *, int);
static void mpt_post_target_command(struct mpt_softc *, request_t *, int);
static int mpt_add_els_buffers(struct mpt_softc *mpt);
static int mpt_add_target_commands(struct mpt_softc *mpt);
static int mpt_enable_lun(struct mpt_softc *, target_id_t, lun_id_t);
static int mpt_disable_lun(struct mpt_softc *, target_id_t, lun_id_t);
static void mpt_target_start_io(struct mpt_softc *, union ccb *);
static cam_status mpt_abort_target_ccb(struct mpt_softc *, union ccb *);
static int mpt_abort_target_cmd(struct mpt_softc *, request_t *);
static void mpt_scsi_tgt_status(struct mpt_softc *, union ccb *, request_t *,
    uint8_t, uint8_t const *, u_int);
static void
mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *, request_t *, mpt_task_mgmt_t,
    tgt_resource_t *, int);
static void mpt_tgt_dump_tgt_state(struct mpt_softc *, request_t *);
static void mpt_tgt_dump_req_state(struct mpt_softc *, request_t *);
static mpt_reply_handler_t mpt_scsi_tgt_reply_handler;
static mpt_reply_handler_t mpt_sata_pass_reply_handler;

static uint32_t scsi_io_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t scsi_tmf_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t fc_els_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t sata_pass_handler_id = MPT_HANDLER_ID_NONE;

static mpt_probe_handler_t mpt_cam_probe;
static mpt_attach_handler_t mpt_cam_attach;
static mpt_enable_handler_t mpt_cam_enable;
static mpt_ready_handler_t mpt_cam_ready;
static mpt_event_handler_t mpt_cam_event;
static mpt_reset_handler_t mpt_cam_ioc_reset;
static mpt_detach_handler_t mpt_cam_detach;

static struct mpt_personality mpt_cam_personality =
{
	.name		= "mpt_cam",
	.probe		= mpt_cam_probe,
	.attach		= mpt_cam_attach,
	.enable		= mpt_cam_enable,
	.ready		= mpt_cam_ready,
	.event		= mpt_cam_event,
	.reset		= mpt_cam_ioc_reset,
	.detach		= mpt_cam_detach,
};

DECLARE_MPT_PERSONALITY(mpt_cam, SI_ORDER_SECOND);
MODULE_DEPEND(mpt_cam, cam, 1, 1, 1);

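/*
 * hw.mpt.enable_sata_wc controls the write cache of SATA disks attached to
 * SAS controllers: -1 (the default) leaves the drive setting alone, 0
 * disables the write cache, and any other value enables it (see
 * mpt_set_initial_config_sas() and mptsas_set_sata_wc() below).
 */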
int mpt_enable_sata_wc = -1;
TUNABLE_INT("hw.mpt.enable_sata_wc", &mpt_enable_sata_wc);

static int
mpt_cam_probe(struct mpt_softc *mpt)
{
	int role;

	/*
	 * Only attach to nodes that support the initiator or target role
	 * (or want to) or have RAID physical devices that need CAM pass-thru
	 * support.
	 */
	if (mpt->do_cfg_role) {
		role = mpt->cfg_role;
	} else {
		role = mpt->role;
	}
	if ((role & (MPT_ROLE_TARGET|MPT_ROLE_INITIATOR)) != 0 ||
	    (mpt->ioc_page2 != NULL && mpt->ioc_page2->MaxPhysDisks != 0)) {
		return (0);
	}
	return (ENODEV);
}

static int
mpt_cam_attach(struct mpt_softc *mpt)
{
	struct cam_devq *devq;
	mpt_handler_t	 handler;
	int		 maxq;
	int		 error;

	MPT_LOCK(mpt);
	TAILQ_INIT(&mpt->request_timeout_list);
	maxq = (mpt->ioc_facts.GlobalCredits < MPT_MAX_REQUESTS(mpt))?
	    mpt->ioc_facts.GlobalCredits : MPT_MAX_REQUESTS(mpt);

	handler.reply_handler = mpt_scsi_reply_handler;
	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
	    &scsi_io_handler_id);
	if (error != 0) {
		MPT_UNLOCK(mpt);
		goto cleanup;
	}

	handler.reply_handler = mpt_scsi_tmf_reply_handler;
	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
	    &scsi_tmf_handler_id);
	if (error != 0) {
		MPT_UNLOCK(mpt);
		goto cleanup;
	}

	/*
	 * If we're fibre channel and could support target mode, we register
	 * an ELS reply handler and give it resources.
	 */
	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
		handler.reply_handler = mpt_fc_els_reply_handler;
		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
		    &fc_els_handler_id);
		if (error != 0) {
			MPT_UNLOCK(mpt);
			goto cleanup;
		}
		if (mpt_add_els_buffers(mpt) == FALSE) {
			error = ENOMEM;
			MPT_UNLOCK(mpt);
			goto cleanup;
		}
		maxq -= mpt->els_cmds_allocated;
	}

	/*
	 * If we support target mode, we register a reply handler for it,
	 * but don't add command resources until we actually enable target
	 * mode.
	 */
	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
		handler.reply_handler = mpt_scsi_tgt_reply_handler;
		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
		    &mpt->scsi_tgt_handler_id);
		if (error != 0) {
			MPT_UNLOCK(mpt);
			goto cleanup;
		}
	}

	if (mpt->is_sas) {
		handler.reply_handler = mpt_sata_pass_reply_handler;
		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
		    &sata_pass_handler_id);
		if (error != 0) {
			MPT_UNLOCK(mpt);
			goto cleanup;
		}
	}

	/*
	 * We keep one request reserved for timeout TMF requests.
	 */
	mpt->tmf_req = mpt_get_request(mpt, FALSE);
	if (mpt->tmf_req == NULL) {
		mpt_prt(mpt, "Unable to allocate dedicated TMF request!\n");
		error = ENOMEM;
		MPT_UNLOCK(mpt);
		goto cleanup;
	}

	/*
	 * Mark the request as free even though not on the free list.
	 * There is only one TMF request allowed to be outstanding at
	 * a time and the TMF routines perform their own allocation
	 * tracking using the standard state flags.
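	 * The recovery thread reuses this request to issue task management
	 * functions for commands that have timed out.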
	 */
	mpt->tmf_req->state = REQ_STATE_FREE;
	maxq--;

	/*
	 * The rest of this is CAM foo, for which we need to drop our lock
	 */
	MPT_UNLOCK(mpt);

	if (mpt_spawn_recovery_thread(mpt) != 0) {
		mpt_prt(mpt, "Unable to spawn recovery thread!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Create the device queue for our SIM(s).
	 */
	devq = cam_simq_alloc(maxq);
	if (devq == NULL) {
		mpt_prt(mpt, "Unable to allocate CAM SIMQ!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Construct our SIM entry.
	 */
	mpt->sim =
	    mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
	if (mpt->sim == NULL) {
		mpt_prt(mpt, "Unable to allocate CAM SIM!\n");
		cam_simq_free(devq);
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Register exactly this bus.
	 */
	MPT_LOCK(mpt);
	if (xpt_bus_register(mpt->sim, mpt->dev, 0) != CAM_SUCCESS) {
		mpt_prt(mpt, "Bus registration Failed!\n");
		error = ENOMEM;
		MPT_UNLOCK(mpt);
		goto cleanup;
	}

	if (xpt_create_path(&mpt->path, NULL, cam_sim_path(mpt->sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		mpt_prt(mpt, "Unable to allocate Path!\n");
		error = ENOMEM;
		MPT_UNLOCK(mpt);
		goto cleanup;
	}
	MPT_UNLOCK(mpt);

	/*
	 * Only register a second bus for RAID physical
	 * devices if the controller supports RAID.
	 */
	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
		return (0);
	}

	/*
	 * Create a "bus" to export all hidden disks to CAM.
	 */
	mpt->phydisk_sim =
	    mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
	if (mpt->phydisk_sim == NULL) {
		mpt_prt(mpt, "Unable to allocate Physical Disk CAM SIM!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Register this bus.
378 */ 379 MPT_LOCK(mpt); 380 if (xpt_bus_register(mpt->phydisk_sim, mpt->dev, 1) != 381 CAM_SUCCESS) { 382 mpt_prt(mpt, "Physical Disk Bus registration Failed!\n"); 383 error = ENOMEM; 384 MPT_UNLOCK(mpt); 385 goto cleanup; 386 } 387 388 if (xpt_create_path(&mpt->phydisk_path, NULL, 389 cam_sim_path(mpt->phydisk_sim), 390 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 391 mpt_prt(mpt, "Unable to allocate Physical Disk Path!\n"); 392 error = ENOMEM; 393 MPT_UNLOCK(mpt); 394 goto cleanup; 395 } 396 MPT_UNLOCK(mpt); 397 mpt_lprt(mpt, MPT_PRT_DEBUG, "attached cam\n"); 398 return (0); 399 400cleanup: 401 mpt_cam_detach(mpt); 402 return (error); 403} 404 405/* 406 * Read FC configuration information 407 */ 408static int 409mpt_read_config_info_fc(struct mpt_softc *mpt) 410{ 411 struct sysctl_ctx_list *ctx; 412 struct sysctl_oid *tree; 413 char *topology = NULL; 414 int rv; 415 416 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 0, 417 0, &mpt->mpt_fcport_page0.Header, FALSE, 5000); 418 if (rv) { 419 return (-1); 420 } 421 mpt_lprt(mpt, MPT_PRT_DEBUG, "FC Port Page 0 Header: %x %x %x %x\n", 422 mpt->mpt_fcport_page0.Header.PageVersion, 423 mpt->mpt_fcport_page0.Header.PageLength, 424 mpt->mpt_fcport_page0.Header.PageNumber, 425 mpt->mpt_fcport_page0.Header.PageType); 426 427 428 rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_fcport_page0.Header, 429 sizeof(mpt->mpt_fcport_page0), FALSE, 5000); 430 if (rv) { 431 mpt_prt(mpt, "failed to read FC Port Page 0\n"); 432 return (-1); 433 } 434 mpt2host_config_page_fc_port_0(&mpt->mpt_fcport_page0); 435 436 switch (mpt->mpt_fcport_page0.CurrentSpeed) { 437 case MPI_FCPORTPAGE0_CURRENT_SPEED_1GBIT: 438 mpt->mpt_fcport_speed = 1; 439 break; 440 case MPI_FCPORTPAGE0_CURRENT_SPEED_2GBIT: 441 mpt->mpt_fcport_speed = 2; 442 break; 443 case MPI_FCPORTPAGE0_CURRENT_SPEED_10GBIT: 444 mpt->mpt_fcport_speed = 10; 445 break; 446 case MPI_FCPORTPAGE0_CURRENT_SPEED_4GBIT: 447 mpt->mpt_fcport_speed = 4; 448 break; 449 default: 450 mpt->mpt_fcport_speed = 0; 451 break; 452 } 453 454 switch (mpt->mpt_fcport_page0.Flags & 455 MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK) { 456 case MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT: 457 mpt->mpt_fcport_speed = 0; 458 topology = "<NO LOOP>"; 459 break; 460 case MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT: 461 topology = "N-Port"; 462 break; 463 case MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP: 464 topology = "NL-Port"; 465 break; 466 case MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT: 467 topology = "F-Port"; 468 break; 469 case MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP: 470 topology = "FL-Port"; 471 break; 472 default: 473 mpt->mpt_fcport_speed = 0; 474 topology = "?"; 475 break; 476 } 477 478 mpt->scinfo.fc.wwnn = ((uint64_t)mpt->mpt_fcport_page0.WWNN.High << 32) 479 | mpt->mpt_fcport_page0.WWNN.Low; 480 mpt->scinfo.fc.wwpn = ((uint64_t)mpt->mpt_fcport_page0.WWPN.High << 32) 481 | mpt->mpt_fcport_page0.WWPN.Low; 482 mpt->scinfo.fc.portid = mpt->mpt_fcport_page0.PortIdentifier; 483 484 mpt_lprt(mpt, MPT_PRT_INFO, 485 "FC Port Page 0: Topology <%s> WWNN 0x%16jx WWPN 0x%16jx " 486 "Speed %u-Gbit\n", topology, 487 (uintmax_t)mpt->scinfo.fc.wwnn, (uintmax_t)mpt->scinfo.fc.wwpn, 488 mpt->mpt_fcport_speed); 489 MPT_UNLOCK(mpt); 490 ctx = device_get_sysctl_ctx(mpt->dev); 491 tree = device_get_sysctl_tree(mpt->dev); 492 493 SYSCTL_ADD_QUAD(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 494 "wwnn", CTLFLAG_RD, &mpt->scinfo.fc.wwnn, 495 "World Wide Node Name"); 496 497 SYSCTL_ADD_QUAD(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 498 "wwpn", CTLFLAG_RD, 
&mpt->scinfo.fc.wwpn, 499 "World Wide Port Name"); 500 501 MPT_LOCK(mpt); 502 return (0); 503} 504 505/* 506 * Set FC configuration information. 507 */ 508static int 509mpt_set_initial_config_fc(struct mpt_softc *mpt) 510{ 511 CONFIG_PAGE_FC_PORT_1 fc; 512 U32 fl; 513 int r, doit = 0; 514 int role; 515 516 r = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 1, 0, 517 &fc.Header, FALSE, 5000); 518 if (r) { 519 mpt_prt(mpt, "failed to read FC page 1 header\n"); 520 return (mpt_fc_reset_link(mpt, 1)); 521 } 522 523 r = mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_NVRAM, 0, 524 &fc.Header, sizeof (fc), FALSE, 5000); 525 if (r) { 526 mpt_prt(mpt, "failed to read FC page 1\n"); 527 return (mpt_fc_reset_link(mpt, 1)); 528 } 529 mpt2host_config_page_fc_port_1(&fc); 530 531 /* 532 * Check our flags to make sure we support the role we want. 533 */ 534 doit = 0; 535 role = 0; 536 fl = fc.Flags; 537 538 if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT) { 539 role |= MPT_ROLE_INITIATOR; 540 } 541 if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) { 542 role |= MPT_ROLE_TARGET; 543 } 544 545 fl &= ~MPI_FCPORTPAGE1_FLAGS_PROT_MASK; 546 547 if (mpt->do_cfg_role == 0) { 548 role = mpt->cfg_role; 549 } else { 550 mpt->do_cfg_role = 0; 551 } 552 553 if (role != mpt->cfg_role) { 554 if (mpt->cfg_role & MPT_ROLE_INITIATOR) { 555 if ((role & MPT_ROLE_INITIATOR) == 0) { 556 mpt_prt(mpt, "adding initiator role\n"); 557 fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT; 558 doit++; 559 } else { 560 mpt_prt(mpt, "keeping initiator role\n"); 561 } 562 } else if (role & MPT_ROLE_INITIATOR) { 563 mpt_prt(mpt, "removing initiator role\n"); 564 doit++; 565 } 566 if (mpt->cfg_role & MPT_ROLE_TARGET) { 567 if ((role & MPT_ROLE_TARGET) == 0) { 568 mpt_prt(mpt, "adding target role\n"); 569 fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG; 570 doit++; 571 } else { 572 mpt_prt(mpt, "keeping target role\n"); 573 } 574 } else if (role & MPT_ROLE_TARGET) { 575 mpt_prt(mpt, "removing target role\n"); 576 doit++; 577 } 578 mpt->role = mpt->cfg_role; 579 } 580 581 if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) { 582 if ((fl & MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID) == 0) { 583 mpt_prt(mpt, "adding OXID option\n"); 584 fl |= MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID; 585 doit++; 586 } 587 } 588 589 if (doit) { 590 fc.Flags = fl; 591 host2mpt_config_page_fc_port_1(&fc); 592 r = mpt_write_cfg_page(mpt, 593 MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM, 0, &fc.Header, 594 sizeof(fc), FALSE, 5000); 595 if (r != 0) { 596 mpt_prt(mpt, "failed to update NVRAM with changes\n"); 597 return (0); 598 } 599 mpt_prt(mpt, "NOTE: NVRAM changes will not take " 600 "effect until next reboot or IOC reset\n"); 601 } 602 return (0); 603} 604 605static int 606mptsas_sas_io_unit_pg0(struct mpt_softc *mpt, struct mptsas_portinfo *portinfo) 607{ 608 ConfigExtendedPageHeader_t hdr; 609 struct mptsas_phyinfo *phyinfo; 610 SasIOUnitPage0_t *buffer; 611 int error, len, i; 612 613 error = mpt_read_extcfg_header(mpt, MPI_SASIOUNITPAGE0_PAGEVERSION, 614 0, 0, MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT, 615 &hdr, 0, 10000); 616 if (error) 617 goto out; 618 if (hdr.ExtPageLength == 0) { 619 error = ENXIO; 620 goto out; 621 } 622 623 len = hdr.ExtPageLength * 4; 624 buffer = malloc(len, M_DEVBUF, M_NOWAIT|M_ZERO); 625 if (buffer == NULL) { 626 error = ENOMEM; 627 goto out; 628 } 629 630 error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT, 631 0, &hdr, buffer, len, 0, 10000); 632 if (error) { 633 free(buffer, M_DEVBUF); 634 goto out; 635 } 636 637 portinfo->num_phys = 
buffer->NumPhys; 638 portinfo->phy_info = malloc(sizeof(*portinfo->phy_info) * 639 portinfo->num_phys, M_DEVBUF, M_NOWAIT|M_ZERO); 640 if (portinfo->phy_info == NULL) { 641 free(buffer, M_DEVBUF); 642 error = ENOMEM; 643 goto out; 644 } 645 646 for (i = 0; i < portinfo->num_phys; i++) { 647 phyinfo = &portinfo->phy_info[i]; 648 phyinfo->phy_num = i; 649 phyinfo->port_id = buffer->PhyData[i].Port; 650 phyinfo->negotiated_link_rate = 651 buffer->PhyData[i].NegotiatedLinkRate; 652 phyinfo->handle = 653 le16toh(buffer->PhyData[i].ControllerDevHandle); 654 } 655 656 free(buffer, M_DEVBUF); 657out: 658 return (error); 659} 660 661static int 662mptsas_sas_phy_pg0(struct mpt_softc *mpt, struct mptsas_phyinfo *phy_info, 663 uint32_t form, uint32_t form_specific) 664{ 665 ConfigExtendedPageHeader_t hdr; 666 SasPhyPage0_t *buffer; 667 int error; 668 669 error = mpt_read_extcfg_header(mpt, MPI_SASPHY0_PAGEVERSION, 0, 0, 670 MPI_CONFIG_EXTPAGETYPE_SAS_PHY, &hdr, 671 0, 10000); 672 if (error) 673 goto out; 674 if (hdr.ExtPageLength == 0) { 675 error = ENXIO; 676 goto out; 677 } 678 679 buffer = malloc(sizeof(SasPhyPage0_t), M_DEVBUF, M_NOWAIT|M_ZERO); 680 if (buffer == NULL) { 681 error = ENOMEM; 682 goto out; 683 } 684 685 error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT, 686 form + form_specific, &hdr, buffer, 687 sizeof(SasPhyPage0_t), 0, 10000); 688 if (error) { 689 free(buffer, M_DEVBUF); 690 goto out; 691 } 692 693 phy_info->hw_link_rate = buffer->HwLinkRate; 694 phy_info->programmed_link_rate = buffer->ProgrammedLinkRate; 695 phy_info->identify.dev_handle = le16toh(buffer->OwnerDevHandle); 696 phy_info->attached.dev_handle = le16toh(buffer->AttachedDevHandle); 697 698 free(buffer, M_DEVBUF); 699out: 700 return (error); 701} 702 703static int 704mptsas_sas_device_pg0(struct mpt_softc *mpt, struct mptsas_devinfo *device_info, 705 uint32_t form, uint32_t form_specific) 706{ 707 ConfigExtendedPageHeader_t hdr; 708 SasDevicePage0_t *buffer; 709 uint64_t sas_address; 710 int error = 0; 711 712 bzero(device_info, sizeof(*device_info)); 713 error = mpt_read_extcfg_header(mpt, MPI_SASDEVICE0_PAGEVERSION, 0, 0, 714 MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE, 715 &hdr, 0, 10000); 716 if (error) 717 goto out; 718 if (hdr.ExtPageLength == 0) { 719 error = ENXIO; 720 goto out; 721 } 722 723 buffer = malloc(sizeof(SasDevicePage0_t), M_DEVBUF, M_NOWAIT|M_ZERO); 724 if (buffer == NULL) { 725 error = ENOMEM; 726 goto out; 727 } 728 729 error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT, 730 form + form_specific, &hdr, buffer, 731 sizeof(SasDevicePage0_t), 0, 10000); 732 if (error) { 733 free(buffer, M_DEVBUF); 734 goto out; 735 } 736 737 device_info->dev_handle = le16toh(buffer->DevHandle); 738 device_info->parent_dev_handle = le16toh(buffer->ParentDevHandle); 739 device_info->enclosure_handle = le16toh(buffer->EnclosureHandle); 740 device_info->slot = le16toh(buffer->Slot); 741 device_info->phy_num = buffer->PhyNum; 742 device_info->physical_port = buffer->PhysicalPort; 743 device_info->target_id = buffer->TargetID; 744 device_info->bus = buffer->Bus; 745 bcopy(&buffer->SASAddress, &sas_address, sizeof(uint64_t)); 746 device_info->sas_address = le64toh(sas_address); 747 device_info->device_info = le32toh(buffer->DeviceInfo); 748 749 free(buffer, M_DEVBUF); 750out: 751 return (error); 752} 753 754/* 755 * Read SAS configuration information. Nothing to do yet. 
756 */ 757static int 758mpt_read_config_info_sas(struct mpt_softc *mpt) 759{ 760 struct mptsas_portinfo *portinfo; 761 struct mptsas_phyinfo *phyinfo; 762 int error, i; 763 764 portinfo = malloc(sizeof(*portinfo), M_DEVBUF, M_NOWAIT|M_ZERO); 765 if (portinfo == NULL) 766 return (ENOMEM); 767 768 error = mptsas_sas_io_unit_pg0(mpt, portinfo); 769 if (error) { 770 free(portinfo, M_DEVBUF); 771 return (0); 772 } 773 774 for (i = 0; i < portinfo->num_phys; i++) { 775 phyinfo = &portinfo->phy_info[i]; 776 error = mptsas_sas_phy_pg0(mpt, phyinfo, 777 (MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER << 778 MPI_SAS_PHY_PGAD_FORM_SHIFT), i); 779 if (error) 780 break; 781 error = mptsas_sas_device_pg0(mpt, &phyinfo->identify, 782 (MPI_SAS_DEVICE_PGAD_FORM_HANDLE << 783 MPI_SAS_DEVICE_PGAD_FORM_SHIFT), 784 phyinfo->handle); 785 if (error) 786 break; 787 phyinfo->identify.phy_num = phyinfo->phy_num = i; 788 if (phyinfo->attached.dev_handle) 789 error = mptsas_sas_device_pg0(mpt, 790 &phyinfo->attached, 791 (MPI_SAS_DEVICE_PGAD_FORM_HANDLE << 792 MPI_SAS_DEVICE_PGAD_FORM_SHIFT), 793 phyinfo->attached.dev_handle); 794 if (error) 795 break; 796 } 797 mpt->sas_portinfo = portinfo; 798 return (0); 799} 800 801static void 802mptsas_set_sata_wc(struct mpt_softc *mpt, struct mptsas_devinfo *devinfo, 803 int enabled) 804{ 805 SataPassthroughRequest_t *pass; 806 request_t *req; 807 int error, status; 808 809 req = mpt_get_request(mpt, 0); 810 if (req == NULL) 811 return; 812 813 pass = req->req_vbuf; 814 bzero(pass, sizeof(SataPassthroughRequest_t)); 815 pass->Function = MPI_FUNCTION_SATA_PASSTHROUGH; 816 pass->TargetID = devinfo->target_id; 817 pass->Bus = devinfo->bus; 818 pass->PassthroughFlags = 0; 819 pass->ConnectionRate = MPI_SATA_PT_REQ_CONNECT_RATE_NEGOTIATED; 820 pass->DataLength = 0; 821 pass->MsgContext = htole32(req->index | sata_pass_handler_id); 822 pass->CommandFIS[0] = 0x27; 823 pass->CommandFIS[1] = 0x80; 824 pass->CommandFIS[2] = 0xef; 825 pass->CommandFIS[3] = (enabled) ? 0x02 : 0x82; 826 pass->CommandFIS[7] = 0x40; 827 pass->CommandFIS[15] = 0x08; 828 829 mpt_check_doorbell(mpt); 830 mpt_send_cmd(mpt, req); 831 error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, 0, 832 10 * 1000); 833 if (error) { 834 mpt_free_request(mpt, req); 835 printf("error %d sending passthrough\n", error); 836 return; 837 } 838 839 status = le16toh(req->IOCStatus); 840 if (status != MPI_IOCSTATUS_SUCCESS) { 841 mpt_free_request(mpt, req); 842 printf("IOCSTATUS %d\n", status); 843 return; 844 } 845 846 mpt_free_request(mpt, req); 847} 848 849/* 850 * Set SAS configuration information. Nothing to do yet. 851 */ 852static int 853mpt_set_initial_config_sas(struct mpt_softc *mpt) 854{ 855 struct mptsas_phyinfo *phyinfo; 856 int i; 857 858 if ((mpt_enable_sata_wc != -1) && (mpt->sas_portinfo != NULL)) { 859 for (i = 0; i < mpt->sas_portinfo->num_phys; i++) { 860 phyinfo = &mpt->sas_portinfo->phy_info[i]; 861 if (phyinfo->attached.dev_handle == 0) 862 continue; 863 if ((phyinfo->attached.device_info & 864 MPI_SAS_DEVICE_INFO_SATA_DEVICE) == 0) 865 continue; 866 if (bootverbose) 867 device_printf(mpt->dev, 868 "%sabling SATA WC on phy %d\n", 869 (mpt_enable_sata_wc) ? 
"En" : "Dis", i); 870 mptsas_set_sata_wc(mpt, &phyinfo->attached, 871 mpt_enable_sata_wc); 872 } 873 } 874 875 return (0); 876} 877 878static int 879mpt_sata_pass_reply_handler(struct mpt_softc *mpt, request_t *req, 880 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 881{ 882 883 if (req != NULL) { 884 if (reply_frame != NULL) { 885 req->IOCStatus = le16toh(reply_frame->IOCStatus); 886 } 887 req->state &= ~REQ_STATE_QUEUED; 888 req->state |= REQ_STATE_DONE; 889 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 890 if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) { 891 wakeup(req); 892 } else if ((req->state & REQ_STATE_TIMEDOUT) != 0) { 893 /* 894 * Whew- we can free this request (late completion) 895 */ 896 mpt_free_request(mpt, req); 897 } 898 } 899 900 return (TRUE); 901} 902 903/* 904 * Read SCSI configuration information 905 */ 906static int 907mpt_read_config_info_spi(struct mpt_softc *mpt) 908{ 909 int rv, i; 910 911 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 0, 0, 912 &mpt->mpt_port_page0.Header, FALSE, 5000); 913 if (rv) { 914 return (-1); 915 } 916 mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 0 Header: %x %x %x %x\n", 917 mpt->mpt_port_page0.Header.PageVersion, 918 mpt->mpt_port_page0.Header.PageLength, 919 mpt->mpt_port_page0.Header.PageNumber, 920 mpt->mpt_port_page0.Header.PageType); 921 922 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 1, 0, 923 &mpt->mpt_port_page1.Header, FALSE, 5000); 924 if (rv) { 925 return (-1); 926 } 927 mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1 Header: %x %x %x %x\n", 928 mpt->mpt_port_page1.Header.PageVersion, 929 mpt->mpt_port_page1.Header.PageLength, 930 mpt->mpt_port_page1.Header.PageNumber, 931 mpt->mpt_port_page1.Header.PageType); 932 933 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 2, 0, 934 &mpt->mpt_port_page2.Header, FALSE, 5000); 935 if (rv) { 936 return (-1); 937 } 938 mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 2 Header: %x %x %x %x\n", 939 mpt->mpt_port_page2.Header.PageVersion, 940 mpt->mpt_port_page2.Header.PageLength, 941 mpt->mpt_port_page2.Header.PageNumber, 942 mpt->mpt_port_page2.Header.PageType); 943 944 for (i = 0; i < 16; i++) { 945 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE, 946 0, i, &mpt->mpt_dev_page0[i].Header, FALSE, 5000); 947 if (rv) { 948 return (-1); 949 } 950 mpt_lprt(mpt, MPT_PRT_DEBUG, 951 "SPI Target %d Device Page 0 Header: %x %x %x %x\n", i, 952 mpt->mpt_dev_page0[i].Header.PageVersion, 953 mpt->mpt_dev_page0[i].Header.PageLength, 954 mpt->mpt_dev_page0[i].Header.PageNumber, 955 mpt->mpt_dev_page0[i].Header.PageType); 956 957 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE, 958 1, i, &mpt->mpt_dev_page1[i].Header, FALSE, 5000); 959 if (rv) { 960 return (-1); 961 } 962 mpt_lprt(mpt, MPT_PRT_DEBUG, 963 "SPI Target %d Device Page 1 Header: %x %x %x %x\n", i, 964 mpt->mpt_dev_page1[i].Header.PageVersion, 965 mpt->mpt_dev_page1[i].Header.PageLength, 966 mpt->mpt_dev_page1[i].Header.PageNumber, 967 mpt->mpt_dev_page1[i].Header.PageType); 968 } 969 970 /* 971 * At this point, we don't *have* to fail. As long as we have 972 * valid config header information, we can (barely) lurch 973 * along. 
974 */ 975 976 rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page0.Header, 977 sizeof(mpt->mpt_port_page0), FALSE, 5000); 978 if (rv) { 979 mpt_prt(mpt, "failed to read SPI Port Page 0\n"); 980 } else { 981 mpt2host_config_page_scsi_port_0(&mpt->mpt_port_page0); 982 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 983 "SPI Port Page 0: Capabilities %x PhysicalInterface %x\n", 984 mpt->mpt_port_page0.Capabilities, 985 mpt->mpt_port_page0.PhysicalInterface); 986 } 987 988 rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page1.Header, 989 sizeof(mpt->mpt_port_page1), FALSE, 5000); 990 if (rv) { 991 mpt_prt(mpt, "failed to read SPI Port Page 1\n"); 992 } else { 993 mpt2host_config_page_scsi_port_1(&mpt->mpt_port_page1); 994 mpt_lprt(mpt, MPT_PRT_DEBUG, 995 "SPI Port Page 1: Configuration %x OnBusTimerValue %x\n", 996 mpt->mpt_port_page1.Configuration, 997 mpt->mpt_port_page1.OnBusTimerValue); 998 } 999 1000 rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page2.Header, 1001 sizeof(mpt->mpt_port_page2), FALSE, 5000); 1002 if (rv) { 1003 mpt_prt(mpt, "failed to read SPI Port Page 2\n"); 1004 } else { 1005 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 1006 "Port Page 2: Flags %x Settings %x\n", 1007 mpt->mpt_port_page2.PortFlags, 1008 mpt->mpt_port_page2.PortSettings); 1009 mpt2host_config_page_scsi_port_2(&mpt->mpt_port_page2); 1010 for (i = 0; i < 16; i++) { 1011 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 1012 " Port Page 2 Tgt %d: timo %x SF %x Flags %x\n", 1013 i, mpt->mpt_port_page2.DeviceSettings[i].Timeout, 1014 mpt->mpt_port_page2.DeviceSettings[i].SyncFactor, 1015 mpt->mpt_port_page2.DeviceSettings[i].DeviceFlags); 1016 } 1017 } 1018 1019 for (i = 0; i < 16; i++) { 1020 rv = mpt_read_cur_cfg_page(mpt, i, 1021 &mpt->mpt_dev_page0[i].Header, sizeof(*mpt->mpt_dev_page0), 1022 FALSE, 5000); 1023 if (rv) { 1024 mpt_prt(mpt, 1025 "cannot read SPI Target %d Device Page 0\n", i); 1026 continue; 1027 } 1028 mpt2host_config_page_scsi_device_0(&mpt->mpt_dev_page0[i]); 1029 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 1030 "target %d page 0: Negotiated Params %x Information %x\n", 1031 i, mpt->mpt_dev_page0[i].NegotiatedParameters, 1032 mpt->mpt_dev_page0[i].Information); 1033 1034 rv = mpt_read_cur_cfg_page(mpt, i, 1035 &mpt->mpt_dev_page1[i].Header, sizeof(*mpt->mpt_dev_page1), 1036 FALSE, 5000); 1037 if (rv) { 1038 mpt_prt(mpt, 1039 "cannot read SPI Target %d Device Page 1\n", i); 1040 continue; 1041 } 1042 mpt2host_config_page_scsi_device_1(&mpt->mpt_dev_page1[i]); 1043 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 1044 "target %d page 1: Requested Params %x Configuration %x\n", 1045 i, mpt->mpt_dev_page1[i].RequestedParameters, 1046 mpt->mpt_dev_page1[i].Configuration); 1047 } 1048 return (0); 1049} 1050 1051/* 1052 * Validate SPI configuration information. 1053 * 1054 * In particular, validate SPI Port Page 1. 
1055 */ 1056static int 1057mpt_set_initial_config_spi(struct mpt_softc *mpt) 1058{ 1059 int error, i, pp1val; 1060 1061 mpt->mpt_disc_enable = 0xff; 1062 mpt->mpt_tag_enable = 0; 1063 1064 pp1val = ((1 << mpt->mpt_ini_id) << 1065 MPI_SCSIPORTPAGE1_CFG_SHIFT_PORT_RESPONSE_ID) | mpt->mpt_ini_id; 1066 if (mpt->mpt_port_page1.Configuration != pp1val) { 1067 CONFIG_PAGE_SCSI_PORT_1 tmp; 1068 1069 mpt_prt(mpt, "SPI Port Page 1 Config value bad (%x)- should " 1070 "be %x\n", mpt->mpt_port_page1.Configuration, pp1val); 1071 tmp = mpt->mpt_port_page1; 1072 tmp.Configuration = pp1val; 1073 host2mpt_config_page_scsi_port_1(&tmp); 1074 error = mpt_write_cur_cfg_page(mpt, 0, 1075 &tmp.Header, sizeof(tmp), FALSE, 5000); 1076 if (error) { 1077 return (-1); 1078 } 1079 error = mpt_read_cur_cfg_page(mpt, 0, 1080 &tmp.Header, sizeof(tmp), FALSE, 5000); 1081 if (error) { 1082 return (-1); 1083 } 1084 mpt2host_config_page_scsi_port_1(&tmp); 1085 if (tmp.Configuration != pp1val) { 1086 mpt_prt(mpt, 1087 "failed to reset SPI Port Page 1 Config value\n"); 1088 return (-1); 1089 } 1090 mpt->mpt_port_page1 = tmp; 1091 } 1092 1093 /* 1094 * The purpose of this exercise is to get 1095 * all targets back to async/narrow. 1096 * 1097 * We skip this step if the BIOS has already negotiated 1098 * speeds with the targets. 1099 */ 1100 i = mpt->mpt_port_page2.PortSettings & 1101 MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS; 1102 if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS) { 1103 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 1104 "honoring BIOS transfer negotiations\n"); 1105 } else { 1106 for (i = 0; i < 16; i++) { 1107 mpt->mpt_dev_page1[i].RequestedParameters = 0; 1108 mpt->mpt_dev_page1[i].Configuration = 0; 1109 (void) mpt_update_spi_config(mpt, i); 1110 } 1111 } 1112 return (0); 1113} 1114 1115static int 1116mpt_cam_enable(struct mpt_softc *mpt) 1117{ 1118 int error; 1119 1120 MPT_LOCK(mpt); 1121 1122 error = EIO; 1123 if (mpt->is_fc) { 1124 if (mpt_read_config_info_fc(mpt)) { 1125 goto out; 1126 } 1127 if (mpt_set_initial_config_fc(mpt)) { 1128 goto out; 1129 } 1130 } else if (mpt->is_sas) { 1131 if (mpt_read_config_info_sas(mpt)) { 1132 goto out; 1133 } 1134 if (mpt_set_initial_config_sas(mpt)) { 1135 goto out; 1136 } 1137 } else if (mpt->is_spi) { 1138 if (mpt_read_config_info_spi(mpt)) { 1139 goto out; 1140 } 1141 if (mpt_set_initial_config_spi(mpt)) { 1142 goto out; 1143 } 1144 } 1145 error = 0; 1146 1147out: 1148 MPT_UNLOCK(mpt); 1149 return (error); 1150} 1151 1152static void 1153mpt_cam_ready(struct mpt_softc *mpt) 1154{ 1155 1156 /* 1157 * If we're in target mode, hang out resources now 1158 * so we don't cause the world to hang talking to us. 
1159 */ 1160 if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) { 1161 /* 1162 * Try to add some target command resources 1163 */ 1164 MPT_LOCK(mpt); 1165 if (mpt_add_target_commands(mpt) == FALSE) { 1166 mpt_prt(mpt, "failed to add target commands\n"); 1167 } 1168 MPT_UNLOCK(mpt); 1169 } 1170 mpt->ready = 1; 1171} 1172 1173static void 1174mpt_cam_detach(struct mpt_softc *mpt) 1175{ 1176 mpt_handler_t handler; 1177 1178 MPT_LOCK(mpt); 1179 mpt->ready = 0; 1180 mpt_terminate_recovery_thread(mpt); 1181 1182 handler.reply_handler = mpt_scsi_reply_handler; 1183 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler, 1184 scsi_io_handler_id); 1185 handler.reply_handler = mpt_scsi_tmf_reply_handler; 1186 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler, 1187 scsi_tmf_handler_id); 1188 handler.reply_handler = mpt_fc_els_reply_handler; 1189 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler, 1190 fc_els_handler_id); 1191 handler.reply_handler = mpt_scsi_tgt_reply_handler; 1192 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler, 1193 mpt->scsi_tgt_handler_id); 1194 handler.reply_handler = mpt_sata_pass_reply_handler; 1195 mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler, 1196 sata_pass_handler_id); 1197 1198 if (mpt->tmf_req != NULL) { 1199 mpt->tmf_req->state = REQ_STATE_ALLOCATED; 1200 mpt_free_request(mpt, mpt->tmf_req); 1201 mpt->tmf_req = NULL; 1202 } 1203 if (mpt->sas_portinfo != NULL) { 1204 free(mpt->sas_portinfo, M_DEVBUF); 1205 mpt->sas_portinfo = NULL; 1206 } 1207 1208 if (mpt->sim != NULL) { 1209 xpt_free_path(mpt->path); 1210 xpt_bus_deregister(cam_sim_path(mpt->sim)); 1211 cam_sim_free(mpt->sim, TRUE); 1212 mpt->sim = NULL; 1213 } 1214 1215 if (mpt->phydisk_sim != NULL) { 1216 xpt_free_path(mpt->phydisk_path); 1217 xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim)); 1218 cam_sim_free(mpt->phydisk_sim, TRUE); 1219 mpt->phydisk_sim = NULL; 1220 } 1221 MPT_UNLOCK(mpt); 1222} 1223 1224/* This routine is used after a system crash to dump core onto the swap device. 1225 */ 1226static void 1227mpt_poll(struct cam_sim *sim) 1228{ 1229 struct mpt_softc *mpt; 1230 1231 mpt = (struct mpt_softc *)cam_sim_softc(sim); 1232 mpt_intr(mpt); 1233} 1234 1235/* 1236 * Watchdog timeout routine for SCSI requests. 1237 */ 1238static void 1239mpt_timeout(void *arg) 1240{ 1241 union ccb *ccb; 1242 struct mpt_softc *mpt; 1243 request_t *req; 1244 1245 ccb = (union ccb *)arg; 1246 mpt = ccb->ccb_h.ccb_mpt_ptr; 1247 1248 MPT_LOCK_ASSERT(mpt); 1249 req = ccb->ccb_h.ccb_req_ptr; 1250 mpt_prt(mpt, "request %p:%u timed out for ccb %p (req->ccb %p)\n", req, 1251 req->serno, ccb, req->ccb); 1252/* XXX: WHAT ARE WE TRYING TO DO HERE? */ 1253 if ((req->state & REQ_STATE_QUEUED) == REQ_STATE_QUEUED) { 1254 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 1255 TAILQ_INSERT_TAIL(&mpt->request_timeout_list, req, links); 1256 req->state |= REQ_STATE_TIMEDOUT; 1257 mpt_wakeup_recovery_thread(mpt); 1258 } 1259} 1260 1261/* 1262 * Callback routine from bus_dmamap_load_ccb(9) or, in simple cases, called 1263 * directly. 1264 * 1265 * Takes a list of physical segments and builds the SGL for SCSI IO command 1266 * and forwards the commard to the IOC after one last check that CAM has not 1267 * aborted the transaction. 
1268 */ 1269static void 1270mpt_execute_req_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 1271{ 1272 request_t *req, *trq; 1273 char *mpt_off; 1274 union ccb *ccb; 1275 struct mpt_softc *mpt; 1276 bus_addr_t chain_list_addr; 1277 int first_lim, seg, this_seg_lim; 1278 uint32_t addr, cur_off, flags, nxt_off, tf; 1279 void *sglp = NULL; 1280 MSG_REQUEST_HEADER *hdrp; 1281 SGE_SIMPLE64 *se; 1282 SGE_CHAIN64 *ce; 1283 int istgt = 0; 1284 1285 req = (request_t *)arg; 1286 ccb = req->ccb; 1287 1288 mpt = ccb->ccb_h.ccb_mpt_ptr; 1289 req = ccb->ccb_h.ccb_req_ptr; 1290 1291 hdrp = req->req_vbuf; 1292 mpt_off = req->req_vbuf; 1293 1294 if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) { 1295 error = EFBIG; 1296 } 1297 1298 if (error == 0) { 1299 switch (hdrp->Function) { 1300 case MPI_FUNCTION_SCSI_IO_REQUEST: 1301 case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH: 1302 istgt = 0; 1303 sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL; 1304 break; 1305 case MPI_FUNCTION_TARGET_ASSIST: 1306 istgt = 1; 1307 sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL; 1308 break; 1309 default: 1310 mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req_a64\n", 1311 hdrp->Function); 1312 error = EINVAL; 1313 break; 1314 } 1315 } 1316 1317 if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) { 1318 error = EFBIG; 1319 mpt_prt(mpt, "segment count %d too large (max %u)\n", 1320 nseg, mpt->max_seg_cnt); 1321 } 1322 1323bad: 1324 if (error != 0) { 1325 if (error != EFBIG && error != ENOMEM) { 1326 mpt_prt(mpt, "mpt_execute_req_a64: err %d\n", error); 1327 } 1328 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) { 1329 cam_status status; 1330 mpt_freeze_ccb(ccb); 1331 if (error == EFBIG) { 1332 status = CAM_REQ_TOO_BIG; 1333 } else if (error == ENOMEM) { 1334 if (mpt->outofbeer == 0) { 1335 mpt->outofbeer = 1; 1336 xpt_freeze_simq(mpt->sim, 1); 1337 mpt_lprt(mpt, MPT_PRT_DEBUG, 1338 "FREEZEQ\n"); 1339 } 1340 status = CAM_REQUEUE_REQ; 1341 } else { 1342 status = CAM_REQ_CMP_ERR; 1343 } 1344 mpt_set_ccb_status(ccb, status); 1345 } 1346 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { 1347 request_t *cmd_req = 1348 MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); 1349 MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM; 1350 MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL; 1351 MPT_TGT_STATE(mpt, cmd_req)->req = NULL; 1352 } 1353 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 1354 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__)); 1355 xpt_done(ccb); 1356 mpt_free_request(mpt, req); 1357 return; 1358 } 1359 1360 /* 1361 * No data to transfer? 1362 * Just make a single simple SGL with zero length. 
1363 */ 1364 1365 if (mpt->verbose >= MPT_PRT_DEBUG) { 1366 int tidx = ((char *)sglp) - mpt_off; 1367 memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx); 1368 } 1369 1370 if (nseg == 0) { 1371 SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp; 1372 MPI_pSGE_SET_FLAGS(se1, 1373 (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER | 1374 MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST)); 1375 se1->FlagsLength = htole32(se1->FlagsLength); 1376 goto out; 1377 } 1378 1379 1380 flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING; 1381 if (istgt == 0) { 1382 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { 1383 flags |= MPI_SGE_FLAGS_HOST_TO_IOC; 1384 } 1385 } else { 1386 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1387 flags |= MPI_SGE_FLAGS_HOST_TO_IOC; 1388 } 1389 } 1390 1391 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 1392 bus_dmasync_op_t op; 1393 if (istgt == 0) { 1394 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1395 op = BUS_DMASYNC_PREREAD; 1396 } else { 1397 op = BUS_DMASYNC_PREWRITE; 1398 } 1399 } else { 1400 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1401 op = BUS_DMASYNC_PREWRITE; 1402 } else { 1403 op = BUS_DMASYNC_PREREAD; 1404 } 1405 } 1406 bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op); 1407 } 1408 1409 /* 1410 * Okay, fill in what we can at the end of the command frame. 1411 * If we have up to MPT_NSGL_FIRST, we can fit them all into 1412 * the command frame. 1413 * 1414 * Otherwise, we fill up through MPT_NSGL_FIRST less one 1415 * SIMPLE64 pointers and start doing CHAIN64 entries after 1416 * that. 1417 */ 1418 1419 if (nseg < MPT_NSGL_FIRST(mpt)) { 1420 first_lim = nseg; 1421 } else { 1422 /* 1423 * Leave room for CHAIN element 1424 */ 1425 first_lim = MPT_NSGL_FIRST(mpt) - 1; 1426 } 1427 1428 se = (SGE_SIMPLE64 *) sglp; 1429 for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) { 1430 tf = flags; 1431 memset(se, 0, sizeof (*se)); 1432 MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len); 1433 se->Address.Low = htole32(dm_segs->ds_addr & 0xffffffff); 1434 if (sizeof(bus_addr_t) > 4) { 1435 addr = ((uint64_t)dm_segs->ds_addr) >> 32; 1436 /* SAS1078 36GB limitation WAR */ 1437 if (mpt->is_1078 && (((uint64_t)dm_segs->ds_addr + 1438 MPI_SGE_LENGTH(se->FlagsLength)) >> 32) == 9) { 1439 addr |= (1U << 31); 1440 tf |= MPI_SGE_FLAGS_LOCAL_ADDRESS; 1441 } 1442 se->Address.High = htole32(addr); 1443 } 1444 if (seg == first_lim - 1) { 1445 tf |= MPI_SGE_FLAGS_LAST_ELEMENT; 1446 } 1447 if (seg == nseg - 1) { 1448 tf |= MPI_SGE_FLAGS_END_OF_LIST | 1449 MPI_SGE_FLAGS_END_OF_BUFFER; 1450 } 1451 MPI_pSGE_SET_FLAGS(se, tf); 1452 se->FlagsLength = htole32(se->FlagsLength); 1453 } 1454 1455 if (seg == nseg) { 1456 goto out; 1457 } 1458 1459 /* 1460 * Tell the IOC where to find the first chain element. 1461 */ 1462 hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2; 1463 nxt_off = MPT_RQSL(mpt); 1464 trq = req; 1465 1466 /* 1467 * Make up the rest of the data segments out of a chain element 1468 * (contained in the current request frame) which points to 1469 * SIMPLE64 elements in the next request frame, possibly ending 1470 * with *another* chain element (if there's more). 1471 */ 1472 while (seg < nseg) { 1473 /* 1474 * Point to the chain descriptor. Note that the chain 1475 * descriptor is at the end of the *previous* list (whether 1476 * chain or simple). 1477 */ 1478 ce = (SGE_CHAIN64 *) se; 1479 1480 /* 1481 * Before we change our current pointer, make sure we won't 1482 * overflow the request area with this frame. 
Note that we 1483 * test against 'greater than' here as it's okay in this case 1484 * to have next offset be just outside the request area. 1485 */ 1486 if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) { 1487 nxt_off = MPT_REQUEST_AREA; 1488 goto next_chain; 1489 } 1490 1491 /* 1492 * Set our SGE element pointer to the beginning of the chain 1493 * list and update our next chain list offset. 1494 */ 1495 se = (SGE_SIMPLE64 *) &mpt_off[nxt_off]; 1496 cur_off = nxt_off; 1497 nxt_off += MPT_RQSL(mpt); 1498 1499 /* 1500 * Now initialize the chain descriptor. 1501 */ 1502 memset(ce, 0, sizeof (*ce)); 1503 1504 /* 1505 * Get the physical address of the chain list. 1506 */ 1507 chain_list_addr = trq->req_pbuf; 1508 chain_list_addr += cur_off; 1509 if (sizeof (bus_addr_t) > 4) { 1510 ce->Address.High = 1511 htole32(((uint64_t)chain_list_addr) >> 32); 1512 } 1513 ce->Address.Low = htole32(chain_list_addr & 0xffffffff); 1514 ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT | 1515 MPI_SGE_FLAGS_64_BIT_ADDRESSING; 1516 1517 /* 1518 * If we have more than a frame's worth of segments left, 1519 * set up the chain list to have the last element be another 1520 * chain descriptor. 1521 */ 1522 if ((nseg - seg) > MPT_NSGL(mpt)) { 1523 this_seg_lim = seg + MPT_NSGL(mpt) - 1; 1524 /* 1525 * The length of the chain is the length in bytes of the 1526 * number of segments plus the next chain element. 1527 * 1528 * The next chain descriptor offset is the length, 1529 * in words, of the number of segments. 1530 */ 1531 ce->Length = (this_seg_lim - seg) * 1532 sizeof (SGE_SIMPLE64); 1533 ce->NextChainOffset = ce->Length >> 2; 1534 ce->Length += sizeof (SGE_CHAIN64); 1535 } else { 1536 this_seg_lim = nseg; 1537 ce->Length = (this_seg_lim - seg) * 1538 sizeof (SGE_SIMPLE64); 1539 } 1540 ce->Length = htole16(ce->Length); 1541 1542 /* 1543 * Fill in the chain list SGE elements with our segment data. 1544 * 1545 * If we're the last element in this chain list, set the last 1546 * element flag. If we're the completely last element period, 1547 * set the end of list and end of buffer flags. 1548 */ 1549 while (seg < this_seg_lim) { 1550 tf = flags; 1551 memset(se, 0, sizeof (*se)); 1552 MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len); 1553 se->Address.Low = htole32(dm_segs->ds_addr & 1554 0xffffffff); 1555 if (sizeof (bus_addr_t) > 4) { 1556 addr = ((uint64_t)dm_segs->ds_addr) >> 32; 1557 /* SAS1078 36GB limitation WAR */ 1558 if (mpt->is_1078 && 1559 (((uint64_t)dm_segs->ds_addr + 1560 MPI_SGE_LENGTH(se->FlagsLength)) >> 1561 32) == 9) { 1562 addr |= (1U << 31); 1563 tf |= MPI_SGE_FLAGS_LOCAL_ADDRESS; 1564 } 1565 se->Address.High = htole32(addr); 1566 } 1567 if (seg == this_seg_lim - 1) { 1568 tf |= MPI_SGE_FLAGS_LAST_ELEMENT; 1569 } 1570 if (seg == nseg - 1) { 1571 tf |= MPI_SGE_FLAGS_END_OF_LIST | 1572 MPI_SGE_FLAGS_END_OF_BUFFER; 1573 } 1574 MPI_pSGE_SET_FLAGS(se, tf); 1575 se->FlagsLength = htole32(se->FlagsLength); 1576 se++; 1577 seg++; 1578 dm_segs++; 1579 } 1580 1581 next_chain: 1582 /* 1583 * If we have more segments to do and we've used up all of 1584 * the space in a request area, go allocate another one 1585 * and chain to that. 1586 */ 1587 if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) { 1588 request_t *nrq; 1589 1590 nrq = mpt_get_request(mpt, FALSE); 1591 1592 if (nrq == NULL) { 1593 error = ENOMEM; 1594 goto bad; 1595 } 1596 1597 /* 1598 * Append the new request area on the tail of our list. 
1599 */ 1600 if ((trq = req->chain) == NULL) { 1601 req->chain = nrq; 1602 } else { 1603 while (trq->chain != NULL) { 1604 trq = trq->chain; 1605 } 1606 trq->chain = nrq; 1607 } 1608 trq = nrq; 1609 mpt_off = trq->req_vbuf; 1610 if (mpt->verbose >= MPT_PRT_DEBUG) { 1611 memset(mpt_off, 0xff, MPT_REQUEST_AREA); 1612 } 1613 nxt_off = 0; 1614 } 1615 } 1616out: 1617 1618 /* 1619 * Last time we need to check if this CCB needs to be aborted. 1620 */ 1621 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { 1622 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { 1623 request_t *cmd_req = 1624 MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); 1625 MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM; 1626 MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL; 1627 MPT_TGT_STATE(mpt, cmd_req)->req = NULL; 1628 } 1629 mpt_prt(mpt, 1630 "mpt_execute_req_a64: I/O cancelled (status 0x%x)\n", 1631 ccb->ccb_h.status & CAM_STATUS_MASK); 1632 if (nseg) { 1633 bus_dmamap_unload(mpt->buffer_dmat, req->dmap); 1634 } 1635 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 1636 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__)); 1637 xpt_done(ccb); 1638 mpt_free_request(mpt, req); 1639 return; 1640 } 1641 1642 ccb->ccb_h.status |= CAM_SIM_QUEUED; 1643 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) { 1644 mpt_req_timeout(req, SBT_1MS * ccb->ccb_h.timeout, 1645 mpt_timeout, ccb); 1646 } 1647 if (mpt->verbose > MPT_PRT_DEBUG) { 1648 int nc = 0; 1649 mpt_print_request(req->req_vbuf); 1650 for (trq = req->chain; trq; trq = trq->chain) { 1651 printf(" Additional Chain Area %d\n", nc++); 1652 mpt_dump_sgl(trq->req_vbuf, 0); 1653 } 1654 } 1655 1656 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { 1657 request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); 1658 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req); 1659#ifdef WE_TRUST_AUTO_GOOD_STATUS 1660 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) && 1661 csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) { 1662 tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS; 1663 } else { 1664 tgt->state = TGT_STATE_MOVING_DATA; 1665 } 1666#else 1667 tgt->state = TGT_STATE_MOVING_DATA; 1668#endif 1669 } 1670 mpt_send_cmd(mpt, req); 1671} 1672 1673static void 1674mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 1675{ 1676 request_t *req, *trq; 1677 char *mpt_off; 1678 union ccb *ccb; 1679 struct mpt_softc *mpt; 1680 int seg, first_lim; 1681 uint32_t flags, nxt_off; 1682 void *sglp = NULL; 1683 MSG_REQUEST_HEADER *hdrp; 1684 SGE_SIMPLE32 *se; 1685 SGE_CHAIN32 *ce; 1686 int istgt = 0; 1687 1688 req = (request_t *)arg; 1689 ccb = req->ccb; 1690 1691 mpt = ccb->ccb_h.ccb_mpt_ptr; 1692 req = ccb->ccb_h.ccb_req_ptr; 1693 1694 hdrp = req->req_vbuf; 1695 mpt_off = req->req_vbuf; 1696 1697 if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) { 1698 error = EFBIG; 1699 } 1700 1701 if (error == 0) { 1702 switch (hdrp->Function) { 1703 case MPI_FUNCTION_SCSI_IO_REQUEST: 1704 case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH: 1705 sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL; 1706 break; 1707 case MPI_FUNCTION_TARGET_ASSIST: 1708 istgt = 1; 1709 sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL; 1710 break; 1711 default: 1712 mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req\n", 1713 hdrp->Function); 1714 error = EINVAL; 1715 break; 1716 } 1717 } 1718 1719 if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) { 1720 error = EFBIG; 1721 mpt_prt(mpt, "segment count %d too large (max %u)\n", 1722 nseg, mpt->max_seg_cnt); 1723 } 1724 1725bad: 1726 if (error != 0) { 1727 if (error != EFBIG && 
error != ENOMEM) { 1728 mpt_prt(mpt, "mpt_execute_req: err %d\n", error); 1729 } 1730 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) { 1731 cam_status status; 1732 mpt_freeze_ccb(ccb); 1733 if (error == EFBIG) { 1734 status = CAM_REQ_TOO_BIG; 1735 } else if (error == ENOMEM) { 1736 if (mpt->outofbeer == 0) { 1737 mpt->outofbeer = 1; 1738 xpt_freeze_simq(mpt->sim, 1); 1739 mpt_lprt(mpt, MPT_PRT_DEBUG, 1740 "FREEZEQ\n"); 1741 } 1742 status = CAM_REQUEUE_REQ; 1743 } else { 1744 status = CAM_REQ_CMP_ERR; 1745 } 1746 mpt_set_ccb_status(ccb, status); 1747 } 1748 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { 1749 request_t *cmd_req = 1750 MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); 1751 MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM; 1752 MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL; 1753 MPT_TGT_STATE(mpt, cmd_req)->req = NULL; 1754 } 1755 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 1756 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__)); 1757 xpt_done(ccb); 1758 mpt_free_request(mpt, req); 1759 return; 1760 } 1761 1762 /* 1763 * No data to transfer? 1764 * Just make a single simple SGL with zero length. 1765 */ 1766 1767 if (mpt->verbose >= MPT_PRT_DEBUG) { 1768 int tidx = ((char *)sglp) - mpt_off; 1769 memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx); 1770 } 1771 1772 if (nseg == 0) { 1773 SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp; 1774 MPI_pSGE_SET_FLAGS(se1, 1775 (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER | 1776 MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST)); 1777 se1->FlagsLength = htole32(se1->FlagsLength); 1778 goto out; 1779 } 1780 1781 1782 flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT; 1783 if (istgt == 0) { 1784 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { 1785 flags |= MPI_SGE_FLAGS_HOST_TO_IOC; 1786 } 1787 } else { 1788 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1789 flags |= MPI_SGE_FLAGS_HOST_TO_IOC; 1790 } 1791 } 1792 1793 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 1794 bus_dmasync_op_t op; 1795 if (istgt) { 1796 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1797 op = BUS_DMASYNC_PREREAD; 1798 } else { 1799 op = BUS_DMASYNC_PREWRITE; 1800 } 1801 } else { 1802 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1803 op = BUS_DMASYNC_PREWRITE; 1804 } else { 1805 op = BUS_DMASYNC_PREREAD; 1806 } 1807 } 1808 bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op); 1809 } 1810 1811 /* 1812 * Okay, fill in what we can at the end of the command frame. 1813 * If we have up to MPT_NSGL_FIRST, we can fit them all into 1814 * the command frame. 1815 * 1816 * Otherwise, we fill up through MPT_NSGL_FIRST less one 1817 * SIMPLE32 pointers and start doing CHAIN32 entries after 1818 * that. 
1819 */ 1820 1821 if (nseg < MPT_NSGL_FIRST(mpt)) { 1822 first_lim = nseg; 1823 } else { 1824 /* 1825 * Leave room for CHAIN element 1826 */ 1827 first_lim = MPT_NSGL_FIRST(mpt) - 1; 1828 } 1829 1830 se = (SGE_SIMPLE32 *) sglp; 1831 for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) { 1832 uint32_t tf; 1833 1834 memset(se, 0,sizeof (*se)); 1835 se->Address = htole32(dm_segs->ds_addr); 1836 1837 MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len); 1838 tf = flags; 1839 if (seg == first_lim - 1) { 1840 tf |= MPI_SGE_FLAGS_LAST_ELEMENT; 1841 } 1842 if (seg == nseg - 1) { 1843 tf |= MPI_SGE_FLAGS_END_OF_LIST | 1844 MPI_SGE_FLAGS_END_OF_BUFFER; 1845 } 1846 MPI_pSGE_SET_FLAGS(se, tf); 1847 se->FlagsLength = htole32(se->FlagsLength); 1848 } 1849 1850 if (seg == nseg) { 1851 goto out; 1852 } 1853 1854 /* 1855 * Tell the IOC where to find the first chain element. 1856 */ 1857 hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2; 1858 nxt_off = MPT_RQSL(mpt); 1859 trq = req; 1860 1861 /* 1862 * Make up the rest of the data segments out of a chain element 1863 * (contained in the current request frame) which points to 1864 * SIMPLE32 elements in the next request frame, possibly ending 1865 * with *another* chain element (if there's more). 1866 */ 1867 while (seg < nseg) { 1868 int this_seg_lim; 1869 uint32_t tf, cur_off; 1870 bus_addr_t chain_list_addr; 1871 1872 /* 1873 * Point to the chain descriptor. Note that the chain 1874 * descriptor is at the end of the *previous* list (whether 1875 * chain or simple). 1876 */ 1877 ce = (SGE_CHAIN32 *) se; 1878 1879 /* 1880 * Before we change our current pointer, make sure we won't 1881 * overflow the request area with this frame. Note that we 1882 * test against 'greater than' here as it's okay in this case 1883 * to have next offset be just outside the request area. 1884 */ 1885 if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) { 1886 nxt_off = MPT_REQUEST_AREA; 1887 goto next_chain; 1888 } 1889 1890 /* 1891 * Set our SGE element pointer to the beginning of the chain 1892 * list and update our next chain list offset. 1893 */ 1894 se = (SGE_SIMPLE32 *) &mpt_off[nxt_off]; 1895 cur_off = nxt_off; 1896 nxt_off += MPT_RQSL(mpt); 1897 1898 /* 1899 * Now initialize the chain descriptor. 1900 */ 1901 memset(ce, 0, sizeof (*ce)); 1902 1903 /* 1904 * Get the physical address of the chain list. 1905 */ 1906 chain_list_addr = trq->req_pbuf; 1907 chain_list_addr += cur_off; 1908 1909 1910 1911 ce->Address = htole32(chain_list_addr); 1912 ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT; 1913 1914 1915 /* 1916 * If we have more than a frame's worth of segments left, 1917 * set up the chain list to have the last element be another 1918 * chain descriptor. 1919 */ 1920 if ((nseg - seg) > MPT_NSGL(mpt)) { 1921 this_seg_lim = seg + MPT_NSGL(mpt) - 1; 1922 /* 1923 * The length of the chain is the length in bytes of the 1924 * number of segments plus the next chain element. 1925 * 1926 * The next chain descriptor offset is the length, 1927 * in words, of the number of segments. 1928 */ 1929 ce->Length = (this_seg_lim - seg) * 1930 sizeof (SGE_SIMPLE32); 1931 ce->NextChainOffset = ce->Length >> 2; 1932 ce->Length += sizeof (SGE_CHAIN32); 1933 } else { 1934 this_seg_lim = nseg; 1935 ce->Length = (this_seg_lim - seg) * 1936 sizeof (SGE_SIMPLE32); 1937 } 1938 ce->Length = htole16(ce->Length); 1939 1940 /* 1941 * Fill in the chain list SGE elements with our segment data. 1942 * 1943 * If we're the last element in this chain list, set the last 1944 * element flag. 
If we're the completely last element period, 1945 * set the end of list and end of buffer flags. 1946 */ 1947 while (seg < this_seg_lim) { 1948 memset(se, 0, sizeof (*se)); 1949 se->Address = htole32(dm_segs->ds_addr); 1950 1951 MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len); 1952 tf = flags; 1953 if (seg == this_seg_lim - 1) { 1954 tf |= MPI_SGE_FLAGS_LAST_ELEMENT; 1955 } 1956 if (seg == nseg - 1) { 1957 tf |= MPI_SGE_FLAGS_END_OF_LIST | 1958 MPI_SGE_FLAGS_END_OF_BUFFER; 1959 } 1960 MPI_pSGE_SET_FLAGS(se, tf); 1961 se->FlagsLength = htole32(se->FlagsLength); 1962 se++; 1963 seg++; 1964 dm_segs++; 1965 } 1966 1967 next_chain: 1968 /* 1969 * If we have more segments to do and we've used up all of 1970 * the space in a request area, go allocate another one 1971 * and chain to that. 1972 */ 1973 if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) { 1974 request_t *nrq; 1975 1976 nrq = mpt_get_request(mpt, FALSE); 1977 1978 if (nrq == NULL) { 1979 error = ENOMEM; 1980 goto bad; 1981 } 1982 1983 /* 1984 * Append the new request area on the tail of our list. 1985 */ 1986 if ((trq = req->chain) == NULL) { 1987 req->chain = nrq; 1988 } else { 1989 while (trq->chain != NULL) { 1990 trq = trq->chain; 1991 } 1992 trq->chain = nrq; 1993 } 1994 trq = nrq; 1995 mpt_off = trq->req_vbuf; 1996 if (mpt->verbose >= MPT_PRT_DEBUG) { 1997 memset(mpt_off, 0xff, MPT_REQUEST_AREA); 1998 } 1999 nxt_off = 0; 2000 } 2001 } 2002out: 2003 2004 /* 2005 * Last time we need to check if this CCB needs to be aborted. 2006 */ 2007 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { 2008 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { 2009 request_t *cmd_req = 2010 MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); 2011 MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM; 2012 MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL; 2013 MPT_TGT_STATE(mpt, cmd_req)->req = NULL; 2014 } 2015 mpt_prt(mpt, 2016 "mpt_execute_req: I/O cancelled (status 0x%x)\n", 2017 ccb->ccb_h.status & CAM_STATUS_MASK); 2018 if (nseg) { 2019 bus_dmamap_unload(mpt->buffer_dmat, req->dmap); 2020 } 2021 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 2022 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__)); 2023 xpt_done(ccb); 2024 mpt_free_request(mpt, req); 2025 return; 2026 } 2027 2028 ccb->ccb_h.status |= CAM_SIM_QUEUED; 2029 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) { 2030 mpt_req_timeout(req, SBT_1MS * ccb->ccb_h.timeout, 2031 mpt_timeout, ccb); 2032 } 2033 if (mpt->verbose > MPT_PRT_DEBUG) { 2034 int nc = 0; 2035 mpt_print_request(req->req_vbuf); 2036 for (trq = req->chain; trq; trq = trq->chain) { 2037 printf(" Additional Chain Area %d\n", nc++); 2038 mpt_dump_sgl(trq->req_vbuf, 0); 2039 } 2040 } 2041 2042 if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { 2043 request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); 2044 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req); 2045#ifdef WE_TRUST_AUTO_GOOD_STATUS 2046 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) && 2047 csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) { 2048 tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS; 2049 } else { 2050 tgt->state = TGT_STATE_MOVING_DATA; 2051 } 2052#else 2053 tgt->state = TGT_STATE_MOVING_DATA; 2054#endif 2055 } 2056 mpt_send_cmd(mpt, req); 2057} 2058 2059static void 2060mpt_start(struct cam_sim *sim, union ccb *ccb) 2061{ 2062 request_t *req; 2063 struct mpt_softc *mpt; 2064 MSG_SCSI_IO_REQUEST *mpt_req; 2065 struct ccb_scsiio *csio = &ccb->csio; 2066 struct ccb_hdr *ccbh = &ccb->ccb_h; 2067 bus_dmamap_callback_t *cb; 2068 target_id_t tgt; 2069 int raid_passthru; 2070 int 
error; 2071 2072 /* Get the pointer for the physical addapter */ 2073 mpt = ccb->ccb_h.ccb_mpt_ptr; 2074 raid_passthru = (sim == mpt->phydisk_sim); 2075 2076 if ((req = mpt_get_request(mpt, FALSE)) == NULL) { 2077 if (mpt->outofbeer == 0) { 2078 mpt->outofbeer = 1; 2079 xpt_freeze_simq(mpt->sim, 1); 2080 mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n"); 2081 } 2082 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 2083 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); 2084 xpt_done(ccb); 2085 return; 2086 } 2087#ifdef INVARIANTS 2088 mpt_req_not_spcl(mpt, req, "mpt_start", __LINE__); 2089#endif 2090 2091 if (sizeof (bus_addr_t) > 4) { 2092 cb = mpt_execute_req_a64; 2093 } else { 2094 cb = mpt_execute_req; 2095 } 2096 2097 /* 2098 * Link the ccb and the request structure so we can find 2099 * the other knowing either the request or the ccb 2100 */ 2101 req->ccb = ccb; 2102 ccb->ccb_h.ccb_req_ptr = req; 2103 2104 /* Now we build the command for the IOC */ 2105 mpt_req = req->req_vbuf; 2106 memset(mpt_req, 0, sizeof (MSG_SCSI_IO_REQUEST)); 2107 2108 mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST; 2109 if (raid_passthru) { 2110 mpt_req->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH; 2111 if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) { 2112 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 2113 mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE); 2114 xpt_done(ccb); 2115 return; 2116 } 2117 mpt_req->Bus = 0; /* we never set bus here */ 2118 } else { 2119 tgt = ccb->ccb_h.target_id; 2120 mpt_req->Bus = 0; /* XXX */ 2121 2122 } 2123 mpt_req->SenseBufferLength = 2124 (csio->sense_len < MPT_SENSE_SIZE) ? 2125 csio->sense_len : MPT_SENSE_SIZE; 2126 2127 /* 2128 * We use the message context to find the request structure when we 2129 * Get the command completion interrupt from the IOC. 2130 */ 2131 mpt_req->MsgContext = htole32(req->index | scsi_io_handler_id); 2132 2133 /* Which physical device to do the I/O on */ 2134 mpt_req->TargetID = tgt; 2135 2136 /* We assume a single level LUN type */ 2137 if (ccb->ccb_h.target_lun >= MPT_MAX_LUNS) { 2138 mpt_req->LUN[0] = 0x40 | ((ccb->ccb_h.target_lun >> 8) & 0x3f); 2139 mpt_req->LUN[1] = ccb->ccb_h.target_lun & 0xff; 2140 } else { 2141 mpt_req->LUN[1] = ccb->ccb_h.target_lun; 2142 } 2143 2144 /* Set the direction of the transfer */ 2145 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2146 mpt_req->Control = MPI_SCSIIO_CONTROL_READ; 2147 } else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { 2148 mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE; 2149 } else { 2150 mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER; 2151 } 2152 2153 if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) { 2154 switch(ccb->csio.tag_action) { 2155 case MSG_HEAD_OF_Q_TAG: 2156 mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ; 2157 break; 2158 case MSG_ACA_TASK: 2159 mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ; 2160 break; 2161 case MSG_ORDERED_Q_TAG: 2162 mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ; 2163 break; 2164 case MSG_SIMPLE_Q_TAG: 2165 default: 2166 mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ; 2167 break; 2168 } 2169 } else { 2170 if (mpt->is_fc || mpt->is_sas) { 2171 mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ; 2172 } else { 2173 /* XXX No such thing for a target doing packetized. 
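 * (Only plain parallel SCSI reaches this branch; the FC and SAS cases
 * above always carry at least a simple queue tag.)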
*/ 2174 mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED; 2175 } 2176 } 2177 2178 if (mpt->is_spi) { 2179 if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) { 2180 mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT; 2181 } 2182 } 2183 mpt_req->Control = htole32(mpt_req->Control); 2184 2185 /* Copy the scsi command block into place */ 2186 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { 2187 bcopy(csio->cdb_io.cdb_ptr, mpt_req->CDB, csio->cdb_len); 2188 } else { 2189 bcopy(csio->cdb_io.cdb_bytes, mpt_req->CDB, csio->cdb_len); 2190 } 2191 2192 mpt_req->CDBLength = csio->cdb_len; 2193 mpt_req->DataLength = htole32(csio->dxfer_len); 2194 mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf); 2195 2196 /* 2197 * Do a *short* print here if we're set to MPT_PRT_DEBUG 2198 */ 2199 if (mpt->verbose == MPT_PRT_DEBUG) { 2200 U32 df; 2201 mpt_prt(mpt, "mpt_start: %s op 0x%x ", 2202 (mpt_req->Function == MPI_FUNCTION_SCSI_IO_REQUEST)? 2203 "SCSI_IO_REQUEST" : "SCSI_IO_PASSTHRU", mpt_req->CDB[0]); 2204 df = mpt_req->Control & MPI_SCSIIO_CONTROL_DATADIRECTION_MASK; 2205 if (df != MPI_SCSIIO_CONTROL_NODATATRANSFER) { 2206 mpt_prtc(mpt, "(%s %u byte%s ", 2207 (df == MPI_SCSIIO_CONTROL_READ)? 2208 "read" : "write", csio->dxfer_len, 2209 (csio->dxfer_len == 1)? ")" : "s)"); 2210 } 2211 mpt_prtc(mpt, "tgt %u lun %jx req %p:%u\n", tgt, 2212 (uintmax_t)ccb->ccb_h.target_lun, req, req->serno); 2213 } 2214 2215 error = bus_dmamap_load_ccb(mpt->buffer_dmat, req->dmap, ccb, cb, 2216 req, 0); 2217 if (error == EINPROGRESS) { 2218 /* 2219 * So as to maintain ordering, freeze the controller queue 2220 * until our mapping is returned. 2221 */ 2222 xpt_freeze_simq(mpt->sim, 1); 2223 ccbh->status |= CAM_RELEASE_SIMQ; 2224 } 2225} 2226 2227static int 2228mpt_bus_reset(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun, 2229 int sleep_ok) 2230{ 2231 int error; 2232 uint16_t status; 2233 uint8_t response; 2234 2235 error = mpt_scsi_send_tmf(mpt, 2236 (tgt != CAM_TARGET_WILDCARD || lun != CAM_LUN_WILDCARD) ? 2237 MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET : 2238 MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS, 2239 mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0, 2240 0, /* XXX How do I get the channel ID? */ 2241 tgt != CAM_TARGET_WILDCARD ? tgt : 0, 2242 lun != CAM_LUN_WILDCARD ? lun : 0, 2243 0, sleep_ok); 2244 2245 if (error != 0) { 2246 /* 2247 * mpt_scsi_send_tmf hard resets on failure, so no 2248 * need to do so here. 2249 */ 2250 mpt_prt(mpt, 2251 "mpt_bus_reset: mpt_scsi_send_tmf returned %d\n", error); 2252 return (EIO); 2253 } 2254 2255 /* Wait for bus reset to be processed by the IOC. */ 2256 error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE, 2257 REQ_STATE_DONE, sleep_ok, 5000); 2258 2259 status = le16toh(mpt->tmf_req->IOCStatus); 2260 response = mpt->tmf_req->ResponseCode; 2261 mpt->tmf_req->state = REQ_STATE_FREE; 2262 2263 if (error) { 2264 mpt_prt(mpt, "mpt_bus_reset: Reset timed-out. " 2265 "Resetting controller.\n"); 2266 mpt_reset(mpt, TRUE); 2267 return (ETIMEDOUT); 2268 } 2269 2270 if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { 2271 mpt_prt(mpt, "mpt_bus_reset: TMF IOC Status 0x%x. " 2272 "Resetting controller.\n", status); 2273 mpt_reset(mpt, TRUE); 2274 return (EIO); 2275 } 2276 2277 if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED && 2278 response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) { 2279 mpt_prt(mpt, "mpt_bus_reset: TMF Response 0x%x. 
" 2280 "Resetting controller.\n", response); 2281 mpt_reset(mpt, TRUE); 2282 return (EIO); 2283 } 2284 return (0); 2285} 2286 2287static int 2288mpt_fc_reset_link(struct mpt_softc *mpt, int dowait) 2289{ 2290 int r = 0; 2291 request_t *req; 2292 PTR_MSG_FC_PRIMITIVE_SEND_REQUEST fc; 2293 2294 req = mpt_get_request(mpt, FALSE); 2295 if (req == NULL) { 2296 return (ENOMEM); 2297 } 2298 fc = req->req_vbuf; 2299 memset(fc, 0, sizeof(*fc)); 2300 fc->SendFlags = MPI_FC_PRIM_SEND_FLAGS_RESET_LINK; 2301 fc->Function = MPI_FUNCTION_FC_PRIMITIVE_SEND; 2302 fc->MsgContext = htole32(req->index | fc_els_handler_id); 2303 mpt_send_cmd(mpt, req); 2304 if (dowait) { 2305 r = mpt_wait_req(mpt, req, REQ_STATE_DONE, 2306 REQ_STATE_DONE, FALSE, 60 * 1000); 2307 if (r == 0) { 2308 mpt_free_request(mpt, req); 2309 } 2310 } 2311 return (r); 2312} 2313 2314static int 2315mpt_cam_event(struct mpt_softc *mpt, request_t *req, 2316 MSG_EVENT_NOTIFY_REPLY *msg) 2317{ 2318 uint32_t data0, data1; 2319 2320 data0 = le32toh(msg->Data[0]); 2321 data1 = le32toh(msg->Data[1]); 2322 switch(msg->Event & 0xFF) { 2323 case MPI_EVENT_UNIT_ATTENTION: 2324 mpt_prt(mpt, "UNIT ATTENTION: Bus: 0x%02x TargetID: 0x%02x\n", 2325 (data0 >> 8) & 0xff, data0 & 0xff); 2326 break; 2327 2328 case MPI_EVENT_IOC_BUS_RESET: 2329 /* We generated a bus reset */ 2330 mpt_prt(mpt, "IOC Generated Bus Reset Port: %d\n", 2331 (data0 >> 8) & 0xff); 2332 xpt_async(AC_BUS_RESET, mpt->path, NULL); 2333 break; 2334 2335 case MPI_EVENT_EXT_BUS_RESET: 2336 /* Someone else generated a bus reset */ 2337 mpt_prt(mpt, "External Bus Reset Detected\n"); 2338 /* 2339 * These replies don't return EventData like the MPI 2340 * spec says they do 2341 */ 2342 xpt_async(AC_BUS_RESET, mpt->path, NULL); 2343 break; 2344 2345 case MPI_EVENT_RESCAN: 2346 { 2347 union ccb *ccb; 2348 uint32_t pathid; 2349 /* 2350 * In general this means a device has been added to the loop. 2351 */ 2352 mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff); 2353 if (mpt->ready == 0) { 2354 break; 2355 } 2356 if (mpt->phydisk_sim) { 2357 pathid = cam_sim_path(mpt->phydisk_sim); 2358 } else { 2359 pathid = cam_sim_path(mpt->sim); 2360 } 2361 /* 2362 * Allocate a CCB, create a wildcard path for this bus, 2363 * and schedule a rescan. 2364 */ 2365 ccb = xpt_alloc_ccb_nowait(); 2366 if (ccb == NULL) { 2367 mpt_prt(mpt, "unable to alloc CCB for rescan\n"); 2368 break; 2369 } 2370 2371 if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, 2372 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 2373 mpt_prt(mpt, "unable to create path for rescan\n"); 2374 xpt_free_ccb(ccb); 2375 break; 2376 } 2377 xpt_rescan(ccb); 2378 break; 2379 } 2380 2381 case MPI_EVENT_LINK_STATUS_CHANGE: 2382 mpt_prt(mpt, "Port %d: LinkState: %s\n", 2383 (data1 >> 8) & 0xff, 2384 ((data0 & 0xff) == 0)? 
"Failed" : "Active"); 2385 break; 2386 2387 case MPI_EVENT_LOOP_STATE_CHANGE: 2388 switch ((data0 >> 16) & 0xff) { 2389 case 0x01: 2390 mpt_prt(mpt, 2391 "Port 0x%x: FC LinkEvent: LIP(%02x,%02x) " 2392 "(Loop Initialization)\n", 2393 (data1 >> 8) & 0xff, 2394 (data0 >> 8) & 0xff, 2395 (data0 ) & 0xff); 2396 switch ((data0 >> 8) & 0xff) { 2397 case 0xF7: 2398 if ((data0 & 0xff) == 0xF7) { 2399 mpt_prt(mpt, "Device needs AL_PA\n"); 2400 } else { 2401 mpt_prt(mpt, "Device %02x doesn't like " 2402 "FC performance\n", 2403 data0 & 0xFF); 2404 } 2405 break; 2406 case 0xF8: 2407 if ((data0 & 0xff) == 0xF7) { 2408 mpt_prt(mpt, "Device had loop failure " 2409 "at its receiver prior to acquiring" 2410 " AL_PA\n"); 2411 } else { 2412 mpt_prt(mpt, "Device %02x detected loop" 2413 " failure at its receiver\n", 2414 data0 & 0xFF); 2415 } 2416 break; 2417 default: 2418 mpt_prt(mpt, "Device %02x requests that device " 2419 "%02x reset itself\n", 2420 data0 & 0xFF, 2421 (data0 >> 8) & 0xFF); 2422 break; 2423 } 2424 break; 2425 case 0x02: 2426 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: " 2427 "LPE(%02x,%02x) (Loop Port Enable)\n", 2428 (data1 >> 8) & 0xff, /* Port */ 2429 (data0 >> 8) & 0xff, /* Character 3 */ 2430 (data0 ) & 0xff /* Character 4 */); 2431 break; 2432 case 0x03: 2433 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: " 2434 "LPB(%02x,%02x) (Loop Port Bypass)\n", 2435 (data1 >> 8) & 0xff, /* Port */ 2436 (data0 >> 8) & 0xff, /* Character 3 */ 2437 (data0 ) & 0xff /* Character 4 */); 2438 break; 2439 default: 2440 mpt_prt(mpt, "Port 0x%x: FC LinkEvent: Unknown " 2441 "FC event (%02x %02x %02x)\n", 2442 (data1 >> 8) & 0xff, /* Port */ 2443 (data0 >> 16) & 0xff, /* Event */ 2444 (data0 >> 8) & 0xff, /* Character 3 */ 2445 (data0 ) & 0xff /* Character 4 */); 2446 } 2447 break; 2448 2449 case MPI_EVENT_LOGOUT: 2450 mpt_prt(mpt, "FC Logout Port: %d N_PortID: %02x\n", 2451 (data1 >> 8) & 0xff, data0); 2452 break; 2453 case MPI_EVENT_QUEUE_FULL: 2454 { 2455 struct cam_sim *sim; 2456 struct cam_path *tmppath; 2457 struct ccb_relsim crs; 2458 PTR_EVENT_DATA_QUEUE_FULL pqf; 2459 lun_id_t lun_id; 2460 2461 pqf = (PTR_EVENT_DATA_QUEUE_FULL)msg->Data; 2462 pqf->CurrentDepth = le16toh(pqf->CurrentDepth); 2463 if (bootverbose) { 2464 mpt_prt(mpt, "QUEUE FULL EVENT: Bus 0x%02x Target 0x%02x " 2465 "Depth %d\n", 2466 pqf->Bus, pqf->TargetID, pqf->CurrentDepth); 2467 } 2468 if (mpt->phydisk_sim && mpt_is_raid_member(mpt, 2469 pqf->TargetID) != 0) { 2470 sim = mpt->phydisk_sim; 2471 } else { 2472 sim = mpt->sim; 2473 } 2474 for (lun_id = 0; lun_id < MPT_MAX_LUNS; lun_id++) { 2475 if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim), 2476 pqf->TargetID, lun_id) != CAM_REQ_CMP) { 2477 mpt_prt(mpt, "unable to create a path to send " 2478 "XPT_REL_SIMQ"); 2479 break; 2480 } 2481 xpt_setup_ccb(&crs.ccb_h, tmppath, 5); 2482 crs.ccb_h.func_code = XPT_REL_SIMQ; 2483 crs.ccb_h.flags = CAM_DEV_QFREEZE; 2484 crs.release_flags = RELSIM_ADJUST_OPENINGS; 2485 crs.openings = pqf->CurrentDepth - 1; 2486 xpt_action((union ccb *)&crs); 2487 if (crs.ccb_h.status != CAM_REQ_CMP) { 2488 mpt_prt(mpt, "XPT_REL_SIMQ failed\n"); 2489 } 2490 xpt_free_path(tmppath); 2491 } 2492 break; 2493 } 2494 case MPI_EVENT_IR_RESYNC_UPDATE: 2495 mpt_prt(mpt, "IR resync update %d completed\n", 2496 (data0 >> 16) & 0xff); 2497 break; 2498 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE: 2499 { 2500 union ccb *ccb; 2501 struct cam_sim *sim; 2502 struct cam_path *tmppath; 2503 PTR_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE psdsc; 2504 2505 psdsc = 
(PTR_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE)msg->Data; 2506 if (mpt->phydisk_sim && mpt_is_raid_member(mpt, 2507 psdsc->TargetID) != 0) 2508 sim = mpt->phydisk_sim; 2509 else 2510 sim = mpt->sim; 2511 switch(psdsc->ReasonCode) { 2512 case MPI_EVENT_SAS_DEV_STAT_RC_ADDED: 2513 ccb = xpt_alloc_ccb_nowait(); 2514 if (ccb == NULL) { 2515 mpt_prt(mpt, 2516 "unable to alloc CCB for rescan\n"); 2517 break; 2518 } 2519 if (xpt_create_path(&ccb->ccb_h.path, NULL, 2520 cam_sim_path(sim), psdsc->TargetID, 2521 CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 2522 mpt_prt(mpt, 2523 "unable to create path for rescan\n"); 2524 xpt_free_ccb(ccb); 2525 break; 2526 } 2527 xpt_rescan(ccb); 2528 break; 2529 case MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING: 2530 if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim), 2531 psdsc->TargetID, CAM_LUN_WILDCARD) != 2532 CAM_REQ_CMP) { 2533 mpt_prt(mpt, 2534 "unable to create path for async event"); 2535 break; 2536 } 2537 xpt_async(AC_LOST_DEVICE, tmppath, NULL); 2538 xpt_free_path(tmppath); 2539 break; 2540 case MPI_EVENT_SAS_DEV_STAT_RC_CMPL_INTERNAL_DEV_RESET: 2541 case MPI_EVENT_SAS_DEV_STAT_RC_CMPL_TASK_ABORT_INTERNAL: 2542 case MPI_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET: 2543 break; 2544 default: 2545 mpt_lprt(mpt, MPT_PRT_WARN, 2546 "SAS device status change: Bus: 0x%02x TargetID: " 2547 "0x%02x ReasonCode: 0x%02x\n", psdsc->Bus, 2548 psdsc->TargetID, psdsc->ReasonCode); 2549 break; 2550 } 2551 break; 2552 } 2553 case MPI_EVENT_SAS_DISCOVERY_ERROR: 2554 { 2555 PTR_EVENT_DATA_DISCOVERY_ERROR pde; 2556 2557 pde = (PTR_EVENT_DATA_DISCOVERY_ERROR)msg->Data; 2558 pde->DiscoveryStatus = le32toh(pde->DiscoveryStatus); 2559 mpt_lprt(mpt, MPT_PRT_WARN, 2560 "SAS discovery error: Port: 0x%02x Status: 0x%08x\n", 2561 pde->Port, pde->DiscoveryStatus); 2562 break; 2563 } 2564 case MPI_EVENT_EVENT_CHANGE: 2565 case MPI_EVENT_INTEGRATED_RAID: 2566 case MPI_EVENT_IR2: 2567 case MPI_EVENT_LOG_ENTRY_ADDED: 2568 case MPI_EVENT_SAS_DISCOVERY: 2569 case MPI_EVENT_SAS_PHY_LINK_STATUS: 2570 case MPI_EVENT_SAS_SES: 2571 break; 2572 default: 2573 mpt_lprt(mpt, MPT_PRT_WARN, "mpt_cam_event: 0x%x\n", 2574 msg->Event & 0xFF); 2575 return (0); 2576 } 2577 return (1); 2578} 2579 2580/* 2581 * Reply path for all SCSI I/O requests, called from our 2582 * interrupt handler by extracting our handler index from 2583 * the MsgContext field of the reply from the IOC. 2584 * 2585 * This routine is optimized for the common case of a 2586 * completion without error. All exception handling is 2587 * offloaded to non-inlined helper routines to minimize 2588 * cache footprint. 
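 * A context-only reply (reply_frame == NULL) is taken below as an
 * unqualified success: the residual is zeroed, the CCB is marked
 * CAM_REQ_CMP and the SCSI status is set to OK. Replies that arrive with
 * a full reply frame are handed to mpt_scsi_reply_frame_handler() instead.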
2589 */ 2590static int 2591mpt_scsi_reply_handler(struct mpt_softc *mpt, request_t *req, 2592 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 2593{ 2594 MSG_SCSI_IO_REQUEST *scsi_req; 2595 union ccb *ccb; 2596 2597 if (req->state == REQ_STATE_FREE) { 2598 mpt_prt(mpt, "mpt_scsi_reply_handler: req already free\n"); 2599 return (TRUE); 2600 } 2601 2602 scsi_req = (MSG_SCSI_IO_REQUEST *)req->req_vbuf; 2603 ccb = req->ccb; 2604 if (ccb == NULL) { 2605 mpt_prt(mpt, "mpt_scsi_reply_handler: req %p:%u with no ccb\n", 2606 req, req->serno); 2607 return (TRUE); 2608 } 2609 2610 mpt_req_untimeout(req, mpt_timeout, ccb); 2611 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 2612 2613 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 2614 bus_dmasync_op_t op; 2615 2616 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) 2617 op = BUS_DMASYNC_POSTREAD; 2618 else 2619 op = BUS_DMASYNC_POSTWRITE; 2620 bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op); 2621 bus_dmamap_unload(mpt->buffer_dmat, req->dmap); 2622 } 2623 2624 if (reply_frame == NULL) { 2625 /* 2626 * Context only reply, completion without error status. 2627 */ 2628 ccb->csio.resid = 0; 2629 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 2630 ccb->csio.scsi_status = SCSI_STATUS_OK; 2631 } else { 2632 mpt_scsi_reply_frame_handler(mpt, req, reply_frame); 2633 } 2634 2635 if (mpt->outofbeer) { 2636 ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 2637 mpt->outofbeer = 0; 2638 mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n"); 2639 } 2640 if (scsi_req->CDB[0] == INQUIRY && (scsi_req->CDB[1] & SI_EVPD) == 0) { 2641 struct scsi_inquiry_data *iq = 2642 (struct scsi_inquiry_data *)ccb->csio.data_ptr; 2643 if (scsi_req->Function == 2644 MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) { 2645 /* 2646 * Fake out the device type so that only the 2647 * pass-thru device will attach. 2648 */ 2649 iq->device &= ~0x1F; 2650 iq->device |= T_NODEVICE; 2651 } 2652 } 2653 if (mpt->verbose == MPT_PRT_DEBUG) { 2654 mpt_prt(mpt, "mpt_scsi_reply_handler: %p:%u complete\n", 2655 req, req->serno); 2656 } 2657 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__)); 2658 xpt_done(ccb); 2659 if ((req->state & REQ_STATE_TIMEDOUT) == 0) { 2660 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 2661 } else { 2662 mpt_prt(mpt, "completing timedout/aborted req %p:%u\n", 2663 req, req->serno); 2664 TAILQ_REMOVE(&mpt->request_timeout_list, req, links); 2665 } 2666 KASSERT((req->state & REQ_STATE_NEED_WAKEUP) == 0, 2667 ("CCB req needed wakeup")); 2668#ifdef INVARIANTS 2669 mpt_req_not_spcl(mpt, req, "mpt_scsi_reply_handler", __LINE__); 2670#endif 2671 mpt_free_request(mpt, req); 2672 return (TRUE); 2673} 2674 2675static int 2676mpt_scsi_tmf_reply_handler(struct mpt_softc *mpt, request_t *req, 2677 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 2678{ 2679 MSG_SCSI_TASK_MGMT_REPLY *tmf_reply; 2680 2681 KASSERT(req == mpt->tmf_req, ("TMF Reply not using mpt->tmf_req")); 2682#ifdef INVARIANTS 2683 mpt_req_not_spcl(mpt, req, "mpt_scsi_tmf_reply_handler", __LINE__); 2684#endif 2685 tmf_reply = (MSG_SCSI_TASK_MGMT_REPLY *)reply_frame; 2686 /* Record IOC Status and Response Code of TMF for any waiters. 
*/ 2687 req->IOCStatus = le16toh(tmf_reply->IOCStatus); 2688 req->ResponseCode = tmf_reply->ResponseCode; 2689 2690 mpt_lprt(mpt, MPT_PRT_DEBUG, "TMF complete: req %p:%u status 0x%x\n", 2691 req, req->serno, le16toh(tmf_reply->IOCStatus)); 2692 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 2693 if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) { 2694 req->state |= REQ_STATE_DONE; 2695 wakeup(req); 2696 } else { 2697 mpt->tmf_req->state = REQ_STATE_FREE; 2698 } 2699 return (TRUE); 2700} 2701 2702/* 2703 * XXX: Move to definitions file 2704 */ 2705#define ELS 0x22 2706#define FC4LS 0x32 2707#define ABTS 0x81 2708#define BA_ACC 0x84 2709 2710#define LS_RJT 0x01 2711#define LS_ACC 0x02 2712#define PLOGI 0x03 2713#define LOGO 0x05 2714#define SRR 0x14 2715#define PRLI 0x20 2716#define PRLO 0x21 2717#define ADISC 0x52 2718#define RSCN 0x61 2719 2720static void 2721mpt_fc_els_send_response(struct mpt_softc *mpt, request_t *req, 2722 PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp, U8 length) 2723{ 2724 uint32_t fl; 2725 MSG_LINK_SERVICE_RSP_REQUEST tmp; 2726 PTR_MSG_LINK_SERVICE_RSP_REQUEST rsp; 2727 2728 /* 2729 * We are going to reuse the ELS request to send this response back. 2730 */ 2731 rsp = &tmp; 2732 memset(rsp, 0, sizeof(*rsp)); 2733 2734#ifdef USE_IMMEDIATE_LINK_DATA 2735 /* 2736 * Apparently the IMMEDIATE stuff doesn't seem to work. 2737 */ 2738 rsp->RspFlags = LINK_SERVICE_RSP_FLAGS_IMMEDIATE; 2739#endif 2740 rsp->RspLength = length; 2741 rsp->Function = MPI_FUNCTION_FC_LINK_SRVC_RSP; 2742 rsp->MsgContext = htole32(req->index | fc_els_handler_id); 2743 2744 /* 2745 * Copy over information from the original reply frame to 2746 * it's correct place in the response. 2747 */ 2748 memcpy((U8 *)rsp + 0x0c, (U8 *)rp + 0x1c, 24); 2749 2750 /* 2751 * And now copy back the temporary area to the original frame. 2752 */ 2753 memcpy(req->req_vbuf, rsp, sizeof (MSG_LINK_SERVICE_RSP_REQUEST)); 2754 rsp = req->req_vbuf; 2755 2756#ifdef USE_IMMEDIATE_LINK_DATA 2757 memcpy((U8 *)&rsp->SGL, &((U8 *)req->req_vbuf)[MPT_RQSL(mpt)], length); 2758#else 2759{ 2760 PTR_SGE_SIMPLE32 se = (PTR_SGE_SIMPLE32) &rsp->SGL; 2761 bus_addr_t paddr = req->req_pbuf; 2762 paddr += MPT_RQSL(mpt); 2763 2764 fl = 2765 MPI_SGE_FLAGS_HOST_TO_IOC | 2766 MPI_SGE_FLAGS_SIMPLE_ELEMENT | 2767 MPI_SGE_FLAGS_LAST_ELEMENT | 2768 MPI_SGE_FLAGS_END_OF_LIST | 2769 MPI_SGE_FLAGS_END_OF_BUFFER; 2770 fl <<= MPI_SGE_FLAGS_SHIFT; 2771 fl |= (length); 2772 se->FlagsLength = htole32(fl); 2773 se->Address = htole32((uint32_t) paddr); 2774} 2775#endif 2776 2777 /* 2778 * Send it on... 
2779 */ 2780 mpt_send_cmd(mpt, req); 2781} 2782 2783static int 2784mpt_fc_els_reply_handler(struct mpt_softc *mpt, request_t *req, 2785 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 2786{ 2787 PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp = 2788 (PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY) reply_frame; 2789 U8 rctl; 2790 U8 type; 2791 U8 cmd; 2792 U16 status = le16toh(reply_frame->IOCStatus); 2793 U32 *elsbuf; 2794 int ioindex; 2795 int do_refresh = TRUE; 2796 2797#ifdef INVARIANTS 2798 KASSERT(mpt_req_on_free_list(mpt, req) == 0, 2799 ("fc_els_reply_handler: req %p:%u for function %x on freelist!", 2800 req, req->serno, rp->Function)); 2801 if (rp->Function != MPI_FUNCTION_FC_PRIMITIVE_SEND) { 2802 mpt_req_spcl(mpt, req, "fc_els_reply_handler", __LINE__); 2803 } else { 2804 mpt_req_not_spcl(mpt, req, "fc_els_reply_handler", __LINE__); 2805 } 2806#endif 2807 mpt_lprt(mpt, MPT_PRT_DEBUG, 2808 "FC_ELS Complete: req %p:%u, reply %p function %x\n", 2809 req, req->serno, reply_frame, reply_frame->Function); 2810 2811 if (status != MPI_IOCSTATUS_SUCCESS) { 2812 mpt_prt(mpt, "ELS REPLY STATUS 0x%x for Function %x\n", 2813 status, reply_frame->Function); 2814 if (status == MPI_IOCSTATUS_INVALID_STATE) { 2815 /* 2816 * XXX: to get around shutdown issue 2817 */ 2818 mpt->disabled = 1; 2819 return (TRUE); 2820 } 2821 return (TRUE); 2822 } 2823 2824 /* 2825 * If the function of a link service response, we recycle the 2826 * response to be a refresh for a new link service request. 2827 * 2828 * The request pointer is bogus in this case and we have to fetch 2829 * it based upon the TransactionContext. 2830 */ 2831 if (rp->Function == MPI_FUNCTION_FC_LINK_SRVC_RSP) { 2832 /* Freddie Uncle Charlie Katie */ 2833 /* We don't get the IOINDEX as part of the Link Svc Rsp */ 2834 for (ioindex = 0; ioindex < mpt->els_cmds_allocated; ioindex++) 2835 if (mpt->els_cmd_ptrs[ioindex] == req) { 2836 break; 2837 } 2838 2839 KASSERT(ioindex < mpt->els_cmds_allocated, 2840 ("can't find my mommie!")); 2841 2842 /* remove from active list as we're going to re-post it */ 2843 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 2844 req->state &= ~REQ_STATE_QUEUED; 2845 req->state |= REQ_STATE_DONE; 2846 mpt_fc_post_els(mpt, req, ioindex); 2847 return (TRUE); 2848 } 2849 2850 if (rp->Function == MPI_FUNCTION_FC_PRIMITIVE_SEND) { 2851 /* remove from active list as we're done */ 2852 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 2853 req->state &= ~REQ_STATE_QUEUED; 2854 req->state |= REQ_STATE_DONE; 2855 if (req->state & REQ_STATE_TIMEDOUT) { 2856 mpt_lprt(mpt, MPT_PRT_DEBUG, 2857 "Sync Primitive Send Completed After Timeout\n"); 2858 mpt_free_request(mpt, req); 2859 } else if ((req->state & REQ_STATE_NEED_WAKEUP) == 0) { 2860 mpt_lprt(mpt, MPT_PRT_DEBUG, 2861 "Async Primitive Send Complete\n"); 2862 mpt_free_request(mpt, req); 2863 } else { 2864 mpt_lprt(mpt, MPT_PRT_DEBUG, 2865 "Sync Primitive Send Complete- Waking Waiter\n"); 2866 wakeup(req); 2867 } 2868 return (TRUE); 2869 } 2870 2871 if (rp->Function != MPI_FUNCTION_FC_LINK_SRVC_BUF_POST) { 2872 mpt_prt(mpt, "unexpected ELS_REPLY: Function 0x%x Flags %x " 2873 "Length %d Message Flags %x\n", rp->Function, rp->Flags, 2874 rp->MsgLength, rp->MsgFlags); 2875 return (TRUE); 2876 } 2877 2878 if (rp->MsgLength <= 5) { 2879 /* 2880 * This is just a ack of an original ELS buffer post 2881 */ 2882 mpt_lprt(mpt, MPT_PRT_DEBUG, 2883 "RECV'd ACK of FC_ELS buf post %p:%u\n", req, req->serno); 2884 return (TRUE); 2885 } 2886 2887 2888 rctl = (le32toh(rp->Rctl_Did) & 
MPI_FC_RCTL_MASK) >> MPI_FC_RCTL_SHIFT; 2889 type = (le32toh(rp->Type_Fctl) & MPI_FC_TYPE_MASK) >> MPI_FC_TYPE_SHIFT; 2890 2891 elsbuf = &((U32 *)req->req_vbuf)[MPT_RQSL(mpt)/sizeof (U32)]; 2892 cmd = be32toh(elsbuf[0]) >> 24; 2893 2894 if (rp->Flags & MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED) { 2895 mpt_lprt(mpt, MPT_PRT_ALWAYS, "ELS_REPLY: response unneeded\n"); 2896 return (TRUE); 2897 } 2898 2899 ioindex = le32toh(rp->TransactionContext); 2900 req = mpt->els_cmd_ptrs[ioindex]; 2901 2902 if (rctl == ELS && type == 1) { 2903 switch (cmd) { 2904 case PRLI: 2905 /* 2906 * Send back a PRLI ACC 2907 */ 2908 mpt_prt(mpt, "PRLI from 0x%08x%08x\n", 2909 le32toh(rp->Wwn.PortNameHigh), 2910 le32toh(rp->Wwn.PortNameLow)); 2911 elsbuf[0] = htobe32(0x02100014); 2912 elsbuf[1] |= htobe32(0x00000100); 2913 elsbuf[4] = htobe32(0x00000002); 2914 if (mpt->role & MPT_ROLE_TARGET) 2915 elsbuf[4] |= htobe32(0x00000010); 2916 if (mpt->role & MPT_ROLE_INITIATOR) 2917 elsbuf[4] |= htobe32(0x00000020); 2918 /* remove from active list as we're done */ 2919 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 2920 req->state &= ~REQ_STATE_QUEUED; 2921 req->state |= REQ_STATE_DONE; 2922 mpt_fc_els_send_response(mpt, req, rp, 20); 2923 do_refresh = FALSE; 2924 break; 2925 case PRLO: 2926 memset(elsbuf, 0, 5 * (sizeof (U32))); 2927 elsbuf[0] = htobe32(0x02100014); 2928 elsbuf[1] = htobe32(0x08000100); 2929 mpt_prt(mpt, "PRLO from 0x%08x%08x\n", 2930 le32toh(rp->Wwn.PortNameHigh), 2931 le32toh(rp->Wwn.PortNameLow)); 2932 /* remove from active list as we're done */ 2933 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 2934 req->state &= ~REQ_STATE_QUEUED; 2935 req->state |= REQ_STATE_DONE; 2936 mpt_fc_els_send_response(mpt, req, rp, 20); 2937 do_refresh = FALSE; 2938 break; 2939 default: 2940 mpt_prt(mpt, "ELS TYPE 1 COMMAND: %x\n", cmd); 2941 break; 2942 } 2943 } else if (rctl == ABTS && type == 0) { 2944 uint16_t rx_id = le16toh(rp->Rxid); 2945 uint16_t ox_id = le16toh(rp->Oxid); 2946 request_t *tgt_req = NULL; 2947 2948 mpt_prt(mpt, 2949 "ELS: ABTS OX_ID 0x%x RX_ID 0x%x from 0x%08x%08x\n", 2950 ox_id, rx_id, le32toh(rp->Wwn.PortNameHigh), 2951 le32toh(rp->Wwn.PortNameLow)); 2952 if (rx_id >= mpt->mpt_max_tgtcmds) { 2953 mpt_prt(mpt, "Bad RX_ID 0x%x\n", rx_id); 2954 } else if (mpt->tgt_cmd_ptrs == NULL) { 2955 mpt_prt(mpt, "No TGT CMD PTRS\n"); 2956 } else { 2957 tgt_req = mpt->tgt_cmd_ptrs[rx_id]; 2958 } 2959 if (tgt_req) { 2960 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, tgt_req); 2961 union ccb *ccb; 2962 uint32_t ct_id; 2963 2964 /* 2965 * Check to make sure we have the correct command 2966 * The reply descriptor in the target state should 2967 * should contain an IoIndex that should match the 2968 * RX_ID. 2969 * 2970 * It'd be nice to have OX_ID to crosscheck with 2971 * as well. 
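 * If the IoIndex recorded in the target state does not agree with the
 * RX_ID from the ABTS, the mismatch is logged but the abort of the
 * tracked command is still attempted at the skip: label below.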
2972 */ 2973 ct_id = GET_IO_INDEX(tgt->reply_desc); 2974 2975 if (ct_id != rx_id) { 2976 mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: " 2977 "RX_ID received=0x%x; RX_ID in cmd=0x%x\n", 2978 rx_id, ct_id); 2979 goto skip; 2980 } 2981 2982 ccb = tgt->ccb; 2983 if (ccb) { 2984 mpt_prt(mpt, 2985 "CCB (%p): lun %jx flags %x status %x\n", 2986 ccb, (uintmax_t)ccb->ccb_h.target_lun, 2987 ccb->ccb_h.flags, ccb->ccb_h.status); 2988 } 2989 mpt_prt(mpt, "target state 0x%x resid %u xfrd %u rpwrd " 2990 "%x nxfers %x\n", tgt->state, 2991 tgt->resid, tgt->bytes_xfered, tgt->reply_desc, 2992 tgt->nxfers); 2993 skip: 2994 if (mpt_abort_target_cmd(mpt, tgt_req)) { 2995 mpt_prt(mpt, "unable to start TargetAbort\n"); 2996 } 2997 } else { 2998 mpt_prt(mpt, "no back pointer for RX_ID 0x%x\n", rx_id); 2999 } 3000 memset(elsbuf, 0, 5 * (sizeof (U32))); 3001 elsbuf[0] = htobe32(0); 3002 elsbuf[1] = htobe32((ox_id << 16) | rx_id); 3003 elsbuf[2] = htobe32(0x000ffff); 3004 /* 3005 * Dork with the reply frame so that the response to it 3006 * will be correct. 3007 */ 3008 rp->Rctl_Did += ((BA_ACC - ABTS) << MPI_FC_RCTL_SHIFT); 3009 /* remove from active list as we're done */ 3010 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 3011 req->state &= ~REQ_STATE_QUEUED; 3012 req->state |= REQ_STATE_DONE; 3013 mpt_fc_els_send_response(mpt, req, rp, 12); 3014 do_refresh = FALSE; 3015 } else { 3016 mpt_prt(mpt, "ELS: RCTL %x TYPE %x CMD %x\n", rctl, type, cmd); 3017 } 3018 if (do_refresh == TRUE) { 3019 /* remove from active list as we're done */ 3020 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 3021 req->state &= ~REQ_STATE_QUEUED; 3022 req->state |= REQ_STATE_DONE; 3023 mpt_fc_post_els(mpt, req, ioindex); 3024 } 3025 return (TRUE); 3026} 3027 3028/* 3029 * Clean up all SCSI Initiator personality state in response 3030 * to a controller reset. 3031 */ 3032static void 3033mpt_cam_ioc_reset(struct mpt_softc *mpt, int type) 3034{ 3035 3036 /* 3037 * The pending list is already run down by 3038 * the generic handler. Perform the same 3039 * operation on the timed out request list. 3040 */ 3041 mpt_complete_request_chain(mpt, &mpt->request_timeout_list, 3042 MPI_IOCSTATUS_INVALID_STATE); 3043 3044 /* 3045 * XXX: We need to repost ELS and Target Command Buffers? 3046 */ 3047 3048 /* 3049 * Inform the XPT that a bus reset has occurred. 3050 */ 3051 xpt_async(AC_BUS_RESET, mpt->path, NULL); 3052} 3053 3054/* 3055 * Parse additional completion information in the reply 3056 * frame for SCSI I/O requests. 
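 * That information includes the transfer residual, any autosense data
 * returned by the IOC, and the mapping of the MPI IOCStatus value onto
 * a CAM status for the CCB.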
3057 */ 3058static int 3059mpt_scsi_reply_frame_handler(struct mpt_softc *mpt, request_t *req, 3060 MSG_DEFAULT_REPLY *reply_frame) 3061{ 3062 union ccb *ccb; 3063 MSG_SCSI_IO_REPLY *scsi_io_reply; 3064 u_int ioc_status; 3065 u_int sstate; 3066 3067 MPT_DUMP_REPLY_FRAME(mpt, reply_frame); 3068 KASSERT(reply_frame->Function == MPI_FUNCTION_SCSI_IO_REQUEST 3069 || reply_frame->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH, 3070 ("MPT SCSI I/O Handler called with incorrect reply type")); 3071 KASSERT((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0, 3072 ("MPT SCSI I/O Handler called with continuation reply")); 3073 3074 scsi_io_reply = (MSG_SCSI_IO_REPLY *)reply_frame; 3075 ioc_status = le16toh(scsi_io_reply->IOCStatus); 3076 ioc_status &= MPI_IOCSTATUS_MASK; 3077 sstate = scsi_io_reply->SCSIState; 3078 3079 ccb = req->ccb; 3080 ccb->csio.resid = 3081 ccb->csio.dxfer_len - le32toh(scsi_io_reply->TransferCount); 3082 3083 if ((sstate & MPI_SCSI_STATE_AUTOSENSE_VALID) != 0 3084 && (ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR)) == 0) { 3085 uint32_t sense_returned; 3086 3087 ccb->ccb_h.status |= CAM_AUTOSNS_VALID; 3088 3089 sense_returned = le32toh(scsi_io_reply->SenseCount); 3090 if (sense_returned < ccb->csio.sense_len) 3091 ccb->csio.sense_resid = ccb->csio.sense_len - 3092 sense_returned; 3093 else 3094 ccb->csio.sense_resid = 0; 3095 3096 bzero(&ccb->csio.sense_data, sizeof(ccb->csio.sense_data)); 3097 bcopy(req->sense_vbuf, &ccb->csio.sense_data, 3098 min(ccb->csio.sense_len, sense_returned)); 3099 } 3100 3101 if ((sstate & MPI_SCSI_STATE_QUEUE_TAG_REJECTED) != 0) { 3102 /* 3103 * Tag messages rejected, but non-tagged retry 3104 * was successful. 3105XXXX 3106 mpt_set_tags(mpt, devinfo, MPT_QUEUE_NONE); 3107 */ 3108 } 3109 3110 switch(ioc_status) { 3111 case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: 3112 /* 3113 * XXX 3114 * Linux driver indicates that a zero 3115 * transfer length with this error code 3116 * indicates a CRC error. 3117 * 3118 * No need to swap the bytes for checking 3119 * against zero. 3120 */ 3121 if (scsi_io_reply->TransferCount == 0) { 3122 mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY); 3123 break; 3124 } 3125 /* FALLTHROUGH */ 3126 case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: 3127 case MPI_IOCSTATUS_SUCCESS: 3128 case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: 3129 if ((sstate & MPI_SCSI_STATE_NO_SCSI_STATUS) != 0) { 3130 /* 3131 * Status was never returned for this transaction. 3132 */ 3133 mpt_set_ccb_status(ccb, CAM_UNEXP_BUSFREE); 3134 } else if (scsi_io_reply->SCSIStatus != SCSI_STATUS_OK) { 3135 ccb->csio.scsi_status = scsi_io_reply->SCSIStatus; 3136 mpt_set_ccb_status(ccb, CAM_SCSI_STATUS_ERROR); 3137 if ((sstate & MPI_SCSI_STATE_AUTOSENSE_FAILED) != 0) 3138 mpt_set_ccb_status(ccb, CAM_AUTOSENSE_FAIL); 3139 } else if ((sstate & MPI_SCSI_STATE_RESPONSE_INFO_VALID) != 0) { 3140 3141 /* XXX Handle SPI-Packet and FCP-2 response info. */ 3142 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); 3143 } else 3144 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3145 break; 3146 case MPI_IOCSTATUS_SCSI_DATA_OVERRUN: 3147 mpt_set_ccb_status(ccb, CAM_DATA_RUN_ERR); 3148 break; 3149 case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR: 3150 mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY); 3151 break; 3152 case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE: 3153 /* 3154 * Since selection timeouts and "device really not 3155 * there" are grouped into this error code, report 3156 * selection timeout. 
Selection timeouts are 3157 * typically retried before giving up on the device 3158 * whereas "device not there" errors are considered 3159 * unretryable. 3160 */ 3161 mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT); 3162 break; 3163 case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR: 3164 mpt_set_ccb_status(ccb, CAM_SEQUENCE_FAIL); 3165 break; 3166 case MPI_IOCSTATUS_SCSI_INVALID_BUS: 3167 mpt_set_ccb_status(ccb, CAM_PATH_INVALID); 3168 break; 3169 case MPI_IOCSTATUS_SCSI_INVALID_TARGETID: 3170 mpt_set_ccb_status(ccb, CAM_TID_INVALID); 3171 break; 3172 case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED: 3173 ccb->ccb_h.status = CAM_UA_TERMIO; 3174 break; 3175 case MPI_IOCSTATUS_INVALID_STATE: 3176 /* 3177 * The IOC has been reset. Emulate a bus reset. 3178 */ 3179 /* FALLTHROUGH */ 3180 case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: 3181 ccb->ccb_h.status = CAM_SCSI_BUS_RESET; 3182 break; 3183 case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: 3184 case MPI_IOCSTATUS_SCSI_IOC_TERMINATED: 3185 /* 3186 * Don't clobber any timeout status that has 3187 * already been set for this transaction. We 3188 * want the SCSI layer to be able to differentiate 3189 * between the command we aborted due to timeout 3190 * and any innocent bystanders. 3191 */ 3192 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) 3193 break; 3194 mpt_set_ccb_status(ccb, CAM_REQ_TERMIO); 3195 break; 3196 3197 case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES: 3198 mpt_set_ccb_status(ccb, CAM_RESRC_UNAVAIL); 3199 break; 3200 case MPI_IOCSTATUS_BUSY: 3201 mpt_set_ccb_status(ccb, CAM_BUSY); 3202 break; 3203 case MPI_IOCSTATUS_INVALID_FUNCTION: 3204 case MPI_IOCSTATUS_INVALID_SGL: 3205 case MPI_IOCSTATUS_INTERNAL_ERROR: 3206 case MPI_IOCSTATUS_INVALID_FIELD: 3207 default: 3208 /* XXX 3209 * Some of the above may need to kick 3210 * of a recovery action!!!! 3211 */ 3212 ccb->ccb_h.status = CAM_UNREC_HBA_ERROR; 3213 break; 3214 } 3215 3216 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 3217 mpt_freeze_ccb(ccb); 3218 } 3219 3220 return (TRUE); 3221} 3222 3223static void 3224mpt_action(struct cam_sim *sim, union ccb *ccb) 3225{ 3226 struct mpt_softc *mpt; 3227 struct ccb_trans_settings *cts; 3228 target_id_t tgt; 3229 lun_id_t lun; 3230 int raid_passthru; 3231 3232 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n")); 3233 3234 mpt = (struct mpt_softc *)cam_sim_softc(sim); 3235 raid_passthru = (sim == mpt->phydisk_sim); 3236 MPT_LOCK_ASSERT(mpt); 3237 3238 tgt = ccb->ccb_h.target_id; 3239 lun = ccb->ccb_h.target_lun; 3240 if (raid_passthru && 3241 ccb->ccb_h.func_code != XPT_PATH_INQ && 3242 ccb->ccb_h.func_code != XPT_RESET_BUS && 3243 ccb->ccb_h.func_code != XPT_RESET_DEV) { 3244 if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) { 3245 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 3246 mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE); 3247 xpt_done(ccb); 3248 return; 3249 } 3250 } 3251 ccb->ccb_h.ccb_mpt_ptr = mpt; 3252 3253 switch (ccb->ccb_h.func_code) { 3254 case XPT_SCSI_IO: /* Execute the requested I/O operation */ 3255 /* 3256 * Do a couple of preliminary checks... 
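 * (A CDB passed as a physical-address pointer is rejected, and the CDB
 * must fit in the CDB field of MSG_SCSI_IO_REQUEST, i.e. 16 bytes.)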
3257 */ 3258 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { 3259 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) { 3260 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 3261 mpt_set_ccb_status(ccb, CAM_REQ_INVALID); 3262 break; 3263 } 3264 } 3265 /* Max supported CDB length is 16 bytes */ 3266 /* XXX Unless we implement the new 32byte message type */ 3267 if (ccb->csio.cdb_len > 3268 sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB)) { 3269 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 3270 mpt_set_ccb_status(ccb, CAM_REQ_INVALID); 3271 break; 3272 } 3273#ifdef MPT_TEST_MULTIPATH 3274 if (mpt->failure_id == ccb->ccb_h.target_id) { 3275 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 3276 mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT); 3277 break; 3278 } 3279#endif 3280 ccb->csio.scsi_status = SCSI_STATUS_OK; 3281 mpt_start(sim, ccb); 3282 return; 3283 3284 case XPT_RESET_BUS: 3285 if (raid_passthru) { 3286 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 3287 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3288 break; 3289 } 3290 case XPT_RESET_DEV: 3291 if (ccb->ccb_h.func_code == XPT_RESET_BUS) { 3292 if (bootverbose) { 3293 xpt_print(ccb->ccb_h.path, "reset bus\n"); 3294 } 3295 } else { 3296 xpt_print(ccb->ccb_h.path, "reset device\n"); 3297 } 3298 (void) mpt_bus_reset(mpt, tgt, lun, FALSE); 3299 3300 /* 3301 * mpt_bus_reset is always successful in that it 3302 * will fall back to a hard reset should a bus 3303 * reset attempt fail. 3304 */ 3305 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 3306 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3307 break; 3308 3309 case XPT_ABORT: 3310 { 3311 union ccb *accb = ccb->cab.abort_ccb; 3312 switch (accb->ccb_h.func_code) { 3313 case XPT_ACCEPT_TARGET_IO: 3314 case XPT_IMMEDIATE_NOTIFY: 3315 ccb->ccb_h.status = mpt_abort_target_ccb(mpt, ccb); 3316 break; 3317 case XPT_CONT_TARGET_IO: 3318 mpt_prt(mpt, "cannot abort active CTIOs yet\n"); 3319 ccb->ccb_h.status = CAM_UA_ABORT; 3320 break; 3321 case XPT_SCSI_IO: 3322 ccb->ccb_h.status = CAM_UA_ABORT; 3323 break; 3324 default: 3325 ccb->ccb_h.status = CAM_REQ_INVALID; 3326 break; 3327 } 3328 break; 3329 } 3330 3331#define IS_CURRENT_SETTINGS(c) ((c)->type == CTS_TYPE_CURRENT_SETTINGS) 3332 3333#define DP_DISC_ENABLE 0x1 3334#define DP_DISC_DISABL 0x2 3335#define DP_DISC (DP_DISC_ENABLE|DP_DISC_DISABL) 3336 3337#define DP_TQING_ENABLE 0x4 3338#define DP_TQING_DISABL 0x8 3339#define DP_TQING (DP_TQING_ENABLE|DP_TQING_DISABL) 3340 3341#define DP_WIDE 0x10 3342#define DP_NARROW 0x20 3343#define DP_WIDTH (DP_WIDE|DP_NARROW) 3344 3345#define DP_SYNC 0x40 3346 3347 case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */ 3348 { 3349 struct ccb_trans_settings_scsi *scsi; 3350 struct ccb_trans_settings_spi *spi; 3351 uint8_t dval; 3352 u_int period; 3353 u_int offset; 3354 int i, j; 3355 3356 cts = &ccb->cts; 3357 3358 if (mpt->is_fc || mpt->is_sas) { 3359 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3360 break; 3361 } 3362 3363 scsi = &cts->proto_specific.scsi; 3364 spi = &cts->xport_specific.spi; 3365 3366 /* 3367 * We can be called just to valid transport and proto versions 3368 */ 3369 if (scsi->valid == 0 && spi->valid == 0) { 3370 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3371 break; 3372 } 3373 3374 /* 3375 * Skip attempting settings on RAID volume disks. 3376 * Other devices on the bus get the normal treatment. 
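 * (When such a target is seen on the regular sim the CCB is simply
 * completed with CAM_REQ_CMP; requests that come in through the RAID
 * passthrough sim still negotiate for the underlying physical disk.)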
3377 */ 3378 if (mpt->phydisk_sim && raid_passthru == 0 && 3379 mpt_is_raid_volume(mpt, tgt) != 0) { 3380 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 3381 "no transfer settings for RAID vols\n"); 3382 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3383 break; 3384 } 3385 3386 i = mpt->mpt_port_page2.PortSettings & 3387 MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS; 3388 j = mpt->mpt_port_page2.PortFlags & 3389 MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK; 3390 if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS && 3391 j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV) { 3392 mpt_lprt(mpt, MPT_PRT_ALWAYS, 3393 "honoring BIOS transfer negotiations\n"); 3394 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3395 break; 3396 } 3397 3398 dval = 0; 3399 period = 0; 3400 offset = 0; 3401 3402 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) { 3403 dval |= ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) ? 3404 DP_DISC_ENABLE : DP_DISC_DISABL; 3405 } 3406 3407 if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { 3408 dval |= ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) ? 3409 DP_TQING_ENABLE : DP_TQING_DISABL; 3410 } 3411 3412 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) { 3413 dval |= (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) ? 3414 DP_WIDE : DP_NARROW; 3415 } 3416 3417 if (spi->valid & CTS_SPI_VALID_SYNC_OFFSET) { 3418 dval |= DP_SYNC; 3419 offset = spi->sync_offset; 3420 } else { 3421 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr = 3422 &mpt->mpt_dev_page1[tgt]; 3423 offset = ptr->RequestedParameters; 3424 offset &= MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK; 3425 offset >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET; 3426 } 3427 if (spi->valid & CTS_SPI_VALID_SYNC_RATE) { 3428 dval |= DP_SYNC; 3429 period = spi->sync_period; 3430 } else { 3431 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr = 3432 &mpt->mpt_dev_page1[tgt]; 3433 period = ptr->RequestedParameters; 3434 period &= MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK; 3435 period >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD; 3436 } 3437 3438 if (dval & DP_DISC_ENABLE) { 3439 mpt->mpt_disc_enable |= (1 << tgt); 3440 } else if (dval & DP_DISC_DISABL) { 3441 mpt->mpt_disc_enable &= ~(1 << tgt); 3442 } 3443 if (dval & DP_TQING_ENABLE) { 3444 mpt->mpt_tag_enable |= (1 << tgt); 3445 } else if (dval & DP_TQING_DISABL) { 3446 mpt->mpt_tag_enable &= ~(1 << tgt); 3447 } 3448 if (dval & DP_WIDTH) { 3449 mpt_setwidth(mpt, tgt, 1); 3450 } 3451 if (dval & DP_SYNC) { 3452 mpt_setsync(mpt, tgt, period, offset); 3453 } 3454 if (dval == 0) { 3455 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3456 break; 3457 } 3458 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 3459 "set [%d]: 0x%x period 0x%x offset %d\n", 3460 tgt, dval, period, offset); 3461 if (mpt_update_spi_config(mpt, tgt)) { 3462 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); 3463 } else { 3464 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3465 } 3466 break; 3467 } 3468 case XPT_GET_TRAN_SETTINGS: 3469 { 3470 struct ccb_trans_settings_scsi *scsi; 3471 cts = &ccb->cts; 3472 cts->protocol = PROTO_SCSI; 3473 if (mpt->is_fc) { 3474 struct ccb_trans_settings_fc *fc = 3475 &cts->xport_specific.fc; 3476 cts->protocol_version = SCSI_REV_SPC; 3477 cts->transport = XPORT_FC; 3478 cts->transport_version = 0; 3479 if (mpt->mpt_fcport_speed != 0) { 3480 fc->valid = CTS_FC_VALID_SPEED; 3481 fc->bitrate = 100000 * mpt->mpt_fcport_speed; 3482 } 3483 } else if (mpt->is_sas) { 3484 struct ccb_trans_settings_sas *sas = 3485 &cts->xport_specific.sas; 3486 cts->protocol_version = SCSI_REV_SPC2; 3487 cts->transport = XPORT_SAS; 3488 cts->transport_version = 0; 3489 sas->valid = CTS_SAS_VALID_SPEED; 3490 sas->bitrate = 300000; 3491 } 
else { 3492 cts->protocol_version = SCSI_REV_2; 3493 cts->transport = XPORT_SPI; 3494 cts->transport_version = 2; 3495 if (mpt_get_spi_settings(mpt, cts) != 0) { 3496 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); 3497 break; 3498 } 3499 } 3500 scsi = &cts->proto_specific.scsi; 3501 scsi->valid = CTS_SCSI_VALID_TQ; 3502 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; 3503 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3504 break; 3505 } 3506 case XPT_CALC_GEOMETRY: 3507 { 3508 struct ccb_calc_geometry *ccg; 3509 3510 ccg = &ccb->ccg; 3511 if (ccg->block_size == 0) { 3512 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 3513 mpt_set_ccb_status(ccb, CAM_REQ_INVALID); 3514 break; 3515 } 3516 cam_calc_geometry(ccg, /* extended */ 1); 3517 KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__)); 3518 break; 3519 } 3520 case XPT_GET_SIM_KNOB: 3521 { 3522 struct ccb_sim_knob *kp = &ccb->knob; 3523 3524 if (mpt->is_fc) { 3525 kp->xport_specific.fc.wwnn = mpt->scinfo.fc.wwnn; 3526 kp->xport_specific.fc.wwpn = mpt->scinfo.fc.wwpn; 3527 switch (mpt->role) { 3528 case MPT_ROLE_NONE: 3529 kp->xport_specific.fc.role = KNOB_ROLE_NONE; 3530 break; 3531 case MPT_ROLE_INITIATOR: 3532 kp->xport_specific.fc.role = KNOB_ROLE_INITIATOR; 3533 break; 3534 case MPT_ROLE_TARGET: 3535 kp->xport_specific.fc.role = KNOB_ROLE_TARGET; 3536 break; 3537 case MPT_ROLE_BOTH: 3538 kp->xport_specific.fc.role = KNOB_ROLE_BOTH; 3539 break; 3540 } 3541 kp->xport_specific.fc.valid = 3542 KNOB_VALID_ADDRESS | KNOB_VALID_ROLE; 3543 ccb->ccb_h.status = CAM_REQ_CMP; 3544 } else { 3545 ccb->ccb_h.status = CAM_REQ_INVALID; 3546 } 3547 xpt_done(ccb); 3548 break; 3549 } 3550 case XPT_PATH_INQ: /* Path routing inquiry */ 3551 { 3552 struct ccb_pathinq *cpi = &ccb->cpi; 3553 3554 cpi->version_num = 1; 3555 cpi->target_sprt = 0; 3556 cpi->hba_eng_cnt = 0; 3557 cpi->max_target = mpt->port_facts[0].MaxDevices - 1; 3558 cpi->maxio = (mpt->max_cam_seg_cnt - 1) * PAGE_SIZE; 3559 /* 3560 * FC cards report MAX_DEVICES of 512, but 3561 * the MSG_SCSI_IO_REQUEST target id field 3562 * is only 8 bits. Until we fix the driver 3563 * to support 'channels' for bus overflow, 3564 * just limit it. 3565 */ 3566 if (cpi->max_target > 255) { 3567 cpi->max_target = 255; 3568 } 3569 3570 /* 3571 * VMware ESX reports > 16 devices and then dies when we probe. 3572 */ 3573 if (mpt->is_spi && cpi->max_target > 15) { 3574 cpi->max_target = 15; 3575 } 3576 if (mpt->is_spi) 3577 cpi->max_lun = 7; 3578 else 3579 cpi->max_lun = MPT_MAX_LUNS; 3580 cpi->initiator_id = mpt->mpt_ini_id; 3581 cpi->bus_id = cam_sim_bus(sim); 3582 3583 /* 3584 * The base speed is the speed of the underlying connection. 
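 * (Below, the FC case reports a base transfer speed of 100000, SAS
 * 300000 and plain parallel SCSI 3300, with the FC transport bitrate
 * further scaled by the negotiated port speed.)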
3585 */ 3586 cpi->protocol = PROTO_SCSI; 3587 if (mpt->is_fc) { 3588 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED; 3589 cpi->base_transfer_speed = 100000; 3590 cpi->hba_inquiry = PI_TAG_ABLE; 3591 cpi->transport = XPORT_FC; 3592 cpi->transport_version = 0; 3593 cpi->protocol_version = SCSI_REV_SPC; 3594 cpi->xport_specific.fc.wwnn = mpt->scinfo.fc.wwnn; 3595 cpi->xport_specific.fc.wwpn = mpt->scinfo.fc.wwpn; 3596 cpi->xport_specific.fc.port = mpt->scinfo.fc.portid; 3597 cpi->xport_specific.fc.bitrate = 3598 100000 * mpt->mpt_fcport_speed; 3599 } else if (mpt->is_sas) { 3600 cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED; 3601 cpi->base_transfer_speed = 300000; 3602 cpi->hba_inquiry = PI_TAG_ABLE; 3603 cpi->transport = XPORT_SAS; 3604 cpi->transport_version = 0; 3605 cpi->protocol_version = SCSI_REV_SPC2; 3606 } else { 3607 cpi->hba_misc = PIM_SEQSCAN | PIM_UNMAPPED; 3608 cpi->base_transfer_speed = 3300; 3609 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16; 3610 cpi->transport = XPORT_SPI; 3611 cpi->transport_version = 2; 3612 cpi->protocol_version = SCSI_REV_2; 3613 } 3614 3615 /* 3616 * We give our fake RAID passhtru bus a width that is MaxVolumes 3617 * wide and restrict it to one lun. 3618 */ 3619 if (raid_passthru) { 3620 cpi->max_target = mpt->ioc_page2->MaxPhysDisks - 1; 3621 cpi->initiator_id = cpi->max_target + 1; 3622 cpi->max_lun = 0; 3623 } 3624 3625 if ((mpt->role & MPT_ROLE_INITIATOR) == 0) { 3626 cpi->hba_misc |= PIM_NOINITIATOR; 3627 } 3628 if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) { 3629 cpi->target_sprt = 3630 PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO; 3631 } else { 3632 cpi->target_sprt = 0; 3633 } 3634 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 3635 strlcpy(cpi->hba_vid, "LSI", HBA_IDLEN); 3636 strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); 3637 cpi->unit_number = cam_sim_unit(sim); 3638 cpi->ccb_h.status = CAM_REQ_CMP; 3639 break; 3640 } 3641 case XPT_EN_LUN: /* Enable LUN as a target */ 3642 { 3643 int result; 3644 3645 if (ccb->cel.enable) 3646 result = mpt_enable_lun(mpt, 3647 ccb->ccb_h.target_id, ccb->ccb_h.target_lun); 3648 else 3649 result = mpt_disable_lun(mpt, 3650 ccb->ccb_h.target_id, ccb->ccb_h.target_lun); 3651 if (result == 0) { 3652 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 3653 } else { 3654 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); 3655 } 3656 break; 3657 } 3658 case XPT_NOTIFY_ACKNOWLEDGE: /* recycle notify ack */ 3659 case XPT_IMMEDIATE_NOTIFY: /* Add Immediate Notify Resource */ 3660 case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */ 3661 { 3662 tgt_resource_t *trtp; 3663 lun_id_t lun = ccb->ccb_h.target_lun; 3664 ccb->ccb_h.sim_priv.entries[0].field = 0; 3665 ccb->ccb_h.sim_priv.entries[1].ptr = mpt; 3666 3667 if (lun == CAM_LUN_WILDCARD) { 3668 if (ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) { 3669 mpt_set_ccb_status(ccb, CAM_REQ_INVALID); 3670 break; 3671 } 3672 trtp = &mpt->trt_wildcard; 3673 } else if (lun >= MPT_MAX_LUNS) { 3674 mpt_set_ccb_status(ccb, CAM_REQ_INVALID); 3675 break; 3676 } else { 3677 trtp = &mpt->trt[lun]; 3678 } 3679 if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { 3680 mpt_lprt(mpt, MPT_PRT_DEBUG1, 3681 "Put FREE ATIO %p lun %jx\n", ccb, (uintmax_t)lun); 3682 STAILQ_INSERT_TAIL(&trtp->atios, &ccb->ccb_h, 3683 sim_links.stqe); 3684 } else if (ccb->ccb_h.func_code == XPT_IMMEDIATE_NOTIFY) { 3685 mpt_lprt(mpt, MPT_PRT_DEBUG1, 3686 "Put FREE INOT lun %jx\n", (uintmax_t)lun); 3687 STAILQ_INSERT_TAIL(&trtp->inots, &ccb->ccb_h, 3688 sim_links.stqe); 3689 } else { 3690 mpt_lprt(mpt, 
MPT_PRT_ALWAYS, "Got Notify ACK\n"); 3691 } 3692 mpt_set_ccb_status(ccb, CAM_REQ_INPROG); 3693 return; 3694 } 3695 case XPT_CONT_TARGET_IO: 3696 mpt_target_start_io(mpt, ccb); 3697 return; 3698 3699 default: 3700 ccb->ccb_h.status = CAM_REQ_INVALID; 3701 break; 3702 } 3703 xpt_done(ccb); 3704} 3705 3706static int 3707mpt_get_spi_settings(struct mpt_softc *mpt, struct ccb_trans_settings *cts) 3708{ 3709 struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; 3710 struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; 3711 target_id_t tgt; 3712 uint32_t dval, pval, oval; 3713 int rv; 3714 3715 if (IS_CURRENT_SETTINGS(cts) == 0) { 3716 tgt = cts->ccb_h.target_id; 3717 } else if (xpt_path_sim(cts->ccb_h.path) == mpt->phydisk_sim) { 3718 if (mpt_map_physdisk(mpt, (union ccb *)cts, &tgt)) { 3719 return (-1); 3720 } 3721 } else { 3722 tgt = cts->ccb_h.target_id; 3723 } 3724 3725 /* 3726 * We aren't looking at Port Page 2 BIOS settings here- 3727 * sometimes these have been known to be bogus XXX. 3728 * 3729 * For user settings, we pick the max from port page 0 3730 * 3731 * For current settings we read the current settings out from 3732 * device page 0 for that target. 3733 */ 3734 if (IS_CURRENT_SETTINGS(cts)) { 3735 CONFIG_PAGE_SCSI_DEVICE_0 tmp; 3736 dval = 0; 3737 3738 tmp = mpt->mpt_dev_page0[tgt]; 3739 rv = mpt_read_cur_cfg_page(mpt, tgt, &tmp.Header, 3740 sizeof(tmp), FALSE, 5000); 3741 if (rv) { 3742 mpt_prt(mpt, "can't get tgt %d config page 0\n", tgt); 3743 return (rv); 3744 } 3745 mpt2host_config_page_scsi_device_0(&tmp); 3746 3747 mpt_lprt(mpt, MPT_PRT_DEBUG, 3748 "mpt_get_spi_settings[%d]: current NP %x Info %x\n", tgt, 3749 tmp.NegotiatedParameters, tmp.Information); 3750 dval |= (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE) ? 3751 DP_WIDE : DP_NARROW; 3752 dval |= (mpt->mpt_disc_enable & (1 << tgt)) ? 3753 DP_DISC_ENABLE : DP_DISC_DISABL; 3754 dval |= (mpt->mpt_tag_enable & (1 << tgt)) ? 3755 DP_TQING_ENABLE : DP_TQING_DISABL; 3756 oval = tmp.NegotiatedParameters; 3757 oval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK; 3758 oval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_OFFSET; 3759 pval = tmp.NegotiatedParameters; 3760 pval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK; 3761 pval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_PERIOD; 3762 mpt->mpt_dev_page0[tgt] = tmp; 3763 } else { 3764 dval = DP_WIDE|DP_DISC_ENABLE|DP_TQING_ENABLE|DP_SYNC; 3765 oval = mpt->mpt_port_page0.Capabilities; 3766 oval = MPI_SCSIPORTPAGE0_CAP_GET_MAX_SYNC_OFFSET(oval); 3767 pval = mpt->mpt_port_page0.Capabilities; 3768 pval = MPI_SCSIPORTPAGE0_CAP_GET_MIN_SYNC_PERIOD(pval); 3769 } 3770 3771 spi->valid = 0; 3772 scsi->valid = 0; 3773 spi->flags = 0; 3774 scsi->flags = 0; 3775 spi->sync_offset = oval; 3776 spi->sync_period = pval; 3777 spi->valid |= CTS_SPI_VALID_SYNC_OFFSET; 3778 spi->valid |= CTS_SPI_VALID_SYNC_RATE; 3779 spi->valid |= CTS_SPI_VALID_BUS_WIDTH; 3780 if (dval & DP_WIDE) { 3781 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 3782 } else { 3783 spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 3784 } 3785 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) { 3786 scsi->valid = CTS_SCSI_VALID_TQ; 3787 if (dval & DP_TQING_ENABLE) { 3788 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; 3789 } 3790 spi->valid |= CTS_SPI_VALID_DISC; 3791 if (dval & DP_DISC_ENABLE) { 3792 spi->flags |= CTS_SPI_FLAGS_DISC_ENB; 3793 } 3794 } 3795 3796 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 3797 "mpt_get_spi_settings[%d]: %s flags 0x%x per 0x%x off=%d\n", tgt, 3798 IS_CURRENT_SETTINGS(cts) ? 
"ACTIVE" : "NVRAM ", dval, pval, oval); 3799 return (0); 3800} 3801 3802static void 3803mpt_setwidth(struct mpt_softc *mpt, int tgt, int onoff) 3804{ 3805 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr; 3806 3807 ptr = &mpt->mpt_dev_page1[tgt]; 3808 if (onoff) { 3809 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE; 3810 } else { 3811 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE; 3812 } 3813} 3814 3815static void 3816mpt_setsync(struct mpt_softc *mpt, int tgt, int period, int offset) 3817{ 3818 PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr; 3819 3820 ptr = &mpt->mpt_dev_page1[tgt]; 3821 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK; 3822 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK; 3823 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_DT; 3824 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_QAS; 3825 ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_IU; 3826 if (period == 0) { 3827 return; 3828 } 3829 ptr->RequestedParameters |= 3830 period << MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD; 3831 ptr->RequestedParameters |= 3832 offset << MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET; 3833 if (period < 0xa) { 3834 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_DT; 3835 } 3836 if (period < 0x9) { 3837 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_QAS; 3838 ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_IU; 3839 } 3840} 3841 3842static int 3843mpt_update_spi_config(struct mpt_softc *mpt, int tgt) 3844{ 3845 CONFIG_PAGE_SCSI_DEVICE_1 tmp; 3846 int rv; 3847 3848 mpt_lprt(mpt, MPT_PRT_NEGOTIATION, 3849 "mpt_update_spi_config[%d].page1: Requested Params 0x%08x\n", 3850 tgt, mpt->mpt_dev_page1[tgt].RequestedParameters); 3851 tmp = mpt->mpt_dev_page1[tgt]; 3852 host2mpt_config_page_scsi_device_1(&tmp); 3853 rv = mpt_write_cur_cfg_page(mpt, tgt, 3854 &tmp.Header, sizeof(tmp), FALSE, 5000); 3855 if (rv) { 3856 mpt_prt(mpt, "mpt_update_spi_config: write cur page failed\n"); 3857 return (-1); 3858 } 3859 return (0); 3860} 3861 3862/****************************** Timeout Recovery ******************************/ 3863static int 3864mpt_spawn_recovery_thread(struct mpt_softc *mpt) 3865{ 3866 int error; 3867 3868 error = kproc_create(mpt_recovery_thread, mpt, 3869 &mpt->recovery_thread, /*flags*/0, 3870 /*altstack*/0, "mpt_recovery%d", mpt->unit); 3871 return (error); 3872} 3873 3874static void 3875mpt_terminate_recovery_thread(struct mpt_softc *mpt) 3876{ 3877 3878 if (mpt->recovery_thread == NULL) { 3879 return; 3880 } 3881 mpt->shutdwn_recovery = 1; 3882 wakeup(mpt); 3883 /* 3884 * Sleep on a slightly different location 3885 * for this interlock just for added safety. 3886 */ 3887 mpt_sleep(mpt, &mpt->recovery_thread, PUSER, "thtrm", 0); 3888} 3889 3890static void 3891mpt_recovery_thread(void *arg) 3892{ 3893 struct mpt_softc *mpt; 3894 3895 mpt = (struct mpt_softc *)arg; 3896 MPT_LOCK(mpt); 3897 for (;;) { 3898 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) { 3899 if (mpt->shutdwn_recovery == 0) { 3900 mpt_sleep(mpt, mpt, PUSER, "idle", 0); 3901 } 3902 } 3903 if (mpt->shutdwn_recovery != 0) { 3904 break; 3905 } 3906 mpt_recover_commands(mpt); 3907 } 3908 mpt->recovery_thread = NULL; 3909 wakeup(&mpt->recovery_thread); 3910 MPT_UNLOCK(mpt); 3911 kproc_exit(0); 3912} 3913 3914static int 3915mpt_scsi_send_tmf(struct mpt_softc *mpt, u_int type, u_int flags, 3916 u_int channel, u_int target, u_int lun, u_int abort_ctx, int sleep_ok) 3917{ 3918 MSG_SCSI_TASK_MGMT *tmf_req; 3919 int error; 3920 3921 /* 3922 * Wait for any current TMF request to complete. 
3923 * We're only allowed to issue one TMF at a time. 3924 */ 3925 error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_FREE, REQ_STATE_FREE, 3926 sleep_ok, MPT_TMF_MAX_TIMEOUT); 3927 if (error != 0) { 3928 mpt_reset(mpt, TRUE); 3929 return (ETIMEDOUT); 3930 } 3931 3932 mpt_assign_serno(mpt, mpt->tmf_req); 3933 mpt->tmf_req->state = REQ_STATE_ALLOCATED|REQ_STATE_QUEUED; 3934 3935 tmf_req = (MSG_SCSI_TASK_MGMT *)mpt->tmf_req->req_vbuf; 3936 memset(tmf_req, 0, sizeof(*tmf_req)); 3937 tmf_req->TargetID = target; 3938 tmf_req->Bus = channel; 3939 tmf_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT; 3940 tmf_req->TaskType = type; 3941 tmf_req->MsgFlags = flags; 3942 tmf_req->MsgContext = 3943 htole32(mpt->tmf_req->index | scsi_tmf_handler_id); 3944 if (lun > MPT_MAX_LUNS) { 3945 tmf_req->LUN[0] = 0x40 | ((lun >> 8) & 0x3f); 3946 tmf_req->LUN[1] = lun & 0xff; 3947 } else { 3948 tmf_req->LUN[1] = lun; 3949 } 3950 tmf_req->TaskMsgContext = abort_ctx; 3951 3952 mpt_lprt(mpt, MPT_PRT_DEBUG, 3953 "Issuing TMF %p:%u with MsgContext of 0x%x\n", mpt->tmf_req, 3954 mpt->tmf_req->serno, tmf_req->MsgContext); 3955 if (mpt->verbose > MPT_PRT_DEBUG) { 3956 mpt_print_request(tmf_req); 3957 } 3958 3959 KASSERT(mpt_req_on_pending_list(mpt, mpt->tmf_req) == 0, 3960 ("mpt_scsi_send_tmf: tmf_req already on pending list")); 3961 TAILQ_INSERT_HEAD(&mpt->request_pending_list, mpt->tmf_req, links); 3962 error = mpt_send_handshake_cmd(mpt, sizeof(*tmf_req), tmf_req); 3963 if (error != MPT_OK) { 3964 TAILQ_REMOVE(&mpt->request_pending_list, mpt->tmf_req, links); 3965 mpt->tmf_req->state = REQ_STATE_FREE; 3966 mpt_reset(mpt, TRUE); 3967 } 3968 return (error); 3969} 3970 3971/* 3972 * When a command times out, it is placed on the requeust_timeout_list 3973 * and we wake our recovery thread. The MPT-Fusion architecture supports 3974 * only a single TMF operation at a time, so we serially abort/bdr, etc, 3975 * the timedout transactions. The next TMF is issued either by the 3976 * completion handler of the current TMF waking our recovery thread, 3977 * or the TMF timeout handler causing a hard reset sequence. 3978 */ 3979static void 3980mpt_recover_commands(struct mpt_softc *mpt) 3981{ 3982 request_t *req; 3983 union ccb *ccb; 3984 int error; 3985 3986 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) { 3987 /* 3988 * No work to do- leave. 3989 */ 3990 mpt_prt(mpt, "mpt_recover_commands: no requests.\n"); 3991 return; 3992 } 3993 3994 /* 3995 * Flush any commands whose completion coincides with their timeout. 3996 */ 3997 mpt_intr(mpt); 3998 3999 if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) { 4000 /* 4001 * The timedout commands have already 4002 * completed. This typically means 4003 * that either the timeout value was on 4004 * the hairy edge of what the device 4005 * requires or - more likely - interrupts 4006 * are not happening. 4007 */ 4008 mpt_prt(mpt, "Timedout requests already complete. " 4009 "Interrupts may not be functioning.\n"); 4010 mpt_enable_ints(mpt); 4011 return; 4012 } 4013 4014 /* 4015 * We have no visibility into the current state of the 4016 * controller, so attempt to abort the commands in the 4017 * order they timed-out. For initiator commands, we 4018 * depend on the reply handler pulling requests off 4019 * the timeout list. 
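	 *
	 * Each initiator request pulled off the list below gets an
	 * ABORT_TASK TMF via mpt_scsi_send_tmf(); we then wait briefly
	 * for that TMF to complete and hard reset the controller if the
	 * abort times out, if the IOC status is not SUCCESS, or if the
	 * TMF response code is neither SUCCEEDED nor COMPLETE.
	 * Non-initiator (target mode) requests are simply put back on
	 * the pending list for now.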
4020 */ 4021 while ((req = TAILQ_FIRST(&mpt->request_timeout_list)) != NULL) { 4022 uint16_t status; 4023 uint8_t response; 4024 MSG_REQUEST_HEADER *hdrp = req->req_vbuf; 4025 4026 mpt_prt(mpt, "attempting to abort req %p:%u function %x\n", 4027 req, req->serno, hdrp->Function); 4028 ccb = req->ccb; 4029 if (ccb == NULL) { 4030 mpt_prt(mpt, "null ccb in timed out request. " 4031 "Resetting Controller.\n"); 4032 mpt_reset(mpt, TRUE); 4033 continue; 4034 } 4035 mpt_set_ccb_status(ccb, CAM_CMD_TIMEOUT); 4036 4037 /* 4038 * Check to see if this is not an initiator command and 4039 * deal with it differently if it is. 4040 */ 4041 switch (hdrp->Function) { 4042 case MPI_FUNCTION_SCSI_IO_REQUEST: 4043 case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH: 4044 break; 4045 default: 4046 /* 4047 * XXX: FIX ME: need to abort target assists... 4048 */ 4049 mpt_prt(mpt, "just putting it back on the pend q\n"); 4050 TAILQ_REMOVE(&mpt->request_timeout_list, req, links); 4051 TAILQ_INSERT_HEAD(&mpt->request_pending_list, req, 4052 links); 4053 continue; 4054 } 4055 4056 error = mpt_scsi_send_tmf(mpt, 4057 MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK, 4058 0, 0, ccb->ccb_h.target_id, ccb->ccb_h.target_lun, 4059 htole32(req->index | scsi_io_handler_id), TRUE); 4060 4061 if (error != 0) { 4062 /* 4063 * mpt_scsi_send_tmf hard resets on failure, so no 4064 * need to do so here. Our queue should be emptied 4065 * by the hard reset. 4066 */ 4067 continue; 4068 } 4069 4070 error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE, 4071 REQ_STATE_DONE, TRUE, 500); 4072 4073 status = le16toh(mpt->tmf_req->IOCStatus); 4074 response = mpt->tmf_req->ResponseCode; 4075 mpt->tmf_req->state = REQ_STATE_FREE; 4076 4077 if (error != 0) { 4078 /* 4079 * If we've errored out,, reset the controller. 4080 */ 4081 mpt_prt(mpt, "mpt_recover_commands: abort timed-out. " 4082 "Resetting controller\n"); 4083 mpt_reset(mpt, TRUE); 4084 continue; 4085 } 4086 4087 if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { 4088 mpt_prt(mpt, "mpt_recover_commands: IOC Status 0x%x. " 4089 "Resetting controller.\n", status); 4090 mpt_reset(mpt, TRUE); 4091 continue; 4092 } 4093 4094 if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED && 4095 response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) { 4096 mpt_prt(mpt, "mpt_recover_commands: TMF Response 0x%x. " 4097 "Resetting controller.\n", response); 4098 mpt_reset(mpt, TRUE); 4099 continue; 4100 } 4101 mpt_prt(mpt, "abort of req %p:%u completed\n", req, req->serno); 4102 } 4103} 4104 4105/************************ Target Mode Support ****************************/ 4106static void 4107mpt_fc_post_els(struct mpt_softc *mpt, request_t *req, int ioindex) 4108{ 4109 MSG_LINK_SERVICE_BUFFER_POST_REQUEST *fc; 4110 PTR_SGE_TRANSACTION32 tep; 4111 PTR_SGE_SIMPLE32 se; 4112 bus_addr_t paddr; 4113 uint32_t fl; 4114 4115 paddr = req->req_pbuf; 4116 paddr += MPT_RQSL(mpt); 4117 4118 fc = req->req_vbuf; 4119 memset(fc, 0, MPT_REQUEST_AREA); 4120 fc->BufferCount = 1; 4121 fc->Function = MPI_FUNCTION_FC_LINK_SRVC_BUF_POST; 4122 fc->MsgContext = htole32(req->index | fc_els_handler_id); 4123 4124 /* 4125 * Okay, set up ELS buffer pointers. ELS buffer pointers 4126 * consist of a TE SGL element (with details length of zero) 4127 * followed by a SIMPLE SGL element which holds the address 4128 * of the buffer. 
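	 *
	 * The transaction element carries the ELS ioindex in its
	 * TransactionContext; the simple element that follows it gets a
	 * FlagsLength combining HOST_TO_IOC, SIMPLE, LAST, END_OF_LIST
	 * and END_OF_BUFFER with a length of MPT_NRFM(mpt) - MPT_RQSL(mpt),
	 * and an Address pointing just past the request header
	 * (req_pbuf + MPT_RQSL(mpt)).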
4129 */ 4130 4131 tep = (PTR_SGE_TRANSACTION32) &fc->SGL; 4132 4133 tep->ContextSize = 4; 4134 tep->Flags = 0; 4135 tep->TransactionContext[0] = htole32(ioindex); 4136 4137 se = (PTR_SGE_SIMPLE32) &tep->TransactionDetails[0]; 4138 fl = 4139 MPI_SGE_FLAGS_HOST_TO_IOC | 4140 MPI_SGE_FLAGS_SIMPLE_ELEMENT | 4141 MPI_SGE_FLAGS_LAST_ELEMENT | 4142 MPI_SGE_FLAGS_END_OF_LIST | 4143 MPI_SGE_FLAGS_END_OF_BUFFER; 4144 fl <<= MPI_SGE_FLAGS_SHIFT; 4145 fl |= (MPT_NRFM(mpt) - MPT_RQSL(mpt)); 4146 se->FlagsLength = htole32(fl); 4147 se->Address = htole32((uint32_t) paddr); 4148 mpt_lprt(mpt, MPT_PRT_DEBUG, 4149 "add ELS index %d ioindex %d for %p:%u\n", 4150 req->index, ioindex, req, req->serno); 4151 KASSERT(((req->state & REQ_STATE_LOCKED) != 0), 4152 ("mpt_fc_post_els: request not locked")); 4153 mpt_send_cmd(mpt, req); 4154} 4155 4156static void 4157mpt_post_target_command(struct mpt_softc *mpt, request_t *req, int ioindex) 4158{ 4159 PTR_MSG_TARGET_CMD_BUFFER_POST_REQUEST fc; 4160 PTR_CMD_BUFFER_DESCRIPTOR cb; 4161 bus_addr_t paddr; 4162 4163 paddr = req->req_pbuf; 4164 paddr += MPT_RQSL(mpt); 4165 memset(req->req_vbuf, 0, MPT_REQUEST_AREA); 4166 MPT_TGT_STATE(mpt, req)->state = TGT_STATE_LOADING; 4167 4168 fc = req->req_vbuf; 4169 fc->BufferCount = 1; 4170 fc->Function = MPI_FUNCTION_TARGET_CMD_BUFFER_POST; 4171 fc->BufferLength = MIN(MPT_REQUEST_AREA - MPT_RQSL(mpt), UINT8_MAX); 4172 fc->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); 4173 4174 cb = &fc->Buffer[0]; 4175 cb->IoIndex = htole16(ioindex); 4176 cb->u.PhysicalAddress32 = htole32((U32) paddr); 4177 4178 mpt_check_doorbell(mpt); 4179 mpt_send_cmd(mpt, req); 4180} 4181 4182static int 4183mpt_add_els_buffers(struct mpt_softc *mpt) 4184{ 4185 int i; 4186 4187 if (mpt->is_fc == 0) { 4188 return (TRUE); 4189 } 4190 4191 if (mpt->els_cmds_allocated) { 4192 return (TRUE); 4193 } 4194 4195 mpt->els_cmd_ptrs = malloc(MPT_MAX_ELS * sizeof (request_t *), 4196 M_DEVBUF, M_NOWAIT | M_ZERO); 4197 4198 if (mpt->els_cmd_ptrs == NULL) { 4199 return (FALSE); 4200 } 4201 4202 /* 4203 * Feed the chip some ELS buffer resources 4204 */ 4205 for (i = 0; i < MPT_MAX_ELS; i++) { 4206 request_t *req = mpt_get_request(mpt, FALSE); 4207 if (req == NULL) { 4208 break; 4209 } 4210 req->state |= REQ_STATE_LOCKED; 4211 mpt->els_cmd_ptrs[i] = req; 4212 mpt_fc_post_els(mpt, req, i); 4213 } 4214 4215 if (i == 0) { 4216 mpt_prt(mpt, "unable to add ELS buffer resources\n"); 4217 free(mpt->els_cmd_ptrs, M_DEVBUF); 4218 mpt->els_cmd_ptrs = NULL; 4219 return (FALSE); 4220 } 4221 if (i != MPT_MAX_ELS) { 4222 mpt_lprt(mpt, MPT_PRT_INFO, 4223 "only added %d of %d ELS buffers\n", i, MPT_MAX_ELS); 4224 } 4225 mpt->els_cmds_allocated = i; 4226 return(TRUE); 4227} 4228 4229static int 4230mpt_add_target_commands(struct mpt_softc *mpt) 4231{ 4232 int i, max; 4233 4234 if (mpt->tgt_cmd_ptrs) { 4235 return (TRUE); 4236 } 4237 4238 max = MPT_MAX_REQUESTS(mpt) >> 1; 4239 if (max > mpt->mpt_max_tgtcmds) { 4240 max = mpt->mpt_max_tgtcmds; 4241 } 4242 mpt->tgt_cmd_ptrs = 4243 malloc(max * sizeof (request_t *), M_DEVBUF, M_NOWAIT | M_ZERO); 4244 if (mpt->tgt_cmd_ptrs == NULL) { 4245 mpt_prt(mpt, 4246 "mpt_add_target_commands: could not allocate cmd ptrs\n"); 4247 return (FALSE); 4248 } 4249 4250 for (i = 0; i < max; i++) { 4251 request_t *req; 4252 4253 req = mpt_get_request(mpt, FALSE); 4254 if (req == NULL) { 4255 break; 4256 } 4257 req->state |= REQ_STATE_LOCKED; 4258 mpt->tgt_cmd_ptrs[i] = req; 4259 mpt_post_target_command(mpt, req, i); 4260 } 4261 4262 4263 if (i == 0) { 4264 
mpt_lprt(mpt, MPT_PRT_ERROR, "could not add any target bufs\n"); 4265 free(mpt->tgt_cmd_ptrs, M_DEVBUF); 4266 mpt->tgt_cmd_ptrs = NULL; 4267 return (FALSE); 4268 } 4269 4270 mpt->tgt_cmds_allocated = i; 4271 4272 if (i < max) { 4273 mpt_lprt(mpt, MPT_PRT_INFO, 4274 "added %d of %d target bufs\n", i, max); 4275 } 4276 return (i); 4277} 4278 4279static int 4280mpt_enable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun) 4281{ 4282 4283 if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) { 4284 mpt->twildcard = 1; 4285 } else if (lun >= MPT_MAX_LUNS) { 4286 return (EINVAL); 4287 } else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) { 4288 return (EINVAL); 4289 } 4290 if (mpt->tenabled == 0) { 4291 if (mpt->is_fc) { 4292 (void) mpt_fc_reset_link(mpt, 0); 4293 } 4294 mpt->tenabled = 1; 4295 } 4296 if (lun == CAM_LUN_WILDCARD) { 4297 mpt->trt_wildcard.enabled = 1; 4298 } else { 4299 mpt->trt[lun].enabled = 1; 4300 } 4301 return (0); 4302} 4303 4304static int 4305mpt_disable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun) 4306{ 4307 int i; 4308 4309 if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) { 4310 mpt->twildcard = 0; 4311 } else if (lun >= MPT_MAX_LUNS) { 4312 return (EINVAL); 4313 } else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) { 4314 return (EINVAL); 4315 } 4316 if (lun == CAM_LUN_WILDCARD) { 4317 mpt->trt_wildcard.enabled = 0; 4318 } else { 4319 mpt->trt[lun].enabled = 0; 4320 } 4321 for (i = 0; i < MPT_MAX_LUNS; i++) { 4322 if (mpt->trt[i].enabled) { 4323 break; 4324 } 4325 } 4326 if (i == MPT_MAX_LUNS && mpt->twildcard == 0) { 4327 if (mpt->is_fc) { 4328 (void) mpt_fc_reset_link(mpt, 0); 4329 } 4330 mpt->tenabled = 0; 4331 } 4332 return (0); 4333} 4334 4335/* 4336 * Called with MPT lock held 4337 */ 4338static void 4339mpt_target_start_io(struct mpt_softc *mpt, union ccb *ccb) 4340{ 4341 struct ccb_scsiio *csio = &ccb->csio; 4342 request_t *cmd_req = MPT_TAG_2_REQ(mpt, csio->tag_id); 4343 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req); 4344 4345 switch (tgt->state) { 4346 case TGT_STATE_IN_CAM: 4347 break; 4348 case TGT_STATE_MOVING_DATA: 4349 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); 4350 xpt_freeze_simq(mpt->sim, 1); 4351 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 4352 tgt->ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 4353 xpt_done(ccb); 4354 return; 4355 default: 4356 mpt_prt(mpt, "ccb %p flags 0x%x tag 0x%08x had bad request " 4357 "starting I/O\n", ccb, csio->ccb_h.flags, csio->tag_id); 4358 mpt_tgt_dump_req_state(mpt, cmd_req); 4359 mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); 4360 xpt_done(ccb); 4361 return; 4362 } 4363 4364 if (csio->dxfer_len) { 4365 bus_dmamap_callback_t *cb; 4366 PTR_MSG_TARGET_ASSIST_REQUEST ta; 4367 request_t *req; 4368 int error; 4369 4370 KASSERT((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE, 4371 ("dxfer_len %u but direction is NONE", csio->dxfer_len)); 4372 4373 if ((req = mpt_get_request(mpt, FALSE)) == NULL) { 4374 if (mpt->outofbeer == 0) { 4375 mpt->outofbeer = 1; 4376 xpt_freeze_simq(mpt->sim, 1); 4377 mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n"); 4378 } 4379 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 4380 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); 4381 xpt_done(ccb); 4382 return; 4383 } 4384 ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG; 4385 if (sizeof (bus_addr_t) > 4) { 4386 cb = mpt_execute_req_a64; 4387 } else { 4388 cb = mpt_execute_req; 4389 } 4390 4391 req->ccb = ccb; 4392 ccb->ccb_h.ccb_req_ptr = req; 4393 4394 /* 4395 * Record the currently active ccb and the 4396 * request for it in our target state area. 
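		 *
		 * The reply handler uses tgt->req and tgt->ccb when the
		 * TARGET_ASSIST completes (TGT_STATE_MOVING_DATA) to
		 * cancel the request timeout, free the assist request,
		 * and decide whether status still needs to be sent.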
4397 */ 4398 tgt->ccb = ccb; 4399 tgt->req = req; 4400 4401 memset(req->req_vbuf, 0, MPT_RQSL(mpt)); 4402 ta = req->req_vbuf; 4403 4404 if (mpt->is_sas) { 4405 PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = 4406 cmd_req->req_vbuf; 4407 ta->QueueTag = ssp->InitiatorTag; 4408 } else if (mpt->is_spi) { 4409 PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = 4410 cmd_req->req_vbuf; 4411 ta->QueueTag = sp->Tag; 4412 } 4413 ta->Function = MPI_FUNCTION_TARGET_ASSIST; 4414 ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); 4415 ta->ReplyWord = htole32(tgt->reply_desc); 4416 if (csio->ccb_h.target_lun > MPT_MAX_LUNS) { 4417 ta->LUN[0] = 4418 0x40 | ((csio->ccb_h.target_lun >> 8) & 0x3f); 4419 ta->LUN[1] = csio->ccb_h.target_lun & 0xff; 4420 } else { 4421 ta->LUN[1] = csio->ccb_h.target_lun; 4422 } 4423 4424 ta->RelativeOffset = tgt->bytes_xfered; 4425 ta->DataLength = ccb->csio.dxfer_len; 4426 if (ta->DataLength > tgt->resid) { 4427 ta->DataLength = tgt->resid; 4428 } 4429 4430 /* 4431 * XXX Should be done after data transfer completes? 4432 */ 4433 tgt->resid -= csio->dxfer_len; 4434 tgt->bytes_xfered += csio->dxfer_len; 4435 4436 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 4437 ta->TargetAssistFlags |= 4438 TARGET_ASSIST_FLAGS_DATA_DIRECTION; 4439 } 4440 4441#ifdef WE_TRUST_AUTO_GOOD_STATUS 4442 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) && 4443 csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) { 4444 ta->TargetAssistFlags |= 4445 TARGET_ASSIST_FLAGS_AUTO_STATUS; 4446 } 4447#endif 4448 tgt->state = TGT_STATE_SETTING_UP_FOR_DATA; 4449 4450 mpt_lprt(mpt, MPT_PRT_DEBUG, 4451 "DATA_CCB %p tag %x %u bytes %u resid flg %x req %p:%u " 4452 "nxtstate=%d\n", csio, csio->tag_id, csio->dxfer_len, 4453 tgt->resid, ccb->ccb_h.flags, req, req->serno, tgt->state); 4454 4455 error = bus_dmamap_load_ccb(mpt->buffer_dmat, req->dmap, ccb, 4456 cb, req, 0); 4457 if (error == EINPROGRESS) { 4458 xpt_freeze_simq(mpt->sim, 1); 4459 ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 4460 } 4461 } else { 4462 /* 4463 * XXX: I don't know why this seems to happen, but 4464 * XXX: completing the CCB seems to make things happy. 4465 * XXX: This seems to happen if the initiator requests 4466 * XXX: enough data that we have to do multiple CTIOs. 4467 */ 4468 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) { 4469 mpt_lprt(mpt, MPT_PRT_DEBUG, 4470 "Meaningless STATUS CCB (%p): flags %x status %x " 4471 "resid %d bytes_xfered %u\n", ccb, ccb->ccb_h.flags, 4472 ccb->ccb_h.status, tgt->resid, tgt->bytes_xfered); 4473 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 4474 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 4475 xpt_done(ccb); 4476 return; 4477 } 4478 mpt_scsi_tgt_status(mpt, ccb, cmd_req, csio->scsi_status, 4479 (void *)&csio->sense_data, 4480 (ccb->ccb_h.flags & CAM_SEND_SENSE) ? 4481 csio->sense_len : 0); 4482 } 4483} 4484 4485static void 4486mpt_scsi_tgt_local(struct mpt_softc *mpt, request_t *cmd_req, 4487 uint32_t lun, int send, uint8_t *data, size_t length) 4488{ 4489 mpt_tgt_state_t *tgt; 4490 PTR_MSG_TARGET_ASSIST_REQUEST ta; 4491 SGE_SIMPLE32 *se; 4492 uint32_t flags; 4493 uint8_t *dptr; 4494 bus_addr_t pptr; 4495 request_t *req; 4496 4497 /* 4498 * We enter with resid set to the data load for the command. 
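	 *
	 * This path synthesizes small local responses (e.g. the INQUIRY
	 * and REQUEST SENSE data for unconfigured luns).  At most
	 * MPT_RQSL(mpt) bytes are copied just past the request header,
	 * described with a single inline simple SGE, and moved by the
	 * chip with a TARGET_ASSIST before status is sent.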
4499 */ 4500 tgt = MPT_TGT_STATE(mpt, cmd_req); 4501 if (length == 0 || tgt->resid == 0) { 4502 tgt->resid = 0; 4503 mpt_scsi_tgt_status(mpt, NULL, cmd_req, 0, NULL, 0); 4504 return; 4505 } 4506 4507 if ((req = mpt_get_request(mpt, FALSE)) == NULL) { 4508 mpt_prt(mpt, "out of resources- dropping local response\n"); 4509 return; 4510 } 4511 tgt->is_local = 1; 4512 4513 4514 memset(req->req_vbuf, 0, MPT_RQSL(mpt)); 4515 ta = req->req_vbuf; 4516 4517 if (mpt->is_sas) { 4518 PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = cmd_req->req_vbuf; 4519 ta->QueueTag = ssp->InitiatorTag; 4520 } else if (mpt->is_spi) { 4521 PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = cmd_req->req_vbuf; 4522 ta->QueueTag = sp->Tag; 4523 } 4524 ta->Function = MPI_FUNCTION_TARGET_ASSIST; 4525 ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); 4526 ta->ReplyWord = htole32(tgt->reply_desc); 4527 if (lun > MPT_MAX_LUNS) { 4528 ta->LUN[0] = 0x40 | ((lun >> 8) & 0x3f); 4529 ta->LUN[1] = lun & 0xff; 4530 } else { 4531 ta->LUN[1] = lun; 4532 } 4533 ta->RelativeOffset = 0; 4534 ta->DataLength = length; 4535 4536 dptr = req->req_vbuf; 4537 dptr += MPT_RQSL(mpt); 4538 pptr = req->req_pbuf; 4539 pptr += MPT_RQSL(mpt); 4540 memcpy(dptr, data, min(length, MPT_RQSL(mpt))); 4541 4542 se = (SGE_SIMPLE32 *) &ta->SGL[0]; 4543 memset(se, 0,sizeof (*se)); 4544 4545 flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT; 4546 if (send) { 4547 ta->TargetAssistFlags |= TARGET_ASSIST_FLAGS_DATA_DIRECTION; 4548 flags |= MPI_SGE_FLAGS_HOST_TO_IOC; 4549 } 4550 se->Address = pptr; 4551 MPI_pSGE_SET_LENGTH(se, length); 4552 flags |= MPI_SGE_FLAGS_LAST_ELEMENT; 4553 flags |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER; 4554 MPI_pSGE_SET_FLAGS(se, flags); 4555 4556 tgt->ccb = NULL; 4557 tgt->req = req; 4558 tgt->resid -= length; 4559 tgt->bytes_xfered = length; 4560#ifdef WE_TRUST_AUTO_GOOD_STATUS 4561 tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS; 4562#else 4563 tgt->state = TGT_STATE_MOVING_DATA; 4564#endif 4565 mpt_send_cmd(mpt, req); 4566} 4567 4568/* 4569 * Abort queued up CCBs 4570 */ 4571static cam_status 4572mpt_abort_target_ccb(struct mpt_softc *mpt, union ccb *ccb) 4573{ 4574 struct mpt_hdr_stailq *lp; 4575 struct ccb_hdr *srch; 4576 int found = 0; 4577 union ccb *accb = ccb->cab.abort_ccb; 4578 tgt_resource_t *trtp; 4579 4580 mpt_lprt(mpt, MPT_PRT_DEBUG, "aborting ccb %p\n", accb); 4581 4582 if (ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) { 4583 trtp = &mpt->trt_wildcard; 4584 } else { 4585 trtp = &mpt->trt[ccb->ccb_h.target_lun]; 4586 } 4587 4588 if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { 4589 lp = &trtp->atios; 4590 } else if (accb->ccb_h.func_code == XPT_IMMEDIATE_NOTIFY) { 4591 lp = &trtp->inots; 4592 } else { 4593 return (CAM_REQ_INVALID); 4594 } 4595 4596 STAILQ_FOREACH(srch, lp, sim_links.stqe) { 4597 if (srch == &accb->ccb_h) { 4598 found = 1; 4599 STAILQ_REMOVE(lp, srch, ccb_hdr, sim_links.stqe); 4600 break; 4601 } 4602 } 4603 if (found) { 4604 accb->ccb_h.status = CAM_REQ_ABORTED; 4605 xpt_done(accb); 4606 return (CAM_REQ_CMP); 4607 } 4608 mpt_prt(mpt, "mpt_abort_tgt_ccb: CCB %p not found\n", ccb); 4609 return (CAM_PATH_INVALID); 4610} 4611 4612/* 4613 * Ask the MPT to abort the current target command 4614 */ 4615static int 4616mpt_abort_target_cmd(struct mpt_softc *mpt, request_t *cmd_req) 4617{ 4618 int error; 4619 request_t *req; 4620 PTR_MSG_TARGET_MODE_ABORT abtp; 4621 4622 req = mpt_get_request(mpt, FALSE); 4623 if (req == NULL) { 4624 return (-1); 4625 } 4626 abtp = req->req_vbuf; 4627 memset(abtp, 0, sizeof (*abtp)); 4628 
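	/*
	 * Build a TARGET_MODE_ABORT that aborts exactly the I/O
	 * identified by the original command's reply word.  On FC and
	 * SAS the abort goes out through the normal request queue; on
	 * parallel SCSI it is pushed through the doorbell handshake.
	 */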
4629 abtp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); 4630 abtp->AbortType = TARGET_MODE_ABORT_TYPE_EXACT_IO; 4631 abtp->Function = MPI_FUNCTION_TARGET_MODE_ABORT; 4632 abtp->ReplyWord = htole32(MPT_TGT_STATE(mpt, cmd_req)->reply_desc); 4633 error = 0; 4634 if (mpt->is_fc || mpt->is_sas) { 4635 mpt_send_cmd(mpt, req); 4636 } else { 4637 error = mpt_send_handshake_cmd(mpt, sizeof(*req), req); 4638 } 4639 return (error); 4640} 4641 4642/* 4643 * WE_TRUST_AUTO_GOOD_STATUS- I've found that setting 4644 * TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS leads the 4645 * FC929 to set bogus FC_RSP fields (nonzero residuals 4646 * but w/o RESID fields set). This causes QLogic initiators 4647 * to think maybe that a frame was lost. 4648 * 4649 * WE_CAN_USE_AUTO_REPOST- we can't use AUTO_REPOST because 4650 * we use allocated requests to do TARGET_ASSIST and we 4651 * need to know when to release them. 4652 */ 4653 4654static void 4655mpt_scsi_tgt_status(struct mpt_softc *mpt, union ccb *ccb, request_t *cmd_req, 4656 uint8_t status, uint8_t const *sense_data, u_int sense_len) 4657{ 4658 uint8_t *cmd_vbuf; 4659 mpt_tgt_state_t *tgt; 4660 PTR_MSG_TARGET_STATUS_SEND_REQUEST tp; 4661 request_t *req; 4662 bus_addr_t paddr; 4663 int resplen = 0; 4664 uint32_t fl; 4665 4666 cmd_vbuf = cmd_req->req_vbuf; 4667 cmd_vbuf += MPT_RQSL(mpt); 4668 tgt = MPT_TGT_STATE(mpt, cmd_req); 4669 4670 if ((req = mpt_get_request(mpt, FALSE)) == NULL) { 4671 if (mpt->outofbeer == 0) { 4672 mpt->outofbeer = 1; 4673 xpt_freeze_simq(mpt->sim, 1); 4674 mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n"); 4675 } 4676 if (ccb) { 4677 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 4678 mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); 4679 xpt_done(ccb); 4680 } else { 4681 mpt_prt(mpt, 4682 "could not allocate status request- dropping\n"); 4683 } 4684 return; 4685 } 4686 req->ccb = ccb; 4687 if (ccb) { 4688 ccb->ccb_h.ccb_mpt_ptr = mpt; 4689 ccb->ccb_h.ccb_req_ptr = req; 4690 } 4691 4692 /* 4693 * Record the currently active ccb, if any, and the 4694 * request for it in our target state area. 4695 */ 4696 tgt->ccb = ccb; 4697 tgt->req = req; 4698 tgt->state = TGT_STATE_SENDING_STATUS; 4699 4700 tp = req->req_vbuf; 4701 paddr = req->req_pbuf; 4702 paddr += MPT_RQSL(mpt); 4703 4704 memset(tp, 0, sizeof (*tp)); 4705 tp->Function = MPI_FUNCTION_TARGET_STATUS_SEND; 4706 if (mpt->is_fc) { 4707 PTR_MPI_TARGET_FCP_CMD_BUFFER fc = 4708 (PTR_MPI_TARGET_FCP_CMD_BUFFER) cmd_vbuf; 4709 uint8_t *sts_vbuf; 4710 uint32_t *rsp; 4711 4712 sts_vbuf = req->req_vbuf; 4713 sts_vbuf += MPT_RQSL(mpt); 4714 rsp = (uint32_t *) sts_vbuf; 4715 memcpy(tp->LUN, fc->FcpLun, sizeof (tp->LUN)); 4716 4717 /* 4718 * The MPI_TARGET_FCP_RSP_BUFFER define is unfortunate. 4719 * It has to be big-endian in memory and is organized 4720 * in 32 bit words, which are much easier to deal with 4721 * as words which are swizzled as needed. 4722 * 4723 * All we're filling here is the FC_RSP payload. 4724 * We may just have the chip synthesize it if 4725 * we have no residual and an OK status. 4726 * 4727 */ 4728 memset(rsp, 0, sizeof (MPI_TARGET_FCP_RSP_BUFFER)); 4729 4730 rsp[2] = htobe32(status); 4731#define MIN_FCP_RESPONSE_SIZE 24 4732#ifndef WE_TRUST_AUTO_GOOD_STATUS 4733 resplen = MIN_FCP_RESPONSE_SIZE; 4734#endif 4735 if (tgt->resid) { 4736 rsp[2] |= htobe32(0x800); /* XXXX NEED MNEMONIC!!!! */ 4737 rsp[3] = htobe32(tgt->resid); 4738 resplen = MIN_FCP_RESPONSE_SIZE; 4739 } 4740 if (sense_len > 0) { 4741 rsp[2] |= htobe32(0x200); /* XXXX NEED MNEMONIC!!!! 
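			 * (presumably the FCP_SNS_LEN_VALID bit of the
			 * FCP_RSP flags byte, just as the 0x800 above
			 * looks like FCP_RESID_UNDER; an educated guess,
			 * not taken from the MPI headers)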
*/ 4742 rsp[4] = htobe32(sense_len); 4743 memcpy(&rsp[6], sense_data, sense_len); 4744 resplen = MIN_FCP_RESPONSE_SIZE + sense_len; 4745 } 4746 } else if (mpt->is_sas) { 4747 PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = 4748 (PTR_MPI_TARGET_SSP_CMD_BUFFER) cmd_vbuf; 4749 memcpy(tp->LUN, ssp->LogicalUnitNumber, sizeof (tp->LUN)); 4750 } else { 4751 PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = 4752 (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) cmd_vbuf; 4753 tp->StatusCode = status; 4754 tp->QueueTag = htole16(sp->Tag); 4755 memcpy(tp->LUN, sp->LogicalUnitNumber, sizeof (tp->LUN)); 4756 } 4757 4758 tp->ReplyWord = htole32(tgt->reply_desc); 4759 tp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); 4760 4761#ifdef WE_CAN_USE_AUTO_REPOST 4762 tp->MsgFlags = TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER; 4763#endif 4764 if (status == SCSI_STATUS_OK && resplen == 0) { 4765 tp->MsgFlags |= TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS; 4766 } else { 4767 tp->StatusDataSGE.u.Address32 = htole32((uint32_t) paddr); 4768 fl = 4769 MPI_SGE_FLAGS_HOST_TO_IOC | 4770 MPI_SGE_FLAGS_SIMPLE_ELEMENT | 4771 MPI_SGE_FLAGS_LAST_ELEMENT | 4772 MPI_SGE_FLAGS_END_OF_LIST | 4773 MPI_SGE_FLAGS_END_OF_BUFFER; 4774 fl <<= MPI_SGE_FLAGS_SHIFT; 4775 fl |= resplen; 4776 tp->StatusDataSGE.FlagsLength = htole32(fl); 4777 } 4778 4779 mpt_lprt(mpt, MPT_PRT_DEBUG, 4780 "STATUS_CCB %p (with%s sense) tag %x req %p:%u resid %u\n", 4781 ccb, sense_len > 0 ? "" : "out", ccb ? ccb->csio.tag_id : -1, 4782 req, req->serno, tgt->resid); 4783 if (ccb) { 4784 ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG; 4785 mpt_req_timeout(req, SBT_1S * 60, mpt_timeout, ccb); 4786 } 4787 mpt_send_cmd(mpt, req); 4788} 4789 4790static void 4791mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *mpt, request_t *req, mpt_task_mgmt_t fc, 4792 tgt_resource_t *trtp, int init_id) 4793{ 4794 struct ccb_immediate_notify *inot; 4795 mpt_tgt_state_t *tgt; 4796 4797 tgt = MPT_TGT_STATE(mpt, req); 4798 inot = (struct ccb_immediate_notify *) STAILQ_FIRST(&trtp->inots); 4799 if (inot == NULL) { 4800 mpt_lprt(mpt, MPT_PRT_WARN, "no INOTSs- sending back BSY\n"); 4801 mpt_scsi_tgt_status(mpt, NULL, req, SCSI_STATUS_BUSY, NULL, 0); 4802 return; 4803 } 4804 STAILQ_REMOVE_HEAD(&trtp->inots, sim_links.stqe); 4805 mpt_lprt(mpt, MPT_PRT_DEBUG1, 4806 "Get FREE INOT %p lun %jx\n", inot, 4807 (uintmax_t)inot->ccb_h.target_lun); 4808 4809 inot->initiator_id = init_id; /* XXX */ 4810 /* 4811 * This is a somewhat grotesque attempt to map from task management 4812 * to old style SCSI messages. God help us all. 4813 */ 4814 switch (fc) { 4815 case MPT_ABORT_TASK_SET: 4816 inot->arg = MSG_ABORT_TAG; 4817 break; 4818 case MPT_CLEAR_TASK_SET: 4819 inot->arg = MSG_CLEAR_TASK_SET; 4820 break; 4821 case MPT_TARGET_RESET: 4822 inot->arg = MSG_TARGET_RESET; 4823 break; 4824 case MPT_CLEAR_ACA: 4825 inot->arg = MSG_CLEAR_ACA; 4826 break; 4827 case MPT_TERMINATE_TASK: 4828 inot->arg = MSG_ABORT_TAG; 4829 break; 4830 default: 4831 inot->arg = MSG_NOOP; 4832 break; 4833 } 4834 /* 4835 * XXX KDM we need the sequence/tag number for the target of the 4836 * task management operation, especially if it is an abort. 
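	 *
	 * The notify is recorded as this command's active ccb and handed
	 * up with CAM_MESSAGE_RECV|CAM_DEV_QFRZN; the peripheral driver
	 * is then expected to send an XPT_NOTIFY_ACKNOWLEDGE back down
	 * (see the "Got Notify ACK" handling earlier in this file).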
4837 */ 4838 tgt->ccb = (union ccb *) inot; 4839 inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN; 4840 xpt_done((union ccb *)inot); 4841} 4842 4843static void 4844mpt_scsi_tgt_atio(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc) 4845{ 4846 static uint8_t null_iqd[SHORT_INQUIRY_LENGTH] = { 4847 0x7f, 0x00, 0x02, 0x02, 0x20, 0x00, 0x00, 0x32, 4848 'F', 'R', 'E', 'E', 'B', 'S', 'D', ' ', 4849 'L', 'S', 'I', '-', 'L', 'O', 'G', 'I', 4850 'C', ' ', 'N', 'U', 'L', 'D', 'E', 'V', 4851 '0', '0', '0', '1' 4852 }; 4853 struct ccb_accept_tio *atiop; 4854 lun_id_t lun; 4855 int tag_action = 0; 4856 mpt_tgt_state_t *tgt; 4857 tgt_resource_t *trtp = NULL; 4858 U8 *lunptr; 4859 U8 *vbuf; 4860 U16 itag; 4861 U16 ioindex; 4862 mpt_task_mgmt_t fct = MPT_NIL_TMT_VALUE; 4863 uint8_t *cdbp; 4864 4865 /* 4866 * Stash info for the current command where we can get at it later. 4867 */ 4868 vbuf = req->req_vbuf; 4869 vbuf += MPT_RQSL(mpt); 4870 4871 /* 4872 * Get our state pointer set up. 4873 */ 4874 tgt = MPT_TGT_STATE(mpt, req); 4875 if (tgt->state != TGT_STATE_LOADED) { 4876 mpt_tgt_dump_req_state(mpt, req); 4877 panic("bad target state in mpt_scsi_tgt_atio"); 4878 } 4879 memset(tgt, 0, sizeof (mpt_tgt_state_t)); 4880 tgt->state = TGT_STATE_IN_CAM; 4881 tgt->reply_desc = reply_desc; 4882 ioindex = GET_IO_INDEX(reply_desc); 4883 if (mpt->verbose >= MPT_PRT_DEBUG) { 4884 mpt_dump_data(mpt, "mpt_scsi_tgt_atio response", vbuf, 4885 max(sizeof (MPI_TARGET_FCP_CMD_BUFFER), 4886 max(sizeof (MPI_TARGET_SSP_CMD_BUFFER), 4887 sizeof (MPI_TARGET_SCSI_SPI_CMD_BUFFER)))); 4888 } 4889 if (mpt->is_fc) { 4890 PTR_MPI_TARGET_FCP_CMD_BUFFER fc; 4891 fc = (PTR_MPI_TARGET_FCP_CMD_BUFFER) vbuf; 4892 if (fc->FcpCntl[2]) { 4893 /* 4894 * Task Management Request 4895 */ 4896 switch (fc->FcpCntl[2]) { 4897 case 0x2: 4898 fct = MPT_ABORT_TASK_SET; 4899 break; 4900 case 0x4: 4901 fct = MPT_CLEAR_TASK_SET; 4902 break; 4903 case 0x20: 4904 fct = MPT_TARGET_RESET; 4905 break; 4906 case 0x40: 4907 fct = MPT_CLEAR_ACA; 4908 break; 4909 case 0x80: 4910 fct = MPT_TERMINATE_TASK; 4911 break; 4912 default: 4913 mpt_prt(mpt, "CORRUPTED TASK MGMT BITS: 0x%x\n", 4914 fc->FcpCntl[2]); 4915 mpt_scsi_tgt_status(mpt, NULL, req, 4916 SCSI_STATUS_OK, NULL, 0); 4917 return; 4918 } 4919 } else { 4920 switch (fc->FcpCntl[1]) { 4921 case 0: 4922 tag_action = MSG_SIMPLE_Q_TAG; 4923 break; 4924 case 1: 4925 tag_action = MSG_HEAD_OF_Q_TAG; 4926 break; 4927 case 2: 4928 tag_action = MSG_ORDERED_Q_TAG; 4929 break; 4930 default: 4931 /* 4932 * Bah. 
Ignore Untagged Queing and ACA 4933 */ 4934 tag_action = MSG_SIMPLE_Q_TAG; 4935 break; 4936 } 4937 } 4938 tgt->resid = be32toh(fc->FcpDl); 4939 cdbp = fc->FcpCdb; 4940 lunptr = fc->FcpLun; 4941 itag = be16toh(fc->OptionalOxid); 4942 } else if (mpt->is_sas) { 4943 PTR_MPI_TARGET_SSP_CMD_BUFFER ssp; 4944 ssp = (PTR_MPI_TARGET_SSP_CMD_BUFFER) vbuf; 4945 cdbp = ssp->CDB; 4946 lunptr = ssp->LogicalUnitNumber; 4947 itag = ssp->InitiatorTag; 4948 } else { 4949 PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp; 4950 sp = (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) vbuf; 4951 cdbp = sp->CDB; 4952 lunptr = sp->LogicalUnitNumber; 4953 itag = sp->Tag; 4954 } 4955 4956 /* 4957 * Generate a simple lun 4958 */ 4959 switch (lunptr[0] & 0xc0) { 4960 case 0x40: 4961 lun = ((lunptr[0] & 0x3f) << 8) | lunptr[1]; 4962 break; 4963 case 0: 4964 lun = lunptr[1]; 4965 break; 4966 default: 4967 mpt_lprt(mpt, MPT_PRT_ERROR, "cannot handle this type lun\n"); 4968 lun = 0xffff; 4969 break; 4970 } 4971 4972 /* 4973 * Deal with non-enabled or bad luns here. 4974 */ 4975 if (lun >= MPT_MAX_LUNS || mpt->tenabled == 0 || 4976 mpt->trt[lun].enabled == 0) { 4977 if (mpt->twildcard) { 4978 trtp = &mpt->trt_wildcard; 4979 } else if (fct == MPT_NIL_TMT_VALUE) { 4980 /* 4981 * In this case, we haven't got an upstream listener 4982 * for either a specific lun or wildcard luns. We 4983 * have to make some sensible response. For regular 4984 * inquiry, just return some NOT HERE inquiry data. 4985 * For VPD inquiry, report illegal field in cdb. 4986 * For REQUEST SENSE, just return NO SENSE data. 4987 * REPORT LUNS gets illegal command. 4988 * All other commands get 'no such device'. 4989 */ 4990 uint8_t sense[MPT_SENSE_SIZE]; 4991 size_t len; 4992 4993 memset(sense, 0, sizeof(sense)); 4994 sense[0] = 0xf0; 4995 sense[2] = 0x5; 4996 sense[7] = 0x8; 4997 tgt->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex); 4998 4999 switch (cdbp[0]) { 5000 case INQUIRY: 5001 { 5002 if (cdbp[1] != 0) { 5003 sense[12] = 0x26; 5004 sense[13] = 0x01; 5005 break; 5006 } 5007 len = min(tgt->resid, cdbp[4]); 5008 len = min(len, sizeof (null_iqd)); 5009 mpt_lprt(mpt, MPT_PRT_DEBUG, 5010 "local inquiry %ld bytes\n", (long) len); 5011 mpt_scsi_tgt_local(mpt, req, lun, 1, 5012 null_iqd, len); 5013 return; 5014 } 5015 case REQUEST_SENSE: 5016 { 5017 sense[2] = 0x0; 5018 len = min(tgt->resid, cdbp[4]); 5019 len = min(len, sizeof (sense)); 5020 mpt_lprt(mpt, MPT_PRT_DEBUG, 5021 "local reqsense %ld bytes\n", (long) len); 5022 mpt_scsi_tgt_local(mpt, req, lun, 1, 5023 sense, len); 5024 return; 5025 } 5026 case REPORT_LUNS: 5027 mpt_lprt(mpt, MPT_PRT_DEBUG, "REPORT LUNS\n"); 5028 sense[12] = 0x26; 5029 return; 5030 default: 5031 mpt_lprt(mpt, MPT_PRT_DEBUG, 5032 "CMD 0x%x to unmanaged lun %jx\n", 5033 cdbp[0], (uintmax_t)lun); 5034 sense[12] = 0x25; 5035 break; 5036 } 5037 mpt_scsi_tgt_status(mpt, NULL, req, 5038 SCSI_STATUS_CHECK_COND, sense, sizeof(sense)); 5039 return; 5040 } 5041 /* otherwise, leave trtp NULL */ 5042 } else { 5043 trtp = &mpt->trt[lun]; 5044 } 5045 5046 /* 5047 * Deal with any task management 5048 */ 5049 if (fct != MPT_NIL_TMT_VALUE) { 5050 if (trtp == NULL) { 5051 mpt_prt(mpt, "task mgmt function %x but no listener\n", 5052 fct); 5053 mpt_scsi_tgt_status(mpt, NULL, req, 5054 SCSI_STATUS_OK, NULL, 0); 5055 } else { 5056 mpt_scsi_tgt_tsk_mgmt(mpt, req, fct, trtp, 5057 GET_INITIATOR_INDEX(reply_desc)); 5058 } 5059 return; 5060 } 5061 5062 5063 atiop = (struct ccb_accept_tio *) STAILQ_FIRST(&trtp->atios); 5064 if (atiop == NULL) { 5065 mpt_lprt(mpt, MPT_PRT_WARN, 5066 
"no ATIOs for lun %jx- sending back %s\n", (uintmax_t)lun, 5067 mpt->tenabled? "QUEUE FULL" : "BUSY"); 5068 mpt_scsi_tgt_status(mpt, NULL, req, 5069 mpt->tenabled? SCSI_STATUS_QUEUE_FULL : SCSI_STATUS_BUSY, 5070 NULL, 0); 5071 return; 5072 } 5073 STAILQ_REMOVE_HEAD(&trtp->atios, sim_links.stqe); 5074 mpt_lprt(mpt, MPT_PRT_DEBUG1, 5075 "Get FREE ATIO %p lun %jx\n", atiop, 5076 (uintmax_t)atiop->ccb_h.target_lun); 5077 atiop->ccb_h.ccb_mpt_ptr = mpt; 5078 atiop->ccb_h.status = CAM_CDB_RECVD; 5079 atiop->ccb_h.target_lun = lun; 5080 atiop->sense_len = 0; 5081 atiop->init_id = GET_INITIATOR_INDEX(reply_desc); 5082 atiop->cdb_len = 16; 5083 memcpy(atiop->cdb_io.cdb_bytes, cdbp, atiop->cdb_len); 5084 5085 /* 5086 * The tag we construct here allows us to find the 5087 * original request that the command came in with. 5088 * 5089 * This way we don't have to depend on anything but the 5090 * tag to find things when CCBs show back up from CAM. 5091 */ 5092 atiop->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex); 5093 tgt->tag_id = atiop->tag_id; 5094 if (tag_action) { 5095 atiop->tag_action = tag_action; 5096 atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID; 5097 } 5098 if (mpt->verbose >= MPT_PRT_DEBUG) { 5099 int i; 5100 mpt_prt(mpt, "START_CCB %p for lun %jx CDB=<", atiop, 5101 (uintmax_t)atiop->ccb_h.target_lun); 5102 for (i = 0; i < atiop->cdb_len; i++) { 5103 mpt_prtc(mpt, "%02x%c", cdbp[i] & 0xff, 5104 (i == (atiop->cdb_len - 1))? '>' : ' '); 5105 } 5106 mpt_prtc(mpt, " itag %x tag %x rdesc %x dl=%u\n", 5107 itag, atiop->tag_id, tgt->reply_desc, tgt->resid); 5108 } 5109 5110 xpt_done((union ccb *)atiop); 5111} 5112 5113static void 5114mpt_tgt_dump_tgt_state(struct mpt_softc *mpt, request_t *req) 5115{ 5116 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req); 5117 5118 mpt_prt(mpt, "req %p:%u tgt:rdesc 0x%x resid %u xfrd %u ccb %p treq %p " 5119 "nx %d tag 0x%08x state=%d\n", req, req->serno, tgt->reply_desc, 5120 tgt->resid, tgt->bytes_xfered, tgt->ccb, tgt->req, tgt->nxfers, 5121 tgt->tag_id, tgt->state); 5122} 5123 5124static void 5125mpt_tgt_dump_req_state(struct mpt_softc *mpt, request_t *req) 5126{ 5127 5128 mpt_prt(mpt, "req %p:%u index %u (%x) state %x\n", req, req->serno, 5129 req->index, req->index, req->state); 5130 mpt_tgt_dump_tgt_state(mpt, req); 5131} 5132 5133static int 5134mpt_scsi_tgt_reply_handler(struct mpt_softc *mpt, request_t *req, 5135 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 5136{ 5137 int dbg; 5138 union ccb *ccb; 5139 U16 status; 5140 5141 if (reply_frame == NULL) { 5142 /* 5143 * Figure out what the state of the command is. 5144 */ 5145 mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req); 5146 5147#ifdef INVARIANTS 5148 mpt_req_spcl(mpt, req, "turbo scsi_tgt_reply", __LINE__); 5149 if (tgt->req) { 5150 mpt_req_not_spcl(mpt, tgt->req, 5151 "turbo scsi_tgt_reply associated req", __LINE__); 5152 } 5153#endif 5154 switch(tgt->state) { 5155 case TGT_STATE_LOADED: 5156 /* 5157 * This is a new command starting. 
5158 */ 5159 mpt_scsi_tgt_atio(mpt, req, reply_desc); 5160 break; 5161 case TGT_STATE_MOVING_DATA: 5162 { 5163 ccb = tgt->ccb; 5164 if (tgt->req == NULL) { 5165 panic("mpt: turbo target reply with null " 5166 "associated request moving data"); 5167 /* NOTREACHED */ 5168 } 5169 if (ccb == NULL) { 5170 if (tgt->is_local == 0) { 5171 panic("mpt: turbo target reply with " 5172 "null associated ccb moving data"); 5173 /* NOTREACHED */ 5174 } 5175 mpt_lprt(mpt, MPT_PRT_DEBUG, 5176 "TARGET_ASSIST local done\n"); 5177 TAILQ_REMOVE(&mpt->request_pending_list, 5178 tgt->req, links); 5179 mpt_free_request(mpt, tgt->req); 5180 tgt->req = NULL; 5181 mpt_scsi_tgt_status(mpt, NULL, req, 5182 0, NULL, 0); 5183 return (TRUE); 5184 } 5185 tgt->ccb = NULL; 5186 tgt->nxfers++; 5187 mpt_req_untimeout(tgt->req, mpt_timeout, ccb); 5188 mpt_lprt(mpt, MPT_PRT_DEBUG, 5189 "TARGET_ASSIST %p (req %p:%u) done tag 0x%x\n", 5190 ccb, tgt->req, tgt->req->serno, ccb->csio.tag_id); 5191 /* 5192 * Free the Target Assist Request 5193 */ 5194 KASSERT(tgt->req->ccb == ccb, 5195 ("tgt->req %p:%u tgt->req->ccb %p", tgt->req, 5196 tgt->req->serno, tgt->req->ccb)); 5197 TAILQ_REMOVE(&mpt->request_pending_list, 5198 tgt->req, links); 5199 mpt_free_request(mpt, tgt->req); 5200 tgt->req = NULL; 5201 5202 /* 5203 * Do we need to send status now? That is, are 5204 * we done with all our data transfers? 5205 */ 5206 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) { 5207 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 5208 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 5209 KASSERT(ccb->ccb_h.status, 5210 ("zero ccb sts at %d", __LINE__)); 5211 tgt->state = TGT_STATE_IN_CAM; 5212 if (mpt->outofbeer) { 5213 ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 5214 mpt->outofbeer = 0; 5215 mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n"); 5216 } 5217 xpt_done(ccb); 5218 break; 5219 } 5220 /* 5221 * Otherwise, send status (and sense) 5222 */ 5223 mpt_scsi_tgt_status(mpt, ccb, req, 5224 ccb->csio.scsi_status, 5225 (void *)&ccb->csio.sense_data, 5226 (ccb->ccb_h.flags & CAM_SEND_SENSE) ? 5227 ccb->csio.sense_len : 0); 5228 break; 5229 } 5230 case TGT_STATE_SENDING_STATUS: 5231 case TGT_STATE_MOVING_DATA_AND_STATUS: 5232 { 5233 int ioindex; 5234 ccb = tgt->ccb; 5235 5236 if (tgt->req == NULL) { 5237 panic("mpt: turbo target reply with null " 5238 "associated request sending status"); 5239 /* NOTREACHED */ 5240 } 5241 5242 if (ccb) { 5243 tgt->ccb = NULL; 5244 if (tgt->state == 5245 TGT_STATE_MOVING_DATA_AND_STATUS) { 5246 tgt->nxfers++; 5247 } 5248 mpt_req_untimeout(tgt->req, mpt_timeout, ccb); 5249 if (ccb->ccb_h.flags & CAM_SEND_SENSE) { 5250 ccb->ccb_h.status |= CAM_SENT_SENSE; 5251 } 5252 mpt_lprt(mpt, MPT_PRT_DEBUG, 5253 "TARGET_STATUS tag %x sts %x flgs %x req " 5254 "%p\n", ccb->csio.tag_id, ccb->ccb_h.status, 5255 ccb->ccb_h.flags, tgt->req); 5256 /* 5257 * Free the Target Send Status Request 5258 */ 5259 KASSERT(tgt->req->ccb == ccb, 5260 ("tgt->req %p:%u tgt->req->ccb %p", 5261 tgt->req, tgt->req->serno, tgt->req->ccb)); 5262 /* 5263 * Notify CAM that we're done 5264 */ 5265 mpt_set_ccb_status(ccb, CAM_REQ_CMP); 5266 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 5267 KASSERT(ccb->ccb_h.status, 5268 ("ZERO ccb sts at %d", __LINE__)); 5269 tgt->ccb = NULL; 5270 } else { 5271 mpt_lprt(mpt, MPT_PRT_DEBUG, 5272 "TARGET_STATUS non-CAM for req %p:%u\n", 5273 tgt->req, tgt->req->serno); 5274 } 5275 TAILQ_REMOVE(&mpt->request_pending_list, 5276 tgt->req, links); 5277 mpt_free_request(mpt, tgt->req); 5278 tgt->req = NULL; 5279 5280 /* 5281 * And re-post the Command Buffer. 
5282 * This will reset the state. 5283 */ 5284 ioindex = GET_IO_INDEX(reply_desc); 5285 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 5286 tgt->is_local = 0; 5287 mpt_post_target_command(mpt, req, ioindex); 5288 5289 /* 5290 * And post a done for anyone who cares 5291 */ 5292 if (ccb) { 5293 if (mpt->outofbeer) { 5294 ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 5295 mpt->outofbeer = 0; 5296 mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n"); 5297 } 5298 xpt_done(ccb); 5299 } 5300 break; 5301 } 5302 case TGT_STATE_NIL: /* XXX This Never Happens XXX */ 5303 tgt->state = TGT_STATE_LOADED; 5304 break; 5305 default: 5306 mpt_prt(mpt, "Unknown Target State 0x%x in Context " 5307 "Reply Function\n", tgt->state); 5308 } 5309 return (TRUE); 5310 } 5311 5312 status = le16toh(reply_frame->IOCStatus); 5313 if (status != MPI_IOCSTATUS_SUCCESS) { 5314 dbg = MPT_PRT_ERROR; 5315 } else { 5316 dbg = MPT_PRT_DEBUG1; 5317 } 5318 5319 mpt_lprt(mpt, dbg, 5320 "SCSI_TGT REPLY: req=%p:%u reply=%p func=%x IOCstatus 0x%x\n", 5321 req, req->serno, reply_frame, reply_frame->Function, status); 5322 5323 switch (reply_frame->Function) { 5324 case MPI_FUNCTION_TARGET_CMD_BUFFER_POST: 5325 { 5326 mpt_tgt_state_t *tgt; 5327#ifdef INVARIANTS 5328 mpt_req_spcl(mpt, req, "tgt reply BUFFER POST", __LINE__); 5329#endif 5330 if (status != MPI_IOCSTATUS_SUCCESS) { 5331 /* 5332 * XXX What to do? 5333 */ 5334 break; 5335 } 5336 tgt = MPT_TGT_STATE(mpt, req); 5337 KASSERT(tgt->state == TGT_STATE_LOADING, 5338 ("bad state 0x%x on reply to buffer post", tgt->state)); 5339 mpt_assign_serno(mpt, req); 5340 tgt->state = TGT_STATE_LOADED; 5341 break; 5342 } 5343 case MPI_FUNCTION_TARGET_ASSIST: 5344#ifdef INVARIANTS 5345 mpt_req_not_spcl(mpt, req, "tgt reply TARGET ASSIST", __LINE__); 5346#endif 5347 mpt_prt(mpt, "target assist completion\n"); 5348 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 5349 mpt_free_request(mpt, req); 5350 break; 5351 case MPI_FUNCTION_TARGET_STATUS_SEND: 5352#ifdef INVARIANTS 5353 mpt_req_not_spcl(mpt, req, "tgt reply STATUS SEND", __LINE__); 5354#endif 5355 mpt_prt(mpt, "status send completion\n"); 5356 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 5357 mpt_free_request(mpt, req); 5358 break; 5359 case MPI_FUNCTION_TARGET_MODE_ABORT: 5360 { 5361 PTR_MSG_TARGET_MODE_ABORT_REPLY abtrp = 5362 (PTR_MSG_TARGET_MODE_ABORT_REPLY) reply_frame; 5363 PTR_MSG_TARGET_MODE_ABORT abtp = 5364 (PTR_MSG_TARGET_MODE_ABORT) req->req_vbuf; 5365 uint32_t cc = GET_IO_INDEX(le32toh(abtp->ReplyWord)); 5366#ifdef INVARIANTS 5367 mpt_req_not_spcl(mpt, req, "tgt reply TMODE ABORT", __LINE__); 5368#endif 5369 mpt_prt(mpt, "ABORT RX_ID 0x%x Complete; status 0x%x cnt %u\n", 5370 cc, le16toh(abtrp->IOCStatus), le32toh(abtrp->AbortCount)); 5371 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 5372 mpt_free_request(mpt, req); 5373 break; 5374 } 5375 default: 5376 mpt_prt(mpt, "Unknown Target Address Reply Function code: " 5377 "0x%x\n", reply_frame->Function); 5378 break; 5379 } 5380 return (TRUE); 5381} 5382