mpt.c revision 157662
1/*- 2 * Generic routines for LSI Fusion adapters. 3 * FreeBSD Version. 4 * 5 * Copyright (c) 2000, 2001 by Greg Ansley 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice immediately at the beginning of the file, without modification, 12 * this list of conditions, and the following disclaimer. 13 * 2. The name of the author may not be used to endorse or promote products 14 * derived from this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 20 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 */ 28/*- 29 * Copyright (c) 2002, 2006 by Matthew Jacob 30 * All rights reserved. 31 * 32 * Redistribution and use in source and binary forms, with or without 33 * modification, are permitted provided that the following conditions are 34 * met: 35 * 1. Redistributions of source code must retain the above copyright 36 * notice, this list of conditions and the following disclaimer. 37 * 2. 
Redistributions in binary form must reproduce at minimum a disclaimer 38 * substantially similar to the "NO WARRANTY" disclaimer below 39 * ("Disclaimer") and any redistribution must be conditioned upon including 40 * a substantially similar Disclaimer requirement for further binary 41 * redistribution. 42 * 3. Neither the names of the above listed copyright holders nor the names 43 * of any contributors may be used to endorse or promote products derived 44 * from this software without specific prior written permission. 45 * 46 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 47 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 48 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 49 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 50 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 51 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 52 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 53 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 54 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 55 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT 56 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 57 * 58 * Support from Chris Ellsworth in order to make SAS adapters work 59 * is gratefully acknowledged. 60 */ 61/*- 62 * Copyright (c) 2004, Avid Technology, Inc. and its contributors. 63 * Copyright (c) 2005, WHEEL Sp. z o.o. 64 * Copyright (c) 2004, 2005 Justin T. Gibbs 65 * All rights reserved. 66 * 67 * Redistribution and use in source and binary forms, with or without 68 * modification, are permitted provided that the following conditions are 69 * met: 70 * 1. Redistributions of source code must retain the above copyright 71 * notice, this list of conditions and the following disclaimer. 72 * 2. 
Redistributions in binary form must reproduce at minimum a disclaimer 73 * substantially similar to the "NO WARRANTY" disclaimer below 74 * ("Disclaimer") and any redistribution must be conditioned upon including 75 * a substantially similar Disclaimer requirement for further binary 76 * redistribution. 77 * 3. Neither the names of the above listed copyright holders nor the names 78 * of any contributors may be used to endorse or promote products derived 79 * from this software without specific prior written permission. 80 * 81 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 82 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 83 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 84 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 85 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 86 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 87 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 88 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 89 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 90 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT 91 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
92 */ 93 94#include <sys/cdefs.h> 95__FBSDID("$FreeBSD: head/sys/dev/mpt/mpt.c 157662 2006-04-11 16:47:30Z mjacob $"); 96 97#include <dev/mpt/mpt.h> 98#include <dev/mpt/mpt_cam.h> /* XXX For static handler registration */ 99#include <dev/mpt/mpt_raid.h> /* XXX For static handler registration */ 100 101#include <dev/mpt/mpilib/mpi.h> 102#include <dev/mpt/mpilib/mpi_ioc.h> 103#include <dev/mpt/mpilib/mpi_fc.h> 104#include <dev/mpt/mpilib/mpi_targ.h> 105 106#include <sys/sysctl.h> 107 108#define MPT_MAX_TRYS 3 109#define MPT_MAX_WAIT 300000 110 111static int maxwait_ack = 0; 112static int maxwait_int = 0; 113static int maxwait_state = 0; 114 115TAILQ_HEAD(, mpt_softc) mpt_tailq = TAILQ_HEAD_INITIALIZER(mpt_tailq); 116mpt_reply_handler_t *mpt_reply_handlers[MPT_NUM_REPLY_HANDLERS]; 117 118static mpt_reply_handler_t mpt_default_reply_handler; 119static mpt_reply_handler_t mpt_config_reply_handler; 120static mpt_reply_handler_t mpt_handshake_reply_handler; 121static mpt_reply_handler_t mpt_event_reply_handler; 122static void mpt_send_event_ack(struct mpt_softc *mpt, request_t *ack_req, 123 MSG_EVENT_NOTIFY_REPLY *msg, uint32_t context); 124static int mpt_send_event_request(struct mpt_softc *mpt, int onoff); 125static int mpt_soft_reset(struct mpt_softc *mpt); 126static void mpt_hard_reset(struct mpt_softc *mpt); 127static int mpt_configure_ioc(struct mpt_softc *mpt); 128static int mpt_enable_ioc(struct mpt_softc *mpt, int); 129 130/************************* Personality Module Support *************************/ 131/* 132 * We include one extra entry that is guaranteed to be NULL 133 * to simplify our itterator. 
134 */ 135static struct mpt_personality *mpt_personalities[MPT_MAX_PERSONALITIES + 1]; 136static __inline struct mpt_personality* 137 mpt_pers_find(struct mpt_softc *, u_int); 138static __inline struct mpt_personality* 139 mpt_pers_find_reverse(struct mpt_softc *, u_int); 140 141static __inline struct mpt_personality * 142mpt_pers_find(struct mpt_softc *mpt, u_int start_at) 143{ 144 KASSERT(start_at <= MPT_MAX_PERSONALITIES, 145 ("mpt_pers_find: starting position out of range\n")); 146 147 while (start_at < MPT_MAX_PERSONALITIES 148 && (mpt->mpt_pers_mask & (0x1 << start_at)) == 0) { 149 start_at++; 150 } 151 return (mpt_personalities[start_at]); 152} 153 154/* 155 * Used infrequently, so no need to optimize like a forward 156 * traversal where we use the MAX+1 is guaranteed to be NULL 157 * trick. 158 */ 159static __inline struct mpt_personality * 160mpt_pers_find_reverse(struct mpt_softc *mpt, u_int start_at) 161{ 162 while (start_at < MPT_MAX_PERSONALITIES 163 && (mpt->mpt_pers_mask & (0x1 << start_at)) == 0) { 164 start_at--; 165 } 166 if (start_at < MPT_MAX_PERSONALITIES) 167 return (mpt_personalities[start_at]); 168 return (NULL); 169} 170 171#define MPT_PERS_FOREACH(mpt, pers) \ 172 for (pers = mpt_pers_find(mpt, /*start_at*/0); \ 173 pers != NULL; \ 174 pers = mpt_pers_find(mpt, /*start_at*/pers->id+1)) 175 176#define MPT_PERS_FOREACH_REVERSE(mpt, pers) \ 177 for (pers = mpt_pers_find_reverse(mpt, MPT_MAX_PERSONALITIES-1);\ 178 pers != NULL; \ 179 pers = mpt_pers_find_reverse(mpt, /*start_at*/pers->id-1)) 180 181static mpt_load_handler_t mpt_stdload; 182static mpt_probe_handler_t mpt_stdprobe; 183static mpt_attach_handler_t mpt_stdattach; 184static mpt_enable_handler_t mpt_stdenable; 185static mpt_event_handler_t mpt_stdevent; 186static mpt_reset_handler_t mpt_stdreset; 187static mpt_shutdown_handler_t mpt_stdshutdown; 188static mpt_detach_handler_t mpt_stddetach; 189static mpt_unload_handler_t mpt_stdunload; 190static struct mpt_personality 
mpt_default_personality = 191{ 192 .load = mpt_stdload, 193 .probe = mpt_stdprobe, 194 .attach = mpt_stdattach, 195 .enable = mpt_stdenable, 196 .event = mpt_stdevent, 197 .reset = mpt_stdreset, 198 .shutdown = mpt_stdshutdown, 199 .detach = mpt_stddetach, 200 .unload = mpt_stdunload 201}; 202 203static mpt_load_handler_t mpt_core_load; 204static mpt_attach_handler_t mpt_core_attach; 205static mpt_enable_handler_t mpt_core_enable; 206static mpt_reset_handler_t mpt_core_ioc_reset; 207static mpt_event_handler_t mpt_core_event; 208static mpt_shutdown_handler_t mpt_core_shutdown; 209static mpt_shutdown_handler_t mpt_core_detach; 210static mpt_unload_handler_t mpt_core_unload; 211static struct mpt_personality mpt_core_personality = 212{ 213 .name = "mpt_core", 214 .load = mpt_core_load, 215 .attach = mpt_core_attach, 216 .enable = mpt_core_enable, 217 .event = mpt_core_event, 218 .reset = mpt_core_ioc_reset, 219 .shutdown = mpt_core_shutdown, 220 .detach = mpt_core_detach, 221 .unload = mpt_core_unload, 222}; 223 224/* 225 * Manual declaration so that DECLARE_MPT_PERSONALITY doesn't need 226 * ordering information. We want the core to always register FIRST. 227 * other modules are set to SI_ORDER_SECOND. 
 */
static moduledata_t mpt_core_mod = {
    "mpt_core", mpt_modevent, &mpt_core_personality
};
DECLARE_MODULE(mpt_core, mpt_core_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
MODULE_VERSION(mpt_core, 1);

#define MPT_PERS_ATTACHED(pers, mpt) ((mpt)->mpt_pers_mask & (0x1 << pers->id))


/*
 * Module event handler for a personality module.
 *
 * MOD_LOAD: find a free slot in mpt_personalities[], record its index in
 * pers->id, fill any NULL handler slots with the corresponding no-op stubs
 * from mpt_default_personality, and invoke the personality's load handler
 * (undoing the registration on failure).  MOD_UNLOAD: invoke the unload
 * handler and clear the slot.  Returns 0 or an errno.
 */
int
mpt_modevent(module_t mod, int type, void *data)
{
    struct mpt_personality *pers;
    int error;

    pers = (struct mpt_personality *)data;

    error = 0;
    switch (type) {
    case MOD_LOAD:
    {
        mpt_load_handler_t **def_handler;
        mpt_load_handler_t **pers_handler;
        int i;

        /* Find the first free personality slot. */
        for (i = 0; i < MPT_MAX_PERSONALITIES; i++) {
            if (mpt_personalities[i] == NULL)
                break;
        }
        if (i >= MPT_MAX_PERSONALITIES) {
            error = ENOMEM;
            break;
        }
        pers->id = i;
        mpt_personalities[i] = pers;

        /* Install standard/noop handlers for any NULL entries. */
        def_handler = MPT_PERS_FIRST_HANDLER(&mpt_default_personality);
        pers_handler = MPT_PERS_FIRST_HANDLER(pers);
        while (pers_handler <= MPT_PERS_LAST_HANDLER(pers)) {
            if (*pers_handler == NULL)
                *pers_handler = *def_handler;
            pers_handler++;
            def_handler++;
        }

        error = (pers->load(pers));
        if (error != 0)
            mpt_personalities[i] = NULL;
        break;
    }
    case MOD_SHUTDOWN:
        break;
#if __FreeBSD_version >= 500000
    case MOD_QUIESCE:
        break;
#endif
    case MOD_UNLOAD:
        error = pers->unload(pers);
        mpt_personalities[pers->id] = NULL;
        break;
    default:
        error = EINVAL;
        break;
    }
    return (error);
}

int
mpt_stdload(struct mpt_personality *pers)
{
    /* Load is always successful. */
    return (0);
}

int
mpt_stdprobe(struct mpt_softc *mpt)
{
    /* Probe is always successful. */
    return (0);
}

int
mpt_stdattach(struct mpt_softc *mpt)
{
    /* Attach is always successful. */
    return (0);
}

int
mpt_stdenable(struct mpt_softc *mpt)
{
    /* Enable is always successful. */
    return (0);
}

int
mpt_stdevent(struct mpt_softc *mpt, request_t *req, MSG_EVENT_NOTIFY_REPLY *msg)
{
    mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_stdevent: 0x%x\n", msg->Event & 0xFF);
    /* Event was not for us. */
    return (0);
}

void
mpt_stdreset(struct mpt_softc *mpt, int type)
{
}

void
mpt_stdshutdown(struct mpt_softc *mpt)
{
}

void
mpt_stddetach(struct mpt_softc *mpt)
{
}

int
mpt_stdunload(struct mpt_personality *pers)
{
    /* Unload is always successful. */
    return (0);
}

/******************************* Bus DMA Support ******************************/
/*
 * bus_dmamap_load() callback: stash the load status and the physical
 * address of the (single) segment into the caller's mpt_map_info.
 */
void
mpt_map_rquest(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
    struct mpt_map_info *map_info;

    map_info = (struct mpt_map_info *)arg;
    map_info->error = error;
    map_info->phys = segs->ds_addr;
}

/**************************** Reply/Event Handling ****************************/
/*
 * Register a reply handler in the global mpt_reply_handlers[] table and
 * return its handler id via *phandler_id.  Re-registering the same handler
 * returns the original id.  Returns 0, EINVAL, or ENOMEM (table full).
 */
int
mpt_register_handler(struct mpt_softc *mpt, mpt_handler_type type,
             mpt_handler_t handler, uint32_t *phandler_id)
{

    switch (type) {
    case MPT_HANDLER_REPLY:
    {
        u_int cbi;
        u_int free_cbi;

        if (phandler_id == NULL)
            return (EINVAL);

        free_cbi = MPT_HANDLER_ID_NONE;
        for (cbi = 0; cbi < MPT_NUM_REPLY_HANDLERS; cbi++) {
            /*
             * If the same handler is registered multiple
             * times, don't error out.  Just return the
             * index of the original registration.
             */
            if (mpt_reply_handlers[cbi] == handler.reply_handler) {
                *phandler_id = MPT_CBI_TO_HID(cbi);
                return (0);
            }

            /*
             * Fill from the front in the hope that
             * all registered handlers consume only a
             * single cache line.
             *
             * We don't break on the first empty slot so
             * that the full table is checked to see if
             * this handler was previously registered.
             */
            if (free_cbi == MPT_HANDLER_ID_NONE &&
                (mpt_reply_handlers[cbi]
              == mpt_default_reply_handler))
                free_cbi = cbi;
        }
        if (free_cbi == MPT_HANDLER_ID_NONE) {
            return (ENOMEM);
        }
        mpt_reply_handlers[free_cbi] = handler.reply_handler;
        *phandler_id = MPT_CBI_TO_HID(free_cbi);
        break;
    }
    default:
        mpt_prt(mpt, "mpt_register_handler unknown type %d\n", type);
        return (EINVAL);
    }
    return (0);
}

/*
 * Remove a previously registered reply handler, restoring the default
 * handler in its slot.  Returns 0, ENOENT (id/handler mismatch), or EINVAL.
 */
int
mpt_deregister_handler(struct mpt_softc *mpt, mpt_handler_type type,
               mpt_handler_t handler, uint32_t handler_id)
{

    switch (type) {
    case MPT_HANDLER_REPLY:
    {
        u_int cbi;

        cbi = MPT_CBI(handler_id);
        if (cbi >= MPT_NUM_REPLY_HANDLERS
         || mpt_reply_handlers[cbi] != handler.reply_handler)
            return (ENOENT);
        mpt_reply_handlers[cbi] = mpt_default_reply_handler;
        break;
    }
    default:
        mpt_prt(mpt, "mpt_deregister_handler unknown type %d\n", type);
        return (EINVAL);
    }
    return (0);
}

/*
 * Catch-all reply handler: log the reply (and dump the frame, if any) and
 * tell the caller to free the reply frame.
 */
static int
mpt_default_reply_handler(struct mpt_softc *mpt, request_t *req,
              uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
    mpt_prt(mpt,
        "Default Handler Called: req=%p:%u reply_descriptor=%x frame=%p\n",
        req, req->serno, reply_desc, reply_frame);

    if (reply_frame != NULL)
        mpt_dump_reply_frame(mpt, reply_frame);

    mpt_prt(mpt, "Reply Frame Ignored\n");

    return (/*free_reply*/TRUE);
}

/*
 * Reply handler for configuration page requests: copy the reply's page
 * header and IOCStatus back into the request, mark it done, and wake any
 * sleeper waiting on it.
 */
static int
mpt_config_reply_handler(struct mpt_softc *mpt, request_t *req,
             uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
    if (req != NULL) {

        if (reply_frame != NULL) {
            MSG_CONFIG *cfgp;
            MSG_CONFIG_REPLY *reply;

            cfgp = (MSG_CONFIG *)req->req_vbuf;
            reply = (MSG_CONFIG_REPLY *)reply_frame;
            req->IOCStatus =
                le16toh(reply_frame->IOCStatus);
            bcopy(&reply->Header, &cfgp->Header,
                  sizeof(cfgp->Header));
        }
        req->state &= ~REQ_STATE_QUEUED;
        req->state |= REQ_STATE_DONE;
        TAILQ_REMOVE(&mpt->request_pending_list, req, links);
        if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
            wakeup(req);
        }
    }

    return (TRUE);
}

static int
mpt_handshake_reply_handler(struct mpt_softc *mpt, request_t *req,
                uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
    /* Nothing to be done. */
    return (TRUE);
}

/*
 * Reply handler for asynchronous event notifications.  Offers the event to
 * every attached personality's event handler; if the IOC requires an ACK,
 * sends one immediately, or — when no request buffer is available — defers
 * it by queueing the reply frame on mpt->ack_frames (the frame is then NOT
 * freed here; mpt_free_request() replays it later).  Returns TRUE when the
 * caller should free the reply frame.
 */
static int
mpt_event_reply_handler(struct mpt_softc *mpt, request_t *req,
            uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
    int free_reply;

    KASSERT(reply_frame != NULL, ("null reply in mpt_event_reply_handler"));
    KASSERT(req != NULL, ("null request in mpt_event_reply_handler"));

    free_reply = TRUE;
    switch (reply_frame->Function) {
    case MPI_FUNCTION_EVENT_NOTIFICATION:
    {
        MSG_EVENT_NOTIFY_REPLY *msg;
        struct mpt_personality *pers;
        u_int handled;

        handled = 0;
        msg = (MSG_EVENT_NOTIFY_REPLY *)reply_frame;
        MPT_PERS_FOREACH(mpt, pers)
            handled += pers->event(mpt, req, msg);

        if (handled == 0 && mpt->mpt_pers_mask == 0) {
            mpt_lprt(mpt, MPT_PRT_INFO,
                "No Handlers For Any Event Notify Frames. "
                "Event %#x (ACK %sequired).\n",
                msg->Event, msg->AckRequired? "r" : "not r");
        } else if (handled == 0) {
            mpt_lprt(mpt, MPT_PRT_WARN,
                "Unhandled Event Notify Frame. Event %#x "
                "(ACK %sequired).\n",
                msg->Event, msg->AckRequired? "r" : "not r");
        }

        if (msg->AckRequired) {
            request_t *ack_req;
            uint32_t context;

            context = htole32(req->index|MPT_REPLY_HANDLER_EVENTS);
            ack_req = mpt_get_request(mpt, FALSE);
            if (ack_req == NULL) {
                struct mpt_evtf_record *evtf;

                /*
                 * No request buffer free: park the reply
                 * frame so the ACK can be sent when one is
                 * returned to the pool.
                 */
                evtf = (struct mpt_evtf_record *)reply_frame;
                evtf->context = context;
                LIST_INSERT_HEAD(&mpt->ack_frames, evtf, links);
                free_reply = FALSE;
                break;
            }
            mpt_send_event_ack(mpt, ack_req, msg, context);
            /*
             * Don't check for CONTINUATION_REPLY here
             */
            return (free_reply);
        }
        break;
    }
    case MPI_FUNCTION_PORT_ENABLE:
        mpt_lprt(mpt, MPT_PRT_DEBUG , "enable port reply\n");
        break;
    case MPI_FUNCTION_EVENT_ACK:
        break;
    default:
        mpt_prt(mpt, "unknown event function: %x\n",
            reply_frame->Function);
        break;
    }

    /*
     * I'm not sure that this continuation stuff works as it should.
     *
     * I've had FC async events occur that free the frame up because
     * the continuation bit isn't set, and then additional async events
     * then occur using the same context. As you might imagine, this
     * leads to Very Bad Thing.
     *
     * Let's just be safe for now and not free them up until we figure
     * out what's actually happening here.
     */
#if 0
    if ((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0) {
        TAILQ_REMOVE(&mpt->request_pending_list, req, links);
        mpt_free_request(mpt, req);
        mpt_prt(mpt, "event_reply %x for req %p:%u NOT a continuation",
            reply_frame->Function, req, req->serno);
        if (reply_frame->Function == MPI_FUNCTION_EVENT_NOTIFICATION) {
            MSG_EVENT_NOTIFY_REPLY *msg =
                (MSG_EVENT_NOTIFY_REPLY *)reply_frame;
            mpt_prtc(mpt, " Event=0x%x AckReq=%d",
                msg->Event, msg->AckRequired);
        }
    } else {
        mpt_prt(mpt, "event_reply %x for %p:%u IS a continuation",
            reply_frame->Function, req, req->serno);
        if (reply_frame->Function == MPI_FUNCTION_EVENT_NOTIFICATION) {
            MSG_EVENT_NOTIFY_REPLY *msg =
                (MSG_EVENT_NOTIFY_REPLY *)reply_frame;
            mpt_prtc(mpt, " Event=0x%x AckReq=%d",
                msg->Event, msg->AckRequired);
        }
        mpt_prtc(mpt, "\n");
    }
#endif
    return (free_reply);
}

/*
 * Process an asynchronous event from the IOC.
 *
 * Core personality event handler: returns 1 when the event was consumed
 * here, 0 to let other/no handlers claim it.
 */
static int
mpt_core_event(struct mpt_softc *mpt, request_t *req,
           MSG_EVENT_NOTIFY_REPLY *msg)
{
    mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_core_event: 0x%x\n",
         msg->Event & 0xFF);
    switch(msg->Event & 0xFF) {
    case MPI_EVENT_NONE:
        break;
    case MPI_EVENT_LOG_DATA:
    {
        int i;

        /* Some error occurred that LSI wants logged */
        mpt_prt(mpt, "EvtLogData: IOCLogInfo: 0x%08x\n",
            msg->IOCLogInfo);
        mpt_prt(mpt, "\tEvtLogData: Event Data:");
        for (i = 0; i < msg->EventDataLength; i++)
            mpt_prtc(mpt, " %08x", msg->Data[i]);
        mpt_prtc(mpt, "\n");
        break;
    }
    case MPI_EVENT_EVENT_CHANGE:
        /*
         * This is just an acknowledgement
         * of our mpt_send_event_request.
         */
        break;
    case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
        break;
    default:
        return (0);
        break;
    }
    return (1);
}

/*
 * Build an EVENT_ACK message in 'ack_req' mirroring the event/context of
 * the notification 'msg' and hand it to the IOC.
 */
static void
mpt_send_event_ack(struct mpt_softc *mpt, request_t *ack_req,
           MSG_EVENT_NOTIFY_REPLY *msg, uint32_t context)
{
    MSG_EVENT_ACK *ackp;

    ackp = (MSG_EVENT_ACK *)ack_req->req_vbuf;
    memset(ackp, 0, sizeof (*ackp));
    ackp->Function = MPI_FUNCTION_EVENT_ACK;
    ackp->Event = msg->Event;
    ackp->EventContext = msg->EventContext;
    ackp->MsgContext = context;
    mpt_check_doorbell(mpt);
    mpt_send_cmd(mpt, ack_req);
}

/***************************** Interrupt Handling *****************************/
/*
 * Interrupt service routine: drain the reply queue, decode each reply
 * descriptor into a callback index and request index, and dispatch to the
 * registered reply handler.  Address replies carry a DMA'd reply frame;
 * context replies are decoded by type.  Duplicate descriptors are debounced
 * and a trip counter bounds the loop so a wedged queue cannot hang us.
 */
void
mpt_intr(void *arg)
{
    struct mpt_softc *mpt;
    uint32_t reply_desc;
    uint32_t last_reply_desc = MPT_REPLY_EMPTY;
    int ntrips = 0;

    mpt = (struct mpt_softc *)arg;
    mpt_lprt(mpt, MPT_PRT_DEBUG2, "enter mpt_intr\n");
    while ((reply_desc = mpt_pop_reply_queue(mpt)) != MPT_REPLY_EMPTY) {
        request_t *req;
        MSG_DEFAULT_REPLY *reply_frame;
        uint32_t reply_baddr;
        uint32_t ctxt_idx;
        u_int cb_index;
        u_int req_index;
        int free_rf;

        if (reply_desc == last_reply_desc) {
            mpt_prt(mpt, "debounce reply_desc 0x%x\n", reply_desc);
            if (ntrips++ == 1000) {
                break;
            }
            continue;
        }
        last_reply_desc = reply_desc;

        req = NULL;
        reply_frame = NULL;
        reply_baddr = 0;
        if ((reply_desc & MPI_ADDRESS_REPLY_A_BIT) != 0) {
            u_int offset;
            /*
             * Ensure that the reply frame is coherent.
             */
            reply_baddr = MPT_REPLY_BADDR(reply_desc);
            offset = reply_baddr - (mpt->reply_phys & 0xFFFFFFFF);
            bus_dmamap_sync_range(mpt->reply_dmat,
                mpt->reply_dmap, offset, MPT_REPLY_SIZE,
                BUS_DMASYNC_POSTREAD);
            reply_frame = MPT_REPLY_OTOV(mpt, offset);
            ctxt_idx = le32toh(reply_frame->MsgContext);
        } else {
            uint32_t type;

            type = MPI_GET_CONTEXT_REPLY_TYPE(reply_desc);
            ctxt_idx = reply_desc;
            mpt_lprt(mpt, MPT_PRT_DEBUG1, "Context Reply: 0x%08x\n",
                    reply_desc);

            switch (type) {
            case MPI_CONTEXT_REPLY_TYPE_SCSI_INIT:
                ctxt_idx &= MPI_CONTEXT_REPLY_CONTEXT_MASK;
                break;
            case MPI_CONTEXT_REPLY_TYPE_SCSI_TARGET:
                ctxt_idx = GET_IO_INDEX(reply_desc);
                if (mpt->tgt_cmd_ptrs == NULL) {
                    mpt_prt(mpt,
                        "mpt_intr: no target cmd ptrs\n");
                    reply_desc = MPT_REPLY_EMPTY;
                    break;
                }
                if (ctxt_idx >= mpt->tgt_cmds_allocated) {
                    mpt_prt(mpt,
                        "mpt_intr: bad tgt cmd ctxt %u\n",
                        ctxt_idx);
                    reply_desc = MPT_REPLY_EMPTY;
                    ntrips = 1000;
                    break;
                }
                req = mpt->tgt_cmd_ptrs[ctxt_idx];
                if (req == NULL) {
                    mpt_prt(mpt, "no request backpointer "
                        "at index %u", ctxt_idx);
                    reply_desc = MPT_REPLY_EMPTY;
                    ntrips = 1000;
                    break;
                }
                /*
                 * Reformulate ctxt_idx to be just as if
                 * it were another type of context reply
                 * so the code below will find the request
                 * via indexing into the pool.
                 */
                ctxt_idx =
                    req->index | mpt->scsi_tgt_handler_id;
                req = NULL;
                break;
            case MPI_CONTEXT_REPLY_TYPE_LAN:
                mpt_prt(mpt, "LAN CONTEXT REPLY: 0x%08x\n",
                    reply_desc);
                reply_desc = MPT_REPLY_EMPTY;
                break;
            default:
                mpt_prt(mpt, "Context Reply 0x%08x?\n", type);
                reply_desc = MPT_REPLY_EMPTY;
                break;
            }
            if (reply_desc == MPT_REPLY_EMPTY) {
                if (ntrips++ > 1000) {
                    break;
                }
                continue;
            }
        }

        cb_index = MPT_CONTEXT_TO_CBI(ctxt_idx);
        req_index = MPT_CONTEXT_TO_REQI(ctxt_idx);
        if (req_index < MPT_MAX_REQUESTS(mpt)) {
            req = &mpt->request_pool[req_index];
        } else {
            mpt_prt(mpt, "WARN: mpt_intr index == %d (reply_desc =="
                " 0x%x)\n", req_index, reply_desc);
        }

        free_rf = mpt_reply_handlers[cb_index](mpt, req,
            reply_desc, reply_frame);

        if (reply_frame != NULL && free_rf) {
            mpt_free_reply(mpt, reply_baddr);
        }

        /*
         * If we got ourselves disabled, don't get stuck in a loop
         */
        if (mpt->disabled) {
            mpt_disable_ints(mpt);
            break;
        }
        if (ntrips++ > 1000) {
            break;
        }
    }
    mpt_lprt(mpt, MPT_PRT_DEBUG2, "exit mpt_intr\n");
}

/******************************* Error Recovery *******************************/
/*
 * Complete every request on 'chain' by synthesizing a reply frame carrying
 * 'iocstatus' and invoking each request's registered reply handler with it.
 */
void
mpt_complete_request_chain(struct mpt_softc *mpt, struct req_queue *chain,
               u_int iocstatus)
{
    MSG_DEFAULT_REPLY ioc_status_frame;
    request_t *req;

    memset(&ioc_status_frame, 0, sizeof(ioc_status_frame));
    ioc_status_frame.MsgLength = roundup2(sizeof(ioc_status_frame), 4);
    ioc_status_frame.IOCStatus = iocstatus;
    while((req = TAILQ_FIRST(chain)) != NULL) {
        MSG_REQUEST_HEADER *msg_hdr;
        u_int cb_index;

        TAILQ_REMOVE(chain, req, links);
        msg_hdr = (MSG_REQUEST_HEADER *)req->req_vbuf;
        ioc_status_frame.Function = msg_hdr->Function;
        ioc_status_frame.MsgContext = msg_hdr->MsgContext;
        cb_index =
            MPT_CONTEXT_TO_CBI(le32toh(msg_hdr->MsgContext));
        mpt_reply_handlers[cb_index](mpt, req, msg_hdr->MsgContext,
            &ioc_status_frame);
    }
}

/********************************* Diagnostics ********************************/
/*
 * Perform a diagnostic dump of a reply frame.
 */
void
mpt_dump_reply_frame(struct mpt_softc *mpt, MSG_DEFAULT_REPLY *reply_frame)
{
    mpt_prt(mpt, "Address Reply:\n");
    mpt_print_reply(reply_frame);
}

/******************************* Doorbell Access ******************************/
static __inline uint32_t mpt_rd_db(struct mpt_softc *mpt);
static __inline uint32_t mpt_rd_intr(struct mpt_softc *mpt);

/* Read the doorbell register. */
static __inline uint32_t
mpt_rd_db(struct mpt_softc *mpt)
{
    return mpt_read(mpt, MPT_OFFSET_DOORBELL);
}

/* Read the interrupt status register. */
static __inline uint32_t
mpt_rd_intr(struct mpt_softc *mpt)
{
    return mpt_read(mpt, MPT_OFFSET_INTR_STATUS);
}

/* Busy wait for a door bell to be read by IOC */
static int
mpt_wait_db_ack(struct mpt_softc *mpt)
{
    int i;
    for (i=0; i < MPT_MAX_WAIT; i++) {
        if (!MPT_DB_IS_BUSY(mpt_rd_intr(mpt))) {
            /* Track the longest wait seen, for diagnostics. */
            maxwait_ack = i > maxwait_ack ? i : maxwait_ack;
            return (MPT_OK);
        }
        DELAY(200);
    }
    return (MPT_FAIL);
}

/* Busy wait for a door bell interrupt */
static int
mpt_wait_db_int(struct mpt_softc *mpt)
{
    int i;
    for (i=0; i < MPT_MAX_WAIT; i++) {
        if (MPT_DB_INTR(mpt_rd_intr(mpt))) {
            maxwait_int = i > maxwait_int ? i : maxwait_int;
            return MPT_OK;
        }
        DELAY(100);
    }
    return (MPT_FAIL);
}

/* Check that the IOC doorbell reports the RUNNING state; complain if not. */
void
mpt_check_doorbell(struct mpt_softc *mpt)
{
    uint32_t db = mpt_rd_db(mpt);
    if (MPT_STATE(db) != MPT_DB_STATE_RUNNING) {
        mpt_prt(mpt, "Device not running\n");
        mpt_print_db(db);
    }
}

/* Wait for IOC to transition to a given state */
static int
mpt_wait_state(struct mpt_softc *mpt, enum DB_STATE_BITS state)
{
    int i;

    for (i = 0; i < MPT_MAX_WAIT; i++) {
        uint32_t db = mpt_rd_db(mpt);
        if (MPT_STATE(db) == state) {
            maxwait_state = i > maxwait_state ? i : maxwait_state;
            return (MPT_OK);
        }
        DELAY(100);
    }
    return (MPT_FAIL);
}


/************************* Initialization/Configuration ************************/
static int mpt_download_fw(struct mpt_softc *mpt);

/* Issue the reset COMMAND to the IOC */
static int
mpt_soft_reset(struct mpt_softc *mpt)
{
    mpt_lprt(mpt, MPT_PRT_DEBUG, "soft reset\n");

    /* Have to use hard reset if we are not in Running state */
    if (MPT_STATE(mpt_rd_db(mpt)) != MPT_DB_STATE_RUNNING) {
        mpt_prt(mpt, "soft reset failed: device not running\n");
        return (MPT_FAIL);
    }

    /* If door bell is in use we don't have a chance of getting
     * a word in since the IOC probably crashed in message
     * processing. So don't waste our time.
     */
    if (MPT_DB_IS_IN_USE(mpt_rd_db(mpt))) {
        mpt_prt(mpt, "soft reset failed: doorbell wedged\n");
        return (MPT_FAIL);
    }

    /* Send the reset request to the IOC */
    mpt_write(mpt, MPT_OFFSET_DOORBELL,
        MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET << MPI_DOORBELL_FUNCTION_SHIFT);
    if (mpt_wait_db_ack(mpt) != MPT_OK) {
        mpt_prt(mpt, "soft reset failed: ack timeout\n");
        return (MPT_FAIL);
    }

    /* Wait for the IOC to reload and come out of reset state */
    if (mpt_wait_state(mpt, MPT_DB_STATE_READY) != MPT_OK) {
        mpt_prt(mpt, "soft reset failed: device did not restart\n");
        return (MPT_FAIL);
    }

    return MPT_OK;
}

/*
 * Unlock the diagnostic register block by writing the magic key sequence,
 * retrying up to 20 times until the DRWE (write-enable) bit sticks.
 * Returns 0 on success or EIO if the chip never enables writes.
 */
static int
mpt_enable_diag_mode(struct mpt_softc *mpt)
{
    int try;

    try = 20;
    while (--try) {

        if ((mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC) & MPI_DIAG_DRWE) != 0)
            break;

        /* Enable diagnostic registers */
        mpt_write(mpt, MPT_OFFSET_SEQUENCE, 0xFF);
        mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_1ST_KEY_VALUE);
        mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_2ND_KEY_VALUE);
        mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_3RD_KEY_VALUE);
        mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_4TH_KEY_VALUE);
        mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_5TH_KEY_VALUE);

        DELAY(100000);
    }
    if (try == 0)
        return (EIO);
    return (0);
}

/* Relock the diagnostic register block. */
static void
mpt_disable_diag_mode(struct mpt_softc *mpt)
{
    mpt_write(mpt, MPT_OFFSET_SEQUENCE, 0xFFFFFFFF);
}

/* This is a magic diagnostic reset that resets all the ARM
 * processors in the chip.
 */
static void
mpt_hard_reset(struct mpt_softc *mpt)
{
    int error;
    int wait;
    uint32_t diagreg;

    mpt_lprt(mpt, MPT_PRT_DEBUG, "hard reset\n");

    error = mpt_enable_diag_mode(mpt);
    if (error) {
        mpt_prt(mpt, "WARNING - Could not enter diagnostic mode !\n");
        mpt_prt(mpt, "Trying to reset anyway.\n");
    }

    diagreg = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);

    /*
     * This appears to be a workaround required for some
     * firmware or hardware revs.
     */
    mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, diagreg | MPI_DIAG_DISABLE_ARM);
    DELAY(1000);

    /* Diag. port is now active so we can now hit the reset bit */
    mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, diagreg | MPI_DIAG_RESET_ADAPTER);

    /*
     * Ensure that the reset has finished.  We delay 1ms
     * prior to reading the register to make sure the chip
     * has sufficiently completed its reset to handle register
     * accesses.
     */
    wait = 5000;
    do {
        DELAY(1000);
        diagreg = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);
    } while (--wait && (diagreg & MPI_DIAG_RESET_ADAPTER) == 0);

    if (wait == 0) {
        mpt_prt(mpt, "WARNING - Failed hard reset! "
            "Trying to initialize anyway.\n");
    }

    /*
     * If we have firmware to download, it must be loaded before
     * the controller will become operational.  Do so now.
     */
    if (mpt->fw_image != NULL) {

        error = mpt_download_fw(mpt);

        if (error) {
            mpt_prt(mpt, "WARNING - Firmware Download Failed!\n");
            mpt_prt(mpt, "Trying to initialize anyway.\n");
        }
    }

    /*
     * Resetting the controller should have disabled write
     * access to the diagnostic registers, but disable
     * manually to be sure.
     */
    mpt_disable_diag_mode(mpt);
}

/* Core personality reset handler: fail out everything still pending. */
static void
mpt_core_ioc_reset(struct mpt_softc *mpt, int type)
{
    /*
     * Complete all pending requests with a status
     * appropriate for an IOC reset.
     */
    mpt_complete_request_chain(mpt, &mpt->request_pending_list,
                  MPI_IOCSTATUS_INVALID_STATE);
}


/*
 * Reset the IOC when needed. Try software command first then if needed
 * poke at the magic diagnostic reset. Note that a hard reset resets
 * *both* IOCs on dual function chips (FC929 && LSI1030) as well as
 * fouls up the PCI configuration registers.
 */
int
mpt_reset(struct mpt_softc *mpt, int reinit)
{
    struct mpt_personality *pers;
    int ret;
    int retry_cnt = 0;

    /*
     * Try a soft reset. If that fails, get out the big hammer.
     */
 again:
    if ((ret = mpt_soft_reset(mpt)) != MPT_OK) {
        int cnt;
        for (cnt = 0; cnt < 5; cnt++) {
            /* Failed; do a hard reset */
            mpt_hard_reset(mpt);

            /*
             * Wait for the IOC to reload
             * and come out of reset state
             */
            ret = mpt_wait_state(mpt, MPT_DB_STATE_READY);
            if (ret == MPT_OK) {
                break;
            }
            /*
             * Okay- try to check again...
             */
            ret = mpt_wait_state(mpt, MPT_DB_STATE_READY);
            if (ret == MPT_OK) {
                break;
            }
            mpt_prt(mpt, "mpt_reset: failed hard reset (%d:%d)\n",
                retry_cnt, cnt);
        }
    }

    if (retry_cnt == 0) {
        /*
         * Invoke reset handlers.  We bump the reset count so
         * that mpt_wait_req() understands that regardless of
         * the specified wait condition, it should stop its wait.
         */
        mpt->reset_cnt++;
        MPT_PERS_FOREACH(mpt, pers)
            pers->reset(mpt, ret);
    }

    if (reinit) {
        ret = mpt_enable_ioc(mpt, 1);
        if (ret == MPT_OK) {
            mpt_enable_ints(mpt);
        }
    }
    if (ret != MPT_OK && retry_cnt++ < 2) {
        goto again;
    }
    return ret;
}

/* Return a command buffer to the free queue */
void
mpt_free_request(struct mpt_softc *mpt, request_t *req)
{
    request_t *nxt;
    struct mpt_evtf_record *record;
    uint32_t reply_baddr;

    if (req == NULL || req != &mpt->request_pool[req->index]) {
        panic("mpt_free_request bad req ptr\n");
        return;
    }
    /* Free any chained continuation request first. */
    if ((nxt = req->chain) != NULL) {
        req->chain = NULL;
        mpt_free_request(mpt, nxt); /* NB: recursion */
    }
    KASSERT(req->state != REQ_STATE_FREE, ("freeing free request"));
    KASSERT(!(req->state & REQ_STATE_LOCKED), ("freeing locked request"));
    KASSERT(MPT_OWNED(mpt), ("mpt_free_request: mpt not locked\n"));
    KASSERT(mpt_req_on_free_list(mpt, req) == 0,
        ("mpt_free_request: req %p:%u func %x already on freelist",
        req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
    KASSERT(mpt_req_on_pending_list(mpt, req) == 0,
        ("mpt_free_request: req %p:%u func %x on pending list",
        req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
#ifdef INVARIANTS
    mpt_req_not_spcl(mpt, req, "mpt_free_request", __LINE__);
#endif

    req->ccb = NULL;
    if (LIST_EMPTY(&mpt->ack_frames)) {
        /*
         * Insert free ones at the tail
         */
        req->serno = 0;
        req->state = REQ_STATE_FREE;
#ifdef INVARIANTS
        memset(req->req_vbuf, 0xff, sizeof (MSG_REQUEST_HEADER));
#endif
        TAILQ_INSERT_TAIL(&mpt->request_free_list, req, links);
        if (mpt->getreqwaiter != 0) {
            mpt->getreqwaiter = 0;
            wakeup(&mpt->request_free_list);
        }
        return;
    }

    /*
     * Process an ack frame deferred due to resource shortage:
     * reuse this request immediately to send the pending EVENT_ACK
     * (see mpt_event_reply_handler), then free the parked reply frame.
     */
    record = LIST_FIRST(&mpt->ack_frames);
    LIST_REMOVE(record, links);
    req->state = REQ_STATE_ALLOCATED;
    mpt_assign_serno(mpt, req);
    mpt_send_event_ack(mpt, req, &record->reply, record->context);
    reply_baddr = (uint32_t)((uint8_t *)record - mpt->reply)
        + (mpt->reply_phys & 0xFFFFFFFF);
    mpt_free_reply(mpt, reply_baddr);
}

/* Get a command buffer from the free queue */
request_t *
mpt_get_request(struct mpt_softc *mpt, int sleep_ok)
{
    request_t *req;

retry:
    KASSERT(MPT_OWNED(mpt), ("mpt_get_request: mpt not locked\n"));
    req = TAILQ_FIRST(&mpt->request_free_list);
    if (req != NULL) {
        KASSERT(req == &mpt->request_pool[req->index],
            ("mpt_get_request: corrupted request free list\n"));
        KASSERT(req->state == REQ_STATE_FREE,
            ("req %p:%u not free on free list %x index %d function %x",
            req, req->serno, req->state, req->index,
            ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
        TAILQ_REMOVE(&mpt->request_free_list, req, links);
        req->state = REQ_STATE_ALLOCATED;
        req->chain = NULL;
        mpt_assign_serno(mpt, req);
    } else if (sleep_ok != 0) {
        /* Nothing free: sleep until mpt_free_request() wakes us. */
        mpt->getreqwaiter = 1;
        mpt_sleep(mpt, &mpt->request_free_list, PUSER, "mptgreq", 0);
        goto retry;
    }
    return (req);
}

/* Pass the command to the IOC */
void
mpt_send_cmd(struct mpt_softc *mpt, request_t *req)
{
    uint32_t *pReq;

    pReq = req->req_vbuf;
    if (mpt->verbose > MPT_PRT_TRACE) {
        int offset;
#if __FreeBSD_version >= 500000
        mpt_prt(mpt, "Send Request %d (%jx):",
            req->index, (uintmax_t) req->req_pbuf);
#else
        mpt_prt(mpt, "Send Request %d (%llx):",
            req->index, (unsigned long long) req->req_pbuf);
#endif
        for (offset = 0; offset < mpt->request_frame_size; offset++) {
            if ((offset & 0x7) == 0) {
                mpt_prtc(mpt, "\n");
                mpt_prt(mpt, " ");
            }
            mpt_prtc(mpt,
" %08x", pReq[offset]); 1237 } 1238 mpt_prtc(mpt, "\n"); 1239 } 1240 bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap, 1241 BUS_DMASYNC_PREWRITE); 1242 req->state |= REQ_STATE_QUEUED; 1243 KASSERT(mpt_req_on_free_list(mpt, req) == 0, 1244 ("req %p:%u func %x on freelist list in mpt_send_cmd", 1245 req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function)); 1246 KASSERT(mpt_req_on_pending_list(mpt, req) == 0, 1247 ("req %p:%u func %x already on pending list in mpt_send_cmd", 1248 req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function)); 1249 TAILQ_INSERT_HEAD(&mpt->request_pending_list, req, links); 1250 mpt_write(mpt, MPT_OFFSET_REQUEST_Q, (uint32_t) req->req_pbuf); 1251} 1252 1253/* 1254 * Wait for a request to complete. 1255 * 1256 * Inputs: 1257 * mpt softc of controller executing request 1258 * req request to wait for 1259 * sleep_ok nonzero implies may sleep in this context 1260 * time_ms timeout in ms. 0 implies no timeout. 1261 * 1262 * Return Values: 1263 * 0 Request completed 1264 * non-0 Timeout fired before request completion. 1265 */ 1266int 1267mpt_wait_req(struct mpt_softc *mpt, request_t *req, 1268 mpt_req_state_t state, mpt_req_state_t mask, 1269 int sleep_ok, int time_ms) 1270{ 1271 int error; 1272 int timeout; 1273 u_int saved_cnt; 1274 1275 /* 1276 * timeout is in ms. 0 indicates infinite wait. 1277 * Convert to ticks or 500us units depending on 1278 * our sleep mode. 
1279 */ 1280 if (sleep_ok != 0) { 1281 timeout = (time_ms * hz) / 1000; 1282 } else { 1283 timeout = time_ms * 2; 1284 } 1285 req->state |= REQ_STATE_NEED_WAKEUP; 1286 mask &= ~REQ_STATE_NEED_WAKEUP; 1287 saved_cnt = mpt->reset_cnt; 1288 while ((req->state & mask) != state && mpt->reset_cnt == saved_cnt) { 1289 if (sleep_ok != 0) { 1290 error = mpt_sleep(mpt, req, PUSER, "mptreq", timeout); 1291 if (error == EWOULDBLOCK) { 1292 timeout = 0; 1293 break; 1294 } 1295 } else { 1296 if (time_ms != 0 && --timeout == 0) { 1297 break; 1298 } 1299 DELAY(500); 1300 mpt_intr(mpt); 1301 } 1302 } 1303 req->state &= ~REQ_STATE_NEED_WAKEUP; 1304 if (mpt->reset_cnt != saved_cnt) { 1305 return (EIO); 1306 } 1307 if (time_ms && timeout <= 0) { 1308 MSG_REQUEST_HEADER *msg_hdr = req->req_vbuf; 1309 mpt_prt(mpt, "mpt_wait_req(%x) timed out\n", msg_hdr->Function); 1310 return (ETIMEDOUT); 1311 } 1312 return (0); 1313} 1314 1315/* 1316 * Send a command to the IOC via the handshake register. 1317 * 1318 * Only done at initialization time and for certain unusual 1319 * commands such as device/bus reset as specified by LSI. 1320 */ 1321int 1322mpt_send_handshake_cmd(struct mpt_softc *mpt, size_t len, void *cmd) 1323{ 1324 int i; 1325 uint32_t data, *data32; 1326 1327 /* Check condition of the IOC */ 1328 data = mpt_rd_db(mpt); 1329 if ((MPT_STATE(data) != MPT_DB_STATE_READY 1330 && MPT_STATE(data) != MPT_DB_STATE_RUNNING 1331 && MPT_STATE(data) != MPT_DB_STATE_FAULT) 1332 || MPT_DB_IS_IN_USE(data)) { 1333 mpt_prt(mpt, "handshake aborted - invalid doorbell state\n"); 1334 mpt_print_db(data); 1335 return (EBUSY); 1336 } 1337 1338 /* We move things in 32 bit chunks */ 1339 len = (len + 3) >> 2; 1340 data32 = cmd; 1341 1342 /* Clear any left over pending doorbell interupts */ 1343 if (MPT_DB_INTR(mpt_rd_intr(mpt))) 1344 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0); 1345 1346 /* 1347 * Tell the handshake reg. we are going to send a command 1348 * and how long it is going to be. 
1349 */ 1350 data = (MPI_FUNCTION_HANDSHAKE << MPI_DOORBELL_FUNCTION_SHIFT) | 1351 (len << MPI_DOORBELL_ADD_DWORDS_SHIFT); 1352 mpt_write(mpt, MPT_OFFSET_DOORBELL, data); 1353 1354 /* Wait for the chip to notice */ 1355 if (mpt_wait_db_int(mpt) != MPT_OK) { 1356 mpt_prt(mpt, "mpt_send_handshake_cmd timeout1\n"); 1357 return (ETIMEDOUT); 1358 } 1359 1360 /* Clear the interrupt */ 1361 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0); 1362 1363 if (mpt_wait_db_ack(mpt) != MPT_OK) { 1364 mpt_prt(mpt, "mpt_send_handshake_cmd timeout2\n"); 1365 return (ETIMEDOUT); 1366 } 1367 1368 /* Send the command */ 1369 for (i = 0; i < len; i++) { 1370 mpt_write(mpt, MPT_OFFSET_DOORBELL, *data32++); 1371 if (mpt_wait_db_ack(mpt) != MPT_OK) { 1372 mpt_prt(mpt, 1373 "mpt_send_handshake_cmd timeout! index = %d\n", 1374 i); 1375 return (ETIMEDOUT); 1376 } 1377 } 1378 return MPT_OK; 1379} 1380 1381/* Get the response from the handshake register */ 1382int 1383mpt_recv_handshake_reply(struct mpt_softc *mpt, size_t reply_len, void *reply) 1384{ 1385 int left, reply_left; 1386 u_int16_t *data16; 1387 MSG_DEFAULT_REPLY *hdr; 1388 1389 /* We move things out in 16 bit chunks */ 1390 reply_len >>= 1; 1391 data16 = (u_int16_t *)reply; 1392 1393 hdr = (MSG_DEFAULT_REPLY *)reply; 1394 1395 /* Get first word */ 1396 if (mpt_wait_db_int(mpt) != MPT_OK) { 1397 mpt_prt(mpt, "mpt_recv_handshake_cmd timeout1\n"); 1398 return ETIMEDOUT; 1399 } 1400 *data16++ = mpt_read(mpt, MPT_OFFSET_DOORBELL) & MPT_DB_DATA_MASK; 1401 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0); 1402 1403 /* Get Second Word */ 1404 if (mpt_wait_db_int(mpt) != MPT_OK) { 1405 mpt_prt(mpt, "mpt_recv_handshake_cmd timeout2\n"); 1406 return ETIMEDOUT; 1407 } 1408 *data16++ = mpt_read(mpt, MPT_OFFSET_DOORBELL) & MPT_DB_DATA_MASK; 1409 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0); 1410 1411 /* 1412 * With the second word, we can now look at the length. 
1413 * Warn about a reply that's too short (except for IOC FACTS REPLY) 1414 */ 1415 if ((reply_len >> 1) != hdr->MsgLength && 1416 (hdr->Function != MPI_FUNCTION_IOC_FACTS)){ 1417#if __FreeBSD_version >= 500000 1418 mpt_prt(mpt, "reply length does not match message length: " 1419 "got %x; expected %zx for function %x\n", 1420 hdr->MsgLength << 2, reply_len << 1, hdr->Function); 1421#else 1422 mpt_prt(mpt, "reply length does not match message length: " 1423 "got %x; expected %x for function %x\n", 1424 hdr->MsgLength << 2, reply_len << 1, hdr->Function); 1425#endif 1426 } 1427 1428 /* Get rest of the reply; but don't overflow the provided buffer */ 1429 left = (hdr->MsgLength << 1) - 2; 1430 reply_left = reply_len - 2; 1431 while (left--) { 1432 u_int16_t datum; 1433 1434 if (mpt_wait_db_int(mpt) != MPT_OK) { 1435 mpt_prt(mpt, "mpt_recv_handshake_cmd timeout3\n"); 1436 return ETIMEDOUT; 1437 } 1438 datum = mpt_read(mpt, MPT_OFFSET_DOORBELL); 1439 1440 if (reply_left-- > 0) 1441 *data16++ = datum & MPT_DB_DATA_MASK; 1442 1443 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0); 1444 } 1445 1446 /* One more wait & clear at the end */ 1447 if (mpt_wait_db_int(mpt) != MPT_OK) { 1448 mpt_prt(mpt, "mpt_recv_handshake_cmd timeout4\n"); 1449 return ETIMEDOUT; 1450 } 1451 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0); 1452 1453 if ((hdr->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { 1454 if (mpt->verbose >= MPT_PRT_TRACE) 1455 mpt_print_reply(hdr); 1456 return (MPT_FAIL | hdr->IOCStatus); 1457 } 1458 1459 return (0); 1460} 1461 1462static int 1463mpt_get_iocfacts(struct mpt_softc *mpt, MSG_IOC_FACTS_REPLY *freplp) 1464{ 1465 MSG_IOC_FACTS f_req; 1466 int error; 1467 1468 memset(&f_req, 0, sizeof f_req); 1469 f_req.Function = MPI_FUNCTION_IOC_FACTS; 1470 f_req.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE); 1471 error = mpt_send_handshake_cmd(mpt, sizeof f_req, &f_req); 1472 if (error) 1473 return(error); 1474 error = mpt_recv_handshake_reply(mpt, sizeof (*freplp), 
freplp); 1475 return (error); 1476} 1477 1478static int 1479mpt_get_portfacts(struct mpt_softc *mpt, MSG_PORT_FACTS_REPLY *freplp) 1480{ 1481 MSG_PORT_FACTS f_req; 1482 int error; 1483 1484 /* XXX: Only getting PORT FACTS for Port 0 */ 1485 memset(&f_req, 0, sizeof f_req); 1486 f_req.Function = MPI_FUNCTION_PORT_FACTS; 1487 f_req.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE); 1488 error = mpt_send_handshake_cmd(mpt, sizeof f_req, &f_req); 1489 if (error) 1490 return(error); 1491 error = mpt_recv_handshake_reply(mpt, sizeof (*freplp), freplp); 1492 return (error); 1493} 1494 1495/* 1496 * Send the initialization request. This is where we specify how many 1497 * SCSI busses and how many devices per bus we wish to emulate. 1498 * This is also the command that specifies the max size of the reply 1499 * frames from the IOC that we will be allocating. 1500 */ 1501static int 1502mpt_send_ioc_init(struct mpt_softc *mpt, uint32_t who) 1503{ 1504 int error = 0; 1505 MSG_IOC_INIT init; 1506 MSG_IOC_INIT_REPLY reply; 1507 1508 memset(&init, 0, sizeof init); 1509 init.WhoInit = who; 1510 init.Function = MPI_FUNCTION_IOC_INIT; 1511 if (mpt->is_fc) { 1512 init.MaxDevices = 255; 1513 } else if (mpt->is_sas) { 1514 init.MaxDevices = mpt->mpt_max_devices; 1515 } else { 1516 init.MaxDevices = 16; 1517 } 1518 init.MaxBuses = 1; 1519 1520 init.MsgVersion = htole16(MPI_VERSION); 1521 init.HeaderVersion = htole16(MPI_HEADER_VERSION); 1522 init.ReplyFrameSize = htole16(MPT_REPLY_SIZE); 1523 init.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE); 1524 1525 if ((error = mpt_send_handshake_cmd(mpt, sizeof init, &init)) != 0) { 1526 return(error); 1527 } 1528 1529 error = mpt_recv_handshake_reply(mpt, sizeof reply, &reply); 1530 return (error); 1531} 1532 1533 1534/* 1535 * Utiltity routine to read configuration headers and pages 1536 */ 1537int 1538mpt_issue_cfg_req(struct mpt_softc *mpt, request_t *req, u_int Action, 1539 u_int PageVersion, u_int PageLength, u_int PageNumber, 1540 u_int 
PageType, uint32_t PageAddress, bus_addr_t addr, 1541 bus_size_t len, int sleep_ok, int timeout_ms) 1542{ 1543 MSG_CONFIG *cfgp; 1544 SGE_SIMPLE32 *se; 1545 1546 cfgp = req->req_vbuf; 1547 memset(cfgp, 0, sizeof *cfgp); 1548 cfgp->Action = Action; 1549 cfgp->Function = MPI_FUNCTION_CONFIG; 1550 cfgp->Header.PageVersion = PageVersion; 1551 cfgp->Header.PageLength = PageLength; 1552 cfgp->Header.PageNumber = PageNumber; 1553 cfgp->Header.PageType = PageType; 1554 cfgp->PageAddress = PageAddress; 1555 se = (SGE_SIMPLE32 *)&cfgp->PageBufferSGE; 1556 se->Address = addr; 1557 MPI_pSGE_SET_LENGTH(se, len); 1558 MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT | 1559 MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER | 1560 MPI_SGE_FLAGS_END_OF_LIST | 1561 ((Action == MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT 1562 || Action == MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM) 1563 ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST))); 1564 cfgp->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_CONFIG); 1565 1566 mpt_check_doorbell(mpt); 1567 mpt_send_cmd(mpt, req); 1568 return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, 1569 sleep_ok, timeout_ms)); 1570} 1571 1572 1573int 1574mpt_read_cfg_header(struct mpt_softc *mpt, int PageType, int PageNumber, 1575 uint32_t PageAddress, CONFIG_PAGE_HEADER *rslt, 1576 int sleep_ok, int timeout_ms) 1577{ 1578 request_t *req; 1579 MSG_CONFIG *cfgp; 1580 int error; 1581 1582 req = mpt_get_request(mpt, sleep_ok); 1583 if (req == NULL) { 1584 mpt_prt(mpt, "mpt_read_cfg_header: Get request failed!\n"); 1585 return (ENOMEM); 1586 } 1587 1588 error = mpt_issue_cfg_req(mpt, req, MPI_CONFIG_ACTION_PAGE_HEADER, 1589 /*PageVersion*/0, /*PageLength*/0, PageNumber, 1590 PageType, PageAddress, /*addr*/0, /*len*/0, 1591 sleep_ok, timeout_ms); 1592 if (error != 0) { 1593 mpt_free_request(mpt, req); 1594 mpt_prt(mpt, "read_cfg_header timed out\n"); 1595 return (ETIMEDOUT); 1596 } 1597 1598 switch (req->IOCStatus & MPI_IOCSTATUS_MASK) { 1599 
case MPI_IOCSTATUS_SUCCESS: 1600 cfgp = req->req_vbuf; 1601 bcopy(&cfgp->Header, rslt, sizeof(*rslt)); 1602 error = 0; 1603 break; 1604 case MPI_IOCSTATUS_CONFIG_INVALID_PAGE: 1605 mpt_lprt(mpt, MPT_PRT_DEBUG, 1606 "Invalid Page Type %d Number %d Addr 0x%0x\n", 1607 PageType, PageNumber, PageAddress); 1608 error = EINVAL; 1609 break; 1610 default: 1611 mpt_prt(mpt, "mpt_read_cfg_header: Config Info Status %x\n", 1612 req->IOCStatus); 1613 error = EIO; 1614 break; 1615 } 1616 mpt_free_request(mpt, req); 1617 return (error); 1618} 1619 1620int 1621mpt_read_cfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress, 1622 CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok, 1623 int timeout_ms) 1624{ 1625 request_t *req; 1626 int error; 1627 1628 req = mpt_get_request(mpt, sleep_ok); 1629 if (req == NULL) { 1630 mpt_prt(mpt, "mpt_read_cfg_page: Get request failed!\n"); 1631 return (-1); 1632 } 1633 1634 error = mpt_issue_cfg_req(mpt, req, Action, hdr->PageVersion, 1635 hdr->PageLength, hdr->PageNumber, 1636 hdr->PageType & MPI_CONFIG_PAGETYPE_MASK, 1637 PageAddress, req->req_pbuf + MPT_RQSL(mpt), 1638 len, sleep_ok, timeout_ms); 1639 if (error != 0) { 1640 mpt_prt(mpt, "read_cfg_page(%d) timed out\n", Action); 1641 return (-1); 1642 } 1643 1644 if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { 1645 mpt_prt(mpt, "mpt_read_cfg_page: Config Info Status %x\n", 1646 req->IOCStatus); 1647 mpt_free_request(mpt, req); 1648 return (-1); 1649 } 1650 bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap, 1651 BUS_DMASYNC_POSTREAD); 1652 memcpy(hdr, ((uint8_t *)req->req_vbuf)+MPT_RQSL(mpt), len); 1653 mpt_free_request(mpt, req); 1654 return (0); 1655} 1656 1657int 1658mpt_write_cfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress, 1659 CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok, 1660 int timeout_ms) 1661{ 1662 request_t *req; 1663 u_int hdr_attr; 1664 int error; 1665 1666 hdr_attr = hdr->PageType & MPI_CONFIG_PAGEATTR_MASK; 1667 if 
(hdr_attr != MPI_CONFIG_PAGEATTR_CHANGEABLE && 1668 hdr_attr != MPI_CONFIG_PAGEATTR_PERSISTENT) { 1669 mpt_prt(mpt, "page type 0x%x not changeable\n", 1670 hdr->PageType & MPI_CONFIG_PAGETYPE_MASK); 1671 return (-1); 1672 } 1673 hdr->PageType &= MPI_CONFIG_PAGETYPE_MASK, 1674 1675 req = mpt_get_request(mpt, sleep_ok); 1676 if (req == NULL) 1677 return (-1); 1678 1679 memcpy(((caddr_t)req->req_vbuf)+MPT_RQSL(mpt), hdr, len); 1680 /* Restore stripped out attributes */ 1681 hdr->PageType |= hdr_attr; 1682 1683 error = mpt_issue_cfg_req(mpt, req, Action, hdr->PageVersion, 1684 hdr->PageLength, hdr->PageNumber, 1685 hdr->PageType & MPI_CONFIG_PAGETYPE_MASK, 1686 PageAddress, req->req_pbuf + MPT_RQSL(mpt), 1687 len, sleep_ok, timeout_ms); 1688 if (error != 0) { 1689 mpt_prt(mpt, "mpt_write_cfg_page timed out\n"); 1690 return (-1); 1691 } 1692 1693 if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { 1694 mpt_prt(mpt, "mpt_write_cfg_page: Config Info Status %x\n", 1695 req->IOCStatus); 1696 mpt_free_request(mpt, req); 1697 return (-1); 1698 } 1699 mpt_free_request(mpt, req); 1700 return (0); 1701} 1702 1703/* 1704 * Read IOC configuration information 1705 */ 1706static int 1707mpt_read_config_info_ioc(struct mpt_softc *mpt) 1708{ 1709 CONFIG_PAGE_HEADER hdr; 1710 struct mpt_raid_volume *mpt_raid; 1711 int rv; 1712 int i; 1713 size_t len; 1714 1715 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC, 1716 /*PageNumber*/2, /*PageAddress*/0, &hdr, 1717 /*sleep_ok*/FALSE, /*timeout_ms*/5000); 1718 /* 1719 * If it's an invalid page, so what? Not a supported function.... 
1720 */ 1721 if (rv == EINVAL) 1722 return (0); 1723 if (rv) 1724 return (rv); 1725 1726#if __FreeBSD_version >= 500000 1727 mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC Page 2 Header: ver %x, len %zx, " 1728 "num %x, type %x\n", hdr.PageVersion, 1729 hdr.PageLength * sizeof(uint32_t), 1730 hdr.PageNumber, hdr.PageType); 1731#else 1732 mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC Page 2 Header: ver %x, len %z, " 1733 "num %x, type %x\n", hdr.PageVersion, 1734 hdr.PageLength * sizeof(uint32_t), 1735 hdr.PageNumber, hdr.PageType); 1736#endif 1737 1738 len = hdr.PageLength * sizeof(uint32_t); 1739 mpt->ioc_page2 = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); 1740 if (mpt->ioc_page2 == NULL) 1741 return (ENOMEM); 1742 memcpy(&mpt->ioc_page2->Header, &hdr, sizeof(hdr)); 1743 rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0, 1744 &mpt->ioc_page2->Header, len, 1745 /*sleep_ok*/FALSE, /*timeout_ms*/5000); 1746 if (rv) { 1747 mpt_prt(mpt, "failed to read IOC Page 2\n"); 1748 } else if (mpt->ioc_page2->CapabilitiesFlags != 0) { 1749 uint32_t mask; 1750 1751 mpt_prt(mpt, "Capabilities: ("); 1752 for (mask = 1; mask != 0; mask <<= 1) { 1753 if ((mpt->ioc_page2->CapabilitiesFlags & mask) == 0) 1754 continue; 1755 1756 switch (mask) { 1757 case MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT: 1758 mpt_prtc(mpt, " RAID-0"); 1759 break; 1760 case MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT: 1761 mpt_prtc(mpt, " RAID-1E"); 1762 break; 1763 case MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT: 1764 mpt_prtc(mpt, " RAID-1"); 1765 break; 1766 case MPI_IOCPAGE2_CAP_FLAGS_SES_SUPPORT: 1767 mpt_prtc(mpt, " SES"); 1768 break; 1769 case MPI_IOCPAGE2_CAP_FLAGS_SAFTE_SUPPORT: 1770 mpt_prtc(mpt, " SAFTE"); 1771 break; 1772 case MPI_IOCPAGE2_CAP_FLAGS_CROSS_CHANNEL_SUPPORT: 1773 mpt_prtc(mpt, " Multi-Channel-Arrays"); 1774 default: 1775 break; 1776 } 1777 } 1778 mpt_prtc(mpt, " )\n"); 1779 if ((mpt->ioc_page2->CapabilitiesFlags 1780 & (MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT 1781 | MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT 1782 | MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT)) 
!= 0) { 1783 mpt_prt(mpt, "%d Active Volume%s(%d Max)\n", 1784 mpt->ioc_page2->NumActiveVolumes, 1785 mpt->ioc_page2->NumActiveVolumes != 1 1786 ? "s " : " ", 1787 mpt->ioc_page2->MaxVolumes); 1788 mpt_prt(mpt, "%d Hidden Drive Member%s(%d Max)\n", 1789 mpt->ioc_page2->NumActivePhysDisks, 1790 mpt->ioc_page2->NumActivePhysDisks != 1 1791 ? "s " : " ", 1792 mpt->ioc_page2->MaxPhysDisks); 1793 } 1794 } 1795 1796 len = mpt->ioc_page2->MaxVolumes * sizeof(struct mpt_raid_volume); 1797 mpt->raid_volumes = malloc(len, M_DEVBUF, M_NOWAIT); 1798 if (mpt->raid_volumes == NULL) { 1799 mpt_prt(mpt, "Could not allocate RAID volume data\n"); 1800 } else { 1801 memset(mpt->raid_volumes, 0, len); 1802 } 1803 1804 /* 1805 * Copy critical data out of ioc_page2 so that we can 1806 * safely refresh the page without windows of unreliable 1807 * data. 1808 */ 1809 mpt->raid_max_volumes = mpt->ioc_page2->MaxVolumes; 1810 1811 len = sizeof(*mpt->raid_volumes->config_page) 1812 + (sizeof(RAID_VOL0_PHYS_DISK)*(mpt->ioc_page2->MaxPhysDisks - 1)); 1813 for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) { 1814 mpt_raid = &mpt->raid_volumes[i]; 1815 mpt_raid->config_page = malloc(len, M_DEVBUF, M_NOWAIT); 1816 if (mpt_raid->config_page == NULL) { 1817 mpt_prt(mpt, "Could not allocate RAID page data\n"); 1818 break; 1819 } 1820 memset(mpt_raid->config_page, 0, len); 1821 } 1822 mpt->raid_page0_len = len; 1823 1824 len = mpt->ioc_page2->MaxPhysDisks * sizeof(struct mpt_raid_disk); 1825 mpt->raid_disks = malloc(len, M_DEVBUF, M_NOWAIT); 1826 if (mpt->raid_disks == NULL) { 1827 mpt_prt(mpt, "Could not allocate RAID disk data\n"); 1828 } else { 1829 memset(mpt->raid_disks, 0, len); 1830 } 1831 1832 mpt->raid_max_disks = mpt->ioc_page2->MaxPhysDisks; 1833 1834 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC, 1835 /*PageNumber*/3, /*PageAddress*/0, &hdr, 1836 /*sleep_ok*/FALSE, /*timeout_ms*/5000); 1837 if (rv) 1838 return (EIO); 1839 1840 mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC Page 3 Header: %x %x %x 
%x\n", 1841 hdr.PageVersion, hdr.PageLength, hdr.PageNumber, hdr.PageType); 1842 1843 if (mpt->ioc_page3 != NULL) 1844 free(mpt->ioc_page3, M_DEVBUF); 1845 len = hdr.PageLength * sizeof(uint32_t); 1846 mpt->ioc_page3 = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); 1847 if (mpt->ioc_page3 == NULL) 1848 return (-1); 1849 memcpy(&mpt->ioc_page3->Header, &hdr, sizeof(hdr)); 1850 rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0, 1851 &mpt->ioc_page3->Header, len, 1852 /*sleep_ok*/FALSE, /*timeout_ms*/5000); 1853 if (rv) { 1854 mpt_prt(mpt, "failed to read IOC Page 3\n"); 1855 } 1856 1857 mpt_raid_wakeup(mpt); 1858 1859 return (0); 1860} 1861 1862/* 1863 * Enable IOC port 1864 */ 1865static int 1866mpt_send_port_enable(struct mpt_softc *mpt, int port) 1867{ 1868 request_t *req; 1869 MSG_PORT_ENABLE *enable_req; 1870 int error; 1871 1872 req = mpt_get_request(mpt, /*sleep_ok*/FALSE); 1873 if (req == NULL) 1874 return (-1); 1875 1876 enable_req = req->req_vbuf; 1877 memset(enable_req, 0, MPT_RQSL(mpt)); 1878 1879 enable_req->Function = MPI_FUNCTION_PORT_ENABLE; 1880 enable_req->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_CONFIG); 1881 enable_req->PortNumber = port; 1882 1883 mpt_check_doorbell(mpt); 1884 mpt_lprt(mpt, MPT_PRT_DEBUG, "enabling port %d\n", port); 1885 1886 mpt_send_cmd(mpt, req); 1887 error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, 1888 FALSE, (mpt->is_sas || mpt->is_fc)? 30000 : 3000); 1889 if (error != 0) { 1890 mpt_prt(mpt, "port %d enable timed out\n", port); 1891 return (-1); 1892 } 1893 mpt_free_request(mpt, req); 1894 mpt_lprt(mpt, MPT_PRT_DEBUG, "enabled port %d\n", port); 1895 return (0); 1896} 1897 1898/* 1899 * Enable/Disable asynchronous event reporting. 
1900 */ 1901static int 1902mpt_send_event_request(struct mpt_softc *mpt, int onoff) 1903{ 1904 request_t *req; 1905 MSG_EVENT_NOTIFY *enable_req; 1906 1907 req = mpt_get_request(mpt, FALSE); 1908 if (req == NULL) { 1909 return (ENOMEM); 1910 } 1911 enable_req = req->req_vbuf; 1912 memset(enable_req, 0, sizeof *enable_req); 1913 1914 enable_req->Function = MPI_FUNCTION_EVENT_NOTIFICATION; 1915 enable_req->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_EVENTS); 1916 enable_req->Switch = onoff; 1917 1918 mpt_check_doorbell(mpt); 1919 mpt_lprt(mpt, MPT_PRT_DEBUG, "%sabling async events\n", 1920 onoff ? "en" : "dis"); 1921 /* 1922 * Send the command off, but don't wait for it. 1923 */ 1924 mpt_send_cmd(mpt, req); 1925 return (0); 1926} 1927 1928/* 1929 * Un-mask the interupts on the chip. 1930 */ 1931void 1932mpt_enable_ints(struct mpt_softc *mpt) 1933{ 1934 /* Unmask every thing except door bell int */ 1935 mpt_write(mpt, MPT_OFFSET_INTR_MASK, MPT_INTR_DB_MASK); 1936} 1937 1938/* 1939 * Mask the interupts on the chip. 
1940 */ 1941void 1942mpt_disable_ints(struct mpt_softc *mpt) 1943{ 1944 /* Mask all interrupts */ 1945 mpt_write(mpt, MPT_OFFSET_INTR_MASK, 1946 MPT_INTR_REPLY_MASK | MPT_INTR_DB_MASK); 1947} 1948 1949static void 1950mpt_sysctl_attach(struct mpt_softc *mpt) 1951{ 1952#if __FreeBSD_version >= 500000 1953 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev); 1954 struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev); 1955 1956 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 1957 "debug", CTLFLAG_RW, &mpt->verbose, 0, 1958 "Debugging/Verbose level"); 1959#endif 1960} 1961 1962int 1963mpt_attach(struct mpt_softc *mpt) 1964{ 1965 struct mpt_personality *pers; 1966 int i; 1967 int error; 1968 1969 for (i = 0; i < MPT_MAX_PERSONALITIES; i++) { 1970 pers = mpt_personalities[i]; 1971 if (pers == NULL) { 1972 continue; 1973 } 1974 if (pers->probe(mpt) == 0) { 1975 error = pers->attach(mpt); 1976 if (error != 0) { 1977 mpt_detach(mpt); 1978 return (error); 1979 } 1980 mpt->mpt_pers_mask |= (0x1 << pers->id); 1981 pers->use_count++; 1982 } 1983 } 1984 1985 /* 1986 * Now that we've attached everything, do the enable function 1987 * for all of the personalities. This allows the personalities 1988 * to do setups that are appropriate for them prior to enabling 1989 * any ports. 
1990 */ 1991 for (i = 0; i < MPT_MAX_PERSONALITIES; i++) { 1992 pers = mpt_personalities[i]; 1993 if (pers != NULL && MPT_PERS_ATTACHED(pers, mpt) != 0) { 1994 error = pers->enable(mpt); 1995 if (error != 0) { 1996 mpt_prt(mpt, "personality %s attached but would" 1997 " not enable (%d)\n", pers->name, error); 1998 mpt_detach(mpt); 1999 return (error); 2000 } 2001 } 2002 } 2003 return (0); 2004} 2005 2006int 2007mpt_shutdown(struct mpt_softc *mpt) 2008{ 2009 struct mpt_personality *pers; 2010 2011 MPT_PERS_FOREACH_REVERSE(mpt, pers) { 2012 pers->shutdown(mpt); 2013 } 2014 return (0); 2015} 2016 2017int 2018mpt_detach(struct mpt_softc *mpt) 2019{ 2020 struct mpt_personality *pers; 2021 2022 MPT_PERS_FOREACH_REVERSE(mpt, pers) { 2023 pers->detach(mpt); 2024 mpt->mpt_pers_mask &= ~(0x1 << pers->id); 2025 pers->use_count--; 2026 } 2027 2028 return (0); 2029} 2030 2031int 2032mpt_core_load(struct mpt_personality *pers) 2033{ 2034 int i; 2035 2036 /* 2037 * Setup core handlers and insert the default handler 2038 * into all "empty slots". 2039 */ 2040 for (i = 0; i < MPT_NUM_REPLY_HANDLERS; i++) { 2041 mpt_reply_handlers[i] = mpt_default_reply_handler; 2042 } 2043 2044 mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_EVENTS)] = 2045 mpt_event_reply_handler; 2046 mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_CONFIG)] = 2047 mpt_config_reply_handler; 2048 mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_HANDSHAKE)] = 2049 mpt_handshake_reply_handler; 2050 return (0); 2051} 2052 2053/* 2054 * Initialize per-instance driver data and perform 2055 * initial controller configuration. 
2056 */ 2057int 2058mpt_core_attach(struct mpt_softc *mpt) 2059{ 2060 int val; 2061 int error; 2062 2063 2064 LIST_INIT(&mpt->ack_frames); 2065 2066 /* Put all request buffers on the free list */ 2067 TAILQ_INIT(&mpt->request_pending_list); 2068 TAILQ_INIT(&mpt->request_free_list); 2069 TAILQ_INIT(&mpt->request_timeout_list); 2070 for (val = 0; val < MPT_MAX_REQUESTS(mpt); val++) { 2071 request_t *req = &mpt->request_pool[val]; 2072 req->state = REQ_STATE_ALLOCATED; 2073 mpt_free_request(mpt, req); 2074 } 2075 2076 for (val = 0; val < MPT_MAX_LUNS; val++) { 2077 STAILQ_INIT(&mpt->trt[val].atios); 2078 STAILQ_INIT(&mpt->trt[val].inots); 2079 } 2080 STAILQ_INIT(&mpt->trt_wildcard.atios); 2081 STAILQ_INIT(&mpt->trt_wildcard.inots); 2082 2083 mpt->scsi_tgt_handler_id = MPT_HANDLER_ID_NONE; 2084 2085 mpt_sysctl_attach(mpt); 2086 2087 mpt_lprt(mpt, MPT_PRT_DEBUG, "doorbell req = %s\n", 2088 mpt_ioc_diag(mpt_read(mpt, MPT_OFFSET_DOORBELL))); 2089 2090 error = mpt_configure_ioc(mpt); 2091 2092 return (error); 2093} 2094 2095int 2096mpt_core_enable(struct mpt_softc *mpt) 2097{ 2098 /* 2099 * We enter with the IOC enabled, but async events 2100 * not enabled, ports not enabled and interrupts 2101 * not enabled. 2102 */ 2103 2104 /* 2105 * Enable asynchronous event reporting- all personalities 2106 * have attached so that they should be able to now field 2107 * async events. 2108 */ 2109 mpt_send_event_request(mpt, 1); 2110 2111 /* 2112 * Catch any pending interrupts 2113 * 2114 * This seems to be crucial- otherwise 2115 * the portenable below times out. 2116 */ 2117 mpt_intr(mpt); 2118 2119 /* 2120 * Enable Interrupts 2121 */ 2122 mpt_enable_ints(mpt); 2123 2124 /* 2125 * Catch any pending interrupts 2126 * 2127 * This seems to be crucial- otherwise 2128 * the portenable below times out. 2129 */ 2130 mpt_intr(mpt); 2131 2132 /* 2133 * Enable the port. 
2134 */ 2135 if (mpt_send_port_enable(mpt, 0) != MPT_OK) { 2136 mpt_prt(mpt, "failed to enable port 0\n"); 2137 return (ENXIO); 2138 } 2139 return (0); 2140} 2141 2142void 2143mpt_core_shutdown(struct mpt_softc *mpt) 2144{ 2145 mpt_disable_ints(mpt); 2146} 2147 2148void 2149mpt_core_detach(struct mpt_softc *mpt) 2150{ 2151 mpt_disable_ints(mpt); 2152} 2153 2154int 2155mpt_core_unload(struct mpt_personality *pers) 2156{ 2157 /* Unload is always successfull. */ 2158 return (0); 2159} 2160 2161#define FW_UPLOAD_REQ_SIZE \ 2162 (sizeof(MSG_FW_UPLOAD) - sizeof(SGE_MPI_UNION) \ 2163 + sizeof(FW_UPLOAD_TCSGE) + sizeof(SGE_SIMPLE32)) 2164 2165static int 2166mpt_upload_fw(struct mpt_softc *mpt) 2167{ 2168 uint8_t fw_req_buf[FW_UPLOAD_REQ_SIZE]; 2169 MSG_FW_UPLOAD_REPLY fw_reply; 2170 MSG_FW_UPLOAD *fw_req; 2171 FW_UPLOAD_TCSGE *tsge; 2172 SGE_SIMPLE32 *sge; 2173 uint32_t flags; 2174 int error; 2175 2176 memset(&fw_req_buf, 0, sizeof(fw_req_buf)); 2177 fw_req = (MSG_FW_UPLOAD *)fw_req_buf; 2178 fw_req->ImageType = MPI_FW_UPLOAD_ITYPE_FW_IOC_MEM; 2179 fw_req->Function = MPI_FUNCTION_FW_UPLOAD; 2180 fw_req->MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE); 2181 tsge = (FW_UPLOAD_TCSGE *)&fw_req->SGL; 2182 tsge->DetailsLength = 12; 2183 tsge->Flags = MPI_SGE_FLAGS_TRANSACTION_ELEMENT; 2184 tsge->ImageSize = htole32(mpt->fw_image_size); 2185 sge = (SGE_SIMPLE32 *)(tsge + 1); 2186 flags = (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER 2187 | MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_SIMPLE_ELEMENT 2188 | MPI_SGE_FLAGS_32_BIT_ADDRESSING | MPI_SGE_FLAGS_IOC_TO_HOST); 2189 flags <<= MPI_SGE_FLAGS_SHIFT; 2190 sge->FlagsLength = htole32(flags | mpt->fw_image_size); 2191 sge->Address = htole32(mpt->fw_phys); 2192 error = mpt_send_handshake_cmd(mpt, sizeof(fw_req_buf), &fw_req_buf); 2193 if (error) 2194 return(error); 2195 error = mpt_recv_handshake_reply(mpt, sizeof(fw_reply), &fw_reply); 2196 return (error); 2197} 2198 2199static void 2200mpt_diag_outsl(struct mpt_softc 
*mpt, uint32_t addr,
	       uint32_t *data, bus_size_t len)
{
	uint32_t *data_end;

	/* Round a byte length up to whole 32-bit words. */
	data_end = data + (roundup2(len, sizeof(uint32_t)) / 4);
	/* Diagnostic window access requires port I/O; enable it temporarily. */
	pci_enable_io(mpt->dev, SYS_RES_IOPORT);
	mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, addr);
	while (data != data_end) {
		mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, *data);
		data++;
	}
	pci_disable_io(mpt->dev, SYS_RES_IOPORT);
}

/*
 * Re-download the previously uploaded firmware image (mpt->fw_image)
 * into IOC memory through the diagnostic interface, point the IOP
 * reset vector at it, and restart the processor.  Returns 0 on
 * success or EIO if diagnostic mode could not be entered.
 */
static int
mpt_download_fw(struct mpt_softc *mpt)
{
	MpiFwHeader_t *fw_hdr;
	int error;
	uint32_t ext_offset;
	uint32_t data;

	mpt_prt(mpt, "Downloading Firmware - Image Size %d\n",
	    mpt->fw_image_size);

	error = mpt_enable_diag_mode(mpt);
	if (error != 0) {
		mpt_prt(mpt, "Could not enter diagnostic mode!\n");
		return (EIO);
	}

	/* Halt the IOC's ARM processor while we rewrite its memory. */
	mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC,
	    MPI_DIAG_RW_ENABLE|MPI_DIAG_DISABLE_ARM);

	/* Write the main image at its load address. */
	fw_hdr = (MpiFwHeader_t *)mpt->fw_image;
	mpt_diag_outsl(mpt, fw_hdr->LoadStartAddress, (uint32_t*)fw_hdr,
	    fw_hdr->ImageSize);

	/* Follow the chain of extended images, if any, and write each. */
	ext_offset = fw_hdr->NextImageHeaderOffset;
	while (ext_offset != 0) {
		MpiExtImageHeader_t *ext;

		ext = (MpiExtImageHeader_t *)((uintptr_t)fw_hdr + ext_offset);
		ext_offset = ext->NextImageHeaderOffset;

		mpt_diag_outsl(mpt, ext->LoadStartAddress, (uint32_t*)ext,
		    ext->ImageSize);
	}

	pci_enable_io(mpt->dev, SYS_RES_IOPORT);
	/* Setup the address to jump to on reset. */
	mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, fw_hdr->IopResetRegAddr);
	mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, fw_hdr->IopResetVectorValue);

	/*
	 * The controller sets the "flash bad" status after attempting
	 * to auto-boot from flash.  Clear the status so that the controller
	 * will continue the boot process with our newly installed firmware.
	 */
	mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, MPT_DIAG_MEM_CFG_BASE);
	data = mpt_pio_read(mpt, MPT_OFFSET_DIAG_DATA) | MPT_DIAG_MEM_CFG_BADFL;
	mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, MPT_DIAG_MEM_CFG_BASE);
	mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, data);

	pci_disable_io(mpt->dev, SYS_RES_IOPORT);

	/*
	 * Re-enable the processor and clear the boot halt flag.
	 */
	data = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);
	data &= ~(MPI_DIAG_PREVENT_IOC_BOOT|MPI_DIAG_DISABLE_ARM);
	mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, data);

	mpt_disable_diag_mode(mpt);
	return (0);
}

/*
 * Allocate/Initialize data structures for the controller.  Called
 * once at instance startup.
 */
static int
mpt_configure_ioc(struct mpt_softc *mpt)
{
	MSG_PORT_FACTS_REPLY pfp;
	MSG_IOC_FACTS_REPLY facts;
	int try;
	int needreset;
	uint32_t max_chain_depth;

	needreset = 0;
	/* Retry the whole bring-up sequence up to MPT_MAX_TRYS times. */
	for (try = 0; try < MPT_MAX_TRYS; try++) {

		/*
		 * No need to reset if the IOC is already in the READY state.
		 *
		 * Force reset if initialization failed previously.
		 * Note that a hard_reset of the second channel of a '929
		 * will stop operation of the first channel.  Hopefully, if the
		 * first channel is ok, the second will not require a hard
		 * reset.
		 */
		if (needreset || MPT_STATE(mpt_rd_db(mpt)) !=
		    MPT_DB_STATE_READY) {
			if (mpt_reset(mpt, FALSE) != MPT_OK) {
				continue;
			}
		}
		needreset = 0;

		if (mpt_get_iocfacts(mpt, &facts) != MPT_OK) {
			mpt_prt(mpt, "mpt_get_iocfacts failed\n");
			needreset = 1;
			continue;
		}

		/* Cache the facts the rest of the driver depends on. */
		mpt->mpt_global_credits = le16toh(facts.GlobalCredits);
		mpt->request_frame_size = le16toh(facts.RequestFrameSize);
		mpt->ioc_facts_flags = facts.Flags;
		mpt_prt(mpt, "MPI Version=%d.%d.%d.%d\n",
			    le16toh(facts.MsgVersion) >> 8,
			    le16toh(facts.MsgVersion) & 0xFF,
			    le16toh(facts.HeaderVersion) >> 8,
			    le16toh(facts.HeaderVersion) & 0xFF);

		/*
		 * Now that we know request frame size, we can calculate
		 * the actual (reasonable) segment limit for read/write I/O.
		 *
		 * This limit is constrained by:
		 *
		 *  + The size of each area we allocate per command (and how
		 *    many chain segments we can fit into it).
		 *  + The total number of areas we've set up.
		 *  + The actual chain depth the card will allow.
		 *
		 * The first area's segment count is limited by the I/O request
		 * at the head of it. We cannot allocate realistically more
		 * than MPT_MAX_REQUESTS areas. Therefore, to account for both
		 * conditions, we'll just start out with MPT_MAX_REQUESTS-2.
		 *
		 */
		max_chain_depth = facts.MaxChainDepth;

		/* total number of request areas we (can) allocate */
		mpt->max_seg_cnt = MPT_MAX_REQUESTS(mpt) - 2;

		/* converted to the number of chain areas possible */
		mpt->max_seg_cnt *= MPT_NRFM(mpt);

		/* limited by the number of chain areas the card will support */
		if (mpt->max_seg_cnt > max_chain_depth) {
			mpt_lprt(mpt, MPT_PRT_DEBUG,
			    "chain depth limited to %u (from %u)\n",
			    max_chain_depth, mpt->max_seg_cnt);
			mpt->max_seg_cnt = max_chain_depth;
		}

		/* converted to the number of simple sges in chain segments. */
		mpt->max_seg_cnt *= (MPT_NSGL(mpt) - 1);

		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "Maximum Segment Count: %u\n", mpt->max_seg_cnt);
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "MsgLength=%u IOCNumber = %d\n",
		    facts.MsgLength, facts.IOCNumber);
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "IOCFACTS: GlobalCredits=%d BlockSize=%u bytes "
		    "Request Frame Size %u bytes Max Chain Depth %u\n",
		    mpt->mpt_global_credits, facts.BlockSize,
		    mpt->request_frame_size << 2, max_chain_depth);
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "IOCFACTS: Num Ports %d, FWImageSize %d, "
		    "Flags=%#x\n", facts.NumberOfPorts,
		    le32toh(facts.FWImageSize), facts.Flags);

		if ((facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT) != 0) {
			struct mpt_map_info mi;
			int error;

			/*
			 * In some configurations, the IOC's firmware is
			 * stored in a shared piece of system NVRAM that
			 * is only accessible via the BIOS.  In this
			 * case, the firmware keeps a copy of firmware in
			 * RAM until the OS driver retrieves it.  Once
			 * retrieved, we are responsible for re-downloading
			 * the firmware after any hard-reset.
			 */
			mpt->fw_image_size = le32toh(facts.FWImageSize);
			error = mpt_dma_tag_create(mpt, mpt->parent_dmat,
			    /*alignment*/1, /*boundary*/0,
			    /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
			    /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL,
			    /*filterarg*/NULL, mpt->fw_image_size,
			    /*nsegments*/1, /*maxsegsz*/mpt->fw_image_size,
			    /*flags*/0, &mpt->fw_dmat);
			if (error != 0) {
				mpt_prt(mpt, "cannot create fw dma tag\n");
				return (ENOMEM);
			}
			error = bus_dmamem_alloc(mpt->fw_dmat,
			    (void **)&mpt->fw_image, BUS_DMA_NOWAIT,
			    &mpt->fw_dmap);
			if (error != 0) {
				mpt_prt(mpt, "cannot allocate fw mem.\n");
				bus_dma_tag_destroy(mpt->fw_dmat);
				return (ENOMEM);
			}
			mi.mpt = mpt;
			mi.error = 0;
			bus_dmamap_load(mpt->fw_dmat, mpt->fw_dmap,
			    mpt->fw_image, mpt->fw_image_size, mpt_map_rquest,
			    &mi, 0);
			mpt->fw_phys = mi.phys;

			/* Pull the RAM copy down into our buffer now. */
			error = mpt_upload_fw(mpt);
			if (error != 0) {
				mpt_prt(mpt, "fw upload failed.\n");
				bus_dmamap_unload(mpt->fw_dmat, mpt->fw_dmap);
				bus_dmamem_free(mpt->fw_dmat, mpt->fw_image,
				    mpt->fw_dmap);
				bus_dma_tag_destroy(mpt->fw_dmat);
				mpt->fw_image = NULL;
				return (EIO);
			}
		}

		if (mpt_get_portfacts(mpt, &pfp) != MPT_OK) {
			mpt_prt(mpt, "mpt_get_portfacts failed\n");
			needreset = 1;
			continue;
		}

		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "PORTFACTS: Type %x PFlags %x IID %d MaxDev %d\n",
		    pfp.PortType, pfp.ProtocolFlags, pfp.PortSCSIID,
		    pfp.MaxDevices);

		/* Only parallel SCSI, SAS and FC ports are supported. */
		mpt->mpt_port_type = pfp.PortType;
		mpt->mpt_proto_flags = pfp.ProtocolFlags;
		if (pfp.PortType != MPI_PORTFACTS_PORTTYPE_SCSI &&
		    pfp.PortType != MPI_PORTFACTS_PORTTYPE_SAS &&
		    pfp.PortType != MPI_PORTFACTS_PORTTYPE_FC) {
			mpt_prt(mpt, "Unsupported Port Type (%x)\n",
			    pfp.PortType);
			return (ENXIO);
		}
		mpt->mpt_max_tgtcmds = le16toh(pfp.MaxPostedCmdBuffers);

		if (pfp.PortType == 
MPI_PORTFACTS_PORTTYPE_FC) { 2453 mpt->is_fc = 1; 2454 mpt->is_sas = 0; 2455 } else if (pfp.PortType == MPI_PORTFACTS_PORTTYPE_SAS) { 2456 mpt->is_fc = 0; 2457 mpt->is_sas = 1; 2458 } else { 2459 mpt->is_fc = 0; 2460 mpt->is_sas = 0; 2461 } 2462 mpt->mpt_ini_id = pfp.PortSCSIID; 2463 mpt->mpt_max_devices = pfp.MaxDevices; 2464 2465 /* 2466 * Set our expected role with what this port supports. 2467 */ 2468 2469 mpt->role = MPT_ROLE_NONE; 2470 if (pfp.ProtocolFlags & MPI_PORTFACTS_PROTOCOL_INITIATOR) { 2471 mpt->role |= MPT_ROLE_INITIATOR; 2472 } 2473 if (pfp.ProtocolFlags & MPI_PORTFACTS_PROTOCOL_TARGET) { 2474 mpt->role |= MPT_ROLE_TARGET; 2475 } 2476 if (mpt->role == MPT_ROLE_NONE) { 2477 mpt_prt(mpt, "port does not support either target or " 2478 "initiator role\n"); 2479 return (ENXIO); 2480 } 2481 2482 if (mpt_enable_ioc(mpt, 0) != MPT_OK) { 2483 mpt_prt(mpt, "unable to initialize IOC\n"); 2484 return (ENXIO); 2485 } 2486 2487 /* 2488 * Read IOC configuration information. 2489 */ 2490 mpt_read_config_info_ioc(mpt); 2491 2492 /* Everything worked */ 2493 break; 2494 } 2495 2496 if (try >= MPT_MAX_TRYS) { 2497 mpt_prt(mpt, "failed to initialize IOC"); 2498 return (EIO); 2499 } 2500 2501 return (0); 2502} 2503 2504static int 2505mpt_enable_ioc(struct mpt_softc *mpt, int portenable) 2506{ 2507 uint32_t pptr; 2508 int val; 2509 2510 if (mpt_send_ioc_init(mpt, MPI_WHOINIT_HOST_DRIVER) != MPT_OK) { 2511 mpt_prt(mpt, "mpt_send_ioc_init failed\n"); 2512 return (EIO); 2513 } 2514 2515 mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_send_ioc_init ok\n"); 2516 2517 if (mpt_wait_state(mpt, MPT_DB_STATE_RUNNING) != MPT_OK) { 2518 mpt_prt(mpt, "IOC failed to go to run state\n"); 2519 return (ENXIO); 2520 } 2521 mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC now at RUNSTATE\n"); 2522 2523 /* 2524 * Give it reply buffers 2525 * 2526 * Do *not* exceed global credits. 
2527 */ 2528 for (val = 0, pptr = mpt->reply_phys; 2529 (pptr + MPT_REPLY_SIZE) < (mpt->reply_phys + PAGE_SIZE); 2530 pptr += MPT_REPLY_SIZE) { 2531 mpt_free_reply(mpt, pptr); 2532 if (++val == mpt->mpt_global_credits - 1) 2533 break; 2534 } 2535 2536 2537 /* 2538 * Enable the port if asked. This is only done if we're resetting 2539 * the IOC after initial startup. 2540 */ 2541 if (portenable) { 2542 /* 2543 * Enable asynchronous event reporting 2544 */ 2545 mpt_send_event_request(mpt, 1); 2546 2547 if (mpt_send_port_enable(mpt, 0) != MPT_OK) { 2548 mpt_prt(mpt, "failed to enable port 0\n"); 2549 return (ENXIO); 2550 } 2551 } 2552 return (MPT_OK); 2553} 2554