mpt.c revision 231623
1/*- 2 * Generic routines for LSI Fusion adapters. 3 * FreeBSD Version. 4 * 5 * Copyright (c) 2000, 2001 by Greg Ansley 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice immediately at the beginning of the file, without modification, 12 * this list of conditions, and the following disclaimer. 13 * 2. The name of the author may not be used to endorse or promote products 14 * derived from this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 20 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 */ 28/*- 29 * Copyright (c) 2002, 2006 by Matthew Jacob 30 * All rights reserved. 31 * 32 * Redistribution and use in source and binary forms, with or without 33 * modification, are permitted provided that the following conditions are 34 * met: 35 * 1. Redistributions of source code must retain the above copyright 36 * notice, this list of conditions and the following disclaimer. 37 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 38 * substantially similar to the "NO WARRANTY" disclaimer below 39 * ("Disclaimer") and any redistribution must be conditioned upon including 40 * a substantially similar Disclaimer requirement for further binary 41 * redistribution. 42 * 3. Neither the names of the above listed copyright holders nor the names 43 * of any contributors may be used to endorse or promote products derived 44 * from this software without specific prior written permission. 45 * 46 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 47 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 48 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 49 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 50 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 51 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 52 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 53 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 54 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 55 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT 56 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 57 * 58 * Support from Chris Ellsworth in order to make SAS adapters work 59 * is gratefully acknowledged. 60 * 61 * 62 * Support from LSI-Logic has also gone a great deal toward making this a 63 * workable subsystem and is gratefully acknowledged. 64 */ 65/*- 66 * Copyright (c) 2004, Avid Technology, Inc. and its contributors. 67 * Copyright (c) 2005, WHEEL Sp. z o.o. 68 * Copyright (c) 2004, 2005 Justin T. Gibbs 69 * All rights reserved. 70 * 71 * Redistribution and use in source and binary forms, with or without 72 * modification, are permitted provided that the following conditions are 73 * met: 74 * 1. Redistributions of source code must retain the above copyright 75 * notice, this list of conditions and the following disclaimer. 76 * 2. Redistributions in binary form must reproduce at minimum a disclaimer 77 * substantially similar to the "NO WARRANTY" disclaimer below 78 * ("Disclaimer") and any redistribution must be conditioned upon including 79 * a substantially similar Disclaimer requirement for further binary 80 * redistribution. 81 * 3. Neither the names of the above listed copyright holders nor the names 82 * of any contributors may be used to endorse or promote products derived 83 * from this software without specific prior written permission. 84 * 85 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 86 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 87 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 88 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 89 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 90 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 91 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 92 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 93 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 94 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT 95 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 96 */ 97 98#include <sys/cdefs.h> 99__FBSDID("$FreeBSD: stable/9/sys/dev/mpt/mpt.c 231623 2012-02-14 00:54:50Z marius $"); 100 101#include <dev/mpt/mpt.h> 102#include <dev/mpt/mpt_cam.h> /* XXX For static handler registration */ 103#include <dev/mpt/mpt_raid.h> /* XXX For static handler registration */ 104 105#include <dev/mpt/mpilib/mpi.h> 106#include <dev/mpt/mpilib/mpi_ioc.h> 107#include <dev/mpt/mpilib/mpi_fc.h> 108#include <dev/mpt/mpilib/mpi_targ.h> 109 110#include <sys/sysctl.h> 111 112#define MPT_MAX_TRYS 3 113#define MPT_MAX_WAIT 300000 114 115static int maxwait_ack = 0; 116static int maxwait_int = 0; 117static int maxwait_state = 0; 118 119static TAILQ_HEAD(, mpt_softc) mpt_tailq = TAILQ_HEAD_INITIALIZER(mpt_tailq); 120mpt_reply_handler_t *mpt_reply_handlers[MPT_NUM_REPLY_HANDLERS]; 121 122static mpt_reply_handler_t mpt_default_reply_handler; 123static mpt_reply_handler_t mpt_config_reply_handler; 124static mpt_reply_handler_t mpt_handshake_reply_handler; 125static mpt_reply_handler_t mpt_event_reply_handler; 126static void mpt_send_event_ack(struct mpt_softc *mpt, request_t *ack_req, 127 MSG_EVENT_NOTIFY_REPLY *msg, uint32_t context); 128static int mpt_send_event_request(struct mpt_softc *mpt, int onoff); 129static int mpt_soft_reset(struct mpt_softc *mpt); 130static void mpt_hard_reset(struct mpt_softc *mpt); 131static int mpt_dma_buf_alloc(struct mpt_softc *mpt); 132static void mpt_dma_buf_free(struct mpt_softc *mpt); 133static int mpt_configure_ioc(struct mpt_softc *mpt, int, int); 134static int mpt_enable_ioc(struct mpt_softc *mpt, int); 135 136/************************* Personality Module Support *************************/ 137/* 138 * We include one extra entry that is guaranteed to be NULL 139 * to simplify our itterator. 140 */ 141static struct mpt_personality *mpt_personalities[MPT_MAX_PERSONALITIES + 1]; 142static __inline struct mpt_personality* 143 mpt_pers_find(struct mpt_softc *, u_int); 144static __inline struct mpt_personality* 145 mpt_pers_find_reverse(struct mpt_softc *, u_int); 146 147static __inline struct mpt_personality * 148mpt_pers_find(struct mpt_softc *mpt, u_int start_at) 149{ 150 KASSERT(start_at <= MPT_MAX_PERSONALITIES, 151 ("mpt_pers_find: starting position out of range\n")); 152 153 while (start_at < MPT_MAX_PERSONALITIES 154 && (mpt->mpt_pers_mask & (0x1 << start_at)) == 0) { 155 start_at++; 156 } 157 return (mpt_personalities[start_at]); 158} 159 160/* 161 * Used infrequently, so no need to optimize like a forward 162 * traversal where we use the MAX+1 is guaranteed to be NULL 163 * trick. 164 */ 165static __inline struct mpt_personality * 166mpt_pers_find_reverse(struct mpt_softc *mpt, u_int start_at) 167{ 168 while (start_at < MPT_MAX_PERSONALITIES 169 && (mpt->mpt_pers_mask & (0x1 << start_at)) == 0) { 170 start_at--; 171 } 172 if (start_at < MPT_MAX_PERSONALITIES) 173 return (mpt_personalities[start_at]); 174 return (NULL); 175} 176 177#define MPT_PERS_FOREACH(mpt, pers) \ 178 for (pers = mpt_pers_find(mpt, /*start_at*/0); \ 179 pers != NULL; \ 180 pers = mpt_pers_find(mpt, /*start_at*/pers->id+1)) 181 182#define MPT_PERS_FOREACH_REVERSE(mpt, pers) \ 183 for (pers = mpt_pers_find_reverse(mpt, MPT_MAX_PERSONALITIES-1);\ 184 pers != NULL; \ 185 pers = mpt_pers_find_reverse(mpt, /*start_at*/pers->id-1)) 186 187static mpt_load_handler_t mpt_stdload; 188static mpt_probe_handler_t mpt_stdprobe; 189static mpt_attach_handler_t mpt_stdattach; 190static mpt_enable_handler_t mpt_stdenable; 191static mpt_ready_handler_t mpt_stdready; 192static mpt_event_handler_t mpt_stdevent; 193static mpt_reset_handler_t mpt_stdreset; 194static mpt_shutdown_handler_t mpt_stdshutdown; 195static mpt_detach_handler_t mpt_stddetach; 196static mpt_unload_handler_t mpt_stdunload; 197static struct mpt_personality mpt_default_personality = 198{ 199 .load = mpt_stdload, 200 .probe = mpt_stdprobe, 201 .attach = mpt_stdattach, 202 .enable = mpt_stdenable, 203 .ready = mpt_stdready, 204 .event = mpt_stdevent, 205 .reset = mpt_stdreset, 206 .shutdown = mpt_stdshutdown, 207 .detach = mpt_stddetach, 208 .unload = mpt_stdunload 209}; 210 211static mpt_load_handler_t mpt_core_load; 212static mpt_attach_handler_t mpt_core_attach; 213static mpt_enable_handler_t mpt_core_enable; 214static mpt_reset_handler_t mpt_core_ioc_reset; 215static mpt_event_handler_t mpt_core_event; 216static mpt_shutdown_handler_t mpt_core_shutdown; 217static mpt_shutdown_handler_t mpt_core_detach; 218static mpt_unload_handler_t mpt_core_unload; 219static struct mpt_personality mpt_core_personality = 220{ 221 .name = "mpt_core", 222 .load = mpt_core_load, 223// .attach = mpt_core_attach, 224// .enable = mpt_core_enable, 225 .event = mpt_core_event, 226 .reset = mpt_core_ioc_reset, 227 .shutdown = mpt_core_shutdown, 228 .detach = mpt_core_detach, 229 .unload = mpt_core_unload, 230}; 231 232/* 233 * Manual declaration so that DECLARE_MPT_PERSONALITY doesn't need 234 * ordering information. We want the core to always register FIRST. 235 * other modules are set to SI_ORDER_SECOND. 236 */ 237static moduledata_t mpt_core_mod = { 238 "mpt_core", mpt_modevent, &mpt_core_personality 239}; 240DECLARE_MODULE(mpt_core, mpt_core_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST); 241MODULE_VERSION(mpt_core, 1); 242 243#define MPT_PERS_ATTACHED(pers, mpt) ((mpt)->mpt_pers_mask & (0x1 << pers->id)) 244 245int 246mpt_modevent(module_t mod, int type, void *data) 247{ 248 struct mpt_personality *pers; 249 int error; 250 251 pers = (struct mpt_personality *)data; 252 253 error = 0; 254 switch (type) { 255 case MOD_LOAD: 256 { 257 mpt_load_handler_t **def_handler; 258 mpt_load_handler_t **pers_handler; 259 int i; 260 261 for (i = 0; i < MPT_MAX_PERSONALITIES; i++) { 262 if (mpt_personalities[i] == NULL) 263 break; 264 } 265 if (i >= MPT_MAX_PERSONALITIES) { 266 error = ENOMEM; 267 break; 268 } 269 pers->id = i; 270 mpt_personalities[i] = pers; 271 272 /* Install standard/noop handlers for any NULL entries. */ 273 def_handler = MPT_PERS_FIRST_HANDLER(&mpt_default_personality); 274 pers_handler = MPT_PERS_FIRST_HANDLER(pers); 275 while (pers_handler <= MPT_PERS_LAST_HANDLER(pers)) { 276 if (*pers_handler == NULL) 277 *pers_handler = *def_handler; 278 pers_handler++; 279 def_handler++; 280 } 281 282 error = (pers->load(pers)); 283 if (error != 0) 284 mpt_personalities[i] = NULL; 285 break; 286 } 287 case MOD_SHUTDOWN: 288 break; 289#if __FreeBSD_version >= 500000 290 case MOD_QUIESCE: 291 break; 292#endif 293 case MOD_UNLOAD: 294 error = pers->unload(pers); 295 mpt_personalities[pers->id] = NULL; 296 break; 297 default: 298 error = EINVAL; 299 break; 300 } 301 return (error); 302} 303 304static int 305mpt_stdload(struct mpt_personality *pers) 306{ 307 308 /* Load is always successful. */ 309 return (0); 310} 311 312static int 313mpt_stdprobe(struct mpt_softc *mpt) 314{ 315 316 /* Probe is always successful. */ 317 return (0); 318} 319 320static int 321mpt_stdattach(struct mpt_softc *mpt) 322{ 323 324 /* Attach is always successful. */ 325 return (0); 326} 327 328static int 329mpt_stdenable(struct mpt_softc *mpt) 330{ 331 332 /* Enable is always successful. */ 333 return (0); 334} 335 336static void 337mpt_stdready(struct mpt_softc *mpt) 338{ 339 340} 341 342static int 343mpt_stdevent(struct mpt_softc *mpt, request_t *req, MSG_EVENT_NOTIFY_REPLY *msg) 344{ 345 346 mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_stdevent: 0x%x\n", msg->Event & 0xFF); 347 /* Event was not for us. */ 348 return (0); 349} 350 351static void 352mpt_stdreset(struct mpt_softc *mpt, int type) 353{ 354 355} 356 357static void 358mpt_stdshutdown(struct mpt_softc *mpt) 359{ 360 361} 362 363static void 364mpt_stddetach(struct mpt_softc *mpt) 365{ 366 367} 368 369static int 370mpt_stdunload(struct mpt_personality *pers) 371{ 372 373 /* Unload is always successful. */ 374 return (0); 375} 376 377/* 378 * Post driver attachment, we may want to perform some global actions. 379 * Here is the hook to do so. 380 */ 381 382static void 383mpt_postattach(void *unused) 384{ 385 struct mpt_softc *mpt; 386 struct mpt_personality *pers; 387 388 TAILQ_FOREACH(mpt, &mpt_tailq, links) { 389 MPT_PERS_FOREACH(mpt, pers) 390 pers->ready(mpt); 391 } 392} 393SYSINIT(mptdev, SI_SUB_CONFIGURE, SI_ORDER_MIDDLE, mpt_postattach, NULL); 394 395/******************************* Bus DMA Support ******************************/ 396void 397mpt_map_rquest(void *arg, bus_dma_segment_t *segs, int nseg, int error) 398{ 399 struct mpt_map_info *map_info; 400 401 map_info = (struct mpt_map_info *)arg; 402 map_info->error = error; 403 map_info->phys = segs->ds_addr; 404} 405 406/**************************** Reply/Event Handling ****************************/ 407int 408mpt_register_handler(struct mpt_softc *mpt, mpt_handler_type type, 409 mpt_handler_t handler, uint32_t *phandler_id) 410{ 411 412 switch (type) { 413 case MPT_HANDLER_REPLY: 414 { 415 u_int cbi; 416 u_int free_cbi; 417 418 if (phandler_id == NULL) 419 return (EINVAL); 420 421 free_cbi = MPT_HANDLER_ID_NONE; 422 for (cbi = 0; cbi < MPT_NUM_REPLY_HANDLERS; cbi++) { 423 /* 424 * If the same handler is registered multiple 425 * times, don't error out. Just return the 426 * index of the original registration. 427 */ 428 if (mpt_reply_handlers[cbi] == handler.reply_handler) { 429 *phandler_id = MPT_CBI_TO_HID(cbi); 430 return (0); 431 } 432 433 /* 434 * Fill from the front in the hope that 435 * all registered handlers consume only a 436 * single cache line. 437 * 438 * We don't break on the first empty slot so 439 * that the full table is checked to see if 440 * this handler was previously registered. 441 */ 442 if (free_cbi == MPT_HANDLER_ID_NONE && 443 (mpt_reply_handlers[cbi] 444 == mpt_default_reply_handler)) 445 free_cbi = cbi; 446 } 447 if (free_cbi == MPT_HANDLER_ID_NONE) { 448 return (ENOMEM); 449 } 450 mpt_reply_handlers[free_cbi] = handler.reply_handler; 451 *phandler_id = MPT_CBI_TO_HID(free_cbi); 452 break; 453 } 454 default: 455 mpt_prt(mpt, "mpt_register_handler unknown type %d\n", type); 456 return (EINVAL); 457 } 458 return (0); 459} 460 461int 462mpt_deregister_handler(struct mpt_softc *mpt, mpt_handler_type type, 463 mpt_handler_t handler, uint32_t handler_id) 464{ 465 466 switch (type) { 467 case MPT_HANDLER_REPLY: 468 { 469 u_int cbi; 470 471 cbi = MPT_CBI(handler_id); 472 if (cbi >= MPT_NUM_REPLY_HANDLERS 473 || mpt_reply_handlers[cbi] != handler.reply_handler) 474 return (ENOENT); 475 mpt_reply_handlers[cbi] = mpt_default_reply_handler; 476 break; 477 } 478 default: 479 mpt_prt(mpt, "mpt_deregister_handler unknown type %d\n", type); 480 return (EINVAL); 481 } 482 return (0); 483} 484 485static int 486mpt_default_reply_handler(struct mpt_softc *mpt, request_t *req, 487 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 488{ 489 490 mpt_prt(mpt, 491 "Default Handler Called: req=%p:%u reply_descriptor=%x frame=%p\n", 492 req, req->serno, reply_desc, reply_frame); 493 494 if (reply_frame != NULL) 495 mpt_dump_reply_frame(mpt, reply_frame); 496 497 mpt_prt(mpt, "Reply Frame Ignored\n"); 498 499 return (/*free_reply*/TRUE); 500} 501 502static int 503mpt_config_reply_handler(struct mpt_softc *mpt, request_t *req, 504 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 505{ 506 507 if (req != NULL) { 508 if (reply_frame != NULL) { 509 MSG_CONFIG *cfgp; 510 MSG_CONFIG_REPLY *reply; 511 512 cfgp = (MSG_CONFIG *)req->req_vbuf; 513 reply = (MSG_CONFIG_REPLY *)reply_frame; 514 req->IOCStatus = le16toh(reply_frame->IOCStatus); 515 bcopy(&reply->Header, &cfgp->Header, 516 sizeof(cfgp->Header)); 517 cfgp->ExtPageLength = reply->ExtPageLength; 518 cfgp->ExtPageType = reply->ExtPageType; 519 } 520 req->state &= ~REQ_STATE_QUEUED; 521 req->state |= REQ_STATE_DONE; 522 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 523 if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) { 524 wakeup(req); 525 } else if ((req->state & REQ_STATE_TIMEDOUT) != 0) { 526 /* 527 * Whew- we can free this request (late completion) 528 */ 529 mpt_free_request(mpt, req); 530 } 531 } 532 533 return (TRUE); 534} 535 536static int 537mpt_handshake_reply_handler(struct mpt_softc *mpt, request_t *req, 538 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 539{ 540 541 /* Nothing to be done. */ 542 return (TRUE); 543} 544 545static int 546mpt_event_reply_handler(struct mpt_softc *mpt, request_t *req, 547 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 548{ 549 int free_reply; 550 551 KASSERT(reply_frame != NULL, ("null reply in mpt_event_reply_handler")); 552 KASSERT(req != NULL, ("null request in mpt_event_reply_handler")); 553 554 free_reply = TRUE; 555 switch (reply_frame->Function) { 556 case MPI_FUNCTION_EVENT_NOTIFICATION: 557 { 558 MSG_EVENT_NOTIFY_REPLY *msg; 559 struct mpt_personality *pers; 560 u_int handled; 561 562 handled = 0; 563 msg = (MSG_EVENT_NOTIFY_REPLY *)reply_frame; 564 msg->EventDataLength = le16toh(msg->EventDataLength); 565 msg->IOCStatus = le16toh(msg->IOCStatus); 566 msg->IOCLogInfo = le32toh(msg->IOCLogInfo); 567 msg->Event = le32toh(msg->Event); 568 MPT_PERS_FOREACH(mpt, pers) 569 handled += pers->event(mpt, req, msg); 570 571 if (handled == 0 && mpt->mpt_pers_mask == 0) { 572 mpt_lprt(mpt, MPT_PRT_INFO, 573 "No Handlers For Any Event Notify Frames. " 574 "Event %#x (ACK %sequired).\n", 575 msg->Event, msg->AckRequired? "r" : "not r"); 576 } else if (handled == 0) { 577 mpt_lprt(mpt, 578 msg->AckRequired? MPT_PRT_WARN : MPT_PRT_INFO, 579 "Unhandled Event Notify Frame. Event %#x " 580 "(ACK %sequired).\n", 581 msg->Event, msg->AckRequired? "r" : "not r"); 582 } 583 584 if (msg->AckRequired) { 585 request_t *ack_req; 586 uint32_t context; 587 588 context = req->index | MPT_REPLY_HANDLER_EVENTS; 589 ack_req = mpt_get_request(mpt, FALSE); 590 if (ack_req == NULL) { 591 struct mpt_evtf_record *evtf; 592 593 evtf = (struct mpt_evtf_record *)reply_frame; 594 evtf->context = context; 595 LIST_INSERT_HEAD(&mpt->ack_frames, evtf, links); 596 free_reply = FALSE; 597 break; 598 } 599 mpt_send_event_ack(mpt, ack_req, msg, context); 600 /* 601 * Don't check for CONTINUATION_REPLY here 602 */ 603 return (free_reply); 604 } 605 break; 606 } 607 case MPI_FUNCTION_PORT_ENABLE: 608 mpt_lprt(mpt, MPT_PRT_DEBUG , "enable port reply\n"); 609 break; 610 case MPI_FUNCTION_EVENT_ACK: 611 break; 612 default: 613 mpt_prt(mpt, "unknown event function: %x\n", 614 reply_frame->Function); 615 break; 616 } 617 618 /* 619 * I'm not sure that this continuation stuff works as it should. 620 * 621 * I've had FC async events occur that free the frame up because 622 * the continuation bit isn't set, and then additional async events 623 * then occur using the same context. As you might imagine, this 624 * leads to Very Bad Thing. 625 * 626 * Let's just be safe for now and not free them up until we figure 627 * out what's actually happening here. 628 */ 629#if 0 630 if ((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0) { 631 TAILQ_REMOVE(&mpt->request_pending_list, req, links); 632 mpt_free_request(mpt, req); 633 mpt_prt(mpt, "event_reply %x for req %p:%u NOT a continuation", 634 reply_frame->Function, req, req->serno); 635 if (reply_frame->Function == MPI_FUNCTION_EVENT_NOTIFICATION) { 636 MSG_EVENT_NOTIFY_REPLY *msg = 637 (MSG_EVENT_NOTIFY_REPLY *)reply_frame; 638 mpt_prtc(mpt, " Event=0x%x AckReq=%d", 639 msg->Event, msg->AckRequired); 640 } 641 } else { 642 mpt_prt(mpt, "event_reply %x for %p:%u IS a continuation", 643 reply_frame->Function, req, req->serno); 644 if (reply_frame->Function == MPI_FUNCTION_EVENT_NOTIFICATION) { 645 MSG_EVENT_NOTIFY_REPLY *msg = 646 (MSG_EVENT_NOTIFY_REPLY *)reply_frame; 647 mpt_prtc(mpt, " Event=0x%x AckReq=%d", 648 msg->Event, msg->AckRequired); 649 } 650 mpt_prtc(mpt, "\n"); 651 } 652#endif 653 return (free_reply); 654} 655 656/* 657 * Process an asynchronous event from the IOC. 658 */ 659static int 660mpt_core_event(struct mpt_softc *mpt, request_t *req, 661 MSG_EVENT_NOTIFY_REPLY *msg) 662{ 663 664 mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_core_event: 0x%x\n", 665 msg->Event & 0xFF); 666 switch(msg->Event & 0xFF) { 667 case MPI_EVENT_NONE: 668 break; 669 case MPI_EVENT_LOG_DATA: 670 { 671 int i; 672 673 /* Some error occurred that LSI wants logged */ 674 mpt_prt(mpt, "EvtLogData: IOCLogInfo: 0x%08x\n", 675 msg->IOCLogInfo); 676 mpt_prt(mpt, "\tEvtLogData: Event Data:"); 677 for (i = 0; i < msg->EventDataLength; i++) 678 mpt_prtc(mpt, " %08x", msg->Data[i]); 679 mpt_prtc(mpt, "\n"); 680 break; 681 } 682 case MPI_EVENT_EVENT_CHANGE: 683 /* 684 * This is just an acknowledgement 685 * of our mpt_send_event_request. 686 */ 687 break; 688 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE: 689 break; 690 default: 691 return (0); 692 break; 693 } 694 return (1); 695} 696 697static void 698mpt_send_event_ack(struct mpt_softc *mpt, request_t *ack_req, 699 MSG_EVENT_NOTIFY_REPLY *msg, uint32_t context) 700{ 701 MSG_EVENT_ACK *ackp; 702 703 ackp = (MSG_EVENT_ACK *)ack_req->req_vbuf; 704 memset(ackp, 0, sizeof (*ackp)); 705 ackp->Function = MPI_FUNCTION_EVENT_ACK; 706 ackp->Event = htole32(msg->Event); 707 ackp->EventContext = htole32(msg->EventContext); 708 ackp->MsgContext = htole32(context); 709 mpt_check_doorbell(mpt); 710 mpt_send_cmd(mpt, ack_req); 711} 712 713/***************************** Interrupt Handling *****************************/ 714void 715mpt_intr(void *arg) 716{ 717 struct mpt_softc *mpt; 718 uint32_t reply_desc; 719 int ntrips = 0; 720 721 mpt = (struct mpt_softc *)arg; 722 mpt_lprt(mpt, MPT_PRT_DEBUG2, "enter mpt_intr\n"); 723 MPT_LOCK_ASSERT(mpt); 724 725 while ((reply_desc = mpt_pop_reply_queue(mpt)) != MPT_REPLY_EMPTY) { 726 request_t *req; 727 MSG_DEFAULT_REPLY *reply_frame; 728 uint32_t reply_baddr; 729 uint32_t ctxt_idx; 730 u_int cb_index; 731 u_int req_index; 732 u_int offset; 733 int free_rf; 734 735 req = NULL; 736 reply_frame = NULL; 737 reply_baddr = 0; 738 offset = 0; 739 if ((reply_desc & MPI_ADDRESS_REPLY_A_BIT) != 0) { 740 /* 741 * Ensure that the reply frame is coherent. 742 */ 743 reply_baddr = MPT_REPLY_BADDR(reply_desc); 744 offset = reply_baddr - (mpt->reply_phys & 0xFFFFFFFF); 745 bus_dmamap_sync_range(mpt->reply_dmat, 746 mpt->reply_dmap, offset, MPT_REPLY_SIZE, 747 BUS_DMASYNC_POSTREAD); 748 reply_frame = MPT_REPLY_OTOV(mpt, offset); 749 ctxt_idx = le32toh(reply_frame->MsgContext); 750 } else { 751 uint32_t type; 752 753 type = MPI_GET_CONTEXT_REPLY_TYPE(reply_desc); 754 ctxt_idx = reply_desc; 755 mpt_lprt(mpt, MPT_PRT_DEBUG1, "Context Reply: 0x%08x\n", 756 reply_desc); 757 758 switch (type) { 759 case MPI_CONTEXT_REPLY_TYPE_SCSI_INIT: 760 ctxt_idx &= MPI_CONTEXT_REPLY_CONTEXT_MASK; 761 break; 762 case MPI_CONTEXT_REPLY_TYPE_SCSI_TARGET: 763 ctxt_idx = GET_IO_INDEX(reply_desc); 764 if (mpt->tgt_cmd_ptrs == NULL) { 765 mpt_prt(mpt, 766 "mpt_intr: no target cmd ptrs\n"); 767 reply_desc = MPT_REPLY_EMPTY; 768 break; 769 } 770 if (ctxt_idx >= mpt->tgt_cmds_allocated) { 771 mpt_prt(mpt, 772 "mpt_intr: bad tgt cmd ctxt %u\n", 773 ctxt_idx); 774 reply_desc = MPT_REPLY_EMPTY; 775 ntrips = 1000; 776 break; 777 } 778 req = mpt->tgt_cmd_ptrs[ctxt_idx]; 779 if (req == NULL) { 780 mpt_prt(mpt, "no request backpointer " 781 "at index %u", ctxt_idx); 782 reply_desc = MPT_REPLY_EMPTY; 783 ntrips = 1000; 784 break; 785 } 786 /* 787 * Reformulate ctxt_idx to be just as if 788 * it were another type of context reply 789 * so the code below will find the request 790 * via indexing into the pool. 791 */ 792 ctxt_idx = 793 req->index | mpt->scsi_tgt_handler_id; 794 req = NULL; 795 break; 796 case MPI_CONTEXT_REPLY_TYPE_LAN: 797 mpt_prt(mpt, "LAN CONTEXT REPLY: 0x%08x\n", 798 reply_desc); 799 reply_desc = MPT_REPLY_EMPTY; 800 break; 801 default: 802 mpt_prt(mpt, "Context Reply 0x%08x?\n", type); 803 reply_desc = MPT_REPLY_EMPTY; 804 break; 805 } 806 if (reply_desc == MPT_REPLY_EMPTY) { 807 if (ntrips++ > 1000) { 808 break; 809 } 810 continue; 811 } 812 } 813 814 cb_index = MPT_CONTEXT_TO_CBI(ctxt_idx); 815 req_index = MPT_CONTEXT_TO_REQI(ctxt_idx); 816 if (req_index < MPT_MAX_REQUESTS(mpt)) { 817 req = &mpt->request_pool[req_index]; 818 } else { 819 mpt_prt(mpt, "WARN: mpt_intr index == %d (reply_desc ==" 820 " 0x%x)\n", req_index, reply_desc); 821 } 822 823 bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap, 824 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 825 free_rf = mpt_reply_handlers[cb_index](mpt, req, 826 reply_desc, reply_frame); 827 828 if (reply_frame != NULL && free_rf) { 829 bus_dmamap_sync_range(mpt->reply_dmat, 830 mpt->reply_dmap, offset, MPT_REPLY_SIZE, 831 BUS_DMASYNC_PREREAD); 832 mpt_free_reply(mpt, reply_baddr); 833 } 834 835 /* 836 * If we got ourselves disabled, don't get stuck in a loop 837 */ 838 if (mpt->disabled) { 839 mpt_disable_ints(mpt); 840 break; 841 } 842 if (ntrips++ > 1000) { 843 break; 844 } 845 } 846 mpt_lprt(mpt, MPT_PRT_DEBUG2, "exit mpt_intr\n"); 847} 848 849/******************************* Error Recovery *******************************/ 850void 851mpt_complete_request_chain(struct mpt_softc *mpt, struct req_queue *chain, 852 u_int iocstatus) 853{ 854 MSG_DEFAULT_REPLY ioc_status_frame; 855 request_t *req; 856 857 memset(&ioc_status_frame, 0, sizeof(ioc_status_frame)); 858 ioc_status_frame.MsgLength = roundup2(sizeof(ioc_status_frame), 4); 859 ioc_status_frame.IOCStatus = iocstatus; 860 while((req = TAILQ_FIRST(chain)) != NULL) { 861 MSG_REQUEST_HEADER *msg_hdr; 862 u_int cb_index; 863 864 bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap, 865 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 866 msg_hdr = (MSG_REQUEST_HEADER *)req->req_vbuf; 867 ioc_status_frame.Function = msg_hdr->Function; 868 ioc_status_frame.MsgContext = msg_hdr->MsgContext; 869 cb_index = MPT_CONTEXT_TO_CBI(le32toh(msg_hdr->MsgContext)); 870 mpt_reply_handlers[cb_index](mpt, req, msg_hdr->MsgContext, 871 &ioc_status_frame); 872 if (mpt_req_on_pending_list(mpt, req) != 0) 873 TAILQ_REMOVE(chain, req, links); 874 } 875} 876 877/********************************* Diagnostics ********************************/ 878/* 879 * Perform a diagnostic dump of a reply frame. 880 */ 881void 882mpt_dump_reply_frame(struct mpt_softc *mpt, MSG_DEFAULT_REPLY *reply_frame) 883{ 884 885 mpt_prt(mpt, "Address Reply:\n"); 886 mpt_print_reply(reply_frame); 887} 888 889/******************************* Doorbell Access ******************************/ 890static __inline uint32_t mpt_rd_db(struct mpt_softc *mpt); 891static __inline uint32_t mpt_rd_intr(struct mpt_softc *mpt); 892 893static __inline uint32_t 894mpt_rd_db(struct mpt_softc *mpt) 895{ 896 897 return mpt_read(mpt, MPT_OFFSET_DOORBELL); 898} 899 900static __inline uint32_t 901mpt_rd_intr(struct mpt_softc *mpt) 902{ 903 904 return mpt_read(mpt, MPT_OFFSET_INTR_STATUS); 905} 906 907/* Busy wait for a door bell to be read by IOC */ 908static int 909mpt_wait_db_ack(struct mpt_softc *mpt) 910{ 911 int i; 912 913 for (i=0; i < MPT_MAX_WAIT; i++) { 914 if (!MPT_DB_IS_BUSY(mpt_rd_intr(mpt))) { 915 maxwait_ack = i > maxwait_ack ? i : maxwait_ack; 916 return (MPT_OK); 917 } 918 DELAY(200); 919 } 920 return (MPT_FAIL); 921} 922 923/* Busy wait for a door bell interrupt */ 924static int 925mpt_wait_db_int(struct mpt_softc *mpt) 926{ 927 int i; 928 929 for (i = 0; i < MPT_MAX_WAIT; i++) { 930 if (MPT_DB_INTR(mpt_rd_intr(mpt))) { 931 maxwait_int = i > maxwait_int ? i : maxwait_int; 932 return MPT_OK; 933 } 934 DELAY(100); 935 } 936 return (MPT_FAIL); 937} 938 939/* Wait for IOC to transition to a give state */ 940void 941mpt_check_doorbell(struct mpt_softc *mpt) 942{ 943 uint32_t db = mpt_rd_db(mpt); 944 945 if (MPT_STATE(db) != MPT_DB_STATE_RUNNING) { 946 mpt_prt(mpt, "Device not running\n"); 947 mpt_print_db(db); 948 } 949} 950 951/* Wait for IOC to transition to a give state */ 952static int 953mpt_wait_state(struct mpt_softc *mpt, enum DB_STATE_BITS state) 954{ 955 int i; 956 957 for (i = 0; i < MPT_MAX_WAIT; i++) { 958 uint32_t db = mpt_rd_db(mpt); 959 if (MPT_STATE(db) == state) { 960 maxwait_state = i > maxwait_state ? i : maxwait_state; 961 return (MPT_OK); 962 } 963 DELAY(100); 964 } 965 return (MPT_FAIL); 966} 967 968 969/************************* Intialization/Configuration ************************/ 970static int mpt_download_fw(struct mpt_softc *mpt); 971 972/* Issue the reset COMMAND to the IOC */ 973static int 974mpt_soft_reset(struct mpt_softc *mpt) 975{ 976 977 mpt_lprt(mpt, MPT_PRT_DEBUG, "soft reset\n"); 978 979 /* Have to use hard reset if we are not in Running state */ 980 if (MPT_STATE(mpt_rd_db(mpt)) != MPT_DB_STATE_RUNNING) { 981 mpt_prt(mpt, "soft reset failed: device not running\n"); 982 return (MPT_FAIL); 983 } 984 985 /* If door bell is in use we don't have a chance of getting 986 * a word in since the IOC probably crashed in message 987 * processing. So don't waste our time. 988 */ 989 if (MPT_DB_IS_IN_USE(mpt_rd_db(mpt))) { 990 mpt_prt(mpt, "soft reset failed: doorbell wedged\n"); 991 return (MPT_FAIL); 992 } 993 994 /* Send the reset request to the IOC */ 995 mpt_write(mpt, MPT_OFFSET_DOORBELL, 996 MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET << MPI_DOORBELL_FUNCTION_SHIFT); 997 if (mpt_wait_db_ack(mpt) != MPT_OK) { 998 mpt_prt(mpt, "soft reset failed: ack timeout\n"); 999 return (MPT_FAIL); 1000 } 1001 1002 /* Wait for the IOC to reload and come out of reset state */ 1003 if (mpt_wait_state(mpt, MPT_DB_STATE_READY) != MPT_OK) { 1004 mpt_prt(mpt, "soft reset failed: device did not restart\n"); 1005 return (MPT_FAIL); 1006 } 1007 1008 return MPT_OK; 1009} 1010 1011static int 1012mpt_enable_diag_mode(struct mpt_softc *mpt) 1013{ 1014 int try; 1015 1016 try = 20; 1017 while (--try) { 1018 1019 if ((mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC) & MPI_DIAG_DRWE) != 0) 1020 break; 1021 1022 /* Enable diagnostic registers */ 1023 mpt_write(mpt, MPT_OFFSET_SEQUENCE, 0xFF); 1024 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_1ST_KEY_VALUE); 1025 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_2ND_KEY_VALUE); 1026 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_3RD_KEY_VALUE); 1027 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_4TH_KEY_VALUE); 1028 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_5TH_KEY_VALUE); 1029 1030 DELAY(100000); 1031 } 1032 if (try == 0) 1033 return (EIO); 1034 return (0); 1035} 1036 1037static void 1038mpt_disable_diag_mode(struct mpt_softc *mpt) 1039{ 1040 1041 mpt_write(mpt, MPT_OFFSET_SEQUENCE, 0xFFFFFFFF); 1042} 1043 1044/* This is a magic diagnostic reset that resets all the ARM 1045 * processors in the chip. 1046 */ 1047static void 1048mpt_hard_reset(struct mpt_softc *mpt) 1049{ 1050 int error; 1051 int wait; 1052 uint32_t diagreg; 1053 1054 mpt_lprt(mpt, MPT_PRT_DEBUG, "hard reset\n"); 1055 1056 if (mpt->is_1078) { 1057 mpt_write(mpt, MPT_OFFSET_RESET_1078, 0x07); 1058 DELAY(1000); 1059 return; 1060 } 1061 1062 error = mpt_enable_diag_mode(mpt); 1063 if (error) { 1064 mpt_prt(mpt, "WARNING - Could not enter diagnostic mode !\n"); 1065 mpt_prt(mpt, "Trying to reset anyway.\n"); 1066 } 1067 1068 diagreg = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC); 1069 1070 /* 1071 * This appears to be a workaround required for some 1072 * firmware or hardware revs. 1073 */ 1074 mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, diagreg | MPI_DIAG_DISABLE_ARM); 1075 DELAY(1000); 1076 1077 /* Diag. port is now active so we can now hit the reset bit */ 1078 mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, diagreg | MPI_DIAG_RESET_ADAPTER); 1079 1080 /* 1081 * Ensure that the reset has finished. We delay 1ms 1082 * prior to reading the register to make sure the chip 1083 * has sufficiently completed its reset to handle register 1084 * accesses. 1085 */ 1086 wait = 5000; 1087 do { 1088 DELAY(1000); 1089 diagreg = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC); 1090 } while (--wait && (diagreg & MPI_DIAG_RESET_ADAPTER) == 0); 1091 1092 if (wait == 0) { 1093 mpt_prt(mpt, "WARNING - Failed hard reset! " 1094 "Trying to initialize anyway.\n"); 1095 } 1096 1097 /* 1098 * If we have firmware to download, it must be loaded before 1099 * the controller will become operational. Do so now. 1100 */ 1101 if (mpt->fw_image != NULL) { 1102 1103 error = mpt_download_fw(mpt); 1104 1105 if (error) { 1106 mpt_prt(mpt, "WARNING - Firmware Download Failed!\n"); 1107 mpt_prt(mpt, "Trying to initialize anyway.\n"); 1108 } 1109 } 1110 1111 /* 1112 * Reseting the controller should have disabled write 1113 * access to the diagnostic registers, but disable 1114 * manually to be sure. 1115 */ 1116 mpt_disable_diag_mode(mpt); 1117} 1118 1119static void 1120mpt_core_ioc_reset(struct mpt_softc *mpt, int type) 1121{ 1122 1123 /* 1124 * Complete all pending requests with a status 1125 * appropriate for an IOC reset. 1126 */ 1127 mpt_complete_request_chain(mpt, &mpt->request_pending_list, 1128 MPI_IOCSTATUS_INVALID_STATE); 1129} 1130 1131/* 1132 * Reset the IOC when needed. Try software command first then if needed 1133 * poke at the magic diagnostic reset. Note that a hard reset resets 1134 * *both* IOCs on dual function chips (FC929 && LSI1030) as well as 1135 * fouls up the PCI configuration registers. 1136 */ 1137int 1138mpt_reset(struct mpt_softc *mpt, int reinit) 1139{ 1140 struct mpt_personality *pers; 1141 int ret; 1142 int retry_cnt = 0; 1143 1144 /* 1145 * Try a soft reset. If that fails, get out the big hammer. 1146 */ 1147 again: 1148 if ((ret = mpt_soft_reset(mpt)) != MPT_OK) { 1149 int cnt; 1150 for (cnt = 0; cnt < 5; cnt++) { 1151 /* Failed; do a hard reset */ 1152 mpt_hard_reset(mpt); 1153 1154 /* 1155 * Wait for the IOC to reload 1156 * and come out of reset state 1157 */ 1158 ret = mpt_wait_state(mpt, MPT_DB_STATE_READY); 1159 if (ret == MPT_OK) { 1160 break; 1161 } 1162 /* 1163 * Okay- try to check again... 1164 */ 1165 ret = mpt_wait_state(mpt, MPT_DB_STATE_READY); 1166 if (ret == MPT_OK) { 1167 break; 1168 } 1169 mpt_prt(mpt, "mpt_reset: failed hard reset (%d:%d)\n", 1170 retry_cnt, cnt); 1171 } 1172 } 1173 1174 if (retry_cnt == 0) { 1175 /* 1176 * Invoke reset handlers. We bump the reset count so 1177 * that mpt_wait_req() understands that regardless of 1178 * the specified wait condition, it should stop its wait. 1179 */ 1180 mpt->reset_cnt++; 1181 MPT_PERS_FOREACH(mpt, pers) 1182 pers->reset(mpt, ret); 1183 } 1184 1185 if (reinit) { 1186 ret = mpt_enable_ioc(mpt, 1); 1187 if (ret == MPT_OK) { 1188 mpt_enable_ints(mpt); 1189 } 1190 } 1191 if (ret != MPT_OK && retry_cnt++ < 2) { 1192 goto again; 1193 } 1194 return ret; 1195} 1196 1197/* Return a command buffer to the free queue */ 1198void 1199mpt_free_request(struct mpt_softc *mpt, request_t *req) 1200{ 1201 request_t *nxt; 1202 struct mpt_evtf_record *record; 1203 uint32_t offset, reply_baddr; 1204 1205 if (req == NULL || req != &mpt->request_pool[req->index]) { 1206 panic("mpt_free_request bad req ptr\n"); 1207 return; 1208 } 1209 if ((nxt = req->chain) != NULL) { 1210 req->chain = NULL; 1211 mpt_free_request(mpt, nxt); /* NB: recursion */ 1212 } 1213 KASSERT(req->state != REQ_STATE_FREE, ("freeing free request")); 1214 KASSERT(!(req->state & REQ_STATE_LOCKED), ("freeing locked request")); 1215 MPT_LOCK_ASSERT(mpt); 1216 KASSERT(mpt_req_on_free_list(mpt, req) == 0, 1217 ("mpt_free_request: req %p:%u func %x already on freelist", 1218 req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function)); 1219 KASSERT(mpt_req_on_pending_list(mpt, req) == 0, 1220 ("mpt_free_request: req %p:%u func %x on pending list", 1221 req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function)); 1222#ifdef INVARIANTS 1223 mpt_req_not_spcl(mpt, req, "mpt_free_request", __LINE__); 1224#endif 1225 1226 req->ccb = NULL; 1227 if (LIST_EMPTY(&mpt->ack_frames)) { 1228 /* 1229 * Insert free ones at the tail 1230 */ 1231 req->serno = 0; 1232 req->state = REQ_STATE_FREE; 1233#ifdef INVARIANTS 1234 memset(req->req_vbuf, 0xff, sizeof (MSG_REQUEST_HEADER)); 1235#endif 1236 TAILQ_INSERT_TAIL(&mpt->request_free_list, req, links); 1237 if (mpt->getreqwaiter != 0) { 1238 mpt->getreqwaiter = 0; 1239 wakeup(&mpt->request_free_list); 1240 } 1241 return; 1242 } 1243 1244 /* 1245 * Process an ack frame deferred due to resource shortage. 1246 */ 1247 record = LIST_FIRST(&mpt->ack_frames); 1248 LIST_REMOVE(record, links); 1249 req->state = REQ_STATE_ALLOCATED; 1250 mpt_assign_serno(mpt, req); 1251 mpt_send_event_ack(mpt, req, &record->reply, record->context); 1252 offset = (uint32_t)((uint8_t *)record - mpt->reply); 1253 reply_baddr = offset + (mpt->reply_phys & 0xFFFFFFFF); 1254 bus_dmamap_sync_range(mpt->reply_dmat, mpt->reply_dmap, offset, 1255 MPT_REPLY_SIZE, BUS_DMASYNC_PREREAD); 1256 mpt_free_reply(mpt, reply_baddr); 1257} 1258 1259/* Get a command buffer from the free queue */ 1260request_t * 1261mpt_get_request(struct mpt_softc *mpt, int sleep_ok) 1262{ 1263 request_t *req; 1264 1265retry: 1266 MPT_LOCK_ASSERT(mpt); 1267 req = TAILQ_FIRST(&mpt->request_free_list); 1268 if (req != NULL) { 1269 KASSERT(req == &mpt->request_pool[req->index], 1270 ("mpt_get_request: corrupted request free list\n")); 1271 KASSERT(req->state == REQ_STATE_FREE, 1272 ("req %p:%u not free on free list %x index %d function %x", 1273 req, req->serno, req->state, req->index, 1274 ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function)); 1275 TAILQ_REMOVE(&mpt->request_free_list, req, links); 1276 req->state = REQ_STATE_ALLOCATED; 1277 req->chain = NULL; 1278 mpt_assign_serno(mpt, req); 1279 } else if (sleep_ok != 0) { 1280 mpt->getreqwaiter = 1; 1281 mpt_sleep(mpt, &mpt->request_free_list, PUSER, "mptgreq", 0); 1282 goto retry; 1283 } 1284 return (req); 1285} 1286 1287/* Pass the command to the IOC */ 1288void 1289mpt_send_cmd(struct mpt_softc *mpt, request_t *req) 1290{ 1291 1292 if (mpt->verbose > MPT_PRT_DEBUG2) { 1293 mpt_dump_request(mpt, req); 1294 } 1295 bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap, 1296 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1297 req->state |= REQ_STATE_QUEUED; 1298 KASSERT(mpt_req_on_free_list(mpt, req) == 0, 1299 ("req %p:%u func %x on freelist list in mpt_send_cmd", 1300 req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function)); 1301 KASSERT(mpt_req_on_pending_list(mpt, req) == 0, 1302 ("req %p:%u func %x already on pending list in mpt_send_cmd", 1303 req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function)); 1304 TAILQ_INSERT_HEAD(&mpt->request_pending_list, req, links); 1305 mpt_write(mpt, MPT_OFFSET_REQUEST_Q, (uint32_t) req->req_pbuf); 1306} 1307 1308/* 1309 * Wait for a request to complete. 1310 * 1311 * Inputs: 1312 * mpt softc of controller executing request 1313 * req request to wait for 1314 * sleep_ok nonzero implies may sleep in this context 1315 * time_ms timeout in ms. 0 implies no timeout. 1316 * 1317 * Return Values: 1318 * 0 Request completed 1319 * non-0 Timeout fired before request completion. 1320 */ 1321int 1322mpt_wait_req(struct mpt_softc *mpt, request_t *req, 1323 mpt_req_state_t state, mpt_req_state_t mask, 1324 int sleep_ok, int time_ms) 1325{ 1326 int error; 1327 int timeout; 1328 u_int saved_cnt; 1329 1330 /* 1331 * timeout is in ms. 0 indicates infinite wait. 1332 * Convert to ticks or 500us units depending on 1333 * our sleep mode. 1334 */ 1335 if (sleep_ok != 0) { 1336 timeout = (time_ms * hz) / 1000; 1337 } else { 1338 timeout = time_ms * 2; 1339 } 1340 req->state |= REQ_STATE_NEED_WAKEUP; 1341 mask &= ~REQ_STATE_NEED_WAKEUP; 1342 saved_cnt = mpt->reset_cnt; 1343 while ((req->state & mask) != state && mpt->reset_cnt == saved_cnt) { 1344 if (sleep_ok != 0) { 1345 error = mpt_sleep(mpt, req, PUSER, "mptreq", timeout); 1346 if (error == EWOULDBLOCK) { 1347 timeout = 0; 1348 break; 1349 } 1350 } else { 1351 if (time_ms != 0 && --timeout == 0) { 1352 break; 1353 } 1354 DELAY(500); 1355 mpt_intr(mpt); 1356 } 1357 } 1358 req->state &= ~REQ_STATE_NEED_WAKEUP; 1359 if (mpt->reset_cnt != saved_cnt) { 1360 return (EIO); 1361 } 1362 if (time_ms && timeout <= 0) { 1363 MSG_REQUEST_HEADER *msg_hdr = req->req_vbuf; 1364 req->state |= REQ_STATE_TIMEDOUT; 1365 mpt_prt(mpt, "mpt_wait_req(%x) timed out\n", msg_hdr->Function); 1366 return (ETIMEDOUT); 1367 } 1368 return (0); 1369} 1370 1371/* 1372 * Send a command to the IOC via the handshake register. 1373 * 1374 * Only done at initialization time and for certain unusual 1375 * commands such as device/bus reset as specified by LSI. 1376 */ 1377int 1378mpt_send_handshake_cmd(struct mpt_softc *mpt, size_t len, void *cmd) 1379{ 1380 int i; 1381 uint32_t data, *data32; 1382 1383 /* Check condition of the IOC */ 1384 data = mpt_rd_db(mpt); 1385 if ((MPT_STATE(data) != MPT_DB_STATE_READY 1386 && MPT_STATE(data) != MPT_DB_STATE_RUNNING 1387 && MPT_STATE(data) != MPT_DB_STATE_FAULT) 1388 || MPT_DB_IS_IN_USE(data)) { 1389 mpt_prt(mpt, "handshake aborted - invalid doorbell state\n"); 1390 mpt_print_db(data); 1391 return (EBUSY); 1392 } 1393 1394 /* We move things in 32 bit chunks */ 1395 len = (len + 3) >> 2; 1396 data32 = cmd; 1397 1398 /* Clear any left over pending doorbell interrupts */ 1399 if (MPT_DB_INTR(mpt_rd_intr(mpt))) 1400 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0); 1401 1402 /* 1403 * Tell the handshake reg. we are going to send a command 1404 * and how long it is going to be. 1405 */ 1406 data = (MPI_FUNCTION_HANDSHAKE << MPI_DOORBELL_FUNCTION_SHIFT) | 1407 (len << MPI_DOORBELL_ADD_DWORDS_SHIFT); 1408 mpt_write(mpt, MPT_OFFSET_DOORBELL, data); 1409 1410 /* Wait for the chip to notice */ 1411 if (mpt_wait_db_int(mpt) != MPT_OK) { 1412 mpt_prt(mpt, "mpt_send_handshake_cmd: db ignored\n"); 1413 return (ETIMEDOUT); 1414 } 1415 1416 /* Clear the interrupt */ 1417 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0); 1418 1419 if (mpt_wait_db_ack(mpt) != MPT_OK) { 1420 mpt_prt(mpt, "mpt_send_handshake_cmd: db ack timed out\n"); 1421 return (ETIMEDOUT); 1422 } 1423 1424 /* Send the command */ 1425 for (i = 0; i < len; i++) { 1426 mpt_write(mpt, MPT_OFFSET_DOORBELL, htole32(*data32++)); 1427 if (mpt_wait_db_ack(mpt) != MPT_OK) { 1428 mpt_prt(mpt, 1429 "mpt_send_handshake_cmd: timeout @ index %d\n", i); 1430 return (ETIMEDOUT); 1431 } 1432 } 1433 return MPT_OK; 1434} 1435 1436/* Get the response from the handshake register */ 1437int 1438mpt_recv_handshake_reply(struct mpt_softc *mpt, size_t reply_len, void *reply) 1439{ 1440 int left, reply_left; 1441 u_int16_t *data16; 1442 uint32_t data; 1443 MSG_DEFAULT_REPLY *hdr; 1444 1445 /* We move things out in 16 bit chunks */ 1446 reply_len >>= 1; 1447 data16 = (u_int16_t *)reply; 1448 1449 hdr = (MSG_DEFAULT_REPLY *)reply; 1450 1451 /* Get first word */ 1452 if (mpt_wait_db_int(mpt) != MPT_OK) { 1453 mpt_prt(mpt, "mpt_recv_handshake_cmd timeout1\n"); 1454 return ETIMEDOUT; 1455 } 1456 data = mpt_read(mpt, MPT_OFFSET_DOORBELL); 1457 *data16++ = le16toh(data & MPT_DB_DATA_MASK); 1458 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0); 1459 1460 /* Get Second Word */ 1461 if (mpt_wait_db_int(mpt) != MPT_OK) { 1462 mpt_prt(mpt, "mpt_recv_handshake_cmd timeout2\n"); 1463 return ETIMEDOUT; 1464 } 1465 data = mpt_read(mpt, MPT_OFFSET_DOORBELL); 1466 *data16++ = le16toh(data & MPT_DB_DATA_MASK); 1467 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0); 1468 1469 /* 1470 * With the second word, we can now look at the length. 1471 * Warn about a reply that's too short (except for IOC FACTS REPLY) 1472 */ 1473 if ((reply_len >> 1) != hdr->MsgLength && 1474 (hdr->Function != MPI_FUNCTION_IOC_FACTS)){ 1475#if __FreeBSD_version >= 500000 1476 mpt_prt(mpt, "reply length does not match message length: " 1477 "got %x; expected %zx for function %x\n", 1478 hdr->MsgLength << 2, reply_len << 1, hdr->Function); 1479#else 1480 mpt_prt(mpt, "reply length does not match message length: " 1481 "got %x; expected %x for function %x\n", 1482 hdr->MsgLength << 2, reply_len << 1, hdr->Function); 1483#endif 1484 } 1485 1486 /* Get rest of the reply; but don't overflow the provided buffer */ 1487 left = (hdr->MsgLength << 1) - 2; 1488 reply_left = reply_len - 2; 1489 while (left--) { 1490 u_int16_t datum; 1491 1492 if (mpt_wait_db_int(mpt) != MPT_OK) { 1493 mpt_prt(mpt, "mpt_recv_handshake_cmd timeout3\n"); 1494 return ETIMEDOUT; 1495 } 1496 data = mpt_read(mpt, MPT_OFFSET_DOORBELL); 1497 datum = le16toh(data & MPT_DB_DATA_MASK); 1498 1499 if (reply_left-- > 0) 1500 *data16++ = datum; 1501 1502 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0); 1503 } 1504 1505 /* One more wait & clear at the end */ 1506 if (mpt_wait_db_int(mpt) != MPT_OK) { 1507 mpt_prt(mpt, "mpt_recv_handshake_cmd timeout4\n"); 1508 return ETIMEDOUT; 1509 } 1510 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0); 1511 1512 if ((hdr->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { 1513 if (mpt->verbose >= MPT_PRT_TRACE) 1514 mpt_print_reply(hdr); 1515 return (MPT_FAIL | hdr->IOCStatus); 1516 } 1517 1518 return (0); 1519} 1520 1521static int 1522mpt_get_iocfacts(struct mpt_softc *mpt, MSG_IOC_FACTS_REPLY *freplp) 1523{ 1524 MSG_IOC_FACTS f_req; 1525 int error; 1526 1527 memset(&f_req, 0, sizeof f_req); 1528 f_req.Function = MPI_FUNCTION_IOC_FACTS; 1529 f_req.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE); 1530 error = mpt_send_handshake_cmd(mpt, sizeof f_req, &f_req); 1531 if (error) { 1532 return(error); 1533 } 1534 error = mpt_recv_handshake_reply(mpt, sizeof (*freplp), freplp); 1535 return (error); 1536} 1537 1538static int 1539mpt_get_portfacts(struct mpt_softc *mpt, U8 port, MSG_PORT_FACTS_REPLY *freplp) 1540{ 1541 MSG_PORT_FACTS f_req; 1542 int error; 1543 1544 memset(&f_req, 0, sizeof f_req); 1545 f_req.Function = MPI_FUNCTION_PORT_FACTS; 1546 f_req.PortNumber = port; 1547 f_req.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE); 1548 error = mpt_send_handshake_cmd(mpt, sizeof f_req, &f_req); 1549 if (error) { 1550 return(error); 1551 } 1552 error = mpt_recv_handshake_reply(mpt, sizeof (*freplp), freplp); 1553 return (error); 1554} 1555 1556/* 1557 * Send the initialization request. This is where we specify how many 1558 * SCSI busses and how many devices per bus we wish to emulate. 1559 * This is also the command that specifies the max size of the reply 1560 * frames from the IOC that we will be allocating. 1561 */ 1562static int 1563mpt_send_ioc_init(struct mpt_softc *mpt, uint32_t who) 1564{ 1565 int error = 0; 1566 MSG_IOC_INIT init; 1567 MSG_IOC_INIT_REPLY reply; 1568 1569 memset(&init, 0, sizeof init); 1570 init.WhoInit = who; 1571 init.Function = MPI_FUNCTION_IOC_INIT; 1572 init.MaxDevices = 0; /* at least 256 devices per bus */ 1573 init.MaxBuses = 16; /* at least 16 busses */ 1574 1575 init.MsgVersion = htole16(MPI_VERSION); 1576 init.HeaderVersion = htole16(MPI_HEADER_VERSION); 1577 init.ReplyFrameSize = htole16(MPT_REPLY_SIZE); 1578 init.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE); 1579 1580 if ((error = mpt_send_handshake_cmd(mpt, sizeof init, &init)) != 0) { 1581 return(error); 1582 } 1583 1584 error = mpt_recv_handshake_reply(mpt, sizeof reply, &reply); 1585 return (error); 1586} 1587 1588 1589/* 1590 * Utiltity routine to read configuration headers and pages 1591 */ 1592int 1593mpt_issue_cfg_req(struct mpt_softc *mpt, request_t *req, cfgparms_t *params, 1594 bus_addr_t addr, bus_size_t len, int sleep_ok, int timeout_ms) 1595{ 1596 MSG_CONFIG *cfgp; 1597 SGE_SIMPLE32 *se; 1598 1599 cfgp = req->req_vbuf; 1600 memset(cfgp, 0, sizeof *cfgp); 1601 cfgp->Action = params->Action; 1602 cfgp->Function = MPI_FUNCTION_CONFIG; 1603 cfgp->Header.PageVersion = params->PageVersion; 1604 cfgp->Header.PageNumber = params->PageNumber; 1605 cfgp->PageAddress = htole32(params->PageAddress); 1606 if ((params->PageType & MPI_CONFIG_PAGETYPE_MASK) == 1607 MPI_CONFIG_PAGETYPE_EXTENDED) { 1608 cfgp->Header.PageType = MPI_CONFIG_PAGETYPE_EXTENDED; 1609 cfgp->Header.PageLength = 0; 1610 cfgp->ExtPageLength = htole16(params->ExtPageLength); 1611 cfgp->ExtPageType = params->ExtPageType; 1612 } else { 1613 cfgp->Header.PageType = params->PageType; 1614 cfgp->Header.PageLength = params->PageLength; 1615 } 1616 se = (SGE_SIMPLE32 *)&cfgp->PageBufferSGE; 1617 se->Address = htole32(addr); 1618 MPI_pSGE_SET_LENGTH(se, len); 1619 MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT | 1620 MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER | 1621 MPI_SGE_FLAGS_END_OF_LIST | 1622 ((params->Action == MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT 1623 || params->Action == MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM) 1624 ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST))); 1625 se->FlagsLength = htole32(se->FlagsLength); 1626 cfgp->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_CONFIG); 1627 1628 mpt_check_doorbell(mpt); 1629 mpt_send_cmd(mpt, req); 1630 return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, 1631 sleep_ok, timeout_ms)); 1632} 1633 1634int 1635mpt_read_extcfg_header(struct mpt_softc *mpt, int PageVersion, int PageNumber, 1636 uint32_t PageAddress, int ExtPageType, 1637 CONFIG_EXTENDED_PAGE_HEADER *rslt, 1638 int sleep_ok, int timeout_ms) 1639{ 1640 request_t *req; 1641 cfgparms_t params; 1642 MSG_CONFIG_REPLY *cfgp; 1643 int error; 1644 1645 req = mpt_get_request(mpt, sleep_ok); 1646 if (req == NULL) { 1647 mpt_prt(mpt, "mpt_extread_cfg_header: Get request failed!\n"); 1648 return (ENOMEM); 1649 } 1650 1651 params.Action = MPI_CONFIG_ACTION_PAGE_HEADER; 1652 params.PageVersion = PageVersion; 1653 params.PageLength = 0; 1654 params.PageNumber = PageNumber; 1655 params.PageType = MPI_CONFIG_PAGETYPE_EXTENDED; 1656 params.PageAddress = PageAddress; 1657 params.ExtPageType = ExtPageType; 1658 params.ExtPageLength = 0; 1659 error = mpt_issue_cfg_req(mpt, req, ¶ms, /*addr*/0, /*len*/0, 1660 sleep_ok, timeout_ms); 1661 if (error != 0) { 1662 /* 1663 * Leave the request. Without resetting the chip, it's 1664 * still owned by it and we'll just get into trouble 1665 * freeing it now. Mark it as abandoned so that if it 1666 * shows up later it can be freed. 1667 */ 1668 mpt_prt(mpt, "read_extcfg_header timed out\n"); 1669 return (ETIMEDOUT); 1670 } 1671 1672 switch (req->IOCStatus & MPI_IOCSTATUS_MASK) { 1673 case MPI_IOCSTATUS_SUCCESS: 1674 cfgp = req->req_vbuf; 1675 rslt->PageVersion = cfgp->Header.PageVersion; 1676 rslt->PageNumber = cfgp->Header.PageNumber; 1677 rslt->PageType = cfgp->Header.PageType; 1678 rslt->ExtPageLength = le16toh(cfgp->ExtPageLength); 1679 rslt->ExtPageType = cfgp->ExtPageType; 1680 error = 0; 1681 break; 1682 case MPI_IOCSTATUS_CONFIG_INVALID_PAGE: 1683 mpt_lprt(mpt, MPT_PRT_DEBUG, 1684 "Invalid Page Type %d Number %d Addr 0x%0x\n", 1685 MPI_CONFIG_PAGETYPE_EXTENDED, PageNumber, PageAddress); 1686 error = EINVAL; 1687 break; 1688 default: 1689 mpt_prt(mpt, "mpt_read_extcfg_header: Config Info Status %x\n", 1690 req->IOCStatus); 1691 error = EIO; 1692 break; 1693 } 1694 mpt_free_request(mpt, req); 1695 return (error); 1696} 1697 1698int 1699mpt_read_extcfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress, 1700 CONFIG_EXTENDED_PAGE_HEADER *hdr, void *buf, size_t len, 1701 int sleep_ok, int timeout_ms) 1702{ 1703 request_t *req; 1704 cfgparms_t params; 1705 int error; 1706 1707 req = mpt_get_request(mpt, sleep_ok); 1708 if (req == NULL) { 1709 mpt_prt(mpt, "mpt_read_extcfg_page: Get request failed!\n"); 1710 return (-1); 1711 } 1712 1713 params.Action = Action; 1714 params.PageVersion = hdr->PageVersion; 1715 params.PageLength = 0; 1716 params.PageNumber = hdr->PageNumber; 1717 params.PageType = MPI_CONFIG_PAGETYPE_EXTENDED; 1718 params.PageAddress = PageAddress; 1719 params.ExtPageType = hdr->ExtPageType; 1720 params.ExtPageLength = hdr->ExtPageLength; 1721 error = mpt_issue_cfg_req(mpt, req, ¶ms, 1722 req->req_pbuf + MPT_RQSL(mpt), 1723 len, sleep_ok, timeout_ms); 1724 if (error != 0) { 1725 mpt_prt(mpt, "read_extcfg_page(%d) timed out\n", Action); 1726 return (-1); 1727 } 1728 1729 if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { 1730 mpt_prt(mpt, "mpt_read_extcfg_page: Config Info Status %x\n", 1731 req->IOCStatus); 1732 mpt_free_request(mpt, req); 1733 return (-1); 1734 } 1735 memcpy(buf, ((uint8_t *)req->req_vbuf)+MPT_RQSL(mpt), len); 1736 mpt_free_request(mpt, req); 1737 return (0); 1738} 1739 1740int 1741mpt_read_cfg_header(struct mpt_softc *mpt, int PageType, int PageNumber, 1742 uint32_t PageAddress, CONFIG_PAGE_HEADER *rslt, 1743 int sleep_ok, int timeout_ms) 1744{ 1745 request_t *req; 1746 cfgparms_t params; 1747 MSG_CONFIG *cfgp; 1748 int error; 1749 1750 req = mpt_get_request(mpt, sleep_ok); 1751 if (req == NULL) { 1752 mpt_prt(mpt, "mpt_read_cfg_header: Get request failed!\n"); 1753 return (ENOMEM); 1754 } 1755 1756 params.Action = MPI_CONFIG_ACTION_PAGE_HEADER; 1757 params.PageVersion = 0; 1758 params.PageLength = 0; 1759 params.PageNumber = PageNumber; 1760 params.PageType = PageType; 1761 params.PageAddress = PageAddress; 1762 error = mpt_issue_cfg_req(mpt, req, ¶ms, /*addr*/0, /*len*/0, 1763 sleep_ok, timeout_ms); 1764 if (error != 0) { 1765 /* 1766 * Leave the request. Without resetting the chip, it's 1767 * still owned by it and we'll just get into trouble 1768 * freeing it now. Mark it as abandoned so that if it 1769 * shows up later it can be freed. 1770 */ 1771 mpt_prt(mpt, "read_cfg_header timed out\n"); 1772 return (ETIMEDOUT); 1773 } 1774 1775 switch (req->IOCStatus & MPI_IOCSTATUS_MASK) { 1776 case MPI_IOCSTATUS_SUCCESS: 1777 cfgp = req->req_vbuf; 1778 bcopy(&cfgp->Header, rslt, sizeof(*rslt)); 1779 error = 0; 1780 break; 1781 case MPI_IOCSTATUS_CONFIG_INVALID_PAGE: 1782 mpt_lprt(mpt, MPT_PRT_DEBUG, 1783 "Invalid Page Type %d Number %d Addr 0x%0x\n", 1784 PageType, PageNumber, PageAddress); 1785 error = EINVAL; 1786 break; 1787 default: 1788 mpt_prt(mpt, "mpt_read_cfg_header: Config Info Status %x\n", 1789 req->IOCStatus); 1790 error = EIO; 1791 break; 1792 } 1793 mpt_free_request(mpt, req); 1794 return (error); 1795} 1796 1797int 1798mpt_read_cfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress, 1799 CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok, 1800 int timeout_ms) 1801{ 1802 request_t *req; 1803 cfgparms_t params; 1804 int error; 1805 1806 req = mpt_get_request(mpt, sleep_ok); 1807 if (req == NULL) { 1808 mpt_prt(mpt, "mpt_read_cfg_page: Get request failed!\n"); 1809 return (-1); 1810 } 1811 1812 params.Action = Action; 1813 params.PageVersion = hdr->PageVersion; 1814 params.PageLength = hdr->PageLength; 1815 params.PageNumber = hdr->PageNumber; 1816 params.PageType = hdr->PageType & MPI_CONFIG_PAGETYPE_MASK; 1817 params.PageAddress = PageAddress; 1818 error = mpt_issue_cfg_req(mpt, req, ¶ms, 1819 req->req_pbuf + MPT_RQSL(mpt), 1820 len, sleep_ok, timeout_ms); 1821 if (error != 0) { 1822 mpt_prt(mpt, "read_cfg_page(%d) timed out\n", Action); 1823 return (-1); 1824 } 1825 1826 if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { 1827 mpt_prt(mpt, "mpt_read_cfg_page: Config Info Status %x\n", 1828 req->IOCStatus); 1829 mpt_free_request(mpt, req); 1830 return (-1); 1831 } 1832 memcpy(hdr, ((uint8_t *)req->req_vbuf)+MPT_RQSL(mpt), len); 1833 mpt_free_request(mpt, req); 1834 return (0); 1835} 1836 1837int 1838mpt_write_cfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress, 1839 CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok, 1840 int timeout_ms) 1841{ 1842 request_t *req; 1843 cfgparms_t params; 1844 u_int hdr_attr; 1845 int error; 1846 1847 hdr_attr = hdr->PageType & MPI_CONFIG_PAGEATTR_MASK; 1848 if (hdr_attr != MPI_CONFIG_PAGEATTR_CHANGEABLE && 1849 hdr_attr != MPI_CONFIG_PAGEATTR_PERSISTENT) { 1850 mpt_prt(mpt, "page type 0x%x not changeable\n", 1851 hdr->PageType & MPI_CONFIG_PAGETYPE_MASK); 1852 return (-1); 1853 } 1854 1855#if 0 1856 /* 1857 * We shouldn't mask off other bits here. 1858 */ 1859 hdr->PageType &= MPI_CONFIG_PAGETYPE_MASK; 1860#endif 1861 1862 req = mpt_get_request(mpt, sleep_ok); 1863 if (req == NULL) 1864 return (-1); 1865 1866 memcpy(((caddr_t)req->req_vbuf) + MPT_RQSL(mpt), hdr, len); 1867 1868 /* 1869 * There isn't any point in restoring stripped out attributes 1870 * if you then mask them going down to issue the request. 1871 */ 1872 1873 params.Action = Action; 1874 params.PageVersion = hdr->PageVersion; 1875 params.PageLength = hdr->PageLength; 1876 params.PageNumber = hdr->PageNumber; 1877 params.PageAddress = PageAddress; 1878#if 0 1879 /* Restore stripped out attributes */ 1880 hdr->PageType |= hdr_attr; 1881 params.PageType = hdr->PageType & MPI_CONFIG_PAGETYPE_MASK; 1882#else 1883 params.PageType = hdr->PageType; 1884#endif 1885 error = mpt_issue_cfg_req(mpt, req, ¶ms, 1886 req->req_pbuf + MPT_RQSL(mpt), 1887 len, sleep_ok, timeout_ms); 1888 if (error != 0) { 1889 mpt_prt(mpt, "mpt_write_cfg_page timed out\n"); 1890 return (-1); 1891 } 1892 1893 if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { 1894 mpt_prt(mpt, "mpt_write_cfg_page: Config Info Status %x\n", 1895 req->IOCStatus); 1896 mpt_free_request(mpt, req); 1897 return (-1); 1898 } 1899 mpt_free_request(mpt, req); 1900 return (0); 1901} 1902 1903/* 1904 * Read IOC configuration information 1905 */ 1906static int 1907mpt_read_config_info_ioc(struct mpt_softc *mpt) 1908{ 1909 CONFIG_PAGE_HEADER hdr; 1910 struct mpt_raid_volume *mpt_raid; 1911 int rv; 1912 int i; 1913 size_t len; 1914 1915 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC, 1916 2, 0, &hdr, FALSE, 5000); 1917 /* 1918 * If it's an invalid page, so what? Not a supported function.... 1919 */ 1920 if (rv == EINVAL) { 1921 return (0); 1922 } 1923 if (rv) { 1924 return (rv); 1925 } 1926 1927 mpt_lprt(mpt, MPT_PRT_DEBUG, 1928 "IOC Page 2 Header: Version %x len %x PageNumber %x PageType %x\n", 1929 hdr.PageVersion, hdr.PageLength << 2, 1930 hdr.PageNumber, hdr.PageType); 1931 1932 len = hdr.PageLength * sizeof(uint32_t); 1933 mpt->ioc_page2 = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); 1934 if (mpt->ioc_page2 == NULL) { 1935 mpt_prt(mpt, "unable to allocate memory for IOC page 2\n"); 1936 mpt_raid_free_mem(mpt); 1937 return (ENOMEM); 1938 } 1939 memcpy(&mpt->ioc_page2->Header, &hdr, sizeof(hdr)); 1940 rv = mpt_read_cur_cfg_page(mpt, 0, 1941 &mpt->ioc_page2->Header, len, FALSE, 5000); 1942 if (rv) { 1943 mpt_prt(mpt, "failed to read IOC Page 2\n"); 1944 mpt_raid_free_mem(mpt); 1945 return (EIO); 1946 } 1947 mpt2host_config_page_ioc2(mpt->ioc_page2); 1948 1949 if (mpt->ioc_page2->CapabilitiesFlags != 0) { 1950 uint32_t mask; 1951 1952 mpt_prt(mpt, "Capabilities: ("); 1953 for (mask = 1; mask != 0; mask <<= 1) { 1954 if ((mpt->ioc_page2->CapabilitiesFlags & mask) == 0) { 1955 continue; 1956 } 1957 switch (mask) { 1958 case MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT: 1959 mpt_prtc(mpt, " RAID-0"); 1960 break; 1961 case MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT: 1962 mpt_prtc(mpt, " RAID-1E"); 1963 break; 1964 case MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT: 1965 mpt_prtc(mpt, " RAID-1"); 1966 break; 1967 case MPI_IOCPAGE2_CAP_FLAGS_SES_SUPPORT: 1968 mpt_prtc(mpt, " SES"); 1969 break; 1970 case MPI_IOCPAGE2_CAP_FLAGS_SAFTE_SUPPORT: 1971 mpt_prtc(mpt, " SAFTE"); 1972 break; 1973 case MPI_IOCPAGE2_CAP_FLAGS_CROSS_CHANNEL_SUPPORT: 1974 mpt_prtc(mpt, " Multi-Channel-Arrays"); 1975 default: 1976 break; 1977 } 1978 } 1979 mpt_prtc(mpt, " )\n"); 1980 if ((mpt->ioc_page2->CapabilitiesFlags 1981 & (MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT 1982 | MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT 1983 | MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT)) != 0) { 1984 mpt_prt(mpt, "%d Active Volume%s(%d Max)\n", 1985 mpt->ioc_page2->NumActiveVolumes, 1986 mpt->ioc_page2->NumActiveVolumes != 1 1987 ? "s " : " ", 1988 mpt->ioc_page2->MaxVolumes); 1989 mpt_prt(mpt, "%d Hidden Drive Member%s(%d Max)\n", 1990 mpt->ioc_page2->NumActivePhysDisks, 1991 mpt->ioc_page2->NumActivePhysDisks != 1 1992 ? "s " : " ", 1993 mpt->ioc_page2->MaxPhysDisks); 1994 } 1995 } 1996 1997 len = mpt->ioc_page2->MaxVolumes * sizeof(struct mpt_raid_volume); 1998 mpt->raid_volumes = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); 1999 if (mpt->raid_volumes == NULL) { 2000 mpt_prt(mpt, "Could not allocate RAID volume data\n"); 2001 mpt_raid_free_mem(mpt); 2002 return (ENOMEM); 2003 } 2004 2005 /* 2006 * Copy critical data out of ioc_page2 so that we can 2007 * safely refresh the page without windows of unreliable 2008 * data. 2009 */ 2010 mpt->raid_max_volumes = mpt->ioc_page2->MaxVolumes; 2011 2012 len = sizeof(*mpt->raid_volumes->config_page) + 2013 (sizeof (RAID_VOL0_PHYS_DISK) * (mpt->ioc_page2->MaxPhysDisks - 1)); 2014 for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) { 2015 mpt_raid = &mpt->raid_volumes[i]; 2016 mpt_raid->config_page = 2017 malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); 2018 if (mpt_raid->config_page == NULL) { 2019 mpt_prt(mpt, "Could not allocate RAID page data\n"); 2020 mpt_raid_free_mem(mpt); 2021 return (ENOMEM); 2022 } 2023 } 2024 mpt->raid_page0_len = len; 2025 2026 len = mpt->ioc_page2->MaxPhysDisks * sizeof(struct mpt_raid_disk); 2027 mpt->raid_disks = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); 2028 if (mpt->raid_disks == NULL) { 2029 mpt_prt(mpt, "Could not allocate RAID disk data\n"); 2030 mpt_raid_free_mem(mpt); 2031 return (ENOMEM); 2032 } 2033 mpt->raid_max_disks = mpt->ioc_page2->MaxPhysDisks; 2034 2035 /* 2036 * Load page 3. 2037 */ 2038 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC, 2039 3, 0, &hdr, FALSE, 5000); 2040 if (rv) { 2041 mpt_raid_free_mem(mpt); 2042 return (EIO); 2043 } 2044 2045 mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC Page 3 Header: %x %x %x %x\n", 2046 hdr.PageVersion, hdr.PageLength, hdr.PageNumber, hdr.PageType); 2047 2048 len = hdr.PageLength * sizeof(uint32_t); 2049 mpt->ioc_page3 = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); 2050 if (mpt->ioc_page3 == NULL) { 2051 mpt_prt(mpt, "unable to allocate memory for IOC page 3\n"); 2052 mpt_raid_free_mem(mpt); 2053 return (ENOMEM); 2054 } 2055 memcpy(&mpt->ioc_page3->Header, &hdr, sizeof(hdr)); 2056 rv = mpt_read_cur_cfg_page(mpt, 0, 2057 &mpt->ioc_page3->Header, len, FALSE, 5000); 2058 if (rv) { 2059 mpt_raid_free_mem(mpt); 2060 return (EIO); 2061 } 2062 mpt2host_config_page_ioc3(mpt->ioc_page3); 2063 mpt_raid_wakeup(mpt); 2064 return (0); 2065} 2066 2067/* 2068 * Enable IOC port 2069 */ 2070static int 2071mpt_send_port_enable(struct mpt_softc *mpt, int port) 2072{ 2073 request_t *req; 2074 MSG_PORT_ENABLE *enable_req; 2075 int error; 2076 2077 req = mpt_get_request(mpt, /*sleep_ok*/FALSE); 2078 if (req == NULL) 2079 return (-1); 2080 2081 enable_req = req->req_vbuf; 2082 memset(enable_req, 0, MPT_RQSL(mpt)); 2083 2084 enable_req->Function = MPI_FUNCTION_PORT_ENABLE; 2085 enable_req->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_CONFIG); 2086 enable_req->PortNumber = port; 2087 2088 mpt_check_doorbell(mpt); 2089 mpt_lprt(mpt, MPT_PRT_DEBUG, "enabling port %d\n", port); 2090 2091 mpt_send_cmd(mpt, req); 2092 error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, 2093 FALSE, (mpt->is_sas || mpt->is_fc)? 300000 : 30000); 2094 if (error != 0) { 2095 mpt_prt(mpt, "port %d enable timed out\n", port); 2096 return (-1); 2097 } 2098 mpt_free_request(mpt, req); 2099 mpt_lprt(mpt, MPT_PRT_DEBUG, "enabled port %d\n", port); 2100 return (0); 2101} 2102 2103/* 2104 * Enable/Disable asynchronous event reporting. 2105 */ 2106static int 2107mpt_send_event_request(struct mpt_softc *mpt, int onoff) 2108{ 2109 request_t *req; 2110 MSG_EVENT_NOTIFY *enable_req; 2111 2112 req = mpt_get_request(mpt, FALSE); 2113 if (req == NULL) { 2114 return (ENOMEM); 2115 } 2116 enable_req = req->req_vbuf; 2117 memset(enable_req, 0, sizeof *enable_req); 2118 2119 enable_req->Function = MPI_FUNCTION_EVENT_NOTIFICATION; 2120 enable_req->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_EVENTS); 2121 enable_req->Switch = onoff; 2122 2123 mpt_check_doorbell(mpt); 2124 mpt_lprt(mpt, MPT_PRT_DEBUG, "%sabling async events\n", 2125 onoff ? "en" : "dis"); 2126 /* 2127 * Send the command off, but don't wait for it. 2128 */ 2129 mpt_send_cmd(mpt, req); 2130 return (0); 2131} 2132 2133/* 2134 * Un-mask the interrupts on the chip. 2135 */ 2136void 2137mpt_enable_ints(struct mpt_softc *mpt) 2138{ 2139 2140 /* Unmask every thing except door bell int */ 2141 mpt_write(mpt, MPT_OFFSET_INTR_MASK, MPT_INTR_DB_MASK); 2142} 2143 2144/* 2145 * Mask the interrupts on the chip. 2146 */ 2147void 2148mpt_disable_ints(struct mpt_softc *mpt) 2149{ 2150 2151 /* Mask all interrupts */ 2152 mpt_write(mpt, MPT_OFFSET_INTR_MASK, 2153 MPT_INTR_REPLY_MASK | MPT_INTR_DB_MASK); 2154} 2155 2156static void 2157mpt_sysctl_attach(struct mpt_softc *mpt) 2158{ 2159#if __FreeBSD_version >= 500000 2160 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev); 2161 struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev); 2162 2163 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 2164 "debug", CTLFLAG_RW, &mpt->verbose, 0, 2165 "Debugging/Verbose level"); 2166 SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 2167 "role", CTLFLAG_RD, &mpt->role, 0, 2168 "HBA role"); 2169#ifdef MPT_TEST_MULTIPATH 2170 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 2171 "failure_id", CTLFLAG_RW, &mpt->failure_id, -1, 2172 "Next Target to Fail"); 2173#endif 2174#endif 2175} 2176 2177int 2178mpt_attach(struct mpt_softc *mpt) 2179{ 2180 struct mpt_personality *pers; 2181 int i; 2182 int error; 2183 2184 mpt_core_attach(mpt); 2185 mpt_core_enable(mpt); 2186 2187 TAILQ_INSERT_TAIL(&mpt_tailq, mpt, links); 2188 for (i = 0; i < MPT_MAX_PERSONALITIES; i++) { 2189 pers = mpt_personalities[i]; 2190 if (pers == NULL) { 2191 continue; 2192 } 2193 if (pers->probe(mpt) == 0) { 2194 error = pers->attach(mpt); 2195 if (error != 0) { 2196 mpt_detach(mpt); 2197 return (error); 2198 } 2199 mpt->mpt_pers_mask |= (0x1 << pers->id); 2200 pers->use_count++; 2201 } 2202 } 2203 2204 /* 2205 * Now that we've attached everything, do the enable function 2206 * for all of the personalities. This allows the personalities 2207 * to do setups that are appropriate for them prior to enabling 2208 * any ports. 2209 */ 2210 for (i = 0; i < MPT_MAX_PERSONALITIES; i++) { 2211 pers = mpt_personalities[i]; 2212 if (pers != NULL && MPT_PERS_ATTACHED(pers, mpt) != 0) { 2213 error = pers->enable(mpt); 2214 if (error != 0) { 2215 mpt_prt(mpt, "personality %s attached but would" 2216 " not enable (%d)\n", pers->name, error); 2217 mpt_detach(mpt); 2218 return (error); 2219 } 2220 } 2221 } 2222 return (0); 2223} 2224 2225int 2226mpt_shutdown(struct mpt_softc *mpt) 2227{ 2228 struct mpt_personality *pers; 2229 2230 MPT_PERS_FOREACH_REVERSE(mpt, pers) { 2231 pers->shutdown(mpt); 2232 } 2233 return (0); 2234} 2235 2236int 2237mpt_detach(struct mpt_softc *mpt) 2238{ 2239 struct mpt_personality *pers; 2240 2241 MPT_PERS_FOREACH_REVERSE(mpt, pers) { 2242 pers->detach(mpt); 2243 mpt->mpt_pers_mask &= ~(0x1 << pers->id); 2244 pers->use_count--; 2245 } 2246 TAILQ_REMOVE(&mpt_tailq, mpt, links); 2247 return (0); 2248} 2249 2250static int 2251mpt_core_load(struct mpt_personality *pers) 2252{ 2253 int i; 2254 2255 /* 2256 * Setup core handlers and insert the default handler 2257 * into all "empty slots". 2258 */ 2259 for (i = 0; i < MPT_NUM_REPLY_HANDLERS; i++) { 2260 mpt_reply_handlers[i] = mpt_default_reply_handler; 2261 } 2262 2263 mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_EVENTS)] = 2264 mpt_event_reply_handler; 2265 mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_CONFIG)] = 2266 mpt_config_reply_handler; 2267 mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_HANDSHAKE)] = 2268 mpt_handshake_reply_handler; 2269 return (0); 2270} 2271 2272/* 2273 * Initialize per-instance driver data and perform 2274 * initial controller configuration. 2275 */ 2276static int 2277mpt_core_attach(struct mpt_softc *mpt) 2278{ 2279 int val, error; 2280 2281 LIST_INIT(&mpt->ack_frames); 2282 /* Put all request buffers on the free list */ 2283 TAILQ_INIT(&mpt->request_pending_list); 2284 TAILQ_INIT(&mpt->request_free_list); 2285 TAILQ_INIT(&mpt->request_timeout_list); 2286 for (val = 0; val < MPT_MAX_LUNS; val++) { 2287 STAILQ_INIT(&mpt->trt[val].atios); 2288 STAILQ_INIT(&mpt->trt[val].inots); 2289 } 2290 STAILQ_INIT(&mpt->trt_wildcard.atios); 2291 STAILQ_INIT(&mpt->trt_wildcard.inots); 2292#ifdef MPT_TEST_MULTIPATH 2293 mpt->failure_id = -1; 2294#endif 2295 mpt->scsi_tgt_handler_id = MPT_HANDLER_ID_NONE; 2296 mpt_sysctl_attach(mpt); 2297 mpt_lprt(mpt, MPT_PRT_DEBUG, "doorbell req = %s\n", 2298 mpt_ioc_diag(mpt_read(mpt, MPT_OFFSET_DOORBELL))); 2299 2300 MPT_LOCK(mpt); 2301 error = mpt_configure_ioc(mpt, 0, 0); 2302 MPT_UNLOCK(mpt); 2303 2304 return (error); 2305} 2306 2307static int 2308mpt_core_enable(struct mpt_softc *mpt) 2309{ 2310 2311 /* 2312 * We enter with the IOC enabled, but async events 2313 * not enabled, ports not enabled and interrupts 2314 * not enabled. 2315 */ 2316 MPT_LOCK(mpt); 2317 2318 /* 2319 * Enable asynchronous event reporting- all personalities 2320 * have attached so that they should be able to now field 2321 * async events. 2322 */ 2323 mpt_send_event_request(mpt, 1); 2324 2325 /* 2326 * Catch any pending interrupts 2327 * 2328 * This seems to be crucial- otherwise 2329 * the portenable below times out. 2330 */ 2331 mpt_intr(mpt); 2332 2333 /* 2334 * Enable Interrupts 2335 */ 2336 mpt_enable_ints(mpt); 2337 2338 /* 2339 * Catch any pending interrupts 2340 * 2341 * This seems to be crucial- otherwise 2342 * the portenable below times out. 2343 */ 2344 mpt_intr(mpt); 2345 2346 /* 2347 * Enable the port. 2348 */ 2349 if (mpt_send_port_enable(mpt, 0) != MPT_OK) { 2350 mpt_prt(mpt, "failed to enable port 0\n"); 2351 MPT_UNLOCK(mpt); 2352 return (ENXIO); 2353 } 2354 MPT_UNLOCK(mpt); 2355 return (0); 2356} 2357 2358static void 2359mpt_core_shutdown(struct mpt_softc *mpt) 2360{ 2361 2362 mpt_disable_ints(mpt); 2363} 2364 2365static void 2366mpt_core_detach(struct mpt_softc *mpt) 2367{ 2368 int val; 2369 2370 /* 2371 * XXX: FREE MEMORY 2372 */ 2373 mpt_disable_ints(mpt); 2374 2375 /* Make sure no request has pending timeouts. */ 2376 for (val = 0; val < MPT_MAX_REQUESTS(mpt); val++) { 2377 request_t *req = &mpt->request_pool[val]; 2378 mpt_callout_drain(mpt, &req->callout); 2379 } 2380 2381 mpt_dma_buf_free(mpt); 2382} 2383 2384static int 2385mpt_core_unload(struct mpt_personality *pers) 2386{ 2387 2388 /* Unload is always successful. */ 2389 return (0); 2390} 2391 2392#define FW_UPLOAD_REQ_SIZE \ 2393 (sizeof(MSG_FW_UPLOAD) - sizeof(SGE_MPI_UNION) \ 2394 + sizeof(FW_UPLOAD_TCSGE) + sizeof(SGE_SIMPLE32)) 2395 2396static int 2397mpt_upload_fw(struct mpt_softc *mpt) 2398{ 2399 uint8_t fw_req_buf[FW_UPLOAD_REQ_SIZE]; 2400 MSG_FW_UPLOAD_REPLY fw_reply; 2401 MSG_FW_UPLOAD *fw_req; 2402 FW_UPLOAD_TCSGE *tsge; 2403 SGE_SIMPLE32 *sge; 2404 uint32_t flags; 2405 int error; 2406 2407 memset(&fw_req_buf, 0, sizeof(fw_req_buf)); 2408 fw_req = (MSG_FW_UPLOAD *)fw_req_buf; 2409 fw_req->ImageType = MPI_FW_UPLOAD_ITYPE_FW_IOC_MEM; 2410 fw_req->Function = MPI_FUNCTION_FW_UPLOAD; 2411 fw_req->MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE); 2412 tsge = (FW_UPLOAD_TCSGE *)&fw_req->SGL; 2413 tsge->DetailsLength = 12; 2414 tsge->Flags = MPI_SGE_FLAGS_TRANSACTION_ELEMENT; 2415 tsge->ImageSize = htole32(mpt->fw_image_size); 2416 sge = (SGE_SIMPLE32 *)(tsge + 1); 2417 flags = (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER 2418 | MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_SIMPLE_ELEMENT 2419 | MPI_SGE_FLAGS_32_BIT_ADDRESSING | MPI_SGE_FLAGS_IOC_TO_HOST); 2420 flags <<= MPI_SGE_FLAGS_SHIFT; 2421 sge->FlagsLength = htole32(flags | mpt->fw_image_size); 2422 sge->Address = htole32(mpt->fw_phys); 2423 bus_dmamap_sync(mpt->fw_dmat, mpt->fw_dmap, BUS_DMASYNC_PREREAD); 2424 error = mpt_send_handshake_cmd(mpt, sizeof(fw_req_buf), &fw_req_buf); 2425 if (error) 2426 return(error); 2427 error = mpt_recv_handshake_reply(mpt, sizeof(fw_reply), &fw_reply); 2428 bus_dmamap_sync(mpt->fw_dmat, mpt->fw_dmap, BUS_DMASYNC_POSTREAD); 2429 return (error); 2430} 2431 2432static void 2433mpt_diag_outsl(struct mpt_softc *mpt, uint32_t addr, 2434 uint32_t *data, bus_size_t len) 2435{ 2436 uint32_t *data_end; 2437 2438 data_end = data + (roundup2(len, sizeof(uint32_t)) / 4); 2439 if (mpt->is_sas) { 2440 pci_enable_io(mpt->dev, SYS_RES_IOPORT); 2441 } 2442 mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, addr); 2443 while (data != data_end) { 2444 mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, *data); 2445 data++; 2446 } 2447 if (mpt->is_sas) { 2448 pci_disable_io(mpt->dev, SYS_RES_IOPORT); 2449 } 2450} 2451 2452static int 2453mpt_download_fw(struct mpt_softc *mpt) 2454{ 2455 MpiFwHeader_t *fw_hdr; 2456 int error; 2457 uint32_t ext_offset; 2458 uint32_t data; 2459 2460 if (mpt->pci_pio_reg == NULL) { 2461 mpt_prt(mpt, "No PIO resource!\n"); 2462 return (ENXIO); 2463 } 2464 2465 mpt_prt(mpt, "Downloading Firmware - Image Size %d\n", 2466 mpt->fw_image_size); 2467 2468 error = mpt_enable_diag_mode(mpt); 2469 if (error != 0) { 2470 mpt_prt(mpt, "Could not enter diagnostic mode!\n"); 2471 return (EIO); 2472 } 2473 2474 mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, 2475 MPI_DIAG_RW_ENABLE|MPI_DIAG_DISABLE_ARM); 2476 2477 fw_hdr = (MpiFwHeader_t *)mpt->fw_image; 2478 bus_dmamap_sync(mpt->fw_dmat, mpt->fw_dmap, BUS_DMASYNC_PREWRITE); 2479 mpt_diag_outsl(mpt, fw_hdr->LoadStartAddress, (uint32_t*)fw_hdr, 2480 fw_hdr->ImageSize); 2481 bus_dmamap_sync(mpt->fw_dmat, mpt->fw_dmap, BUS_DMASYNC_POSTWRITE); 2482 2483 ext_offset = fw_hdr->NextImageHeaderOffset; 2484 while (ext_offset != 0) { 2485 MpiExtImageHeader_t *ext; 2486 2487 ext = (MpiExtImageHeader_t *)((uintptr_t)fw_hdr + ext_offset); 2488 ext_offset = ext->NextImageHeaderOffset; 2489 bus_dmamap_sync(mpt->fw_dmat, mpt->fw_dmap, 2490 BUS_DMASYNC_PREWRITE); 2491 mpt_diag_outsl(mpt, ext->LoadStartAddress, (uint32_t*)ext, 2492 ext->ImageSize); 2493 bus_dmamap_sync(mpt->fw_dmat, mpt->fw_dmap, 2494 BUS_DMASYNC_POSTWRITE); 2495 } 2496 2497 if (mpt->is_sas) { 2498 pci_enable_io(mpt->dev, SYS_RES_IOPORT); 2499 } 2500 /* Setup the address to jump to on reset. */ 2501 mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, fw_hdr->IopResetRegAddr); 2502 mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, fw_hdr->IopResetVectorValue); 2503 2504 /* 2505 * The controller sets the "flash bad" status after attempting 2506 * to auto-boot from flash. Clear the status so that the controller 2507 * will continue the boot process with our newly installed firmware. 2508 */ 2509 mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, MPT_DIAG_MEM_CFG_BASE); 2510 data = mpt_pio_read(mpt, MPT_OFFSET_DIAG_DATA) | MPT_DIAG_MEM_CFG_BADFL; 2511 mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, MPT_DIAG_MEM_CFG_BASE); 2512 mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, data); 2513 2514 if (mpt->is_sas) { 2515 pci_disable_io(mpt->dev, SYS_RES_IOPORT); 2516 } 2517 2518 /* 2519 * Re-enable the processor and clear the boot halt flag. 2520 */ 2521 data = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC); 2522 data &= ~(MPI_DIAG_PREVENT_IOC_BOOT|MPI_DIAG_DISABLE_ARM); 2523 mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, data); 2524 2525 mpt_disable_diag_mode(mpt); 2526 return (0); 2527} 2528 2529static int 2530mpt_dma_buf_alloc(struct mpt_softc *mpt) 2531{ 2532 struct mpt_map_info mi; 2533 uint8_t *vptr; 2534 uint32_t pptr, end; 2535 int i, error; 2536 2537 /* Create a child tag for data buffers */ 2538 if (mpt_dma_tag_create(mpt, mpt->parent_dmat, 1, 2539 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 2540 NULL, NULL, (mpt->max_cam_seg_cnt - 1) * PAGE_SIZE, 2541 mpt->max_cam_seg_cnt, BUS_SPACE_MAXSIZE_32BIT, 0, 2542 &mpt->buffer_dmat) != 0) { 2543 mpt_prt(mpt, "cannot create a dma tag for data buffers\n"); 2544 return (1); 2545 } 2546 2547 /* Create a child tag for request buffers */ 2548 if (mpt_dma_tag_create(mpt, mpt->parent_dmat, PAGE_SIZE, 0, 2549 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, 2550 NULL, NULL, MPT_REQ_MEM_SIZE(mpt), 1, BUS_SPACE_MAXSIZE_32BIT, 0, 2551 &mpt->request_dmat) != 0) { 2552 mpt_prt(mpt, "cannot create a dma tag for requests\n"); 2553 return (1); 2554 } 2555 2556 /* Allocate some DMA accessible memory for requests */ 2557 if (bus_dmamem_alloc(mpt->request_dmat, (void **)&mpt->request, 2558 BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &mpt->request_dmap) != 0) { 2559 mpt_prt(mpt, "cannot allocate %d bytes of request memory\n", 2560 MPT_REQ_MEM_SIZE(mpt)); 2561 return (1); 2562 } 2563 2564 mi.mpt = mpt; 2565 mi.error = 0; 2566 2567 /* Load and lock it into "bus space" */ 2568 bus_dmamap_load(mpt->request_dmat, mpt->request_dmap, mpt->request, 2569 MPT_REQ_MEM_SIZE(mpt), mpt_map_rquest, &mi, 0); 2570 2571 if (mi.error) { 2572 mpt_prt(mpt, "error %d loading dma map for DMA request queue\n", 2573 mi.error); 2574 return (1); 2575 } 2576 mpt->request_phys = mi.phys; 2577 2578 /* 2579 * Now create per-request dma maps 2580 */ 2581 i = 0; 2582 pptr = mpt->request_phys; 2583 vptr = mpt->request; 2584 end = pptr + MPT_REQ_MEM_SIZE(mpt); 2585 while(pptr < end) { 2586 request_t *req = &mpt->request_pool[i]; 2587 req->index = i++; 2588 2589 /* Store location of Request Data */ 2590 req->req_pbuf = pptr; 2591 req->req_vbuf = vptr; 2592 2593 pptr += MPT_REQUEST_AREA; 2594 vptr += MPT_REQUEST_AREA; 2595 2596 req->sense_pbuf = (pptr - MPT_SENSE_SIZE); 2597 req->sense_vbuf = (vptr - MPT_SENSE_SIZE); 2598 2599 error = bus_dmamap_create(mpt->buffer_dmat, 0, &req->dmap); 2600 if (error) { 2601 mpt_prt(mpt, "error %d creating per-cmd DMA maps\n", 2602 error); 2603 return (1); 2604 } 2605 } 2606 2607 return (0); 2608} 2609 2610static void 2611mpt_dma_buf_free(struct mpt_softc *mpt) 2612{ 2613 int i; 2614 2615 if (mpt->request_dmat == 0) { 2616 mpt_lprt(mpt, MPT_PRT_DEBUG, "already released dma memory\n"); 2617 return; 2618 } 2619 for (i = 0; i < MPT_MAX_REQUESTS(mpt); i++) { 2620 bus_dmamap_destroy(mpt->buffer_dmat, mpt->request_pool[i].dmap); 2621 } 2622 bus_dmamap_unload(mpt->request_dmat, mpt->request_dmap); 2623 bus_dmamem_free(mpt->request_dmat, mpt->request, mpt->request_dmap); 2624 bus_dma_tag_destroy(mpt->request_dmat); 2625 mpt->request_dmat = 0; 2626 bus_dma_tag_destroy(mpt->buffer_dmat); 2627} 2628 2629/* 2630 * Allocate/Initialize data structures for the controller. Called 2631 * once at instance startup. 2632 */ 2633static int 2634mpt_configure_ioc(struct mpt_softc *mpt, int tn, int needreset) 2635{ 2636 PTR_MSG_PORT_FACTS_REPLY pfp; 2637 int error, port, val; 2638 size_t len; 2639 2640 if (tn == MPT_MAX_TRYS) { 2641 return (-1); 2642 } 2643 2644 /* 2645 * No need to reset if the IOC is already in the READY state. 2646 * 2647 * Force reset if initialization failed previously. 2648 * Note that a hard_reset of the second channel of a '929 2649 * will stop operation of the first channel. Hopefully, if the 2650 * first channel is ok, the second will not require a hard 2651 * reset. 2652 */ 2653 if (needreset || MPT_STATE(mpt_rd_db(mpt)) != MPT_DB_STATE_READY) { 2654 if (mpt_reset(mpt, FALSE) != MPT_OK) { 2655 return (mpt_configure_ioc(mpt, tn++, 1)); 2656 } 2657 needreset = 0; 2658 } 2659 2660 if (mpt_get_iocfacts(mpt, &mpt->ioc_facts) != MPT_OK) { 2661 mpt_prt(mpt, "mpt_get_iocfacts failed\n"); 2662 return (mpt_configure_ioc(mpt, tn++, 1)); 2663 } 2664 mpt2host_iocfacts_reply(&mpt->ioc_facts); 2665 2666 mpt_prt(mpt, "MPI Version=%d.%d.%d.%d\n", 2667 mpt->ioc_facts.MsgVersion >> 8, 2668 mpt->ioc_facts.MsgVersion & 0xFF, 2669 mpt->ioc_facts.HeaderVersion >> 8, 2670 mpt->ioc_facts.HeaderVersion & 0xFF); 2671 2672 /* 2673 * Now that we know request frame size, we can calculate 2674 * the actual (reasonable) segment limit for read/write I/O. 2675 * 2676 * This limit is constrained by: 2677 * 2678 * + The size of each area we allocate per command (and how 2679 * many chain segments we can fit into it). 2680 * + The total number of areas we've set up. 2681 * + The actual chain depth the card will allow. 2682 * 2683 * The first area's segment count is limited by the I/O request 2684 * at the head of it. We cannot allocate realistically more 2685 * than MPT_MAX_REQUESTS areas. Therefore, to account for both 2686 * conditions, we'll just start out with MPT_MAX_REQUESTS-2. 2687 * 2688 */ 2689 /* total number of request areas we (can) allocate */ 2690 mpt->max_seg_cnt = MPT_MAX_REQUESTS(mpt) - 2; 2691 2692 /* converted to the number of chain areas possible */ 2693 mpt->max_seg_cnt *= MPT_NRFM(mpt); 2694 2695 /* limited by the number of chain areas the card will support */ 2696 if (mpt->max_seg_cnt > mpt->ioc_facts.MaxChainDepth) { 2697 mpt_lprt(mpt, MPT_PRT_INFO, 2698 "chain depth limited to %u (from %u)\n", 2699 mpt->ioc_facts.MaxChainDepth, mpt->max_seg_cnt); 2700 mpt->max_seg_cnt = mpt->ioc_facts.MaxChainDepth; 2701 } 2702 2703 /* converted to the number of simple sges in chain segments. */ 2704 mpt->max_seg_cnt *= (MPT_NSGL(mpt) - 1); 2705 2706 /* 2707 * Use this as the basis for reporting the maximum I/O size to CAM. 2708 */ 2709 mpt->max_cam_seg_cnt = min(mpt->max_seg_cnt, (MAXPHYS / PAGE_SIZE) + 1); 2710 2711 error = mpt_dma_buf_alloc(mpt); 2712 if (error != 0) { 2713 mpt_prt(mpt, "mpt_dma_buf_alloc() failed!\n"); 2714 return (EIO); 2715 } 2716 2717 for (val = 0; val < MPT_MAX_REQUESTS(mpt); val++) { 2718 request_t *req = &mpt->request_pool[val]; 2719 req->state = REQ_STATE_ALLOCATED; 2720 mpt_callout_init(mpt, &req->callout); 2721 mpt_free_request(mpt, req); 2722 } 2723 2724 mpt_lprt(mpt, MPT_PRT_INFO, "Maximum Segment Count: %u, Maximum " 2725 "CAM Segment Count: %u\n", mpt->max_seg_cnt, 2726 mpt->max_cam_seg_cnt); 2727 2728 mpt_lprt(mpt, MPT_PRT_INFO, "MsgLength=%u IOCNumber = %d\n", 2729 mpt->ioc_facts.MsgLength, mpt->ioc_facts.IOCNumber); 2730 mpt_lprt(mpt, MPT_PRT_INFO, 2731 "IOCFACTS: GlobalCredits=%d BlockSize=%u bytes " 2732 "Request Frame Size %u bytes Max Chain Depth %u\n", 2733 mpt->ioc_facts.GlobalCredits, mpt->ioc_facts.BlockSize, 2734 mpt->ioc_facts.RequestFrameSize << 2, 2735 mpt->ioc_facts.MaxChainDepth); 2736 mpt_lprt(mpt, MPT_PRT_INFO, "IOCFACTS: Num Ports %d, FWImageSize %d, " 2737 "Flags=%#x\n", mpt->ioc_facts.NumberOfPorts, 2738 mpt->ioc_facts.FWImageSize, mpt->ioc_facts.Flags); 2739 2740 len = mpt->ioc_facts.NumberOfPorts * sizeof (MSG_PORT_FACTS_REPLY); 2741 mpt->port_facts = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); 2742 if (mpt->port_facts == NULL) { 2743 mpt_prt(mpt, "unable to allocate memory for port facts\n"); 2744 return (ENOMEM); 2745 } 2746 2747 2748 if ((mpt->ioc_facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT) && 2749 (mpt->fw_uploaded == 0)) { 2750 struct mpt_map_info mi; 2751 2752 /* 2753 * In some configurations, the IOC's firmware is 2754 * stored in a shared piece of system NVRAM that 2755 * is only accessible via the BIOS. In this 2756 * case, the firmware keeps a copy of firmware in 2757 * RAM until the OS driver retrieves it. Once 2758 * retrieved, we are responsible for re-downloading 2759 * the firmware after any hard-reset. 2760 */ 2761 mpt->fw_image_size = mpt->ioc_facts.FWImageSize; 2762 error = mpt_dma_tag_create(mpt, mpt->parent_dmat, 1, 0, 2763 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 2764 mpt->fw_image_size, 1, mpt->fw_image_size, 0, 2765 &mpt->fw_dmat); 2766 if (error != 0) { 2767 mpt_prt(mpt, "cannot create firmware dma tag\n"); 2768 return (ENOMEM); 2769 } 2770 error = bus_dmamem_alloc(mpt->fw_dmat, 2771 (void **)&mpt->fw_image, BUS_DMA_NOWAIT | 2772 BUS_DMA_COHERENT, &mpt->fw_dmap); 2773 if (error != 0) { 2774 mpt_prt(mpt, "cannot allocate firmware memory\n"); 2775 bus_dma_tag_destroy(mpt->fw_dmat); 2776 return (ENOMEM); 2777 } 2778 mi.mpt = mpt; 2779 mi.error = 0; 2780 bus_dmamap_load(mpt->fw_dmat, mpt->fw_dmap, 2781 mpt->fw_image, mpt->fw_image_size, mpt_map_rquest, &mi, 0); 2782 mpt->fw_phys = mi.phys; 2783 2784 error = mpt_upload_fw(mpt); 2785 if (error != 0) { 2786 mpt_prt(mpt, "firmware upload failed.\n"); 2787 bus_dmamap_unload(mpt->fw_dmat, mpt->fw_dmap); 2788 bus_dmamem_free(mpt->fw_dmat, mpt->fw_image, 2789 mpt->fw_dmap); 2790 bus_dma_tag_destroy(mpt->fw_dmat); 2791 mpt->fw_image = NULL; 2792 return (EIO); 2793 } 2794 mpt->fw_uploaded = 1; 2795 } 2796 2797 for (port = 0; port < mpt->ioc_facts.NumberOfPorts; port++) { 2798 pfp = &mpt->port_facts[port]; 2799 error = mpt_get_portfacts(mpt, 0, pfp); 2800 if (error != MPT_OK) { 2801 mpt_prt(mpt, 2802 "mpt_get_portfacts on port %d failed\n", port); 2803 free(mpt->port_facts, M_DEVBUF); 2804 mpt->port_facts = NULL; 2805 return (mpt_configure_ioc(mpt, tn++, 1)); 2806 } 2807 mpt2host_portfacts_reply(pfp); 2808 2809 if (port > 0) { 2810 error = MPT_PRT_INFO; 2811 } else { 2812 error = MPT_PRT_DEBUG; 2813 } 2814 mpt_lprt(mpt, error, 2815 "PORTFACTS[%d]: Type %x PFlags %x IID %d MaxDev %d\n", 2816 port, pfp->PortType, pfp->ProtocolFlags, pfp->PortSCSIID, 2817 pfp->MaxDevices); 2818 2819 } 2820 2821 /* 2822 * XXX: Not yet supporting more than port 0 2823 */ 2824 pfp = &mpt->port_facts[0]; 2825 if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_FC) { 2826 mpt->is_fc = 1; 2827 mpt->is_sas = 0; 2828 mpt->is_spi = 0; 2829 } else if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_SAS) { 2830 mpt->is_fc = 0; 2831 mpt->is_sas = 1; 2832 mpt->is_spi = 0; 2833 } else if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_SCSI) { 2834 mpt->is_fc = 0; 2835 mpt->is_sas = 0; 2836 mpt->is_spi = 1; 2837 if (mpt->mpt_ini_id == MPT_INI_ID_NONE) 2838 mpt->mpt_ini_id = pfp->PortSCSIID; 2839 } else if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_ISCSI) { 2840 mpt_prt(mpt, "iSCSI not supported yet\n"); 2841 return (ENXIO); 2842 } else if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_INACTIVE) { 2843 mpt_prt(mpt, "Inactive Port\n"); 2844 return (ENXIO); 2845 } else { 2846 mpt_prt(mpt, "unknown Port Type %#x\n", pfp->PortType); 2847 return (ENXIO); 2848 } 2849 2850 /* 2851 * Set our role with what this port supports. 2852 * 2853 * Note this might be changed later in different modules 2854 * if this is different from what is wanted. 2855 */ 2856 mpt->role = MPT_ROLE_NONE; 2857 if (pfp->ProtocolFlags & MPI_PORTFACTS_PROTOCOL_INITIATOR) { 2858 mpt->role |= MPT_ROLE_INITIATOR; 2859 } 2860 if (pfp->ProtocolFlags & MPI_PORTFACTS_PROTOCOL_TARGET) { 2861 mpt->role |= MPT_ROLE_TARGET; 2862 } 2863 2864 /* 2865 * Enable the IOC 2866 */ 2867 if (mpt_enable_ioc(mpt, 1) != MPT_OK) { 2868 mpt_prt(mpt, "unable to initialize IOC\n"); 2869 return (ENXIO); 2870 } 2871 2872 /* 2873 * Read IOC configuration information. 2874 * 2875 * We need this to determine whether or not we have certain 2876 * settings for Integrated Mirroring (e.g.). 2877 */ 2878 mpt_read_config_info_ioc(mpt); 2879 2880 return (0); 2881} 2882 2883static int 2884mpt_enable_ioc(struct mpt_softc *mpt, int portenable) 2885{ 2886 uint32_t pptr; 2887 int val; 2888 2889 if (mpt_send_ioc_init(mpt, MPI_WHOINIT_HOST_DRIVER) != MPT_OK) { 2890 mpt_prt(mpt, "mpt_send_ioc_init failed\n"); 2891 return (EIO); 2892 } 2893 2894 mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_send_ioc_init ok\n"); 2895 2896 if (mpt_wait_state(mpt, MPT_DB_STATE_RUNNING) != MPT_OK) { 2897 mpt_prt(mpt, "IOC failed to go to run state\n"); 2898 return (ENXIO); 2899 } 2900 mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC now at RUNSTATE\n"); 2901 2902 /* 2903 * Give it reply buffers 2904 * 2905 * Do *not* exceed global credits. 2906 */ 2907 for (val = 0, pptr = mpt->reply_phys; 2908 (pptr + MPT_REPLY_SIZE) < (mpt->reply_phys + PAGE_SIZE); 2909 pptr += MPT_REPLY_SIZE) { 2910 mpt_free_reply(mpt, pptr); 2911 if (++val == mpt->ioc_facts.GlobalCredits - 1) 2912 break; 2913 } 2914 2915 2916 /* 2917 * Enable the port if asked. This is only done if we're resetting 2918 * the IOC after initial startup. 2919 */ 2920 if (portenable) { 2921 /* 2922 * Enable asynchronous event reporting 2923 */ 2924 mpt_send_event_request(mpt, 1); 2925 2926 if (mpt_send_port_enable(mpt, 0) != MPT_OK) { 2927 mpt_prt(mpt, "%s: failed to enable port 0\n", __func__); 2928 return (ENXIO); 2929 } 2930 } 2931 return (MPT_OK); 2932} 2933 2934/* 2935 * Endian Conversion Functions- only used on Big Endian machines 2936 */ 2937#if _BYTE_ORDER == _BIG_ENDIAN 2938void 2939mpt2host_sge_simple_union(SGE_SIMPLE_UNION *sge) 2940{ 2941 2942 MPT_2_HOST32(sge, FlagsLength); 2943 MPT_2_HOST32(sge, u.Address64.Low); 2944 MPT_2_HOST32(sge, u.Address64.High); 2945} 2946 2947void 2948mpt2host_iocfacts_reply(MSG_IOC_FACTS_REPLY *rp) 2949{ 2950 2951 MPT_2_HOST16(rp, MsgVersion); 2952 MPT_2_HOST16(rp, HeaderVersion); 2953 MPT_2_HOST32(rp, MsgContext); 2954 MPT_2_HOST16(rp, IOCExceptions); 2955 MPT_2_HOST16(rp, IOCStatus); 2956 MPT_2_HOST32(rp, IOCLogInfo); 2957 MPT_2_HOST16(rp, ReplyQueueDepth); 2958 MPT_2_HOST16(rp, RequestFrameSize); 2959 MPT_2_HOST16(rp, Reserved_0101_FWVersion); 2960 MPT_2_HOST16(rp, ProductID); 2961 MPT_2_HOST32(rp, CurrentHostMfaHighAddr); 2962 MPT_2_HOST16(rp, GlobalCredits); 2963 MPT_2_HOST32(rp, CurrentSenseBufferHighAddr); 2964 MPT_2_HOST16(rp, CurReplyFrameSize); 2965 MPT_2_HOST32(rp, FWImageSize); 2966 MPT_2_HOST32(rp, IOCCapabilities); 2967 MPT_2_HOST32(rp, FWVersion.Word); 2968 MPT_2_HOST16(rp, HighPriorityQueueDepth); 2969 MPT_2_HOST16(rp, Reserved2); 2970 mpt2host_sge_simple_union(&rp->HostPageBufferSGE); 2971 MPT_2_HOST32(rp, ReplyFifoHostSignalingAddr); 2972} 2973 2974void 2975mpt2host_portfacts_reply(MSG_PORT_FACTS_REPLY *pfp) 2976{ 2977 2978 MPT_2_HOST16(pfp, Reserved); 2979 MPT_2_HOST16(pfp, Reserved1); 2980 MPT_2_HOST32(pfp, MsgContext); 2981 MPT_2_HOST16(pfp, Reserved2); 2982 MPT_2_HOST16(pfp, IOCStatus); 2983 MPT_2_HOST32(pfp, IOCLogInfo); 2984 MPT_2_HOST16(pfp, MaxDevices); 2985 MPT_2_HOST16(pfp, PortSCSIID); 2986 MPT_2_HOST16(pfp, ProtocolFlags); 2987 MPT_2_HOST16(pfp, MaxPostedCmdBuffers); 2988 MPT_2_HOST16(pfp, MaxPersistentIDs); 2989 MPT_2_HOST16(pfp, MaxLanBuckets); 2990 MPT_2_HOST16(pfp, Reserved4); 2991 MPT_2_HOST32(pfp, Reserved5); 2992} 2993 2994void 2995mpt2host_config_page_ioc2(CONFIG_PAGE_IOC_2 *ioc2) 2996{ 2997 int i; 2998 2999 MPT_2_HOST32(ioc2, CapabilitiesFlags); 3000 for (i = 0; i < MPI_IOC_PAGE_2_RAID_VOLUME_MAX; i++) { 3001 MPT_2_HOST16(ioc2, RaidVolume[i].Reserved3); 3002 } 3003} 3004 3005void 3006mpt2host_config_page_ioc3(CONFIG_PAGE_IOC_3 *ioc3) 3007{ 3008 3009 MPT_2_HOST16(ioc3, Reserved2); 3010} 3011 3012void 3013mpt2host_config_page_scsi_port_0(CONFIG_PAGE_SCSI_PORT_0 *sp0) 3014{ 3015 3016 MPT_2_HOST32(sp0, Capabilities); 3017 MPT_2_HOST32(sp0, PhysicalInterface); 3018} 3019 3020void 3021mpt2host_config_page_scsi_port_1(CONFIG_PAGE_SCSI_PORT_1 *sp1) 3022{ 3023 3024 MPT_2_HOST32(sp1, Configuration); 3025 MPT_2_HOST32(sp1, OnBusTimerValue); 3026 MPT_2_HOST16(sp1, IDConfig); 3027} 3028 3029void 3030host2mpt_config_page_scsi_port_1(CONFIG_PAGE_SCSI_PORT_1 *sp1) 3031{ 3032 3033 HOST_2_MPT32(sp1, Configuration); 3034 HOST_2_MPT32(sp1, OnBusTimerValue); 3035 HOST_2_MPT16(sp1, IDConfig); 3036} 3037 3038void 3039mpt2host_config_page_scsi_port_2(CONFIG_PAGE_SCSI_PORT_2 *sp2) 3040{ 3041 int i; 3042 3043 MPT_2_HOST32(sp2, PortFlags); 3044 MPT_2_HOST32(sp2, PortSettings); 3045 for (i = 0; i < sizeof(sp2->DeviceSettings) / 3046 sizeof(*sp2->DeviceSettings); i++) { 3047 MPT_2_HOST16(sp2, DeviceSettings[i].DeviceFlags); 3048 } 3049} 3050 3051void 3052mpt2host_config_page_scsi_device_0(CONFIG_PAGE_SCSI_DEVICE_0 *sd0) 3053{ 3054 3055 MPT_2_HOST32(sd0, NegotiatedParameters); 3056 MPT_2_HOST32(sd0, Information); 3057} 3058 3059void 3060mpt2host_config_page_scsi_device_1(CONFIG_PAGE_SCSI_DEVICE_1 *sd1) 3061{ 3062 3063 MPT_2_HOST32(sd1, RequestedParameters); 3064 MPT_2_HOST32(sd1, Reserved); 3065 MPT_2_HOST32(sd1, Configuration); 3066} 3067 3068void 3069host2mpt_config_page_scsi_device_1(CONFIG_PAGE_SCSI_DEVICE_1 *sd1) 3070{ 3071 3072 HOST_2_MPT32(sd1, RequestedParameters); 3073 HOST_2_MPT32(sd1, Reserved); 3074 HOST_2_MPT32(sd1, Configuration); 3075} 3076 3077void 3078mpt2host_config_page_fc_port_0(CONFIG_PAGE_FC_PORT_0 *fp0) 3079{ 3080 3081 MPT_2_HOST32(fp0, Flags); 3082 MPT_2_HOST32(fp0, PortIdentifier); 3083 MPT_2_HOST32(fp0, WWNN.Low); 3084 MPT_2_HOST32(fp0, WWNN.High); 3085 MPT_2_HOST32(fp0, WWPN.Low); 3086 MPT_2_HOST32(fp0, WWPN.High); 3087 MPT_2_HOST32(fp0, SupportedServiceClass); 3088 MPT_2_HOST32(fp0, SupportedSpeeds); 3089 MPT_2_HOST32(fp0, CurrentSpeed); 3090 MPT_2_HOST32(fp0, MaxFrameSize); 3091 MPT_2_HOST32(fp0, FabricWWNN.Low); 3092 MPT_2_HOST32(fp0, FabricWWNN.High); 3093 MPT_2_HOST32(fp0, FabricWWPN.Low); 3094 MPT_2_HOST32(fp0, FabricWWPN.High); 3095 MPT_2_HOST32(fp0, DiscoveredPortsCount); 3096 MPT_2_HOST32(fp0, MaxInitiators); 3097} 3098 3099void 3100mpt2host_config_page_fc_port_1(CONFIG_PAGE_FC_PORT_1 *fp1) 3101{ 3102 3103 MPT_2_HOST32(fp1, Flags); 3104 MPT_2_HOST32(fp1, NoSEEPROMWWNN.Low); 3105 MPT_2_HOST32(fp1, NoSEEPROMWWNN.High); 3106 MPT_2_HOST32(fp1, NoSEEPROMWWPN.Low); 3107 MPT_2_HOST32(fp1, NoSEEPROMWWPN.High); 3108} 3109 3110void 3111host2mpt_config_page_fc_port_1(CONFIG_PAGE_FC_PORT_1 *fp1) 3112{ 3113 3114 HOST_2_MPT32(fp1, Flags); 3115 HOST_2_MPT32(fp1, NoSEEPROMWWNN.Low); 3116 HOST_2_MPT32(fp1, NoSEEPROMWWNN.High); 3117 HOST_2_MPT32(fp1, NoSEEPROMWWPN.Low); 3118 HOST_2_MPT32(fp1, NoSEEPROMWWPN.High); 3119} 3120 3121void 3122mpt2host_config_page_raid_vol_0(CONFIG_PAGE_RAID_VOL_0 *volp) 3123{ 3124 int i; 3125 3126 MPT_2_HOST16(volp, VolumeStatus.Reserved); 3127 MPT_2_HOST16(volp, VolumeSettings.Settings); 3128 MPT_2_HOST32(volp, MaxLBA); 3129 MPT_2_HOST32(volp, MaxLBAHigh); 3130 MPT_2_HOST32(volp, StripeSize); 3131 MPT_2_HOST32(volp, Reserved2); 3132 MPT_2_HOST32(volp, Reserved3); 3133 for (i = 0; i < MPI_RAID_VOL_PAGE_0_PHYSDISK_MAX; i++) { 3134 MPT_2_HOST16(volp, PhysDisk[i].Reserved); 3135 } 3136} 3137 3138void 3139mpt2host_config_page_raid_phys_disk_0(CONFIG_PAGE_RAID_PHYS_DISK_0 *rpd0) 3140{ 3141 3142 MPT_2_HOST32(rpd0, Reserved1); 3143 MPT_2_HOST16(rpd0, PhysDiskStatus.Reserved); 3144 MPT_2_HOST32(rpd0, MaxLBA); 3145 MPT_2_HOST16(rpd0, ErrorData.Reserved); 3146 MPT_2_HOST16(rpd0, ErrorData.ErrorCount); 3147 MPT_2_HOST16(rpd0, ErrorData.SmartCount); 3148} 3149 3150void 3151mpt2host_mpi_raid_vol_indicator(MPI_RAID_VOL_INDICATOR *vi) 3152{ 3153 3154 MPT_2_HOST16(vi, TotalBlocks.High); 3155 MPT_2_HOST16(vi, TotalBlocks.Low); 3156 MPT_2_HOST16(vi, BlocksRemaining.High); 3157 MPT_2_HOST16(vi, BlocksRemaining.Low); 3158} 3159#endif 3160