mpt.c revision 209599
/*-
 * Generic routines for LSI Fusion adapters.
 * FreeBSD Version.
 *
 * Copyright (c) 2000, 2001 by Greg Ansley
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2002, 2006 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Support from Chris Ellsworth in order to make SAS adapters work
 * is gratefully acknowledged.
 *
 *
 * Support from LSI-Logic has also gone a great deal toward making this a
 * workable subsystem and is gratefully acknowledged.
 */
/*-
 * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
 * Copyright (c) 2005, WHEEL Sp. z o.o.
 * Copyright (c) 2004, 2005 Justin T. Gibbs
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/mpt/mpt.c 209599 2010-06-29 22:07:53Z ken $");

#include <dev/mpt/mpt.h>
#include <dev/mpt/mpt_cam.h> /* XXX For static handler registration */
#include <dev/mpt/mpt_raid.h> /* XXX For static handler registration */

#include <dev/mpt/mpilib/mpi.h>
#include <dev/mpt/mpilib/mpi_ioc.h>
#include <dev/mpt/mpilib/mpi_fc.h>
#include <dev/mpt/mpilib/mpi_targ.h>

#include <sys/sysctl.h>

#define MPT_MAX_TRYS 3
#define MPT_MAX_WAIT 300000

static int maxwait_ack = 0;
static int maxwait_int = 0;
static int maxwait_state = 0;

static TAILQ_HEAD(, mpt_softc) mpt_tailq = TAILQ_HEAD_INITIALIZER(mpt_tailq);
mpt_reply_handler_t *mpt_reply_handlers[MPT_NUM_REPLY_HANDLERS];

static mpt_reply_handler_t mpt_default_reply_handler;
static mpt_reply_handler_t mpt_config_reply_handler;
static mpt_reply_handler_t mpt_handshake_reply_handler;
static mpt_reply_handler_t mpt_event_reply_handler;
static void mpt_send_event_ack(struct mpt_softc *mpt, request_t *ack_req,
    MSG_EVENT_NOTIFY_REPLY *msg, uint32_t context);
static int mpt_send_event_request(struct mpt_softc *mpt, int onoff);
static int mpt_soft_reset(struct mpt_softc *mpt);
static void mpt_hard_reset(struct mpt_softc *mpt);
static void mpt_dma_buf_free(struct mpt_softc *mpt);
static int mpt_configure_ioc(struct mpt_softc *mpt, int, int);
static int mpt_enable_ioc(struct mpt_softc *mpt, int);

/************************* Personality Module Support *************************/
/*
 * We include one extra entry that is guaranteed to be NULL
 * to simplify our iterator.
 */
static struct mpt_personality *mpt_personalities[MPT_MAX_PERSONALITIES + 1];
static __inline struct mpt_personality*
	mpt_pers_find(struct mpt_softc *, u_int);
static __inline struct mpt_personality*
	mpt_pers_find_reverse(struct mpt_softc *, u_int);

static __inline struct mpt_personality *
mpt_pers_find(struct mpt_softc *mpt, u_int start_at)
{
	KASSERT(start_at <= MPT_MAX_PERSONALITIES,
		("mpt_pers_find: starting position out of range\n"));

	while (start_at < MPT_MAX_PERSONALITIES
	    && (mpt->mpt_pers_mask & (0x1 << start_at)) == 0) {
		start_at++;
	}
	return (mpt_personalities[start_at]);
}

/*
 * Used infrequently, so no need to optimize like a forward
 * traversal where we use the MAX+1 is guaranteed to be NULL
 * trick.
 */
static __inline struct mpt_personality *
mpt_pers_find_reverse(struct mpt_softc *mpt, u_int start_at)
{
	while (start_at < MPT_MAX_PERSONALITIES
	    && (mpt->mpt_pers_mask & (0x1 << start_at)) == 0) {
		start_at--;
	}
	if (start_at < MPT_MAX_PERSONALITIES)
		return (mpt_personalities[start_at]);
	return (NULL);
}

#define MPT_PERS_FOREACH(mpt, pers)				\
	for (pers = mpt_pers_find(mpt, /*start_at*/0);		\
	     pers != NULL;					\
	     pers = mpt_pers_find(mpt, /*start_at*/pers->id+1))

#define MPT_PERS_FOREACH_REVERSE(mpt, pers)				\
	for (pers = mpt_pers_find_reverse(mpt, MPT_MAX_PERSONALITIES-1);\
	     pers != NULL;						\
	     pers = mpt_pers_find_reverse(mpt, /*start_at*/pers->id-1))

static mpt_load_handler_t      mpt_stdload;
static mpt_probe_handler_t     mpt_stdprobe;
static mpt_attach_handler_t    mpt_stdattach;
static mpt_enable_handler_t    mpt_stdenable;
static mpt_ready_handler_t     mpt_stdready;
static mpt_event_handler_t     mpt_stdevent;
static mpt_reset_handler_t     mpt_stdreset;
static mpt_shutdown_handler_t  mpt_stdshutdown;
static mpt_detach_handler_t    mpt_stddetach;
static mpt_unload_handler_t    mpt_stdunload;
static struct mpt_personality mpt_default_personality =
{
	.load		= mpt_stdload,
	.probe		= mpt_stdprobe,
	.attach		= mpt_stdattach,
	.enable		= mpt_stdenable,
	.ready		= mpt_stdready,
	.event		= mpt_stdevent,
	.reset		= mpt_stdreset,
	.shutdown	= mpt_stdshutdown,
	.detach		= mpt_stddetach,
	.unload		= mpt_stdunload
};

static mpt_load_handler_t      mpt_core_load;
static mpt_attach_handler_t    mpt_core_attach;
static mpt_enable_handler_t    mpt_core_enable;
static mpt_reset_handler_t     mpt_core_ioc_reset;
static mpt_event_handler_t     mpt_core_event;
static mpt_shutdown_handler_t  mpt_core_shutdown;
static mpt_shutdown_handler_t  mpt_core_detach;
static mpt_unload_handler_t    mpt_core_unload;
static struct mpt_personality mpt_core_personality =
{
	.name		= "mpt_core",
	.load		= mpt_core_load,
//	.attach		= mpt_core_attach,
//	.enable		= mpt_core_enable,
	.event		= mpt_core_event,
	.reset		= mpt_core_ioc_reset,
	.shutdown	= mpt_core_shutdown,
	.detach		= mpt_core_detach,
	.unload		= mpt_core_unload,
};

/*
 * Manual declaration so that DECLARE_MPT_PERSONALITY doesn't need
 * ordering information.  We want the core to always register FIRST.
 * Other modules are set to SI_ORDER_SECOND.
 */
static moduledata_t mpt_core_mod = {
	"mpt_core", mpt_modevent, &mpt_core_personality
};
DECLARE_MODULE(mpt_core, mpt_core_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
MODULE_VERSION(mpt_core, 1);

#define MPT_PERS_ATTACHED(pers, mpt) ((mpt)->mpt_pers_mask & (0x1 << pers->id))

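/*
 * Module event handler shared by all personality modules. On load, the
 * personality is assigned the first free slot in mpt_personalities[] and
 * any handler it left NULL is filled in with the default no-op handler;
 * on unload, its slot is cleared again.
 */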
int
mpt_modevent(module_t mod, int type, void *data)
{
	struct mpt_personality *pers;
	int error;

	pers = (struct mpt_personality *)data;

	error = 0;
	switch (type) {
	case MOD_LOAD:
	{
		mpt_load_handler_t **def_handler;
		mpt_load_handler_t **pers_handler;
		int i;

		for (i = 0; i < MPT_MAX_PERSONALITIES; i++) {
			if (mpt_personalities[i] == NULL)
				break;
		}
		if (i >= MPT_MAX_PERSONALITIES) {
			error = ENOMEM;
			break;
		}
		pers->id = i;
		mpt_personalities[i] = pers;

		/* Install standard/noop handlers for any NULL entries. */
		def_handler = MPT_PERS_FIRST_HANDLER(&mpt_default_personality);
		pers_handler = MPT_PERS_FIRST_HANDLER(pers);
		while (pers_handler <= MPT_PERS_LAST_HANDLER(pers)) {
			if (*pers_handler == NULL)
				*pers_handler = *def_handler;
			pers_handler++;
			def_handler++;
		}

		error = (pers->load(pers));
		if (error != 0)
			mpt_personalities[i] = NULL;
		break;
	}
	case MOD_SHUTDOWN:
		break;
#if __FreeBSD_version >= 500000
	case MOD_QUIESCE:
		break;
#endif
	case MOD_UNLOAD:
		error = pers->unload(pers);
		mpt_personalities[pers->id] = NULL;
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}

int
mpt_stdload(struct mpt_personality *pers)
{
	/* Load is always successful. */
	return (0);
}

int
mpt_stdprobe(struct mpt_softc *mpt)
{
	/* Probe is always successful. */
	return (0);
}

int
mpt_stdattach(struct mpt_softc *mpt)
{
	/* Attach is always successful. */
	return (0);
}

int
mpt_stdenable(struct mpt_softc *mpt)
{
	/* Enable is always successful. */
	return (0);
}

void
mpt_stdready(struct mpt_softc *mpt)
{
}


int
mpt_stdevent(struct mpt_softc *mpt, request_t *req, MSG_EVENT_NOTIFY_REPLY *msg)
{
	mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_stdevent: 0x%x\n", msg->Event & 0xFF);
	/* Event was not for us. */
	return (0);
}

void
mpt_stdreset(struct mpt_softc *mpt, int type)
{
}

void
mpt_stdshutdown(struct mpt_softc *mpt)
{
}

void
mpt_stddetach(struct mpt_softc *mpt)
{
}

int
mpt_stdunload(struct mpt_personality *pers)
{
	/* Unload is always successful. */
	return (0);
}

/*
 * Post driver attachment, we may want to perform some global actions.
 * Here is the hook to do so.
 */

static void
mpt_postattach(void *unused)
{
	struct mpt_softc *mpt;
	struct mpt_personality *pers;

	TAILQ_FOREACH(mpt, &mpt_tailq, links) {
		MPT_PERS_FOREACH(mpt, pers)
			pers->ready(mpt);
	}
}
SYSINIT(mptdev, SI_SUB_CONFIGURE, SI_ORDER_MIDDLE, mpt_postattach, NULL);


/******************************* Bus DMA Support ******************************/
void
mpt_map_rquest(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct mpt_map_info *map_info;

	map_info = (struct mpt_map_info *)arg;
	map_info->error = error;
	map_info->phys = segs->ds_addr;
}

/**************************** Reply/Event Handling ****************************/
int
mpt_register_handler(struct mpt_softc *mpt, mpt_handler_type type,
		     mpt_handler_t handler, uint32_t *phandler_id)
{

	switch (type) {
	case MPT_HANDLER_REPLY:
	{
		u_int cbi;
		u_int free_cbi;

		if (phandler_id == NULL)
			return (EINVAL);

		free_cbi = MPT_HANDLER_ID_NONE;
		for (cbi = 0; cbi < MPT_NUM_REPLY_HANDLERS; cbi++) {
			/*
			 * If the same handler is registered multiple
			 * times, don't error out.  Just return the
			 * index of the original registration.
			 */
			if (mpt_reply_handlers[cbi] == handler.reply_handler) {
				*phandler_id = MPT_CBI_TO_HID(cbi);
				return (0);
			}

			/*
			 * Fill from the front in the hope that
			 * all registered handlers consume only a
			 * single cache line.
			 *
			 * We don't break on the first empty slot so
			 * that the full table is checked to see if
			 * this handler was previously registered.
			 */
			if (free_cbi == MPT_HANDLER_ID_NONE &&
			    (mpt_reply_handlers[cbi]
			  == mpt_default_reply_handler))
				free_cbi = cbi;
		}
		if (free_cbi == MPT_HANDLER_ID_NONE) {
			return (ENOMEM);
		}
		mpt_reply_handlers[free_cbi] = handler.reply_handler;
		*phandler_id = MPT_CBI_TO_HID(free_cbi);
		break;
	}
	default:
		mpt_prt(mpt, "mpt_register_handler unknown type %d\n", type);
		return (EINVAL);
	}
	return (0);
}

int
mpt_deregister_handler(struct mpt_softc *mpt, mpt_handler_type type,
		       mpt_handler_t handler, uint32_t handler_id)
{

	switch (type) {
	case MPT_HANDLER_REPLY:
	{
		u_int cbi;

		cbi = MPT_CBI(handler_id);
		if (cbi >= MPT_NUM_REPLY_HANDLERS
		 || mpt_reply_handlers[cbi] != handler.reply_handler)
			return (ENOENT);
		mpt_reply_handlers[cbi] = mpt_default_reply_handler;
		break;
	}
	default:
		mpt_prt(mpt, "mpt_deregister_handler unknown type %d\n", type);
		return (EINVAL);
	}
	return (0);
}

static int
mpt_default_reply_handler(struct mpt_softc *mpt, request_t *req,
	uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	mpt_prt(mpt,
	    "Default Handler Called: req=%p:%u reply_descriptor=%x frame=%p\n",
	    req, req->serno, reply_desc, reply_frame);

	if (reply_frame != NULL)
		mpt_dump_reply_frame(mpt, reply_frame);

	mpt_prt(mpt, "Reply Frame Ignored\n");

	return (/*free_reply*/TRUE);
}

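/*
 * Completion handler for configuration page requests: copy the page
 * header information from the reply back into the originating request,
 * mark the request done, and either wake its waiter or, for a late
 * completion after a timeout, free the request.
 */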
static int
mpt_config_reply_handler(struct mpt_softc *mpt, request_t *req,
	uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	if (req != NULL) {

		if (reply_frame != NULL) {
			MSG_CONFIG *cfgp;
			MSG_CONFIG_REPLY *reply;

			cfgp = (MSG_CONFIG *)req->req_vbuf;
			reply = (MSG_CONFIG_REPLY *)reply_frame;
			req->IOCStatus = le16toh(reply_frame->IOCStatus);
			bcopy(&reply->Header, &cfgp->Header,
			    sizeof(cfgp->Header));
			cfgp->ExtPageLength = reply->ExtPageLength;
			cfgp->ExtPageType = reply->ExtPageType;
		}
		req->state &= ~REQ_STATE_QUEUED;
		req->state |= REQ_STATE_DONE;
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
			wakeup(req);
		} else if ((req->state & REQ_STATE_TIMEDOUT) != 0) {
			/*
			 * Whew- we can free this request (late completion)
			 */
			mpt_free_request(mpt, req);
		}
	}

	return (TRUE);
}

static int
mpt_handshake_reply_handler(struct mpt_softc *mpt, request_t *req,
	uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	/* Nothing to be done. */
	return (TRUE);
}

static int
mpt_event_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	int free_reply;

	KASSERT(reply_frame != NULL, ("null reply in mpt_event_reply_handler"));
	KASSERT(req != NULL, ("null request in mpt_event_reply_handler"));

	free_reply = TRUE;
	switch (reply_frame->Function) {
	case MPI_FUNCTION_EVENT_NOTIFICATION:
	{
		MSG_EVENT_NOTIFY_REPLY *msg;
		struct mpt_personality *pers;
		u_int handled;

		handled = 0;
		msg = (MSG_EVENT_NOTIFY_REPLY *)reply_frame;
		msg->EventDataLength = le16toh(msg->EventDataLength);
		msg->IOCStatus = le16toh(msg->IOCStatus);
		msg->IOCLogInfo = le32toh(msg->IOCLogInfo);
		msg->Event = le32toh(msg->Event);
		MPT_PERS_FOREACH(mpt, pers)
			handled += pers->event(mpt, req, msg);

		if (handled == 0 && mpt->mpt_pers_mask == 0) {
			mpt_lprt(mpt, MPT_PRT_INFO,
				"No Handlers For Any Event Notify Frames. "
				"Event %#x (ACK %sequired).\n",
				msg->Event, msg->AckRequired? "r" : "not r");
		} else if (handled == 0) {
			mpt_lprt(mpt,
				msg->AckRequired? MPT_PRT_WARN : MPT_PRT_INFO,
				"Unhandled Event Notify Frame. Event %#x "
				"(ACK %sequired).\n",
				msg->Event, msg->AckRequired? "r" : "not r");
		}

		if (msg->AckRequired) {
			request_t *ack_req;
			uint32_t context;

			context = req->index | MPT_REPLY_HANDLER_EVENTS;
			ack_req = mpt_get_request(mpt, FALSE);
			if (ack_req == NULL) {
				struct mpt_evtf_record *evtf;

				evtf = (struct mpt_evtf_record *)reply_frame;
				evtf->context = context;
				LIST_INSERT_HEAD(&mpt->ack_frames, evtf, links);
				free_reply = FALSE;
				break;
			}
			mpt_send_event_ack(mpt, ack_req, msg, context);
			/*
			 * Don't check for CONTINUATION_REPLY here
			 */
			return (free_reply);
		}
		break;
	}
	case MPI_FUNCTION_PORT_ENABLE:
		mpt_lprt(mpt, MPT_PRT_DEBUG, "enable port reply\n");
		break;
	case MPI_FUNCTION_EVENT_ACK:
		break;
	default:
		mpt_prt(mpt, "unknown event function: %x\n",
			reply_frame->Function);
		break;
	}

	/*
	 * I'm not sure that this continuation stuff works as it should.
	 *
	 * I've had FC async events occur that free the frame up because
	 * the continuation bit isn't set, and then additional async events
	 * then occur using the same context. As you might imagine, this
	 * leads to Very Bad Thing.
	 *
	 * Let's just be safe for now and not free them up until we figure
	 * out what's actually happening here.
	 */
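	/* The disabled block below is that continuation-reply bookkeeping. */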
#if 0
	if ((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0) {
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		mpt_free_request(mpt, req);
		mpt_prt(mpt, "event_reply %x for req %p:%u NOT a continuation",
		    reply_frame->Function, req, req->serno);
		if (reply_frame->Function == MPI_FUNCTION_EVENT_NOTIFICATION) {
			MSG_EVENT_NOTIFY_REPLY *msg =
			    (MSG_EVENT_NOTIFY_REPLY *)reply_frame;
			mpt_prtc(mpt, " Event=0x%x AckReq=%d",
			    msg->Event, msg->AckRequired);
		}
	} else {
		mpt_prt(mpt, "event_reply %x for %p:%u IS a continuation",
		    reply_frame->Function, req, req->serno);
		if (reply_frame->Function == MPI_FUNCTION_EVENT_NOTIFICATION) {
			MSG_EVENT_NOTIFY_REPLY *msg =
			    (MSG_EVENT_NOTIFY_REPLY *)reply_frame;
			mpt_prtc(mpt, " Event=0x%x AckReq=%d",
			    msg->Event, msg->AckRequired);
		}
		mpt_prtc(mpt, "\n");
	}
#endif
	return (free_reply);
}

/*
 * Process an asynchronous event from the IOC.
 */
static int
mpt_core_event(struct mpt_softc *mpt, request_t *req,
	       MSG_EVENT_NOTIFY_REPLY *msg)
{
	mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_core_event: 0x%x\n",
		 msg->Event & 0xFF);
	switch(msg->Event & 0xFF) {
	case MPI_EVENT_NONE:
		break;
	case MPI_EVENT_LOG_DATA:
	{
		int i;

		/* Some error occurred that LSI wants logged */
		mpt_prt(mpt, "EvtLogData: IOCLogInfo: 0x%08x\n",
		    msg->IOCLogInfo);
		mpt_prt(mpt, "\tEvtLogData: Event Data:");
		for (i = 0; i < msg->EventDataLength; i++)
			mpt_prtc(mpt, " %08x", msg->Data[i]);
		mpt_prtc(mpt, "\n");
		break;
	}
	case MPI_EVENT_EVENT_CHANGE:
		/*
		 * This is just an acknowledgement
		 * of our mpt_send_event_request.
		 */
		break;
	case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
		break;
	default:
		return (0);
		break;
	}
	return (1);
}

static void
mpt_send_event_ack(struct mpt_softc *mpt, request_t *ack_req,
		   MSG_EVENT_NOTIFY_REPLY *msg, uint32_t context)
{
	MSG_EVENT_ACK *ackp;

	ackp = (MSG_EVENT_ACK *)ack_req->req_vbuf;
	memset(ackp, 0, sizeof (*ackp));
	ackp->Function = MPI_FUNCTION_EVENT_ACK;
	ackp->Event = htole32(msg->Event);
	ackp->EventContext = htole32(msg->EventContext);
	ackp->MsgContext = htole32(context);
	mpt_check_doorbell(mpt);
	mpt_send_cmd(mpt, ack_req);
}

/***************************** Interrupt Handling *****************************/
void
mpt_intr(void *arg)
{
	struct mpt_softc *mpt;
	uint32_t reply_desc;
	int ntrips = 0;

	mpt = (struct mpt_softc *)arg;
	mpt_lprt(mpt, MPT_PRT_DEBUG2, "enter mpt_intr\n");
	MPT_LOCK_ASSERT(mpt);

	while ((reply_desc = mpt_pop_reply_queue(mpt)) != MPT_REPLY_EMPTY) {
		request_t	  *req;
		MSG_DEFAULT_REPLY *reply_frame;
		uint32_t	   reply_baddr;
		uint32_t	   ctxt_idx;
		u_int		   cb_index;
		u_int		   req_index;
		int		   free_rf;

		req = NULL;
		reply_frame = NULL;
		reply_baddr = 0;
		if ((reply_desc & MPI_ADDRESS_REPLY_A_BIT) != 0) {
			u_int offset;
			/*
			 * Ensure that the reply frame is coherent.
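			 * The descriptor carries the bus address of the
			 * reply frame; turn that into an offset into our
			 * reply area and sync it for the CPU before use.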
			 */
			reply_baddr = MPT_REPLY_BADDR(reply_desc);
			offset = reply_baddr - (mpt->reply_phys & 0xFFFFFFFF);
			bus_dmamap_sync_range(mpt->reply_dmat,
			    mpt->reply_dmap, offset, MPT_REPLY_SIZE,
			    BUS_DMASYNC_POSTREAD);
			reply_frame = MPT_REPLY_OTOV(mpt, offset);
			ctxt_idx = le32toh(reply_frame->MsgContext);
		} else {
			uint32_t type;

			type = MPI_GET_CONTEXT_REPLY_TYPE(reply_desc);
			ctxt_idx = reply_desc;
			mpt_lprt(mpt, MPT_PRT_DEBUG1, "Context Reply: 0x%08x\n",
				    reply_desc);

			switch (type) {
			case MPI_CONTEXT_REPLY_TYPE_SCSI_INIT:
				ctxt_idx &= MPI_CONTEXT_REPLY_CONTEXT_MASK;
				break;
			case MPI_CONTEXT_REPLY_TYPE_SCSI_TARGET:
				ctxt_idx = GET_IO_INDEX(reply_desc);
				if (mpt->tgt_cmd_ptrs == NULL) {
					mpt_prt(mpt,
					    "mpt_intr: no target cmd ptrs\n");
					reply_desc = MPT_REPLY_EMPTY;
					break;
				}
				if (ctxt_idx >= mpt->tgt_cmds_allocated) {
					mpt_prt(mpt,
					    "mpt_intr: bad tgt cmd ctxt %u\n",
					    ctxt_idx);
					reply_desc = MPT_REPLY_EMPTY;
					ntrips = 1000;
					break;
				}
				req = mpt->tgt_cmd_ptrs[ctxt_idx];
				if (req == NULL) {
					mpt_prt(mpt, "no request backpointer "
					    "at index %u", ctxt_idx);
					reply_desc = MPT_REPLY_EMPTY;
					ntrips = 1000;
					break;
				}
				/*
				 * Reformulate ctxt_idx to be just as if
				 * it were another type of context reply
				 * so the code below will find the request
				 * via indexing into the pool.
				 */
				ctxt_idx =
				    req->index | mpt->scsi_tgt_handler_id;
				req = NULL;
				break;
			case MPI_CONTEXT_REPLY_TYPE_LAN:
				mpt_prt(mpt, "LAN CONTEXT REPLY: 0x%08x\n",
				    reply_desc);
				reply_desc = MPT_REPLY_EMPTY;
				break;
			default:
				mpt_prt(mpt, "Context Reply 0x%08x?\n", type);
				reply_desc = MPT_REPLY_EMPTY;
				break;
			}
			if (reply_desc == MPT_REPLY_EMPTY) {
				if (ntrips++ > 1000) {
					break;
				}
				continue;
			}
		}

		cb_index = MPT_CONTEXT_TO_CBI(ctxt_idx);
		req_index = MPT_CONTEXT_TO_REQI(ctxt_idx);
		if (req_index < MPT_MAX_REQUESTS(mpt)) {
			req = &mpt->request_pool[req_index];
		} else {
			mpt_prt(mpt, "WARN: mpt_intr index == %d (reply_desc =="
			    " 0x%x)\n", req_index, reply_desc);
		}

		free_rf = mpt_reply_handlers[cb_index](mpt, req,
		    reply_desc, reply_frame);

		if (reply_frame != NULL && free_rf) {
			mpt_free_reply(mpt, reply_baddr);
		}

		/*
		 * If we got ourselves disabled, don't get stuck in a loop
		 */
		if (mpt->disabled) {
			mpt_disable_ints(mpt);
			break;
		}
		if (ntrips++ > 1000) {
			break;
		}
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG2, "exit mpt_intr\n");
}

/******************************* Error Recovery *******************************/
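/*
 * Complete every request on the given queue by synthesizing a reply
 * frame that carries the supplied IOCStatus and running each request's
 * registered reply handler on it.
 */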
void
mpt_complete_request_chain(struct mpt_softc *mpt, struct req_queue *chain,
			   u_int iocstatus)
{
	MSG_DEFAULT_REPLY ioc_status_frame;
	request_t *req;

	memset(&ioc_status_frame, 0, sizeof(ioc_status_frame));
	ioc_status_frame.MsgLength = roundup2(sizeof(ioc_status_frame), 4);
	ioc_status_frame.IOCStatus = iocstatus;
	while((req = TAILQ_FIRST(chain)) != NULL) {
		MSG_REQUEST_HEADER *msg_hdr;
		u_int cb_index;

		TAILQ_REMOVE(chain, req, links);
		msg_hdr = (MSG_REQUEST_HEADER *)req->req_vbuf;
		ioc_status_frame.Function = msg_hdr->Function;
		ioc_status_frame.MsgContext = msg_hdr->MsgContext;
		cb_index = MPT_CONTEXT_TO_CBI(le32toh(msg_hdr->MsgContext));
		mpt_reply_handlers[cb_index](mpt, req, msg_hdr->MsgContext,
		    &ioc_status_frame);
	}
}

/********************************* Diagnostics ********************************/
/*
 * Perform a diagnostic dump of a reply frame.
 */
void
mpt_dump_reply_frame(struct mpt_softc *mpt, MSG_DEFAULT_REPLY *reply_frame)
{
	mpt_prt(mpt, "Address Reply:\n");
	mpt_print_reply(reply_frame);
}

/******************************* Doorbell Access ******************************/
static __inline uint32_t mpt_rd_db(struct mpt_softc *mpt);
static __inline uint32_t mpt_rd_intr(struct mpt_softc *mpt);

static __inline uint32_t
mpt_rd_db(struct mpt_softc *mpt)
{
	return mpt_read(mpt, MPT_OFFSET_DOORBELL);
}

static __inline uint32_t
mpt_rd_intr(struct mpt_softc *mpt)
{
	return mpt_read(mpt, MPT_OFFSET_INTR_STATUS);
}

/* Busy wait for a door bell to be read by IOC */
static int
mpt_wait_db_ack(struct mpt_softc *mpt)
{
	int i;
	for (i = 0; i < MPT_MAX_WAIT; i++) {
		if (!MPT_DB_IS_BUSY(mpt_rd_intr(mpt))) {
			maxwait_ack = i > maxwait_ack ? i : maxwait_ack;
			return (MPT_OK);
		}
		DELAY(200);
	}
	return (MPT_FAIL);
}

/* Busy wait for a door bell interrupt */
static int
mpt_wait_db_int(struct mpt_softc *mpt)
{
	int i;
	for (i = 0; i < MPT_MAX_WAIT; i++) {
		if (MPT_DB_INTR(mpt_rd_intr(mpt))) {
			maxwait_int = i > maxwait_int ? i : maxwait_int;
			return MPT_OK;
		}
		DELAY(100);
	}
	return (MPT_FAIL);
}

/* Warn if the IOC is not in the RUNNING state */
void
mpt_check_doorbell(struct mpt_softc *mpt)
{
	uint32_t db = mpt_rd_db(mpt);
	if (MPT_STATE(db) != MPT_DB_STATE_RUNNING) {
		mpt_prt(mpt, "Device not running\n");
		mpt_print_db(db);
	}
}

/* Wait for IOC to transition to a given state */
static int
mpt_wait_state(struct mpt_softc *mpt, enum DB_STATE_BITS state)
{
	int i;

	for (i = 0; i < MPT_MAX_WAIT; i++) {
		uint32_t db = mpt_rd_db(mpt);
		if (MPT_STATE(db) == state) {
			maxwait_state = i > maxwait_state ? i : maxwait_state;
			return (MPT_OK);
		}
		DELAY(100);
	}
	return (MPT_FAIL);
}


/************************ Initialization/Configuration ************************/
static int mpt_download_fw(struct mpt_softc *mpt);

/* Issue the reset COMMAND to the IOC */
static int
mpt_soft_reset(struct mpt_softc *mpt)
{
	mpt_lprt(mpt, MPT_PRT_DEBUG, "soft reset\n");

	/* Have to use hard reset if we are not in Running state */
	if (MPT_STATE(mpt_rd_db(mpt)) != MPT_DB_STATE_RUNNING) {
		mpt_prt(mpt, "soft reset failed: device not running\n");
		return (MPT_FAIL);
	}

	/* If door bell is in use we don't have a chance of getting
	 * a word in since the IOC probably crashed in message
	 * processing. So don't waste our time.
	 */
	if (MPT_DB_IS_IN_USE(mpt_rd_db(mpt))) {
		mpt_prt(mpt, "soft reset failed: doorbell wedged\n");
		return (MPT_FAIL);
	}

	/* Send the reset request to the IOC */
	mpt_write(mpt, MPT_OFFSET_DOORBELL,
	    MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET << MPI_DOORBELL_FUNCTION_SHIFT);
	if (mpt_wait_db_ack(mpt) != MPT_OK) {
		mpt_prt(mpt, "soft reset failed: ack timeout\n");
		return (MPT_FAIL);
	}

	/* Wait for the IOC to reload and come out of reset state */
	if (mpt_wait_state(mpt, MPT_DB_STATE_READY) != MPT_OK) {
		mpt_prt(mpt, "soft reset failed: device did not restart\n");
		return (MPT_FAIL);
	}

	return MPT_OK;
}

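/*
 * Repeatedly write the key sequence to the write-sequence register until
 * the diagnostic register reports writes enabled (DRWE), giving up after
 * 20 attempts.
 */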
static int
mpt_enable_diag_mode(struct mpt_softc *mpt)
{
	int try;

	try = 20;
	while (--try) {

		if ((mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC) & MPI_DIAG_DRWE) != 0)
			break;

		/* Enable diagnostic registers */
		mpt_write(mpt, MPT_OFFSET_SEQUENCE, 0xFF);
		mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_1ST_KEY_VALUE);
		mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_2ND_KEY_VALUE);
		mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_3RD_KEY_VALUE);
		mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_4TH_KEY_VALUE);
		mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_5TH_KEY_VALUE);

		DELAY(100000);
	}
	if (try == 0)
		return (EIO);
	return (0);
}

static void
mpt_disable_diag_mode(struct mpt_softc *mpt)
{
	mpt_write(mpt, MPT_OFFSET_SEQUENCE, 0xFFFFFFFF);
}

/* This is a magic diagnostic reset that resets all the ARM
 * processors in the chip.
 */
static void
mpt_hard_reset(struct mpt_softc *mpt)
{
	int error;
	int wait;
	uint32_t diagreg;

	mpt_lprt(mpt, MPT_PRT_DEBUG, "hard reset\n");

	error = mpt_enable_diag_mode(mpt);
	if (error) {
		mpt_prt(mpt, "WARNING - Could not enter diagnostic mode!\n");
		mpt_prt(mpt, "Trying to reset anyway.\n");
	}

	diagreg = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);

	/*
	 * This appears to be a workaround required for some
	 * firmware or hardware revs.
	 */
	mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, diagreg | MPI_DIAG_DISABLE_ARM);
	DELAY(1000);

	/* Diag. port is now active so we can now hit the reset bit */
	mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, diagreg | MPI_DIAG_RESET_ADAPTER);

	/*
	 * Ensure that the reset has finished.  We delay 1ms
	 * prior to reading the register to make sure the chip
	 * has sufficiently completed its reset to handle register
	 * accesses.
	 */
	wait = 5000;
	do {
		DELAY(1000);
		diagreg = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);
	} while (--wait && (diagreg & MPI_DIAG_RESET_ADAPTER) == 0);

	if (wait == 0) {
		mpt_prt(mpt, "WARNING - Failed hard reset! "
			"Trying to initialize anyway.\n");
	}

	/*
	 * If we have firmware to download, it must be loaded before
	 * the controller will become operational.  Do so now.
	 */
	if (mpt->fw_image != NULL) {

		error = mpt_download_fw(mpt);

		if (error) {
			mpt_prt(mpt, "WARNING - Firmware Download Failed!\n");
			mpt_prt(mpt, "Trying to initialize anyway.\n");
		}
	}

	/*
	 * Resetting the controller should have disabled write
	 * access to the diagnostic registers, but disable
	 * manually to be sure.
	 */
	mpt_disable_diag_mode(mpt);
}

static void
mpt_core_ioc_reset(struct mpt_softc *mpt, int type)
{
	/*
	 * Complete all pending requests with a status
	 * appropriate for an IOC reset.
	 */
	mpt_complete_request_chain(mpt, &mpt->request_pending_list,
				   MPI_IOCSTATUS_INVALID_STATE);
}


/*
 * Reset the IOC when needed. Try software command first then if needed
 * poke at the magic diagnostic reset. Note that a hard reset resets
 * *both* IOCs on dual function chips (FC929 && LSI1030) as well as
 * fouls up the PCI configuration registers.
 */
int
mpt_reset(struct mpt_softc *mpt, int reinit)
{
	struct	mpt_personality *pers;
	int	ret;
	int	retry_cnt = 0;

	/*
	 * Try a soft reset. If that fails, get out the big hammer.
	 */
 again:
	if ((ret = mpt_soft_reset(mpt)) != MPT_OK) {
		int	cnt;
		for (cnt = 0; cnt < 5; cnt++) {
			/* Failed; do a hard reset */
			mpt_hard_reset(mpt);

			/*
			 * Wait for the IOC to reload
			 * and come out of reset state
			 */
			ret = mpt_wait_state(mpt, MPT_DB_STATE_READY);
			if (ret == MPT_OK) {
				break;
			}
			/*
			 * Okay- try to check again...
			 */
			ret = mpt_wait_state(mpt, MPT_DB_STATE_READY);
			if (ret == MPT_OK) {
				break;
			}
			mpt_prt(mpt, "mpt_reset: failed hard reset (%d:%d)\n",
			    retry_cnt, cnt);
		}
	}

	if (retry_cnt == 0) {
		/*
		 * Invoke reset handlers.  We bump the reset count so
		 * that mpt_wait_req() understands that regardless of
		 * the specified wait condition, it should stop its wait.
		 */
		mpt->reset_cnt++;
		MPT_PERS_FOREACH(mpt, pers)
			pers->reset(mpt, ret);
	}

	if (reinit) {
		ret = mpt_enable_ioc(mpt, 1);
		if (ret == MPT_OK) {
			mpt_enable_ints(mpt);
		}
	}
	if (ret != MPT_OK && retry_cnt++ < 2) {
		goto again;
	}
	return ret;
}

/* Return a command buffer to the free queue */
void
mpt_free_request(struct mpt_softc *mpt, request_t *req)
{
	request_t *nxt;
	struct mpt_evtf_record *record;
	uint32_t reply_baddr;

	if (req == NULL || req != &mpt->request_pool[req->index]) {
		panic("mpt_free_request bad req ptr\n");
		return;
	}
	if ((nxt = req->chain) != NULL) {
		req->chain = NULL;
		mpt_free_request(mpt, nxt);	/* NB: recursion */
	}
	KASSERT(req->state != REQ_STATE_FREE, ("freeing free request"));
	KASSERT(!(req->state & REQ_STATE_LOCKED), ("freeing locked request"));
	MPT_LOCK_ASSERT(mpt);
	KASSERT(mpt_req_on_free_list(mpt, req) == 0,
	    ("mpt_free_request: req %p:%u func %x already on freelist",
	    req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
	KASSERT(mpt_req_on_pending_list(mpt, req) == 0,
	    ("mpt_free_request: req %p:%u func %x on pending list",
	    req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
#ifdef	INVARIANTS
	mpt_req_not_spcl(mpt, req, "mpt_free_request", __LINE__);
#endif

	req->ccb = NULL;
	if (LIST_EMPTY(&mpt->ack_frames)) {
		/*
		 * Insert free ones at the tail
		 */
		req->serno = 0;
		req->state = REQ_STATE_FREE;
#ifdef	INVARIANTS
		memset(req->req_vbuf, 0xff, sizeof (MSG_REQUEST_HEADER));
#endif
		TAILQ_INSERT_TAIL(&mpt->request_free_list, req, links);
		if (mpt->getreqwaiter != 0) {
			mpt->getreqwaiter = 0;
			wakeup(&mpt->request_free_list);
		}
		return;
	}

	/*
	 * Process an ack frame deferred due to resource shortage.
	 */
	record = LIST_FIRST(&mpt->ack_frames);
	LIST_REMOVE(record, links);
	req->state = REQ_STATE_ALLOCATED;
	mpt_assign_serno(mpt, req);
	mpt_send_event_ack(mpt, req, &record->reply, record->context);
	reply_baddr = (uint32_t)((uint8_t *)record - mpt->reply)
	    + (mpt->reply_phys & 0xFFFFFFFF);
	mpt_free_reply(mpt, reply_baddr);
}

/* Get a command buffer from the free queue */
request_t *
mpt_get_request(struct mpt_softc *mpt, int sleep_ok)
{
	request_t *req;

retry:
	MPT_LOCK_ASSERT(mpt);
	req = TAILQ_FIRST(&mpt->request_free_list);
	if (req != NULL) {
		KASSERT(req == &mpt->request_pool[req->index],
		    ("mpt_get_request: corrupted request free list\n"));
		KASSERT(req->state == REQ_STATE_FREE,
		    ("req %p:%u not free on free list %x index %d function %x",
		    req, req->serno, req->state, req->index,
		    ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
		TAILQ_REMOVE(&mpt->request_free_list, req, links);
		req->state = REQ_STATE_ALLOCATED;
		req->chain = NULL;
		mpt_assign_serno(mpt, req);
	} else if (sleep_ok != 0) {
		mpt->getreqwaiter = 1;
		mpt_sleep(mpt, &mpt->request_free_list, PUSER, "mptgreq", 0);
		goto retry;
	}
	return (req);
}

/* Pass the command to the IOC */
void
mpt_send_cmd(struct mpt_softc *mpt, request_t *req)
{
	if (mpt->verbose > MPT_PRT_DEBUG2) {
		mpt_dump_request(mpt, req);
	}
	bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
	    BUS_DMASYNC_PREWRITE);
	req->state |= REQ_STATE_QUEUED;
	KASSERT(mpt_req_on_free_list(mpt, req) == 0,
	    ("req %p:%u func %x on freelist list in mpt_send_cmd",
	    req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
	KASSERT(mpt_req_on_pending_list(mpt, req) == 0,
	    ("req %p:%u func %x already on pending list in mpt_send_cmd",
	    req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
	TAILQ_INSERT_HEAD(&mpt->request_pending_list, req, links);
	mpt_write(mpt, MPT_OFFSET_REQUEST_Q, (uint32_t) req->req_pbuf);
}

/*
 * Wait for a request to complete.
 *
 * Inputs:
 *	mpt		softc of controller executing request
 *	req		request to wait for
 *	sleep_ok	nonzero implies may sleep in this context
 *	time_ms		timeout in ms.  0 implies no timeout.
 *
 * Return Values:
 *	0		Request completed
 *	non-0		Timeout fired before request completion.
 */
int
mpt_wait_req(struct mpt_softc *mpt, request_t *req,
	     mpt_req_state_t state, mpt_req_state_t mask,
	     int sleep_ok, int time_ms)
{
	int   error;
	int   timeout;
	u_int saved_cnt;

	/*
	 * timeout is in ms.  0 indicates infinite wait.
	 * Convert to ticks or 500us units depending on
	 * our sleep mode.
	 */
	if (sleep_ok != 0) {
		timeout = (time_ms * hz) / 1000;
	} else {
		timeout = time_ms * 2;
	}
	req->state |= REQ_STATE_NEED_WAKEUP;
	mask &= ~REQ_STATE_NEED_WAKEUP;
	saved_cnt = mpt->reset_cnt;
	while ((req->state & mask) != state && mpt->reset_cnt == saved_cnt) {
		if (sleep_ok != 0) {
			error = mpt_sleep(mpt, req, PUSER, "mptreq", timeout);
			if (error == EWOULDBLOCK) {
				timeout = 0;
				break;
			}
		} else {
			if (time_ms != 0 && --timeout == 0) {
				break;
			}
			DELAY(500);
			mpt_intr(mpt);
		}
	}
	req->state &= ~REQ_STATE_NEED_WAKEUP;
	if (mpt->reset_cnt != saved_cnt) {
		return (EIO);
	}
	if (time_ms && timeout <= 0) {
		MSG_REQUEST_HEADER *msg_hdr = req->req_vbuf;
		req->state |= REQ_STATE_TIMEDOUT;
		mpt_prt(mpt, "mpt_wait_req(%x) timed out\n", msg_hdr->Function);
		return (ETIMEDOUT);
	}
	return (0);
}

/*
 * Send a command to the IOC via the handshake register.
 *
 * Only done at initialization time and for certain unusual
 * commands such as device/bus reset as specified by LSI.
 */
int
mpt_send_handshake_cmd(struct mpt_softc *mpt, size_t len, void *cmd)
{
	int i;
	uint32_t data, *data32;

	/* Check condition of the IOC */
	data = mpt_rd_db(mpt);
	if ((MPT_STATE(data) != MPT_DB_STATE_READY
	  && MPT_STATE(data) != MPT_DB_STATE_RUNNING
	  && MPT_STATE(data) != MPT_DB_STATE_FAULT)
	 || MPT_DB_IS_IN_USE(data)) {
		mpt_prt(mpt, "handshake aborted - invalid doorbell state\n");
		mpt_print_db(data);
		return (EBUSY);
	}

	/* We move things in 32 bit chunks */
	len = (len + 3) >> 2;
	data32 = cmd;

	/* Clear any left over pending doorbell interrupts */
	if (MPT_DB_INTR(mpt_rd_intr(mpt)))
		mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);

	/*
	 * Tell the handshake reg. we are going to send a command
	 * and how long it is going to be.
	 */
	data = (MPI_FUNCTION_HANDSHAKE << MPI_DOORBELL_FUNCTION_SHIFT) |
	    (len << MPI_DOORBELL_ADD_DWORDS_SHIFT);
	mpt_write(mpt, MPT_OFFSET_DOORBELL, data);

	/* Wait for the chip to notice */
	if (mpt_wait_db_int(mpt) != MPT_OK) {
		mpt_prt(mpt, "mpt_send_handshake_cmd: db ignored\n");
		return (ETIMEDOUT);
	}

	/* Clear the interrupt */
	mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);

	if (mpt_wait_db_ack(mpt) != MPT_OK) {
		mpt_prt(mpt, "mpt_send_handshake_cmd: db ack timed out\n");
		return (ETIMEDOUT);
	}

	/* Send the command */
	for (i = 0; i < len; i++) {
		mpt_write(mpt, MPT_OFFSET_DOORBELL, htole32(*data32++));
		if (mpt_wait_db_ack(mpt) != MPT_OK) {
			mpt_prt(mpt,
			    "mpt_send_handshake_cmd: timeout @ index %d\n", i);
			return (ETIMEDOUT);
		}
	}
	return MPT_OK;
}

/* Get the response from the handshake register */
int
mpt_recv_handshake_reply(struct mpt_softc *mpt, size_t reply_len, void *reply)
{
	int left, reply_left;
	u_int16_t *data16;
	uint32_t data;
	MSG_DEFAULT_REPLY *hdr;

	/* We move things out in 16 bit chunks */
	reply_len >>= 1;
	data16 = (u_int16_t *)reply;

	hdr = (MSG_DEFAULT_REPLY *)reply;

	/* Get first word */
	if (mpt_wait_db_int(mpt) != MPT_OK) {
		mpt_prt(mpt, "mpt_recv_handshake_cmd timeout1\n");
		return ETIMEDOUT;
	}
	data = mpt_read(mpt, MPT_OFFSET_DOORBELL);
	*data16++ = le16toh(data & MPT_DB_DATA_MASK);
	mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);

	/* Get Second Word */
	if (mpt_wait_db_int(mpt) != MPT_OK) {
		mpt_prt(mpt, "mpt_recv_handshake_cmd timeout2\n");
		return ETIMEDOUT;
	}
	data = mpt_read(mpt, MPT_OFFSET_DOORBELL);
	*data16++ = le16toh(data & MPT_DB_DATA_MASK);
	mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);

	/*
	 * With the second word, we can now look at the length.
	 * Warn about a reply that's too short (except for IOC FACTS REPLY)
	 */
	if ((reply_len >> 1) != hdr->MsgLength &&
	    (hdr->Function != MPI_FUNCTION_IOC_FACTS)){
#if __FreeBSD_version >= 500000
		mpt_prt(mpt, "reply length does not match message length: "
			"got %x; expected %zx for function %x\n",
			hdr->MsgLength << 2, reply_len << 1, hdr->Function);
#else
		mpt_prt(mpt, "reply length does not match message length: "
			"got %x; expected %x for function %x\n",
			hdr->MsgLength << 2, reply_len << 1, hdr->Function);
#endif
	}

	/* Get rest of the reply; but don't overflow the provided buffer */
	left = (hdr->MsgLength << 1) - 2;
	reply_left =  reply_len - 2;
	while (left--) {
		u_int16_t datum;

		if (mpt_wait_db_int(mpt) != MPT_OK) {
			mpt_prt(mpt, "mpt_recv_handshake_cmd timeout3\n");
			return ETIMEDOUT;
		}
		data = mpt_read(mpt, MPT_OFFSET_DOORBELL);
		datum = le16toh(data & MPT_DB_DATA_MASK);

		if (reply_left-- > 0)
			*data16++ = datum;

		mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
	}

	/* One more wait & clear at the end */
	if (mpt_wait_db_int(mpt) != MPT_OK) {
		mpt_prt(mpt, "mpt_recv_handshake_cmd timeout4\n");
		return ETIMEDOUT;
	}
	mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);

	if ((hdr->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
		if (mpt->verbose >= MPT_PRT_TRACE)
			mpt_print_reply(hdr);
		return (MPT_FAIL | hdr->IOCStatus);
	}

	return (0);
}

static int
mpt_get_iocfacts(struct mpt_softc *mpt, MSG_IOC_FACTS_REPLY *freplp)
{
	MSG_IOC_FACTS f_req;
	int error;

	memset(&f_req, 0, sizeof f_req);
	f_req.Function = MPI_FUNCTION_IOC_FACTS;
	f_req.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
	error = mpt_send_handshake_cmd(mpt, sizeof f_req, &f_req);
	if (error) {
		return (error);
	}
	error = mpt_recv_handshake_reply(mpt, sizeof (*freplp), freplp);
	return (error);
}

static int
mpt_get_portfacts(struct mpt_softc *mpt, U8 port, MSG_PORT_FACTS_REPLY *freplp)
{
	MSG_PORT_FACTS f_req;
	int error;

	memset(&f_req, 0, sizeof f_req);
	f_req.Function = MPI_FUNCTION_PORT_FACTS;
	f_req.PortNumber = port;
	f_req.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
	error = mpt_send_handshake_cmd(mpt, sizeof f_req, &f_req);
	if (error) {
		return (error);
	}
	error = mpt_recv_handshake_reply(mpt, sizeof (*freplp), freplp);
	return (error);
}

/*
 * Send the initialization request. This is where we specify how many
 * SCSI busses and how many devices per bus we wish to emulate.
 * This is also the command that specifies the max size of the reply
 * frames from the IOC that we will be allocating.
 */
static int
mpt_send_ioc_init(struct mpt_softc *mpt, uint32_t who)
{
	int error = 0;
	MSG_IOC_INIT init;
	MSG_IOC_INIT_REPLY reply;

	memset(&init, 0, sizeof init);
	init.WhoInit = who;
	init.Function = MPI_FUNCTION_IOC_INIT;
	init.MaxDevices = 0;	/* at least 256 devices per bus */
	init.MaxBuses = 16;	/* at least 16 busses */

	init.MsgVersion = htole16(MPI_VERSION);
	init.HeaderVersion = htole16(MPI_HEADER_VERSION);
	init.ReplyFrameSize = htole16(MPT_REPLY_SIZE);
	init.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);

	if ((error = mpt_send_handshake_cmd(mpt, sizeof init, &init)) != 0) {
		return (error);
	}

	error = mpt_recv_handshake_reply(mpt, sizeof reply, &reply);
	return (error);
}


/*
 * Utility routine to read configuration headers and pages
 */
int
mpt_issue_cfg_req(struct mpt_softc *mpt, request_t *req, cfgparms_t *params,
		  bus_addr_t addr, bus_size_t len, int sleep_ok, int timeout_ms)
{
	MSG_CONFIG *cfgp;
	SGE_SIMPLE32 *se;

	cfgp = req->req_vbuf;
	memset(cfgp, 0, sizeof *cfgp);
	cfgp->Action = params->Action;
	cfgp->Function = MPI_FUNCTION_CONFIG;
	cfgp->Header.PageVersion = params->PageVersion;
	cfgp->Header.PageNumber = params->PageNumber;
	cfgp->PageAddress = htole32(params->PageAddress);
	if ((params->PageType & MPI_CONFIG_PAGETYPE_MASK) ==
	    MPI_CONFIG_PAGETYPE_EXTENDED) {
		cfgp->Header.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
		cfgp->Header.PageLength = 0;
		cfgp->ExtPageLength = htole16(params->ExtPageLength);
		cfgp->ExtPageType = params->ExtPageType;
	} else {
		cfgp->Header.PageType = params->PageType;
		cfgp->Header.PageLength = params->PageLength;
	}
	se = (SGE_SIMPLE32 *)&cfgp->PageBufferSGE;
	se->Address = htole32(addr);
	MPI_pSGE_SET_LENGTH(se, len);
	MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
	    MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
	    MPI_SGE_FLAGS_END_OF_LIST |
	    ((params->Action == MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT
	   || params->Action == MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM)
	    ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST)));
	se->FlagsLength = htole32(se->FlagsLength);
	cfgp->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_CONFIG);

	mpt_check_doorbell(mpt);
	mpt_send_cmd(mpt, req);
	return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
			     sleep_ok, timeout_ms));
}

int
mpt_read_extcfg_header(struct mpt_softc *mpt, int PageVersion, int PageNumber,
		       uint32_t PageAddress, int ExtPageType,
		       CONFIG_EXTENDED_PAGE_HEADER *rslt,
		       int sleep_ok, int timeout_ms)
{
	request_t  *req;
	cfgparms_t params;
	MSG_CONFIG_REPLY *cfgp;
	int	    error;

	req = mpt_get_request(mpt, sleep_ok);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_read_extcfg_header: Get request failed!\n");
		return (ENOMEM);
	}

	params.Action = MPI_CONFIG_ACTION_PAGE_HEADER;
	params.PageVersion = PageVersion;
	params.PageLength = 0;
	params.PageNumber = PageNumber;
	params.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
	params.PageAddress = PageAddress;
	params.ExtPageType = ExtPageType;
	params.ExtPageLength = 0;
	error = mpt_issue_cfg_req(mpt, req, &params, /*addr*/0, /*len*/0,
				  sleep_ok, timeout_ms);
	if (error != 0) {
		/*
		 * Leave the request. Without resetting the chip, it's
		 * still owned by it and we'll just get into trouble
		 * freeing it now. Mark it as abandoned so that if it
		 * shows up later it can be freed.
		 */
		mpt_prt(mpt, "read_extcfg_header timed out\n");
		return (ETIMEDOUT);
	}

	switch (req->IOCStatus & MPI_IOCSTATUS_MASK) {
	case MPI_IOCSTATUS_SUCCESS:
		cfgp = req->req_vbuf;
		rslt->PageVersion = cfgp->Header.PageVersion;
		rslt->PageNumber = cfgp->Header.PageNumber;
		rslt->PageType = cfgp->Header.PageType;
		rslt->ExtPageLength = le16toh(cfgp->ExtPageLength);
		rslt->ExtPageType = cfgp->ExtPageType;
		error = 0;
		break;
	case MPI_IOCSTATUS_CONFIG_INVALID_PAGE:
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "Invalid Page Type %d Number %d Addr 0x%0x\n",
		    MPI_CONFIG_PAGETYPE_EXTENDED, PageNumber, PageAddress);
		error = EINVAL;
		break;
	default:
		mpt_prt(mpt, "mpt_read_extcfg_header: Config Info Status %x\n",
			req->IOCStatus);
		error = EIO;
		break;
	}
	mpt_free_request(mpt, req);
	return (error);
}

int
mpt_read_extcfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress,
		     CONFIG_EXTENDED_PAGE_HEADER *hdr, void *buf, size_t len,
		     int sleep_ok, int timeout_ms)
{
	request_t    *req;
	cfgparms_t    params;
	int	      error;

	req = mpt_get_request(mpt, sleep_ok);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_read_extcfg_page: Get request failed!\n");
		return (-1);
	}

	params.Action = Action;
	params.PageVersion = hdr->PageVersion;
	params.PageLength = 0;
	params.PageNumber = hdr->PageNumber;
	params.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
	params.PageAddress = PageAddress;
	params.ExtPageType = hdr->ExtPageType;
	params.ExtPageLength = hdr->ExtPageLength;
	error = mpt_issue_cfg_req(mpt, req, &params,
				  req->req_pbuf + MPT_RQSL(mpt),
				  len, sleep_ok, timeout_ms);
	if (error != 0) {
		mpt_prt(mpt, "read_extcfg_page(%d) timed out\n", Action);
		return (-1);
	}

	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
		mpt_prt(mpt, "mpt_read_extcfg_page: Config Info Status %x\n",
			req->IOCStatus);
		mpt_free_request(mpt, req);
		return (-1);
	}
	bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
	    BUS_DMASYNC_POSTREAD);
	memcpy(buf, ((uint8_t *)req->req_vbuf)+MPT_RQSL(mpt), len);
	mpt_free_request(mpt, req);
	return (0);
}

int
mpt_read_cfg_header(struct mpt_softc *mpt, int PageType, int PageNumber,
		    uint32_t PageAddress, CONFIG_PAGE_HEADER *rslt,
		    int sleep_ok, int timeout_ms)
{
	request_t  *req;
	cfgparms_t params;
	MSG_CONFIG *cfgp;
	int	    error;

	req = mpt_get_request(mpt, sleep_ok);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_read_cfg_header: Get request failed!\n");
		return (ENOMEM);
	}

	params.Action = MPI_CONFIG_ACTION_PAGE_HEADER;
	params.PageVersion = 0;
	params.PageLength = 0;
	params.PageNumber = PageNumber;
	params.PageType = PageType;
	params.PageAddress = PageAddress;
	error = mpt_issue_cfg_req(mpt, req, &params, /*addr*/0, /*len*/0,
				  sleep_ok, timeout_ms);
	if (error != 0) {
		/*
		 * Leave the request. Without resetting the chip, it's
		 * still owned by it and we'll just get into trouble
		 * freeing it now. Mark it as abandoned so that if it
		 * shows up later it can be freed.
		 */
		mpt_prt(mpt, "read_cfg_header timed out\n");
		return (ETIMEDOUT);
	}

	switch (req->IOCStatus & MPI_IOCSTATUS_MASK) {
	case MPI_IOCSTATUS_SUCCESS:
		cfgp = req->req_vbuf;
		bcopy(&cfgp->Header, rslt, sizeof(*rslt));
		error = 0;
		break;
	case MPI_IOCSTATUS_CONFIG_INVALID_PAGE:
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "Invalid Page Type %d Number %d Addr 0x%0x\n",
		    PageType, PageNumber, PageAddress);
		error = EINVAL;
		break;
	default:
		mpt_prt(mpt, "mpt_read_cfg_header: Config Info Status %x\n",
			req->IOCStatus);
		error = EIO;
		break;
	}
	mpt_free_request(mpt, req);
	return (error);
}

int
mpt_read_cfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress,
		  CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok,
		  int timeout_ms)
{
	request_t    *req;
	cfgparms_t    params;
	int	      error;

	req = mpt_get_request(mpt, sleep_ok);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_read_cfg_page: Get request failed!\n");
		return (-1);
	}

	params.Action = Action;
	params.PageVersion = hdr->PageVersion;
	params.PageLength = hdr->PageLength;
	params.PageNumber = hdr->PageNumber;
	params.PageType = hdr->PageType & MPI_CONFIG_PAGETYPE_MASK;
	params.PageAddress = PageAddress;
	error = mpt_issue_cfg_req(mpt, req, &params,
				  req->req_pbuf + MPT_RQSL(mpt),
				  len, sleep_ok, timeout_ms);
	if (error != 0) {
		mpt_prt(mpt, "read_cfg_page(%d) timed out\n", Action);
		return (-1);
	}

	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
		mpt_prt(mpt, "mpt_read_cfg_page: Config Info Status %x\n",
			req->IOCStatus);
		mpt_free_request(mpt, req);
		return (-1);
	}
	bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
	    BUS_DMASYNC_POSTREAD);
	memcpy(hdr, ((uint8_t *)req->req_vbuf)+MPT_RQSL(mpt), len);
	mpt_free_request(mpt, req);
	return (0);
}

int
mpt_write_cfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress,
		   CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok,
		   int timeout_ms)
{
	request_t    *req;
	cfgparms_t    params;
	u_int	      hdr_attr;
	int	      error;

	hdr_attr = hdr->PageType & MPI_CONFIG_PAGEATTR_MASK;
	if (hdr_attr != MPI_CONFIG_PAGEATTR_CHANGEABLE &&
	    hdr_attr != MPI_CONFIG_PAGEATTR_PERSISTENT) {
		mpt_prt(mpt, "page type 0x%x not changeable\n",
			hdr->PageType & MPI_CONFIG_PAGETYPE_MASK);
		return (-1);
	}

#if	0
	/*
	 * We shouldn't mask off other bits here.
	 */
	hdr->PageType &= MPI_CONFIG_PAGETYPE_MASK;
#endif

	req = mpt_get_request(mpt, sleep_ok);
	if (req == NULL)
		return (-1);

	memcpy(((caddr_t)req->req_vbuf) + MPT_RQSL(mpt), hdr, len);

	/*
	 * There isn't any point in restoring stripped out attributes
	 * if you then mask them going down to issue the request.
	 */

	params.Action = Action;
	params.PageVersion = hdr->PageVersion;
	params.PageLength = hdr->PageLength;
	params.PageNumber = hdr->PageNumber;
	params.PageAddress = PageAddress;
#if	0
	/* Restore stripped out attributes */
	hdr->PageType |= hdr_attr;
	params.PageType = hdr->PageType & MPI_CONFIG_PAGETYPE_MASK;
#else
	params.PageType = hdr->PageType;
#endif
	error = mpt_issue_cfg_req(mpt, req, &params,
				  req->req_pbuf + MPT_RQSL(mpt),
				  len, sleep_ok, timeout_ms);
	if (error != 0) {
		mpt_prt(mpt, "mpt_write_cfg_page timed out\n");
		return (-1);
	}

	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
		mpt_prt(mpt, "mpt_write_cfg_page: Config Info Status %x\n",
			req->IOCStatus);
		mpt_free_request(mpt, req);
		return (-1);
	}
	mpt_free_request(mpt, req);
	return (0);
}

/*
 * Read IOC configuration information
 */
static int
mpt_read_config_info_ioc(struct mpt_softc *mpt)
{
	CONFIG_PAGE_HEADER hdr;
	struct mpt_raid_volume *mpt_raid;
	int rv;
	int i;
	size_t len;

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC,
		2, 0, &hdr, FALSE, 5000);
	/*
	 * If it's an invalid page, so what? Not a supported function....
	 */
	if (rv == EINVAL) {
		return (0);
	}
	if (rv) {
		return (rv);
	}

	mpt_lprt(mpt, MPT_PRT_DEBUG,
	    "IOC Page 2 Header: Version %x len %x PageNumber %x PageType %x\n",
	    hdr.PageVersion, hdr.PageLength << 2,
	    hdr.PageNumber, hdr.PageType);

	len = hdr.PageLength * sizeof(uint32_t);
	mpt->ioc_page2 = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (mpt->ioc_page2 == NULL) {
		mpt_prt(mpt, "unable to allocate memory for IOC page 2\n");
		mpt_raid_free_mem(mpt);
		return (ENOMEM);
	}
	memcpy(&mpt->ioc_page2->Header, &hdr, sizeof(hdr));
	rv = mpt_read_cur_cfg_page(mpt, 0,
	    &mpt->ioc_page2->Header, len, FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read IOC Page 2\n");
		mpt_raid_free_mem(mpt);
		return (EIO);
	}
	mpt2host_config_page_ioc2(mpt->ioc_page2);

	if (mpt->ioc_page2->CapabilitiesFlags != 0) {
		uint32_t mask;

		mpt_prt(mpt, "Capabilities: (");
		for (mask = 1; mask != 0; mask <<= 1) {
			if ((mpt->ioc_page2->CapabilitiesFlags & mask) == 0) {
				continue;
			}
			switch (mask) {
			case MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT:
				mpt_prtc(mpt, " RAID-0");
				break;
			case MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT:
				mpt_prtc(mpt, " RAID-1E");
				break;
			case MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT:
				mpt_prtc(mpt, " RAID-1");
				break;
			case MPI_IOCPAGE2_CAP_FLAGS_SES_SUPPORT:
				mpt_prtc(mpt, " SES");
				break;
			case MPI_IOCPAGE2_CAP_FLAGS_SAFTE_SUPPORT:
				mpt_prtc(mpt, " SAFTE");
				break;
			case MPI_IOCPAGE2_CAP_FLAGS_CROSS_CHANNEL_SUPPORT:
				mpt_prtc(mpt, " Multi-Channel-Arrays");
			default:
				break;
			}
		}
		mpt_prtc(mpt, " )\n");
		if ((mpt->ioc_page2->CapabilitiesFlags
		   & (MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT
		    | MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT
		    | MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT)) != 0) {
			mpt_prt(mpt, "%d Active Volume%s(%d Max)\n",
				mpt->ioc_page2->NumActiveVolumes,
				mpt->ioc_page2->NumActiveVolumes != 1 ?
"s " : " ", 1954 mpt->ioc_page2->MaxVolumes); 1955 mpt_prt(mpt, "%d Hidden Drive Member%s(%d Max)\n", 1956 mpt->ioc_page2->NumActivePhysDisks, 1957 mpt->ioc_page2->NumActivePhysDisks != 1 1958 ? "s " : " ", 1959 mpt->ioc_page2->MaxPhysDisks); 1960 } 1961 } 1962 1963 len = mpt->ioc_page2->MaxVolumes * sizeof(struct mpt_raid_volume); 1964 mpt->raid_volumes = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); 1965 if (mpt->raid_volumes == NULL) { 1966 mpt_prt(mpt, "Could not allocate RAID volume data\n"); 1967 mpt_raid_free_mem(mpt); 1968 return (ENOMEM); 1969 } 1970 1971 /* 1972 * Copy critical data out of ioc_page2 so that we can 1973 * safely refresh the page without windows of unreliable 1974 * data. 1975 */ 1976 mpt->raid_max_volumes = mpt->ioc_page2->MaxVolumes; 1977 1978 len = sizeof(*mpt->raid_volumes->config_page) + 1979 (sizeof (RAID_VOL0_PHYS_DISK) * (mpt->ioc_page2->MaxPhysDisks - 1)); 1980 for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) { 1981 mpt_raid = &mpt->raid_volumes[i]; 1982 mpt_raid->config_page = 1983 malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); 1984 if (mpt_raid->config_page == NULL) { 1985 mpt_prt(mpt, "Could not allocate RAID page data\n"); 1986 mpt_raid_free_mem(mpt); 1987 return (ENOMEM); 1988 } 1989 } 1990 mpt->raid_page0_len = len; 1991 1992 len = mpt->ioc_page2->MaxPhysDisks * sizeof(struct mpt_raid_disk); 1993 mpt->raid_disks = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); 1994 if (mpt->raid_disks == NULL) { 1995 mpt_prt(mpt, "Could not allocate RAID disk data\n"); 1996 mpt_raid_free_mem(mpt); 1997 return (ENOMEM); 1998 } 1999 mpt->raid_max_disks = mpt->ioc_page2->MaxPhysDisks; 2000 2001 /* 2002 * Load page 3. 2003 */ 2004 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC, 2005 3, 0, &hdr, FALSE, 5000); 2006 if (rv) { 2007 mpt_raid_free_mem(mpt); 2008 return (EIO); 2009 } 2010 2011 mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC Page 3 Header: %x %x %x %x\n", 2012 hdr.PageVersion, hdr.PageLength, hdr.PageNumber, hdr.PageType); 2013 2014 len = hdr.PageLength * sizeof(uint32_t); 2015 mpt->ioc_page3 = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); 2016 if (mpt->ioc_page3 == NULL) { 2017 mpt_prt(mpt, "unable to allocate memory for IOC page 3\n"); 2018 mpt_raid_free_mem(mpt); 2019 return (ENOMEM); 2020 } 2021 memcpy(&mpt->ioc_page3->Header, &hdr, sizeof(hdr)); 2022 rv = mpt_read_cur_cfg_page(mpt, 0, 2023 &mpt->ioc_page3->Header, len, FALSE, 5000); 2024 if (rv) { 2025 mpt_raid_free_mem(mpt); 2026 return (EIO); 2027 } 2028 mpt2host_config_page_ioc3(mpt->ioc_page3); 2029 mpt_raid_wakeup(mpt); 2030 return (0); 2031} 2032 2033/* 2034 * Enable IOC port 2035 */ 2036static int 2037mpt_send_port_enable(struct mpt_softc *mpt, int port) 2038{ 2039 request_t *req; 2040 MSG_PORT_ENABLE *enable_req; 2041 int error; 2042 2043 req = mpt_get_request(mpt, /*sleep_ok*/FALSE); 2044 if (req == NULL) 2045 return (-1); 2046 2047 enable_req = req->req_vbuf; 2048 memset(enable_req, 0, MPT_RQSL(mpt)); 2049 2050 enable_req->Function = MPI_FUNCTION_PORT_ENABLE; 2051 enable_req->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_CONFIG); 2052 enable_req->PortNumber = port; 2053 2054 mpt_check_doorbell(mpt); 2055 mpt_lprt(mpt, MPT_PRT_DEBUG, "enabling port %d\n", port); 2056 2057 mpt_send_cmd(mpt, req); 2058 error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, 2059 FALSE, (mpt->is_sas || mpt->is_fc)? 
2060 if (error != 0) { 2061 mpt_prt(mpt, "port %d enable timed out\n", port); 2062 return (-1); 2063 } 2064 mpt_free_request(mpt, req); 2065 mpt_lprt(mpt, MPT_PRT_DEBUG, "enabled port %d\n", port); 2066 return (0); 2067} 2068 2069/* 2070 * Enable/Disable asynchronous event reporting. 2071 */ 2072static int 2073mpt_send_event_request(struct mpt_softc *mpt, int onoff) 2074{ 2075 request_t *req; 2076 MSG_EVENT_NOTIFY *enable_req; 2077 2078 req = mpt_get_request(mpt, FALSE); 2079 if (req == NULL) { 2080 return (ENOMEM); 2081 } 2082 enable_req = req->req_vbuf; 2083 memset(enable_req, 0, sizeof *enable_req); 2084 2085 enable_req->Function = MPI_FUNCTION_EVENT_NOTIFICATION; 2086 enable_req->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_EVENTS); 2087 enable_req->Switch = onoff; 2088 2089 mpt_check_doorbell(mpt); 2090 mpt_lprt(mpt, MPT_PRT_DEBUG, "%sabling async events\n", 2091 onoff ? "en" : "dis"); 2092 /* 2093 * Send the command off, but don't wait for it. 2094 */ 2095 mpt_send_cmd(mpt, req); 2096 return (0); 2097} 2098 2099/* 2100 * Un-mask the interrupts on the chip. 2101 */ 2102void 2103mpt_enable_ints(struct mpt_softc *mpt) 2104{ 2105 /* Unmask everything except the doorbell interrupt */ 2106 mpt_write(mpt, MPT_OFFSET_INTR_MASK, MPT_INTR_DB_MASK); 2107} 2108 2109/* 2110 * Mask the interrupts on the chip. 2111 */ 2112void 2113mpt_disable_ints(struct mpt_softc *mpt) 2114{ 2115 /* Mask all interrupts */ 2116 mpt_write(mpt, MPT_OFFSET_INTR_MASK, 2117 MPT_INTR_REPLY_MASK | MPT_INTR_DB_MASK); 2118} 2119 2120static void 2121mpt_sysctl_attach(struct mpt_softc *mpt) 2122{ 2123#if __FreeBSD_version >= 500000 2124 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev); 2125 struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev); 2126 2127 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 2128 "debug", CTLFLAG_RW, &mpt->verbose, 0, 2129 "Debugging/Verbose level"); 2130 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 2131 "role", CTLFLAG_RD, &mpt->role, 0, 2132 "HBA role"); 2133#ifdef MPT_TEST_MULTIPATH 2134 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 2135 "failure_id", CTLFLAG_RW, &mpt->failure_id, -1, 2136 "Next Target to Fail"); 2137#endif 2138#endif 2139} 2140 2141int 2142mpt_attach(struct mpt_softc *mpt) 2143{ 2144 struct mpt_personality *pers; 2145 int i; 2146 int error; 2147 2148 mpt_core_attach(mpt); 2149 mpt_core_enable(mpt); 2150 2151 TAILQ_INSERT_TAIL(&mpt_tailq, mpt, links); 2152 for (i = 0; i < MPT_MAX_PERSONALITIES; i++) { 2153 pers = mpt_personalities[i]; 2154 if (pers == NULL) { 2155 continue; 2156 } 2157 if (pers->probe(mpt) == 0) { 2158 error = pers->attach(mpt); 2159 if (error != 0) { 2160 mpt_detach(mpt); 2161 return (error); 2162 } 2163 mpt->mpt_pers_mask |= (0x1 << pers->id); 2164 pers->use_count++; 2165 } 2166 } 2167 2168 /* 2169 * Now that we've attached everything, do the enable function 2170 * for all of the personalities. This allows the personalities 2171 * to do setups that are appropriate for them prior to enabling 2172 * any ports.
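 * (Attach and enable are deliberately separate passes: every
 * personality is attached before any of them is enabled, the same
 * split that mpt_core_attach()/mpt_core_enable() make below.)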
2173 */ 2174 for (i = 0; i < MPT_MAX_PERSONALITIES; i++) { 2175 pers = mpt_personalities[i]; 2176 if (pers != NULL && MPT_PERS_ATTACHED(pers, mpt) != 0) { 2177 error = pers->enable(mpt); 2178 if (error != 0) { 2179 mpt_prt(mpt, "personality %s attached but would" 2180 " not enable (%d)\n", pers->name, error); 2181 mpt_detach(mpt); 2182 return (error); 2183 } 2184 } 2185 } 2186 return (0); 2187} 2188 2189int 2190mpt_shutdown(struct mpt_softc *mpt) 2191{ 2192 struct mpt_personality *pers; 2193 2194 MPT_PERS_FOREACH_REVERSE(mpt, pers) { 2195 pers->shutdown(mpt); 2196 } 2197 return (0); 2198} 2199 2200int 2201mpt_detach(struct mpt_softc *mpt) 2202{ 2203 struct mpt_personality *pers; 2204 2205 MPT_PERS_FOREACH_REVERSE(mpt, pers) { 2206 pers->detach(mpt); 2207 mpt->mpt_pers_mask &= ~(0x1 << pers->id); 2208 pers->use_count--; 2209 } 2210 TAILQ_REMOVE(&mpt_tailq, mpt, links); 2211 return (0); 2212} 2213 2214int 2215mpt_core_load(struct mpt_personality *pers) 2216{ 2217 int i; 2218 2219 /* 2220 * Setup core handlers and insert the default handler 2221 * into all "empty slots". 2222 */ 2223 for (i = 0; i < MPT_NUM_REPLY_HANDLERS; i++) { 2224 mpt_reply_handlers[i] = mpt_default_reply_handler; 2225 } 2226 2227 mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_EVENTS)] = 2228 mpt_event_reply_handler; 2229 mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_CONFIG)] = 2230 mpt_config_reply_handler; 2231 mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_HANDSHAKE)] = 2232 mpt_handshake_reply_handler; 2233 return (0); 2234} 2235 2236/* 2237 * Initialize per-instance driver data and perform 2238 * initial controller configuration. 2239 */ 2240int 2241mpt_core_attach(struct mpt_softc *mpt) 2242{ 2243 int val, error; 2244 2245 LIST_INIT(&mpt->ack_frames); 2246 /* Put all request buffers on the free list */ 2247 TAILQ_INIT(&mpt->request_pending_list); 2248 TAILQ_INIT(&mpt->request_free_list); 2249 TAILQ_INIT(&mpt->request_timeout_list); 2250 for (val = 0; val < MPT_MAX_LUNS; val++) { 2251 STAILQ_INIT(&mpt->trt[val].atios); 2252 STAILQ_INIT(&mpt->trt[val].inots); 2253 } 2254 STAILQ_INIT(&mpt->trt_wildcard.atios); 2255 STAILQ_INIT(&mpt->trt_wildcard.inots); 2256#ifdef MPT_TEST_MULTIPATH 2257 mpt->failure_id = -1; 2258#endif 2259 mpt->scsi_tgt_handler_id = MPT_HANDLER_ID_NONE; 2260 mpt_sysctl_attach(mpt); 2261 mpt_lprt(mpt, MPT_PRT_DEBUG, "doorbell req = %s\n", 2262 mpt_ioc_diag(mpt_read(mpt, MPT_OFFSET_DOORBELL))); 2263 2264 MPT_LOCK(mpt); 2265 error = mpt_configure_ioc(mpt, 0, 0); 2266 MPT_UNLOCK(mpt); 2267 2268 return (error); 2269} 2270 2271int 2272mpt_core_enable(struct mpt_softc *mpt) 2273{ 2274 /* 2275 * We enter with the IOC enabled, but async events 2276 * not enabled, ports not enabled and interrupts 2277 * not enabled. 2278 */ 2279 MPT_LOCK(mpt); 2280 2281 /* 2282 * Enable asynchronous event reporting. All personalities 2283 * have attached, so they should now be able to field 2284 * async events. 2285 */ 2286 mpt_send_event_request(mpt, 1); 2287 2288 /* 2289 * Catch any pending interrupts 2290 * 2291 * This seems to be crucial- otherwise 2292 * the portenable below times out. 2293 */ 2294 mpt_intr(mpt); 2295 2296 /* 2297 * Enable Interrupts 2298 */ 2299 mpt_enable_ints(mpt); 2300 2301 /* 2302 * Catch any pending interrupts 2303 * 2304 * This seems to be crucial- otherwise 2305 * the portenable below times out. 2306 */ 2307 mpt_intr(mpt); 2308 2309 /* 2310 * Enable the port.
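 * Only port 0 is enabled here; mpt_configure_ioc() below notes
 * that more than one port is not yet supported.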
2311 */ 2312 if (mpt_send_port_enable(mpt, 0) != MPT_OK) { 2313 mpt_prt(mpt, "failed to enable port 0\n"); 2314 MPT_UNLOCK(mpt); 2315 return (ENXIO); 2316 } 2317 MPT_UNLOCK(mpt); 2318 return (0); 2319} 2320 2321void 2322mpt_core_shutdown(struct mpt_softc *mpt) 2323{ 2324 mpt_disable_ints(mpt); 2325} 2326 2327void 2328mpt_core_detach(struct mpt_softc *mpt) 2329{ 2330 int val; 2331 2332 /* 2333 * XXX: FREE MEMORY 2334 */ 2335 mpt_disable_ints(mpt); 2336 2337 /* Make sure no request has pending timeouts. */ 2338 for (val = 0; val < MPT_MAX_REQUESTS(mpt); val++) { 2339 request_t *req = &mpt->request_pool[val]; 2340 mpt_callout_drain(mpt, &req->callout); 2341 } 2342 2343 mpt_dma_buf_free(mpt); 2344} 2345 2346int 2347mpt_core_unload(struct mpt_personality *pers) 2348{ 2349 /* Unload is always successful. */ 2350 return (0); 2351} 2352 2353#define FW_UPLOAD_REQ_SIZE \ 2354 (sizeof(MSG_FW_UPLOAD) - sizeof(SGE_MPI_UNION) \ 2355 + sizeof(FW_UPLOAD_TCSGE) + sizeof(SGE_SIMPLE32)) 2356 2357static int 2358mpt_upload_fw(struct mpt_softc *mpt) 2359{ 2360 uint8_t fw_req_buf[FW_UPLOAD_REQ_SIZE]; 2361 MSG_FW_UPLOAD_REPLY fw_reply; 2362 MSG_FW_UPLOAD *fw_req; 2363 FW_UPLOAD_TCSGE *tsge; 2364 SGE_SIMPLE32 *sge; 2365 uint32_t flags; 2366 int error; 2367 2368 memset(&fw_req_buf, 0, sizeof(fw_req_buf)); 2369 fw_req = (MSG_FW_UPLOAD *)fw_req_buf; 2370 fw_req->ImageType = MPI_FW_UPLOAD_ITYPE_FW_IOC_MEM; 2371 fw_req->Function = MPI_FUNCTION_FW_UPLOAD; 2372 fw_req->MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE); 2373 tsge = (FW_UPLOAD_TCSGE *)&fw_req->SGL; 2374 tsge->DetailsLength = 12; 2375 tsge->Flags = MPI_SGE_FLAGS_TRANSACTION_ELEMENT; 2376 tsge->ImageSize = htole32(mpt->fw_image_size); 2377 sge = (SGE_SIMPLE32 *)(tsge + 1); 2378 flags = (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER 2379 | MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_SIMPLE_ELEMENT 2380 | MPI_SGE_FLAGS_32_BIT_ADDRESSING | MPI_SGE_FLAGS_IOC_TO_HOST); 2381 flags <<= MPI_SGE_FLAGS_SHIFT; 2382 sge->FlagsLength = htole32(flags | mpt->fw_image_size); 2383 sge->Address = htole32(mpt->fw_phys); 2384 error = mpt_send_handshake_cmd(mpt, sizeof(fw_req_buf), &fw_req_buf); 2385 if (error) 2386 return(error); 2387 error = mpt_recv_handshake_reply(mpt, sizeof(fw_reply), &fw_reply); 2388 return (error); 2389} 2390 2391static void 2392mpt_diag_outsl(struct mpt_softc *mpt, uint32_t addr, 2393 uint32_t *data, bus_size_t len) 2394{ 2395 uint32_t *data_end; 2396 2397 data_end = data + (roundup2(len, sizeof(uint32_t)) / 4); 2398 if (mpt->is_sas) { 2399 pci_enable_io(mpt->dev, SYS_RES_IOPORT); 2400 } 2401 mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, addr); 2402 while (data != data_end) { 2403 mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, *data); 2404 data++; 2405 } 2406 if (mpt->is_sas) { 2407 pci_disable_io(mpt->dev, SYS_RES_IOPORT); 2408 } 2409} 2410 2411static int 2412mpt_download_fw(struct mpt_softc *mpt) 2413{ 2414 MpiFwHeader_t *fw_hdr; 2415 int error; 2416 uint32_t ext_offset; 2417 uint32_t data; 2418 2419 mpt_prt(mpt, "Downloading Firmware - Image Size %d\n", 2420 mpt->fw_image_size); 2421 2422 error = mpt_enable_diag_mode(mpt); 2423 if (error != 0) { 2424 mpt_prt(mpt, "Could not enter diagnostic mode!\n"); 2425 return (EIO); 2426 } 2427 2428 mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, 2429 MPI_DIAG_RW_ENABLE|MPI_DIAG_DISABLE_ARM); 2430 2431 fw_hdr = (MpiFwHeader_t *)mpt->fw_image; 2432 mpt_diag_outsl(mpt, fw_hdr->LoadStartAddress, (uint32_t*)fw_hdr, 2433 fw_hdr->ImageSize); 2434 2435 ext_offset = fw_hdr->NextImageHeaderOffset; 2436 while (ext_offset != 0) { 2437
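		/*
		 * Walk the chain of extended images: each header carries
		 * its own load address and size plus the offset of the
		 * next header (zero terminates the list).
		 */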
MpiExtImageHeader_t *ext; 2438 2439 ext = (MpiExtImageHeader_t *)((uintptr_t)fw_hdr + ext_offset); 2440 ext_offset = ext->NextImageHeaderOffset; 2441 2442 mpt_diag_outsl(mpt, ext->LoadStartAddress, (uint32_t*)ext, 2443 ext->ImageSize); 2444 } 2445 2446 if (mpt->is_sas) { 2447 pci_enable_io(mpt->dev, SYS_RES_IOPORT); 2448 } 2449 /* Setup the address to jump to on reset. */ 2450 mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, fw_hdr->IopResetRegAddr); 2451 mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, fw_hdr->IopResetVectorValue); 2452 2453 /* 2454 * The controller sets the "flash bad" status after attempting 2455 * to auto-boot from flash. Clear the status so that the controller 2456 * will continue the boot process with our newly installed firmware. 2457 */ 2458 mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, MPT_DIAG_MEM_CFG_BASE); 2459 data = mpt_pio_read(mpt, MPT_OFFSET_DIAG_DATA) | MPT_DIAG_MEM_CFG_BADFL; 2460 mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, MPT_DIAG_MEM_CFG_BASE); 2461 mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, data); 2462 2463 if (mpt->is_sas) { 2464 pci_disable_io(mpt->dev, SYS_RES_IOPORT); 2465 } 2466 2467 /* 2468 * Re-enable the processor and clear the boot halt flag. 2469 */ 2470 data = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC); 2471 data &= ~(MPI_DIAG_PREVENT_IOC_BOOT|MPI_DIAG_DISABLE_ARM); 2472 mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, data); 2473 2474 mpt_disable_diag_mode(mpt); 2475 return (0); 2476} 2477 2478int 2479mpt_dma_buf_alloc(struct mpt_softc *mpt) 2480{ 2481 struct mpt_map_info mi; 2482 uint8_t *vptr; 2483 uint32_t pptr, end; 2484 int i, error; 2485 2486 /* Create a child tag for data buffers */ 2487 if (mpt_dma_tag_create(mpt, mpt->parent_dmat, 1, 2488 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 2489 NULL, NULL, MAXBSIZE, mpt->max_cam_seg_cnt, 2490 BUS_SPACE_MAXSIZE_32BIT, 0, &mpt->buffer_dmat) != 0) { 2491 mpt_prt(mpt, "cannot create a dma tag for data buffers\n"); 2492 return (1); 2493 } 2494 2495 /* Create a child tag for request buffers */ 2496 if (mpt_dma_tag_create(mpt, mpt->parent_dmat, PAGE_SIZE, 0, 2497 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, 2498 NULL, NULL, MPT_REQ_MEM_SIZE(mpt), 1, BUS_SPACE_MAXSIZE_32BIT, 0, 2499 &mpt->request_dmat) != 0) { 2500 mpt_prt(mpt, "cannot create a dma tag for requests\n"); 2501 return (1); 2502 } 2503 2504 /* Allocate some DMA accessible memory for requests */ 2505 if (bus_dmamem_alloc(mpt->request_dmat, (void **)&mpt->request, 2506 BUS_DMA_NOWAIT, &mpt->request_dmap) != 0) { 2507 mpt_prt(mpt, "cannot allocate %d bytes of request memory\n", 2508 MPT_REQ_MEM_SIZE(mpt)); 2509 return (1); 2510 } 2511 2512 mi.mpt = mpt; 2513 mi.error = 0; 2514 2515 /* Load and lock it into "bus space" */ 2516 bus_dmamap_load(mpt->request_dmat, mpt->request_dmap, mpt->request, 2517 MPT_REQ_MEM_SIZE(mpt), mpt_map_rquest, &mi, 0); 2518 2519 if (mi.error) { 2520 mpt_prt(mpt, "error %d loading dma map for DMA request queue\n", 2521 mi.error); 2522 return (1); 2523 } 2524 mpt->request_phys = mi.phys; 2525 2526 /* 2527 * Now create per-request dma maps 2528 */ 2529 i = 0; 2530 pptr = mpt->request_phys; 2531 vptr = mpt->request; 2532 end = pptr + MPT_REQ_MEM_SIZE(mpt); 2533 while (pptr < end) { 2534 request_t *req = &mpt->request_pool[i]; 2535 req->index = i++; 2536 2537 /* Store location of Request Data */ 2538 req->req_pbuf = pptr; 2539 req->req_vbuf = vptr; 2540 2541 pptr += MPT_REQUEST_AREA; 2542 vptr += MPT_REQUEST_AREA; 2543 2544 req->sense_pbuf = (pptr - MPT_SENSE_SIZE); 2545 req->sense_vbuf = (vptr - MPT_SENSE_SIZE); 2546 2547 error =
bus_dmamap_create(mpt->buffer_dmat, 0, &req->dmap); 2548 if (error) { 2549 mpt_prt(mpt, "error %d creating per-cmd DMA maps\n", 2550 error); 2551 return (1); 2552 } 2553 } 2554 2555 return (0); 2556} 2557 2558static void 2559mpt_dma_buf_free(struct mpt_softc *mpt) 2560{ 2561 int i; 2562 if (mpt->request_dmat == 0) { 2563 mpt_lprt(mpt, MPT_PRT_DEBUG, "already released dma memory\n"); 2564 return; 2565 } 2566 for (i = 0; i < MPT_MAX_REQUESTS(mpt); i++) { 2567 bus_dmamap_destroy(mpt->buffer_dmat, mpt->request_pool[i].dmap); 2568 } 2569 bus_dmamap_unload(mpt->request_dmat, mpt->request_dmap); 2570 bus_dmamem_free(mpt->request_dmat, mpt->request, mpt->request_dmap); 2571 bus_dma_tag_destroy(mpt->request_dmat); 2572 mpt->request_dmat = 0; 2573 bus_dma_tag_destroy(mpt->buffer_dmat); 2574} 2575 2576/* 2577 * Allocate/Initialize data structures for the controller. Called 2578 * once at instance startup. 2579 */ 2580static int 2581mpt_configure_ioc(struct mpt_softc *mpt, int tn, int needreset) 2582{ 2583 PTR_MSG_PORT_FACTS_REPLY pfp; 2584 int error, port, val; 2585 size_t len; 2586 2587 if (tn == MPT_MAX_TRYS) { 2588 return (-1); 2589 } 2590 2591 /* 2592 * No need to reset if the IOC is already in the READY state. 2593 * 2594 * Force reset if initialization failed previously. 2595 * Note that a hard_reset of the second channel of a '929 2596 * will stop operation of the first channel. Hopefully, if the 2597 * first channel is ok, the second will not require a hard 2598 * reset. 2599 */ 2600 if (needreset || MPT_STATE(mpt_rd_db(mpt)) != MPT_DB_STATE_READY) { 2601 if (mpt_reset(mpt, FALSE) != MPT_OK) { 2602 return (mpt_configure_ioc(mpt, tn + 1, 1)); 2603 } 2604 needreset = 0; 2605 } 2606 2607 if (mpt_get_iocfacts(mpt, &mpt->ioc_facts) != MPT_OK) { 2608 mpt_prt(mpt, "mpt_get_iocfacts failed\n"); 2609 return (mpt_configure_ioc(mpt, tn + 1, 1)); 2610 } 2611 mpt2host_iocfacts_reply(&mpt->ioc_facts); 2612 2613 mpt_prt(mpt, "MPI Version=%d.%d.%d.%d\n", 2614 mpt->ioc_facts.MsgVersion >> 8, 2615 mpt->ioc_facts.MsgVersion & 0xFF, 2616 mpt->ioc_facts.HeaderVersion >> 8, 2617 mpt->ioc_facts.HeaderVersion & 0xFF); 2618 2619 /* 2620 * Now that we know request frame size, we can calculate 2621 * the actual (reasonable) segment limit for read/write I/O. 2622 * 2623 * This limit is constrained by: 2624 * 2625 * + The size of each area we allocate per command (and how 2626 * many chain segments we can fit into it). 2627 * + The total number of areas we've set up. 2628 * + The actual chain depth the card will allow. 2629 * 2630 * The first area's segment count is limited by the I/O request 2631 * at the head of it. We realistically cannot allocate more 2632 * than MPT_MAX_REQUESTS areas. Therefore, to account for both 2633 * conditions, we'll just start out with MPT_MAX_REQUESTS-2. 2634 * 2635 */ 2636 /* total number of request areas we (can) allocate */ 2637 mpt->max_seg_cnt = MPT_MAX_REQUESTS(mpt) - 2; 2638 2639 /* converted to the number of chain areas possible */ 2640 mpt->max_seg_cnt *= MPT_NRFM(mpt); 2641 2642 /* limited by the number of chain areas the card will support */ 2643 if (mpt->max_seg_cnt > mpt->ioc_facts.MaxChainDepth) { 2644 mpt_lprt(mpt, MPT_PRT_INFO, 2645 "chain depth limited to %u (from %u)\n", 2646 mpt->ioc_facts.MaxChainDepth, mpt->max_seg_cnt); 2647 mpt->max_seg_cnt = mpt->ioc_facts.MaxChainDepth; 2648 } 2649 2650 /* converted to the number of simple sges in chain segments.
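 * For illustration only (real values come from MPT_NSGL() for this
 * adapter): if a chain frame holds 32 SGE slots and one slot is
 * reserved for chaining to the next frame, a clamped count of 128
 * chain areas would yield 128 * 31 = 3968 simple SGEs.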
*/ 2651 mpt->max_seg_cnt *= (MPT_NSGL(mpt) - 1); 2652 2653 /* 2654 * Use this as the basis for reporting the maximum I/O size to CAM. 2655 */ 2656 mpt->max_cam_seg_cnt = min(mpt->max_seg_cnt, (MAXPHYS / PAGE_SIZE) + 1); 2657 2658 error = mpt_dma_buf_alloc(mpt); 2659 if (error != 0) { 2660 mpt_prt(mpt, "mpt_dma_buf_alloc() failed!\n"); 2661 return (EIO); 2662 } 2663 2664 for (val = 0; val < MPT_MAX_REQUESTS(mpt); val++) { 2665 request_t *req = &mpt->request_pool[val]; 2666 req->state = REQ_STATE_ALLOCATED; 2667 mpt_callout_init(mpt, &req->callout); 2668 mpt_free_request(mpt, req); 2669 } 2670 2671 mpt_lprt(mpt, MPT_PRT_INFO, "Maximum Segment Count: %u, Maximum " 2672 "CAM Segment Count: %u\n", mpt->max_seg_cnt, 2673 mpt->max_cam_seg_cnt); 2674 2675 mpt_lprt(mpt, MPT_PRT_INFO, "MsgLength=%u IOCNumber = %d\n", 2676 mpt->ioc_facts.MsgLength, mpt->ioc_facts.IOCNumber); 2677 mpt_lprt(mpt, MPT_PRT_INFO, 2678 "IOCFACTS: GlobalCredits=%d BlockSize=%u bytes " 2679 "Request Frame Size %u bytes Max Chain Depth %u\n", 2680 mpt->ioc_facts.GlobalCredits, mpt->ioc_facts.BlockSize, 2681 mpt->ioc_facts.RequestFrameSize << 2, 2682 mpt->ioc_facts.MaxChainDepth); 2683 mpt_lprt(mpt, MPT_PRT_INFO, "IOCFACTS: Num Ports %d, FWImageSize %d, " 2684 "Flags=%#x\n", mpt->ioc_facts.NumberOfPorts, 2685 mpt->ioc_facts.FWImageSize, mpt->ioc_facts.Flags); 2686 2687 len = mpt->ioc_facts.NumberOfPorts * sizeof (MSG_PORT_FACTS_REPLY); 2688 mpt->port_facts = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); 2689 if (mpt->port_facts == NULL) { 2690 mpt_prt(mpt, "unable to allocate memory for port facts\n"); 2691 return (ENOMEM); 2692 } 2693 2694 2695 if ((mpt->ioc_facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT) && 2696 (mpt->fw_uploaded == 0)) { 2697 struct mpt_map_info mi; 2698 2699 /* 2700 * In some configurations, the IOC's firmware is 2701 * stored in a shared piece of system NVRAM that 2702 * is only accessible via the BIOS. In this 2703 * case, the firmware keeps a copy of firmware in 2704 * RAM until the OS driver retrieves it. Once 2705 * retrieved, we are responsible for re-downloading 2706 * the firmware after any hard-reset.
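 * (mpt->fw_uploaded, set once the upload below succeeds, records
 * that we already hold such a copy; mpt_download_fw() above is
 * what pushes the image back into the IOC after a reset.)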
2707 */ 2708 mpt->fw_image_size = mpt->ioc_facts.FWImageSize; 2709 error = mpt_dma_tag_create(mpt, mpt->parent_dmat, 1, 0, 2710 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 2711 mpt->fw_image_size, 1, mpt->fw_image_size, 0, 2712 &mpt->fw_dmat); 2713 if (error != 0) { 2714 mpt_prt(mpt, "cannot create firmware dma tag\n"); 2715 return (ENOMEM); 2716 } 2717 error = bus_dmamem_alloc(mpt->fw_dmat, 2718 (void **)&mpt->fw_image, BUS_DMA_NOWAIT, &mpt->fw_dmap); 2719 if (error != 0) { 2720 mpt_prt(mpt, "cannot allocate firmware memory\n"); 2721 bus_dma_tag_destroy(mpt->fw_dmat); 2722 return (ENOMEM); 2723 } 2724 mi.mpt = mpt; 2725 mi.error = 0; 2726 bus_dmamap_load(mpt->fw_dmat, mpt->fw_dmap, 2727 mpt->fw_image, mpt->fw_image_size, mpt_map_rquest, &mi, 0); 2728 mpt->fw_phys = mi.phys; 2729 2730 error = mpt_upload_fw(mpt); 2731 if (error != 0) { 2732 mpt_prt(mpt, "firmware upload failed.\n"); 2733 bus_dmamap_unload(mpt->fw_dmat, mpt->fw_dmap); 2734 bus_dmamem_free(mpt->fw_dmat, mpt->fw_image, 2735 mpt->fw_dmap); 2736 bus_dma_tag_destroy(mpt->fw_dmat); 2737 mpt->fw_image = NULL; 2738 return (EIO); 2739 } 2740 mpt->fw_uploaded = 1; 2741 } 2742 2743 for (port = 0; port < mpt->ioc_facts.NumberOfPorts; port++) { 2744 pfp = &mpt->port_facts[port]; 2745 error = mpt_get_portfacts(mpt, 0, pfp); 2746 if (error != MPT_OK) { 2747 mpt_prt(mpt, 2748 "mpt_get_portfacts on port %d failed\n", port); 2749 free(mpt->port_facts, M_DEVBUF); 2750 mpt->port_facts = NULL; 2751 return (mpt_configure_ioc(mpt, tn + 1, 1)); 2752 } 2753 mpt2host_portfacts_reply(pfp); 2754 2755 if (port > 0) { 2756 error = MPT_PRT_INFO; 2757 } else { 2758 error = MPT_PRT_DEBUG; 2759 } 2760 mpt_lprt(mpt, error, 2761 "PORTFACTS[%d]: Type %x PFlags %x IID %d MaxDev %d\n", 2762 port, pfp->PortType, pfp->ProtocolFlags, pfp->PortSCSIID, 2763 pfp->MaxDevices); 2764 2765 } 2766 2767 /* 2768 * XXX: Not yet supporting more than port 0 2769 */ 2770 pfp = &mpt->port_facts[0]; 2771 if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_FC) { 2772 mpt->is_fc = 1; 2773 mpt->is_sas = 0; 2774 mpt->is_spi = 0; 2775 } else if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_SAS) { 2776 mpt->is_fc = 0; 2777 mpt->is_sas = 1; 2778 mpt->is_spi = 0; 2779 } else if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_SCSI) { 2780 mpt->is_fc = 0; 2781 mpt->is_sas = 0; 2782 mpt->is_spi = 1; 2783 if (mpt->mpt_ini_id == MPT_INI_ID_NONE) 2784 mpt->mpt_ini_id = pfp->PortSCSIID; 2785 } else if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_ISCSI) { 2786 mpt_prt(mpt, "iSCSI not supported yet\n"); 2787 return (ENXIO); 2788 } else if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_INACTIVE) { 2789 mpt_prt(mpt, "Inactive Port\n"); 2790 return (ENXIO); 2791 } else { 2792 mpt_prt(mpt, "unknown Port Type %#x\n", pfp->PortType); 2793 return (ENXIO); 2794 } 2795 2796 /* 2797 * Set our role with what this port supports. 2798 * 2799 * Note that other modules may change this later 2800 * if it differs from what is desired. 2801 */ 2802 mpt->role = MPT_ROLE_NONE; 2803 if (pfp->ProtocolFlags & MPI_PORTFACTS_PROTOCOL_INITIATOR) { 2804 mpt->role |= MPT_ROLE_INITIATOR; 2805 } 2806 if (pfp->ProtocolFlags & MPI_PORTFACTS_PROTOCOL_TARGET) { 2807 mpt->role |= MPT_ROLE_TARGET; 2808 } 2809 2810 /* 2811 * Enable the IOC 2812 */ 2813 if (mpt_enable_ioc(mpt, 1) != MPT_OK) { 2814 mpt_prt(mpt, "unable to initialize IOC\n"); 2815 return (ENXIO); 2816 } 2817 2818 /* 2819 * Read IOC configuration information. 2820 * 2821 * We need this to determine whether or not we have certain 2822 * settings for Integrated Mirroring (e.g.).
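 * (mpt_read_config_info_ioc() pulls IOC Page 2, the RAID capability
 * and volume summary, and IOC Page 3, which lists the physical
 * disks.)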
2823 */ 2824 mpt_read_config_info_ioc(mpt); 2825 2826 return (0); 2827} 2828 2829static int 2830mpt_enable_ioc(struct mpt_softc *mpt, int portenable) 2831{ 2832 uint32_t pptr; 2833 int val; 2834 2835 if (mpt_send_ioc_init(mpt, MPI_WHOINIT_HOST_DRIVER) != MPT_OK) { 2836 mpt_prt(mpt, "mpt_send_ioc_init failed\n"); 2837 return (EIO); 2838 } 2839 2840 mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_send_ioc_init ok\n"); 2841 2842 if (mpt_wait_state(mpt, MPT_DB_STATE_RUNNING) != MPT_OK) { 2843 mpt_prt(mpt, "IOC failed to go to run state\n"); 2844 return (ENXIO); 2845 } 2846 mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC now at RUNSTATE\n"); 2847 2848 /* 2849 * Give it reply buffers 2850 * 2851 * Do *not* exceed global credits. 2852 */ 2853 for (val = 0, pptr = mpt->reply_phys; 2854 (pptr + MPT_REPLY_SIZE) < (mpt->reply_phys + PAGE_SIZE); 2855 pptr += MPT_REPLY_SIZE) { 2856 mpt_free_reply(mpt, pptr); 2857 if (++val == mpt->ioc_facts.GlobalCredits - 1) 2858 break; 2859 } 2860 2861 2862 /* 2863 * Enable the port if asked. This is only done if we're resetting 2864 * the IOC after initial startup. 2865 */ 2866 if (portenable) { 2867 /* 2868 * Enable asynchronous event reporting 2869 */ 2870 mpt_send_event_request(mpt, 1); 2871 2872 if (mpt_send_port_enable(mpt, 0) != MPT_OK) { 2873 mpt_prt(mpt, "%s: failed to enable port 0\n", __func__); 2874 return (ENXIO); 2875 } 2876 } 2877 return (MPT_OK); 2878} 2879 2880/* 2881 * Endian Conversion Functions- only used on Big Endian machines 2882 */ 2883#if _BYTE_ORDER == _BIG_ENDIAN 2884void 2885mpt2host_sge_simple_union(SGE_SIMPLE_UNION *sge) 2886{ 2887 2888 MPT_2_HOST32(sge, FlagsLength); 2889 MPT_2_HOST32(sge, u.Address64.Low); 2890 MPT_2_HOST32(sge, u.Address64.High); 2891} 2892 2893void 2894mpt2host_iocfacts_reply(MSG_IOC_FACTS_REPLY *rp) 2895{ 2896 2897 MPT_2_HOST16(rp, MsgVersion); 2898 MPT_2_HOST16(rp, HeaderVersion); 2899 MPT_2_HOST32(rp, MsgContext); 2900 MPT_2_HOST16(rp, IOCExceptions); 2901 MPT_2_HOST16(rp, IOCStatus); 2902 MPT_2_HOST32(rp, IOCLogInfo); 2903 MPT_2_HOST16(rp, ReplyQueueDepth); 2904 MPT_2_HOST16(rp, RequestFrameSize); 2905 MPT_2_HOST16(rp, Reserved_0101_FWVersion); 2906 MPT_2_HOST16(rp, ProductID); 2907 MPT_2_HOST32(rp, CurrentHostMfaHighAddr); 2908 MPT_2_HOST16(rp, GlobalCredits); 2909 MPT_2_HOST32(rp, CurrentSenseBufferHighAddr); 2910 MPT_2_HOST16(rp, CurReplyFrameSize); 2911 MPT_2_HOST32(rp, FWImageSize); 2912 MPT_2_HOST32(rp, IOCCapabilities); 2913 MPT_2_HOST32(rp, FWVersion.Word); 2914 MPT_2_HOST16(rp, HighPriorityQueueDepth); 2915 MPT_2_HOST16(rp, Reserved2); 2916 mpt2host_sge_simple_union(&rp->HostPageBufferSGE); 2917 MPT_2_HOST32(rp, ReplyFifoHostSignalingAddr); 2918} 2919 2920void 2921mpt2host_portfacts_reply(MSG_PORT_FACTS_REPLY *pfp) 2922{ 2923 2924 MPT_2_HOST16(pfp, Reserved); 2925 MPT_2_HOST16(pfp, Reserved1); 2926 MPT_2_HOST32(pfp, MsgContext); 2927 MPT_2_HOST16(pfp, Reserved2); 2928 MPT_2_HOST16(pfp, IOCStatus); 2929 MPT_2_HOST32(pfp, IOCLogInfo); 2930 MPT_2_HOST16(pfp, MaxDevices); 2931 MPT_2_HOST16(pfp, PortSCSIID); 2932 MPT_2_HOST16(pfp, ProtocolFlags); 2933 MPT_2_HOST16(pfp, MaxPostedCmdBuffers); 2934 MPT_2_HOST16(pfp, MaxPersistentIDs); 2935 MPT_2_HOST16(pfp, MaxLanBuckets); 2936 MPT_2_HOST16(pfp, Reserved4); 2937 MPT_2_HOST32(pfp, Reserved5); 2938} 2939 2940void 2941mpt2host_config_page_ioc2(CONFIG_PAGE_IOC_2 *ioc2) 2942{ 2943 int i; 2944 2945 MPT_2_HOST32(ioc2, CapabilitiesFlags); 2946 for (i = 0; i < MPI_IOC_PAGE_2_RAID_VOLUME_MAX; i++) { 2947 MPT_2_HOST16(ioc2, RaidVolume[i].Reserved3); 2948 } 2949} 2950 2951void 
2952mpt2host_config_page_ioc3(CONFIG_PAGE_IOC_3 *ioc3) 2953{ 2954 2955 MPT_2_HOST16(ioc3, Reserved2); 2956} 2957 2958void 2959mpt2host_config_page_scsi_port_0(CONFIG_PAGE_SCSI_PORT_0 *sp0) 2960{ 2961 2962 MPT_2_HOST32(sp0, Capabilities); 2963 MPT_2_HOST32(sp0, PhysicalInterface); 2964} 2965 2966void 2967mpt2host_config_page_scsi_port_1(CONFIG_PAGE_SCSI_PORT_1 *sp1) 2968{ 2969 2970 MPT_2_HOST32(sp1, Configuration); 2971 MPT_2_HOST32(sp1, OnBusTimerValue); 2972 MPT_2_HOST16(sp1, IDConfig); 2973} 2974 2975void 2976host2mpt_config_page_scsi_port_1(CONFIG_PAGE_SCSI_PORT_1 *sp1) 2977{ 2978 2979 HOST_2_MPT32(sp1, Configuration); 2980 HOST_2_MPT32(sp1, OnBusTimerValue); 2981 HOST_2_MPT16(sp1, IDConfig); 2982} 2983 2984void 2985mpt2host_config_page_scsi_port_2(CONFIG_PAGE_SCSI_PORT_2 *sp2) 2986{ 2987 int i; 2988 2989 MPT_2_HOST32(sp2, PortFlags); 2990 MPT_2_HOST32(sp2, PortSettings); 2991 for (i = 0; i < sizeof(sp2->DeviceSettings) / 2992 sizeof(*sp2->DeviceSettings); i++) { 2993 MPT_2_HOST16(sp2, DeviceSettings[i].DeviceFlags); 2994 } 2995} 2996 2997void 2998mpt2host_config_page_scsi_device_0(CONFIG_PAGE_SCSI_DEVICE_0 *sd0) 2999{ 3000 3001 MPT_2_HOST32(sd0, NegotiatedParameters); 3002 MPT_2_HOST32(sd0, Information); 3003} 3004 3005void 3006mpt2host_config_page_scsi_device_1(CONFIG_PAGE_SCSI_DEVICE_1 *sd1) 3007{ 3008 3009 MPT_2_HOST32(sd1, RequestedParameters); 3010 MPT_2_HOST32(sd1, Reserved); 3011 MPT_2_HOST32(sd1, Configuration); 3012} 3013 3014void 3015host2mpt_config_page_scsi_device_1(CONFIG_PAGE_SCSI_DEVICE_1 *sd1) 3016{ 3017 3018 HOST_2_MPT32(sd1, RequestedParameters); 3019 HOST_2_MPT32(sd1, Reserved); 3020 HOST_2_MPT32(sd1, Configuration); 3021} 3022 3023void 3024mpt2host_config_page_fc_port_0(CONFIG_PAGE_FC_PORT_0 *fp0) 3025{ 3026 3027 MPT_2_HOST32(fp0, Flags); 3028 MPT_2_HOST32(fp0, PortIdentifier); 3029 MPT_2_HOST32(fp0, WWNN.Low); 3030 MPT_2_HOST32(fp0, WWNN.High); 3031 MPT_2_HOST32(fp0, WWPN.Low); 3032 MPT_2_HOST32(fp0, WWPN.High); 3033 MPT_2_HOST32(fp0, SupportedServiceClass); 3034 MPT_2_HOST32(fp0, SupportedSpeeds); 3035 MPT_2_HOST32(fp0, CurrentSpeed); 3036 MPT_2_HOST32(fp0, MaxFrameSize); 3037 MPT_2_HOST32(fp0, FabricWWNN.Low); 3038 MPT_2_HOST32(fp0, FabricWWNN.High); 3039 MPT_2_HOST32(fp0, FabricWWPN.Low); 3040 MPT_2_HOST32(fp0, FabricWWPN.High); 3041 MPT_2_HOST32(fp0, DiscoveredPortsCount); 3042 MPT_2_HOST32(fp0, MaxInitiators); 3043} 3044 3045void 3046mpt2host_config_page_fc_port_1(CONFIG_PAGE_FC_PORT_1 *fp1) 3047{ 3048 3049 MPT_2_HOST32(fp1, Flags); 3050 MPT_2_HOST32(fp1, NoSEEPROMWWNN.Low); 3051 MPT_2_HOST32(fp1, NoSEEPROMWWNN.High); 3052 MPT_2_HOST32(fp1, NoSEEPROMWWPN.Low); 3053 MPT_2_HOST32(fp1, NoSEEPROMWWPN.High); 3054} 3055 3056void 3057host2mpt_config_page_fc_port_1(CONFIG_PAGE_FC_PORT_1 *fp1) 3058{ 3059 3060 HOST_2_MPT32(fp1, Flags); 3061 HOST_2_MPT32(fp1, NoSEEPROMWWNN.Low); 3062 HOST_2_MPT32(fp1, NoSEEPROMWWNN.High); 3063 HOST_2_MPT32(fp1, NoSEEPROMWWPN.Low); 3064 HOST_2_MPT32(fp1, NoSEEPROMWWPN.High); 3065} 3066 3067void 3068mpt2host_config_page_raid_vol_0(CONFIG_PAGE_RAID_VOL_0 *volp) 3069{ 3070 int i; 3071 3072 MPT_2_HOST16(volp, VolumeStatus.Reserved); 3073 MPT_2_HOST16(volp, VolumeSettings.Settings); 3074 MPT_2_HOST32(volp, MaxLBA); 3075 MPT_2_HOST32(volp, MaxLBAHigh); 3076 MPT_2_HOST32(volp, StripeSize); 3077 MPT_2_HOST32(volp, Reserved2); 3078 MPT_2_HOST32(volp, Reserved3); 3079 for (i = 0; i < MPI_RAID_VOL_PAGE_0_PHYSDISK_MAX; i++) { 3080 MPT_2_HOST16(volp, PhysDisk[i].Reserved); 3081 } 3082} 3083 3084void 
3085mpt2host_config_page_raid_phys_disk_0(CONFIG_PAGE_RAID_PHYS_DISK_0 *rpd0) 3086{ 3087 3088 MPT_2_HOST32(rpd0, Reserved1); 3089 MPT_2_HOST16(rpd0, PhysDiskStatus.Reserved); 3090 MPT_2_HOST32(rpd0, MaxLBA); 3091 MPT_2_HOST16(rpd0, ErrorData.Reserved); 3092 MPT_2_HOST16(rpd0, ErrorData.ErrorCount); 3093 MPT_2_HOST16(rpd0, ErrorData.SmartCount); 3094} 3095 3096void 3097mpt2host_mpi_raid_vol_indicator(MPI_RAID_VOL_INDICATOR *vi) 3098{ 3099 3100 MPT_2_HOST16(vi, TotalBlocks.High); 3101 MPT_2_HOST16(vi, TotalBlocks.Low); 3102 MPT_2_HOST16(vi, BlocksRemaining.High); 3103 MPT_2_HOST16(vi, BlocksRemaining.Low); 3104} 3105#endif 3106