1/*-
2 * Generic routines for LSI Fusion adapters.
3 * FreeBSD Version.
4 *
5 * Copyright (c) 2000, 2001 by Greg Ansley
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions

--- 78 unchanged lines hidden (view full) ---

87 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
88 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
89 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
90 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
91 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
92 */
93
94#include <sys/cdefs.h>
95__FBSDID("$FreeBSD: head/sys/dev/mpt/mpt.c 156796 2006-03-17 04:52:27Z mjacob $");
96
97#include <dev/mpt/mpt.h>
98#include <dev/mpt/mpt_cam.h> /* XXX For static handler registration */
99#include <dev/mpt/mpt_raid.h> /* XXX For static handler registration */
100
101#include <dev/mpt/mpilib/mpi.h>
102#include <dev/mpt/mpilib/mpi_ioc.h>
103
104#include <sys/sysctl.h>
105
106#define MPT_MAX_TRYS 3
107#define MPT_MAX_WAIT 300000
108
109static int maxwait_ack = 0;
110static int maxwait_int = 0;

--- 7 unchanged lines hidden (view full) ---

118static mpt_reply_handler_t mpt_handshake_reply_handler;
119static mpt_reply_handler_t mpt_event_reply_handler;
120static void mpt_send_event_ack(struct mpt_softc *mpt, request_t *ack_req,
121 MSG_EVENT_NOTIFY_REPLY *msg, uint32_t context);
122static int mpt_send_event_request(struct mpt_softc *mpt, int onoff);
123static int mpt_soft_reset(struct mpt_softc *mpt);
124static void mpt_hard_reset(struct mpt_softc *mpt);
125static int mpt_configure_ioc(struct mpt_softc *mpt);
126static int mpt_enable_ioc(struct mpt_softc *mpt);
127
128/************************* Personality Module Support *************************/
129/*
130 * We include one extra entry that is guaranteed to be NULL
131 * to simplify our iterator.
132 */
133static struct mpt_personality *mpt_personalities[MPT_MAX_PERSONALITIES + 1];
134static __inline struct mpt_personality*

--- 10 unchanged lines hidden (view full) ---

145 while (start_at < MPT_MAX_PERSONALITIES
146 && (mpt->mpt_pers_mask & (0x1 << start_at)) == 0) {
147 start_at++;
148 }
149 return (mpt_personalities[start_at]);
150}
151
152/*
153 * Used infrequently, so there is no need to optimize as in the forward
154 * traversal, which relies on the MAX+1 entry that is guaranteed to
155 * be NULL.
156 */
157static __inline struct mpt_personality *
158mpt_pers_find_reverse(struct mpt_softc *mpt, u_int start_at)
159{
160 while (start_at < MPT_MAX_PERSONALITIES
161 && (mpt->mpt_pers_mask & (0x1 << start_at)) == 0) {

--- 12 unchanged lines hidden (view full) ---

174#define MPT_PERS_FOREACH_REVERSE(mpt, pers) \
175 for (pers = mpt_pers_find_reverse(mpt, MPT_MAX_PERSONALITIES-1);\
176 pers != NULL; \
177 pers = mpt_pers_find_reverse(mpt, /*start_at*/pers->id-1))
178
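/*
 * Illustrative sketch (not part of this revision): how the NULL-sentinel
 * iterators above are typically consumed.  MPT_PERS_FOREACH visits only
 * the personalities whose bit is set in mpt_pers_mask, and the extra NULL
 * entry in mpt_personalities[] ends the walk without a bounds check.  The
 * helper name below is hypothetical.
 */
#if 0
static __inline void
mpt_announce_personalities(struct mpt_softc *mpt)
{
        struct mpt_personality *pers;

        MPT_PERS_FOREACH(mpt, pers)
                mpt_prt(mpt, "personality %s is attached\n", pers->name);
}
#endif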
179static mpt_load_handler_t mpt_stdload;
180static mpt_probe_handler_t mpt_stdprobe;
181static mpt_attach_handler_t mpt_stdattach;
182static mpt_event_handler_t mpt_stdevent;
183static mpt_reset_handler_t mpt_stdreset;
184static mpt_shutdown_handler_t mpt_stdshutdown;
185static mpt_detach_handler_t mpt_stddetach;
186static mpt_unload_handler_t mpt_stdunload;
187static struct mpt_personality mpt_default_personality =
188{
189 .load = mpt_stdload,
190 .probe = mpt_stdprobe,
191 .attach = mpt_stdattach,
192 .event = mpt_stdevent,
193 .reset = mpt_stdreset,
194 .shutdown = mpt_stdshutdown,
195 .detach = mpt_stddetach,
196 .unload = mpt_stdunload
197};
198
199static mpt_load_handler_t mpt_core_load;
200static mpt_attach_handler_t mpt_core_attach;
201static mpt_reset_handler_t mpt_core_ioc_reset;
202static mpt_event_handler_t mpt_core_event;
203static mpt_shutdown_handler_t mpt_core_shutdown;
204static mpt_shutdown_handler_t mpt_core_detach;
205static mpt_unload_handler_t mpt_core_unload;
206static struct mpt_personality mpt_core_personality =
207{
208 .name = "mpt_core",
209 .load = mpt_core_load,
210 .attach = mpt_core_attach,
211 .event = mpt_core_event,
212 .reset = mpt_core_ioc_reset,
213 .shutdown = mpt_core_shutdown,
214 .detach = mpt_core_detach,
215 .unload = mpt_core_unload,
216};
217
218/*
219 * Manual declaration so that DECLARE_MPT_PERSONALITY doesn't need
220 * ordering information.  We want the core to always register FIRST;
221 * other modules are set to SI_ORDER_SECOND.
222 */
223static moduledata_t mpt_core_mod = {
224 "mpt_core", mpt_modevent, &mpt_core_personality
225};
226DECLARE_MODULE(mpt_core, mpt_core_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
227MODULE_VERSION(mpt_core, 1);
228
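/*
 * Illustrative sketch (assumption, not from this revision): a non-core
 * personality registers through the same mpt_modevent entry point but at
 * SI_ORDER_SECOND, and declares a dependency on mpt_core so the core is
 * always in place first.  The "mpt_sample" names are hypothetical.
 */
#if 0
static struct mpt_personality mpt_sample_personality = {
        .name = "mpt_sample",
};
static moduledata_t mpt_sample_mod = {
        "mpt_sample", mpt_modevent, &mpt_sample_personality
};
DECLARE_MODULE(mpt_sample, mpt_sample_mod, SI_SUB_DRIVERS, SI_ORDER_SECOND);
MODULE_VERSION(mpt_sample, 1);
MODULE_DEPEND(mpt_sample, mpt_core, 1, 1, 1);
#endif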
229#define MPT_PERS_ATACHED(pers, mpt) \
230 ((mpt)->mpt_pers_mask & (0x1 << pers->id))
231
232
233int
234mpt_modevent(module_t mod, int type, void *data)
235{
236 struct mpt_personality *pers;
237 int error;
238

--- 30 unchanged lines hidden (view full) ---

269
270 error = (pers->load(pers));
271 if (error != 0)
272 mpt_personalities[i] = NULL;
273 break;
274 }
275 case MOD_SHUTDOWN:
276 break;
277 case MOD_QUIESCE:
278 break;
279 case MOD_UNLOAD:
280 error = pers->unload(pers);
281 mpt_personalities[pers->id] = NULL;
282 break;
283 default:
284 error = EINVAL;
285 break;
286 }

--- 17 unchanged lines hidden (view full) ---

304int
305mpt_stdattach(struct mpt_softc *mpt)
306{
307 /* Attach is always successful. */
308 return (0);
309}
310
311int
312mpt_stdevent(struct mpt_softc *mpt, request_t *req, MSG_EVENT_NOTIFY_REPLY *msg)
313{
314 mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_stdevent: 0x%x\n", msg->Event & 0xFF);
315 /* Event was not for us. */
316 return (0);
317}
318
319void

--- 60 unchanged lines hidden (view full) ---

380 * Fill from the front in the hope that
381 * all registered handlers consume only a
382 * single cache line.
383 *
384 * We don't break on the first empty slot so
385 * that the full table is checked to see if
386 * this handler was previously registered.
387 */
388 if (free_cbi == MPT_HANDLER_ID_NONE
389 && (mpt_reply_handlers[cbi]
390 == mpt_default_reply_handler))
391 free_cbi = cbi;
392 }
393 if (free_cbi == MPT_HANDLER_ID_NONE)
394 return (ENOMEM);
395 mpt_reply_handlers[free_cbi] = handler.reply_handler;
396 *phandler_id = MPT_CBI_TO_HID(free_cbi);
397 break;
398 }
399 default:
400 mpt_prt(mpt, "mpt_register_handler unknown type %d\n", type);
401 return (EINVAL);
402 }

--- 21 unchanged lines hidden (view full) ---

424 mpt_prt(mpt, "mpt_deregister_handler unknown type %d\n", type);
425 return (EINVAL);
426 }
427 return (0);
428}
429
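/*
 * Illustrative sketch (assumption: the mpt_handler_t union and the
 * MPT_HANDLER_REPLY handler type come from mpt.h): how a personality
 * claims a reply callback slot.  The returned handler id is folded into
 * each request's MsgContext so mpt_intr() can route the reply back.  The
 * "mpt_sample" names are hypothetical.
 */
#if 0
static uint32_t sample_handler_id;

static int
mpt_sample_reply_handler(struct mpt_softc *mpt, request_t *req,
                         MSG_DEFAULT_REPLY *reply_frame)
{
        /* Process the reply here; returning TRUE lets the caller free it. */
        return (TRUE);
}

static int
mpt_sample_register_handler(struct mpt_softc *mpt)
{
        mpt_handler_t handler;

        handler.reply_handler = mpt_sample_reply_handler;
        return (mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
            &sample_handler_id));
}
#endif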
430static int
431mpt_default_reply_handler(struct mpt_softc *mpt, request_t *req,
432 MSG_DEFAULT_REPLY *reply_frame)
433{
434 mpt_prt(mpt, "XXXX Default Handler Called. Req %p, Frame %p\n",
435 req, reply_frame);
436
437 if (reply_frame != NULL)
438 mpt_dump_reply_frame(mpt, reply_frame);
439
440 mpt_prt(mpt, "XXXX Reply Frame Ignored\n");
441
442 return (/*free_reply*/TRUE);
443}
444
445static int
446mpt_config_reply_handler(struct mpt_softc *mpt, request_t *req,
447 MSG_DEFAULT_REPLY *reply_frame)
448{
449 if (req != NULL) {
450
451 if (reply_frame != NULL) {
452 MSG_CONFIG *cfgp;
453 MSG_CONFIG_REPLY *reply;
454
455 cfgp = (MSG_CONFIG *)req->req_vbuf;

--- 10 unchanged lines hidden (view full) ---

466 wakeup(req);
467 }
468
469 return (/*free_reply*/TRUE);
470}
471
472static int
473mpt_handshake_reply_handler(struct mpt_softc *mpt, request_t *req,
474 MSG_DEFAULT_REPLY *reply_frame)
475{
476 /* Nothing to be done. */
477 return (/*free_reply*/TRUE);
478}
479
480static int
481mpt_event_reply_handler(struct mpt_softc *mpt, request_t *req,
482 MSG_DEFAULT_REPLY *reply_frame)
483{
484 int free_reply;
485
486 if (reply_frame == NULL) {
487 mpt_prt(mpt, "Event Handler: req %p - Unexpected NULL reply\n", req);
488 return (/*free_reply*/TRUE);
489 }
490
491 free_reply = TRUE;
492 switch (reply_frame->Function) {
493 case MPI_FUNCTION_EVENT_NOTIFICATION:
494 {
495 MSG_EVENT_NOTIFY_REPLY *msg;

--- 118 unchanged lines hidden (view full) ---

614 mpt_send_cmd(mpt, ack_req);
615}
616
617/***************************** Interrupt Handling *****************************/
618void
619mpt_intr(void *arg)
620{
621 struct mpt_softc *mpt;
622 uint32_t reply_desc;
623
624 mpt = (struct mpt_softc *)arg;
625 while ((reply_desc = mpt_pop_reply_queue(mpt)) != MPT_REPLY_EMPTY) {
626 request_t *req;
627 MSG_DEFAULT_REPLY *reply_frame;
628 uint32_t reply_baddr;
629 u_int cb_index;
630 u_int req_index;
631 int free_rf;
632
633 req = NULL;
634 reply_frame = NULL;
635 reply_baddr = 0;
636 if ((reply_desc & MPI_ADDRESS_REPLY_A_BIT) != 0) {
637 u_int offset;
638
639 /*
640 * Ensure that the reply frame is coherent.
641 */
642 reply_baddr = (reply_desc << 1);
643 offset = reply_baddr - (mpt->reply_phys & 0xFFFFFFFF);
644 bus_dmamap_sync_range(mpt->reply_dmat, mpt->reply_dmap,
645 offset, MPT_REPLY_SIZE,
646 BUS_DMASYNC_POSTREAD);
647 reply_frame = MPT_REPLY_OTOV(mpt, offset);
648 reply_desc = le32toh(reply_frame->MsgContext);
649 }
650 cb_index = MPT_CONTEXT_TO_CBI(reply_desc);
651 req_index = MPT_CONTEXT_TO_REQI(reply_desc);
652 if (req_index < MPT_MAX_REQUESTS(mpt))
653 req = &mpt->request_pool[req_index];
654
655 free_rf = mpt_reply_handlers[cb_index](mpt, req, reply_frame);
656
657 if (reply_frame != NULL && free_rf)
658 mpt_free_reply(mpt, reply_baddr);
659 }
660}
661
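/*
 * Added note (illustrative, based on the macros used above): a request's
 * MsgContext encodes both the owning handler and the request index, e.g.
 *
 *      enable_req->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_CONFIG);
 *
 * as in mpt_send_port_enable() below, so mpt_intr() can recover the
 * callback slot with MPT_CONTEXT_TO_CBI() and the request with
 * MPT_CONTEXT_TO_REQI() from a single 32-bit value.
 */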
662/******************************* Error Recovery *******************************/
663void
664mpt_complete_request_chain(struct mpt_softc *mpt, struct req_queue *chain,
665 u_int iocstatus)
666{

--- 6 unchanged lines hidden (view full) ---

673 while((req = TAILQ_FIRST(chain)) != NULL) {
674 MSG_REQUEST_HEADER *msg_hdr;
675 u_int cb_index;
676 TAILQ_REMOVE(chain, req, links);
677 msg_hdr = (MSG_REQUEST_HEADER *)req->req_vbuf;
678 ioc_status_frame.Function = msg_hdr->Function;
679 ioc_status_frame.MsgContext = msg_hdr->MsgContext;
680 cb_index = MPT_CONTEXT_TO_CBI(le32toh(msg_hdr->MsgContext));
681 mpt_reply_handlers[cb_index](mpt, req, &ioc_status_frame);
682 }
683}
684
685/********************************* Diagnostics ********************************/
686/*
687 * Perform a diagnostic dump of a reply frame.
688 */
689void

--- 23 unchanged lines hidden (view full) ---

713/* Busy wait for a doorbell to be read by the IOC */
714static int
715mpt_wait_db_ack(struct mpt_softc *mpt)
716{
717 int i;
718 for (i=0; i < MPT_MAX_WAIT; i++) {
719 if (!MPT_DB_IS_BUSY(mpt_rd_intr(mpt))) {
720 maxwait_ack = i > maxwait_ack ? i : maxwait_ack;
721 return MPT_OK;
722 }
723
724 DELAY(1000);
725 }
726 return MPT_FAIL;
727}
728
729/* Busy wait for a doorbell interrupt */
730static int
731mpt_wait_db_int(struct mpt_softc *mpt)
732{
733 int i;
734 for (i=0; i < MPT_MAX_WAIT; i++) {
735 if (MPT_DB_INTR(mpt_rd_intr(mpt))) {
736 maxwait_int = i > maxwait_int ? i : maxwait_int;
737 return MPT_OK;
738 }
739 DELAY(100);
740 }
741 return MPT_FAIL;
742}
743
744/* Warn if the IOC is not in the RUNNING state */
745void
746mpt_check_doorbell(struct mpt_softc *mpt)
747{
748 uint32_t db = mpt_rd_db(mpt);
749 if (MPT_STATE(db) != MPT_DB_STATE_RUNNING) {

--- 27 unchanged lines hidden (view full) ---

777static int
778mpt_soft_reset(struct mpt_softc *mpt)
779{
780 mpt_lprt(mpt, MPT_PRT_DEBUG, "soft reset\n");
781
782 /* Have to use hard reset if we are not in Running state */
783 if (MPT_STATE(mpt_rd_db(mpt)) != MPT_DB_STATE_RUNNING) {
784 mpt_prt(mpt, "soft reset failed: device not running\n");
785 return MPT_FAIL;
786 }
787
788 /* If the doorbell is in use, we don't have a chance of getting
789 * a word in, since the IOC probably crashed in message
790 * processing.  So don't waste our time.
791 */
792 if (MPT_DB_IS_IN_USE(mpt_rd_db(mpt))) {
793 mpt_prt(mpt, "soft reset failed: doorbell wedged\n");
794 return MPT_FAIL;
795 }
796
797 /* Send the reset request to the IOC */
798 mpt_write(mpt, MPT_OFFSET_DOORBELL,
799 MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET << MPI_DOORBELL_FUNCTION_SHIFT);
800 if (mpt_wait_db_ack(mpt) != MPT_OK) {
801 mpt_prt(mpt, "soft reset failed: ack timeout\n");
802 return MPT_FAIL;
803 }
804
805 /* Wait for the IOC to reload and come out of reset state */
806 if (mpt_wait_state(mpt, MPT_DB_STATE_READY) != MPT_OK) {
807 mpt_prt(mpt, "soft reset failed: device did not restart\n");
808 return MPT_FAIL;
809 }
810
811 return MPT_OK;
812}
813
814static int
815mpt_enable_diag_mode(struct mpt_softc *mpt)
816{

--- 157 unchanged lines hidden (view full) ---

974 * the specified wait condition, it should stop its wait.
975 */
976 mpt->reset_cnt++;
977 MPT_PERS_FOREACH(mpt, pers)
978 pers->reset(mpt, ret);
979 }
980
981 if (reinit != 0) {
982 ret = mpt_enable_ioc(mpt);
983 if (ret == MPT_OK) {
984 mpt_enable_ints(mpt);
985 }
986 }
987 if (ret != MPT_OK && retry_cnt++ < 2) {
988 goto again;
989 }
990 return ret;

--- 14 unchanged lines hidden (view full) ---

1005 if ((nxt = req->chain) != NULL) {
1006 req->chain = NULL;
1007 mpt_free_request(mpt, nxt); /* NB: recursion */
1008 }
1009 req->serno = 0;
1010 req->ccb = NULL;
1011 req->state = REQ_STATE_FREE;
1012 if (LIST_EMPTY(&mpt->ack_frames)) {
1013 TAILQ_INSERT_HEAD(&mpt->request_free_list, req, links);
1014 if (mpt->getreqwaiter != 0) {
1015 mpt->getreqwaiter = 0;
1016 wakeup(&mpt->request_free_list);
1017 }
1018 return;
1019 }
1020
1021 /*

--- 16 unchanged lines hidden (view full) ---

1038retry:
1039 req = TAILQ_FIRST(&mpt->request_free_list);
1040 if (req != NULL) {
1041 KASSERT(req == &mpt->request_pool[req->index],
1042 ("mpt_get_request: corrupted request free list\n"));
1043 TAILQ_REMOVE(&mpt->request_free_list, req, links);
1044 req->state = REQ_STATE_ALLOCATED;
1045 req->chain = NULL;
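                /*
                 * Added note: serial number 0 marks an idle request (see
                 * mpt_free_request() above), so the wrap check below skips it.
                 */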
1046 if ((req->serno = ++(mpt->cmd_serno)) == 0) {
1047 req->serno = ++(mpt->cmd_serno);
1048 }
1049 } else if (sleep_ok != 0) {
1050 mpt->getreqwaiter = 1;
1051 mpt_sleep(mpt, &mpt->request_free_list, PUSER, "mptgreq", 0);
1052 goto retry;
1053 }
1054 return req;
1055}
1056
1057/* Pass the command to the IOC */
1058void
1059mpt_send_cmd(struct mpt_softc *mpt, request_t *req)
1060{
1061 uint32_t *pReq;
1062
1063 pReq = req->req_vbuf;
1064 if (mpt->verbose > MPT_PRT_TRACE) {
1065 int offset;
1066 mpt_prt(mpt, "Send Request %d (0x%x):",
1067 req->index, req->req_pbuf);
1068 for (offset = 0; offset < mpt->request_frame_size; offset++) {
1069 if ((offset & 0x7) == 0) {
1070 mpt_prtc(mpt, "\n");
1071 mpt_prt(mpt, " ");
1072 }
1073 mpt_prtc(mpt, " %08x", pReq[offset]);
1074 }
1075 mpt_prtc(mpt, "\n");

--- 27 unchanged lines hidden (view full) ---

1103 int timeout;
1104 u_int saved_cnt;
1105
1106 /*
1107 * timeout is in ms. 0 indicates infinite wait.
1108 * Convert to ticks or 500us units depending on
1109 * our sleep mode.
1110 */
1111 if (sleep_ok != 0)
1112 timeout = (time_ms * hz) / 1000;
1113 else
1114 timeout = time_ms * 2;
1115 req->state |= REQ_STATE_NEED_WAKEUP;
1116 mask &= ~REQ_STATE_NEED_WAKEUP;
1117 saved_cnt = mpt->reset_cnt;
1118 while ((req->state & mask) != state
1119 && mpt->reset_cnt == saved_cnt) {
1120
1121 if (sleep_ok != 0) {
1122 error = mpt_sleep(mpt, req, PUSER, "mptreq", timeout);
1123 if (error == EWOULDBLOCK) {
1124 timeout = 0;
1125 break;
1126 }
1127 } else {
1128 if (time_ms != 0 && --timeout == 0) {
1129 mpt_prt(mpt, "mpt_wait_req timed out\n");
1130 break;
1131 }
1132 DELAY(500);
1133 mpt_intr(mpt);
1134 }
1135 }
1136 req->state &= ~REQ_STATE_NEED_WAKEUP;
1137 if (mpt->reset_cnt != saved_cnt)
1138 return (EIO);
1139 if (time_ms && timeout <= 0)
1140 return (ETIMEDOUT);
1141 return (0);
1142}
1143
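/*
 * Illustrative sketch (not part of this driver): the canonical pattern for
 * issuing a request through shared memory and waiting for its completion,
 * as used by mpt_send_port_enable() and similar routines below.  Message
 * construction is elided and the helper name is hypothetical.
 */
#if 0
static int
mpt_sample_command(struct mpt_softc *mpt)
{
        request_t *req;
        int error;

        req = mpt_get_request(mpt, /*sleep_ok*/FALSE);
        if (req == NULL)
                return (-1);

        /* ... fill in req->req_vbuf, including a MsgContext that encodes
         * req->index and the owning reply handler id ... */

        mpt_check_doorbell(mpt);
        mpt_send_cmd(mpt, req);
        error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
            /*sleep_ok*/FALSE, /*time_ms*/3000);
        if (error == 0)
                mpt_free_request(mpt, req);
        return (error);
}
#endif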
1144/*
1145 * Send a command to the IOC via the handshake register.
1146 *
1147 * Only done at initialization time and for certain unusual
1148 * commands such as device/bus reset as specified by LSI.

--- 282 unchanged lines hidden (view full) ---

1431 req->IOCStatus);
1432 error = EIO;
1433 break;
1434 }
1435 mpt_free_request(mpt, req);
1436 return (error);
1437}
1438
1439#define CFG_DATA_OFF 128
1440
1441int
1442mpt_read_cfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress,
1443 CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok,
1444 int timeout_ms)
1445{
1446 request_t *req;
1447 int error;
1448
1449 req = mpt_get_request(mpt, sleep_ok);
1450 if (req == NULL) {
1451 mpt_prt(mpt, "mpt_read_cfg_page: Get request failed!\n");
1452 return (-1);
1453 }
1454
1455 error = mpt_issue_cfg_req(mpt, req, Action, hdr->PageVersion,
1456 hdr->PageLength, hdr->PageNumber,
1457 hdr->PageType & MPI_CONFIG_PAGETYPE_MASK,
1458 PageAddress, req->req_pbuf + CFG_DATA_OFF,
1459 len, sleep_ok, timeout_ms);
1460 if (error != 0) {
1461 mpt_prt(mpt, "read_cfg_page(%d) timed out\n", Action);
1462 return (-1);
1463 }
1464
1465 if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
1466 mpt_prt(mpt, "mpt_read_cfg_page: Config Info Status %x\n",
1467 req->IOCStatus);
1468 mpt_free_request(mpt, req);
1469 return (-1);
1470 }
1471 bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
1472 BUS_DMASYNC_POSTREAD);
1473 memcpy(hdr, ((uint8_t *)req->req_vbuf)+CFG_DATA_OFF, len);
1474 mpt_free_request(mpt, req);
1475 return (0);
1476}
1477
1478int
1479mpt_write_cfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress,
1480 CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok,
1481 int timeout_ms)

--- 10 unchanged lines hidden (view full) ---

1492 return (-1);
1493 }
1494 hdr->PageType &= MPI_CONFIG_PAGETYPE_MASK;
1495
1496 req = mpt_get_request(mpt, sleep_ok);
1497 if (req == NULL)
1498 return (-1);
1499
1500 memcpy(((caddr_t)req->req_vbuf)+CFG_DATA_OFF, hdr, len);
1501 /* Restore stripped out attributes */
1502 hdr->PageType |= hdr_attr;
1503
1504 error = mpt_issue_cfg_req(mpt, req, Action, hdr->PageVersion,
1505 hdr->PageLength, hdr->PageNumber,
1506 hdr->PageType & MPI_CONFIG_PAGETYPE_MASK,
1507 PageAddress, req->req_pbuf + CFG_DATA_OFF,
1508 len, sleep_ok, timeout_ms);
1509 if (error != 0) {
1510 mpt_prt(mpt, "mpt_write_cfg_page timed out\n");
1511 return (-1);
1512 }
1513
1514 if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
1515 mpt_prt(mpt, "mpt_write_cfg_page: Config Info Status %x\n",

--- 153 unchanged lines hidden (view full) ---

1669 }
1670
1671 mpt_raid_wakeup(mpt);
1672
1673 return (0);
1674}
1675
1676/*
1677 * Read SCSI configuration information
1678 */
1679static int
1680mpt_read_config_info_spi(struct mpt_softc *mpt)
1681{
1682 int rv, i;
1683
1684 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 0,
1685 0, &mpt->mpt_port_page0.Header,
1686 /*sleep_ok*/FALSE, /*timeout_ms*/5000);
1687 if (rv)
1688 return (-1);
1689 mpt_lprt(mpt, MPT_PRT_DEBUG,
1690 "SPI Port Page 0 Header: %x %x %x %x\n",
1691 mpt->mpt_port_page0.Header.PageVersion,
1692 mpt->mpt_port_page0.Header.PageLength,
1693 mpt->mpt_port_page0.Header.PageNumber,
1694 mpt->mpt_port_page0.Header.PageType);
1695
1696 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 1,
1697 0, &mpt->mpt_port_page1.Header,
1698 /*sleep_ok*/FALSE, /*timeout_ms*/5000);
1699 if (rv)
1700 return (-1);
1701
1702 mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1 Header: %x %x %x %x\n",
1703 mpt->mpt_port_page1.Header.PageVersion,
1704 mpt->mpt_port_page1.Header.PageLength,
1705 mpt->mpt_port_page1.Header.PageNumber,
1706 mpt->mpt_port_page1.Header.PageType);
1707
1708 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 2,
1709 /*PageAddress*/0, &mpt->mpt_port_page2.Header,
1710 /*sleep_ok*/FALSE, /*timeout_ms*/5000);
1711 if (rv)
1712 return (-1);
1713
1714 mpt_lprt(mpt, MPT_PRT_DEBUG,
1715 "SPI Port Page 2 Header: %x %x %x %x\n",
1716 mpt->mpt_port_page2.Header.PageVersion,
1717 mpt->mpt_port_page2.Header.PageLength,
1718 mpt->mpt_port_page2.Header.PageNumber,
1719 mpt->mpt_port_page2.Header.PageType);
1720
1721 for (i = 0; i < 16; i++) {
1722 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
1723 0, i, &mpt->mpt_dev_page0[i].Header,
1724 /*sleep_ok*/FALSE, /*timeout_ms*/5000);
1725 if (rv)
1726 return (-1);
1727
1728 mpt_lprt(mpt, MPT_PRT_DEBUG,
1729 "SPI Target %d Device Page 0 Header: %x %x %x %x\n",
1730 i, mpt->mpt_dev_page0[i].Header.PageVersion,
1731 mpt->mpt_dev_page0[i].Header.PageLength,
1732 mpt->mpt_dev_page0[i].Header.PageNumber,
1733 mpt->mpt_dev_page0[i].Header.PageType);
1734
1735 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
1736 1, i, &mpt->mpt_dev_page1[i].Header,
1737 /*sleep_ok*/FALSE, /*timeout_ms*/5000);
1738 if (rv)
1739 return (-1);
1740
1741 mpt_lprt(mpt, MPT_PRT_DEBUG,
1742 "SPI Target %d Device Page 1 Header: %x %x %x %x\n",
1743 i, mpt->mpt_dev_page1[i].Header.PageVersion,
1744 mpt->mpt_dev_page1[i].Header.PageLength,
1745 mpt->mpt_dev_page1[i].Header.PageNumber,
1746 mpt->mpt_dev_page1[i].Header.PageType);
1747 }
1748
1749 /*
1750 * At this point, we don't *have* to fail. As long as we have
1751 * valid config header information, we can (barely) lurch
1752 * along.
1753 */
1754
1755 rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1756 &mpt->mpt_port_page0.Header,
1757 sizeof(mpt->mpt_port_page0),
1758 /*sleep_ok*/FALSE, /*timeout_ms*/5000);
1759 if (rv) {
1760 mpt_prt(mpt, "failed to read SPI Port Page 0\n");
1761 } else {
1762 mpt_lprt(mpt, MPT_PRT_DEBUG,
1763 "SPI Port Page 0: Capabilities %x PhysicalInterface %x\n",
1764 mpt->mpt_port_page0.Capabilities,
1765 mpt->mpt_port_page0.PhysicalInterface);
1766 }
1767
1768 rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1769 &mpt->mpt_port_page1.Header,
1770 sizeof(mpt->mpt_port_page1),
1771 /*sleep_ok*/FALSE, /*timeout_ms*/5000);
1772 if (rv) {
1773 mpt_prt(mpt, "failed to read SPI Port Page 1\n");
1774 } else {
1775 mpt_lprt(mpt, MPT_PRT_DEBUG,
1776 "SPI Port Page 1: Configuration %x OnBusTimerValue %x\n",
1777 mpt->mpt_port_page1.Configuration,
1778 mpt->mpt_port_page1.OnBusTimerValue);
1779 }
1780
1781 rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1782 &mpt->mpt_port_page2.Header,
1783 sizeof(mpt->mpt_port_page2),
1784 /*sleep_ok*/FALSE, /*timeout_ms*/5000);
1785 if (rv) {
1786 mpt_prt(mpt, "failed to read SPI Port Page 2\n");
1787 } else {
1788 mpt_lprt(mpt, MPT_PRT_DEBUG,
1789 "SPI Port Page 2: Flags %x Settings %x\n",
1790 mpt->mpt_port_page2.PortFlags,
1791 mpt->mpt_port_page2.PortSettings);
1792 for (i = 0; i < 16; i++) {
1793 mpt_lprt(mpt, MPT_PRT_DEBUG,
1794 "SPI Port Page 2 Tgt %d: timo %x SF %x Flags %x\n",
1795 i, mpt->mpt_port_page2.DeviceSettings[i].Timeout,
1796 mpt->mpt_port_page2.DeviceSettings[i].SyncFactor,
1797 mpt->mpt_port_page2.DeviceSettings[i].DeviceFlags);
1798 }
1799 }
1800
1801 for (i = 0; i < 16; i++) {
1802 rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/i,
1803 &mpt->mpt_dev_page0[i].Header,
1804 sizeof(*mpt->mpt_dev_page0),
1805 /*sleep_ok*/FALSE,
1806 /*timeout_ms*/5000);
1807 if (rv) {
1808 mpt_prt(mpt,
1809 "cannot read SPI Tgt %d Device Page 0\n", i);
1810 continue;
1811 }
1812 mpt_lprt(mpt, MPT_PRT_DEBUG,
1813 "SPI Tgt %d Page 0: NParms %x Information %x\n",
1814 i, mpt->mpt_dev_page0[i].NegotiatedParameters,
1815 mpt->mpt_dev_page0[i].Information);
1816
1817 rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/i,
1818 &mpt->mpt_dev_page1[i].Header,
1819 sizeof(*mpt->mpt_dev_page1),
1820 /*sleep_ok*/FALSE,
1821 /*timeout_ms*/5000);
1822 if (rv) {
1823 mpt_prt(mpt,
1824 "cannot read SPI Tgt %d Device Page 1\n", i);
1825 continue;
1826 }
1827 mpt_lprt(mpt, MPT_PRT_DEBUG,
1828 "SPI Tgt %d Page 1: RParms %x Configuration %x\n",
1829 i, mpt->mpt_dev_page1[i].RequestedParameters,
1830 mpt->mpt_dev_page1[i].Configuration);
1831 }
1832 return (0);
1833}
1834
1835/*
1836 * Validate SPI configuration information.
1837 *
1838 * In particular, validate SPI Port Page 1.
1839 */
1840static int
1841mpt_set_initial_config_spi(struct mpt_softc *mpt)
1842{
1843 int i, pp1val = ((1 << mpt->mpt_ini_id) << 16) | mpt->mpt_ini_id;
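        /*
         * Added note (assumption from the expression above): the Port Page 1
         * Configuration word carries the port's SCSI ID in its low byte and a
         * one-hot ID bitmask in its upper 16 bits, so an initiator ID of 7
         * yields pp1val == 0x00800007.
         */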
1844 int error;
1845
1846 mpt->mpt_disc_enable = 0xff;
1847 mpt->mpt_tag_enable = 0;
1848
1849 if (mpt->mpt_port_page1.Configuration != pp1val) {
1850 CONFIG_PAGE_SCSI_PORT_1 tmp;
1851
1852 mpt_prt(mpt,
1853 "SPI Port Page 1 Config value bad (%x) - should be %x\n",
1854 mpt->mpt_port_page1.Configuration, pp1val);
1855 tmp = mpt->mpt_port_page1;
1856 tmp.Configuration = pp1val;
1857 error = mpt_write_cur_cfg_page(mpt, /*PageAddress*/0,
1858 &tmp.Header, sizeof(tmp),
1859 /*sleep_ok*/FALSE,
1860 /*timeout_ms*/5000);
1861 if (error)
1862 return (-1);
1863 error = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
1864 &tmp.Header, sizeof(tmp),
1865 /*sleep_ok*/FALSE,
1866 /*timeout_ms*/5000);
1867 if (error)
1868 return (-1);
1869 if (tmp.Configuration != pp1val) {
1870 mpt_prt(mpt,
1871 "failed to reset SPI Port Page 1 Config value\n");
1872 return (-1);
1873 }
1874 mpt->mpt_port_page1 = tmp;
1875 }
1876
1877 for (i = 0; i < 16; i++) {
1878 CONFIG_PAGE_SCSI_DEVICE_1 tmp;
1879 tmp = mpt->mpt_dev_page1[i];
1880 tmp.RequestedParameters = 0;
1881 tmp.Configuration = 0;
1882 mpt_lprt(mpt, MPT_PRT_DEBUG,
1883 "Set Tgt %d SPI DevicePage 1 values to %x 0 %x\n",
1884 i, tmp.RequestedParameters, tmp.Configuration);
1885 error = mpt_write_cur_cfg_page(mpt, /*PageAddress*/i,
1886 &tmp.Header, sizeof(tmp),
1887 /*sleep_ok*/FALSE,
1888 /*timeout_ms*/5000);
1889 if (error)
1890 return (-1);
1891 error = mpt_read_cur_cfg_page(mpt, /*PageAddress*/i,
1892 &tmp.Header, sizeof(tmp),
1893 /*sleep_ok*/FALSE,
1894 /*timeout_ms*/5000);
1895 if (error)
1896 return (-1);
1897 mpt->mpt_dev_page1[i] = tmp;
1898 mpt_lprt(mpt, MPT_PRT_DEBUG,
1899 "SPI Tgt %d Page 1: RParm %x Configuration %x\n", i,
1900 mpt->mpt_dev_page1[i].RequestedParameters,
1901 mpt->mpt_dev_page1[i].Configuration);
1902 }
1903 return (0);
1904}
1905
1906/*
1907 * Enable IOC port
1908 */
1909static int
1910mpt_send_port_enable(struct mpt_softc *mpt, int port)
1911{
1912 request_t *req;
1913 MSG_PORT_ENABLE *enable_req;
1914 int error;
1915
1916 req = mpt_get_request(mpt, /*sleep_ok*/FALSE);
1917 if (req == NULL)
1918 return (-1);
1919
1920 enable_req = req->req_vbuf;
1921 bzero(enable_req, sizeof *enable_req);
1922
1923 enable_req->Function = MPI_FUNCTION_PORT_ENABLE;
1924 enable_req->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_CONFIG);
1925 enable_req->PortNumber = port;
1926
1927 mpt_check_doorbell(mpt);
1928 mpt_lprt(mpt, MPT_PRT_DEBUG, "enabling port %d\n", port);
1929
1930 mpt_send_cmd(mpt, req);
1931 error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
1932 /*sleep_ok*/FALSE,
1933 /*time_ms*/(mpt->is_sas || mpt->is_fc)? 30000 : 3000);
1934 if (error != 0) {
1935 mpt_prt(mpt, "port enable timed out\n");
1936 return (-1);
1937 }
1938 mpt_free_request(mpt, req);
1939 return (0);
1940}
1941
1942/*
1943 * Enable/Disable asynchronous event reporting.
1944 *
1945 * NB: this is the first command we send via shared memory
1946 * instead of the handshake register.

--- 40 unchanged lines hidden (view full) ---

1987 /* Mask all interrupts */
1988 mpt_write(mpt, MPT_OFFSET_INTR_MASK,
1989 MPT_INTR_REPLY_MASK | MPT_INTR_DB_MASK);
1990}
1991
1992static void
1993mpt_sysctl_attach(struct mpt_softc *mpt)
1994{
1995 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
1996 struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);
1997
1998 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1999 "debug", CTLFLAG_RW, &mpt->verbose, 0,
2000 "Debugging/Verbose level");
2001}
2002
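/*
 * Added note (assumption, path not stated in this file): with the standard
 * device sysctl tree this knob is typically reachable as dev.mpt.<unit>.debug,
 * e.g. "sysctl dev.mpt.0.debug=3" to raise the verbosity level.
 */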
2003int
2004mpt_attach(struct mpt_softc *mpt)
2005{
2006 int i;
2007
2008 for (i = 0; i < MPT_MAX_PERSONALITIES; i++) {
2009 struct mpt_personality *pers;
2010 int error;
2011
2012 pers = mpt_personalities[i];
2013 if (pers == NULL)
2014 continue;
2015
2016 if (pers->probe(mpt) == 0) {
2017 error = pers->attach(mpt);
2018 if (error != 0) {
2019 mpt_detach(mpt);
2020 return (error);
2021 }
2022 mpt->mpt_pers_mask |= (0x1 << pers->id);
2023 pers->use_count++;
2024 }
2025 }
2026
2027 return (0);
2028}
2029
2030int
2031mpt_shutdown(struct mpt_softc *mpt)
2032{
2033 struct mpt_personality *pers;
2034
2035 MPT_PERS_FOREACH_REVERSE(mpt, pers)
2036 pers->shutdown(mpt);
2037
2038 mpt_reset(mpt, /*reinit*/FALSE);
2039 return (0);
2040}
2041
2042int
2043mpt_detach(struct mpt_softc *mpt)
2044{
2045 struct mpt_personality *pers;
2046

--- 10 unchanged lines hidden (view full) ---

2057mpt_core_load(struct mpt_personality *pers)
2058{
2059 int i;
2060
2061 /*
2062 * Setup core handlers and insert the default handler
2063 * into all "empty slots".
2064 */
2065 for (i = 0; i < MPT_NUM_REPLY_HANDLERS; i++)
2066 mpt_reply_handlers[i] = mpt_default_reply_handler;
2067
2068 mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_EVENTS)] =
2069 mpt_event_reply_handler;
2070 mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_CONFIG)] =
2071 mpt_config_reply_handler;
2072 mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_HANDSHAKE)] =
2073 mpt_handshake_reply_handler;
2074
2075 return (0);
2076}
2077
2078/*
2079 * Initialize per-instance driver data and perform
2080 * initial controller configuration.
2081 */
2082int
2083mpt_core_attach(struct mpt_softc *mpt)
2084{
2085 int val;
2086 int error;
2087
2088 LIST_INIT(&mpt->ack_frames);
2089
2090 /* Put all request buffers on the free list */
2091 TAILQ_INIT(&mpt->request_pending_list);
2092 TAILQ_INIT(&mpt->request_free_list);
2093 for (val = 0; val < MPT_MAX_REQUESTS(mpt); val++)
2094 mpt_free_request(mpt, &mpt->request_pool[val]);
2095
2096 mpt_sysctl_attach(mpt);
2097
2098 mpt_lprt(mpt, MPT_PRT_DEBUG, "doorbell req = %s\n",
2099 mpt_ioc_diag(mpt_read(mpt, MPT_OFFSET_DOORBELL)));
2100
2101 error = mpt_configure_ioc(mpt);
2102
2103 return (error);
2104}
2105
2106void
2107mpt_core_shutdown(struct mpt_softc *mpt)
2108{
2109}
2110
2111void
2112mpt_core_detach(struct mpt_softc *mpt)
2113{
2114}
2115
2116int
2117mpt_core_unload(struct mpt_personality *pers)
2118{
2119 /* Unload is always successful. */
2120 return (0);
2121}

--- 135 unchanged lines hidden (view full) ---

2257 * No need to reset if the IOC is already in the READY state.
2258 *
2259 * Force reset if initialization failed previously.
2260 * Note that a hard_reset of the second channel of a '929
2261 * will stop operation of the first channel. Hopefully, if the
2262 * first channel is ok, the second will not require a hard
2263 * reset.
2264 */
2265 if (needreset || (mpt_rd_db(mpt) & MPT_DB_STATE_MASK) !=
2266 MPT_DB_STATE_READY) {
2267 if (mpt_reset(mpt, /*reinit*/FALSE) != MPT_OK)
2268 continue;
2269 }
2270 needreset = 0;
2271
2272 if (mpt_get_iocfacts(mpt, &facts) != MPT_OK) {
2273 mpt_prt(mpt, "mpt_get_iocfacts failed\n");
2274 needreset = 1;
2275 continue;
2276 }

--- 126 unchanged lines hidden (view full) ---

2403 mpt->mpt_proto_flags = pfp.ProtocolFlags;
2404 if (pfp.PortType != MPI_PORTFACTS_PORTTYPE_SCSI &&
2405 pfp.PortType != MPI_PORTFACTS_PORTTYPE_SAS &&
2406 pfp.PortType != MPI_PORTFACTS_PORTTYPE_FC) {
2407 mpt_prt(mpt, "Unsupported Port Type (%x)\n",
2408 pfp.PortType);
2409 return (ENXIO);
2410 }
2411 if (!(pfp.ProtocolFlags & MPI_PORTFACTS_PROTOCOL_INITIATOR)) {
2412 mpt_prt(mpt, "initiator role unsupported\n");
2413 return (ENXIO);
2414 }
2415 if (pfp.PortType == MPI_PORTFACTS_PORTTYPE_FC) {
2416 mpt->is_fc = 1;
2417 mpt->is_sas = 0;
2418 } else if (pfp.PortType == MPI_PORTFACTS_PORTTYPE_SAS) {
2419 mpt->is_fc = 0;
2420 mpt->is_sas = 1;
2421 } else {
2422 mpt->is_fc = 0;
2423 mpt->is_sas = 0;
2424 }
2425 mpt->mpt_ini_id = pfp.PortSCSIID;
2426 mpt->mpt_max_devices = pfp.MaxDevices;
2427
2428 if (mpt_enable_ioc(mpt) != 0) {
2429 mpt_prt(mpt, "Unable to initialize IOC\n");
2430 return (ENXIO);
2431 }
2432
2433 /*
2434 * Read and set up initial configuration information
2435 * (IOC and SPI only for now)
2436 *
2437 * XXX Should figure out what "personalities" are
2438 * available and defer all initialization junk to
2439 * them.
2440 */
2441 mpt_read_config_info_ioc(mpt);
2442
2443 if (mpt->is_fc == 0 && mpt->is_sas == 0) {
2444 if (mpt_read_config_info_spi(mpt)) {
2445 return (EIO);
2446 }
2447 if (mpt_set_initial_config_spi(mpt)) {
2448 return (EIO);
2449 }
2450 }
2451
2452 /* Everything worked */
2453 break;
2454 }
2455
2456 if (try >= MPT_MAX_TRYS) {
2457 mpt_prt(mpt, "failed to initialize IOC\n");
2458 return (EIO);
2459 }
2460
2461 mpt_lprt(mpt, MPT_PRT_DEBUG, "enabling interrupts\n");
2462
2463 mpt_enable_ints(mpt);
2464 return (0);
2465}
2466
2467static int
2468mpt_enable_ioc(struct mpt_softc *mpt)
2469{
2470 uint32_t pptr;
2471 int val;
2472
2473 if (mpt_send_ioc_init(mpt, MPI_WHOINIT_HOST_DRIVER) != MPT_OK) {
2474 mpt_prt(mpt, "mpt_send_ioc_init failed\n");
2475 return (EIO);
2476 }

--- 14 unchanged lines hidden (view full) ---

2491 for (val = 0, pptr = mpt->reply_phys;
2492 (pptr + MPT_REPLY_SIZE) < (mpt->reply_phys + PAGE_SIZE);
2493 pptr += MPT_REPLY_SIZE) {
2494 mpt_free_reply(mpt, pptr);
2495 if (++val == mpt->mpt_global_credits - 1)
2496 break;
2497 }
2498
2499 /*
2500 * Enable asynchronous event reporting
2501 */
2502 mpt_send_event_request(mpt, 1);
2503
2504 /*
2505 * Enable the port
2506 */
2507 if (mpt_send_port_enable(mpt, 0) != MPT_OK) {
2508 mpt_prt(mpt, "failed to enable port 0\n");
2509 return (ENXIO);
2510 }
2511 mpt_lprt(mpt, MPT_PRT_DEBUG, "enabled port 0\n");
2512
2513
2514 return (MPT_OK);
2515}