Lines Matching defs:mpt (symbol matches in sys/dev/mpt/mpt.c, FreeBSD releng/11.0; the number leading each entry is its line number in that file)

99 __FBSDID("$FreeBSD: releng/11.0/sys/dev/mpt/mpt.c 298955 2016-05-03 03:41:25Z pfg $");
101 #include <dev/mpt/mpt.h>
102 #include <dev/mpt/mpt_cam.h> /* XXX For static handler registration */
103 #include <dev/mpt/mpt_raid.h> /* XXX For static handler registration */
105 #include <dev/mpt/mpilib/mpi.h>
106 #include <dev/mpt/mpilib/mpi_ioc.h>
107 #include <dev/mpt/mpilib/mpi_fc.h>
108 #include <dev/mpt/mpilib/mpi_targ.h>
126 static void mpt_send_event_ack(struct mpt_softc *mpt, request_t *ack_req,
128 static int mpt_send_event_request(struct mpt_softc *mpt, int onoff);
129 static int mpt_soft_reset(struct mpt_softc *mpt);
130 static void mpt_hard_reset(struct mpt_softc *mpt);
131 static int mpt_dma_buf_alloc(struct mpt_softc *mpt);
132 static void mpt_dma_buf_free(struct mpt_softc *mpt);
133 static int mpt_configure_ioc(struct mpt_softc *mpt, int, int);
134 static int mpt_enable_ioc(struct mpt_softc *mpt, int);
148 mpt_pers_find(struct mpt_softc *mpt, u_int start_at)
154 && (mpt->mpt_pers_mask & (0x1 << start_at)) == 0) {
166 mpt_pers_find_reverse(struct mpt_softc *mpt, u_int start_at)
169 && (mpt->mpt_pers_mask & (0x1 << start_at)) == 0) {
177 #define MPT_PERS_FOREACH(mpt, pers) \
178 for (pers = mpt_pers_find(mpt, /*start_at*/0); \
180 pers = mpt_pers_find(mpt, /*start_at*/pers->id+1))
182 #define MPT_PERS_FOREACH_REVERSE(mpt, pers) \
183 for (pers = mpt_pers_find_reverse(mpt, MPT_MAX_PERSONALITIES-1);\
185 pers = mpt_pers_find_reverse(mpt, /*start_at*/pers->id-1))
243 #define MPT_PERS_ATTACHED(pers, mpt) ((mpt)->mpt_pers_mask & (0x1 << pers->id))
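
The three macros above drive every personality fan-out in the core: mpt_pers_find() scans mpt_pers_mask for the next attached personality at or after start_at, and the FOREACH wrappers walk that mask forward or backward. A minimal usage sketch in the driver's own style, mirroring the ready broadcast at listing lines 386-388 (struct and member names taken from this listing):

    /*
     * Sketch: broadcast a callback to every attached personality.
     * pers->ready falls back to mpt_stdready() when a personality
     * leaves the hook unset.
     */
    static void
    example_broadcast_ready(struct mpt_softc *mpt)
    {
        struct mpt_personality *pers;

        MPT_PERS_FOREACH(mpt, pers)
            pers->ready(mpt);
    }
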
311 mpt_stdprobe(struct mpt_softc *mpt)
319 mpt_stdattach(struct mpt_softc *mpt)
327 mpt_stdenable(struct mpt_softc *mpt)
335 mpt_stdready(struct mpt_softc *mpt)
341 mpt_stdevent(struct mpt_softc *mpt, request_t *req, MSG_EVENT_NOTIFY_REPLY *msg)
344 mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_stdevent: 0x%x\n", msg->Event & 0xFF);
350 mpt_stdreset(struct mpt_softc *mpt, int type)
356 mpt_stdshutdown(struct mpt_softc *mpt)
362 mpt_stddetach(struct mpt_softc *mpt)
383 struct mpt_softc *mpt;
386 TAILQ_FOREACH(mpt, &mpt_tailq, links) {
387 MPT_PERS_FOREACH(mpt, pers)
388 pers->ready(mpt);
406 mpt_register_handler(struct mpt_softc *mpt, mpt_handler_type type,
453 mpt_prt(mpt, "mpt_register_handler unknown type %d\n", type);
460 mpt_deregister_handler(struct mpt_softc *mpt, mpt_handler_type type,
477 mpt_prt(mpt, "mpt_deregister_handler unknown type %d\n", type);
484 mpt_default_reply_handler(struct mpt_softc *mpt, request_t *req,
488 mpt_prt(mpt,
493 mpt_dump_reply_frame(mpt, reply_frame);
495 mpt_prt(mpt, "Reply Frame Ignored\n");
501 mpt_config_reply_handler(struct mpt_softc *mpt, request_t *req,
520 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
527 mpt_free_request(mpt, req);
535 mpt_handshake_reply_handler(struct mpt_softc *mpt, request_t *req,
544 mpt_event_reply_handler(struct mpt_softc *mpt, request_t *req,
566 MPT_PERS_FOREACH(mpt, pers)
567 handled += pers->event(mpt, req, msg);
569 if (handled == 0 && mpt->mpt_pers_mask == 0) {
570 mpt_lprt(mpt, MPT_PRT_INFO,
575 mpt_lprt(mpt,
587 ack_req = mpt_get_request(mpt, FALSE);
593 LIST_INSERT_HEAD(&mpt->ack_frames, evtf, links);
597 mpt_send_event_ack(mpt, ack_req, msg, context);
606 mpt_lprt(mpt, MPT_PRT_DEBUG, "enable port reply\n");
611 mpt_prt(mpt, "unknown event function: %x\n",
629 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
630 mpt_free_request(mpt, req);
631 mpt_prt(mpt, "event_reply %x for req %p:%u NOT a continuation",
636 mpt_prtc(mpt, " Event=0x%x AckReq=%d",
640 mpt_prt(mpt, "event_reply %x for %p:%u IS a continuation",
645 mpt_prtc(mpt, " Event=0x%x AckReq=%d",
648 mpt_prtc(mpt, "\n");
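
An event reply with AckReq set needs a request frame to carry the acknowledgement. When mpt_get_request() comes back empty (line 587), the reply frame itself is parked on mpt->ack_frames and the ack is issued later from mpt_free_request() (lines 1244-1253). A hedged sketch of that deferral, with the mpt_evtf_record type and field names assumed from the fragments above:

    /* Defer the event ack when no request frame is free right now. */
    ack_req = mpt_get_request(mpt, /*sleep_ok*/FALSE);
    if (ack_req == NULL) {
        mpt_evtf_record_t *evtf;        /* type name assumed */

        evtf = (mpt_evtf_record_t *)reply_frame;
        evtf->context = context;        /* remembered for the late ack */
        LIST_INSERT_HEAD(&mpt->ack_frames, evtf, links);
    } else {
        mpt_send_event_ack(mpt, ack_req, msg, context);
    }
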
658 mpt_core_event(struct mpt_softc *mpt, request_t *req,
662 mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_core_event: 0x%x\n",
672 mpt_prt(mpt, "EvtLogData: IOCLogInfo: 0x%08x\n",
674 mpt_prt(mpt, "\tEvtLogData: Event Data:");
676 mpt_prtc(mpt, " %08x", msg->Data[i]);
677 mpt_prtc(mpt, "\n");
696 mpt_send_event_ack(struct mpt_softc *mpt, request_t *ack_req,
707 mpt_check_doorbell(mpt);
708 mpt_send_cmd(mpt, ack_req);
715 struct mpt_softc *mpt;
719 mpt = (struct mpt_softc *)arg;
720 mpt_lprt(mpt, MPT_PRT_DEBUG2, "enter mpt_intr\n");
721 MPT_LOCK_ASSERT(mpt);
723 while ((reply_desc = mpt_pop_reply_queue(mpt)) != MPT_REPLY_EMPTY) {
742 offset = reply_baddr - (mpt->reply_phys & 0xFFFFFFFF);
743 bus_dmamap_sync_range(mpt->reply_dmat,
744 mpt->reply_dmap, offset, MPT_REPLY_SIZE,
746 reply_frame = MPT_REPLY_OTOV(mpt, offset);
753 mpt_lprt(mpt, MPT_PRT_DEBUG1, "Context Reply: 0x%08x\n",
762 if (mpt->tgt_cmd_ptrs == NULL) {
763 mpt_prt(mpt,
768 if (ctxt_idx >= mpt->tgt_cmds_allocated) {
769 mpt_prt(mpt,
776 req = mpt->tgt_cmd_ptrs[ctxt_idx];
778 mpt_prt(mpt, "no request backpointer "
791 req->index | mpt->scsi_tgt_handler_id;
795 mpt_prt(mpt, "LAN CONTEXT REPLY: 0x%08x\n",
800 mpt_prt(mpt, "Context Reply 0x%08x?\n", type);
814 if (req_index < MPT_MAX_REQUESTS(mpt)) {
815 req = &mpt->request_pool[req_index];
817 mpt_prt(mpt, "WARN: mpt_intr index == %d (reply_desc =="
821 bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
823 free_rf = mpt_reply_handlers[cb_index](mpt, req,
827 bus_dmamap_sync_range(mpt->reply_dmat,
828 mpt->reply_dmap, offset, MPT_REPLY_SIZE,
830 mpt_free_reply(mpt, reply_baddr);
836 if (mpt->disabled) {
837 mpt_disable_ints(mpt);
844 mpt_lprt(mpt, MPT_PRT_DEBUG2, "exit mpt_intr\n");
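
At the bottom of the ISR loop each decoded reply is pushed through the handler table; a nonzero return asks the ISR to hand the reply frame back to the IOC's free FIFO. The handlers' trailing arguments are truncated in this listing, so the sketch below assumes them (reply descriptor, then reply frame), matching the call shapes at lines 823 and 868:

    /* Dispatch one reply, then recycle its frame if the handler is done. */
    free_rf = mpt_reply_handlers[cb_index](mpt, req, reply_desc, reply_frame);
    if (free_rf && reply_baddr != 0) {
        bus_dmamap_sync_range(mpt->reply_dmat, mpt->reply_dmap,
            offset, MPT_REPLY_SIZE, BUS_DMASYNC_PREREAD); /* sync op assumed */
        mpt_free_reply(mpt, reply_baddr);
    }
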
849 mpt_complete_request_chain(struct mpt_softc *mpt, struct req_queue *chain,
862 bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
868 mpt_reply_handlers[cb_index](mpt, req, msg_hdr->MsgContext,
870 if (mpt_req_on_pending_list(mpt, req) != 0)
880 mpt_dump_reply_frame(struct mpt_softc *mpt, MSG_DEFAULT_REPLY *reply_frame)
883 mpt_prt(mpt, "Address Reply:\n");
888 static __inline uint32_t mpt_rd_db(struct mpt_softc *mpt);
889 static __inline uint32_t mpt_rd_intr(struct mpt_softc *mpt);
892 mpt_rd_db(struct mpt_softc *mpt)
895 return mpt_read(mpt, MPT_OFFSET_DOORBELL);
899 mpt_rd_intr(struct mpt_softc *mpt)
902 return mpt_read(mpt, MPT_OFFSET_INTR_STATUS);
907 mpt_wait_db_ack(struct mpt_softc *mpt)
912 if (!MPT_DB_IS_BUSY(mpt_rd_intr(mpt))) {
923 mpt_wait_db_int(struct mpt_softc *mpt)
928 if (MPT_DB_INTR(mpt_rd_intr(mpt))) {
939 mpt_check_doorbell(struct mpt_softc *mpt)
941 uint32_t db = mpt_rd_db(mpt);
944 mpt_prt(mpt, "Device not running\n");
951 mpt_wait_state(struct mpt_softc *mpt, enum DB_STATE_BITS state)
956 uint32_t db = mpt_rd_db(mpt);
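
mpt_wait_db_ack(), mpt_wait_db_int() and mpt_wait_state() are the polling primitives under every reset and handshake: the first two spin on MPT_OFFSET_INTR_STATUS, the third on the state bits of the doorbell register. Typical use, as in mpt_soft_reset() below (lines 1001-1002):

    /* After kicking a reset, poll until the IOC reports READY again. */
    if (mpt_wait_state(mpt, MPT_DB_STATE_READY) != MPT_OK)
        mpt_prt(mpt, "device did not return to READY\n");
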
968 static int mpt_download_fw(struct mpt_softc *mpt);
972 mpt_soft_reset(struct mpt_softc *mpt)
975 mpt_lprt(mpt, MPT_PRT_DEBUG, "soft reset\n");
978 if (MPT_STATE(mpt_rd_db(mpt)) != MPT_DB_STATE_RUNNING) {
979 mpt_prt(mpt, "soft reset failed: device not running\n");
987 if (MPT_DB_IS_IN_USE(mpt_rd_db(mpt))) {
988 mpt_prt(mpt, "soft reset failed: doorbell wedged\n");
993 mpt_write(mpt, MPT_OFFSET_DOORBELL,
995 if (mpt_wait_db_ack(mpt) != MPT_OK) {
996 mpt_prt(mpt, "soft reset failed: ack timeout\n");
1001 if (mpt_wait_state(mpt, MPT_DB_STATE_READY) != MPT_OK) {
1002 mpt_prt(mpt, "soft reset failed: device did not restart\n");
1010 mpt_enable_diag_mode(struct mpt_softc *mpt)
1017 if ((mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC) & MPI_DIAG_DRWE) != 0)
1021 mpt_write(mpt, MPT_OFFSET_SEQUENCE, 0xFF);
1022 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_1ST_KEY_VALUE);
1023 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_2ND_KEY_VALUE);
1024 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_3RD_KEY_VALUE);
1025 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_4TH_KEY_VALUE);
1026 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_5TH_KEY_VALUE);
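
The five key writes above arm the diagnostic write-enable bit (MPI_DIAG_DRWE); writing any bad value relocks it, which is how mpt_disable_diag_mode() drops the privilege at line 1039. A sketch of the intended bracket around diagnostic-register work, assuming (per the error check at lines 1060-1063) that zero from mpt_enable_diag_mode() means success:

    if (mpt_enable_diag_mode(mpt) == 0) {
        uint32_t diagreg;

        diagreg = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);
        /* ... modify diagreg and write it back as needed ... */
        mpt_disable_diag_mode(mpt);
    }
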
1036 mpt_disable_diag_mode(struct mpt_softc *mpt)
1039 mpt_write(mpt, MPT_OFFSET_SEQUENCE, 0xFFFFFFFF);
1046 mpt_hard_reset(struct mpt_softc *mpt)
1052 mpt_lprt(mpt, MPT_PRT_DEBUG, "hard reset\n");
1054 if (mpt->is_1078) {
1055 mpt_write(mpt, MPT_OFFSET_RESET_1078, 0x07);
1060 error = mpt_enable_diag_mode(mpt);
1062 mpt_prt(mpt, "WARNING - Could not enter diagnostic mode !\n");
1063 mpt_prt(mpt, "Trying to reset anyway.\n");
1066 diagreg = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);
1072 mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, diagreg | MPI_DIAG_DISABLE_ARM);
1076 mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, diagreg | MPI_DIAG_RESET_ADAPTER);
1087 diagreg = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);
1091 mpt_prt(mpt, "WARNING - Failed hard reset! "
1099 if (mpt->fw_image != NULL) {
1101 error = mpt_download_fw(mpt);
1104 mpt_prt(mpt, "WARNING - Firmware Download Failed!\n");
1105 mpt_prt(mpt, "Trying to initialize anyway.\n");
1114 mpt_disable_diag_mode(mpt);
1118 mpt_core_ioc_reset(struct mpt_softc *mpt, int type)
1125 mpt_complete_request_chain(mpt, &mpt->request_pending_list,
1136 mpt_reset(struct mpt_softc *mpt, int reinit)
1146 if ((ret = mpt_soft_reset(mpt)) != MPT_OK) {
1150 mpt_hard_reset(mpt);
1156 ret = mpt_wait_state(mpt, MPT_DB_STATE_READY);
1163 ret = mpt_wait_state(mpt, MPT_DB_STATE_READY);
1167 mpt_prt(mpt, "mpt_reset: failed hard reset (%d:%d)\n",
1178 mpt->reset_cnt++;
1179 MPT_PERS_FOREACH(mpt, pers)
1180 pers->reset(mpt, ret);
1184 ret = mpt_enable_ioc(mpt, 1);
1186 mpt_enable_ints(mpt);
1197 mpt_free_request(struct mpt_softc *mpt, request_t *req)
1203 if (req == NULL || req != &mpt->request_pool[req->index]) {
1208 mpt_free_request(mpt, nxt); /* NB: recursion */
1212 MPT_LOCK_ASSERT(mpt);
1213 KASSERT(mpt_req_on_free_list(mpt, req) == 0,
1216 KASSERT(mpt_req_on_pending_list(mpt, req) == 0,
1220 mpt_req_not_spcl(mpt, req, "mpt_free_request", __LINE__);
1224 if (LIST_EMPTY(&mpt->ack_frames)) {
1233 TAILQ_INSERT_TAIL(&mpt->request_free_list, req, links);
1234 if (mpt->getreqwaiter != 0) {
1235 mpt->getreqwaiter = 0;
1236 wakeup(&mpt->request_free_list);
1244 record = LIST_FIRST(&mpt->ack_frames);
1247 mpt_assign_serno(mpt, req);
1248 mpt_send_event_ack(mpt, req, &record->reply, record->context);
1249 offset = (uint32_t)((uint8_t *)record - mpt->reply);
1250 reply_baddr = offset + (mpt->reply_phys & 0xFFFFFFFF);
1251 bus_dmamap_sync_range(mpt->reply_dmat, mpt->reply_dmap, offset,
1253 mpt_free_reply(mpt, reply_baddr);
1258 mpt_get_request(struct mpt_softc *mpt, int sleep_ok)
1263 MPT_LOCK_ASSERT(mpt);
1264 req = TAILQ_FIRST(&mpt->request_free_list);
1266 KASSERT(req == &mpt->request_pool[req->index],
1272 TAILQ_REMOVE(&mpt->request_free_list, req, links);
1275 mpt_assign_serno(mpt, req);
1277 mpt->getreqwaiter = 1;
1278 mpt_sleep(mpt, &mpt->request_free_list, PUSER, "mptgreq", 0);
1286 mpt_send_cmd(struct mpt_softc *mpt, request_t *req)
1289 if (mpt->verbose > MPT_PRT_DEBUG2) {
1290 mpt_dump_request(mpt, req);
1292 bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
1295 KASSERT(mpt_req_on_free_list(mpt, req) == 0,
1298 KASSERT(mpt_req_on_pending_list(mpt, req) == 0,
1301 TAILQ_INSERT_HEAD(&mpt->request_pending_list, req, links);
1302 mpt_write(mpt, MPT_OFFSET_REQUEST_Q, (uint32_t) req->req_pbuf);
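
Lines 1197-1302 carry the whole request life cycle: take a frame from request_free_list, build the MPI message in req->req_vbuf, post the frame's bus address to the request FIFO, then wait (or let the reply handler complete it asynchronously). A minimal synchronous sketch assembled from those calls; the timeout and error value are illustrative, and the call shapes follow mpt_send_port_enable() at lines 2066-2087:

    static int
    example_sync_request(struct mpt_softc *mpt)
    {
        request_t *req;
        int error;

        req = mpt_get_request(mpt, /*sleep_ok*/FALSE);
        if (req == NULL)
            return (ENOMEM);
        /* ... build an MPI message in req->req_vbuf here ... */
        mpt_check_doorbell(mpt);    /* warn if the IOC left RUN state */
        mpt_send_cmd(mpt, req);     /* pending list + request FIFO */
        error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
            /*sleep_ok*/FALSE, /*time_ms*/30000);
        if (error == 0)
            mpt_free_request(mpt, req);
        return (error);
    }
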
1309 * mpt softc of controller executing request
1319 mpt_wait_req(struct mpt_softc *mpt, request_t *req,
1342 saved_cnt = mpt->reset_cnt;
1343 while ((req->state & mask) != state && mpt->reset_cnt == saved_cnt) {
1345 if (mpt_sleep(mpt, req, PUSER, "mptreq", sbt) ==
1355 mpt_intr(mpt);
1359 if (mpt->reset_cnt != saved_cnt) {
1365 mpt_prt(mpt, "mpt_wait_req(%x) timed out\n", msg_hdr->Function);
1378 mpt_send_handshake_cmd(struct mpt_softc *mpt, size_t len, void *cmd)
1384 data = mpt_rd_db(mpt);
1389 mpt_prt(mpt, "handshake aborted - invalid doorbell state\n");
1399 if (MPT_DB_INTR(mpt_rd_intr(mpt)))
1400 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
1408 mpt_write(mpt, MPT_OFFSET_DOORBELL, data);
1411 if (mpt_wait_db_int(mpt) != MPT_OK) {
1412 mpt_prt(mpt, "mpt_send_handshake_cmd: db ignored\n");
1417 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
1419 if (mpt_wait_db_ack(mpt) != MPT_OK) {
1420 mpt_prt(mpt, "mpt_send_handshake_cmd: db ack timed out\n");
1426 mpt_write_stream(mpt, MPT_OFFSET_DOORBELL, *data32++);
1427 if (mpt_wait_db_ack(mpt) != MPT_OK) {
1428 mpt_prt(mpt,
1438 mpt_recv_handshake_reply(struct mpt_softc *mpt, size_t reply_len, void *reply)
1452 if (mpt_wait_db_int(mpt) != MPT_OK) {
1453 mpt_prt(mpt, "mpt_recv_handshake_cmd timeout1\n");
1456 data = mpt_read(mpt, MPT_OFFSET_DOORBELL);
1458 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
1461 if (mpt_wait_db_int(mpt) != MPT_OK) {
1462 mpt_prt(mpt, "mpt_recv_handshake_cmd timeout2\n");
1465 data = mpt_read(mpt, MPT_OFFSET_DOORBELL);
1467 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
1475 mpt_prt(mpt, "reply length does not match message length: "
1484 if (mpt_wait_db_int(mpt) != MPT_OK) {
1485 mpt_prt(mpt, "mpt_recv_handshake_cmd timeout3\n");
1488 data = mpt_read(mpt, MPT_OFFSET_DOORBELL);
1491 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
1495 if (mpt_wait_db_int(mpt) != MPT_OK) {
1496 mpt_prt(mpt, "mpt_recv_handshake_cmd timeout4\n");
1499 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
1502 if (mpt->verbose >= MPT_PRT_TRACE)
1511 mpt_get_iocfacts(struct mpt_softc *mpt, MSG_IOC_FACTS_REPLY *freplp)
1519 error = mpt_send_handshake_cmd(mpt, sizeof f_req, &f_req);
1523 error = mpt_recv_handshake_reply(mpt, sizeof (*freplp), freplp);
1528 mpt_get_portfacts(struct mpt_softc *mpt, U8 port, MSG_PORT_FACTS_REPLY *freplp)
1537 error = mpt_send_handshake_cmd(mpt, sizeof f_req, &f_req);
1541 error = mpt_recv_handshake_reply(mpt, sizeof (*freplp), freplp);
1552 mpt_send_ioc_init(struct mpt_softc *mpt, uint32_t who)
1569 if ((error = mpt_send_handshake_cmd(mpt, sizeof init, &init)) != 0) {
1573 error = mpt_recv_handshake_reply(mpt, sizeof reply, &reply);
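
mpt_get_iocfacts(), mpt_get_portfacts() and mpt_send_ioc_init() all ride the same doorbell handshake: the request is clocked out 32 bits at a time through MPT_OFFSET_DOORBELL, the reply clocked back 16 bits at a time, with interrupt-status acknowledgements in between. The shared pattern, sketched with IOC FACTS (MsgContext handling omitted; only the MPI function code differs per message):

    MSG_IOC_FACTS f_req;
    MSG_IOC_FACTS_REPLY f_rep;

    memset(&f_req, 0, sizeof(f_req));
    f_req.Function = MPI_FUNCTION_IOC_FACTS;
    if (mpt_send_handshake_cmd(mpt, sizeof(f_req), &f_req) == 0)
        (void)mpt_recv_handshake_reply(mpt, sizeof(f_rep), &f_rep);
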
1582 mpt_issue_cfg_req(struct mpt_softc *mpt, request_t *req, cfgparms_t *params,
1617 mpt_check_doorbell(mpt);
1618 mpt_send_cmd(mpt, req);
1619 return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
1624 mpt_read_extcfg_header(struct mpt_softc *mpt, int PageVersion, int PageNumber,
1634 req = mpt_get_request(mpt, sleep_ok);
1636 mpt_prt(mpt, "mpt_extread_cfg_header: Get request failed!\n");
1648 error = mpt_issue_cfg_req(mpt, req, &params, /*addr*/0, /*len*/0,
1657 mpt_prt(mpt, "read_extcfg_header timed out\n");
1672 mpt_lprt(mpt, MPT_PRT_DEBUG,
1678 mpt_prt(mpt, "mpt_read_extcfg_header: Config Info Status %x\n",
1683 mpt_free_request(mpt, req);
1688 mpt_read_extcfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress,
1696 req = mpt_get_request(mpt, sleep_ok);
1698 mpt_prt(mpt, "mpt_read_extcfg_page: Get request failed!\n");
1710 error = mpt_issue_cfg_req(mpt, req, &params,
1711 req->req_pbuf + MPT_RQSL(mpt),
1714 mpt_prt(mpt, "read_extcfg_page(%d) timed out\n", Action);
1719 mpt_prt(mpt, "mpt_read_extcfg_page: Config Info Status %x\n",
1721 mpt_free_request(mpt, req);
1724 memcpy(buf, ((uint8_t *)req->req_vbuf)+MPT_RQSL(mpt), len);
1725 mpt_free_request(mpt, req);
1730 mpt_read_cfg_header(struct mpt_softc *mpt, int PageType, int PageNumber,
1739 req = mpt_get_request(mpt, sleep_ok);
1741 mpt_prt(mpt, "mpt_read_cfg_header: Get request failed!\n");
1751 error = mpt_issue_cfg_req(mpt, req, &params, /*addr*/0, /*len*/0,
1760 mpt_prt(mpt, "read_cfg_header timed out\n");
1771 mpt_lprt(mpt, MPT_PRT_DEBUG,
1777 mpt_prt(mpt, "mpt_read_cfg_header: Config Info Status %x\n",
1782 mpt_free_request(mpt, req);
1787 mpt_read_cfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress,
1795 req = mpt_get_request(mpt, sleep_ok);
1797 mpt_prt(mpt, "mpt_read_cfg_page: Get request failed!\n");
1807 error = mpt_issue_cfg_req(mpt, req, &params,
1808 req->req_pbuf + MPT_RQSL(mpt),
1811 mpt_prt(mpt, "read_cfg_page(%d) timed out\n", Action);
1816 mpt_prt(mpt, "mpt_read_cfg_page: Config Info Status %x\n",
1818 mpt_free_request(mpt, req);
1821 memcpy(hdr, ((uint8_t *)req->req_vbuf)+MPT_RQSL(mpt), len);
1822 mpt_free_request(mpt, req);
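
Every configuration-page accessor has the same two-step shape: read the page header to learn the on-chip length, then read the page body into the scratch area that follows the request frame (req_pbuf + MPT_RQSL(mpt)) and copy it out. The IOC Page 2 path at lines 1904-1936 is the canonical caller; parameter roles below are inferred from the call at lines 1929-1930:

    int rv;
    /* PageLength counts 32-bit words, so scale to bytes. */
    int len = mpt->ioc_page2->Header.PageLength * sizeof(uint32_t);

    /* Refresh the current contents of an already-sized page. */
    rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
        &mpt->ioc_page2->Header, len, /*sleep_ok*/FALSE, /*timeout_ms*/5000);
    if (rv != 0)
        mpt_prt(mpt, "failed to read config page\n");
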
1827 mpt_write_cfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress,
1839 mpt_prt(mpt, "page type 0x%x not changeable\n",
1851 req = mpt_get_request(mpt, sleep_ok);
1855 memcpy(((caddr_t)req->req_vbuf) + MPT_RQSL(mpt), hdr, len);
1874 error = mpt_issue_cfg_req(mpt, req, &params,
1875 req->req_pbuf + MPT_RQSL(mpt),
1878 mpt_prt(mpt, "mpt_write_cfg_page timed out\n");
1883 mpt_prt(mpt, "mpt_write_cfg_page: Config Info Status %x\n",
1885 mpt_free_request(mpt, req);
1888 mpt_free_request(mpt, req);
1896 mpt_read_config_info_ioc(struct mpt_softc *mpt)
1904 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC,
1916 mpt_lprt(mpt, MPT_PRT_DEBUG,
1922 mpt->ioc_page2 = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
1923 if (mpt->ioc_page2 == NULL) {
1924 mpt_prt(mpt, "unable to allocate memory for IOC page 2\n");
1925 mpt_raid_free_mem(mpt);
1928 memcpy(&mpt->ioc_page2->Header, &hdr, sizeof(hdr));
1929 rv = mpt_read_cur_cfg_page(mpt, 0,
1930 &mpt->ioc_page2->Header, len, FALSE, 5000);
1932 mpt_prt(mpt, "failed to read IOC Page 2\n");
1933 mpt_raid_free_mem(mpt);
1936 mpt2host_config_page_ioc2(mpt->ioc_page2);
1938 if (mpt->ioc_page2->CapabilitiesFlags != 0) {
1941 mpt_prt(mpt, "Capabilities: (");
1943 if ((mpt->ioc_page2->CapabilitiesFlags & mask) == 0) {
1948 mpt_prtc(mpt, " RAID-0");
1951 mpt_prtc(mpt, " RAID-1E");
1954 mpt_prtc(mpt, " RAID-1");
1957 mpt_prtc(mpt, " SES");
1960 mpt_prtc(mpt, " SAFTE");
1963 mpt_prtc(mpt, " Multi-Channel-Arrays");
1968 mpt_prtc(mpt, " )\n");
1969 if ((mpt->ioc_page2->CapabilitiesFlags
1973 mpt_prt(mpt, "%d Active Volume%s(%d Max)\n",
1974 mpt->ioc_page2->NumActiveVolumes,
1975 mpt->ioc_page2->NumActiveVolumes != 1
1977 mpt->ioc_page2->MaxVolumes);
1978 mpt_prt(mpt, "%d Hidden Drive Member%s(%d Max)\n",
1979 mpt->ioc_page2->NumActivePhysDisks,
1980 mpt->ioc_page2->NumActivePhysDisks != 1
1982 mpt->ioc_page2->MaxPhysDisks);
1986 len = mpt->ioc_page2->MaxVolumes * sizeof(struct mpt_raid_volume);
1987 mpt->raid_volumes = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
1988 if (mpt->raid_volumes == NULL) {
1989 mpt_prt(mpt, "Could not allocate RAID volume data\n");
1990 mpt_raid_free_mem(mpt);
1999 mpt->raid_max_volumes = mpt->ioc_page2->MaxVolumes;
2001 len = sizeof(*mpt->raid_volumes->config_page) +
2002 (sizeof (RAID_VOL0_PHYS_DISK) * (mpt->ioc_page2->MaxPhysDisks - 1));
2003 for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
2004 mpt_raid = &mpt->raid_volumes[i];
2008 mpt_prt(mpt, "Could not allocate RAID page data\n");
2009 mpt_raid_free_mem(mpt);
2013 mpt->raid_page0_len = len;
2015 len = mpt->ioc_page2->MaxPhysDisks * sizeof(struct mpt_raid_disk);
2016 mpt->raid_disks = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
2017 if (mpt->raid_disks == NULL) {
2018 mpt_prt(mpt, "Could not allocate RAID disk data\n");
2019 mpt_raid_free_mem(mpt);
2022 mpt->raid_max_disks = mpt->ioc_page2->MaxPhysDisks;
2027 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC,
2030 mpt_raid_free_mem(mpt);
2034 mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC Page 3 Header: %x %x %x %x\n",
2038 mpt->ioc_page3 = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
2039 if (mpt->ioc_page3 == NULL) {
2040 mpt_prt(mpt, "unable to allocate memory for IOC page 3\n");
2041 mpt_raid_free_mem(mpt);
2044 memcpy(&mpt->ioc_page3->Header, &hdr, sizeof(hdr));
2045 rv = mpt_read_cur_cfg_page(mpt, 0,
2046 &mpt->ioc_page3->Header, len, FALSE, 5000);
2048 mpt_raid_free_mem(mpt);
2051 mpt2host_config_page_ioc3(mpt->ioc_page3);
2052 mpt_raid_wakeup(mpt);
2060 mpt_send_port_enable(struct mpt_softc *mpt, int port)
2066 req = mpt_get_request(mpt, /*sleep_ok*/FALSE);
2071 memset(enable_req, 0, MPT_RQSL(mpt));
2077 mpt_check_doorbell(mpt);
2078 mpt_lprt(mpt, MPT_PRT_DEBUG, "enabling port %d\n", port);
2080 mpt_send_cmd(mpt, req);
2081 error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
2082 FALSE, (mpt->is_sas || mpt->is_fc)? 300000 : 30000);
2084 mpt_prt(mpt, "port %d enable timed out\n", port);
2087 mpt_free_request(mpt, req);
2088 mpt_lprt(mpt, MPT_PRT_DEBUG, "enabled port %d\n", port);
2096 mpt_send_event_request(struct mpt_softc *mpt, int onoff)
2101 req = mpt_get_request(mpt, FALSE);
2112 mpt_check_doorbell(mpt);
2113 mpt_lprt(mpt, MPT_PRT_DEBUG, "%sabling async events\n",
2118 mpt_send_cmd(mpt, req);
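
Note that one request both enables and disables async event notification; only the onoff flag differs, hence the single "%sabling async events" format at line 2113. Sketch of the toggle (the reply is consumed by the event handler rather than awaited here):

    (void)mpt_send_event_request(mpt, /*onoff*/1);  /* enable  */
    (void)mpt_send_event_request(mpt, /*onoff*/0);  /* disable */
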
2126 mpt_enable_ints(struct mpt_softc *mpt)
2130 mpt_write(mpt, MPT_OFFSET_INTR_MASK, MPT_INTR_DB_MASK);
2137 mpt_disable_ints(struct mpt_softc *mpt)
2141 mpt_write(mpt, MPT_OFFSET_INTR_MASK,
2146 mpt_sysctl_attach(struct mpt_softc *mpt)
2148 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
2149 struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);
2152 "debug", CTLFLAG_RW, &mpt->verbose, 0,
2155 "role", CTLFLAG_RD, &mpt->role, 0,
2159 "failure_id", CTLFLAG_RW, &mpt->failure_id, -1,
2165 mpt_attach(struct mpt_softc *mpt)
2171 mpt_core_attach(mpt);
2172 mpt_core_enable(mpt);
2174 TAILQ_INSERT_TAIL(&mpt_tailq, mpt, links);
2180 if (pers->probe(mpt) == 0) {
2181 error = pers->attach(mpt);
2183 mpt_detach(mpt);
2186 mpt->mpt_pers_mask |= (0x1 << pers->id);
2199 if (pers != NULL && MPT_PERS_ATTACHED(pers, mpt) != 0) {
2200 error = pers->enable(mpt);
2202 mpt_prt(mpt, "personality %s attached but would"
2204 mpt_detach(mpt);
2213 mpt_shutdown(struct mpt_softc *mpt)
2217 MPT_PERS_FOREACH_REVERSE(mpt, pers) {
2218 pers->shutdown(mpt);
2224 mpt_detach(struct mpt_softc *mpt)
2228 MPT_PERS_FOREACH_REVERSE(mpt, pers) {
2229 pers->detach(mpt);
2230 mpt->mpt_pers_mask &= ~(0x1 << pers->id);
2233 TAILQ_REMOVE(&mpt_tailq, mpt, links);
2264 mpt_core_attach(struct mpt_softc *mpt)
2268 LIST_INIT(&mpt->ack_frames);
2270 TAILQ_INIT(&mpt->request_pending_list);
2271 TAILQ_INIT(&mpt->request_free_list);
2272 TAILQ_INIT(&mpt->request_timeout_list);
2274 STAILQ_INIT(&mpt->trt[val].atios);
2275 STAILQ_INIT(&mpt->trt[val].inots);
2277 STAILQ_INIT(&mpt->trt_wildcard.atios);
2278 STAILQ_INIT(&mpt->trt_wildcard.inots);
2280 mpt->failure_id = -1;
2282 mpt->scsi_tgt_handler_id = MPT_HANDLER_ID_NONE;
2283 mpt_sysctl_attach(mpt);
2284 mpt_lprt(mpt, MPT_PRT_DEBUG, "doorbell req = %s\n",
2285 mpt_ioc_diag(mpt_read(mpt, MPT_OFFSET_DOORBELL)));
2287 MPT_LOCK(mpt);
2288 error = mpt_configure_ioc(mpt, 0, 0);
2289 MPT_UNLOCK(mpt);
2295 mpt_core_enable(struct mpt_softc *mpt)
2303 MPT_LOCK(mpt);
2310 mpt_send_event_request(mpt, 1);
2318 mpt_intr(mpt);
2323 mpt_enable_ints(mpt);
2331 mpt_intr(mpt);
2336 if (mpt_send_port_enable(mpt, 0) != MPT_OK) {
2337 mpt_prt(mpt, "failed to enable port 0\n");
2338 MPT_UNLOCK(mpt);
2341 MPT_UNLOCK(mpt);
2346 mpt_core_shutdown(struct mpt_softc *mpt)
2349 mpt_disable_ints(mpt);
2353 mpt_core_detach(struct mpt_softc *mpt)
2360 mpt_disable_ints(mpt);
2363 for (val = 0; val < MPT_MAX_REQUESTS(mpt); val++) {
2364 request_t *req = &mpt->request_pool[val];
2365 mpt_callout_drain(mpt, &req->callout);
2368 mpt_dma_buf_free(mpt);
2384 mpt_upload_fw(struct mpt_softc *mpt)
2402 tsge->ImageSize = htole32(mpt->fw_image_size);
2408 sge->FlagsLength = htole32(flags | mpt->fw_image_size);
2409 sge->Address = htole32(mpt->fw_phys);
2410 bus_dmamap_sync(mpt->fw_dmat, mpt->fw_dmap, BUS_DMASYNC_PREREAD);
2411 error = mpt_send_handshake_cmd(mpt, sizeof(fw_req_buf), &fw_req_buf);
2414 error = mpt_recv_handshake_reply(mpt, sizeof(fw_reply), &fw_reply);
2415 bus_dmamap_sync(mpt->fw_dmat, mpt->fw_dmap, BUS_DMASYNC_POSTREAD);
2420 mpt_diag_outsl(struct mpt_softc *mpt, uint32_t addr,
2426 if (mpt->is_sas) {
2427 pci_enable_io(mpt->dev, SYS_RES_IOPORT);
2429 mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, addr);
2431 mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, *data);
2434 if (mpt->is_sas) {
2435 pci_disable_io(mpt->dev, SYS_RES_IOPORT);
2440 mpt_download_fw(struct mpt_softc *mpt)
2447 if (mpt->pci_pio_reg == NULL) {
2448 mpt_prt(mpt, "No PIO resource!\n");
2452 mpt_prt(mpt, "Downloading Firmware - Image Size %d\n",
2453 mpt->fw_image_size);
2455 error = mpt_enable_diag_mode(mpt);
2457 mpt_prt(mpt, "Could not enter diagnostic mode!\n");
2461 mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC,
2464 fw_hdr = (MpiFwHeader_t *)mpt->fw_image;
2465 bus_dmamap_sync(mpt->fw_dmat, mpt->fw_dmap, BUS_DMASYNC_PREWRITE);
2466 mpt_diag_outsl(mpt, fw_hdr->LoadStartAddress, (uint32_t*)fw_hdr,
2468 bus_dmamap_sync(mpt->fw_dmat, mpt->fw_dmap, BUS_DMASYNC_POSTWRITE);
2476 bus_dmamap_sync(mpt->fw_dmat, mpt->fw_dmap,
2478 mpt_diag_outsl(mpt, ext->LoadStartAddress, (uint32_t*)ext,
2480 bus_dmamap_sync(mpt->fw_dmat, mpt->fw_dmap,
2484 if (mpt->is_sas) {
2485 pci_enable_io(mpt->dev, SYS_RES_IOPORT);
2488 mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, fw_hdr->IopResetRegAddr);
2489 mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, fw_hdr->IopResetVectorValue);
2496 mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, MPT_DIAG_MEM_CFG_BASE);
2497 data = mpt_pio_read(mpt, MPT_OFFSET_DIAG_DATA) | MPT_DIAG_MEM_CFG_BADFL;
2498 mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, MPT_DIAG_MEM_CFG_BASE);
2499 mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, data);
2501 if (mpt->is_sas) {
2502 pci_disable_io(mpt->dev, SYS_RES_IOPORT);
2508 data = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);
2510 mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, data);
2512 mpt_disable_diag_mode(mpt);
2517 mpt_dma_buf_alloc(struct mpt_softc *mpt)
2525 if (mpt_dma_tag_create(mpt, mpt->parent_dmat, 1,
2527 NULL, NULL, (mpt->max_cam_seg_cnt - 1) * PAGE_SIZE,
2528 mpt->max_cam_seg_cnt, BUS_SPACE_MAXSIZE_32BIT, 0,
2529 &mpt->buffer_dmat) != 0) {
2530 mpt_prt(mpt, "cannot create a dma tag for data buffers\n");
2535 if (mpt_dma_tag_create(mpt, mpt->parent_dmat, PAGE_SIZE, 0,
2537 NULL, NULL, MPT_REQ_MEM_SIZE(mpt), 1, BUS_SPACE_MAXSIZE_32BIT, 0,
2538 &mpt->request_dmat) != 0) {
2539 mpt_prt(mpt, "cannot create a dma tag for requests\n");
2544 if (bus_dmamem_alloc(mpt->request_dmat, (void **)&mpt->request,
2545 BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &mpt->request_dmap) != 0) {
2546 mpt_prt(mpt, "cannot allocate %d bytes of request memory\n",
2547 MPT_REQ_MEM_SIZE(mpt));
2551 mi.mpt = mpt;
2555 bus_dmamap_load(mpt->request_dmat, mpt->request_dmap, mpt->request,
2556 MPT_REQ_MEM_SIZE(mpt), mpt_map_rquest, &mi, 0);
2559 mpt_prt(mpt, "error %d loading dma map for DMA request queue\n",
2563 mpt->request_phys = mi.phys;
2569 pptr = mpt->request_phys;
2570 vptr = mpt->request;
2571 end = pptr + MPT_REQ_MEM_SIZE(mpt);
2573 request_t *req = &mpt->request_pool[i];
2586 error = bus_dmamap_create(mpt->buffer_dmat, 0, &req->dmap);
2588 mpt_prt(mpt, "error %d creating per-cmd DMA maps\n",
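
Once the request memory is loaded, the single allocation is carved into fixed-size frames: each request_t records the kernel virtual and bus address of its slice and gets a per-command map on buffer_dmat for data transfers. A sketch reassembling the carve-up from the fragments at lines 2569-2588 (the per-frame stride macro name is an assumption):

    pptr = mpt->request_phys;
    vptr = mpt->request;
    for (i = 0; i < MPT_MAX_REQUESTS(mpt); i++) {
        request_t *req = &mpt->request_pool[i];

        req->req_pbuf = pptr;       /* bus address of this frame */
        req->req_vbuf = vptr;       /* KVA of this frame */
        pptr += MPT_REQUEST_AREA;   /* stride name assumed */
        vptr += MPT_REQUEST_AREA;
        error = bus_dmamap_create(mpt->buffer_dmat, 0, &req->dmap);
        if (error != 0)
            break;
    }
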
2598 mpt_dma_buf_free(struct mpt_softc *mpt)
2602 if (mpt->request_dmat == 0) {
2603 mpt_lprt(mpt, MPT_PRT_DEBUG, "already released dma memory\n");
2606 for (i = 0; i < MPT_MAX_REQUESTS(mpt); i++) {
2607 bus_dmamap_destroy(mpt->buffer_dmat, mpt->request_pool[i].dmap);
2609 bus_dmamap_unload(mpt->request_dmat, mpt->request_dmap);
2610 bus_dmamem_free(mpt->request_dmat, mpt->request, mpt->request_dmap);
2611 bus_dma_tag_destroy(mpt->request_dmat);
2612 mpt->request_dmat = 0;
2613 bus_dma_tag_destroy(mpt->buffer_dmat);
2621 mpt_configure_ioc(struct mpt_softc *mpt, int tn, int needreset)
2640 if (needreset || MPT_STATE(mpt_rd_db(mpt)) != MPT_DB_STATE_READY) {
2641 if (mpt_reset(mpt, FALSE) != MPT_OK) {
2642 return (mpt_configure_ioc(mpt, tn + 1, 1));
2647 if (mpt_get_iocfacts(mpt, &mpt->ioc_facts) != MPT_OK) {
2648 mpt_prt(mpt, "mpt_get_iocfacts failed\n");
2649 return (mpt_configure_ioc(mpt, tn + 1, 1));
2651 mpt2host_iocfacts_reply(&mpt->ioc_facts);
2653 mpt_prt(mpt, "MPI Version=%d.%d.%d.%d\n",
2654 mpt->ioc_facts.MsgVersion >> 8,
2655 mpt->ioc_facts.MsgVersion & 0xFF,
2656 mpt->ioc_facts.HeaderVersion >> 8,
2657 mpt->ioc_facts.HeaderVersion & 0xFF);
2677 mpt->max_seg_cnt = MPT_MAX_REQUESTS(mpt) - 2;
2680 mpt->max_seg_cnt *= MPT_NRFM(mpt);
2683 if (mpt->max_seg_cnt > mpt->ioc_facts.MaxChainDepth) {
2684 mpt_lprt(mpt, MPT_PRT_INFO,
2686 mpt->ioc_facts.MaxChainDepth, mpt->max_seg_cnt);
2687 mpt->max_seg_cnt = mpt->ioc_facts.MaxChainDepth;
2691 mpt->max_seg_cnt *= (MPT_NSGL(mpt) - 1);
2696 mpt->max_cam_seg_cnt = min(mpt->max_seg_cnt, (MAXPHYS / PAGE_SIZE) + 1);
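
The clamp at line 2696 bounds the CAM scatter/gather list to what the largest possible I/O can actually need: with the stock FreeBSD 11 MAXPHYS of 128 KiB and 4 KiB pages, a maximally misaligned transfer spans (128 KiB / 4 KiB) + 1 = 33 pages, so max_cam_seg_cnt never usefully exceeds 33 even when the chain-depth math above allows more.
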
2698 error = mpt_dma_buf_alloc(mpt);
2700 mpt_prt(mpt, "mpt_dma_buf_alloc() failed!\n");
2704 for (val = 0; val < MPT_MAX_REQUESTS(mpt); val++) {
2705 request_t *req = &mpt->request_pool[val];
2707 mpt_callout_init(mpt, &req->callout);
2708 mpt_free_request(mpt, req);
2711 mpt_lprt(mpt, MPT_PRT_INFO, "Maximum Segment Count: %u, Maximum "
2712 "CAM Segment Count: %u\n", mpt->max_seg_cnt,
2713 mpt->max_cam_seg_cnt);
2715 mpt_lprt(mpt, MPT_PRT_INFO, "MsgLength=%u IOCNumber = %d\n",
2716 mpt->ioc_facts.MsgLength, mpt->ioc_facts.IOCNumber);
2717 mpt_lprt(mpt, MPT_PRT_INFO,
2720 mpt->ioc_facts.GlobalCredits, mpt->ioc_facts.BlockSize,
2721 mpt->ioc_facts.RequestFrameSize << 2,
2722 mpt->ioc_facts.MaxChainDepth);
2723 mpt_lprt(mpt, MPT_PRT_INFO, "IOCFACTS: Num Ports %d, FWImageSize %d, "
2724 "Flags=%#x\n", mpt->ioc_facts.NumberOfPorts,
2725 mpt->ioc_facts.FWImageSize, mpt->ioc_facts.Flags);
2727 len = mpt->ioc_facts.NumberOfPorts * sizeof (MSG_PORT_FACTS_REPLY);
2728 mpt->port_facts = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
2729 if (mpt->port_facts == NULL) {
2730 mpt_prt(mpt, "unable to allocate memory for port facts\n");
2735 if ((mpt->ioc_facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT) &&
2736 (mpt->fw_uploaded == 0)) {
2748 mpt->fw_image_size = mpt->ioc_facts.FWImageSize;
2749 error = mpt_dma_tag_create(mpt, mpt->parent_dmat, 1, 0,
2751 mpt->fw_image_size, 1, mpt->fw_image_size, 0,
2752 &mpt->fw_dmat);
2754 mpt_prt(mpt, "cannot create firmware dma tag\n");
2757 error = bus_dmamem_alloc(mpt->fw_dmat,
2758 (void **)&mpt->fw_image, BUS_DMA_NOWAIT |
2759 BUS_DMA_COHERENT, &mpt->fw_dmap);
2761 mpt_prt(mpt, "cannot allocate firmware memory\n");
2762 bus_dma_tag_destroy(mpt->fw_dmat);
2765 mi.mpt = mpt;
2767 bus_dmamap_load(mpt->fw_dmat, mpt->fw_dmap,
2768 mpt->fw_image, mpt->fw_image_size, mpt_map_rquest, &mi, 0);
2769 mpt->fw_phys = mi.phys;
2771 error = mpt_upload_fw(mpt);
2773 mpt_prt(mpt, "firmware upload failed.\n");
2774 bus_dmamap_unload(mpt->fw_dmat, mpt->fw_dmap);
2775 bus_dmamem_free(mpt->fw_dmat, mpt->fw_image,
2776 mpt->fw_dmap);
2777 bus_dma_tag_destroy(mpt->fw_dmat);
2778 mpt->fw_image = NULL;
2781 mpt->fw_uploaded = 1;
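
Parts flagged MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT keep no flash image of their own, so the attach path above uploads and caches the running firmware once (fw_uploaded). A hard reset wipes it, which is why mpt_hard_reset() replays the cached image (lines 1099-1105); the contract in sketch form:

    /* After a hard reset of a download-boot part, push the cached
     * image back before bringing the IOC to READY. */
    if (mpt->fw_image != NULL && mpt_download_fw(mpt) != 0) {
        mpt_prt(mpt, "WARNING - Firmware Download Failed!\n");
        mpt_prt(mpt, "Trying to initialize anyway.\n");
    }
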
2784 for (port = 0; port < mpt->ioc_facts.NumberOfPorts; port++) {
2785 pfp = &mpt->port_facts[port];
2786 error = mpt_get_portfacts(mpt, 0, pfp);
2788 mpt_prt(mpt,
2790 free(mpt->port_facts, M_DEVBUF);
2791 mpt->port_facts = NULL;
2792 return (mpt_configure_ioc(mpt, tn + 1, 1));
2801 mpt_lprt(mpt, error,
2811 pfp = &mpt->port_facts[0];
2813 mpt->is_fc = 1;
2814 mpt->is_sas = 0;
2815 mpt->is_spi = 0;
2817 mpt->is_fc = 0;
2818 mpt->is_sas = 1;
2819 mpt->is_spi = 0;
2821 mpt->is_fc = 0;
2822 mpt->is_sas = 0;
2823 mpt->is_spi = 1;
2824 if (mpt->mpt_ini_id == MPT_INI_ID_NONE)
2825 mpt->mpt_ini_id = pfp->PortSCSIID;
2827 mpt_prt(mpt, "iSCSI not supported yet\n");
2830 mpt_prt(mpt, "Inactive Port\n");
2833 mpt_prt(mpt, "unknown Port Type %#x\n", pfp->PortType);
2843 mpt->role = MPT_ROLE_NONE;
2845 mpt->role |= MPT_ROLE_INITIATOR;
2848 mpt->role |= MPT_ROLE_TARGET;
2854 if (mpt_enable_ioc(mpt, 1) != MPT_OK) {
2855 mpt_prt(mpt, "unable to initialize IOC\n");
2865 mpt_read_config_info_ioc(mpt);
2871 mpt_enable_ioc(struct mpt_softc *mpt, int portenable)
2876 if (mpt_send_ioc_init(mpt, MPI_WHOINIT_HOST_DRIVER) != MPT_OK) {
2877 mpt_prt(mpt, "mpt_send_ioc_init failed\n");
2881 mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_send_ioc_init ok\n");
2883 if (mpt_wait_state(mpt, MPT_DB_STATE_RUNNING) != MPT_OK) {
2884 mpt_prt(mpt, "IOC failed to go to run state\n");
2887 mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC now at RUNSTATE\n");
2894 for (val = 0, pptr = mpt->reply_phys;
2895 (pptr + MPT_REPLY_SIZE) < (mpt->reply_phys + PAGE_SIZE);
2897 mpt_free_reply(mpt, pptr);
2898 if (++val == mpt->ioc_facts.GlobalCredits - 1)
2911 mpt_send_event_request(mpt, 1);
2913 if (mpt_send_port_enable(mpt, 0) != MPT_OK) {
2914 mpt_prt(mpt, "%s: failed to enable port 0\n", __func__);
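
Once IOC_INIT succeeds and the doorbell reports RUNNING, mpt_enable_ioc() primes the reply free FIFO by donating every MPT_REPLY_SIZE slot in the reply page, stopping one short of the GlobalCredits advertised in IOC FACTS. The loop, reassembled from the fragments at lines 2894-2898:

    /* Post the initial reply frames to the IOC's free FIFO. */
    for (val = 0, pptr = mpt->reply_phys;
        (pptr + MPT_REPLY_SIZE) < (mpt->reply_phys + PAGE_SIZE);
        pptr += MPT_REPLY_SIZE) {
        mpt_free_reply(mpt, pptr);
        if (++val == mpt->ioc_facts.GlobalCredits - 1)
            break;
    }
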