Deleted Added
sdiff udiff text old ( 156796 ) new ( 157117 )
full compact
1/*-
2 * Generic routines for LSI Fusion adapters.
3 * FreeBSD Version.
4 *
5 * Copyright (c) 2000, 2001 by Greg Ansley
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions

--- 78 unchanged lines hidden (view full) ---

87 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
88 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
89 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
90 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
91 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
92 */
93
94#include <sys/cdefs.h>
95__FBSDID("$FreeBSD: head/sys/dev/mpt/mpt.c 157117 2006-03-25 07:08:27Z mjacob $");
96
97#include <dev/mpt/mpt.h>
98#include <dev/mpt/mpt_cam.h> /* XXX For static handler registration */
99#include <dev/mpt/mpt_raid.h> /* XXX For static handler registration */
100
101#include <dev/mpt/mpilib/mpi.h>
102#include <dev/mpt/mpilib/mpi_ioc.h>
103#include <dev/mpt/mpilib/mpi_fc.h>
104#include <dev/mpt/mpilib/mpi_targ.h>
105
106#include <sys/sysctl.h>
107
108#define MPT_MAX_TRYS 3
109#define MPT_MAX_WAIT 300000
110
111static int maxwait_ack = 0;
112static int maxwait_int = 0;

--- 7 unchanged lines hidden (view full) ---

120static mpt_reply_handler_t mpt_handshake_reply_handler;
121static mpt_reply_handler_t mpt_event_reply_handler;
122static void mpt_send_event_ack(struct mpt_softc *mpt, request_t *ack_req,
123 MSG_EVENT_NOTIFY_REPLY *msg, uint32_t context);
124static int mpt_send_event_request(struct mpt_softc *mpt, int onoff);
125static int mpt_soft_reset(struct mpt_softc *mpt);
126static void mpt_hard_reset(struct mpt_softc *mpt);
127static int mpt_configure_ioc(struct mpt_softc *mpt);
128static int mpt_enable_ioc(struct mpt_softc *mpt, int);
129
130/************************* Personality Module Support *************************/
131/*
132 * We include one extra entry that is guaranteed to be NULL
 * to simplify our iterator.
134 */
135static struct mpt_personality *mpt_personalities[MPT_MAX_PERSONALITIES + 1];
136static __inline struct mpt_personality*

--- 10 unchanged lines hidden (view full) ---

147 while (start_at < MPT_MAX_PERSONALITIES
148 && (mpt->mpt_pers_mask & (0x1 << start_at)) == 0) {
149 start_at++;
150 }
151 return (mpt_personalities[start_at]);
152}
153
154/*
155 * Used infrequently, so no need to optimize like a forward
 * traversal, where we rely on the trick that entry MAX+1
 * is guaranteed to be NULL.
158 */
159static __inline struct mpt_personality *
160mpt_pers_find_reverse(struct mpt_softc *mpt, u_int start_at)
161{
162 while (start_at < MPT_MAX_PERSONALITIES
163 && (mpt->mpt_pers_mask & (0x1 << start_at)) == 0) {

--- 12 unchanged lines hidden (view full) ---

176#define MPT_PERS_FOREACH_REVERSE(mpt, pers) \
177 for (pers = mpt_pers_find_reverse(mpt, MPT_MAX_PERSONALITIES-1);\
178 pers != NULL; \
179 pers = mpt_pers_find_reverse(mpt, /*start_at*/pers->id-1))
180
static mpt_load_handler_t mpt_stdload;
static mpt_probe_handler_t mpt_stdprobe;
static mpt_attach_handler_t mpt_stdattach;
static mpt_enable_handler_t mpt_stdenable;
static mpt_event_handler_t mpt_stdevent;
static mpt_reset_handler_t mpt_stdreset;
static mpt_shutdown_handler_t mpt_stdshutdown;
static mpt_detach_handler_t mpt_stddetach;
static mpt_unload_handler_t mpt_stdunload;
/*
 * Default personality: a full set of no-op handlers.  Presumably used
 * to fill in any entry points a registering personality leaves NULL so
 * that callers can invoke every handler unconditionally -- TODO confirm
 * against the registration code in mpt_modevent.
 */
static struct mpt_personality mpt_default_personality =
{
	.load = mpt_stdload,
	.probe = mpt_stdprobe,
	.attach = mpt_stdattach,
	.enable = mpt_stdenable,
	.event = mpt_stdevent,
	.reset = mpt_stdreset,
	.shutdown = mpt_stdshutdown,
	.detach = mpt_stddetach,
	.unload = mpt_stdunload
};
202
static mpt_load_handler_t mpt_core_load;
static mpt_attach_handler_t mpt_core_attach;
static mpt_enable_handler_t mpt_core_enable;
static mpt_reset_handler_t mpt_core_ioc_reset;
static mpt_event_handler_t mpt_core_event;
static mpt_shutdown_handler_t mpt_core_shutdown;
/*
 * NOTE(review): mpt_core_detach is declared with the shutdown handler
 * type but installed in the .detach slot below -- confirm the two
 * handler typedefs are compatible.
 */
static mpt_shutdown_handler_t mpt_core_detach;
static mpt_unload_handler_t mpt_core_unload;
/*
 * The core personality: base controller support shared by all
 * transport personalities.  Entry points not listed here (e.g. .probe)
 * are presumably backfilled from mpt_default_personality at module
 * load time -- TODO confirm.
 */
static struct mpt_personality mpt_core_personality =
{
	.name = "mpt_core",
	.load = mpt_core_load,
	.attach = mpt_core_attach,
	.enable = mpt_core_enable,
	.event = mpt_core_event,
	.reset = mpt_core_ioc_reset,
	.shutdown = mpt_core_shutdown,
	.detach = mpt_core_detach,
	.unload = mpt_core_unload,
};
223
224/*
225 * Manual declaration so that DECLARE_MPT_PERSONALITY doesn't need
226 * ordering information. We want the core to always register FIRST.
227 * other modules are set to SI_ORDER_SECOND.
228 */
static moduledata_t mpt_core_mod = {
	"mpt_core", mpt_modevent, &mpt_core_personality
};
DECLARE_MODULE(mpt_core, mpt_core_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
MODULE_VERSION(mpt_core, 1);

/* Non-zero if personality 'pers' has registered itself with softc 'mpt'. */
#define MPT_PERS_ATTACHED(pers, mpt) ((mpt)->mpt_pers_mask & (0x1 << pers->id))
236
237
238int
239mpt_modevent(module_t mod, int type, void *data)
240{
241 struct mpt_personality *pers;
242 int error;
243

--- 30 unchanged lines hidden (view full) ---

274
275 error = (pers->load(pers));
276 if (error != 0)
277 mpt_personalities[i] = NULL;
278 break;
279 }
280 case MOD_SHUTDOWN:
281 break;
282#if __FreeBSD_version >= 500000
283 case MOD_QUIESCE:
284 break;
285#endif
286 case MOD_UNLOAD:
287 error = pers->unload(pers);
288 mpt_personalities[pers->id] = NULL;
289 break;
290 default:
291 error = EINVAL;
292 break;
293 }

--- 17 unchanged lines hidden (view full) ---

/*
 * Default attach handler: nothing to set up for a personality that
 * does not override it.
 */
int
mpt_stdattach(struct mpt_softc *mpt)
{
	/* Attach is always successful. */
	return (0);
}
317
/*
 * Default enable handler: nothing to enable for a personality that
 * does not override it.
 */
int
mpt_stdenable(struct mpt_softc *mpt)
{
	/* Enable is always successful. */
	return (0);
}
324
/*
 * Default event handler: logs the event code at debug verbosity and
 * declines it (returning 0 indicates the event was not for this
 * personality).
 */
int
mpt_stdevent(struct mpt_softc *mpt, request_t *req, MSG_EVENT_NOTIFY_REPLY *msg)
{
	mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_stdevent: 0x%x\n", msg->Event & 0xFF);
	/* Event was not for us. */
	return (0);
}
332
333void

--- 60 unchanged lines hidden (view full) ---

394 * Fill from the front in the hope that
395 * all registered handlers consume only a
396 * single cache line.
397 *
398 * We don't break on the first empty slot so
399 * that the full table is checked to see if
400 * this handler was previously registered.
401 */
402 if (free_cbi == MPT_HANDLER_ID_NONE &&
403 (mpt_reply_handlers[cbi]
404 == mpt_default_reply_handler))
405 free_cbi = cbi;
406 }
407 if (free_cbi == MPT_HANDLER_ID_NONE) {
408 return (ENOMEM);
409 }
410 mpt_reply_handlers[free_cbi] = handler.reply_handler;
411 *phandler_id = MPT_CBI_TO_HID(free_cbi);
412 break;
413 }
414 default:
415 mpt_prt(mpt, "mpt_register_handler unknown type %d\n", type);
416 return (EINVAL);
417 }

--- 21 unchanged lines hidden (view full) ---

439 mpt_prt(mpt, "mpt_deregister_handler unknown type %d\n", type);
440 return (EINVAL);
441 }
442 return (0);
443}
444
/*
 * Reply handler installed in every callback slot that has no real
 * handler registered (see mpt_core_load).  A reply arriving here is
 * unexpected: log it, dump the frame when one is present, and tell
 * the caller to return the frame to the free pool.
 */
static int
mpt_default_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	mpt_prt(mpt,
	    "Default Handler Called: req=%p:%u reply_descriptor=%x frame=%p\n",
	    req, req->serno, reply_desc, reply_frame);

	if (reply_frame != NULL)
		mpt_dump_reply_frame(mpt, reply_frame);

	mpt_prt(mpt, "Reply Frame Ignored\n");

	/* TRUE => caller frees the reply frame. */
	return (/*free_reply*/TRUE);
}
460
461static int
462mpt_config_reply_handler(struct mpt_softc *mpt, request_t *req,
463 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
464{
465 if (req != NULL) {
466
467 if (reply_frame != NULL) {
468 MSG_CONFIG *cfgp;
469 MSG_CONFIG_REPLY *reply;
470
471 cfgp = (MSG_CONFIG *)req->req_vbuf;

--- 10 unchanged lines hidden (view full) ---

482 wakeup(req);
483 }
484
485 return (/*free_reply*/TRUE);
486}
487
/*
 * Reply handler for handshake-issued commands.  Completion of a
 * handshake command is tracked elsewhere, so the only job here is to
 * tell the caller to release the reply frame.
 */
static int
mpt_handshake_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	/* Nothing to be done. */
	return (/*free_reply*/TRUE);
}
495
496static int
497mpt_event_reply_handler(struct mpt_softc *mpt, request_t *req,
498 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
499{
500 int free_reply;
501
502 if (reply_frame == NULL) {
503 mpt_prt(mpt, "Event Handler: req %p:%u - Unexpected NULL reply\n",
504 req, req->serno);
505 return (/*free_reply*/TRUE);
506 }
507
508 free_reply = TRUE;
509 switch (reply_frame->Function) {
510 case MPI_FUNCTION_EVENT_NOTIFICATION:
511 {
512 MSG_EVENT_NOTIFY_REPLY *msg;

--- 118 unchanged lines hidden (view full) ---

631 mpt_send_cmd(mpt, ack_req);
632}
633
634/***************************** Interrupt Handling *****************************/
/*
 * Interrupt service routine.
 *
 * Drains the IOC's reply post queue.  Each descriptor is either an
 * "address reply" (MPI_ADDRESS_REPLY_A_BIT set: the descriptor encodes
 * the bus address of a full reply frame, shifted right by one bit) or a
 * "context reply" (the descriptor itself carries the message context).
 * The context is decomposed into a callback index and a request index,
 * and the registered reply handler for that callback is invoked; the
 * handler's return value decides whether the reply frame is returned to
 * the free pool.  'ntrips' bounds the number of loop iterations so a
 * wedged or misbehaving IOC cannot livelock the CPU.
 */
void
mpt_intr(void *arg)
{
	struct mpt_softc *mpt;
	uint32_t reply_desc;
	int ntrips = 0;

	mpt = (struct mpt_softc *)arg;
	while ((reply_desc = mpt_pop_reply_queue(mpt)) != MPT_REPLY_EMPTY) {
		request_t *req;
		MSG_DEFAULT_REPLY *reply_frame;
		uint32_t reply_baddr;
		uint32_t ctxt_idx;
		u_int cb_index;
		u_int req_index;
		int free_rf;

		req = NULL;
		reply_frame = NULL;
		reply_baddr = 0;
		if ((reply_desc & MPI_ADDRESS_REPLY_A_BIT) != 0) {
			u_int offset;
			/*
			 * Address reply: reconstruct the frame's bus address
			 * (descriptor holds it shifted right one bit) and
			 * ensure the reply frame is coherent before reading
			 * it.
			 */
			reply_baddr = (reply_desc << 1);
			offset = reply_baddr - (mpt->reply_phys & 0xFFFFFFFF);
			bus_dmamap_sync_range(mpt->reply_dmat,
			    mpt->reply_dmap, offset, MPT_REPLY_SIZE,
			    BUS_DMASYNC_POSTREAD);
			reply_frame = MPT_REPLY_OTOV(mpt, offset);
			ctxt_idx = le32toh(reply_frame->MsgContext);
		} else {
			uint32_t type;

			type = MPI_GET_CONTEXT_REPLY_TYPE(reply_desc);
			ctxt_idx = reply_desc;
			mpt_lprt(mpt, MPT_PRT_DEBUG1, "Context Reply: 0x%08x\n",
			    reply_desc);

			switch (type) {
			case MPI_CONTEXT_REPLY_TYPE_SCSI_INIT:
				ctxt_idx &= MPI_CONTEXT_REPLY_CONTEXT_MASK;
				break;
			case MPI_CONTEXT_REPLY_TYPE_SCSI_TARGET:
				/*
				 * Target-mode replies index a separate table
				 * of command pointers; validate the index and
				 * the backpointer before use.  On an
				 * inconsistency, bump ntrips to force an
				 * early exit from the service loop.
				 */
				ctxt_idx = GET_IO_INDEX(reply_desc);
				if (mpt->tgt_cmd_ptrs == NULL) {
					mpt_prt(mpt,
					    "mpt_intr: no target cmd ptrs\n");
					reply_desc = MPT_REPLY_EMPTY;
					break;
				}
				if (ctxt_idx >= mpt->tgt_cmds_allocated) {
					mpt_prt(mpt,
					    "mpt_intr: bad tgt cmd ctxt %u\n",
					    ctxt_idx);
					reply_desc = MPT_REPLY_EMPTY;
					ntrips = 1000;
					break;
				}
				req = mpt->tgt_cmd_ptrs[ctxt_idx];
				if (req == NULL) {
					mpt_prt(mpt, "no request backpointer "
					    "at index %u", ctxt_idx);
					reply_desc = MPT_REPLY_EMPTY;
					ntrips = 1000;
					break;
				}
				/*
				 * Reformulate ctxt_idx to be just as if
				 * it were another type of context reply
				 * so the code below will find the request
				 * via indexing into the pool.
				 */
				ctxt_idx =
				    req->index | mpt->scsi_tgt_handler_id;
				req = NULL;
				break;
			case MPI_CONTEXT_REPLY_TYPE_LAN:
				mpt_prt(mpt, "LAN CONTEXT REPLY: 0x%08x\n",
				    reply_desc);
				reply_desc = MPT_REPLY_EMPTY;
				break;
			default:
				mpt_prt(mpt, "Context Reply 0x%08x?\n", type);
				reply_desc = MPT_REPLY_EMPTY;
				break;
			}
			/* MPT_REPLY_EMPTY here marks a reply we discarded. */
			if (reply_desc == MPT_REPLY_EMPTY) {
				if (ntrips++ > 1000) {
					break;
				}
				continue;
			}
		}

		/* Split the context into callback and request indices. */
		cb_index = MPT_CONTEXT_TO_CBI(ctxt_idx);
		req_index = MPT_CONTEXT_TO_REQI(ctxt_idx);
		if (req_index < MPT_MAX_REQUESTS(mpt)) {
			req = &mpt->request_pool[req_index];
		}

		free_rf = mpt_reply_handlers[cb_index](mpt, req,
		    reply_desc, reply_frame);

		if (reply_frame != NULL && free_rf)
			mpt_free_reply(mpt, reply_baddr);

		/*
		 * If we got ourselves disabled, don't get stuck in a loop
		 */
		if (mpt->disabled) {
			mpt_disable_ints(mpt);
			break;
		}
		if (ntrips++ > 1000) {
			break;
		}
	}
}
755
756/******************************* Error Recovery *******************************/
757void
758mpt_complete_request_chain(struct mpt_softc *mpt, struct req_queue *chain,
759 u_int iocstatus)
760{

--- 6 unchanged lines hidden (view full) ---

767 while((req = TAILQ_FIRST(chain)) != NULL) {
768 MSG_REQUEST_HEADER *msg_hdr;
769 u_int cb_index;
770 TAILQ_REMOVE(chain, req, links);
771 msg_hdr = (MSG_REQUEST_HEADER *)req->req_vbuf;
772 ioc_status_frame.Function = msg_hdr->Function;
773 ioc_status_frame.MsgContext = msg_hdr->MsgContext;
774 cb_index = MPT_CONTEXT_TO_CBI(le32toh(msg_hdr->MsgContext));
775 mpt_reply_handlers[cb_index](mpt, req, msg_hdr->MsgContext,
776 &ioc_status_frame);
777 }
778}
779
780/********************************* Diagnostics ********************************/
781/*
782 * Perform a diagnostic dump of a reply frame.
783 */
784void

--- 23 unchanged lines hidden (view full) ---

808/* Busy wait for a door bell to be read by IOC */
809static int
810mpt_wait_db_ack(struct mpt_softc *mpt)
811{
812 int i;
813 for (i=0; i < MPT_MAX_WAIT; i++) {
814 if (!MPT_DB_IS_BUSY(mpt_rd_intr(mpt))) {
815 maxwait_ack = i > maxwait_ack ? i : maxwait_ack;
816 return (MPT_OK);
817 }
818 DELAY(200);
819 }
820 return (MPT_FAIL);
821}
822
823/* Busy wait for a door bell interrupt */
824static int
825mpt_wait_db_int(struct mpt_softc *mpt)
826{
827 int i;
828 for (i=0; i < MPT_MAX_WAIT; i++) {
829 if (MPT_DB_INTR(mpt_rd_intr(mpt))) {
830 maxwait_int = i > maxwait_int ? i : maxwait_int;
831 return MPT_OK;
832 }
833 DELAY(100);
834 }
835 return (MPT_FAIL);
836}
837
/* Wait for IOC to transition to a given state */
839void
840mpt_check_doorbell(struct mpt_softc *mpt)
841{
842 uint32_t db = mpt_rd_db(mpt);
843 if (MPT_STATE(db) != MPT_DB_STATE_RUNNING) {

--- 27 unchanged lines hidden (view full) ---

871static int
872mpt_soft_reset(struct mpt_softc *mpt)
873{
874 mpt_lprt(mpt, MPT_PRT_DEBUG, "soft reset\n");
875
876 /* Have to use hard reset if we are not in Running state */
877 if (MPT_STATE(mpt_rd_db(mpt)) != MPT_DB_STATE_RUNNING) {
878 mpt_prt(mpt, "soft reset failed: device not running\n");
879 return (MPT_FAIL);
880 }
881
882 /* If door bell is in use we don't have a chance of getting
883 * a word in since the IOC probably crashed in message
884 * processing. So don't waste our time.
885 */
886 if (MPT_DB_IS_IN_USE(mpt_rd_db(mpt))) {
887 mpt_prt(mpt, "soft reset failed: doorbell wedged\n");
888 return (MPT_FAIL);
889 }
890
891 /* Send the reset request to the IOC */
892 mpt_write(mpt, MPT_OFFSET_DOORBELL,
893 MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET << MPI_DOORBELL_FUNCTION_SHIFT);
894 if (mpt_wait_db_ack(mpt) != MPT_OK) {
895 mpt_prt(mpt, "soft reset failed: ack timeout\n");
896 return (MPT_FAIL);
897 }
898
899 /* Wait for the IOC to reload and come out of reset state */
900 if (mpt_wait_state(mpt, MPT_DB_STATE_READY) != MPT_OK) {
901 mpt_prt(mpt, "soft reset failed: device did not restart\n");
902 return (MPT_FAIL);
903 }
904
905 return MPT_OK;
906}
907
908static int
909mpt_enable_diag_mode(struct mpt_softc *mpt)
910{

--- 157 unchanged lines hidden (view full) ---

1068 * the specified wait condition, it should stop its wait.
1069 */
1070 mpt->reset_cnt++;
1071 MPT_PERS_FOREACH(mpt, pers)
1072 pers->reset(mpt, ret);
1073 }
1074
1075 if (reinit != 0) {
1076 ret = mpt_enable_ioc(mpt, 1);
1077 if (ret == MPT_OK) {
1078 mpt_enable_ints(mpt);
1079 }
1080 }
1081 if (ret != MPT_OK && retry_cnt++ < 2) {
1082 goto again;
1083 }
1084 return ret;

--- 14 unchanged lines hidden (view full) ---

1099 if ((nxt = req->chain) != NULL) {
1100 req->chain = NULL;
1101 mpt_free_request(mpt, nxt); /* NB: recursion */
1102 }
1103 req->serno = 0;
1104 req->ccb = NULL;
1105 req->state = REQ_STATE_FREE;
1106 if (LIST_EMPTY(&mpt->ack_frames)) {
1107 /*
1108 * Insert free ones at the tail
1109 */
1110 TAILQ_INSERT_TAIL(&mpt->request_free_list, req, links);
1111 if (mpt->getreqwaiter != 0) {
1112 mpt->getreqwaiter = 0;
1113 wakeup(&mpt->request_free_list);
1114 }
1115 return;
1116 }
1117
1118 /*

--- 16 unchanged lines hidden (view full) ---

1135retry:
1136 req = TAILQ_FIRST(&mpt->request_free_list);
1137 if (req != NULL) {
1138 KASSERT(req == &mpt->request_pool[req->index],
1139 ("mpt_get_request: corrupted request free list\n"));
1140 TAILQ_REMOVE(&mpt->request_free_list, req, links);
1141 req->state = REQ_STATE_ALLOCATED;
1142 req->chain = NULL;
1143 req->serno = mpt->sequence++;
1144 } else if (sleep_ok != 0) {
1145 mpt->getreqwaiter = 1;
1146 mpt_sleep(mpt, &mpt->request_free_list, PUSER, "mptgreq", 0);
1147 goto retry;
1148 }
1149 return req;
1150}
1151
1152/* Pass the command to the IOC */
1153void
1154mpt_send_cmd(struct mpt_softc *mpt, request_t *req)
1155{
1156 uint32_t *pReq;
1157
1158 pReq = req->req_vbuf;
1159 if (mpt->verbose > MPT_PRT_TRACE) {
1160 int offset;
1161#if __FreeBSD_version >= 500000
1162 mpt_prt(mpt, "Send Request %d (%jx):",
1163 req->index, (uintmax_t) req->req_pbuf);
1164#else
1165 mpt_prt(mpt, "Send Request %d (%llx):",
1166 req->index, (unsigned long long) req->req_pbuf);
1167#endif
1168 for (offset = 0; offset < mpt->request_frame_size; offset++) {
1169 if ((offset & 0x7) == 0) {
1170 mpt_prtc(mpt, "\n");
1171 mpt_prt(mpt, " ");
1172 }
1173 mpt_prtc(mpt, " %08x", pReq[offset]);
1174 }
1175 mpt_prtc(mpt, "\n");

--- 27 unchanged lines hidden (view full) ---

1203 int timeout;
1204 u_int saved_cnt;
1205
1206 /*
1207 * timeout is in ms. 0 indicates infinite wait.
1208 * Convert to ticks or 500us units depending on
1209 * our sleep mode.
1210 */
1211 if (sleep_ok != 0) {
1212 timeout = (time_ms * hz) / 1000;
1213 } else {
1214 timeout = time_ms * 2;
1215 }
1216 req->state |= REQ_STATE_NEED_WAKEUP;
1217 mask &= ~REQ_STATE_NEED_WAKEUP;
1218 saved_cnt = mpt->reset_cnt;
1219 while ((req->state & mask) != state && mpt->reset_cnt == saved_cnt) {
1220 if (sleep_ok != 0) {
1221 error = mpt_sleep(mpt, req, PUSER, "mptreq", timeout);
1222 if (error == EWOULDBLOCK) {
1223 timeout = 0;
1224 break;
1225 }
1226 } else {
1227 if (time_ms != 0 && --timeout == 0) {
1228 break;
1229 }
1230 DELAY(500);
1231 mpt_intr(mpt);
1232 }
1233 }
1234 req->state &= ~REQ_STATE_NEED_WAKEUP;
1235 if (mpt->reset_cnt != saved_cnt) {
1236 return (EIO);
1237 }
1238 if (time_ms && timeout <= 0) {
1239 MSG_REQUEST_HEADER *msg_hdr = req->req_vbuf;
1240 mpt_prt(mpt, "mpt_wait_req(%x) timed out\n", msg_hdr->Function);
1241 return (ETIMEDOUT);
1242 }
1243 return (0);
1244}
1245
1246/*
1247 * Send a command to the IOC via the handshake register.
1248 *
1249 * Only done at initialization time and for certain unusual
1250 * commands such as device/bus reset as specified by LSI.

--- 282 unchanged lines hidden (view full) ---

1533 req->IOCStatus);
1534 error = EIO;
1535 break;
1536 }
1537 mpt_free_request(mpt, req);
1538 return (error);
1539}
1540
/*
 * Read a configuration page from the IOC.
 *
 * 'Action' is the config action, 'PageAddress' selects the page
 * instance, and 'hdr' supplies the page header (version, length,
 * number, type) from a prior header query.  On success, 'len' bytes of
 * page data are copied from the request's scratch area (at offset
 * MPT_RQSL(mpt) in the request buffer) back into 'hdr'.
 *
 * Returns 0 on success, -1 on any failure.
 *
 * NOTE(review): on the timeout path the request is returned to the
 * caller without being freed -- presumably deliberate because the IOC
 * may still own the request; confirm this is intentional rather than a
 * leak.
 */
int
mpt_read_cfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress,
		  CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok,
		  int timeout_ms)
{
	request_t *req;
	int error;

	req = mpt_get_request(mpt, sleep_ok);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_read_cfg_page: Get request failed!\n");
		return (-1);
	}

	error = mpt_issue_cfg_req(mpt, req, Action, hdr->PageVersion,
				  hdr->PageLength, hdr->PageNumber,
				  hdr->PageType & MPI_CONFIG_PAGETYPE_MASK,
				  PageAddress, req->req_pbuf + MPT_RQSL(mpt),
				  len, sleep_ok, timeout_ms);
	if (error != 0) {
		mpt_prt(mpt, "read_cfg_page(%d) timed out\n", Action);
		return (-1);
	}

	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
		mpt_prt(mpt, "mpt_read_cfg_page: Config Info Status %x\n",
			req->IOCStatus);
		mpt_free_request(mpt, req);
		return (-1);
	}
	/* Make the DMA'd page data visible to the CPU before copying. */
	bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
	    BUS_DMASYNC_POSTREAD);
	memcpy(hdr, ((uint8_t *)req->req_vbuf)+MPT_RQSL(mpt), len);
	mpt_free_request(mpt, req);
	return (0);
}
1577
1578int
1579mpt_write_cfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress,
1580 CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok,
1581 int timeout_ms)

--- 10 unchanged lines hidden (view full) ---

1592 return (-1);
1593 }
1594 hdr->PageType &= MPI_CONFIG_PAGETYPE_MASK,
1595
1596 req = mpt_get_request(mpt, sleep_ok);
1597 if (req == NULL)
1598 return (-1);
1599
1600 memcpy(((caddr_t)req->req_vbuf)+MPT_RQSL(mpt), hdr, len);
1601 /* Restore stripped out attributes */
1602 hdr->PageType |= hdr_attr;
1603
1604 error = mpt_issue_cfg_req(mpt, req, Action, hdr->PageVersion,
1605 hdr->PageLength, hdr->PageNumber,
1606 hdr->PageType & MPI_CONFIG_PAGETYPE_MASK,
1607 PageAddress, req->req_pbuf + MPT_RQSL(mpt),
1608 len, sleep_ok, timeout_ms);
1609 if (error != 0) {
1610 mpt_prt(mpt, "mpt_write_cfg_page timed out\n");
1611 return (-1);
1612 }
1613
1614 if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
1615 mpt_prt(mpt, "mpt_write_cfg_page: Config Info Status %x\n",

--- 153 unchanged lines hidden (view full) ---

1769 }
1770
1771 mpt_raid_wakeup(mpt);
1772
1773 return (0);
1774}
1775
/*
 * Enable IOC port.
 *
 * Builds a PORT_ENABLE request for 'port', sends it via shared memory,
 * and busy-waits (sleep_ok FALSE) for completion.  SAS and FC parts
 * are given 30 seconds to complete; others 3 seconds.
 *
 * Returns 0 on success, -1 on failure.
 *
 * NOTE(review): on the timeout path the request is not freed --
 * presumably because the IOC may still own it; confirm this is
 * intentional rather than a leak.
 */
static int
mpt_send_port_enable(struct mpt_softc *mpt, int port)
{
	request_t *req;
	MSG_PORT_ENABLE *enable_req;
	int error;

	req = mpt_get_request(mpt, /*sleep_ok*/FALSE);
	if (req == NULL)
		return (-1);

	enable_req = req->req_vbuf;
	bzero(enable_req, MPT_RQSL(mpt));

	enable_req->Function = MPI_FUNCTION_PORT_ENABLE;
	enable_req->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_CONFIG);
	enable_req->PortNumber = port;

	mpt_check_doorbell(mpt);
	mpt_lprt(mpt, MPT_PRT_DEBUG, "enabling port %d\n", port);

	mpt_send_cmd(mpt, req);
	error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
	    /*sleep_ok*/FALSE,
	    /*time_ms*/(mpt->is_sas || mpt->is_fc)? 30000 : 3000);
	if (error != 0) {
		mpt_prt(mpt, "port %d enable timed out\n", port);
		return (-1);
	}
	mpt_free_request(mpt, req);
	mpt_lprt(mpt, MPT_PRT_DEBUG, "enabled port %d\n", port);
	return (0);
}
1812
1813/*
1814 * Enable/Disable asynchronous event reporting.
1815 *
1816 * NB: this is the first command we send via shared memory
1817 * instead of the handshake register.

--- 40 unchanged lines hidden (view full) ---

1858 /* Mask all interrupts */
1859 mpt_write(mpt, MPT_OFFSET_INTR_MASK,
1860 MPT_INTR_REPLY_MASK | MPT_INTR_DB_MASK);
1861}
1862
/*
 * Register a read/write "debug" sysctl under this device's sysctl tree
 * so mpt->verbose (the driver's logging verbosity) can be tuned at
 * runtime.  Per-device sysctl contexts only exist on FreeBSD 5.x and
 * later, so this is a no-op on older releases.
 */
static void
mpt_sysctl_attach(struct mpt_softc *mpt)
{
#if __FreeBSD_version >= 500000
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		       "debug", CTLFLAG_RW, &mpt->verbose, 0,
		       "Debugging/Verbose level");
#endif
}
1875
1876int
1877mpt_attach(struct mpt_softc *mpt)
1878{
1879 struct mpt_personality *pers;
1880 int i;
1881 int error;
1882
1883 for (i = 0; i < MPT_MAX_PERSONALITIES; i++) {
1884 pers = mpt_personalities[i];
1885 if (pers == NULL) {
1886 continue;
1887 }
1888 if (pers->probe(mpt) == 0) {
1889 error = pers->attach(mpt);
1890 if (error != 0) {
1891 mpt_detach(mpt);
1892 return (error);
1893 }
1894 mpt->mpt_pers_mask |= (0x1 << pers->id);
1895 pers->use_count++;
1896 }
1897 }
1898
1899 /*
1900 * Now that we've attached everything, do the enable function
1901 * for all of the personalities. This allows the personalities
1902 * to do setups that are appropriate for them prior to enabling
1903 * any ports.
1904 */
1905 for (i = 0; i < MPT_MAX_PERSONALITIES; i++) {
1906 pers = mpt_personalities[i];
1907 if (pers != NULL && MPT_PERS_ATTACHED(pers, mpt) != 0) {
1908 error = pers->enable(mpt);
1909 if (error != 0) {
1910 mpt_prt(mpt, "personality %s attached but would"
1911 " not enable (%d)\n", pers->name, error);
1912 mpt_detach(mpt);
1913 return (error);
1914 }
1915 }
1916 }
1917 return (0);
1918}
1919
1920int
1921mpt_shutdown(struct mpt_softc *mpt)
1922{
1923 struct mpt_personality *pers;
1924
1925 MPT_PERS_FOREACH_REVERSE(mpt, pers) {
1926 pers->shutdown(mpt);
1927 }
1928 return (0);
1929}
1930
1931int
1932mpt_detach(struct mpt_softc *mpt)
1933{
1934 struct mpt_personality *pers;
1935

--- 10 unchanged lines hidden (view full) ---

1946mpt_core_load(struct mpt_personality *pers)
1947{
1948 int i;
1949
1950 /*
1951 * Setup core handlers and insert the default handler
1952 * into all "empty slots".
1953 */
1954 for (i = 0; i < MPT_NUM_REPLY_HANDLERS; i++) {
1955 mpt_reply_handlers[i] = mpt_default_reply_handler;
1956 }
1957
1958 mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_EVENTS)] =
1959 mpt_event_reply_handler;
1960 mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_CONFIG)] =
1961 mpt_config_reply_handler;
1962 mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_HANDSHAKE)] =
1963 mpt_handshake_reply_handler;
1964 return (0);
1965}
1966
1967/*
1968 * Initialize per-instance driver data and perform
1969 * initial controller configuration.
1970 */
1971int
1972mpt_core_attach(struct mpt_softc *mpt)
1973{
1974 int val;
1975 int error;
1976
1977
1978 LIST_INIT(&mpt->ack_frames);
1979
1980 /* Put all request buffers on the free list */
1981 TAILQ_INIT(&mpt->request_pending_list);
1982 TAILQ_INIT(&mpt->request_free_list);
1983 for (val = 0; val < MPT_MAX_REQUESTS(mpt); val++) {
1984 mpt_free_request(mpt, &mpt->request_pool[val]);
1985 }
1986
1987 for (val = 0; val < MPT_MAX_LUNS; val++) {
1988 STAILQ_INIT(&mpt->trt[val].atios);
1989 STAILQ_INIT(&mpt->trt[val].inots);
1990 }
1991 STAILQ_INIT(&mpt->trt_wildcard.atios);
1992 STAILQ_INIT(&mpt->trt_wildcard.inots);
1993
1994 mpt->scsi_tgt_handler_id = MPT_HANDLER_ID_NONE;
1995
1996 mpt_sysctl_attach(mpt);
1997
1998 mpt_lprt(mpt, MPT_PRT_DEBUG, "doorbell req = %s\n",
1999 mpt_ioc_diag(mpt_read(mpt, MPT_OFFSET_DOORBELL)));
2000
2001 error = mpt_configure_ioc(mpt);
2002
2003 return (error);
2004}
2005
/*
 * Enable hook for the core personality: turn on async event
 * reporting, enable interrupts, and enable port 0.  Returns 0 on
 * success, ENXIO if the port enable fails.
 */
int
mpt_core_enable(struct mpt_softc *mpt)
{
	/*
	 * We enter with the IOC enabled, but async events
	 * not enabled, ports not enabled and interrupts
	 * not enabled.
	 */

	/*
	 * Enable asynchronous event reporting- all personalities
	 * have attached so that they should be able to now field
	 * async events.
	 */
	mpt_send_event_request(mpt, 1);

	/*
	 * Catch any pending interrupts
	 *
	 * This seems to be crucial- otherwise
	 * the portenable below times out.
	 */
	mpt_intr(mpt);

	/*
	 * Enable Interrupts
	 */
	mpt_enable_ints(mpt);

	/*
	 * Poll once more for anything that arrived while interrupts
	 * were being enabled; like the poll above, this seems to be
	 * crucial- otherwise the portenable below times out.
	 */
	mpt_intr(mpt);

	/*
	 * Enable the port.
	 *
	 * NOTE(review): an earlier comment claimed this was skipped
	 * for MPT_ROLE_NONE, but no role check exists here -- confirm
	 * whether one was intended.
	 */
	if (mpt_send_port_enable(mpt, 0) != MPT_OK) {
		mpt_prt(mpt, "failed to enable port 0\n");
		return (ENXIO);
	}
	return (0);
}
2052
/*
 * Shutdown hook for the core personality: quiesce the controller by
 * masking its interrupts.
 */
void
mpt_core_shutdown(struct mpt_softc *mpt)
{
	mpt_disable_ints(mpt);
}
2058
/*
 * Detach hook for the core personality.  Currently identical to the
 * shutdown hook: only masks interrupts.  Resource teardown presumably
 * happens elsewhere in the bus-attachment code -- TODO confirm.
 */
void
mpt_core_detach(struct mpt_softc *mpt)
{
	mpt_disable_ints(mpt);
}
2064
/*
 * Unload hook for the core personality: nothing to undo here.
 */
int
mpt_core_unload(struct mpt_personality *pers)
{
	/* Unload is always successful. */
	return (0);
}

--- 135 unchanged lines hidden (view full) ---

2206 * No need to reset if the IOC is already in the READY state.
2207 *
2208 * Force reset if initialization failed previously.
2209 * Note that a hard_reset of the second channel of a '929
2210 * will stop operation of the first channel. Hopefully, if the
2211 * first channel is ok, the second will not require a hard
2212 * reset.
2213 */
2214 if (needreset || MPT_STATE(mpt_rd_db(mpt)) !=
2215 MPT_DB_STATE_READY) {
2216 if (mpt_reset(mpt, FALSE) != MPT_OK) {
2217 continue;
2218 }
2219 }
2220 needreset = 0;
2221
2222 if (mpt_get_iocfacts(mpt, &facts) != MPT_OK) {
2223 mpt_prt(mpt, "mpt_get_iocfacts failed\n");
2224 needreset = 1;
2225 continue;
2226 }

--- 126 unchanged lines hidden (view full) ---

2353 mpt->mpt_proto_flags = pfp.ProtocolFlags;
2354 if (pfp.PortType != MPI_PORTFACTS_PORTTYPE_SCSI &&
2355 pfp.PortType != MPI_PORTFACTS_PORTTYPE_SAS &&
2356 pfp.PortType != MPI_PORTFACTS_PORTTYPE_FC) {
2357 mpt_prt(mpt, "Unsupported Port Type (%x)\n",
2358 pfp.PortType);
2359 return (ENXIO);
2360 }
2361 mpt->mpt_max_tgtcmds = le16toh(pfp.MaxPostedCmdBuffers);
2362
2363 if (pfp.PortType == MPI_PORTFACTS_PORTTYPE_FC) {
2364 mpt->is_fc = 1;
2365 mpt->is_sas = 0;
2366 } else if (pfp.PortType == MPI_PORTFACTS_PORTTYPE_SAS) {
2367 mpt->is_fc = 0;
2368 mpt->is_sas = 1;
2369 } else {
2370 mpt->is_fc = 0;
2371 mpt->is_sas = 0;
2372 }
2373 mpt->mpt_ini_id = pfp.PortSCSIID;
2374 mpt->mpt_max_devices = pfp.MaxDevices;
2375
2376 /*
2377 * Match our expected role with what this port supports.
2378 *
2379 * We only do this to meet expectations. That is, if the
2380 * user has specified they want initiator role, and we
2381 * don't support it, that's an error we return back upstream.
2382 */
2383
2384 mpt->cap = MPT_ROLE_NONE;
2385 if (pfp.ProtocolFlags & MPI_PORTFACTS_PROTOCOL_INITIATOR) {
2386 mpt->cap |= MPT_ROLE_INITIATOR;
2387 }
2388 if (pfp.ProtocolFlags & MPI_PORTFACTS_PROTOCOL_TARGET) {
2389 mpt->cap |= MPT_ROLE_TARGET;
2390 }
2391 if (mpt->cap == MPT_ROLE_NONE) {
2392 mpt_prt(mpt, "port does not support either target or "
2393 "initiator role\n");
2394 return (ENXIO);
2395 }
2396
2397 if ((mpt->role & MPT_ROLE_INITIATOR) &&
2398 (mpt->cap & MPT_ROLE_INITIATOR) == 0) {
2399 mpt_prt(mpt, "port does not support initiator role\n");
2400 return (ENXIO);
2401 }
2402
2403 if ((mpt->role & MPT_ROLE_TARGET) &&
2404 (mpt->cap & MPT_ROLE_TARGET) == 0) {
2405 mpt_prt(mpt, "port does not support target role\n");
2406 return (ENXIO);
2407 }
2408
2409 if (mpt_enable_ioc(mpt, 0) != MPT_OK) {
2410 mpt_prt(mpt, "unable to initialize IOC\n");
2411 return (ENXIO);
2412 }
2413
2414 /*
2415 * Read IOC configuration information.
2416 */
2417 mpt_read_config_info_ioc(mpt);
2418
2419 /* Everything worked */
2420 break;
2421 }
2422
2423 if (try >= MPT_MAX_TRYS) {
2424 mpt_prt(mpt, "failed to initialize IOC");
2425 return (EIO);
2426 }
2427
2428 return (0);
2429}
2430
2431static int
2432mpt_enable_ioc(struct mpt_softc *mpt, int portenable)
2433{
2434 uint32_t pptr;
2435 int val;
2436
2437 if (mpt_send_ioc_init(mpt, MPI_WHOINIT_HOST_DRIVER) != MPT_OK) {
2438 mpt_prt(mpt, "mpt_send_ioc_init failed\n");
2439 return (EIO);
2440 }

--- 14 unchanged lines hidden (view full) ---

2455 for (val = 0, pptr = mpt->reply_phys;
2456 (pptr + MPT_REPLY_SIZE) < (mpt->reply_phys + PAGE_SIZE);
2457 pptr += MPT_REPLY_SIZE) {
2458 mpt_free_reply(mpt, pptr);
2459 if (++val == mpt->mpt_global_credits - 1)
2460 break;
2461 }
2462
2463
2464 /*
2465 * Enable the port if asked
2466 */
2467 if (portenable) {
2468 /*
2469 * Enable asynchronous event reporting
2470 */
2471 mpt_send_event_request(mpt, 1);
2472
2473 if (mpt_send_port_enable(mpt, 0) != MPT_OK) {
2474 mpt_prt(mpt, "failed to enable port 0\n");
2475 return (ENXIO);
2476 }
2477 }
2478 return (MPT_OK);
2479}