/*-
 * Generic routines for LSI Fusion adapters.
 * FreeBSD Version.
 *
 * Copyright (c) 2000, 2001 by Greg Ansley
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2002, 2006 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Support from Chris Ellsworth in order to make SAS adapters work
 * is gratefully acknowledged.
 */
/*-
 * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
 * Copyright (c) 2005, WHEEL Sp. z o.o.
 * Copyright (c) 2004, 2005 Justin T. Gibbs
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/mpt/mpt.c 156796 2006-03-17 04:52:27Z mjacob $");

#include <dev/mpt/mpt.h>
#include <dev/mpt/mpt_cam.h> /* XXX For static handler registration */
#include <dev/mpt/mpt_raid.h> /* XXX For static handler registration */

#include <dev/mpt/mpilib/mpi.h>
#include <dev/mpt/mpilib/mpi_ioc.h>

#include <sys/sysctl.h>

#define MPT_MAX_TRYS 3
#define MPT_MAX_WAIT 300000
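
/*
 * MPT_MAX_WAIT bounds the busy-wait loops in mpt_wait_db_ack(),
 * mpt_wait_db_int() and mpt_wait_state() below; each iteration also
 * spins in DELAY(), so the effective timeout is MPT_MAX_WAIT times
 * the per-loop delay.
 */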

static int maxwait_ack = 0;
static int maxwait_int = 0;
static int maxwait_state = 0;

TAILQ_HEAD(, mpt_softc) mpt_tailq = TAILQ_HEAD_INITIALIZER(mpt_tailq);
mpt_reply_handler_t *mpt_reply_handlers[MPT_NUM_REPLY_HANDLERS];

static mpt_reply_handler_t mpt_default_reply_handler;
static mpt_reply_handler_t mpt_config_reply_handler;
static mpt_reply_handler_t mpt_handshake_reply_handler;
static mpt_reply_handler_t mpt_event_reply_handler;
static void mpt_send_event_ack(struct mpt_softc *mpt, request_t *ack_req,
    MSG_EVENT_NOTIFY_REPLY *msg, uint32_t context);
static int mpt_send_event_request(struct mpt_softc *mpt, int onoff);
static int mpt_soft_reset(struct mpt_softc *mpt);
static void mpt_hard_reset(struct mpt_softc *mpt);
static int mpt_configure_ioc(struct mpt_softc *mpt);
static int mpt_enable_ioc(struct mpt_softc *mpt);

/************************* Personality Module Support *************************/
/*
 * We include one extra entry that is guaranteed to be NULL
 * to simplify our iterator.
 */
static struct mpt_personality *mpt_personalities[MPT_MAX_PERSONALITIES + 1];
static __inline struct mpt_personality*
	mpt_pers_find(struct mpt_softc *, u_int);
static __inline struct mpt_personality*
	mpt_pers_find_reverse(struct mpt_softc *, u_int);

static __inline struct mpt_personality *
mpt_pers_find(struct mpt_softc *mpt, u_int start_at)
{
	KASSERT(start_at <= MPT_MAX_PERSONALITIES,
		("mpt_pers_find: starting position out of range\n"));

	while (start_at < MPT_MAX_PERSONALITIES
	    && (mpt->mpt_pers_mask & (0x1 << start_at)) == 0) {
		start_at++;
	}
	return (mpt_personalities[start_at]);
}

/*
 * Used infrequently, so there is no need to optimize as in the forward
 * traversal, which relies on the guaranteed-NULL entry at MAX+1 to
 * terminate the loop.
 */
static __inline struct mpt_personality *
mpt_pers_find_reverse(struct mpt_softc *mpt, u_int start_at)
{
	while (start_at < MPT_MAX_PERSONALITIES
	    && (mpt->mpt_pers_mask & (0x1 << start_at)) == 0) {
		start_at--;
	}
	if (start_at < MPT_MAX_PERSONALITIES)
		return (mpt_personalities[start_at]);
	return (NULL);
}

#define MPT_PERS_FOREACH(mpt, pers)				\
	for (pers = mpt_pers_find(mpt, /*start_at*/0);		\
	     pers != NULL;					\
	     pers = mpt_pers_find(mpt, /*start_at*/pers->id+1))

#define MPT_PERS_FOREACH_REVERSE(mpt, pers)				\
	for (pers = mpt_pers_find_reverse(mpt, MPT_MAX_PERSONALITIES-1);\
	     pers != NULL;						\
	     pers = mpt_pers_find_reverse(mpt, /*start_at*/pers->id-1))
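
/*
 * Example traversal (the same pattern the event code below uses to fan
 * replies out to every attached personality):
 *
 *	struct mpt_personality *pers;
 *
 *	MPT_PERS_FOREACH(mpt, pers)
 *		handled += pers->event(mpt, req, msg);
 */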

static mpt_load_handler_t mpt_stdload;
static mpt_probe_handler_t mpt_stdprobe;
static mpt_attach_handler_t mpt_stdattach;
static mpt_event_handler_t mpt_stdevent;
static mpt_reset_handler_t mpt_stdreset;
static mpt_shutdown_handler_t mpt_stdshutdown;
static mpt_detach_handler_t mpt_stddetach;
static mpt_unload_handler_t mpt_stdunload;
static struct mpt_personality mpt_default_personality =
{
	.load = mpt_stdload,
	.probe = mpt_stdprobe,
	.attach = mpt_stdattach,
	.event = mpt_stdevent,
	.reset = mpt_stdreset,
	.shutdown = mpt_stdshutdown,
	.detach = mpt_stddetach,
	.unload = mpt_stdunload
};

static mpt_load_handler_t mpt_core_load;
static mpt_attach_handler_t mpt_core_attach;
static mpt_reset_handler_t mpt_core_ioc_reset;
static mpt_event_handler_t mpt_core_event;
static mpt_shutdown_handler_t mpt_core_shutdown;
static mpt_detach_handler_t mpt_core_detach;
static mpt_unload_handler_t mpt_core_unload;
static struct mpt_personality mpt_core_personality =
{
	.name = "mpt_core",
	.load = mpt_core_load,
	.attach = mpt_core_attach,
	.event = mpt_core_event,
	.reset = mpt_core_ioc_reset,
	.shutdown = mpt_core_shutdown,
	.detach = mpt_core_detach,
	.unload = mpt_core_unload,
};

/*
 * Manual declaration so that DECLARE_MPT_PERSONALITY doesn't need
 * ordering information.  We want the core to always register FIRST.
 * Other personality modules are registered at SI_ORDER_SECOND.
 */
static moduledata_t mpt_core_mod = {
	"mpt_core", mpt_modevent, &mpt_core_personality
};
DECLARE_MODULE(mpt_core, mpt_core_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
MODULE_VERSION(mpt_core, 1);
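
/*
 * Sketch (hypothetical, for illustration only) of how a non-core
 * personality would normally hook itself in via the macro named above
 * instead of a manual moduledata_t:
 *
 *	DECLARE_MPT_PERSONALITY(mpt_cam, SI_ORDER_SECOND);
 */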

#define MPT_PERS_ATACHED(pers, mpt) \
	((mpt)->mpt_pers_mask & (0x1 << pers->id))


int
mpt_modevent(module_t mod, int type, void *data)
{
	struct mpt_personality *pers;
	int error;

	pers = (struct mpt_personality *)data;

	error = 0;
	switch (type) {
	case MOD_LOAD:
	{
		mpt_load_handler_t **def_handler;
		mpt_load_handler_t **pers_handler;
		int i;

		for (i = 0; i < MPT_MAX_PERSONALITIES; i++) {
			if (mpt_personalities[i] == NULL)
				break;
		}
		if (i >= MPT_MAX_PERSONALITIES) {
			error = ENOMEM;
			break;
		}
		pers->id = i;
		mpt_personalities[i] = pers;

		/* Install standard/noop handlers for any NULL entries. */
		def_handler = MPT_PERS_FIRST_HANDLER(&mpt_default_personality);
		pers_handler = MPT_PERS_FIRST_HANDLER(pers);
		while (pers_handler <= MPT_PERS_LAST_HANDLER(pers)) {
			if (*pers_handler == NULL)
				*pers_handler = *def_handler;
			pers_handler++;
			def_handler++;
		}

		error = (pers->load(pers));
		if (error != 0)
			mpt_personalities[i] = NULL;
		break;
	}
	case MOD_SHUTDOWN:
		break;
	case MOD_QUIESCE:
		break;
	case MOD_UNLOAD:
		error = pers->unload(pers);
		mpt_personalities[pers->id] = NULL;
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}

int
mpt_stdload(struct mpt_personality *pers)
{
	/* Load is always successful. */
	return (0);
}

int
mpt_stdprobe(struct mpt_softc *mpt)
{
	/* Probe is always successful. */
	return (0);
}

int
mpt_stdattach(struct mpt_softc *mpt)
{
	/* Attach is always successful. */
	return (0);
}

int
mpt_stdevent(struct mpt_softc *mpt, request_t *req, MSG_EVENT_NOTIFY_REPLY *msg)
{
	mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_stdevent: 0x%x\n", msg->Event & 0xFF);
	/* Event was not for us. */
	return (0);
}

void
mpt_stdreset(struct mpt_softc *mpt, int type)
{
}

void
mpt_stdshutdown(struct mpt_softc *mpt)
{
}

void
mpt_stddetach(struct mpt_softc *mpt)
{
}

int
mpt_stdunload(struct mpt_personality *pers)
{
	/* Unload is always successful. */
	return (0);
}

/******************************* Bus DMA Support ******************************/
void
mpt_map_rquest(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct mpt_map_info *map_info;

	map_info = (struct mpt_map_info *)arg;
	map_info->error = error;
	map_info->phys = segs->ds_addr;
}

/**************************** Reply/Event Handling ****************************/
int
mpt_register_handler(struct mpt_softc *mpt, mpt_handler_type type,
    mpt_handler_t handler, uint32_t *phandler_id)
{

	switch (type) {
	case MPT_HANDLER_REPLY:
	{
		u_int cbi;
		u_int free_cbi;

		if (phandler_id == NULL)
			return (EINVAL);

		free_cbi = MPT_HANDLER_ID_NONE;
		for (cbi = 0; cbi < MPT_NUM_REPLY_HANDLERS; cbi++) {
			/*
			 * If the same handler is registered multiple
			 * times, don't error out.  Just return the
			 * index of the original registration.
			 */
			if (mpt_reply_handlers[cbi] == handler.reply_handler) {
				*phandler_id = MPT_CBI_TO_HID(cbi);
				return (0);
			}

			/*
			 * Fill from the front in the hope that
			 * all registered handlers consume only a
			 * single cache line.
			 *
			 * We don't break on the first empty slot so
			 * that the full table is checked to see if
			 * this handler was previously registered.
			 */
			if (free_cbi == MPT_HANDLER_ID_NONE
			    && (mpt_reply_handlers[cbi]
			     == mpt_default_reply_handler))
				free_cbi = cbi;
		}
		if (free_cbi == MPT_HANDLER_ID_NONE)
			return (ENOMEM);
		mpt_reply_handlers[free_cbi] = handler.reply_handler;
		*phandler_id = MPT_CBI_TO_HID(free_cbi);
		break;
	}
	default:
		mpt_prt(mpt, "mpt_register_handler unknown type %d\n", type);
		return (EINVAL);
	}
	return (0);
}
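
/*
 * Sketch (hypothetical names) of a typical registration from a
 * personality module; the reply_handler member of the mpt_handler_t
 * union selects the table updated above:
 *
 *	mpt_handler_t handler;
 *	uint32_t handler_id;
 *
 *	handler.reply_handler = my_reply_handler;
 *	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
 *	    &handler_id);
 */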

int
mpt_deregister_handler(struct mpt_softc *mpt, mpt_handler_type type,
    mpt_handler_t handler, uint32_t handler_id)
{

	switch (type) {
	case MPT_HANDLER_REPLY:
	{
		u_int cbi;

		cbi = MPT_CBI(handler_id);
		if (cbi >= MPT_NUM_REPLY_HANDLERS
		    || mpt_reply_handlers[cbi] != handler.reply_handler)
			return (ENOENT);
		mpt_reply_handlers[cbi] = mpt_default_reply_handler;
		break;
	}
	default:
		mpt_prt(mpt, "mpt_deregister_handler unknown type %d\n", type);
		return (EINVAL);
	}
	return (0);
}

static int
mpt_default_reply_handler(struct mpt_softc *mpt, request_t *req,
    MSG_DEFAULT_REPLY *reply_frame)
{
	mpt_prt(mpt, "XXXX Default Handler Called.  Req %p, Frame %p\n",
	    req, reply_frame);

	if (reply_frame != NULL)
		mpt_dump_reply_frame(mpt, reply_frame);

	mpt_prt(mpt, "XXXX Reply Frame Ignored\n");

	return (/*free_reply*/TRUE);
}

static int
mpt_config_reply_handler(struct mpt_softc *mpt, request_t *req,
    MSG_DEFAULT_REPLY *reply_frame)
{
	if (req != NULL) {
		if (reply_frame != NULL) {
			MSG_CONFIG *cfgp;
			MSG_CONFIG_REPLY *reply;

			cfgp = (MSG_CONFIG *)req->req_vbuf;
			reply = (MSG_CONFIG_REPLY *)reply_frame;
			req->IOCStatus = le16toh(reply_frame->IOCStatus);
			bcopy(&reply->Header, &cfgp->Header,
			    sizeof(cfgp->Header));
		}
		req->state &= ~REQ_STATE_QUEUED;
		req->state |= REQ_STATE_DONE;
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);

		if ((req->state & REQ_STATE_NEED_WAKEUP) != 0)
			wakeup(req);
	}

	return (/*free_reply*/TRUE);
}

static int
mpt_handshake_reply_handler(struct mpt_softc *mpt, request_t *req,
    MSG_DEFAULT_REPLY *reply_frame)
{
	/* Nothing to be done. */
	return (/*free_reply*/TRUE);
}

static int
mpt_event_reply_handler(struct mpt_softc *mpt, request_t *req,
    MSG_DEFAULT_REPLY *reply_frame)
{
	int free_reply;

	if (reply_frame == NULL) {
		mpt_prt(mpt, "Event Handler: req %p - Unexpected NULL reply\n",
		    req);
		return (/*free_reply*/TRUE);
	}

	free_reply = TRUE;
	switch (reply_frame->Function) {
	case MPI_FUNCTION_EVENT_NOTIFICATION:
	{
		MSG_EVENT_NOTIFY_REPLY *msg;
		struct mpt_personality *pers;
		u_int handled;

		handled = 0;
		msg = (MSG_EVENT_NOTIFY_REPLY *)reply_frame;
		MPT_PERS_FOREACH(mpt, pers)
			handled += pers->event(mpt, req, msg);

		if (handled == 0 && mpt->mpt_pers_mask == 0) {
			mpt_lprt(mpt, MPT_PRT_INFO,
				"No Handlers For Any Event Notify Frames. "
				"Event %#x (ACK %sequired).\n",
				msg->Event, msg->AckRequired? "r" : "not r");
		} else if (handled == 0) {
			mpt_lprt(mpt, MPT_PRT_WARN,
				"Unhandled Event Notify Frame. Event %#x "
				"(ACK %sequired).\n",
				msg->Event, msg->AckRequired? "r" : "not r");
		}

		if (msg->AckRequired) {
			request_t *ack_req;
			uint32_t context;

			context = htole32(req->index|MPT_REPLY_HANDLER_EVENTS);
			ack_req = mpt_get_request(mpt, /*sleep_ok*/FALSE);
			if (ack_req == NULL) {
				struct mpt_evtf_record *evtf;

				evtf = (struct mpt_evtf_record *)reply_frame;
				evtf->context = context;
				LIST_INSERT_HEAD(&mpt->ack_frames, evtf, links);
				free_reply = FALSE;
				break;
			}
			mpt_send_event_ack(mpt, ack_req, msg, context);
		}
		break;
	}
	case MPI_FUNCTION_PORT_ENABLE:
		mpt_lprt(mpt, MPT_PRT_DEBUG, "enable port reply\n");
		break;
	case MPI_FUNCTION_EVENT_ACK:
		break;
	default:
		mpt_prt(mpt, "Unknown Event Function: %x\n",
			reply_frame->Function);
		break;
	}

	if (req != NULL
	    && (reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0) {
		req->state &= ~REQ_STATE_QUEUED;
		req->state |= REQ_STATE_DONE;
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);

		if ((req->state & REQ_STATE_NEED_WAKEUP) != 0)
			wakeup(req);
		else
			mpt_free_request(mpt, req);
	}
	return (free_reply);
}
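
/*
 * Note on the deferred-ACK path above: when no request can be
 * allocated for the EventAck, the reply frame is retained (free_reply
 * is set FALSE) and queued on mpt->ack_frames; mpt_free_request()
 * later pulls a record off that list and sends the ACK with the
 * newly freed request.
 */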

/*
 * Process an asynchronous event from the IOC.
 */
static int
mpt_core_event(struct mpt_softc *mpt, request_t *req,
    MSG_EVENT_NOTIFY_REPLY *msg)
{
	mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_core_event: 0x%x\n",
		 msg->Event & 0xFF);
	switch(msg->Event & 0xFF) {
	case MPI_EVENT_NONE:
		break;
	case MPI_EVENT_LOG_DATA:
	{
		int i;

		/* Some error occurred that LSI wants logged */
		mpt_prt(mpt, "EvtLogData: IOCLogInfo: 0x%08x\n",
			msg->IOCLogInfo);
		mpt_prt(mpt, "\tEvtLogData: Event Data:");
		for (i = 0; i < msg->EventDataLength; i++)
			mpt_prtc(mpt, " %08x", msg->Data[i]);
		mpt_prtc(mpt, "\n");
		break;
	}
	case MPI_EVENT_EVENT_CHANGE:
		/*
		 * This is just an acknowledgement
		 * of our mpt_send_event_request.
		 */
		break;
	case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
		break;
	default:
		return (/*handled*/0);
	}
	return (/*handled*/1);
}

static void
mpt_send_event_ack(struct mpt_softc *mpt, request_t *ack_req,
    MSG_EVENT_NOTIFY_REPLY *msg, uint32_t context)
{
	MSG_EVENT_ACK *ackp;

	ackp = (MSG_EVENT_ACK *)ack_req->req_vbuf;
	bzero(ackp, sizeof *ackp);
	ackp->Function = MPI_FUNCTION_EVENT_ACK;
	ackp->Event = msg->Event;
	ackp->EventContext = msg->EventContext;
	ackp->MsgContext = context;
	mpt_check_doorbell(mpt);
	mpt_send_cmd(mpt, ack_req);
}

/***************************** Interrupt Handling *****************************/
void
mpt_intr(void *arg)
{
	struct mpt_softc *mpt;
	uint32_t reply_desc;

	mpt = (struct mpt_softc *)arg;
	while ((reply_desc = mpt_pop_reply_queue(mpt)) != MPT_REPLY_EMPTY) {
		request_t *req;
		MSG_DEFAULT_REPLY *reply_frame;
		uint32_t reply_baddr;
		u_int cb_index;
		u_int req_index;
		int free_rf;

		req = NULL;
		reply_frame = NULL;
		reply_baddr = 0;
		if ((reply_desc & MPI_ADDRESS_REPLY_A_BIT) != 0) {
			u_int offset;

			/*
			 * Ensure that the reply frame is coherent.
			 */
			reply_baddr = (reply_desc << 1);
			offset = reply_baddr - (mpt->reply_phys & 0xFFFFFFFF);
			bus_dmamap_sync_range(mpt->reply_dmat, mpt->reply_dmap,
			    offset, MPT_REPLY_SIZE, BUS_DMASYNC_POSTREAD);
			reply_frame = MPT_REPLY_OTOV(mpt, offset);
			reply_desc = le32toh(reply_frame->MsgContext);
		}
		cb_index = MPT_CONTEXT_TO_CBI(reply_desc);
		req_index = MPT_CONTEXT_TO_REQI(reply_desc);
		if (req_index < MPT_MAX_REQUESTS(mpt))
			req = &mpt->request_pool[req_index];

		free_rf = mpt_reply_handlers[cb_index](mpt, req, reply_frame);

		if (reply_frame != NULL && free_rf)
			mpt_free_reply(mpt, reply_baddr);
	}
}
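
/*
 * Reply routing recap: the 32 bit context in each reply carries a
 * callback index and a request index (extracted above with
 * MPT_CONTEXT_TO_CBI() and MPT_CONTEXT_TO_REQI()); requests encode it
 * as (req->index | MPT_REPLY_HANDLER_<type>) when they are built.
 */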

/******************************* Error Recovery *******************************/
void
mpt_complete_request_chain(struct mpt_softc *mpt, struct req_queue *chain,
    u_int iocstatus)
{
	MSG_DEFAULT_REPLY ioc_status_frame;
	request_t *req;

	bzero(&ioc_status_frame, sizeof(ioc_status_frame));
	ioc_status_frame.MsgLength = roundup2(sizeof(ioc_status_frame), 4);
	ioc_status_frame.IOCStatus = iocstatus;
	while((req = TAILQ_FIRST(chain)) != NULL) {
		MSG_REQUEST_HEADER *msg_hdr;
		u_int cb_index;

		TAILQ_REMOVE(chain, req, links);
		msg_hdr = (MSG_REQUEST_HEADER *)req->req_vbuf;
		ioc_status_frame.Function = msg_hdr->Function;
		ioc_status_frame.MsgContext = msg_hdr->MsgContext;
		cb_index = MPT_CONTEXT_TO_CBI(le32toh(msg_hdr->MsgContext));
		mpt_reply_handlers[cb_index](mpt, req, &ioc_status_frame);
	}
}

/********************************* Diagnostics ********************************/
/*
 * Perform a diagnostic dump of a reply frame.
 */
void
mpt_dump_reply_frame(struct mpt_softc *mpt, MSG_DEFAULT_REPLY *reply_frame)
{

	mpt_prt(mpt, "Address Reply:\n");
	mpt_print_reply(reply_frame);
}

/******************************* Doorbell Access ******************************/
static __inline uint32_t mpt_rd_db(struct mpt_softc *mpt);
static __inline uint32_t mpt_rd_intr(struct mpt_softc *mpt);

static __inline uint32_t
mpt_rd_db(struct mpt_softc *mpt)
{
	return mpt_read(mpt, MPT_OFFSET_DOORBELL);
}

static __inline uint32_t
mpt_rd_intr(struct mpt_softc *mpt)
{
	return mpt_read(mpt, MPT_OFFSET_INTR_STATUS);
}

/* Busy wait for a doorbell to be read by the IOC */
static int
mpt_wait_db_ack(struct mpt_softc *mpt)
{
	int i;

	for (i = 0; i < MPT_MAX_WAIT; i++) {
		if (!MPT_DB_IS_BUSY(mpt_rd_intr(mpt))) {
			maxwait_ack = i > maxwait_ack ? i : maxwait_ack;
			return MPT_OK;
		}
		DELAY(1000);
	}
	return MPT_FAIL;
}

/* Busy wait for a doorbell interrupt */
static int
mpt_wait_db_int(struct mpt_softc *mpt)
{
	int i;

	for (i = 0; i < MPT_MAX_WAIT; i++) {
		if (MPT_DB_INTR(mpt_rd_intr(mpt))) {
			maxwait_int = i > maxwait_int ? i : maxwait_int;
			return MPT_OK;
		}
		DELAY(100);
	}
	return MPT_FAIL;
}

/* Check the doorbell and warn if the IOC is not in the running state */
void
mpt_check_doorbell(struct mpt_softc *mpt)
{
	uint32_t db = mpt_rd_db(mpt);

	if (MPT_STATE(db) != MPT_DB_STATE_RUNNING) {
		mpt_prt(mpt, "Device not running\n");
		mpt_print_db(db);
	}
}

/* Wait for the IOC to transition to a given state */
static int
mpt_wait_state(struct mpt_softc *mpt, enum DB_STATE_BITS state)
{
	int i;

	for (i = 0; i < MPT_MAX_WAIT; i++) {
		uint32_t db = mpt_rd_db(mpt);

		if (MPT_STATE(db) == state) {
			maxwait_state = i > maxwait_state ? i : maxwait_state;
			return (MPT_OK);
		}
		DELAY(100);
	}
	return (MPT_FAIL);
}


/************************* Initialization/Configuration ***********************/
static int mpt_download_fw(struct mpt_softc *mpt);

/* Issue the reset COMMAND to the IOC */
static int
mpt_soft_reset(struct mpt_softc *mpt)
{
	mpt_lprt(mpt, MPT_PRT_DEBUG, "soft reset\n");

	/* Have to use hard reset if we are not in Running state */
	if (MPT_STATE(mpt_rd_db(mpt)) != MPT_DB_STATE_RUNNING) {
		mpt_prt(mpt, "soft reset failed: device not running\n");
		return MPT_FAIL;
	}

	/*
	 * If the doorbell is in use we don't have a chance of getting
	 * a word in, since the IOC probably crashed in message
	 * processing.  So don't waste our time.
	 */
	if (MPT_DB_IS_IN_USE(mpt_rd_db(mpt))) {
		mpt_prt(mpt, "soft reset failed: doorbell wedged\n");
		return MPT_FAIL;
	}

	/* Send the reset request to the IOC */
	mpt_write(mpt, MPT_OFFSET_DOORBELL,
	    MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET << MPI_DOORBELL_FUNCTION_SHIFT);
	if (mpt_wait_db_ack(mpt) != MPT_OK) {
		mpt_prt(mpt, "soft reset failed: ack timeout\n");
		return MPT_FAIL;
	}

	/* Wait for the IOC to reload and come out of reset state */
	if (mpt_wait_state(mpt, MPT_DB_STATE_READY) != MPT_OK) {
		mpt_prt(mpt, "soft reset failed: device did not restart\n");
		return MPT_FAIL;
	}

	return MPT_OK;
}

static int
mpt_enable_diag_mode(struct mpt_softc *mpt)
{
	int try;

	try = 20;
	while (--try) {
		if ((mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC) & MPI_DIAG_DRWE) != 0)
			break;

		/* Enable diagnostic registers */
		mpt_write(mpt, MPT_OFFSET_SEQUENCE, 0xFF);
		mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_1ST_KEY_VALUE);
		mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_2ND_KEY_VALUE);
		mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_3RD_KEY_VALUE);
		mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_4TH_KEY_VALUE);
		mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_5TH_KEY_VALUE);

		DELAY(100000);
	}
	if (try == 0)
		return (EIO);
	return (0);
}

static void
mpt_disable_diag_mode(struct mpt_softc *mpt)
{
	mpt_write(mpt, MPT_OFFSET_SEQUENCE, 0xFFFFFFFF);
}
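
/*
 * The five MPI_WRSEQ_*_KEY_VALUE writes above form the write-enable
 * key sequence for the diagnostic register (reflected in
 * MPI_DIAG_DRWE); writing any other value to the sequence register,
 * as mpt_disable_diag_mode() does, locks it again.
 */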

/*
 * This is a magic diagnostic reset that resets all the ARM
 * processors in the chip.
 */
static void
mpt_hard_reset(struct mpt_softc *mpt)
{
	int error;
	int wait;
	uint32_t diagreg;

	mpt_lprt(mpt, MPT_PRT_DEBUG, "hard reset\n");

	error = mpt_enable_diag_mode(mpt);
	if (error) {
		mpt_prt(mpt, "WARNING - Could not enter diagnostic mode!\n");
		mpt_prt(mpt, "Trying to reset anyway.\n");
	}

	diagreg = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);

	/*
	 * This appears to be a workaround required for some
	 * firmware or hardware revs.
	 */
	mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, diagreg | MPI_DIAG_DISABLE_ARM);
	DELAY(1000);

	/* Diag. port is now active so we can now hit the reset bit */
	mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, diagreg | MPI_DIAG_RESET_ADAPTER);

	/*
	 * Ensure that the reset has finished.  We delay 1ms
	 * prior to reading the register to make sure the chip
	 * has sufficiently completed its reset to handle register
	 * accesses.
	 */
	wait = 5000;
	do {
		DELAY(1000);
		diagreg = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);
	} while (--wait && (diagreg & MPI_DIAG_RESET_ADAPTER) == 0);

	if (wait == 0) {
		mpt_prt(mpt, "WARNING - Failed hard reset! "
			"Trying to initialize anyway.\n");
	}

	/*
	 * If we have firmware to download, it must be loaded before
	 * the controller will become operational.  Do so now.
	 */
	if (mpt->fw_image != NULL) {
		error = mpt_download_fw(mpt);
		if (error) {
			mpt_prt(mpt, "WARNING - Firmware Download Failed!\n");
			mpt_prt(mpt, "Trying to initialize anyway.\n");
		}
	}

	/*
	 * Resetting the controller should have disabled write
	 * access to the diagnostic registers, but disable
	 * manually to be sure.
	 */
	mpt_disable_diag_mode(mpt);
}

static void
mpt_core_ioc_reset(struct mpt_softc *mpt, int type)
{
	/*
	 * Complete all pending requests with a status
	 * appropriate for an IOC reset.
	 */
	mpt_complete_request_chain(mpt, &mpt->request_pending_list,
	    MPI_IOCSTATUS_INVALID_STATE);
}


/*
 * Reset the IOC when needed.  Try a software command first, then, if
 * needed, poke at the magic diagnostic reset.  Note that a hard reset
 * resets *both* IOCs on dual function chips (FC929 and LSI1030) and
 * also fouls up the PCI configuration registers.
 */
int
mpt_reset(struct mpt_softc *mpt, int reinit)
{
	struct mpt_personality *pers;
	int ret;
	int retry_cnt = 0;

	/*
	 * Try a soft reset.  If that fails, get out the big hammer.
	 */
 again:
	if ((ret = mpt_soft_reset(mpt)) != MPT_OK) {
		int cnt;

		for (cnt = 0; cnt < 5; cnt++) {
			/* Failed; do a hard reset */
			mpt_hard_reset(mpt);

			/*
			 * Wait for the IOC to reload
			 * and come out of reset state
			 */
			ret = mpt_wait_state(mpt, MPT_DB_STATE_READY);
			if (ret == MPT_OK) {
				break;
			}
			/*
			 * Okay- try to check again...
			 */
			ret = mpt_wait_state(mpt, MPT_DB_STATE_READY);
			if (ret == MPT_OK) {
				break;
			}
			mpt_prt(mpt, "mpt_reset: failed hard reset (%d:%d)\n",
			    retry_cnt, cnt);
		}
	}

	if (retry_cnt == 0) {
		/*
		 * Invoke reset handlers.  We bump the reset count so
		 * that mpt_wait_req() understands that regardless of
		 * the specified wait condition, it should stop its wait.
		 */
		mpt->reset_cnt++;
		MPT_PERS_FOREACH(mpt, pers)
			pers->reset(mpt, ret);
	}

	if (reinit != 0) {
		ret = mpt_enable_ioc(mpt);
		if (ret == MPT_OK) {
			mpt_enable_ints(mpt);
		}
	}
	if (ret != MPT_OK && retry_cnt++ < 2) {
		goto again;
	}
	return ret;
}

/* Return a command buffer to the free queue */
void
mpt_free_request(struct mpt_softc *mpt, request_t *req)
{
	request_t *nxt;
	struct mpt_evtf_record *record;
	uint32_t reply_baddr;

	if (req == NULL || req != &mpt->request_pool[req->index]) {
		panic("mpt_free_request bad req ptr\n");
		return;
	}
	if ((nxt = req->chain) != NULL) {
		req->chain = NULL;
		mpt_free_request(mpt, nxt);	/* NB: recursion */
	}
	req->serno = 0;
	req->ccb = NULL;
	req->state = REQ_STATE_FREE;
	if (LIST_EMPTY(&mpt->ack_frames)) {
		TAILQ_INSERT_HEAD(&mpt->request_free_list, req, links);
		if (mpt->getreqwaiter != 0) {
			mpt->getreqwaiter = 0;
			wakeup(&mpt->request_free_list);
		}
		return;
	}

	/*
	 * Process an ack frame deferred due to resource shortage.
	 */
	record = LIST_FIRST(&mpt->ack_frames);
	LIST_REMOVE(record, links);
	mpt_send_event_ack(mpt, req, &record->reply, record->context);
	reply_baddr = (uint32_t)((uint8_t *)record - mpt->reply)
	    + (mpt->reply_phys & 0xFFFFFFFF);
	mpt_free_reply(mpt, reply_baddr);
}

/* Get a command buffer from the free queue */
request_t *
mpt_get_request(struct mpt_softc *mpt, int sleep_ok)
{
	request_t *req;

retry:
	req = TAILQ_FIRST(&mpt->request_free_list);
	if (req != NULL) {
		KASSERT(req == &mpt->request_pool[req->index],
		    ("mpt_get_request: corrupted request free list\n"));
		TAILQ_REMOVE(&mpt->request_free_list, req, links);
		req->state = REQ_STATE_ALLOCATED;
		req->chain = NULL;
		if ((req->serno = ++(mpt->cmd_serno)) == 0) {
			req->serno = ++(mpt->cmd_serno);
		}
	} else if (sleep_ok != 0) {
		mpt->getreqwaiter = 1;
		mpt_sleep(mpt, &mpt->request_free_list, PUSER, "mptgreq", 0);
		goto retry;
	}
	return req;
}

/* Pass the command to the IOC */
void
mpt_send_cmd(struct mpt_softc *mpt, request_t *req)
{
	uint32_t *pReq;

	pReq = req->req_vbuf;
	if (mpt->verbose > MPT_PRT_TRACE) {
		int offset;

		mpt_prt(mpt, "Send Request %d (0x%x):",
		    req->index, req->req_pbuf);
		for (offset = 0; offset < mpt->request_frame_size; offset++) {
			if ((offset & 0x7) == 0) {
				mpt_prtc(mpt, "\n");
				mpt_prt(mpt, " ");
			}
			mpt_prtc(mpt, " %08x", pReq[offset]);
		}
		mpt_prtc(mpt, "\n");
	}
	bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
	    BUS_DMASYNC_PREWRITE);
	req->state |= REQ_STATE_QUEUED;
	TAILQ_INSERT_HEAD(&mpt->request_pending_list, req, links);
	mpt_write(mpt, MPT_OFFSET_REQUEST_Q, (uint32_t) req->req_pbuf);
}
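
/*
 * Note: the request's MsgContext must already be filled in
 * (req->index | MPT_REPLY_HANDLER_<type>) before mpt_send_cmd() is
 * called; mpt_intr() uses that value to route the eventual reply.
 */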

/*
 * Wait for a request to complete.
 *
 * Inputs:
 *	mpt		softc of controller executing request
 *	req		request to wait for
 *	state		the request state to wait for
 *	mask		bits of req->state to compare against state
 *	sleep_ok	nonzero implies may sleep in this context
 *	time_ms		timeout in ms.  0 implies no timeout.
 *
 * Return Values:
 *	0		Request completed
 *	non-0		Timeout fired before request completion.
 */
int
mpt_wait_req(struct mpt_softc *mpt, request_t *req,
    mpt_req_state_t state, mpt_req_state_t mask,
    int sleep_ok, int time_ms)
{
	int error;
	int timeout;
	u_int saved_cnt;

	/*
	 * time_ms is in ms; 0 indicates infinite wait.
	 * Convert to ticks or 500us units depending on
	 * our sleep mode.
	 */
	if (sleep_ok != 0)
		timeout = (time_ms * hz) / 1000;
	else
		timeout = time_ms * 2;
	req->state |= REQ_STATE_NEED_WAKEUP;
	mask &= ~REQ_STATE_NEED_WAKEUP;
	saved_cnt = mpt->reset_cnt;
	while ((req->state & mask) != state
	    && mpt->reset_cnt == saved_cnt) {
		if (sleep_ok != 0) {
			error = mpt_sleep(mpt, req, PUSER, "mptreq", timeout);
			if (error == EWOULDBLOCK) {
				timeout = 0;
				break;
			}
		} else {
			if (time_ms != 0 && --timeout == 0) {
				mpt_prt(mpt, "mpt_wait_req timed out\n");
				break;
			}
			DELAY(500);
			mpt_intr(mpt);
		}
	}
	req->state &= ~REQ_STATE_NEED_WAKEUP;
	if (mpt->reset_cnt != saved_cnt)
		return (EIO);
	if (time_ms && timeout <= 0)
		return (ETIMEDOUT);
	return (0);
}
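
/*
 * Typical usage, as in mpt_issue_cfg_req() below, is to wait for the
 * request to reach REQ_STATE_DONE:
 *
 *	error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
 *	    sleep_ok, timeout_ms);
 */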

/*
 * Send a command to the IOC via the handshake register.
 *
 * Only done at initialization time and for certain unusual
 * commands such as device/bus reset as specified by LSI.
 */
int
mpt_send_handshake_cmd(struct mpt_softc *mpt, size_t len, void *cmd)
{
	int i;
	uint32_t data, *data32;

	/* Check condition of the IOC */
	data = mpt_rd_db(mpt);
	if ((MPT_STATE(data) != MPT_DB_STATE_READY
	  && MPT_STATE(data) != MPT_DB_STATE_RUNNING
	  && MPT_STATE(data) != MPT_DB_STATE_FAULT)
	 || MPT_DB_IS_IN_USE(data)) {
		mpt_prt(mpt, "handshake aborted - invalid doorbell state\n");
		mpt_print_db(data);
		return (EBUSY);
	}

	/* We move things in 32 bit chunks */
	len = (len + 3) >> 2;
	data32 = cmd;

	/* Clear any left-over pending doorbell interrupts */
	if (MPT_DB_INTR(mpt_rd_intr(mpt)))
		mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);

	/*
	 * Tell the handshake reg. we are going to send a command
	 * and how long it is going to be.
	 */
	data = (MPI_FUNCTION_HANDSHAKE << MPI_DOORBELL_FUNCTION_SHIFT) |
	    (len << MPI_DOORBELL_ADD_DWORDS_SHIFT);
	mpt_write(mpt, MPT_OFFSET_DOORBELL, data);

	/* Wait for the chip to notice */
	if (mpt_wait_db_int(mpt) != MPT_OK) {
		mpt_prt(mpt, "mpt_send_handshake_cmd timeout1\n");
		return (ETIMEDOUT);
	}

	/* Clear the interrupt */
	mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);

	if (mpt_wait_db_ack(mpt) != MPT_OK) {
		mpt_prt(mpt, "mpt_send_handshake_cmd timeout2\n");
		return (ETIMEDOUT);
	}

	/* Send the command */
	for (i = 0; i < len; i++) {
		mpt_write(mpt, MPT_OFFSET_DOORBELL, *data32++);
		if (mpt_wait_db_ack(mpt) != MPT_OK) {
			mpt_prt(mpt,
				"mpt_send_handshake_cmd timeout! index = %d\n",
				i);
			return (ETIMEDOUT);
		}
	}
	return MPT_OK;
}

/* Get the response from the handshake register */
int
mpt_recv_handshake_reply(struct mpt_softc *mpt, size_t reply_len, void *reply)
{
	int left, reply_left;
	u_int16_t *data16;
	MSG_DEFAULT_REPLY *hdr;

	/* We move things out in 16 bit chunks */
	reply_len >>= 1;
	data16 = (u_int16_t *)reply;

	hdr = (MSG_DEFAULT_REPLY *)reply;

	/* Get first word */
	if (mpt_wait_db_int(mpt) != MPT_OK) {
		mpt_prt(mpt, "mpt_recv_handshake_cmd timeout1\n");
		return ETIMEDOUT;
	}
	*data16++ = mpt_read(mpt, MPT_OFFSET_DOORBELL) & MPT_DB_DATA_MASK;
	mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);

	/* Get second word */
	if (mpt_wait_db_int(mpt) != MPT_OK) {
		mpt_prt(mpt, "mpt_recv_handshake_cmd timeout2\n");
		return ETIMEDOUT;
	}
	*data16++ = mpt_read(mpt, MPT_OFFSET_DOORBELL) & MPT_DB_DATA_MASK;
	mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);

	/* With the second word, we can now look at the length */
	if ((reply_len >> 1) != hdr->MsgLength) {
		mpt_prt(mpt, "reply length does not match message length: "
			"got 0x%02x, expected 0x%02x\n",
			hdr->MsgLength << 2, reply_len << 1);
	}

	/* Get rest of the reply; but don't overflow the provided buffer */
	left = (hdr->MsgLength << 1) - 2;
	reply_left = reply_len - 2;
	while (left--) {
		u_int16_t datum;

		if (mpt_wait_db_int(mpt) != MPT_OK) {
			mpt_prt(mpt, "mpt_recv_handshake_cmd timeout3\n");
			return ETIMEDOUT;
		}
		datum = mpt_read(mpt, MPT_OFFSET_DOORBELL);

		if (reply_left-- > 0)
			*data16++ = datum & MPT_DB_DATA_MASK;

		mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
	}

	/* One more wait & clear at the end */
	if (mpt_wait_db_int(mpt) != MPT_OK) {
		mpt_prt(mpt, "mpt_recv_handshake_cmd timeout4\n");
		return ETIMEDOUT;
	}
	mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);

	if ((hdr->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
		if (mpt->verbose >= MPT_PRT_TRACE)
			mpt_print_reply(hdr);
		return (MPT_FAIL | hdr->IOCStatus);
	}

	return (0);
}

static int
mpt_get_iocfacts(struct mpt_softc *mpt, MSG_IOC_FACTS_REPLY *freplp)
{
	MSG_IOC_FACTS f_req;
	int error;

	bzero(&f_req, sizeof f_req);
	f_req.Function = MPI_FUNCTION_IOC_FACTS;
	f_req.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
	error = mpt_send_handshake_cmd(mpt, sizeof f_req, &f_req);
	if (error)
		return(error);
	error = mpt_recv_handshake_reply(mpt, sizeof (*freplp), freplp);
	return (error);
}

static int
mpt_get_portfacts(struct mpt_softc *mpt, MSG_PORT_FACTS_REPLY *freplp)
{
	MSG_PORT_FACTS f_req;
	int error;

	/* XXX: Only getting PORT FACTS for Port 0 */
	memset(&f_req, 0, sizeof f_req);
	f_req.Function = MPI_FUNCTION_PORT_FACTS;
	f_req.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
	error = mpt_send_handshake_cmd(mpt, sizeof f_req, &f_req);
	if (error)
		return(error);
	error = mpt_recv_handshake_reply(mpt, sizeof (*freplp), freplp);
	return (error);
}

/*
 * Send the initialization request.  This is where we specify how many
 * SCSI buses and how many devices per bus we wish to emulate.
 * This is also the command that specifies the max size of the reply
 * frames from the IOC that we will be allocating.
 */
static int
mpt_send_ioc_init(struct mpt_softc *mpt, uint32_t who)
{
	int error = 0;
	MSG_IOC_INIT init;
	MSG_IOC_INIT_REPLY reply;

	bzero(&init, sizeof init);
	init.WhoInit = who;
	init.Function = MPI_FUNCTION_IOC_INIT;
	if (mpt->is_fc) {
		init.MaxDevices = 255;
	} else if (mpt->is_sas) {
		init.MaxDevices = mpt->mpt_max_devices;
	} else {
		init.MaxDevices = 16;
	}
	init.MaxBuses = 1;

	init.MsgVersion = htole16(MPI_VERSION);
	init.HeaderVersion = htole16(MPI_HEADER_VERSION);
	init.ReplyFrameSize = htole16(MPT_REPLY_SIZE);
	init.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);

	if ((error = mpt_send_handshake_cmd(mpt, sizeof init, &init)) != 0) {
		return(error);
	}

	error = mpt_recv_handshake_reply(mpt, sizeof reply, &reply);
	return (error);
}


/*
 * Utility routine to read configuration headers and pages
 */
int
mpt_issue_cfg_req(struct mpt_softc *mpt, request_t *req, u_int Action,
    u_int PageVersion, u_int PageLength, u_int PageNumber,
    u_int PageType, uint32_t PageAddress, bus_addr_t addr,
    bus_size_t len, int sleep_ok, int timeout_ms)
{
	MSG_CONFIG *cfgp;
	SGE_SIMPLE32 *se;

	cfgp = req->req_vbuf;
	memset(cfgp, 0, sizeof *cfgp);
	cfgp->Action = Action;
	cfgp->Function = MPI_FUNCTION_CONFIG;
	cfgp->Header.PageVersion = PageVersion;
	cfgp->Header.PageLength = PageLength;
	cfgp->Header.PageNumber = PageNumber;
	cfgp->Header.PageType = PageType;
	cfgp->PageAddress = PageAddress;
	se = (SGE_SIMPLE32 *)&cfgp->PageBufferSGE;
	se->Address = addr;
	MPI_pSGE_SET_LENGTH(se, len);
	MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
	    MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
	    MPI_SGE_FLAGS_END_OF_LIST |
	    ((Action == MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT
	   || Action == MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM)
	    ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST)));
	cfgp->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_CONFIG);

	mpt_check_doorbell(mpt);
	mpt_send_cmd(mpt, req);
	return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
	    sleep_ok, timeout_ms));
}


int
mpt_read_cfg_header(struct mpt_softc *mpt, int PageType, int PageNumber,
    uint32_t PageAddress, CONFIG_PAGE_HEADER *rslt,
    int sleep_ok, int timeout_ms)
{
	request_t *req;
	MSG_CONFIG *cfgp;
	int error;

	req = mpt_get_request(mpt, sleep_ok);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_read_cfg_header: Get request failed!\n");
		return (ENOMEM);
	}

	error = mpt_issue_cfg_req(mpt, req, MPI_CONFIG_ACTION_PAGE_HEADER,
	    /*PageVersion*/0, /*PageLength*/0, PageNumber,
	    PageType, PageAddress, /*addr*/0, /*len*/0,
	    sleep_ok, timeout_ms);
	if (error != 0) {
		mpt_free_request(mpt, req);
		mpt_prt(mpt, "read_cfg_header timed out\n");
		return (ETIMEDOUT);
	}

	switch (req->IOCStatus & MPI_IOCSTATUS_MASK) {
	case MPI_IOCSTATUS_SUCCESS:
		cfgp = req->req_vbuf;
		bcopy(&cfgp->Header, rslt, sizeof(*rslt));
		error = 0;
		break;
	case MPI_IOCSTATUS_CONFIG_INVALID_PAGE:
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "Invalid Page Type %d Number %d Addr 0x%0x\n",
		    PageType, PageNumber, PageAddress);
		error = EINVAL;
		break;
	default:
		mpt_prt(mpt, "mpt_read_cfg_header: Config Info Status %x\n",
			req->IOCStatus);
		error = EIO;
		break;
	}
	mpt_free_request(mpt, req);
	return (error);
}

#define CFG_DATA_OFF 128

int
mpt_read_cfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress,
    CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok,
    int timeout_ms)
{
	request_t *req;
	int error;

	req = mpt_get_request(mpt, sleep_ok);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_read_cfg_page: Get request failed!\n");
		return (-1);
	}

	error = mpt_issue_cfg_req(mpt, req, Action, hdr->PageVersion,
	    hdr->PageLength, hdr->PageNumber,
	    hdr->PageType & MPI_CONFIG_PAGETYPE_MASK,
	    PageAddress, req->req_pbuf + CFG_DATA_OFF,
	    len, sleep_ok, timeout_ms);
	if (error != 0) {
		mpt_prt(mpt, "read_cfg_page(%d) timed out\n", Action);
		return (-1);
	}

	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
		mpt_prt(mpt, "mpt_read_cfg_page: Config Info Status %x\n",
			req->IOCStatus);
		mpt_free_request(mpt, req);
		return (-1);
	}
	bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
	    BUS_DMASYNC_POSTREAD);
	memcpy(hdr, ((uint8_t *)req->req_vbuf)+CFG_DATA_OFF, len);
	mpt_free_request(mpt, req);
	return (0);
}

int
mpt_write_cfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress,
    CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok,
    int timeout_ms)
{
	request_t *req;
	u_int hdr_attr;
	int error;

	hdr_attr = hdr->PageType & MPI_CONFIG_PAGEATTR_MASK;
	if (hdr_attr != MPI_CONFIG_PAGEATTR_CHANGEABLE &&
	    hdr_attr != MPI_CONFIG_PAGEATTR_PERSISTENT) {
		mpt_prt(mpt, "page type 0x%x not changeable\n",
			hdr->PageType & MPI_CONFIG_PAGETYPE_MASK);
		return (-1);
	}
	hdr->PageType &= MPI_CONFIG_PAGETYPE_MASK;

	req = mpt_get_request(mpt, sleep_ok);
	if (req == NULL)
		return (-1);

	memcpy(((caddr_t)req->req_vbuf)+CFG_DATA_OFF, hdr, len);
	/* Restore stripped out attributes */
	hdr->PageType |= hdr_attr;

	error = mpt_issue_cfg_req(mpt, req, Action, hdr->PageVersion,
	    hdr->PageLength, hdr->PageNumber,
	    hdr->PageType & MPI_CONFIG_PAGETYPE_MASK,
	    PageAddress, req->req_pbuf + CFG_DATA_OFF,
	    len, sleep_ok, timeout_ms);
	if (error != 0) {
		mpt_prt(mpt, "mpt_write_cfg_page timed out\n");
		return (-1);
	}

	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
		mpt_prt(mpt, "mpt_write_cfg_page: Config Info Status %x\n",
			req->IOCStatus);
		mpt_free_request(mpt, req);
		return (-1);
	}
	mpt_free_request(mpt, req);
	return (0);
}

/*
 * Read IOC configuration information
 */
static int
mpt_read_config_info_ioc(struct mpt_softc *mpt)
{
	CONFIG_PAGE_HEADER hdr;
	struct mpt_raid_volume *mpt_raid;
	int rv;
	int i;
	size_t len;

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC,
	    /*PageNumber*/2, /*PageAddress*/0, &hdr,
	    /*sleep_ok*/FALSE, /*timeout_ms*/5000);
	/*
	 * If it's an invalid page, so what?  Not a supported function....
	 */
	if (rv == EINVAL)
		return (0);
	if (rv)
		return (rv);

	mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC Page 2 Header: ver %x, len %x, "
	    "num %x, type %x\n", hdr.PageVersion,
	    (u_int)(hdr.PageLength * sizeof(uint32_t)),
	    hdr.PageNumber, hdr.PageType);

	len = hdr.PageLength * sizeof(uint32_t);
	mpt->ioc_page2 = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (mpt->ioc_page2 == NULL)
		return (ENOMEM);
	memcpy(&mpt->ioc_page2->Header, &hdr, sizeof(hdr));
	rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
	    &mpt->ioc_page2->Header, len,
	    /*sleep_ok*/FALSE, /*timeout_ms*/5000);
	if (rv) {
		mpt_prt(mpt, "failed to read IOC Page 2\n");
	} else if (mpt->ioc_page2->CapabilitiesFlags != 0) {
		uint32_t mask;

		mpt_prt(mpt, "Capabilities: (");
		for (mask = 1; mask != 0; mask <<= 1) {
			if ((mpt->ioc_page2->CapabilitiesFlags & mask) == 0)
				continue;

			switch (mask) {
			case MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT:
				mpt_prtc(mpt, " RAID-0");
				break;
			case MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT:
				mpt_prtc(mpt, " RAID-1E");
				break;
			case MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT:
				mpt_prtc(mpt, " RAID-1");
				break;
			case MPI_IOCPAGE2_CAP_FLAGS_SES_SUPPORT:
				mpt_prtc(mpt, " SES");
				break;
			case MPI_IOCPAGE2_CAP_FLAGS_SAFTE_SUPPORT:
				mpt_prtc(mpt, " SAFTE");
				break;
			case MPI_IOCPAGE2_CAP_FLAGS_CROSS_CHANNEL_SUPPORT:
				mpt_prtc(mpt, " Multi-Channel-Arrays");
				break;
			default:
				break;
			}
		}
		mpt_prtc(mpt, " )\n");
		if ((mpt->ioc_page2->CapabilitiesFlags
		   & (MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT
		    | MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT
		    | MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT)) != 0) {
			mpt_prt(mpt, "%d Active Volume%s(%d Max)\n",
				mpt->ioc_page2->NumActiveVolumes,
				mpt->ioc_page2->NumActiveVolumes != 1
			      ? "s " : " ",
				mpt->ioc_page2->MaxVolumes);
			mpt_prt(mpt, "%d Hidden Drive Member%s(%d Max)\n",
				mpt->ioc_page2->NumActivePhysDisks,
				mpt->ioc_page2->NumActivePhysDisks != 1
			      ? "s " : " ",
				mpt->ioc_page2->MaxPhysDisks);
		}
	}

	len = mpt->ioc_page2->MaxVolumes * sizeof(struct mpt_raid_volume);
	mpt->raid_volumes = malloc(len, M_DEVBUF, M_NOWAIT);
	if (mpt->raid_volumes == NULL) {
		mpt_prt(mpt, "Could not allocate RAID volume data\n");
	} else {
		memset(mpt->raid_volumes, 0, len);
	}

	/*
	 * Copy critical data out of ioc_page2 so that we can
	 * safely refresh the page without windows of unreliable
	 * data.
	 */
	mpt->raid_max_volumes = mpt->ioc_page2->MaxVolumes;

	len = sizeof(*mpt->raid_volumes->config_page)
	    + (sizeof(RAID_VOL0_PHYS_DISK)*(mpt->ioc_page2->MaxPhysDisks - 1));
	for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
		mpt_raid = &mpt->raid_volumes[i];
		mpt_raid->config_page = malloc(len, M_DEVBUF, M_NOWAIT);
		if (mpt_raid->config_page == NULL) {
			mpt_prt(mpt, "Could not allocate RAID page data\n");
			break;
		}
		memset(mpt_raid->config_page, 0, len);
	}
	mpt->raid_page0_len = len;

	len = mpt->ioc_page2->MaxPhysDisks * sizeof(struct mpt_raid_disk);
	mpt->raid_disks = malloc(len, M_DEVBUF, M_NOWAIT);
	if (mpt->raid_disks == NULL) {
		mpt_prt(mpt, "Could not allocate RAID disk data\n");
	} else {
		memset(mpt->raid_disks, 0, len);
	}

	mpt->raid_max_disks = mpt->ioc_page2->MaxPhysDisks;

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC,
	    /*PageNumber*/3, /*PageAddress*/0, &hdr,
	    /*sleep_ok*/FALSE, /*timeout_ms*/5000);
	if (rv)
		return (EIO);

	mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC Page 3 Header: %x %x %x %x\n",
	    hdr.PageVersion, hdr.PageLength, hdr.PageNumber, hdr.PageType);

	if (mpt->ioc_page3 != NULL)
		free(mpt->ioc_page3, M_DEVBUF);
	len = hdr.PageLength * sizeof(uint32_t);
	mpt->ioc_page3 = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (mpt->ioc_page3 == NULL)
		return (-1);
	memcpy(&mpt->ioc_page3->Header, &hdr, sizeof(hdr));
	rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
	    &mpt->ioc_page3->Header, len,
	    /*sleep_ok*/FALSE, /*timeout_ms*/5000);
	if (rv) {
		mpt_prt(mpt, "failed to read IOC Page 3\n");
	}

	mpt_raid_wakeup(mpt);

	return (0);
}

/*
 * Read SCSI configuration information
 */
static int
mpt_read_config_info_spi(struct mpt_softc *mpt)
{
	int rv, i;

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 0,
	    0, &mpt->mpt_port_page0.Header,
	    /*sleep_ok*/FALSE, /*timeout_ms*/5000);
	if (rv)
		return (-1);
	mpt_lprt(mpt, MPT_PRT_DEBUG,
	    "SPI Port Page 0 Header: %x %x %x %x\n",
	    mpt->mpt_port_page0.Header.PageVersion,
	    mpt->mpt_port_page0.Header.PageLength,
	    mpt->mpt_port_page0.Header.PageNumber,
	    mpt->mpt_port_page0.Header.PageType);

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 1,
	    0, &mpt->mpt_port_page1.Header,
	    /*sleep_ok*/FALSE, /*timeout_ms*/5000);
	if (rv)
		return (-1);

	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1 Header: %x %x %x %x\n",
	    mpt->mpt_port_page1.Header.PageVersion,
	    mpt->mpt_port_page1.Header.PageLength,
	    mpt->mpt_port_page1.Header.PageNumber,
	    mpt->mpt_port_page1.Header.PageType);

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 2,
	    /*PageAddress*/0, &mpt->mpt_port_page2.Header,
	    /*sleep_ok*/FALSE, /*timeout_ms*/5000);
	if (rv)
		return (-1);

	mpt_lprt(mpt, MPT_PRT_DEBUG,
	    "SPI Port Page 2 Header: %x %x %x %x\n",
	    mpt->mpt_port_page2.Header.PageVersion,
	    mpt->mpt_port_page2.Header.PageLength,
	    mpt->mpt_port_page2.Header.PageNumber,
	    mpt->mpt_port_page2.Header.PageType);

	for (i = 0; i < 16; i++) {
		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
		    0, i, &mpt->mpt_dev_page0[i].Header,
		    /*sleep_ok*/FALSE, /*timeout_ms*/5000);
		if (rv)
			return (-1);

		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "SPI Target %d Device Page 0 Header: %x %x %x %x\n",
		    i, mpt->mpt_dev_page0[i].Header.PageVersion,
		    mpt->mpt_dev_page0[i].Header.PageLength,
		    mpt->mpt_dev_page0[i].Header.PageNumber,
		    mpt->mpt_dev_page0[i].Header.PageType);

		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
		    1, i, &mpt->mpt_dev_page1[i].Header,
		    /*sleep_ok*/FALSE, /*timeout_ms*/5000);
		if (rv)
			return (-1);

		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "SPI Target %d Device Page 1 Header: %x %x %x %x\n",
		    i, mpt->mpt_dev_page1[i].Header.PageVersion,
		    mpt->mpt_dev_page1[i].Header.PageLength,
		    mpt->mpt_dev_page1[i].Header.PageNumber,
		    mpt->mpt_dev_page1[i].Header.PageType);
	}

	/*
	 * At this point, we don't *have* to fail.  As long as we have
	 * valid config header information, we can (barely) lurch
	 * along.
	 */

	rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
	    &mpt->mpt_port_page0.Header,
	    sizeof(mpt->mpt_port_page0),
	    /*sleep_ok*/FALSE, /*timeout_ms*/5000);
	if (rv) {
		mpt_prt(mpt, "failed to read SPI Port Page 0\n");
	} else {
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "SPI Port Page 0: Capabilities %x PhysicalInterface %x\n",
		    mpt->mpt_port_page0.Capabilities,
		    mpt->mpt_port_page0.PhysicalInterface);
	}

	rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
	    &mpt->mpt_port_page1.Header,
	    sizeof(mpt->mpt_port_page1),
	    /*sleep_ok*/FALSE, /*timeout_ms*/5000);
	if (rv) {
		mpt_prt(mpt, "failed to read SPI Port Page 1\n");
	} else {
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "SPI Port Page 1: Configuration %x OnBusTimerValue %x\n",
		    mpt->mpt_port_page1.Configuration,
		    mpt->mpt_port_page1.OnBusTimerValue);
	}

	rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
	    &mpt->mpt_port_page2.Header,
	    sizeof(mpt->mpt_port_page2),
	    /*sleep_ok*/FALSE, /*timeout_ms*/5000);
	if (rv) {
		mpt_prt(mpt, "failed to read SPI Port Page 2\n");
	} else {
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "SPI Port Page 2: Flags %x Settings %x\n",
		    mpt->mpt_port_page2.PortFlags,
		    mpt->mpt_port_page2.PortSettings);
		for (i = 0; i < 16; i++) {
			mpt_lprt(mpt, MPT_PRT_DEBUG,
			    "SPI Port Page 2 Tgt %d: timo %x SF %x Flags %x\n",
			    i, mpt->mpt_port_page2.DeviceSettings[i].Timeout,
			    mpt->mpt_port_page2.DeviceSettings[i].SyncFactor,
			    mpt->mpt_port_page2.DeviceSettings[i].DeviceFlags);
		}
	}

	for (i = 0; i < 16; i++) {
		rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/i,
		    &mpt->mpt_dev_page0[i].Header,
		    sizeof(*mpt->mpt_dev_page0),
		    /*sleep_ok*/FALSE,
		    /*timeout_ms*/5000);
		if (rv) {
			mpt_prt(mpt,
			    "cannot read SPI Tgt %d Device Page 0\n", i);
			continue;
		}
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "SPI Tgt %d Page 0: NParms %x Information %x\n",
		    i, mpt->mpt_dev_page0[i].NegotiatedParameters,
		    mpt->mpt_dev_page0[i].Information);

		rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/i,
		    &mpt->mpt_dev_page1[i].Header,
		    sizeof(*mpt->mpt_dev_page1),
		    /*sleep_ok*/FALSE,
		    /*timeout_ms*/5000);
		if (rv) {
			mpt_prt(mpt,
			    "cannot read SPI Tgt %d Device Page 1\n", i);
			continue;
		}
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "SPI Tgt %d Page 1: RParms %x Configuration %x\n",
		    i, mpt->mpt_dev_page1[i].RequestedParameters,
		    mpt->mpt_dev_page1[i].Configuration);
	}
	return (0);
}

/*
 * Validate SPI configuration information.
 *
 * In particular, validate SPI Port Page 1.
 */
static int
mpt_set_initial_config_spi(struct mpt_softc *mpt)
{
	int i, pp1val = ((1 << mpt->mpt_ini_id) << 16) | mpt->mpt_ini_id;
	int error;

	mpt->mpt_disc_enable = 0xff;
	mpt->mpt_tag_enable = 0;

	if (mpt->mpt_port_page1.Configuration != pp1val) {
		CONFIG_PAGE_SCSI_PORT_1 tmp;

		mpt_prt(mpt,
		    "SPI Port Page 1 Config value bad (%x) - should be %x\n",
		    mpt->mpt_port_page1.Configuration, pp1val);
		tmp = mpt->mpt_port_page1;
		tmp.Configuration = pp1val;
		error = mpt_write_cur_cfg_page(mpt, /*PageAddress*/0,
		    &tmp.Header, sizeof(tmp),
		    /*sleep_ok*/FALSE,
		    /*timeout_ms*/5000);
		if (error)
			return (-1);
		error = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
		    &tmp.Header, sizeof(tmp),
		    /*sleep_ok*/FALSE,
		    /*timeout_ms*/5000);
		if (error)
			return (-1);
		if (tmp.Configuration != pp1val) {
			mpt_prt(mpt,
			    "failed to reset SPI Port Page 1 Config value\n");
			return (-1);
		}
		mpt->mpt_port_page1 = tmp;
	}

	for (i = 0; i < 16; i++) {
		CONFIG_PAGE_SCSI_DEVICE_1 tmp;

		tmp = mpt->mpt_dev_page1[i];
		tmp.RequestedParameters = 0;
		tmp.Configuration = 0;
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "Set Tgt %d SPI DevicePage 1 values to %x 0 %x\n",
		    i, tmp.RequestedParameters, tmp.Configuration);
		error = mpt_write_cur_cfg_page(mpt, /*PageAddress*/i,
		    &tmp.Header, sizeof(tmp),
		    /*sleep_ok*/FALSE,
		    /*timeout_ms*/5000);
		if (error)
			return (-1);
		error = mpt_read_cur_cfg_page(mpt, /*PageAddress*/i,
		    &tmp.Header, sizeof(tmp),
		    /*sleep_ok*/FALSE,
		    /*timeout_ms*/5000);
		if (error)
			return (-1);
		mpt->mpt_dev_page1[i] = tmp;
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "SPI Tgt %d Page 1: RParm %x Configuration %x\n", i,
		    mpt->mpt_dev_page1[i].RequestedParameters,
		    mpt->mpt_dev_page1[i].Configuration);
	}
	return (0);
}

/*
 * Enable IOC port
 */
static int
mpt_send_port_enable(struct mpt_softc *mpt, int port)
{
	request_t *req;
	MSG_PORT_ENABLE *enable_req;
	int error;

	req = mpt_get_request(mpt, /*sleep_ok*/FALSE);
	if (req == NULL)
		return (-1);

	enable_req = req->req_vbuf;
	bzero(enable_req, sizeof *enable_req);

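	/*
	 * MsgContext is echoed back in the reply: it packs the request
	 * index together with a reply-handler cookie so that the reply
	 * path can route this completion to mpt_config_reply_handler
	 * and find the originating request_t.
	 */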
	enable_req->Function = MPI_FUNCTION_PORT_ENABLE;
	enable_req->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_CONFIG);
	enable_req->PortNumber = port;

	mpt_check_doorbell(mpt);
	mpt_lprt(mpt, MPT_PRT_DEBUG, "enabling port %d\n", port);

	mpt_send_cmd(mpt, req);
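	/*
	 * FC and SAS ports may have to complete fabric logins or device
	 * discovery before the enable completes, so they are given a much
	 * longer timeout than a parallel SCSI bus needs.
	 */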
	error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
	    /*sleep_ok*/FALSE,
	    /*time_ms*/(mpt->is_sas || mpt->is_fc) ? 30000 : 3000);
	if (error != 0) {
		/*
		 * Leave the request outstanding; the IOC may yet complete
		 * it, and freeing it now could allow it to be reused while
		 * the chip still owns it.
		 */
		mpt_prt(mpt, "port %d enable timed out\n", port);
		return (-1);
	}
	mpt_free_request(mpt, req);
	return (0);
}

1942/*
1943 * Enable/Disable asynchronous event reporting.
1944 *
1945 * NB: this is the first command we send via shared memory
1946 * instead of the handshake register.
1947 */
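/*
 * Earlier setup commands (IOCFacts, PortFacts, IOCInit) are banged
 * through the doorbell handshake a word at a time; from this point on,
 * requests are posted as frames on the shared-memory request queue and
 * complete through the reply queue, which is the normal fast path.
 */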
static int
mpt_send_event_request(struct mpt_softc *mpt, int onoff)
{
	request_t *req;
	MSG_EVENT_NOTIFY *enable_req;

	req = mpt_get_request(mpt, /*sleep_ok*/FALSE);
	if (req == NULL) {
		/* No free request frames; should not happen this early. */
		return (ENOMEM);
	}

	enable_req = req->req_vbuf;
	bzero(enable_req, sizeof *enable_req);

	enable_req->Function = MPI_FUNCTION_EVENT_NOTIFICATION;
	enable_req->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_EVENTS);
	enable_req->Switch = onoff;

	mpt_check_doorbell(mpt);
	mpt_lprt(mpt, MPT_PRT_DEBUG,
	    "%sabling async events\n", onoff ? "en" : "dis");
	mpt_send_cmd(mpt, req);

	return (0);
}

/*
 * Un-mask the interrupts on the chip.
 */
void
mpt_enable_ints(struct mpt_softc *mpt)
{
	/* Unmask everything except the doorbell interrupt. */
	mpt_write(mpt, MPT_OFFSET_INTR_MASK, MPT_INTR_DB_MASK);
}

/*
 * Mask the interrupts on the chip.
 */
void
mpt_disable_ints(struct mpt_softc *mpt)
{
	/* Mask all interrupts. */
	mpt_write(mpt, MPT_OFFSET_INTR_MASK,
	    MPT_INTR_REPLY_MASK | MPT_INTR_DB_MASK);
}

static void
mpt_sysctl_attach(struct mpt_softc *mpt)
{
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	    "debug", CTLFLAG_RW, &mpt->verbose, 0,
	    "Debugging/Verbose level");
}

int
mpt_attach(struct mpt_softc *mpt)
{
	int i;

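	/*
	 * Walk the table of registered personalities.  A personality
	 * whose probe() returns zero claims this controller; it is then
	 * attached and recorded in mpt_pers_mask so that shutdown and
	 * detach can later be run over the same set in reverse order.
	 */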
	for (i = 0; i < MPT_MAX_PERSONALITIES; i++) {
		struct mpt_personality *pers;
		int error;

		pers = mpt_personalities[i];
		if (pers == NULL)
			continue;

		if (pers->probe(mpt) == 0) {
			error = pers->attach(mpt);
			if (error != 0) {
				mpt_detach(mpt);
				return (error);
			}
			mpt->mpt_pers_mask |= (0x1 << pers->id);
			pers->use_count++;
		}
	}

	return (0);
}

int
mpt_shutdown(struct mpt_softc *mpt)
{
	struct mpt_personality *pers;

	MPT_PERS_FOREACH_REVERSE(mpt, pers)
		pers->shutdown(mpt);

	mpt_reset(mpt, /*reinit*/FALSE);
	return (0);
}

int
mpt_detach(struct mpt_softc *mpt)
{
	struct mpt_personality *pers;

	MPT_PERS_FOREACH_REVERSE(mpt, pers) {
		pers->detach(mpt);
		mpt->mpt_pers_mask &= ~(0x1 << pers->id);
		pers->use_count--;
	}

	return (0);
}

int
mpt_core_load(struct mpt_personality *pers)
{
	int i;

	/*
	 * Setup core handlers and insert the default handler
	 * into all "empty slots".
	 */
	for (i = 0; i < MPT_NUM_REPLY_HANDLERS; i++)
		mpt_reply_handlers[i] = mpt_default_reply_handler;

	mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_EVENTS)] =
	    mpt_event_reply_handler;
	mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_CONFIG)] =
	    mpt_config_reply_handler;
	mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_HANDSHAKE)] =
	    mpt_handshake_reply_handler;

	return (0);
}

/*
 * Initialize per-instance driver data and perform
 * initial controller configuration.
 */
int
mpt_core_attach(struct mpt_softc *mpt)
{
	int val;
	int error;

	LIST_INIT(&mpt->ack_frames);

	/* Put all request buffers on the free list. */
	TAILQ_INIT(&mpt->request_pending_list);
	TAILQ_INIT(&mpt->request_free_list);
	for (val = 0; val < MPT_MAX_REQUESTS(mpt); val++)
		mpt_free_request(mpt, &mpt->request_pool[val]);

	mpt_sysctl_attach(mpt);

	mpt_lprt(mpt, MPT_PRT_DEBUG, "doorbell req = %s\n",
	    mpt_ioc_diag(mpt_read(mpt, MPT_OFFSET_DOORBELL)));

	error = mpt_configure_ioc(mpt);

	return (error);
}

void
mpt_core_shutdown(struct mpt_softc *mpt)
{
}

void
mpt_core_detach(struct mpt_softc *mpt)
{
}

int
mpt_core_unload(struct mpt_personality *pers)
{
	/* Unload is always successful. */
	return (0);
}

#define	FW_UPLOAD_REQ_SIZE				\
	(sizeof(MSG_FW_UPLOAD) - sizeof(SGE_MPI_UNION)	\
	+ sizeof(FW_UPLOAD_TCSGE) + sizeof(SGE_SIMPLE32))
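/*
 * The request is sized by hand: MSG_FW_UPLOAD ends in a generic
 * SGE_MPI_UNION placeholder, which is subtracted out and replaced with
 * the two elements actually sent -- a transaction-context SGE that
 * describes the upload, followed by a single 32-bit simple SGE that
 * points at the host buffer.
 */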

static int
mpt_upload_fw(struct mpt_softc *mpt)
{
	uint8_t fw_req_buf[FW_UPLOAD_REQ_SIZE];
	MSG_FW_UPLOAD_REPLY fw_reply;
	MSG_FW_UPLOAD *fw_req;
	FW_UPLOAD_TCSGE *tsge;
	SGE_SIMPLE32 *sge;
	uint32_t flags;
	int error;

	memset(&fw_req_buf, 0, sizeof(fw_req_buf));
	fw_req = (MSG_FW_UPLOAD *)fw_req_buf;
	fw_req->ImageType = MPI_FW_UPLOAD_ITYPE_FW_IOC_MEM;
	fw_req->Function = MPI_FUNCTION_FW_UPLOAD;
	fw_req->MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
	tsge = (FW_UPLOAD_TCSGE *)&fw_req->SGL;
	tsge->DetailsLength = 12;
	tsge->Flags = MPI_SGE_FLAGS_TRANSACTION_ELEMENT;
	tsge->ImageSize = htole32(mpt->fw_image_size);
	sge = (SGE_SIMPLE32 *)(tsge + 1);
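	/*
	 * A simple SGE packs its flags and length into one 32-bit word:
	 * the flag bits live in the top byte (hence the shift by
	 * MPI_SGE_FLAGS_SHIFT) and the transfer length occupies the low
	 * bits, so the image size is simply OR'd in below.
	 */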
	flags = (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER
	    | MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_SIMPLE_ELEMENT
	    | MPI_SGE_FLAGS_32_BIT_ADDRESSING | MPI_SGE_FLAGS_IOC_TO_HOST);
	flags <<= MPI_SGE_FLAGS_SHIFT;
	sge->FlagsLength = htole32(flags | mpt->fw_image_size);
	sge->Address = htole32(mpt->fw_phys);
	error = mpt_send_handshake_cmd(mpt, sizeof(fw_req_buf), &fw_req_buf);
	if (error)
		return (error);
	error = mpt_recv_handshake_reply(mpt, sizeof(fw_reply), &fw_reply);
	return (error);
}

static void
mpt_diag_outsl(struct mpt_softc *mpt, uint32_t addr,
    uint32_t *data, bus_size_t len)
{
	uint32_t *data_end;

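	/*
	 * Round the byte count up to a whole number of 32-bit words and
	 * convert it to a word count; e.g. len == 10 rounds up to 12
	 * bytes and yields 3 words.
	 */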
	data_end = data + (roundup2(len, sizeof(uint32_t)) / 4);
	pci_enable_io(mpt->dev, SYS_RES_IOPORT);
	mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, addr);
	while (data != data_end) {
		mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, *data);
		data++;
	}
	pci_disable_io(mpt->dev, SYS_RES_IOPORT);
}

static int
mpt_download_fw(struct mpt_softc *mpt)
{
	MpiFwHeader_t *fw_hdr;
	int error;
	uint32_t ext_offset;
	uint32_t data;

	mpt_prt(mpt, "Downloading Firmware - Image Size %d\n",
	    mpt->fw_image_size);

	error = mpt_enable_diag_mode(mpt);
	if (error != 0) {
		mpt_prt(mpt, "Could not enter diagnostic mode!\n");
		return (EIO);
	}

	mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC,
	    MPI_DIAG_RW_ENABLE | MPI_DIAG_DISABLE_ARM);

	fw_hdr = (MpiFwHeader_t *)mpt->fw_image;
	mpt_diag_outsl(mpt, fw_hdr->LoadStartAddress, (uint32_t *)fw_hdr,
	    fw_hdr->ImageSize);

	ext_offset = fw_hdr->NextImageHeaderOffset;
	while (ext_offset != 0) {
		MpiExtImageHeader_t *ext;

		ext = (MpiExtImageHeader_t *)((uintptr_t)fw_hdr + ext_offset);
		ext_offset = ext->NextImageHeaderOffset;

		mpt_diag_outsl(mpt, ext->LoadStartAddress, (uint32_t *)ext,
		    ext->ImageSize);
	}

	pci_enable_io(mpt->dev, SYS_RES_IOPORT);
	/* Setup the address to jump to on reset. */
	mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, fw_hdr->IopResetRegAddr);
	mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, fw_hdr->IopResetVectorValue);

	/*
	 * The controller sets the "flash bad" status after attempting
	 * to auto-boot from flash.  Clear the status so that the controller
	 * will continue the boot process with our newly installed firmware.
	 */
	mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, MPT_DIAG_MEM_CFG_BASE);
	data = mpt_pio_read(mpt, MPT_OFFSET_DIAG_DATA) | MPT_DIAG_MEM_CFG_BADFL;
	mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, MPT_DIAG_MEM_CFG_BASE);
	mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, data);

	pci_disable_io(mpt->dev, SYS_RES_IOPORT);

	/*
	 * Re-enable the processor and clear the boot halt flag.
	 */
	data = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);
	data &= ~(MPI_DIAG_PREVENT_IOC_BOOT | MPI_DIAG_DISABLE_ARM);
	mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, data);

	mpt_disable_diag_mode(mpt);
	return (0);
}

/*
 * Allocate/Initialize data structures for the controller.  Called
 * once at instance startup.
 */
static int
mpt_configure_ioc(struct mpt_softc *mpt)
{
	MSG_PORT_FACTS_REPLY pfp;
	MSG_IOC_FACTS_REPLY facts;
	int try;
	int needreset;
	uint32_t max_chain_depth;

	needreset = 0;
	for (try = 0; try < MPT_MAX_TRYS; try++) {
		/*
		 * No need to reset if the IOC is already in the READY state.
		 *
		 * Force a reset if initialization failed previously.
		 * Note that a hard_reset of the second channel of a '929
		 * will stop operation of the first channel.  Hopefully, if
		 * the first channel is ok, the second will not require a
		 * hard reset.
		 */
		if (needreset || (mpt_rd_db(mpt) & MPT_DB_STATE_MASK) !=
		    MPT_DB_STATE_READY) {
			if (mpt_reset(mpt, /*reinit*/FALSE) != MPT_OK)
				continue;
		}
		needreset = 0;

		if (mpt_get_iocfacts(mpt, &facts) != MPT_OK) {
			mpt_prt(mpt, "mpt_get_iocfacts failed\n");
			needreset = 1;
			continue;
		}

		mpt->mpt_global_credits = le16toh(facts.GlobalCredits);
		mpt->request_frame_size = le16toh(facts.RequestFrameSize);
		mpt->ioc_facts_flags = facts.Flags;
		mpt_prt(mpt, "MPI Version=%d.%d.%d.%d\n",
		    le16toh(facts.MsgVersion) >> 8,
		    le16toh(facts.MsgVersion) & 0xFF,
		    le16toh(facts.HeaderVersion) >> 8,
		    le16toh(facts.HeaderVersion) & 0xFF);

		/*
		 * Now that we know the request frame size, we can calculate
		 * the actual (reasonable) segment limit for read/write I/O.
		 *
		 * This limit is constrained by:
		 *
		 *  + The size of each area we allocate per command (and how
		 *    many chain segments we can fit into it).
		 *  + The total number of areas we've set up.
		 *  + The actual chain depth the card will allow.
		 *
		 * The first area's segment count is limited by the I/O
		 * request at the head of it.  Realistically, we cannot
		 * allocate more than MPT_MAX_REQUESTS areas, so to account
		 * for both conditions we start out with MPT_MAX_REQUESTS-2.
		 */
		max_chain_depth = facts.MaxChainDepth;

		/* total number of request areas we (can) allocate */
		mpt->max_seg_cnt = MPT_MAX_REQUESTS(mpt) - 2;

		/* converted to the number of chain areas possible */
		mpt->max_seg_cnt *= MPT_NRFM(mpt);

		/* limited by the number of chain areas the card supports */
		if (mpt->max_seg_cnt > max_chain_depth) {
			mpt_lprt(mpt, MPT_PRT_DEBUG,
			    "chain depth limited to %u (from %u)\n",
			    max_chain_depth, mpt->max_seg_cnt);
			mpt->max_seg_cnt = max_chain_depth;
		}

		/* converted to the number of simple SGEs in chain segments */
		mpt->max_seg_cnt *= (MPT_NSGL(mpt) - 1);
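		/*
		 * As a purely illustrative example (the real numbers come
		 * from the IOCFacts reply and the frame-size macros): with
		 * 256 request areas, two reserved, 4 chain frames per area,
		 * a card limit of 128 chains, and 8 SGEs per chain frame
		 * (one of which is the chain-link entry itself), the math
		 * above yields min(254 * 4, 128) * (8 - 1) = 896 segments.
		 */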

		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "Maximum Segment Count: %u\n", mpt->max_seg_cnt);
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "MsgLength=%u IOCNumber=%d\n",
		    facts.MsgLength, facts.IOCNumber);
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "IOCFACTS: GlobalCredits=%d BlockSize=%u bytes "
		    "Request Frame Size %u bytes Max Chain Depth %u\n",
		    mpt->mpt_global_credits, facts.BlockSize,
		    mpt->request_frame_size << 2, max_chain_depth);
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "IOCFACTS: Num Ports %d, FWImageSize %d, "
		    "Flags=%#x\n", facts.NumberOfPorts,
		    le32toh(facts.FWImageSize), facts.Flags);

		if ((facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT) != 0) {
			struct mpt_map_info mi;
			int error;

			/*
			 * In some configurations, the IOC's firmware is
			 * stored in a shared piece of system NVRAM that
			 * is only accessible via the BIOS.  In this
			 * case, the firmware keeps a copy of firmware in
			 * RAM until the OS driver retrieves it.  Once
			 * retrieved, we are responsible for re-downloading
			 * the firmware after any hard reset.
			 */
			mpt->fw_image_size = le32toh(facts.FWImageSize);
			error = mpt_dma_tag_create(mpt, mpt->parent_dmat,
			    /*alignment*/1, /*boundary*/0,
			    /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
			    /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL,
			    /*filterarg*/NULL, mpt->fw_image_size,
			    /*nsegments*/1, /*maxsegsz*/mpt->fw_image_size,
			    /*flags*/0, &mpt->fw_dmat);
			if (error != 0) {
				mpt_prt(mpt, "cannot create fw dma tag\n");
				return (ENOMEM);
			}
			error = bus_dmamem_alloc(mpt->fw_dmat,
			    (void **)&mpt->fw_image, BUS_DMA_NOWAIT,
			    &mpt->fw_dmap);
			if (error != 0) {
				mpt_prt(mpt, "cannot allocate fw mem.\n");
				bus_dma_tag_destroy(mpt->fw_dmat);
				return (ENOMEM);
			}
			mi.mpt = mpt;
			mi.error = 0;
			bus_dmamap_load(mpt->fw_dmat, mpt->fw_dmap,
			    mpt->fw_image, mpt->fw_image_size, mpt_map_rquest,
			    &mi, 0);
			mpt->fw_phys = mi.phys;

			error = mpt_upload_fw(mpt);
			if (error != 0) {
				mpt_prt(mpt, "fw upload failed.\n");
				bus_dmamap_unload(mpt->fw_dmat, mpt->fw_dmap);
				bus_dmamem_free(mpt->fw_dmat, mpt->fw_image,
				    mpt->fw_dmap);
				bus_dma_tag_destroy(mpt->fw_dmat);
				mpt->fw_image = NULL;
				return (EIO);
			}
		}

		if (mpt_get_portfacts(mpt, &pfp) != MPT_OK) {
			mpt_prt(mpt, "mpt_get_portfacts failed\n");
			needreset = 1;
			continue;
		}

		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "PORTFACTS: Type %x PFlags %x IID %d MaxDev %d\n",
		    pfp.PortType, pfp.ProtocolFlags, pfp.PortSCSIID,
		    pfp.MaxDevices);

		mpt->mpt_port_type = pfp.PortType;
		mpt->mpt_proto_flags = pfp.ProtocolFlags;
		if (pfp.PortType != MPI_PORTFACTS_PORTTYPE_SCSI &&
		    pfp.PortType != MPI_PORTFACTS_PORTTYPE_SAS &&
		    pfp.PortType != MPI_PORTFACTS_PORTTYPE_FC) {
			mpt_prt(mpt, "Unsupported Port Type (%x)\n",
			    pfp.PortType);
			return (ENXIO);
		}
		if (!(pfp.ProtocolFlags & MPI_PORTFACTS_PROTOCOL_INITIATOR)) {
			mpt_prt(mpt, "initiator role unsupported\n");
			return (ENXIO);
		}
		if (pfp.PortType == MPI_PORTFACTS_PORTTYPE_FC) {
			mpt->is_fc = 1;
			mpt->is_sas = 0;
		} else if (pfp.PortType == MPI_PORTFACTS_PORTTYPE_SAS) {
			mpt->is_fc = 0;
			mpt->is_sas = 1;
		} else {
			mpt->is_fc = 0;
			mpt->is_sas = 0;
		}
		mpt->mpt_ini_id = pfp.PortSCSIID;
		mpt->mpt_max_devices = pfp.MaxDevices;

		if (mpt_enable_ioc(mpt) != 0) {
			mpt_prt(mpt, "Unable to initialize IOC\n");
			return (ENXIO);
		}

		/*
		 * Read and set up initial configuration information
		 * (IOC and SPI only for now).
		 *
		 * XXX Should figure out what "personalities" are
		 * available and defer all initialization junk to
		 * them.
		 */
		mpt_read_config_info_ioc(mpt);

		if (mpt->is_fc == 0 && mpt->is_sas == 0) {
			if (mpt_read_config_info_spi(mpt)) {
				return (EIO);
			}
			if (mpt_set_initial_config_spi(mpt)) {
				return (EIO);
			}
		}

		/* Everything worked. */
		break;
	}

	if (try >= MPT_MAX_TRYS) {
		mpt_prt(mpt, "failed to initialize IOC\n");
		return (EIO);
	}

	mpt_lprt(mpt, MPT_PRT_DEBUG, "enabling interrupts\n");

	mpt_enable_ints(mpt);
	return (0);
}

static int
mpt_enable_ioc(struct mpt_softc *mpt)
{
	uint32_t pptr;
	int val;

	if (mpt_send_ioc_init(mpt, MPI_WHOINIT_HOST_DRIVER) != MPT_OK) {
		mpt_prt(mpt, "mpt_send_ioc_init failed\n");
		return (EIO);
	}

	mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_send_ioc_init ok\n");

	if (mpt_wait_state(mpt, MPT_DB_STATE_RUNNING) != MPT_OK) {
		mpt_prt(mpt, "IOC failed to go to run state\n");
		return (ENXIO);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC now at RUNSTATE\n");

	/*
	 * Give it reply buffers.
	 *
	 * Do *not* exceed global credits.
	 */
	for (val = 0, pptr = mpt->reply_phys;
	    (pptr + MPT_REPLY_SIZE) < (mpt->reply_phys + PAGE_SIZE);
	    pptr += MPT_REPLY_SIZE) {
		mpt_free_reply(mpt, pptr);
		if (++val == mpt->mpt_global_credits - 1)
			break;
	}
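	/*
	 * The loop above posts as many reply frames as fit in the single
	 * reply page, but stops one short of GlobalCredits so the IOC is
	 * never handed more reply buffers than it has credits for.
	 */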

	/*
	 * Enable asynchronous event reporting
	 */
	mpt_send_event_request(mpt, 1);

	/*
	 * Enable the port
	 */
	if (mpt_send_port_enable(mpt, 0) != MPT_OK) {
		mpt_prt(mpt, "failed to enable port 0\n");
		return (ENXIO);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "enabled port 0\n");

	return (MPT_OK);
}