mpt.c revision 155521
/*-
 * Generic routines for LSI '909 FC  adapters.
 * FreeBSD Version.
 *
 * Copyright (c) 2000, 2001 by Greg Ansley
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Additional Copyright (c) 2002 by Matthew Jacob under same license.
 */
/*
 * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
 * Copyright (c) 2005, WHEEL Sp. z o.o.
 * Copyright (c) 2004, 2005 Justin T. Gibbs
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/mpt/mpt.c 155521 2006-02-11 01:35:29Z mjacob $");

#include <dev/mpt/mpt.h>
#include <dev/mpt/mpt_cam.h> /* XXX For static handler registration */
#include <dev/mpt/mpt_raid.h> /* XXX For static handler registration */

#include <dev/mpt/mpilib/mpi.h>
#include <dev/mpt/mpilib/mpi_ioc.h>

#include <sys/sysctl.h>

#define MPT_MAX_TRYS 3
#define MPT_MAX_WAIT 300000

static int maxwait_ack = 0;
static int maxwait_int = 0;
static int maxwait_state = 0;

TAILQ_HEAD(, mpt_softc)	mpt_tailq = TAILQ_HEAD_INITIALIZER(mpt_tailq);
mpt_reply_handler_t *mpt_reply_handlers[MPT_NUM_REPLY_HANDLERS];

static mpt_reply_handler_t mpt_default_reply_handler;
static mpt_reply_handler_t mpt_config_reply_handler;
static mpt_reply_handler_t mpt_handshake_reply_handler;
static mpt_reply_handler_t mpt_event_reply_handler;
static void mpt_send_event_ack(struct mpt_softc *mpt, request_t *ack_req,
			       MSG_EVENT_NOTIFY_REPLY *msg, uint32_t context);
static int mpt_send_event_request(struct mpt_softc *mpt, int onoff);
static int mpt_soft_reset(struct mpt_softc *mpt);
static void mpt_hard_reset(struct mpt_softc *mpt);
static int mpt_configure_ioc(struct mpt_softc *mpt);
static int mpt_enable_ioc(struct mpt_softc *mpt);

/************************* Personality Module Support *************************/
/*
 * We include one extra entry that is guaranteed to be NULL
 * to simplify our iterator.
 */
static struct mpt_personality *mpt_personalities[MPT_MAX_PERSONALITIES + 1];
static __inline struct mpt_personality*
	mpt_pers_find(struct mpt_softc *, u_int);
static __inline struct mpt_personality*
	mpt_pers_find_reverse(struct mpt_softc *, u_int);

static __inline struct mpt_personality *
mpt_pers_find(struct mpt_softc *mpt, u_int start_at)
{
	KASSERT(start_at <= MPT_MAX_PERSONALITIES,
		("mpt_pers_find: starting position out of range\n"));

	while (start_at < MPT_MAX_PERSONALITIES
	    && (mpt->mpt_pers_mask & (0x1 << start_at)) == 0) {
		start_at++;
	}
	return (mpt_personalities[start_at]);
}

/*
 * Used infrequently, so there is no need to optimize as in the forward
 * traversal, where the guaranteed-NULL MAX+1 entry terminates the loop.
 */
static __inline struct mpt_personality *
mpt_pers_find_reverse(struct mpt_softc *mpt, u_int start_at)
{
	/* NB: start_at is unsigned; decrementing past 0 wraps above MAX. */
	while (start_at < MPT_MAX_PERSONALITIES
	    && (mpt->mpt_pers_mask & (0x1 << start_at)) == 0) {
		start_at--;
	}
	if (start_at < MPT_MAX_PERSONALITIES)
		return (mpt_personalities[start_at]);
	return (NULL);
}

#define MPT_PERS_FOREACH(mpt, pers)				\
	for (pers = mpt_pers_find(mpt, /*start_at*/0);		\
	     pers != NULL;					\
	     pers = mpt_pers_find(mpt, /*start_at*/pers->id+1))

#define MPT_PERS_FOREACH_REVERSE(mpt, pers)				\
	for (pers = mpt_pers_find_reverse(mpt, MPT_MAX_PERSONALITIES-1);\
	     pers != NULL;						\
	     pers = mpt_pers_find_reverse(mpt, /*start_at*/pers->id-1))
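
/*
 * Example usage (an illustrative sketch only, not part of the driver):
 * delivering a notification to every attached personality, as
 * mpt_event_reply_handler() does below.
 *
 *	struct mpt_personality *pers;
 *	u_int handled = 0;
 *
 *	MPT_PERS_FOREACH(mpt, pers)
 *		handled += pers->event(mpt, req, msg);
 */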

static mpt_load_handler_t      mpt_stdload;
static mpt_probe_handler_t     mpt_stdprobe;
static mpt_attach_handler_t    mpt_stdattach;
static mpt_event_handler_t     mpt_stdevent;
static mpt_reset_handler_t     mpt_stdreset;
static mpt_shutdown_handler_t  mpt_stdshutdown;
static mpt_detach_handler_t    mpt_stddetach;
static mpt_unload_handler_t    mpt_stdunload;
static struct mpt_personality mpt_default_personality =
{
	.load		= mpt_stdload,
	.probe		= mpt_stdprobe,
	.attach		= mpt_stdattach,
	.event		= mpt_stdevent,
	.reset		= mpt_stdreset,
	.shutdown	= mpt_stdshutdown,
	.detach		= mpt_stddetach,
	.unload		= mpt_stdunload
};

static mpt_load_handler_t      mpt_core_load;
static mpt_attach_handler_t    mpt_core_attach;
static mpt_reset_handler_t     mpt_core_ioc_reset;
static mpt_event_handler_t     mpt_core_event;
static mpt_shutdown_handler_t  mpt_core_shutdown;
static mpt_detach_handler_t    mpt_core_detach;
static mpt_unload_handler_t    mpt_core_unload;
static struct mpt_personality mpt_core_personality =
{
	.name		= "mpt_core",
	.load		= mpt_core_load,
	.attach		= mpt_core_attach,
	.event		= mpt_core_event,
	.reset		= mpt_core_ioc_reset,
	.shutdown	= mpt_core_shutdown,
	.detach		= mpt_core_detach,
	.unload		= mpt_core_unload,
};

/*
 * Manual declaration so that DECLARE_MPT_PERSONALITY doesn't need
 * ordering information.  We want the core to always register FIRST.
 * Other modules are set to SI_ORDER_SECOND.
 */
static moduledata_t mpt_core_mod = {
	"mpt_core", mpt_modevent, &mpt_core_personality
};
DECLARE_MODULE(mpt_core, mpt_core_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
MODULE_VERSION(mpt_core, 1);

#define MPT_PERS_ATTACHED(pers, mpt) \
	((mpt)->mpt_pers_mask & (0x1 << pers->id))


int
mpt_modevent(module_t mod, int type, void *data)
{
	struct mpt_personality *pers;
	int error;

	pers = (struct mpt_personality *)data;

	error = 0;
	switch (type) {
	case MOD_LOAD:
	{
		mpt_load_handler_t **def_handler;
		mpt_load_handler_t **pers_handler;
		int i;

		for (i = 0; i < MPT_MAX_PERSONALITIES; i++) {
			if (mpt_personalities[i] == NULL)
				break;
		}
		if (i >= MPT_MAX_PERSONALITIES) {
			error = ENOMEM;
			break;
		}
		pers->id = i;
		mpt_personalities[i] = pers;

		/* Install standard/noop handlers for any NULL entries. */
		def_handler = MPT_PERS_FIRST_HANDLER(&mpt_default_personality);
		pers_handler = MPT_PERS_FIRST_HANDLER(pers);
		while (pers_handler <= MPT_PERS_LAST_HANDLER(pers)) {
			if (*pers_handler == NULL)
				*pers_handler = *def_handler;
			pers_handler++;
			def_handler++;
		}

		error = (pers->load(pers));
		if (error != 0)
			mpt_personalities[i] = NULL;
		break;
	}
	case MOD_SHUTDOWN:
		break;
	case MOD_QUIESCE:
		break;
	case MOD_UNLOAD:
		error = pers->unload(pers);
		mpt_personalities[pers->id] = NULL;
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}

int
mpt_stdload(struct mpt_personality *pers)
{
	/* Load is always successful. */
	return (0);
}

int
mpt_stdprobe(struct mpt_softc *mpt)
{
	/* Probe is always successful. */
	return (0);
}

int
mpt_stdattach(struct mpt_softc *mpt)
{
	/* Attach is always successful. */
	return (0);
}

int
mpt_stdevent(struct mpt_softc *mpt, request_t *req, MSG_EVENT_NOTIFY_REPLY *msg)
{
	mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_stdevent: 0x%x\n", msg->Event & 0xFF);
	/* Event was not for us. */
	return (0);
}

void
mpt_stdreset(struct mpt_softc *mpt, int type)
{
}

void
mpt_stdshutdown(struct mpt_softc *mpt)
{
}

void
mpt_stddetach(struct mpt_softc *mpt)
{
}

int
mpt_stdunload(struct mpt_personality *pers)
{
	/* Unload is always successful. */
	return (0);
}

/******************************* Bus DMA Support ******************************/
void
mpt_map_rquest(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct mpt_map_info *map_info;

	map_info = (struct mpt_map_info *)arg;
	map_info->error = error;
	map_info->phys = segs->ds_addr;
}

/**************************** Reply/Event Handling ****************************/
int
mpt_register_handler(struct mpt_softc *mpt, mpt_handler_type type,
		     mpt_handler_t handler, uint32_t *phandler_id)
{

	switch (type) {
	case MPT_HANDLER_REPLY:
	{
		u_int cbi;
		u_int free_cbi;

		if (phandler_id == NULL)
			return (EINVAL);

		free_cbi = MPT_HANDLER_ID_NONE;
		for (cbi = 0; cbi < MPT_NUM_REPLY_HANDLERS; cbi++) {
			/*
			 * If the same handler is registered multiple
			 * times, don't error out.  Just return the
			 * index of the original registration.
			 */
			if (mpt_reply_handlers[cbi] == handler.reply_handler) {
				*phandler_id = MPT_CBI_TO_HID(cbi);
				return (0);
			}

			/*
			 * Fill from the front in the hope that
			 * all registered handlers consume only a
			 * single cache line.
			 *
			 * We don't break on the first empty slot so
			 * that the full table is checked to see if
			 * this handler was previously registered.
			 */
			if (free_cbi == MPT_HANDLER_ID_NONE
			 && (mpt_reply_handlers[cbi]
			  == mpt_default_reply_handler))
				free_cbi = cbi;
		}
		if (free_cbi == MPT_HANDLER_ID_NONE)
			return (ENOMEM);
		mpt_reply_handlers[free_cbi] = handler.reply_handler;
		*phandler_id = MPT_CBI_TO_HID(free_cbi);
		break;
	}
	default:
		mpt_prt(mpt, "mpt_register_handler unknown type %d\n", type);
		return (EINVAL);
	}
	return (0);
}
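
/*
 * Example (a sketch of the registration pattern used elsewhere in this
 * driver; "my_reply_handler" is a hypothetical callback): a personality
 * registers its reply handler once and keeps the returned id for
 * building request MsgContexts.
 *
 *	mpt_handler_t handler;
 *	uint32_t handler_id;
 *
 *	handler.reply_handler = my_reply_handler;
 *	if (mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
 *	    &handler_id) != 0)
 *		return (EIO);
 */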

int
mpt_deregister_handler(struct mpt_softc *mpt, mpt_handler_type type,
		       mpt_handler_t handler, uint32_t handler_id)
{

	switch (type) {
	case MPT_HANDLER_REPLY:
	{
		u_int cbi;

		cbi = MPT_CBI(handler_id);
		if (cbi >= MPT_NUM_REPLY_HANDLERS
		 || mpt_reply_handlers[cbi] != handler.reply_handler)
			return (ENOENT);
		mpt_reply_handlers[cbi] = mpt_default_reply_handler;
		break;
	}
	default:
		mpt_prt(mpt, "mpt_deregister_handler unknown type %d\n", type);
		return (EINVAL);
	}
	return (0);
}

static int
mpt_default_reply_handler(struct mpt_softc *mpt, request_t *req,
			  MSG_DEFAULT_REPLY *reply_frame)
{
	mpt_prt(mpt, "XXXX Default Handler Called.  Req %p, Frame %p\n",
		req, reply_frame);

	if (reply_frame != NULL)
		mpt_dump_reply_frame(mpt, reply_frame);

	mpt_prt(mpt, "XXXX Reply Frame Ignored\n");

	return (/*free_reply*/TRUE);
}

static int
mpt_config_reply_handler(struct mpt_softc *mpt, request_t *req,
				MSG_DEFAULT_REPLY *reply_frame)
{
	if (req != NULL) {

		if (reply_frame != NULL) {
			MSG_CONFIG *cfgp;
			MSG_CONFIG_REPLY *reply;

			cfgp = (MSG_CONFIG *)req->req_vbuf;
			reply = (MSG_CONFIG_REPLY *)reply_frame;
			req->IOCStatus = le16toh(reply_frame->IOCStatus);
			bcopy(&reply->Header, &cfgp->Header,
			      sizeof(cfgp->Header));
		}
		req->state &= ~REQ_STATE_QUEUED;
		req->state |= REQ_STATE_DONE;
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);

		if ((req->state & REQ_STATE_NEED_WAKEUP) != 0)
			wakeup(req);
	}

	return (/*free_reply*/TRUE);
}

static int
mpt_handshake_reply_handler(struct mpt_softc *mpt, request_t *req,
			 MSG_DEFAULT_REPLY *reply_frame)
{
	/* Nothing to be done. */
	return (/*free_reply*/TRUE);
}

static int
mpt_event_reply_handler(struct mpt_softc *mpt, request_t *req,
			MSG_DEFAULT_REPLY *reply_frame)
{
	int free_reply;

	if (reply_frame == NULL) {
		mpt_prt(mpt, "Event Handler: req %p - Unexpected NULL reply\n",
			req);
		return (/*free_reply*/TRUE);
	}

	free_reply = TRUE;
	switch (reply_frame->Function) {
	case MPI_FUNCTION_EVENT_NOTIFICATION:
	{
		MSG_EVENT_NOTIFY_REPLY *msg;
		struct mpt_personality *pers;
		u_int handled;

		handled = 0;
		msg = (MSG_EVENT_NOTIFY_REPLY *)reply_frame;
		MPT_PERS_FOREACH(mpt, pers)
			handled += pers->event(mpt, req, msg);

		if (handled == 0 && mpt->mpt_pers_mask == 0) {
			mpt_lprt(mpt, MPT_PRT_WARN,
				"No Handlers For Any Event Notify Frames. "
				"Event %#x (ACK %sequired).\n",
				msg->Event, msg->AckRequired? "r" : "not r");
		} else if (handled == 0) {
			mpt_prt(mpt,
				"Unhandled Event Notify Frame. Event %#x "
				"(ACK %sequired).\n",
				msg->Event, msg->AckRequired? "r" : "not r");
		}

		if (msg->AckRequired) {
			request_t *ack_req;
			uint32_t context;

			context = htole32(req->index|MPT_REPLY_HANDLER_EVENTS);
			ack_req = mpt_get_request(mpt, /*sleep_ok*/FALSE);
			if (ack_req == NULL) {
				struct mpt_evtf_record *evtf;

				evtf = (struct mpt_evtf_record *)reply_frame;
				evtf->context = context;
				LIST_INSERT_HEAD(&mpt->ack_frames, evtf, links);
				free_reply = FALSE;
				break;
			}
			mpt_send_event_ack(mpt, ack_req, msg, context);
		}
		break;
	}
	case MPI_FUNCTION_PORT_ENABLE:
		mpt_lprt(mpt, MPT_PRT_DEBUG, "enable port reply\n");
		break;
	case MPI_FUNCTION_EVENT_ACK:
		break;
	default:
		mpt_prt(mpt, "Unknown Event Function: %x\n",
			reply_frame->Function);
		break;
	}

	if (req != NULL
	 && (reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0) {

		req->state &= ~REQ_STATE_QUEUED;
		req->state |= REQ_STATE_DONE;
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);

		if ((req->state & REQ_STATE_NEED_WAKEUP) != 0)
			wakeup(req);
		else
			mpt_free_request(mpt, req);
	}
	return (free_reply);
}

/*
 * Process an asynchronous event from the IOC.
 */
static int
mpt_core_event(struct mpt_softc *mpt, request_t *req,
	       MSG_EVENT_NOTIFY_REPLY *msg)
{
	mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_core_event: 0x%x\n",
		 msg->Event & 0xFF);
	switch(msg->Event & 0xFF) {
	case MPI_EVENT_NONE:
		break;
	case MPI_EVENT_LOG_DATA:
	{
		int i;

		/* Some error occurred that LSI wants logged */
		mpt_prt(mpt, "EvtLogData: IOCLogInfo: 0x%08x\n",
			msg->IOCLogInfo);
		mpt_prt(mpt, "\tEvtLogData: Event Data:");
		for (i = 0; i < msg->EventDataLength; i++)
			mpt_prtc(mpt, "  %08x", msg->Data[i]);
		mpt_prtc(mpt, "\n");
		break;
	}
	case MPI_EVENT_EVENT_CHANGE:
		/*
		 * This is just an acknowledgement
		 * of our mpt_send_event_request.
		 */
		break;
	case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
		break;
	default:
		return (/*handled*/0);
		break;
	}
	return (/*handled*/1);
}

static void
mpt_send_event_ack(struct mpt_softc *mpt, request_t *ack_req,
		   MSG_EVENT_NOTIFY_REPLY *msg, uint32_t context)
{
	MSG_EVENT_ACK *ackp;

	ackp = (MSG_EVENT_ACK *)ack_req->req_vbuf;
	bzero(ackp, sizeof *ackp);
	ackp->Function = MPI_FUNCTION_EVENT_ACK;
	ackp->Event = msg->Event;
	ackp->EventContext = msg->EventContext;
	ackp->MsgContext = context;
	mpt_check_doorbell(mpt);
	mpt_send_cmd(mpt, ack_req);
}

/***************************** Interrupt Handling *****************************/
void
mpt_intr(void *arg)
{
	struct mpt_softc *mpt;
	uint32_t     reply_desc;

	mpt = (struct mpt_softc *)arg;
	while ((reply_desc = mpt_pop_reply_queue(mpt)) != MPT_REPLY_EMPTY) {
		request_t	  *req;
		MSG_DEFAULT_REPLY *reply_frame;
		uint32_t	   reply_baddr;
		u_int		   cb_index;
		u_int		   req_index;
		int		   free_rf;

		req = NULL;
		reply_frame = NULL;
		reply_baddr = 0;
		if ((reply_desc & MPI_ADDRESS_REPLY_A_BIT) != 0) {
			u_int offset;

			/*
			 * Ensure that the reply frame is coherent.
			 */
			reply_baddr = (reply_desc << 1);
			offset = reply_baddr - (mpt->reply_phys & 0xFFFFFFFF);
			bus_dmamap_sync_range(mpt->reply_dmat, mpt->reply_dmap,
					      offset, MPT_REPLY_SIZE,
					      BUS_DMASYNC_POSTREAD);
			reply_frame = MPT_REPLY_OTOV(mpt, offset);
			reply_desc = le32toh(reply_frame->MsgContext);
		}
		cb_index = MPT_CONTEXT_TO_CBI(reply_desc);
		req_index = MPT_CONTEXT_TO_REQI(reply_desc);
		if (req_index < MPT_MAX_REQUESTS(mpt))
			req = &mpt->request_pool[req_index];

		free_rf = mpt_reply_handlers[cb_index](mpt, req, reply_frame);

		if (reply_frame != NULL && free_rf)
			mpt_free_reply(mpt, reply_baddr);
	}
}
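
/*
 * For reference, a sketch of how a reply descriptor is decoded above:
 * each request's MsgContext packs a reply handler id and the request's
 * pool index, so both can be recovered from one 32-bit value.
 *
 *	context   = htole32(req->index | MPT_REPLY_HANDLER_EVENTS);
 *	cb_index  = MPT_CONTEXT_TO_CBI(context);	// handler table slot
 *	req_index = MPT_CONTEXT_TO_REQI(context);	// request pool slot
 */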

/******************************* Error Recovery *******************************/
void
mpt_complete_request_chain(struct mpt_softc *mpt, struct req_queue *chain,
			    u_int iocstatus)
{
	MSG_DEFAULT_REPLY  ioc_status_frame;
	request_t	  *req;

	bzero(&ioc_status_frame, sizeof(ioc_status_frame));
	ioc_status_frame.MsgLength = roundup2(sizeof(ioc_status_frame), 4);
	ioc_status_frame.IOCStatus = iocstatus;
	while((req = TAILQ_FIRST(chain)) != NULL) {
		MSG_REQUEST_HEADER *msg_hdr;
		u_int		    cb_index;

		msg_hdr = (MSG_REQUEST_HEADER *)req->req_vbuf;
		ioc_status_frame.Function = msg_hdr->Function;
		ioc_status_frame.MsgContext = msg_hdr->MsgContext;
		cb_index = MPT_CONTEXT_TO_CBI(le32toh(msg_hdr->MsgContext));
		mpt_reply_handlers[cb_index](mpt, req, &ioc_status_frame);
	}
}

/********************************* Diagnostics ********************************/
/*
 * Perform a diagnostic dump of a reply frame.
 */
void
mpt_dump_reply_frame(struct mpt_softc *mpt, MSG_DEFAULT_REPLY *reply_frame)
{

	mpt_prt(mpt, "Address Reply:\n");
	mpt_print_reply(reply_frame);
}

/******************************* Doorbell Access ******************************/
static __inline uint32_t mpt_rd_db(struct mpt_softc *mpt);
static __inline uint32_t mpt_rd_intr(struct mpt_softc *mpt);

static __inline uint32_t
mpt_rd_db(struct mpt_softc *mpt)
{
	return mpt_read(mpt, MPT_OFFSET_DOORBELL);
}

static __inline uint32_t
mpt_rd_intr(struct mpt_softc *mpt)
{
	return mpt_read(mpt, MPT_OFFSET_INTR_STATUS);
}

/* Busy wait for a doorbell to be read by the IOC */
static int
mpt_wait_db_ack(struct mpt_softc *mpt)
{
	int i;
	for (i=0; i < MPT_MAX_WAIT; i++) {
		if (!MPT_DB_IS_BUSY(mpt_rd_intr(mpt))) {
			maxwait_ack = i > maxwait_ack ? i : maxwait_ack;
			return MPT_OK;
		}

		DELAY(1000);
	}
	return MPT_FAIL;
}

/* Busy wait for a doorbell interrupt */
static int
mpt_wait_db_int(struct mpt_softc *mpt)
{
	int i;
	for (i=0; i < MPT_MAX_WAIT; i++) {
		if (MPT_DB_INTR(mpt_rd_intr(mpt))) {
			maxwait_int = i > maxwait_int ? i : maxwait_int;
			return MPT_OK;
		}
		DELAY(100);
	}
	return MPT_FAIL;
}

/* Warn if the doorbell indicates that the IOC is not running */
void
mpt_check_doorbell(struct mpt_softc *mpt)
{
	uint32_t db = mpt_rd_db(mpt);
	if (MPT_STATE(db) != MPT_DB_STATE_RUNNING) {
		mpt_prt(mpt, "Device not running\n");
		mpt_print_db(db);
	}
}

/* Wait for the IOC to transition to a given state */
static int
mpt_wait_state(struct mpt_softc *mpt, enum DB_STATE_BITS state)
{
	int i;

	for (i = 0; i < MPT_MAX_WAIT; i++) {
		uint32_t db = mpt_rd_db(mpt);
		if (MPT_STATE(db) == state) {
			maxwait_state = i > maxwait_state ? i : maxwait_state;
			return (MPT_OK);
		}
		DELAY(100);
	}
	return (MPT_FAIL);
}


/************************* Initialization/Configuration ***********************/
static int mpt_download_fw(struct mpt_softc *mpt);

/* Issue the reset COMMAND to the IOC */
static int
mpt_soft_reset(struct mpt_softc *mpt)
{
	mpt_lprt(mpt, MPT_PRT_DEBUG, "soft reset\n");

	/* Have to use hard reset if we are not in Running state */
	if (MPT_STATE(mpt_rd_db(mpt)) != MPT_DB_STATE_RUNNING) {
		mpt_prt(mpt, "soft reset failed: device not running\n");
		return MPT_FAIL;
	}

	/*
	 * If the doorbell is in use we don't have a chance of getting
	 * a word in since the IOC probably crashed in message
	 * processing.  So don't waste our time.
	 */
	if (MPT_DB_IS_IN_USE(mpt_rd_db(mpt))) {
		mpt_prt(mpt, "soft reset failed: doorbell wedged\n");
		return MPT_FAIL;
	}

	/* Send the reset request to the IOC */
	mpt_write(mpt, MPT_OFFSET_DOORBELL,
	    MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET << MPI_DOORBELL_FUNCTION_SHIFT);
	if (mpt_wait_db_ack(mpt) != MPT_OK) {
		mpt_prt(mpt, "soft reset failed: ack timeout\n");
		return MPT_FAIL;
	}

	/* Wait for the IOC to reload and come out of reset state */
	if (mpt_wait_state(mpt, MPT_DB_STATE_READY) != MPT_OK) {
		mpt_prt(mpt, "soft reset failed: device did not restart\n");
		return MPT_FAIL;
	}

	return MPT_OK;
}

static int
mpt_enable_diag_mode(struct mpt_softc *mpt)
{
	int try;

	try = 20;
	while (--try) {

		if ((mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC) & MPI_DIAG_DRWE) != 0)
			break;

		/* Enable diagnostic registers */
		mpt_write(mpt, MPT_OFFSET_SEQUENCE, 0xFF);
		mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_1ST_KEY_VALUE);
		mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_2ND_KEY_VALUE);
		mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_3RD_KEY_VALUE);
		mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_4TH_KEY_VALUE);
		mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_5TH_KEY_VALUE);

		DELAY(100000);
	}
	if (try == 0)
		return (EIO);
	return (0);
}

static void
mpt_disable_diag_mode(struct mpt_softc *mpt)
{
	mpt_write(mpt, MPT_OFFSET_SEQUENCE, 0xFFFFFFFF);
}

/*
 * This is a magic diagnostic reset that resets all the ARM
 * processors in the chip.
 */
static void
mpt_hard_reset(struct mpt_softc *mpt)
{
	int error;
	int wait;
	uint32_t diagreg;

	mpt_lprt(mpt, MPT_PRT_DEBUG, "hard reset\n");

	error = mpt_enable_diag_mode(mpt);
	if (error) {
		mpt_prt(mpt, "WARNING - Could not enter diagnostic mode!\n");
		mpt_prt(mpt, "Trying to reset anyway.\n");
	}

	diagreg = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);

	/*
	 * This appears to be a workaround required for some
	 * firmware or hardware revs.
	 */
	mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, diagreg | MPI_DIAG_DISABLE_ARM);
	DELAY(1000);

	/* Diag. port is now active so we can now hit the reset bit */
	mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, diagreg | MPI_DIAG_RESET_ADAPTER);

	/*
	 * Ensure that the reset has finished.  We delay 1ms
	 * prior to reading the register to make sure the chip
	 * has sufficiently completed its reset to handle register
	 * accesses.
	 */
	wait = 5000;
	do {
		DELAY(1000);
		diagreg = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);
	} while (--wait && (diagreg & MPI_DIAG_RESET_ADAPTER) == 0);

	if (wait == 0) {
		mpt_prt(mpt, "WARNING - Failed hard reset! "
			"Trying to initialize anyway.\n");
	}

	/*
	 * If we have firmware to download, it must be loaded before
	 * the controller will become operational.  Do so now.
	 */
	if (mpt->fw_image != NULL) {

		error = mpt_download_fw(mpt);

		if (error) {
			mpt_prt(mpt, "WARNING - Firmware Download Failed!\n");
			mpt_prt(mpt, "Trying to initialize anyway.\n");
		}
	}

	/*
	 * Resetting the controller should have disabled write
	 * access to the diagnostic registers, but disable
	 * manually to be sure.
	 */
	mpt_disable_diag_mode(mpt);
}

static void
mpt_core_ioc_reset(struct mpt_softc *mpt, int type)
{
	/*
	 * Complete all pending requests with a status
	 * appropriate for an IOC reset.
	 */
	mpt_complete_request_chain(mpt, &mpt->request_pending_list,
				   MPI_IOCSTATUS_INVALID_STATE);
}


/*
 * Reset the IOC when needed.  Try a software command first, then if
 * needed poke at the magic diagnostic reset.  Note that a hard reset
 * resets *both* IOCs on dual-function chips (FC929 && LSI1030) and
 * also fouls up the PCI configuration registers.
 */
int
mpt_reset(struct mpt_softc *mpt, int reinit)
{
	struct	mpt_personality *pers;
	int	ret;

	/* Try a soft reset */
	if ((ret = mpt_soft_reset(mpt)) != MPT_OK) {
		/* Failed; do a hard reset */
		mpt_hard_reset(mpt);

		/* Wait for the IOC to reload and come out of reset state */
		ret = mpt_wait_state(mpt, MPT_DB_STATE_READY);
		if (ret != MPT_OK)
			mpt_prt(mpt, "failed to reset device\n");
	}

	/*
	 * Invoke reset handlers.  We bump the reset count so
	 * that mpt_wait_req() understands that regardless of
	 * the specified wait condition, it should stop its wait.
	 */
	mpt->reset_cnt++;
	MPT_PERS_FOREACH(mpt, pers)
		pers->reset(mpt, ret);

	if (reinit != 0)
		mpt_enable_ioc(mpt);

	return ret;
}

/* Return a command buffer to the free queue */
void
mpt_free_request(struct mpt_softc *mpt, request_t *req)
{
	request_t *nxt;
	struct mpt_evtf_record *record;
	uint32_t reply_baddr;

	if (req == NULL || req != &mpt->request_pool[req->index]) {
		panic("mpt_free_request bad req ptr\n");
		return;
	}
	if ((nxt = req->chain) != NULL) {
		req->chain = NULL;
		mpt_free_request(mpt, nxt);	/* NB: recursion */
	}
	req->ccb = NULL;
	req->state = REQ_STATE_FREE;
	if (LIST_EMPTY(&mpt->ack_frames)) {
		TAILQ_INSERT_HEAD(&mpt->request_free_list, req, links);
		if (mpt->getreqwaiter != 0) {
			mpt->getreqwaiter = 0;
			wakeup(&mpt->request_free_list);
		}
		return;
	}

	/*
	 * Process an ack frame deferred due to resource shortage.
	 */
	record = LIST_FIRST(&mpt->ack_frames);
	LIST_REMOVE(record, links);
	mpt_send_event_ack(mpt, req, &record->reply, record->context);
	reply_baddr = (uint32_t)((uint8_t *)record - mpt->reply)
		    + (mpt->reply_phys & 0xFFFFFFFF);
	mpt_free_reply(mpt, reply_baddr);
}

/* Get a command buffer from the free queue */
request_t *
mpt_get_request(struct mpt_softc *mpt, int sleep_ok)
{
	request_t *req;

retry:
	req = TAILQ_FIRST(&mpt->request_free_list);
	if (req != NULL) {
		KASSERT(req == &mpt->request_pool[req->index],
		    ("mpt_get_request: corrupted request free list\n"));
		TAILQ_REMOVE(&mpt->request_free_list, req, links);
		req->state = REQ_STATE_ALLOCATED;
		req->chain = NULL;
	} else if (sleep_ok != 0) {
		mpt->getreqwaiter = 1;
		mpt_sleep(mpt, &mpt->request_free_list, PUSER, "mptgreq", 0);
		goto retry;
	}
	return req;
}

/* Pass the command to the IOC */
void
mpt_send_cmd(struct mpt_softc *mpt, request_t *req)
{
	uint32_t *pReq;

	pReq = req->req_vbuf;
	if (mpt->verbose > MPT_PRT_TRACE) {
		int offset;
		mpt_prt(mpt, "Send Request %d (0x%x):",
		    req->index, req->req_pbuf);
		for (offset = 0; offset < mpt->request_frame_size; offset++) {
			if ((offset & 0x7) == 0) {
				mpt_prtc(mpt, "\n");
				mpt_prt(mpt, " ");
			}
			mpt_prtc(mpt, " %08x", pReq[offset]);
		}
		mpt_prtc(mpt, "\n");
	}
	bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
	    BUS_DMASYNC_PREWRITE);
	req->state |= REQ_STATE_QUEUED;
	TAILQ_INSERT_HEAD(&mpt->request_pending_list, req, links);
	mpt_write(mpt, MPT_OFFSET_REQUEST_Q, (uint32_t) req->req_pbuf);
}
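
/*
 * Typical request life cycle (a sketch assembled from the routines in
 * this file; mpt_send_port_enable() below is a concrete instance):
 *
 *	req = mpt_get_request(mpt, sleep_ok);
 *	// ... fill in req->req_vbuf, including a MsgContext that
 *	// encodes req->index and the owner's reply handler id ...
 *	mpt_check_doorbell(mpt);
 *	mpt_send_cmd(mpt, req);
 *	error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
 *	    sleep_ok, timeout_ms);
 *	mpt_free_request(mpt, req);
 */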

/*
 * Wait for a request to complete.
 *
 * Inputs:
 *	mpt		softc of controller executing request
 *	req		request to wait for
 *	sleep_ok	nonzero implies may sleep in this context
 *	time_ms		timeout in ms.  0 implies no timeout.
 *
 * Return Values:
 *	0		Request completed
 *	non-0		Timeout fired before request completion.
 */
int
mpt_wait_req(struct mpt_softc *mpt, request_t *req,
	     mpt_req_state_t state, mpt_req_state_t mask,
	     int sleep_ok, int time_ms)
{
	int   error;
	int   timeout;
	u_int saved_cnt;

	/*
	 * timeout is in ms.  0 indicates infinite wait.
	 * Convert to ticks or 500us units depending on
	 * our sleep mode.
	 */
	if (sleep_ok != 0)
		timeout = (time_ms * hz) / 1000;
	else
		timeout = time_ms * 2;
	req->state |= REQ_STATE_NEED_WAKEUP;
	mask &= ~REQ_STATE_NEED_WAKEUP;
	saved_cnt = mpt->reset_cnt;
	while ((req->state & mask) != state
	    && mpt->reset_cnt == saved_cnt) {

		if (sleep_ok != 0) {
			error = mpt_sleep(mpt, req, PUSER, "mptreq", timeout);
			if (error == EWOULDBLOCK) {
				timeout = 0;
				break;
			}
		} else {
			if (time_ms != 0 && --timeout == 0) {
				mpt_prt(mpt, "mpt_wait_req timed out\n");
				break;
			}
			DELAY(500);
			mpt_intr(mpt);
		}
	}
	req->state &= ~REQ_STATE_NEED_WAKEUP;
	if (mpt->reset_cnt != saved_cnt)
		return (EIO);
	if (time_ms && timeout <= 0)
		return (ETIMEDOUT);
	return (0);
}

/*
 * Send a command to the IOC via the handshake register.
 *
 * Only done at initialization time and for certain unusual
 * commands such as device/bus reset as specified by LSI.
 */
int
mpt_send_handshake_cmd(struct mpt_softc *mpt, size_t len, void *cmd)
{
	int i;
	uint32_t data, *data32;

	/* Check condition of the IOC */
	data = mpt_rd_db(mpt);
	if ((MPT_STATE(data) != MPT_DB_STATE_READY
	  && MPT_STATE(data) != MPT_DB_STATE_RUNNING
	  && MPT_STATE(data) != MPT_DB_STATE_FAULT)
	 || MPT_DB_IS_IN_USE(data)) {
		mpt_prt(mpt, "handshake aborted - invalid doorbell state\n");
		mpt_print_db(data);
		return (EBUSY);
	}

	/* We move things in 32 bit chunks */
	len = (len + 3) >> 2;
	data32 = cmd;

	/* Clear any leftover pending doorbell interrupts */
	if (MPT_DB_INTR(mpt_rd_intr(mpt)))
		mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);

	/*
	 * Tell the handshake reg. we are going to send a command
	 * and how long it is going to be.
	 */
	data = (MPI_FUNCTION_HANDSHAKE << MPI_DOORBELL_FUNCTION_SHIFT) |
	    (len << MPI_DOORBELL_ADD_DWORDS_SHIFT);
	mpt_write(mpt, MPT_OFFSET_DOORBELL, data);

	/* Wait for the chip to notice */
	if (mpt_wait_db_int(mpt) != MPT_OK) {
		mpt_prt(mpt, "mpt_send_handshake_cmd timeout1\n");
		return (ETIMEDOUT);
	}

	/* Clear the interrupt */
	mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);

	if (mpt_wait_db_ack(mpt) != MPT_OK) {
		mpt_prt(mpt, "mpt_send_handshake_cmd timeout2\n");
		return (ETIMEDOUT);
	}

	/* Send the command */
	for (i = 0; i < len; i++) {
		mpt_write(mpt, MPT_OFFSET_DOORBELL, *data32++);
		if (mpt_wait_db_ack(mpt) != MPT_OK) {
			mpt_prt(mpt,
				"mpt_send_handshake_cmd timeout! index = %d\n",
				i);
			return (ETIMEDOUT);
		}
	}
	return MPT_OK;
}

/* Get the response from the handshake register */
int
mpt_recv_handshake_reply(struct mpt_softc *mpt, size_t reply_len, void *reply)
{
	int left, reply_left;
	u_int16_t *data16;
	MSG_DEFAULT_REPLY *hdr;

	/* We move things out in 16 bit chunks */
	reply_len >>= 1;
	data16 = (u_int16_t *)reply;

	hdr = (MSG_DEFAULT_REPLY *)reply;

	/* Get first word */
	if (mpt_wait_db_int(mpt) != MPT_OK) {
		mpt_prt(mpt, "mpt_recv_handshake_cmd timeout1\n");
		return ETIMEDOUT;
	}
	*data16++ = mpt_read(mpt, MPT_OFFSET_DOORBELL) & MPT_DB_DATA_MASK;
	mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);

	/* Get second word */
	if (mpt_wait_db_int(mpt) != MPT_OK) {
		mpt_prt(mpt, "mpt_recv_handshake_cmd timeout2\n");
		return ETIMEDOUT;
	}
	*data16++ = mpt_read(mpt, MPT_OFFSET_DOORBELL) & MPT_DB_DATA_MASK;
	mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);

	/* With the second word, we can now look at the length */
	if ((reply_len >> 1) != hdr->MsgLength) {
		mpt_prt(mpt, "reply length does not match message length: "
			"got 0x%02x, expected 0x%02x\n",
			hdr->MsgLength << 2, reply_len << 1);
	}

	/* Get rest of the reply; but don't overflow the provided buffer */
	left = (hdr->MsgLength << 1) - 2;
	reply_left = reply_len - 2;
	while (left--) {
		u_int16_t datum;

		if (mpt_wait_db_int(mpt) != MPT_OK) {
			mpt_prt(mpt, "mpt_recv_handshake_cmd timeout3\n");
			return ETIMEDOUT;
		}
		datum = mpt_read(mpt, MPT_OFFSET_DOORBELL);

		if (reply_left-- > 0)
			*data16++ = datum & MPT_DB_DATA_MASK;

		mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
	}

	/* One more wait & clear at the end */
	if (mpt_wait_db_int(mpt) != MPT_OK) {
		mpt_prt(mpt, "mpt_recv_handshake_cmd timeout4\n");
		return ETIMEDOUT;
	}
	mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);

	if ((hdr->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
		if (mpt->verbose >= MPT_PRT_TRACE)
			mpt_print_reply(hdr);
		return (MPT_FAIL | hdr->IOCStatus);
	}

	return (0);
}
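
/*
 * The two handshake routines are always used as a pair; the sketch
 * below mirrors mpt_get_iocfacts(), the canonical caller:
 *
 *	error = mpt_send_handshake_cmd(mpt, sizeof req, &req);
 *	if (error == 0)
 *		error = mpt_recv_handshake_reply(mpt, sizeof reply, &reply);
 */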

static int
mpt_get_iocfacts(struct mpt_softc *mpt, MSG_IOC_FACTS_REPLY *freplp)
{
	MSG_IOC_FACTS f_req;
	int error;

	bzero(&f_req, sizeof f_req);
	f_req.Function = MPI_FUNCTION_IOC_FACTS;
	f_req.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
	error = mpt_send_handshake_cmd(mpt, sizeof f_req, &f_req);
	if (error)
		return(error);
	error = mpt_recv_handshake_reply(mpt, sizeof (*freplp), freplp);
	return (error);
}

static int
mpt_get_portfacts(struct mpt_softc *mpt, MSG_PORT_FACTS_REPLY *freplp)
{
	MSG_PORT_FACTS f_req;
	int error;

	/* XXX: Only getting PORT FACTS for Port 0 */
	memset(&f_req, 0, sizeof f_req);
	f_req.Function = MPI_FUNCTION_PORT_FACTS;
	f_req.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
	error = mpt_send_handshake_cmd(mpt, sizeof f_req, &f_req);
	if (error)
		return(error);
	error = mpt_recv_handshake_reply(mpt, sizeof (*freplp), freplp);
	return (error);
}

/*
 * Send the initialization request. This is where we specify how many
 * SCSI busses and how many devices per bus we wish to emulate.
 * This is also the command that specifies the max size of the reply
 * frames from the IOC that we will be allocating.
 */
static int
mpt_send_ioc_init(struct mpt_softc *mpt, uint32_t who)
{
	int error = 0;
	MSG_IOC_INIT init;
	MSG_IOC_INIT_REPLY reply;

	bzero(&init, sizeof init);
	init.WhoInit = who;
	init.Function = MPI_FUNCTION_IOC_INIT;
	if (mpt->is_fc) {
		init.MaxDevices = 255;
	} else if (mpt->is_sas) {
		init.MaxDevices = mpt->mpt_max_devices;
	} else {
		init.MaxDevices = 16;
	}
	init.MaxBuses = 1;

	init.MsgVersion = htole16(MPI_VERSION);
	init.HeaderVersion = htole16(MPI_HEADER_VERSION);
	init.ReplyFrameSize = htole16(MPT_REPLY_SIZE);
	init.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
	if (mpt->ioc_facts_flags & MPI_IOCFACTS_FLAGS_REPLY_FIFO_HOST_SIGNAL) {
		init.Flags |= MPI_IOCINIT_FLAGS_REPLY_FIFO_HOST_SIGNAL;
	}

	if ((error = mpt_send_handshake_cmd(mpt, sizeof init, &init)) != 0) {
		return(error);
	}

	error = mpt_recv_handshake_reply(mpt, sizeof reply, &reply);
	return (error);
}


/*
 * Utility routine to read configuration headers and pages
 */
int
mpt_issue_cfg_req(struct mpt_softc *mpt, request_t *req, u_int Action,
		  u_int PageVersion, u_int PageLength, u_int PageNumber,
		  u_int PageType, uint32_t PageAddress, bus_addr_t addr,
		  bus_size_t len, int sleep_ok, int timeout_ms)
{
	MSG_CONFIG *cfgp;
	SGE_SIMPLE32 *se;

	cfgp = req->req_vbuf;
	memset(cfgp, 0, sizeof *cfgp);
	cfgp->Action = Action;
	cfgp->Function = MPI_FUNCTION_CONFIG;
	cfgp->Header.PageVersion = PageVersion;
	cfgp->Header.PageLength = PageLength;
	cfgp->Header.PageNumber = PageNumber;
	cfgp->Header.PageType = PageType;
	cfgp->PageAddress = PageAddress;
	se = (SGE_SIMPLE32 *)&cfgp->PageBufferSGE;
	se->Address = addr;
	MPI_pSGE_SET_LENGTH(se, len);
	MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
	    MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
	    MPI_SGE_FLAGS_END_OF_LIST |
	    ((Action == MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT
	  || Action == MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM)
	   ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST)));
	cfgp->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_CONFIG);

	mpt_check_doorbell(mpt);
	mpt_send_cmd(mpt, req);
	return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
			     sleep_ok, timeout_ms));
}


int
mpt_read_cfg_header(struct mpt_softc *mpt, int PageType, int PageNumber,
		    uint32_t PageAddress, CONFIG_PAGE_HEADER *rslt,
		    int sleep_ok, int timeout_ms)
{
	request_t  *req;
	int	    error;

	req = mpt_get_request(mpt, sleep_ok);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_read_cfg_header: Get request failed!\n");
		return (-1);
	}

	error = mpt_issue_cfg_req(mpt, req, MPI_CONFIG_ACTION_PAGE_HEADER,
				  /*PageVersion*/0, /*PageLength*/0, PageNumber,
				  PageType, PageAddress, /*addr*/0, /*len*/0,
				  sleep_ok, timeout_ms);
	if (error != 0) {
		mpt_prt(mpt, "read_cfg_header timed out\n");
		return (-1);
	}

	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
		mpt_prt(mpt, "mpt_read_cfg_header: Config Info Status %x\n",
			req->IOCStatus);
		error = -1;
	} else {
		MSG_CONFIG *cfgp;

		cfgp = req->req_vbuf;
		bcopy(&cfgp->Header, rslt, sizeof(*rslt));
		error = 0;
	}
	mpt_free_request(mpt, req);
	return (error);
}

#define	CFG_DATA_OFF	128

int
mpt_read_cfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress,
		  CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok,
		  int timeout_ms)
{
	request_t    *req;
	int	      error;

	req = mpt_get_request(mpt, sleep_ok);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_read_cfg_page: Get request failed!\n");
		return (-1);
	}

	error = mpt_issue_cfg_req(mpt, req, Action, hdr->PageVersion,
				  hdr->PageLength, hdr->PageNumber,
				  hdr->PageType & MPI_CONFIG_PAGETYPE_MASK,
				  PageAddress, req->req_pbuf + CFG_DATA_OFF,
				  len, sleep_ok, timeout_ms);
	if (error != 0) {
		mpt_prt(mpt, "read_cfg_page(%d) timed out\n", Action);
		return (-1);
	}

	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
		mpt_prt(mpt, "mpt_read_cfg_page: Config Info Status %x\n",
			req->IOCStatus);
		mpt_free_request(mpt, req);
		return (-1);
	}
	bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
	    BUS_DMASYNC_POSTREAD);
	memcpy(hdr, ((uint8_t *)req->req_vbuf)+CFG_DATA_OFF, len);
	mpt_free_request(mpt, req);
	return (0);
}

int
mpt_write_cfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress,
		   CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok,
		   int timeout_ms)
{
	request_t    *req;
	u_int	      hdr_attr;
	int	      error;

	hdr_attr = hdr->PageType & MPI_CONFIG_PAGEATTR_MASK;
	if (hdr_attr != MPI_CONFIG_PAGEATTR_CHANGEABLE &&
	    hdr_attr != MPI_CONFIG_PAGEATTR_PERSISTENT) {
		mpt_prt(mpt, "page type 0x%x not changeable\n",
			hdr->PageType & MPI_CONFIG_PAGETYPE_MASK);
		return (-1);
	}
	hdr->PageType &= MPI_CONFIG_PAGETYPE_MASK;

	req = mpt_get_request(mpt, sleep_ok);
	if (req == NULL)
		return (-1);

	memcpy(((caddr_t)req->req_vbuf)+CFG_DATA_OFF, hdr, len);
	/* Restore stripped out attributes */
	hdr->PageType |= hdr_attr;

	error = mpt_issue_cfg_req(mpt, req, Action, hdr->PageVersion,
				  hdr->PageLength, hdr->PageNumber,
				  hdr->PageType & MPI_CONFIG_PAGETYPE_MASK,
				  PageAddress, req->req_pbuf + CFG_DATA_OFF,
				  len, sleep_ok, timeout_ms);
	if (error != 0) {
		mpt_prt(mpt, "mpt_write_cfg_page timed out\n");
		return (-1);
	}

	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
		mpt_prt(mpt, "mpt_write_cfg_page: Config Info Status %x\n",
			req->IOCStatus);
		mpt_free_request(mpt, req);
		return (-1);
	}
	mpt_free_request(mpt, req);
	return (0);
}
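
/*
 * Configuration pages are updated with a read-modify-write-verify
 * cycle (a sketch; mpt_set_initial_config_spi() below does exactly
 * this for SPI Port Page 1):
 *
 *	mpt_read_cfg_header(...);	// fetch the page header
 *	mpt_read_cur_cfg_page(...);	// fetch current contents
 *	// ... modify fields ...
 *	mpt_write_cur_cfg_page(...);	// write the page back
 *	mpt_read_cur_cfg_page(...);	// re-read and verify
 */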

/*
 * Read IOC configuration information
 */
static int
mpt_read_config_info_ioc(struct mpt_softc *mpt)
{
	CONFIG_PAGE_HEADER hdr;
	struct mpt_raid_volume *mpt_raid;
	int rv;
	int i;
	size_t len;

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC,
				 /*PageNumber*/2, /*PageAddress*/0, &hdr,
				 /*sleep_ok*/FALSE, /*timeout_ms*/5000);
	if (rv)
		return (EIO);

	mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC Page 2 Header: ver %x, len %x, "
		 "num %x, type %x\n", hdr.PageVersion,
		 hdr.PageLength * sizeof(uint32_t),
		 hdr.PageNumber, hdr.PageType);

	len = hdr.PageLength * sizeof(uint32_t);
	mpt->ioc_page2 = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (mpt->ioc_page2 == NULL)
		return (ENOMEM);
	memcpy(&mpt->ioc_page2->Header, &hdr, sizeof(hdr));
	rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
				   &mpt->ioc_page2->Header, len,
				   /*sleep_ok*/FALSE, /*timeout_ms*/5000);
	if (rv) {
		mpt_prt(mpt, "failed to read IOC Page 2\n");
	} else if (mpt->ioc_page2->CapabilitiesFlags != 0) {
		uint32_t mask;

		mpt_prt(mpt, "Capabilities: (");
		for (mask = 1; mask != 0; mask <<= 1) {
			if ((mpt->ioc_page2->CapabilitiesFlags & mask) == 0)
				continue;

			switch (mask) {
			case MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT:
				mpt_prtc(mpt, " RAID-0");
				break;
			case MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT:
				mpt_prtc(mpt, " RAID-1E");
				break;
			case MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT:
				mpt_prtc(mpt, " RAID-1");
				break;
			case MPI_IOCPAGE2_CAP_FLAGS_SES_SUPPORT:
				mpt_prtc(mpt, " SES");
				break;
			case MPI_IOCPAGE2_CAP_FLAGS_SAFTE_SUPPORT:
				mpt_prtc(mpt, " SAFTE");
				break;
			case MPI_IOCPAGE2_CAP_FLAGS_CROSS_CHANNEL_SUPPORT:
				mpt_prtc(mpt, " Multi-Channel-Arrays");
				break;
			default:
				break;
			}
		}
		mpt_prtc(mpt, " )\n");
		if ((mpt->ioc_page2->CapabilitiesFlags
		   & (MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT
		    | MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT
		    | MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT)) != 0) {
			mpt_prt(mpt, "%d Active Volume%s(%d Max)\n",
				mpt->ioc_page2->NumActiveVolumes,
				mpt->ioc_page2->NumActiveVolumes != 1
			      ? "s " : " ",
				mpt->ioc_page2->MaxVolumes);
			mpt_prt(mpt, "%d Hidden Drive Member%s(%d Max)\n",
				mpt->ioc_page2->NumActivePhysDisks,
				mpt->ioc_page2->NumActivePhysDisks != 1
			      ? "s " : " ",
				mpt->ioc_page2->MaxPhysDisks);
		}
	}

	len = mpt->ioc_page2->MaxVolumes * sizeof(struct mpt_raid_volume);
	mpt->raid_volumes = malloc(len, M_DEVBUF, M_NOWAIT);
	if (mpt->raid_volumes == NULL) {
		mpt_prt(mpt, "Could not allocate RAID volume data\n");
	} else {
		memset(mpt->raid_volumes, 0, len);
	}

	/*
	 * Copy critical data out of ioc_page2 so that we can
	 * safely refresh the page without windows of unreliable
	 * data.
	 */
	mpt->raid_max_volumes = mpt->ioc_page2->MaxVolumes;

	len = sizeof(*mpt->raid_volumes->config_page)
	    + (sizeof(RAID_VOL0_PHYS_DISK)*(mpt->ioc_page2->MaxPhysDisks - 1));
	for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
		mpt_raid = &mpt->raid_volumes[i];
		mpt_raid->config_page = malloc(len, M_DEVBUF, M_NOWAIT);
		if (mpt_raid->config_page == NULL) {
			mpt_prt(mpt, "Could not allocate RAID page data\n");
			break;
		}
		memset(mpt_raid->config_page, 0, len);
	}
	mpt->raid_page0_len = len;

	len = mpt->ioc_page2->MaxPhysDisks * sizeof(struct mpt_raid_disk);
	mpt->raid_disks = malloc(len, M_DEVBUF, M_NOWAIT);
	if (mpt->raid_disks == NULL) {
		mpt_prt(mpt, "Could not allocate RAID disk data\n");
	} else {
		memset(mpt->raid_disks, 0, len);
	}

	mpt->raid_max_disks = mpt->ioc_page2->MaxPhysDisks;

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC,
				 /*PageNumber*/3, /*PageAddress*/0, &hdr,
				 /*sleep_ok*/FALSE, /*timeout_ms*/5000);
	if (rv)
		return (EIO);

	mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC Page 3 Header: %x %x %x %x\n",
		 hdr.PageVersion, hdr.PageLength, hdr.PageNumber, hdr.PageType);

	if (mpt->ioc_page3 != NULL)
		free(mpt->ioc_page3, M_DEVBUF);
	len = hdr.PageLength * sizeof(uint32_t);
	mpt->ioc_page3 = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (mpt->ioc_page3 == NULL)
		return (-1);
	memcpy(&mpt->ioc_page3->Header, &hdr, sizeof(hdr));
	rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
				   &mpt->ioc_page3->Header, len,
				   /*sleep_ok*/FALSE, /*timeout_ms*/5000);
	if (rv) {
		mpt_prt(mpt, "failed to read IOC Page 3\n");
	}

	mpt_raid_wakeup(mpt);

	return (0);
}

/*
 * Read SCSI configuration information
 */
static int
mpt_read_config_info_spi(struct mpt_softc *mpt)
{
	int rv, i;

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 0,
				 0, &mpt->mpt_port_page0.Header,
				 /*sleep_ok*/FALSE, /*timeout_ms*/5000);
	if (rv)
		return (-1);
	mpt_lprt(mpt, MPT_PRT_DEBUG,
		 "SPI Port Page 0 Header: %x %x %x %x\n",
		 mpt->mpt_port_page0.Header.PageVersion,
		 mpt->mpt_port_page0.Header.PageLength,
		 mpt->mpt_port_page0.Header.PageNumber,
		 mpt->mpt_port_page0.Header.PageType);

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 1,
				 0, &mpt->mpt_port_page1.Header,
				 /*sleep_ok*/FALSE, /*timeout_ms*/5000);
	if (rv)
		return (-1);

	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1 Header: %x %x %x %x\n",
		 mpt->mpt_port_page1.Header.PageVersion,
		 mpt->mpt_port_page1.Header.PageLength,
		 mpt->mpt_port_page1.Header.PageNumber,
		 mpt->mpt_port_page1.Header.PageType);

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 2,
				 /*PageAddress*/0, &mpt->mpt_port_page2.Header,
				 /*sleep_ok*/FALSE, /*timeout_ms*/5000);
	if (rv)
		return (-1);

	mpt_lprt(mpt, MPT_PRT_DEBUG,
		 "SPI Port Page 2 Header: %x %x %x %x\n",
		 mpt->mpt_port_page2.Header.PageVersion,
		 mpt->mpt_port_page2.Header.PageLength,
		 mpt->mpt_port_page2.Header.PageNumber,
		 mpt->mpt_port_page2.Header.PageType);

	for (i = 0; i < 16; i++) {
		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
					 0, i, &mpt->mpt_dev_page0[i].Header,
					 /*sleep_ok*/FALSE, /*timeout_ms*/5000);
		if (rv)
			return (-1);

		mpt_lprt(mpt, MPT_PRT_DEBUG,
			 "SPI Target %d Device Page 0 Header: %x %x %x %x\n",
			 i, mpt->mpt_dev_page0[i].Header.PageVersion,
			 mpt->mpt_dev_page0[i].Header.PageLength,
			 mpt->mpt_dev_page0[i].Header.PageNumber,
			 mpt->mpt_dev_page0[i].Header.PageType);

		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
					 1, i, &mpt->mpt_dev_page1[i].Header,
					 /*sleep_ok*/FALSE, /*timeout_ms*/5000);
		if (rv)
			return (-1);

		mpt_lprt(mpt, MPT_PRT_DEBUG,
			 "SPI Target %d Device Page 1 Header: %x %x %x %x\n",
			 i, mpt->mpt_dev_page1[i].Header.PageVersion,
			 mpt->mpt_dev_page1[i].Header.PageLength,
			 mpt->mpt_dev_page1[i].Header.PageNumber,
			 mpt->mpt_dev_page1[i].Header.PageType);
	}

	/*
	 * At this point, we don't *have* to fail. As long as we have
	 * valid config header information, we can (barely) lurch
	 * along.
	 */

	rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
				   &mpt->mpt_port_page0.Header,
				   sizeof(mpt->mpt_port_page0),
				   /*sleep_ok*/FALSE, /*timeout_ms*/5000);
	if (rv) {
		mpt_prt(mpt, "failed to read SPI Port Page 0\n");
	} else {
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "SPI Port Page 0: Capabilities %x PhysicalInterface %x\n",
		    mpt->mpt_port_page0.Capabilities,
		    mpt->mpt_port_page0.PhysicalInterface);
	}

	rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
				   &mpt->mpt_port_page1.Header,
				   sizeof(mpt->mpt_port_page1),
				   /*sleep_ok*/FALSE, /*timeout_ms*/5000);
	if (rv) {
		mpt_prt(mpt, "failed to read SPI Port Page 1\n");
	} else {
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "SPI Port Page 1: Configuration %x OnBusTimerValue %x\n",
		    mpt->mpt_port_page1.Configuration,
		    mpt->mpt_port_page1.OnBusTimerValue);
	}

	rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
				   &mpt->mpt_port_page2.Header,
				   sizeof(mpt->mpt_port_page2),
				   /*sleep_ok*/FALSE, /*timeout_ms*/5000);
	if (rv) {
		mpt_prt(mpt, "failed to read SPI Port Page 2\n");
	} else {
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "SPI Port Page 2: Flags %x Settings %x\n",
		    mpt->mpt_port_page2.PortFlags,
		    mpt->mpt_port_page2.PortSettings);
		for (i = 0; i < 16; i++) {
			mpt_lprt(mpt, MPT_PRT_DEBUG,
			    "SPI Port Page 2 Tgt %d: timo %x SF %x Flags %x\n",
			    i, mpt->mpt_port_page2.DeviceSettings[i].Timeout,
			    mpt->mpt_port_page2.DeviceSettings[i].SyncFactor,
			    mpt->mpt_port_page2.DeviceSettings[i].DeviceFlags);
		}
	}

	for (i = 0; i < 16; i++) {
		rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/i,
					   &mpt->mpt_dev_page0[i].Header,
					   sizeof(*mpt->mpt_dev_page0),
					   /*sleep_ok*/FALSE,
					   /*timeout_ms*/5000);
		if (rv) {
			mpt_prt(mpt,
			    "cannot read SPI Tgt %d Device Page 0\n", i);
			continue;
		}
		mpt_lprt(mpt, MPT_PRT_DEBUG,
			 "SPI Tgt %d Page 0: NParms %x Information %x\n",
			 i, mpt->mpt_dev_page0[i].NegotiatedParameters,
			 mpt->mpt_dev_page0[i].Information);

		rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/i,
					   &mpt->mpt_dev_page1[i].Header,
					   sizeof(*mpt->mpt_dev_page1),
					   /*sleep_ok*/FALSE,
					   /*timeout_ms*/5000);
		if (rv) {
			mpt_prt(mpt,
			    "cannot read SPI Tgt %d Device Page 1\n", i);
			continue;
		}
		mpt_lprt(mpt, MPT_PRT_DEBUG,
			 "SPI Tgt %d Page 1: RParms %x Configuration %x\n",
			 i, mpt->mpt_dev_page1[i].RequestedParameters,
			 mpt->mpt_dev_page1[i].Configuration);
	}
	return (0);
}

/*
 * Validate SPI configuration information.
 *
 * In particular, validate SPI Port Page 1.
 */
static int
mpt_set_initial_config_spi(struct mpt_softc *mpt)
{
	int i, pp1val = ((1 << mpt->mpt_ini_id) << 16) | mpt->mpt_ini_id;
	int error;

	mpt->mpt_disc_enable = 0xff;
	mpt->mpt_tag_enable = 0;

	if (mpt->mpt_port_page1.Configuration != pp1val) {
		CONFIG_PAGE_SCSI_PORT_1 tmp;

		mpt_prt(mpt,
		    "SPI Port Page 1 Config value bad (%x) - should be %x\n",
		    mpt->mpt_port_page1.Configuration, pp1val);
		tmp = mpt->mpt_port_page1;
		tmp.Configuration = pp1val;
		error = mpt_write_cur_cfg_page(mpt, /*PageAddress*/0,
					       &tmp.Header, sizeof(tmp),
					       /*sleep_ok*/FALSE,
					       /*timeout_ms*/5000);
		if (error)
			return (-1);
		error = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0,
					      &tmp.Header, sizeof(tmp),
					      /*sleep_ok*/FALSE,
					      /*timeout_ms*/5000);
		if (error)
			return (-1);
		if (tmp.Configuration != pp1val) {
			mpt_prt(mpt,
			    "failed to reset SPI Port Page 1 Config value\n");
			return (-1);
		}
		mpt->mpt_port_page1 = tmp;
	}

	for (i = 0; i < 16; i++) {
		CONFIG_PAGE_SCSI_DEVICE_1 tmp;
		tmp = mpt->mpt_dev_page1[i];
		tmp.RequestedParameters = 0;
		tmp.Configuration = 0;
		mpt_lprt(mpt, MPT_PRT_DEBUG,
			 "Set Tgt %d SPI DevicePage 1 values to %x 0 %x\n",
			 i, tmp.RequestedParameters, tmp.Configuration);
		error = mpt_write_cur_cfg_page(mpt, /*PageAddress*/i,
					       &tmp.Header, sizeof(tmp),
					       /*sleep_ok*/FALSE,
					       /*timeout_ms*/5000);
		if (error)
			return (-1);
		error = mpt_read_cur_cfg_page(mpt, /*PageAddress*/i,
					      &tmp.Header, sizeof(tmp),
					      /*sleep_ok*/FALSE,
					      /*timeout_ms*/5000);
		if (error)
			return (-1);
		mpt->mpt_dev_page1[i] = tmp;
		mpt_lprt(mpt, MPT_PRT_DEBUG,
			 "SPI Tgt %d Page 1: RParm %x Configuration %x\n", i,
			 mpt->mpt_dev_page1[i].RequestedParameters,
			 mpt->mpt_dev_page1[i].Configuration);
	}
	return (0);
}
1831
1832/*
1833 * Enable IOC port
1834 */
1835static int
1836mpt_send_port_enable(struct mpt_softc *mpt, int port)
1837{
1838	request_t	*req;
1839	MSG_PORT_ENABLE *enable_req;
1840	int		 error;
1841
1842	req = mpt_get_request(mpt, /*sleep_ok*/FALSE);
1843	if (req == NULL)
1844		return (-1);
1845
1846	enable_req = req->req_vbuf;
1847	bzero(enable_req, sizeof *enable_req);
1848
1849	enable_req->Function   = MPI_FUNCTION_PORT_ENABLE;
1850	enable_req->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_CONFIG);
1851	enable_req->PortNumber = port;
1852
1853	mpt_check_doorbell(mpt);
1854	mpt_lprt(mpt, MPT_PRT_DEBUG, "enabling port %d\n", port);
1855
1856	mpt_send_cmd(mpt, req);
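	/*
	 * SAS ports get a far longer timeout, presumably because port
	 * enable kicks off device discovery, which can take a while on a
	 * large topology.  On a timeout the request is not freed; the IOC
	 * may still own it and could complete it later.
	 */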
1857	error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
1858	    /*sleep_ok*/FALSE, /*time_ms*/mpt->is_sas ? 30000 : 3000);
1859	if (error != 0) {
1860		mpt_prt(mpt, "port enable timed out\n");
1861		return (-1);
1862	}
1863	mpt_free_request(mpt, req);
1864	return (0);
1865}
1866
1867/*
1868 * Enable/Disable asynchronous event reporting.
1869 *
1870 * NB: this is the first command we send via shared memory
1871 * instead of the handshake register.
1872 */
1873static int
1874mpt_send_event_request(struct mpt_softc *mpt, int onoff)
1875{
1876	request_t *req;
1877	MSG_EVENT_NOTIFY *enable_req;
1878
1879	req = mpt_get_request(mpt, /*sleep_ok*/FALSE);
	if (req == NULL)
		return (-1);
1880
1881	enable_req = req->req_vbuf;
1882	bzero(enable_req, sizeof *enable_req);
1883
1884	enable_req->Function   = MPI_FUNCTION_EVENT_NOTIFICATION;
1885	enable_req->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_EVENTS);
1886	enable_req->Switch     = onoff;
1887
1888	mpt_check_doorbell(mpt);
1889	mpt_lprt(mpt, MPT_PRT_DEBUG,
1890		 "%sabling async events\n", onoff ? "en" : "dis");
1891	mpt_send_cmd(mpt, req);
1892
1893	return (0);
1894}
1895
1896/*
1897 * Un-mask the interrupts on the chip.
1898 */
1899void
1900mpt_enable_ints(struct mpt_softc *mpt)
1901{
1902	/* Unmask everything except the doorbell interrupt */
1903	mpt_write(mpt, MPT_OFFSET_INTR_MASK, MPT_INTR_DB_MASK);
1904}
1905
1906/*
1907 * Mask the interrupts on the chip.
1908 */
1909void
1910mpt_disable_ints(struct mpt_softc *mpt)
1911{
1912	/* Mask all interrupts */
1913	mpt_write(mpt, MPT_OFFSET_INTR_MASK,
1914	    MPT_INTR_REPLY_MASK | MPT_INTR_DB_MASK);
1915}
1916
1917static void
1918mpt_sysctl_attach(struct mpt_softc *mpt)
1919{
1920	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
1921	struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);
1922
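	/*
	 * The knob below hangs off the device's own sysctl tree, so it
	 * surfaces as "dev.mpt.<unit>.debug"; e.g. "sysctl
	 * dev.mpt.0.debug=N" (unit and level N hypothetical) raises the
	 * verbosity at runtime, with levels following the MPT_PRT_*
	 * constants.
	 */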
1923	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1924		       "debug", CTLFLAG_RW, &mpt->verbose, 0,
1925		       "Debugging/Verbose level");
1926}
1927
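/*
 * Walk the registered personalities: a probe() returning zero means the
 * personality claims this controller, in which case its attach() runs
 * and success is recorded in mpt_pers_mask so mpt_detach() can unwind
 * it later.
 */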
1928int
1929mpt_attach(struct mpt_softc *mpt)
1930{
1931	int i;
1932
1933	for (i = 0; i < MPT_MAX_PERSONALITIES; i++) {
1934		struct mpt_personality *pers;
1935		int error;
1936
1937		pers = mpt_personalities[i];
1938		if (pers == NULL)
1939			continue;
1940
1941		if (pers->probe(mpt) == 0) {
1942			error = pers->attach(mpt);
1943			if (error != 0) {
1944				mpt_detach(mpt);
1945				return (error);
1946			}
1947			mpt->mpt_pers_mask |= (0x1 << pers->id);
1948			pers->use_count++;
1949		}
1950	}
1951
1952	return (0);
1953}
1954
1955int
1956mpt_shutdown(struct mpt_softc *mpt)
1957{
1958	struct mpt_personality *pers;
1959
1960	MPT_PERS_FOREACH_REVERSE(mpt, pers)
1961		pers->shutdown(mpt);
1962
1963	mpt_reset(mpt, /*reinit*/FALSE);
1964	return (0);
1965}
1966
1967int
1968mpt_detach(struct mpt_softc *mpt)
1969{
1970	struct mpt_personality *pers;
1971
1972	MPT_PERS_FOREACH_REVERSE(mpt, pers) {
1973		pers->detach(mpt);
1974		mpt->mpt_pers_mask &= ~(0x1 << pers->id);
1975		pers->use_count--;
1976	}
1977
1978	return (0);
1979}
1980
1981int
1982mpt_core_load(struct mpt_personality *pers)
1983{
1984	int i;
1985
1986	/*
1987	 * Setup core handlers and insert the default handler
1988	 * into all "empty slots".
1989	 */
1990	for (i = 0; i < MPT_NUM_REPLY_HANDLERS; i++)
1991		mpt_reply_handlers[i] = mpt_default_reply_handler;
1992
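	/*
	 * Reply dispatch is keyed off the MsgContext carried by each
	 * request: the MPT_REPLY_HANDLER_* constants select one of these
	 * slots and MPT_CBI() recovers the slot index from a completed
	 * reply (our reading of how MsgContext is composed elsewhere in
	 * this file).
	 */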
1993	mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_EVENTS)] =
1994	    mpt_event_reply_handler;
1995	mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_CONFIG)] =
1996	    mpt_config_reply_handler;
1997	mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_HANDSHAKE)] =
1998	    mpt_handshake_reply_handler;
1999
2000	return (0);
2001}
2002
2003/*
2004 * Initialize per-instance driver data and perform
2005 * initial controller configuration.
2006 */
2007int
2008mpt_core_attach(struct mpt_softc *mpt)
2009{
2010	int val;
2011	int error;
2012
2013	LIST_INIT(&mpt->ack_frames);
2014
2015	/* Put all request buffers on the free list */
2016	TAILQ_INIT(&mpt->request_pending_list);
2017	TAILQ_INIT(&mpt->request_free_list);
2018	for (val = 0; val < MPT_MAX_REQUESTS(mpt); val++)
2019		mpt_free_request(mpt, &mpt->request_pool[val]);
2020
2021	mpt_sysctl_attach(mpt);
2022
2023	mpt_lprt(mpt, MPT_PRT_DEBUG, "doorbell req = %s\n",
2024		 mpt_ioc_diag(mpt_read(mpt, MPT_OFFSET_DOORBELL)));
2025
2026	error = mpt_configure_ioc(mpt);
2027
2028	return (error);
2029}
2030
2031void
2032mpt_core_shutdown(struct mpt_softc *mpt)
2033{
2034}
2035
2036void
2037mpt_core_detach(struct mpt_softc *mpt)
2038{
2039}
2040
2041int
2042mpt_core_unload(struct mpt_personality *pers)
2043{
2044	/* Unload is always successful. */
2045	return (0);
2046}
2047
2048#define FW_UPLOAD_REQ_SIZE				\
2049	(sizeof(MSG_FW_UPLOAD) - sizeof(SGE_MPI_UNION)	\
2050       + sizeof(FW_UPLOAD_TCSGE) + sizeof(SGE_SIMPLE32))
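/*
 * The request size is the fixed FW_UPLOAD message header, minus the
 * variable SGL union placeholder at its tail, plus one transaction
 * context element and a single 32-bit simple SGE: exactly the layout
 * mpt_upload_fw() builds below.
 */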
2051
2052static int
2053mpt_upload_fw(struct mpt_softc *mpt)
2054{
2055	uint8_t fw_req_buf[FW_UPLOAD_REQ_SIZE];
2056	MSG_FW_UPLOAD_REPLY fw_reply;
2057	MSG_FW_UPLOAD *fw_req;
2058	FW_UPLOAD_TCSGE *tsge;
2059	SGE_SIMPLE32 *sge;
2060	uint32_t flags;
2061	int error;
2062
2063	memset(&fw_req_buf, 0, sizeof(fw_req_buf));
2064	fw_req = (MSG_FW_UPLOAD *)fw_req_buf;
2065	fw_req->ImageType = MPI_FW_UPLOAD_ITYPE_FW_IOC_MEM;
2066	fw_req->Function = MPI_FUNCTION_FW_UPLOAD;
2067	fw_req->MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
2068	tsge = (FW_UPLOAD_TCSGE *)&fw_req->SGL;
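	/*
	 * The transaction context element occupies the spot where the SGL
	 * normally begins; its 12-byte detail area carries the image
	 * offset and size (our reading of the MPI FW_UPLOAD_TCSGE layout).
	 */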
2069	tsge->DetailsLength = 12;
2070	tsge->Flags = MPI_SGE_FLAGS_TRANSACTION_ELEMENT;
2071	tsge->ImageSize = htole32(mpt->fw_image_size);
2072	sge = (SGE_SIMPLE32 *)(tsge + 1);
2073	flags = (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER
2074	      | MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_SIMPLE_ELEMENT
2075	      | MPI_SGE_FLAGS_32_BIT_ADDRESSING | MPI_SGE_FLAGS_IOC_TO_HOST);
2076	flags <<= MPI_SGE_FLAGS_SHIFT;
2077	sge->FlagsLength = htole32(flags | mpt->fw_image_size);
2078	sge->Address = htole32(mpt->fw_phys);
2079	error = mpt_send_handshake_cmd(mpt, sizeof(fw_req_buf), &fw_req_buf);
2080	if (error)
2081		return(error);
2082	error = mpt_recv_handshake_reply(mpt, sizeof(fw_reply), &fw_reply);
2083	return (error);
2084}
2085
2086static void
2087mpt_diag_outsl(struct mpt_softc *mpt, uint32_t addr,
2088	       uint32_t *data, bus_size_t len)
2089{
2090	uint32_t *data_end;
2091
2092	data_end = data + (roundup2(len, sizeof(uint32_t)) / 4);
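	/*
	 * len is rounded up to whole 32-bit words since the diagnostic
	 * port is written a dword at a time; the DIAG_DATA register
	 * appears to auto-increment the address latched via DIAG_ADDR, so
	 * the address is programmed only once before the loop.
	 */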
2093	pci_enable_io(mpt->dev, SYS_RES_IOPORT);
2094	mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, addr);
2095	while (data != data_end) {
2096		mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, *data);
2097		data++;
2098	}
2099	pci_disable_io(mpt->dev, SYS_RES_IOPORT);
2100}
2101
2102static int
2103mpt_download_fw(struct mpt_softc *mpt)
2104{
2105	MpiFwHeader_t *fw_hdr;
2106	int error;
2107	uint32_t ext_offset;
2108	uint32_t data;
2109
2110	mpt_prt(mpt, "Downloading Firmware - Image Size %d\n",
2111		mpt->fw_image_size);
2112
2113	error = mpt_enable_diag_mode(mpt);
2114	if (error != 0) {
2115		mpt_prt(mpt, "Could not enter diagnostic mode!\n");
2116		return (EIO);
2117	}
2118
2119	mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC,
2120		  MPI_DIAG_RW_ENABLE|MPI_DIAG_DISABLE_ARM);
2121
2122	fw_hdr = (MpiFwHeader_t *)mpt->fw_image;
2123	mpt_diag_outsl(mpt, fw_hdr->LoadStartAddress, (uint32_t*)fw_hdr,
2124		       fw_hdr->ImageSize);
2125
2126	ext_offset = fw_hdr->NextImageHeaderOffset;
2127	while (ext_offset != 0) {
2128		MpiExtImageHeader_t *ext;
2129
2130		ext = (MpiExtImageHeader_t *)((uintptr_t)fw_hdr + ext_offset);
2131		ext_offset = ext->NextImageHeaderOffset;
2132
2133		mpt_diag_outsl(mpt, ext->LoadStartAddress, (uint32_t*)ext,
2134			       ext->ImageSize);
2135	}
2136
2137	pci_enable_io(mpt->dev, SYS_RES_IOPORT);
2138	/* Setup the address to jump to on reset. */
2139	mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, fw_hdr->IopResetRegAddr);
2140	mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, fw_hdr->IopResetVectorValue);
2141
2142	/*
2143	 * The controller sets the "flash bad" status after attempting
2144	 * to auto-boot from flash.  Clear the status so that the controller
2145	 * will continue the boot process with our newly installed firmware.
2146	 */
2147	mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, MPT_DIAG_MEM_CFG_BASE);
2148	data = mpt_pio_read(mpt, MPT_OFFSET_DIAG_DATA) | MPT_DIAG_MEM_CFG_BADFL;
2149	mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, MPT_DIAG_MEM_CFG_BASE);
2150	mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, data);
2151
2152	pci_disable_io(mpt->dev, SYS_RES_IOPORT);
2153
2154	/*
2155	 * Re-enable the processor and clear the boot halt flag.
2156	 */
2157	data = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);
2158	data &= ~(MPI_DIAG_PREVENT_IOC_BOOT|MPI_DIAG_DISABLE_ARM);
2159	mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, data);
2160
2161	mpt_disable_diag_mode(mpt);
2162	return (0);
2163}
2164
2165/*
2166 * Allocate/Initialize data structures for the controller.  Called
2167 * once at instance startup.
2168 */
2169static int
2170mpt_configure_ioc(struct mpt_softc *mpt)
2171{
2172	MSG_PORT_FACTS_REPLY pfp;
2173	MSG_IOC_FACTS_REPLY facts;
2174	int try;
2175	int needreset;
2176	uint32_t max_chain_depth;
2177
2178	needreset = 0;
2179	for (try = 0; try < MPT_MAX_TRYS; try++) {
2180
2181		/*
2182		 * No need to reset if the IOC is already in the READY state.
2183		 *
2184		 * Force reset if initialization failed previously.
2185		 * Note that a hard_reset of the second channel of a '929
2186		 * will stop operation of the first channel.  Hopefully, if the
2187		 * first channel is ok, the second will not require a hard
2188		 * reset.
2189		 */
2190		if (needreset || (mpt_rd_db(mpt) & MPT_DB_STATE_MASK) !=
2191		    MPT_DB_STATE_READY) {
2192			if (mpt_reset(mpt, /*reinit*/FALSE) != MPT_OK)
2193				continue;
2194		}
2195		needreset = 0;
2196
2197		if (mpt_get_iocfacts(mpt, &facts) != MPT_OK) {
2198			mpt_prt(mpt, "mpt_get_iocfacts failed\n");
2199			needreset = 1;
2200			continue;
2201		}
2202
2203		mpt->mpt_global_credits = le16toh(facts.GlobalCredits);
2204		mpt->request_frame_size = le16toh(facts.RequestFrameSize);
2205		mpt->ioc_facts_flags = facts.Flags;
2206		mpt_prt(mpt, "MPI Version=%d.%d.%d.%d\n",
2207			    le16toh(facts.MsgVersion) >> 8,
2208			    le16toh(facts.MsgVersion) & 0xFF,
2209			    le16toh(facts.HeaderVersion) >> 8,
2210			    le16toh(facts.HeaderVersion) & 0xFF);
2211
2212		/*
2213		 * Now that we know request frame size, we can calculate
2214		 * the actual (reasonable) segment limit for read/write I/O.
2215		 *
2216		 * This limit is constrained by:
2217		 *
2218		 *  + The size of each area we allocate per command (and how
2219		 *    many chain segments we can fit into it).
2220		 *  + The total number of areas we've set up.
2221		 *  + The actual chain depth the card will allow.
2222		 *
2223		 * The first area's segment count is limited by the I/O request
2224		 * at the head of it.  We realistically cannot allocate more
2225		 * than MPT_MAX_REQUESTS areas. Therefore, to account for both
2226		 * conditions, we'll just start out with MPT_MAX_REQUESTS-2.
2227		 *
2228		 */
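		/*
		 * A worked example with hypothetical values: given 512
		 * request areas and MPT_NRFM(mpt) == 2 chain areas per
		 * request, we would start from (512 - 2) * 2 == 1020 chain
		 * areas, clamp that to the IOC's MaxChainDepth, and then
		 * multiply by MPT_NSGL(mpt) - 1 usable simple SGEs per
		 * chain area, one slot being reserved for the chain link
		 * to the next area.
		 */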
2229		max_chain_depth = facts.MaxChainDepth;
2230
2231		/* total number of request areas we (can) allocate */
2232		mpt->max_seg_cnt = MPT_MAX_REQUESTS(mpt) - 2;
2233
2234		/* converted to the number of chain areas possible */
2235		mpt->max_seg_cnt *= MPT_NRFM(mpt);
2236
2237		/* limited by the number of chain areas the card will support */
2238		if (mpt->max_seg_cnt > max_chain_depth) {
2239			mpt_lprt(mpt, MPT_PRT_DEBUG,
2240			    "chain depth limited to %u (from %u)\n",
2241			    max_chain_depth, mpt->max_seg_cnt);
2242			mpt->max_seg_cnt = max_chain_depth;
2243		}
2244
2245		/* converted to the number of simple sges in chain segments. */
2246		mpt->max_seg_cnt *= (MPT_NSGL(mpt) - 1);
2247
2248		mpt_lprt(mpt, MPT_PRT_DEBUG,
2249		    "Maximum Segment Count: %u\n", mpt->max_seg_cnt);
2250		mpt_lprt(mpt, MPT_PRT_DEBUG,
2251			 "MsgLength=%u IOCNumber=%d\n",
2252			 facts.MsgLength, facts.IOCNumber);
2253		mpt_lprt(mpt, MPT_PRT_DEBUG,
2254			 "IOCFACTS: GlobalCredits=%d BlockSize=%u bytes "
2255			 "Request Frame Size %u bytes Max Chain Depth %u\n",
2256			 mpt->mpt_global_credits, facts.BlockSize,
2257			 mpt->request_frame_size << 2, max_chain_depth);
2258		mpt_lprt(mpt, MPT_PRT_DEBUG,
2259			 "IOCFACTS: Num Ports %d, FWImageSize %d, "
2260			 "Flags=%#x\n", facts.NumberOfPorts,
2261			 le32toh(facts.FWImageSize), facts.Flags);
2262
2263
2264		if ((facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT) != 0) {
2265			struct mpt_map_info mi;
2266			int error;
2267
2268			/*
2269			 * In some configurations, the IOC's firmware is
2270			 * stored in a shared piece of system NVRAM that
2271			 * is only accessible via the BIOS.  In this
2272			 * case, the IOC keeps a copy of the firmware in
2273			 * its RAM until the OS driver retrieves it.  Once
2274			 * retrieved, we are responsible for re-downloading
2275			 * the firmware after any hard-reset.
2276			 */
2277			mpt->fw_image_size = le32toh(facts.FWImageSize);
2278			error = mpt_dma_tag_create(mpt, mpt->parent_dmat,
2279			    /*alignment*/1, /*boundary*/0,
2280			    /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
2281			    /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL,
2282			    /*filterarg*/NULL, mpt->fw_image_size,
2283			    /*nsegments*/1, /*maxsegsz*/mpt->fw_image_size,
2284			    /*flags*/0, &mpt->fw_dmat);
2285			if (error != 0) {
2286				mpt_prt(mpt, "cannot create fw dma tag\n");
2287				return (ENOMEM);
2288			}
2289			error = bus_dmamem_alloc(mpt->fw_dmat,
2290			    (void **)&mpt->fw_image, BUS_DMA_NOWAIT,
2291			    &mpt->fw_dmap);
2292			if (error != 0) {
2293				mpt_prt(mpt, "cannot allocate fw mem.\n");
2294				bus_dma_tag_destroy(mpt->fw_dmat);
2295				return (ENOMEM);
2296			}
2297			mi.mpt = mpt;
2298			mi.error = 0;
2299			bus_dmamap_load(mpt->fw_dmat, mpt->fw_dmap,
2300			    mpt->fw_image, mpt->fw_image_size, mpt_map_rquest,
2301			    &mi, 0);
2302			mpt->fw_phys = mi.phys;
2303
2304			error = mpt_upload_fw(mpt);
2305			if (error != 0) {
2306				mpt_prt(mpt, "fw upload failed.\n");
2307				bus_dmamap_unload(mpt->fw_dmat, mpt->fw_dmap);
2308				bus_dmamem_free(mpt->fw_dmat, mpt->fw_image,
2309				    mpt->fw_dmap);
2310				bus_dma_tag_destroy(mpt->fw_dmat);
2311				mpt->fw_image = NULL;
2312				return (EIO);
2313			}
2314		}
2315
2316		if (mpt_get_portfacts(mpt, &pfp) != MPT_OK) {
2317			mpt_prt(mpt, "mpt_get_portfacts failed\n");
2318			needreset = 1;
2319			continue;
2320		}
2321
2322		mpt_lprt(mpt, MPT_PRT_DEBUG,
2323			 "PORTFACTS: Type %x PFlags %x IID %d MaxDev %d\n",
2324			 pfp.PortType, pfp.ProtocolFlags, pfp.PortSCSIID,
2325			 pfp.MaxDevices);
2326
2327		mpt->mpt_port_type = pfp.PortType;
2328		mpt->mpt_proto_flags = pfp.ProtocolFlags;
2329		if (pfp.PortType != MPI_PORTFACTS_PORTTYPE_SCSI &&
2330		    pfp.PortType != MPI_PORTFACTS_PORTTYPE_SAS &&
2331		    pfp.PortType != MPI_PORTFACTS_PORTTYPE_FC) {
2332			mpt_prt(mpt, "Unsupported Port Type (%x)\n",
2333			    pfp.PortType);
2334			return (ENXIO);
2335		}
2336		if (!(pfp.ProtocolFlags & MPI_PORTFACTS_PROTOCOL_INITIATOR)) {
2337			mpt_prt(mpt, "initiator role unsupported\n");
2338			return (ENXIO);
2339		}
2340		if (pfp.PortType == MPI_PORTFACTS_PORTTYPE_FC) {
2341			mpt->is_fc = 1;
2342			mpt->is_sas = 0;
2343		} else if (pfp.PortType == MPI_PORTFACTS_PORTTYPE_SAS) {
2344			mpt->is_fc = 0;
2345			mpt->is_sas = 1;
2346		} else {
2347			mpt->is_fc = 0;
2348			mpt->is_sas = 0;
2349		}
2350		mpt->mpt_ini_id = pfp.PortSCSIID;
2351		mpt->mpt_max_devices = pfp.MaxDevices;
2352
2353		if (mpt_enable_ioc(mpt) != 0) {
2354			mpt_prt(mpt, "Unable to initialize IOC\n");
2355			return (ENXIO);
2356		}
2357
2358		/*
2359		 * Read and set up initial configuration information
2360		 * (IOC and SPI only for now)
2361		 *
2362		 * XXX Should figure out what "personalities" are
2363		 * available and defer all initialization junk to
2364		 * them.
2365		 */
2366		mpt_read_config_info_ioc(mpt);
2367
2368		if (mpt->is_fc == 0 && mpt->is_sas == 0) {
2369			if (mpt_read_config_info_spi(mpt)) {
2370				return (EIO);
2371			}
2372			if (mpt_set_initial_config_spi(mpt)) {
2373				return (EIO);
2374			}
2375		}
2376
2377		/* Everything worked */
2378		break;
2379	}
2380
2381	if (try >= MPT_MAX_TRYS) {
2382		mpt_prt(mpt, "failed to initialize IOC\n");
2383		return (EIO);
2384	}
2385
2386	mpt_lprt(mpt, MPT_PRT_DEBUG, "enabling interrupts\n");
2387
2388	mpt_enable_ints(mpt);
2389	return (0);
2390}
2391
2392static int
2393mpt_enable_ioc(struct mpt_softc *mpt)
2394{
2395	uint32_t pptr;
2396	int val;
2397
2398	if (mpt_send_ioc_init(mpt, MPI_WHOINIT_HOST_DRIVER) != MPT_OK) {
2399		mpt_prt(mpt, "mpt_send_ioc_init failed\n");
2400		return (EIO);
2401	}
2402
2403	mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_send_ioc_init ok\n");
2404
2405	if (mpt_wait_state(mpt, MPT_DB_STATE_RUNNING) != MPT_OK) {
2406		mpt_prt(mpt, "IOC failed to go to run state\n");
2407		return (ENXIO);
2408	}
2409	mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC now at RUNSTATE\n");
2410
2411	/*
2412	 * Give it reply buffers
2413	 *
2414	 * Do *not* exceed global credits.
2415	 */
2416	for (val = 0, pptr = mpt->reply_phys;
2417	    (pptr + MPT_REPLY_SIZE) < (mpt->reply_phys + PAGE_SIZE);
2418	     pptr += MPT_REPLY_SIZE) {
2419		mpt_free_reply(mpt, pptr);
2420		if (++val == mpt->mpt_global_credits - 1)
2421			break;
2422	}
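	/*
	 * The loop above hands the IOC at most GlobalCredits - 1 reply
	 * frames and never more than fit in the single page of reply
	 * memory; the one-frame slack presumably keeps the reply queue
	 * from being overrun.
	 */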
2423
2424	/*
2425	 * Enable asynchronous event reporting
2426	 */
2427	mpt_send_event_request(mpt, 1);
2428
2429	/*
2430	 * Enable the port
2431	 */
2432	if (mpt_send_port_enable(mpt, 0) != MPT_OK) {
2433		mpt_prt(mpt, "failed to enable port 0\n");
2434		return (ENXIO);
2435	}
2436	mpt_lprt(mpt, MPT_PRT_DEBUG, "enabled port 0\n");
2437
2438
2439	return (0);
2440}
2441