/*-
 * Generic routines for LSI Fusion adapters.
 * FreeBSD Version.
 *
 * Copyright (c) 2000, 2001 by Greg Ansley
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2002, 2006 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Support from Chris Ellsworth in order to make SAS adapters work
 * is gratefully acknowledged.
 *
 *
 * Support from LSI-Logic has also gone a great deal toward making this a
 * workable subsystem and is gratefully acknowledged.
 */
/*-
 * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
 * Copyright (c) 2005, WHEEL Sp. z o.o.
 * Copyright (c) 2004, 2005 Justin T. Gibbs
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/mpt/mpt.c 160396 2006-07-16 03:34:55Z mjacob $");

#include <dev/mpt/mpt.h>
#include <dev/mpt/mpt_cam.h> /* XXX For static handler registration */
#include <dev/mpt/mpt_raid.h> /* XXX For static handler registration */

#include <dev/mpt/mpilib/mpi.h>
#include <dev/mpt/mpilib/mpi_ioc.h>
#include <dev/mpt/mpilib/mpi_fc.h>
#include <dev/mpt/mpilib/mpi_targ.h>

#include <sys/sysctl.h>

#define MPT_MAX_TRYS 3
#define MPT_MAX_WAIT 300000

static int maxwait_ack = 0;
static int maxwait_int = 0;
static int maxwait_state = 0;

TAILQ_HEAD(, mpt_softc)	mpt_tailq = TAILQ_HEAD_INITIALIZER(mpt_tailq);
mpt_reply_handler_t *mpt_reply_handlers[MPT_NUM_REPLY_HANDLERS];

static mpt_reply_handler_t mpt_default_reply_handler;
static mpt_reply_handler_t mpt_config_reply_handler;
static mpt_reply_handler_t mpt_handshake_reply_handler;
static mpt_reply_handler_t mpt_event_reply_handler;
static void mpt_send_event_ack(struct mpt_softc *mpt, request_t *ack_req,
			       MSG_EVENT_NOTIFY_REPLY *msg, uint32_t context);
static int mpt_send_event_request(struct mpt_softc *mpt, int onoff);
static int mpt_soft_reset(struct mpt_softc *mpt);
static void mpt_hard_reset(struct mpt_softc *mpt);
static int mpt_configure_ioc(struct mpt_softc *mpt);
static int mpt_enable_ioc(struct mpt_softc *mpt, int);
/************************* Personality Module Support *************************/
/*
 * We include one extra entry that is guaranteed to be NULL
 * to simplify our iterator.
 */
static struct mpt_personality *mpt_personalities[MPT_MAX_PERSONALITIES + 1];
static __inline struct mpt_personality*
	mpt_pers_find(struct mpt_softc *, u_int);
static __inline struct mpt_personality*
	mpt_pers_find_reverse(struct mpt_softc *, u_int);

static __inline struct mpt_personality *
mpt_pers_find(struct mpt_softc *mpt, u_int start_at)
{
	KASSERT(start_at <= MPT_MAX_PERSONALITIES,
		("mpt_pers_find: starting position out of range\n"));

	while (start_at < MPT_MAX_PERSONALITIES
	    && (mpt->mpt_pers_mask & (0x1 << start_at)) == 0) {
		start_at++;
	}
	return (mpt_personalities[start_at]);
}

/*
 * Used infrequently, so no need to optimize like a forward
 * traversal, where we use the "MAX+1 entry is guaranteed to
 * be NULL" trick.  Note that start_at is unsigned, so when it
 * decrements past zero it wraps to a huge value and the loop
 * terminates.
 */
static __inline struct mpt_personality *
mpt_pers_find_reverse(struct mpt_softc *mpt, u_int start_at)
{
	while (start_at < MPT_MAX_PERSONALITIES
	    && (mpt->mpt_pers_mask & (0x1 << start_at)) == 0) {
		start_at--;
	}
	if (start_at < MPT_MAX_PERSONALITIES)
		return (mpt_personalities[start_at]);
	return (NULL);
}

#define MPT_PERS_FOREACH(mpt, pers)				\
	for (pers = mpt_pers_find(mpt, /*start_at*/0);		\
	     pers != NULL;					\
	     pers = mpt_pers_find(mpt, /*start_at*/pers->id+1))

#define MPT_PERS_FOREACH_REVERSE(mpt, pers)				\
	for (pers = mpt_pers_find_reverse(mpt, MPT_MAX_PERSONALITIES-1);\
	     pers != NULL;						\
	     pers = mpt_pers_find_reverse(mpt, /*start_at*/pers->id-1))
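
/*
 * Usage sketch (illustrative only): callers walk the attached
 * personalities with the iterators above, e.g. the event dispatch
 * path later in this file does
 *
 *	struct mpt_personality *pers;
 *
 *	MPT_PERS_FOREACH(mpt, pers)
 *		handled += pers->event(mpt, req, msg);
 */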

static mpt_load_handler_t      mpt_stdload;
static mpt_probe_handler_t     mpt_stdprobe;
static mpt_attach_handler_t    mpt_stdattach;
static mpt_enable_handler_t    mpt_stdenable;
static mpt_event_handler_t     mpt_stdevent;
static mpt_reset_handler_t     mpt_stdreset;
static mpt_shutdown_handler_t  mpt_stdshutdown;
static mpt_detach_handler_t    mpt_stddetach;
static mpt_unload_handler_t    mpt_stdunload;
static struct mpt_personality mpt_default_personality =
{
	.load		= mpt_stdload,
	.probe		= mpt_stdprobe,
	.attach		= mpt_stdattach,
	.enable		= mpt_stdenable,
	.event		= mpt_stdevent,
	.reset		= mpt_stdreset,
	.shutdown	= mpt_stdshutdown,
	.detach		= mpt_stddetach,
	.unload		= mpt_stdunload
};

static mpt_load_handler_t      mpt_core_load;
static mpt_attach_handler_t    mpt_core_attach;
static mpt_enable_handler_t    mpt_core_enable;
static mpt_reset_handler_t     mpt_core_ioc_reset;
static mpt_event_handler_t     mpt_core_event;
static mpt_shutdown_handler_t  mpt_core_shutdown;
static mpt_shutdown_handler_t  mpt_core_detach;
static mpt_unload_handler_t    mpt_core_unload;
static struct mpt_personality mpt_core_personality =
{
	.name		= "mpt_core",
	.load		= mpt_core_load,
	.attach		= mpt_core_attach,
	.enable		= mpt_core_enable,
	.event		= mpt_core_event,
	.reset		= mpt_core_ioc_reset,
	.shutdown	= mpt_core_shutdown,
	.detach		= mpt_core_detach,
	.unload		= mpt_core_unload,
};

/*
 * Manual declaration so that DECLARE_MPT_PERSONALITY doesn't need
 * ordering information.  We want the core to always register FIRST.
 * Other modules are set to SI_ORDER_SECOND.
 */
static moduledata_t mpt_core_mod = {
	"mpt_core", mpt_modevent, &mpt_core_personality
};
DECLARE_MODULE(mpt_core, mpt_core_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
MODULE_VERSION(mpt_core, 1);
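
/*
 * For reference, a non-core personality hooks in the same way but at
 * SI_ORDER_SECOND so that the core always registers first.  A minimal
 * sketch, with hypothetical "mpt_foo" names:
 *
 *	static moduledata_t mpt_foo_mod = {
 *		"mpt_foo", mpt_modevent, &mpt_foo_personality
 *	};
 *	DECLARE_MODULE(mpt_foo, mpt_foo_mod, SI_SUB_DRIVERS, SI_ORDER_SECOND);
 *	MODULE_VERSION(mpt_foo, 1);
 *	MODULE_DEPEND(mpt_foo, mpt_core, 1, 1, 1);
 */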

#define MPT_PERS_ATTACHED(pers, mpt) ((mpt)->mpt_pers_mask & (0x1 << pers->id))


int
mpt_modevent(module_t mod, int type, void *data)
{
	struct mpt_personality *pers;
	int error;

	pers = (struct mpt_personality *)data;

	error = 0;
	switch (type) {
	case MOD_LOAD:
	{
		mpt_load_handler_t **def_handler;
		mpt_load_handler_t **pers_handler;
		int i;

		for (i = 0; i < MPT_MAX_PERSONALITIES; i++) {
			if (mpt_personalities[i] == NULL)
				break;
		}
		if (i >= MPT_MAX_PERSONALITIES) {
			error = ENOMEM;
			break;
		}
		pers->id = i;
		mpt_personalities[i] = pers;

		/* Install standard/noop handlers for any NULL entries. */
		def_handler = MPT_PERS_FIRST_HANDLER(&mpt_default_personality);
		pers_handler = MPT_PERS_FIRST_HANDLER(pers);
		while (pers_handler <= MPT_PERS_LAST_HANDLER(pers)) {
			if (*pers_handler == NULL)
				*pers_handler = *def_handler;
			pers_handler++;
			def_handler++;
		}

		error = (pers->load(pers));
		if (error != 0)
			mpt_personalities[i] = NULL;
		break;
	}
	case MOD_SHUTDOWN:
		break;
#if __FreeBSD_version >= 500000
	case MOD_QUIESCE:
		break;
#endif
	case MOD_UNLOAD:
		error = pers->unload(pers);
		mpt_personalities[pers->id] = NULL;
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
}

int
mpt_stdload(struct mpt_personality *pers)
{
	/* Load is always successful. */
	return (0);
}

int
mpt_stdprobe(struct mpt_softc *mpt)
{
	/* Probe is always successful. */
	return (0);
}

int
mpt_stdattach(struct mpt_softc *mpt)
{
	/* Attach is always successful. */
	return (0);
}

int
mpt_stdenable(struct mpt_softc *mpt)
{
	/* Enable is always successful. */
	return (0);
}

int
mpt_stdevent(struct mpt_softc *mpt, request_t *req, MSG_EVENT_NOTIFY_REPLY *msg)
{
	mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_stdevent: 0x%x\n", msg->Event & 0xFF);
	/* Event was not for us. */
	return (0);
}

void
mpt_stdreset(struct mpt_softc *mpt, int type)
{
}

void
mpt_stdshutdown(struct mpt_softc *mpt)
{
}

void
mpt_stddetach(struct mpt_softc *mpt)
{
}

int
mpt_stdunload(struct mpt_personality *pers)
{
	/* Unload is always successful. */
	return (0);
}

/******************************* Bus DMA Support ******************************/
void
mpt_map_rquest(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct mpt_map_info *map_info;

	map_info = (struct mpt_map_info *)arg;
	map_info->error = error;
	map_info->phys = segs->ds_addr;
}

/**************************** Reply/Event Handling ****************************/
int
mpt_register_handler(struct mpt_softc *mpt, mpt_handler_type type,
		     mpt_handler_t handler, uint32_t *phandler_id)
{

	switch (type) {
	case MPT_HANDLER_REPLY:
	{
		u_int cbi;
		u_int free_cbi;

		if (phandler_id == NULL)
			return (EINVAL);

		free_cbi = MPT_HANDLER_ID_NONE;
		for (cbi = 0; cbi < MPT_NUM_REPLY_HANDLERS; cbi++) {
			/*
			 * If the same handler is registered multiple
			 * times, don't error out.  Just return the
			 * index of the original registration.
			 */
			if (mpt_reply_handlers[cbi] == handler.reply_handler) {
				*phandler_id = MPT_CBI_TO_HID(cbi);
				return (0);
			}

			/*
			 * Fill from the front in the hope that
			 * all registered handlers consume only a
			 * single cache line.
			 *
			 * We don't break on the first empty slot so
			 * that the full table is checked to see if
			 * this handler was previously registered.
			 */
			if (free_cbi == MPT_HANDLER_ID_NONE &&
			    (mpt_reply_handlers[cbi]
			  == mpt_default_reply_handler))
				free_cbi = cbi;
		}
		if (free_cbi == MPT_HANDLER_ID_NONE) {
			return (ENOMEM);
		}
		mpt_reply_handlers[free_cbi] = handler.reply_handler;
		*phandler_id = MPT_CBI_TO_HID(free_cbi);
		break;
	}
	default:
		mpt_prt(mpt, "mpt_register_handler unknown type %d\n", type);
		return (EINVAL);
	}
	return (0);
}
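
/*
 * Usage sketch (illustrative; handler and id names are hypothetical):
 * a personality registers its reply handler once at attach time and
 * then stamps the returned id into each request's MsgContext so that
 * mpt_intr() can dispatch the completion back to it:
 *
 *	mpt_handler_t handler;
 *	uint32_t my_handler_id;
 *
 *	handler.reply_handler = my_reply_handler;
 *	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
 *	    &my_handler_id);
 */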

int
mpt_deregister_handler(struct mpt_softc *mpt, mpt_handler_type type,
		       mpt_handler_t handler, uint32_t handler_id)
{

	switch (type) {
	case MPT_HANDLER_REPLY:
	{
		u_int cbi;

		cbi = MPT_CBI(handler_id);
		if (cbi >= MPT_NUM_REPLY_HANDLERS
		 || mpt_reply_handlers[cbi] != handler.reply_handler)
			return (ENOENT);
		mpt_reply_handlers[cbi] = mpt_default_reply_handler;
		break;
	}
	default:
		mpt_prt(mpt, "mpt_deregister_handler unknown type %d\n", type);
		return (EINVAL);
	}
	return (0);
}

static int
mpt_default_reply_handler(struct mpt_softc *mpt, request_t *req,
	uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	mpt_prt(mpt,
	    "Default Handler Called: req=%p:%u reply_descriptor=%x frame=%p\n",
	    req, req->serno, reply_desc, reply_frame);

	if (reply_frame != NULL)
		mpt_dump_reply_frame(mpt, reply_frame);

	mpt_prt(mpt, "Reply Frame Ignored\n");

	return (/*free_reply*/TRUE);
}

static int
mpt_config_reply_handler(struct mpt_softc *mpt, request_t *req,
 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	if (req != NULL) {
		if (reply_frame != NULL) {
			MSG_CONFIG *cfgp;
			MSG_CONFIG_REPLY *reply;

			cfgp = (MSG_CONFIG *)req->req_vbuf;
			reply = (MSG_CONFIG_REPLY *)reply_frame;
			req->IOCStatus = le16toh(reply_frame->IOCStatus);
			bcopy(&reply->Header, &cfgp->Header,
			      sizeof(cfgp->Header));
		}
		req->state &= ~REQ_STATE_QUEUED;
		req->state |= REQ_STATE_DONE;
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
			wakeup(req);
		} else if ((req->state & REQ_STATE_TIMEDOUT) != 0) {
			/*
			 * Whew- we can free this request (late completion)
			 */
			mpt_free_request(mpt, req);
		}
	}

	return (TRUE);
}

static int
mpt_handshake_reply_handler(struct mpt_softc *mpt, request_t *req,
 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	/* Nothing to be done. */
	return (TRUE);
}

static int
mpt_event_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	int free_reply;

	KASSERT(reply_frame != NULL, ("null reply in mpt_event_reply_handler"));
	KASSERT(req != NULL, ("null request in mpt_event_reply_handler"));

	free_reply = TRUE;
	switch (reply_frame->Function) {
	case MPI_FUNCTION_EVENT_NOTIFICATION:
	{
		MSG_EVENT_NOTIFY_REPLY *msg;
		struct mpt_personality *pers;
		u_int handled;

		handled = 0;
		msg = (MSG_EVENT_NOTIFY_REPLY *)reply_frame;
		MPT_PERS_FOREACH(mpt, pers)
			handled += pers->event(mpt, req, msg);

		if (handled == 0 && mpt->mpt_pers_mask == 0) {
			mpt_lprt(mpt, MPT_PRT_INFO,
				"No Handlers For Any Event Notify Frames. "
				"Event %#x (ACK %sequired).\n",
				msg->Event, msg->AckRequired? "r" : "not r");
		} else if (handled == 0) {
			mpt_lprt(mpt, MPT_PRT_WARN,
				"Unhandled Event Notify Frame. Event %#x "
				"(ACK %sequired).\n",
				msg->Event, msg->AckRequired? "r" : "not r");
		}

		if (msg->AckRequired) {
			request_t *ack_req;
			uint32_t context;

			context = htole32(req->index|MPT_REPLY_HANDLER_EVENTS);
			ack_req = mpt_get_request(mpt, FALSE);
			if (ack_req == NULL) {
				struct mpt_evtf_record *evtf;

				evtf = (struct mpt_evtf_record *)reply_frame;
				evtf->context = context;
				LIST_INSERT_HEAD(&mpt->ack_frames, evtf, links);
				free_reply = FALSE;
				break;
			}
			mpt_send_event_ack(mpt, ack_req, msg, context);
			/*
			 * Don't check for CONTINUATION_REPLY here
			 */
			return (free_reply);
		}
		break;
	}
	case MPI_FUNCTION_PORT_ENABLE:
		mpt_lprt(mpt, MPT_PRT_DEBUG, "enable port reply\n");
		break;
	case MPI_FUNCTION_EVENT_ACK:
		break;
	default:
		mpt_prt(mpt, "unknown event function: %x\n",
			reply_frame->Function);
		break;
	}

	/*
	 * I'm not sure that this continuation stuff works as it should.
	 *
	 * I've had FC async events occur that free the frame up because
	 * the continuation bit isn't set, and then additional async events
	 * occur using the same context. As you might imagine, this
	 * leads to Very Bad Things.
	 *
	 * Let's just be safe for now and not free them up until we figure
	 * out what's actually happening here.
	 */
#if	0
	if ((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0) {
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		mpt_free_request(mpt, req);
		mpt_prt(mpt, "event_reply %x for req %p:%u NOT a continuation",
		    reply_frame->Function, req, req->serno);
		if (reply_frame->Function == MPI_FUNCTION_EVENT_NOTIFICATION) {
			MSG_EVENT_NOTIFY_REPLY *msg =
			    (MSG_EVENT_NOTIFY_REPLY *)reply_frame;
			mpt_prtc(mpt, " Event=0x%x AckReq=%d",
			    msg->Event, msg->AckRequired);
		}
	} else {
		mpt_prt(mpt, "event_reply %x for %p:%u IS a continuation",
		    reply_frame->Function, req, req->serno);
		if (reply_frame->Function == MPI_FUNCTION_EVENT_NOTIFICATION) {
			MSG_EVENT_NOTIFY_REPLY *msg =
			    (MSG_EVENT_NOTIFY_REPLY *)reply_frame;
			mpt_prtc(mpt, " Event=0x%x AckReq=%d",
			    msg->Event, msg->AckRequired);
		}
		mpt_prtc(mpt, "\n");
	}
#endif
	return (free_reply);
}

/*
 * Process an asynchronous event from the IOC.
 */
static int
mpt_core_event(struct mpt_softc *mpt, request_t *req,
	       MSG_EVENT_NOTIFY_REPLY *msg)
{
	mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_core_event: 0x%x\n",
	    msg->Event & 0xFF);
	switch(msg->Event & 0xFF) {
	case MPI_EVENT_NONE:
		break;
	case MPI_EVENT_LOG_DATA:
	{
		int i;

		/* Some error occurred that LSI wants logged */
		mpt_prt(mpt, "EvtLogData: IOCLogInfo: 0x%08x\n",
			msg->IOCLogInfo);
		mpt_prt(mpt, "\tEvtLogData: Event Data:");
		for (i = 0; i < msg->EventDataLength; i++)
			mpt_prtc(mpt, "  %08x", msg->Data[i]);
		mpt_prtc(mpt, "\n");
		break;
	}
	case MPI_EVENT_EVENT_CHANGE:
		/*
		 * This is just an acknowledgement
		 * of our mpt_send_event_request.
		 */
		break;
	case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
		break;
	default:
		return (0);
	}
	return (1);
}

static void
mpt_send_event_ack(struct mpt_softc *mpt, request_t *ack_req,
		   MSG_EVENT_NOTIFY_REPLY *msg, uint32_t context)
{
	MSG_EVENT_ACK *ackp;

	ackp = (MSG_EVENT_ACK *)ack_req->req_vbuf;
	memset(ackp, 0, sizeof (*ackp));
	ackp->Function = MPI_FUNCTION_EVENT_ACK;
	ackp->Event = msg->Event;
	ackp->EventContext = msg->EventContext;
	ackp->MsgContext = context;
	mpt_check_doorbell(mpt);
	mpt_send_cmd(mpt, ack_req);
}

/***************************** Interrupt Handling *****************************/
void
mpt_intr(void *arg)
{
	struct mpt_softc *mpt;
	uint32_t reply_desc;
	int ntrips = 0;

	mpt = (struct mpt_softc *)arg;
	mpt_lprt(mpt, MPT_PRT_DEBUG2, "enter mpt_intr\n");
	while ((reply_desc = mpt_pop_reply_queue(mpt)) != MPT_REPLY_EMPTY) {
		request_t	  *req;
		MSG_DEFAULT_REPLY *reply_frame;
		uint32_t	   reply_baddr;
		uint32_t	   ctxt_idx;
		u_int		   cb_index;
		u_int		   req_index;
		int		   free_rf;

		req = NULL;
		reply_frame = NULL;
		reply_baddr = 0;
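		/*
		 * Two forms of reply descriptor come off the reply queue:
		 * "address" replies, where the A-bit is set and the
		 * descriptor holds the bus address of a full reply frame,
		 * and "context" replies, where the descriptor itself
		 * encodes the completion context.  The branch below
		 * normalizes both cases to a ctxt_idx for dispatch.
		 */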
		if ((reply_desc & MPI_ADDRESS_REPLY_A_BIT) != 0) {
			u_int offset;
			/*
			 * Ensure that the reply frame is coherent.
			 */
			reply_baddr = MPT_REPLY_BADDR(reply_desc);
			offset = reply_baddr - (mpt->reply_phys & 0xFFFFFFFF);
			bus_dmamap_sync_range(mpt->reply_dmat,
			    mpt->reply_dmap, offset, MPT_REPLY_SIZE,
			    BUS_DMASYNC_POSTREAD);
			reply_frame = MPT_REPLY_OTOV(mpt, offset);
			ctxt_idx = le32toh(reply_frame->MsgContext);
		} else {
			uint32_t type;

			type = MPI_GET_CONTEXT_REPLY_TYPE(reply_desc);
			ctxt_idx = reply_desc;
			mpt_lprt(mpt, MPT_PRT_DEBUG1, "Context Reply: 0x%08x\n",
				    reply_desc);

			switch (type) {
			case MPI_CONTEXT_REPLY_TYPE_SCSI_INIT:
				ctxt_idx &= MPI_CONTEXT_REPLY_CONTEXT_MASK;
				break;
			case MPI_CONTEXT_REPLY_TYPE_SCSI_TARGET:
				ctxt_idx = GET_IO_INDEX(reply_desc);
				if (mpt->tgt_cmd_ptrs == NULL) {
					mpt_prt(mpt,
					    "mpt_intr: no target cmd ptrs\n");
					reply_desc = MPT_REPLY_EMPTY;
					break;
				}
				if (ctxt_idx >= mpt->tgt_cmds_allocated) {
					mpt_prt(mpt,
					    "mpt_intr: bad tgt cmd ctxt %u\n",
					    ctxt_idx);
					reply_desc = MPT_REPLY_EMPTY;
					ntrips = 1000;
					break;
				}
				req = mpt->tgt_cmd_ptrs[ctxt_idx];
				if (req == NULL) {
					mpt_prt(mpt, "no request backpointer "
					    "at index %u\n", ctxt_idx);
					reply_desc = MPT_REPLY_EMPTY;
					ntrips = 1000;
					break;
				}
				/*
				 * Reformulate ctxt_idx to be just as if
				 * it were another type of context reply
				 * so the code below will find the request
				 * via indexing into the pool.
				 */
				ctxt_idx =
				    req->index | mpt->scsi_tgt_handler_id;
				req = NULL;
				break;
			case MPI_CONTEXT_REPLY_TYPE_LAN:
				mpt_prt(mpt, "LAN CONTEXT REPLY: 0x%08x\n",
				    reply_desc);
				reply_desc = MPT_REPLY_EMPTY;
				break;
			default:
				mpt_prt(mpt, "Context Reply 0x%08x?\n", type);
				reply_desc = MPT_REPLY_EMPTY;
				break;
			}
			if (reply_desc == MPT_REPLY_EMPTY) {
				if (ntrips++ > 1000) {
					break;
				}
				continue;
			}
		}

		cb_index = MPT_CONTEXT_TO_CBI(ctxt_idx);
		req_index = MPT_CONTEXT_TO_REQI(ctxt_idx);
		if (req_index < MPT_MAX_REQUESTS(mpt)) {
			req = &mpt->request_pool[req_index];
		} else {
			mpt_prt(mpt, "WARN: mpt_intr index == %d (reply_desc =="
			    " 0x%x)\n", req_index, reply_desc);
		}

		free_rf = mpt_reply_handlers[cb_index](mpt, req,
		    reply_desc, reply_frame);

		if (reply_frame != NULL && free_rf) {
			mpt_free_reply(mpt, reply_baddr);
		}

		/*
		 * If we got ourselves disabled, don't get stuck in a loop
		 */
		if (mpt->disabled) {
			mpt_disable_ints(mpt);
			break;
		}
		if (ntrips++ > 1000) {
			break;
		}
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG2, "exit mpt_intr\n");
}

/******************************* Error Recovery *******************************/
void
mpt_complete_request_chain(struct mpt_softc *mpt, struct req_queue *chain,
			    u_int iocstatus)
{
	MSG_DEFAULT_REPLY  ioc_status_frame;
	request_t	  *req;

	memset(&ioc_status_frame, 0, sizeof(ioc_status_frame));
	ioc_status_frame.MsgLength = roundup2(sizeof(ioc_status_frame), 4);
	ioc_status_frame.IOCStatus = iocstatus;
	while((req = TAILQ_FIRST(chain)) != NULL) {
		MSG_REQUEST_HEADER *msg_hdr;
		u_int		    cb_index;

		TAILQ_REMOVE(chain, req, links);
		msg_hdr = (MSG_REQUEST_HEADER *)req->req_vbuf;
		ioc_status_frame.Function = msg_hdr->Function;
		ioc_status_frame.MsgContext = msg_hdr->MsgContext;
		cb_index = MPT_CONTEXT_TO_CBI(le32toh(msg_hdr->MsgContext));
		mpt_reply_handlers[cb_index](mpt, req, msg_hdr->MsgContext,
		    &ioc_status_frame);
	}
}

/********************************* Diagnostics ********************************/
/*
 * Perform a diagnostic dump of a reply frame.
 */
void
mpt_dump_reply_frame(struct mpt_softc *mpt, MSG_DEFAULT_REPLY *reply_frame)
{
	mpt_prt(mpt, "Address Reply:\n");
	mpt_print_reply(reply_frame);
}

/******************************* Doorbell Access ******************************/
static __inline uint32_t mpt_rd_db(struct mpt_softc *mpt);
static __inline uint32_t mpt_rd_intr(struct mpt_softc *mpt);

static __inline uint32_t
mpt_rd_db(struct mpt_softc *mpt)
{
	return mpt_read(mpt, MPT_OFFSET_DOORBELL);
}

static __inline uint32_t
mpt_rd_intr(struct mpt_softc *mpt)
{
	return mpt_read(mpt, MPT_OFFSET_INTR_STATUS);
}

/* Busy wait for a doorbell to be read by the IOC */
static int
mpt_wait_db_ack(struct mpt_softc *mpt)
{
	int i;
	for (i=0; i < MPT_MAX_WAIT; i++) {
		if (!MPT_DB_IS_BUSY(mpt_rd_intr(mpt))) {
			maxwait_ack = i > maxwait_ack ? i : maxwait_ack;
			return (MPT_OK);
		}
		DELAY(200);
	}
	return (MPT_FAIL);
}

/* Busy wait for a doorbell interrupt */
static int
mpt_wait_db_int(struct mpt_softc *mpt)
{
	int i;
	for (i=0; i < MPT_MAX_WAIT; i++) {
		if (MPT_DB_INTR(mpt_rd_intr(mpt))) {
			maxwait_int = i > maxwait_int ? i : maxwait_int;
			return MPT_OK;
		}
		DELAY(100);
	}
	return (MPT_FAIL);
}

/* Check that the doorbell reports the IOC as running */
void
mpt_check_doorbell(struct mpt_softc *mpt)
{
	uint32_t db = mpt_rd_db(mpt);
	if (MPT_STATE(db) != MPT_DB_STATE_RUNNING) {
		mpt_prt(mpt, "Device not running\n");
		mpt_print_db(db);
	}
}

/* Wait for the IOC to transition to a given state */
static int
mpt_wait_state(struct mpt_softc *mpt, enum DB_STATE_BITS state)
{
	int i;

	for (i = 0; i < MPT_MAX_WAIT; i++) {
		uint32_t db = mpt_rd_db(mpt);
		if (MPT_STATE(db) == state) {
			maxwait_state = i > maxwait_state ? i : maxwait_state;
			return (MPT_OK);
		}
		DELAY(100);
	}
	return (MPT_FAIL);
}


/************************ Initialization/Configuration ************************/
static int mpt_download_fw(struct mpt_softc *mpt);

/* Issue the reset COMMAND to the IOC */
static int
mpt_soft_reset(struct mpt_softc *mpt)
{
	mpt_lprt(mpt, MPT_PRT_DEBUG, "soft reset\n");

	/* Have to use hard reset if we are not in Running state */
	if (MPT_STATE(mpt_rd_db(mpt)) != MPT_DB_STATE_RUNNING) {
		mpt_prt(mpt, "soft reset failed: device not running\n");
		return (MPT_FAIL);
	}

	/*
	 * If the doorbell is in use we don't have a chance of getting
	 * a word in since the IOC probably crashed in message
	 * processing.  So don't waste our time.
	 */
	if (MPT_DB_IS_IN_USE(mpt_rd_db(mpt))) {
		mpt_prt(mpt, "soft reset failed: doorbell wedged\n");
		return (MPT_FAIL);
	}

	/* Send the reset request to the IOC */
	mpt_write(mpt, MPT_OFFSET_DOORBELL,
	    MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET << MPI_DOORBELL_FUNCTION_SHIFT);
	if (mpt_wait_db_ack(mpt) != MPT_OK) {
		mpt_prt(mpt, "soft reset failed: ack timeout\n");
		return (MPT_FAIL);
	}

	/* Wait for the IOC to reload and come out of reset state */
	if (mpt_wait_state(mpt, MPT_DB_STATE_READY) != MPT_OK) {
		mpt_prt(mpt, "soft reset failed: device did not restart\n");
		return (MPT_FAIL);
	}

	return MPT_OK;
}

static int
mpt_enable_diag_mode(struct mpt_softc *mpt)
{
	int try;

	try = 20;
	while (--try) {
		if ((mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC) & MPI_DIAG_DRWE) != 0)
			break;

		/* Enable diagnostic registers */
		mpt_write(mpt, MPT_OFFSET_SEQUENCE, 0xFF);
		mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_1ST_KEY_VALUE);
		mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_2ND_KEY_VALUE);
		mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_3RD_KEY_VALUE);
		mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_4TH_KEY_VALUE);
		mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_5TH_KEY_VALUE);

		DELAY(100000);
	}
	if (try == 0)
		return (EIO);
	return (0);
}

static void
mpt_disable_diag_mode(struct mpt_softc *mpt)
{
	mpt_write(mpt, MPT_OFFSET_SEQUENCE, 0xFFFFFFFF);
}

/*
 * This is a magic diagnostic reset that resets all the ARM
 * processors in the chip.
 */
static void
mpt_hard_reset(struct mpt_softc *mpt)
{
	int error;
	int wait;
	uint32_t diagreg;

	mpt_lprt(mpt, MPT_PRT_DEBUG, "hard reset\n");

	error = mpt_enable_diag_mode(mpt);
	if (error) {
		mpt_prt(mpt, "WARNING - Could not enter diagnostic mode!\n");
		mpt_prt(mpt, "Trying to reset anyway.\n");
	}

	diagreg = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);

	/*
	 * This appears to be a workaround required for some
	 * firmware or hardware revs.
	 */
	mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, diagreg | MPI_DIAG_DISABLE_ARM);
	DELAY(1000);

	/* Diag. port is now active so we can now hit the reset bit */
	mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, diagreg | MPI_DIAG_RESET_ADAPTER);

	/*
	 * Ensure that the reset has finished.  We delay 1ms
	 * prior to reading the register to make sure the chip
	 * has sufficiently completed its reset to handle register
	 * accesses.
	 */
	wait = 5000;
	do {
		DELAY(1000);
		diagreg = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);
	} while (--wait && (diagreg & MPI_DIAG_RESET_ADAPTER) == 0);

	if (wait == 0) {
		mpt_prt(mpt, "WARNING - Failed hard reset! "
			"Trying to initialize anyway.\n");
	}

	/*
	 * If we have firmware to download, it must be loaded before
	 * the controller will become operational.  Do so now.
	 */
	if (mpt->fw_image != NULL) {
		error = mpt_download_fw(mpt);
		if (error) {
			mpt_prt(mpt, "WARNING - Firmware Download Failed!\n");
			mpt_prt(mpt, "Trying to initialize anyway.\n");
		}
	}

	/*
	 * Resetting the controller should have disabled write
	 * access to the diagnostic registers, but disable
	 * manually to be sure.
	 */
	mpt_disable_diag_mode(mpt);
}

static void
mpt_core_ioc_reset(struct mpt_softc *mpt, int type)
{
	/*
	 * Complete all pending requests with a status
	 * appropriate for an IOC reset.
	 */
	mpt_complete_request_chain(mpt, &mpt->request_pending_list,
				   MPI_IOCSTATUS_INVALID_STATE);
}


/*
 * Reset the IOC when needed. Try software command first then if needed
 * poke at the magic diagnostic reset. Note that a hard reset resets
 * *both* IOCs on dual function chips (FC929 && LSI1030) as well as
 * fouls up the PCI configuration registers.
 */
int
mpt_reset(struct mpt_softc *mpt, int reinit)
{
	struct	mpt_personality *pers;
	int	ret;
	int	retry_cnt = 0;

	/*
	 * Try a soft reset. If that fails, get out the big hammer.
	 */
 again:
	if ((ret = mpt_soft_reset(mpt)) != MPT_OK) {
		int	cnt;
		for (cnt = 0; cnt < 5; cnt++) {
			/* Failed; do a hard reset */
			mpt_hard_reset(mpt);

			/*
			 * Wait for the IOC to reload
			 * and come out of reset state
			 */
			ret = mpt_wait_state(mpt, MPT_DB_STATE_READY);
			if (ret == MPT_OK) {
				break;
			}
			/*
			 * Okay- try to check again...
			 */
			ret = mpt_wait_state(mpt, MPT_DB_STATE_READY);
			if (ret == MPT_OK) {
				break;
			}
			mpt_prt(mpt, "mpt_reset: failed hard reset (%d:%d)\n",
			    retry_cnt, cnt);
		}
	}

	if (retry_cnt == 0) {
		/*
		 * Invoke reset handlers.  We bump the reset count so
		 * that mpt_wait_req() understands that regardless of
		 * the specified wait condition, it should stop its wait.
		 */
		mpt->reset_cnt++;
		MPT_PERS_FOREACH(mpt, pers)
			pers->reset(mpt, ret);
	}

	if (reinit) {
		ret = mpt_enable_ioc(mpt, 1);
		if (ret == MPT_OK) {
			mpt_enable_ints(mpt);
		}
	}
	if (ret != MPT_OK && retry_cnt++ < 2) {
		goto again;
	}
	return ret;
}

/* Return a command buffer to the free queue */
void
mpt_free_request(struct mpt_softc *mpt, request_t *req)
{
	request_t *nxt;
	struct mpt_evtf_record *record;
	uint32_t reply_baddr;

	if (req == NULL || req != &mpt->request_pool[req->index]) {
		panic("mpt_free_request bad req ptr\n");
		return;
	}
	if ((nxt = req->chain) != NULL) {
		req->chain = NULL;
		mpt_free_request(mpt, nxt);	/* NB: recursion */
	}
	KASSERT(req->state != REQ_STATE_FREE, ("freeing free request"));
	KASSERT(!(req->state & REQ_STATE_LOCKED), ("freeing locked request"));
	KASSERT(MPT_OWNED(mpt), ("mpt_free_request: mpt not locked\n"));
	KASSERT(mpt_req_on_free_list(mpt, req) == 0,
	    ("mpt_free_request: req %p:%u func %x already on freelist",
	    req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
	KASSERT(mpt_req_on_pending_list(mpt, req) == 0,
	    ("mpt_free_request: req %p:%u func %x on pending list",
	    req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
#ifdef	INVARIANTS
	mpt_req_not_spcl(mpt, req, "mpt_free_request", __LINE__);
#endif

	req->ccb = NULL;
	if (LIST_EMPTY(&mpt->ack_frames)) {
		/*
		 * Insert free ones at the tail
		 */
		req->serno = 0;
		req->state = REQ_STATE_FREE;
#ifdef	INVARIANTS
		memset(req->req_vbuf, 0xff, sizeof (MSG_REQUEST_HEADER));
#endif
		TAILQ_INSERT_TAIL(&mpt->request_free_list, req, links);
		if (mpt->getreqwaiter != 0) {
			mpt->getreqwaiter = 0;
			wakeup(&mpt->request_free_list);
		}
		return;
	}

	/*
	 * Process an ack frame deferred due to resource shortage.
	 */
	record = LIST_FIRST(&mpt->ack_frames);
	LIST_REMOVE(record, links);
	req->state = REQ_STATE_ALLOCATED;
	mpt_assign_serno(mpt, req);
	mpt_send_event_ack(mpt, req, &record->reply, record->context);
	reply_baddr = (uint32_t)((uint8_t *)record - mpt->reply)
		    + (mpt->reply_phys & 0xFFFFFFFF);
	mpt_free_reply(mpt, reply_baddr);
}

/* Get a command buffer from the free queue */
request_t *
mpt_get_request(struct mpt_softc *mpt, int sleep_ok)
{
	request_t *req;

retry:
	KASSERT(MPT_OWNED(mpt), ("mpt_get_request: mpt not locked\n"));
	req = TAILQ_FIRST(&mpt->request_free_list);
	if (req != NULL) {
		KASSERT(req == &mpt->request_pool[req->index],
		    ("mpt_get_request: corrupted request free list\n"));
		KASSERT(req->state == REQ_STATE_FREE,
		    ("req %p:%u not free on free list %x index %d function %x",
		    req, req->serno, req->state, req->index,
		    ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
		TAILQ_REMOVE(&mpt->request_free_list, req, links);
		req->state = REQ_STATE_ALLOCATED;
		req->chain = NULL;
		mpt_assign_serno(mpt, req);
	} else if (sleep_ok != 0) {
		mpt->getreqwaiter = 1;
		mpt_sleep(mpt, &mpt->request_free_list, PUSER, "mptgreq", 0);
		goto retry;
	}
	return (req);
}

/* Pass the command to the IOC */
void
mpt_send_cmd(struct mpt_softc *mpt, request_t *req)
{
	if (mpt->verbose > MPT_PRT_DEBUG2) {
		mpt_dump_request(mpt, req);
	}
	bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
	    BUS_DMASYNC_PREWRITE);
	req->state |= REQ_STATE_QUEUED;
	KASSERT(mpt_req_on_free_list(mpt, req) == 0,
	    ("req %p:%u func %x on free list in mpt_send_cmd",
	    req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
	KASSERT(mpt_req_on_pending_list(mpt, req) == 0,
	    ("req %p:%u func %x already on pending list in mpt_send_cmd",
	    req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
	TAILQ_INSERT_HEAD(&mpt->request_pending_list, req, links);
	mpt_write(mpt, MPT_OFFSET_REQUEST_Q, (uint32_t) req->req_pbuf);
}

/*
 * Wait for a request to complete.
 *
 * Inputs:
 *	mpt		softc of controller executing request
 *	req		request to wait for
 *	state		request state to wait for
 *	mask		bits of the request state to compare
 *	sleep_ok	nonzero implies may sleep in this context
 *	time_ms		timeout in ms.  0 implies no timeout.
 *
 * Return Values:
 *	0		Request completed
 *	non-0		Timeout fired before request completion.
 */
int
mpt_wait_req(struct mpt_softc *mpt, request_t *req,
	     mpt_req_state_t state, mpt_req_state_t mask,
	     int sleep_ok, int time_ms)
{
	int   error;
	int   timeout;
	u_int saved_cnt;

	/*
	 * timeout is in ms.  0 indicates infinite wait.
	 * Convert to ticks or 500us units depending on
	 * our sleep mode.
	 */
	if (sleep_ok != 0) {
		timeout = (time_ms * hz) / 1000;
	} else {
		timeout = time_ms * 2;
	}
	req->state |= REQ_STATE_NEED_WAKEUP;
	mask &= ~REQ_STATE_NEED_WAKEUP;
	saved_cnt = mpt->reset_cnt;
	while ((req->state & mask) != state && mpt->reset_cnt == saved_cnt) {
		if (sleep_ok != 0) {
			error = mpt_sleep(mpt, req, PUSER, "mptreq", timeout);
			if (error == EWOULDBLOCK) {
				timeout = 0;
				break;
			}
		} else {
			if (time_ms != 0 && --timeout == 0) {
				break;
			}
			DELAY(500);
			mpt_intr(mpt);
		}
	}
	req->state &= ~REQ_STATE_NEED_WAKEUP;
	if (mpt->reset_cnt != saved_cnt) {
		return (EIO);
	}
	if (time_ms && timeout <= 0) {
		MSG_REQUEST_HEADER *msg_hdr = req->req_vbuf;
		req->state |= REQ_STATE_TIMEDOUT;
		mpt_prt(mpt, "mpt_wait_req(%x) timed out\n", msg_hdr->Function);
		return (ETIMEDOUT);
	}
	return (0);
}
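
/*
 * Typical synchronous request pattern (illustrative sketch; the config
 * page and port enable paths below follow it):
 *
 *	req = mpt_get_request(mpt, sleep_ok);
 *	(fill in req->req_vbuf; MsgContext = req->index | handler id)
 *	mpt_check_doorbell(mpt);
 *	mpt_send_cmd(mpt, req);
 *	error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
 *	    sleep_ok, timeout_ms);
 *	if (error == 0)
 *		mpt_free_request(mpt, req);
 *
 * On timeout the request is deliberately left outstanding; see the
 * comment in mpt_read_cfg_header() below for why it must not be freed
 * while the IOC may still own it.
 */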

/*
 * Send a command to the IOC via the handshake register.
 *
 * Only done at initialization time and for certain unusual
 * commands such as device/bus reset as specified by LSI.
 */
int
mpt_send_handshake_cmd(struct mpt_softc *mpt, size_t len, void *cmd)
{
	int i;
	uint32_t data, *data32;

	/* Check condition of the IOC */
	data = mpt_rd_db(mpt);
	if ((MPT_STATE(data) != MPT_DB_STATE_READY
	  && MPT_STATE(data) != MPT_DB_STATE_RUNNING
	  && MPT_STATE(data) != MPT_DB_STATE_FAULT)
	 || MPT_DB_IS_IN_USE(data)) {
		mpt_prt(mpt, "handshake aborted - invalid doorbell state\n");
		mpt_print_db(data);
		return (EBUSY);
	}

	/* We move things in 32 bit chunks */
	len = (len + 3) >> 2;
	data32 = cmd;

	/* Clear any left over pending doorbell interrupts */
	if (MPT_DB_INTR(mpt_rd_intr(mpt)))
		mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);

	/*
	 * Tell the handshake register we are going to send a command
	 * and how long it is going to be.
	 */
	data = (MPI_FUNCTION_HANDSHAKE << MPI_DOORBELL_FUNCTION_SHIFT) |
	    (len << MPI_DOORBELL_ADD_DWORDS_SHIFT);
	mpt_write(mpt, MPT_OFFSET_DOORBELL, data);

	/* Wait for the chip to notice */
	if (mpt_wait_db_int(mpt) != MPT_OK) {
		mpt_prt(mpt, "mpt_send_handshake_cmd timeout1\n");
		return (ETIMEDOUT);
	}

	/* Clear the interrupt */
	mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);

	if (mpt_wait_db_ack(mpt) != MPT_OK) {
		mpt_prt(mpt, "mpt_send_handshake_cmd timeout2\n");
		return (ETIMEDOUT);
	}

	/* Send the command */
	for (i = 0; i < len; i++) {
		mpt_write(mpt, MPT_OFFSET_DOORBELL, *data32++);
		if (mpt_wait_db_ack(mpt) != MPT_OK) {
			mpt_prt(mpt,
				"mpt_send_handshake_cmd timeout! index = %d\n",
				i);
			return (ETIMEDOUT);
		}
	}
	return MPT_OK;
}

/* Get the response from the handshake register */
int
mpt_recv_handshake_reply(struct mpt_softc *mpt, size_t reply_len, void *reply)
{
	int left, reply_left;
	u_int16_t *data16;
	MSG_DEFAULT_REPLY *hdr;

	/* We move things out in 16 bit chunks */
	reply_len >>= 1;
	data16 = (u_int16_t *)reply;

	hdr = (MSG_DEFAULT_REPLY *)reply;

	/* Get first word */
	if (mpt_wait_db_int(mpt) != MPT_OK) {
		mpt_prt(mpt, "mpt_recv_handshake_cmd timeout1\n");
		return ETIMEDOUT;
	}
	*data16++ = mpt_read(mpt, MPT_OFFSET_DOORBELL) & MPT_DB_DATA_MASK;
	mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);

	/* Get second word */
	if (mpt_wait_db_int(mpt) != MPT_OK) {
		mpt_prt(mpt, "mpt_recv_handshake_cmd timeout2\n");
		return ETIMEDOUT;
	}
	*data16++ = mpt_read(mpt, MPT_OFFSET_DOORBELL) & MPT_DB_DATA_MASK;
	mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);

	/*
	 * With the second word, we can now look at the length.
	 * Warn about a reply that's too short (except for IOC FACTS REPLY)
	 */
	if ((reply_len >> 1) != hdr->MsgLength &&
	    (hdr->Function != MPI_FUNCTION_IOC_FACTS)) {
#if __FreeBSD_version >= 500000
		mpt_prt(mpt, "reply length does not match message length: "
			"got %x; expected %zx for function %x\n",
			hdr->MsgLength << 2, reply_len << 1, hdr->Function);
#else
		mpt_prt(mpt, "reply length does not match message length: "
			"got %x; expected %x for function %x\n",
			hdr->MsgLength << 2, reply_len << 1, hdr->Function);
#endif
	}

	/* Get rest of the reply; but don't overflow the provided buffer */
	left = (hdr->MsgLength << 1) - 2;
	reply_left = reply_len - 2;
	while (left--) {
		u_int16_t datum;

		if (mpt_wait_db_int(mpt) != MPT_OK) {
			mpt_prt(mpt, "mpt_recv_handshake_cmd timeout3\n");
			return ETIMEDOUT;
		}
		datum = mpt_read(mpt, MPT_OFFSET_DOORBELL);

		if (reply_left-- > 0)
			*data16++ = datum & MPT_DB_DATA_MASK;

		mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
	}

	/* One more wait & clear at the end */
	if (mpt_wait_db_int(mpt) != MPT_OK) {
		mpt_prt(mpt, "mpt_recv_handshake_cmd timeout4\n");
		return ETIMEDOUT;
	}
	mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);

	if ((hdr->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
		if (mpt->verbose >= MPT_PRT_TRACE)
			mpt_print_reply(hdr);
		return (MPT_FAIL | hdr->IOCStatus);
	}

	return (0);
}

static int
mpt_get_iocfacts(struct mpt_softc *mpt, MSG_IOC_FACTS_REPLY *freplp)
{
	MSG_IOC_FACTS f_req;
	int error;

	memset(&f_req, 0, sizeof f_req);
	f_req.Function = MPI_FUNCTION_IOC_FACTS;
	f_req.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
	error = mpt_send_handshake_cmd(mpt, sizeof f_req, &f_req);
	if (error)
		return(error);
	error = mpt_recv_handshake_reply(mpt, sizeof (*freplp), freplp);
	return (error);
}

static int
mpt_get_portfacts(struct mpt_softc *mpt, MSG_PORT_FACTS_REPLY *freplp)
{
	MSG_PORT_FACTS f_req;
	int error;

	/* XXX: Only getting PORT FACTS for Port 0 */
	memset(&f_req, 0, sizeof f_req);
	f_req.Function = MPI_FUNCTION_PORT_FACTS;
	f_req.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
	error = mpt_send_handshake_cmd(mpt, sizeof f_req, &f_req);
	if (error)
		return(error);
	error = mpt_recv_handshake_reply(mpt, sizeof (*freplp), freplp);
	return (error);
}

/*
 * Send the initialization request. This is where we specify how many
 * SCSI busses and how many devices per bus we wish to emulate.
 * This is also the command that specifies the max size of the reply
 * frames from the IOC that we will be allocating.
 */
static int
mpt_send_ioc_init(struct mpt_softc *mpt, uint32_t who)
{
	int error = 0;
	MSG_IOC_INIT init;
	MSG_IOC_INIT_REPLY reply;

	memset(&init, 0, sizeof init);
	init.WhoInit = who;
	init.Function = MPI_FUNCTION_IOC_INIT;
	init.MaxDevices = mpt->mpt_max_devices;
	init.MaxBuses = 1;

	init.MsgVersion = htole16(MPI_VERSION);
	init.HeaderVersion = htole16(MPI_HEADER_VERSION);
	init.ReplyFrameSize = htole16(MPT_REPLY_SIZE);
	init.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);

	if ((error = mpt_send_handshake_cmd(mpt, sizeof init, &init)) != 0) {
		return(error);
	}

	error = mpt_recv_handshake_reply(mpt, sizeof reply, &reply);
	return (error);
}


/*
 * Utility routine to read configuration headers and pages
 */
int
mpt_issue_cfg_req(struct mpt_softc *mpt, request_t *req, u_int Action,
		  u_int PageVersion, u_int PageLength, u_int PageNumber,
		  u_int PageType, uint32_t PageAddress, bus_addr_t addr,
		  bus_size_t len, int sleep_ok, int timeout_ms)
{
	MSG_CONFIG *cfgp;
	SGE_SIMPLE32 *se;

	cfgp = req->req_vbuf;
	memset(cfgp, 0, sizeof *cfgp);
	cfgp->Action = Action;
	cfgp->Function = MPI_FUNCTION_CONFIG;
	cfgp->Header.PageVersion = PageVersion;
	cfgp->Header.PageLength = PageLength;
	cfgp->Header.PageNumber = PageNumber;
	cfgp->Header.PageType = PageType;
	cfgp->PageAddress = PageAddress;
	se = (SGE_SIMPLE32 *)&cfgp->PageBufferSGE;
	se->Address = addr;
	MPI_pSGE_SET_LENGTH(se, len);
	MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
	    MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
	    MPI_SGE_FLAGS_END_OF_LIST |
	    ((Action == MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT
	  || Action == MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM)
	   ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST)));
	cfgp->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_CONFIG);

	mpt_check_doorbell(mpt);
	mpt_send_cmd(mpt, req);
	return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
			     sleep_ok, timeout_ms));
}
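
/*
 * The usual CONFIG sequence is two mpt_issue_cfg_req() calls: a
 * PAGE_HEADER action to learn the page's version and length, then a
 * read (or write) of the page proper into a buffer sized from that
 * length.  Illustrative sketch, mirroring the IOC page 2 read below:
 *
 *	CONFIG_PAGE_HEADER hdr;
 *	CONFIG_PAGE_IOC_2 *page;
 *
 *	mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC, 2, 0, &hdr,
 *	    FALSE, 5000);
 *	page = malloc(hdr.PageLength * sizeof(uint32_t), M_DEVBUF,
 *	    M_NOWAIT | M_ZERO);
 *	memcpy(&page->Header, &hdr, sizeof(hdr));
 *	mpt_read_cur_cfg_page(mpt, 0, &page->Header,
 *	    hdr.PageLength * sizeof(uint32_t), FALSE, 5000);
 */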


int
mpt_read_cfg_header(struct mpt_softc *mpt, int PageType, int PageNumber,
		    uint32_t PageAddress, CONFIG_PAGE_HEADER *rslt,
		    int sleep_ok, int timeout_ms)
{
	request_t  *req;
	MSG_CONFIG *cfgp;
	int	    error;

	req = mpt_get_request(mpt, sleep_ok);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_read_cfg_header: Get request failed!\n");
		return (ENOMEM);
	}

	error = mpt_issue_cfg_req(mpt, req, MPI_CONFIG_ACTION_PAGE_HEADER,
				  /*PageVersion*/0, /*PageLength*/0, PageNumber,
				  PageType, PageAddress, /*addr*/0, /*len*/0,
				  sleep_ok, timeout_ms);
	if (error != 0) {
		/*
		 * Leave the request. Without resetting the chip, it's
		 * still owned by it and we'll just get into trouble
		 * freeing it now. Mark it as abandoned so that if it
		 * shows up later it can be freed.
		 */
		mpt_prt(mpt, "read_cfg_header timed out\n");
		return (ETIMEDOUT);
	}

	switch (req->IOCStatus & MPI_IOCSTATUS_MASK) {
	case MPI_IOCSTATUS_SUCCESS:
		cfgp = req->req_vbuf;
		bcopy(&cfgp->Header, rslt, sizeof(*rslt));
		error = 0;
		break;
	case MPI_IOCSTATUS_CONFIG_INVALID_PAGE:
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "Invalid Page Type %d Number %d Addr 0x%0x\n",
		    PageType, PageNumber, PageAddress);
		error = EINVAL;
		break;
	default:
		mpt_prt(mpt, "mpt_read_cfg_header: Config Info Status %x\n",
			req->IOCStatus);
		error = EIO;
		break;
	}
	mpt_free_request(mpt, req);
	return (error);
}

int
mpt_read_cfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress,
		  CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok,
		  int timeout_ms)
{
	request_t    *req;
	int	      error;

	req = mpt_get_request(mpt, sleep_ok);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_read_cfg_page: Get request failed!\n");
		return (-1);
	}

	error = mpt_issue_cfg_req(mpt, req, Action, hdr->PageVersion,
				  hdr->PageLength, hdr->PageNumber,
				  hdr->PageType & MPI_CONFIG_PAGETYPE_MASK,
				  PageAddress, req->req_pbuf + MPT_RQSL(mpt),
				  len, sleep_ok, timeout_ms);
	if (error != 0) {
		mpt_prt(mpt, "read_cfg_page(%d) timed out\n", Action);
		return (-1);
	}

	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
		mpt_prt(mpt, "mpt_read_cfg_page: Config Info Status %x\n",
			req->IOCStatus);
		mpt_free_request(mpt, req);
		return (-1);
	}
	bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
	    BUS_DMASYNC_POSTREAD);
	memcpy(hdr, ((uint8_t *)req->req_vbuf)+MPT_RQSL(mpt), len);
	mpt_free_request(mpt, req);
	return (0);
}

int
mpt_write_cfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress,
		   CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok,
		   int timeout_ms)
{
	request_t    *req;
	u_int	      hdr_attr;
	int	      error;

	hdr_attr = hdr->PageType & MPI_CONFIG_PAGEATTR_MASK;
	if (hdr_attr != MPI_CONFIG_PAGEATTR_CHANGEABLE &&
	    hdr_attr != MPI_CONFIG_PAGEATTR_PERSISTENT) {
		mpt_prt(mpt, "page type 0x%x not changeable\n",
			hdr->PageType & MPI_CONFIG_PAGETYPE_MASK);
		return (-1);
	}

#if	0
	/*
	 * We shouldn't mask off other bits here.
	 */
	hdr->PageType &= MPI_CONFIG_PAGETYPE_MASK;
#endif

	req = mpt_get_request(mpt, sleep_ok);
	if (req == NULL)
		return (-1);

	memcpy(((caddr_t)req->req_vbuf) + MPT_RQSL(mpt), hdr, len);

	/*
	 * There isn't any point in restoring stripped out attributes
	 * if you then mask them going down to issue the request.
	 */

#if	0
	/* Restore stripped out attributes */
	hdr->PageType |= hdr_attr;

	error = mpt_issue_cfg_req(mpt, req, Action, hdr->PageVersion,
				  hdr->PageLength, hdr->PageNumber,
				  hdr->PageType & MPI_CONFIG_PAGETYPE_MASK,
				  PageAddress, req->req_pbuf + MPT_RQSL(mpt),
				  len, sleep_ok, timeout_ms);
#else
	error = mpt_issue_cfg_req(mpt, req, Action, hdr->PageVersion,
				  hdr->PageLength, hdr->PageNumber,
				  hdr->PageType, PageAddress,
				  req->req_pbuf + MPT_RQSL(mpt),
				  len, sleep_ok, timeout_ms);
#endif
	if (error != 0) {
		mpt_prt(mpt, "mpt_write_cfg_page timed out\n");
		return (-1);
	}

	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
		mpt_prt(mpt, "mpt_write_cfg_page: Config Info Status %x\n",
			req->IOCStatus);
		mpt_free_request(mpt, req);
		return (-1);
	}
	mpt_free_request(mpt, req);
	return (0);
}

/*
 * Read IOC configuration information
 */
static int
mpt_read_config_info_ioc(struct mpt_softc *mpt)
{
	CONFIG_PAGE_HEADER hdr;
	struct mpt_raid_volume *mpt_raid;
	int rv;
	int i;
	size_t len;

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC,
		2, 0, &hdr, FALSE, 5000);
	/*
	 * If it's an invalid page, so what? Not a supported function....
	 */
	if (rv == EINVAL) {
		return (0);
	}
	if (rv) {
		return (rv);
	}

#if __FreeBSD_version >= 500000
	mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC Page 2 Header: ver %x, len %zx, "
		 "num %x, type %x\n", hdr.PageVersion,
		 hdr.PageLength * sizeof(uint32_t),
		 hdr.PageNumber, hdr.PageType);
#else
	mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC Page 2 Header: ver %x, len %lx, "
		 "num %x, type %x\n", hdr.PageVersion,
		 (u_long)(hdr.PageLength * sizeof(uint32_t)),
		 hdr.PageNumber, hdr.PageType);
#endif

	len = hdr.PageLength * sizeof(uint32_t);
	mpt->ioc_page2 = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (mpt->ioc_page2 == NULL) {
		mpt_prt(mpt, "unable to allocate memory for IOC page 2\n");
		mpt_raid_free_mem(mpt);
		return (ENOMEM);
	}
	memcpy(&mpt->ioc_page2->Header, &hdr, sizeof(hdr));
	rv = mpt_read_cur_cfg_page(mpt, 0,
	    &mpt->ioc_page2->Header, len, FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read IOC Page 2\n");
		mpt_raid_free_mem(mpt);
		return (EIO);
	}

	if (mpt->ioc_page2->CapabilitiesFlags != 0) {
		uint32_t mask;

		mpt_prt(mpt, "Capabilities: (");
		for (mask = 1; mask != 0; mask <<= 1) {
			if ((mpt->ioc_page2->CapabilitiesFlags & mask) == 0) {
				continue;
			}
			switch (mask) {
			case MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT:
				mpt_prtc(mpt, " RAID-0");
				break;
			case MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT:
				mpt_prtc(mpt, " RAID-1E");
				break;
			case MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT:
				mpt_prtc(mpt, " RAID-1");
				break;
			case MPI_IOCPAGE2_CAP_FLAGS_SES_SUPPORT:
				mpt_prtc(mpt, " SES");
				break;
			case MPI_IOCPAGE2_CAP_FLAGS_SAFTE_SUPPORT:
				mpt_prtc(mpt, " SAFTE");
				break;
			case MPI_IOCPAGE2_CAP_FLAGS_CROSS_CHANNEL_SUPPORT:
				mpt_prtc(mpt, " Multi-Channel-Arrays");
				break;
			default:
				break;
			}
		}
		mpt_prtc(mpt, " )\n");
		if ((mpt->ioc_page2->CapabilitiesFlags
		   & (MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT
		    | MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT
		    | MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT)) != 0) {
			mpt_prt(mpt, "%d Active Volume%s(%d Max)\n",
				mpt->ioc_page2->NumActiveVolumes,
				mpt->ioc_page2->NumActiveVolumes != 1
			      ? "s " : " ",
				mpt->ioc_page2->MaxVolumes);
			mpt_prt(mpt, "%d Hidden Drive Member%s(%d Max)\n",
				mpt->ioc_page2->NumActivePhysDisks,
				mpt->ioc_page2->NumActivePhysDisks != 1
			      ? "s " : " ",
				mpt->ioc_page2->MaxPhysDisks);
		}
	}
1803
1804	len = mpt->ioc_page2->MaxVolumes * sizeof(struct mpt_raid_volume);
1805	mpt->raid_volumes = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
1806	if (mpt->raid_volumes == NULL) {
1807		mpt_prt(mpt, "Could not allocate RAID volume data\n");
1808		mpt_raid_free_mem(mpt);
1809		return (ENOMEM);
1810	}
1811
1812	/*
1813	 * Copy critical data out of ioc_page2 so that we can
1814	 * safely refresh the page without windows of unreliable
1815	 * data.
1816	 */
1817	mpt->raid_max_volumes = mpt->ioc_page2->MaxVolumes;
1818
1819	len = sizeof(*mpt->raid_volumes->config_page) +
1820	    (sizeof (RAID_VOL0_PHYS_DISK) * (mpt->ioc_page2->MaxPhysDisks - 1));
1821	for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1822		mpt_raid = &mpt->raid_volumes[i];
1823		mpt_raid->config_page =
1824		    malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
1825		if (mpt_raid->config_page == NULL) {
1826			mpt_prt(mpt, "Could not allocate RAID page data\n");
1827			mpt_raid_free_mem(mpt);
1828			return (ENOMEM);
1829		}
1830	}
1831	mpt->raid_page0_len = len;
1832
1833	len = mpt->ioc_page2->MaxPhysDisks * sizeof(struct mpt_raid_disk);
1834	mpt->raid_disks = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
1835	if (mpt->raid_disks == NULL) {
1836		mpt_prt(mpt, "Could not allocate RAID disk data\n");
1837		mpt_raid_free_mem(mpt);
1838		return (ENOMEM);
1839	}
1840	mpt->raid_max_disks = mpt->ioc_page2->MaxPhysDisks;
1841
1842	/*
1843	 * Load page 3.
1844	 */
1845	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC,
1846	    3, 0, &hdr, FALSE, 5000);
1847	if (rv) {
1848		mpt_raid_free_mem(mpt);
1849		return (EIO);
1850	}
1851
1852	mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC Page 3 Header: %x %x %x %x\n",
1853	    hdr.PageVersion, hdr.PageLength, hdr.PageNumber, hdr.PageType);
1854
1855	len = hdr.PageLength * sizeof(uint32_t);
1856	mpt->ioc_page3 = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
1857	if (mpt->ioc_page3 == NULL) {
1858		mpt_prt(mpt, "unable to allocate memory for IOC page 3\n");
1859		mpt_raid_free_mem(mpt);
1860		return (ENOMEM);
1861	}
1862	memcpy(&mpt->ioc_page3->Header, &hdr, sizeof(hdr));
1863	rv = mpt_read_cur_cfg_page(mpt, 0,
1864	    &mpt->ioc_page3->Header, len, FALSE, 5000);
1865	if (rv) {
1866		mpt_raid_free_mem(mpt);
1867		return (EIO);
1868	}
1869	mpt_raid_wakeup(mpt);
1870	return (0);
1871}
1872
1873/*
1874 * Enable IOC port
1875 */
1876static int
1877mpt_send_port_enable(struct mpt_softc *mpt, int port)
1878{
1879	request_t	*req;
1880	MSG_PORT_ENABLE *enable_req;
1881	int		 error;
1882
1883	req = mpt_get_request(mpt, /*sleep_ok*/FALSE);
1884	if (req == NULL)
1885		return (-1);
1886
1887	enable_req = req->req_vbuf;
1888	memset(enable_req, 0, MPT_RQSL(mpt));
1889
1890	enable_req->Function   = MPI_FUNCTION_PORT_ENABLE;
1891	enable_req->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_CONFIG);
1892	enable_req->PortNumber = port;
1893
1894	mpt_check_doorbell(mpt);
1895	mpt_lprt(mpt, MPT_PRT_DEBUG, "enabling port %d\n", port);
1896
1897	mpt_send_cmd(mpt, req);
1898	error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
1899	    FALSE, (mpt->is_sas || mpt->is_fc) ? 30000 : 3000);
1900	if (error != 0) {
1901		mpt_prt(mpt, "port %d enable timed out\n", port);
1902		return (-1);
1903	}
1904	mpt_free_request(mpt, req);
1905	mpt_lprt(mpt, MPT_PRT_DEBUG, "enabled port %d\n", port);
1906	return (0);
1907}
1908
1909/*
1910 * Enable/Disable asynchronous event reporting.
1911 */
1912static int
1913mpt_send_event_request(struct mpt_softc *mpt, int onoff)
1914{
1915	request_t *req;
1916	MSG_EVENT_NOTIFY *enable_req;
1917
1918	req = mpt_get_request(mpt, FALSE);
1919	if (req == NULL) {
1920		return (ENOMEM);
1921	}
1922	enable_req = req->req_vbuf;
1923	memset(enable_req, 0, sizeof *enable_req);
1924
1925	enable_req->Function   = MPI_FUNCTION_EVENT_NOTIFICATION;
1926	enable_req->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_EVENTS);
1927	enable_req->Switch     = onoff;
1928
1929	mpt_check_doorbell(mpt);
1930	mpt_lprt(mpt, MPT_PRT_DEBUG, "%sabling async events\n",
1931	    onoff ? "en" : "dis");
1932	/*
1933	 * Send the command off, but don't wait for it.
1934	 */
1935	mpt_send_cmd(mpt, req);
1936	return (0);
1937}
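
/*
 * Usage note (sketch only): the complementary call to turn reporting
 * back off would be mpt_send_event_request(mpt, 0); like the enable
 * case it is fire-and-forget, with the reply fielded by the handler
 * registered under MPT_REPLY_HANDLER_EVENTS.
 */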
1938
1939/*
1940 * Un-mask the interrupts on the chip.
1941 */
1942void
1943mpt_enable_ints(struct mpt_softc *mpt)
1944{
1945	/* Unmask everything except the doorbell interrupt */
1946	mpt_write(mpt, MPT_OFFSET_INTR_MASK, MPT_INTR_DB_MASK);
1947}
1948
1949/*
1950 * Mask the interrupts on the chip.
1951 */
1952void
1953mpt_disable_ints(struct mpt_softc *mpt)
1954{
1955	/* Mask all interrupts */
1956	mpt_write(mpt, MPT_OFFSET_INTR_MASK,
1957	    MPT_INTR_REPLY_MASK | MPT_INTR_DB_MASK);
1958}
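
/*
 * Usage sketch (illustrative only): interrupts are typically masked
 * around polled doorbell handshakes and unmasked once normal
 * reply-queue operation resumes.  The helper name below is invented
 * for the sketch.
 *
 *	mpt_disable_ints(mpt);
 *	(void) polled_doorbell_handshake(mpt);	/- hypothetical helper -/
 *	mpt_enable_ints(mpt);
 */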
1959
1960static void
1961mpt_sysctl_attach(struct mpt_softc *mpt)
1962{
1963#if __FreeBSD_version >= 500000
1964	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
1965	struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);
1966
1967	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1968		       "debug", CTLFLAG_RW, &mpt->verbose, 0,
1969		       "Debugging/Verbose level");
1970	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1971		       "role", CTLFLAG_RD, &mpt->role, 0,
1972		       "HBA role");
1973#endif
1974}
1975
1976int
1977mpt_attach(struct mpt_softc *mpt)
1978{
1979	struct mpt_personality *pers;
1980	int i;
1981	int error;
1982
1983	for (i = 0; i < MPT_MAX_PERSONALITIES; i++) {
1984		pers = mpt_personalities[i];
1985		if (pers == NULL) {
1986			continue;
1987		}
1988		if (pers->probe(mpt) == 0) {
1989			error = pers->attach(mpt);
1990			if (error != 0) {
1991				mpt_detach(mpt);
1992				return (error);
1993			}
1994			mpt->mpt_pers_mask |= (0x1 << pers->id);
1995			pers->use_count++;
1996		}
1997	}
1998
1999	/*
2000	 * Now that we've attached everything, do the enable function
2001	 * for all of the personalities. This allows the personalities
2002	 * to do setups that are appropriate for them prior to enabling
2003	 * any ports.
2004	 */
2005	for (i = 0; i < MPT_MAX_PERSONALITIES; i++) {
2006		pers = mpt_personalities[i];
2007		if (pers != NULL && MPT_PERS_ATTACHED(pers, mpt) != 0) {
2008			error = pers->enable(mpt);
2009			if (error != 0) {
2010				mpt_prt(mpt, "personality %s attached but would"
2011				    " not enable (%d)\n", pers->name, error);
2012				mpt_detach(mpt);
2013				return (error);
2014			}
2015		}
2016	}
2017	return (0);
2018}
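
/*
 * For illustration: a minimal, hypothetical personality as consumed by
 * the probe/attach/enable sequence in mpt_attach() above.  Only the
 * handlers referenced in this file are shown, and the "example_*"
 * names are invented for the sketch.
 *
 *	static struct mpt_personality example_personality = {
 *		.name   = "mpt_example",
 *		.probe  = example_probe,	(returns 0 to claim the unit)
 *		.attach = example_attach,	(per-instance setup)
 *		.enable = example_enable,	(runs before port enable)
 *	};
 */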
2019
2020int
2021mpt_shutdown(struct mpt_softc *mpt)
2022{
2023	struct mpt_personality *pers;
2024
2025	MPT_PERS_FOREACH_REVERSE(mpt, pers) {
2026		pers->shutdown(mpt);
2027	}
2028	return (0);
2029}
2030
2031int
2032mpt_detach(struct mpt_softc *mpt)
2033{
2034	struct mpt_personality *pers;
2035
2036	MPT_PERS_FOREACH_REVERSE(mpt, pers) {
2037		pers->detach(mpt);
2038		mpt->mpt_pers_mask &= ~(0x1 << pers->id);
2039		pers->use_count--;
2040	}
2041
2042	return (0);
2043}
2044
2045int
2046mpt_core_load(struct mpt_personality *pers)
2047{
2048	int i;
2049
2050	/*
2051	 * Setup core handlers and insert the default handler
2052	 * into all "empty slots".
2053	 */
2054	for (i = 0; i < MPT_NUM_REPLY_HANDLERS; i++) {
2055		mpt_reply_handlers[i] = mpt_default_reply_handler;
2056	}
2057
2058	mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_EVENTS)] =
2059	    mpt_event_reply_handler;
2060	mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_CONFIG)] =
2061	    mpt_config_reply_handler;
2062	mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_HANDSHAKE)] =
2063	    mpt_handshake_reply_handler;
2064	return (0);
2065}
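
/*
 * Dispatch sketch (illustrative only; the real routing happens in the
 * interrupt path): a reply's context value carries a callback index,
 * and MPT_CBI() is assumed to recover it so the reply can be steered
 * through the table initialized above.
 *
 *	uint32_t ctxt = le32toh(reply_frame->MsgContext);
 *	mpt_reply_handlers[MPT_CBI(ctxt)](mpt, req, ctxt, reply_frame);
 */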
2066
2067/*
2068 * Initialize per-instance driver data and perform
2069 * initial controller configuration.
2070 */
2071int
2072mpt_core_attach(struct mpt_softc *mpt)
2073{
2074	int val;
2075	int error;
2076
2078	LIST_INIT(&mpt->ack_frames);
2079
2080	/* Put all request buffers on the free list */
2081	TAILQ_INIT(&mpt->request_pending_list);
2082	TAILQ_INIT(&mpt->request_free_list);
2083	TAILQ_INIT(&mpt->request_timeout_list);
2084	for (val = 0; val < MPT_MAX_REQUESTS(mpt); val++) {
2085		request_t *req = &mpt->request_pool[val];
2086		req->state = REQ_STATE_ALLOCATED;
2087		mpt_free_request(mpt, req);
2088	}
2089
2090	for (val = 0; val < MPT_MAX_LUNS; val++) {
2091		STAILQ_INIT(&mpt->trt[val].atios);
2092		STAILQ_INIT(&mpt->trt[val].inots);
2093	}
2094	STAILQ_INIT(&mpt->trt_wildcard.atios);
2095	STAILQ_INIT(&mpt->trt_wildcard.inots);
2096
2097	mpt->scsi_tgt_handler_id = MPT_HANDLER_ID_NONE;
2098
2099	mpt_sysctl_attach(mpt);
2100
2101	mpt_lprt(mpt, MPT_PRT_DEBUG, "doorbell req = %s\n",
2102	    mpt_ioc_diag(mpt_read(mpt, MPT_OFFSET_DOORBELL)));
2103
2104	error = mpt_configure_ioc(mpt);
2105
2106	return (error);
2107}
2108
2109int
2110mpt_core_enable(struct mpt_softc *mpt)
2111{
2112	/*
2113	 * We enter with the IOC enabled, but async events
2114	 * not enabled, ports not enabled and interrupts
2115	 * not enabled.
2116	 */
2117
2118	/*
2119	 * Enable asynchronous event reporting; all personalities
2120	 * have attached, so they should now be able to field
2121	 * async events.
2122	 */
2123	mpt_send_event_request(mpt, 1);
2124
2125	/*
2126	 * Catch any pending interrupts
2127	 *
2128	 * This seems to be crucial- otherwise
2129	 * the portenable below times out.
2130	 */
2131	mpt_intr(mpt);
2132
2133	/*
2134	 * Enable Interrupts
2135	 */
2136	mpt_enable_ints(mpt);
2137
2138	/*
2139	 * Catch any pending interrupts
2140	 *
2141	 * This seems to be crucial- otherwise
2142	 * the portenable below times out.
2143	 */
2144	mpt_intr(mpt);
2145
2146	/*
2147	 * Enable the port.
2148	 */
2149	if (mpt_send_port_enable(mpt, 0) != MPT_OK) {
2150		mpt_prt(mpt, "failed to enable port 0\n");
2151		return (ENXIO);
2152	}
2153	return (0);
2154}
2155
2156void
2157mpt_core_shutdown(struct mpt_softc *mpt)
2158{
2159	mpt_disable_ints(mpt);
2160}
2161
2162void
2163mpt_core_detach(struct mpt_softc *mpt)
2164{
2165	mpt_disable_ints(mpt);
2166}
2167
2168int
2169mpt_core_unload(struct mpt_personality *pers)
2170{
2171	/* Unload is always successful. */
2172	return (0);
2173}
2174
2175#define FW_UPLOAD_REQ_SIZE				\
2176	(sizeof(MSG_FW_UPLOAD) - sizeof(SGE_MPI_UNION)	\
2177       + sizeof(FW_UPLOAD_TCSGE) + sizeof(SGE_SIMPLE32))
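
/*
 * A rough picture of the handshake request that the macro above sizes;
 * the layout is inferred from the sizeof() arithmetic and shown only
 * as an aid to reading mpt_upload_fw() below:
 *
 *	+----------------------------+
 *	| MSG_FW_UPLOAD (sans SGL)   |  fixed header fields
 *	+----------------------------+
 *	| FW_UPLOAD_TCSGE            |  transaction context element
 *	+----------------------------+
 *	| SGE_SIMPLE32               |  one simple SGE -> image buffer
 *	+----------------------------+
 */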
2178
2179static int
2180mpt_upload_fw(struct mpt_softc *mpt)
2181{
2182	uint8_t fw_req_buf[FW_UPLOAD_REQ_SIZE];
2183	MSG_FW_UPLOAD_REPLY fw_reply;
2184	MSG_FW_UPLOAD *fw_req;
2185	FW_UPLOAD_TCSGE *tsge;
2186	SGE_SIMPLE32 *sge;
2187	uint32_t flags;
2188	int error;
2189
2190	memset(&fw_req_buf, 0, sizeof(fw_req_buf));
2191	fw_req = (MSG_FW_UPLOAD *)fw_req_buf;
2192	fw_req->ImageType = MPI_FW_UPLOAD_ITYPE_FW_IOC_MEM;
2193	fw_req->Function = MPI_FUNCTION_FW_UPLOAD;
2194	fw_req->MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
2195	tsge = (FW_UPLOAD_TCSGE *)&fw_req->SGL;
2196	tsge->DetailsLength = 12;
2197	tsge->Flags = MPI_SGE_FLAGS_TRANSACTION_ELEMENT;
2198	tsge->ImageSize = htole32(mpt->fw_image_size);
2199	sge = (SGE_SIMPLE32 *)(tsge + 1);
2200	flags = (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER
2201	      | MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_SIMPLE_ELEMENT
2202	      | MPI_SGE_FLAGS_32_BIT_ADDRESSING | MPI_SGE_FLAGS_IOC_TO_HOST);
2203	flags <<= MPI_SGE_FLAGS_SHIFT;
2204	sge->FlagsLength = htole32(flags | mpt->fw_image_size);
2205	sge->Address = htole32(mpt->fw_phys);
2206	error = mpt_send_handshake_cmd(mpt, sizeof(fw_req_buf), &fw_req_buf);
2207	if (error)
2208		return(error);
2209	error = mpt_recv_handshake_reply(mpt, sizeof(fw_reply), &fw_reply);
2210	return (error);
2211}
2212
2213static void
2214mpt_diag_outsl(struct mpt_softc *mpt, uint32_t addr,
2215	       uint32_t *data, bus_size_t len)
2216{
2217	uint32_t *data_end;
2218
2219	data_end = data + (roundup2(len, sizeof(uint32_t)) / 4);
2220	if (mpt->is_sas) {
2221		pci_enable_io(mpt->dev, SYS_RES_IOPORT);
2222	}
2223	mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, addr);
2224	while (data != data_end) {
2225		mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, *data);
2226		data++;
2227	}
2228	if (mpt->is_sas) {
2229		pci_disable_io(mpt->dev, SYS_RES_IOPORT);
2230	}
2231}
2232
2233static int
2234mpt_download_fw(struct mpt_softc *mpt)
2235{
2236	MpiFwHeader_t *fw_hdr;
2237	int error;
2238	uint32_t ext_offset;
2239	uint32_t data;
2240
2241	mpt_prt(mpt, "Downloading Firmware - Image Size %d\n",
2242		mpt->fw_image_size);
2243
2244	error = mpt_enable_diag_mode(mpt);
2245	if (error != 0) {
2246		mpt_prt(mpt, "Could not enter diagnostic mode!\n");
2247		return (EIO);
2248	}
2249
2250	mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC,
2251		  MPI_DIAG_RW_ENABLE|MPI_DIAG_DISABLE_ARM);
2252
2253	fw_hdr = (MpiFwHeader_t *)mpt->fw_image;
2254	mpt_diag_outsl(mpt, fw_hdr->LoadStartAddress, (uint32_t*)fw_hdr,
2255		       fw_hdr->ImageSize);
2256
2257	ext_offset = fw_hdr->NextImageHeaderOffset;
2258	while (ext_offset != 0) {
2259		MpiExtImageHeader_t *ext;
2260
2261		ext = (MpiExtImageHeader_t *)((uintptr_t)fw_hdr + ext_offset);
2262		ext_offset = ext->NextImageHeaderOffset;
2263
2264		mpt_diag_outsl(mpt, ext->LoadStartAddress, (uint32_t*)ext,
2265			       ext->ImageSize);
2266	}
2267
2268	if (mpt->is_sas) {
2269		pci_enable_io(mpt->dev, SYS_RES_IOPORT);
2270	}
2271	/* Setup the address to jump to on reset. */
2272	mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, fw_hdr->IopResetRegAddr);
2273	mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, fw_hdr->IopResetVectorValue);
2274
2275	/*
2276	 * The controller sets the "flash bad" status after attempting
2277	 * to auto-boot from flash.  Clear the status so that the controller
2278	 * will continue the boot process with our newly installed firmware.
2279	 */
2280	mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, MPT_DIAG_MEM_CFG_BASE);
2281	data = mpt_pio_read(mpt, MPT_OFFSET_DIAG_DATA) | MPT_DIAG_MEM_CFG_BADFL;
2282	mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, MPT_DIAG_MEM_CFG_BASE);
2283	mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, data);
2284
2285	if (mpt->is_sas) {
2286		pci_disable_io(mpt->dev, SYS_RES_IOPORT);
2287	}
2288
2289	/*
2290	 * Re-enable the processor and clear the boot halt flag.
2291	 */
2292	data = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);
2293	data &= ~(MPI_DIAG_PREVENT_IOC_BOOT|MPI_DIAG_DISABLE_ARM);
2294	mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, data);
2295
2296	mpt_disable_diag_mode(mpt);
2297	return (0);
2298}
2299
2300/*
2301 * Allocate/Initialize data structures for the controller.  Called
2302 * once at instance startup.
2303 */
2304static int
2305mpt_configure_ioc(struct mpt_softc *mpt)
2306{
2307	MSG_PORT_FACTS_REPLY pfp;
2308	MSG_IOC_FACTS_REPLY facts;
2309	int try;
2310	int needreset;
2311	uint32_t max_chain_depth;
2312
2313	needreset = 0;
2314	for (try = 0; try < MPT_MAX_TRYS; try++) {
2315
2316		/*
2317		 * No need to reset if the IOC is already in the READY state.
2318		 *
2319		 * Force reset if initialization failed previously.
2320		 * Note that a hard_reset of the second channel of a '929
2321		 * will stop operation of the first channel.  Hopefully, if the
2322		 * first channel is ok, the second will not require a hard
2323		 * reset.
2324		 */
2325		if (needreset || MPT_STATE(mpt_rd_db(mpt)) !=
2326		    MPT_DB_STATE_READY) {
2327			if (mpt_reset(mpt, FALSE) != MPT_OK) {
2328				continue;
2329			}
2330		}
2331		needreset = 0;
2332
2333		if (mpt_get_iocfacts(mpt, &facts) != MPT_OK) {
2334			mpt_prt(mpt, "mpt_get_iocfacts failed\n");
2335			needreset = 1;
2336			continue;
2337		}
2338
2339		mpt->mpt_global_credits = le16toh(facts.GlobalCredits);
2340		mpt->request_frame_size = le16toh(facts.RequestFrameSize);
2341		mpt->ioc_facts_flags = facts.Flags;
2342		mpt_prt(mpt, "MPI Version=%d.%d.%d.%d\n",
2343			    le16toh(facts.MsgVersion) >> 8,
2344			    le16toh(facts.MsgVersion) & 0xFF,
2345			    le16toh(facts.HeaderVersion) >> 8,
2346			    le16toh(facts.HeaderVersion) & 0xFF);
2347
2348		/*
2349		 * Now that we know the request frame size, we can calculate
2350		 * the actual (reasonable) segment limit for read/write I/O.
2351		 *
2352		 * This limit is constrained by:
2353		 *
2354		 *  + The size of each area we allocate per command (and how
2355		 *    many chain segments we can fit into it).
2356		 *  + The total number of areas we've set up.
2357		 *  + The actual chain depth the card will allow.
2358		 *
2359		 * The first area's segment count is limited by the I/O request
2360		 * at the head of it.  We realistically cannot allocate more
2361		 * than MPT_MAX_REQUESTS areas, so to account for both
2362		 * conditions we just start out with MPT_MAX_REQUESTS-2
2363		 * (a worked example follows the calculation below).
2364		 */
2365		max_chain_depth = facts.MaxChainDepth;
2366
2367		/* total number of request areas we (can) allocate */
2368		mpt->max_seg_cnt = MPT_MAX_REQUESTS(mpt) - 2;
2369
2370		/* converted to the number of chain areas possible */
2371		mpt->max_seg_cnt *= MPT_NRFM(mpt);
2372
2373		/* limited by the number of chain areas the card will support */
2374		if (mpt->max_seg_cnt > max_chain_depth) {
2375			mpt_lprt(mpt, MPT_PRT_DEBUG,
2376			    "chain depth limited to %u (from %u)\n",
2377			    max_chain_depth, mpt->max_seg_cnt);
2378			mpt->max_seg_cnt = max_chain_depth;
2379		}
2380
2381		/* converted to the number of simple sges in chain segments. */
2382		mpt->max_seg_cnt *= (MPT_NSGL(mpt) - 1);
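
		/*
		 * Worked example with made-up numbers (nothing here is
		 * read from a real adapter): 256 request areas give
		 * 256 - 2 = 254 to start with; at, say, 2 chain frames
		 * per area (MPT_NRFM) that is 508 chain areas.  A
		 * reported MaxChainDepth of 128 clamps that to 128, and
		 * at 32 SGEs per chain frame (MPT_NSGL) the final limit
		 * is 128 * (32 - 1) = 3968 segments.
		 */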
2383
2384		mpt_lprt(mpt, MPT_PRT_DEBUG,
2385		    "Maximum Segment Count: %u\n", mpt->max_seg_cnt);
2386		mpt_lprt(mpt, MPT_PRT_DEBUG,
2387			 "MsgLength=%u IOCNumber = %d\n",
2388			 facts.MsgLength, facts.IOCNumber);
2389		mpt_lprt(mpt, MPT_PRT_DEBUG,
2390			 "IOCFACTS: GlobalCredits=%d BlockSize=%u bytes "
2391			 "Request Frame Size %u bytes Max Chain Depth %u\n",
2392			 mpt->mpt_global_credits, facts.BlockSize,
2393			 mpt->request_frame_size << 2, max_chain_depth);
2394		mpt_lprt(mpt, MPT_PRT_DEBUG,
2395			 "IOCFACTS: Num Ports %d, FWImageSize %d, "
2396			 "Flags=%#x\n", facts.NumberOfPorts,
2397			 le32toh(facts.FWImageSize), facts.Flags);
2398
2400		if ((facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT) != 0) {
2401			struct mpt_map_info mi;
2402			int error;
2403
2404			/*
2405			 * In some configurations, the IOC's firmware is
2406			 * stored in a shared piece of system NVRAM that
2407			 * is only accessible via the BIOS.  In this
2408			 * case, the IOC keeps a copy of the firmware in
2409			 * RAM until the OS driver retrieves it.  Once
2410			 * retrieved, we are responsible for re-downloading
2411			 * the firmware after any hard reset.
2412			 */
2413			mpt->fw_image_size = le32toh(facts.FWImageSize);
2414			error = mpt_dma_tag_create(mpt, mpt->parent_dmat,
2415			    /*alignment*/1, /*boundary*/0,
2416			    /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
2417			    /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL,
2418			    /*filterarg*/NULL, mpt->fw_image_size,
2419			    /*nsegments*/1, /*maxsegsz*/mpt->fw_image_size,
2420			    /*flags*/0, &mpt->fw_dmat);
2421			if (error != 0) {
2422				mpt_prt(mpt, "cannot create fw dma tag\n");
2423				return (ENOMEM);
2424			}
2425			error = bus_dmamem_alloc(mpt->fw_dmat,
2426			    (void **)&mpt->fw_image, BUS_DMA_NOWAIT,
2427			    &mpt->fw_dmap);
2428			if (error != 0) {
2429				mpt_prt(mpt, "cannot allocate fw mem.\n");
2430				bus_dma_tag_destroy(mpt->fw_dmat);
2431				return (ENOMEM);
2432			}
2433			mi.mpt = mpt;
2434			mi.error = 0;
2435			bus_dmamap_load(mpt->fw_dmat, mpt->fw_dmap,
2436			    mpt->fw_image, mpt->fw_image_size, mpt_map_rquest,
2437			    &mi, 0);
2438			mpt->fw_phys = mi.phys;
2439
2440			error = mpt_upload_fw(mpt);
2441			if (error != 0) {
2442				mpt_prt(mpt, "fw upload failed.\n");
2443				bus_dmamap_unload(mpt->fw_dmat, mpt->fw_dmap);
2444				bus_dmamem_free(mpt->fw_dmat, mpt->fw_image,
2445				    mpt->fw_dmap);
2446				bus_dma_tag_destroy(mpt->fw_dmat);
2447				mpt->fw_image = NULL;
2448				return (EIO);
2449			}
2450		}
2451
2452		if (mpt_get_portfacts(mpt, &pfp) != MPT_OK) {
2453			mpt_prt(mpt, "mpt_get_portfacts failed\n");
2454			needreset = 1;
2455			continue;
2456		}
2457
2458		mpt_lprt(mpt, MPT_PRT_DEBUG,
2459			 "PORTFACTS: Type %x PFlags %x IID %d MaxDev %d\n",
2460			 pfp.PortType, pfp.ProtocolFlags, pfp.PortSCSIID,
2461			 pfp.MaxDevices);
2462
2463		mpt->mpt_port_type = pfp.PortType;
2464		mpt->mpt_proto_flags = pfp.ProtocolFlags;
2465		if (pfp.PortType != MPI_PORTFACTS_PORTTYPE_SCSI &&
2466		    pfp.PortType != MPI_PORTFACTS_PORTTYPE_SAS &&
2467		    pfp.PortType != MPI_PORTFACTS_PORTTYPE_FC) {
2468			mpt_prt(mpt, "Unsupported Port Type (%x)\n",
2469			    pfp.PortType);
2470			return (ENXIO);
2471		}
2472		mpt->mpt_max_tgtcmds = le16toh(pfp.MaxPostedCmdBuffers);
2473
2474		if (pfp.PortType == MPI_PORTFACTS_PORTTYPE_FC) {
2475			mpt->is_fc = 1;
2476			mpt->is_sas = 0;
2477			mpt->is_spi = 0;
2478		} else if (pfp.PortType == MPI_PORTFACTS_PORTTYPE_SAS) {
2479			mpt->is_fc = 0;
2480			mpt->is_sas = 1;
2481			mpt->is_spi = 0;
2482		} else {
2483			mpt->is_fc = 0;
2484			mpt->is_sas = 0;
2485			mpt->is_spi = 1;
2486		}
2487		mpt->mpt_ini_id = pfp.PortSCSIID;
2488		mpt->mpt_max_devices = pfp.MaxDevices;
2489
2490		/*
2491		 * Set our role with what this port supports.
2492		 *
2493		 * Note this might be changed later in different modules
2494		 * if this is different from what is wanted.
2495		 */
2496		mpt->role = MPT_ROLE_NONE;
2497		if (pfp.ProtocolFlags & MPI_PORTFACTS_PROTOCOL_INITIATOR) {
2498			mpt->role |= MPT_ROLE_INITIATOR;
2499		}
2500		if (pfp.ProtocolFlags & MPI_PORTFACTS_PROTOCOL_TARGET) {
2501			mpt->role |= MPT_ROLE_TARGET;
2502		}
2503		if (mpt_enable_ioc(mpt, 0) != MPT_OK) {
2504			mpt_prt(mpt, "unable to initialize IOC\n");
2505			return (ENXIO);
2506		}
2507
2508		/*
2509		 * Read IOC configuration information.
2510		 *
2511		 * We need this to determine whether or not we have certain
2512		 * settings for Integrated Mirroring (e.g.).
2513		 */
2514		mpt_read_config_info_ioc(mpt);
2515
2516		/* Everything worked */
2517		break;
2518	}
2519
2520	if (try >= MPT_MAX_TRYS) {
2521		mpt_prt(mpt, "failed to initialize IOC");
2522		return (EIO);
2523	}
2524
2525	return (0);
2526}
2527
2528static int
2529mpt_enable_ioc(struct mpt_softc *mpt, int portenable)
2530{
2531	uint32_t pptr;
2532	int val;
2533
2534	if (mpt_send_ioc_init(mpt, MPI_WHOINIT_HOST_DRIVER) != MPT_OK) {
2535		mpt_prt(mpt, "mpt_send_ioc_init failed\n");
2536		return (EIO);
2537	}
2538
2539	mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_send_ioc_init ok\n");
2540
2541	if (mpt_wait_state(mpt, MPT_DB_STATE_RUNNING) != MPT_OK) {
2542		mpt_prt(mpt, "IOC failed to go to run state\n");
2543		return (ENXIO);
2544	}
2545	mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC now at RUNSTATE\n");
2546
2547	/*
2548	 * Give it reply buffers
2549	 *
2550	 * Do *not* exceed global credits.
2551	 */
2552	for (val = 0, pptr = mpt->reply_phys;
2553	    (pptr + MPT_REPLY_SIZE) < (mpt->reply_phys + PAGE_SIZE);
2554	    pptr += MPT_REPLY_SIZE) {
2555		mpt_free_reply(mpt, pptr);
2556		if (++val == mpt->mpt_global_credits - 1)
2557			break;
2558	}
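
	/*
	 * Sizing note (illustrative figures): with a 4096-byte reply
	 * area and an MPT_REPLY_SIZE of, say, 128 bytes, the strict
	 * comparison above posts 31 buffers (the last slot in the page
	 * is left unused); with 64 global credits the credit check
	 * (stop at 63) never fires.
	 */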
2559
2561	/*
2562	 * Enable the port if asked. This is only done if we're resetting
2563	 * the IOC after initial startup.
2564	 */
2565	if (portenable) {
2566		/*
2567		 * Enable asynchronous event reporting
2568		 */
2569		mpt_send_event_request(mpt, 1);
2570
2571		if (mpt_send_port_enable(mpt, 0) != MPT_OK) {
2572			mpt_prt(mpt, "failed to enable port 0\n");
2573			return (ENXIO);
2574		}
2575	}
2576	return (MPT_OK);
2577}
2578