mpt.c (165814) → mpt.c (169293)
1/*-
2 * Generic routines for LSI Fusion adapters.
3 * FreeBSD Version.
4 *
5 * Copyright (c) 2000, 2001 by Greg Ansley
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice immediately at the beginning of the file, without modification,
12 * this list of conditions, and the following disclaimer.
13 * 2. The name of the author may not be used to endorse or promote products
14 * derived from this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28/*-
29 * Copyright (c) 2002, 2006 by Matthew Jacob
30 * All rights reserved.
31 *
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions are
34 * met:
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
38 * substantially similar to the "NO WARRANTY" disclaimer below
39 * ("Disclaimer") and any redistribution must be conditioned upon including
40 * a substantially similar Disclaimer requirement for further binary
41 * redistribution.
42 * 3. Neither the names of the above listed copyright holders nor the names
43 * of any contributors may be used to endorse or promote products derived
44 * from this software without specific prior written permission.
45 *
46 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
47 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
48 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
49 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
50 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
51 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
52 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
53 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
54 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
55 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
56 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
57 *
58 * Support from Chris Ellsworth in order to make SAS adapters work
59 * is gratefully acknowledged.
60 *
61 *
62 * Support from LSI-Logic has also gone a great deal toward making this a
63 * workable subsystem and is gratefully acknowledged.
64 */
65/*-
66 * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
67 * Copyright (c) 2005, WHEEL Sp. z o.o.
68 * Copyright (c) 2004, 2005 Justin T. Gibbs
69 * All rights reserved.
70 *
71 * Redistribution and use in source and binary forms, with or without
72 * modification, are permitted provided that the following conditions are
73 * met:
74 * 1. Redistributions of source code must retain the above copyright
75 * notice, this list of conditions and the following disclaimer.
76 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
77 * substantially similar to the "NO WARRANTY" disclaimer below
78 * ("Disclaimer") and any redistribution must be conditioned upon including
79 * a substantially similar Disclaimer requirement for further binary
80 * redistribution.
81 * 3. Neither the names of the above listed copyright holders nor the names
82 * of any contributors may be used to endorse or promote products derived
83 * from this software without specific prior written permission.
84 *
85 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
86 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
87 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
88 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
89 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
90 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
91 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
92 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
93 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
94 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
95 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
96 */
97
98#include <sys/cdefs.h>
99__FBSDID("$FreeBSD: head/sys/dev/mpt/mpt.c 165814 2007-01-05 22:49:05Z mjacob $");
99__FBSDID("$FreeBSD: head/sys/dev/mpt/mpt.c 169293 2007-05-05 20:18:24Z mjacob $");
100
101#include <dev/mpt/mpt.h>
102#include <dev/mpt/mpt_cam.h> /* XXX For static handler registration */
103#include <dev/mpt/mpt_raid.h> /* XXX For static handler registration */
104
105#include <dev/mpt/mpilib/mpi.h>
106#include <dev/mpt/mpilib/mpi_ioc.h>
107#include <dev/mpt/mpilib/mpi_fc.h>
108#include <dev/mpt/mpilib/mpi_targ.h>
109
110#include <sys/sysctl.h>
111
112#define MPT_MAX_TRYS 3
113#define MPT_MAX_WAIT 300000
114
115static int maxwait_ack = 0;
116static int maxwait_int = 0;
117static int maxwait_state = 0;
118
119static TAILQ_HEAD(, mpt_softc) mpt_tailq = TAILQ_HEAD_INITIALIZER(mpt_tailq);
120mpt_reply_handler_t *mpt_reply_handlers[MPT_NUM_REPLY_HANDLERS];
121
122static mpt_reply_handler_t mpt_default_reply_handler;
123static mpt_reply_handler_t mpt_config_reply_handler;
124static mpt_reply_handler_t mpt_handshake_reply_handler;
125static mpt_reply_handler_t mpt_event_reply_handler;
126static void mpt_send_event_ack(struct mpt_softc *mpt, request_t *ack_req,
127 MSG_EVENT_NOTIFY_REPLY *msg, uint32_t context);
128static int mpt_send_event_request(struct mpt_softc *mpt, int onoff);
129static int mpt_soft_reset(struct mpt_softc *mpt);
130static void mpt_hard_reset(struct mpt_softc *mpt);
131static int mpt_configure_ioc(struct mpt_softc *mpt, int, int);
132static int mpt_enable_ioc(struct mpt_softc *mpt, int);
133
134/************************* Personality Module Support *************************/
135/*
136 * We include one extra entry that is guaranteed to be NULL
137 * to simplify our iterator.
138 */
139static struct mpt_personality *mpt_personalities[MPT_MAX_PERSONALITIES + 1];
140static __inline struct mpt_personality*
141 mpt_pers_find(struct mpt_softc *, u_int);
142static __inline struct mpt_personality*
143 mpt_pers_find_reverse(struct mpt_softc *, u_int);
144
145static __inline struct mpt_personality *
146mpt_pers_find(struct mpt_softc *mpt, u_int start_at)
147{
148 KASSERT(start_at <= MPT_MAX_PERSONALITIES,
149 ("mpt_pers_find: starting position out of range\n"));
150
151 while (start_at < MPT_MAX_PERSONALITIES
152 && (mpt->mpt_pers_mask & (0x1 << start_at)) == 0) {
153 start_at++;
154 }
155 return (mpt_personalities[start_at]);
156}
157
158/*
159 * Used infrequently, so no need to optimize like a forward
160 * traversal where we use the MAX+1 is guaranteed to be NULL
161 * trick.
162 */
163static __inline struct mpt_personality *
164mpt_pers_find_reverse(struct mpt_softc *mpt, u_int start_at)
165{
166 while (start_at < MPT_MAX_PERSONALITIES
167 && (mpt->mpt_pers_mask & (0x1 << start_at)) == 0) {
168 start_at--;
169 }
170 if (start_at < MPT_MAX_PERSONALITIES)
171 return (mpt_personalities[start_at]);
172 return (NULL);
173}
174
175#define MPT_PERS_FOREACH(mpt, pers) \
176 for (pers = mpt_pers_find(mpt, /*start_at*/0); \
177 pers != NULL; \
178 pers = mpt_pers_find(mpt, /*start_at*/pers->id+1))
179
180#define MPT_PERS_FOREACH_REVERSE(mpt, pers) \
181 for (pers = mpt_pers_find_reverse(mpt, MPT_MAX_PERSONALITIES-1);\
182 pers != NULL; \
183 pers = mpt_pers_find_reverse(mpt, /*start_at*/pers->id-1))
184
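/*
 * Illustrative sketch (not part of the driver): because
 * mpt_personalities[] has MPT_MAX_PERSONALITIES + 1 entries and the
 * final entry is always NULL, a forward walk terminates on that NULL
 * sentinel with no extra bounds test. A caller of the iterator might
 * look like this (function name hypothetical):
 */
#if 0
static void
example_walk_personalities(struct mpt_softc *mpt)
{
	struct mpt_personality *pers;

	MPT_PERS_FOREACH(mpt, pers) {
		/* Only personalities whose bit is set in mpt_pers_mask
		 * are visited; pers->id indexes mpt_personalities[]. */
		mpt_prt(mpt, "personality %u attached\n", pers->id);
	}
}
#endif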
185static mpt_load_handler_t mpt_stdload;
186static mpt_probe_handler_t mpt_stdprobe;
187static mpt_attach_handler_t mpt_stdattach;
188static mpt_enable_handler_t mpt_stdenable;
189static mpt_ready_handler_t mpt_stdready;
190static mpt_event_handler_t mpt_stdevent;
191static mpt_reset_handler_t mpt_stdreset;
192static mpt_shutdown_handler_t mpt_stdshutdown;
193static mpt_detach_handler_t mpt_stddetach;
194static mpt_unload_handler_t mpt_stdunload;
195static struct mpt_personality mpt_default_personality =
196{
197 .load = mpt_stdload,
198 .probe = mpt_stdprobe,
199 .attach = mpt_stdattach,
200 .enable = mpt_stdenable,
201 .ready = mpt_stdready,
202 .event = mpt_stdevent,
203 .reset = mpt_stdreset,
204 .shutdown = mpt_stdshutdown,
205 .detach = mpt_stddetach,
206 .unload = mpt_stdunload
207};
208
209static mpt_load_handler_t mpt_core_load;
210static mpt_attach_handler_t mpt_core_attach;
211static mpt_enable_handler_t mpt_core_enable;
212static mpt_reset_handler_t mpt_core_ioc_reset;
213static mpt_event_handler_t mpt_core_event;
214static mpt_shutdown_handler_t mpt_core_shutdown;
215static mpt_shutdown_handler_t mpt_core_detach;
216static mpt_unload_handler_t mpt_core_unload;
217static struct mpt_personality mpt_core_personality =
218{
219 .name = "mpt_core",
220 .load = mpt_core_load,
221 .attach = mpt_core_attach,
222 .enable = mpt_core_enable,
223 .event = mpt_core_event,
224 .reset = mpt_core_ioc_reset,
225 .shutdown = mpt_core_shutdown,
226 .detach = mpt_core_detach,
227 .unload = mpt_core_unload,
228};
229
230/*
231 * Manual declaration so that DECLARE_MPT_PERSONALITY doesn't need
232 * ordering information. We want the core to always register FIRST.
233 * Other modules are set to SI_ORDER_SECOND.
234 */
235static moduledata_t mpt_core_mod = {
236 "mpt_core", mpt_modevent, &mpt_core_personality
237};
238DECLARE_MODULE(mpt_core, mpt_core_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
239MODULE_VERSION(mpt_core, 1);
240
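/*
 * For illustration only: DECLARE_MPT_PERSONALITY is defined elsewhere,
 * but per the comment above a non-core personality would presumably
 * boil down to the same moduledata_t pattern at SI_ORDER_SECOND,
 * along these lines (module and personality names hypothetical):
 */
#if 0
static moduledata_t mpt_foo_mod = {
	"mpt_foo", mpt_modevent, &mpt_foo_personality
};
DECLARE_MODULE(mpt_foo, mpt_foo_mod, SI_SUB_DRIVERS, SI_ORDER_SECOND);
MODULE_VERSION(mpt_foo, 1);
MODULE_DEPEND(mpt_foo, mpt_core, 1, 1, 1);
#endif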
241#define MPT_PERS_ATTACHED(pers, mpt) ((mpt)->mpt_pers_mask & (0x1 << pers->id))
242
243int
244mpt_modevent(module_t mod, int type, void *data)
245{
246 struct mpt_personality *pers;
247 int error;
248
249 pers = (struct mpt_personality *)data;
250
251 error = 0;
252 switch (type) {
253 case MOD_LOAD:
254 {
255 mpt_load_handler_t **def_handler;
256 mpt_load_handler_t **pers_handler;
257 int i;
258
259 for (i = 0; i < MPT_MAX_PERSONALITIES; i++) {
260 if (mpt_personalities[i] == NULL)
261 break;
262 }
263 if (i >= MPT_MAX_PERSONALITIES) {
264 error = ENOMEM;
265 break;
266 }
267 pers->id = i;
268 mpt_personalities[i] = pers;
269
270 /* Install standard/noop handlers for any NULL entries. */
271 def_handler = MPT_PERS_FIRST_HANDLER(&mpt_default_personality);
272 pers_handler = MPT_PERS_FIRST_HANDLER(pers);
273 while (pers_handler <= MPT_PERS_LAST_HANDLER(pers)) {
274 if (*pers_handler == NULL)
275 *pers_handler = *def_handler;
276 pers_handler++;
277 def_handler++;
278 }
279
280 error = (pers->load(pers));
281 if (error != 0)
282 mpt_personalities[i] = NULL;
283 break;
284 }
285 case MOD_SHUTDOWN:
286 break;
287#if __FreeBSD_version >= 500000
288 case MOD_QUIESCE:
289 break;
290#endif
291 case MOD_UNLOAD:
292 error = pers->unload(pers);
293 mpt_personalities[pers->id] = NULL;
294 break;
295 default:
296 error = EINVAL;
297 break;
298 }
299 return (error);
300}
301
302int
303mpt_stdload(struct mpt_personality *pers)
304{
305	/* Load is always successful. */
306 return (0);
307}
308
309int
310mpt_stdprobe(struct mpt_softc *mpt)
311{
312	/* Probe is always successful. */
313 return (0);
314}
315
316int
317mpt_stdattach(struct mpt_softc *mpt)
318{
319	/* Attach is always successful. */
320 return (0);
321}
322
323int
324mpt_stdenable(struct mpt_softc *mpt)
325{
326	/* Enable is always successful. */
327 return (0);
328}
329
330void
331mpt_stdready(struct mpt_softc *mpt)
332{
333}
334
335
336int
337mpt_stdevent(struct mpt_softc *mpt, request_t *req, MSG_EVENT_NOTIFY_REPLY *msg)
338{
339 mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_stdevent: 0x%x\n", msg->Event & 0xFF);
340 /* Event was not for us. */
341 return (0);
342}
343
344void
345mpt_stdreset(struct mpt_softc *mpt, int type)
346{
347}
348
349void
350mpt_stdshutdown(struct mpt_softc *mpt)
351{
352}
353
354void
355mpt_stddetach(struct mpt_softc *mpt)
356{
357}
358
359int
360mpt_stdunload(struct mpt_personality *pers)
361{
362	/* Unload is always successful. */
363 return (0);
364}
365
366/*
367 * Post driver attachment, we may want to perform some global actions.
368 * Here is the hook to do so.
369 */
370
371static void
372mpt_postattach(void *unused)
373{
374 struct mpt_softc *mpt;
375 struct mpt_personality *pers;
376
377 TAILQ_FOREACH(mpt, &mpt_tailq, links) {
378 MPT_PERS_FOREACH(mpt, pers)
379 pers->ready(mpt);
380 }
381}
382SYSINIT(mptdev, SI_SUB_CONFIGURE, SI_ORDER_MIDDLE, mpt_postattach, NULL);
383
384
385/******************************* Bus DMA Support ******************************/
386void
387mpt_map_rquest(void *arg, bus_dma_segment_t *segs, int nseg, int error)
388{
389 struct mpt_map_info *map_info;
390
391 map_info = (struct mpt_map_info *)arg;
392 map_info->error = error;
393 map_info->phys = segs->ds_addr;
394}
395
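/*
 * Illustrative sketch of how mpt_map_rquest() is consumed: it is a
 * bus_dmamap_load() callback, so the caller (the bus attachment code)
 * passes a struct mpt_map_info and afterwards reads back the mapped
 * address and error status. The softc field and size names below are
 * assumptions for illustration only:
 */
#if 0
	struct mpt_map_info mi;
	int error;

	mi.error = 0;
	error = bus_dmamap_load(mpt->request_dmat, mpt->request_dmap,
	    mpt->request, request_mem_size, mpt_map_rquest, &mi,
	    /*flags*/0);
	if (error == 0)
		error = mi.error;	/* error reported by the callback */
	/* mi.phys now holds the bus address of the loaded memory */
#endif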
396/**************************** Reply/Event Handling ****************************/
397int
398mpt_register_handler(struct mpt_softc *mpt, mpt_handler_type type,
399 mpt_handler_t handler, uint32_t *phandler_id)
400{
401
402 switch (type) {
403 case MPT_HANDLER_REPLY:
404 {
405 u_int cbi;
406 u_int free_cbi;
407
408 if (phandler_id == NULL)
409 return (EINVAL);
410
411 free_cbi = MPT_HANDLER_ID_NONE;
412 for (cbi = 0; cbi < MPT_NUM_REPLY_HANDLERS; cbi++) {
413 /*
414 * If the same handler is registered multiple
415 * times, don't error out. Just return the
416 * index of the original registration.
417 */
418 if (mpt_reply_handlers[cbi] == handler.reply_handler) {
419 *phandler_id = MPT_CBI_TO_HID(cbi);
420 return (0);
421 }
422
423 /*
424 * Fill from the front in the hope that
425 * all registered handlers consume only a
426 * single cache line.
427 *
428 * We don't break on the first empty slot so
429 * that the full table is checked to see if
430 * this handler was previously registered.
431 */
432 if (free_cbi == MPT_HANDLER_ID_NONE &&
433 (mpt_reply_handlers[cbi]
434 == mpt_default_reply_handler))
435 free_cbi = cbi;
436 }
437 if (free_cbi == MPT_HANDLER_ID_NONE) {
438 return (ENOMEM);
439 }
440 mpt_reply_handlers[free_cbi] = handler.reply_handler;
441 *phandler_id = MPT_CBI_TO_HID(free_cbi);
442 break;
443 }
444 default:
445 mpt_prt(mpt, "mpt_register_handler unknown type %d\n", type);
446 return (EINVAL);
447 }
448 return (0);
449}
450
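/*
 * Usage sketch for the registration interface above (handler name
 * hypothetical): the handler is passed through the mpt_handler_t
 * union, and the returned id later forms the callback-index portion
 * of a request's MsgContext so mpt_intr() can route the reply back.
 * Deregistration (below) is symmetric.
 */
#if 0
	mpt_handler_t handler;
	uint32_t handler_id;
	int error;

	handler.reply_handler = my_reply_handler;
	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
	    &handler_id);
	if (error == 0) {
		/* ... issue requests tagged with handler_id ... */
		error = mpt_deregister_handler(mpt, MPT_HANDLER_REPLY,
		    handler, handler_id);
	}
#endif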
451int
452mpt_deregister_handler(struct mpt_softc *mpt, mpt_handler_type type,
453 mpt_handler_t handler, uint32_t handler_id)
454{
455
456 switch (type) {
457 case MPT_HANDLER_REPLY:
458 {
459 u_int cbi;
460
461 cbi = MPT_CBI(handler_id);
462 if (cbi >= MPT_NUM_REPLY_HANDLERS
463 || mpt_reply_handlers[cbi] != handler.reply_handler)
464 return (ENOENT);
465 mpt_reply_handlers[cbi] = mpt_default_reply_handler;
466 break;
467 }
468 default:
469 mpt_prt(mpt, "mpt_deregister_handler unknown type %d\n", type);
470 return (EINVAL);
471 }
472 return (0);
473}
474
475static int
476mpt_default_reply_handler(struct mpt_softc *mpt, request_t *req,
477 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
478{
479 mpt_prt(mpt,
480 "Default Handler Called: req=%p:%u reply_descriptor=%x frame=%p\n",
481 req, req->serno, reply_desc, reply_frame);
482
483 if (reply_frame != NULL)
484 mpt_dump_reply_frame(mpt, reply_frame);
485
486 mpt_prt(mpt, "Reply Frame Ignored\n");
487
488 return (/*free_reply*/TRUE);
489}
490
491static int
492mpt_config_reply_handler(struct mpt_softc *mpt, request_t *req,
493 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
494{
495 if (req != NULL) {
496
497 if (reply_frame != NULL) {
498 MSG_CONFIG *cfgp;
499 MSG_CONFIG_REPLY *reply;
500
501 cfgp = (MSG_CONFIG *)req->req_vbuf;
502 reply = (MSG_CONFIG_REPLY *)reply_frame;
503 req->IOCStatus = le16toh(reply_frame->IOCStatus);
504 bcopy(&reply->Header, &cfgp->Header,
505 sizeof(cfgp->Header));
506 }
507 req->state &= ~REQ_STATE_QUEUED;
508 req->state |= REQ_STATE_DONE;
509 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
510 if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
511 wakeup(req);
512 } else if ((req->state & REQ_STATE_TIMEDOUT) != 0) {
513 /*
514 * Whew- we can free this request (late completion)
515 */
516 mpt_free_request(mpt, req);
517 }
518 }
519
520 return (TRUE);
521}
522
523static int
524mpt_handshake_reply_handler(struct mpt_softc *mpt, request_t *req,
525 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
526{
527 /* Nothing to be done. */
528 return (TRUE);
529}
530
531static int
532mpt_event_reply_handler(struct mpt_softc *mpt, request_t *req,
533 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
534{
535 int free_reply;
536
537 KASSERT(reply_frame != NULL, ("null reply in mpt_event_reply_handler"));
538 KASSERT(req != NULL, ("null request in mpt_event_reply_handler"));
539
540 free_reply = TRUE;
541 switch (reply_frame->Function) {
542 case MPI_FUNCTION_EVENT_NOTIFICATION:
543 {
544 MSG_EVENT_NOTIFY_REPLY *msg;
545 struct mpt_personality *pers;
546 u_int handled;
547
548 handled = 0;
549 msg = (MSG_EVENT_NOTIFY_REPLY *)reply_frame;
550 msg->EventDataLength = le16toh(msg->EventDataLength);
551 msg->IOCStatus = le16toh(msg->IOCStatus);
552 msg->IOCLogInfo = le32toh(msg->IOCLogInfo);
553 msg->Event = le32toh(msg->Event);
554 MPT_PERS_FOREACH(mpt, pers)
555 handled += pers->event(mpt, req, msg);
556
557 if (handled == 0 && mpt->mpt_pers_mask == 0) {
558 mpt_lprt(mpt, MPT_PRT_INFO,
559 "No Handlers For Any Event Notify Frames. "
560 "Event %#x (ACK %sequired).\n",
561 msg->Event, msg->AckRequired? "r" : "not r");
562 } else if (handled == 0) {
563 mpt_lprt(mpt, MPT_PRT_WARN,
564 "Unhandled Event Notify Frame. Event %#x "
565 "(ACK %sequired).\n",
566 msg->Event, msg->AckRequired? "r" : "not r");
567 }
568
569 if (msg->AckRequired) {
570 request_t *ack_req;
571 uint32_t context;
572
573 context = req->index | MPT_REPLY_HANDLER_EVENTS;
574 ack_req = mpt_get_request(mpt, FALSE);
575 if (ack_req == NULL) {
576 struct mpt_evtf_record *evtf;
577
578 evtf = (struct mpt_evtf_record *)reply_frame;
579 evtf->context = context;
580 LIST_INSERT_HEAD(&mpt->ack_frames, evtf, links);
581 free_reply = FALSE;
582 break;
583 }
584 mpt_send_event_ack(mpt, ack_req, msg, context);
585 /*
586 * Don't check for CONTINUATION_REPLY here
587 */
588 return (free_reply);
589 }
590 break;
591 }
592 case MPI_FUNCTION_PORT_ENABLE:
593 mpt_lprt(mpt, MPT_PRT_DEBUG , "enable port reply\n");
594 break;
595 case MPI_FUNCTION_EVENT_ACK:
596 break;
597 default:
598 mpt_prt(mpt, "unknown event function: %x\n",
599 reply_frame->Function);
600 break;
601 }
602
603 /*
604 * I'm not sure that this continuation stuff works as it should.
605 *
606 * I've had FC async events occur that free the frame up because
607 * the continuation bit isn't set, and then additional async events
608 * occur using the same context. As you might imagine, this
609 * leads to Very Bad Things.
610 *
611 * Let's just be safe for now and not free them up until we figure
612 * out what's actually happening here.
613 */
614#if 0
615 if ((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0) {
616 TAILQ_REMOVE(&mpt->request_pending_list, req, links);
617 mpt_free_request(mpt, req);
618 mpt_prt(mpt, "event_reply %x for req %p:%u NOT a continuation",
619 reply_frame->Function, req, req->serno);
620 if (reply_frame->Function == MPI_FUNCTION_EVENT_NOTIFICATION) {
621 MSG_EVENT_NOTIFY_REPLY *msg =
622 (MSG_EVENT_NOTIFY_REPLY *)reply_frame;
623 mpt_prtc(mpt, " Event=0x%x AckReq=%d",
624 msg->Event, msg->AckRequired);
625 }
626 } else {
627 mpt_prt(mpt, "event_reply %x for %p:%u IS a continuation",
628 reply_frame->Function, req, req->serno);
629 if (reply_frame->Function == MPI_FUNCTION_EVENT_NOTIFICATION) {
630 MSG_EVENT_NOTIFY_REPLY *msg =
631 (MSG_EVENT_NOTIFY_REPLY *)reply_frame;
632 mpt_prtc(mpt, " Event=0x%x AckReq=%d",
633 msg->Event, msg->AckRequired);
634 }
635 mpt_prtc(mpt, "\n");
636 }
637#endif
638 return (free_reply);
639}
640
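/*
 * A note on the deferred-ACK path above: when mpt_get_request() fails,
 * the reply frame itself is parked on mpt->ack_frames instead of being
 * freed. The expectation is that the list is drained once a request
 * becomes available again, roughly like this sketch (in the driver the
 * drain would presumably happen when a request is freed):
 */
#if 0
	struct mpt_evtf_record *record;

	/* req: a request that has just become available */
	record = LIST_FIRST(&mpt->ack_frames);
	if (record != NULL) {
		LIST_REMOVE(record, links);
		mpt_send_event_ack(mpt, req,
		    (MSG_EVENT_NOTIFY_REPLY *)record, record->context);
		/* ... then hand the reply frame back via mpt_free_reply() */
	}
#endif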
641/*
642 * Process an asynchronous event from the IOC.
643 */
644static int
645mpt_core_event(struct mpt_softc *mpt, request_t *req,
646 MSG_EVENT_NOTIFY_REPLY *msg)
647{
648 mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_core_event: 0x%x\n",
649 msg->Event & 0xFF);
650 switch(msg->Event & 0xFF) {
651 case MPI_EVENT_NONE:
652 break;
653 case MPI_EVENT_LOG_DATA:
654 {
655 int i;
656
657		/* Some error occurred that LSI wants logged */
658 mpt_prt(mpt, "EvtLogData: IOCLogInfo: 0x%08x\n",
659 msg->IOCLogInfo);
660 mpt_prt(mpt, "\tEvtLogData: Event Data:");
661 for (i = 0; i < msg->EventDataLength; i++)
662 mpt_prtc(mpt, " %08x", msg->Data[i]);
663 mpt_prtc(mpt, "\n");
664 break;
665 }
666 case MPI_EVENT_EVENT_CHANGE:
667 /*
668 * This is just an acknowledgement
669 * of our mpt_send_event_request.
670 */
671 break;
672 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
673 break;
674 default:
675 return (0);
676 break;
677 }
678 return (1);
679}
680
681static void
682mpt_send_event_ack(struct mpt_softc *mpt, request_t *ack_req,
683 MSG_EVENT_NOTIFY_REPLY *msg, uint32_t context)
684{
685 MSG_EVENT_ACK *ackp;
686
687 ackp = (MSG_EVENT_ACK *)ack_req->req_vbuf;
688 memset(ackp, 0, sizeof (*ackp));
689 ackp->Function = MPI_FUNCTION_EVENT_ACK;
690 ackp->Event = htole32(msg->Event);
691 ackp->EventContext = htole32(msg->EventContext);
692 ackp->MsgContext = htole32(context);
693 mpt_check_doorbell(mpt);
694 mpt_send_cmd(mpt, ack_req);
695}
696
697/***************************** Interrupt Handling *****************************/
698void
699mpt_intr(void *arg)
700{
701 struct mpt_softc *mpt;
702 uint32_t reply_desc;
703 int ntrips = 0;
704
705 mpt = (struct mpt_softc *)arg;
706 mpt_lprt(mpt, MPT_PRT_DEBUG2, "enter mpt_intr\n");
707 MPT_LOCK_ASSERT(mpt);
708
707 while ((reply_desc = mpt_pop_reply_queue(mpt)) != MPT_REPLY_EMPTY) {
708 request_t *req;
709 MSG_DEFAULT_REPLY *reply_frame;
710 uint32_t reply_baddr;
711 uint32_t ctxt_idx;
712 u_int cb_index;
713 u_int req_index;
714 int free_rf;
715
716 req = NULL;
717 reply_frame = NULL;
718 reply_baddr = 0;
719 if ((reply_desc & MPI_ADDRESS_REPLY_A_BIT) != 0) {
720 u_int offset;
721 /*
722			 * Ensure that the reply frame is coherent.
723 */
724 reply_baddr = MPT_REPLY_BADDR(reply_desc);
725 offset = reply_baddr - (mpt->reply_phys & 0xFFFFFFFF);
726 bus_dmamap_sync_range(mpt->reply_dmat,
727 mpt->reply_dmap, offset, MPT_REPLY_SIZE,
728 BUS_DMASYNC_POSTREAD);
729 reply_frame = MPT_REPLY_OTOV(mpt, offset);
730 ctxt_idx = le32toh(reply_frame->MsgContext);
731 } else {
732 uint32_t type;
733
734 type = MPI_GET_CONTEXT_REPLY_TYPE(reply_desc);
735 ctxt_idx = reply_desc;
736 mpt_lprt(mpt, MPT_PRT_DEBUG1, "Context Reply: 0x%08x\n",
737 reply_desc);
738
739 switch (type) {
740 case MPI_CONTEXT_REPLY_TYPE_SCSI_INIT:
741 ctxt_idx &= MPI_CONTEXT_REPLY_CONTEXT_MASK;
742 break;
743 case MPI_CONTEXT_REPLY_TYPE_SCSI_TARGET:
744 ctxt_idx = GET_IO_INDEX(reply_desc);
745 if (mpt->tgt_cmd_ptrs == NULL) {
746 mpt_prt(mpt,
747 "mpt_intr: no target cmd ptrs\n");
748 reply_desc = MPT_REPLY_EMPTY;
749 break;
750 }
751 if (ctxt_idx >= mpt->tgt_cmds_allocated) {
752 mpt_prt(mpt,
753 "mpt_intr: bad tgt cmd ctxt %u\n",
754 ctxt_idx);
755 reply_desc = MPT_REPLY_EMPTY;
756 ntrips = 1000;
757 break;
758 }
759 req = mpt->tgt_cmd_ptrs[ctxt_idx];
760 if (req == NULL) {
761 mpt_prt(mpt, "no request backpointer "
762 "at index %u", ctxt_idx);
763 reply_desc = MPT_REPLY_EMPTY;
764 ntrips = 1000;
765 break;
766 }
767 /*
768 * Reformulate ctxt_idx to be just as if
769 * it were another type of context reply
770 * so the code below will find the request
771 * via indexing into the pool.
772 */
773 ctxt_idx =
774 req->index | mpt->scsi_tgt_handler_id;
775 req = NULL;
776 break;
777 case MPI_CONTEXT_REPLY_TYPE_LAN:
778 mpt_prt(mpt, "LAN CONTEXT REPLY: 0x%08x\n",
779 reply_desc);
780 reply_desc = MPT_REPLY_EMPTY;
781 break;
782 default:
783 mpt_prt(mpt, "Context Reply 0x%08x?\n", type);
784 reply_desc = MPT_REPLY_EMPTY;
785 break;
786 }
787 if (reply_desc == MPT_REPLY_EMPTY) {
788 if (ntrips++ > 1000) {
789 break;
790 }
791 continue;
792 }
793 }
794
795 cb_index = MPT_CONTEXT_TO_CBI(ctxt_idx);
796 req_index = MPT_CONTEXT_TO_REQI(ctxt_idx);
797 if (req_index < MPT_MAX_REQUESTS(mpt)) {
798 req = &mpt->request_pool[req_index];
799 } else {
800 mpt_prt(mpt, "WARN: mpt_intr index == %d (reply_desc =="
801 " 0x%x)\n", req_index, reply_desc);
802 }
803
804 free_rf = mpt_reply_handlers[cb_index](mpt, req,
805 reply_desc, reply_frame);
806
807 if (reply_frame != NULL && free_rf) {
808 mpt_free_reply(mpt, reply_baddr);
809 }
810
811 /*
812 * If we got ourselves disabled, don't get stuck in a loop
813 */
814 if (mpt->disabled) {
815 mpt_disable_ints(mpt);
816 break;
817 }
818 if (ntrips++ > 1000) {
819 break;
820 }
821 }
822 mpt_lprt(mpt, MPT_PRT_DEBUG2, "exit mpt_intr\n");
823}
824
825/******************************* Error Recovery *******************************/
826void
827mpt_complete_request_chain(struct mpt_softc *mpt, struct req_queue *chain,
828 u_int iocstatus)
829{
830 MSG_DEFAULT_REPLY ioc_status_frame;
831 request_t *req;
832
833 memset(&ioc_status_frame, 0, sizeof(ioc_status_frame));
834 ioc_status_frame.MsgLength = roundup2(sizeof(ioc_status_frame), 4);
835 ioc_status_frame.IOCStatus = iocstatus;
836 while((req = TAILQ_FIRST(chain)) != NULL) {
837 MSG_REQUEST_HEADER *msg_hdr;
838 u_int cb_index;
839
840 TAILQ_REMOVE(chain, req, links);
841 msg_hdr = (MSG_REQUEST_HEADER *)req->req_vbuf;
842 ioc_status_frame.Function = msg_hdr->Function;
843 ioc_status_frame.MsgContext = msg_hdr->MsgContext;
844 cb_index = MPT_CONTEXT_TO_CBI(le32toh(msg_hdr->MsgContext));
845 mpt_reply_handlers[cb_index](mpt, req, msg_hdr->MsgContext,
846 &ioc_status_frame);
847 }
848}
849
850/********************************* Diagnostics ********************************/
851/*
852 * Perform a diagnostic dump of a reply frame.
853 */
854void
855mpt_dump_reply_frame(struct mpt_softc *mpt, MSG_DEFAULT_REPLY *reply_frame)
856{
857 mpt_prt(mpt, "Address Reply:\n");
858 mpt_print_reply(reply_frame);
859}
860
861/******************************* Doorbell Access ******************************/
862static __inline uint32_t mpt_rd_db(struct mpt_softc *mpt);
863static __inline uint32_t mpt_rd_intr(struct mpt_softc *mpt);
864
865static __inline uint32_t
866mpt_rd_db(struct mpt_softc *mpt)
867{
868 return mpt_read(mpt, MPT_OFFSET_DOORBELL);
869}
870
871static __inline uint32_t
872mpt_rd_intr(struct mpt_softc *mpt)
873{
874 return mpt_read(mpt, MPT_OFFSET_INTR_STATUS);
875}
876
877/* Busy wait for a doorbell to be read by the IOC */
878static int
879mpt_wait_db_ack(struct mpt_softc *mpt)
880{
881 int i;
882 for (i=0; i < MPT_MAX_WAIT; i++) {
883 if (!MPT_DB_IS_BUSY(mpt_rd_intr(mpt))) {
884 maxwait_ack = i > maxwait_ack ? i : maxwait_ack;
885 return (MPT_OK);
886 }
887 DELAY(200);
888 }
889 return (MPT_FAIL);
890}
891
892/* Busy wait for a doorbell interrupt */
893static int
894mpt_wait_db_int(struct mpt_softc *mpt)
895{
896 int i;
897 for (i = 0; i < MPT_MAX_WAIT; i++) {
898 if (MPT_DB_INTR(mpt_rd_intr(mpt))) {
899 maxwait_int = i > maxwait_int ? i : maxwait_int;
900 return MPT_OK;
901 }
902 DELAY(100);
903 }
904 return (MPT_FAIL);
905}
906
907/* Warn if the IOC is not in the running state */
908void
909mpt_check_doorbell(struct mpt_softc *mpt)
910{
911 uint32_t db = mpt_rd_db(mpt);
912 if (MPT_STATE(db) != MPT_DB_STATE_RUNNING) {
913 mpt_prt(mpt, "Device not running\n");
914 mpt_print_db(db);
915 }
916}
917
918/* Wait for the IOC to transition to a given state */
919static int
920mpt_wait_state(struct mpt_softc *mpt, enum DB_STATE_BITS state)
921{
922 int i;
923
924 for (i = 0; i < MPT_MAX_WAIT; i++) {
925 uint32_t db = mpt_rd_db(mpt);
926 if (MPT_STATE(db) == state) {
927 maxwait_state = i > maxwait_state ? i : maxwait_state;
928 return (MPT_OK);
929 }
930 DELAY(100);
931 }
932 return (MPT_FAIL);
933}
934
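/*
 * Worked bound for the busy-wait loops above: MPT_MAX_WAIT is 300000
 * iterations, so mpt_wait_db_ack() spins for at most
 * 300000 * 200us = 60 seconds, while mpt_wait_db_int() and
 * mpt_wait_state() spin for at most 300000 * 100us = 30 seconds.
 * maxwait_ack, maxwait_int and maxwait_state record the largest
 * iteration count actually observed.
 */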
935
936/************************* Initialization/Configuration ************************/
937static int mpt_download_fw(struct mpt_softc *mpt);
938
939/* Issue the reset COMMAND to the IOC */
940static int
941mpt_soft_reset(struct mpt_softc *mpt)
942{
943 mpt_lprt(mpt, MPT_PRT_DEBUG, "soft reset\n");
944
945 /* Have to use hard reset if we are not in Running state */
946 if (MPT_STATE(mpt_rd_db(mpt)) != MPT_DB_STATE_RUNNING) {
947 mpt_prt(mpt, "soft reset failed: device not running\n");
948 return (MPT_FAIL);
949 }
950
951	/* If the doorbell is in use we don't have a chance of getting
952	 * a word in, since the IOC probably crashed in message
953 * processing. So don't waste our time.
954 */
955 if (MPT_DB_IS_IN_USE(mpt_rd_db(mpt))) {
956 mpt_prt(mpt, "soft reset failed: doorbell wedged\n");
957 return (MPT_FAIL);
958 }
959
960 /* Send the reset request to the IOC */
961 mpt_write(mpt, MPT_OFFSET_DOORBELL,
962 MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET << MPI_DOORBELL_FUNCTION_SHIFT);
963 if (mpt_wait_db_ack(mpt) != MPT_OK) {
964 mpt_prt(mpt, "soft reset failed: ack timeout\n");
965 return (MPT_FAIL);
966 }
967
968 /* Wait for the IOC to reload and come out of reset state */
969 if (mpt_wait_state(mpt, MPT_DB_STATE_READY) != MPT_OK) {
970 mpt_prt(mpt, "soft reset failed: device did not restart\n");
971 return (MPT_FAIL);
972 }
973
974 return MPT_OK;
975}
976
977static int
978mpt_enable_diag_mode(struct mpt_softc *mpt)
979{
980 int try;
981
982 try = 20;
983 while (--try) {
984
985 if ((mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC) & MPI_DIAG_DRWE) != 0)
986 break;
987
988 /* Enable diagnostic registers */
989 mpt_write(mpt, MPT_OFFSET_SEQUENCE, 0xFF);
990 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_1ST_KEY_VALUE);
991 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_2ND_KEY_VALUE);
992 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_3RD_KEY_VALUE);
993 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_4TH_KEY_VALUE);
994 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_5TH_KEY_VALUE);
995
996 DELAY(100000);
997 }
998 if (try == 0)
999 return (EIO);
1000 return (0);
1001}
1002
1003static void
1004mpt_disable_diag_mode(struct mpt_softc *mpt)
1005{
1006 mpt_write(mpt, MPT_OFFSET_SEQUENCE, 0xFFFFFFFF);
1007}
1008
1009/* This is a magic diagnostic reset that resets all the ARM
1010 * processors in the chip.
1011 */
1012static void
1013mpt_hard_reset(struct mpt_softc *mpt)
1014{
1015 int error;
1016 int wait;
1017 uint32_t diagreg;
1018
1019 mpt_lprt(mpt, MPT_PRT_DEBUG, "hard reset\n");
1020
1021 error = mpt_enable_diag_mode(mpt);
1022 if (error) {
1023 mpt_prt(mpt, "WARNING - Could not enter diagnostic mode !\n");
1024 mpt_prt(mpt, "Trying to reset anyway.\n");
1025 }
1026
1027 diagreg = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);
1028
1029 /*
1030 * This appears to be a workaround required for some
1031 * firmware or hardware revs.
1032 */
1033 mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, diagreg | MPI_DIAG_DISABLE_ARM);
1034 DELAY(1000);
1035
1036	/* Diag. port is now active so we can hit the reset bit */
1037 mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, diagreg | MPI_DIAG_RESET_ADAPTER);
1038
1039 /*
1040 * Ensure that the reset has finished. We delay 1ms
1041 * prior to reading the register to make sure the chip
1042 * has sufficiently completed its reset to handle register
1043 * accesses.
1044 */
1045 wait = 5000;
1046 do {
1047 DELAY(1000);
1048 diagreg = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);
1049 } while (--wait && (diagreg & MPI_DIAG_RESET_ADAPTER) == 0);
1050
1051 if (wait == 0) {
1052 mpt_prt(mpt, "WARNING - Failed hard reset! "
1053 "Trying to initialize anyway.\n");
1054 }
1055
1056 /*
1057 * If we have firmware to download, it must be loaded before
1058 * the controller will become operational. Do so now.
1059 */
1060 if (mpt->fw_image != NULL) {
1061
1062 error = mpt_download_fw(mpt);
1063
1064 if (error) {
1065 mpt_prt(mpt, "WARNING - Firmware Download Failed!\n");
1066 mpt_prt(mpt, "Trying to initialize anyway.\n");
1067 }
1068 }
1069
1070 /*
1071	 * Resetting the controller should have disabled write
1072 * access to the diagnostic registers, but disable
1073 * manually to be sure.
1074 */
1075 mpt_disable_diag_mode(mpt);
1076}
1077
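/*
 * Worked bound for the completion poll in mpt_hard_reset() above: wait
 * starts at 5000 and each pass delays 1000us, so the loop gives the
 * chip at most 5000 * 1ms = 5 seconds to come out of reset before the
 * warning fires.
 */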
1078static void
1079mpt_core_ioc_reset(struct mpt_softc *mpt, int type)
1080{
1081 /*
1082 * Complete all pending requests with a status
1083 * appropriate for an IOC reset.
1084 */
1085 mpt_complete_request_chain(mpt, &mpt->request_pending_list,
1086 MPI_IOCSTATUS_INVALID_STATE);
1087}
1088
1089
1090/*
1091 * Reset the IOC when needed. Try software command first then if needed
1092 * poke at the magic diagnostic reset. Note that a hard reset resets
1093 * *both* IOCs on dual function chips (FC929 && LSI1030) and also
1094 * fouls up the PCI configuration registers.
1095 */
1096int
1097mpt_reset(struct mpt_softc *mpt, int reinit)
1098{
1099 struct mpt_personality *pers;
1100 int ret;
1101 int retry_cnt = 0;
1102
1103 /*
1104 * Try a soft reset. If that fails, get out the big hammer.
1105 */
1106 again:
1107 if ((ret = mpt_soft_reset(mpt)) != MPT_OK) {
1108 int cnt;
1109 for (cnt = 0; cnt < 5; cnt++) {
1110 /* Failed; do a hard reset */
1111 mpt_hard_reset(mpt);
1112
1113 /*
1114 * Wait for the IOC to reload
1115 * and come out of reset state
1116 */
1117 ret = mpt_wait_state(mpt, MPT_DB_STATE_READY);
1118 if (ret == MPT_OK) {
1119 break;
1120 }
1121 /*
1122 * Okay- try to check again...
1123 */
1124 ret = mpt_wait_state(mpt, MPT_DB_STATE_READY);
1125 if (ret == MPT_OK) {
1126 break;
1127 }
1128 mpt_prt(mpt, "mpt_reset: failed hard reset (%d:%d)\n",
1129 retry_cnt, cnt);
1130 }
1131 }
1132
1133 if (retry_cnt == 0) {
1134 /*
1135 * Invoke reset handlers. We bump the reset count so
1136 * that mpt_wait_req() understands that regardless of
1137 * the specified wait condition, it should stop its wait.
1138 */
1139 mpt->reset_cnt++;
1140 MPT_PERS_FOREACH(mpt, pers)
1141 pers->reset(mpt, ret);
1142 }
1143
1144 if (reinit) {
1145 ret = mpt_enable_ioc(mpt, 1);
1146 if (ret == MPT_OK) {
1147 mpt_enable_ints(mpt);
1148 }
1149 }
1150 if (ret != MPT_OK && retry_cnt++ < 2) {
1151 goto again;
1152 }
1153 return ret;
1154}
1155
1156/* Return a command buffer to the free queue */
1157void
1158mpt_free_request(struct mpt_softc *mpt, request_t *req)
1159{
1160 request_t *nxt;
1161 struct mpt_evtf_record *record;
1162 uint32_t reply_baddr;
1163
1164 if (req == NULL || req != &mpt->request_pool[req->index]) {
1165 panic("mpt_free_request bad req ptr\n");
1166 return;
1167 }
1168 if ((nxt = req->chain) != NULL) {
1169 req->chain = NULL;
1170 mpt_free_request(mpt, nxt); /* NB: recursion */
1171 }
1172 KASSERT(req->state != REQ_STATE_FREE, ("freeing free request"));
1173 KASSERT(!(req->state & REQ_STATE_LOCKED), ("freeing locked request"));
709 while ((reply_desc = mpt_pop_reply_queue(mpt)) != MPT_REPLY_EMPTY) {
710 request_t *req;
711 MSG_DEFAULT_REPLY *reply_frame;
712 uint32_t reply_baddr;
713 uint32_t ctxt_idx;
714 u_int cb_index;
715 u_int req_index;
716 int free_rf;
717
718 req = NULL;
719 reply_frame = NULL;
720 reply_baddr = 0;
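		/*
		 * Each descriptor popped off the reply queue is either an
		 * "address reply" (MPI_ADDRESS_REPLY_A_BIT set, carrying the
		 * bus address of a full reply frame in host memory) or a
		 * "context reply" whose bits directly encode the request
		 * context, with no reply frame to read or free.
		 */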
721 if ((reply_desc & MPI_ADDRESS_REPLY_A_BIT) != 0) {
722 u_int offset;
723 /*
724			 * Ensure that the reply frame is coherent.
725 */
726 reply_baddr = MPT_REPLY_BADDR(reply_desc);
727 offset = reply_baddr - (mpt->reply_phys & 0xFFFFFFFF);
728 bus_dmamap_sync_range(mpt->reply_dmat,
729 mpt->reply_dmap, offset, MPT_REPLY_SIZE,
730 BUS_DMASYNC_POSTREAD);
731 reply_frame = MPT_REPLY_OTOV(mpt, offset);
732 ctxt_idx = le32toh(reply_frame->MsgContext);
733 } else {
734 uint32_t type;
735
736 type = MPI_GET_CONTEXT_REPLY_TYPE(reply_desc);
737 ctxt_idx = reply_desc;
738 mpt_lprt(mpt, MPT_PRT_DEBUG1, "Context Reply: 0x%08x\n",
739 reply_desc);
740
741 switch (type) {
742 case MPI_CONTEXT_REPLY_TYPE_SCSI_INIT:
743 ctxt_idx &= MPI_CONTEXT_REPLY_CONTEXT_MASK;
744 break;
745 case MPI_CONTEXT_REPLY_TYPE_SCSI_TARGET:
746 ctxt_idx = GET_IO_INDEX(reply_desc);
747 if (mpt->tgt_cmd_ptrs == NULL) {
748 mpt_prt(mpt,
749 "mpt_intr: no target cmd ptrs\n");
750 reply_desc = MPT_REPLY_EMPTY;
751 break;
752 }
753 if (ctxt_idx >= mpt->tgt_cmds_allocated) {
754 mpt_prt(mpt,
755 "mpt_intr: bad tgt cmd ctxt %u\n",
756 ctxt_idx);
757 reply_desc = MPT_REPLY_EMPTY;
758 ntrips = 1000;
759 break;
760 }
761 req = mpt->tgt_cmd_ptrs[ctxt_idx];
762 if (req == NULL) {
763 mpt_prt(mpt, "no request backpointer "
764 "at index %u", ctxt_idx);
765 reply_desc = MPT_REPLY_EMPTY;
766 ntrips = 1000;
767 break;
768 }
769 /*
770 * Reformulate ctxt_idx to be just as if
771 * it were another type of context reply
772 * so the code below will find the request
773 * via indexing into the pool.
774 */
775 ctxt_idx =
776 req->index | mpt->scsi_tgt_handler_id;
777 req = NULL;
778 break;
779 case MPI_CONTEXT_REPLY_TYPE_LAN:
780 mpt_prt(mpt, "LAN CONTEXT REPLY: 0x%08x\n",
781 reply_desc);
782 reply_desc = MPT_REPLY_EMPTY;
783 break;
784 default:
785 mpt_prt(mpt, "Context Reply 0x%08x?\n", type);
786 reply_desc = MPT_REPLY_EMPTY;
787 break;
788 }
789 if (reply_desc == MPT_REPLY_EMPTY) {
790 if (ntrips++ > 1000) {
791 break;
792 }
793 continue;
794 }
795 }
796
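		/*
		 * The context value is a composite: MPT_CONTEXT_TO_CBI()
		 * extracts the callback (reply handler) index and
		 * MPT_CONTEXT_TO_REQI() the index of the request within
		 * the request pool.
		 */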
797 cb_index = MPT_CONTEXT_TO_CBI(ctxt_idx);
798 req_index = MPT_CONTEXT_TO_REQI(ctxt_idx);
799 if (req_index < MPT_MAX_REQUESTS(mpt)) {
800 req = &mpt->request_pool[req_index];
801 } else {
802			mpt_prt(mpt, "WARN: mpt_intr index == %u (reply_desc =="
803 " 0x%x)\n", req_index, reply_desc);
804 }
805
806 free_rf = mpt_reply_handlers[cb_index](mpt, req,
807 reply_desc, reply_frame);
808
809 if (reply_frame != NULL && free_rf) {
810 mpt_free_reply(mpt, reply_baddr);
811 }
812
813 /*
814 * If we got ourselves disabled, don't get stuck in a loop
815 */
816 if (mpt->disabled) {
817 mpt_disable_ints(mpt);
818 break;
819 }
820 if (ntrips++ > 1000) {
821 break;
822 }
823 }
824 mpt_lprt(mpt, MPT_PRT_DEBUG2, "exit mpt_intr\n");
825}
826
827/******************************* Error Recovery *******************************/
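/*
 * Completion here is synthetic: we forge a minimal reply frame carrying
 * the caller-supplied IOCStatus and feed each queued request to its
 * registered reply handler, just as if the IOC itself had answered.
 */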
828void
829mpt_complete_request_chain(struct mpt_softc *mpt, struct req_queue *chain,
830 u_int iocstatus)
831{
832 MSG_DEFAULT_REPLY ioc_status_frame;
833 request_t *req;
834
835 memset(&ioc_status_frame, 0, sizeof(ioc_status_frame));
836 ioc_status_frame.MsgLength = roundup2(sizeof(ioc_status_frame), 4);
837 ioc_status_frame.IOCStatus = iocstatus;
838	while ((req = TAILQ_FIRST(chain)) != NULL) {
839 MSG_REQUEST_HEADER *msg_hdr;
840 u_int cb_index;
841
842 TAILQ_REMOVE(chain, req, links);
843 msg_hdr = (MSG_REQUEST_HEADER *)req->req_vbuf;
844 ioc_status_frame.Function = msg_hdr->Function;
845 ioc_status_frame.MsgContext = msg_hdr->MsgContext;
846 cb_index = MPT_CONTEXT_TO_CBI(le32toh(msg_hdr->MsgContext));
847 mpt_reply_handlers[cb_index](mpt, req, msg_hdr->MsgContext,
848 &ioc_status_frame);
849 }
850}
851
852/********************************* Diagnostics ********************************/
853/*
854 * Perform a diagnostic dump of a reply frame.
855 */
856void
857mpt_dump_reply_frame(struct mpt_softc *mpt, MSG_DEFAULT_REPLY *reply_frame)
858{
859 mpt_prt(mpt, "Address Reply:\n");
860 mpt_print_reply(reply_frame);
861}
862
863/******************************* Doorbell Access ******************************/
864static __inline uint32_t mpt_rd_db(struct mpt_softc *mpt);
865static __inline uint32_t mpt_rd_intr(struct mpt_softc *mpt);
866
867static __inline uint32_t
868mpt_rd_db(struct mpt_softc *mpt)
869{
870 return mpt_read(mpt, MPT_OFFSET_DOORBELL);
871}
872
873static __inline uint32_t
874mpt_rd_intr(struct mpt_softc *mpt)
875{
876 return mpt_read(mpt, MPT_OFFSET_INTR_STATUS);
877}
878
879/* Busy wait for a doorbell to be read by the IOC */
880static int
881mpt_wait_db_ack(struct mpt_softc *mpt)
882{
883 int i;
884 for (i=0; i < MPT_MAX_WAIT; i++) {
885 if (!MPT_DB_IS_BUSY(mpt_rd_intr(mpt))) {
886 maxwait_ack = i > maxwait_ack ? i : maxwait_ack;
887 return (MPT_OK);
888 }
889 DELAY(200);
890 }
891 return (MPT_FAIL);
892}
893
894/* Busy wait for a doorbell interrupt */
895static int
896mpt_wait_db_int(struct mpt_softc *mpt)
897{
898 int i;
899 for (i = 0; i < MPT_MAX_WAIT; i++) {
900 if (MPT_DB_INTR(mpt_rd_intr(mpt))) {
901 maxwait_int = i > maxwait_int ? i : maxwait_int;
902 return MPT_OK;
903 }
904 DELAY(100);
905 }
906 return (MPT_FAIL);
907}
908
909/* Check that the doorbell indicates the IOC is in the running state */
910void
911mpt_check_doorbell(struct mpt_softc *mpt)
912{
913 uint32_t db = mpt_rd_db(mpt);
914 if (MPT_STATE(db) != MPT_DB_STATE_RUNNING) {
915 mpt_prt(mpt, "Device not running\n");
916 mpt_print_db(db);
917 }
918}
919
920/* Wait for the IOC to transition to a given state */
921static int
922mpt_wait_state(struct mpt_softc *mpt, enum DB_STATE_BITS state)
923{
924 int i;
925
926 for (i = 0; i < MPT_MAX_WAIT; i++) {
927 uint32_t db = mpt_rd_db(mpt);
928 if (MPT_STATE(db) == state) {
929 maxwait_state = i > maxwait_state ? i : maxwait_state;
930 return (MPT_OK);
931 }
932 DELAY(100);
933 }
934 return (MPT_FAIL);
935}
936
937
938/************************ Initialization/Configuration ************************/
939static int mpt_download_fw(struct mpt_softc *mpt);
940
941/* Issue the reset COMMAND to the IOC */
942static int
943mpt_soft_reset(struct mpt_softc *mpt)
944{
945 mpt_lprt(mpt, MPT_PRT_DEBUG, "soft reset\n");
946
947 /* Have to use hard reset if we are not in Running state */
948 if (MPT_STATE(mpt_rd_db(mpt)) != MPT_DB_STATE_RUNNING) {
949 mpt_prt(mpt, "soft reset failed: device not running\n");
950 return (MPT_FAIL);
951 }
952
953	/* If the doorbell is in use we don't have a chance of getting
954	 * a word in, since the IOC probably crashed during message
955	 * processing, so don't waste our time.
956	 */
957 if (MPT_DB_IS_IN_USE(mpt_rd_db(mpt))) {
958 mpt_prt(mpt, "soft reset failed: doorbell wedged\n");
959 return (MPT_FAIL);
960 }
961
962 /* Send the reset request to the IOC */
963 mpt_write(mpt, MPT_OFFSET_DOORBELL,
964 MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET << MPI_DOORBELL_FUNCTION_SHIFT);
965 if (mpt_wait_db_ack(mpt) != MPT_OK) {
966 mpt_prt(mpt, "soft reset failed: ack timeout\n");
967 return (MPT_FAIL);
968 }
969
970 /* Wait for the IOC to reload and come out of reset state */
971 if (mpt_wait_state(mpt, MPT_DB_STATE_READY) != MPT_OK) {
972 mpt_prt(mpt, "soft reset failed: device did not restart\n");
973 return (MPT_FAIL);
974 }
975
976 return MPT_OK;
977}
978
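/*
 * Writing the five magic key values to the write-sequence register
 * unlocks the diagnostic register for writes; the IOC acknowledges by
 * setting MPI_DIAG_DRWE. The sequence is retried for up to two seconds
 * (20 attempts at 100ms intervals) before giving up.
 */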
979static int
980mpt_enable_diag_mode(struct mpt_softc *mpt)
981{
982 int try;
983
984 try = 20;
985 while (--try) {
986
987 if ((mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC) & MPI_DIAG_DRWE) != 0)
988 break;
989
990 /* Enable diagnostic registers */
991 mpt_write(mpt, MPT_OFFSET_SEQUENCE, 0xFF);
992 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_1ST_KEY_VALUE);
993 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_2ND_KEY_VALUE);
994 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_3RD_KEY_VALUE);
995 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_4TH_KEY_VALUE);
996 mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_5TH_KEY_VALUE);
997
998 DELAY(100000);
999 }
1000 if (try == 0)
1001 return (EIO);
1002 return (0);
1003}
1004
1005static void
1006mpt_disable_diag_mode(struct mpt_softc *mpt)
1007{
1008 mpt_write(mpt, MPT_OFFSET_SEQUENCE, 0xFFFFFFFF);
1009}
1010
1011/* This is a magic diagnostic reset that resets all the ARM
1012 * processors in the chip.
1013 */
1014static void
1015mpt_hard_reset(struct mpt_softc *mpt)
1016{
1017 int error;
1018 int wait;
1019 uint32_t diagreg;
1020
1021 mpt_lprt(mpt, MPT_PRT_DEBUG, "hard reset\n");
1022
1023 error = mpt_enable_diag_mode(mpt);
1024 if (error) {
1025 mpt_prt(mpt, "WARNING - Could not enter diagnostic mode !\n");
1026 mpt_prt(mpt, "Trying to reset anyway.\n");
1027 }
1028
1029 diagreg = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);
1030
1031 /*
1032 * This appears to be a workaround required for some
1033 * firmware or hardware revs.
1034 */
1035 mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, diagreg | MPI_DIAG_DISABLE_ARM);
1036 DELAY(1000);
1037
1038 /* Diag. port is now active so we can now hit the reset bit */
1039 mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, diagreg | MPI_DIAG_RESET_ADAPTER);
1040
1041 /*
1042 * Ensure that the reset has finished. We delay 1ms
1043 * prior to reading the register to make sure the chip
1044 * has sufficiently completed its reset to handle register
1045 * accesses.
1046 */
1047 wait = 5000;
1048 do {
1049 DELAY(1000);
1050 diagreg = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);
1051 } while (--wait && (diagreg & MPI_DIAG_RESET_ADAPTER) == 0);
1052
1053 if (wait == 0) {
1054 mpt_prt(mpt, "WARNING - Failed hard reset! "
1055 "Trying to initialize anyway.\n");
1056 }
1057
1058 /*
1059 * If we have firmware to download, it must be loaded before
1060 * the controller will become operational. Do so now.
1061 */
1062 if (mpt->fw_image != NULL) {
1063
1064 error = mpt_download_fw(mpt);
1065
1066 if (error) {
1067 mpt_prt(mpt, "WARNING - Firmware Download Failed!\n");
1068 mpt_prt(mpt, "Trying to initialize anyway.\n");
1069 }
1070 }
1071
1072 /*
1073	 * Resetting the controller should have disabled write
1074 * access to the diagnostic registers, but disable
1075 * manually to be sure.
1076 */
1077 mpt_disable_diag_mode(mpt);
1078}
1079
1080static void
1081mpt_core_ioc_reset(struct mpt_softc *mpt, int type)
1082{
1083 /*
1084 * Complete all pending requests with a status
1085 * appropriate for an IOC reset.
1086 */
1087 mpt_complete_request_chain(mpt, &mpt->request_pending_list,
1088 MPI_IOCSTATUS_INVALID_STATE);
1089}
1090
1091
1092/*
1093 * Reset the IOC when needed. Try a software command first; if that
1094 * fails, poke at the magic diagnostic reset. Note that a hard reset
1095 * resets *both* IOCs on dual function chips (FC929 && LSI1030) and
1096 * also fouls up the PCI configuration registers.
1097 */
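/*
 * The retry structure below gives the soft reset one chance per pass; a
 * failed soft reset is followed by up to five hard reset attempts, and
 * the whole pass (including re-enabling the IOC when 'reinit' is set)
 * is repeated up to three times before we give up.
 */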
1098int
1099mpt_reset(struct mpt_softc *mpt, int reinit)
1100{
1101 struct mpt_personality *pers;
1102 int ret;
1103 int retry_cnt = 0;
1104
1105 /*
1106 * Try a soft reset. If that fails, get out the big hammer.
1107 */
1108 again:
1109 if ((ret = mpt_soft_reset(mpt)) != MPT_OK) {
1110 int cnt;
1111 for (cnt = 0; cnt < 5; cnt++) {
1112 /* Failed; do a hard reset */
1113 mpt_hard_reset(mpt);
1114
1115 /*
1116 * Wait for the IOC to reload
1117 * and come out of reset state
1118 */
1119 ret = mpt_wait_state(mpt, MPT_DB_STATE_READY);
1120 if (ret == MPT_OK) {
1121 break;
1122 }
1123 /*
1124 * Okay- try to check again...
1125 */
1126 ret = mpt_wait_state(mpt, MPT_DB_STATE_READY);
1127 if (ret == MPT_OK) {
1128 break;
1129 }
1130 mpt_prt(mpt, "mpt_reset: failed hard reset (%d:%d)\n",
1131 retry_cnt, cnt);
1132 }
1133 }
1134
1135 if (retry_cnt == 0) {
1136 /*
1137 * Invoke reset handlers. We bump the reset count so
1138 * that mpt_wait_req() understands that regardless of
1139 * the specified wait condition, it should stop its wait.
1140 */
1141 mpt->reset_cnt++;
1142 MPT_PERS_FOREACH(mpt, pers)
1143 pers->reset(mpt, ret);
1144 }
1145
1146 if (reinit) {
1147 ret = mpt_enable_ioc(mpt, 1);
1148 if (ret == MPT_OK) {
1149 mpt_enable_ints(mpt);
1150 }
1151 }
1152 if (ret != MPT_OK && retry_cnt++ < 2) {
1153 goto again;
1154 }
1155 return ret;
1156}
1157
1158/* Return a command buffer to the free queue */
1159void
1160mpt_free_request(struct mpt_softc *mpt, request_t *req)
1161{
1162 request_t *nxt;
1163 struct mpt_evtf_record *record;
1164 uint32_t reply_baddr;
1165
1166 if (req == NULL || req != &mpt->request_pool[req->index]) {
1167 panic("mpt_free_request bad req ptr\n");
1168 return;
1169 }
1170 if ((nxt = req->chain) != NULL) {
1171 req->chain = NULL;
1172 mpt_free_request(mpt, nxt); /* NB: recursion */
1173 }
1174 KASSERT(req->state != REQ_STATE_FREE, ("freeing free request"));
1175 KASSERT(!(req->state & REQ_STATE_LOCKED), ("freeing locked request"));
1176 MPT_LOCK_ASSERT(mpt);
1177 KASSERT(mpt_req_on_free_list(mpt, req) == 0,
1178 ("mpt_free_request: req %p:%u func %x already on freelist",
1179 req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
1180 KASSERT(mpt_req_on_pending_list(mpt, req) == 0,
1181 ("mpt_free_request: req %p:%u func %x on pending list",
1182 req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
1183#ifdef INVARIANTS
1184 mpt_req_not_spcl(mpt, req, "mpt_free_request", __LINE__);
1185#endif
1186
1187 req->ccb = NULL;
1188 if (LIST_EMPTY(&mpt->ack_frames)) {
1189 /*
1190 * Insert free ones at the tail
1191 */
1192 req->serno = 0;
1193 req->state = REQ_STATE_FREE;
1194#ifdef INVARIANTS
1195 memset(req->req_vbuf, 0xff, sizeof (MSG_REQUEST_HEADER));
1196#endif
1197 TAILQ_INSERT_TAIL(&mpt->request_free_list, req, links);
1198 if (mpt->getreqwaiter != 0) {
1199 mpt->getreqwaiter = 0;
1200 wakeup(&mpt->request_free_list);
1201 }
1202 return;
1203 }
1204
1205 /*
1206 * Process an ack frame deferred due to resource shortage.
1207 */
1208 record = LIST_FIRST(&mpt->ack_frames);
1209 LIST_REMOVE(record, links);
1210 req->state = REQ_STATE_ALLOCATED;
1211 mpt_assign_serno(mpt, req);
1212 mpt_send_event_ack(mpt, req, &record->reply, record->context);
1213 reply_baddr = (uint32_t)((uint8_t *)record - mpt->reply)
1214 + (mpt->reply_phys & 0xFFFFFFFF);
1215 mpt_free_reply(mpt, reply_baddr);
1216}
1217
1218/* Get a command buffer from the free queue */
1219request_t *
1220mpt_get_request(struct mpt_softc *mpt, int sleep_ok)
1221{
1222 request_t *req;
1223
1224retry:
1225 MPT_LOCK_ASSERT(mpt);
1224 req = TAILQ_FIRST(&mpt->request_free_list);
1225 if (req != NULL) {
1226 KASSERT(req == &mpt->request_pool[req->index],
1227 ("mpt_get_request: corrupted request free list\n"));
1228 KASSERT(req->state == REQ_STATE_FREE,
1229 ("req %p:%u not free on free list %x index %d function %x",
1230 req, req->serno, req->state, req->index,
1231 ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
1232 TAILQ_REMOVE(&mpt->request_free_list, req, links);
1233 req->state = REQ_STATE_ALLOCATED;
1234 req->chain = NULL;
1235 mpt_assign_serno(mpt, req);
1236 } else if (sleep_ok != 0) {
1237 mpt->getreqwaiter = 1;
1238 mpt_sleep(mpt, &mpt->request_free_list, PUSER, "mptgreq", 0);
1239 goto retry;
1240 }
1241 return (req);
1242}
1243
1244/* Pass the command to the IOC */
1245void
1246mpt_send_cmd(struct mpt_softc *mpt, request_t *req)
1247{
1248 if (mpt->verbose > MPT_PRT_DEBUG2) {
1249 mpt_dump_request(mpt, req);
1250 }
1251 bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
1252 BUS_DMASYNC_PREWRITE);
1253 req->state |= REQ_STATE_QUEUED;
1254 KASSERT(mpt_req_on_free_list(mpt, req) == 0,
1255 ("req %p:%u func %x on freelist list in mpt_send_cmd",
1256 req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
1257 KASSERT(mpt_req_on_pending_list(mpt, req) == 0,
1258 ("req %p:%u func %x already on pending list in mpt_send_cmd",
1259 req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
1260 TAILQ_INSERT_HEAD(&mpt->request_pending_list, req, links);
1261 mpt_write(mpt, MPT_OFFSET_REQUEST_Q, (uint32_t) req->req_pbuf);
1262}
1263
1264/*
1265 * Wait for a request to complete.
1266 *
1267 * Inputs:
1268 * mpt softc of controller executing request
1269 * req request to wait for
1270 * sleep_ok nonzero implies may sleep in this context
1271 * time_ms timeout in ms. 0 implies no timeout.
1272 *
1273 * Return Values:
1274 * 0 Request completed
1275 * non-0 Timeout fired before request completion.
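 *		(EIO if the controller was reset while we waited,
 *		ETIMEDOUT if the timeout itself expired)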
1276 */
1277int
1278mpt_wait_req(struct mpt_softc *mpt, request_t *req,
1279 mpt_req_state_t state, mpt_req_state_t mask,
1280 int sleep_ok, int time_ms)
1281{
1282 int error;
1283 int timeout;
1284 u_int saved_cnt;
1285
1286 /*
1287 * timeout is in ms. 0 indicates infinite wait.
1288 * Convert to ticks or 500us units depending on
1289 * our sleep mode.
1290 */
1291 if (sleep_ok != 0) {
1292 timeout = (time_ms * hz) / 1000;
1293 } else {
1294 timeout = time_ms * 2;
1295 }
1296 req->state |= REQ_STATE_NEED_WAKEUP;
1297 mask &= ~REQ_STATE_NEED_WAKEUP;
1298 saved_cnt = mpt->reset_cnt;
1299 while ((req->state & mask) != state && mpt->reset_cnt == saved_cnt) {
1300 if (sleep_ok != 0) {
1301 error = mpt_sleep(mpt, req, PUSER, "mptreq", timeout);
1302 if (error == EWOULDBLOCK) {
1303 timeout = 0;
1304 break;
1305 }
1306 } else {
1307 if (time_ms != 0 && --timeout == 0) {
1308 break;
1309 }
1310 DELAY(500);
1311 mpt_intr(mpt);
1312 }
1313 }
1314 req->state &= ~REQ_STATE_NEED_WAKEUP;
1315 if (mpt->reset_cnt != saved_cnt) {
1316 return (EIO);
1317 }
1318 if (time_ms && timeout <= 0) {
1319 MSG_REQUEST_HEADER *msg_hdr = req->req_vbuf;
1320 req->state |= REQ_STATE_TIMEDOUT;
1321 mpt_prt(mpt, "mpt_wait_req(%x) timed out\n", msg_hdr->Function);
1322 return (ETIMEDOUT);
1323 }
1324 return (0);
1325}
1326
1327/*
1328 * Send a command to the IOC via the handshake register.
1329 *
1330 * Only done at initialization time and for certain unusual
1331 * commands such as device/bus reset as specified by LSI.
1332 */
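/*
 * The handshake sequence, as implemented below: post the function code
 * and dword count to the doorbell, wait for the IOC to raise and then
 * acknowledge the doorbell interrupt, and stream the command out one
 * 32-bit word at a time, waiting for an acknowledgement after each.
 */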
1333int
1334mpt_send_handshake_cmd(struct mpt_softc *mpt, size_t len, void *cmd)
1335{
1336 int i;
1337 uint32_t data, *data32;
1338
1339 /* Check condition of the IOC */
1340 data = mpt_rd_db(mpt);
1341 if ((MPT_STATE(data) != MPT_DB_STATE_READY
1342 && MPT_STATE(data) != MPT_DB_STATE_RUNNING
1343 && MPT_STATE(data) != MPT_DB_STATE_FAULT)
1344 || MPT_DB_IS_IN_USE(data)) {
1345 mpt_prt(mpt, "handshake aborted - invalid doorbell state\n");
1346 mpt_print_db(data);
1347 return (EBUSY);
1348 }
1349
1350 /* We move things in 32 bit chunks */
1351 len = (len + 3) >> 2;
1352 data32 = cmd;
1353
1354	/* Clear any leftover pending doorbell interrupts */
1355 if (MPT_DB_INTR(mpt_rd_intr(mpt)))
1356 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
1357
1358 /*
1359 * Tell the handshake reg. we are going to send a command
1360 * and how long it is going to be.
1361 */
1362 data = (MPI_FUNCTION_HANDSHAKE << MPI_DOORBELL_FUNCTION_SHIFT) |
1363 (len << MPI_DOORBELL_ADD_DWORDS_SHIFT);
1364 mpt_write(mpt, MPT_OFFSET_DOORBELL, data);
1365
1366 /* Wait for the chip to notice */
1367 if (mpt_wait_db_int(mpt) != MPT_OK) {
1368 mpt_prt(mpt, "mpt_send_handshake_cmd: db ignored\n");
1369 return (ETIMEDOUT);
1370 }
1371
1372 /* Clear the interrupt */
1373 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
1374
1375 if (mpt_wait_db_ack(mpt) != MPT_OK) {
1376 mpt_prt(mpt, "mpt_send_handshake_cmd: db ack timed out\n");
1377 return (ETIMEDOUT);
1378 }
1379
1380 /* Send the command */
1381 for (i = 0; i < len; i++) {
1382 mpt_write(mpt, MPT_OFFSET_DOORBELL, htole32(*data32++));
1383 if (mpt_wait_db_ack(mpt) != MPT_OK) {
1384 mpt_prt(mpt,
1385 "mpt_send_handshake_cmd: timeout @ index %d\n", i);
1386 return (ETIMEDOUT);
1387 }
1388 }
1389 return MPT_OK;
1390}
1391
1392/* Get the response from the handshake register */
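/*
 * The reply arrives through the doorbell 16 bits at a time, and each
 * datum must be acknowledged by clearing the interrupt status before
 * the next becomes available. Words beyond the caller's buffer are
 * drained but discarded.
 */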
1393int
1394mpt_recv_handshake_reply(struct mpt_softc *mpt, size_t reply_len, void *reply)
1395{
1396 int left, reply_left;
1397 u_int16_t *data16;
1398 uint32_t data;
1399 MSG_DEFAULT_REPLY *hdr;
1400
1401 /* We move things out in 16 bit chunks */
1402 reply_len >>= 1;
1403 data16 = (u_int16_t *)reply;
1404
1405 hdr = (MSG_DEFAULT_REPLY *)reply;
1406
1407 /* Get first word */
1408 if (mpt_wait_db_int(mpt) != MPT_OK) {
1409 mpt_prt(mpt, "mpt_recv_handshake_cmd timeout1\n");
1410 return ETIMEDOUT;
1411 }
1412 data = mpt_read(mpt, MPT_OFFSET_DOORBELL);
1413 *data16++ = le16toh(data & MPT_DB_DATA_MASK);
1414 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
1415
1416 /* Get Second Word */
1417 if (mpt_wait_db_int(mpt) != MPT_OK) {
1418 mpt_prt(mpt, "mpt_recv_handshake_cmd timeout2\n");
1419 return ETIMEDOUT;
1420 }
1421 data = mpt_read(mpt, MPT_OFFSET_DOORBELL);
1422 *data16++ = le16toh(data & MPT_DB_DATA_MASK);
1423 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
1424
1425 /*
1426 * With the second word, we can now look at the length.
1427 * Warn about a reply that's too short (except for IOC FACTS REPLY)
1428 */
1429 if ((reply_len >> 1) != hdr->MsgLength &&
1430 (hdr->Function != MPI_FUNCTION_IOC_FACTS)){
1431#if __FreeBSD_version >= 500000
1432 mpt_prt(mpt, "reply length does not match message length: "
1433 "got %x; expected %zx for function %x\n",
1434 hdr->MsgLength << 2, reply_len << 1, hdr->Function);
1435#else
1436 mpt_prt(mpt, "reply length does not match message length: "
1437 "got %x; expected %x for function %x\n",
1438 hdr->MsgLength << 2, reply_len << 1, hdr->Function);
1439#endif
1440 }
1441
1442 /* Get rest of the reply; but don't overflow the provided buffer */
1443 left = (hdr->MsgLength << 1) - 2;
1444 reply_left = reply_len - 2;
1445 while (left--) {
1446 u_int16_t datum;
1447
1448 if (mpt_wait_db_int(mpt) != MPT_OK) {
1449 mpt_prt(mpt, "mpt_recv_handshake_cmd timeout3\n");
1450 return ETIMEDOUT;
1451 }
1452 data = mpt_read(mpt, MPT_OFFSET_DOORBELL);
1453 datum = le16toh(data & MPT_DB_DATA_MASK);
1454
1455 if (reply_left-- > 0)
1456 *data16++ = datum;
1457
1458 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
1459 }
1460
1461 /* One more wait & clear at the end */
1462 if (mpt_wait_db_int(mpt) != MPT_OK) {
1463 mpt_prt(mpt, "mpt_recv_handshake_cmd timeout4\n");
1464 return ETIMEDOUT;
1465 }
1466 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
1467
1468 if ((hdr->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
1469 if (mpt->verbose >= MPT_PRT_TRACE)
1470 mpt_print_reply(hdr);
1471 return (MPT_FAIL | hdr->IOCStatus);
1472 }
1473
1474 return (0);
1475}
1476
1477static int
1478mpt_get_iocfacts(struct mpt_softc *mpt, MSG_IOC_FACTS_REPLY *freplp)
1479{
1480 MSG_IOC_FACTS f_req;
1481 int error;
1482
1483 memset(&f_req, 0, sizeof f_req);
1484 f_req.Function = MPI_FUNCTION_IOC_FACTS;
1485 f_req.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
1486 error = mpt_send_handshake_cmd(mpt, sizeof f_req, &f_req);
1487 if (error) {
1488 return(error);
1489 }
1490 error = mpt_recv_handshake_reply(mpt, sizeof (*freplp), freplp);
1491 return (error);
1492}
1493
1494static int
1495mpt_get_portfacts(struct mpt_softc *mpt, U8 port, MSG_PORT_FACTS_REPLY *freplp)
1496{
1497 MSG_PORT_FACTS f_req;
1498 int error;
1499
1500 memset(&f_req, 0, sizeof f_req);
1501 f_req.Function = MPI_FUNCTION_PORT_FACTS;
1502 f_req.PortNumber = port;
1503 f_req.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
1504 error = mpt_send_handshake_cmd(mpt, sizeof f_req, &f_req);
1505 if (error) {
1506 return(error);
1507 }
1508 error = mpt_recv_handshake_reply(mpt, sizeof (*freplp), freplp);
1509 return (error);
1510}
1511
1512/*
1513 * Send the initialization request. This is where we specify how many
1514 * SCSI busses and how many devices per bus we wish to emulate.
1515 * This is also the command that specifies the max size of the reply
1516 * frames from the IOC that we will be allocating.
1517 */
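/*
 * A sketch of typical usage ('who' is one of the MPI WhoInit codes,
 * e.g. MPI_WHOINIT_HOST_DRIVER for a host driver):
 *
 *	if (mpt_send_ioc_init(mpt, MPI_WHOINIT_HOST_DRIVER) != 0)
 *		... fail the attach ...
 */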
1518static int
1519mpt_send_ioc_init(struct mpt_softc *mpt, uint32_t who)
1520{
1521 int error = 0;
1522 MSG_IOC_INIT init;
1523 MSG_IOC_INIT_REPLY reply;
1524
1525 memset(&init, 0, sizeof init);
1526 init.WhoInit = who;
1527 init.Function = MPI_FUNCTION_IOC_INIT;
1528 init.MaxDevices = 0; /* at least 256 devices per bus */
1529 init.MaxBuses = 16; /* at least 16 busses */
1530
1531 init.MsgVersion = htole16(MPI_VERSION);
1532 init.HeaderVersion = htole16(MPI_HEADER_VERSION);
1533 init.ReplyFrameSize = htole16(MPT_REPLY_SIZE);
1534 init.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
1535
1536 if ((error = mpt_send_handshake_cmd(mpt, sizeof init, &init)) != 0) {
1537 return(error);
1538 }
1539
1540 error = mpt_recv_handshake_reply(mpt, sizeof reply, &reply);
1541 return (error);
1542}
1543
1544
1545/*
1546 * Utility routine to read and write configuration headers and pages
1547 */
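/*
 * The page buffer is described by a single 32-bit simple SGE; the
 * direction flag is derived from the action, so page writes move data
 * host-to-IOC while header fetches and page reads move it IOC-to-host.
 */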
1548int
1549mpt_issue_cfg_req(struct mpt_softc *mpt, request_t *req, u_int Action,
1550 u_int PageVersion, u_int PageLength, u_int PageNumber,
1551 u_int PageType, uint32_t PageAddress, bus_addr_t addr,
1552 bus_size_t len, int sleep_ok, int timeout_ms)
1553{
1554 MSG_CONFIG *cfgp;
1555 SGE_SIMPLE32 *se;
1556
1557 cfgp = req->req_vbuf;
1558 memset(cfgp, 0, sizeof *cfgp);
1559 cfgp->Action = Action;
1560 cfgp->Function = MPI_FUNCTION_CONFIG;
1561 cfgp->Header.PageVersion = PageVersion;
1562 cfgp->Header.PageLength = PageLength;
1563 cfgp->Header.PageNumber = PageNumber;
1564 cfgp->Header.PageType = PageType;
1565 cfgp->PageAddress = htole32(PageAddress);
1566 se = (SGE_SIMPLE32 *)&cfgp->PageBufferSGE;
1567 se->Address = htole32(addr);
1568 MPI_pSGE_SET_LENGTH(se, len);
1569 MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
1570 MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
1571 MPI_SGE_FLAGS_END_OF_LIST |
1572 ((Action == MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT
1573 || Action == MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM)
1574 ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST)));
1575 se->FlagsLength = htole32(se->FlagsLength);
1576 cfgp->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_CONFIG);
1577
1578 mpt_check_doorbell(mpt);
1579 mpt_send_cmd(mpt, req);
1580 return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
1581 sleep_ok, timeout_ms));
1582}
1583
1584
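/*
 * A sketch of the usual two-step pattern (mpt_read_config_info_ioc
 * below is a real caller): fetch the header first, size a buffer from
 * it, then fetch the page proper.
 *
 *	CONFIG_PAGE_HEADER hdr;
 *
 *	if (mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC, 2, 0,
 *	    &hdr, FALSE, 5000) == 0) {
 *		... allocate hdr.PageLength * sizeof(uint32_t) bytes
 *		... and read the page with mpt_read_cfg_page()
 *	}
 */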
1585int
1586mpt_read_cfg_header(struct mpt_softc *mpt, int PageType, int PageNumber,
1587 uint32_t PageAddress, CONFIG_PAGE_HEADER *rslt,
1588 int sleep_ok, int timeout_ms)
1589{
1590 request_t *req;
1591 MSG_CONFIG *cfgp;
1592 int error;
1593
1594 req = mpt_get_request(mpt, sleep_ok);
1595 if (req == NULL) {
1596 mpt_prt(mpt, "mpt_read_cfg_header: Get request failed!\n");
1597 return (ENOMEM);
1598 }
1599
1600 error = mpt_issue_cfg_req(mpt, req, MPI_CONFIG_ACTION_PAGE_HEADER,
1601 /*PageVersion*/0, /*PageLength*/0, PageNumber,
1602 PageType, PageAddress, /*addr*/0, /*len*/0,
1603 sleep_ok, timeout_ms);
1604 if (error != 0) {
1605 /*
1606 * Leave the request. Without resetting the chip, it's
1607 * still owned by it and we'll just get into trouble
1608 * freeing it now. Mark it as abandoned so that if it
1609 * shows up later it can be freed.
1610 */
1611 mpt_prt(mpt, "read_cfg_header timed out\n");
1612 return (ETIMEDOUT);
1613 }
1614
1615 switch (req->IOCStatus & MPI_IOCSTATUS_MASK) {
1616 case MPI_IOCSTATUS_SUCCESS:
1617 cfgp = req->req_vbuf;
1618 bcopy(&cfgp->Header, rslt, sizeof(*rslt));
1619 error = 0;
1620 break;
1621 case MPI_IOCSTATUS_CONFIG_INVALID_PAGE:
1622 mpt_lprt(mpt, MPT_PRT_DEBUG,
1623 "Invalid Page Type %d Number %d Addr 0x%0x\n",
1624 PageType, PageNumber, PageAddress);
1625 error = EINVAL;
1626 break;
1627 default:
1628 mpt_prt(mpt, "mpt_read_cfg_header: Config Info Status %x\n",
1629 req->IOCStatus);
1630 error = EIO;
1631 break;
1632 }
1633 mpt_free_request(mpt, req);
1634 return (error);
1635}
1636
1637int
1638mpt_read_cfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress,
1639 CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok,
1640 int timeout_ms)
1641{
1642 request_t *req;
1643 int error;
1644
1645 req = mpt_get_request(mpt, sleep_ok);
1646 if (req == NULL) {
1647 mpt_prt(mpt, "mpt_read_cfg_page: Get request failed!\n");
1648 return (-1);
1649 }
1650
1651 error = mpt_issue_cfg_req(mpt, req, Action, hdr->PageVersion,
1652 hdr->PageLength, hdr->PageNumber,
1653 hdr->PageType & MPI_CONFIG_PAGETYPE_MASK,
1654 PageAddress, req->req_pbuf + MPT_RQSL(mpt),
1655 len, sleep_ok, timeout_ms);
1656 if (error != 0) {
1657 mpt_prt(mpt, "read_cfg_page(%d) timed out\n", Action);
1658 return (-1);
1659 }
1660
1661 if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
1662 mpt_prt(mpt, "mpt_read_cfg_page: Config Info Status %x\n",
1663 req->IOCStatus);
1664 mpt_free_request(mpt, req);
1665 return (-1);
1666 }
1667 bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
1668 BUS_DMASYNC_POSTREAD);
1669 memcpy(hdr, ((uint8_t *)req->req_vbuf)+MPT_RQSL(mpt), len);
1670 mpt_free_request(mpt, req);
1671 return (0);
1672}
1673
1674int
1675mpt_write_cfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress,
1676 CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok,
1677 int timeout_ms)
1678{
1679 request_t *req;
1680 u_int hdr_attr;
1681 int error;
1682
1683 hdr_attr = hdr->PageType & MPI_CONFIG_PAGEATTR_MASK;
1684 if (hdr_attr != MPI_CONFIG_PAGEATTR_CHANGEABLE &&
1685 hdr_attr != MPI_CONFIG_PAGEATTR_PERSISTENT) {
1686 mpt_prt(mpt, "page type 0x%x not changeable\n",
1687 hdr->PageType & MPI_CONFIG_PAGETYPE_MASK);
1688 return (-1);
1689 }
1690
1691#if 0
1692 /*
1693 * We shouldn't mask off other bits here.
1694 */
1695 hdr->PageType &= MPI_CONFIG_PAGETYPE_MASK;
1696#endif
1697
1698 req = mpt_get_request(mpt, sleep_ok);
1699 if (req == NULL)
1700 return (-1);
1701
1702 memcpy(((caddr_t)req->req_vbuf) + MPT_RQSL(mpt), hdr, len);
1703
1704 /*
1705 * There isn't any point in restoring stripped out attributes
1706 * if you then mask them going down to issue the request.
1707 */
1708
1709#if 0
1710 /* Restore stripped out attributes */
1711 hdr->PageType |= hdr_attr;
1712
1713 error = mpt_issue_cfg_req(mpt, req, Action, hdr->PageVersion,
1714 hdr->PageLength, hdr->PageNumber,
1715 hdr->PageType & MPI_CONFIG_PAGETYPE_MASK,
1716 PageAddress, req->req_pbuf + MPT_RQSL(mpt),
1717 len, sleep_ok, timeout_ms);
1718#else
1719 error = mpt_issue_cfg_req(mpt, req, Action, hdr->PageVersion,
1720 hdr->PageLength, hdr->PageNumber,
1721 hdr->PageType, PageAddress,
1722 req->req_pbuf + MPT_RQSL(mpt),
1723 len, sleep_ok, timeout_ms);
1724#endif
1725 if (error != 0) {
1726 mpt_prt(mpt, "mpt_write_cfg_page timed out\n");
1727 return (-1);
1728 }
1729
1730 if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
1731 mpt_prt(mpt, "mpt_write_cfg_page: Config Info Status %x\n",
1732 req->IOCStatus);
1733 mpt_free_request(mpt, req);
1734 return (-1);
1735 }
1736 mpt_free_request(mpt, req);
1737 return (0);
1738}
1739
1740/*
1741 * Read IOC configuration information
1742 */
1743static int
1744mpt_read_config_info_ioc(struct mpt_softc *mpt)
1745{
1746 CONFIG_PAGE_HEADER hdr;
1747 struct mpt_raid_volume *mpt_raid;
1748 int rv;
1749 int i;
1750 size_t len;
1751
1752 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC,
1753 2, 0, &hdr, FALSE, 5000);
1754 /*
1755 * If it's an invalid page, so what? Not a supported function....
1756 */
1757 if (rv == EINVAL) {
1758 return (0);
1759 }
1760 if (rv) {
1761 return (rv);
1762 }
1763
1764 mpt_lprt(mpt, MPT_PRT_DEBUG,
1765 "IOC Page 2 Header: Version %x len %x PageNumber %x PageType %x\n",
1766 hdr.PageVersion, hdr.PageLength << 2,
1767 hdr.PageNumber, hdr.PageType);
1768
1769 len = hdr.PageLength * sizeof(uint32_t);
1770 mpt->ioc_page2 = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
1771 if (mpt->ioc_page2 == NULL) {
1772 mpt_prt(mpt, "unable to allocate memory for IOC page 2\n");
1773 mpt_raid_free_mem(mpt);
1774 return (ENOMEM);
1775 }
1776 memcpy(&mpt->ioc_page2->Header, &hdr, sizeof(hdr));
1777 rv = mpt_read_cur_cfg_page(mpt, 0,
1778 &mpt->ioc_page2->Header, len, FALSE, 5000);
1779 if (rv) {
1780 mpt_prt(mpt, "failed to read IOC Page 2\n");
1781 mpt_raid_free_mem(mpt);
1782 return (EIO);
1783 }
1784 mpt2host_config_page_ioc2(mpt->ioc_page2);
1785
1786 if (mpt->ioc_page2->CapabilitiesFlags != 0) {
1787 uint32_t mask;
1788
1789 mpt_prt(mpt, "Capabilities: (");
1790 for (mask = 1; mask != 0; mask <<= 1) {
1791 if ((mpt->ioc_page2->CapabilitiesFlags & mask) == 0) {
1792 continue;
1793 }
1794 switch (mask) {
1795 case MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT:
1796 mpt_prtc(mpt, " RAID-0");
1797 break;
1798 case MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT:
1799 mpt_prtc(mpt, " RAID-1E");
1800 break;
1801 case MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT:
1802 mpt_prtc(mpt, " RAID-1");
1803 break;
1804 case MPI_IOCPAGE2_CAP_FLAGS_SES_SUPPORT:
1805 mpt_prtc(mpt, " SES");
1806 break;
1807 case MPI_IOCPAGE2_CAP_FLAGS_SAFTE_SUPPORT:
1808 mpt_prtc(mpt, " SAFTE");
1809 break;
1810 case MPI_IOCPAGE2_CAP_FLAGS_CROSS_CHANNEL_SUPPORT:
1811				mpt_prtc(mpt, " Multi-Channel-Arrays");
				break;
1812			default:
1813 break;
1814 }
1815 }
1816 mpt_prtc(mpt, " )\n");
1817 if ((mpt->ioc_page2->CapabilitiesFlags
1818 & (MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT
1819 | MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT
1820 | MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT)) != 0) {
1821 mpt_prt(mpt, "%d Active Volume%s(%d Max)\n",
1822 mpt->ioc_page2->NumActiveVolumes,
1823 mpt->ioc_page2->NumActiveVolumes != 1
1824 ? "s " : " ",
1825 mpt->ioc_page2->MaxVolumes);
1826 mpt_prt(mpt, "%d Hidden Drive Member%s(%d Max)\n",
1827 mpt->ioc_page2->NumActivePhysDisks,
1828 mpt->ioc_page2->NumActivePhysDisks != 1
1829 ? "s " : " ",
1830 mpt->ioc_page2->MaxPhysDisks);
1831 }
1832 }
1833
1834 len = mpt->ioc_page2->MaxVolumes * sizeof(struct mpt_raid_volume);
1835 mpt->raid_volumes = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
1836 if (mpt->raid_volumes == NULL) {
1837 mpt_prt(mpt, "Could not allocate RAID volume data\n");
1838 mpt_raid_free_mem(mpt);
1839 return (ENOMEM);
1840 }
1841
1842 /*
1843 * Copy critical data out of ioc_page2 so that we can
1844 * safely refresh the page without windows of unreliable
1845 * data.
1846 */
1847 mpt->raid_max_volumes = mpt->ioc_page2->MaxVolumes;
1848
1849 len = sizeof(*mpt->raid_volumes->config_page) +
1850 (sizeof (RAID_VOL0_PHYS_DISK) * (mpt->ioc_page2->MaxPhysDisks - 1));
1851 for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1852 mpt_raid = &mpt->raid_volumes[i];
1853 mpt_raid->config_page =
1854 malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
1855 if (mpt_raid->config_page == NULL) {
1856 mpt_prt(mpt, "Could not allocate RAID page data\n");
1857 mpt_raid_free_mem(mpt);
1858 return (ENOMEM);
1859 }
1860 }
1861 mpt->raid_page0_len = len;
1862
1863 len = mpt->ioc_page2->MaxPhysDisks * sizeof(struct mpt_raid_disk);
1864 mpt->raid_disks = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
1865 if (mpt->raid_disks == NULL) {
1866 mpt_prt(mpt, "Could not allocate RAID disk data\n");
1867 mpt_raid_free_mem(mpt);
1868 return (ENOMEM);
1869 }
1870 mpt->raid_max_disks = mpt->ioc_page2->MaxPhysDisks;
1871
1872 /*
1873 * Load page 3.
1874 */
1875 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC,
1876 3, 0, &hdr, FALSE, 5000);
1877 if (rv) {
1878 mpt_raid_free_mem(mpt);
1879 return (EIO);
1880 }
1881
1882 mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC Page 3 Header: %x %x %x %x\n",
1883 hdr.PageVersion, hdr.PageLength, hdr.PageNumber, hdr.PageType);
1884
1885 len = hdr.PageLength * sizeof(uint32_t);
1886 mpt->ioc_page3 = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
1887 if (mpt->ioc_page3 == NULL) {
1888 mpt_prt(mpt, "unable to allocate memory for IOC page 3\n");
1889 mpt_raid_free_mem(mpt);
1890 return (ENOMEM);
1891 }
1892 memcpy(&mpt->ioc_page3->Header, &hdr, sizeof(hdr));
1893 rv = mpt_read_cur_cfg_page(mpt, 0,
1894 &mpt->ioc_page3->Header, len, FALSE, 5000);
1895 if (rv) {
1896 mpt_raid_free_mem(mpt);
1897 return (EIO);
1898 }
1899 mpt_raid_wakeup(mpt);
1900 return (0);
1901}
1902
1903/*
1904 * Enable IOC port
1905 */
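/*
 * SAS and FC parts get a much longer timeout below (30s vs. 3s),
 * presumably because port enable also kicks off link bring-up and
 * device discovery on those controllers.
 */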
1906static int
1907mpt_send_port_enable(struct mpt_softc *mpt, int port)
1908{
1909 request_t *req;
1910 MSG_PORT_ENABLE *enable_req;
1911 int error;
1912
1913 req = mpt_get_request(mpt, /*sleep_ok*/FALSE);
1914 if (req == NULL)
1915 return (-1);
1916
1917 enable_req = req->req_vbuf;
1918 memset(enable_req, 0, MPT_RQSL(mpt));
1919
1920 enable_req->Function = MPI_FUNCTION_PORT_ENABLE;
1921 enable_req->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_CONFIG);
1922 enable_req->PortNumber = port;
1923
1924 mpt_check_doorbell(mpt);
1925 mpt_lprt(mpt, MPT_PRT_DEBUG, "enabling port %d\n", port);
1926
1927 mpt_send_cmd(mpt, req);
1928 error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
1929 FALSE, (mpt->is_sas || mpt->is_fc)? 30000 : 3000);
1930 if (error != 0) {
1931 mpt_prt(mpt, "port %d enable timed out\n", port);
1932 return (-1);
1933 }
1934 mpt_free_request(mpt, req);
1935 mpt_lprt(mpt, MPT_PRT_DEBUG, "enabled port %d\n", port);
1936 return (0);
1937}
1938
1939/*
1940 * Enable/Disable asynchronous event reporting.
1941 */
1942static int
1943mpt_send_event_request(struct mpt_softc *mpt, int onoff)
1944{
1945 request_t *req;
1946 MSG_EVENT_NOTIFY *enable_req;
1947
1948 req = mpt_get_request(mpt, FALSE);
1949 if (req == NULL) {
1950 return (ENOMEM);
1951 }
1952 enable_req = req->req_vbuf;
1953 memset(enable_req, 0, sizeof *enable_req);
1954
1955 enable_req->Function = MPI_FUNCTION_EVENT_NOTIFICATION;
1956 enable_req->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_EVENTS);
1957 enable_req->Switch = onoff;
1958
1959 mpt_check_doorbell(mpt);
1960 mpt_lprt(mpt, MPT_PRT_DEBUG, "%sabling async events\n",
1961 onoff ? "en" : "dis");
1962 /*
1963 * Send the command off, but don't wait for it.
1964 */
1965 mpt_send_cmd(mpt, req);
1966 return (0);
1967}
1968
1969/*
1970 * Unmask the interrupts on the chip.
1971 */
1972void
1973mpt_enable_ints(struct mpt_softc *mpt)
1974{
1975	/* Unmask everything except the doorbell interrupt */
1976 mpt_write(mpt, MPT_OFFSET_INTR_MASK, MPT_INTR_DB_MASK);
1977}
1978
1979/*
1980 * Mask the interrupts on the chip.
1981 */
1982void
1983mpt_disable_ints(struct mpt_softc *mpt)
1984{
1985 /* Mask all interrupts */
1986 mpt_write(mpt, MPT_OFFSET_INTR_MASK,
1987 MPT_INTR_REPLY_MASK | MPT_INTR_DB_MASK);
1988}
1989
1990static void
1991mpt_sysctl_attach(struct mpt_softc *mpt)
1992{
1993#if __FreeBSD_version >= 500000
1994 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
1995 struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);
1996
1997 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
1998 "debug", CTLFLAG_RW, &mpt->verbose, 0,
1999 "Debugging/Verbose level");
2000 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
2001 "role", CTLFLAG_RD, &mpt->role, 0,
2002 "HBA role");
2003#ifdef MPT_TEST_MULTIPATH
2004 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
2005 "failure_id", CTLFLAG_RW, &mpt->failure_id, -1,
2006 "Next Target to Fail");
2007#endif
2008#endif
2009}
2010
2011int
2012mpt_attach(struct mpt_softc *mpt)
2013{
2014 struct mpt_personality *pers;
2015 int i;
2016 int error;
2017
2018 TAILQ_INSERT_TAIL(&mpt_tailq, mpt, links);
2019 for (i = 0; i < MPT_MAX_PERSONALITIES; i++) {
2020 pers = mpt_personalities[i];
2021 if (pers == NULL) {
2022 continue;
2023 }
2024 if (pers->probe(mpt) == 0) {
2025 error = pers->attach(mpt);
2026 if (error != 0) {
2027 mpt_detach(mpt);
2028 return (error);
2029 }
2030 mpt->mpt_pers_mask |= (0x1 << pers->id);
2031 pers->use_count++;
2032 }
2033 }
2034
2035 /*
2036 * Now that we've attached everything, do the enable function
2037 * for all of the personalities. This allows the personalities
2038 * to do setups that are appropriate for them prior to enabling
2039 * any ports.
2040 */
2041 for (i = 0; i < MPT_MAX_PERSONALITIES; i++) {
2042 pers = mpt_personalities[i];
2043 if (pers != NULL && MPT_PERS_ATTACHED(pers, mpt) != 0) {
2044 error = pers->enable(mpt);
2045 if (error != 0) {
2046 mpt_prt(mpt, "personality %s attached but would"
2047 " not enable (%d)\n", pers->name, error);
2048 mpt_detach(mpt);
2049 return (error);
2050 }
2051 }
2052 }
2053 return (0);
2054}
2055
2056int
2057mpt_shutdown(struct mpt_softc *mpt)
2058{
2059 struct mpt_personality *pers;
2060
2061 MPT_PERS_FOREACH_REVERSE(mpt, pers) {
2062 pers->shutdown(mpt);
2063 }
2064 return (0);
2065}
2066
2067int
2068mpt_detach(struct mpt_softc *mpt)
2069{
2070 struct mpt_personality *pers;
2071
2072 MPT_PERS_FOREACH_REVERSE(mpt, pers) {
2073 pers->detach(mpt);
2074 mpt->mpt_pers_mask &= ~(0x1 << pers->id);
2075 pers->use_count--;
2076 }
2077 TAILQ_REMOVE(&mpt_tailq, mpt, links);
2078 return (0);
2079}
2080
2081int
2082mpt_core_load(struct mpt_personality *pers)
2083{
2084 int i;
2085
2086 /*
2087 * Setup core handlers and insert the default handler
2088 * into all "empty slots".
2089 */
2090 for (i = 0; i < MPT_NUM_REPLY_HANDLERS; i++) {
2091 mpt_reply_handlers[i] = mpt_default_reply_handler;
2092 }
2093
2094 mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_EVENTS)] =
2095 mpt_event_reply_handler;
2096 mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_CONFIG)] =
2097 mpt_config_reply_handler;
2098 mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_HANDSHAKE)] =
2099 mpt_handshake_reply_handler;
2100 return (0);
2101}
2102
2103/*
2104 * Initialize per-instance driver data and perform
2105 * initial controller configuration.
2106 */
2107int
2108mpt_core_attach(struct mpt_softc *mpt)
2109{
1226 req = TAILQ_FIRST(&mpt->request_free_list);
1227 if (req != NULL) {
1228 KASSERT(req == &mpt->request_pool[req->index],
1229 ("mpt_get_request: corrupted request free list\n"));
1230 KASSERT(req->state == REQ_STATE_FREE,
1231 ("req %p:%u not free on free list %x index %d function %x",
1232 req, req->serno, req->state, req->index,
1233 ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
1234 TAILQ_REMOVE(&mpt->request_free_list, req, links);
1235 req->state = REQ_STATE_ALLOCATED;
1236 req->chain = NULL;
1237 mpt_assign_serno(mpt, req);
1238 } else if (sleep_ok != 0) {
1239 mpt->getreqwaiter = 1;
1240 mpt_sleep(mpt, &mpt->request_free_list, PUSER, "mptgreq", 0);
1241 goto retry;
1242 }
1243 return (req);
1244}
1245
1246/* Pass the command to the IOC */
1247void
1248mpt_send_cmd(struct mpt_softc *mpt, request_t *req)
1249{
1250 if (mpt->verbose > MPT_PRT_DEBUG2) {
1251 mpt_dump_request(mpt, req);
1252 }
1253 bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
1254 BUS_DMASYNC_PREWRITE);
1255 req->state |= REQ_STATE_QUEUED;
1256 KASSERT(mpt_req_on_free_list(mpt, req) == 0,
1257 ("req %p:%u func %x on freelist list in mpt_send_cmd",
1258 req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
1259 KASSERT(mpt_req_on_pending_list(mpt, req) == 0,
1260 ("req %p:%u func %x already on pending list in mpt_send_cmd",
1261 req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
1262 TAILQ_INSERT_HEAD(&mpt->request_pending_list, req, links);
1263 mpt_write(mpt, MPT_OFFSET_REQUEST_Q, (uint32_t) req->req_pbuf);
1264}
1265
1266/*
1267 * Wait for a request to complete.
1268 *
1269 * Inputs:
1270 * mpt softc of controller executing request
1271 * req request to wait for
1272 * sleep_ok nonzero implies may sleep in this context
1273 * time_ms timeout in ms. 0 implies no timeout.
1274 *
1275 * Return Values:
1276 * 0 Request completed
1277 * non-0 Timeout fired before request completion.
1278 */
1279int
1280mpt_wait_req(struct mpt_softc *mpt, request_t *req,
1281 mpt_req_state_t state, mpt_req_state_t mask,
1282 int sleep_ok, int time_ms)
1283{
1284 int error;
1285 int timeout;
1286 u_int saved_cnt;
1287
1288 /*
1289 * timeout is in ms. 0 indicates infinite wait.
1290 * Convert to ticks or 500us units depending on
1291 * our sleep mode.
1292 */
1293 if (sleep_ok != 0) {
1294 timeout = (time_ms * hz) / 1000;
1295 } else {
1296 timeout = time_ms * 2;
1297 }
1298 req->state |= REQ_STATE_NEED_WAKEUP;
1299 mask &= ~REQ_STATE_NEED_WAKEUP;
1300 saved_cnt = mpt->reset_cnt;
1301 while ((req->state & mask) != state && mpt->reset_cnt == saved_cnt) {
1302 if (sleep_ok != 0) {
1303 error = mpt_sleep(mpt, req, PUSER, "mptreq", timeout);
1304 if (error == EWOULDBLOCK) {
1305 timeout = 0;
1306 break;
1307 }
1308 } else {
1309 if (time_ms != 0 && --timeout == 0) {
1310 break;
1311 }
1312 DELAY(500);
1313 mpt_intr(mpt);
1314 }
1315 }
1316 req->state &= ~REQ_STATE_NEED_WAKEUP;
1317 if (mpt->reset_cnt != saved_cnt) {
1318 return (EIO);
1319 }
1320 if (time_ms && timeout <= 0) {
1321 MSG_REQUEST_HEADER *msg_hdr = req->req_vbuf;
1322 req->state |= REQ_STATE_TIMEDOUT;
1323 mpt_prt(mpt, "mpt_wait_req(%x) timed out\n", msg_hdr->Function);
1324 return (ETIMEDOUT);
1325 }
1326 return (0);
1327}
1328
1329/*
1330 * Send a command to the IOC via the handshake register.
1331 *
1332 * Only done at initialization time and for certain unusual
1333 * commands such as device/bus reset as specified by LSI.
1334 */
1335int
1336mpt_send_handshake_cmd(struct mpt_softc *mpt, size_t len, void *cmd)
1337{
1338 int i;
1339 uint32_t data, *data32;
1340
1341 /* Check condition of the IOC */
1342 data = mpt_rd_db(mpt);
1343 if ((MPT_STATE(data) != MPT_DB_STATE_READY
1344 && MPT_STATE(data) != MPT_DB_STATE_RUNNING
1345 && MPT_STATE(data) != MPT_DB_STATE_FAULT)
1346 || MPT_DB_IS_IN_USE(data)) {
1347 mpt_prt(mpt, "handshake aborted - invalid doorbell state\n");
1348 mpt_print_db(data);
1349 return (EBUSY);
1350 }
1351
1352 /* We move things in 32 bit chunks */
1353 len = (len + 3) >> 2;
1354 data32 = cmd;
1355
1356 /* Clear any left over pending doorbell interupts */
1357 if (MPT_DB_INTR(mpt_rd_intr(mpt)))
1358 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
1359
1360 /*
1361 * Tell the handshake reg. we are going to send a command
1362 * and how long it is going to be.
1363 */
1364 data = (MPI_FUNCTION_HANDSHAKE << MPI_DOORBELL_FUNCTION_SHIFT) |
1365 (len << MPI_DOORBELL_ADD_DWORDS_SHIFT);
1366 mpt_write(mpt, MPT_OFFSET_DOORBELL, data);
1367
1368 /* Wait for the chip to notice */
1369 if (mpt_wait_db_int(mpt) != MPT_OK) {
1370 mpt_prt(mpt, "mpt_send_handshake_cmd: db ignored\n");
1371 return (ETIMEDOUT);
1372 }
1373
1374 /* Clear the interrupt */
1375 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
1376
1377 if (mpt_wait_db_ack(mpt) != MPT_OK) {
1378 mpt_prt(mpt, "mpt_send_handshake_cmd: db ack timed out\n");
1379 return (ETIMEDOUT);
1380 }
1381
1382 /* Send the command */
1383 for (i = 0; i < len; i++) {
1384 mpt_write(mpt, MPT_OFFSET_DOORBELL, htole32(*data32++));
1385 if (mpt_wait_db_ack(mpt) != MPT_OK) {
1386 mpt_prt(mpt,
1387 "mpt_send_handshake_cmd: timeout @ index %d\n", i);
1388 return (ETIMEDOUT);
1389 }
1390 }
1391 return MPT_OK;
1392}
1393
1394/* Get the response from the handshake register */
1395int
1396mpt_recv_handshake_reply(struct mpt_softc *mpt, size_t reply_len, void *reply)
1397{
1398 int left, reply_left;
1399 u_int16_t *data16;
1400 uint32_t data;
1401 MSG_DEFAULT_REPLY *hdr;
1402
1403 /* We move things out in 16 bit chunks */
1404 reply_len >>= 1;
1405 data16 = (u_int16_t *)reply;
1406
1407 hdr = (MSG_DEFAULT_REPLY *)reply;
1408
1409 /* Get first word */
1410 if (mpt_wait_db_int(mpt) != MPT_OK) {
1411 mpt_prt(mpt, "mpt_recv_handshake_cmd timeout1\n");
1412 return ETIMEDOUT;
1413 }
1414 data = mpt_read(mpt, MPT_OFFSET_DOORBELL);
1415 *data16++ = le16toh(data & MPT_DB_DATA_MASK);
1416 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
1417
1418 /* Get Second Word */
1419 if (mpt_wait_db_int(mpt) != MPT_OK) {
1420 mpt_prt(mpt, "mpt_recv_handshake_cmd timeout2\n");
1421 return ETIMEDOUT;
1422 }
1423 data = mpt_read(mpt, MPT_OFFSET_DOORBELL);
1424 *data16++ = le16toh(data & MPT_DB_DATA_MASK);
1425 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
1426
1427 /*
1428 * With the second word, we can now look at the length.
1429 * Warn about a reply that's too short (except for IOC FACTS REPLY)
1430 */
1431 if ((reply_len >> 1) != hdr->MsgLength &&
1432 (hdr->Function != MPI_FUNCTION_IOC_FACTS)){
1433#if __FreeBSD_version >= 500000
1434 mpt_prt(mpt, "reply length does not match message length: "
1435 "got %x; expected %zx for function %x\n",
1436 hdr->MsgLength << 2, reply_len << 1, hdr->Function);
1437#else
1438 mpt_prt(mpt, "reply length does not match message length: "
1439 "got %x; expected %x for function %x\n",
1440 hdr->MsgLength << 2, reply_len << 1, hdr->Function);
1441#endif
1442 }
1443
1444 /* Get rest of the reply; but don't overflow the provided buffer */
1445 left = (hdr->MsgLength << 1) - 2;
1446 reply_left = reply_len - 2;
1447 while (left--) {
1448 u_int16_t datum;
1449
1450 if (mpt_wait_db_int(mpt) != MPT_OK) {
1451 mpt_prt(mpt, "mpt_recv_handshake_cmd timeout3\n");
1452 return ETIMEDOUT;
1453 }
1454 data = mpt_read(mpt, MPT_OFFSET_DOORBELL);
1455 datum = le16toh(data & MPT_DB_DATA_MASK);
1456
1457 if (reply_left-- > 0)
1458 *data16++ = datum;
1459
1460 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
1461 }
1462
1463 /* One more wait & clear at the end */
1464 if (mpt_wait_db_int(mpt) != MPT_OK) {
1465 mpt_prt(mpt, "mpt_recv_handshake_cmd timeout4\n");
1466 return ETIMEDOUT;
1467 }
1468 mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
1469
1470 if ((hdr->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
1471 if (mpt->verbose >= MPT_PRT_TRACE)
1472 mpt_print_reply(hdr);
1473 return (MPT_FAIL | hdr->IOCStatus);
1474 }
1475
1476 return (0);
1477}
1478
1479static int
1480mpt_get_iocfacts(struct mpt_softc *mpt, MSG_IOC_FACTS_REPLY *freplp)
1481{
1482 MSG_IOC_FACTS f_req;
1483 int error;
1484
1485 memset(&f_req, 0, sizeof f_req);
1486 f_req.Function = MPI_FUNCTION_IOC_FACTS;
1487 f_req.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
1488 error = mpt_send_handshake_cmd(mpt, sizeof f_req, &f_req);
1489 if (error) {
1490 return(error);
1491 }
1492 error = mpt_recv_handshake_reply(mpt, sizeof (*freplp), freplp);
1493 return (error);
1494}
1495
1496static int
1497mpt_get_portfacts(struct mpt_softc *mpt, U8 port, MSG_PORT_FACTS_REPLY *freplp)
1498{
1499 MSG_PORT_FACTS f_req;
1500 int error;
1501
1502 memset(&f_req, 0, sizeof f_req);
1503 f_req.Function = MPI_FUNCTION_PORT_FACTS;
1504 f_req.PortNumber = port;
1505 f_req.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
1506 error = mpt_send_handshake_cmd(mpt, sizeof f_req, &f_req);
1507 if (error) {
1508 return(error);
1509 }
1510 error = mpt_recv_handshake_reply(mpt, sizeof (*freplp), freplp);
1511 return (error);
1512}
1513
1514/*
1515 * Send the initialization request. This is where we specify how many
1516 * SCSI busses and how many devices per bus we wish to emulate.
1517 * This is also the command that specifies the max size of the reply
1518 * frames from the IOC that we will be allocating.
1519 */
1520static int
1521mpt_send_ioc_init(struct mpt_softc *mpt, uint32_t who)
1522{
1523 int error = 0;
1524 MSG_IOC_INIT init;
1525 MSG_IOC_INIT_REPLY reply;
1526
1527 memset(&init, 0, sizeof init);
1528 init.WhoInit = who;
1529 init.Function = MPI_FUNCTION_IOC_INIT;
1530 init.MaxDevices = 0; /* at least 256 devices per bus */
1531 init.MaxBuses = 16; /* at least 16 busses */
1532
1533 init.MsgVersion = htole16(MPI_VERSION);
1534 init.HeaderVersion = htole16(MPI_HEADER_VERSION);
1535 init.ReplyFrameSize = htole16(MPT_REPLY_SIZE);
1536 init.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
1537
1538 if ((error = mpt_send_handshake_cmd(mpt, sizeof init, &init)) != 0) {
1539 return(error);
1540 }
1541
1542 error = mpt_recv_handshake_reply(mpt, sizeof reply, &reply);
1543 return (error);
1544}
1545
1546
1547/*
1548 * Utiltity routine to read configuration headers and pages
1549 */
1550int
1551mpt_issue_cfg_req(struct mpt_softc *mpt, request_t *req, u_int Action,
1552 u_int PageVersion, u_int PageLength, u_int PageNumber,
1553 u_int PageType, uint32_t PageAddress, bus_addr_t addr,
1554 bus_size_t len, int sleep_ok, int timeout_ms)
1555{
1556 MSG_CONFIG *cfgp;
1557 SGE_SIMPLE32 *se;
1558
1559 cfgp = req->req_vbuf;
1560 memset(cfgp, 0, sizeof *cfgp);
1561 cfgp->Action = Action;
1562 cfgp->Function = MPI_FUNCTION_CONFIG;
1563 cfgp->Header.PageVersion = PageVersion;
1564 cfgp->Header.PageLength = PageLength;
1565 cfgp->Header.PageNumber = PageNumber;
1566 cfgp->Header.PageType = PageType;
1567 cfgp->PageAddress = htole32(PageAddress);
1568 se = (SGE_SIMPLE32 *)&cfgp->PageBufferSGE;
1569 se->Address = htole32(addr);
1570 MPI_pSGE_SET_LENGTH(se, len);
1571 MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
1572 MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
1573 MPI_SGE_FLAGS_END_OF_LIST |
1574 ((Action == MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT
1575 || Action == MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM)
1576 ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST)));
1577 se->FlagsLength = htole32(se->FlagsLength);
1578 cfgp->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_CONFIG);
1579
1580 mpt_check_doorbell(mpt);
1581 mpt_send_cmd(mpt, req);
1582 return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
1583 sleep_ok, timeout_ms));
1584}
1585
1586
1587int
1588mpt_read_cfg_header(struct mpt_softc *mpt, int PageType, int PageNumber,
1589 uint32_t PageAddress, CONFIG_PAGE_HEADER *rslt,
1590 int sleep_ok, int timeout_ms)
1591{
1592 request_t *req;
1593 MSG_CONFIG *cfgp;
1594 int error;
1595
1596 req = mpt_get_request(mpt, sleep_ok);
1597 if (req == NULL) {
1598 mpt_prt(mpt, "mpt_read_cfg_header: Get request failed!\n");
1599 return (ENOMEM);
1600 }
1601
1602 error = mpt_issue_cfg_req(mpt, req, MPI_CONFIG_ACTION_PAGE_HEADER,
1603 /*PageVersion*/0, /*PageLength*/0, PageNumber,
1604 PageType, PageAddress, /*addr*/0, /*len*/0,
1605 sleep_ok, timeout_ms);
1606 if (error != 0) {
1607 /*
1608 * Leave the request. Without resetting the chip, it's
1609 * still owned by it and we'll just get into trouble
1610 * freeing it now. Mark it as abandoned so that if it
1611 * shows up later it can be freed.
1612 */
1613 mpt_prt(mpt, "read_cfg_header timed out\n");
1614 return (ETIMEDOUT);
1615 }
1616
1617 switch (req->IOCStatus & MPI_IOCSTATUS_MASK) {
1618 case MPI_IOCSTATUS_SUCCESS:
1619 cfgp = req->req_vbuf;
1620 bcopy(&cfgp->Header, rslt, sizeof(*rslt));
1621 error = 0;
1622 break;
1623 case MPI_IOCSTATUS_CONFIG_INVALID_PAGE:
1624 mpt_lprt(mpt, MPT_PRT_DEBUG,
1625 "Invalid Page Type %d Number %d Addr 0x%0x\n",
1626 PageType, PageNumber, PageAddress);
1627 error = EINVAL;
1628 break;
1629 default:
1630 mpt_prt(mpt, "mpt_read_cfg_header: Config Info Status %x\n",
1631 req->IOCStatus);
1632 error = EIO;
1633 break;
1634 }
1635 mpt_free_request(mpt, req);
1636 return (error);
1637}
1638
1639int
1640mpt_read_cfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress,
1641 CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok,
1642 int timeout_ms)
1643{
1644 request_t *req;
1645 int error;
1646
1647 req = mpt_get_request(mpt, sleep_ok);
1648 if (req == NULL) {
1649 mpt_prt(mpt, "mpt_read_cfg_page: Get request failed!\n");
1650 return (-1);
1651 }
1652
1653 error = mpt_issue_cfg_req(mpt, req, Action, hdr->PageVersion,
1654 hdr->PageLength, hdr->PageNumber,
1655 hdr->PageType & MPI_CONFIG_PAGETYPE_MASK,
1656 PageAddress, req->req_pbuf + MPT_RQSL(mpt),
1657 len, sleep_ok, timeout_ms);
1658 if (error != 0) {
1659 mpt_prt(mpt, "read_cfg_page(%d) timed out\n", Action);
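		/*
		 * As in mpt_read_cfg_header() above: on a timeout the chip
		 * may still own the request, so it is deliberately abandoned
		 * here rather than freed.
		 */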
1660 return (-1);
1661 }
1662
1663 if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
1664 mpt_prt(mpt, "mpt_read_cfg_page: Config Info Status %x\n",
1665 req->IOCStatus);
1666 mpt_free_request(mpt, req);
1667 return (-1);
1668 }
1669 bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
1670 BUS_DMASYNC_POSTREAD);
1671 memcpy(hdr, ((uint8_t *)req->req_vbuf)+MPT_RQSL(mpt), len);
1672 mpt_free_request(mpt, req);
1673 return (0);
1674}
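
/*
 * A minimal, hypothetical sketch of the two-step transaction the two
 * helpers above implement: read a page header first, then use its
 * version and length to fetch the whole page.  Compiled out; the page
 * type, page number and timeout values are illustrative only.
 */
#if 0
static int
mpt_example_read_ioc_page2(struct mpt_softc *mpt)
{
	CONFIG_PAGE_HEADER hdr;
	uint8_t *page;
	size_t len;
	int rv;

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC, 2, 0,
	    &hdr, /*sleep_ok*/FALSE, /*timeout_ms*/5000);
	if (rv != 0)
		return (rv);
	len = hdr.PageLength * sizeof(uint32_t); /* PageLength is in words */
	page = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (page == NULL)
		return (ENOMEM);
	/* The page buffer must begin with the header just read. */
	memcpy(page, &hdr, sizeof(hdr));
	rv = mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT, 0,
	    (CONFIG_PAGE_HEADER *)page, len, FALSE, 5000);
	free(page, M_DEVBUF);
	return (rv);
}
#endif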
1675
1676int
1677mpt_write_cfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress,
1678 CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok,
1679 int timeout_ms)
1680{
1681 request_t *req;
1682 u_int hdr_attr;
1683 int error;
1684
1685 hdr_attr = hdr->PageType & MPI_CONFIG_PAGEATTR_MASK;
1686 if (hdr_attr != MPI_CONFIG_PAGEATTR_CHANGEABLE &&
1687 hdr_attr != MPI_CONFIG_PAGEATTR_PERSISTENT) {
1688 mpt_prt(mpt, "page type 0x%x not changeable\n",
1689 hdr->PageType & MPI_CONFIG_PAGETYPE_MASK);
1690 return (-1);
1691 }
1692
1693#if 0
1694 /*
1695 * We shouldn't mask off other bits here.
1696 */
1697 hdr->PageType &= MPI_CONFIG_PAGETYPE_MASK;
1698#endif
1699
1700 req = mpt_get_request(mpt, sleep_ok);
1701 if (req == NULL)
1702 return (-1);
1703
1704 memcpy(((caddr_t)req->req_vbuf) + MPT_RQSL(mpt), hdr, len);
1705
1706 /*
1707 * There isn't any point in restoring stripped out attributes
1708 * if you then mask them going down to issue the request.
1709 */
1710
1711#if 0
1712 /* Restore stripped out attributes */
1713 hdr->PageType |= hdr_attr;
1714
1715 error = mpt_issue_cfg_req(mpt, req, Action, hdr->PageVersion,
1716 hdr->PageLength, hdr->PageNumber,
1717 hdr->PageType & MPI_CONFIG_PAGETYPE_MASK,
1718 PageAddress, req->req_pbuf + MPT_RQSL(mpt),
1719 len, sleep_ok, timeout_ms);
1720#else
1721 error = mpt_issue_cfg_req(mpt, req, Action, hdr->PageVersion,
1722 hdr->PageLength, hdr->PageNumber,
1723 hdr->PageType, PageAddress,
1724 req->req_pbuf + MPT_RQSL(mpt),
1725 len, sleep_ok, timeout_ms);
1726#endif
1727 if (error != 0) {
1728 mpt_prt(mpt, "mpt_write_cfg_page timed out\n");
1729 return (-1);
1730 }
1731
1732 if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
1733 mpt_prt(mpt, "mpt_write_cfg_page: Config Info Status %x\n",
1734 req->IOCStatus);
1735 mpt_free_request(mpt, req);
1736 return (-1);
1737 }
1738 mpt_free_request(mpt, req);
1739 return (0);
1740}
1741
1742/*
1743 * Read IOC configuration information
1744 */
1745static int
1746mpt_read_config_info_ioc(struct mpt_softc *mpt)
1747{
1748 CONFIG_PAGE_HEADER hdr;
1749 struct mpt_raid_volume *mpt_raid;
1750 int rv;
1751 int i;
1752 size_t len;
1753
1754 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC,
1755 2, 0, &hdr, FALSE, 5000);
1756 /*
1757 	 * If it's an invalid page, so what? It's just not a supported function.
1758 */
1759 if (rv == EINVAL) {
1760 return (0);
1761 }
1762 if (rv) {
1763 return (rv);
1764 }
1765
1766 mpt_lprt(mpt, MPT_PRT_DEBUG,
1767 "IOC Page 2 Header: Version %x len %x PageNumber %x PageType %x\n",
1768 hdr.PageVersion, hdr.PageLength << 2,
1769 hdr.PageNumber, hdr.PageType);
1770
1771 len = hdr.PageLength * sizeof(uint32_t);
1772 mpt->ioc_page2 = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
1773 if (mpt->ioc_page2 == NULL) {
1774 mpt_prt(mpt, "unable to allocate memory for IOC page 2\n");
1775 mpt_raid_free_mem(mpt);
1776 return (ENOMEM);
1777 }
1778 memcpy(&mpt->ioc_page2->Header, &hdr, sizeof(hdr));
1779 rv = mpt_read_cur_cfg_page(mpt, 0,
1780 &mpt->ioc_page2->Header, len, FALSE, 5000);
1781 if (rv) {
1782 mpt_prt(mpt, "failed to read IOC Page 2\n");
1783 mpt_raid_free_mem(mpt);
1784 return (EIO);
1785 }
1786 mpt2host_config_page_ioc2(mpt->ioc_page2);
1787
1788 if (mpt->ioc_page2->CapabilitiesFlags != 0) {
1789 uint32_t mask;
1790
1791 mpt_prt(mpt, "Capabilities: (");
1792 for (mask = 1; mask != 0; mask <<= 1) {
1793 if ((mpt->ioc_page2->CapabilitiesFlags & mask) == 0) {
1794 continue;
1795 }
1796 switch (mask) {
1797 case MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT:
1798 mpt_prtc(mpt, " RAID-0");
1799 break;
1800 case MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT:
1801 mpt_prtc(mpt, " RAID-1E");
1802 break;
1803 case MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT:
1804 mpt_prtc(mpt, " RAID-1");
1805 break;
1806 case MPI_IOCPAGE2_CAP_FLAGS_SES_SUPPORT:
1807 mpt_prtc(mpt, " SES");
1808 break;
1809 case MPI_IOCPAGE2_CAP_FLAGS_SAFTE_SUPPORT:
1810 mpt_prtc(mpt, " SAFTE");
1811 break;
1812 case MPI_IOCPAGE2_CAP_FLAGS_CROSS_CHANNEL_SUPPORT:
1813 mpt_prtc(mpt, " Multi-Channel-Arrays");
1814 default:
1815 break;
1816 }
1817 }
1818 mpt_prtc(mpt, " )\n");
1819 if ((mpt->ioc_page2->CapabilitiesFlags
1820 & (MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT
1821 | MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT
1822 | MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT)) != 0) {
1823 mpt_prt(mpt, "%d Active Volume%s(%d Max)\n",
1824 mpt->ioc_page2->NumActiveVolumes,
1825 mpt->ioc_page2->NumActiveVolumes != 1
1826 ? "s " : " ",
1827 mpt->ioc_page2->MaxVolumes);
1828 mpt_prt(mpt, "%d Hidden Drive Member%s(%d Max)\n",
1829 mpt->ioc_page2->NumActivePhysDisks,
1830 mpt->ioc_page2->NumActivePhysDisks != 1
1831 ? "s " : " ",
1832 mpt->ioc_page2->MaxPhysDisks);
1833 }
1834 }
1835
1836 len = mpt->ioc_page2->MaxVolumes * sizeof(struct mpt_raid_volume);
1837 mpt->raid_volumes = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
1838 if (mpt->raid_volumes == NULL) {
1839 mpt_prt(mpt, "Could not allocate RAID volume data\n");
1840 mpt_raid_free_mem(mpt);
1841 return (ENOMEM);
1842 }
1843
1844 /*
1845 * Copy critical data out of ioc_page2 so that we can
1846 * safely refresh the page without windows of unreliable
1847 * data.
1848 */
1849 mpt->raid_max_volumes = mpt->ioc_page2->MaxVolumes;
1850
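	/*
	 * Note: RAID volume page 0 presumably declares one in-line
	 * RAID_VOL0_PHYS_DISK element, hence the MaxPhysDisks - 1 when
	 * sizing the variable-length tail below.
	 */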
1851 len = sizeof(*mpt->raid_volumes->config_page) +
1852 (sizeof (RAID_VOL0_PHYS_DISK) * (mpt->ioc_page2->MaxPhysDisks - 1));
1853 for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) {
1854 mpt_raid = &mpt->raid_volumes[i];
1855 mpt_raid->config_page =
1856 malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
1857 if (mpt_raid->config_page == NULL) {
1858 mpt_prt(mpt, "Could not allocate RAID page data\n");
1859 mpt_raid_free_mem(mpt);
1860 return (ENOMEM);
1861 }
1862 }
1863 mpt->raid_page0_len = len;
1864
1865 len = mpt->ioc_page2->MaxPhysDisks * sizeof(struct mpt_raid_disk);
1866 mpt->raid_disks = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
1867 if (mpt->raid_disks == NULL) {
1868 mpt_prt(mpt, "Could not allocate RAID disk data\n");
1869 mpt_raid_free_mem(mpt);
1870 return (ENOMEM);
1871 }
1872 mpt->raid_max_disks = mpt->ioc_page2->MaxPhysDisks;
1873
1874 /*
1875 * Load page 3.
1876 */
1877 rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC,
1878 3, 0, &hdr, FALSE, 5000);
1879 if (rv) {
1880 mpt_raid_free_mem(mpt);
1881 return (EIO);
1882 }
1883
1884 mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC Page 3 Header: %x %x %x %x\n",
1885 hdr.PageVersion, hdr.PageLength, hdr.PageNumber, hdr.PageType);
1886
1887 len = hdr.PageLength * sizeof(uint32_t);
1888 mpt->ioc_page3 = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
1889 if (mpt->ioc_page3 == NULL) {
1890 mpt_prt(mpt, "unable to allocate memory for IOC page 3\n");
1891 mpt_raid_free_mem(mpt);
1892 return (ENOMEM);
1893 }
1894 memcpy(&mpt->ioc_page3->Header, &hdr, sizeof(hdr));
1895 rv = mpt_read_cur_cfg_page(mpt, 0,
1896 &mpt->ioc_page3->Header, len, FALSE, 5000);
1897 if (rv) {
1898 mpt_raid_free_mem(mpt);
1899 return (EIO);
1900 }
1901 mpt_raid_wakeup(mpt);
1902 return (0);
1903}
1904
1905/*
1906 * Enable IOC port
1907 */
1908static int
1909mpt_send_port_enable(struct mpt_softc *mpt, int port)
1910{
1911 request_t *req;
1912 MSG_PORT_ENABLE *enable_req;
1913 int error;
1914
1915 req = mpt_get_request(mpt, /*sleep_ok*/FALSE);
1916 if (req == NULL)
1917 return (-1);
1918
1919 enable_req = req->req_vbuf;
1920 memset(enable_req, 0, MPT_RQSL(mpt));
1921
1922 enable_req->Function = MPI_FUNCTION_PORT_ENABLE;
1923 enable_req->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_CONFIG);
1924 enable_req->PortNumber = port;
1925
1926 mpt_check_doorbell(mpt);
1927 mpt_lprt(mpt, MPT_PRT_DEBUG, "enabling port %d\n", port);
1928
1929 mpt_send_cmd(mpt, req);
1930 error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
1931 FALSE, (mpt->is_sas || mpt->is_fc)? 30000 : 3000);
1932 if (error != 0) {
1933 mpt_prt(mpt, "port %d enable timed out\n", port);
1934 return (-1);
1935 }
1936 mpt_free_request(mpt, req);
1937 mpt_lprt(mpt, MPT_PRT_DEBUG, "enabled port %d\n", port);
1938 return (0);
1939}
1940
1941/*
1942 * Enable/Disable asynchronous event reporting.
1943 */
1944static int
1945mpt_send_event_request(struct mpt_softc *mpt, int onoff)
1946{
1947 request_t *req;
1948 MSG_EVENT_NOTIFY *enable_req;
1949
1950 req = mpt_get_request(mpt, FALSE);
1951 if (req == NULL) {
1952 return (ENOMEM);
1953 }
1954 enable_req = req->req_vbuf;
1955 memset(enable_req, 0, sizeof *enable_req);
1956
1957 enable_req->Function = MPI_FUNCTION_EVENT_NOTIFICATION;
1958 enable_req->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_EVENTS);
1959 enable_req->Switch = onoff;
1960
1961 mpt_check_doorbell(mpt);
1962 mpt_lprt(mpt, MPT_PRT_DEBUG, "%sabling async events\n",
1963 onoff ? "en" : "dis");
1964 /*
1965 * Send the command off, but don't wait for it.
1966 */
1967 mpt_send_cmd(mpt, req);
1968 return (0);
1969}
1970
1971/*
1972 * Un-mask the interrupts on the chip.
1973 */
1974void
1975mpt_enable_ints(struct mpt_softc *mpt)
1976{
1977 	/* Unmask everything except the doorbell interrupt */
1978 mpt_write(mpt, MPT_OFFSET_INTR_MASK, MPT_INTR_DB_MASK);
1979}
1980
1981/*
1982 * Mask the interrupts on the chip.
1983 */
1984void
1985mpt_disable_ints(struct mpt_softc *mpt)
1986{
1987 /* Mask all interrupts */
1988 mpt_write(mpt, MPT_OFFSET_INTR_MASK,
1989 MPT_INTR_REPLY_MASK | MPT_INTR_DB_MASK);
1990}
1991
1992static void
1993mpt_sysctl_attach(struct mpt_softc *mpt)
1994{
1995#if __FreeBSD_version >= 500000
1996 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
1997 struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);
1998
1999 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
2000 "debug", CTLFLAG_RW, &mpt->verbose, 0,
2001 "Debugging/Verbose level");
2002 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
2003 "role", CTLFLAG_RD, &mpt->role, 0,
2004 "HBA role");
2005#ifdef MPT_TEST_MULTIPATH
2006 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
2007 "failure_id", CTLFLAG_RW, &mpt->failure_id, -1,
2008 "Next Target to Fail");
2009#endif
2010#endif
2011}
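
/*
 * Since the context above hangs off the device's sysctl tree, these
 * knobs surface under dev.mpt.<unit> and can be poked from userland,
 * e.g. (unit number hypothetical):
 *
 *	sysctl dev.mpt.0.debug=2
 */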
2012
2013int
2014mpt_attach(struct mpt_softc *mpt)
2015{
2016 struct mpt_personality *pers;
2017 int i;
2018 int error;
2019
2020 TAILQ_INSERT_TAIL(&mpt_tailq, mpt, links);
2021 for (i = 0; i < MPT_MAX_PERSONALITIES; i++) {
2022 pers = mpt_personalities[i];
2023 if (pers == NULL) {
2024 continue;
2025 }
2026 if (pers->probe(mpt) == 0) {
2027 error = pers->attach(mpt);
2028 if (error != 0) {
2029 mpt_detach(mpt);
2030 return (error);
2031 }
2032 mpt->mpt_pers_mask |= (0x1 << pers->id);
2033 pers->use_count++;
2034 }
2035 }
2036
2037 /*
2038 * Now that we've attached everything, do the enable function
2039 * for all of the personalities. This allows the personalities
2040 * to do setups that are appropriate for them prior to enabling
2041 * any ports.
2042 */
2043 for (i = 0; i < MPT_MAX_PERSONALITIES; i++) {
2044 pers = mpt_personalities[i];
2045 if (pers != NULL && MPT_PERS_ATTACHED(pers, mpt) != 0) {
2046 error = pers->enable(mpt);
2047 if (error != 0) {
2048 mpt_prt(mpt, "personality %s attached but would"
2049 " not enable (%d)\n", pers->name, error);
2050 mpt_detach(mpt);
2051 return (error);
2052 }
2053 }
2054 }
2055 return (0);
2056}
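
/*
 * Personality life cycle, as driven above and below: probe() selects
 * interested personalities, attach() and then enable() run in
 * registration order, and shutdown()/detach() unwind in reverse
 * registration order via MPT_PERS_FOREACH_REVERSE.
 */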
2057
2058int
2059mpt_shutdown(struct mpt_softc *mpt)
2060{
2061 struct mpt_personality *pers;
2062
2063 MPT_PERS_FOREACH_REVERSE(mpt, pers) {
2064 pers->shutdown(mpt);
2065 }
2066 return (0);
2067}
2068
2069int
2070mpt_detach(struct mpt_softc *mpt)
2071{
2072 struct mpt_personality *pers;
2073
2074 MPT_PERS_FOREACH_REVERSE(mpt, pers) {
2075 pers->detach(mpt);
2076 mpt->mpt_pers_mask &= ~(0x1 << pers->id);
2077 pers->use_count--;
2078 }
2079 TAILQ_REMOVE(&mpt_tailq, mpt, links);
2080 return (0);
2081}
2082
2083int
2084mpt_core_load(struct mpt_personality *pers)
2085{
2086 int i;
2087
2088 /*
2089 	 * Set up core handlers and insert the default handler
2090 * into all "empty slots".
2091 */
2092 for (i = 0; i < MPT_NUM_REPLY_HANDLERS; i++) {
2093 mpt_reply_handlers[i] = mpt_default_reply_handler;
2094 }
2095
2096 mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_EVENTS)] =
2097 mpt_event_reply_handler;
2098 mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_CONFIG)] =
2099 mpt_config_reply_handler;
2100 mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_HANDSHAKE)] =
2101 mpt_handshake_reply_handler;
2102 return (0);
2103}
2104
2105/*
2106 * Initialize per-instance driver data and perform
2107 * initial controller configuration.
2108 */
2109int
2110mpt_core_attach(struct mpt_softc *mpt)
2111{
2112 int val, error;
2113
2114 LIST_INIT(&mpt->ack_frames);
2115 /* Put all request buffers on the free list */
2116 TAILQ_INIT(&mpt->request_pending_list);
2117 TAILQ_INIT(&mpt->request_free_list);
2118 TAILQ_INIT(&mpt->request_timeout_list);
2119 MPT_LOCK(mpt);
2120 for (val = 0; val < MPT_MAX_REQUESTS(mpt); val++) {
2121 request_t *req = &mpt->request_pool[val];
2122 req->state = REQ_STATE_ALLOCATED;
2123 mpt_free_request(mpt, req);
2124 }
2125 MPT_UNLOCK(mpt);
2126 for (val = 0; val < MPT_MAX_LUNS; val++) {
2127 STAILQ_INIT(&mpt->trt[val].atios);
2128 STAILQ_INIT(&mpt->trt[val].inots);
2129 }
2130 STAILQ_INIT(&mpt->trt_wildcard.atios);
2131 STAILQ_INIT(&mpt->trt_wildcard.inots);
2132#ifdef MPT_TEST_MULTIPATH
2133 mpt->failure_id = -1;
2134#endif
2135 mpt->scsi_tgt_handler_id = MPT_HANDLER_ID_NONE;
2136 mpt_sysctl_attach(mpt);
2137 mpt_lprt(mpt, MPT_PRT_DEBUG, "doorbell req = %s\n",
2138 mpt_ioc_diag(mpt_read(mpt, MPT_OFFSET_DOORBELL)));
2139
2140 MPT_LOCK(mpt);
2141 error = mpt_configure_ioc(mpt, 0, 0);
2142 MPT_UNLOCK(mpt);
2143
2144 return (error);
2145}
2146
2147int
2148mpt_core_enable(struct mpt_softc *mpt)
2149{
2150 /*
2151 * We enter with the IOC enabled, but async events
2152 * not enabled, ports not enabled and interrupts
2153 * not enabled.
2154 */
2155 MPT_LOCK(mpt);
2156
2157 /*
2158 	 * Enable asynchronous event reporting; all personalities
2159 	 * have attached, so they should now be able to field
2160 	 * async events.
2161 */
2162 mpt_send_event_request(mpt, 1);
2163
2164 /*
2165 * Catch any pending interrupts
2166 *
2167 	 * This seems to be crucial; otherwise
2168 	 * the port enable below times out.
2169 */
2170 mpt_intr(mpt);
2171
2172 /*
2173 * Enable Interrupts
2174 */
2175 mpt_enable_ints(mpt);
2176
2177 /*
2178 * Catch any pending interrupts
2179 *
2180 	 * This seems to be crucial; otherwise
2181 	 * the port enable below times out.
2182 */
2183 mpt_intr(mpt);
2184
2185 /*
2186 * Enable the port.
2187 */
2188 if (mpt_send_port_enable(mpt, 0) != MPT_OK) {
2189 mpt_prt(mpt, "failed to enable port 0\n");
2190 MPT_UNLOCK(mpt);
2191 return (ENXIO);
2192 }
2193 MPT_UNLOCK(mpt);
2194 return (0);
2195}
2196
2197void
2198mpt_core_shutdown(struct mpt_softc *mpt)
2199{
2200 mpt_disable_ints(mpt);
2201}
2202
2203void
2204mpt_core_detach(struct mpt_softc *mpt)
2205{
2206 /*
2207 * XXX: FREE MEMORY
2208 */
2209 mpt_disable_ints(mpt);
2210}
2211
2212int
2213mpt_core_unload(struct mpt_personality *pers)
2214{
2215 	/* Unload is always successful. */
2216 return (0);
2217}
2218
2219#define FW_UPLOAD_REQ_SIZE \
2220 (sizeof(MSG_FW_UPLOAD) - sizeof(SGE_MPI_UNION) \
2221 + sizeof(FW_UPLOAD_TCSGE) + sizeof(SGE_SIMPLE32))
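
/*
 * The request is sized by hand: the fixed MSG_FW_UPLOAD header minus
 * its in-line SGL union, plus the transaction-context element and the
 * single simple SGE that mpt_upload_fw() appends in its place.
 */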
2222
2223static int
2224mpt_upload_fw(struct mpt_softc *mpt)
2225{
2226 uint8_t fw_req_buf[FW_UPLOAD_REQ_SIZE];
2227 MSG_FW_UPLOAD_REPLY fw_reply;
2228 MSG_FW_UPLOAD *fw_req;
2229 FW_UPLOAD_TCSGE *tsge;
2230 SGE_SIMPLE32 *sge;
2231 uint32_t flags;
2232 int error;
2233
2234 memset(&fw_req_buf, 0, sizeof(fw_req_buf));
2235 fw_req = (MSG_FW_UPLOAD *)fw_req_buf;
2236 fw_req->ImageType = MPI_FW_UPLOAD_ITYPE_FW_IOC_MEM;
2237 fw_req->Function = MPI_FUNCTION_FW_UPLOAD;
2238 fw_req->MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
2239 tsge = (FW_UPLOAD_TCSGE *)&fw_req->SGL;
2240 tsge->DetailsLength = 12;
2241 tsge->Flags = MPI_SGE_FLAGS_TRANSACTION_ELEMENT;
2242 tsge->ImageSize = htole32(mpt->fw_image_size);
2243 sge = (SGE_SIMPLE32 *)(tsge + 1);
2244 flags = (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER
2245 | MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_SIMPLE_ELEMENT
2246 | MPI_SGE_FLAGS_32_BIT_ADDRESSING | MPI_SGE_FLAGS_IOC_TO_HOST);
2247 flags <<= MPI_SGE_FLAGS_SHIFT;
2248 sge->FlagsLength = htole32(flags | mpt->fw_image_size);
2249 sge->Address = htole32(mpt->fw_phys);
2250 error = mpt_send_handshake_cmd(mpt, sizeof(fw_req_buf), &fw_req_buf);
2251 if (error)
2252 return(error);
2253 error = mpt_recv_handshake_reply(mpt, sizeof(fw_reply), &fw_reply);
2254 return (error);
2255}
2256
2257static void
2258mpt_diag_outsl(struct mpt_softc *mpt, uint32_t addr,
2259 uint32_t *data, bus_size_t len)
2260{
2261 uint32_t *data_end;
2262
2263 data_end = data + (roundup2(len, sizeof(uint32_t)) / 4);
2264 if (mpt->is_sas) {
2265 pci_enable_io(mpt->dev, SYS_RES_IOPORT);
2266 }
2267 mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, addr);
2268 while (data != data_end) {
2269 mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, *data);
2270 data++;
2271 }
2272 if (mpt->is_sas) {
2273 pci_disable_io(mpt->dev, SYS_RES_IOPORT);
2274 }
2275}
2276
2277static int
2278mpt_download_fw(struct mpt_softc *mpt)
2279{
2280 MpiFwHeader_t *fw_hdr;
2281 int error;
2282 uint32_t ext_offset;
2283 uint32_t data;
2284
2285 mpt_prt(mpt, "Downloading Firmware - Image Size %d\n",
2286 mpt->fw_image_size);
2287
2288 error = mpt_enable_diag_mode(mpt);
2289 if (error != 0) {
2290 mpt_prt(mpt, "Could not enter diagnostic mode!\n");
2291 return (EIO);
2292 }
2293
2294 mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC,
2295 MPI_DIAG_RW_ENABLE|MPI_DIAG_DISABLE_ARM);
2296
2297 fw_hdr = (MpiFwHeader_t *)mpt->fw_image;
2298 mpt_diag_outsl(mpt, fw_hdr->LoadStartAddress, (uint32_t*)fw_hdr,
2299 fw_hdr->ImageSize);
2300
2301 ext_offset = fw_hdr->NextImageHeaderOffset;
2302 while (ext_offset != 0) {
2303 MpiExtImageHeader_t *ext;
2304
2305 ext = (MpiExtImageHeader_t *)((uintptr_t)fw_hdr + ext_offset);
2306 ext_offset = ext->NextImageHeaderOffset;
2307
2308 mpt_diag_outsl(mpt, ext->LoadStartAddress, (uint32_t*)ext,
2309 ext->ImageSize);
2310 }
2311
2312 if (mpt->is_sas) {
2313 pci_enable_io(mpt->dev, SYS_RES_IOPORT);
2314 }
2315 	/* Set up the address to jump to on reset. */
2316 mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, fw_hdr->IopResetRegAddr);
2317 mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, fw_hdr->IopResetVectorValue);
2318
2319 /*
2320 * The controller sets the "flash bad" status after attempting
2321 * to auto-boot from flash. Clear the status so that the controller
2322 * will continue the boot process with our newly installed firmware.
2323 */
2324 mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, MPT_DIAG_MEM_CFG_BASE);
2325 data = mpt_pio_read(mpt, MPT_OFFSET_DIAG_DATA) | MPT_DIAG_MEM_CFG_BADFL;
2326 mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, MPT_DIAG_MEM_CFG_BASE);
2327 mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, data);
2328
2329 if (mpt->is_sas) {
2330 pci_disable_io(mpt->dev, SYS_RES_IOPORT);
2331 }
2332
2333 /*
2334 * Re-enable the processor and clear the boot halt flag.
2335 */
2336 data = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);
2337 data &= ~(MPI_DIAG_PREVENT_IOC_BOOT|MPI_DIAG_DISABLE_ARM);
2338 mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, data);
2339
2340 mpt_disable_diag_mode(mpt);
2341 return (0);
2342}
2343
2344/*
2345 * Allocate/Initialize data structures for the controller. Called
2346 * once at instance startup.
2347 */
2348static int
2349mpt_configure_ioc(struct mpt_softc *mpt, int tn, int needreset)
2350{
2351 PTR_MSG_PORT_FACTS_REPLY pfp;
2352 int error, port;
2353 size_t len;
2354
2355 if (tn == MPT_MAX_TRYS) {
2356 return (-1);
2357 }
2358
2359 /*
2360 * No need to reset if the IOC is already in the READY state.
2361 *
2362 * Force reset if initialization failed previously.
2363 * Note that a hard_reset of the second channel of a '929
2364 * will stop operation of the first channel. Hopefully, if the
2365 * first channel is ok, the second will not require a hard
2366 * reset.
2367 */
2368 if (needreset || MPT_STATE(mpt_rd_db(mpt)) != MPT_DB_STATE_READY) {
2369 if (mpt_reset(mpt, FALSE) != MPT_OK) {
2370 			return (mpt_configure_ioc(mpt, tn + 1, 1));
2371 }
2372 needreset = 0;
2373 }
2374
2375 if (mpt_get_iocfacts(mpt, &mpt->ioc_facts) != MPT_OK) {
2376 mpt_prt(mpt, "mpt_get_iocfacts failed\n");
2377 		return (mpt_configure_ioc(mpt, tn + 1, 1));
2378 }
2379 mpt2host_iocfacts_reply(&mpt->ioc_facts);
2380
2381 mpt_prt(mpt, "MPI Version=%d.%d.%d.%d\n",
2382 mpt->ioc_facts.MsgVersion >> 8,
2383 mpt->ioc_facts.MsgVersion & 0xFF,
2384 mpt->ioc_facts.HeaderVersion >> 8,
2385 mpt->ioc_facts.HeaderVersion & 0xFF);
2386
2387 /*
2388 * Now that we know request frame size, we can calculate
2389 * the actual (reasonable) segment limit for read/write I/O.
2390 *
2391 * This limit is constrained by:
2392 *
2393 * + The size of each area we allocate per command (and how
2394 * many chain segments we can fit into it).
2395 * + The total number of areas we've set up.
2396 * + The actual chain depth the card will allow.
2397 *
2398 * The first area's segment count is limited by the I/O request
2399 * at the head of it. We cannot allocate realistically more
2400 * than MPT_MAX_REQUESTS areas. Therefore, to account for both
2401 * conditions, we'll just start out with MPT_MAX_REQUESTS-2.
2402 *
2403 */
2404 /* total number of request areas we (can) allocate */
2405 mpt->max_seg_cnt = MPT_MAX_REQUESTS(mpt) - 2;
2406
2407 /* converted to the number of chain areas possible */
2408 mpt->max_seg_cnt *= MPT_NRFM(mpt);
2409
2410 /* limited by the number of chain areas the card will support */
2411 if (mpt->max_seg_cnt > mpt->ioc_facts.MaxChainDepth) {
2412 mpt_lprt(mpt, MPT_PRT_DEBUG,
2413 "chain depth limited to %u (from %u)\n",
2414 mpt->ioc_facts.MaxChainDepth, mpt->max_seg_cnt);
2415 mpt->max_seg_cnt = mpt->ioc_facts.MaxChainDepth;
2416 }
2417
2418 /* converted to the number of simple sges in chain segments. */
2419 mpt->max_seg_cnt *= (MPT_NSGL(mpt) - 1);
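
	/*
	 * Worked example, with made-up numbers: if MPT_MAX_REQUESTS were
	 * 128, MPT_NRFM 15, MaxChainDepth 1024 and MPT_NSGL 16, the above
	 * would give (128 - 2) * 15 = 1890 chain areas, clamped to 1024
	 * by the card, for 1024 * (16 - 1) = 15360 simple SGEs.
	 */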
2420
2421 mpt_lprt(mpt, MPT_PRT_DEBUG, "Maximum Segment Count: %u\n",
2422 mpt->max_seg_cnt);
2423 mpt_lprt(mpt, MPT_PRT_DEBUG, "MsgLength=%u IOCNumber = %d\n",
2424 mpt->ioc_facts.MsgLength, mpt->ioc_facts.IOCNumber);
2425 mpt_lprt(mpt, MPT_PRT_DEBUG,
2426 "IOCFACTS: GlobalCredits=%d BlockSize=%u bytes "
2427 "Request Frame Size %u bytes Max Chain Depth %u\n",
2428 mpt->ioc_facts.GlobalCredits, mpt->ioc_facts.BlockSize,
2429 mpt->ioc_facts.RequestFrameSize << 2,
2430 mpt->ioc_facts.MaxChainDepth);
2431 mpt_lprt(mpt, MPT_PRT_DEBUG, "IOCFACTS: Num Ports %d, FWImageSize %d, "
2432 "Flags=%#x\n", mpt->ioc_facts.NumberOfPorts,
2433 mpt->ioc_facts.FWImageSize, mpt->ioc_facts.Flags);
2434
2435 len = mpt->ioc_facts.NumberOfPorts * sizeof (MSG_PORT_FACTS_REPLY);
2436 mpt->port_facts = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
2437 if (mpt->port_facts == NULL) {
2438 mpt_prt(mpt, "unable to allocate memory for port facts\n");
2439 return (ENOMEM);
2440 }
2441
2442
2443 if ((mpt->ioc_facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT) &&
2444 (mpt->fw_uploaded == 0)) {
2445 struct mpt_map_info mi;
2446
2447 /*
2448 * In some configurations, the IOC's firmware is
2449 * stored in a shared piece of system NVRAM that
2450 		 * is only accessible via the BIOS.  In this
2451 		 * case, the IOC keeps a copy of the firmware in
2452 		 * RAM until the OS driver retrieves it.  Once
2453 * retrieved, we are responsible for re-downloading
2454 * the firmware after any hard-reset.
2455 */
2456 mpt->fw_image_size = mpt->ioc_facts.FWImageSize;
2457 error = mpt_dma_tag_create(mpt, mpt->parent_dmat, 1, 0,
2458 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
2459 mpt->fw_image_size, 1, mpt->fw_image_size, 0,
2460 &mpt->fw_dmat);
2461 if (error != 0) {
2462 			mpt_prt(mpt, "cannot create firmware dma tag\n");
2463 return (ENOMEM);
2464 }
2465 error = bus_dmamem_alloc(mpt->fw_dmat,
2466 (void **)&mpt->fw_image, BUS_DMA_NOWAIT, &mpt->fw_dmap);
2467 if (error != 0) {
2468 mpt_prt(mpt, "cannot allocate firmware memory\n");
2469 bus_dma_tag_destroy(mpt->fw_dmat);
2470 return (ENOMEM);
2471 }
2472 mi.mpt = mpt;
2473 mi.error = 0;
2474 bus_dmamap_load(mpt->fw_dmat, mpt->fw_dmap,
2475 mpt->fw_image, mpt->fw_image_size, mpt_map_rquest, &mi, 0);
2476 mpt->fw_phys = mi.phys;
2477
2478 error = mpt_upload_fw(mpt);
2479 if (error != 0) {
2480 mpt_prt(mpt, "firmware upload failed.\n");
2481 bus_dmamap_unload(mpt->fw_dmat, mpt->fw_dmap);
2482 bus_dmamem_free(mpt->fw_dmat, mpt->fw_image,
2483 mpt->fw_dmap);
2484 bus_dma_tag_destroy(mpt->fw_dmat);
2485 mpt->fw_image = NULL;
2486 return (EIO);
2487 }
2488 mpt->fw_uploaded = 1;
2489 }
2490
2491 for (port = 0; port < mpt->ioc_facts.NumberOfPorts; port++) {
2492 pfp = &mpt->port_facts[port];
2493 error = mpt_get_portfacts(mpt, 0, pfp);
2494 if (error != MPT_OK) {
2495 mpt_prt(mpt,
2496 "mpt_get_portfacts on port %d failed\n", port);
2497 free(mpt->port_facts, M_DEVBUF);
2498 mpt->port_facts = NULL;
2499 			return (mpt_configure_ioc(mpt, tn + 1, 1));
2500 }
2501 mpt2host_portfacts_reply(pfp);
2502
2503 if (port > 0) {
2504 error = MPT_PRT_INFO;
2505 } else {
2506 error = MPT_PRT_DEBUG;
2507 }
2508 mpt_lprt(mpt, error,
2509 "PORTFACTS[%d]: Type %x PFlags %x IID %d MaxDev %d\n",
2510 port, pfp->PortType, pfp->ProtocolFlags, pfp->PortSCSIID,
2511 pfp->MaxDevices);
2512
2513 }
2514
2515 /*
2516 * XXX: Not yet supporting more than port 0
2517 */
2518 pfp = &mpt->port_facts[0];
2519 if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_FC) {
2520 mpt->is_fc = 1;
2521 mpt->is_sas = 0;
2522 mpt->is_spi = 0;
2523 } else if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_SAS) {
2524 mpt->is_fc = 0;
2525 mpt->is_sas = 1;
2526 mpt->is_spi = 0;
2527 } else if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_SCSI) {
2528 mpt->is_fc = 0;
2529 mpt->is_sas = 0;
2530 mpt->is_spi = 1;
2531 } else if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_ISCSI) {
2532 mpt_prt(mpt, "iSCSI not supported yet\n");
2533 return (ENXIO);
2534 } else if (pfp->PortType == MPI_PORTFACTS_PORTTYPE_INACTIVE) {
2535 mpt_prt(mpt, "Inactive Port\n");
2536 return (ENXIO);
2537 } else {
2538 mpt_prt(mpt, "unknown Port Type %#x\n", pfp->PortType);
2539 return (ENXIO);
2540 }
2541
2542 /*
2543 * Set our role with what this port supports.
2544 *
2545 * Note this might be changed later in different modules
2546 * if this is different from what is wanted.
2547 */
2548 mpt->role = MPT_ROLE_NONE;
2549 if (pfp->ProtocolFlags & MPI_PORTFACTS_PROTOCOL_INITIATOR) {
2550 mpt->role |= MPT_ROLE_INITIATOR;
2551 }
2552 if (pfp->ProtocolFlags & MPI_PORTFACTS_PROTOCOL_TARGET) {
2553 mpt->role |= MPT_ROLE_TARGET;
2554 }
2555
2556 /*
2557 * Enable the IOC
2558 */
2559 if (mpt_enable_ioc(mpt, 0) != MPT_OK) {
2560 mpt_prt(mpt, "unable to initialize IOC\n");
2561 return (ENXIO);
2562 }
2563
2564 /*
2565 * Read IOC configuration information.
2566 *
2567 	 * We need this to determine whether or not we have certain
2568 	 * settings, e.g. for Integrated Mirroring.
2569 */
2570 mpt_read_config_info_ioc(mpt);
2571
2572 return (0);
2573}
2574
2575static int
2576mpt_enable_ioc(struct mpt_softc *mpt, int portenable)
2577{
2578 uint32_t pptr;
2579 int val;
2580
2581 if (mpt_send_ioc_init(mpt, MPI_WHOINIT_HOST_DRIVER) != MPT_OK) {
2582 mpt_prt(mpt, "mpt_send_ioc_init failed\n");
2583 return (EIO);
2584 }
2585
2586 mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_send_ioc_init ok\n");
2587
2588 if (mpt_wait_state(mpt, MPT_DB_STATE_RUNNING) != MPT_OK) {
2589 mpt_prt(mpt, "IOC failed to go to run state\n");
2590 return (ENXIO);
2591 }
2592 mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC now at RUNSTATE\n");
2593
2594 /*
2595 * Give it reply buffers
2596 *
2597 * Do *not* exceed global credits.
2598 */
2599 for (val = 0, pptr = mpt->reply_phys;
2600 (pptr + MPT_REPLY_SIZE) < (mpt->reply_phys + PAGE_SIZE);
2601 pptr += MPT_REPLY_SIZE) {
2602 mpt_free_reply(mpt, pptr);
2603 if (++val == mpt->ioc_facts.GlobalCredits - 1)
2604 break;
2605 }
2606
2607
2608 /*
2609 * Enable the port if asked. This is only done if we're resetting
2610 * the IOC after initial startup.
2611 */
2612 if (portenable) {
2613 /*
2614 * Enable asynchronous event reporting
2615 */
2616 mpt_send_event_request(mpt, 1);
2617
2618 if (mpt_send_port_enable(mpt, 0) != MPT_OK) {
2619 mpt_prt(mpt, "failed to enable port 0\n");
2620 return (ENXIO);
2621 }
2622 }
2623 return (MPT_OK);
2624}
2625
2626/*
2627 * Endian Conversion Functions - only used on Big Endian machines
2628 */
2629#if _BYTE_ORDER == _BIG_ENDIAN
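
/*
 * MPT_2_HOST16/MPT_2_HOST32 are presumed to be in-place byte-swap
 * helpers along these lines (the actual definitions live in the
 * driver's headers):
 *
 *	#define MPT_2_HOST16(ptr, tag)	(ptr)->tag = le16toh((ptr)->tag)
 *	#define MPT_2_HOST32(ptr, tag)	(ptr)->tag = le32toh((ptr)->tag)
 */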
2630void
2631mpt2host_sge_simple_union(SGE_SIMPLE_UNION *sge)
2632{
2633 MPT_2_HOST32(sge, FlagsLength);
2634 MPT_2_HOST32(sge, u.Address64.Low);
2635 MPT_2_HOST32(sge, u.Address64.High);
2636}
2637
2638void
2639mpt2host_iocfacts_reply(MSG_IOC_FACTS_REPLY *rp)
2640{
2641 MPT_2_HOST16(rp, MsgVersion);
2642 MPT_2_HOST16(rp, HeaderVersion);
2643 MPT_2_HOST32(rp, MsgContext);
2644 MPT_2_HOST16(rp, IOCExceptions);
2645 MPT_2_HOST16(rp, IOCStatus);
2646 MPT_2_HOST32(rp, IOCLogInfo);
2647 MPT_2_HOST16(rp, ReplyQueueDepth);
2648 MPT_2_HOST16(rp, RequestFrameSize);
2649 MPT_2_HOST16(rp, Reserved_0101_FWVersion);
2650 MPT_2_HOST16(rp, ProductID);
2651 MPT_2_HOST32(rp, CurrentHostMfaHighAddr);
2652 MPT_2_HOST16(rp, GlobalCredits);
2653 MPT_2_HOST32(rp, CurrentSenseBufferHighAddr);
2654 MPT_2_HOST16(rp, CurReplyFrameSize);
2655 MPT_2_HOST32(rp, FWImageSize);
2656 MPT_2_HOST32(rp, IOCCapabilities);
2657 MPT_2_HOST32(rp, FWVersion.Word);
2658 MPT_2_HOST16(rp, HighPriorityQueueDepth);
2659 MPT_2_HOST16(rp, Reserved2);
2660 mpt2host_sge_simple_union(&rp->HostPageBufferSGE);
2661 MPT_2_HOST32(rp, ReplyFifoHostSignalingAddr);
2662}
2663
2664void
2665mpt2host_portfacts_reply(MSG_PORT_FACTS_REPLY *pfp)
2666{
2667 MPT_2_HOST16(pfp, Reserved);
2668 MPT_2_HOST16(pfp, Reserved1);
2669 MPT_2_HOST32(pfp, MsgContext);
2670 MPT_2_HOST16(pfp, Reserved2);
2671 MPT_2_HOST16(pfp, IOCStatus);
2672 MPT_2_HOST32(pfp, IOCLogInfo);
2673 MPT_2_HOST16(pfp, MaxDevices);
2674 MPT_2_HOST16(pfp, PortSCSIID);
2675 MPT_2_HOST16(pfp, ProtocolFlags);
2676 MPT_2_HOST16(pfp, MaxPostedCmdBuffers);
2677 MPT_2_HOST16(pfp, MaxPersistentIDs);
2678 MPT_2_HOST16(pfp, MaxLanBuckets);
2679 MPT_2_HOST16(pfp, Reserved4);
2680 MPT_2_HOST32(pfp, Reserved5);
2681}
2682void
2683mpt2host_config_page_ioc2(CONFIG_PAGE_IOC_2 *ioc2)
2684{
2685 int i;
2686 ioc2->CapabilitiesFlags = htole32(ioc2->CapabilitiesFlags);
2687 for (i = 0; i < MPI_IOC_PAGE_2_RAID_VOLUME_MAX; i++) {
2688 MPT_2_HOST16(ioc2, RaidVolume[i].Reserved3);
2689 }
2690}
2691
2692void
2693mpt2host_config_page_raid_vol_0(CONFIG_PAGE_RAID_VOL_0 *volp)
2694{
2695 int i;
2696 MPT_2_HOST16(volp, VolumeStatus.Reserved);
2697 MPT_2_HOST16(volp, VolumeSettings.Settings);
2698 MPT_2_HOST32(volp, MaxLBA);
2699 MPT_2_HOST32(volp, Reserved1);
2700 MPT_2_HOST32(volp, StripeSize);
2701 MPT_2_HOST32(volp, Reserved2);
2702 MPT_2_HOST32(volp, Reserved3);
2703 for (i = 0; i < MPI_RAID_VOL_PAGE_0_PHYSDISK_MAX; i++) {
2704 MPT_2_HOST16(volp, PhysDisk[i].Reserved);
2705 }
2706}
2707
2708void
2709mpt2host_mpi_raid_vol_indicator(MPI_RAID_VOL_INDICATOR *vi)
2710{
2711 MPT_2_HOST16(vi, TotalBlocks.High);
2712 MPT_2_HOST16(vi, TotalBlocks.Low);
2713 MPT_2_HOST16(vi, BlocksRemaining.High);
2714 MPT_2_HOST16(vi, BlocksRemaining.Low);
2715}
2716#endif