mpt.c revision 159919
1139749Simp/*- 2156000Smjacob * Generic routines for LSI Fusion adapters. 3101704Smjacob * FreeBSD Version. 4101704Smjacob * 5101704Smjacob * Copyright (c) 2000, 2001 by Greg Ansley 6101704Smjacob * 7101704Smjacob * Redistribution and use in source and binary forms, with or without 8101704Smjacob * modification, are permitted provided that the following conditions 9101704Smjacob * are met: 10101704Smjacob * 1. Redistributions of source code must retain the above copyright 11101704Smjacob * notice immediately at the beginning of the file, without modification, 12101704Smjacob * this list of conditions, and the following disclaimer. 13101704Smjacob * 2. The name of the author may not be used to endorse or promote products 14101704Smjacob * derived from this software without specific prior written permission. 15101704Smjacob * 16101704Smjacob * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17101704Smjacob * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18101704Smjacob * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19101704Smjacob * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 20101704Smjacob * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21101704Smjacob * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22101704Smjacob * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23101704Smjacob * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24101704Smjacob * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25101704Smjacob * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26101704Smjacob * SUCH DAMAGE. 27156000Smjacob */ 28156000Smjacob/*- 29156000Smjacob * Copyright (c) 2002, 2006 by Matthew Jacob 30156000Smjacob * All rights reserved. 
31156104Smjacob * 32156000Smjacob * Redistribution and use in source and binary forms, with or without 33156000Smjacob * modification, are permitted provided that the following conditions are 34156000Smjacob * met: 35156000Smjacob * 1. Redistributions of source code must retain the above copyright 36156000Smjacob * notice, this list of conditions and the following disclaimer. 37156000Smjacob * 2. Redistributions in binary form must reproduce at minimum a disclaimer 38156000Smjacob * substantially similar to the "NO WARRANTY" disclaimer below 39156000Smjacob * ("Disclaimer") and any redistribution must be conditioned upon including 40156000Smjacob * a substantially similar Disclaimer requirement for further binary 41156000Smjacob * redistribution. 42156000Smjacob * 3. Neither the names of the above listed copyright holders nor the names 43156000Smjacob * of any contributors may be used to endorse or promote products derived 44156000Smjacob * from this software without specific prior written permission. 45156104Smjacob * 46156000Smjacob * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 47156000Smjacob * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 48156000Smjacob * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 49156000Smjacob * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 50156000Smjacob * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 51156000Smjacob * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 52156000Smjacob * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 53156000Smjacob * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 54156000Smjacob * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 55156000Smjacob * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT 56156000Smjacob * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 57147883Sscottl * 58156000Smjacob * Support from Chris Ellsworth in order to make SAS adapters work 59156000Smjacob * is gratefully acknowledged. 60159052Smjacob * 61159052Smjacob * 62159052Smjacob * Support from LSI-Logic has also gone a great deal toward making this a 63159052Smjacob * workable subsystem and is gratefully acknowledged. 64101704Smjacob */ 65156000Smjacob/*- 66147883Sscottl * Copyright (c) 2004, Avid Technology, Inc. and its contributors. 67147883Sscottl * Copyright (c) 2005, WHEEL Sp. z o.o. 68147883Sscottl * Copyright (c) 2004, 2005 Justin T. Gibbs 69147883Sscottl * All rights reserved. 70156104Smjacob * 71147883Sscottl * Redistribution and use in source and binary forms, with or without 72147883Sscottl * modification, are permitted provided that the following conditions are 73147883Sscottl * met: 74147883Sscottl * 1. Redistributions of source code must retain the above copyright 75147883Sscottl * notice, this list of conditions and the following disclaimer. 76147883Sscottl * 2. 
Redistributions in binary form must reproduce at minimum a disclaimer 77147883Sscottl * substantially similar to the "NO WARRANTY" disclaimer below 78147883Sscottl * ("Disclaimer") and any redistribution must be conditioned upon including 79147883Sscottl * a substantially similar Disclaimer requirement for further binary 80147883Sscottl * redistribution. 81148679Sgibbs * 3. Neither the names of the above listed copyright holders nor the names 82148679Sgibbs * of any contributors may be used to endorse or promote products derived 83148679Sgibbs * from this software without specific prior written permission. 84156104Smjacob * 85147883Sscottl * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 86147883Sscottl * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 87147883Sscottl * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 88147883Sscottl * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 89147883Sscottl * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 90147883Sscottl * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 91147883Sscottl * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 92147883Sscottl * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 93147883Sscottl * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 94147883Sscottl * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT 95147883Sscottl * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
96101704Smjacob */ 97101704Smjacob 98134123Sobrien#include <sys/cdefs.h> 99134123Sobrien__FBSDID("$FreeBSD: head/sys/dev/mpt/mpt.c 159919 2006-06-25 04:23:26Z mjacob $"); 100134123Sobrien 101147883Sscottl#include <dev/mpt/mpt.h> 102147883Sscottl#include <dev/mpt/mpt_cam.h> /* XXX For static handler registration */ 103147883Sscottl#include <dev/mpt/mpt_raid.h> /* XXX For static handler registration */ 104102199Smjacob 105147883Sscottl#include <dev/mpt/mpilib/mpi.h> 106147883Sscottl#include <dev/mpt/mpilib/mpi_ioc.h> 107157117Smjacob#include <dev/mpt/mpilib/mpi_fc.h> 108157117Smjacob#include <dev/mpt/mpilib/mpi_targ.h> 109147883Sscottl 110147883Sscottl#include <sys/sysctl.h> 111147883Sscottl 112101704Smjacob#define MPT_MAX_TRYS 3 113101704Smjacob#define MPT_MAX_WAIT 300000 114101704Smjacob 115101704Smjacobstatic int maxwait_ack = 0; 116101704Smjacobstatic int maxwait_int = 0; 117101704Smjacobstatic int maxwait_state = 0; 118101704Smjacob 119147883SscottlTAILQ_HEAD(, mpt_softc) mpt_tailq = TAILQ_HEAD_INITIALIZER(mpt_tailq); 120147883Sscottlmpt_reply_handler_t *mpt_reply_handlers[MPT_NUM_REPLY_HANDLERS]; 121101704Smjacob 122147883Sscottlstatic mpt_reply_handler_t mpt_default_reply_handler; 123147883Sscottlstatic mpt_reply_handler_t mpt_config_reply_handler; 124147883Sscottlstatic mpt_reply_handler_t mpt_handshake_reply_handler; 125147883Sscottlstatic mpt_reply_handler_t mpt_event_reply_handler; 126147883Sscottlstatic void mpt_send_event_ack(struct mpt_softc *mpt, request_t *ack_req, 127147883Sscottl MSG_EVENT_NOTIFY_REPLY *msg, uint32_t context); 128155521Smjacobstatic int mpt_send_event_request(struct mpt_softc *mpt, int onoff); 129147883Sscottlstatic int mpt_soft_reset(struct mpt_softc *mpt); 130147883Sscottlstatic void mpt_hard_reset(struct mpt_softc *mpt); 131147883Sscottlstatic int mpt_configure_ioc(struct mpt_softc *mpt); 132157117Smjacobstatic int mpt_enable_ioc(struct mpt_softc *mpt, int); 133147883Sscottl 134147883Sscottl/************************* Personality 
Module Support *************************/ 135147883Sscottl/* 136147883Sscottl * We include one extra entry that is guaranteed to be NULL 137147883Sscottl * to simplify our itterator. 138147883Sscottl */ 139147883Sscottlstatic struct mpt_personality *mpt_personalities[MPT_MAX_PERSONALITIES + 1]; 140147883Sscottlstatic __inline struct mpt_personality* 141147883Sscottl mpt_pers_find(struct mpt_softc *, u_int); 142147883Sscottlstatic __inline struct mpt_personality* 143147883Sscottl mpt_pers_find_reverse(struct mpt_softc *, u_int); 144147883Sscottl 145147883Sscottlstatic __inline struct mpt_personality * 146147883Sscottlmpt_pers_find(struct mpt_softc *mpt, u_int start_at) 147101704Smjacob{ 148147883Sscottl KASSERT(start_at <= MPT_MAX_PERSONALITIES, 149147883Sscottl ("mpt_pers_find: starting position out of range\n")); 150147883Sscottl 151147883Sscottl while (start_at < MPT_MAX_PERSONALITIES 152147883Sscottl && (mpt->mpt_pers_mask & (0x1 << start_at)) == 0) { 153147883Sscottl start_at++; 154147883Sscottl } 155147883Sscottl return (mpt_personalities[start_at]); 156147883Sscottl} 157147883Sscottl 158147883Sscottl/* 159157117Smjacob * Used infrequently, so no need to optimize like a forward 160147883Sscottl * traversal where we use the MAX+1 is guaranteed to be NULL 161147883Sscottl * trick. 
162147883Sscottl */ 163147883Sscottlstatic __inline struct mpt_personality * 164147883Sscottlmpt_pers_find_reverse(struct mpt_softc *mpt, u_int start_at) 165147883Sscottl{ 166147883Sscottl while (start_at < MPT_MAX_PERSONALITIES 167147883Sscottl && (mpt->mpt_pers_mask & (0x1 << start_at)) == 0) { 168147883Sscottl start_at--; 169147883Sscottl } 170147883Sscottl if (start_at < MPT_MAX_PERSONALITIES) 171147883Sscottl return (mpt_personalities[start_at]); 172147883Sscottl return (NULL); 173147883Sscottl} 174147883Sscottl 175147883Sscottl#define MPT_PERS_FOREACH(mpt, pers) \ 176147883Sscottl for (pers = mpt_pers_find(mpt, /*start_at*/0); \ 177147883Sscottl pers != NULL; \ 178147883Sscottl pers = mpt_pers_find(mpt, /*start_at*/pers->id+1)) 179147883Sscottl 180147883Sscottl#define MPT_PERS_FOREACH_REVERSE(mpt, pers) \ 181147883Sscottl for (pers = mpt_pers_find_reverse(mpt, MPT_MAX_PERSONALITIES-1);\ 182147883Sscottl pers != NULL; \ 183147883Sscottl pers = mpt_pers_find_reverse(mpt, /*start_at*/pers->id-1)) 184147883Sscottl 185147883Sscottlstatic mpt_load_handler_t mpt_stdload; 186147883Sscottlstatic mpt_probe_handler_t mpt_stdprobe; 187147883Sscottlstatic mpt_attach_handler_t mpt_stdattach; 188157117Smjacobstatic mpt_enable_handler_t mpt_stdenable; 189147883Sscottlstatic mpt_event_handler_t mpt_stdevent; 190147883Sscottlstatic mpt_reset_handler_t mpt_stdreset; 191147883Sscottlstatic mpt_shutdown_handler_t mpt_stdshutdown; 192147883Sscottlstatic mpt_detach_handler_t mpt_stddetach; 193147883Sscottlstatic mpt_unload_handler_t mpt_stdunload; 194147883Sscottlstatic struct mpt_personality mpt_default_personality = 195147883Sscottl{ 196147883Sscottl .load = mpt_stdload, 197147883Sscottl .probe = mpt_stdprobe, 198147883Sscottl .attach = mpt_stdattach, 199157117Smjacob .enable = mpt_stdenable, 200147883Sscottl .event = mpt_stdevent, 201147883Sscottl .reset = mpt_stdreset, 202147883Sscottl .shutdown = mpt_stdshutdown, 203147883Sscottl .detach = mpt_stddetach, 204147883Sscottl 
.unload = mpt_stdunload 205147883Sscottl}; 206147883Sscottl 207147883Sscottlstatic mpt_load_handler_t mpt_core_load; 208147883Sscottlstatic mpt_attach_handler_t mpt_core_attach; 209157117Smjacobstatic mpt_enable_handler_t mpt_core_enable; 210147883Sscottlstatic mpt_reset_handler_t mpt_core_ioc_reset; 211147883Sscottlstatic mpt_event_handler_t mpt_core_event; 212147883Sscottlstatic mpt_shutdown_handler_t mpt_core_shutdown; 213147883Sscottlstatic mpt_shutdown_handler_t mpt_core_detach; 214147883Sscottlstatic mpt_unload_handler_t mpt_core_unload; 215147883Sscottlstatic struct mpt_personality mpt_core_personality = 216147883Sscottl{ 217147883Sscottl .name = "mpt_core", 218147883Sscottl .load = mpt_core_load, 219147883Sscottl .attach = mpt_core_attach, 220157117Smjacob .enable = mpt_core_enable, 221147883Sscottl .event = mpt_core_event, 222147883Sscottl .reset = mpt_core_ioc_reset, 223147883Sscottl .shutdown = mpt_core_shutdown, 224147883Sscottl .detach = mpt_core_detach, 225147883Sscottl .unload = mpt_core_unload, 226147883Sscottl}; 227147883Sscottl 228147883Sscottl/* 229147883Sscottl * Manual declaration so that DECLARE_MPT_PERSONALITY doesn't need 230147883Sscottl * ordering information. We want the core to always register FIRST. 231147883Sscottl * other modules are set to SI_ORDER_SECOND. 
232147883Sscottl */ 233147883Sscottlstatic moduledata_t mpt_core_mod = { 234147883Sscottl "mpt_core", mpt_modevent, &mpt_core_personality 235147883Sscottl}; 236147883SscottlDECLARE_MODULE(mpt_core, mpt_core_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST); 237147883SscottlMODULE_VERSION(mpt_core, 1); 238147883Sscottl 239157117Smjacob#define MPT_PERS_ATTACHED(pers, mpt) ((mpt)->mpt_pers_mask & (0x1 << pers->id)) 240147883Sscottl 241147883Sscottl 242147883Sscottlint 243147883Sscottlmpt_modevent(module_t mod, int type, void *data) 244147883Sscottl{ 245147883Sscottl struct mpt_personality *pers; 246147883Sscottl int error; 247147883Sscottl 248147883Sscottl pers = (struct mpt_personality *)data; 249147883Sscottl 250147883Sscottl error = 0; 251147883Sscottl switch (type) { 252147883Sscottl case MOD_LOAD: 253147883Sscottl { 254147883Sscottl mpt_load_handler_t **def_handler; 255147883Sscottl mpt_load_handler_t **pers_handler; 256147883Sscottl int i; 257147883Sscottl 258147883Sscottl for (i = 0; i < MPT_MAX_PERSONALITIES; i++) { 259147883Sscottl if (mpt_personalities[i] == NULL) 260147883Sscottl break; 261147883Sscottl } 262147883Sscottl if (i >= MPT_MAX_PERSONALITIES) { 263147883Sscottl error = ENOMEM; 264147883Sscottl break; 265147883Sscottl } 266147883Sscottl pers->id = i; 267147883Sscottl mpt_personalities[i] = pers; 268147883Sscottl 269147883Sscottl /* Install standard/noop handlers for any NULL entries. 
*/ 270147883Sscottl def_handler = MPT_PERS_FIRST_HANDLER(&mpt_default_personality); 271147883Sscottl pers_handler = MPT_PERS_FIRST_HANDLER(pers); 272147883Sscottl while (pers_handler <= MPT_PERS_LAST_HANDLER(pers)) { 273147883Sscottl if (*pers_handler == NULL) 274147883Sscottl *pers_handler = *def_handler; 275147883Sscottl pers_handler++; 276147883Sscottl def_handler++; 277147883Sscottl } 278147883Sscottl 279147883Sscottl error = (pers->load(pers)); 280147883Sscottl if (error != 0) 281147883Sscottl mpt_personalities[i] = NULL; 282147883Sscottl break; 283147883Sscottl } 284147883Sscottl case MOD_SHUTDOWN: 285147883Sscottl break; 286157117Smjacob#if __FreeBSD_version >= 500000 287147883Sscottl case MOD_QUIESCE: 288147883Sscottl break; 289157117Smjacob#endif 290147883Sscottl case MOD_UNLOAD: 291147883Sscottl error = pers->unload(pers); 292147883Sscottl mpt_personalities[pers->id] = NULL; 293147883Sscottl break; 294147883Sscottl default: 295147883Sscottl error = EINVAL; 296147883Sscottl break; 297147883Sscottl } 298147883Sscottl return (error); 299147883Sscottl} 300147883Sscottl 301147883Sscottlint 302147883Sscottlmpt_stdload(struct mpt_personality *pers) 303147883Sscottl{ 304147883Sscottl /* Load is always successfull. */ 305147883Sscottl return (0); 306147883Sscottl} 307147883Sscottl 308147883Sscottlint 309147883Sscottlmpt_stdprobe(struct mpt_softc *mpt) 310147883Sscottl{ 311147883Sscottl /* Probe is always successfull. */ 312147883Sscottl return (0); 313147883Sscottl} 314147883Sscottl 315147883Sscottlint 316147883Sscottlmpt_stdattach(struct mpt_softc *mpt) 317147883Sscottl{ 318147883Sscottl /* Attach is always successfull. */ 319147883Sscottl return (0); 320147883Sscottl} 321147883Sscottl 322147883Sscottlint 323157117Smjacobmpt_stdenable(struct mpt_softc *mpt) 324157117Smjacob{ 325157117Smjacob /* Enable is always successfull. 
*/ 326157117Smjacob return (0); 327157117Smjacob} 328157117Smjacob 329157117Smjacobint 330155521Smjacobmpt_stdevent(struct mpt_softc *mpt, request_t *req, MSG_EVENT_NOTIFY_REPLY *msg) 331147883Sscottl{ 332155521Smjacob mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_stdevent: 0x%x\n", msg->Event & 0xFF); 333147883Sscottl /* Event was not for us. */ 334147883Sscottl return (0); 335147883Sscottl} 336147883Sscottl 337147883Sscottlvoid 338147883Sscottlmpt_stdreset(struct mpt_softc *mpt, int type) 339147883Sscottl{ 340147883Sscottl} 341147883Sscottl 342147883Sscottlvoid 343147883Sscottlmpt_stdshutdown(struct mpt_softc *mpt) 344147883Sscottl{ 345147883Sscottl} 346147883Sscottl 347147883Sscottlvoid 348147883Sscottlmpt_stddetach(struct mpt_softc *mpt) 349147883Sscottl{ 350147883Sscottl} 351147883Sscottl 352147883Sscottlint 353147883Sscottlmpt_stdunload(struct mpt_personality *pers) 354147883Sscottl{ 355147883Sscottl /* Unload is always successfull. */ 356147883Sscottl return (0); 357147883Sscottl} 358147883Sscottl 359147883Sscottl/******************************* Bus DMA Support ******************************/ 360147883Sscottlvoid 361147883Sscottlmpt_map_rquest(void *arg, bus_dma_segment_t *segs, int nseg, int error) 362147883Sscottl{ 363147883Sscottl struct mpt_map_info *map_info; 364147883Sscottl 365147883Sscottl map_info = (struct mpt_map_info *)arg; 366147883Sscottl map_info->error = error; 367147883Sscottl map_info->phys = segs->ds_addr; 368147883Sscottl} 369147883Sscottl 370147883Sscottl/**************************** Reply/Event Handling ****************************/ 371147883Sscottlint 372147883Sscottlmpt_register_handler(struct mpt_softc *mpt, mpt_handler_type type, 373147883Sscottl mpt_handler_t handler, uint32_t *phandler_id) 374147883Sscottl{ 375147883Sscottl 376147883Sscottl switch (type) { 377147883Sscottl case MPT_HANDLER_REPLY: 378147883Sscottl { 379147883Sscottl u_int cbi; 380147883Sscottl u_int free_cbi; 381147883Sscottl 382147883Sscottl if (phandler_id == NULL) 
383147883Sscottl return (EINVAL); 384147883Sscottl 385147883Sscottl free_cbi = MPT_HANDLER_ID_NONE; 386147883Sscottl for (cbi = 0; cbi < MPT_NUM_REPLY_HANDLERS; cbi++) { 387147883Sscottl /* 388147883Sscottl * If the same handler is registered multiple 389147883Sscottl * times, don't error out. Just return the 390147883Sscottl * index of the original registration. 391147883Sscottl */ 392147883Sscottl if (mpt_reply_handlers[cbi] == handler.reply_handler) { 393147883Sscottl *phandler_id = MPT_CBI_TO_HID(cbi); 394147883Sscottl return (0); 395147883Sscottl } 396147883Sscottl 397147883Sscottl /* 398147883Sscottl * Fill from the front in the hope that 399147883Sscottl * all registered handlers consume only a 400147883Sscottl * single cache line. 401147883Sscottl * 402147883Sscottl * We don't break on the first empty slot so 403147883Sscottl * that the full table is checked to see if 404147883Sscottl * this handler was previously registered. 405147883Sscottl */ 406157117Smjacob if (free_cbi == MPT_HANDLER_ID_NONE && 407157117Smjacob (mpt_reply_handlers[cbi] 408147883Sscottl == mpt_default_reply_handler)) 409147883Sscottl free_cbi = cbi; 410147883Sscottl } 411157117Smjacob if (free_cbi == MPT_HANDLER_ID_NONE) { 412147883Sscottl return (ENOMEM); 413157117Smjacob } 414147883Sscottl mpt_reply_handlers[free_cbi] = handler.reply_handler; 415147883Sscottl *phandler_id = MPT_CBI_TO_HID(free_cbi); 416147883Sscottl break; 417147883Sscottl } 418147883Sscottl default: 419147883Sscottl mpt_prt(mpt, "mpt_register_handler unknown type %d\n", type); 420147883Sscottl return (EINVAL); 421147883Sscottl } 422147883Sscottl return (0); 423147883Sscottl} 424147883Sscottl 425147883Sscottlint 426147883Sscottlmpt_deregister_handler(struct mpt_softc *mpt, mpt_handler_type type, 427147883Sscottl mpt_handler_t handler, uint32_t handler_id) 428147883Sscottl{ 429147883Sscottl 430147883Sscottl switch (type) { 431147883Sscottl case MPT_HANDLER_REPLY: 432147883Sscottl { 433147883Sscottl u_int cbi; 
434147883Sscottl 435147883Sscottl cbi = MPT_CBI(handler_id); 436147883Sscottl if (cbi >= MPT_NUM_REPLY_HANDLERS 437147883Sscottl || mpt_reply_handlers[cbi] != handler.reply_handler) 438147883Sscottl return (ENOENT); 439147883Sscottl mpt_reply_handlers[cbi] = mpt_default_reply_handler; 440147883Sscottl break; 441147883Sscottl } 442147883Sscottl default: 443147883Sscottl mpt_prt(mpt, "mpt_deregister_handler unknown type %d\n", type); 444147883Sscottl return (EINVAL); 445147883Sscottl } 446147883Sscottl return (0); 447147883Sscottl} 448147883Sscottl 449147883Sscottlstatic int 450147883Sscottlmpt_default_reply_handler(struct mpt_softc *mpt, request_t *req, 451157117Smjacob uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 452147883Sscottl{ 453157117Smjacob mpt_prt(mpt, 454157117Smjacob "Default Handler Called: req=%p:%u reply_descriptor=%x frame=%p\n", 455157117Smjacob req, req->serno, reply_desc, reply_frame); 456147883Sscottl 457147883Sscottl if (reply_frame != NULL) 458147883Sscottl mpt_dump_reply_frame(mpt, reply_frame); 459147883Sscottl 460157117Smjacob mpt_prt(mpt, "Reply Frame Ignored\n"); 461147883Sscottl 462147883Sscottl return (/*free_reply*/TRUE); 463147883Sscottl} 464147883Sscottl 465147883Sscottlstatic int 466147883Sscottlmpt_config_reply_handler(struct mpt_softc *mpt, request_t *req, 467157117Smjacob uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 468147883Sscottl{ 469147883Sscottl if (req != NULL) { 470147883Sscottl 471147883Sscottl if (reply_frame != NULL) { 472147883Sscottl MSG_CONFIG *cfgp; 473147883Sscottl MSG_CONFIG_REPLY *reply; 474147883Sscottl 475147883Sscottl cfgp = (MSG_CONFIG *)req->req_vbuf; 476147883Sscottl reply = (MSG_CONFIG_REPLY *)reply_frame; 477147883Sscottl req->IOCStatus = le16toh(reply_frame->IOCStatus); 478147883Sscottl bcopy(&reply->Header, &cfgp->Header, 479147883Sscottl sizeof(cfgp->Header)); 480147883Sscottl } 481147883Sscottl req->state &= ~REQ_STATE_QUEUED; 482147883Sscottl req->state |= REQ_STATE_DONE; 
483147883Sscottl TAILQ_REMOVE(&mpt->request_pending_list, req, links); 484157354Smjacob if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) { 485147883Sscottl wakeup(req); 486157354Smjacob } 487147883Sscottl } 488147883Sscottl 489157354Smjacob return (TRUE); 490147883Sscottl} 491147883Sscottl 492147883Sscottlstatic int 493147883Sscottlmpt_handshake_reply_handler(struct mpt_softc *mpt, request_t *req, 494157117Smjacob uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 495147883Sscottl{ 496147883Sscottl /* Nothing to be done. */ 497157354Smjacob return (TRUE); 498147883Sscottl} 499147883Sscottl 500147883Sscottlstatic int 501147883Sscottlmpt_event_reply_handler(struct mpt_softc *mpt, request_t *req, 502157117Smjacob uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 503147883Sscottl{ 504147883Sscottl int free_reply; 505147883Sscottl 506157354Smjacob KASSERT(reply_frame != NULL, ("null reply in mpt_event_reply_handler")); 507157354Smjacob KASSERT(req != NULL, ("null request in mpt_event_reply_handler")); 508147883Sscottl 509147883Sscottl free_reply = TRUE; 510147883Sscottl switch (reply_frame->Function) { 511147883Sscottl case MPI_FUNCTION_EVENT_NOTIFICATION: 512147883Sscottl { 513147883Sscottl MSG_EVENT_NOTIFY_REPLY *msg; 514147883Sscottl struct mpt_personality *pers; 515147883Sscottl u_int handled; 516147883Sscottl 517147883Sscottl handled = 0; 518147883Sscottl msg = (MSG_EVENT_NOTIFY_REPLY *)reply_frame; 519147883Sscottl MPT_PERS_FOREACH(mpt, pers) 520147883Sscottl handled += pers->event(mpt, req, msg); 521147883Sscottl 522155521Smjacob if (handled == 0 && mpt->mpt_pers_mask == 0) { 523156301Smjacob mpt_lprt(mpt, MPT_PRT_INFO, 524155521Smjacob "No Handlers For Any Event Notify Frames. " 525155521Smjacob "Event %#x (ACK %sequired).\n", 526155521Smjacob msg->Event, msg->AckRequired? "r" : "not r"); 527155521Smjacob } else if (handled == 0) { 528156301Smjacob mpt_lprt(mpt, MPT_PRT_WARN, 529155521Smjacob "Unhandled Event Notify Frame. 
Event %#x " 530155521Smjacob "(ACK %sequired).\n", 531155521Smjacob msg->Event, msg->AckRequired? "r" : "not r"); 532155521Smjacob } 533147883Sscottl 534147883Sscottl if (msg->AckRequired) { 535147883Sscottl request_t *ack_req; 536147883Sscottl uint32_t context; 537147883Sscottl 538147883Sscottl context = htole32(req->index|MPT_REPLY_HANDLER_EVENTS); 539157354Smjacob ack_req = mpt_get_request(mpt, FALSE); 540147883Sscottl if (ack_req == NULL) { 541147883Sscottl struct mpt_evtf_record *evtf; 542147883Sscottl 543147883Sscottl evtf = (struct mpt_evtf_record *)reply_frame; 544147883Sscottl evtf->context = context; 545147883Sscottl LIST_INSERT_HEAD(&mpt->ack_frames, evtf, links); 546147883Sscottl free_reply = FALSE; 547147883Sscottl break; 548147883Sscottl } 549147883Sscottl mpt_send_event_ack(mpt, ack_req, msg, context); 550157354Smjacob /* 551157354Smjacob * Don't check for CONTINUATION_REPLY here 552157354Smjacob */ 553157354Smjacob return (free_reply); 554147883Sscottl } 555147883Sscottl break; 556147883Sscottl } 557147883Sscottl case MPI_FUNCTION_PORT_ENABLE: 558157354Smjacob mpt_lprt(mpt, MPT_PRT_DEBUG , "enable port reply\n"); 559147883Sscottl break; 560147883Sscottl case MPI_FUNCTION_EVENT_ACK: 561147883Sscottl break; 562147883Sscottl default: 563157354Smjacob mpt_prt(mpt, "unknown event function: %x\n", 564147883Sscottl reply_frame->Function); 565147883Sscottl break; 566147883Sscottl } 567147883Sscottl 568157354Smjacob /* 569157354Smjacob * I'm not sure that this continuation stuff works as it should. 570157354Smjacob * 571157354Smjacob * I've had FC async events occur that free the frame up because 572157354Smjacob * the continuation bit isn't set, and then additional async events 573157354Smjacob * then occur using the same context. As you might imagine, this 574157354Smjacob * leads to Very Bad Thing. 575157354Smjacob * 576157354Smjacob * Let's just be safe for now and not free them up until we figure 577157354Smjacob * out what's actually happening here. 
578157354Smjacob */ 579157354Smjacob#if 0 580157354Smjacob if ((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0) { 581147883Sscottl TAILQ_REMOVE(&mpt->request_pending_list, req, links); 582157354Smjacob mpt_free_request(mpt, req); 583157354Smjacob mpt_prt(mpt, "event_reply %x for req %p:%u NOT a continuation", 584157354Smjacob reply_frame->Function, req, req->serno); 585157354Smjacob if (reply_frame->Function == MPI_FUNCTION_EVENT_NOTIFICATION) { 586157354Smjacob MSG_EVENT_NOTIFY_REPLY *msg = 587157354Smjacob (MSG_EVENT_NOTIFY_REPLY *)reply_frame; 588157354Smjacob mpt_prtc(mpt, " Event=0x%x AckReq=%d", 589157354Smjacob msg->Event, msg->AckRequired); 590157354Smjacob } 591157354Smjacob } else { 592157354Smjacob mpt_prt(mpt, "event_reply %x for %p:%u IS a continuation", 593157354Smjacob reply_frame->Function, req, req->serno); 594157354Smjacob if (reply_frame->Function == MPI_FUNCTION_EVENT_NOTIFICATION) { 595157354Smjacob MSG_EVENT_NOTIFY_REPLY *msg = 596157354Smjacob (MSG_EVENT_NOTIFY_REPLY *)reply_frame; 597157354Smjacob mpt_prtc(mpt, " Event=0x%x AckReq=%d", 598157354Smjacob msg->Event, msg->AckRequired); 599157354Smjacob } 600157354Smjacob mpt_prtc(mpt, "\n"); 601147883Sscottl } 602157354Smjacob#endif 603147883Sscottl return (free_reply); 604147883Sscottl} 605147883Sscottl 606147883Sscottl/* 607147883Sscottl * Process an asynchronous event from the IOC. 
608147883Sscottl */ 609147883Sscottlstatic int 610147883Sscottlmpt_core_event(struct mpt_softc *mpt, request_t *req, 611147883Sscottl MSG_EVENT_NOTIFY_REPLY *msg) 612147883Sscottl{ 613155521Smjacob mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_core_event: 0x%x\n", 614155521Smjacob msg->Event & 0xFF); 615147883Sscottl switch(msg->Event & 0xFF) { 616147883Sscottl case MPI_EVENT_NONE: 617147883Sscottl break; 618147883Sscottl case MPI_EVENT_LOG_DATA: 619147883Sscottl { 620147883Sscottl int i; 621147883Sscottl 622147883Sscottl /* Some error occured that LSI wants logged */ 623147883Sscottl mpt_prt(mpt, "EvtLogData: IOCLogInfo: 0x%08x\n", 624147883Sscottl msg->IOCLogInfo); 625147883Sscottl mpt_prt(mpt, "\tEvtLogData: Event Data:"); 626147883Sscottl for (i = 0; i < msg->EventDataLength; i++) 627147883Sscottl mpt_prtc(mpt, " %08x", msg->Data[i]); 628147883Sscottl mpt_prtc(mpt, "\n"); 629147883Sscottl break; 630147883Sscottl } 631147883Sscottl case MPI_EVENT_EVENT_CHANGE: 632147883Sscottl /* 633147883Sscottl * This is just an acknowledgement 634147883Sscottl * of our mpt_send_event_request. 
 */
		break;
	case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
		break;
	default:
		return (0);
		break;
	}
	return (1);
}

/*
 * Build and send an EVENT_ACK message for a previously received event
 * notification.  The ack is sent on `ack_req' (a request the caller has
 * already allocated); `context' becomes the MsgContext of the ack so the
 * completion can be routed back to the right handler.
 */
static void
mpt_send_event_ack(struct mpt_softc *mpt, request_t *ack_req,
		   MSG_EVENT_NOTIFY_REPLY *msg, uint32_t context)
{
	MSG_EVENT_ACK *ackp;

	ackp = (MSG_EVENT_ACK *)ack_req->req_vbuf;
	memset(ackp, 0, sizeof (*ackp));
	ackp->Function = MPI_FUNCTION_EVENT_ACK;
	/* Echo the event and its context back to the IOC verbatim. */
	ackp->Event = msg->Event;
	ackp->EventContext = msg->EventContext;
	ackp->MsgContext = context;
	mpt_check_doorbell(mpt);
	mpt_send_cmd(mpt, ack_req);
}

/***************************** Interrupt Handling *****************************/
/*
 * Interrupt service routine.  Drains the reply post queue, decoding each
 * reply descriptor as either an "address reply" (a full reply frame in the
 * reply memory region) or a "context reply" (just a context value), looks
 * up the originating request and dispatches to the registered reply handler.
 *
 * `ntrips' bounds the number of loop iterations so a wedged IOC that keeps
 * posting replies cannot stall the system; error paths set ntrips = 1000 to
 * force a quick exit.
 */
void
mpt_intr(void *arg)
{
	struct mpt_softc *mpt;
	uint32_t reply_desc;
	int ntrips = 0;

	mpt = (struct mpt_softc *)arg;
	mpt_lprt(mpt, MPT_PRT_DEBUG2, "enter mpt_intr\n");
	while ((reply_desc = mpt_pop_reply_queue(mpt)) != MPT_REPLY_EMPTY) {
		request_t *req;
		MSG_DEFAULT_REPLY *reply_frame;
		uint32_t reply_baddr;
		uint32_t ctxt_idx;
		u_int cb_index;
		u_int req_index;
		int free_rf;

		req = NULL;
		reply_frame = NULL;
		reply_baddr = 0;
		if ((reply_desc & MPI_ADDRESS_REPLY_A_BIT) != 0) {
			u_int offset;
			/*
			 * Address reply: the descriptor encodes the bus
			 * address of a reply frame.  Sync that region of
			 * the reply DMA map so the CPU sees a coherent
			 * frame before reading it.
			 */
			reply_baddr = MPT_REPLY_BADDR(reply_desc);
			offset = reply_baddr - (mpt->reply_phys & 0xFFFFFFFF);
			bus_dmamap_sync_range(mpt->reply_dmat,
			    mpt->reply_dmap, offset, MPT_REPLY_SIZE,
			    BUS_DMASYNC_POSTREAD);
			reply_frame = MPT_REPLY_OTOV(mpt, offset);
			ctxt_idx = le32toh(reply_frame->MsgContext);
		} else {
			uint32_t type;

			type = MPI_GET_CONTEXT_REPLY_TYPE(reply_desc);
			ctxt_idx = reply_desc;
			mpt_lprt(mpt, MPT_PRT_DEBUG1, "Context Reply: 0x%08x\n",
				    reply_desc);

			switch (type) {
			case MPI_CONTEXT_REPLY_TYPE_SCSI_INIT:
				ctxt_idx &= MPI_CONTEXT_REPLY_CONTEXT_MASK;
				break;
			case MPI_CONTEXT_REPLY_TYPE_SCSI_TARGET:
				/*
				 * Target-mode replies index a separate
				 * command-pointer array; validate the index
				 * and the backpointer before use.
				 */
				ctxt_idx = GET_IO_INDEX(reply_desc);
				if (mpt->tgt_cmd_ptrs == NULL) {
					mpt_prt(mpt,
					    "mpt_intr: no target cmd ptrs\n");
					reply_desc = MPT_REPLY_EMPTY;
					break;
				}
				if (ctxt_idx >= mpt->tgt_cmds_allocated) {
					mpt_prt(mpt,
					    "mpt_intr: bad tgt cmd ctxt %u\n",
					    ctxt_idx);
					reply_desc = MPT_REPLY_EMPTY;
					/* force exit after this iteration */
					ntrips = 1000;
					break;
				}
				req = mpt->tgt_cmd_ptrs[ctxt_idx];
				if (req == NULL) {
					mpt_prt(mpt, "no request backpointer "
					    "at index %u", ctxt_idx);
					reply_desc = MPT_REPLY_EMPTY;
					ntrips = 1000;
					break;
				}
				/*
				 * Reformulate ctxt_idx to be just as if
				 * it were another type of context reply
				 * so the code below will find the request
				 * via indexing into the pool.
				 */
				ctxt_idx =
				    req->index | mpt->scsi_tgt_handler_id;
				req = NULL;
				break;
			case MPI_CONTEXT_REPLY_TYPE_LAN:
				mpt_prt(mpt, "LAN CONTEXT REPLY: 0x%08x\n",
				    reply_desc);
				reply_desc = MPT_REPLY_EMPTY;
				break;
			default:
				mpt_prt(mpt, "Context Reply 0x%08x?\n", type);
				reply_desc = MPT_REPLY_EMPTY;
				break;
			}
			if (reply_desc == MPT_REPLY_EMPTY) {
				if (ntrips++ > 1000) {
					break;
				}
				continue;
			}
		}

		/* Split context into handler index and request index. */
		cb_index = MPT_CONTEXT_TO_CBI(ctxt_idx);
		req_index = MPT_CONTEXT_TO_REQI(ctxt_idx);
		if (req_index < MPT_MAX_REQUESTS(mpt)) {
			req = &mpt->request_pool[req_index];
		} else {
			mpt_prt(mpt, "WARN: mpt_intr index == %d (reply_desc =="
			    " 0x%x)\n", req_index, reply_desc);
		}

		/* Handler returns nonzero if the reply frame may be freed. */
		free_rf = mpt_reply_handlers[cb_index](mpt, req,
		    reply_desc, reply_frame);

		if (reply_frame != NULL && free_rf) {
			mpt_free_reply(mpt, reply_baddr);
		}

		/*
		 * If we got ourselves disabled, don't get stuck in a loop
		 */
		if (mpt->disabled) {
			mpt_disable_ints(mpt);
			break;
		}
		if (ntrips++ > 1000) {
			break;
		}
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG2, "exit mpt_intr\n");
}
789147883Sscottl 790147883Sscottl/******************************* Error Recovery *******************************/ 791147883Sscottlvoid 792147883Sscottlmpt_complete_request_chain(struct mpt_softc *mpt, struct req_queue *chain, 793147883Sscottl u_int iocstatus) 794147883Sscottl{ 795147883Sscottl MSG_DEFAULT_REPLY ioc_status_frame; 796147883Sscottl request_t *req; 797147883Sscottl 798157354Smjacob memset(&ioc_status_frame, 0, sizeof(ioc_status_frame)); 799147883Sscottl ioc_status_frame.MsgLength = roundup2(sizeof(ioc_status_frame), 4); 800156104Smjacob ioc_status_frame.IOCStatus = iocstatus; 801147883Sscottl while((req = TAILQ_FIRST(chain)) != NULL) { 802147883Sscottl MSG_REQUEST_HEADER *msg_hdr; 803147883Sscottl u_int cb_index; 804157354Smjacob 805156796Smjacob TAILQ_REMOVE(chain, req, links); 806147883Sscottl msg_hdr = (MSG_REQUEST_HEADER *)req->req_vbuf; 807156104Smjacob ioc_status_frame.Function = msg_hdr->Function; 808156104Smjacob ioc_status_frame.MsgContext = msg_hdr->MsgContext; 809147883Sscottl cb_index = MPT_CONTEXT_TO_CBI(le32toh(msg_hdr->MsgContext)); 810157117Smjacob mpt_reply_handlers[cb_index](mpt, req, msg_hdr->MsgContext, 811157117Smjacob &ioc_status_frame); 812147883Sscottl } 813147883Sscottl} 814147883Sscottl 815147883Sscottl/********************************* Diagnostics ********************************/ 816147883Sscottl/* 817147883Sscottl * Perform a diagnostic dump of a reply frame. 
818147883Sscottl */ 819147883Sscottlvoid 820147883Sscottlmpt_dump_reply_frame(struct mpt_softc *mpt, MSG_DEFAULT_REPLY *reply_frame) 821147883Sscottl{ 822147883Sscottl mpt_prt(mpt, "Address Reply:\n"); 823147883Sscottl mpt_print_reply(reply_frame); 824147883Sscottl} 825147883Sscottl 826147883Sscottl/******************************* Doorbell Access ******************************/ 827147883Sscottlstatic __inline uint32_t mpt_rd_db(struct mpt_softc *mpt); 828147883Sscottlstatic __inline uint32_t mpt_rd_intr(struct mpt_softc *mpt); 829147883Sscottl 830147883Sscottlstatic __inline uint32_t 831147883Sscottlmpt_rd_db(struct mpt_softc *mpt) 832147883Sscottl{ 833101704Smjacob return mpt_read(mpt, MPT_OFFSET_DOORBELL); 834101704Smjacob} 835101704Smjacob 836147883Sscottlstatic __inline uint32_t 837147883Sscottlmpt_rd_intr(struct mpt_softc *mpt) 838101704Smjacob{ 839101704Smjacob return mpt_read(mpt, MPT_OFFSET_INTR_STATUS); 840101704Smjacob} 841101704Smjacob 842101704Smjacob/* Busy wait for a door bell to be read by IOC */ 843101704Smjacobstatic int 844147883Sscottlmpt_wait_db_ack(struct mpt_softc *mpt) 845101704Smjacob{ 846101704Smjacob int i; 847101704Smjacob for (i=0; i < MPT_MAX_WAIT; i++) { 848101704Smjacob if (!MPT_DB_IS_BUSY(mpt_rd_intr(mpt))) { 849101704Smjacob maxwait_ack = i > maxwait_ack ? i : maxwait_ack; 850157117Smjacob return (MPT_OK); 851101704Smjacob } 852157117Smjacob DELAY(200); 853101704Smjacob } 854157117Smjacob return (MPT_FAIL); 855101704Smjacob} 856101704Smjacob 857101704Smjacob/* Busy wait for a door bell interrupt */ 858101704Smjacobstatic int 859147883Sscottlmpt_wait_db_int(struct mpt_softc *mpt) 860101704Smjacob{ 861101704Smjacob int i; 862101704Smjacob for (i=0; i < MPT_MAX_WAIT; i++) { 863101704Smjacob if (MPT_DB_INTR(mpt_rd_intr(mpt))) { 864101704Smjacob maxwait_int = i > maxwait_int ? 
i : maxwait_int; 865101704Smjacob return MPT_OK; 866101704Smjacob } 867101704Smjacob DELAY(100); 868101704Smjacob } 869157117Smjacob return (MPT_FAIL); 870101704Smjacob} 871101704Smjacob 872101704Smjacob/* Wait for IOC to transition to a give state */ 873101704Smjacobvoid 874147883Sscottlmpt_check_doorbell(struct mpt_softc *mpt) 875101704Smjacob{ 876147883Sscottl uint32_t db = mpt_rd_db(mpt); 877101704Smjacob if (MPT_STATE(db) != MPT_DB_STATE_RUNNING) { 878147883Sscottl mpt_prt(mpt, "Device not running\n"); 879101704Smjacob mpt_print_db(db); 880101704Smjacob } 881101704Smjacob} 882101704Smjacob 883101704Smjacob/* Wait for IOC to transition to a give state */ 884101704Smjacobstatic int 885147883Sscottlmpt_wait_state(struct mpt_softc *mpt, enum DB_STATE_BITS state) 886101704Smjacob{ 887101704Smjacob int i; 888101704Smjacob 889101704Smjacob for (i = 0; i < MPT_MAX_WAIT; i++) { 890147883Sscottl uint32_t db = mpt_rd_db(mpt); 891101704Smjacob if (MPT_STATE(db) == state) { 892101704Smjacob maxwait_state = i > maxwait_state ? 
i : maxwait_state; 893101704Smjacob return (MPT_OK); 894101704Smjacob } 895101704Smjacob DELAY(100); 896101704Smjacob } 897101704Smjacob return (MPT_FAIL); 898101704Smjacob} 899101704Smjacob 900101704Smjacob 901147883Sscottl/************************* Intialization/Configuration ************************/ 902147883Sscottlstatic int mpt_download_fw(struct mpt_softc *mpt); 903147883Sscottl 904101704Smjacob/* Issue the reset COMMAND to the IOC */ 905147883Sscottlstatic int 906147883Sscottlmpt_soft_reset(struct mpt_softc *mpt) 907101704Smjacob{ 908147883Sscottl mpt_lprt(mpt, MPT_PRT_DEBUG, "soft reset\n"); 909101704Smjacob 910101704Smjacob /* Have to use hard reset if we are not in Running state */ 911101704Smjacob if (MPT_STATE(mpt_rd_db(mpt)) != MPT_DB_STATE_RUNNING) { 912147883Sscottl mpt_prt(mpt, "soft reset failed: device not running\n"); 913157117Smjacob return (MPT_FAIL); 914101704Smjacob } 915101704Smjacob 916101704Smjacob /* If door bell is in use we don't have a chance of getting 917101704Smjacob * a word in since the IOC probably crashed in message 918101704Smjacob * processing. So don't waste our time. 
 */
	if (MPT_DB_IS_IN_USE(mpt_rd_db(mpt))) {
		mpt_prt(mpt, "soft reset failed: doorbell wedged\n");
		return (MPT_FAIL);
	}

	/* Send the reset request to the IOC */
	mpt_write(mpt, MPT_OFFSET_DOORBELL,
	    MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET << MPI_DOORBELL_FUNCTION_SHIFT);
	if (mpt_wait_db_ack(mpt) != MPT_OK) {
		mpt_prt(mpt, "soft reset failed: ack timeout\n");
		return (MPT_FAIL);
	}

	/* Wait for the IOC to reload and come out of reset state */
	if (mpt_wait_state(mpt, MPT_DB_STATE_READY) != MPT_OK) {
		mpt_prt(mpt, "soft reset failed: device did not restart\n");
		return (MPT_FAIL);
	}

	return MPT_OK;
}

/*
 * Unlock the diagnostic register block by writing the magic key sequence
 * to the write-sequence register.  Retries up to 19 times with a 100ms
 * pause between attempts; returns 0 on success, EIO if the DRWE (diag
 * register write enable) bit never came on.
 */
static int
mpt_enable_diag_mode(struct mpt_softc *mpt)
{
	int try;

	try = 20;
	while (--try) {

		/* Already unlocked? */
		if ((mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC) & MPI_DIAG_DRWE) != 0)
			break;

		/* Enable diagnostic registers */
		mpt_write(mpt, MPT_OFFSET_SEQUENCE, 0xFF);
		mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_1ST_KEY_VALUE);
		mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_2ND_KEY_VALUE);
		mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_3RD_KEY_VALUE);
		mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_4TH_KEY_VALUE);
		mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_5TH_KEY_VALUE);

		DELAY(100000);
	}
	if (try == 0)
		return (EIO);
	return (0);
}

/* Re-lock the diagnostic register block (any non-key write locks it). */
static void
mpt_disable_diag_mode(struct mpt_softc *mpt)
{
	mpt_write(mpt, MPT_OFFSET_SEQUENCE, 0xFFFFFFFF);
}

/* This is a magic diagnostic reset that resets all the ARM
 * processors in the chip.
 */
static void
mpt_hard_reset(struct mpt_softc *mpt)
{
	int error;
	int wait;
	uint32_t diagreg;

	mpt_lprt(mpt, MPT_PRT_DEBUG, "hard reset\n");

	error = mpt_enable_diag_mode(mpt);
	if (error) {
		mpt_prt(mpt, "WARNING - Could not enter diagnostic mode !\n");
		mpt_prt(mpt, "Trying to reset anyway.\n");
	}

	diagreg = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);

	/*
	 * This appears to be a workaround required for some
	 * firmware or hardware revs.
	 */
	mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, diagreg | MPI_DIAG_DISABLE_ARM);
	DELAY(1000);

	/* Diag. port is now active so we can now hit the reset bit */
	mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, diagreg | MPI_DIAG_RESET_ADAPTER);

	/*
	 * Ensure that the reset has finished.  We delay 1ms
	 * prior to reading the register to make sure the chip
	 * has sufficiently completed its reset to handle register
	 * accesses.
	 */
	wait = 5000;
	do {
		DELAY(1000);
		diagreg = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);
	} while (--wait && (diagreg & MPI_DIAG_RESET_ADAPTER) == 0);

	if (wait == 0) {
		mpt_prt(mpt, "WARNING - Failed hard reset! "
		    "Trying to initialize anyway.\n");
	}

	/*
	 * If we have firmware to download, it must be loaded before
	 * the controller will become operational.  Do so now.
	 */
	if (mpt->fw_image != NULL) {

		error = mpt_download_fw(mpt);

		if (error) {
			mpt_prt(mpt, "WARNING - Firmware Download Failed!\n");
			mpt_prt(mpt, "Trying to initialize anyway.\n");
		}
	}

	/*
	 * Resetting the controller should have disabled write
	 * access to the diagnostic registers, but disable
	 * manually to be sure.
	 */
	mpt_disable_diag_mode(mpt);
}

/* Core personality's IOC-reset hook: fail all pending requests. */
static void
mpt_core_ioc_reset(struct mpt_softc *mpt, int type)
{
	/*
	 * Complete all pending requests with a status
	 * appropriate for an IOC reset.
	 */
	mpt_complete_request_chain(mpt, &mpt->request_pending_list,
	    MPI_IOCSTATUS_INVALID_STATE);
}


/*
 * Reset the IOC when needed. Try software command first then if needed
 * poke at the magic diagnostic reset.
 * Note that a hard reset resets
 * *both* IOCs on dual function chips (FC929 && LSI1030) as well as
 * fouls up the PCI configuration registers.
 */
int
mpt_reset(struct mpt_softc *mpt, int reinit)
{
	struct mpt_personality *pers;
	int ret;
	int retry_cnt = 0;

	/*
	 * Try a soft reset. If that fails, get out the big hammer.
	 */
 again:
	if ((ret = mpt_soft_reset(mpt)) != MPT_OK) {
		int	cnt;
		for (cnt = 0; cnt < 5; cnt++) {
			/* Failed; do a hard reset */
			mpt_hard_reset(mpt);

			/*
			 * Wait for the IOC to reload
			 * and come out of reset state
			 */
			ret = mpt_wait_state(mpt, MPT_DB_STATE_READY);
			if (ret == MPT_OK) {
				break;
			}
			/*
			 * Okay- try to check again...
			 */
			ret = mpt_wait_state(mpt, MPT_DB_STATE_READY);
			if (ret == MPT_OK) {
				break;
			}
			mpt_prt(mpt, "mpt_reset: failed hard reset (%d:%d)\n",
			    retry_cnt, cnt);
		}
	}

	if (retry_cnt == 0) {
		/*
		 * Invoke reset handlers.  We bump the reset count so
		 * that mpt_wait_req() understands that regardless of
		 * the specified wait condition, it should stop its wait.
		 */
		mpt->reset_cnt++;
		MPT_PERS_FOREACH(mpt, pers)
			pers->reset(mpt, ret);
	}

	if (reinit) {
		ret = mpt_enable_ioc(mpt, 1);
		if (ret == MPT_OK) {
			mpt_enable_ints(mpt);
		}
	}
	/* Retry the whole sequence (soft then hard) up to two more times. */
	if (ret != MPT_OK && retry_cnt++ < 2) {
		goto again;
	}
	return ret;
}

/* Return a command buffer to the free queue */
void
mpt_free_request(struct mpt_softc *mpt, request_t *req)
{
	request_t *nxt;
	struct mpt_evtf_record *record;
	uint32_t reply_baddr;

	if (req == NULL || req != &mpt->request_pool[req->index]) {
		panic("mpt_free_request bad req ptr\n");
		return;		/* NOTREACHED: panic does not return */
	}
	/* Free any chained continuation request first. */
	if ((nxt = req->chain) != NULL) {
		req->chain = NULL;
		mpt_free_request(mpt, nxt);	/* NB: recursion */
	}
	KASSERT(req->state != REQ_STATE_FREE, ("freeing free request"));
	KASSERT(!(req->state & REQ_STATE_LOCKED), ("freeing locked request"));
	KASSERT(MPT_OWNED(mpt), ("mpt_free_request: mpt not locked\n"));
	KASSERT(mpt_req_on_free_list(mpt, req) == 0,
	    ("mpt_free_request: req %p:%u func %x already on freelist",
	    req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
	KASSERT(mpt_req_on_pending_list(mpt, req) == 0,
	    ("mpt_free_request: req %p:%u func %x on pending list",
	    req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
#ifdef	INVARIANTS
	mpt_req_not_spcl(mpt, req, "mpt_free_request", __LINE__);
#endif

	req->ccb = NULL;
	if (LIST_EMPTY(&mpt->ack_frames)) {
		/*
		 * Insert free ones at the tail
		 */
		req->serno = 0;
		req->state = REQ_STATE_FREE;
#ifdef	INVARIANTS
		/* Poison the message so stale use is obvious. */
		memset(req->req_vbuf, 0xff, sizeof (MSG_REQUEST_HEADER));
#endif
		TAILQ_INSERT_TAIL(&mpt->request_free_list, req, links);
		if (mpt->getreqwaiter != 0) {
			mpt->getreqwaiter = 0;
			wakeup(&mpt->request_free_list);
		}
		return;
	}

	/*
	 * Process an ack frame deferred due to resource shortage.
	 * Instead of freeing, immediately reuse this request to send
	 * the deferred EVENT_ACK, then release its reply frame.
	 */
	record = LIST_FIRST(&mpt->ack_frames);
	LIST_REMOVE(record, links);
	req->state = REQ_STATE_ALLOCATED;
	mpt_assign_serno(mpt, req);
	mpt_send_event_ack(mpt, req, &record->reply, record->context);
	reply_baddr = (uint32_t)((uint8_t *)record - mpt->reply)
	    + (mpt->reply_phys & 0xFFFFFFFF);
	mpt_free_reply(mpt, reply_baddr);
}

/* Get a command buffer from the free queue */
request_t *
mpt_get_request(struct mpt_softc *mpt, int sleep_ok)
{
	request_t *req;

retry:
	KASSERT(MPT_OWNED(mpt), ("mpt_get_request: mpt not locked\n"));
	req = TAILQ_FIRST(&mpt->request_free_list);
	if (req != NULL) {
		KASSERT(req == &mpt->request_pool[req->index],
		    ("mpt_get_request: corrupted request free list\n"));
		KASSERT(req->state == REQ_STATE_FREE,
		    ("req %p:%u not free on free list %x index %d function %x",
		    req, req->serno, req->state, req->index,
		    ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
		TAILQ_REMOVE(&mpt->request_free_list, req, links);
		req->state = REQ_STATE_ALLOCATED;
		req->chain = NULL;
		mpt_assign_serno(mpt, req);
	} else if (sleep_ok != 0) {
		/* Sleep until mpt_free_request() wakes us, then retry. */
		mpt->getreqwaiter = 1;
		mpt_sleep(mpt, &mpt->request_free_list, PUSER, "mptgreq", 0);
		goto retry;
	}
	/* NULL if the list is empty and sleeping was not allowed. */
	return (req);
}

/* Pass the command to the IOC */
void
mpt_send_cmd(struct mpt_softc *mpt, request_t *req)
{
	if (mpt->verbose > MPT_PRT_DEBUG2) {
		mpt_dump_request(mpt, req);
	}
	/* Flush the request message to memory before the IOC reads it. */
	bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
	    BUS_DMASYNC_PREWRITE);
	req->state |= REQ_STATE_QUEUED;
	KASSERT(mpt_req_on_free_list(mpt, req) == 0,
	    ("req %p:%u func %x on freelist list in mpt_send_cmd",
	    req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
	KASSERT(mpt_req_on_pending_list(mpt, req) == 0,
	    ("req %p:%u func %x already on pending list in mpt_send_cmd",
	    req, req->serno, ((MSG_REQUEST_HEADER *)req->req_vbuf)->Function));
	TAILQ_INSERT_HEAD(&mpt->request_pending_list, req, links);
	/* Posting the physical address hands the request to the IOC. */
	mpt_write(mpt, MPT_OFFSET_REQUEST_Q, (uint32_t) req->req_pbuf);
}

/*
 * Wait for a request to complete.
 *
 * Inputs:
 *	mpt		softc of controller executing request
 *	req		request to wait for
 *	sleep_ok	nonzero implies may sleep in this context
 *	time_ms		timeout in ms.  0 implies no timeout.
 *
 * Return Values:
 *	0		Request completed
 *	non-0		Timeout fired before request completion.
 */
int
mpt_wait_req(struct mpt_softc *mpt, request_t *req,
	     mpt_req_state_t state, mpt_req_state_t mask,
	     int sleep_ok, int time_ms)
{
	int   error;
	int   timeout;
	u_int saved_cnt;

	/*
	 * timeout is in ms.  0 indicates infinite wait.
	 * Convert to ticks or 500us units depending on
	 * our sleep mode.
	 */
	if (sleep_ok != 0) {
		timeout = (time_ms * hz) / 1000;
	} else {
		timeout = time_ms * 2;
	}
	req->state |= REQ_STATE_NEED_WAKEUP;
	mask &= ~REQ_STATE_NEED_WAKEUP;
	/* A reset_cnt change while waiting means the IOC was reset. */
	saved_cnt = mpt->reset_cnt;
	while ((req->state & mask) != state && mpt->reset_cnt == saved_cnt) {
		if (sleep_ok != 0) {
			error = mpt_sleep(mpt, req, PUSER, "mptreq", timeout);
			if (error == EWOULDBLOCK) {
				timeout = 0;
				break;
			}
		} else {
			if (time_ms != 0 && --timeout == 0) {
				break;
			}
			DELAY(500);
			/* Poll for completions ourselves. */
			mpt_intr(mpt);
		}
	}
	req->state &= ~REQ_STATE_NEED_WAKEUP;
	if (mpt->reset_cnt != saved_cnt) {
		return (EIO);
	}
	if (time_ms && timeout <= 0) {
		MSG_REQUEST_HEADER *msg_hdr = req->req_vbuf;
		mpt_prt(mpt, "mpt_wait_req(%x) timed out\n", msg_hdr->Function);
		return (ETIMEDOUT);
	}
	return (0);
}

/*
 * Send a command to the IOC via the handshake register.
 *
 * Only done at initialization time and for certain unusual
 * commands such as device/bus reset as specified by LSI.
 */
int
mpt_send_handshake_cmd(struct mpt_softc *mpt, size_t len, void *cmd)
{
	int i;
	uint32_t data, *data32;

	/* Check condition of the IOC */
	data = mpt_rd_db(mpt);
	if ((MPT_STATE(data) != MPT_DB_STATE_READY
	  && MPT_STATE(data) != MPT_DB_STATE_RUNNING
	  && MPT_STATE(data) != MPT_DB_STATE_FAULT)
	 || MPT_DB_IS_IN_USE(data)) {
		mpt_prt(mpt, "handshake aborted - invalid doorbell state\n");
		mpt_print_db(data);
		return (EBUSY);
	}

	/* We move things in 32 bit chunks */
	len = (len + 3) >> 2;
	data32 = cmd;

	/* Clear any left over pending doorbell interupts */
	if (MPT_DB_INTR(mpt_rd_intr(mpt)))
		mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);

	/*
	 * Tell the handshake reg. we are going to send a command
	 * and how long it is going to be.
	 */
	data = (MPI_FUNCTION_HANDSHAKE << MPI_DOORBELL_FUNCTION_SHIFT) |
	    (len << MPI_DOORBELL_ADD_DWORDS_SHIFT);
	mpt_write(mpt, MPT_OFFSET_DOORBELL, data);

	/* Wait for the chip to notice */
	if (mpt_wait_db_int(mpt) != MPT_OK) {
		mpt_prt(mpt, "mpt_send_handshake_cmd timeout1\n");
		return (ETIMEDOUT);
	}

	/* Clear the interrupt */
	mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);

	if (mpt_wait_db_ack(mpt) != MPT_OK) {
		mpt_prt(mpt, "mpt_send_handshake_cmd timeout2\n");
		return (ETIMEDOUT);
	}

	/* Send the command, one 32-bit word at a time, waiting for
	 * the IOC to consume each word before writing the next. */
	for (i = 0; i < len; i++) {
		mpt_write(mpt, MPT_OFFSET_DOORBELL, *data32++);
		if (mpt_wait_db_ack(mpt) != MPT_OK) {
			mpt_prt(mpt,
			    "mpt_send_handshake_cmd timeout! index = %d\n",
			    i);
			return (ETIMEDOUT);
		}
	}
	return MPT_OK;
}

/* Get the response from the handshake register */
int
mpt_recv_handshake_reply(struct mpt_softc *mpt, size_t reply_len, void *reply)
{
	int left, reply_left;
	u_int16_t *data16;
	MSG_DEFAULT_REPLY *hdr;

	/* We move things out in 16 bit chunks */
	reply_len >>= 1;
	data16 = (u_int16_t *)reply;

	hdr = (MSG_DEFAULT_REPLY *)reply;

	/* Get first word */
	if (mpt_wait_db_int(mpt) != MPT_OK) {
		mpt_prt(mpt, "mpt_recv_handshake_cmd timeout1\n");
		return ETIMEDOUT;
	}
	*data16++ = mpt_read(mpt, MPT_OFFSET_DOORBELL) & MPT_DB_DATA_MASK;
	mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);

	/* Get Second Word */
	if (mpt_wait_db_int(mpt) != MPT_OK) {
		mpt_prt(mpt, "mpt_recv_handshake_cmd timeout2\n");
		return ETIMEDOUT;
	}
	*data16++ = mpt_read(mpt, MPT_OFFSET_DOORBELL) & MPT_DB_DATA_MASK;
	mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);

	/*
	 * With the second word, we can now look at the length.
	 * Warn about a reply that's too short (except for IOC FACTS REPLY)
	 */
	if ((reply_len >> 1) != hdr->MsgLength &&
	    (hdr->Function != MPI_FUNCTION_IOC_FACTS)){
#if __FreeBSD_version >= 500000
		mpt_prt(mpt, "reply length does not match message length: "
			"got %x; expected %zx for function %x\n",
			hdr->MsgLength << 2, reply_len << 1, hdr->Function);
#else
		mpt_prt(mpt, "reply length does not match message length: "
			"got %x; expected %x for function %x\n",
			hdr->MsgLength << 2, reply_len << 1, hdr->Function);
#endif
	}

	/* Get rest of the reply; but don't overflow the provided buffer */
	left = (hdr->MsgLength << 1) - 2;
	reply_left =  reply_len - 2;
	while (left--) {
		u_int16_t datum;

		if (mpt_wait_db_int(mpt) != MPT_OK) {
			mpt_prt(mpt, "mpt_recv_handshake_cmd timeout3\n");
			return ETIMEDOUT;
		}
		datum = mpt_read(mpt, MPT_OFFSET_DOORBELL);

		/* Excess words are drained but discarded. */
		if (reply_left-- > 0)
			*data16++ = datum & MPT_DB_DATA_MASK;

		mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
	}

	/* One more wait & clear at the end */
	if (mpt_wait_db_int(mpt) != MPT_OK) {
		mpt_prt(mpt, "mpt_recv_handshake_cmd timeout4\n");
		return ETIMEDOUT;
	}
	mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);

	if ((hdr->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
		if (mpt->verbose >= MPT_PRT_TRACE)
			mpt_print_reply(hdr);
		return (MPT_FAIL | hdr->IOCStatus);
	}

	return (0);
}

/* Fetch the IOC FACTS reply into *freplp via the doorbell handshake. */
static int
mpt_get_iocfacts(struct mpt_softc *mpt, MSG_IOC_FACTS_REPLY *freplp)
{
	MSG_IOC_FACTS f_req;
	int error;

	memset(&f_req, 0, sizeof f_req);
	f_req.Function = MPI_FUNCTION_IOC_FACTS;
	f_req.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
	error = mpt_send_handshake_cmd(mpt, sizeof f_req, &f_req);
	if (error)
		return(error);
	error = mpt_recv_handshake_reply(mpt, sizeof (*freplp), freplp);
	return (error);
}

/* Fetch the PORT FACTS reply into *freplp via the doorbell handshake. */
static int
mpt_get_portfacts(struct mpt_softc *mpt, MSG_PORT_FACTS_REPLY *freplp)
{
	MSG_PORT_FACTS f_req;
	int error;

	/* XXX: Only getting PORT FACTS for Port 0 */
	memset(&f_req, 0, sizeof f_req);
	f_req.Function = MPI_FUNCTION_PORT_FACTS;
	f_req.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
	error = mpt_send_handshake_cmd(mpt, sizeof f_req, &f_req);
	if (error)
		return(error);
	error = mpt_recv_handshake_reply(mpt, sizeof (*freplp), freplp);
	return (error);
}

/*
 * Send the initialization request. This is where we specify how many
 * SCSI busses and how many devices per bus we wish to emulate.
1474101704Smjacob * This is also the command that specifies the max size of the reply 1475101704Smjacob * frames from the IOC that we will be allocating. 1476101704Smjacob */ 1477101704Smjacobstatic int 1478147883Sscottlmpt_send_ioc_init(struct mpt_softc *mpt, uint32_t who) 1479101704Smjacob{ 1480101704Smjacob int error = 0; 1481101704Smjacob MSG_IOC_INIT init; 1482101704Smjacob MSG_IOC_INIT_REPLY reply; 1483101704Smjacob 1484157354Smjacob memset(&init, 0, sizeof init); 1485101704Smjacob init.WhoInit = who; 1486101704Smjacob init.Function = MPI_FUNCTION_IOC_INIT; 1487159919Smjacob init.MaxDevices = mpt->mpt_max_devices; 1488101704Smjacob init.MaxBuses = 1; 1489155521Smjacob 1490155521Smjacob init.MsgVersion = htole16(MPI_VERSION); 1491155521Smjacob init.HeaderVersion = htole16(MPI_HEADER_VERSION); 1492155521Smjacob init.ReplyFrameSize = htole16(MPT_REPLY_SIZE); 1493147883Sscottl init.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE); 1494101704Smjacob 1495101704Smjacob if ((error = mpt_send_handshake_cmd(mpt, sizeof init, &init)) != 0) { 1496101704Smjacob return(error); 1497101704Smjacob } 1498101704Smjacob 1499101704Smjacob error = mpt_recv_handshake_reply(mpt, sizeof reply, &reply); 1500101704Smjacob return (error); 1501101704Smjacob} 1502101704Smjacob 1503102199Smjacob 1504102199Smjacob/* 1505102199Smjacob * Utiltity routine to read configuration headers and pages 1506102199Smjacob */ 1507147883Sscottlint 1508147883Sscottlmpt_issue_cfg_req(struct mpt_softc *mpt, request_t *req, u_int Action, 1509147883Sscottl u_int PageVersion, u_int PageLength, u_int PageNumber, 1510147883Sscottl u_int PageType, uint32_t PageAddress, bus_addr_t addr, 1511147883Sscottl bus_size_t len, int sleep_ok, int timeout_ms) 1512101704Smjacob{ 1513102199Smjacob MSG_CONFIG *cfgp; 1514147883Sscottl SGE_SIMPLE32 *se; 1515102199Smjacob 1516102199Smjacob cfgp = req->req_vbuf; 1517147883Sscottl memset(cfgp, 0, sizeof *cfgp); 1518147883Sscottl cfgp->Action = Action; 1519102199Smjacob 
	cfgp->Function = MPI_FUNCTION_CONFIG;
	cfgp->Header.PageVersion = PageVersion;
	cfgp->Header.PageLength = PageLength;
	cfgp->Header.PageNumber = PageNumber;
	cfgp->Header.PageType = PageType;
	/*
	 * NOTE(review): MsgContext below is byte-swapped with htole32(),
	 * but PageAddress and the SGE Address are stored unswapped --
	 * suspect on big-endian hosts; confirm against the MPI spec.
	 */
	cfgp->PageAddress = PageAddress;
	se = (SGE_SIMPLE32 *)&cfgp->PageBufferSGE;
	se->Address = addr;
	MPI_pSGE_SET_LENGTH(se, len);
	/* Data direction: host->IOC for page writes, IOC->host otherwise. */
	MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
	    MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
	    MPI_SGE_FLAGS_END_OF_LIST |
	    ((Action == MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT
	   || Action == MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM)
	   ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST)));
	cfgp->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_CONFIG);

	mpt_check_doorbell(mpt);
	mpt_send_cmd(mpt, req);
	/* Wait for the config reply handler to mark the request done. */
	return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
	    sleep_ok, timeout_ms));
}


/*
 * Read a configuration page header into *rslt.  Returns 0 on success,
 * ENOMEM if no request could be allocated, ETIMEDOUT on timeout,
 * EINVAL for an unsupported/invalid page, or EIO on other IOC status.
 */
int
mpt_read_cfg_header(struct mpt_softc *mpt, int PageType, int PageNumber,
		    uint32_t PageAddress, CONFIG_PAGE_HEADER *rslt,
		    int sleep_ok, int timeout_ms)
{
	request_t *req;
	MSG_CONFIG *cfgp;
	int error;

	req = mpt_get_request(mpt, sleep_ok);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_read_cfg_header: Get request failed!\n");
		return (ENOMEM);
	}

	error = mpt_issue_cfg_req(mpt, req, MPI_CONFIG_ACTION_PAGE_HEADER,
	    /*PageVersion*/0, /*PageLength*/0, PageNumber,
	    PageType, PageAddress, /*addr*/0, /*len*/0,
	    sleep_ok, timeout_ms);
	if (error != 0) {
		mpt_free_request(mpt, req);
		mpt_prt(mpt, "read_cfg_header timed out\n");
		return (ETIMEDOUT);
	}

	switch (req->IOCStatus & MPI_IOCSTATUS_MASK) {
	case MPI_IOCSTATUS_SUCCESS:
		cfgp = req->req_vbuf;
		bcopy(&cfgp->Header, rslt, sizeof(*rslt));
		error = 0;
		break;
	case MPI_IOCSTATUS_CONFIG_INVALID_PAGE:
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "Invalid Page Type %d Number %d Addr 0x%0x\n",
		    PageType, PageNumber, PageAddress);
		error = EINVAL;
		break;
	default:
		mpt_prt(mpt, "mpt_read_cfg_header: Config Info Status %x\n",
			req->IOCStatus);
		error = EIO;
		break;
	}
	mpt_free_request(mpt, req);
	return (error);
}

/*
 * Read a full configuration page into *hdr (which supplies the page
 * version/length/number/type from a prior header read).  The DMA
 * buffer for the transfer lives at MPT_RQSL(mpt) bytes into the
 * request's own buffer.  Returns 0 on success, -1 on any failure.
 */
int
mpt_read_cfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress,
		  CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok,
		  int timeout_ms)
{
	request_t *req;
	int error;

	req = mpt_get_request(mpt, sleep_ok);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_read_cfg_page: Get request failed!\n");
		return (-1);
	}

	error = mpt_issue_cfg_req(mpt, req, Action, hdr->PageVersion,
	    hdr->PageLength, hdr->PageNumber,
	    hdr->PageType & MPI_CONFIG_PAGETYPE_MASK,
	    PageAddress, req->req_pbuf +
MPT_RQSL(mpt), 1608147883Sscottl len, sleep_ok, timeout_ms); 1609147883Sscottl if (error != 0) { 1610147883Sscottl mpt_prt(mpt, "read_cfg_page(%d) timed out\n", Action); 1611147883Sscottl return (-1); 1612147883Sscottl } 1613102199Smjacob 1614147883Sscottl if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { 1615147883Sscottl mpt_prt(mpt, "mpt_read_cfg_page: Config Info Status %x\n", 1616147883Sscottl req->IOCStatus); 1617147883Sscottl mpt_free_request(mpt, req); 1618102199Smjacob return (-1); 1619102199Smjacob } 1620102199Smjacob bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap, 1621102199Smjacob BUS_DMASYNC_POSTREAD); 1622157117Smjacob memcpy(hdr, ((uint8_t *)req->req_vbuf)+MPT_RQSL(mpt), len); 1623102199Smjacob mpt_free_request(mpt, req); 1624102199Smjacob return (0); 1625102199Smjacob} 1626102199Smjacob 1627102822Smjacobint 1628147883Sscottlmpt_write_cfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress, 1629147883Sscottl CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok, 1630147883Sscottl int timeout_ms) 1631102199Smjacob{ 1632147883Sscottl request_t *req; 1633147883Sscottl u_int hdr_attr; 1634147883Sscottl int error; 1635102199Smjacob 1636102199Smjacob hdr_attr = hdr->PageType & MPI_CONFIG_PAGEATTR_MASK; 1637102199Smjacob if (hdr_attr != MPI_CONFIG_PAGEATTR_CHANGEABLE && 1638102199Smjacob hdr_attr != MPI_CONFIG_PAGEATTR_PERSISTENT) { 1639147883Sscottl mpt_prt(mpt, "page type 0x%x not changeable\n", 1640147883Sscottl hdr->PageType & MPI_CONFIG_PAGETYPE_MASK); 1641102199Smjacob return (-1); 1642102199Smjacob } 1643147883Sscottl hdr->PageType &= MPI_CONFIG_PAGETYPE_MASK, 1644102199Smjacob 1645147883Sscottl req = mpt_get_request(mpt, sleep_ok); 1646147883Sscottl if (req == NULL) 1647147883Sscottl return (-1); 1648102199Smjacob 1649157117Smjacob memcpy(((caddr_t)req->req_vbuf)+MPT_RQSL(mpt), hdr, len); 1650147883Sscottl /* Restore stripped out attributes */ 1651147883Sscottl hdr->PageType |= hdr_attr; 1652102199Smjacob 
	error = mpt_issue_cfg_req(mpt, req, Action, hdr->PageVersion,
	    hdr->PageLength, hdr->PageNumber,
	    hdr->PageType & MPI_CONFIG_PAGETYPE_MASK,
	    PageAddress, req->req_pbuf + MPT_RQSL(mpt),
	    len, sleep_ok, timeout_ms);
	if (error != 0) {
		/*
		 * NOTE(review): req is not freed on this timeout path,
		 * unlike mpt_read_cfg_header -- possible leak; confirm
		 * whether a timed-out request may safely be freed.
		 */
		mpt_prt(mpt, "mpt_write_cfg_page timed out\n");
		return (-1);
	}

	if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
		mpt_prt(mpt, "mpt_write_cfg_page: Config Info Status %x\n",
		    req->IOCStatus);
		mpt_free_request(mpt, req);
		return (-1);
	}
	mpt_free_request(mpt, req);
	return (0);
}

/*
 * Read IOC configuration information
 */
static int
mpt_read_config_info_ioc(struct mpt_softc *mpt)
{
	CONFIG_PAGE_HEADER hdr;
	struct mpt_raid_volume *mpt_raid;
	int rv;
	int i;
	size_t len;

	/* IOC Page 2 describes the RAID capabilities of the controller. */
	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC,
	    2, 0, &hdr, FALSE, 5000);
	/*
	 * If it's an invalid page, so what? Not a supported function....
1689156104Smjacob */ 1690158982Smjacob if (rv == EINVAL) { 1691156104Smjacob return (0); 1692158982Smjacob } 1693158982Smjacob if (rv) { 1694156104Smjacob return (rv); 1695158982Smjacob } 1696147883Sscottl 1697157354Smjacob#if __FreeBSD_version >= 500000 1698157141Smjacob mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC Page 2 Header: ver %x, len %zx, " 1699147883Sscottl "num %x, type %x\n", hdr.PageVersion, 1700147883Sscottl hdr.PageLength * sizeof(uint32_t), 1701147883Sscottl hdr.PageNumber, hdr.PageType); 1702157354Smjacob#else 1703157354Smjacob mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC Page 2 Header: ver %x, len %z, " 1704157354Smjacob "num %x, type %x\n", hdr.PageVersion, 1705157354Smjacob hdr.PageLength * sizeof(uint32_t), 1706157354Smjacob hdr.PageNumber, hdr.PageType); 1707157354Smjacob#endif 1708147883Sscottl 1709147883Sscottl len = hdr.PageLength * sizeof(uint32_t); 1710151075Sscottl mpt->ioc_page2 = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); 1711158982Smjacob if (mpt->ioc_page2 == NULL) { 1712158982Smjacob mpt_prt(mpt, "unable to allocate memory for IOC page 2\n"); 1713158982Smjacob mpt_raid_free_mem(mpt); 1714147883Sscottl return (ENOMEM); 1715158982Smjacob } 1716147883Sscottl memcpy(&mpt->ioc_page2->Header, &hdr, sizeof(hdr)); 1717158982Smjacob rv = mpt_read_cur_cfg_page(mpt, 0, 1718158982Smjacob &mpt->ioc_page2->Header, len, FALSE, 5000); 1719147883Sscottl if (rv) { 1720147883Sscottl mpt_prt(mpt, "failed to read IOC Page 2\n"); 1721158982Smjacob mpt_raid_free_mem(mpt); 1722158982Smjacob return (EIO); 1723158982Smjacob } 1724158982Smjacob 1725158982Smjacob if (mpt->ioc_page2->CapabilitiesFlags != 0) { 1726147883Sscottl uint32_t mask; 1727147883Sscottl 1728147883Sscottl mpt_prt(mpt, "Capabilities: ("); 1729147883Sscottl for (mask = 1; mask != 0; mask <<= 1) { 1730158982Smjacob if ((mpt->ioc_page2->CapabilitiesFlags & mask) == 0) { 1731147883Sscottl continue; 1732158982Smjacob } 1733147883Sscottl switch (mask) { 1734147883Sscottl case MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT: 
1735147883Sscottl mpt_prtc(mpt, " RAID-0"); 1736147883Sscottl break; 1737147883Sscottl case MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT: 1738147883Sscottl mpt_prtc(mpt, " RAID-1E"); 1739147883Sscottl break; 1740147883Sscottl case MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT: 1741147883Sscottl mpt_prtc(mpt, " RAID-1"); 1742147883Sscottl break; 1743147883Sscottl case MPI_IOCPAGE2_CAP_FLAGS_SES_SUPPORT: 1744147883Sscottl mpt_prtc(mpt, " SES"); 1745147883Sscottl break; 1746147883Sscottl case MPI_IOCPAGE2_CAP_FLAGS_SAFTE_SUPPORT: 1747147883Sscottl mpt_prtc(mpt, " SAFTE"); 1748147883Sscottl break; 1749147883Sscottl case MPI_IOCPAGE2_CAP_FLAGS_CROSS_CHANNEL_SUPPORT: 1750147883Sscottl mpt_prtc(mpt, " Multi-Channel-Arrays"); 1751147883Sscottl default: 1752147883Sscottl break; 1753147883Sscottl } 1754102199Smjacob } 1755147883Sscottl mpt_prtc(mpt, " )\n"); 1756147883Sscottl if ((mpt->ioc_page2->CapabilitiesFlags 1757147883Sscottl & (MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT 1758147883Sscottl | MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT 1759147883Sscottl | MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT)) != 0) { 1760147883Sscottl mpt_prt(mpt, "%d Active Volume%s(%d Max)\n", 1761147883Sscottl mpt->ioc_page2->NumActiveVolumes, 1762147883Sscottl mpt->ioc_page2->NumActiveVolumes != 1 1763147883Sscottl ? "s " : " ", 1764147883Sscottl mpt->ioc_page2->MaxVolumes); 1765147883Sscottl mpt_prt(mpt, "%d Hidden Drive Member%s(%d Max)\n", 1766147883Sscottl mpt->ioc_page2->NumActivePhysDisks, 1767147883Sscottl mpt->ioc_page2->NumActivePhysDisks != 1 1768147883Sscottl ? 
"s " : " ", 1769147883Sscottl mpt->ioc_page2->MaxPhysDisks); 1770147883Sscottl } 1771147883Sscottl } 1772102199Smjacob 1773147883Sscottl len = mpt->ioc_page2->MaxVolumes * sizeof(struct mpt_raid_volume); 1774158982Smjacob mpt->raid_volumes = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); 1775147883Sscottl if (mpt->raid_volumes == NULL) { 1776147883Sscottl mpt_prt(mpt, "Could not allocate RAID volume data\n"); 1777158982Smjacob mpt_raid_free_mem(mpt); 1778158982Smjacob return (ENOMEM); 1779147883Sscottl } 1780147883Sscottl 1781147883Sscottl /* 1782147883Sscottl * Copy critical data out of ioc_page2 so that we can 1783147883Sscottl * safely refresh the page without windows of unreliable 1784147883Sscottl * data. 1785147883Sscottl */ 1786147883Sscottl mpt->raid_max_volumes = mpt->ioc_page2->MaxVolumes; 1787147883Sscottl 1788158982Smjacob len = sizeof(*mpt->raid_volumes->config_page) + 1789158982Smjacob (sizeof (RAID_VOL0_PHYS_DISK) * (mpt->ioc_page2->MaxPhysDisks - 1)); 1790147883Sscottl for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) { 1791147883Sscottl mpt_raid = &mpt->raid_volumes[i]; 1792158982Smjacob mpt_raid->config_page = 1793158982Smjacob malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); 1794147883Sscottl if (mpt_raid->config_page == NULL) { 1795147883Sscottl mpt_prt(mpt, "Could not allocate RAID page data\n"); 1796158982Smjacob mpt_raid_free_mem(mpt); 1797158982Smjacob return (ENOMEM); 1798147883Sscottl } 1799147883Sscottl } 1800147883Sscottl mpt->raid_page0_len = len; 1801147883Sscottl 1802147883Sscottl len = mpt->ioc_page2->MaxPhysDisks * sizeof(struct mpt_raid_disk); 1803158982Smjacob mpt->raid_disks = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); 1804147883Sscottl if (mpt->raid_disks == NULL) { 1805147883Sscottl mpt_prt(mpt, "Could not allocate RAID disk data\n"); 1806158982Smjacob mpt_raid_free_mem(mpt); 1807158982Smjacob return (ENOMEM); 1808147883Sscottl } 1809147883Sscottl mpt->raid_max_disks = mpt->ioc_page2->MaxPhysDisks; 1810147883Sscottl 1811158982Smjacob /* 
	 * Load page 3.
	 */
	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC,
	    3, 0, &hdr, FALSE, 5000);
	if (rv) {
		mpt_raid_free_mem(mpt);
		return (EIO);
	}

	mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC Page 3 Header: %x %x %x %x\n",
	    hdr.PageVersion, hdr.PageLength, hdr.PageNumber, hdr.PageType);

	len = hdr.PageLength * sizeof(uint32_t);
	mpt->ioc_page3 = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (mpt->ioc_page3 == NULL) {
		mpt_prt(mpt, "unable to allocate memory for IOC page 3\n");
		mpt_raid_free_mem(mpt);
		return (ENOMEM);
	}
	memcpy(&mpt->ioc_page3->Header, &hdr, sizeof(hdr));
	rv = mpt_read_cur_cfg_page(mpt, 0,
	    &mpt->ioc_page3->Header, len, FALSE, 5000);
	if (rv) {
		mpt_raid_free_mem(mpt);
		return (EIO);
	}
	/* Configuration data is in hand; let the RAID thread proceed. */
	mpt_raid_wakeup(mpt);
	return (0);
}

/*
 * Enable IOC port
 */
static int
mpt_send_port_enable(struct mpt_softc *mpt, int port)
{
	request_t *req;
	MSG_PORT_ENABLE *enable_req;
	int error;

	req = mpt_get_request(mpt, /*sleep_ok*/FALSE);
	if (req == NULL)
		return (-1);

	enable_req = req->req_vbuf;
	memset(enable_req, 0, MPT_RQSL(mpt));

	enable_req->Function = MPI_FUNCTION_PORT_ENABLE;
	enable_req->MsgContext = htole32(req->index |
 MPT_REPLY_HANDLER_CONFIG);
	enable_req->PortNumber = port;

	mpt_check_doorbell(mpt);
	mpt_lprt(mpt, MPT_PRT_DEBUG, "enabling port %d\n", port);

	mpt_send_cmd(mpt, req);
	/* SAS and FC devices can take much longer to come ready. */
	error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
	    FALSE, (mpt->is_sas || mpt->is_fc)? 30000 : 3000);
	if (error != 0) {
		/*
		 * NOTE(review): req is not freed on this timeout path --
		 * possible leak; confirm whether a timed-out request may
		 * safely be freed while the IOC may still own it.
		 */
		mpt_prt(mpt, "port %d enable timed out\n", port);
		return (-1);
	}
	mpt_free_request(mpt, req);
	mpt_lprt(mpt, MPT_PRT_DEBUG, "enabled port %d\n", port);
	return (0);
}

/*
 * Enable/Disable asynchronous event reporting.
 */
static int
mpt_send_event_request(struct mpt_softc *mpt, int onoff)
{
	request_t *req;
	MSG_EVENT_NOTIFY *enable_req;

	req = mpt_get_request(mpt, FALSE);
	if (req == NULL) {
		return (ENOMEM);
	}
	enable_req = req->req_vbuf;
	memset(enable_req, 0, sizeof *enable_req);

	enable_req->Function = MPI_FUNCTION_EVENT_NOTIFICATION;
	enable_req->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_EVENTS);
	enable_req->Switch = onoff;

	mpt_check_doorbell(mpt);
	mpt_lprt(mpt, MPT_PRT_DEBUG, "%sabling async events\n",
	    onoff ? "en" : "dis");
	/*
	 * Send the command off, but don't wait for it.
	 */
	mpt_send_cmd(mpt, req);
	return (0);
}

/*
 * Un-mask the interrupts on the chip.
 */
void
mpt_enable_ints(struct mpt_softc *mpt)
{
	/* Unmask every thing except door bell int */
	mpt_write(mpt, MPT_OFFSET_INTR_MASK, MPT_INTR_DB_MASK);
}

/*
 * Mask the interrupts on the chip.
 */
void
mpt_disable_ints(struct mpt_softc *mpt)
{
	/* Mask all interrupts */
	mpt_write(mpt, MPT_OFFSET_INTR_MASK,
	    MPT_INTR_REPLY_MASK | MPT_INTR_DB_MASK);
}

/*
 * Attach the "debug" verbosity sysctl under this device's tree
 * (FreeBSD 5.x and later only).
 */
static void
mpt_sysctl_attach(struct mpt_softc *mpt)
{
#if __FreeBSD_version >= 500000
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev);

	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		       "debug", CTLFLAG_RW, &mpt->verbose, 0,
		       "Debugging/Verbose level");
#endif
}

/*
 * Probe and attach every registered personality that claims this
 * controller, then run each attached personality's enable hook.
 */
int
mpt_attach(struct mpt_softc *mpt)
{
	struct mpt_personality *pers;
	int i;
	int error;

	for (i = 0; i < MPT_MAX_PERSONALITIES; i++) {
		pers = mpt_personalities[i];
		if (pers == NULL) {
			continue;
		}
		if (pers->probe(mpt) == 0) {
			error = pers->attach(mpt);
			if (error != 0) {
				mpt_detach(mpt);
				return (error);
			}
			mpt->mpt_pers_mask |= (0x1 << pers->id);
			pers->use_count++;
		}
	}

	/*
	 * Now that we've attached everything, do the enable function
	 * for all of the personalities. This allows the personalities
	 * to do setups that are appropriate for them prior to enabling
	 * any ports.
	 */
	for (i = 0; i < MPT_MAX_PERSONALITIES; i++) {
		pers = mpt_personalities[i];
		if (pers != NULL && MPT_PERS_ATTACHED(pers, mpt) != 0) {
			error = pers->enable(mpt);
			if (error != 0) {
				mpt_prt(mpt, "personality %s attached but would"
				    " not enable (%d)\n", pers->name, error);
				mpt_detach(mpt);
				return (error);
			}
		}
	}
	return (0);
}

/*
 * Run each attached personality's shutdown hook, in reverse order.
 */
int
mpt_shutdown(struct mpt_softc *mpt)
{
	struct mpt_personality *pers;

	MPT_PERS_FOREACH_REVERSE(mpt, pers) {
		pers->shutdown(mpt);
	}
	return (0);
}

/*
 * Detach all personalities from this controller, in reverse order
 * of attachment, dropping their use counts.
 */
int
mpt_detach(struct mpt_softc *mpt)
{
	struct mpt_personality *pers;

	MPT_PERS_FOREACH_REVERSE(mpt, pers) {
		pers->detach(mpt);
		mpt->mpt_pers_mask &= ~(0x1 << pers->id);
		pers->use_count--;
	}

	return (0);
}

int
mpt_core_load(struct
 mpt_personality *pers)
{
	int i;

	/*
	 * Setup core handlers and insert the default handler
	 * into all "empty slots".
	 */
	for (i = 0; i < MPT_NUM_REPLY_HANDLERS; i++) {
		mpt_reply_handlers[i] = mpt_default_reply_handler;
	}

	mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_EVENTS)] =
	    mpt_event_reply_handler;
	mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_CONFIG)] =
	    mpt_config_reply_handler;
	mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_HANDSHAKE)] =
	    mpt_handshake_reply_handler;
	return (0);
}

/*
 * Initialize per-instance driver data and perform
 * initial controller configuration.
 */
int
mpt_core_attach(struct mpt_softc *mpt)
{
	int val;
	int error;


	LIST_INIT(&mpt->ack_frames);

	/* Put all request buffers on the free list */
	TAILQ_INIT(&mpt->request_pending_list);
	TAILQ_INIT(&mpt->request_free_list);
	TAILQ_INIT(&mpt->request_timeout_list);
	for (val = 0; val < MPT_MAX_REQUESTS(mpt); val++) {
		request_t *req = &mpt->request_pool[val];
		/* Mark allocated so mpt_free_request will accept it. */
		req->state = REQ_STATE_ALLOCATED;
		mpt_free_request(mpt, req);
	}

	/* Per-LUN target-mode queues. */
	for (val = 0; val < MPT_MAX_LUNS; val++) {
		STAILQ_INIT(&mpt->trt[val].atios);
		STAILQ_INIT(&mpt->trt[val].inots);
	}

	STAILQ_INIT(&mpt->trt_wildcard.atios);
	STAILQ_INIT(&mpt->trt_wildcard.inots);

	mpt->scsi_tgt_handler_id = MPT_HANDLER_ID_NONE;

	mpt_sysctl_attach(mpt);

	mpt_lprt(mpt, MPT_PRT_DEBUG, "doorbell req = %s\n",
	    mpt_ioc_diag(mpt_read(mpt, MPT_OFFSET_DOORBELL)));

	error = mpt_configure_ioc(mpt);

	return (error);
}

int
mpt_core_enable(struct mpt_softc *mpt)
{
	/*
	 * We enter with the IOC enabled, but async events
	 * not enabled, ports not enabled and interrupts
	 * not enabled.
	 */

	/*
	 * Enable asynchronous event reporting- all personalities
	 * have attached so that they should be able to now field
	 * async events.
	 */
	mpt_send_event_request(mpt, 1);

	/*
	 * Catch any pending interrupts
	 *
	 * This seems to be crucial- otherwise
	 * the portenable below times out.
	 */
	mpt_intr(mpt);

	/*
	 * Enable Interrupts
	 */
	mpt_enable_ints(mpt);

	/*
	 * Catch any pending interrupts
	 *
	 * This seems to be crucial- otherwise
	 * the portenable below times out.
	 */
	mpt_intr(mpt);

	/*
	 * Enable the port.
	 */
	if (mpt_send_port_enable(mpt, 0) != MPT_OK) {
		mpt_prt(mpt, "failed to enable port 0\n");
		return (ENXIO);
	}
	return (0);
}

void
mpt_core_shutdown(struct mpt_softc *mpt)
{
	mpt_disable_ints(mpt);
}

void
mpt_core_detach(struct mpt_softc *mpt)
{
	mpt_disable_ints(mpt);
}

int
mpt_core_unload(struct mpt_personality *pers)
{
	/* Unload is always successful. */
	return (0);
}

/* Size of an FW_UPLOAD request with its transaction-context + simple SGE. */
#define FW_UPLOAD_REQ_SIZE				\
	(sizeof(MSG_FW_UPLOAD) - sizeof(SGE_MPI_UNION)	\
       + sizeof(FW_UPLOAD_TCSGE) + sizeof(SGE_SIMPLE32))

/*
 * Upload the controller's running firmware image into the host buffer
 * at mpt->fw_phys via the doorbell handshake protocol.
 */
static int
mpt_upload_fw(struct mpt_softc *mpt)
{
	uint8_t fw_req_buf[FW_UPLOAD_REQ_SIZE];
	MSG_FW_UPLOAD_REPLY fw_reply;
	MSG_FW_UPLOAD *fw_req;
	FW_UPLOAD_TCSGE *tsge;
	SGE_SIMPLE32 *sge;
	uint32_t flags;
	int error;

	memset(&fw_req_buf, 0, sizeof(fw_req_buf));
	fw_req = (MSG_FW_UPLOAD *)fw_req_buf;
	fw_req->ImageType = MPI_FW_UPLOAD_ITYPE_FW_IOC_MEM;
	fw_req->Function = MPI_FUNCTION_FW_UPLOAD;
	fw_req->MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
	tsge = (FW_UPLOAD_TCSGE *)&fw_req->SGL;
	tsge->DetailsLength = 12;
	tsge->Flags = MPI_SGE_FLAGS_TRANSACTION_ELEMENT;
	tsge->ImageSize =
 htole32(mpt->fw_image_size);
	sge = (SGE_SIMPLE32 *)(tsge + 1);
	flags = (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER
	       | MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_SIMPLE_ELEMENT
	       | MPI_SGE_FLAGS_32_BIT_ADDRESSING | MPI_SGE_FLAGS_IOC_TO_HOST);
	flags <<= MPI_SGE_FLAGS_SHIFT;
	sge->FlagsLength = htole32(flags | mpt->fw_image_size);
	sge->Address = htole32(mpt->fw_phys);
	error = mpt_send_handshake_cmd(mpt, sizeof(fw_req_buf), &fw_req_buf);
	if (error)
		return(error);
	error = mpt_recv_handshake_reply(mpt, sizeof(fw_reply), &fw_reply);
	return (error);
}

/*
 * Write a buffer of 32-bit words to controller memory at 'addr'
 * through the diagnostic PIO window.  len is in bytes and is rounded
 * up to a whole number of words.
 */
static void
mpt_diag_outsl(struct mpt_softc *mpt, uint32_t addr,
	       uint32_t *data, bus_size_t len)
{
	uint32_t *data_end;

	data_end = data + (roundup2(len, sizeof(uint32_t)) / 4);
	/* SAS parts need the I/O port decoder turned on for PIO access. */
	if (mpt->is_sas) {
		pci_enable_io(mpt->dev, SYS_RES_IOPORT);
	}
	mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, addr);
	while (data != data_end) {
		mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, *data);
		data++;
	}
	if (mpt->is_sas) {
		pci_disable_io(mpt->dev, SYS_RES_IOPORT);
	}
}

/*
 * Download the previously uploaded firmware image back into the
 * controller after a hard reset.
 */
static int
mpt_download_fw(struct mpt_softc *mpt)
{
	MpiFwHeader_t *fw_hdr;
	int error;
	uint32_t ext_offset;
	uint32_t data;

	mpt_prt(mpt, "Downloading Firmware - Image Size %d\n",
	    mpt->fw_image_size);

	error = mpt_enable_diag_mode(mpt);
	if (error != 0) {
		mpt_prt(mpt, "Could not enter diagnostic mode!\n");
		return (EIO);
	}

	/* Halt the IOC's ARM processor while we rewrite its memory. */
	mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC,
	    MPI_DIAG_RW_ENABLE|MPI_DIAG_DISABLE_ARM);

	fw_hdr = (MpiFwHeader_t *)mpt->fw_image;
	mpt_diag_outsl(mpt, fw_hdr->LoadStartAddress, (uint32_t*)fw_hdr,
	    fw_hdr->ImageSize);

	/* Walk and download any chained extended images. */
	ext_offset = fw_hdr->NextImageHeaderOffset;
	while (ext_offset != 0) {
		MpiExtImageHeader_t *ext;

		ext = (MpiExtImageHeader_t *)((uintptr_t)fw_hdr + ext_offset);
		ext_offset = ext->NextImageHeaderOffset;

		mpt_diag_outsl(mpt, ext->LoadStartAddress, (uint32_t*)ext,
		    ext->ImageSize);
	}

	if (mpt->is_sas) {
		pci_enable_io(mpt->dev, SYS_RES_IOPORT);
	}
	/* Setup the address to jump to on reset. */
	mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, fw_hdr->IopResetRegAddr);
	mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, fw_hdr->IopResetVectorValue);

	/*
	 * The controller sets the "flash bad" status after attempting
	 * to auto-boot from flash. Clear the status so that the controller
	 * will continue the boot process with our newly installed firmware.
	 */
	mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, MPT_DIAG_MEM_CFG_BASE);
	data = mpt_pio_read(mpt, MPT_OFFSET_DIAG_DATA) | MPT_DIAG_MEM_CFG_BADFL;
	mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, MPT_DIAG_MEM_CFG_BASE);
	mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, data);

	if (mpt->is_sas) {
		pci_disable_io(mpt->dev, SYS_RES_IOPORT);
	}

	/*
	 * Re-enable the processor and clear the boot halt flag.
	 */
	data = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);
	data &= ~(MPI_DIAG_PREVENT_IOC_BOOT|MPI_DIAG_DISABLE_ARM);
	mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, data);

	mpt_disable_diag_mode(mpt);
	return (0);
}

/*
 * Allocate/Initialize data structures for the controller.  Called
 * once at instance startup.
 */
static int
mpt_configure_ioc(struct mpt_softc *mpt)
{
	MSG_PORT_FACTS_REPLY pfp;
	MSG_IOC_FACTS_REPLY facts;
	int try;
	int needreset;
	uint32_t max_chain_depth;

	needreset = 0;
	for (try = 0; try < MPT_MAX_TRYS; try++) {

		/*
		 * No need to reset if the IOC is already in the READY state.
		 *
		 * Force reset if initialization failed previously.
		 * Note that a hard_reset of the second channel of a '929
		 * will stop operation of the first channel. Hopefully, if the
		 * first channel is ok, the second will not require a hard
		 * reset.
2290101704Smjacob */ 2291157117Smjacob if (needreset || MPT_STATE(mpt_rd_db(mpt)) != 2292101704Smjacob MPT_DB_STATE_READY) { 2293157117Smjacob if (mpt_reset(mpt, FALSE) != MPT_OK) { 2294101704Smjacob continue; 2295157117Smjacob } 2296101704Smjacob } 2297147883Sscottl needreset = 0; 2298101704Smjacob 2299101704Smjacob if (mpt_get_iocfacts(mpt, &facts) != MPT_OK) { 2300147883Sscottl mpt_prt(mpt, "mpt_get_iocfacts failed\n"); 2301147883Sscottl needreset = 1; 2302101704Smjacob continue; 2303102199Smjacob } 2304102199Smjacob 2305147883Sscottl mpt->mpt_global_credits = le16toh(facts.GlobalCredits); 2306147883Sscottl mpt->request_frame_size = le16toh(facts.RequestFrameSize); 2307155521Smjacob mpt->ioc_facts_flags = facts.Flags; 2308147883Sscottl mpt_prt(mpt, "MPI Version=%d.%d.%d.%d\n", 2309147883Sscottl le16toh(facts.MsgVersion) >> 8, 2310147883Sscottl le16toh(facts.MsgVersion) & 0xFF, 2311147883Sscottl le16toh(facts.HeaderVersion) >> 8, 2312147883Sscottl le16toh(facts.HeaderVersion) & 0xFF); 2313155521Smjacob 2314155521Smjacob /* 2315155521Smjacob * Now that we know request frame size, we can calculate 2316155521Smjacob * the actual (reasonable) segment limit for read/write I/O. 2317155521Smjacob * 2318155521Smjacob * This limit is constrained by: 2319155521Smjacob * 2320155521Smjacob * + The size of each area we allocate per command (and how 2321155521Smjacob * many chain segments we can fit into it). 2322155521Smjacob * + The total number of areas we've set up. 2323155521Smjacob * + The actual chain depth the card will allow. 2324155521Smjacob * 2325155521Smjacob * The first area's segment count is limited by the I/O request 2326155521Smjacob * at the head of it. We cannot allocate realistically more 2327155521Smjacob * than MPT_MAX_REQUESTS areas. Therefore, to account for both 2328155521Smjacob * conditions, we'll just start out with MPT_MAX_REQUESTS-2. 
2329155521Smjacob * 2330155521Smjacob */ 2331155521Smjacob max_chain_depth = facts.MaxChainDepth; 2332155521Smjacob 2333155521Smjacob /* total number of request areas we (can) allocate */ 2334155521Smjacob mpt->max_seg_cnt = MPT_MAX_REQUESTS(mpt) - 2; 2335155521Smjacob 2336155521Smjacob /* converted to the number of chain areas possible */ 2337155521Smjacob mpt->max_seg_cnt *= MPT_NRFM(mpt); 2338155521Smjacob 2339155521Smjacob /* limited by the number of chain areas the card will support */ 2340155521Smjacob if (mpt->max_seg_cnt > max_chain_depth) { 2341155521Smjacob mpt_lprt(mpt, MPT_PRT_DEBUG, 2342155521Smjacob "chain depth limited to %u (from %u)\n", 2343155521Smjacob max_chain_depth, mpt->max_seg_cnt); 2344155521Smjacob mpt->max_seg_cnt = max_chain_depth; 2345155521Smjacob } 2346155521Smjacob 2347155521Smjacob /* converted to the number of simple sges in chain segments. */ 2348155521Smjacob mpt->max_seg_cnt *= (MPT_NSGL(mpt) - 1); 2349155521Smjacob 2350147883Sscottl mpt_lprt(mpt, MPT_PRT_DEBUG, 2351155521Smjacob "Maximum Segment Count: %u\n", mpt->max_seg_cnt); 2352155521Smjacob mpt_lprt(mpt, MPT_PRT_DEBUG, 2353147883Sscottl "MsgLength=%u IOCNumber = %d\n", 2354147883Sscottl facts.MsgLength, facts.IOCNumber); 2355147883Sscottl mpt_lprt(mpt, MPT_PRT_DEBUG, 2356155521Smjacob "IOCFACTS: GlobalCredits=%d BlockSize=%u bytes " 2357155521Smjacob "Request Frame Size %u bytes Max Chain Depth %u\n", 2358155521Smjacob mpt->mpt_global_credits, facts.BlockSize, 2359155521Smjacob mpt->request_frame_size << 2, max_chain_depth); 2360147883Sscottl mpt_lprt(mpt, MPT_PRT_DEBUG, 2361147883Sscottl "IOCFACTS: Num Ports %d, FWImageSize %d, " 2362147883Sscottl "Flags=%#x\n", facts.NumberOfPorts, 2363147883Sscottl le32toh(facts.FWImageSize), facts.Flags); 2364147883Sscottl 2365155521Smjacob 2366147883Sscottl if ((facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT) != 0) { 2367147883Sscottl struct mpt_map_info mi; 2368147883Sscottl int error; 2369147883Sscottl 2370147883Sscottl /* 
2371147883Sscottl * In some configurations, the IOC's firmware is 2372147883Sscottl * stored in a shared piece of system NVRAM that 2373147883Sscottl * is only accessable via the BIOS. In this 2374147883Sscottl * case, the firmware keeps a copy of firmware in 2375147883Sscottl * RAM until the OS driver retrieves it. Once 2376147883Sscottl * retrieved, we are responsible for re-downloading 2377147883Sscottl * the firmware after any hard-reset. 2378147883Sscottl */ 2379147883Sscottl mpt->fw_image_size = le32toh(facts.FWImageSize); 2380147883Sscottl error = mpt_dma_tag_create(mpt, mpt->parent_dmat, 2381147883Sscottl /*alignment*/1, /*boundary*/0, 2382147883Sscottl /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, 2383147883Sscottl /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, 2384147883Sscottl /*filterarg*/NULL, mpt->fw_image_size, 2385147883Sscottl /*nsegments*/1, /*maxsegsz*/mpt->fw_image_size, 2386147883Sscottl /*flags*/0, &mpt->fw_dmat); 2387147883Sscottl if (error != 0) { 2388147883Sscottl mpt_prt(mpt, "cannot create fw dma tag\n"); 2389147883Sscottl return (ENOMEM); 2390147883Sscottl } 2391147883Sscottl error = bus_dmamem_alloc(mpt->fw_dmat, 2392147883Sscottl (void **)&mpt->fw_image, BUS_DMA_NOWAIT, 2393147883Sscottl &mpt->fw_dmap); 2394147883Sscottl if (error != 0) { 2395147883Sscottl mpt_prt(mpt, "cannot allocate fw mem.\n"); 2396147883Sscottl bus_dma_tag_destroy(mpt->fw_dmat); 2397147883Sscottl return (ENOMEM); 2398147883Sscottl } 2399147883Sscottl mi.mpt = mpt; 2400147883Sscottl mi.error = 0; 2401147883Sscottl bus_dmamap_load(mpt->fw_dmat, mpt->fw_dmap, 2402147883Sscottl mpt->fw_image, mpt->fw_image_size, mpt_map_rquest, 2403147883Sscottl &mi, 0); 2404147883Sscottl mpt->fw_phys = mi.phys; 2405147883Sscottl 2406147883Sscottl error = mpt_upload_fw(mpt); 2407147883Sscottl if (error != 0) { 2408147883Sscottl mpt_prt(mpt, "fw upload failed.\n"); 2409147883Sscottl bus_dmamap_unload(mpt->fw_dmat, mpt->fw_dmap); 2410147883Sscottl bus_dmamem_free(mpt->fw_dmat, mpt->fw_image, 
2411147883Sscottl mpt->fw_dmap); 2412147883Sscottl bus_dma_tag_destroy(mpt->fw_dmat); 2413147883Sscottl mpt->fw_image = NULL; 2414147883Sscottl return (EIO); 2415147883Sscottl } 2416101704Smjacob } 2417101704Smjacob 2418102199Smjacob if (mpt_get_portfacts(mpt, &pfp) != MPT_OK) { 2419147883Sscottl mpt_prt(mpt, "mpt_get_portfacts failed\n"); 2420147883Sscottl needreset = 1; 2421102199Smjacob continue; 2422102199Smjacob } 2423102199Smjacob 2424147883Sscottl mpt_lprt(mpt, MPT_PRT_DEBUG, 2425147883Sscottl "PORTFACTS: Type %x PFlags %x IID %d MaxDev %d\n", 2426147883Sscottl pfp.PortType, pfp.ProtocolFlags, pfp.PortSCSIID, 2427147883Sscottl pfp.MaxDevices); 2428102199Smjacob 2429147883Sscottl mpt->mpt_port_type = pfp.PortType; 2430147883Sscottl mpt->mpt_proto_flags = pfp.ProtocolFlags; 2431102199Smjacob if (pfp.PortType != MPI_PORTFACTS_PORTTYPE_SCSI && 2432155521Smjacob pfp.PortType != MPI_PORTFACTS_PORTTYPE_SAS && 2433102199Smjacob pfp.PortType != MPI_PORTFACTS_PORTTYPE_FC) { 2434147883Sscottl mpt_prt(mpt, "Unsupported Port Type (%x)\n", 2435102199Smjacob pfp.PortType); 2436102199Smjacob return (ENXIO); 2437102199Smjacob } 2438157117Smjacob mpt->mpt_max_tgtcmds = le16toh(pfp.MaxPostedCmdBuffers); 2439157117Smjacob 2440102199Smjacob if (pfp.PortType == MPI_PORTFACTS_PORTTYPE_FC) { 2441102199Smjacob mpt->is_fc = 1; 2442155521Smjacob mpt->is_sas = 0; 2443159178Smjacob mpt->is_spi = 0; 2444155521Smjacob } else if (pfp.PortType == MPI_PORTFACTS_PORTTYPE_SAS) { 2445155521Smjacob mpt->is_fc = 0; 2446155521Smjacob mpt->is_sas = 1; 2447159178Smjacob mpt->is_spi = 0; 2448102199Smjacob } else { 2449102199Smjacob mpt->is_fc = 0; 2450155521Smjacob mpt->is_sas = 0; 2451159178Smjacob mpt->is_spi = 1; 2452102199Smjacob } 2453102199Smjacob mpt->mpt_ini_id = pfp.PortSCSIID; 2454155521Smjacob mpt->mpt_max_devices = pfp.MaxDevices; 2455102199Smjacob 2456157117Smjacob /* 2457157662Smjacob * Set our expected role with what this port supports. 
2458157117Smjacob */ 2459157117Smjacob 2460157662Smjacob mpt->role = MPT_ROLE_NONE; 2461157117Smjacob if (pfp.ProtocolFlags & MPI_PORTFACTS_PROTOCOL_INITIATOR) { 2462157662Smjacob mpt->role |= MPT_ROLE_INITIATOR; 2463157117Smjacob } 2464157117Smjacob if (pfp.ProtocolFlags & MPI_PORTFACTS_PROTOCOL_TARGET) { 2465157662Smjacob mpt->role |= MPT_ROLE_TARGET; 2466157117Smjacob } 2467157662Smjacob if (mpt->role == MPT_ROLE_NONE) { 2468157117Smjacob mpt_prt(mpt, "port does not support either target or " 2469157117Smjacob "initiator role\n"); 2470147883Sscottl return (ENXIO); 2471102199Smjacob } 2472102199Smjacob 2473157117Smjacob if (mpt_enable_ioc(mpt, 0) != MPT_OK) { 2474157117Smjacob mpt_prt(mpt, "unable to initialize IOC\n"); 2475157117Smjacob return (ENXIO); 2476157117Smjacob } 2477157117Smjacob 2478101704Smjacob /* 2479157117Smjacob * Read IOC configuration information. 2480158982Smjacob * 2481158982Smjacob * We need this to determine whether or not we have certain 2482158982Smjacob * settings for Integrated Mirroring (e.g.). 
2483101704Smjacob */ 2484147883Sscottl mpt_read_config_info_ioc(mpt); 2485101704Smjacob 2486101704Smjacob /* Everything worked */ 2487101704Smjacob break; 2488101704Smjacob } 2489101704Smjacob 2490101704Smjacob if (try >= MPT_MAX_TRYS) { 2491103914Smjacob mpt_prt(mpt, "failed to initialize IOC"); 2492101704Smjacob return (EIO); 2493101704Smjacob } 2494101704Smjacob 2495101704Smjacob return (0); 2496101704Smjacob} 2497147883Sscottl 2498147883Sscottlstatic int 2499157117Smjacobmpt_enable_ioc(struct mpt_softc *mpt, int portenable) 2500147883Sscottl{ 2501147883Sscottl uint32_t pptr; 2502147883Sscottl int val; 2503147883Sscottl 2504155521Smjacob if (mpt_send_ioc_init(mpt, MPI_WHOINIT_HOST_DRIVER) != MPT_OK) { 2505147883Sscottl mpt_prt(mpt, "mpt_send_ioc_init failed\n"); 2506147883Sscottl return (EIO); 2507147883Sscottl } 2508147883Sscottl 2509147883Sscottl mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_send_ioc_init ok\n"); 2510147883Sscottl 2511147883Sscottl if (mpt_wait_state(mpt, MPT_DB_STATE_RUNNING) != MPT_OK) { 2512147883Sscottl mpt_prt(mpt, "IOC failed to go to run state\n"); 2513147883Sscottl return (ENXIO); 2514147883Sscottl } 2515155521Smjacob mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC now at RUNSTATE\n"); 2516147883Sscottl 2517147883Sscottl /* 2518147883Sscottl * Give it reply buffers 2519147883Sscottl * 2520147883Sscottl * Do *not* exceed global credits. 2521147883Sscottl */ 2522156104Smjacob for (val = 0, pptr = mpt->reply_phys; 2523156104Smjacob (pptr + MPT_REPLY_SIZE) < (mpt->reply_phys + PAGE_SIZE); 2524147883Sscottl pptr += MPT_REPLY_SIZE) { 2525147883Sscottl mpt_free_reply(mpt, pptr); 2526147883Sscottl if (++val == mpt->mpt_global_credits - 1) 2527147883Sscottl break; 2528147883Sscottl } 2529147883Sscottl 2530157117Smjacob 2531147883Sscottl /* 2532157354Smjacob * Enable the port if asked. This is only done if we're resetting 2533157354Smjacob * the IOC after initial startup. 
2534147883Sscottl */ 2535157117Smjacob if (portenable) { 2536157117Smjacob /* 2537157117Smjacob * Enable asynchronous event reporting 2538157117Smjacob */ 2539157117Smjacob mpt_send_event_request(mpt, 1); 2540147883Sscottl 2541157117Smjacob if (mpt_send_port_enable(mpt, 0) != MPT_OK) { 2542157117Smjacob mpt_prt(mpt, "failed to enable port 0\n"); 2543157117Smjacob return (ENXIO); 2544157117Smjacob } 2545147883Sscottl } 2546156104Smjacob return (MPT_OK); 2547147883Sscottl} 2548