mpt.c revision 157354
1139749Simp/*- 2156000Smjacob * Generic routines for LSI Fusion adapters. 3101704Smjacob * FreeBSD Version. 4101704Smjacob * 5101704Smjacob * Copyright (c) 2000, 2001 by Greg Ansley 6101704Smjacob * 7101704Smjacob * Redistribution and use in source and binary forms, with or without 8101704Smjacob * modification, are permitted provided that the following conditions 9101704Smjacob * are met: 10101704Smjacob * 1. Redistributions of source code must retain the above copyright 11101704Smjacob * notice immediately at the beginning of the file, without modification, 12101704Smjacob * this list of conditions, and the following disclaimer. 13101704Smjacob * 2. The name of the author may not be used to endorse or promote products 14101704Smjacob * derived from this software without specific prior written permission. 15101704Smjacob * 16101704Smjacob * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17101704Smjacob * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18101704Smjacob * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19101704Smjacob * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 20101704Smjacob * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21101704Smjacob * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22101704Smjacob * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23101704Smjacob * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24101704Smjacob * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25101704Smjacob * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26101704Smjacob * SUCH DAMAGE. 27156000Smjacob */ 28156000Smjacob/*- 29156000Smjacob * Copyright (c) 2002, 2006 by Matthew Jacob 30156000Smjacob * All rights reserved. 
31156104Smjacob * 32156000Smjacob * Redistribution and use in source and binary forms, with or without 33156000Smjacob * modification, are permitted provided that the following conditions are 34156000Smjacob * met: 35156000Smjacob * 1. Redistributions of source code must retain the above copyright 36156000Smjacob * notice, this list of conditions and the following disclaimer. 37156000Smjacob * 2. Redistributions in binary form must reproduce at minimum a disclaimer 38156000Smjacob * substantially similar to the "NO WARRANTY" disclaimer below 39156000Smjacob * ("Disclaimer") and any redistribution must be conditioned upon including 40156000Smjacob * a substantially similar Disclaimer requirement for further binary 41156000Smjacob * redistribution. 42156000Smjacob * 3. Neither the names of the above listed copyright holders nor the names 43156000Smjacob * of any contributors may be used to endorse or promote products derived 44156000Smjacob * from this software without specific prior written permission. 45156104Smjacob * 46156000Smjacob * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 47156000Smjacob * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 48156000Smjacob * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 49156000Smjacob * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 50156000Smjacob * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 51156000Smjacob * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 52156000Smjacob * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 53156000Smjacob * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 54156000Smjacob * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 55156000Smjacob * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT 56156000Smjacob * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 57147883Sscottl * 58156000Smjacob * Support from Chris Ellsworth in order to make SAS adapters work 59156000Smjacob * is gratefully acknowledged. 60101704Smjacob */ 61156000Smjacob/*- 62147883Sscottl * Copyright (c) 2004, Avid Technology, Inc. and its contributors. 63147883Sscottl * Copyright (c) 2005, WHEEL Sp. z o.o. 64147883Sscottl * Copyright (c) 2004, 2005 Justin T. Gibbs 65147883Sscottl * All rights reserved. 66156104Smjacob * 67147883Sscottl * Redistribution and use in source and binary forms, with or without 68147883Sscottl * modification, are permitted provided that the following conditions are 69147883Sscottl * met: 70147883Sscottl * 1. Redistributions of source code must retain the above copyright 71147883Sscottl * notice, this list of conditions and the following disclaimer. 72147883Sscottl * 2. Redistributions in binary form must reproduce at minimum a disclaimer 73147883Sscottl * substantially similar to the "NO WARRANTY" disclaimer below 74147883Sscottl * ("Disclaimer") and any redistribution must be conditioned upon including 75147883Sscottl * a substantially similar Disclaimer requirement for further binary 76147883Sscottl * redistribution. 77148679Sgibbs * 3. 
Neither the names of the above listed copyright holders nor the names 78148679Sgibbs * of any contributors may be used to endorse or promote products derived 79148679Sgibbs * from this software without specific prior written permission. 80156104Smjacob * 81147883Sscottl * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 82147883Sscottl * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 83147883Sscottl * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 84147883Sscottl * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 85147883Sscottl * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 86147883Sscottl * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 87147883Sscottl * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 88147883Sscottl * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 89147883Sscottl * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 90147883Sscottl * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT 91147883Sscottl * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
92101704Smjacob */ 93101704Smjacob 94134123Sobrien#include <sys/cdefs.h> 95134123Sobrien__FBSDID("$FreeBSD: head/sys/dev/mpt/mpt.c 157354 2006-04-01 07:12:18Z mjacob $"); 96134123Sobrien 97147883Sscottl#include <dev/mpt/mpt.h> 98147883Sscottl#include <dev/mpt/mpt_cam.h> /* XXX For static handler registration */ 99147883Sscottl#include <dev/mpt/mpt_raid.h> /* XXX For static handler registration */ 100102199Smjacob 101147883Sscottl#include <dev/mpt/mpilib/mpi.h> 102147883Sscottl#include <dev/mpt/mpilib/mpi_ioc.h> 103157117Smjacob#include <dev/mpt/mpilib/mpi_fc.h> 104157117Smjacob#include <dev/mpt/mpilib/mpi_targ.h> 105147883Sscottl 106147883Sscottl#include <sys/sysctl.h> 107147883Sscottl 108101704Smjacob#define MPT_MAX_TRYS 3 109101704Smjacob#define MPT_MAX_WAIT 300000 110101704Smjacob 111101704Smjacobstatic int maxwait_ack = 0; 112101704Smjacobstatic int maxwait_int = 0; 113101704Smjacobstatic int maxwait_state = 0; 114101704Smjacob 115147883SscottlTAILQ_HEAD(, mpt_softc) mpt_tailq = TAILQ_HEAD_INITIALIZER(mpt_tailq); 116147883Sscottlmpt_reply_handler_t *mpt_reply_handlers[MPT_NUM_REPLY_HANDLERS]; 117101704Smjacob 118147883Sscottlstatic mpt_reply_handler_t mpt_default_reply_handler; 119147883Sscottlstatic mpt_reply_handler_t mpt_config_reply_handler; 120147883Sscottlstatic mpt_reply_handler_t mpt_handshake_reply_handler; 121147883Sscottlstatic mpt_reply_handler_t mpt_event_reply_handler; 122147883Sscottlstatic void mpt_send_event_ack(struct mpt_softc *mpt, request_t *ack_req, 123147883Sscottl MSG_EVENT_NOTIFY_REPLY *msg, uint32_t context); 124155521Smjacobstatic int mpt_send_event_request(struct mpt_softc *mpt, int onoff); 125147883Sscottlstatic int mpt_soft_reset(struct mpt_softc *mpt); 126147883Sscottlstatic void mpt_hard_reset(struct mpt_softc *mpt); 127147883Sscottlstatic int mpt_configure_ioc(struct mpt_softc *mpt); 128157117Smjacobstatic int mpt_enable_ioc(struct mpt_softc *mpt, int); 129147883Sscottl 130147883Sscottl/************************* Personality 
Module Support *************************/ 131147883Sscottl/* 132147883Sscottl * We include one extra entry that is guaranteed to be NULL 133147883Sscottl * to simplify our itterator. 134147883Sscottl */ 135147883Sscottlstatic struct mpt_personality *mpt_personalities[MPT_MAX_PERSONALITIES + 1]; 136147883Sscottlstatic __inline struct mpt_personality* 137147883Sscottl mpt_pers_find(struct mpt_softc *, u_int); 138147883Sscottlstatic __inline struct mpt_personality* 139147883Sscottl mpt_pers_find_reverse(struct mpt_softc *, u_int); 140147883Sscottl 141147883Sscottlstatic __inline struct mpt_personality * 142147883Sscottlmpt_pers_find(struct mpt_softc *mpt, u_int start_at) 143101704Smjacob{ 144147883Sscottl KASSERT(start_at <= MPT_MAX_PERSONALITIES, 145147883Sscottl ("mpt_pers_find: starting position out of range\n")); 146147883Sscottl 147147883Sscottl while (start_at < MPT_MAX_PERSONALITIES 148147883Sscottl && (mpt->mpt_pers_mask & (0x1 << start_at)) == 0) { 149147883Sscottl start_at++; 150147883Sscottl } 151147883Sscottl return (mpt_personalities[start_at]); 152147883Sscottl} 153147883Sscottl 154147883Sscottl/* 155157117Smjacob * Used infrequently, so no need to optimize like a forward 156147883Sscottl * traversal where we use the MAX+1 is guaranteed to be NULL 157147883Sscottl * trick. 
158147883Sscottl */ 159147883Sscottlstatic __inline struct mpt_personality * 160147883Sscottlmpt_pers_find_reverse(struct mpt_softc *mpt, u_int start_at) 161147883Sscottl{ 162147883Sscottl while (start_at < MPT_MAX_PERSONALITIES 163147883Sscottl && (mpt->mpt_pers_mask & (0x1 << start_at)) == 0) { 164147883Sscottl start_at--; 165147883Sscottl } 166147883Sscottl if (start_at < MPT_MAX_PERSONALITIES) 167147883Sscottl return (mpt_personalities[start_at]); 168147883Sscottl return (NULL); 169147883Sscottl} 170147883Sscottl 171147883Sscottl#define MPT_PERS_FOREACH(mpt, pers) \ 172147883Sscottl for (pers = mpt_pers_find(mpt, /*start_at*/0); \ 173147883Sscottl pers != NULL; \ 174147883Sscottl pers = mpt_pers_find(mpt, /*start_at*/pers->id+1)) 175147883Sscottl 176147883Sscottl#define MPT_PERS_FOREACH_REVERSE(mpt, pers) \ 177147883Sscottl for (pers = mpt_pers_find_reverse(mpt, MPT_MAX_PERSONALITIES-1);\ 178147883Sscottl pers != NULL; \ 179147883Sscottl pers = mpt_pers_find_reverse(mpt, /*start_at*/pers->id-1)) 180147883Sscottl 181147883Sscottlstatic mpt_load_handler_t mpt_stdload; 182147883Sscottlstatic mpt_probe_handler_t mpt_stdprobe; 183147883Sscottlstatic mpt_attach_handler_t mpt_stdattach; 184157117Smjacobstatic mpt_enable_handler_t mpt_stdenable; 185147883Sscottlstatic mpt_event_handler_t mpt_stdevent; 186147883Sscottlstatic mpt_reset_handler_t mpt_stdreset; 187147883Sscottlstatic mpt_shutdown_handler_t mpt_stdshutdown; 188147883Sscottlstatic mpt_detach_handler_t mpt_stddetach; 189147883Sscottlstatic mpt_unload_handler_t mpt_stdunload; 190147883Sscottlstatic struct mpt_personality mpt_default_personality = 191147883Sscottl{ 192147883Sscottl .load = mpt_stdload, 193147883Sscottl .probe = mpt_stdprobe, 194147883Sscottl .attach = mpt_stdattach, 195157117Smjacob .enable = mpt_stdenable, 196147883Sscottl .event = mpt_stdevent, 197147883Sscottl .reset = mpt_stdreset, 198147883Sscottl .shutdown = mpt_stdshutdown, 199147883Sscottl .detach = mpt_stddetach, 200147883Sscottl 
.unload = mpt_stdunload 201147883Sscottl}; 202147883Sscottl 203147883Sscottlstatic mpt_load_handler_t mpt_core_load; 204147883Sscottlstatic mpt_attach_handler_t mpt_core_attach; 205157117Smjacobstatic mpt_enable_handler_t mpt_core_enable; 206147883Sscottlstatic mpt_reset_handler_t mpt_core_ioc_reset; 207147883Sscottlstatic mpt_event_handler_t mpt_core_event; 208147883Sscottlstatic mpt_shutdown_handler_t mpt_core_shutdown; 209147883Sscottlstatic mpt_shutdown_handler_t mpt_core_detach; 210147883Sscottlstatic mpt_unload_handler_t mpt_core_unload; 211147883Sscottlstatic struct mpt_personality mpt_core_personality = 212147883Sscottl{ 213147883Sscottl .name = "mpt_core", 214147883Sscottl .load = mpt_core_load, 215147883Sscottl .attach = mpt_core_attach, 216157117Smjacob .enable = mpt_core_enable, 217147883Sscottl .event = mpt_core_event, 218147883Sscottl .reset = mpt_core_ioc_reset, 219147883Sscottl .shutdown = mpt_core_shutdown, 220147883Sscottl .detach = mpt_core_detach, 221147883Sscottl .unload = mpt_core_unload, 222147883Sscottl}; 223147883Sscottl 224147883Sscottl/* 225147883Sscottl * Manual declaration so that DECLARE_MPT_PERSONALITY doesn't need 226147883Sscottl * ordering information. We want the core to always register FIRST. 227147883Sscottl * other modules are set to SI_ORDER_SECOND. 
228147883Sscottl */ 229147883Sscottlstatic moduledata_t mpt_core_mod = { 230147883Sscottl "mpt_core", mpt_modevent, &mpt_core_personality 231147883Sscottl}; 232147883SscottlDECLARE_MODULE(mpt_core, mpt_core_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST); 233147883SscottlMODULE_VERSION(mpt_core, 1); 234147883Sscottl 235157117Smjacob#define MPT_PERS_ATTACHED(pers, mpt) ((mpt)->mpt_pers_mask & (0x1 << pers->id)) 236147883Sscottl 237147883Sscottl 238147883Sscottlint 239147883Sscottlmpt_modevent(module_t mod, int type, void *data) 240147883Sscottl{ 241147883Sscottl struct mpt_personality *pers; 242147883Sscottl int error; 243147883Sscottl 244147883Sscottl pers = (struct mpt_personality *)data; 245147883Sscottl 246147883Sscottl error = 0; 247147883Sscottl switch (type) { 248147883Sscottl case MOD_LOAD: 249147883Sscottl { 250147883Sscottl mpt_load_handler_t **def_handler; 251147883Sscottl mpt_load_handler_t **pers_handler; 252147883Sscottl int i; 253147883Sscottl 254147883Sscottl for (i = 0; i < MPT_MAX_PERSONALITIES; i++) { 255147883Sscottl if (mpt_personalities[i] == NULL) 256147883Sscottl break; 257147883Sscottl } 258147883Sscottl if (i >= MPT_MAX_PERSONALITIES) { 259147883Sscottl error = ENOMEM; 260147883Sscottl break; 261147883Sscottl } 262147883Sscottl pers->id = i; 263147883Sscottl mpt_personalities[i] = pers; 264147883Sscottl 265147883Sscottl /* Install standard/noop handlers for any NULL entries. 
*/ 266147883Sscottl def_handler = MPT_PERS_FIRST_HANDLER(&mpt_default_personality); 267147883Sscottl pers_handler = MPT_PERS_FIRST_HANDLER(pers); 268147883Sscottl while (pers_handler <= MPT_PERS_LAST_HANDLER(pers)) { 269147883Sscottl if (*pers_handler == NULL) 270147883Sscottl *pers_handler = *def_handler; 271147883Sscottl pers_handler++; 272147883Sscottl def_handler++; 273147883Sscottl } 274147883Sscottl 275147883Sscottl error = (pers->load(pers)); 276147883Sscottl if (error != 0) 277147883Sscottl mpt_personalities[i] = NULL; 278147883Sscottl break; 279147883Sscottl } 280147883Sscottl case MOD_SHUTDOWN: 281147883Sscottl break; 282157117Smjacob#if __FreeBSD_version >= 500000 283147883Sscottl case MOD_QUIESCE: 284147883Sscottl break; 285157117Smjacob#endif 286147883Sscottl case MOD_UNLOAD: 287147883Sscottl error = pers->unload(pers); 288147883Sscottl mpt_personalities[pers->id] = NULL; 289147883Sscottl break; 290147883Sscottl default: 291147883Sscottl error = EINVAL; 292147883Sscottl break; 293147883Sscottl } 294147883Sscottl return (error); 295147883Sscottl} 296147883Sscottl 297147883Sscottlint 298147883Sscottlmpt_stdload(struct mpt_personality *pers) 299147883Sscottl{ 300147883Sscottl /* Load is always successfull. */ 301147883Sscottl return (0); 302147883Sscottl} 303147883Sscottl 304147883Sscottlint 305147883Sscottlmpt_stdprobe(struct mpt_softc *mpt) 306147883Sscottl{ 307147883Sscottl /* Probe is always successfull. */ 308147883Sscottl return (0); 309147883Sscottl} 310147883Sscottl 311147883Sscottlint 312147883Sscottlmpt_stdattach(struct mpt_softc *mpt) 313147883Sscottl{ 314147883Sscottl /* Attach is always successfull. */ 315147883Sscottl return (0); 316147883Sscottl} 317147883Sscottl 318147883Sscottlint 319157117Smjacobmpt_stdenable(struct mpt_softc *mpt) 320157117Smjacob{ 321157117Smjacob /* Enable is always successfull. 
*/ 322157117Smjacob return (0); 323157117Smjacob} 324157117Smjacob 325157117Smjacobint 326155521Smjacobmpt_stdevent(struct mpt_softc *mpt, request_t *req, MSG_EVENT_NOTIFY_REPLY *msg) 327147883Sscottl{ 328155521Smjacob mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_stdevent: 0x%x\n", msg->Event & 0xFF); 329147883Sscottl /* Event was not for us. */ 330147883Sscottl return (0); 331147883Sscottl} 332147883Sscottl 333147883Sscottlvoid 334147883Sscottlmpt_stdreset(struct mpt_softc *mpt, int type) 335147883Sscottl{ 336147883Sscottl} 337147883Sscottl 338147883Sscottlvoid 339147883Sscottlmpt_stdshutdown(struct mpt_softc *mpt) 340147883Sscottl{ 341147883Sscottl} 342147883Sscottl 343147883Sscottlvoid 344147883Sscottlmpt_stddetach(struct mpt_softc *mpt) 345147883Sscottl{ 346147883Sscottl} 347147883Sscottl 348147883Sscottlint 349147883Sscottlmpt_stdunload(struct mpt_personality *pers) 350147883Sscottl{ 351147883Sscottl /* Unload is always successfull. */ 352147883Sscottl return (0); 353147883Sscottl} 354147883Sscottl 355147883Sscottl/******************************* Bus DMA Support ******************************/ 356147883Sscottlvoid 357147883Sscottlmpt_map_rquest(void *arg, bus_dma_segment_t *segs, int nseg, int error) 358147883Sscottl{ 359147883Sscottl struct mpt_map_info *map_info; 360147883Sscottl 361147883Sscottl map_info = (struct mpt_map_info *)arg; 362147883Sscottl map_info->error = error; 363147883Sscottl map_info->phys = segs->ds_addr; 364147883Sscottl} 365147883Sscottl 366147883Sscottl/**************************** Reply/Event Handling ****************************/ 367147883Sscottlint 368147883Sscottlmpt_register_handler(struct mpt_softc *mpt, mpt_handler_type type, 369147883Sscottl mpt_handler_t handler, uint32_t *phandler_id) 370147883Sscottl{ 371147883Sscottl 372147883Sscottl switch (type) { 373147883Sscottl case MPT_HANDLER_REPLY: 374147883Sscottl { 375147883Sscottl u_int cbi; 376147883Sscottl u_int free_cbi; 377147883Sscottl 378147883Sscottl if (phandler_id == NULL) 
379147883Sscottl return (EINVAL); 380147883Sscottl 381147883Sscottl free_cbi = MPT_HANDLER_ID_NONE; 382147883Sscottl for (cbi = 0; cbi < MPT_NUM_REPLY_HANDLERS; cbi++) { 383147883Sscottl /* 384147883Sscottl * If the same handler is registered multiple 385147883Sscottl * times, don't error out. Just return the 386147883Sscottl * index of the original registration. 387147883Sscottl */ 388147883Sscottl if (mpt_reply_handlers[cbi] == handler.reply_handler) { 389147883Sscottl *phandler_id = MPT_CBI_TO_HID(cbi); 390147883Sscottl return (0); 391147883Sscottl } 392147883Sscottl 393147883Sscottl /* 394147883Sscottl * Fill from the front in the hope that 395147883Sscottl * all registered handlers consume only a 396147883Sscottl * single cache line. 397147883Sscottl * 398147883Sscottl * We don't break on the first empty slot so 399147883Sscottl * that the full table is checked to see if 400147883Sscottl * this handler was previously registered. 401147883Sscottl */ 402157117Smjacob if (free_cbi == MPT_HANDLER_ID_NONE && 403157117Smjacob (mpt_reply_handlers[cbi] 404147883Sscottl == mpt_default_reply_handler)) 405147883Sscottl free_cbi = cbi; 406147883Sscottl } 407157117Smjacob if (free_cbi == MPT_HANDLER_ID_NONE) { 408147883Sscottl return (ENOMEM); 409157117Smjacob } 410147883Sscottl mpt_reply_handlers[free_cbi] = handler.reply_handler; 411147883Sscottl *phandler_id = MPT_CBI_TO_HID(free_cbi); 412147883Sscottl break; 413147883Sscottl } 414147883Sscottl default: 415147883Sscottl mpt_prt(mpt, "mpt_register_handler unknown type %d\n", type); 416147883Sscottl return (EINVAL); 417147883Sscottl } 418147883Sscottl return (0); 419147883Sscottl} 420147883Sscottl 421147883Sscottlint 422147883Sscottlmpt_deregister_handler(struct mpt_softc *mpt, mpt_handler_type type, 423147883Sscottl mpt_handler_t handler, uint32_t handler_id) 424147883Sscottl{ 425147883Sscottl 426147883Sscottl switch (type) { 427147883Sscottl case MPT_HANDLER_REPLY: 428147883Sscottl { 429147883Sscottl u_int cbi; 
430147883Sscottl 431147883Sscottl cbi = MPT_CBI(handler_id); 432147883Sscottl if (cbi >= MPT_NUM_REPLY_HANDLERS 433147883Sscottl || mpt_reply_handlers[cbi] != handler.reply_handler) 434147883Sscottl return (ENOENT); 435147883Sscottl mpt_reply_handlers[cbi] = mpt_default_reply_handler; 436147883Sscottl break; 437147883Sscottl } 438147883Sscottl default: 439147883Sscottl mpt_prt(mpt, "mpt_deregister_handler unknown type %d\n", type); 440147883Sscottl return (EINVAL); 441147883Sscottl } 442147883Sscottl return (0); 443147883Sscottl} 444147883Sscottl 445147883Sscottlstatic int 446147883Sscottlmpt_default_reply_handler(struct mpt_softc *mpt, request_t *req, 447157117Smjacob uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 448147883Sscottl{ 449157117Smjacob mpt_prt(mpt, 450157117Smjacob "Default Handler Called: req=%p:%u reply_descriptor=%x frame=%p\n", 451157117Smjacob req, req->serno, reply_desc, reply_frame); 452147883Sscottl 453147883Sscottl if (reply_frame != NULL) 454147883Sscottl mpt_dump_reply_frame(mpt, reply_frame); 455147883Sscottl 456157117Smjacob mpt_prt(mpt, "Reply Frame Ignored\n"); 457147883Sscottl 458147883Sscottl return (/*free_reply*/TRUE); 459147883Sscottl} 460147883Sscottl 461147883Sscottlstatic int 462147883Sscottlmpt_config_reply_handler(struct mpt_softc *mpt, request_t *req, 463157117Smjacob uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 464147883Sscottl{ 465147883Sscottl if (req != NULL) { 466147883Sscottl 467147883Sscottl if (reply_frame != NULL) { 468147883Sscottl MSG_CONFIG *cfgp; 469147883Sscottl MSG_CONFIG_REPLY *reply; 470147883Sscottl 471147883Sscottl cfgp = (MSG_CONFIG *)req->req_vbuf; 472147883Sscottl reply = (MSG_CONFIG_REPLY *)reply_frame; 473147883Sscottl req->IOCStatus = le16toh(reply_frame->IOCStatus); 474147883Sscottl bcopy(&reply->Header, &cfgp->Header, 475147883Sscottl sizeof(cfgp->Header)); 476147883Sscottl } 477147883Sscottl req->state &= ~REQ_STATE_QUEUED; 478147883Sscottl req->state |= REQ_STATE_DONE; 
479147883Sscottl TAILQ_REMOVE(&mpt->request_pending_list, req, links); 480157354Smjacob if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) { 481147883Sscottl wakeup(req); 482157354Smjacob } 483147883Sscottl } 484147883Sscottl 485157354Smjacob return (TRUE); 486147883Sscottl} 487147883Sscottl 488147883Sscottlstatic int 489147883Sscottlmpt_handshake_reply_handler(struct mpt_softc *mpt, request_t *req, 490157117Smjacob uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 491147883Sscottl{ 492147883Sscottl /* Nothing to be done. */ 493157354Smjacob return (TRUE); 494147883Sscottl} 495147883Sscottl 496147883Sscottlstatic int 497147883Sscottlmpt_event_reply_handler(struct mpt_softc *mpt, request_t *req, 498157117Smjacob uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) 499147883Sscottl{ 500147883Sscottl int free_reply; 501147883Sscottl 502157354Smjacob KASSERT(reply_frame != NULL, ("null reply in mpt_event_reply_handler")); 503157354Smjacob KASSERT(req != NULL, ("null request in mpt_event_reply_handler")); 504147883Sscottl 505147883Sscottl free_reply = TRUE; 506147883Sscottl switch (reply_frame->Function) { 507147883Sscottl case MPI_FUNCTION_EVENT_NOTIFICATION: 508147883Sscottl { 509147883Sscottl MSG_EVENT_NOTIFY_REPLY *msg; 510147883Sscottl struct mpt_personality *pers; 511147883Sscottl u_int handled; 512147883Sscottl 513147883Sscottl handled = 0; 514147883Sscottl msg = (MSG_EVENT_NOTIFY_REPLY *)reply_frame; 515147883Sscottl MPT_PERS_FOREACH(mpt, pers) 516147883Sscottl handled += pers->event(mpt, req, msg); 517147883Sscottl 518155521Smjacob if (handled == 0 && mpt->mpt_pers_mask == 0) { 519156301Smjacob mpt_lprt(mpt, MPT_PRT_INFO, 520155521Smjacob "No Handlers For Any Event Notify Frames. " 521155521Smjacob "Event %#x (ACK %sequired).\n", 522155521Smjacob msg->Event, msg->AckRequired? "r" : "not r"); 523155521Smjacob } else if (handled == 0) { 524156301Smjacob mpt_lprt(mpt, MPT_PRT_WARN, 525155521Smjacob "Unhandled Event Notify Frame. 
Event %#x " 526155521Smjacob "(ACK %sequired).\n", 527155521Smjacob msg->Event, msg->AckRequired? "r" : "not r"); 528155521Smjacob } 529147883Sscottl 530147883Sscottl if (msg->AckRequired) { 531147883Sscottl request_t *ack_req; 532147883Sscottl uint32_t context; 533147883Sscottl 534147883Sscottl context = htole32(req->index|MPT_REPLY_HANDLER_EVENTS); 535157354Smjacob ack_req = mpt_get_request(mpt, FALSE); 536147883Sscottl if (ack_req == NULL) { 537147883Sscottl struct mpt_evtf_record *evtf; 538147883Sscottl 539147883Sscottl evtf = (struct mpt_evtf_record *)reply_frame; 540147883Sscottl evtf->context = context; 541147883Sscottl LIST_INSERT_HEAD(&mpt->ack_frames, evtf, links); 542147883Sscottl free_reply = FALSE; 543147883Sscottl break; 544147883Sscottl } 545147883Sscottl mpt_send_event_ack(mpt, ack_req, msg, context); 546157354Smjacob /* 547157354Smjacob * Don't check for CONTINUATION_REPLY here 548157354Smjacob */ 549157354Smjacob return (free_reply); 550147883Sscottl } 551147883Sscottl break; 552147883Sscottl } 553147883Sscottl case MPI_FUNCTION_PORT_ENABLE: 554157354Smjacob mpt_lprt(mpt, MPT_PRT_DEBUG , "enable port reply\n"); 555147883Sscottl break; 556147883Sscottl case MPI_FUNCTION_EVENT_ACK: 557147883Sscottl break; 558147883Sscottl default: 559157354Smjacob mpt_prt(mpt, "unknown event function: %x\n", 560147883Sscottl reply_frame->Function); 561147883Sscottl break; 562147883Sscottl } 563147883Sscottl 564157354Smjacob /* 565157354Smjacob * I'm not sure that this continuation stuff works as it should. 566157354Smjacob * 567157354Smjacob * I've had FC async events occur that free the frame up because 568157354Smjacob * the continuation bit isn't set, and then additional async events 569157354Smjacob * then occur using the same context. As you might imagine, this 570157354Smjacob * leads to Very Bad Thing. 571157354Smjacob * 572157354Smjacob * Let's just be safe for now and not free them up until we figure 573157354Smjacob * out what's actually happening here. 
574157354Smjacob */ 575157354Smjacob#if 0 576157354Smjacob if ((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0) { 577147883Sscottl TAILQ_REMOVE(&mpt->request_pending_list, req, links); 578157354Smjacob mpt_free_request(mpt, req); 579157354Smjacob mpt_prt(mpt, "event_reply %x for req %p:%u NOT a continuation", 580157354Smjacob reply_frame->Function, req, req->serno); 581157354Smjacob if (reply_frame->Function == MPI_FUNCTION_EVENT_NOTIFICATION) { 582157354Smjacob MSG_EVENT_NOTIFY_REPLY *msg = 583157354Smjacob (MSG_EVENT_NOTIFY_REPLY *)reply_frame; 584157354Smjacob mpt_prtc(mpt, " Event=0x%x AckReq=%d", 585157354Smjacob msg->Event, msg->AckRequired); 586157354Smjacob } 587157354Smjacob } else { 588157354Smjacob mpt_prt(mpt, "event_reply %x for %p:%u IS a continuation", 589157354Smjacob reply_frame->Function, req, req->serno); 590157354Smjacob if (reply_frame->Function == MPI_FUNCTION_EVENT_NOTIFICATION) { 591157354Smjacob MSG_EVENT_NOTIFY_REPLY *msg = 592157354Smjacob (MSG_EVENT_NOTIFY_REPLY *)reply_frame; 593157354Smjacob mpt_prtc(mpt, " Event=0x%x AckReq=%d", 594157354Smjacob msg->Event, msg->AckRequired); 595157354Smjacob } 596157354Smjacob mpt_prtc(mpt, "\n"); 597147883Sscottl } 598157354Smjacob#endif 599147883Sscottl return (free_reply); 600147883Sscottl} 601147883Sscottl 602147883Sscottl/* 603147883Sscottl * Process an asynchronous event from the IOC. 
604147883Sscottl */ 605147883Sscottlstatic int 606147883Sscottlmpt_core_event(struct mpt_softc *mpt, request_t *req, 607147883Sscottl MSG_EVENT_NOTIFY_REPLY *msg) 608147883Sscottl{ 609155521Smjacob mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_core_event: 0x%x\n", 610155521Smjacob msg->Event & 0xFF); 611147883Sscottl switch(msg->Event & 0xFF) { 612147883Sscottl case MPI_EVENT_NONE: 613147883Sscottl break; 614147883Sscottl case MPI_EVENT_LOG_DATA: 615147883Sscottl { 616147883Sscottl int i; 617147883Sscottl 618147883Sscottl /* Some error occured that LSI wants logged */ 619147883Sscottl mpt_prt(mpt, "EvtLogData: IOCLogInfo: 0x%08x\n", 620147883Sscottl msg->IOCLogInfo); 621147883Sscottl mpt_prt(mpt, "\tEvtLogData: Event Data:"); 622147883Sscottl for (i = 0; i < msg->EventDataLength; i++) 623147883Sscottl mpt_prtc(mpt, " %08x", msg->Data[i]); 624147883Sscottl mpt_prtc(mpt, "\n"); 625147883Sscottl break; 626147883Sscottl } 627147883Sscottl case MPI_EVENT_EVENT_CHANGE: 628147883Sscottl /* 629147883Sscottl * This is just an acknowledgement 630147883Sscottl * of our mpt_send_event_request. 
 */
		break;
	case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
		break;
	default:
		return (0);
		break;
	}
	return (1);
}

/*
 * Build and send an EVENT_ACK message in 'ack_req', telling the IOC that
 * the host has consumed the event notification 'msg'.  'context' becomes
 * the MsgContext of the ack so the completion can be routed back.
 * The ack is fire-and-forget: no reply handling is set up here.
 */
static void
mpt_send_event_ack(struct mpt_softc *mpt, request_t *ack_req,
		   MSG_EVENT_NOTIFY_REPLY *msg, uint32_t context)
{
	MSG_EVENT_ACK *ackp;

	ackp = (MSG_EVENT_ACK *)ack_req->req_vbuf;
	memset(ackp, 0, sizeof (*ackp));
	ackp->Function = MPI_FUNCTION_EVENT_ACK;
	ackp->Event = msg->Event;
	ackp->EventContext = msg->EventContext;
	ackp->MsgContext = context;
	mpt_check_doorbell(mpt);
	mpt_send_cmd(mpt, ack_req);
}

/***************************** Interrupt Handling *****************************/
/*
 * Interrupt service routine.  Drains the reply post queue, decoding each
 * reply descriptor into either an address reply (a full reply frame in the
 * reply DMA area) or a context reply (the descriptor itself carries the
 * request context), then dispatches to the registered reply handler.
 *
 * 'ntrips' bounds the drain loop (and debounces repeated identical
 * descriptors) so a wedged IOC cannot wedge the host in interrupt context.
 */
void
mpt_intr(void *arg)
{
	struct mpt_softc *mpt;
	uint32_t reply_desc;
	uint32_t last_reply_desc = MPT_REPLY_EMPTY;
	int ntrips = 0;

	mpt = (struct mpt_softc *)arg;
	while ((reply_desc = mpt_pop_reply_queue(mpt)) != MPT_REPLY_EMPTY) {
		request_t *req;
		MSG_DEFAULT_REPLY *reply_frame;
		uint32_t reply_baddr;
		uint32_t ctxt_idx;
		u_int cb_index;
		u_int req_index;
		int free_rf;

		/*
		 * Debounce: some chips can repost the same descriptor;
		 * don't process it twice, and give up after 1000 trips.
		 */
		if (reply_desc == last_reply_desc) {
			mpt_prt(mpt, "debounce reply_desc 0x%x\n", reply_desc);
			if (ntrips++ == 1000) {
				break;
			}
			continue;
		}
		last_reply_desc = reply_desc;

		req = NULL;
		reply_frame = NULL;
		reply_baddr = 0;
		if ((reply_desc & MPI_ADDRESS_REPLY_A_BIT) != 0) {
			u_int offset;
			/*
			 * Insure that the reply frame is coherent.
			 */
			reply_baddr = MPT_REPLY_BADDR(reply_desc);
			offset = reply_baddr - (mpt->reply_phys & 0xFFFFFFFF);
			bus_dmamap_sync_range(mpt->reply_dmat,
			    mpt->reply_dmap, offset, MPT_REPLY_SIZE,
			    BUS_DMASYNC_POSTREAD);
			reply_frame = MPT_REPLY_OTOV(mpt, offset);
			ctxt_idx = le32toh(reply_frame->MsgContext);
		} else {
			uint32_t type;

			type = MPI_GET_CONTEXT_REPLY_TYPE(reply_desc);
			ctxt_idx = reply_desc;
			mpt_lprt(mpt, MPT_PRT_DEBUG1, "Context Reply: 0x%08x\n",
			    reply_desc);

			switch (type) {
			case MPI_CONTEXT_REPLY_TYPE_SCSI_INIT:
				ctxt_idx &= MPI_CONTEXT_REPLY_CONTEXT_MASK;
				break;
			case MPI_CONTEXT_REPLY_TYPE_SCSI_TARGET:
				/*
				 * Target-mode completions index a separate
				 * command pointer table; validate the index
				 * before trusting it.  On corruption, force
				 * the loop to terminate (ntrips = 1000).
				 */
				ctxt_idx = GET_IO_INDEX(reply_desc);
				if (mpt->tgt_cmd_ptrs == NULL) {
					mpt_prt(mpt,
					    "mpt_intr: no target cmd ptrs\n");
					reply_desc = MPT_REPLY_EMPTY;
					break;
				}
				if (ctxt_idx >= mpt->tgt_cmds_allocated) {
					mpt_prt(mpt,
					    "mpt_intr: bad tgt cmd ctxt %u\n",
					    ctxt_idx);
					reply_desc = MPT_REPLY_EMPTY;
					ntrips = 1000;
					break;
				}
				req = mpt->tgt_cmd_ptrs[ctxt_idx];
				if (req == NULL) {
					mpt_prt(mpt, "no request backpointer "
					    "at index %u", ctxt_idx);
					reply_desc = MPT_REPLY_EMPTY;
					ntrips = 1000;
					break;
				}
				/*
				 * Reformulate ctxt_idx to be just as if
				 * it were another type of context reply
				 * so the code below will find the request
				 * via indexing into the pool.
				 */
				ctxt_idx =
				    req->index | mpt->scsi_tgt_handler_id;
				req = NULL;
				break;
			case MPI_CONTEXT_REPLY_TYPE_LAN:
				mpt_prt(mpt, "LAN CONTEXT REPLY: 0x%08x\n",
				    reply_desc);
				reply_desc = MPT_REPLY_EMPTY;
				break;
			default:
				mpt_prt(mpt, "Context Reply 0x%08x?\n", type);
				reply_desc = MPT_REPLY_EMPTY;
				break;
			}
			if (reply_desc == MPT_REPLY_EMPTY) {
				if (ntrips++ > 1000) {
					break;
				}
				continue;
			}
		}

		/* Map the context to a handler index and a pool request. */
		cb_index = MPT_CONTEXT_TO_CBI(ctxt_idx);
		req_index = MPT_CONTEXT_TO_REQI(ctxt_idx);
		if (req_index < MPT_MAX_REQUESTS(mpt)) {
			req = &mpt->request_pool[req_index];
		} else {
			mpt_prt(mpt, "WARN: mpt_intr index == %d (reply_desc =="
			    " 0x%x)\n", req_index, reply_desc);
		}

		/* Handler returns nonzero if we should free the frame. */
		free_rf = mpt_reply_handlers[cb_index](mpt, req,
		    reply_desc, reply_frame);

		if (reply_frame != NULL && free_rf) {
			mpt_free_reply(mpt, reply_baddr);
		}

		/*
		 * If we got ourselves disabled, don't get stuck in a loop
		 */
		if (mpt->disabled) {
			mpt_disable_ints(mpt);
			break;
		}
		if (ntrips++ > 1000) {
			break;
		}
	}
}

/******************************* Error Recovery *******************************/
/*
 * Complete every request on 'chain' by synthesizing a minimal reply frame
 * carrying 'iocstatus' and invoking each request's registered reply handler.
 * Used after an IOC reset to fail out everything that was in flight.
 */
void
mpt_complete_request_chain(struct mpt_softc *mpt, struct req_queue *chain,
    u_int iocstatus)
{
	MSG_DEFAULT_REPLY ioc_status_frame;
	request_t *req;

	memset(&ioc_status_frame, 0, sizeof(ioc_status_frame));
	ioc_status_frame.MsgLength = roundup2(sizeof(ioc_status_frame), 4);
	ioc_status_frame.IOCStatus = iocstatus;
	while((req = TAILQ_FIRST(chain)) != NULL) {
		MSG_REQUEST_HEADER *msg_hdr;
		u_int cb_index;

		TAILQ_REMOVE(chain, req, links);
		msg_hdr = (MSG_REQUEST_HEADER *)req->req_vbuf;
		/* Echo the original function/context into the fake reply. */
		ioc_status_frame.Function = msg_hdr->Function;
		ioc_status_frame.MsgContext = msg_hdr->MsgContext;
		cb_index = MPT_CONTEXT_TO_CBI(le32toh(msg_hdr->MsgContext));
		mpt_reply_handlers[cb_index](mpt, req, msg_hdr->MsgContext,
		    &ioc_status_frame);
	}
}

/********************************* Diagnostics ********************************/
/*
 * Perform a diagnostic dump of a reply frame.
 */
void
mpt_dump_reply_frame(struct mpt_softc *mpt, MSG_DEFAULT_REPLY *reply_frame)
{
	mpt_prt(mpt, "Address Reply:\n");
	mpt_print_reply(reply_frame);
}

/******************************* Doorbell Access ******************************/
static __inline uint32_t mpt_rd_db(struct mpt_softc *mpt);
static __inline uint32_t mpt_rd_intr(struct mpt_softc *mpt);

/* Read the doorbell register. */
static __inline uint32_t
mpt_rd_db(struct mpt_softc *mpt)
{
	return mpt_read(mpt, MPT_OFFSET_DOORBELL);
}

/* Read the interrupt status register. */
static __inline uint32_t
mpt_rd_intr(struct mpt_softc *mpt)
{
	return mpt_read(mpt, MPT_OFFSET_INTR_STATUS);
}

/* Busy wait for a door bell to be read by IOC */
static int
mpt_wait_db_ack(struct mpt_softc *mpt)
{
	int i;
	for (i=0; i < MPT_MAX_WAIT; i++) {
		if (!MPT_DB_IS_BUSY(mpt_rd_intr(mpt))) {
			/* maxwait_ack tracks the worst case seen, for stats */
			maxwait_ack = i > maxwait_ack ? i : maxwait_ack;
			return (MPT_OK);
		}
		DELAY(200);
	}
	return (MPT_FAIL);
}

/* Busy wait for a door bell interrupt */
static int
mpt_wait_db_int(struct mpt_softc *mpt)
{
	int i;
	for (i=0; i < MPT_MAX_WAIT; i++) {
		if (MPT_DB_INTR(mpt_rd_intr(mpt))) {
			maxwait_int = i > maxwait_int ? i : maxwait_int;
			return MPT_OK;
		}
		DELAY(100);
	}
	return (MPT_FAIL);
}

/* Warn (with a register dump) if the IOC is not in the RUNNING state. */
void
mpt_check_doorbell(struct mpt_softc *mpt)
{
	uint32_t db = mpt_rd_db(mpt);
	if (MPT_STATE(db) != MPT_DB_STATE_RUNNING) {
		mpt_prt(mpt, "Device not running\n");
		mpt_print_db(db);
	}
}

/* Wait for IOC to transition to a given state */
static int
mpt_wait_state(struct mpt_softc *mpt, enum DB_STATE_BITS state)
{
	int i;

	for (i = 0; i < MPT_MAX_WAIT; i++) {
		uint32_t db = mpt_rd_db(mpt);
		if (MPT_STATE(db) == state) {
			maxwait_state = i > maxwait_state ? i : maxwait_state;
			return (MPT_OK);
		}
		DELAY(100);
	}
	return (MPT_FAIL);
}


/************************* Intialization/Configuration ************************/
static int mpt_download_fw(struct mpt_softc *mpt);

/* Issue the reset COMMAND to the IOC */
static int
mpt_soft_reset(struct mpt_softc *mpt)
{
	mpt_lprt(mpt, MPT_PRT_DEBUG, "soft reset\n");

	/* Have to use hard reset if we are not in Running state */
	if (MPT_STATE(mpt_rd_db(mpt)) != MPT_DB_STATE_RUNNING) {
		mpt_prt(mpt, "soft reset failed: device not running\n");
		return (MPT_FAIL);
	}

	/* If door bell is in use we don't have a chance of getting
	 * a word in since the IOC probably crashed in message
	 * processing. So don't waste our time.
	 */
	if (MPT_DB_IS_IN_USE(mpt_rd_db(mpt))) {
		mpt_prt(mpt, "soft reset failed: doorbell wedged\n");
		return (MPT_FAIL);
	}

	/* Send the reset request to the IOC */
	mpt_write(mpt, MPT_OFFSET_DOORBELL,
	    MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET << MPI_DOORBELL_FUNCTION_SHIFT);
	if (mpt_wait_db_ack(mpt) != MPT_OK) {
		mpt_prt(mpt, "soft reset failed: ack timeout\n");
		return (MPT_FAIL);
	}

	/* Wait for the IOC to reload and come out of reset state */
	if (mpt_wait_state(mpt, MPT_DB_STATE_READY) != MPT_OK) {
		mpt_prt(mpt, "soft reset failed: device did not restart\n");
		return (MPT_FAIL);
	}

	return MPT_OK;
}

/*
 * Unlock the diagnostic register block by writing the magic key
 * sequence until the Diag-Register-Write-Enable bit sticks.
 * Returns 0 on success, EIO if the chip never granted write access.
 */
static int
mpt_enable_diag_mode(struct mpt_softc *mpt)
{
	int try;

	try = 20;
	while (--try) {

		if ((mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC) & MPI_DIAG_DRWE) != 0)
			break;

		/* Enable diagnostic registers */
		mpt_write(mpt, MPT_OFFSET_SEQUENCE, 0xFF);
		mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_1ST_KEY_VALUE);
		mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_2ND_KEY_VALUE);
		mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_3RD_KEY_VALUE);
		mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_4TH_KEY_VALUE);
		mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_5TH_KEY_VALUE);

		DELAY(100000);
	}
	if (try == 0)
		return (EIO);
	return (0);
}

/* Relock the diagnostic register block. */
static void
mpt_disable_diag_mode(struct mpt_softc *mpt)
{
	mpt_write(mpt, MPT_OFFSET_SEQUENCE, 0xFFFFFFFF);
}

/* This is a magic diagnostic reset that resets all the ARM
 * processors in the chip.
 */
static void
mpt_hard_reset(struct mpt_softc *mpt)
{
	int error;
	int wait;
	uint32_t diagreg;

	mpt_lprt(mpt, MPT_PRT_DEBUG, "hard reset\n");

	error = mpt_enable_diag_mode(mpt);
	if (error) {
		mpt_prt(mpt, "WARNING - Could not enter diagnostic mode !\n");
		mpt_prt(mpt, "Trying to reset anyway.\n");
	}

	diagreg = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);

	/*
	 * This appears to be a workaround required for some
	 * firmware or hardware revs.
	 */
	mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, diagreg | MPI_DIAG_DISABLE_ARM);
	DELAY(1000);

	/* Diag. port is now active so we can now hit the reset bit */
	mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, diagreg | MPI_DIAG_RESET_ADAPTER);

	/*
	 * Ensure that the reset has finished.  We delay 1ms
	 * prior to reading the register to make sure the chip
	 * has sufficiently completed its reset to handle register
	 * accesses.
	 */
	wait = 5000;
	do {
		DELAY(1000);
		diagreg = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);
	} while (--wait && (diagreg & MPI_DIAG_RESET_ADAPTER) == 0);

	if (wait == 0) {
		mpt_prt(mpt, "WARNING - Failed hard reset! "
		    "Trying to initialize anyway.\n");
	}

	/*
	 * If we have firmware to download, it must be loaded before
	 * the controller will become operational.  Do so now.
	 */
	if (mpt->fw_image != NULL) {

		error = mpt_download_fw(mpt);

		if (error) {
			mpt_prt(mpt, "WARNING - Firmware Download Failed!\n");
			mpt_prt(mpt, "Trying to initialize anyway.\n");
		}
	}

	/*
	 * Reseting the controller should have disabled write
	 * access to the diagnostic registers, but disable
	 * manually to be sure.
	 */
	mpt_disable_diag_mode(mpt);
}

/*
 * Core personality's IOC-reset hook: fail out everything that was
 * pending with a status appropriate for an IOC reset.
 */
static void
mpt_core_ioc_reset(struct mpt_softc *mpt, int type)
{
	/*
	 * Complete all pending requests with a status
	 * appropriate for an IOC reset.
	 */
	mpt_complete_request_chain(mpt, &mpt->request_pending_list,
	    MPI_IOCSTATUS_INVALID_STATE);
}


/*
 * Reset the IOC when needed. Try software command first then if needed
 * poke at the magic diagnostic reset.
 * Note that a hard reset resets
 * *both* IOCs on dual function chips (FC929 && LSI1030) as well as
 * fouls up the PCI configuration registers.
 */
int
mpt_reset(struct mpt_softc *mpt, int reinit)
{
	struct mpt_personality *pers;
	int ret;
	int retry_cnt = 0;

	/*
	 * Try a soft reset. If that fails, get out the big hammer.
	 */
 again:
	if ((ret = mpt_soft_reset(mpt)) != MPT_OK) {
		int cnt;
		for (cnt = 0; cnt < 5; cnt++) {
			/* Failed; do a hard reset */
			mpt_hard_reset(mpt);

			/*
			 * Wait for the IOC to reload
			 * and come out of reset state
			 */
			ret = mpt_wait_state(mpt, MPT_DB_STATE_READY);
			if (ret == MPT_OK) {
				break;
			}
			/*
			 * Okay- try to check again...
			 */
			ret = mpt_wait_state(mpt, MPT_DB_STATE_READY);
			if (ret == MPT_OK) {
				break;
			}
			mpt_prt(mpt, "mpt_reset: failed hard reset (%d:%d)\n",
			    retry_cnt, cnt);
		}
	}

	if (retry_cnt == 0) {
		/*
		 * Invoke reset handlers.  We bump the reset count so
		 * that mpt_wait_req() understands that regardless of
		 * the specified wait condition, it should stop its wait.
		 */
		mpt->reset_cnt++;
		MPT_PERS_FOREACH(mpt, pers)
			pers->reset(mpt, ret);
	}

	if (reinit) {
		ret = mpt_enable_ioc(mpt, 1);
		if (ret == MPT_OK) {
			mpt_enable_ints(mpt);
		}
	}
	/* Retry the whole sequence (up to 2 more times) on failure. */
	if (ret != MPT_OK && retry_cnt++ < 2) {
		goto again;
	}
	return ret;
}

/* Return a command buffer to the free queue */
void
mpt_free_request(struct mpt_softc *mpt, request_t *req)
{
	request_t *nxt;
	struct mpt_evtf_record *record;
	uint32_t reply_baddr;

	if (req == NULL || req != &mpt->request_pool[req->index]) {
		panic("mpt_free_request bad req ptr\n");
		return;
	}
	/* Free any chained continuation request first. */
	if ((nxt = req->chain) != NULL) {
		req->chain = NULL;
		mpt_free_request(mpt, nxt);	/* NB: recursion */
	}

	KASSERT(req->state != REQ_STATE_FREE, ("freeing free request"));
	KASSERT(!(req->state & REQ_STATE_LOCKED), ("freeing locked request"));

	req->ccb = NULL;

	if (LIST_EMPTY(&mpt->ack_frames)) {
		/*
		 * Insert free ones at the tail
		 */
		req->serno = 0;
		req->state = REQ_STATE_FREE;
		TAILQ_INSERT_TAIL(&mpt->request_free_list, req, links);
		if (mpt->getreqwaiter != 0) {
			mpt->getreqwaiter = 0;
			wakeup(&mpt->request_free_list);
		}
		return;
	}

	/*
	 * Process an ack frame deferred due to resource shortage.
	 * Instead of freeing this request, immediately reuse it to
	 * send the deferred event ack.
	 */
	record = LIST_FIRST(&mpt->ack_frames);
	LIST_REMOVE(record, links);
	req->state = REQ_STATE_ALLOCATED;
	/* Serial number 0 is reserved; skip it on wraparound. */
	if ((req->serno = mpt->sequence++) == 0) {
		req->serno = mpt->sequence++;
	}
	mpt_send_event_ack(mpt, req, &record->reply, record->context);
	reply_baddr = (uint32_t)((uint8_t *)record - mpt->reply)
	    + (mpt->reply_phys & 0xFFFFFFFF);
	mpt_free_reply(mpt, reply_baddr);
}

/* Get a command buffer from the free queue */
request_t *
mpt_get_request(struct mpt_softc *mpt, int sleep_ok)
{
	request_t *req;

retry:
	req = TAILQ_FIRST(&mpt->request_free_list);
	if (req != NULL) {
		KASSERT(req == &mpt->request_pool[req->index],
		    ("mpt_get_request: corrupted request free list\n"));
		KASSERT(req->state == REQ_STATE_FREE,
		    ("req not free on free list %x", req->state));
		TAILQ_REMOVE(&mpt->request_free_list, req, links);
		req->state = REQ_STATE_ALLOCATED;
		req->chain = NULL;
		/* Serial number 0 is reserved; skip it on wraparound. */
		if ((req->serno = mpt->sequence++) == 0) {
			req->serno = mpt->sequence++;
		}
	} else if (sleep_ok != 0) {
		/* Block until mpt_free_request() wakes us. */
		mpt->getreqwaiter = 1;
		mpt_sleep(mpt, &mpt->request_free_list, PUSER, "mptgreq", 0);
		goto retry;
	}
	return (req);
}

/* Pass the command to the IOC */
void
mpt_send_cmd(struct mpt_softc *mpt, request_t *req)
{
	uint32_t *pReq;

	pReq = req->req_vbuf;
	if (mpt->verbose > MPT_PRT_TRACE) {
		int offset;
#if __FreeBSD_version >= 500000
		mpt_prt(mpt, "Send Request %d (%jx):",
		    req->index, (uintmax_t) req->req_pbuf);
#else
		mpt_prt(mpt, "Send Request %d (%llx):",
		    req->index, (unsigned long long) req->req_pbuf);
#endif
		for (offset = 0; offset < mpt->request_frame_size; offset++) {
			if ((offset & 0x7) == 0) {
				mpt_prtc(mpt, "\n");
				mpt_prt(mpt, " ");
			}
			mpt_prtc(mpt, " %08x", pReq[offset]);
		}
		mpt_prtc(mpt, "\n");
	}
	/* Flush the request frame before the IOC DMAs it. */
	bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
	    BUS_DMASYNC_PREWRITE);
	req->state |= REQ_STATE_QUEUED;
	TAILQ_INSERT_HEAD(&mpt->request_pending_list, req, links);
	mpt_write(mpt, MPT_OFFSET_REQUEST_Q, (uint32_t) req->req_pbuf);
}

/*
 * Wait for a request to complete.
 *
 * Inputs:
 *	mpt		softc of controller executing request
 *	req		request to wait for
 *	sleep_ok	nonzero implies may sleep in this context
 *	time_ms		timeout in ms.  0 implies no timeout.
 *
 * Return Values:
 *	0		Request completed
 *	non-0		Timeout fired before request completion.
 */
int
mpt_wait_req(struct mpt_softc *mpt, request_t *req,
	     mpt_req_state_t state, mpt_req_state_t mask,
	     int sleep_ok, int time_ms)
{
	int error;
	int timeout;
	u_int saved_cnt;

	/*
	 * timeout is in ms.  0 indicates infinite wait.
	 * Convert to ticks or 500us units depending on
	 * our sleep mode.
	 */
	if (sleep_ok != 0) {
		timeout = (time_ms * hz) / 1000;
	} else {
		timeout = time_ms * 2;
	}
	req->state |= REQ_STATE_NEED_WAKEUP;
	mask &= ~REQ_STATE_NEED_WAKEUP;
	/* An IOC reset while we wait aborts the wait (see mpt_reset()). */
	saved_cnt = mpt->reset_cnt;
	while ((req->state & mask) != state && mpt->reset_cnt == saved_cnt) {
		if (sleep_ok != 0) {
			error = mpt_sleep(mpt, req, PUSER, "mptreq", timeout);
			if (error == EWOULDBLOCK) {
				timeout = 0;
				break;
			}
		} else {
			/* Polling mode: run the ISR by hand every 500us. */
			if (time_ms != 0 && --timeout == 0) {
				break;
			}
			DELAY(500);
			mpt_intr(mpt);
		}
	}
	req->state &= ~REQ_STATE_NEED_WAKEUP;
	if (mpt->reset_cnt != saved_cnt) {
		return (EIO);
	}
	if (time_ms && timeout <= 0) {
		MSG_REQUEST_HEADER *msg_hdr = req->req_vbuf;
		mpt_prt(mpt, "mpt_wait_req(%x) timed out\n", msg_hdr->Function);
		return (ETIMEDOUT);
	}
	return (0);
}

/*
 * Send a command to the IOC via the
handshake register. 1299101704Smjacob * 1300101704Smjacob * Only done at initialization time and for certain unusual 1301101704Smjacob * commands such as device/bus reset as specified by LSI. 1302101704Smjacob */ 1303101704Smjacobint 1304147883Sscottlmpt_send_handshake_cmd(struct mpt_softc *mpt, size_t len, void *cmd) 1305101704Smjacob{ 1306101704Smjacob int i; 1307147883Sscottl uint32_t data, *data32; 1308101704Smjacob 1309101704Smjacob /* Check condition of the IOC */ 1310101704Smjacob data = mpt_rd_db(mpt); 1311147883Sscottl if ((MPT_STATE(data) != MPT_DB_STATE_READY 1312147883Sscottl && MPT_STATE(data) != MPT_DB_STATE_RUNNING 1313147883Sscottl && MPT_STATE(data) != MPT_DB_STATE_FAULT) 1314147883Sscottl || MPT_DB_IS_IN_USE(data)) { 1315147883Sscottl mpt_prt(mpt, "handshake aborted - invalid doorbell state\n"); 1316101704Smjacob mpt_print_db(data); 1317147883Sscottl return (EBUSY); 1318101704Smjacob } 1319101704Smjacob 1320101704Smjacob /* We move things in 32 bit chunks */ 1321101704Smjacob len = (len + 3) >> 2; 1322101704Smjacob data32 = cmd; 1323101704Smjacob 1324101704Smjacob /* Clear any left over pending doorbell interupts */ 1325101704Smjacob if (MPT_DB_INTR(mpt_rd_intr(mpt))) 1326101704Smjacob mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0); 1327101704Smjacob 1328101704Smjacob /* 1329101704Smjacob * Tell the handshake reg. we are going to send a command 1330101704Smjacob * and how long it is going to be. 
1331101704Smjacob */ 1332101704Smjacob data = (MPI_FUNCTION_HANDSHAKE << MPI_DOORBELL_FUNCTION_SHIFT) | 1333101704Smjacob (len << MPI_DOORBELL_ADD_DWORDS_SHIFT); 1334101704Smjacob mpt_write(mpt, MPT_OFFSET_DOORBELL, data); 1335101704Smjacob 1336101704Smjacob /* Wait for the chip to notice */ 1337101704Smjacob if (mpt_wait_db_int(mpt) != MPT_OK) { 1338147883Sscottl mpt_prt(mpt, "mpt_send_handshake_cmd timeout1\n"); 1339147883Sscottl return (ETIMEDOUT); 1340101704Smjacob } 1341101704Smjacob 1342101704Smjacob /* Clear the interrupt */ 1343101704Smjacob mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0); 1344101704Smjacob 1345101704Smjacob if (mpt_wait_db_ack(mpt) != MPT_OK) { 1346147883Sscottl mpt_prt(mpt, "mpt_send_handshake_cmd timeout2\n"); 1347147883Sscottl return (ETIMEDOUT); 1348101704Smjacob } 1349101704Smjacob 1350101704Smjacob /* Send the command */ 1351101704Smjacob for (i = 0; i < len; i++) { 1352101704Smjacob mpt_write(mpt, MPT_OFFSET_DOORBELL, *data32++); 1353101704Smjacob if (mpt_wait_db_ack(mpt) != MPT_OK) { 1354156104Smjacob mpt_prt(mpt, 1355147883Sscottl "mpt_send_handshake_cmd timeout! 
index = %d\n", 1356147883Sscottl i); 1357147883Sscottl return (ETIMEDOUT); 1358101704Smjacob } 1359101704Smjacob } 1360101704Smjacob return MPT_OK; 1361101704Smjacob} 1362101704Smjacob 1363101704Smjacob/* Get the response from the handshake register */ 1364101704Smjacobint 1365147883Sscottlmpt_recv_handshake_reply(struct mpt_softc *mpt, size_t reply_len, void *reply) 1366101704Smjacob{ 1367101704Smjacob int left, reply_left; 1368101704Smjacob u_int16_t *data16; 1369101704Smjacob MSG_DEFAULT_REPLY *hdr; 1370101704Smjacob 1371101704Smjacob /* We move things out in 16 bit chunks */ 1372101704Smjacob reply_len >>= 1; 1373101704Smjacob data16 = (u_int16_t *)reply; 1374101704Smjacob 1375101704Smjacob hdr = (MSG_DEFAULT_REPLY *)reply; 1376101704Smjacob 1377101704Smjacob /* Get first word */ 1378101704Smjacob if (mpt_wait_db_int(mpt) != MPT_OK) { 1379147883Sscottl mpt_prt(mpt, "mpt_recv_handshake_cmd timeout1\n"); 1380101704Smjacob return ETIMEDOUT; 1381101704Smjacob } 1382101704Smjacob *data16++ = mpt_read(mpt, MPT_OFFSET_DOORBELL) & MPT_DB_DATA_MASK; 1383101704Smjacob mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0); 1384101704Smjacob 1385101704Smjacob /* Get Second Word */ 1386101704Smjacob if (mpt_wait_db_int(mpt) != MPT_OK) { 1387147883Sscottl mpt_prt(mpt, "mpt_recv_handshake_cmd timeout2\n"); 1388101704Smjacob return ETIMEDOUT; 1389101704Smjacob } 1390101704Smjacob *data16++ = mpt_read(mpt, MPT_OFFSET_DOORBELL) & MPT_DB_DATA_MASK; 1391101704Smjacob mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0); 1392101704Smjacob 1393157354Smjacob /* 1394157354Smjacob * With the second word, we can now look at the length. 
1395157354Smjacob * Warn about a reply that's too short (except for IOC FACTS REPLY) 1396157354Smjacob */ 1397157354Smjacob if ((reply_len >> 1) != hdr->MsgLength && 1398157354Smjacob (hdr->Function != MPI_FUNCTION_IOC_FACTS)){ 1399157354Smjacob#if __FreeBSD_version >= 500000 1400103914Smjacob mpt_prt(mpt, "reply length does not match message length: " 1401157354Smjacob "got %x; expected %x for function %x\n", 1402157354Smjacob hdr->MsgLength << 2, reply_len << 1, hdr->Function); 1403157354Smjacob#else 1404157354Smjacob mpt_prt(mpt, "reply length does not match message length: " 1405157354Smjacob "got %x; expected %zx for function %x\n", 1406157354Smjacob hdr->MsgLength << 2, reply_len << 1, hdr->Function); 1407157354Smjacob#endif 1408101704Smjacob } 1409101704Smjacob 1410101704Smjacob /* Get rest of the reply; but don't overflow the provided buffer */ 1411101704Smjacob left = (hdr->MsgLength << 1) - 2; 1412101704Smjacob reply_left = reply_len - 2; 1413101704Smjacob while (left--) { 1414101704Smjacob u_int16_t datum; 1415101704Smjacob 1416101704Smjacob if (mpt_wait_db_int(mpt) != MPT_OK) { 1417147883Sscottl mpt_prt(mpt, "mpt_recv_handshake_cmd timeout3\n"); 1418101704Smjacob return ETIMEDOUT; 1419101704Smjacob } 1420101704Smjacob datum = mpt_read(mpt, MPT_OFFSET_DOORBELL); 1421101704Smjacob 1422101704Smjacob if (reply_left-- > 0) 1423101704Smjacob *data16++ = datum & MPT_DB_DATA_MASK; 1424101704Smjacob 1425101704Smjacob mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0); 1426101704Smjacob } 1427101704Smjacob 1428101704Smjacob /* One more wait & clear at the end */ 1429101704Smjacob if (mpt_wait_db_int(mpt) != MPT_OK) { 1430147883Sscottl mpt_prt(mpt, "mpt_recv_handshake_cmd timeout4\n"); 1431101704Smjacob return ETIMEDOUT; 1432101704Smjacob } 1433101704Smjacob mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0); 1434101704Smjacob 1435101704Smjacob if ((hdr->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { 1436147883Sscottl if (mpt->verbose >= MPT_PRT_TRACE) 
1437101704Smjacob mpt_print_reply(hdr); 1438101704Smjacob return (MPT_FAIL | hdr->IOCStatus); 1439101704Smjacob } 1440101704Smjacob 1441101704Smjacob return (0); 1442101704Smjacob} 1443101704Smjacob 1444101704Smjacobstatic int 1445147883Sscottlmpt_get_iocfacts(struct mpt_softc *mpt, MSG_IOC_FACTS_REPLY *freplp) 1446101704Smjacob{ 1447101704Smjacob MSG_IOC_FACTS f_req; 1448101704Smjacob int error; 1449101704Smjacob 1450157354Smjacob memset(&f_req, 0, sizeof f_req); 1451101704Smjacob f_req.Function = MPI_FUNCTION_IOC_FACTS; 1452147883Sscottl f_req.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE); 1453101704Smjacob error = mpt_send_handshake_cmd(mpt, sizeof f_req, &f_req); 1454101704Smjacob if (error) 1455101704Smjacob return(error); 1456101704Smjacob error = mpt_recv_handshake_reply(mpt, sizeof (*freplp), freplp); 1457101704Smjacob return (error); 1458101704Smjacob} 1459101704Smjacob 1460102199Smjacobstatic int 1461147883Sscottlmpt_get_portfacts(struct mpt_softc *mpt, MSG_PORT_FACTS_REPLY *freplp) 1462102199Smjacob{ 1463102199Smjacob MSG_PORT_FACTS f_req; 1464102199Smjacob int error; 1465102199Smjacob 1466102199Smjacob /* XXX: Only getting PORT FACTS for Port 0 */ 1467147883Sscottl memset(&f_req, 0, sizeof f_req); 1468102199Smjacob f_req.Function = MPI_FUNCTION_PORT_FACTS; 1469147883Sscottl f_req.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE); 1470102199Smjacob error = mpt_send_handshake_cmd(mpt, sizeof f_req, &f_req); 1471102199Smjacob if (error) 1472102199Smjacob return(error); 1473102199Smjacob error = mpt_recv_handshake_reply(mpt, sizeof (*freplp), freplp); 1474102199Smjacob return (error); 1475102199Smjacob} 1476102199Smjacob 1477101704Smjacob/* 1478101704Smjacob * Send the initialization request. This is where we specify how many 1479101704Smjacob * SCSI busses and how many devices per bus we wish to emulate. 
 * This is also the command that specifies the max size of the reply
 * frames from the IOC that we will be allocating.
 */
static int
mpt_send_ioc_init(struct mpt_softc *mpt, uint32_t who)
{
	int error = 0;
	MSG_IOC_INIT init;
	MSG_IOC_INIT_REPLY reply;

	memset(&init, 0, sizeof init);
	init.WhoInit = who;
	init.Function = MPI_FUNCTION_IOC_INIT;
	/* Max device count depends on transport: FC, SAS, or parallel SCSI. */
	if (mpt->is_fc) {
		init.MaxDevices = 255;
	} else if (mpt->is_sas) {
		init.MaxDevices = mpt->mpt_max_devices;
	} else {
		init.MaxDevices = 16;
	}
	init.MaxBuses = 1;

	init.MsgVersion = htole16(MPI_VERSION);
	init.HeaderVersion = htole16(MPI_HEADER_VERSION);
	init.ReplyFrameSize = htole16(MPT_REPLY_SIZE);
	init.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);

	if ((error = mpt_send_handshake_cmd(mpt, sizeof init, &init)) != 0) {
		return(error);
	}

	error = mpt_recv_handshake_reply(mpt, sizeof reply, &reply);
	return (error);
}


/*
 * Utiltity routine to read configuration headers and pages
 *
 * Builds a CONFIG request in 'req' with a single SGE describing the
 * host buffer at 'addr'/'len', posts it, and waits (up to 'timeout_ms')
 * for completion.  SGE direction is host-to-IOC for write actions,
 * IOC-to-host otherwise.
 */
int
mpt_issue_cfg_req(struct mpt_softc *mpt, request_t *req, u_int Action,
		  u_int PageVersion, u_int PageLength, u_int PageNumber,
		  u_int PageType, uint32_t PageAddress, bus_addr_t addr,
		  bus_size_t len, int sleep_ok, int timeout_ms)
{
	MSG_CONFIG *cfgp;
	SGE_SIMPLE32 *se;

	cfgp = req->req_vbuf;
	memset(cfgp, 0, sizeof *cfgp);
	cfgp->Action = Action;
	cfgp->Function = MPI_FUNCTION_CONFIG;
	cfgp->Header.PageVersion = PageVersion;
	cfgp->Header.PageLength = PageLength;
	cfgp->Header.PageNumber = PageNumber;
	cfgp->Header.PageType = PageType;
	cfgp->PageAddress = PageAddress;
	se = (SGE_SIMPLE32 *)&cfgp->PageBufferSGE;
	se->Address = addr;
	MPI_pSGE_SET_LENGTH(se, len);
	MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
	    MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
	    MPI_SGE_FLAGS_END_OF_LIST |
	    ((Action == MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT
	  || Action == MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM)
	   ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST)));
	cfgp->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_CONFIG);

	mpt_check_doorbell(mpt);
	mpt_send_cmd(mpt, req);
	return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE,
	    sleep_ok, timeout_ms));
}


/*
 * Read a configuration page header of the given type/number/address
 * into *rslt.  Returns 0, ENOMEM, or ETIMEDOUT.
 */
int
mpt_read_cfg_header(struct mpt_softc *mpt, int PageType, int PageNumber,
		    uint32_t PageAddress, CONFIG_PAGE_HEADER *rslt,
		    int sleep_ok, int timeout_ms)
{
	request_t  *req;
	MSG_CONFIG *cfgp;
	int	    error;

	req = mpt_get_request(mpt, sleep_ok);
	if (req == NULL) {
		mpt_prt(mpt, "mpt_read_cfg_header: Get request failed!\n");
		return (ENOMEM);
	}

	error = mpt_issue_cfg_req(mpt, req, MPI_CONFIG_ACTION_PAGE_HEADER,
	    /*PageVersion*/0, /*PageLength*/0, PageNumber,
	    PageType, PageAddress, /*addr*/0, /*len*/0,
	    sleep_ok, timeout_ms);
	if (error != 0) {
		mpt_free_request(mpt, req);
		mpt_prt(mpt, "read_cfg_header timed out\n");
		return (ETIMEDOUT);
	}

	switch (req->IOCStatus & MPI_IOCSTATUS_MASK) {
	case MPI_IOCSTATUS_SUCCESS:
		cfgp = req->req_vbuf;
		bcopy(&cfgp->Header, rslt, sizeof(*rslt));
		error = 0;
		break;
	case MPI_IOCSTATUS_CONFIG_INVALID_PAGE:
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "Invalid Page Type %d Number %d Addr 0x%0x\n",
		    PageType, PageNumber, PageAddress);
1590156104Smjacob error = EINVAL; 1591156104Smjacob break; 1592156104Smjacob default: 1593156104Smjacob mpt_prt(mpt, "mpt_read_cfg_header: Config Info Status %x\n", 1594156104Smjacob req->IOCStatus); 1595156104Smjacob error = EIO; 1596156104Smjacob break; 1597147883Sscottl } 1598102199Smjacob mpt_free_request(mpt, req); 1599147883Sscottl return (error); 1600102199Smjacob} 1601102199Smjacob 1602102822Smjacobint 1603147883Sscottlmpt_read_cfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress, 1604147883Sscottl CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok, 1605147883Sscottl int timeout_ms) 1606102199Smjacob{ 1607147883Sscottl request_t *req; 1608147883Sscottl int error; 1609102199Smjacob 1610147883Sscottl req = mpt_get_request(mpt, sleep_ok); 1611147883Sscottl if (req == NULL) { 1612147883Sscottl mpt_prt(mpt, "mpt_read_cfg_page: Get request failed!\n"); 1613147883Sscottl return (-1); 1614147883Sscottl } 1615102199Smjacob 1616147883Sscottl error = mpt_issue_cfg_req(mpt, req, Action, hdr->PageVersion, 1617147883Sscottl hdr->PageLength, hdr->PageNumber, 1618147883Sscottl hdr->PageType & MPI_CONFIG_PAGETYPE_MASK, 1619157117Smjacob PageAddress, req->req_pbuf + MPT_RQSL(mpt), 1620147883Sscottl len, sleep_ok, timeout_ms); 1621147883Sscottl if (error != 0) { 1622147883Sscottl mpt_prt(mpt, "read_cfg_page(%d) timed out\n", Action); 1623147883Sscottl return (-1); 1624147883Sscottl } 1625102199Smjacob 1626147883Sscottl if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { 1627147883Sscottl mpt_prt(mpt, "mpt_read_cfg_page: Config Info Status %x\n", 1628147883Sscottl req->IOCStatus); 1629147883Sscottl mpt_free_request(mpt, req); 1630102199Smjacob return (-1); 1631102199Smjacob } 1632102199Smjacob bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap, 1633102199Smjacob BUS_DMASYNC_POSTREAD); 1634157117Smjacob memcpy(hdr, ((uint8_t *)req->req_vbuf)+MPT_RQSL(mpt), len); 1635102199Smjacob mpt_free_request(mpt, req); 1636102199Smjacob return (0); 
1637102199Smjacob} 1638102199Smjacob 1639102822Smjacobint 1640147883Sscottlmpt_write_cfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress, 1641147883Sscottl CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok, 1642147883Sscottl int timeout_ms) 1643102199Smjacob{ 1644147883Sscottl request_t *req; 1645147883Sscottl u_int hdr_attr; 1646147883Sscottl int error; 1647102199Smjacob 1648102199Smjacob hdr_attr = hdr->PageType & MPI_CONFIG_PAGEATTR_MASK; 1649102199Smjacob if (hdr_attr != MPI_CONFIG_PAGEATTR_CHANGEABLE && 1650102199Smjacob hdr_attr != MPI_CONFIG_PAGEATTR_PERSISTENT) { 1651147883Sscottl mpt_prt(mpt, "page type 0x%x not changeable\n", 1652147883Sscottl hdr->PageType & MPI_CONFIG_PAGETYPE_MASK); 1653102199Smjacob return (-1); 1654102199Smjacob } 1655147883Sscottl hdr->PageType &= MPI_CONFIG_PAGETYPE_MASK, 1656102199Smjacob 1657147883Sscottl req = mpt_get_request(mpt, sleep_ok); 1658147883Sscottl if (req == NULL) 1659147883Sscottl return (-1); 1660102199Smjacob 1661157117Smjacob memcpy(((caddr_t)req->req_vbuf)+MPT_RQSL(mpt), hdr, len); 1662147883Sscottl /* Restore stripped out attributes */ 1663147883Sscottl hdr->PageType |= hdr_attr; 1664102199Smjacob 1665147883Sscottl error = mpt_issue_cfg_req(mpt, req, Action, hdr->PageVersion, 1666147883Sscottl hdr->PageLength, hdr->PageNumber, 1667147883Sscottl hdr->PageType & MPI_CONFIG_PAGETYPE_MASK, 1668157117Smjacob PageAddress, req->req_pbuf + MPT_RQSL(mpt), 1669147883Sscottl len, sleep_ok, timeout_ms); 1670147883Sscottl if (error != 0) { 1671147883Sscottl mpt_prt(mpt, "mpt_write_cfg_page timed out\n"); 1672147883Sscottl return (-1); 1673147883Sscottl } 1674102199Smjacob 1675147883Sscottl if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { 1676147883Sscottl mpt_prt(mpt, "mpt_write_cfg_page: Config Info Status %x\n", 1677147883Sscottl req->IOCStatus); 1678147883Sscottl mpt_free_request(mpt, req); 1679147883Sscottl return (-1); 1680102199Smjacob } 1681147883Sscottl mpt_free_request(mpt, req); 
1682147883Sscottl return (0); 1683147883Sscottl} 1684102199Smjacob 1685147883Sscottl/* 1686147883Sscottl * Read IOC configuration information 1687147883Sscottl */ 1688147883Sscottlstatic int 1689147883Sscottlmpt_read_config_info_ioc(struct mpt_softc *mpt) 1690147883Sscottl{ 1691147883Sscottl CONFIG_PAGE_HEADER hdr; 1692147883Sscottl struct mpt_raid_volume *mpt_raid; 1693147883Sscottl int rv; 1694147883Sscottl int i; 1695147883Sscottl size_t len; 1696147883Sscottl 1697147883Sscottl rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC, 1698147883Sscottl /*PageNumber*/2, /*PageAddress*/0, &hdr, 1699147883Sscottl /*sleep_ok*/FALSE, /*timeout_ms*/5000); 1700156104Smjacob /* 1701156104Smjacob * If it's an invalid page, so what? Not a supported function.... 1702156104Smjacob */ 1703156104Smjacob if (rv == EINVAL) 1704156104Smjacob return (0); 1705147883Sscottl if (rv) 1706156104Smjacob return (rv); 1707147883Sscottl 1708157354Smjacob#if __FreeBSD_version >= 500000 1709157141Smjacob mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC Page 2 Header: ver %x, len %zx, " 1710147883Sscottl "num %x, type %x\n", hdr.PageVersion, 1711147883Sscottl hdr.PageLength * sizeof(uint32_t), 1712147883Sscottl hdr.PageNumber, hdr.PageType); 1713157354Smjacob#else 1714157354Smjacob mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC Page 2 Header: ver %x, len %z, " 1715157354Smjacob "num %x, type %x\n", hdr.PageVersion, 1716157354Smjacob hdr.PageLength * sizeof(uint32_t), 1717157354Smjacob hdr.PageNumber, hdr.PageType); 1718157354Smjacob#endif 1719147883Sscottl 1720147883Sscottl len = hdr.PageLength * sizeof(uint32_t); 1721151075Sscottl mpt->ioc_page2 = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); 1722147883Sscottl if (mpt->ioc_page2 == NULL) 1723147883Sscottl return (ENOMEM); 1724147883Sscottl memcpy(&mpt->ioc_page2->Header, &hdr, sizeof(hdr)); 1725147883Sscottl rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0, 1726147883Sscottl &mpt->ioc_page2->Header, len, 1727147883Sscottl /*sleep_ok*/FALSE, /*timeout_ms*/5000); 
1728147883Sscottl if (rv) { 1729147883Sscottl mpt_prt(mpt, "failed to read IOC Page 2\n"); 1730147883Sscottl } else if (mpt->ioc_page2->CapabilitiesFlags != 0) { 1731147883Sscottl uint32_t mask; 1732147883Sscottl 1733147883Sscottl mpt_prt(mpt, "Capabilities: ("); 1734147883Sscottl for (mask = 1; mask != 0; mask <<= 1) { 1735147883Sscottl if ((mpt->ioc_page2->CapabilitiesFlags & mask) == 0) 1736147883Sscottl continue; 1737147883Sscottl 1738147883Sscottl switch (mask) { 1739147883Sscottl case MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT: 1740147883Sscottl mpt_prtc(mpt, " RAID-0"); 1741147883Sscottl break; 1742147883Sscottl case MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT: 1743147883Sscottl mpt_prtc(mpt, " RAID-1E"); 1744147883Sscottl break; 1745147883Sscottl case MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT: 1746147883Sscottl mpt_prtc(mpt, " RAID-1"); 1747147883Sscottl break; 1748147883Sscottl case MPI_IOCPAGE2_CAP_FLAGS_SES_SUPPORT: 1749147883Sscottl mpt_prtc(mpt, " SES"); 1750147883Sscottl break; 1751147883Sscottl case MPI_IOCPAGE2_CAP_FLAGS_SAFTE_SUPPORT: 1752147883Sscottl mpt_prtc(mpt, " SAFTE"); 1753147883Sscottl break; 1754147883Sscottl case MPI_IOCPAGE2_CAP_FLAGS_CROSS_CHANNEL_SUPPORT: 1755147883Sscottl mpt_prtc(mpt, " Multi-Channel-Arrays"); 1756147883Sscottl default: 1757147883Sscottl break; 1758147883Sscottl } 1759102199Smjacob } 1760147883Sscottl mpt_prtc(mpt, " )\n"); 1761147883Sscottl if ((mpt->ioc_page2->CapabilitiesFlags 1762147883Sscottl & (MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT 1763147883Sscottl | MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT 1764147883Sscottl | MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT)) != 0) { 1765147883Sscottl mpt_prt(mpt, "%d Active Volume%s(%d Max)\n", 1766147883Sscottl mpt->ioc_page2->NumActiveVolumes, 1767147883Sscottl mpt->ioc_page2->NumActiveVolumes != 1 1768147883Sscottl ? 
"s " : " ", 1769147883Sscottl mpt->ioc_page2->MaxVolumes); 1770147883Sscottl mpt_prt(mpt, "%d Hidden Drive Member%s(%d Max)\n", 1771147883Sscottl mpt->ioc_page2->NumActivePhysDisks, 1772147883Sscottl mpt->ioc_page2->NumActivePhysDisks != 1 1773147883Sscottl ? "s " : " ", 1774147883Sscottl mpt->ioc_page2->MaxPhysDisks); 1775147883Sscottl } 1776147883Sscottl } 1777102199Smjacob 1778147883Sscottl len = mpt->ioc_page2->MaxVolumes * sizeof(struct mpt_raid_volume); 1779147883Sscottl mpt->raid_volumes = malloc(len, M_DEVBUF, M_NOWAIT); 1780147883Sscottl if (mpt->raid_volumes == NULL) { 1781147883Sscottl mpt_prt(mpt, "Could not allocate RAID volume data\n"); 1782147883Sscottl } else { 1783147883Sscottl memset(mpt->raid_volumes, 0, len); 1784147883Sscottl } 1785147883Sscottl 1786147883Sscottl /* 1787147883Sscottl * Copy critical data out of ioc_page2 so that we can 1788147883Sscottl * safely refresh the page without windows of unreliable 1789147883Sscottl * data. 1790147883Sscottl */ 1791147883Sscottl mpt->raid_max_volumes = mpt->ioc_page2->MaxVolumes; 1792147883Sscottl 1793147883Sscottl len = sizeof(*mpt->raid_volumes->config_page) 1794147883Sscottl + (sizeof(RAID_VOL0_PHYS_DISK)*(mpt->ioc_page2->MaxPhysDisks - 1)); 1795147883Sscottl for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) { 1796147883Sscottl mpt_raid = &mpt->raid_volumes[i]; 1797147883Sscottl mpt_raid->config_page = malloc(len, M_DEVBUF, M_NOWAIT); 1798147883Sscottl if (mpt_raid->config_page == NULL) { 1799147883Sscottl mpt_prt(mpt, "Could not allocate RAID page data\n"); 1800147883Sscottl break; 1801147883Sscottl } 1802147883Sscottl memset(mpt_raid->config_page, 0, len); 1803147883Sscottl } 1804147883Sscottl mpt->raid_page0_len = len; 1805147883Sscottl 1806147883Sscottl len = mpt->ioc_page2->MaxPhysDisks * sizeof(struct mpt_raid_disk); 1807147883Sscottl mpt->raid_disks = malloc(len, M_DEVBUF, M_NOWAIT); 1808147883Sscottl if (mpt->raid_disks == NULL) { 1809147883Sscottl mpt_prt(mpt, "Could not allocate RAID disk 
data\n"); 1810147883Sscottl } else { 1811147883Sscottl memset(mpt->raid_disks, 0, len); 1812147883Sscottl } 1813147883Sscottl 1814147883Sscottl mpt->raid_max_disks = mpt->ioc_page2->MaxPhysDisks; 1815147883Sscottl 1816147883Sscottl rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC, 1817147883Sscottl /*PageNumber*/3, /*PageAddress*/0, &hdr, 1818147883Sscottl /*sleep_ok*/FALSE, /*timeout_ms*/5000); 1819147883Sscottl if (rv) 1820147883Sscottl return (EIO); 1821147883Sscottl 1822147883Sscottl mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC Page 3 Header: %x %x %x %x\n", 1823147883Sscottl hdr.PageVersion, hdr.PageLength, hdr.PageNumber, hdr.PageType); 1824147883Sscottl 1825147883Sscottl if (mpt->ioc_page3 != NULL) 1826147883Sscottl free(mpt->ioc_page3, M_DEVBUF); 1827147883Sscottl len = hdr.PageLength * sizeof(uint32_t); 1828151075Sscottl mpt->ioc_page3 = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); 1829147883Sscottl if (mpt->ioc_page3 == NULL) 1830102199Smjacob return (-1); 1831147883Sscottl memcpy(&mpt->ioc_page3->Header, &hdr, sizeof(hdr)); 1832147883Sscottl rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0, 1833147883Sscottl &mpt->ioc_page3->Header, len, 1834147883Sscottl /*sleep_ok*/FALSE, /*timeout_ms*/5000); 1835147883Sscottl if (rv) { 1836147883Sscottl mpt_prt(mpt, "failed to read IOC Page 3\n"); 1837102199Smjacob } 1838102199Smjacob 1839147883Sscottl mpt_raid_wakeup(mpt); 1840147883Sscottl 1841102199Smjacob return (0); 1842102199Smjacob} 1843102199Smjacob 1844102199Smjacob/* 1845102199Smjacob * Enable IOC port 1846102199Smjacob */ 1847102199Smjacobstatic int 1848147883Sscottlmpt_send_port_enable(struct mpt_softc *mpt, int port) 1849102199Smjacob{ 1850147883Sscottl request_t *req; 1851101704Smjacob MSG_PORT_ENABLE *enable_req; 1852147883Sscottl int error; 1853101704Smjacob 1854147883Sscottl req = mpt_get_request(mpt, /*sleep_ok*/FALSE); 1855147883Sscottl if (req == NULL) 1856147883Sscottl return (-1); 1857101704Smjacob 1858101704Smjacob enable_req = req->req_vbuf; 
1859157354Smjacob memset(enable_req, 0, MPT_RQSL(mpt)); 1860101704Smjacob 1861101704Smjacob enable_req->Function = MPI_FUNCTION_PORT_ENABLE; 1862147883Sscottl enable_req->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_CONFIG); 1863101704Smjacob enable_req->PortNumber = port; 1864101704Smjacob 1865101704Smjacob mpt_check_doorbell(mpt); 1866147883Sscottl mpt_lprt(mpt, MPT_PRT_DEBUG, "enabling port %d\n", port); 1867147883Sscottl 1868147883Sscottl mpt_send_cmd(mpt, req); 1869147883Sscottl error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, 1870156021Smjacob /*sleep_ok*/FALSE, 1871156021Smjacob /*time_ms*/(mpt->is_sas || mpt->is_fc)? 30000 : 3000); 1872147883Sscottl if (error != 0) { 1873157117Smjacob mpt_prt(mpt, "port %d enable timed out\n", port); 1874147883Sscottl return (-1); 1875101704Smjacob } 1876101704Smjacob mpt_free_request(mpt, req); 1877157117Smjacob mpt_lprt(mpt, MPT_PRT_DEBUG, "enabled port %d\n", port); 1878101704Smjacob return (0); 1879101704Smjacob} 1880101704Smjacob 1881101704Smjacob/* 1882101704Smjacob * Enable/Disable asynchronous event reporting. 1883101704Smjacob */ 1884101704Smjacobstatic int 1885147883Sscottlmpt_send_event_request(struct mpt_softc *mpt, int onoff) 1886101704Smjacob{ 1887101704Smjacob request_t *req; 1888101704Smjacob MSG_EVENT_NOTIFY *enable_req; 1889101704Smjacob 1890157354Smjacob req = mpt_get_request(mpt, FALSE); 1891157354Smjacob if (req == NULL) { 1892157354Smjacob return (ENOMEM); 1893157354Smjacob } 1894101704Smjacob enable_req = req->req_vbuf; 1895157354Smjacob memset(enable_req, 0, sizeof *enable_req); 1896101704Smjacob 1897101704Smjacob enable_req->Function = MPI_FUNCTION_EVENT_NOTIFICATION; 1898147883Sscottl enable_req->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_EVENTS); 1899101704Smjacob enable_req->Switch = onoff; 1900101704Smjacob 1901101704Smjacob mpt_check_doorbell(mpt); 1902157354Smjacob mpt_lprt(mpt, MPT_PRT_DEBUG, "%sabling async events\n", 1903157354Smjacob onoff ? 
"en" : "dis"); 1904157354Smjacob /* 1905157354Smjacob * Send the command off, but don't wait for it. 1906157354Smjacob */ 1907101704Smjacob mpt_send_cmd(mpt, req); 1908101704Smjacob return (0); 1909101704Smjacob} 1910101704Smjacob 1911101704Smjacob/* 1912101704Smjacob * Un-mask the interupts on the chip. 1913101704Smjacob */ 1914101704Smjacobvoid 1915147883Sscottlmpt_enable_ints(struct mpt_softc *mpt) 1916101704Smjacob{ 1917101704Smjacob /* Unmask every thing except door bell int */ 1918101704Smjacob mpt_write(mpt, MPT_OFFSET_INTR_MASK, MPT_INTR_DB_MASK); 1919101704Smjacob} 1920101704Smjacob 1921101704Smjacob/* 1922101704Smjacob * Mask the interupts on the chip. 1923101704Smjacob */ 1924101704Smjacobvoid 1925147883Sscottlmpt_disable_ints(struct mpt_softc *mpt) 1926101704Smjacob{ 1927101704Smjacob /* Mask all interrupts */ 1928156104Smjacob mpt_write(mpt, MPT_OFFSET_INTR_MASK, 1929101704Smjacob MPT_INTR_REPLY_MASK | MPT_INTR_DB_MASK); 1930101704Smjacob} 1931101704Smjacob 1932147883Sscottlstatic void 1933147883Sscottlmpt_sysctl_attach(struct mpt_softc *mpt) 1934147883Sscottl{ 1935157117Smjacob#if __FreeBSD_version >= 500000 1936147883Sscottl struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev); 1937147883Sscottl struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev); 1938147883Sscottl 1939156104Smjacob SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 1940147883Sscottl "debug", CTLFLAG_RW, &mpt->verbose, 0, 1941147883Sscottl "Debugging/Verbose level"); 1942157117Smjacob#endif 1943147883Sscottl} 1944147883Sscottl 1945101704Smjacobint 1946147883Sscottlmpt_attach(struct mpt_softc *mpt) 1947101704Smjacob{ 1948157117Smjacob struct mpt_personality *pers; 1949147883Sscottl int i; 1950157117Smjacob int error; 1951147883Sscottl 1952147883Sscottl for (i = 0; i < MPT_MAX_PERSONALITIES; i++) { 1953147883Sscottl pers = mpt_personalities[i]; 1954157117Smjacob if (pers == NULL) { 1955147883Sscottl continue; 1956157117Smjacob } 1957147883Sscottl if (pers->probe(mpt) 
== 0) { 1958147883Sscottl error = pers->attach(mpt); 1959147883Sscottl if (error != 0) { 1960147883Sscottl mpt_detach(mpt); 1961147883Sscottl return (error); 1962147883Sscottl } 1963147883Sscottl mpt->mpt_pers_mask |= (0x1 << pers->id); 1964147883Sscottl pers->use_count++; 1965147883Sscottl } 1966147883Sscottl } 1967155521Smjacob 1968157117Smjacob /* 1969157117Smjacob * Now that we've attached everything, do the enable function 1970157117Smjacob * for all of the personalities. This allows the personalities 1971157117Smjacob * to do setups that are appropriate for them prior to enabling 1972157117Smjacob * any ports. 1973157117Smjacob */ 1974157117Smjacob for (i = 0; i < MPT_MAX_PERSONALITIES; i++) { 1975157117Smjacob pers = mpt_personalities[i]; 1976157117Smjacob if (pers != NULL && MPT_PERS_ATTACHED(pers, mpt) != 0) { 1977157117Smjacob error = pers->enable(mpt); 1978157117Smjacob if (error != 0) { 1979157117Smjacob mpt_prt(mpt, "personality %s attached but would" 1980157117Smjacob " not enable (%d)\n", pers->name, error); 1981157117Smjacob mpt_detach(mpt); 1982157117Smjacob return (error); 1983157117Smjacob } 1984157117Smjacob } 1985157117Smjacob } 1986147883Sscottl return (0); 1987147883Sscottl} 1988147883Sscottl 1989147883Sscottlint 1990147883Sscottlmpt_shutdown(struct mpt_softc *mpt) 1991147883Sscottl{ 1992147883Sscottl struct mpt_personality *pers; 1993147883Sscottl 1994157117Smjacob MPT_PERS_FOREACH_REVERSE(mpt, pers) { 1995147883Sscottl pers->shutdown(mpt); 1996157117Smjacob } 1997147883Sscottl return (0); 1998147883Sscottl} 1999147883Sscottl 2000147883Sscottlint 2001147883Sscottlmpt_detach(struct mpt_softc *mpt) 2002147883Sscottl{ 2003147883Sscottl struct mpt_personality *pers; 2004147883Sscottl 2005147883Sscottl MPT_PERS_FOREACH_REVERSE(mpt, pers) { 2006147883Sscottl pers->detach(mpt); 2007147883Sscottl mpt->mpt_pers_mask &= ~(0x1 << pers->id); 2008147883Sscottl pers->use_count--; 2009147883Sscottl } 2010147883Sscottl 2011147883Sscottl return (0); 
2012147883Sscottl} 2013147883Sscottl 2014147883Sscottlint 2015147883Sscottlmpt_core_load(struct mpt_personality *pers) 2016147883Sscottl{ 2017147883Sscottl int i; 2018147883Sscottl 2019147883Sscottl /* 2020147883Sscottl * Setup core handlers and insert the default handler 2021147883Sscottl * into all "empty slots". 2022147883Sscottl */ 2023157117Smjacob for (i = 0; i < MPT_NUM_REPLY_HANDLERS; i++) { 2024147883Sscottl mpt_reply_handlers[i] = mpt_default_reply_handler; 2025157117Smjacob } 2026147883Sscottl 2027147883Sscottl mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_EVENTS)] = 2028147883Sscottl mpt_event_reply_handler; 2029147883Sscottl mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_CONFIG)] = 2030147883Sscottl mpt_config_reply_handler; 2031147883Sscottl mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_HANDSHAKE)] = 2032147883Sscottl mpt_handshake_reply_handler; 2033147883Sscottl return (0); 2034147883Sscottl} 2035147883Sscottl 2036147883Sscottl/* 2037147883Sscottl * Initialize per-instance driver data and perform 2038147883Sscottl * initial controller configuration. 
2039147883Sscottl */ 2040147883Sscottlint 2041147883Sscottlmpt_core_attach(struct mpt_softc *mpt) 2042147883Sscottl{ 2043101704Smjacob int val; 2044147883Sscottl int error; 2045101704Smjacob 2046157117Smjacob 2047147883Sscottl LIST_INIT(&mpt->ack_frames); 2048147883Sscottl 2049147883Sscottl /* Put all request buffers on the free list */ 2050147883Sscottl TAILQ_INIT(&mpt->request_pending_list); 2051147883Sscottl TAILQ_INIT(&mpt->request_free_list); 2052157354Smjacob TAILQ_INIT(&mpt->request_timeout_list); 2053157117Smjacob for (val = 0; val < MPT_MAX_REQUESTS(mpt); val++) { 2054157354Smjacob request_t *req = &mpt->request_pool[val]; 2055157354Smjacob req->state = REQ_STATE_ALLOCATED; 2056157354Smjacob mpt_free_request(mpt, req); 2057157117Smjacob } 2058147883Sscottl 2059157117Smjacob for (val = 0; val < MPT_MAX_LUNS; val++) { 2060157117Smjacob STAILQ_INIT(&mpt->trt[val].atios); 2061157117Smjacob STAILQ_INIT(&mpt->trt[val].inots); 2062157117Smjacob } 2063157117Smjacob STAILQ_INIT(&mpt->trt_wildcard.atios); 2064157117Smjacob STAILQ_INIT(&mpt->trt_wildcard.inots); 2065157117Smjacob 2066157117Smjacob mpt->scsi_tgt_handler_id = MPT_HANDLER_ID_NONE; 2067157117Smjacob 2068147883Sscottl mpt_sysctl_attach(mpt); 2069147883Sscottl 2070147883Sscottl mpt_lprt(mpt, MPT_PRT_DEBUG, "doorbell req = %s\n", 2071157117Smjacob mpt_ioc_diag(mpt_read(mpt, MPT_OFFSET_DOORBELL))); 2072147883Sscottl 2073147883Sscottl error = mpt_configure_ioc(mpt); 2074147883Sscottl 2075147883Sscottl return (error); 2076147883Sscottl} 2077147883Sscottl 2078157117Smjacobint 2079157117Smjacobmpt_core_enable(struct mpt_softc *mpt) 2080157117Smjacob{ 2081157117Smjacob /* 2082157117Smjacob * We enter with the IOC enabled, but async events 2083157117Smjacob * not enabled, ports not enabled and interrupts 2084157117Smjacob * not enabled. 
2085157117Smjacob */ 2086157117Smjacob 2087157117Smjacob /* 2088157117Smjacob * Enable asynchronous event reporting- all personalities 2089157117Smjacob * have attached so that they should be able to now field 2090157117Smjacob * async events. 2091157117Smjacob */ 2092157117Smjacob mpt_send_event_request(mpt, 1); 2093157117Smjacob 2094157117Smjacob /* 2095157117Smjacob * Catch any pending interrupts 2096157117Smjacob * 2097157117Smjacob * This seems to be crucial- otherwise 2098157117Smjacob * the portenable below times out. 2099157117Smjacob */ 2100157117Smjacob mpt_intr(mpt); 2101157117Smjacob 2102157117Smjacob /* 2103157117Smjacob * Enable Interrupts 2104157117Smjacob */ 2105157117Smjacob mpt_enable_ints(mpt); 2106157117Smjacob 2107157117Smjacob /* 2108157117Smjacob * Catch any pending interrupts 2109157117Smjacob * 2110157117Smjacob * This seems to be crucial- otherwise 2111157117Smjacob * the portenable below times out. 2112157117Smjacob */ 2113157117Smjacob mpt_intr(mpt); 2114157117Smjacob 2115157117Smjacob /* 2116157117Smjacob * Enable the port- but only if we are not MPT_ROLE_NONE. 2117157117Smjacob */ 2118157117Smjacob if (mpt_send_port_enable(mpt, 0) != MPT_OK) { 2119157117Smjacob mpt_prt(mpt, "failed to enable port 0\n"); 2120157117Smjacob return (ENXIO); 2121157117Smjacob } 2122157117Smjacob return (0); 2123157117Smjacob} 2124157117Smjacob 2125147883Sscottlvoid 2126147883Sscottlmpt_core_shutdown(struct mpt_softc *mpt) 2127147883Sscottl{ 2128157117Smjacob mpt_disable_ints(mpt); 2129147883Sscottl} 2130147883Sscottl 2131147883Sscottlvoid 2132147883Sscottlmpt_core_detach(struct mpt_softc *mpt) 2133147883Sscottl{ 2134157117Smjacob mpt_disable_ints(mpt); 2135147883Sscottl} 2136147883Sscottl 2137147883Sscottlint 2138147883Sscottlmpt_core_unload(struct mpt_personality *pers) 2139147883Sscottl{ 2140147883Sscottl /* Unload is always successfull. 
*/ 2141147883Sscottl return (0); 2142147883Sscottl} 2143147883Sscottl 2144147883Sscottl#define FW_UPLOAD_REQ_SIZE \ 2145147883Sscottl (sizeof(MSG_FW_UPLOAD) - sizeof(SGE_MPI_UNION) \ 2146147883Sscottl + sizeof(FW_UPLOAD_TCSGE) + sizeof(SGE_SIMPLE32)) 2147147883Sscottl 2148147883Sscottlstatic int 2149147883Sscottlmpt_upload_fw(struct mpt_softc *mpt) 2150147883Sscottl{ 2151147883Sscottl uint8_t fw_req_buf[FW_UPLOAD_REQ_SIZE]; 2152147883Sscottl MSG_FW_UPLOAD_REPLY fw_reply; 2153147883Sscottl MSG_FW_UPLOAD *fw_req; 2154147883Sscottl FW_UPLOAD_TCSGE *tsge; 2155147883Sscottl SGE_SIMPLE32 *sge; 2156147883Sscottl uint32_t flags; 2157147883Sscottl int error; 2158147883Sscottl 2159147883Sscottl memset(&fw_req_buf, 0, sizeof(fw_req_buf)); 2160147883Sscottl fw_req = (MSG_FW_UPLOAD *)fw_req_buf; 2161147883Sscottl fw_req->ImageType = MPI_FW_UPLOAD_ITYPE_FW_IOC_MEM; 2162147883Sscottl fw_req->Function = MPI_FUNCTION_FW_UPLOAD; 2163147883Sscottl fw_req->MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE); 2164147883Sscottl tsge = (FW_UPLOAD_TCSGE *)&fw_req->SGL; 2165147883Sscottl tsge->DetailsLength = 12; 2166147883Sscottl tsge->Flags = MPI_SGE_FLAGS_TRANSACTION_ELEMENT; 2167147883Sscottl tsge->ImageSize = htole32(mpt->fw_image_size); 2168147883Sscottl sge = (SGE_SIMPLE32 *)(tsge + 1); 2169147883Sscottl flags = (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER 2170147883Sscottl | MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_SIMPLE_ELEMENT 2171147883Sscottl | MPI_SGE_FLAGS_32_BIT_ADDRESSING | MPI_SGE_FLAGS_IOC_TO_HOST); 2172147883Sscottl flags <<= MPI_SGE_FLAGS_SHIFT; 2173147883Sscottl sge->FlagsLength = htole32(flags | mpt->fw_image_size); 2174147883Sscottl sge->Address = htole32(mpt->fw_phys); 2175147883Sscottl error = mpt_send_handshake_cmd(mpt, sizeof(fw_req_buf), &fw_req_buf); 2176147883Sscottl if (error) 2177147883Sscottl return(error); 2178147883Sscottl error = mpt_recv_handshake_reply(mpt, sizeof(fw_reply), &fw_reply); 2179147883Sscottl return (error); 2180147883Sscottl} 
2181147883Sscottl 2182147883Sscottlstatic void 2183147883Sscottlmpt_diag_outsl(struct mpt_softc *mpt, uint32_t addr, 2184147883Sscottl uint32_t *data, bus_size_t len) 2185147883Sscottl{ 2186147883Sscottl uint32_t *data_end; 2187147883Sscottl 2188147883Sscottl data_end = data + (roundup2(len, sizeof(uint32_t)) / 4); 2189155521Smjacob pci_enable_io(mpt->dev, SYS_RES_IOPORT); 2190147883Sscottl mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, addr); 2191147883Sscottl while (data != data_end) { 2192147883Sscottl mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, *data); 2193147883Sscottl data++; 2194101704Smjacob } 2195155521Smjacob pci_disable_io(mpt->dev, SYS_RES_IOPORT); 2196147883Sscottl} 2197101704Smjacob 2198147883Sscottlstatic int 2199147883Sscottlmpt_download_fw(struct mpt_softc *mpt) 2200147883Sscottl{ 2201147883Sscottl MpiFwHeader_t *fw_hdr; 2202147883Sscottl int error; 2203147883Sscottl uint32_t ext_offset; 2204147883Sscottl uint32_t data; 2205147883Sscottl 2206147883Sscottl mpt_prt(mpt, "Downloading Firmware - Image Size %d\n", 2207147883Sscottl mpt->fw_image_size); 2208147883Sscottl 2209147883Sscottl error = mpt_enable_diag_mode(mpt); 2210147883Sscottl if (error != 0) { 2211147883Sscottl mpt_prt(mpt, "Could not enter diagnostic mode!\n"); 2212147883Sscottl return (EIO); 2213101704Smjacob } 2214101704Smjacob 2215147883Sscottl mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, 2216147883Sscottl MPI_DIAG_RW_ENABLE|MPI_DIAG_DISABLE_ARM); 2217147883Sscottl 2218147883Sscottl fw_hdr = (MpiFwHeader_t *)mpt->fw_image; 2219147883Sscottl mpt_diag_outsl(mpt, fw_hdr->LoadStartAddress, (uint32_t*)fw_hdr, 2220147883Sscottl fw_hdr->ImageSize); 2221147883Sscottl 2222147883Sscottl ext_offset = fw_hdr->NextImageHeaderOffset; 2223147883Sscottl while (ext_offset != 0) { 2224147883Sscottl MpiExtImageHeader_t *ext; 2225147883Sscottl 2226147883Sscottl ext = (MpiExtImageHeader_t *)((uintptr_t)fw_hdr + ext_offset); 2227147883Sscottl ext_offset = ext->NextImageHeaderOffset; 2228147883Sscottl 2229147883Sscottl 
mpt_diag_outsl(mpt, ext->LoadStartAddress, (uint32_t*)ext, 2230147883Sscottl ext->ImageSize); 2231147883Sscottl } 2232147883Sscottl 2233155521Smjacob pci_enable_io(mpt->dev, SYS_RES_IOPORT); 2234147883Sscottl /* Setup the address to jump to on reset. */ 2235147883Sscottl mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, fw_hdr->IopResetRegAddr); 2236147883Sscottl mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, fw_hdr->IopResetVectorValue); 2237147883Sscottl 2238101704Smjacob /* 2239147883Sscottl * The controller sets the "flash bad" status after attempting 2240147883Sscottl * to auto-boot from flash. Clear the status so that the controller 2241147883Sscottl * will continue the boot process with our newly installed firmware. 2242101704Smjacob */ 2243147883Sscottl mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, MPT_DIAG_MEM_CFG_BASE); 2244147883Sscottl data = mpt_pio_read(mpt, MPT_OFFSET_DIAG_DATA) | MPT_DIAG_MEM_CFG_BADFL; 2245147883Sscottl mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, MPT_DIAG_MEM_CFG_BASE); 2246147883Sscottl mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, data); 2247147883Sscottl 2248155521Smjacob pci_disable_io(mpt->dev, SYS_RES_IOPORT); 2249155521Smjacob 2250147883Sscottl /* 2251147883Sscottl * Re-enable the processor and clear the boot halt flag. 2252147883Sscottl */ 2253147883Sscottl data = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC); 2254147883Sscottl data &= ~(MPI_DIAG_PREVENT_IOC_BOOT|MPI_DIAG_DISABLE_ARM); 2255147883Sscottl mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, data); 2256147883Sscottl 2257147883Sscottl mpt_disable_diag_mode(mpt); 2258147883Sscottl return (0); 2259147883Sscottl} 2260147883Sscottl 2261147883Sscottl/* 2262147883Sscottl * Allocate/Initialize data structures for the controller. Called 2263147883Sscottl * once at instance startup. 
2264147883Sscottl */ 2265147883Sscottlstatic int 2266147883Sscottlmpt_configure_ioc(struct mpt_softc *mpt) 2267147883Sscottl{ 2268147883Sscottl MSG_PORT_FACTS_REPLY pfp; 2269147883Sscottl MSG_IOC_FACTS_REPLY facts; 2270147883Sscottl int try; 2271147883Sscottl int needreset; 2272155521Smjacob uint32_t max_chain_depth; 2273147883Sscottl 2274147883Sscottl needreset = 0; 2275101704Smjacob for (try = 0; try < MPT_MAX_TRYS; try++) { 2276147883Sscottl 2277101704Smjacob /* 2278101704Smjacob * No need to reset if the IOC is already in the READY state. 2279101704Smjacob * 2280101704Smjacob * Force reset if initialization failed previously. 2281101704Smjacob * Note that a hard_reset of the second channel of a '929 2282101704Smjacob * will stop operation of the first channel. Hopefully, if the 2283156104Smjacob * first channel is ok, the second will not require a hard 2284101704Smjacob * reset. 2285101704Smjacob */ 2286157117Smjacob if (needreset || MPT_STATE(mpt_rd_db(mpt)) != 2287101704Smjacob MPT_DB_STATE_READY) { 2288157117Smjacob if (mpt_reset(mpt, FALSE) != MPT_OK) { 2289101704Smjacob continue; 2290157117Smjacob } 2291101704Smjacob } 2292147883Sscottl needreset = 0; 2293101704Smjacob 2294101704Smjacob if (mpt_get_iocfacts(mpt, &facts) != MPT_OK) { 2295147883Sscottl mpt_prt(mpt, "mpt_get_iocfacts failed\n"); 2296147883Sscottl needreset = 1; 2297101704Smjacob continue; 2298102199Smjacob } 2299102199Smjacob 2300147883Sscottl mpt->mpt_global_credits = le16toh(facts.GlobalCredits); 2301147883Sscottl mpt->request_frame_size = le16toh(facts.RequestFrameSize); 2302155521Smjacob mpt->ioc_facts_flags = facts.Flags; 2303147883Sscottl mpt_prt(mpt, "MPI Version=%d.%d.%d.%d\n", 2304147883Sscottl le16toh(facts.MsgVersion) >> 8, 2305147883Sscottl le16toh(facts.MsgVersion) & 0xFF, 2306147883Sscottl le16toh(facts.HeaderVersion) >> 8, 2307147883Sscottl le16toh(facts.HeaderVersion) & 0xFF); 2308155521Smjacob 2309155521Smjacob /* 2310155521Smjacob * Now that we know request frame size, we 
can calculate 2311155521Smjacob * the actual (reasonable) segment limit for read/write I/O. 2312155521Smjacob * 2313155521Smjacob * This limit is constrained by: 2314155521Smjacob * 2315155521Smjacob * + The size of each area we allocate per command (and how 2316155521Smjacob * many chain segments we can fit into it). 2317155521Smjacob * + The total number of areas we've set up. 2318155521Smjacob * + The actual chain depth the card will allow. 2319155521Smjacob * 2320155521Smjacob * The first area's segment count is limited by the I/O request 2321155521Smjacob * at the head of it. We cannot allocate realistically more 2322155521Smjacob * than MPT_MAX_REQUESTS areas. Therefore, to account for both 2323155521Smjacob * conditions, we'll just start out with MPT_MAX_REQUESTS-2. 2324155521Smjacob * 2325155521Smjacob */ 2326155521Smjacob max_chain_depth = facts.MaxChainDepth; 2327155521Smjacob 2328155521Smjacob /* total number of request areas we (can) allocate */ 2329155521Smjacob mpt->max_seg_cnt = MPT_MAX_REQUESTS(mpt) - 2; 2330155521Smjacob 2331155521Smjacob /* converted to the number of chain areas possible */ 2332155521Smjacob mpt->max_seg_cnt *= MPT_NRFM(mpt); 2333155521Smjacob 2334155521Smjacob /* limited by the number of chain areas the card will support */ 2335155521Smjacob if (mpt->max_seg_cnt > max_chain_depth) { 2336155521Smjacob mpt_lprt(mpt, MPT_PRT_DEBUG, 2337155521Smjacob "chain depth limited to %u (from %u)\n", 2338155521Smjacob max_chain_depth, mpt->max_seg_cnt); 2339155521Smjacob mpt->max_seg_cnt = max_chain_depth; 2340155521Smjacob } 2341155521Smjacob 2342155521Smjacob /* converted to the number of simple sges in chain segments. 
*/ 2343155521Smjacob mpt->max_seg_cnt *= (MPT_NSGL(mpt) - 1); 2344155521Smjacob 2345147883Sscottl mpt_lprt(mpt, MPT_PRT_DEBUG, 2346155521Smjacob "Maximum Segment Count: %u\n", mpt->max_seg_cnt); 2347155521Smjacob mpt_lprt(mpt, MPT_PRT_DEBUG, 2348147883Sscottl "MsgLength=%u IOCNumber = %d\n", 2349147883Sscottl facts.MsgLength, facts.IOCNumber); 2350147883Sscottl mpt_lprt(mpt, MPT_PRT_DEBUG, 2351155521Smjacob "IOCFACTS: GlobalCredits=%d BlockSize=%u bytes " 2352155521Smjacob "Request Frame Size %u bytes Max Chain Depth %u\n", 2353155521Smjacob mpt->mpt_global_credits, facts.BlockSize, 2354155521Smjacob mpt->request_frame_size << 2, max_chain_depth); 2355147883Sscottl mpt_lprt(mpt, MPT_PRT_DEBUG, 2356147883Sscottl "IOCFACTS: Num Ports %d, FWImageSize %d, " 2357147883Sscottl "Flags=%#x\n", facts.NumberOfPorts, 2358147883Sscottl le32toh(facts.FWImageSize), facts.Flags); 2359147883Sscottl 2360155521Smjacob 2361147883Sscottl if ((facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT) != 0) { 2362147883Sscottl struct mpt_map_info mi; 2363147883Sscottl int error; 2364147883Sscottl 2365147883Sscottl /* 2366147883Sscottl * In some configurations, the IOC's firmware is 2367147883Sscottl * stored in a shared piece of system NVRAM that 2368147883Sscottl * is only accessable via the BIOS. In this 2369147883Sscottl * case, the firmware keeps a copy of firmware in 2370147883Sscottl * RAM until the OS driver retrieves it. Once 2371147883Sscottl * retrieved, we are responsible for re-downloading 2372147883Sscottl * the firmware after any hard-reset. 
2373147883Sscottl */ 2374147883Sscottl mpt->fw_image_size = le32toh(facts.FWImageSize); 2375147883Sscottl error = mpt_dma_tag_create(mpt, mpt->parent_dmat, 2376147883Sscottl /*alignment*/1, /*boundary*/0, 2377147883Sscottl /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, 2378147883Sscottl /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, 2379147883Sscottl /*filterarg*/NULL, mpt->fw_image_size, 2380147883Sscottl /*nsegments*/1, /*maxsegsz*/mpt->fw_image_size, 2381147883Sscottl /*flags*/0, &mpt->fw_dmat); 2382147883Sscottl if (error != 0) { 2383147883Sscottl mpt_prt(mpt, "cannot create fw dma tag\n"); 2384147883Sscottl return (ENOMEM); 2385147883Sscottl } 2386147883Sscottl error = bus_dmamem_alloc(mpt->fw_dmat, 2387147883Sscottl (void **)&mpt->fw_image, BUS_DMA_NOWAIT, 2388147883Sscottl &mpt->fw_dmap); 2389147883Sscottl if (error != 0) { 2390147883Sscottl mpt_prt(mpt, "cannot allocate fw mem.\n"); 2391147883Sscottl bus_dma_tag_destroy(mpt->fw_dmat); 2392147883Sscottl return (ENOMEM); 2393147883Sscottl } 2394147883Sscottl mi.mpt = mpt; 2395147883Sscottl mi.error = 0; 2396147883Sscottl bus_dmamap_load(mpt->fw_dmat, mpt->fw_dmap, 2397147883Sscottl mpt->fw_image, mpt->fw_image_size, mpt_map_rquest, 2398147883Sscottl &mi, 0); 2399147883Sscottl mpt->fw_phys = mi.phys; 2400147883Sscottl 2401147883Sscottl error = mpt_upload_fw(mpt); 2402147883Sscottl if (error != 0) { 2403147883Sscottl mpt_prt(mpt, "fw upload failed.\n"); 2404147883Sscottl bus_dmamap_unload(mpt->fw_dmat, mpt->fw_dmap); 2405147883Sscottl bus_dmamem_free(mpt->fw_dmat, mpt->fw_image, 2406147883Sscottl mpt->fw_dmap); 2407147883Sscottl bus_dma_tag_destroy(mpt->fw_dmat); 2408147883Sscottl mpt->fw_image = NULL; 2409147883Sscottl return (EIO); 2410147883Sscottl } 2411101704Smjacob } 2412101704Smjacob 2413102199Smjacob if (mpt_get_portfacts(mpt, &pfp) != MPT_OK) { 2414147883Sscottl mpt_prt(mpt, "mpt_get_portfacts failed\n"); 2415147883Sscottl needreset = 1; 2416102199Smjacob continue; 2417102199Smjacob } 2418102199Smjacob 
2419147883Sscottl mpt_lprt(mpt, MPT_PRT_DEBUG, 2420147883Sscottl "PORTFACTS: Type %x PFlags %x IID %d MaxDev %d\n", 2421147883Sscottl pfp.PortType, pfp.ProtocolFlags, pfp.PortSCSIID, 2422147883Sscottl pfp.MaxDevices); 2423102199Smjacob 2424147883Sscottl mpt->mpt_port_type = pfp.PortType; 2425147883Sscottl mpt->mpt_proto_flags = pfp.ProtocolFlags; 2426102199Smjacob if (pfp.PortType != MPI_PORTFACTS_PORTTYPE_SCSI && 2427155521Smjacob pfp.PortType != MPI_PORTFACTS_PORTTYPE_SAS && 2428102199Smjacob pfp.PortType != MPI_PORTFACTS_PORTTYPE_FC) { 2429147883Sscottl mpt_prt(mpt, "Unsupported Port Type (%x)\n", 2430102199Smjacob pfp.PortType); 2431102199Smjacob return (ENXIO); 2432102199Smjacob } 2433157117Smjacob mpt->mpt_max_tgtcmds = le16toh(pfp.MaxPostedCmdBuffers); 2434157117Smjacob 2435102199Smjacob if (pfp.PortType == MPI_PORTFACTS_PORTTYPE_FC) { 2436102199Smjacob mpt->is_fc = 1; 2437155521Smjacob mpt->is_sas = 0; 2438155521Smjacob } else if (pfp.PortType == MPI_PORTFACTS_PORTTYPE_SAS) { 2439155521Smjacob mpt->is_fc = 0; 2440155521Smjacob mpt->is_sas = 1; 2441102199Smjacob } else { 2442102199Smjacob mpt->is_fc = 0; 2443155521Smjacob mpt->is_sas = 0; 2444102199Smjacob } 2445102199Smjacob mpt->mpt_ini_id = pfp.PortSCSIID; 2446155521Smjacob mpt->mpt_max_devices = pfp.MaxDevices; 2447102199Smjacob 2448157117Smjacob /* 2449157117Smjacob * Match our expected role with what this port supports. 2450157117Smjacob * 2451157117Smjacob * We only do this to meet expectations. That is, if the 2452157117Smjacob * user has specified they want initiator role, and we 2453157117Smjacob * don't support it, that's an error we return back upstream. 
2454157117Smjacob */ 2455157117Smjacob 2456157117Smjacob mpt->cap = MPT_ROLE_NONE; 2457157117Smjacob if (pfp.ProtocolFlags & MPI_PORTFACTS_PROTOCOL_INITIATOR) { 2458157117Smjacob mpt->cap |= MPT_ROLE_INITIATOR; 2459157117Smjacob } 2460157117Smjacob if (pfp.ProtocolFlags & MPI_PORTFACTS_PROTOCOL_TARGET) { 2461157117Smjacob mpt->cap |= MPT_ROLE_TARGET; 2462157117Smjacob } 2463157117Smjacob if (mpt->cap == MPT_ROLE_NONE) { 2464157117Smjacob mpt_prt(mpt, "port does not support either target or " 2465157117Smjacob "initiator role\n"); 2466147883Sscottl return (ENXIO); 2467102199Smjacob } 2468102199Smjacob 2469157117Smjacob if ((mpt->role & MPT_ROLE_INITIATOR) && 2470157117Smjacob (mpt->cap & MPT_ROLE_INITIATOR) == 0) { 2471157117Smjacob mpt_prt(mpt, "port does not support initiator role\n"); 2472157117Smjacob return (ENXIO); 2473157117Smjacob } 2474157117Smjacob 2475157117Smjacob if ((mpt->role & MPT_ROLE_TARGET) && 2476157117Smjacob (mpt->cap & MPT_ROLE_TARGET) == 0) { 2477157117Smjacob mpt_prt(mpt, "port does not support target role\n"); 2478157117Smjacob return (ENXIO); 2479157117Smjacob } 2480157117Smjacob 2481157117Smjacob if (mpt_enable_ioc(mpt, 0) != MPT_OK) { 2482157117Smjacob mpt_prt(mpt, "unable to initialize IOC\n"); 2483157117Smjacob return (ENXIO); 2484157117Smjacob } 2485157117Smjacob 2486101704Smjacob /* 2487157117Smjacob * Read IOC configuration information. 
2488101704Smjacob */ 2489147883Sscottl mpt_read_config_info_ioc(mpt); 2490101704Smjacob 2491101704Smjacob /* Everything worked */ 2492101704Smjacob break; 2493101704Smjacob } 2494101704Smjacob 2495101704Smjacob if (try >= MPT_MAX_TRYS) { 2496103914Smjacob mpt_prt(mpt, "failed to initialize IOC"); 2497101704Smjacob return (EIO); 2498101704Smjacob } 2499101704Smjacob 2500101704Smjacob return (0); 2501101704Smjacob} 2502147883Sscottl 2503147883Sscottlstatic int 2504157117Smjacobmpt_enable_ioc(struct mpt_softc *mpt, int portenable) 2505147883Sscottl{ 2506147883Sscottl uint32_t pptr; 2507147883Sscottl int val; 2508147883Sscottl 2509155521Smjacob if (mpt_send_ioc_init(mpt, MPI_WHOINIT_HOST_DRIVER) != MPT_OK) { 2510147883Sscottl mpt_prt(mpt, "mpt_send_ioc_init failed\n"); 2511147883Sscottl return (EIO); 2512147883Sscottl } 2513147883Sscottl 2514147883Sscottl mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_send_ioc_init ok\n"); 2515147883Sscottl 2516147883Sscottl if (mpt_wait_state(mpt, MPT_DB_STATE_RUNNING) != MPT_OK) { 2517147883Sscottl mpt_prt(mpt, "IOC failed to go to run state\n"); 2518147883Sscottl return (ENXIO); 2519147883Sscottl } 2520155521Smjacob mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC now at RUNSTATE\n"); 2521147883Sscottl 2522147883Sscottl /* 2523147883Sscottl * Give it reply buffers 2524147883Sscottl * 2525147883Sscottl * Do *not* exceed global credits. 2526147883Sscottl */ 2527156104Smjacob for (val = 0, pptr = mpt->reply_phys; 2528156104Smjacob (pptr + MPT_REPLY_SIZE) < (mpt->reply_phys + PAGE_SIZE); 2529147883Sscottl pptr += MPT_REPLY_SIZE) { 2530147883Sscottl mpt_free_reply(mpt, pptr); 2531147883Sscottl if (++val == mpt->mpt_global_credits - 1) 2532147883Sscottl break; 2533147883Sscottl } 2534147883Sscottl 2535157117Smjacob 2536147883Sscottl /* 2537157354Smjacob * Enable the port if asked. This is only done if we're resetting 2538157354Smjacob * the IOC after initial startup. 
2539147883Sscottl */ 2540157117Smjacob if (portenable) { 2541157117Smjacob /* 2542157117Smjacob * Enable asynchronous event reporting 2543157117Smjacob */ 2544157117Smjacob mpt_send_event_request(mpt, 1); 2545147883Sscottl 2546157117Smjacob if (mpt_send_port_enable(mpt, 0) != MPT_OK) { 2547157117Smjacob mpt_prt(mpt, "failed to enable port 0\n"); 2548157117Smjacob return (ENXIO); 2549157117Smjacob } 2550147883Sscottl } 2551156104Smjacob return (MPT_OK); 2552147883Sscottl} 2553