mpt.c revision 156104
1139749Simp/*- 2156000Smjacob * Generic routines for LSI Fusion adapters. 3101704Smjacob * FreeBSD Version. 4101704Smjacob * 5101704Smjacob * Copyright (c) 2000, 2001 by Greg Ansley 6101704Smjacob * 7101704Smjacob * Redistribution and use in source and binary forms, with or without 8101704Smjacob * modification, are permitted provided that the following conditions 9101704Smjacob * are met: 10101704Smjacob * 1. Redistributions of source code must retain the above copyright 11101704Smjacob * notice immediately at the beginning of the file, without modification, 12101704Smjacob * this list of conditions, and the following disclaimer. 13101704Smjacob * 2. The name of the author may not be used to endorse or promote products 14101704Smjacob * derived from this software without specific prior written permission. 15101704Smjacob * 16101704Smjacob * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17101704Smjacob * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18101704Smjacob * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19101704Smjacob * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 20101704Smjacob * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21101704Smjacob * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22101704Smjacob * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23101704Smjacob * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24101704Smjacob * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25101704Smjacob * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26101704Smjacob * SUCH DAMAGE. 27156000Smjacob */ 28156000Smjacob/*- 29156000Smjacob * Copyright (c) 2002, 2006 by Matthew Jacob 30156000Smjacob * All rights reserved. 
31156104Smjacob * 32156000Smjacob * Redistribution and use in source and binary forms, with or without 33156000Smjacob * modification, are permitted provided that the following conditions are 34156000Smjacob * met: 35156000Smjacob * 1. Redistributions of source code must retain the above copyright 36156000Smjacob * notice, this list of conditions and the following disclaimer. 37156000Smjacob * 2. Redistributions in binary form must reproduce at minimum a disclaimer 38156000Smjacob * substantially similar to the "NO WARRANTY" disclaimer below 39156000Smjacob * ("Disclaimer") and any redistribution must be conditioned upon including 40156000Smjacob * a substantially similar Disclaimer requirement for further binary 41156000Smjacob * redistribution. 42156000Smjacob * 3. Neither the names of the above listed copyright holders nor the names 43156000Smjacob * of any contributors may be used to endorse or promote products derived 44156000Smjacob * from this software without specific prior written permission. 45156104Smjacob * 46156000Smjacob * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 47156000Smjacob * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 48156000Smjacob * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 49156000Smjacob * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 50156000Smjacob * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 51156000Smjacob * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 52156000Smjacob * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 53156000Smjacob * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 54156000Smjacob * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 55156000Smjacob * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT 56156000Smjacob * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 57147883Sscottl * 58156000Smjacob * Support from Chris Ellsworth in order to make SAS adapters work 59156000Smjacob * is gratefully acknowledged. 60101704Smjacob */ 61156000Smjacob/*- 62147883Sscottl * Copyright (c) 2004, Avid Technology, Inc. and its contributors. 63147883Sscottl * Copyright (c) 2005, WHEEL Sp. z o.o. 64147883Sscottl * Copyright (c) 2004, 2005 Justin T. Gibbs 65147883Sscottl * All rights reserved. 66156104Smjacob * 67147883Sscottl * Redistribution and use in source and binary forms, with or without 68147883Sscottl * modification, are permitted provided that the following conditions are 69147883Sscottl * met: 70147883Sscottl * 1. Redistributions of source code must retain the above copyright 71147883Sscottl * notice, this list of conditions and the following disclaimer. 72147883Sscottl * 2. Redistributions in binary form must reproduce at minimum a disclaimer 73147883Sscottl * substantially similar to the "NO WARRANTY" disclaimer below 74147883Sscottl * ("Disclaimer") and any redistribution must be conditioned upon including 75147883Sscottl * a substantially similar Disclaimer requirement for further binary 76147883Sscottl * redistribution. 77148679Sgibbs * 3. 
Neither the names of the above listed copyright holders nor the names 78148679Sgibbs * of any contributors may be used to endorse or promote products derived 79148679Sgibbs * from this software without specific prior written permission. 80156104Smjacob * 81147883Sscottl * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 82147883Sscottl * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 83147883Sscottl * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 84147883Sscottl * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 85147883Sscottl * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 86147883Sscottl * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 87147883Sscottl * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 88147883Sscottl * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 89147883Sscottl * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 90147883Sscottl * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT 91147883Sscottl * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
92101704Smjacob */ 93101704Smjacob 94134123Sobrien#include <sys/cdefs.h> 95134123Sobrien__FBSDID("$FreeBSD: head/sys/dev/mpt/mpt.c 156104 2006-02-28 07:44:50Z mjacob $"); 96134123Sobrien 97147883Sscottl#include <dev/mpt/mpt.h> 98147883Sscottl#include <dev/mpt/mpt_cam.h> /* XXX For static handler registration */ 99147883Sscottl#include <dev/mpt/mpt_raid.h> /* XXX For static handler registration */ 100102199Smjacob 101147883Sscottl#include <dev/mpt/mpilib/mpi.h> 102147883Sscottl#include <dev/mpt/mpilib/mpi_ioc.h> 103147883Sscottl 104147883Sscottl#include <sys/sysctl.h> 105147883Sscottl 106101704Smjacob#define MPT_MAX_TRYS 3 107101704Smjacob#define MPT_MAX_WAIT 300000 108101704Smjacob 109101704Smjacobstatic int maxwait_ack = 0; 110101704Smjacobstatic int maxwait_int = 0; 111101704Smjacobstatic int maxwait_state = 0; 112101704Smjacob 113147883SscottlTAILQ_HEAD(, mpt_softc) mpt_tailq = TAILQ_HEAD_INITIALIZER(mpt_tailq); 114147883Sscottlmpt_reply_handler_t *mpt_reply_handlers[MPT_NUM_REPLY_HANDLERS]; 115101704Smjacob 116147883Sscottlstatic mpt_reply_handler_t mpt_default_reply_handler; 117147883Sscottlstatic mpt_reply_handler_t mpt_config_reply_handler; 118147883Sscottlstatic mpt_reply_handler_t mpt_handshake_reply_handler; 119147883Sscottlstatic mpt_reply_handler_t mpt_event_reply_handler; 120147883Sscottlstatic void mpt_send_event_ack(struct mpt_softc *mpt, request_t *ack_req, 121147883Sscottl MSG_EVENT_NOTIFY_REPLY *msg, uint32_t context); 122155521Smjacobstatic int mpt_send_event_request(struct mpt_softc *mpt, int onoff); 123147883Sscottlstatic int mpt_soft_reset(struct mpt_softc *mpt); 124147883Sscottlstatic void mpt_hard_reset(struct mpt_softc *mpt); 125147883Sscottlstatic int mpt_configure_ioc(struct mpt_softc *mpt); 126147883Sscottlstatic int mpt_enable_ioc(struct mpt_softc *mpt); 127147883Sscottl 128147883Sscottl/************************* Personality Module Support *************************/ 129147883Sscottl/* 130147883Sscottl * We include one extra entry that 
is guaranteed to be NULL 131147883Sscottl * to simplify our itterator. 132147883Sscottl */ 133147883Sscottlstatic struct mpt_personality *mpt_personalities[MPT_MAX_PERSONALITIES + 1]; 134147883Sscottlstatic __inline struct mpt_personality* 135147883Sscottl mpt_pers_find(struct mpt_softc *, u_int); 136147883Sscottlstatic __inline struct mpt_personality* 137147883Sscottl mpt_pers_find_reverse(struct mpt_softc *, u_int); 138147883Sscottl 139147883Sscottlstatic __inline struct mpt_personality * 140147883Sscottlmpt_pers_find(struct mpt_softc *mpt, u_int start_at) 141101704Smjacob{ 142147883Sscottl KASSERT(start_at <= MPT_MAX_PERSONALITIES, 143147883Sscottl ("mpt_pers_find: starting position out of range\n")); 144147883Sscottl 145147883Sscottl while (start_at < MPT_MAX_PERSONALITIES 146147883Sscottl && (mpt->mpt_pers_mask & (0x1 << start_at)) == 0) { 147147883Sscottl start_at++; 148147883Sscottl } 149147883Sscottl return (mpt_personalities[start_at]); 150147883Sscottl} 151147883Sscottl 152147883Sscottl/* 153147883Sscottl * Used infrequenstly, so no need to optimize like a forward 154147883Sscottl * traversal where we use the MAX+1 is guaranteed to be NULL 155147883Sscottl * trick. 
156147883Sscottl */ 157147883Sscottlstatic __inline struct mpt_personality * 158147883Sscottlmpt_pers_find_reverse(struct mpt_softc *mpt, u_int start_at) 159147883Sscottl{ 160147883Sscottl while (start_at < MPT_MAX_PERSONALITIES 161147883Sscottl && (mpt->mpt_pers_mask & (0x1 << start_at)) == 0) { 162147883Sscottl start_at--; 163147883Sscottl } 164147883Sscottl if (start_at < MPT_MAX_PERSONALITIES) 165147883Sscottl return (mpt_personalities[start_at]); 166147883Sscottl return (NULL); 167147883Sscottl} 168147883Sscottl 169147883Sscottl#define MPT_PERS_FOREACH(mpt, pers) \ 170147883Sscottl for (pers = mpt_pers_find(mpt, /*start_at*/0); \ 171147883Sscottl pers != NULL; \ 172147883Sscottl pers = mpt_pers_find(mpt, /*start_at*/pers->id+1)) 173147883Sscottl 174147883Sscottl#define MPT_PERS_FOREACH_REVERSE(mpt, pers) \ 175147883Sscottl for (pers = mpt_pers_find_reverse(mpt, MPT_MAX_PERSONALITIES-1);\ 176147883Sscottl pers != NULL; \ 177147883Sscottl pers = mpt_pers_find_reverse(mpt, /*start_at*/pers->id-1)) 178147883Sscottl 179147883Sscottlstatic mpt_load_handler_t mpt_stdload; 180147883Sscottlstatic mpt_probe_handler_t mpt_stdprobe; 181147883Sscottlstatic mpt_attach_handler_t mpt_stdattach; 182147883Sscottlstatic mpt_event_handler_t mpt_stdevent; 183147883Sscottlstatic mpt_reset_handler_t mpt_stdreset; 184147883Sscottlstatic mpt_shutdown_handler_t mpt_stdshutdown; 185147883Sscottlstatic mpt_detach_handler_t mpt_stddetach; 186147883Sscottlstatic mpt_unload_handler_t mpt_stdunload; 187147883Sscottlstatic struct mpt_personality mpt_default_personality = 188147883Sscottl{ 189147883Sscottl .load = mpt_stdload, 190147883Sscottl .probe = mpt_stdprobe, 191147883Sscottl .attach = mpt_stdattach, 192147883Sscottl .event = mpt_stdevent, 193147883Sscottl .reset = mpt_stdreset, 194147883Sscottl .shutdown = mpt_stdshutdown, 195147883Sscottl .detach = mpt_stddetach, 196147883Sscottl .unload = mpt_stdunload 197147883Sscottl}; 198147883Sscottl 199147883Sscottlstatic mpt_load_handler_t 
mpt_core_load; 200147883Sscottlstatic mpt_attach_handler_t mpt_core_attach; 201147883Sscottlstatic mpt_reset_handler_t mpt_core_ioc_reset; 202147883Sscottlstatic mpt_event_handler_t mpt_core_event; 203147883Sscottlstatic mpt_shutdown_handler_t mpt_core_shutdown; 204147883Sscottlstatic mpt_shutdown_handler_t mpt_core_detach; 205147883Sscottlstatic mpt_unload_handler_t mpt_core_unload; 206147883Sscottlstatic struct mpt_personality mpt_core_personality = 207147883Sscottl{ 208147883Sscottl .name = "mpt_core", 209147883Sscottl .load = mpt_core_load, 210147883Sscottl .attach = mpt_core_attach, 211147883Sscottl .event = mpt_core_event, 212147883Sscottl .reset = mpt_core_ioc_reset, 213147883Sscottl .shutdown = mpt_core_shutdown, 214147883Sscottl .detach = mpt_core_detach, 215147883Sscottl .unload = mpt_core_unload, 216147883Sscottl}; 217147883Sscottl 218147883Sscottl/* 219147883Sscottl * Manual declaration so that DECLARE_MPT_PERSONALITY doesn't need 220147883Sscottl * ordering information. We want the core to always register FIRST. 221147883Sscottl * other modules are set to SI_ORDER_SECOND. 
222147883Sscottl */ 223147883Sscottlstatic moduledata_t mpt_core_mod = { 224147883Sscottl "mpt_core", mpt_modevent, &mpt_core_personality 225147883Sscottl}; 226147883SscottlDECLARE_MODULE(mpt_core, mpt_core_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST); 227147883SscottlMODULE_VERSION(mpt_core, 1); 228147883Sscottl 229147883Sscottl#define MPT_PERS_ATACHED(pers, mpt) \ 230147883Sscottl ((mpt)->pers_mask & (0x1 << pers->id)) 231147883Sscottl 232147883Sscottl 233147883Sscottlint 234147883Sscottlmpt_modevent(module_t mod, int type, void *data) 235147883Sscottl{ 236147883Sscottl struct mpt_personality *pers; 237147883Sscottl int error; 238147883Sscottl 239147883Sscottl pers = (struct mpt_personality *)data; 240147883Sscottl 241147883Sscottl error = 0; 242147883Sscottl switch (type) { 243147883Sscottl case MOD_LOAD: 244147883Sscottl { 245147883Sscottl mpt_load_handler_t **def_handler; 246147883Sscottl mpt_load_handler_t **pers_handler; 247147883Sscottl int i; 248147883Sscottl 249147883Sscottl for (i = 0; i < MPT_MAX_PERSONALITIES; i++) { 250147883Sscottl if (mpt_personalities[i] == NULL) 251147883Sscottl break; 252147883Sscottl } 253147883Sscottl if (i >= MPT_MAX_PERSONALITIES) { 254147883Sscottl error = ENOMEM; 255147883Sscottl break; 256147883Sscottl } 257147883Sscottl pers->id = i; 258147883Sscottl mpt_personalities[i] = pers; 259147883Sscottl 260147883Sscottl /* Install standard/noop handlers for any NULL entries. 
*/ 261147883Sscottl def_handler = MPT_PERS_FIRST_HANDLER(&mpt_default_personality); 262147883Sscottl pers_handler = MPT_PERS_FIRST_HANDLER(pers); 263147883Sscottl while (pers_handler <= MPT_PERS_LAST_HANDLER(pers)) { 264147883Sscottl if (*pers_handler == NULL) 265147883Sscottl *pers_handler = *def_handler; 266147883Sscottl pers_handler++; 267147883Sscottl def_handler++; 268147883Sscottl } 269147883Sscottl 270147883Sscottl error = (pers->load(pers)); 271147883Sscottl if (error != 0) 272147883Sscottl mpt_personalities[i] = NULL; 273147883Sscottl break; 274147883Sscottl } 275147883Sscottl case MOD_SHUTDOWN: 276147883Sscottl break; 277147883Sscottl case MOD_QUIESCE: 278147883Sscottl break; 279147883Sscottl case MOD_UNLOAD: 280147883Sscottl error = pers->unload(pers); 281147883Sscottl mpt_personalities[pers->id] = NULL; 282147883Sscottl break; 283147883Sscottl default: 284147883Sscottl error = EINVAL; 285147883Sscottl break; 286147883Sscottl } 287147883Sscottl return (error); 288147883Sscottl} 289147883Sscottl 290147883Sscottlint 291147883Sscottlmpt_stdload(struct mpt_personality *pers) 292147883Sscottl{ 293147883Sscottl /* Load is always successfull. */ 294147883Sscottl return (0); 295147883Sscottl} 296147883Sscottl 297147883Sscottlint 298147883Sscottlmpt_stdprobe(struct mpt_softc *mpt) 299147883Sscottl{ 300147883Sscottl /* Probe is always successfull. */ 301147883Sscottl return (0); 302147883Sscottl} 303147883Sscottl 304147883Sscottlint 305147883Sscottlmpt_stdattach(struct mpt_softc *mpt) 306147883Sscottl{ 307147883Sscottl /* Attach is always successfull. */ 308147883Sscottl return (0); 309147883Sscottl} 310147883Sscottl 311147883Sscottlint 312155521Smjacobmpt_stdevent(struct mpt_softc *mpt, request_t *req, MSG_EVENT_NOTIFY_REPLY *msg) 313147883Sscottl{ 314155521Smjacob mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_stdevent: 0x%x\n", msg->Event & 0xFF); 315147883Sscottl /* Event was not for us. 
*/ 316147883Sscottl return (0); 317147883Sscottl} 318147883Sscottl 319147883Sscottlvoid 320147883Sscottlmpt_stdreset(struct mpt_softc *mpt, int type) 321147883Sscottl{ 322147883Sscottl} 323147883Sscottl 324147883Sscottlvoid 325147883Sscottlmpt_stdshutdown(struct mpt_softc *mpt) 326147883Sscottl{ 327147883Sscottl} 328147883Sscottl 329147883Sscottlvoid 330147883Sscottlmpt_stddetach(struct mpt_softc *mpt) 331147883Sscottl{ 332147883Sscottl} 333147883Sscottl 334147883Sscottlint 335147883Sscottlmpt_stdunload(struct mpt_personality *pers) 336147883Sscottl{ 337147883Sscottl /* Unload is always successfull. */ 338147883Sscottl return (0); 339147883Sscottl} 340147883Sscottl 341147883Sscottl/******************************* Bus DMA Support ******************************/ 342147883Sscottlvoid 343147883Sscottlmpt_map_rquest(void *arg, bus_dma_segment_t *segs, int nseg, int error) 344147883Sscottl{ 345147883Sscottl struct mpt_map_info *map_info; 346147883Sscottl 347147883Sscottl map_info = (struct mpt_map_info *)arg; 348147883Sscottl map_info->error = error; 349147883Sscottl map_info->phys = segs->ds_addr; 350147883Sscottl} 351147883Sscottl 352147883Sscottl/**************************** Reply/Event Handling ****************************/ 353147883Sscottlint 354147883Sscottlmpt_register_handler(struct mpt_softc *mpt, mpt_handler_type type, 355147883Sscottl mpt_handler_t handler, uint32_t *phandler_id) 356147883Sscottl{ 357147883Sscottl 358147883Sscottl switch (type) { 359147883Sscottl case MPT_HANDLER_REPLY: 360147883Sscottl { 361147883Sscottl u_int cbi; 362147883Sscottl u_int free_cbi; 363147883Sscottl 364147883Sscottl if (phandler_id == NULL) 365147883Sscottl return (EINVAL); 366147883Sscottl 367147883Sscottl free_cbi = MPT_HANDLER_ID_NONE; 368147883Sscottl for (cbi = 0; cbi < MPT_NUM_REPLY_HANDLERS; cbi++) { 369147883Sscottl /* 370147883Sscottl * If the same handler is registered multiple 371147883Sscottl * times, don't error out. 
Just return the 372147883Sscottl * index of the original registration. 373147883Sscottl */ 374147883Sscottl if (mpt_reply_handlers[cbi] == handler.reply_handler) { 375147883Sscottl *phandler_id = MPT_CBI_TO_HID(cbi); 376147883Sscottl return (0); 377147883Sscottl } 378147883Sscottl 379147883Sscottl /* 380147883Sscottl * Fill from the front in the hope that 381147883Sscottl * all registered handlers consume only a 382147883Sscottl * single cache line. 383147883Sscottl * 384147883Sscottl * We don't break on the first empty slot so 385147883Sscottl * that the full table is checked to see if 386147883Sscottl * this handler was previously registered. 387147883Sscottl */ 388147883Sscottl if (free_cbi == MPT_HANDLER_ID_NONE 389147883Sscottl && (mpt_reply_handlers[cbi] 390147883Sscottl == mpt_default_reply_handler)) 391147883Sscottl free_cbi = cbi; 392147883Sscottl } 393147883Sscottl if (free_cbi == MPT_HANDLER_ID_NONE) 394147883Sscottl return (ENOMEM); 395147883Sscottl mpt_reply_handlers[free_cbi] = handler.reply_handler; 396147883Sscottl *phandler_id = MPT_CBI_TO_HID(free_cbi); 397147883Sscottl break; 398147883Sscottl } 399147883Sscottl default: 400147883Sscottl mpt_prt(mpt, "mpt_register_handler unknown type %d\n", type); 401147883Sscottl return (EINVAL); 402147883Sscottl } 403147883Sscottl return (0); 404147883Sscottl} 405147883Sscottl 406147883Sscottlint 407147883Sscottlmpt_deregister_handler(struct mpt_softc *mpt, mpt_handler_type type, 408147883Sscottl mpt_handler_t handler, uint32_t handler_id) 409147883Sscottl{ 410147883Sscottl 411147883Sscottl switch (type) { 412147883Sscottl case MPT_HANDLER_REPLY: 413147883Sscottl { 414147883Sscottl u_int cbi; 415147883Sscottl 416147883Sscottl cbi = MPT_CBI(handler_id); 417147883Sscottl if (cbi >= MPT_NUM_REPLY_HANDLERS 418147883Sscottl || mpt_reply_handlers[cbi] != handler.reply_handler) 419147883Sscottl return (ENOENT); 420147883Sscottl mpt_reply_handlers[cbi] = mpt_default_reply_handler; 421147883Sscottl break; 
422147883Sscottl } 423147883Sscottl default: 424147883Sscottl mpt_prt(mpt, "mpt_deregister_handler unknown type %d\n", type); 425147883Sscottl return (EINVAL); 426147883Sscottl } 427147883Sscottl return (0); 428147883Sscottl} 429147883Sscottl 430147883Sscottlstatic int 431147883Sscottlmpt_default_reply_handler(struct mpt_softc *mpt, request_t *req, 432147883Sscottl MSG_DEFAULT_REPLY *reply_frame) 433147883Sscottl{ 434147883Sscottl mpt_prt(mpt, "XXXX Default Handler Called. Req %p, Frame %p\n", 435147883Sscottl req, reply_frame); 436147883Sscottl 437147883Sscottl if (reply_frame != NULL) 438147883Sscottl mpt_dump_reply_frame(mpt, reply_frame); 439147883Sscottl 440147883Sscottl mpt_prt(mpt, "XXXX Reply Frame Ignored\n"); 441147883Sscottl 442147883Sscottl return (/*free_reply*/TRUE); 443147883Sscottl} 444147883Sscottl 445147883Sscottlstatic int 446147883Sscottlmpt_config_reply_handler(struct mpt_softc *mpt, request_t *req, 447147883Sscottl MSG_DEFAULT_REPLY *reply_frame) 448147883Sscottl{ 449147883Sscottl if (req != NULL) { 450147883Sscottl 451147883Sscottl if (reply_frame != NULL) { 452147883Sscottl MSG_CONFIG *cfgp; 453147883Sscottl MSG_CONFIG_REPLY *reply; 454147883Sscottl 455147883Sscottl cfgp = (MSG_CONFIG *)req->req_vbuf; 456147883Sscottl reply = (MSG_CONFIG_REPLY *)reply_frame; 457147883Sscottl req->IOCStatus = le16toh(reply_frame->IOCStatus); 458147883Sscottl bcopy(&reply->Header, &cfgp->Header, 459147883Sscottl sizeof(cfgp->Header)); 460147883Sscottl } 461147883Sscottl req->state &= ~REQ_STATE_QUEUED; 462147883Sscottl req->state |= REQ_STATE_DONE; 463147883Sscottl TAILQ_REMOVE(&mpt->request_pending_list, req, links); 464147883Sscottl 465147883Sscottl if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) 466147883Sscottl wakeup(req); 467147883Sscottl } 468147883Sscottl 469147883Sscottl return (/*free_reply*/TRUE); 470147883Sscottl} 471147883Sscottl 472147883Sscottlstatic int 473147883Sscottlmpt_handshake_reply_handler(struct mpt_softc *mpt, request_t *req, 
474147883Sscottl MSG_DEFAULT_REPLY *reply_frame) 475147883Sscottl{ 476147883Sscottl /* Nothing to be done. */ 477147883Sscottl return (/*free_reply*/TRUE); 478147883Sscottl} 479147883Sscottl 480147883Sscottlstatic int 481147883Sscottlmpt_event_reply_handler(struct mpt_softc *mpt, request_t *req, 482147883Sscottl MSG_DEFAULT_REPLY *reply_frame) 483147883Sscottl{ 484147883Sscottl int free_reply; 485147883Sscottl 486147883Sscottl if (reply_frame == NULL) { 487147883Sscottl mpt_prt(mpt, "Event Handler: req %p - Unexpected NULL reply\n"); 488147883Sscottl return (/*free_reply*/TRUE); 489147883Sscottl } 490147883Sscottl 491147883Sscottl free_reply = TRUE; 492147883Sscottl switch (reply_frame->Function) { 493147883Sscottl case MPI_FUNCTION_EVENT_NOTIFICATION: 494147883Sscottl { 495147883Sscottl MSG_EVENT_NOTIFY_REPLY *msg; 496147883Sscottl struct mpt_personality *pers; 497147883Sscottl u_int handled; 498147883Sscottl 499147883Sscottl handled = 0; 500147883Sscottl msg = (MSG_EVENT_NOTIFY_REPLY *)reply_frame; 501147883Sscottl MPT_PERS_FOREACH(mpt, pers) 502147883Sscottl handled += pers->event(mpt, req, msg); 503147883Sscottl 504155521Smjacob if (handled == 0 && mpt->mpt_pers_mask == 0) { 505155521Smjacob mpt_lprt(mpt, MPT_PRT_WARN, 506155521Smjacob "No Handlers For Any Event Notify Frames. " 507155521Smjacob "Event %#x (ACK %sequired).\n", 508155521Smjacob msg->Event, msg->AckRequired? "r" : "not r"); 509155521Smjacob } else if (handled == 0) { 510147883Sscottl mpt_prt(mpt, 511155521Smjacob "Unhandled Event Notify Frame. Event %#x " 512155521Smjacob "(ACK %sequired).\n", 513155521Smjacob msg->Event, msg->AckRequired? 
"r" : "not r"); 514155521Smjacob } 515147883Sscottl 516147883Sscottl if (msg->AckRequired) { 517147883Sscottl request_t *ack_req; 518147883Sscottl uint32_t context; 519147883Sscottl 520147883Sscottl context = htole32(req->index|MPT_REPLY_HANDLER_EVENTS); 521147883Sscottl ack_req = mpt_get_request(mpt, /*sleep_ok*/FALSE); 522147883Sscottl if (ack_req == NULL) { 523147883Sscottl struct mpt_evtf_record *evtf; 524147883Sscottl 525147883Sscottl evtf = (struct mpt_evtf_record *)reply_frame; 526147883Sscottl evtf->context = context; 527147883Sscottl LIST_INSERT_HEAD(&mpt->ack_frames, evtf, links); 528147883Sscottl free_reply = FALSE; 529147883Sscottl break; 530147883Sscottl } 531147883Sscottl mpt_send_event_ack(mpt, ack_req, msg, context); 532147883Sscottl } 533147883Sscottl break; 534147883Sscottl } 535147883Sscottl case MPI_FUNCTION_PORT_ENABLE: 536147883Sscottl mpt_lprt(mpt, MPT_PRT_DEBUG, "enable port reply\n"); 537147883Sscottl break; 538147883Sscottl case MPI_FUNCTION_EVENT_ACK: 539147883Sscottl break; 540147883Sscottl default: 541147883Sscottl mpt_prt(mpt, "Unknown Event Function: %x\n", 542147883Sscottl reply_frame->Function); 543147883Sscottl break; 544147883Sscottl } 545147883Sscottl 546147883Sscottl if (req != NULL 547147883Sscottl && (reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0) { 548147883Sscottl 549147883Sscottl req->state &= ~REQ_STATE_QUEUED; 550147883Sscottl req->state |= REQ_STATE_DONE; 551147883Sscottl TAILQ_REMOVE(&mpt->request_pending_list, req, links); 552147883Sscottl 553147883Sscottl if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) 554147883Sscottl wakeup(req); 555147883Sscottl else 556147883Sscottl mpt_free_request(mpt, req); 557147883Sscottl } 558147883Sscottl return (free_reply); 559147883Sscottl} 560147883Sscottl 561147883Sscottl/* 562147883Sscottl * Process an asynchronous event from the IOC. 
563147883Sscottl */ 564147883Sscottlstatic int 565147883Sscottlmpt_core_event(struct mpt_softc *mpt, request_t *req, 566147883Sscottl MSG_EVENT_NOTIFY_REPLY *msg) 567147883Sscottl{ 568155521Smjacob mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_core_event: 0x%x\n", 569155521Smjacob msg->Event & 0xFF); 570147883Sscottl switch(msg->Event & 0xFF) { 571147883Sscottl case MPI_EVENT_NONE: 572147883Sscottl break; 573147883Sscottl case MPI_EVENT_LOG_DATA: 574147883Sscottl { 575147883Sscottl int i; 576147883Sscottl 577147883Sscottl /* Some error occured that LSI wants logged */ 578147883Sscottl mpt_prt(mpt, "EvtLogData: IOCLogInfo: 0x%08x\n", 579147883Sscottl msg->IOCLogInfo); 580147883Sscottl mpt_prt(mpt, "\tEvtLogData: Event Data:"); 581147883Sscottl for (i = 0; i < msg->EventDataLength; i++) 582147883Sscottl mpt_prtc(mpt, " %08x", msg->Data[i]); 583147883Sscottl mpt_prtc(mpt, "\n"); 584147883Sscottl break; 585147883Sscottl } 586147883Sscottl case MPI_EVENT_EVENT_CHANGE: 587147883Sscottl /* 588147883Sscottl * This is just an acknowledgement 589147883Sscottl * of our mpt_send_event_request. 
590147883Sscottl */ 591147883Sscottl break; 592155521Smjacob case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE: 593155521Smjacob break; 594147883Sscottl default: 595147883Sscottl return (/*handled*/0); 596147883Sscottl break; 597147883Sscottl } 598147883Sscottl return (/*handled*/1); 599147883Sscottl} 600147883Sscottl 601147883Sscottlstatic void 602147883Sscottlmpt_send_event_ack(struct mpt_softc *mpt, request_t *ack_req, 603147883Sscottl MSG_EVENT_NOTIFY_REPLY *msg, uint32_t context) 604147883Sscottl{ 605147883Sscottl MSG_EVENT_ACK *ackp; 606147883Sscottl 607147883Sscottl ackp = (MSG_EVENT_ACK *)ack_req->req_vbuf; 608147883Sscottl bzero(ackp, sizeof *ackp); 609147883Sscottl ackp->Function = MPI_FUNCTION_EVENT_ACK; 610147883Sscottl ackp->Event = msg->Event; 611147883Sscottl ackp->EventContext = msg->EventContext; 612147883Sscottl ackp->MsgContext = context; 613147883Sscottl mpt_check_doorbell(mpt); 614147883Sscottl mpt_send_cmd(mpt, ack_req); 615147883Sscottl} 616147883Sscottl 617147883Sscottl/***************************** Interrupt Handling *****************************/ 618147883Sscottlvoid 619147883Sscottlmpt_intr(void *arg) 620147883Sscottl{ 621147883Sscottl struct mpt_softc *mpt; 622147883Sscottl uint32_t reply_desc; 623147883Sscottl 624147883Sscottl mpt = (struct mpt_softc *)arg; 625147883Sscottl while ((reply_desc = mpt_pop_reply_queue(mpt)) != MPT_REPLY_EMPTY) { 626147883Sscottl request_t *req; 627147883Sscottl MSG_DEFAULT_REPLY *reply_frame; 628147883Sscottl uint32_t reply_baddr; 629147883Sscottl u_int cb_index; 630147883Sscottl u_int req_index; 631147883Sscottl int free_rf; 632147883Sscottl 633147883Sscottl req = NULL; 634147883Sscottl reply_frame = NULL; 635147883Sscottl reply_baddr = 0; 636147883Sscottl if ((reply_desc & MPI_ADDRESS_REPLY_A_BIT) != 0) { 637147883Sscottl u_int offset; 638147883Sscottl 639147883Sscottl /* 640147883Sscottl * Insure that the reply frame is coherent. 
641147883Sscottl */ 642147883Sscottl reply_baddr = (reply_desc << 1); 643147883Sscottl offset = reply_baddr - (mpt->reply_phys & 0xFFFFFFFF); 644147883Sscottl bus_dmamap_sync_range(mpt->reply_dmat, mpt->reply_dmap, 645147883Sscottl offset, MPT_REPLY_SIZE, 646147883Sscottl BUS_DMASYNC_POSTREAD); 647147883Sscottl reply_frame = MPT_REPLY_OTOV(mpt, offset); 648147883Sscottl reply_desc = le32toh(reply_frame->MsgContext); 649147883Sscottl } 650147883Sscottl cb_index = MPT_CONTEXT_TO_CBI(reply_desc); 651147883Sscottl req_index = MPT_CONTEXT_TO_REQI(reply_desc); 652147883Sscottl if (req_index < MPT_MAX_REQUESTS(mpt)) 653147883Sscottl req = &mpt->request_pool[req_index]; 654147883Sscottl 655147883Sscottl free_rf = mpt_reply_handlers[cb_index](mpt, req, reply_frame); 656147883Sscottl 657147883Sscottl if (reply_frame != NULL && free_rf) 658147883Sscottl mpt_free_reply(mpt, reply_baddr); 659147883Sscottl } 660147883Sscottl} 661147883Sscottl 662147883Sscottl/******************************* Error Recovery *******************************/ 663147883Sscottlvoid 664147883Sscottlmpt_complete_request_chain(struct mpt_softc *mpt, struct req_queue *chain, 665147883Sscottl u_int iocstatus) 666147883Sscottl{ 667147883Sscottl MSG_DEFAULT_REPLY ioc_status_frame; 668147883Sscottl request_t *req; 669147883Sscottl 670147883Sscottl bzero(&ioc_status_frame, sizeof(ioc_status_frame)); 671147883Sscottl ioc_status_frame.MsgLength = roundup2(sizeof(ioc_status_frame), 4); 672156104Smjacob ioc_status_frame.IOCStatus = iocstatus; 673147883Sscottl while((req = TAILQ_FIRST(chain)) != NULL) { 674147883Sscottl MSG_REQUEST_HEADER *msg_hdr; 675147883Sscottl u_int cb_index; 676147883Sscottl 677147883Sscottl msg_hdr = (MSG_REQUEST_HEADER *)req->req_vbuf; 678156104Smjacob ioc_status_frame.Function = msg_hdr->Function; 679156104Smjacob ioc_status_frame.MsgContext = msg_hdr->MsgContext; 680147883Sscottl cb_index = MPT_CONTEXT_TO_CBI(le32toh(msg_hdr->MsgContext)); 681147883Sscottl mpt_reply_handlers[cb_index](mpt, 
	    req, &ioc_status_frame);
	}
}

/********************************* Diagnostics ********************************/
/*
 * Perform a diagnostic dump of a reply frame.
 */
void
mpt_dump_reply_frame(struct mpt_softc *mpt, MSG_DEFAULT_REPLY *reply_frame)
{

	mpt_prt(mpt, "Address Reply:\n");
	mpt_print_reply(reply_frame);
}

/******************************* Doorbell Access ******************************/
static __inline uint32_t mpt_rd_db(struct mpt_softc *mpt);
static __inline uint32_t mpt_rd_intr(struct mpt_softc *mpt);

/* Read the raw doorbell register. */
static __inline uint32_t
mpt_rd_db(struct mpt_softc *mpt)
{
	return mpt_read(mpt, MPT_OFFSET_DOORBELL);
}

/* Read the raw interrupt status register. */
static __inline uint32_t
mpt_rd_intr(struct mpt_softc *mpt)
{
	return mpt_read(mpt, MPT_OFFSET_INTR_STATUS);
}

/*
 * Busy wait for a door bell to be read by IOC.
 * Returns MPT_OK once the doorbell-busy bit clears, MPT_FAIL after
 * MPT_MAX_WAIT iterations (1ms apart).
 */
static int
mpt_wait_db_ack(struct mpt_softc *mpt)
{
	int i;
	for (i=0; i < MPT_MAX_WAIT; i++) {
		if (!MPT_DB_IS_BUSY(mpt_rd_intr(mpt))) {
			/* Record the worst-case wait for diagnostics. */
			maxwait_ack = i > maxwait_ack ? i : maxwait_ack;
			return MPT_OK;
		}

		DELAY(1000);
	}
	return MPT_FAIL;
}

/*
 * Busy wait for a door bell interrupt.
 * Returns MPT_OK once the doorbell interrupt asserts, MPT_FAIL after
 * MPT_MAX_WAIT iterations (100us apart).
 */
static int
mpt_wait_db_int(struct mpt_softc *mpt)
{
	int i;
	for (i=0; i < MPT_MAX_WAIT; i++) {
		if (MPT_DB_INTR(mpt_rd_intr(mpt))) {
			maxwait_int = i > maxwait_int ? i : maxwait_int;
			return MPT_OK;
		}
		DELAY(100);
	}
	return MPT_FAIL;
}

/*
 * Sanity check the doorbell: complain (and dump the doorbell contents)
 * if the IOC is not in the RUNNING state.  Does not wait.
 */
void
mpt_check_doorbell(struct mpt_softc *mpt)
{
	uint32_t db = mpt_rd_db(mpt);
	if (MPT_STATE(db) != MPT_DB_STATE_RUNNING) {
		mpt_prt(mpt, "Device not running\n");
		mpt_print_db(db);
	}
}

/* Wait for IOC to transition to a given state (polled, 100us steps). */
static int
mpt_wait_state(struct mpt_softc *mpt, enum DB_STATE_BITS state)
{
	int i;

	for (i = 0; i < MPT_MAX_WAIT; i++) {
		uint32_t db = mpt_rd_db(mpt);
		if (MPT_STATE(db) == state) {
			maxwait_state = i > maxwait_state ? i : maxwait_state;
			return (MPT_OK);
		}
		DELAY(100);
	}
	return (MPT_FAIL);
}


/************************* Intialization/Configuration ************************/
static int mpt_download_fw(struct mpt_softc *mpt);

/* Issue the reset COMMAND to the IOC */
static int
mpt_soft_reset(struct mpt_softc *mpt)
{
	mpt_lprt(mpt, MPT_PRT_DEBUG, "soft reset\n");

	/* Have to use hard reset if we are not in Running state */
	if (MPT_STATE(mpt_rd_db(mpt)) != MPT_DB_STATE_RUNNING) {
		mpt_prt(mpt, "soft reset failed: device not running\n");
		return MPT_FAIL;
	}

	/* If door bell is in use we don't have a chance of getting
	 * a word in since the IOC probably crashed in message
	 * processing. So don't waste our time.
	 */
	if (MPT_DB_IS_IN_USE(mpt_rd_db(mpt))) {
		mpt_prt(mpt, "soft reset failed: doorbell wedged\n");
		return MPT_FAIL;
	}

	/* Send the reset request to the IOC */
	mpt_write(mpt, MPT_OFFSET_DOORBELL,
	    MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET << MPI_DOORBELL_FUNCTION_SHIFT);
	if (mpt_wait_db_ack(mpt) != MPT_OK) {
		mpt_prt(mpt, "soft reset failed: ack timeout\n");
		return MPT_FAIL;
	}

	/* Wait for the IOC to reload and come out of reset state */
	if (mpt_wait_state(mpt, MPT_DB_STATE_READY) != MPT_OK) {
		mpt_prt(mpt, "soft reset failed: device did not restart\n");
		return MPT_FAIL;
	}

	return MPT_OK;
}

/*
 * Unlock the host diagnostic register by writing the magic key
 * sequence.  Retries for up to ~2 seconds; returns 0 on success,
 * EIO if the DRWE (diag register write enable) bit never sets.
 */
static int
mpt_enable_diag_mode(struct mpt_softc *mpt)
{
	int try;

	try = 20;
	while (--try) {

		if ((mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC) & MPI_DIAG_DRWE) != 0)
			break;

		/* Enable diagnostic registers */
		mpt_write(mpt, MPT_OFFSET_SEQUENCE, 0xFF);
		mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_1ST_KEY_VALUE);
		mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_2ND_KEY_VALUE);
		mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_3RD_KEY_VALUE);
		mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_4TH_KEY_VALUE);
		mpt_write(mpt, MPT_OFFSET_SEQUENCE, MPI_WRSEQ_5TH_KEY_VALUE);

		DELAY(100000);
	}
	if (try == 0)
		return (EIO);
	return (0);
}

/* Relock the host diagnostic register (scrambles the key sequence). */
static void
mpt_disable_diag_mode(struct mpt_softc *mpt)
{
	mpt_write(mpt, MPT_OFFSET_SEQUENCE, 0xFFFFFFFF);
}

/* This is a magic diagnostic reset that resets all the ARM
 * processors in the chip.
 */
static void
mpt_hard_reset(struct mpt_softc *mpt)
{
	int error;
	int wait;
	uint32_t diagreg;

	mpt_lprt(mpt, MPT_PRT_DEBUG, "hard reset\n");

	error = mpt_enable_diag_mode(mpt);
	if (error) {
		mpt_prt(mpt, "WARNING - Could not enter diagnostic mode !\n");
		mpt_prt(mpt, "Trying to reset anyway.\n");
	}

	diagreg = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);

	/*
	 * This appears to be a workaround required for some
	 * firmware or hardware revs.
	 */
	mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, diagreg | MPI_DIAG_DISABLE_ARM);
	DELAY(1000);

	/* Diag. port is now active so we can now hit the reset bit */
	mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, diagreg | MPI_DIAG_RESET_ADAPTER);

	/*
	 * Ensure that the reset has finished.  We delay 1ms
	 * prior to reading the register to make sure the chip
	 * has sufficiently completed its reset to handle register
	 * accesses.
	 */
	wait = 5000;
	do {
		DELAY(1000);
		diagreg = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC);
	} while (--wait && (diagreg & MPI_DIAG_RESET_ADAPTER) == 0);

	if (wait == 0) {
		mpt_prt(mpt, "WARNING - Failed hard reset! "
			"Trying to initialize anyway.\n");
	}

	/*
	 * If we have firmware to download, it must be loaded before
	 * the controller will become operational.  Do so now.
	 */
	if (mpt->fw_image != NULL) {

		error = mpt_download_fw(mpt);

		if (error) {
			mpt_prt(mpt, "WARNING - Firmware Download Failed!\n");
			mpt_prt(mpt, "Trying to initialize anyway.\n");
		}
	}

	/*
	 * Reseting the controller should have disabled write
	 * access to the diagnostic registers, but disable
	 * manually to be sure.
	 */
	mpt_disable_diag_mode(mpt);
}

/*
 * Core personality's IOC-reset handler: fail every request still on
 * the pending list with MPI_IOCSTATUS_INVALID_STATE so callers
 * waiting on them wake up.
 */
static void
mpt_core_ioc_reset(struct mpt_softc *mpt, int type)
{
	/*
	 * Complete all pending requests with a status
	 * appropriate for an IOC reset.
	 */
	mpt_complete_request_chain(mpt, &mpt->request_pending_list,
	    MPI_IOCSTATUS_INVALID_STATE);
}


/*
 * Reset the IOC when needed. Try software command first then if needed
 * poke at the magic diagnostic reset.
 Note that a hard reset resets
 * *both* IOCs on dual function chips (FC929 && LSI1030) as well as
 * fouls up the PCI configuration registers.
 */
int
mpt_reset(struct mpt_softc *mpt, int reinit)
{
	struct mpt_personality *pers;
	int ret;
	int retry_cnt = 0;

	/*
	 * Try a soft reset. If that fails, get out the big hammer.
	 */
 again:
	if ((ret = mpt_soft_reset(mpt)) != MPT_OK) {
		int cnt;
		for (cnt = 0; cnt < 5; cnt++) {
			/* Failed; do a hard reset */
			mpt_hard_reset(mpt);

			/*
			 * Wait for the IOC to reload
			 * and come out of reset state
			 */
			ret = mpt_wait_state(mpt, MPT_DB_STATE_READY);
			if (ret == MPT_OK) {
				break;
			}
			/*
			 * Okay- try to check again...
			 */
			ret = mpt_wait_state(mpt, MPT_DB_STATE_READY);
			if (ret == MPT_OK) {
				break;
			}
			mpt_prt(mpt, "mpt_reset: failed hard reset (%d:%d)\n",
			    retry_cnt, cnt);
		}
	}

	if (retry_cnt == 0) {
		/*
		 * Invoke reset handlers. We bump the reset count so
		 * that mpt_wait_req() understands that regardless of
		 * the specified wait condition, it should stop its wait.
		 */
		mpt->reset_cnt++;
		MPT_PERS_FOREACH(mpt, pers)
			pers->reset(mpt, ret);
	}

	if (reinit != 0) {
		ret = mpt_enable_ioc(mpt);
		if (ret == MPT_OK) {
			mpt_enable_ints(mpt);
		}
	}
	/* Up to two full reset+reinit retries before giving up. */
	if (ret != MPT_OK && retry_cnt++ < 2) {
		goto again;
	}
	return ret;
}

/*
 * Return a command buffer to the free queue.
 *
 * If an event-ack frame was deferred for lack of a request buffer,
 * the freed request is immediately reused to send that ack instead
 * of going back on the free list.
 */
void
mpt_free_request(struct mpt_softc *mpt, request_t *req)
{
	request_t *nxt;
	struct mpt_evtf_record *record;
	uint32_t reply_baddr;

	if (req == NULL || req != &mpt->request_pool[req->index]) {
		panic("mpt_free_request bad req ptr\n");
		return;
	}
	/* Free any chained continuation request first. */
	if ((nxt = req->chain) != NULL) {
		req->chain = NULL;
		mpt_free_request(mpt, nxt);	/* NB: recursion */
	}
	req->ccb = NULL;
	req->state = REQ_STATE_FREE;
	if (LIST_EMPTY(&mpt->ack_frames)) {
		TAILQ_INSERT_HEAD(&mpt->request_free_list, req, links);
		/* Wake anyone blocked in mpt_get_request(sleep_ok). */
		if (mpt->getreqwaiter != 0) {
			mpt->getreqwaiter = 0;
			wakeup(&mpt->request_free_list);
		}
		return;
	}

	/*
	 * Process an ack frame deferred due to resource shortage.
	 */
	record = LIST_FIRST(&mpt->ack_frames);
	LIST_REMOVE(record, links);
	mpt_send_event_ack(mpt, req, &record->reply, record->context);
	/* Translate the record's KVA back to a reply-frame bus address. */
	reply_baddr = (uint32_t)((uint8_t *)record - mpt->reply)
	    + (mpt->reply_phys & 0xFFFFFFFF);
	mpt_free_reply(mpt, reply_baddr);
}

/*
 * Get a command buffer from the free queue.  With sleep_ok nonzero
 * this blocks until one is freed; otherwise it may return NULL.
 */
request_t *
mpt_get_request(struct mpt_softc *mpt, int sleep_ok)
{
	request_t *req;

retry:
	req = TAILQ_FIRST(&mpt->request_free_list);
	if (req != NULL) {
		KASSERT(req == &mpt->request_pool[req->index],
		    ("mpt_get_request: corrupted request free list\n"));
		TAILQ_REMOVE(&mpt->request_free_list, req, links);
		req->state = REQ_STATE_ALLOCATED;
		req->chain = NULL;
	} else if (sleep_ok != 0) {
		mpt->getreqwaiter = 1;
		mpt_sleep(mpt, &mpt->request_free_list, PUSER, "mptgreq", 0);
		goto retry;
	}
	return req;
}

/*
 * Pass the command to the IOC: sync the request DMA map, mark the
 * request queued, and post its bus address to the request queue
 * register.
 */
void
mpt_send_cmd(struct mpt_softc *mpt, request_t *req)
{
	uint32_t *pReq;

	pReq = req->req_vbuf;
	if (mpt->verbose > MPT_PRT_TRACE) {
		/* Hex-dump the outgoing request frame. */
		int offset;
		mpt_prt(mpt, "Send Request %d (0x%x):",
		    req->index, req->req_pbuf);
		for (offset = 0; offset < mpt->request_frame_size; offset++) {
			if ((offset & 0x7) == 0) {
				mpt_prtc(mpt, "\n");
				mpt_prt(mpt, " ");
			}
			mpt_prtc(mpt, " %08x", pReq[offset]);
		}
		mpt_prtc(mpt, "\n");
	}
	bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap,
	    BUS_DMASYNC_PREWRITE);
	req->state |= REQ_STATE_QUEUED;
	TAILQ_INSERT_HEAD(&mpt->request_pending_list, req, links);
	mpt_write(mpt, MPT_OFFSET_REQUEST_Q, (uint32_t) req->req_pbuf);
}

/*
 * Wait for a request to complete.
 *
 * Inputs:
 *	mpt		softc of controller executing request
 *	req		request to wait for
 *	sleep_ok	nonzero implies may sleep in this context
 *	time_ms		timeout in ms.  0 implies no timeout.
 *
 * Return Values:
 *	0		Request completed
 *	non-0		Timeout fired before request completion.
 */
int
mpt_wait_req(struct mpt_softc *mpt, request_t *req,
	     mpt_req_state_t state, mpt_req_state_t mask,
	     int sleep_ok, int time_ms)
{
	int error;
	int timeout;
	u_int saved_cnt;

	/*
	 * timeout is in ms.  0 indicates infinite wait.
	 * Convert to ticks or 500us units depending on
	 * our sleep mode.
	 */
	if (sleep_ok != 0)
		timeout = (time_ms * hz) / 1000;
	else
		timeout = time_ms * 2;
	req->state |= REQ_STATE_NEED_WAKEUP;
	mask &= ~REQ_STATE_NEED_WAKEUP;
	/* An intervening IOC reset (reset_cnt bump) also ends the wait. */
	saved_cnt = mpt->reset_cnt;
	while ((req->state & mask) != state
	    && mpt->reset_cnt == saved_cnt) {

		if (sleep_ok != 0) {
			error = mpt_sleep(mpt, req, PUSER, "mptreq", timeout);
			if (error == EWOULDBLOCK) {
				timeout = 0;
				break;
			}
		} else {
			/* Polled mode: run the interrupt handler by hand. */
			if (time_ms != 0 && --timeout == 0) {
				mpt_prt(mpt, "mpt_wait_req timed out\n");
				break;
			}
			DELAY(500);
			mpt_intr(mpt);
		}
	}
	req->state &= ~REQ_STATE_NEED_WAKEUP;
	if (mpt->reset_cnt != saved_cnt)
		return (EIO);
	if (time_ms && timeout <= 0)
		return (ETIMEDOUT);
	return (0);
}

/*
 * Send a command to the IOC via the handshake register.
 *
 * Only done at initialization time and for certain unusual
 * commands such as device/bus reset as specified by LSI.
 */
int
mpt_send_handshake_cmd(struct mpt_softc *mpt, size_t len, void *cmd)
{
	int i;
	uint32_t data, *data32;

	/* Check condition of the IOC */
	data = mpt_rd_db(mpt);
	if ((MPT_STATE(data) != MPT_DB_STATE_READY
	  && MPT_STATE(data) != MPT_DB_STATE_RUNNING
	  && MPT_STATE(data) != MPT_DB_STATE_FAULT)
	 || MPT_DB_IS_IN_USE(data)) {
		mpt_prt(mpt, "handshake aborted - invalid doorbell state\n");
		mpt_print_db(data);
		return (EBUSY);
	}

	/* We move things in 32 bit chunks */
	len = (len + 3) >> 2;
	data32 = cmd;

	/* Clear any left over pending doorbell interupts */
	if (MPT_DB_INTR(mpt_rd_intr(mpt)))
		mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);

	/*
	 * Tell the handshake reg. we are going to send a command
	 * and how long it is going to be.
	 */
	data = (MPI_FUNCTION_HANDSHAKE << MPI_DOORBELL_FUNCTION_SHIFT) |
	    (len << MPI_DOORBELL_ADD_DWORDS_SHIFT);
	mpt_write(mpt, MPT_OFFSET_DOORBELL, data);

	/* Wait for the chip to notice */
	if (mpt_wait_db_int(mpt) != MPT_OK) {
		mpt_prt(mpt, "mpt_send_handshake_cmd timeout1\n");
		return (ETIMEDOUT);
	}

	/* Clear the interrupt */
	mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);

	if (mpt_wait_db_ack(mpt) != MPT_OK) {
		mpt_prt(mpt, "mpt_send_handshake_cmd timeout2\n");
		return (ETIMEDOUT);
	}

	/* Send the command, one dword at a time, waiting for each ack */
	for (i = 0; i < len; i++) {
		mpt_write(mpt, MPT_OFFSET_DOORBELL, *data32++);
		if (mpt_wait_db_ack(mpt) != MPT_OK) {
			mpt_prt(mpt,
			    "mpt_send_handshake_cmd timeout! index = %d\n",
			    i);
			return (ETIMEDOUT);
		}
	}
	return MPT_OK;
}

/*
 * Get the response from the handshake register.
 *
 * The reply arrives one 16-bit word per doorbell interrupt; words
 * beyond reply_len are drained and discarded so the IOC is left in a
 * clean state.  Returns 0 on success, ETIMEDOUT on a doorbell
 * timeout, or (MPT_FAIL | IOCStatus) if the IOC reported an error.
 */
int
mpt_recv_handshake_reply(struct mpt_softc *mpt, size_t reply_len, void *reply)
{
	int left, reply_left;
	u_int16_t *data16;
	MSG_DEFAULT_REPLY *hdr;

	/* We move things out in 16 bit chunks */
	reply_len >>= 1;
	data16 = (u_int16_t *)reply;

	hdr = (MSG_DEFAULT_REPLY *)reply;

	/* Get first word */
	if (mpt_wait_db_int(mpt) != MPT_OK) {
		mpt_prt(mpt, "mpt_recv_handshake_cmd timeout1\n");
		return ETIMEDOUT;
	}
	*data16++ = mpt_read(mpt, MPT_OFFSET_DOORBELL) & MPT_DB_DATA_MASK;
	mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);

	/* Get Second Word */
	if (mpt_wait_db_int(mpt) != MPT_OK) {
		mpt_prt(mpt, "mpt_recv_handshake_cmd timeout2\n");
		return ETIMEDOUT;
	}
	*data16++ = mpt_read(mpt, MPT_OFFSET_DOORBELL) & MPT_DB_DATA_MASK;
	mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);

	/* With the second word, we can now look at the length */
	if (((reply_len >> 1) != hdr->MsgLength)) {
		mpt_prt(mpt, "reply length does not match message length: "
			"got 0x%02x, expected 0x%02x\n",
			hdr->MsgLength << 2, reply_len << 1);
	}

	/* Get rest of the reply; but don't overflow the provided buffer */
	left = (hdr->MsgLength << 1) - 2;
	reply_left =  reply_len - 2;
	while (left--) {
		u_int16_t datum;

		if (mpt_wait_db_int(mpt) != MPT_OK) {
			mpt_prt(mpt, "mpt_recv_handshake_cmd timeout3\n");
			return ETIMEDOUT;
		}
		datum = mpt_read(mpt, MPT_OFFSET_DOORBELL);

		if (reply_left-- > 0)
			*data16++ = datum & MPT_DB_DATA_MASK;

		mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);
	}

	/* One more wait & clear at the end */
	if (mpt_wait_db_int(mpt) != MPT_OK) {
		mpt_prt(mpt, "mpt_recv_handshake_cmd timeout4\n");
		return ETIMEDOUT;
	}
	mpt_write(mpt, MPT_OFFSET_INTR_STATUS, 0);

	if ((hdr->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
		if (mpt->verbose >= MPT_PRT_TRACE)
			mpt_print_reply(hdr);
		return (MPT_FAIL | hdr->IOCStatus);
	}

	return (0);
}

/* Fetch the IOC FACTS reply via the handshake protocol. */
static int
mpt_get_iocfacts(struct mpt_softc *mpt, MSG_IOC_FACTS_REPLY *freplp)
{
	MSG_IOC_FACTS f_req;
	int error;
	
	bzero(&f_req, sizeof f_req);
	f_req.Function = MPI_FUNCTION_IOC_FACTS;
	f_req.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE);
	error = mpt_send_handshake_cmd(mpt, sizeof f_req, &f_req);
	if (error)
		return(error);
	error = mpt_recv_handshake_reply(mpt, sizeof (*freplp), freplp);
	return (error);
}

1293102199Smjacobstatic int 1294147883Sscottlmpt_get_portfacts(struct mpt_softc *mpt, MSG_PORT_FACTS_REPLY *freplp) 1295102199Smjacob{ 1296102199Smjacob MSG_PORT_FACTS f_req; 1297102199Smjacob int error; 1298102199Smjacob 1299102199Smjacob /* XXX: Only getting PORT FACTS for Port 0 */ 1300147883Sscottl memset(&f_req, 0, sizeof f_req); 1301102199Smjacob f_req.Function = MPI_FUNCTION_PORT_FACTS; 1302147883Sscottl f_req.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE); 1303102199Smjacob error = mpt_send_handshake_cmd(mpt, sizeof f_req, &f_req); 1304102199Smjacob if (error) 1305102199Smjacob return(error); 1306102199Smjacob error = mpt_recv_handshake_reply(mpt, sizeof (*freplp), freplp); 1307102199Smjacob return (error); 1308102199Smjacob} 1309102199Smjacob 1310101704Smjacob/* 1311101704Smjacob * Send the initialization request. This is where we specify how many 1312101704Smjacob * SCSI busses and how many devices per bus we wish to emulate. 1313101704Smjacob * This is also the command that specifies the max size of the reply 1314101704Smjacob * frames from the IOC that we will be allocating. 
1315101704Smjacob */ 1316101704Smjacobstatic int 1317147883Sscottlmpt_send_ioc_init(struct mpt_softc *mpt, uint32_t who) 1318101704Smjacob{ 1319101704Smjacob int error = 0; 1320101704Smjacob MSG_IOC_INIT init; 1321101704Smjacob MSG_IOC_INIT_REPLY reply; 1322101704Smjacob 1323101704Smjacob bzero(&init, sizeof init); 1324101704Smjacob init.WhoInit = who; 1325101704Smjacob init.Function = MPI_FUNCTION_IOC_INIT; 1326101704Smjacob if (mpt->is_fc) { 1327101704Smjacob init.MaxDevices = 255; 1328155521Smjacob } else if (mpt->is_sas) { 1329155521Smjacob init.MaxDevices = mpt->mpt_max_devices; 1330101704Smjacob } else { 1331101704Smjacob init.MaxDevices = 16; 1332101704Smjacob } 1333101704Smjacob init.MaxBuses = 1; 1334155521Smjacob 1335155521Smjacob init.MsgVersion = htole16(MPI_VERSION); 1336155521Smjacob init.HeaderVersion = htole16(MPI_HEADER_VERSION); 1337155521Smjacob init.ReplyFrameSize = htole16(MPT_REPLY_SIZE); 1338147883Sscottl init.MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE); 1339101704Smjacob 1340101704Smjacob if ((error = mpt_send_handshake_cmd(mpt, sizeof init, &init)) != 0) { 1341101704Smjacob return(error); 1342101704Smjacob } 1343101704Smjacob 1344101704Smjacob error = mpt_recv_handshake_reply(mpt, sizeof reply, &reply); 1345101704Smjacob return (error); 1346101704Smjacob} 1347101704Smjacob 1348102199Smjacob 1349102199Smjacob/* 1350102199Smjacob * Utiltity routine to read configuration headers and pages 1351102199Smjacob */ 1352147883Sscottlint 1353147883Sscottlmpt_issue_cfg_req(struct mpt_softc *mpt, request_t *req, u_int Action, 1354147883Sscottl u_int PageVersion, u_int PageLength, u_int PageNumber, 1355147883Sscottl u_int PageType, uint32_t PageAddress, bus_addr_t addr, 1356147883Sscottl bus_size_t len, int sleep_ok, int timeout_ms) 1357101704Smjacob{ 1358102199Smjacob MSG_CONFIG *cfgp; 1359147883Sscottl SGE_SIMPLE32 *se; 1360102199Smjacob 1361102199Smjacob cfgp = req->req_vbuf; 1362147883Sscottl memset(cfgp, 0, sizeof *cfgp); 1363147883Sscottl 
cfgp->Action = Action; 1364102199Smjacob cfgp->Function = MPI_FUNCTION_CONFIG; 1365147883Sscottl cfgp->Header.PageVersion = PageVersion; 1366147883Sscottl cfgp->Header.PageLength = PageLength; 1367147883Sscottl cfgp->Header.PageNumber = PageNumber; 1368147883Sscottl cfgp->Header.PageType = PageType; 1369102199Smjacob cfgp->PageAddress = PageAddress; 1370147883Sscottl se = (SGE_SIMPLE32 *)&cfgp->PageBufferSGE; 1371147883Sscottl se->Address = addr; 1372147883Sscottl MPI_pSGE_SET_LENGTH(se, len); 1373147883Sscottl MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT | 1374147883Sscottl MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER | 1375147883Sscottl MPI_SGE_FLAGS_END_OF_LIST | 1376147883Sscottl ((Action == MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT 1377147883Sscottl || Action == MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM) 1378147883Sscottl ? MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST))); 1379147883Sscottl cfgp->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_CONFIG); 1380102199Smjacob 1381102199Smjacob mpt_check_doorbell(mpt); 1382102199Smjacob mpt_send_cmd(mpt, req); 1383147883Sscottl return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, 1384147883Sscottl sleep_ok, timeout_ms)); 1385147883Sscottl} 1386102199Smjacob 1387147883Sscottl 1388147883Sscottlint 1389147883Sscottlmpt_read_cfg_header(struct mpt_softc *mpt, int PageType, int PageNumber, 1390147883Sscottl uint32_t PageAddress, CONFIG_PAGE_HEADER *rslt, 1391147883Sscottl int sleep_ok, int timeout_ms) 1392147883Sscottl{ 1393147883Sscottl request_t *req; 1394156104Smjacob MSG_CONFIG *cfgp; 1395147883Sscottl int error; 1396147883Sscottl 1397147883Sscottl req = mpt_get_request(mpt, sleep_ok); 1398147883Sscottl if (req == NULL) { 1399147883Sscottl mpt_prt(mpt, "mpt_read_cfg_header: Get request failed!\n"); 1400156104Smjacob return (ENOMEM); 1401102199Smjacob } 1402147883Sscottl 1403147883Sscottl error = mpt_issue_cfg_req(mpt, req, MPI_CONFIG_ACTION_PAGE_HEADER, 1404147883Sscottl /*PageVersion*/0, 
/*PageLength*/0, PageNumber, 1405147883Sscottl PageType, PageAddress, /*addr*/0, /*len*/0, 1406147883Sscottl sleep_ok, timeout_ms); 1407147883Sscottl if (error != 0) { 1408156104Smjacob mpt_free_request(mpt, req); 1409147883Sscottl mpt_prt(mpt, "read_cfg_header timed out\n"); 1410156104Smjacob return (ETIMEDOUT); 1411147883Sscottl } 1412147883Sscottl 1413156104Smjacob switch (req->IOCStatus & MPI_IOCSTATUS_MASK) { 1414156104Smjacob case MPI_IOCSTATUS_SUCCESS: 1415147883Sscottl cfgp = req->req_vbuf; 1416147883Sscottl bcopy(&cfgp->Header, rslt, sizeof(*rslt)); 1417147883Sscottl error = 0; 1418156104Smjacob break; 1419156104Smjacob case MPI_IOCSTATUS_CONFIG_INVALID_PAGE: 1420156104Smjacob mpt_lprt(mpt, MPT_PRT_DEBUG, 1421156104Smjacob "Invalid Page Type %d Number %d Addr 0x%0x\n", 1422156104Smjacob PageType, PageNumber, PageAddress); 1423156104Smjacob error = EINVAL; 1424156104Smjacob break; 1425156104Smjacob default: 1426156104Smjacob mpt_prt(mpt, "mpt_read_cfg_header: Config Info Status %x\n", 1427156104Smjacob req->IOCStatus); 1428156104Smjacob error = EIO; 1429156104Smjacob break; 1430147883Sscottl } 1431102199Smjacob mpt_free_request(mpt, req); 1432147883Sscottl return (error); 1433102199Smjacob} 1434102199Smjacob 1435103827Smjacob#define CFG_DATA_OFF 128 1436102199Smjacob 1437102822Smjacobint 1438147883Sscottlmpt_read_cfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress, 1439147883Sscottl CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok, 1440147883Sscottl int timeout_ms) 1441102199Smjacob{ 1442147883Sscottl request_t *req; 1443147883Sscottl int error; 1444102199Smjacob 1445147883Sscottl req = mpt_get_request(mpt, sleep_ok); 1446147883Sscottl if (req == NULL) { 1447147883Sscottl mpt_prt(mpt, "mpt_read_cfg_page: Get request failed!\n"); 1448147883Sscottl return (-1); 1449147883Sscottl } 1450102199Smjacob 1451147883Sscottl error = mpt_issue_cfg_req(mpt, req, Action, hdr->PageVersion, 1452147883Sscottl hdr->PageLength, hdr->PageNumber, 1453147883Sscottl 
hdr->PageType & MPI_CONFIG_PAGETYPE_MASK, 1454147883Sscottl PageAddress, req->req_pbuf + CFG_DATA_OFF, 1455147883Sscottl len, sleep_ok, timeout_ms); 1456147883Sscottl if (error != 0) { 1457147883Sscottl mpt_prt(mpt, "read_cfg_page(%d) timed out\n", Action); 1458147883Sscottl return (-1); 1459147883Sscottl } 1460102199Smjacob 1461147883Sscottl if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { 1462147883Sscottl mpt_prt(mpt, "mpt_read_cfg_page: Config Info Status %x\n", 1463147883Sscottl req->IOCStatus); 1464147883Sscottl mpt_free_request(mpt, req); 1465102199Smjacob return (-1); 1466102199Smjacob } 1467102199Smjacob bus_dmamap_sync(mpt->request_dmat, mpt->request_dmap, 1468102199Smjacob BUS_DMASYNC_POSTREAD); 1469147883Sscottl memcpy(hdr, ((uint8_t *)req->req_vbuf)+CFG_DATA_OFF, len); 1470102199Smjacob mpt_free_request(mpt, req); 1471102199Smjacob return (0); 1472102199Smjacob} 1473102199Smjacob 1474102822Smjacobint 1475147883Sscottlmpt_write_cfg_page(struct mpt_softc *mpt, int Action, uint32_t PageAddress, 1476147883Sscottl CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok, 1477147883Sscottl int timeout_ms) 1478102199Smjacob{ 1479147883Sscottl request_t *req; 1480147883Sscottl u_int hdr_attr; 1481147883Sscottl int error; 1482102199Smjacob 1483102199Smjacob hdr_attr = hdr->PageType & MPI_CONFIG_PAGEATTR_MASK; 1484102199Smjacob if (hdr_attr != MPI_CONFIG_PAGEATTR_CHANGEABLE && 1485102199Smjacob hdr_attr != MPI_CONFIG_PAGEATTR_PERSISTENT) { 1486147883Sscottl mpt_prt(mpt, "page type 0x%x not changeable\n", 1487147883Sscottl hdr->PageType & MPI_CONFIG_PAGETYPE_MASK); 1488102199Smjacob return (-1); 1489102199Smjacob } 1490147883Sscottl hdr->PageType &= MPI_CONFIG_PAGETYPE_MASK, 1491102199Smjacob 1492147883Sscottl req = mpt_get_request(mpt, sleep_ok); 1493147883Sscottl if (req == NULL) 1494147883Sscottl return (-1); 1495102199Smjacob 1496147883Sscottl memcpy(((caddr_t)req->req_vbuf)+CFG_DATA_OFF, hdr, len); 1497147883Sscottl /* Restore stripped out 
attributes */ 1498147883Sscottl hdr->PageType |= hdr_attr; 1499102199Smjacob 1500147883Sscottl error = mpt_issue_cfg_req(mpt, req, Action, hdr->PageVersion, 1501147883Sscottl hdr->PageLength, hdr->PageNumber, 1502147883Sscottl hdr->PageType & MPI_CONFIG_PAGETYPE_MASK, 1503147883Sscottl PageAddress, req->req_pbuf + CFG_DATA_OFF, 1504147883Sscottl len, sleep_ok, timeout_ms); 1505147883Sscottl if (error != 0) { 1506147883Sscottl mpt_prt(mpt, "mpt_write_cfg_page timed out\n"); 1507147883Sscottl return (-1); 1508147883Sscottl } 1509102199Smjacob 1510147883Sscottl if ((req->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { 1511147883Sscottl mpt_prt(mpt, "mpt_write_cfg_page: Config Info Status %x\n", 1512147883Sscottl req->IOCStatus); 1513147883Sscottl mpt_free_request(mpt, req); 1514147883Sscottl return (-1); 1515102199Smjacob } 1516147883Sscottl mpt_free_request(mpt, req); 1517147883Sscottl return (0); 1518147883Sscottl} 1519102199Smjacob 1520147883Sscottl/* 1521147883Sscottl * Read IOC configuration information 1522147883Sscottl */ 1523147883Sscottlstatic int 1524147883Sscottlmpt_read_config_info_ioc(struct mpt_softc *mpt) 1525147883Sscottl{ 1526147883Sscottl CONFIG_PAGE_HEADER hdr; 1527147883Sscottl struct mpt_raid_volume *mpt_raid; 1528147883Sscottl int rv; 1529147883Sscottl int i; 1530147883Sscottl size_t len; 1531147883Sscottl 1532147883Sscottl rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC, 1533147883Sscottl /*PageNumber*/2, /*PageAddress*/0, &hdr, 1534147883Sscottl /*sleep_ok*/FALSE, /*timeout_ms*/5000); 1535156104Smjacob /* 1536156104Smjacob * If it's an invalid page, so what? Not a supported function.... 
1537156104Smjacob */ 1538156104Smjacob if (rv == EINVAL) 1539156104Smjacob return (0); 1540147883Sscottl if (rv) 1541156104Smjacob return (rv); 1542147883Sscottl 1543147883Sscottl mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC Page 2 Header: ver %x, len %x, " 1544147883Sscottl "num %x, type %x\n", hdr.PageVersion, 1545147883Sscottl hdr.PageLength * sizeof(uint32_t), 1546147883Sscottl hdr.PageNumber, hdr.PageType); 1547147883Sscottl 1548147883Sscottl len = hdr.PageLength * sizeof(uint32_t); 1549151075Sscottl mpt->ioc_page2 = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); 1550147883Sscottl if (mpt->ioc_page2 == NULL) 1551147883Sscottl return (ENOMEM); 1552147883Sscottl memcpy(&mpt->ioc_page2->Header, &hdr, sizeof(hdr)); 1553147883Sscottl rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0, 1554147883Sscottl &mpt->ioc_page2->Header, len, 1555147883Sscottl /*sleep_ok*/FALSE, /*timeout_ms*/5000); 1556147883Sscottl if (rv) { 1557147883Sscottl mpt_prt(mpt, "failed to read IOC Page 2\n"); 1558147883Sscottl } else if (mpt->ioc_page2->CapabilitiesFlags != 0) { 1559147883Sscottl uint32_t mask; 1560147883Sscottl 1561147883Sscottl mpt_prt(mpt, "Capabilities: ("); 1562147883Sscottl for (mask = 1; mask != 0; mask <<= 1) { 1563147883Sscottl if ((mpt->ioc_page2->CapabilitiesFlags & mask) == 0) 1564147883Sscottl continue; 1565147883Sscottl 1566147883Sscottl switch (mask) { 1567147883Sscottl case MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT: 1568147883Sscottl mpt_prtc(mpt, " RAID-0"); 1569147883Sscottl break; 1570147883Sscottl case MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT: 1571147883Sscottl mpt_prtc(mpt, " RAID-1E"); 1572147883Sscottl break; 1573147883Sscottl case MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT: 1574147883Sscottl mpt_prtc(mpt, " RAID-1"); 1575147883Sscottl break; 1576147883Sscottl case MPI_IOCPAGE2_CAP_FLAGS_SES_SUPPORT: 1577147883Sscottl mpt_prtc(mpt, " SES"); 1578147883Sscottl break; 1579147883Sscottl case MPI_IOCPAGE2_CAP_FLAGS_SAFTE_SUPPORT: 1580147883Sscottl mpt_prtc(mpt, " SAFTE"); 1581147883Sscottl break; 
1582147883Sscottl case MPI_IOCPAGE2_CAP_FLAGS_CROSS_CHANNEL_SUPPORT: 1583147883Sscottl mpt_prtc(mpt, " Multi-Channel-Arrays"); 1584147883Sscottl default: 1585147883Sscottl break; 1586147883Sscottl } 1587102199Smjacob } 1588147883Sscottl mpt_prtc(mpt, " )\n"); 1589147883Sscottl if ((mpt->ioc_page2->CapabilitiesFlags 1590147883Sscottl & (MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT 1591147883Sscottl | MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT 1592147883Sscottl | MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT)) != 0) { 1593147883Sscottl mpt_prt(mpt, "%d Active Volume%s(%d Max)\n", 1594147883Sscottl mpt->ioc_page2->NumActiveVolumes, 1595147883Sscottl mpt->ioc_page2->NumActiveVolumes != 1 1596147883Sscottl ? "s " : " ", 1597147883Sscottl mpt->ioc_page2->MaxVolumes); 1598147883Sscottl mpt_prt(mpt, "%d Hidden Drive Member%s(%d Max)\n", 1599147883Sscottl mpt->ioc_page2->NumActivePhysDisks, 1600147883Sscottl mpt->ioc_page2->NumActivePhysDisks != 1 1601147883Sscottl ? "s " : " ", 1602147883Sscottl mpt->ioc_page2->MaxPhysDisks); 1603147883Sscottl } 1604147883Sscottl } 1605102199Smjacob 1606147883Sscottl len = mpt->ioc_page2->MaxVolumes * sizeof(struct mpt_raid_volume); 1607147883Sscottl mpt->raid_volumes = malloc(len, M_DEVBUF, M_NOWAIT); 1608147883Sscottl if (mpt->raid_volumes == NULL) { 1609147883Sscottl mpt_prt(mpt, "Could not allocate RAID volume data\n"); 1610147883Sscottl } else { 1611147883Sscottl memset(mpt->raid_volumes, 0, len); 1612147883Sscottl } 1613147883Sscottl 1614147883Sscottl /* 1615147883Sscottl * Copy critical data out of ioc_page2 so that we can 1616147883Sscottl * safely refresh the page without windows of unreliable 1617147883Sscottl * data. 
1618147883Sscottl */ 1619147883Sscottl mpt->raid_max_volumes = mpt->ioc_page2->MaxVolumes; 1620147883Sscottl 1621147883Sscottl len = sizeof(*mpt->raid_volumes->config_page) 1622147883Sscottl + (sizeof(RAID_VOL0_PHYS_DISK)*(mpt->ioc_page2->MaxPhysDisks - 1)); 1623147883Sscottl for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) { 1624147883Sscottl mpt_raid = &mpt->raid_volumes[i]; 1625147883Sscottl mpt_raid->config_page = malloc(len, M_DEVBUF, M_NOWAIT); 1626147883Sscottl if (mpt_raid->config_page == NULL) { 1627147883Sscottl mpt_prt(mpt, "Could not allocate RAID page data\n"); 1628147883Sscottl break; 1629147883Sscottl } 1630147883Sscottl memset(mpt_raid->config_page, 0, len); 1631147883Sscottl } 1632147883Sscottl mpt->raid_page0_len = len; 1633147883Sscottl 1634147883Sscottl len = mpt->ioc_page2->MaxPhysDisks * sizeof(struct mpt_raid_disk); 1635147883Sscottl mpt->raid_disks = malloc(len, M_DEVBUF, M_NOWAIT); 1636147883Sscottl if (mpt->raid_disks == NULL) { 1637147883Sscottl mpt_prt(mpt, "Could not allocate RAID disk data\n"); 1638147883Sscottl } else { 1639147883Sscottl memset(mpt->raid_disks, 0, len); 1640147883Sscottl } 1641147883Sscottl 1642147883Sscottl mpt->raid_max_disks = mpt->ioc_page2->MaxPhysDisks; 1643147883Sscottl 1644147883Sscottl rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_IOC, 1645147883Sscottl /*PageNumber*/3, /*PageAddress*/0, &hdr, 1646147883Sscottl /*sleep_ok*/FALSE, /*timeout_ms*/5000); 1647147883Sscottl if (rv) 1648147883Sscottl return (EIO); 1649147883Sscottl 1650147883Sscottl mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC Page 3 Header: %x %x %x %x\n", 1651147883Sscottl hdr.PageVersion, hdr.PageLength, hdr.PageNumber, hdr.PageType); 1652147883Sscottl 1653147883Sscottl if (mpt->ioc_page3 != NULL) 1654147883Sscottl free(mpt->ioc_page3, M_DEVBUF); 1655147883Sscottl len = hdr.PageLength * sizeof(uint32_t); 1656151075Sscottl mpt->ioc_page3 = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); 1657147883Sscottl if (mpt->ioc_page3 == NULL) 1658102199Smjacob return 
(-1); 1659147883Sscottl memcpy(&mpt->ioc_page3->Header, &hdr, sizeof(hdr)); 1660147883Sscottl rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0, 1661147883Sscottl &mpt->ioc_page3->Header, len, 1662147883Sscottl /*sleep_ok*/FALSE, /*timeout_ms*/5000); 1663147883Sscottl if (rv) { 1664147883Sscottl mpt_prt(mpt, "failed to read IOC Page 3\n"); 1665102199Smjacob } 1666102199Smjacob 1667147883Sscottl mpt_raid_wakeup(mpt); 1668147883Sscottl 1669102199Smjacob return (0); 1670102199Smjacob} 1671102199Smjacob 1672102199Smjacob/* 1673102199Smjacob * Read SCSI configuration information 1674102199Smjacob */ 1675102199Smjacobstatic int 1676147883Sscottlmpt_read_config_info_spi(struct mpt_softc *mpt) 1677102199Smjacob{ 1678102199Smjacob int rv, i; 1679102199Smjacob 1680102199Smjacob rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 0, 1681147883Sscottl 0, &mpt->mpt_port_page0.Header, 1682147883Sscottl /*sleep_ok*/FALSE, /*timeout_ms*/5000); 1683147883Sscottl if (rv) 1684102199Smjacob return (-1); 1685147883Sscottl mpt_lprt(mpt, MPT_PRT_DEBUG, 1686147883Sscottl "SPI Port Page 0 Header: %x %x %x %x\n", 1687147883Sscottl mpt->mpt_port_page0.Header.PageVersion, 1688147883Sscottl mpt->mpt_port_page0.Header.PageLength, 1689147883Sscottl mpt->mpt_port_page0.Header.PageNumber, 1690147883Sscottl mpt->mpt_port_page0.Header.PageType); 1691102199Smjacob 1692102199Smjacob rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 1, 1693147883Sscottl 0, &mpt->mpt_port_page1.Header, 1694147883Sscottl /*sleep_ok*/FALSE, /*timeout_ms*/5000); 1695147883Sscottl if (rv) 1696102199Smjacob return (-1); 1697102199Smjacob 1698147883Sscottl mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1 Header: %x %x %x %x\n", 1699147883Sscottl mpt->mpt_port_page1.Header.PageVersion, 1700147883Sscottl mpt->mpt_port_page1.Header.PageLength, 1701147883Sscottl mpt->mpt_port_page1.Header.PageNumber, 1702147883Sscottl mpt->mpt_port_page1.Header.PageType); 1703147883Sscottl 1704102199Smjacob rv = 
mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 2, 1705147883Sscottl /*PageAddress*/0, &mpt->mpt_port_page2.Header, 1706147883Sscottl /*sleep_ok*/FALSE, /*timeout_ms*/5000); 1707147883Sscottl if (rv) 1708102199Smjacob return (-1); 1709102199Smjacob 1710147883Sscottl mpt_lprt(mpt, MPT_PRT_DEBUG, 1711147883Sscottl "SPI Port Page 2 Header: %x %x %x %x\n", 1712147883Sscottl mpt->mpt_port_page1.Header.PageVersion, 1713147883Sscottl mpt->mpt_port_page1.Header.PageLength, 1714147883Sscottl mpt->mpt_port_page1.Header.PageNumber, 1715147883Sscottl mpt->mpt_port_page1.Header.PageType); 1716102199Smjacob 1717102199Smjacob for (i = 0; i < 16; i++) { 1718102199Smjacob rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE, 1719147883Sscottl 0, i, &mpt->mpt_dev_page0[i].Header, 1720147883Sscottl /*sleep_ok*/FALSE, /*timeout_ms*/5000); 1721147883Sscottl if (rv) 1722102199Smjacob return (-1); 1723147883Sscottl 1724147883Sscottl mpt_lprt(mpt, MPT_PRT_DEBUG, 1725147883Sscottl "SPI Target %d Device Page 0 Header: %x %x %x %x\n", 1726147883Sscottl i, mpt->mpt_dev_page0[i].Header.PageVersion, 1727147883Sscottl mpt->mpt_dev_page0[i].Header.PageLength, 1728147883Sscottl mpt->mpt_dev_page0[i].Header.PageNumber, 1729147883Sscottl mpt->mpt_dev_page0[i].Header.PageType); 1730102199Smjacob 1731102199Smjacob rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE, 1732147883Sscottl 1, i, &mpt->mpt_dev_page1[i].Header, 1733147883Sscottl /*sleep_ok*/FALSE, /*timeout_ms*/5000); 1734147883Sscottl if (rv) 1735102199Smjacob return (-1); 1736147883Sscottl 1737147883Sscottl mpt_lprt(mpt, MPT_PRT_DEBUG, 1738147883Sscottl "SPI Target %d Device Page 1 Header: %x %x %x %x\n", 1739147883Sscottl i, mpt->mpt_dev_page1[i].Header.PageVersion, 1740147883Sscottl mpt->mpt_dev_page1[i].Header.PageLength, 1741147883Sscottl mpt->mpt_dev_page1[i].Header.PageNumber, 1742147883Sscottl mpt->mpt_dev_page1[i].Header.PageType); 1743102199Smjacob } 1744102199Smjacob 1745102199Smjacob /* 
1746102199Smjacob * At this point, we don't *have* to fail. As long as we have 1747102199Smjacob * valid config header information, we can (barely) lurch 1748102199Smjacob * along. 1749102199Smjacob */ 1750102199Smjacob 1751147883Sscottl rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0, 1752147883Sscottl &mpt->mpt_port_page0.Header, 1753147883Sscottl sizeof(mpt->mpt_port_page0), 1754147883Sscottl /*sleep_ok*/FALSE, /*timeout_ms*/5000); 1755102199Smjacob if (rv) { 1756147883Sscottl mpt_prt(mpt, "failed to read SPI Port Page 0\n"); 1757147883Sscottl } else { 1758147883Sscottl mpt_lprt(mpt, MPT_PRT_DEBUG, 1759147883Sscottl "SPI Port Page 0: Capabilities %x PhysicalInterface %x\n", 1760102199Smjacob mpt->mpt_port_page0.Capabilities, 1761102199Smjacob mpt->mpt_port_page0.PhysicalInterface); 1762102199Smjacob } 1763102199Smjacob 1764147883Sscottl rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0, 1765147883Sscottl &mpt->mpt_port_page1.Header, 1766147883Sscottl sizeof(mpt->mpt_port_page1), 1767147883Sscottl /*sleep_ok*/FALSE, /*timeout_ms*/5000); 1768102199Smjacob if (rv) { 1769147883Sscottl mpt_prt(mpt, "failed to read SPI Port Page 1\n"); 1770147883Sscottl } else { 1771147883Sscottl mpt_lprt(mpt, MPT_PRT_DEBUG, 1772147883Sscottl "SPI Port Page 1: Configuration %x OnBusTimerValue %x\n", 1773102199Smjacob mpt->mpt_port_page1.Configuration, 1774102199Smjacob mpt->mpt_port_page1.OnBusTimerValue); 1775102199Smjacob } 1776102199Smjacob 1777147883Sscottl rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0, 1778147883Sscottl &mpt->mpt_port_page2.Header, 1779147883Sscottl sizeof(mpt->mpt_port_page2), 1780147883Sscottl /*sleep_ok*/FALSE, /*timeout_ms*/5000); 1781102199Smjacob if (rv) { 1782147883Sscottl mpt_prt(mpt, "failed to read SPI Port Page 2\n"); 1783147883Sscottl } else { 1784147883Sscottl mpt_lprt(mpt, MPT_PRT_DEBUG, 1785147883Sscottl "SPI Port Page 2: Flags %x Settings %x\n", 1786102199Smjacob mpt->mpt_port_page2.PortFlags, 1787102199Smjacob 
mpt->mpt_port_page2.PortSettings); 1788102199Smjacob for (i = 0; i < 16; i++) { 1789147883Sscottl mpt_lprt(mpt, MPT_PRT_DEBUG, 1790147883Sscottl "SPI Port Page 2 Tgt %d: timo %x SF %x Flags %x\n", 1791102199Smjacob i, mpt->mpt_port_page2.DeviceSettings[i].Timeout, 1792102199Smjacob mpt->mpt_port_page2.DeviceSettings[i].SyncFactor, 1793102199Smjacob mpt->mpt_port_page2.DeviceSettings[i].DeviceFlags); 1794102199Smjacob } 1795102199Smjacob } 1796102199Smjacob 1797102199Smjacob for (i = 0; i < 16; i++) { 1798147883Sscottl rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/i, 1799147883Sscottl &mpt->mpt_dev_page0[i].Header, 1800147883Sscottl sizeof(*mpt->mpt_dev_page0), 1801147883Sscottl /*sleep_ok*/FALSE, 1802147883Sscottl /*timeout_ms*/5000); 1803102199Smjacob if (rv) { 1804147883Sscottl mpt_prt(mpt, 1805147883Sscottl "cannot read SPI Tgt %d Device Page 0\n", i); 1806102199Smjacob continue; 1807102199Smjacob } 1808147883Sscottl mpt_lprt(mpt, MPT_PRT_DEBUG, 1809147883Sscottl "SPI Tgt %d Page 0: NParms %x Information %x", 1810147883Sscottl i, mpt->mpt_dev_page0[i].NegotiatedParameters, 1811147883Sscottl mpt->mpt_dev_page0[i].Information); 1812147883Sscottl 1813147883Sscottl rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/i, 1814147883Sscottl &mpt->mpt_dev_page1[i].Header, 1815147883Sscottl sizeof(*mpt->mpt_dev_page1), 1816147883Sscottl /*sleep_ok*/FALSE, 1817147883Sscottl /*timeout_ms*/5000); 1818147883Sscottl if (rv) { 1819103914Smjacob mpt_prt(mpt, 1820147883Sscottl "cannot read SPI Tgt %d Device Page 1\n", i); 1821102199Smjacob continue; 1822102199Smjacob } 1823147883Sscottl mpt_lprt(mpt, MPT_PRT_DEBUG, 1824147883Sscottl "SPI Tgt %d Page 1: RParms %x Configuration %x\n", 1825147883Sscottl i, mpt->mpt_dev_page1[i].RequestedParameters, 1826147883Sscottl mpt->mpt_dev_page1[i].Configuration); 1827102199Smjacob } 1828102199Smjacob return (0); 1829102199Smjacob} 1830102199Smjacob 1831102199Smjacob/* 1832102199Smjacob * Validate SPI configuration information. 
1833102199Smjacob * 1834102199Smjacob * In particular, validate SPI Port Page 1. 1835102199Smjacob */ 1836102199Smjacobstatic int 1837147883Sscottlmpt_set_initial_config_spi(struct mpt_softc *mpt) 1838102199Smjacob{ 1839102199Smjacob int i, pp1val = ((1 << mpt->mpt_ini_id) << 16) | mpt->mpt_ini_id; 1840147883Sscottl int error; 1841102199Smjacob 1842102822Smjacob mpt->mpt_disc_enable = 0xff; 1843102822Smjacob mpt->mpt_tag_enable = 0; 1844102822Smjacob 1845102199Smjacob if (mpt->mpt_port_page1.Configuration != pp1val) { 1846115778Smjacob CONFIG_PAGE_SCSI_PORT_1 tmp; 1847147883Sscottl 1848103914Smjacob mpt_prt(mpt, 1849147883Sscottl "SPI Port Page 1 Config value bad (%x)- should be %x\n", 1850102199Smjacob mpt->mpt_port_page1.Configuration, pp1val); 1851102199Smjacob tmp = mpt->mpt_port_page1; 1852102199Smjacob tmp.Configuration = pp1val; 1853147883Sscottl error = mpt_write_cur_cfg_page(mpt, /*PageAddress*/0, 1854147883Sscottl &tmp.Header, sizeof(tmp), 1855147883Sscottl /*sleep_ok*/FALSE, 1856147883Sscottl /*timeout_ms*/5000); 1857147883Sscottl if (error) 1858102199Smjacob return (-1); 1859147883Sscottl error = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0, 1860147883Sscottl &tmp.Header, sizeof(tmp), 1861147883Sscottl /*sleep_ok*/FALSE, 1862147883Sscottl /*timeout_ms*/5000); 1863147883Sscottl if (error) 1864102199Smjacob return (-1); 1865102199Smjacob if (tmp.Configuration != pp1val) { 1866103914Smjacob mpt_prt(mpt, 1867147883Sscottl "failed to reset SPI Port Page 1 Config value\n"); 1868102199Smjacob return (-1); 1869102199Smjacob } 1870102199Smjacob mpt->mpt_port_page1 = tmp; 1871102199Smjacob } 1872102199Smjacob 1873102199Smjacob for (i = 0; i < 16; i++) { 1874115778Smjacob CONFIG_PAGE_SCSI_DEVICE_1 tmp; 1875102199Smjacob tmp = mpt->mpt_dev_page1[i]; 1876102199Smjacob tmp.RequestedParameters = 0; 1877102199Smjacob tmp.Configuration = 0; 1878147883Sscottl mpt_lprt(mpt, MPT_PRT_DEBUG, 1879147883Sscottl "Set Tgt %d SPI DevicePage 1 values to %x 0 %x\n", 
1880147883Sscottl i, tmp.RequestedParameters, tmp.Configuration); 1881147883Sscottl error = mpt_write_cur_cfg_page(mpt, /*PageAddress*/i, 1882147883Sscottl &tmp.Header, sizeof(tmp), 1883147883Sscottl /*sleep_ok*/FALSE, 1884147883Sscottl /*timeout_ms*/5000); 1885147883Sscottl if (error) 1886102199Smjacob return (-1); 1887147883Sscottl error = mpt_read_cur_cfg_page(mpt, /*PageAddress*/i, 1888147883Sscottl &tmp.Header, sizeof(tmp), 1889147883Sscottl /*sleep_ok*/FALSE, 1890147883Sscottl /*timeout_ms*/5000); 1891147883Sscottl if (error) 1892102199Smjacob return (-1); 1893102199Smjacob mpt->mpt_dev_page1[i] = tmp; 1894147883Sscottl mpt_lprt(mpt, MPT_PRT_DEBUG, 1895147883Sscottl "SPI Tgt %d Page 1: RParm %x Configuration %x\n", i, 1896147883Sscottl mpt->mpt_dev_page1[i].RequestedParameters, 1897147883Sscottl mpt->mpt_dev_page1[i].Configuration); 1898102199Smjacob } 1899102199Smjacob return (0); 1900102199Smjacob} 1901102199Smjacob 1902102199Smjacob/* 1903102199Smjacob * Enable IOC port 1904102199Smjacob */ 1905102199Smjacobstatic int 1906147883Sscottlmpt_send_port_enable(struct mpt_softc *mpt, int port) 1907102199Smjacob{ 1908147883Sscottl request_t *req; 1909101704Smjacob MSG_PORT_ENABLE *enable_req; 1910147883Sscottl int error; 1911101704Smjacob 1912147883Sscottl req = mpt_get_request(mpt, /*sleep_ok*/FALSE); 1913147883Sscottl if (req == NULL) 1914147883Sscottl return (-1); 1915101704Smjacob 1916101704Smjacob enable_req = req->req_vbuf; 1917101704Smjacob bzero(enable_req, sizeof *enable_req); 1918101704Smjacob 1919101704Smjacob enable_req->Function = MPI_FUNCTION_PORT_ENABLE; 1920147883Sscottl enable_req->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_CONFIG); 1921101704Smjacob enable_req->PortNumber = port; 1922101704Smjacob 1923101704Smjacob mpt_check_doorbell(mpt); 1924147883Sscottl mpt_lprt(mpt, MPT_PRT_DEBUG, "enabling port %d\n", port); 1925147883Sscottl 1926147883Sscottl mpt_send_cmd(mpt, req); 1927147883Sscottl error = mpt_wait_req(mpt, req, REQ_STATE_DONE, 
REQ_STATE_DONE, 1928156021Smjacob /*sleep_ok*/FALSE, 1929156021Smjacob /*time_ms*/(mpt->is_sas || mpt->is_fc)? 30000 : 3000); 1930147883Sscottl if (error != 0) { 1931155521Smjacob mpt_prt(mpt, "port enable timed out\n"); 1932147883Sscottl return (-1); 1933101704Smjacob } 1934101704Smjacob mpt_free_request(mpt, req); 1935101704Smjacob return (0); 1936101704Smjacob} 1937101704Smjacob 1938101704Smjacob/* 1939101704Smjacob * Enable/Disable asynchronous event reporting. 1940101704Smjacob * 1941101704Smjacob * NB: this is the first command we send via shared memory 1942101704Smjacob * instead of the handshake register. 1943101704Smjacob */ 1944101704Smjacobstatic int 1945147883Sscottlmpt_send_event_request(struct mpt_softc *mpt, int onoff) 1946101704Smjacob{ 1947101704Smjacob request_t *req; 1948101704Smjacob MSG_EVENT_NOTIFY *enable_req; 1949101704Smjacob 1950147883Sscottl req = mpt_get_request(mpt, /*sleep_ok*/FALSE); 1951101704Smjacob 1952101704Smjacob enable_req = req->req_vbuf; 1953101704Smjacob bzero(enable_req, sizeof *enable_req); 1954101704Smjacob 1955101704Smjacob enable_req->Function = MPI_FUNCTION_EVENT_NOTIFICATION; 1956147883Sscottl enable_req->MsgContext = htole32(req->index | MPT_REPLY_HANDLER_EVENTS); 1957101704Smjacob enable_req->Switch = onoff; 1958101704Smjacob 1959101704Smjacob mpt_check_doorbell(mpt); 1960147883Sscottl mpt_lprt(mpt, MPT_PRT_DEBUG, 1961147883Sscottl "%sabling async events\n", onoff ? "en" : "dis"); 1962101704Smjacob mpt_send_cmd(mpt, req); 1963101704Smjacob 1964101704Smjacob return (0); 1965101704Smjacob} 1966101704Smjacob 1967101704Smjacob/* 1968101704Smjacob * Un-mask the interupts on the chip. 
1969101704Smjacob */ 1970101704Smjacobvoid 1971147883Sscottlmpt_enable_ints(struct mpt_softc *mpt) 1972101704Smjacob{ 1973101704Smjacob /* Unmask every thing except door bell int */ 1974101704Smjacob mpt_write(mpt, MPT_OFFSET_INTR_MASK, MPT_INTR_DB_MASK); 1975101704Smjacob} 1976101704Smjacob 1977101704Smjacob/* 1978101704Smjacob * Mask the interupts on the chip. 1979101704Smjacob */ 1980101704Smjacobvoid 1981147883Sscottlmpt_disable_ints(struct mpt_softc *mpt) 1982101704Smjacob{ 1983101704Smjacob /* Mask all interrupts */ 1984156104Smjacob mpt_write(mpt, MPT_OFFSET_INTR_MASK, 1985101704Smjacob MPT_INTR_REPLY_MASK | MPT_INTR_DB_MASK); 1986101704Smjacob} 1987101704Smjacob 1988147883Sscottlstatic void 1989147883Sscottlmpt_sysctl_attach(struct mpt_softc *mpt) 1990147883Sscottl{ 1991147883Sscottl struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev); 1992147883Sscottl struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev); 1993147883Sscottl 1994156104Smjacob SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, 1995147883Sscottl "debug", CTLFLAG_RW, &mpt->verbose, 0, 1996147883Sscottl "Debugging/Verbose level"); 1997147883Sscottl} 1998147883Sscottl 1999101704Smjacobint 2000147883Sscottlmpt_attach(struct mpt_softc *mpt) 2001101704Smjacob{ 2002147883Sscottl int i; 2003147883Sscottl 2004147883Sscottl for (i = 0; i < MPT_MAX_PERSONALITIES; i++) { 2005147883Sscottl struct mpt_personality *pers; 2006147883Sscottl int error; 2007147883Sscottl 2008147883Sscottl pers = mpt_personalities[i]; 2009147883Sscottl if (pers == NULL) 2010147883Sscottl continue; 2011147883Sscottl 2012147883Sscottl if (pers->probe(mpt) == 0) { 2013147883Sscottl error = pers->attach(mpt); 2014147883Sscottl if (error != 0) { 2015147883Sscottl mpt_detach(mpt); 2016147883Sscottl return (error); 2017147883Sscottl } 2018147883Sscottl mpt->mpt_pers_mask |= (0x1 << pers->id); 2019147883Sscottl pers->use_count++; 2020147883Sscottl } 2021147883Sscottl } 2022155521Smjacob 2023147883Sscottl return (0); 
2024147883Sscottl} 2025147883Sscottl 2026147883Sscottlint 2027147883Sscottlmpt_shutdown(struct mpt_softc *mpt) 2028147883Sscottl{ 2029147883Sscottl struct mpt_personality *pers; 2030147883Sscottl 2031147883Sscottl MPT_PERS_FOREACH_REVERSE(mpt, pers) 2032147883Sscottl pers->shutdown(mpt); 2033147883Sscottl 2034147883Sscottl mpt_reset(mpt, /*reinit*/FALSE); 2035147883Sscottl return (0); 2036147883Sscottl} 2037147883Sscottl 2038147883Sscottlint 2039147883Sscottlmpt_detach(struct mpt_softc *mpt) 2040147883Sscottl{ 2041147883Sscottl struct mpt_personality *pers; 2042147883Sscottl 2043147883Sscottl MPT_PERS_FOREACH_REVERSE(mpt, pers) { 2044147883Sscottl pers->detach(mpt); 2045147883Sscottl mpt->mpt_pers_mask &= ~(0x1 << pers->id); 2046147883Sscottl pers->use_count--; 2047147883Sscottl } 2048147883Sscottl 2049147883Sscottl return (0); 2050147883Sscottl} 2051147883Sscottl 2052147883Sscottlint 2053147883Sscottlmpt_core_load(struct mpt_personality *pers) 2054147883Sscottl{ 2055147883Sscottl int i; 2056147883Sscottl 2057147883Sscottl /* 2058147883Sscottl * Setup core handlers and insert the default handler 2059147883Sscottl * into all "empty slots". 2060147883Sscottl */ 2061147883Sscottl for (i = 0; i < MPT_NUM_REPLY_HANDLERS; i++) 2062147883Sscottl mpt_reply_handlers[i] = mpt_default_reply_handler; 2063147883Sscottl 2064147883Sscottl mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_EVENTS)] = 2065147883Sscottl mpt_event_reply_handler; 2066147883Sscottl mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_CONFIG)] = 2067147883Sscottl mpt_config_reply_handler; 2068147883Sscottl mpt_reply_handlers[MPT_CBI(MPT_REPLY_HANDLER_HANDSHAKE)] = 2069147883Sscottl mpt_handshake_reply_handler; 2070147883Sscottl 2071147883Sscottl return (0); 2072147883Sscottl} 2073147883Sscottl 2074147883Sscottl/* 2075147883Sscottl * Initialize per-instance driver data and perform 2076147883Sscottl * initial controller configuration. 
2077147883Sscottl */ 2078147883Sscottlint 2079147883Sscottlmpt_core_attach(struct mpt_softc *mpt) 2080147883Sscottl{ 2081101704Smjacob int val; 2082147883Sscottl int error; 2083101704Smjacob 2084147883Sscottl LIST_INIT(&mpt->ack_frames); 2085147883Sscottl 2086147883Sscottl /* Put all request buffers on the free list */ 2087147883Sscottl TAILQ_INIT(&mpt->request_pending_list); 2088147883Sscottl TAILQ_INIT(&mpt->request_free_list); 2089147883Sscottl for (val = 0; val < MPT_MAX_REQUESTS(mpt); val++) 2090103871Smjacob mpt_free_request(mpt, &mpt->request_pool[val]); 2091147883Sscottl 2092147883Sscottl mpt_sysctl_attach(mpt); 2093147883Sscottl 2094147883Sscottl mpt_lprt(mpt, MPT_PRT_DEBUG, "doorbell req = %s\n", 2095147883Sscottl mpt_ioc_diag(mpt_read(mpt, MPT_OFFSET_DOORBELL))); 2096147883Sscottl 2097147883Sscottl error = mpt_configure_ioc(mpt); 2098147883Sscottl 2099147883Sscottl return (error); 2100147883Sscottl} 2101147883Sscottl 2102147883Sscottlvoid 2103147883Sscottlmpt_core_shutdown(struct mpt_softc *mpt) 2104147883Sscottl{ 2105147883Sscottl} 2106147883Sscottl 2107147883Sscottlvoid 2108147883Sscottlmpt_core_detach(struct mpt_softc *mpt) 2109147883Sscottl{ 2110147883Sscottl} 2111147883Sscottl 2112147883Sscottlint 2113147883Sscottlmpt_core_unload(struct mpt_personality *pers) 2114147883Sscottl{ 2115147883Sscottl /* Unload is always successfull. 
*/ 2116147883Sscottl return (0); 2117147883Sscottl} 2118147883Sscottl 2119147883Sscottl#define FW_UPLOAD_REQ_SIZE \ 2120147883Sscottl (sizeof(MSG_FW_UPLOAD) - sizeof(SGE_MPI_UNION) \ 2121147883Sscottl + sizeof(FW_UPLOAD_TCSGE) + sizeof(SGE_SIMPLE32)) 2122147883Sscottl 2123147883Sscottlstatic int 2124147883Sscottlmpt_upload_fw(struct mpt_softc *mpt) 2125147883Sscottl{ 2126147883Sscottl uint8_t fw_req_buf[FW_UPLOAD_REQ_SIZE]; 2127147883Sscottl MSG_FW_UPLOAD_REPLY fw_reply; 2128147883Sscottl MSG_FW_UPLOAD *fw_req; 2129147883Sscottl FW_UPLOAD_TCSGE *tsge; 2130147883Sscottl SGE_SIMPLE32 *sge; 2131147883Sscottl uint32_t flags; 2132147883Sscottl int error; 2133147883Sscottl 2134147883Sscottl memset(&fw_req_buf, 0, sizeof(fw_req_buf)); 2135147883Sscottl fw_req = (MSG_FW_UPLOAD *)fw_req_buf; 2136147883Sscottl fw_req->ImageType = MPI_FW_UPLOAD_ITYPE_FW_IOC_MEM; 2137147883Sscottl fw_req->Function = MPI_FUNCTION_FW_UPLOAD; 2138147883Sscottl fw_req->MsgContext = htole32(MPT_REPLY_HANDLER_HANDSHAKE); 2139147883Sscottl tsge = (FW_UPLOAD_TCSGE *)&fw_req->SGL; 2140147883Sscottl tsge->DetailsLength = 12; 2141147883Sscottl tsge->Flags = MPI_SGE_FLAGS_TRANSACTION_ELEMENT; 2142147883Sscottl tsge->ImageSize = htole32(mpt->fw_image_size); 2143147883Sscottl sge = (SGE_SIMPLE32 *)(tsge + 1); 2144147883Sscottl flags = (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER 2145147883Sscottl | MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_SIMPLE_ELEMENT 2146147883Sscottl | MPI_SGE_FLAGS_32_BIT_ADDRESSING | MPI_SGE_FLAGS_IOC_TO_HOST); 2147147883Sscottl flags <<= MPI_SGE_FLAGS_SHIFT; 2148147883Sscottl sge->FlagsLength = htole32(flags | mpt->fw_image_size); 2149147883Sscottl sge->Address = htole32(mpt->fw_phys); 2150147883Sscottl error = mpt_send_handshake_cmd(mpt, sizeof(fw_req_buf), &fw_req_buf); 2151147883Sscottl if (error) 2152147883Sscottl return(error); 2153147883Sscottl error = mpt_recv_handshake_reply(mpt, sizeof(fw_reply), &fw_reply); 2154147883Sscottl return (error); 2155147883Sscottl} 
2156147883Sscottl 2157147883Sscottlstatic void 2158147883Sscottlmpt_diag_outsl(struct mpt_softc *mpt, uint32_t addr, 2159147883Sscottl uint32_t *data, bus_size_t len) 2160147883Sscottl{ 2161147883Sscottl uint32_t *data_end; 2162147883Sscottl 2163147883Sscottl data_end = data + (roundup2(len, sizeof(uint32_t)) / 4); 2164155521Smjacob pci_enable_io(mpt->dev, SYS_RES_IOPORT); 2165147883Sscottl mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, addr); 2166147883Sscottl while (data != data_end) { 2167147883Sscottl mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, *data); 2168147883Sscottl data++; 2169101704Smjacob } 2170155521Smjacob pci_disable_io(mpt->dev, SYS_RES_IOPORT); 2171147883Sscottl} 2172101704Smjacob 2173147883Sscottlstatic int 2174147883Sscottlmpt_download_fw(struct mpt_softc *mpt) 2175147883Sscottl{ 2176147883Sscottl MpiFwHeader_t *fw_hdr; 2177147883Sscottl int error; 2178147883Sscottl uint32_t ext_offset; 2179147883Sscottl uint32_t data; 2180147883Sscottl 2181147883Sscottl mpt_prt(mpt, "Downloading Firmware - Image Size %d\n", 2182147883Sscottl mpt->fw_image_size); 2183147883Sscottl 2184147883Sscottl error = mpt_enable_diag_mode(mpt); 2185147883Sscottl if (error != 0) { 2186147883Sscottl mpt_prt(mpt, "Could not enter diagnostic mode!\n"); 2187147883Sscottl return (EIO); 2188101704Smjacob } 2189101704Smjacob 2190147883Sscottl mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, 2191147883Sscottl MPI_DIAG_RW_ENABLE|MPI_DIAG_DISABLE_ARM); 2192147883Sscottl 2193147883Sscottl fw_hdr = (MpiFwHeader_t *)mpt->fw_image; 2194147883Sscottl mpt_diag_outsl(mpt, fw_hdr->LoadStartAddress, (uint32_t*)fw_hdr, 2195147883Sscottl fw_hdr->ImageSize); 2196147883Sscottl 2197147883Sscottl ext_offset = fw_hdr->NextImageHeaderOffset; 2198147883Sscottl while (ext_offset != 0) { 2199147883Sscottl MpiExtImageHeader_t *ext; 2200147883Sscottl 2201147883Sscottl ext = (MpiExtImageHeader_t *)((uintptr_t)fw_hdr + ext_offset); 2202147883Sscottl ext_offset = ext->NextImageHeaderOffset; 2203147883Sscottl 2204147883Sscottl 
mpt_diag_outsl(mpt, ext->LoadStartAddress, (uint32_t*)ext, 2205147883Sscottl ext->ImageSize); 2206147883Sscottl } 2207147883Sscottl 2208155521Smjacob pci_enable_io(mpt->dev, SYS_RES_IOPORT); 2209147883Sscottl /* Setup the address to jump to on reset. */ 2210147883Sscottl mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, fw_hdr->IopResetRegAddr); 2211147883Sscottl mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, fw_hdr->IopResetVectorValue); 2212147883Sscottl 2213101704Smjacob /* 2214147883Sscottl * The controller sets the "flash bad" status after attempting 2215147883Sscottl * to auto-boot from flash. Clear the status so that the controller 2216147883Sscottl * will continue the boot process with our newly installed firmware. 2217101704Smjacob */ 2218147883Sscottl mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, MPT_DIAG_MEM_CFG_BASE); 2219147883Sscottl data = mpt_pio_read(mpt, MPT_OFFSET_DIAG_DATA) | MPT_DIAG_MEM_CFG_BADFL; 2220147883Sscottl mpt_pio_write(mpt, MPT_OFFSET_DIAG_ADDR, MPT_DIAG_MEM_CFG_BASE); 2221147883Sscottl mpt_pio_write(mpt, MPT_OFFSET_DIAG_DATA, data); 2222147883Sscottl 2223155521Smjacob pci_disable_io(mpt->dev, SYS_RES_IOPORT); 2224155521Smjacob 2225147883Sscottl /* 2226147883Sscottl * Re-enable the processor and clear the boot halt flag. 2227147883Sscottl */ 2228147883Sscottl data = mpt_read(mpt, MPT_OFFSET_DIAGNOSTIC); 2229147883Sscottl data &= ~(MPI_DIAG_PREVENT_IOC_BOOT|MPI_DIAG_DISABLE_ARM); 2230147883Sscottl mpt_write(mpt, MPT_OFFSET_DIAGNOSTIC, data); 2231147883Sscottl 2232147883Sscottl mpt_disable_diag_mode(mpt); 2233147883Sscottl return (0); 2234147883Sscottl} 2235147883Sscottl 2236147883Sscottl/* 2237147883Sscottl * Allocate/Initialize data structures for the controller. Called 2238147883Sscottl * once at instance startup. 
 */
/*
 * Bring the IOC to an operational state: (optionally) reset it, fetch
 * IOCFacts/PortFacts, upload firmware if the IOC requires a host-resident
 * copy, enable the IOC, and read initial configuration pages.
 *
 * Returns 0 on success or an errno (EIO/ENOMEM/ENXIO) on failure.  The
 * whole sequence is retried up to MPT_MAX_TRYS times; a failed attempt
 * sets 'needreset' so the next iteration hard-resets the chip first.
 */
static int
mpt_configure_ioc(struct mpt_softc *mpt)
{
    MSG_PORT_FACTS_REPLY pfp;
    MSG_IOC_FACTS_REPLY facts;
    int try;
    int needreset;
    uint32_t max_chain_depth;

    needreset = 0;
    for (try = 0; try < MPT_MAX_TRYS; try++) {

        /*
         * No need to reset if the IOC is already in the READY state.
         *
         * Force reset if initialization failed previously.
         * Note that a hard_reset of the second channel of a '929
         * will stop operation of the first channel. Hopefully, if the
         * first channel is ok, the second will not require a hard
         * reset.
         */
        if (needreset || (mpt_rd_db(mpt) & MPT_DB_STATE_MASK) !=
            MPT_DB_STATE_READY) {
            if (mpt_reset(mpt, /*reinit*/FALSE) != MPT_OK)
                continue;
        }
        needreset = 0;

        /* Query basic IOC capabilities; a failure forces a reset retry. */
        if (mpt_get_iocfacts(mpt, &facts) != MPT_OK) {
            mpt_prt(mpt, "mpt_get_iocfacts failed\n");
            needreset = 1;
            continue;
        }

        /* IOCFacts fields arrive little-endian; convert before caching. */
        mpt->mpt_global_credits = le16toh(facts.GlobalCredits);
        mpt->request_frame_size = le16toh(facts.RequestFrameSize);
        mpt->ioc_facts_flags = facts.Flags;
        mpt_prt(mpt, "MPI Version=%d.%d.%d.%d\n",
            le16toh(facts.MsgVersion) >> 8,
            le16toh(facts.MsgVersion) & 0xFF,
            le16toh(facts.HeaderVersion) >> 8,
            le16toh(facts.HeaderVersion) & 0xFF);

        /*
         * Now that we know request frame size, we can calculate
         * the actual (reasonable) segment limit for read/write I/O.
         *
         * This limit is constrained by:
         *
         *  + The size of each area we allocate per command (and how
         *    many chain segments we can fit into it).
         *  + The total number of areas we've set up.
         *  + The actual chain depth the card will allow.
         *
         * The first area's segment count is limited by the I/O request
         * at the head of it. We cannot allocate realistically more
         * than MPT_MAX_REQUESTS areas. Therefore, to account for both
         * conditions, we'll just start out with MPT_MAX_REQUESTS-2.
         *
         */
        max_chain_depth = facts.MaxChainDepth;

        /* total number of request areas we (can) allocate */
        mpt->max_seg_cnt = MPT_MAX_REQUESTS(mpt) - 2;

        /* converted to the number of chain areas possible */
        mpt->max_seg_cnt *= MPT_NRFM(mpt);

        /* limited by the number of chain areas the card will support */
        if (mpt->max_seg_cnt > max_chain_depth) {
            mpt_lprt(mpt, MPT_PRT_DEBUG,
                "chain depth limited to %u (from %u)\n",
                max_chain_depth, mpt->max_seg_cnt);
            mpt->max_seg_cnt = max_chain_depth;
        }

        /* converted to the number of simple sges in chain segments. */
        mpt->max_seg_cnt *= (MPT_NSGL(mpt) - 1);

        mpt_lprt(mpt, MPT_PRT_DEBUG,
            "Maximum Segment Count: %u\n", mpt->max_seg_cnt);
        mpt_lprt(mpt, MPT_PRT_DEBUG,
            "MsgLength=%u IOCNumber = %d\n",
            facts.MsgLength, facts.IOCNumber);
        mpt_lprt(mpt, MPT_PRT_DEBUG,
            "IOCFACTS: GlobalCredits=%d BlockSize=%u bytes "
            "Request Frame Size %u bytes Max Chain Depth %u\n",
            mpt->mpt_global_credits, facts.BlockSize,
            mpt->request_frame_size << 2, max_chain_depth);
        mpt_lprt(mpt, MPT_PRT_DEBUG,
            "IOCFACTS: Num Ports %d, FWImageSize %d, "
            "Flags=%#x\n", facts.NumberOfPorts,
            le32toh(facts.FWImageSize), facts.Flags);


        if ((facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT) != 0) {
            struct mpt_map_info mi;
            int error;

            /*
             * In some configurations, the IOC's firmware is
             * stored in a shared piece of system NVRAM that
             * is only accessable via the BIOS.  In this
             * case, the firmware keeps a copy of firmware in
             * RAM until the OS driver retrieves it.  Once
             * retrieved, we are responsible for re-downloading
             * the firmware after any hard-reset.
             */
            mpt->fw_image_size = le32toh(facts.FWImageSize);
            error = mpt_dma_tag_create(mpt, mpt->parent_dmat,
                /*alignment*/1, /*boundary*/0,
                /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
                /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL,
                /*filterarg*/NULL, mpt->fw_image_size,
                /*nsegments*/1, /*maxsegsz*/mpt->fw_image_size,
                /*flags*/0, &mpt->fw_dmat);
            if (error != 0) {
                mpt_prt(mpt, "cannot create fw dma tag\n");
                return (ENOMEM);
            }
            error = bus_dmamem_alloc(mpt->fw_dmat,
                (void **)&mpt->fw_image, BUS_DMA_NOWAIT,
                &mpt->fw_dmap);
            if (error != 0) {
                mpt_prt(mpt, "cannot allocate fw mem.\n");
                bus_dma_tag_destroy(mpt->fw_dmat);
                return (ENOMEM);
            }
            mi.mpt = mpt;
            mi.error = 0;
            /*
             * NOTE(review): the load result and mi.error are not
             * checked before mi.phys is consumed; presumably the
             * 1-segment, BUS_DMA-mapped load completes synchronously
             * here — verify against mpt_map_rquest.
             */
            bus_dmamap_load(mpt->fw_dmat, mpt->fw_dmap,
                mpt->fw_image, mpt->fw_image_size, mpt_map_rquest,
                &mi, 0);
            mpt->fw_phys = mi.phys;

            error = mpt_upload_fw(mpt);
            if (error != 0) {
                mpt_prt(mpt, "fw upload failed.\n");
                bus_dmamap_unload(mpt->fw_dmat, mpt->fw_dmap);
                bus_dmamem_free(mpt->fw_dmat, mpt->fw_image,
                    mpt->fw_dmap);
                bus_dma_tag_destroy(mpt->fw_dmat);
                mpt->fw_image = NULL;
                return (EIO);
            }
        }

        if (mpt_get_portfacts(mpt, &pfp) != MPT_OK) {
            mpt_prt(mpt, "mpt_get_portfacts failed\n");
            needreset = 1;
            continue;
        }

        mpt_lprt(mpt, MPT_PRT_DEBUG,
            "PORTFACTS: Type %x PFlags %x IID %d MaxDev %d\n",
            pfp.PortType, pfp.ProtocolFlags, pfp.PortSCSIID,
            pfp.MaxDevices);

        /*
         * Only SCSI (SPI), SAS and FC ports acting in the initiator
         * role are supported by this driver.
         */
        mpt->mpt_port_type = pfp.PortType;
        mpt->mpt_proto_flags = pfp.ProtocolFlags;
        if (pfp.PortType != MPI_PORTFACTS_PORTTYPE_SCSI &&
            pfp.PortType != MPI_PORTFACTS_PORTTYPE_SAS &&
            pfp.PortType != MPI_PORTFACTS_PORTTYPE_FC) {
            mpt_prt(mpt, "Unsupported Port Type (%x)\n",
                pfp.PortType);
            return (ENXIO);
        }
        if (!(pfp.ProtocolFlags & MPI_PORTFACTS_PROTOCOL_INITIATOR)) {
            mpt_prt(mpt, "initiator role unsupported\n");
            return (ENXIO);
        }
        if (pfp.PortType == MPI_PORTFACTS_PORTTYPE_FC) {
            mpt->is_fc = 1;
            mpt->is_sas = 0;
        } else if (pfp.PortType == MPI_PORTFACTS_PORTTYPE_SAS) {
            mpt->is_fc = 0;
            mpt->is_sas = 1;
        } else {
            /* plain parallel SCSI (SPI) */
            mpt->is_fc = 0;
            mpt->is_sas = 0;
        }
        mpt->mpt_ini_id = pfp.PortSCSIID;
        mpt->mpt_max_devices = pfp.MaxDevices;

        if (mpt_enable_ioc(mpt) != 0) {
            mpt_prt(mpt, "Unable to initialize IOC\n");
            return (ENXIO);
        }

        /*
         * Read and set up initial configuration information
         * (IOC and SPI only for now)
         *
         * XXX Should figure out what "personalities" are
         * available and defer all initialization junk to
         * them.
         */
        mpt_read_config_info_ioc(mpt);

        if (mpt->is_fc == 0 && mpt->is_sas == 0) {
            if (mpt_read_config_info_spi(mpt)) {
                return (EIO);
            }
            if (mpt_set_initial_config_spi(mpt)) {
                return (EIO);
            }
        }

        /* Everything worked */
        break;
    }

    if (try >= MPT_MAX_TRYS) {
        mpt_prt(mpt, "failed to initialize IOC");
        return (EIO);
    }

    mpt_lprt(mpt, MPT_PRT_DEBUG, "enabling interrupts\n");

    mpt_enable_ints(mpt);
    return (0);
}

/*
 * Transition an already-configured IOC to the RUNNING state: send
 * IOCInit as the host driver, wait for the doorbell to report RUNNING,
 * post reply buffers, enable async event reporting, and enable port 0.
 *
 * Returns MPT_OK on success; EIO/ENXIO on failure.
 */
static int
mpt_enable_ioc(struct mpt_softc *mpt)
{
    uint32_t pptr;
    int val;

    if (mpt_send_ioc_init(mpt, MPI_WHOINIT_HOST_DRIVER) != MPT_OK) {
        mpt_prt(mpt, "mpt_send_ioc_init failed\n");
        return (EIO);
    }

    mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_send_ioc_init ok\n");

    if (mpt_wait_state(mpt, MPT_DB_STATE_RUNNING) != MPT_OK) {
        mpt_prt(mpt, "IOC failed to go to run state\n");
        return (ENXIO);
    }
    mpt_lprt(mpt, MPT_PRT_DEBUG, "IOC now at RUNSTATE\n");

    /*
     * Give it reply buffers
     *
     * Do *not* exceed global credits.
     */
    /*
     * Hand out MPT_REPLY_SIZE-sized frames from the single reply page,
     * stopping at the page boundary or at GlobalCredits-1 frames,
     * whichever comes first.
     */
    for (val = 0, pptr = mpt->reply_phys;
        (pptr + MPT_REPLY_SIZE) < (mpt->reply_phys + PAGE_SIZE);
         pptr += MPT_REPLY_SIZE) {
        mpt_free_reply(mpt, pptr);
        if (++val == mpt->mpt_global_credits - 1)
            break;
    }

    /*
     * Enable asynchronous event reporting
     */
    mpt_send_event_request(mpt, 1);

    /*
     * Enable the port
     */
    if (mpt_send_port_enable(mpt, 0) != MPT_OK) {
        mpt_prt(mpt, "failed to enable port 0\n");
        return (ENXIO);
    }
    mpt_lprt(mpt, MPT_PRT_DEBUG, "enabled port 0\n");


    return (MPT_OK);
}