/* isp_pci.c revision 316084 */
/*-
 * Copyright (c) 1997-2008 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
 * FreeBSD Version.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/dev/isp/isp_pci.c 316084 2017-03-28 10:11:00Z mav $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/linker.h>
#include <sys/firmware.h>
#include <sys/bus.h>
#include <sys/stdint.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <sys/malloc.h>
#include <sys/uio.h>

#ifdef __sparc64__
#include <dev/ofw/openfirm.h>
#include <machine/ofw_machdep.h>
#endif

#include <dev/isp/isp_freebsd.h>

/*
 * Forward declarations for the per-chip-generation bus accessors and the
 * DMA/interrupt setup routines that populate the ispmdvec dispatch tables
 * below.  Register access differs between the 1080/12160 SCSI parts and
 * the 2400/2600 Fibre Channel parts, hence the variants.
 */
static uint32_t isp_pci_rd_reg(ispsoftc_t *, int);
static void isp_pci_wr_reg(ispsoftc_t *, int, uint32_t);
static uint32_t isp_pci_rd_reg_1080(ispsoftc_t *, int);
static void isp_pci_wr_reg_1080(ispsoftc_t *, int, uint32_t);
static uint32_t isp_pci_rd_reg_2400(ispsoftc_t *, int);
static void isp_pci_wr_reg_2400(ispsoftc_t *, int, uint32_t);
static uint32_t isp_pci_rd_reg_2600(ispsoftc_t *, int);
static void isp_pci_wr_reg_2600(ispsoftc_t *, int, uint32_t);
static int isp_pci_rd_isr(ispsoftc_t *, uint16_t *, uint16_t *, uint16_t *);
static int isp_pci_rd_isr_2300(ispsoftc_t *, uint16_t *, uint16_t *, uint16_t *);
static int isp_pci_rd_isr_2400(ispsoftc_t *, uint16_t *, uint16_t *, uint16_t *);
static int isp_pci_mbxdma(ispsoftc_t *);
static void isp_pci_mbxdmafree(ispsoftc_t *);
static int isp_pci_dmasetup(ispsoftc_t *, XS_T *, void *);
static int isp_pci_irqsetup(ispsoftc_t *);
static void isp_pci_dumpregs(ispsoftc_t *, const char *);

/*
 * Per-chip-family method vectors consumed by the common isp(4) core.
 * Field order follows struct ispmdvec: rd_isr, rd_reg, wr_reg, mbxdma,
 * dmasetup, dmateardown, irqsetup, dumpregs, firmware image, conf1 flags.
 */

/* ISP1020/1040: baseline parallel SCSI. */
static struct ispmdvec mdvec = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_irqsetup,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

/* ISP1080/1240/1280: uses the 1080-style register accessors. */
static struct ispmdvec mdvec_1080 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_irqsetup,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

/* ISP10160/12160: same accessors as the 1080 family. */
static struct ispmdvec mdvec_12160 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg_1080,
	isp_pci_wr_reg_1080,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_irqsetup,
	isp_pci_dumpregs,
	NULL,
	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
};

/* ISP2100: first-generation Fibre Channel; no conf1 flags. */
static struct ispmdvec mdvec_2100 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_irqsetup,
	isp_pci_dumpregs
};

/* ISP2200 Fibre Channel. */
static struct ispmdvec mdvec_2200 = {
	isp_pci_rd_isr,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_irqsetup,
	isp_pci_dumpregs
};

/* ISP23XX: uses the RISC2HOST status based interrupt reader. */
static struct ispmdvec mdvec_2300 = {
	isp_pci_rd_isr_2300,
	isp_pci_rd_reg,
	isp_pci_wr_reg,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_irqsetup,
	isp_pci_dumpregs
};

/* ISP24XX: 2400-style registers; no register dump routine. */
static struct ispmdvec mdvec_2400 = {
	isp_pci_rd_isr_2400,
	isp_pci_rd_reg_2400,
	isp_pci_wr_reg_2400,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_irqsetup,
	NULL
};

/* ISP25XX: shares the 2400 accessors. */
static struct ispmdvec mdvec_2500 = {
	isp_pci_rd_isr_2400,
	isp_pci_rd_reg_2400,
	isp_pci_wr_reg_2400,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_irqsetup,
	NULL
};

/* ISP26XX (2031/8031): 2600-specific register accessors. */
static struct ispmdvec mdvec_2600 = {
	isp_pci_rd_isr_2400,
	isp_pci_rd_reg_2600,
	isp_pci_wr_reg_2600,
	isp_pci_mbxdma,
	isp_pci_dmasetup,
	isp_common_dmateardown,
	isp_pci_irqsetup,
	NULL
};

/*
 * Fallback definitions for PCI command/config register bits and Qlogic
 * device ids, in case the system headers do not already provide them.
 */
#ifndef PCIM_CMD_INVEN
#define PCIM_CMD_INVEN 0x10
#endif
#ifndef PCIM_CMD_BUSMASTEREN
#define PCIM_CMD_BUSMASTEREN 0x0004
#endif
#ifndef PCIM_CMD_PERRESPEN
#define PCIM_CMD_PERRESPEN 0x0040
#endif
#ifndef PCIM_CMD_SEREN
#define PCIM_CMD_SEREN 0x0100
#endif
#ifndef PCIM_CMD_INTX_DISABLE
#define PCIM_CMD_INTX_DISABLE 0x0400
#endif

#ifndef PCIR_COMMAND
#define PCIR_COMMAND 0x04
#endif

#ifndef PCIR_CACHELNSZ
#define PCIR_CACHELNSZ 0x0c
#endif

#ifndef PCIR_LATTIMER
#define PCIR_LATTIMER 0x0d
#endif

#ifndef PCIR_ROMADDR
#define PCIR_ROMADDR 0x30
#endif

#ifndef PCI_VENDOR_QLOGIC
#define PCI_VENDOR_QLOGIC 0x1077
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP1020
#define PCI_PRODUCT_QLOGIC_ISP1020 0x1020
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP1080
#define PCI_PRODUCT_QLOGIC_ISP1080 0x1080
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP10160
#define PCI_PRODUCT_QLOGIC_ISP10160 0x1016
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP12160
#define PCI_PRODUCT_QLOGIC_ISP12160 0x1216
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP1240
#define PCI_PRODUCT_QLOGIC_ISP1240 0x1240
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP1280
#define PCI_PRODUCT_QLOGIC_ISP1280 0x1280
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2100
#define PCI_PRODUCT_QLOGIC_ISP2100 0x2100
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2200
#define PCI_PRODUCT_QLOGIC_ISP2200 0x2200
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2300
#define PCI_PRODUCT_QLOGIC_ISP2300 0x2300
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2312
#define PCI_PRODUCT_QLOGIC_ISP2312 0x2312
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2322
#define PCI_PRODUCT_QLOGIC_ISP2322 0x2322
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2422
#define PCI_PRODUCT_QLOGIC_ISP2422 0x2422
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2432
#define PCI_PRODUCT_QLOGIC_ISP2432 0x2432
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2532
#define PCI_PRODUCT_QLOGIC_ISP2532 0x2532
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP6312
#define PCI_PRODUCT_QLOGIC_ISP6312 0x6312
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP6322
#define PCI_PRODUCT_QLOGIC_ISP6322 0x6322
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP5432
#define PCI_PRODUCT_QLOGIC_ISP5432 0x5432
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP2031
#define PCI_PRODUCT_QLOGIC_ISP2031 0x2031
#endif

#ifndef PCI_PRODUCT_QLOGIC_ISP8031
#define PCI_PRODUCT_QLOGIC_ISP8031 0x8031
#endif

/*
 * Combined (device << 16) | vendor ids, matching pci_get_devid(9) layout.
 */
#define PCI_QLOGIC_ISP5432 \
	((PCI_PRODUCT_QLOGIC_ISP5432 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP1020 \
	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP1080 \
	((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP10160 \
	((PCI_PRODUCT_QLOGIC_ISP10160 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP12160 \
	((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP1240 \
	((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP1280 \
	((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2100 \
	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2200 \
	((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2300 \
	((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2312 \
	((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2322 \
	((PCI_PRODUCT_QLOGIC_ISP2322 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2422 \
	((PCI_PRODUCT_QLOGIC_ISP2422 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2432 \
	((PCI_PRODUCT_QLOGIC_ISP2432 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2532 \
	((PCI_PRODUCT_QLOGIC_ISP2532 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP6312 \
	((PCI_PRODUCT_QLOGIC_ISP6312 << 16) | PCI_VENDOR_QLOGIC)
#define PCI_QLOGIC_ISP6322 \
	((PCI_PRODUCT_QLOGIC_ISP6322 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP2031 \
	((PCI_PRODUCT_QLOGIC_ISP2031 << 16) | PCI_VENDOR_QLOGIC)

#define PCI_QLOGIC_ISP8031 \
	((PCI_PRODUCT_QLOGIC_ISP8031 << 16) | PCI_VENDOR_QLOGIC)

/*
 * Odd case for some AMI raid cards... We need to *not* attach to this.
 */
#define AMI_RAID_SUBVENDOR_ID 0x101e

/* Defaults forced into PCI config space when the BIOS left poor values. */
#define PCI_DFLT_LTNCY 0x40
#define PCI_DFLT_LNSZ 0x10

static int isp_pci_probe (device_t);
static int isp_pci_attach (device_t);
static int isp_pci_detach (device_t);


#define ISP_PCD(isp) ((struct isp_pcisoftc *)isp)->pci_dev
/*
 * Per-instance PCI softc.  The common ispsoftc_t must be the first member
 * so the core code's ispsoftc_t pointer can be cast to this structure
 * (see ISP_PCD and IspVirt2Off below).
 */
struct isp_pcisoftc {
	ispsoftc_t pci_isp;
	device_t pci_dev;
	struct resource * regs;		/* primary register BAR */
	struct resource * regs1;	/* extra BARs, 26XX only */
	struct resource * regs2;
	void * irq;
	int iqd;			/* interrupt rid (0 legacy, 1 MSI/MSI-X) */
	int rtp;			/* resource type/rid per BAR */
	int rgd;
	int rtp1;
	int rgd1;
	int rtp2;
	int rgd2;
	void * ih;			/* interrupt handler cookie */
	int16_t pci_poff[_NREG_BLKS];	/* register block offsets, per chip */
	bus_dma_tag_t dmat;
	int msicount;			/* number of MSI/MSI-X vectors allocated */
};


static device_method_t isp_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, isp_pci_probe),
	DEVMETHOD(device_attach, isp_pci_attach),
	DEVMETHOD(device_detach, isp_pci_detach),
	{ 0, 0 }
};

static driver_t isp_pci_driver = {
	"isp", isp_pci_methods, sizeof (struct isp_pcisoftc)
};
static devclass_t isp_devclass;
DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0);
MODULE_DEPEND(isp, cam, 1, 1, 1);
MODULE_DEPEND(isp, firmware, 1, 1, 1);
/* Virtual port count hint, read in isp_get_generic_options(). */
static int isp_nvports = 0;

/*
 * Match supported Qlogic PCI ids and set a human-readable description.
 * Declines (ENXIO) unknown ids and ISP12160 parts rebadged by AMI RAID.
 */
static int
isp_pci_probe(device_t dev)
{
	switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
	case PCI_QLOGIC_ISP1020:
		device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1080:
		device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1240:
		device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP1280:
		device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP10160:
		device_set_desc(dev, "Qlogic ISP 10160 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP12160:
		if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) {
			return (ENXIO);
		}
		device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter");
		break;
	case PCI_QLOGIC_ISP2100:
		device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2200:
		device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2300:
		device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2312:
		device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2322:
		device_set_desc(dev, "Qlogic ISP 2322 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2422:
		device_set_desc(dev, "Qlogic ISP 2422 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2432:
		device_set_desc(dev, "Qlogic ISP 2432 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2532:
		device_set_desc(dev, "Qlogic ISP 2532 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP5432:
		device_set_desc(dev, "Qlogic ISP 5432 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP6312:
		device_set_desc(dev, "Qlogic ISP 6312 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP6322:
		device_set_desc(dev, "Qlogic ISP 6322 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP2031:
		device_set_desc(dev, "Qlogic ISP 2031 PCI FC-AL Adapter");
		break;
	case PCI_QLOGIC_ISP8031:
		device_set_desc(dev, "Qlogic ISP 8031 PCI FCoE Adapter");
		break;
	default:
		return (ENXIO);
	}
	/* One-time driver version banner, only under bootverbose. */
	if (isp_announced == 0 && bootverbose) {
		printf("Qlogic ISP Driver, FreeBSD Version %d.%d, "
		    "Core Version %d.%d\n",
		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
		isp_announced++;
	}
	/*
	 * XXXX: Here is where we might load the f/w module
	 * XXXX: (or increase a reference count to it).
	 */
	return (BUS_PROBE_DEFAULT);
}

/*
 * Read device-wide tunables (hint.isp.N.*) that do not depend on the
 * channel or chip type: firmware load/NVRAM suppression, debug level,
 * virtual port count and quick boot time.
 */
static void
isp_get_generic_options(device_t dev, ispsoftc_t *isp)
{
	int tval;

	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev), "fwload_disable", &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_NORELOAD;
	}
	tval = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev), "ignore_nvram", &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_NONVRAM;
	}
	tval = 0;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev), "debug", &tval);
	if (tval) {
		isp->isp_dblev = tval;
	} else {
		isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
	}
	if (bootverbose) {
		isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
	}
	tval = -1;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev), "vports", &tval);
	if (tval > 0 && tval <= 254) {
		isp_nvports = tval;
	}
	tval = 7;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev), "quickboot_time", &tval);
	isp_quickboot_time = tval;
}

/*
 * Read per-channel tunables (hint.isp.N.chanC.*): initiator id/WWNs,
 * role, topology, FC-Tape enablement, loop-down and gone-device timers.
 * SCSI (non-FC) channels only consume the iid hint; everything else is
 * Fibre Channel specific.
 */
static void
isp_get_specific_options(device_t dev, int chan, ispsoftc_t *isp)
{
	const char *sptr;
	int tval = 0;
	char prefix[12], name[16];

	/* Channel 0 hints carry no prefix for backward compatibility. */
	if (chan == 0)
		prefix[0] = 0;
	else
		snprintf(prefix, sizeof(prefix), "chan%d.", chan);
	snprintf(name, sizeof(name), "%siid", prefix);
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    name, &tval)) {
		/* No hint: pick platform defaults. */
		if (IS_FC(isp)) {
			ISP_FC_PC(isp, chan)->default_id = 109 - chan;
		} else {
#ifdef __sparc64__
			ISP_SPI_PC(isp, chan)->iid = OF_getscsinitid(dev);
#else
			ISP_SPI_PC(isp, chan)->iid = 7;
#endif
		}
	} else {
		if (IS_FC(isp)) {
			ISP_FC_PC(isp, chan)->default_id = tval - chan;
		} else {
			ISP_SPI_PC(isp, chan)->iid = tval;
		}
		isp->isp_confopts |= ISP_CFG_OWNLOOPID;
	}

	if (IS_SCSI(isp))
		return;

	tval = -1;
	snprintf(name, sizeof(name), "%srole", prefix);
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    name, &tval) == 0) {
		switch (tval) {
		case ISP_ROLE_NONE:
		case ISP_ROLE_INITIATOR:
		case ISP_ROLE_TARGET:
		case ISP_ROLE_BOTH:
			device_printf(dev, "Chan %d setting role to 0x%x\n", chan, tval);
			break;
		default:
			tval = -1;
			break;
		}
	}
	if (tval == -1) {
		tval = ISP_DEFAULT_ROLES;
	}
	ISP_FC_PC(isp, chan)->def_role = tval;

	tval = 0;
	snprintf(name, sizeof(name), "%sfullduplex", prefix);
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    name, &tval) == 0 && tval != 0) {
		isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
	}
	sptr = NULL;
	snprintf(name, sizeof(name), "%stopology", prefix);
	if (resource_string_value(device_get_name(dev), device_get_unit(dev),
	    name, (const char **) &sptr) == 0 && sptr != NULL) {
		if (strcmp(sptr, "lport") == 0) {
			isp->isp_confopts |= ISP_CFG_LPORT;
		} else if (strcmp(sptr, "nport") == 0) {
			isp->isp_confopts |= ISP_CFG_NPORT;
		} else if (strcmp(sptr, "lport-only") == 0) {
			isp->isp_confopts |= ISP_CFG_LPORT_ONLY;
		} else if (strcmp(sptr, "nport-only") == 0) {
			isp->isp_confopts |= ISP_CFG_NPORT_ONLY;
		}
	}

	/* Compile-time default; the hints below can override either way. */
#ifdef ISP_FCTAPE_OFF
	isp->isp_confopts |= ISP_CFG_NOFCTAPE;
#else
	isp->isp_confopts |= ISP_CFG_FCTAPE;
#endif

	tval = 0;
	snprintf(name, sizeof(name), "%snofctape", prefix);
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    name, &tval);
	if (tval) {
		isp->isp_confopts &= ~ISP_CFG_FCTAPE;
		isp->isp_confopts |= ISP_CFG_NOFCTAPE;
	}

	tval = 0;
	snprintf(name, sizeof(name), "%sfctape", prefix);
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    name, &tval);
	if (tval) {
		isp->isp_confopts &= ~ISP_CFG_NOFCTAPE;
		isp->isp_confopts |= ISP_CFG_FCTAPE;
	}


	/*
	 * Because the resource_*_value functions can neither return
	 * 64 bit integer values, nor can they be directly coerced
	 * to interpret the right hand side of the assignment as
	 * you want them to interpret it, we have to force WWN
	 * hint replacement to specify WWN strings with a leading
	 * 'w' (e..g w50000000aaaa0001). Sigh.
	 */
	sptr = NULL;
	snprintf(name, sizeof(name), "%sportwwn", prefix);
	tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
	    name, (const char **) &sptr);
	if (tval == 0 && sptr != NULL && *sptr++ == 'w') {
		char *eptr = NULL;
		ISP_FC_PC(isp, chan)->def_wwpn = strtouq(sptr, &eptr, 16);
		/*
		 * NOTE(review): wwpn is rejected on == -1 while wwnn below
		 * is rejected on == 0 — asymmetric but matches upstream.
		 */
		if (eptr < sptr + 16 || ISP_FC_PC(isp, chan)->def_wwpn == -1) {
			device_printf(dev, "mangled portwwn hint '%s'\n", sptr);
			ISP_FC_PC(isp, chan)->def_wwpn = 0;
		}
	}

	sptr = NULL;
	snprintf(name, sizeof(name), "%snodewwn", prefix);
	tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
	    name, (const char **) &sptr);
	if (tval == 0 && sptr != NULL && *sptr++ == 'w') {
		char *eptr = NULL;
		ISP_FC_PC(isp, chan)->def_wwnn = strtouq(sptr, &eptr, 16);
		if (eptr < sptr + 16 || ISP_FC_PC(isp, chan)->def_wwnn == 0) {
			device_printf(dev, "mangled nodewwn hint '%s'\n", sptr);
			ISP_FC_PC(isp, chan)->def_wwnn = 0;
		}
	}

	tval = -1;
	snprintf(name, sizeof(name), "%sloop_down_limit", prefix);
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    name, &tval);
	if (tval >= 0 && tval < 0xffff) {
		ISP_FC_PC(isp, chan)->loop_down_limit = tval;
	} else {
		ISP_FC_PC(isp, chan)->loop_down_limit = isp_loop_down_limit;
	}

	tval = -1;
	snprintf(name, sizeof(name), "%sgone_device_time", prefix);
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    name, &tval);
	if (tval >= 0 && tval < 0xffff) {
		ISP_FC_PC(isp, chan)->gone_device_time = tval;
	} else {
		ISP_FC_PC(isp, chan)->gone_device_time = isp_gone_device_time;
	}
}

/*
 * Attach: identify the exact chip, map register BARs, allocate per-channel
 * parameter storage, load firmware, fix up PCI config space, set up the
 * interrupt, then hand off to the common isp_reinit()/isp_attach() path.
 * All failures funnel through the 'bad' label which releases everything
 * acquired so far (resources are NULL/0 initialized first).
 */
static int
isp_pci_attach(device_t dev)
{
	struct isp_pcisoftc *pcs = device_get_softc(dev);
	ispsoftc_t *isp = &pcs->pci_isp;
	int i;
	uint32_t data, cmd, linesz, did;
	size_t psize, xsize;
	char fwname[32];

	pcs->pci_dev = dev;
	isp->isp_dev = dev;
	isp->isp_nchan = 1;
	if (sizeof (bus_addr_t) > 4)
		isp->isp_osinfo.sixtyfourbit = 1;
	mtx_init(&isp->isp_osinfo.lock, "isp", NULL, MTX_DEF);

	/*
	 * Get Generic Options
	 */
	isp_nvports = 0;
	isp_get_generic_options(dev, isp);

	linesz = PCI_DFLT_LNSZ;
	pcs->irq = pcs->regs = pcs->regs2 = NULL;
	pcs->rgd = pcs->rtp = pcs->iqd = 0;

	pcs->pci_dev = dev;
	/* Default register block layout; chip cases below override some. */
	pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
	pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
	pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
	pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
	pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;

	switch (pci_get_devid(dev)) {
	case PCI_QLOGIC_ISP1020:
		did = 0x1040;
		isp->isp_mdvec = &mdvec;
		isp->isp_type = ISP_HA_SCSI_UNKNOWN;
		break;
	case PCI_QLOGIC_ISP1080:
		did = 0x1080;
		isp->isp_mdvec = &mdvec_1080;
		isp->isp_type = ISP_HA_SCSI_1080;
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
		break;
	case PCI_QLOGIC_ISP1240:
		did = 0x1080;
		isp->isp_mdvec = &mdvec_1080;
		isp->isp_type = ISP_HA_SCSI_1240;
		isp->isp_nchan = 2;
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
		break;
	case PCI_QLOGIC_ISP1280:
		did = 0x1080;
		isp->isp_mdvec = &mdvec_1080;
		isp->isp_type = ISP_HA_SCSI_1280;
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
		break;
	case PCI_QLOGIC_ISP10160:
		did = 0x12160;
		isp->isp_mdvec = &mdvec_12160;
		isp->isp_type = ISP_HA_SCSI_10160;
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
		break;
	case PCI_QLOGIC_ISP12160:
		did = 0x12160;
		isp->isp_nchan = 2;
		isp->isp_mdvec = &mdvec_12160;
		isp->isp_type = ISP_HA_SCSI_12160;
		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
		break;
	case PCI_QLOGIC_ISP2100:
		did = 0x2100;
		isp->isp_mdvec = &mdvec_2100;
		isp->isp_type = ISP_HA_FC_2100;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2100_OFF;
		if (pci_get_revid(dev) < 3) {
			/*
			 * XXX: Need to get the actual revision
			 * XXX: number of the 2100 FB. At any rate,
			 * XXX: lower cache line size for early revision
			 * XXX; boards.
			 */
			linesz = 1;
		}
		break;
	case PCI_QLOGIC_ISP2200:
		did = 0x2200;
		isp->isp_mdvec = &mdvec_2200;
		isp->isp_type = ISP_HA_FC_2200;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2100_OFF;
		break;
	case PCI_QLOGIC_ISP2300:
		did = 0x2300;
		isp->isp_mdvec = &mdvec_2300;
		isp->isp_type = ISP_HA_FC_2300;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2300_OFF;
		break;
	case PCI_QLOGIC_ISP2312:
	case PCI_QLOGIC_ISP6312:
		did = 0x2300;
		isp->isp_mdvec = &mdvec_2300;
		isp->isp_type = ISP_HA_FC_2312;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2300_OFF;
		break;
	case PCI_QLOGIC_ISP2322:
	case PCI_QLOGIC_ISP6322:
		did = 0x2322;
		isp->isp_mdvec = &mdvec_2300;
		isp->isp_type = ISP_HA_FC_2322;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2300_OFF;
		break;
	case PCI_QLOGIC_ISP2422:
	case PCI_QLOGIC_ISP2432:
		did = 0x2400;
		isp->isp_nchan += isp_nvports;
		isp->isp_mdvec = &mdvec_2400;
		isp->isp_type = ISP_HA_FC_2400;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF;
		break;
	case PCI_QLOGIC_ISP2532:
		did = 0x2500;
		isp->isp_nchan += isp_nvports;
		isp->isp_mdvec = &mdvec_2500;
		isp->isp_type = ISP_HA_FC_2500;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF;
		break;
	case PCI_QLOGIC_ISP5432:
		did = 0x2500;
		isp->isp_mdvec = &mdvec_2500;
		isp->isp_type = ISP_HA_FC_2500;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF;
		break;
	case PCI_QLOGIC_ISP2031:
	case PCI_QLOGIC_ISP8031:
		did = 0x2600;
		isp->isp_nchan += isp_nvports;
		isp->isp_mdvec = &mdvec_2600;
		isp->isp_type = ISP_HA_FC_2600;
		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF;
		break;
	default:
		device_printf(dev, "unknown device type\n");
		goto bad;
		break;
	}
	isp->isp_revision = pci_get_revid(dev);

	if (IS_26XX(isp)) {
		/* 26XX parts expose three memory BARs (0, 2, 4). */
		pcs->rtp = SYS_RES_MEMORY;
		pcs->rgd = PCIR_BAR(0);
		pcs->regs = bus_alloc_resource_any(dev, pcs->rtp, &pcs->rgd,
		    RF_ACTIVE);
		pcs->rtp1 = SYS_RES_MEMORY;
		pcs->rgd1 = PCIR_BAR(2);
		pcs->regs1 = bus_alloc_resource_any(dev, pcs->rtp1, &pcs->rgd1,
		    RF_ACTIVE);
		pcs->rtp2 = SYS_RES_MEMORY;
		pcs->rgd2 = PCIR_BAR(4);
		pcs->regs2 = bus_alloc_resource_any(dev, pcs->rtp2, &pcs->rgd2,
		    RF_ACTIVE);
	} else {
		/* Prefer memory BAR 1; fall back to I/O port BAR 0. */
		pcs->rtp = SYS_RES_MEMORY;
		pcs->rgd = PCIR_BAR(1);
		pcs->regs = bus_alloc_resource_any(dev, pcs->rtp, &pcs->rgd,
		    RF_ACTIVE);
		if (pcs->regs == NULL) {
			pcs->rtp = SYS_RES_IOPORT;
			pcs->rgd = PCIR_BAR(0);
			pcs->regs = bus_alloc_resource_any(dev, pcs->rtp,
			    &pcs->rgd, RF_ACTIVE);
		}
	}
	if (pcs->regs == NULL) {
		device_printf(dev, "Unable to map any ports\n");
		goto bad;
	}
	if (bootverbose) {
		device_printf(dev, "Using %s space register mapping\n",
		    (pcs->rtp == SYS_RES_IOPORT)? "I/O" : "Memory");
	}
	isp->isp_regs = pcs->regs;
	isp->isp_regs2 = pcs->regs2;

	/* Per-channel parameter and platform-private storage. */
	if (IS_FC(isp)) {
		psize = sizeof (fcparam);
		xsize = sizeof (struct isp_fc);
	} else {
		psize = sizeof (sdparam);
		xsize = sizeof (struct isp_spi);
	}
	psize *= isp->isp_nchan;
	xsize *= isp->isp_nchan;
	isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (isp->isp_param == NULL) {
		device_printf(dev, "cannot allocate parameter data\n");
		goto bad;
	}
	isp->isp_osinfo.pc.ptr = malloc(xsize, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (isp->isp_osinfo.pc.ptr == NULL) {
		device_printf(dev, "cannot allocate parameter data\n");
		goto bad;
	}

	/*
	 * Now that we know who we are (roughly) get/set specific options
	 */
	for (i = 0; i < isp->isp_nchan; i++) {
		isp_get_specific_options(dev, i, isp);
	}

	/*
	 * NOTE(review): the NULL check immediately after the assignment is
	 * redundant (fw was just set to NULL); preserved as upstream has it.
	 */
	isp->isp_osinfo.fw = NULL;
	if (isp->isp_osinfo.fw == NULL) {
		snprintf(fwname, sizeof (fwname), "isp_%04x", did);
		isp->isp_osinfo.fw = firmware_get(fwname);
	}
	if (isp->isp_osinfo.fw != NULL) {
		isp_prt(isp, ISP_LOGCONFIG, "loaded firmware %s", fwname);
		isp->isp_mdvec->dv_ispfw = isp->isp_osinfo.fw->data;
	}

	/*
	 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER are set.
	 */
	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
	cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN | PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN;
	if (IS_2300(isp)) {	/* per QLogic errata */
		cmd &= ~PCIM_CMD_INVEN;
	}
	if (IS_2322(isp) || pci_get_devid(dev) == PCI_QLOGIC_ISP6312) {
		cmd &= ~PCIM_CMD_INTX_DISABLE;
	}
	if (IS_24XX(isp)) {
		cmd &= ~PCIM_CMD_INTX_DISABLE;
	}
	pci_write_config(dev, PCIR_COMMAND, cmd, 2);

	/*
	 * Make sure the Cache Line Size register is set sensibly.
	 */
	data = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	if (data == 0 || (linesz != PCI_DFLT_LNSZ && data != linesz)) {
		isp_prt(isp, ISP_LOGDEBUG0, "set PCI line size to %d from %d", linesz, data);
		data = linesz;
		pci_write_config(dev, PCIR_CACHELNSZ, data, 1);
	}

	/*
	 * Make sure the Latency Timer is sane.
	 */
	data = pci_read_config(dev, PCIR_LATTIMER, 1);
	if (data < PCI_DFLT_LTNCY) {
		data = PCI_DFLT_LTNCY;
		isp_prt(isp, ISP_LOGDEBUG0, "set PCI latency to %d", data);
		pci_write_config(dev, PCIR_LATTIMER, data, 1);
	}

	/*
	 * Make sure we've disabled the ROM.
	 */
	data = pci_read_config(dev, PCIR_ROMADDR, 4);
	data &= ~1;
	pci_write_config(dev, PCIR_ROMADDR, data, 4);

	if (IS_26XX(isp)) {
		/* 26XX chips support only MSI-X, so start from them. */
		pcs->msicount = imin(pci_msix_count(dev), 1);
		if (pcs->msicount > 0 &&
		    (i = pci_alloc_msix(dev, &pcs->msicount)) == 0) {
			pcs->iqd = 1;
		} else {
			pcs->msicount = 0;
		}
	}
	if (pcs->msicount == 0 && (IS_24XX(isp) || IS_2322(isp))) {
		/*
		 * Older chips support both MSI and MSI-X, but I have
		 * feeling that older firmware may not support MSI-X,
		 * but we have no way to check the firmware flag here.
		 */
		pcs->msicount = imin(pci_msi_count(dev), 1);
		if (pcs->msicount > 0 &&
		    pci_alloc_msi(dev, &pcs->msicount) == 0) {
			pcs->iqd = 1;
		} else {
			pcs->msicount = 0;
		}
	}
	pcs->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &pcs->iqd, RF_ACTIVE | RF_SHAREABLE);
	if (pcs->irq == NULL) {
		device_printf(dev, "could not allocate interrupt\n");
		goto bad;
	}

	if (isp_setup_intr(dev, pcs->irq, ISP_IFLAGS, NULL, isp_platform_intr, isp, &pcs->ih)) {
		device_printf(dev, "could not setup interrupt\n");
		goto bad;
	}

	/*
	 * Last minute checks...
	 */
	if (IS_23XX(isp) || IS_24XX(isp)) {
		isp->isp_port = pci_get_function(dev);
	}

	/*
	 * Make sure we're in reset state.
	 */
	ISP_LOCK(isp);
	if (isp_reinit(isp, 1) != 0) {
		ISP_UNLOCK(isp);
		goto bad;
	}
	ISP_UNLOCK(isp);
	if (isp_attach(isp)) {
		ISP_LOCK(isp);
		isp_shutdown(isp);
		ISP_UNLOCK(isp);
		goto bad;
	}
	return (0);

bad:
	/* Unwind everything acquired above; all fields start NULL/0. */
	if (pcs->ih) {
		(void) bus_teardown_intr(dev, pcs->irq, pcs->ih);
	}
	if (pcs->irq) {
		(void) bus_release_resource(dev, SYS_RES_IRQ, pcs->iqd, pcs->irq);
	}
	if (pcs->msicount) {
		pci_release_msi(dev);
	}
	if (pcs->regs)
		(void) bus_release_resource(dev, pcs->rtp, pcs->rgd, pcs->regs);
	if (pcs->regs1)
		(void) bus_release_resource(dev, pcs->rtp1, pcs->rgd1, pcs->regs1);
	if (pcs->regs2)
		(void) bus_release_resource(dev, pcs->rtp2, pcs->rgd2, pcs->regs2);
	if (pcs->pci_isp.isp_param) {
		free(pcs->pci_isp.isp_param, M_DEVBUF);
		pcs->pci_isp.isp_param = NULL;
	}
	if (pcs->pci_isp.isp_osinfo.pc.ptr) {
		free(pcs->pci_isp.isp_osinfo.pc.ptr, M_DEVBUF);
		pcs->pci_isp.isp_osinfo.pc.ptr = NULL;
	}
	mtx_destroy(&isp->isp_osinfo.lock);
	return (ENXIO);
}

/*
 * Detach: tear down the common layer first (isp_detach may refuse),
 * then quiesce the chip and release interrupt, MSI, BAR and memory
 * resources in reverse order of acquisition.
 */
static int
isp_pci_detach(device_t dev)
{
	struct isp_pcisoftc *pcs = device_get_softc(dev);
	ispsoftc_t *isp = &pcs->pci_isp;
	int status;

	status = isp_detach(isp);
	if (status)
		return (status);
	ISP_LOCK(isp);
	isp_shutdown(isp);
	ISP_UNLOCK(isp);
	if (pcs->ih)
		(void) bus_teardown_intr(dev, pcs->irq, pcs->ih);
	(void) bus_release_resource(dev, SYS_RES_IRQ, pcs->iqd, pcs->irq);
	if (pcs->msicount)
		pci_release_msi(dev);
	(void) bus_release_resource(dev, pcs->rtp, pcs->rgd, pcs->regs);
	if (pcs->regs1)
		(void) bus_release_resource(dev, pcs->rtp1, pcs->rgd1, pcs->regs1);
	if (pcs->regs2)
		(void) bus_release_resource(dev, pcs->rtp2, pcs->rgd2, pcs->regs2);
	isp_pci_mbxdmafree(isp);
	if (pcs->pci_isp.isp_param) {
		free(pcs->pci_isp.isp_param, M_DEVBUF);
		pcs->pci_isp.isp_param = NULL;
	}
	if (pcs->pci_isp.isp_osinfo.pc.ptr) {
		free(pcs->pci_isp.isp_osinfo.pc.ptr, M_DEVBUF);
		pcs->pci_isp.isp_osinfo.pc.ptr = NULL;
	}
	mtx_destroy(&isp->isp_osinfo.lock);
	return (0);
}

/*
 * Translate a core-layer virtual register offset into a BAR offset using
 * the per-chip block table filled in at attach time.
 */
#define IspVirt2Off(a, x) \
	(((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \
	_BLK_REG_SHFT] + ((x) & 0xfff))

/* 16/32-bit accessors on the primary BAR, 32-bit on the secondary one. */
#define BXR2(isp, off) bus_read_2((isp)->isp_regs, (off))
#define BXW2(isp, off, v) bus_write_2((isp)->isp_regs, (off), (v))
#define BXR4(isp, off) bus_read_4((isp)->isp_regs, (off))
#define BXW4(isp, off, v) bus_write_4((isp)->isp_regs, (off), (v))
#define B2R4(isp, off) bus_read_4((isp)->isp_regs2, (off))
#define B2W4(isp, off, v) bus_write_4((isp)->isp_regs2, (off), (v))

/*
 * Read a 16-bit register twice until two consecutive reads agree
 * (ISP2100 registers can be unstable).  Returns nonzero if no stable
 * value was seen within 1000 tries; on success stores the value in *rp.
 */
static ISP_INLINE int
isp_pci_rd_debounced(ispsoftc_t *isp, int off, uint16_t *rp)
{
	uint32_t val0, val1;
	int i = 0;

	do {
		val0 = BXR2(isp, IspVirt2Off(isp, off));
		val1 = BXR2(isp, IspVirt2Off(isp, off));
	} while (val0 != val1 && ++i < 1000);
	if (val0 != val1) {
		return (1);
	}
	*rp = val0;
	return (0);
}

/*
 * Classic (pre-2300) interrupt reader: returns nonzero if an interrupt
 * is pending, filling *isrp/*semap and, when the semaphore is held,
 * *info from OUTMAILBOX0.  ISP2100 reads are debounced.
 */
static int
isp_pci_rd_isr(ispsoftc_t *isp, uint16_t *isrp, uint16_t *semap, uint16_t *info)
{
	uint16_t isr, sema;

	if (IS_2100(isp)) {
		if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) {
			return (0);
		}
		if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) {
			return (0);
		}
	} else {
		isr = BXR2(isp, IspVirt2Off(isp, BIU_ISR));
		sema = BXR2(isp, IspVirt2Off(isp, BIU_SEMA));
	}
	isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema);
	isr &= INT_PENDING_MASK(isp);
	sema &= BIU_SEMA_LOCK;
	if (isr == 0 && sema == 0) {
		return (0);
	}
	*isrp = isr;
	if ((*semap = sema) != 0) {
		if (IS_2100(isp)) {
			if (isp_pci_rd_debounced(isp, OUTMAILBOX0, info)) {
				return (0);
			}
		} else {
			*info = BXR2(isp, IspVirt2Off(isp, OUTMAILBOX0));
		}
	}
	return (1);
}

/*
 * 23XX interrupt reader: decodes the RISC2HOST status register.  Fast-post
 * completions are translated directly into async info codes; a paused RISC
 * on an unknown status is reset and logged.
 */
static int
isp_pci_rd_isr_2300(ispsoftc_t *isp, uint16_t *isrp, uint16_t *semap, uint16_t *info)
{
	uint32_t hccr, r2hisr;

	if (!(BXR2(isp, IspVirt2Off(isp, BIU_ISR) & BIU2100_ISR_RISC_INT))) {
		*isrp = 0;
		return (0);
	}
	r2hisr = BXR4(isp, IspVirt2Off(isp, BIU_R2HSTSLO));
	isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
	if ((r2hisr & BIU_R2HST_INTR) == 0) {
		*isrp = 0;
		return (0);
	}
	switch ((*isrp = r2hisr & BIU_R2HST_ISTAT_MASK)) {
	case ISPR2HST_ROM_MBX_OK:
	case ISPR2HST_ROM_MBX_FAIL:
	case ISPR2HST_MBX_OK:
	case ISPR2HST_MBX_FAIL:
	case ISPR2HST_ASYNC_EVENT:
		*semap = 1;
		break;
	case ISPR2HST_RIO_16:
		*info = ASYNC_RIO16_1;
		*semap = 1;
		return (1);
	case ISPR2HST_FPOST:
		*info = ASYNC_CMD_CMPLT;
		*semap = 1;
		return (1);
	case ISPR2HST_FPOST_CTIO:
		*info = ASYNC_CTIO_DONE;
		*semap = 1;
		return (1);
	case ISPR2HST_RSPQ_UPDATE:
		*semap = 0;
		break;
	default:
		hccr = ISP_READ(isp, HCCR);
		if (hccr & HCCR_PAUSE) {
			ISP_WRITE(isp, HCCR, HCCR_RESET);
			isp_prt(isp, ISP_LOGERR, "RISC paused at interrupt (%x->%x)", hccr, ISP_READ(isp, HCCR));
			ISP_WRITE(isp, BIU_ICR, 0);
		} else {
			isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n", r2hisr);
		}
		return (0);
	}
	*info = (r2hisr >> 16);
	return (1);
}

/*
 * 24XX/25XX/26XX interrupt reader: same RISC2HOST decoding as the 2300
 * but with the 2400 status register set; unknown statuses just clear the
 * RISC interrupt and are logged.
 */
static int
isp_pci_rd_isr_2400(ispsoftc_t *isp, uint16_t *isrp, uint16_t *semap, uint16_t *info)
{
	uint32_t r2hisr;

	r2hisr = BXR4(isp, IspVirt2Off(isp, BIU2400_R2HSTSLO));
	isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
	if ((r2hisr & BIU_R2HST_INTR) == 0) {
		*isrp = 0;
		return (0);
	}
	switch ((*isrp = r2hisr & BIU_R2HST_ISTAT_MASK)) {
	case ISPR2HST_ROM_MBX_OK:
	case ISPR2HST_ROM_MBX_FAIL:
	case ISPR2HST_MBX_OK:
	case ISPR2HST_MBX_FAIL:
	case ISPR2HST_ASYNC_EVENT:
		*semap = 1;
		break;
	case ISPR2HST_RSPQ_UPDATE:
	case ISPR2HST_RSPQ_UPDATE2:
	case ISPR2HST_ATIO_UPDATE:
	case ISPR2HST_ATIO_RSPQ_UPDATE:
	case ISPR2HST_ATIO_UPDATE2:
		*semap = 0;
		break;
	default:
		ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT);
		isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n", r2hisr);
		return (0);
	}
	*info = (r2hisr >> 16);
	return (1);
}

/*
 * Default 16-bit register read.  SXP-block accesses require temporarily
 * selecting the SXP bank via BIU_CONF1 (caller must have paused the RISC).
 */
static uint32_t
isp_pci_rd_reg(ispsoftc_t *isp, int regoff)
{
	uint16_t rv;
	int oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf | BIU_PCI_CONF1_SXP);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
	rv = BXR2(isp, IspVirt2Off(isp, regoff));
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf);
		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
	}
	return (rv);
}

static void
isp_pci_wr_reg(ispsoftc_t *isp, int regoff, uint32_t val)
{
	int oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
1239 */ 1240 oldconf = BXR2(isp, IspVirt2Off(isp, BIU_CONF1)); 1241 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), 1242 oldconf | BIU_PCI_CONF1_SXP); 1243 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1); 1244 } 1245 BXW2(isp, IspVirt2Off(isp, regoff), val); 1246 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2, -1); 1247 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 1248 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf); 1249 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1); 1250 } 1251 1252} 1253 1254static uint32_t 1255isp_pci_rd_reg_1080(ispsoftc_t *isp, int regoff) 1256{ 1257 uint32_t rv, oc = 0; 1258 1259 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 1260 uint32_t tc; 1261 /* 1262 * We will assume that someone has paused the RISC processor. 1263 */ 1264 oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1)); 1265 tc = oc & ~BIU_PCI1080_CONF1_DMA; 1266 if (regoff & SXP_BANK1_SELECT) 1267 tc |= BIU_PCI1080_CONF1_SXP1; 1268 else 1269 tc |= BIU_PCI1080_CONF1_SXP0; 1270 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), tc); 1271 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1); 1272 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) { 1273 oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1)); 1274 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), 1275 oc | BIU_PCI1080_CONF1_DMA); 1276 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1); 1277 } 1278 rv = BXR2(isp, IspVirt2Off(isp, regoff)); 1279 if (oc) { 1280 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oc); 1281 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1); 1282 } 1283 return (rv); 1284} 1285 1286static void 1287isp_pci_wr_reg_1080(ispsoftc_t *isp, int regoff, uint32_t val) 1288{ 1289 int oc = 0; 1290 1291 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 1292 uint32_t tc; 1293 /* 1294 * We will assume that someone has paused the RISC processor. 
1295 */ 1296 oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1)); 1297 tc = oc & ~BIU_PCI1080_CONF1_DMA; 1298 if (regoff & SXP_BANK1_SELECT) 1299 tc |= BIU_PCI1080_CONF1_SXP1; 1300 else 1301 tc |= BIU_PCI1080_CONF1_SXP0; 1302 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), tc); 1303 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1); 1304 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) { 1305 oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1)); 1306 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), 1307 oc | BIU_PCI1080_CONF1_DMA); 1308 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1); 1309 } 1310 BXW2(isp, IspVirt2Off(isp, regoff), val); 1311 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2, -1); 1312 if (oc) { 1313 BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oc); 1314 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1); 1315 } 1316} 1317 1318static uint32_t 1319isp_pci_rd_reg_2400(ispsoftc_t *isp, int regoff) 1320{ 1321 uint32_t rv; 1322 int block = regoff & _BLK_REG_MASK; 1323 1324 switch (block) { 1325 case BIU_BLOCK: 1326 break; 1327 case MBOX_BLOCK: 1328 return (BXR2(isp, IspVirt2Off(isp, regoff))); 1329 case SXP_BLOCK: 1330 isp_prt(isp, ISP_LOGERR, "SXP_BLOCK read at 0x%x", regoff); 1331 return (0xffffffff); 1332 case RISC_BLOCK: 1333 isp_prt(isp, ISP_LOGERR, "RISC_BLOCK read at 0x%x", regoff); 1334 return (0xffffffff); 1335 case DMA_BLOCK: 1336 isp_prt(isp, ISP_LOGERR, "DMA_BLOCK read at 0x%x", regoff); 1337 return (0xffffffff); 1338 default: 1339 isp_prt(isp, ISP_LOGERR, "unknown block read at 0x%x", regoff); 1340 return (0xffffffff); 1341 } 1342 1343 switch (regoff) { 1344 case BIU2400_FLASH_ADDR: 1345 case BIU2400_FLASH_DATA: 1346 case BIU2400_ICR: 1347 case BIU2400_ISR: 1348 case BIU2400_CSR: 1349 case BIU2400_REQINP: 1350 case BIU2400_REQOUTP: 1351 case BIU2400_RSPINP: 1352 case BIU2400_RSPOUTP: 1353 case BIU2400_PRI_REQINP: 1354 case BIU2400_PRI_REQOUTP: 1355 case BIU2400_ATIO_RSPINP: 1356 case BIU2400_ATIO_RSPOUTP: 1357 case BIU2400_HCCR: 
1358 case BIU2400_GPIOD: 1359 case BIU2400_GPIOE: 1360 case BIU2400_HSEMA: 1361 rv = BXR4(isp, IspVirt2Off(isp, regoff)); 1362 break; 1363 case BIU2400_R2HSTSLO: 1364 rv = BXR4(isp, IspVirt2Off(isp, regoff)); 1365 break; 1366 case BIU2400_R2HSTSHI: 1367 rv = BXR4(isp, IspVirt2Off(isp, regoff)) >> 16; 1368 break; 1369 default: 1370 isp_prt(isp, ISP_LOGERR, "unknown register read at 0x%x", 1371 regoff); 1372 rv = 0xffffffff; 1373 break; 1374 } 1375 return (rv); 1376} 1377 1378static void 1379isp_pci_wr_reg_2400(ispsoftc_t *isp, int regoff, uint32_t val) 1380{ 1381 int block = regoff & _BLK_REG_MASK; 1382 1383 switch (block) { 1384 case BIU_BLOCK: 1385 break; 1386 case MBOX_BLOCK: 1387 BXW2(isp, IspVirt2Off(isp, regoff), val); 1388 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2, -1); 1389 return; 1390 case SXP_BLOCK: 1391 isp_prt(isp, ISP_LOGERR, "SXP_BLOCK write at 0x%x", regoff); 1392 return; 1393 case RISC_BLOCK: 1394 isp_prt(isp, ISP_LOGERR, "RISC_BLOCK write at 0x%x", regoff); 1395 return; 1396 case DMA_BLOCK: 1397 isp_prt(isp, ISP_LOGERR, "DMA_BLOCK write at 0x%x", regoff); 1398 return; 1399 default: 1400 isp_prt(isp, ISP_LOGERR, "unknown block write at 0x%x", regoff); 1401 break; 1402 } 1403 1404 switch (regoff) { 1405 case BIU2400_FLASH_ADDR: 1406 case BIU2400_FLASH_DATA: 1407 case BIU2400_ICR: 1408 case BIU2400_ISR: 1409 case BIU2400_CSR: 1410 case BIU2400_REQINP: 1411 case BIU2400_REQOUTP: 1412 case BIU2400_RSPINP: 1413 case BIU2400_RSPOUTP: 1414 case BIU2400_PRI_REQINP: 1415 case BIU2400_PRI_REQOUTP: 1416 case BIU2400_ATIO_RSPINP: 1417 case BIU2400_ATIO_RSPOUTP: 1418 case BIU2400_HCCR: 1419 case BIU2400_GPIOD: 1420 case BIU2400_GPIOE: 1421 case BIU2400_HSEMA: 1422 BXW4(isp, IspVirt2Off(isp, regoff), val); 1423#ifdef MEMORYBARRIERW 1424 if (regoff == BIU2400_REQINP || 1425 regoff == BIU2400_RSPOUTP || 1426 regoff == BIU2400_PRI_REQINP || 1427 regoff == BIU2400_ATIO_RSPOUTP) 1428 MEMORYBARRIERW(isp, SYNC_REG, 1429 IspVirt2Off(isp, regoff), 4, -1) 
1430 else 1431#endif 1432 MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 4, -1); 1433 break; 1434 default: 1435 isp_prt(isp, ISP_LOGERR, "unknown register write at 0x%x", 1436 regoff); 1437 break; 1438 } 1439} 1440 1441static uint32_t 1442isp_pci_rd_reg_2600(ispsoftc_t *isp, int regoff) 1443{ 1444 uint32_t rv; 1445 1446 switch (regoff) { 1447 case BIU2400_PRI_REQINP: 1448 case BIU2400_PRI_REQOUTP: 1449 isp_prt(isp, ISP_LOGERR, "unknown register read at 0x%x", 1450 regoff); 1451 rv = 0xffffffff; 1452 break; 1453 case BIU2400_REQINP: 1454 rv = B2R4(isp, 0x00); 1455 break; 1456 case BIU2400_REQOUTP: 1457 rv = B2R4(isp, 0x04); 1458 break; 1459 case BIU2400_RSPINP: 1460 rv = B2R4(isp, 0x08); 1461 break; 1462 case BIU2400_RSPOUTP: 1463 rv = B2R4(isp, 0x0c); 1464 break; 1465 case BIU2400_ATIO_RSPINP: 1466 rv = B2R4(isp, 0x10); 1467 break; 1468 case BIU2400_ATIO_RSPOUTP: 1469 rv = B2R4(isp, 0x14); 1470 break; 1471 default: 1472 rv = isp_pci_rd_reg_2400(isp, regoff); 1473 break; 1474 } 1475 return (rv); 1476} 1477 1478static void 1479isp_pci_wr_reg_2600(ispsoftc_t *isp, int regoff, uint32_t val) 1480{ 1481 int off; 1482 1483 switch (regoff) { 1484 case BIU2400_PRI_REQINP: 1485 case BIU2400_PRI_REQOUTP: 1486 isp_prt(isp, ISP_LOGERR, "unknown register write at 0x%x", 1487 regoff); 1488 return; 1489 case BIU2400_REQINP: 1490 off = 0x00; 1491 break; 1492 case BIU2400_REQOUTP: 1493 off = 0x04; 1494 break; 1495 case BIU2400_RSPINP: 1496 off = 0x08; 1497 break; 1498 case BIU2400_RSPOUTP: 1499 off = 0x0c; 1500 break; 1501 case BIU2400_ATIO_RSPINP: 1502 off = 0x10; 1503 break; 1504 case BIU2400_ATIO_RSPOUTP: 1505 off = 0x14; 1506 break; 1507 default: 1508 isp_pci_wr_reg_2400(isp, regoff, val); 1509 return; 1510 } 1511 B2W4(isp, off, val); 1512} 1513 1514 1515struct imush { 1516 bus_addr_t maddr; 1517 int error; 1518}; 1519 1520static void 1521imc(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1522{ 1523 struct imush *imushp = (struct imush *) arg; 1524 1525 if 
(!(imushp->error = error)) 1526 imushp->maddr = segs[0].ds_addr; 1527} 1528 1529static int 1530isp_pci_mbxdma(ispsoftc_t *isp) 1531{ 1532 caddr_t base; 1533 uint32_t len, nsegs; 1534 int i, error, cmap = 0; 1535 bus_size_t slim; /* segment size */ 1536 bus_addr_t llim; /* low limit of unavailable dma */ 1537 bus_addr_t hlim; /* high limit of unavailable dma */ 1538 struct imush im; 1539 isp_ecmd_t *ecmd; 1540 1541 /* Already been here? If so, leave... */ 1542 if (isp->isp_xflist != NULL) 1543 return (0); 1544 if (isp->isp_rquest != NULL && isp->isp_maxcmds == 0) 1545 return (0); 1546 ISP_UNLOCK(isp); 1547 if (isp->isp_rquest != NULL) 1548 goto gotmaxcmds; 1549 1550 hlim = BUS_SPACE_MAXADDR; 1551 if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) { 1552 if (sizeof (bus_size_t) > 4) 1553 slim = (bus_size_t) (1ULL << 32); 1554 else 1555 slim = (bus_size_t) (1UL << 31); 1556 llim = BUS_SPACE_MAXADDR; 1557 } else { 1558 slim = (1UL << 24); 1559 llim = BUS_SPACE_MAXADDR_32BIT; 1560 } 1561 if (isp->isp_osinfo.sixtyfourbit) 1562 nsegs = ISP_NSEG64_MAX; 1563 else 1564 nsegs = ISP_NSEG_MAX; 1565 1566 if (isp_dma_tag_create(BUS_DMA_ROOTARG(ISP_PCD(isp)), 1, 1567 slim, llim, hlim, NULL, NULL, BUS_SPACE_MAXSIZE, nsegs, slim, 0, 1568 &isp->isp_osinfo.dmat)) { 1569 ISP_LOCK(isp); 1570 isp_prt(isp, ISP_LOGERR, "could not create master dma tag"); 1571 return (1); 1572 } 1573 1574 /* 1575 * Allocate and map the request queue and a region for external 1576 * DMA addressable command/status structures (22XX and later). 
1577 */ 1578 len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); 1579 if (isp->isp_type >= ISP_HA_FC_2200) 1580 len += (N_XCMDS * XCMD_SIZE); 1581 if (isp_dma_tag_create(isp->isp_osinfo.dmat, QENTRY_LEN, slim, 1582 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 1583 len, 1, len, 0, &isp->isp_osinfo.reqdmat)) { 1584 isp_prt(isp, ISP_LOGERR, "cannot create request DMA tag"); 1585 goto bad; 1586 } 1587 if (bus_dmamem_alloc(isp->isp_osinfo.reqdmat, (void **)&base, 1588 BUS_DMA_COHERENT, &isp->isp_osinfo.reqmap) != 0) { 1589 isp_prt(isp, ISP_LOGERR, "cannot allocate request DMA memory"); 1590 bus_dma_tag_destroy(isp->isp_osinfo.reqdmat); 1591 goto bad; 1592 } 1593 isp->isp_rquest = base; 1594 im.error = 0; 1595 if (bus_dmamap_load(isp->isp_osinfo.reqdmat, isp->isp_osinfo.reqmap, 1596 base, len, imc, &im, 0) || im.error) { 1597 isp_prt(isp, ISP_LOGERR, "error loading request DMA map %d", im.error); 1598 goto bad; 1599 } 1600 isp_prt(isp, ISP_LOGDEBUG0, "request area @ 0x%jx/0x%jx", 1601 (uintmax_t)im.maddr, (uintmax_t)len); 1602 isp->isp_rquest_dma = im.maddr; 1603 base += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); 1604 im.maddr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); 1605 if (isp->isp_type >= ISP_HA_FC_2200) { 1606 isp->isp_osinfo.ecmd_dma = im.maddr; 1607 isp->isp_osinfo.ecmd_free = (isp_ecmd_t *)base; 1608 isp->isp_osinfo.ecmd_base = isp->isp_osinfo.ecmd_free; 1609 for (ecmd = isp->isp_osinfo.ecmd_free; 1610 ecmd < &isp->isp_osinfo.ecmd_free[N_XCMDS]; ecmd++) { 1611 if (ecmd == &isp->isp_osinfo.ecmd_free[N_XCMDS - 1]) 1612 ecmd->next = NULL; 1613 else 1614 ecmd->next = ecmd + 1; 1615 } 1616 } 1617 1618 /* 1619 * Allocate and map the result queue. 
1620 */ 1621 len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); 1622 if (isp_dma_tag_create(isp->isp_osinfo.dmat, QENTRY_LEN, slim, 1623 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 1624 len, 1, len, 0, &isp->isp_osinfo.respdmat)) { 1625 isp_prt(isp, ISP_LOGERR, "cannot create response DMA tag"); 1626 goto bad; 1627 } 1628 if (bus_dmamem_alloc(isp->isp_osinfo.respdmat, (void **)&base, 1629 BUS_DMA_COHERENT, &isp->isp_osinfo.respmap) != 0) { 1630 isp_prt(isp, ISP_LOGERR, "cannot allocate response DMA memory"); 1631 bus_dma_tag_destroy(isp->isp_osinfo.respdmat); 1632 goto bad; 1633 } 1634 isp->isp_result = base; 1635 im.error = 0; 1636 if (bus_dmamap_load(isp->isp_osinfo.respdmat, isp->isp_osinfo.respmap, 1637 base, len, imc, &im, 0) || im.error) { 1638 isp_prt(isp, ISP_LOGERR, "error loading response DMA map %d", im.error); 1639 goto bad; 1640 } 1641 isp_prt(isp, ISP_LOGDEBUG0, "response area @ 0x%jx/0x%jx", 1642 (uintmax_t)im.maddr, (uintmax_t)len); 1643 isp->isp_result_dma = im.maddr; 1644 1645#ifdef ISP_TARGET_MODE 1646 /* 1647 * Allocate and map ATIO queue on 24xx with target mode. 
1648 */ 1649 if (IS_24XX(isp)) { 1650 len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); 1651 if (isp_dma_tag_create(isp->isp_osinfo.dmat, QENTRY_LEN, slim, 1652 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 1653 len, 1, len, 0, &isp->isp_osinfo.atiodmat)) { 1654 isp_prt(isp, ISP_LOGERR, "cannot create ATIO DMA tag"); 1655 goto bad; 1656 } 1657 if (bus_dmamem_alloc(isp->isp_osinfo.atiodmat, (void **)&base, 1658 BUS_DMA_COHERENT, &isp->isp_osinfo.atiomap) != 0) { 1659 isp_prt(isp, ISP_LOGERR, "cannot allocate ATIO DMA memory"); 1660 bus_dma_tag_destroy(isp->isp_osinfo.atiodmat); 1661 goto bad; 1662 } 1663 isp->isp_atioq = base; 1664 im.error = 0; 1665 if (bus_dmamap_load(isp->isp_osinfo.atiodmat, isp->isp_osinfo.atiomap, 1666 base, len, imc, &im, 0) || im.error) { 1667 isp_prt(isp, ISP_LOGERR, "error loading ATIO DMA map %d", im.error); 1668 goto bad; 1669 } 1670 isp_prt(isp, ISP_LOGDEBUG0, "ATIO area @ 0x%jx/0x%jx", 1671 (uintmax_t)im.maddr, (uintmax_t)len); 1672 isp->isp_atioq_dma = im.maddr; 1673 } 1674#endif 1675 1676 if (IS_FC(isp)) { 1677 if (isp_dma_tag_create(isp->isp_osinfo.dmat, 64, slim, 1678 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 1679 2*QENTRY_LEN, 1, 2*QENTRY_LEN, 0, &isp->isp_osinfo.iocbdmat)) { 1680 goto bad; 1681 } 1682 if (bus_dmamem_alloc(isp->isp_osinfo.iocbdmat, 1683 (void **)&base, BUS_DMA_COHERENT, &isp->isp_osinfo.iocbmap) != 0) 1684 goto bad; 1685 isp->isp_iocb = base; 1686 im.error = 0; 1687 if (bus_dmamap_load(isp->isp_osinfo.iocbdmat, isp->isp_osinfo.iocbmap, 1688 base, 2*QENTRY_LEN, imc, &im, 0) || im.error) 1689 goto bad; 1690 isp->isp_iocb_dma = im.maddr; 1691 1692 if (isp_dma_tag_create(isp->isp_osinfo.dmat, 64, slim, 1693 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 1694 ISP_FC_SCRLEN, 1, ISP_FC_SCRLEN, 0, &isp->isp_osinfo.scdmat)) 1695 goto bad; 1696 for (cmap = 0; cmap < isp->isp_nchan; cmap++) { 1697 struct isp_fc *fc = ISP_FC_PC(isp, cmap); 1698 if (bus_dmamem_alloc(isp->isp_osinfo.scdmat, 1699 (void **)&base, 
BUS_DMA_COHERENT, &fc->scmap) != 0) 1700 goto bad; 1701 FCPARAM(isp, cmap)->isp_scratch = base; 1702 im.error = 0; 1703 if (bus_dmamap_load(isp->isp_osinfo.scdmat, fc->scmap, 1704 base, ISP_FC_SCRLEN, imc, &im, 0) || im.error) { 1705 bus_dmamem_free(isp->isp_osinfo.scdmat, 1706 base, fc->scmap); 1707 FCPARAM(isp, cmap)->isp_scratch = NULL; 1708 goto bad; 1709 } 1710 FCPARAM(isp, cmap)->isp_scdma = im.maddr; 1711 if (!IS_2100(isp)) { 1712 for (i = 0; i < INITIAL_NEXUS_COUNT; i++) { 1713 struct isp_nexus *n = malloc(sizeof (struct isp_nexus), M_DEVBUF, M_NOWAIT | M_ZERO); 1714 if (n == NULL) { 1715 while (fc->nexus_free_list) { 1716 n = fc->nexus_free_list; 1717 fc->nexus_free_list = n->next; 1718 free(n, M_DEVBUF); 1719 } 1720 goto bad; 1721 } 1722 n->next = fc->nexus_free_list; 1723 fc->nexus_free_list = n; 1724 } 1725 } 1726 } 1727 } 1728 1729 if (isp->isp_maxcmds == 0) { 1730 ISP_LOCK(isp); 1731 return (0); 1732 } 1733 1734gotmaxcmds: 1735 len = isp->isp_maxcmds * sizeof (struct isp_pcmd); 1736 isp->isp_osinfo.pcmd_pool = (struct isp_pcmd *) 1737 malloc(len, M_DEVBUF, M_WAITOK | M_ZERO); 1738 for (i = 0; i < isp->isp_maxcmds; i++) { 1739 struct isp_pcmd *pcmd = &isp->isp_osinfo.pcmd_pool[i]; 1740 error = bus_dmamap_create(isp->isp_osinfo.dmat, 0, &pcmd->dmap); 1741 if (error) { 1742 isp_prt(isp, ISP_LOGERR, "error %d creating per-cmd DMA maps", error); 1743 while (--i >= 0) { 1744 bus_dmamap_destroy(isp->isp_osinfo.dmat, 1745 isp->isp_osinfo.pcmd_pool[i].dmap); 1746 } 1747 goto bad; 1748 } 1749 callout_init_mtx(&pcmd->wdog, &isp->isp_osinfo.lock, 0); 1750 if (i == isp->isp_maxcmds-1) 1751 pcmd->next = NULL; 1752 else 1753 pcmd->next = &isp->isp_osinfo.pcmd_pool[i+1]; 1754 } 1755 isp->isp_osinfo.pcmd_free = &isp->isp_osinfo.pcmd_pool[0]; 1756 1757 len = sizeof (isp_hdl_t) * isp->isp_maxcmds; 1758 isp->isp_xflist = (isp_hdl_t *) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO); 1759 for (len = 0; len < isp->isp_maxcmds - 1; len++) 1760 isp->isp_xflist[len].cmd = 
&isp->isp_xflist[len+1]; 1761 isp->isp_xffree = isp->isp_xflist; 1762 1763 ISP_LOCK(isp); 1764 return (0); 1765 1766bad: 1767 isp_pci_mbxdmafree(isp); 1768 ISP_LOCK(isp); 1769 return (1); 1770} 1771 1772static void 1773isp_pci_mbxdmafree(ispsoftc_t *isp) 1774{ 1775 int i; 1776 1777 if (isp->isp_xflist != NULL) { 1778 free(isp->isp_xflist, M_DEVBUF); 1779 isp->isp_xflist = NULL; 1780 } 1781 if (isp->isp_osinfo.pcmd_pool != NULL) { 1782 for (i = 0; i < isp->isp_maxcmds; i++) { 1783 bus_dmamap_destroy(isp->isp_osinfo.dmat, 1784 isp->isp_osinfo.pcmd_pool[i].dmap); 1785 } 1786 free(isp->isp_osinfo.pcmd_pool, M_DEVBUF); 1787 isp->isp_osinfo.pcmd_pool = NULL; 1788 } 1789 if (IS_FC(isp)) { 1790 for (i = 0; i < isp->isp_nchan; i++) { 1791 struct isp_fc *fc = ISP_FC_PC(isp, i); 1792 if (FCPARAM(isp, i)->isp_scdma != 0) { 1793 bus_dmamap_unload(isp->isp_osinfo.scdmat, 1794 fc->scmap); 1795 FCPARAM(isp, i)->isp_scdma = 0; 1796 } 1797 if (FCPARAM(isp, i)->isp_scratch != NULL) { 1798 bus_dmamem_free(isp->isp_osinfo.scdmat, 1799 FCPARAM(isp, i)->isp_scratch, fc->scmap); 1800 FCPARAM(isp, i)->isp_scratch = NULL; 1801 } 1802 while (fc->nexus_free_list) { 1803 struct isp_nexus *n = fc->nexus_free_list; 1804 fc->nexus_free_list = n->next; 1805 free(n, M_DEVBUF); 1806 } 1807 } 1808 if (isp->isp_iocb_dma != 0) { 1809 bus_dma_tag_destroy(isp->isp_osinfo.scdmat); 1810 bus_dmamap_unload(isp->isp_osinfo.iocbdmat, 1811 isp->isp_osinfo.iocbmap); 1812 isp->isp_iocb_dma = 0; 1813 } 1814 if (isp->isp_iocb != NULL) { 1815 bus_dmamem_free(isp->isp_osinfo.iocbdmat, 1816 isp->isp_iocb, isp->isp_osinfo.iocbmap); 1817 bus_dma_tag_destroy(isp->isp_osinfo.iocbdmat); 1818 } 1819 } 1820#ifdef ISP_TARGET_MODE 1821 if (IS_24XX(isp)) { 1822 if (isp->isp_atioq_dma != 0) { 1823 bus_dmamap_unload(isp->isp_osinfo.atiodmat, 1824 isp->isp_osinfo.atiomap); 1825 isp->isp_atioq_dma = 0; 1826 } 1827 if (isp->isp_atioq != NULL) { 1828 bus_dmamem_free(isp->isp_osinfo.atiodmat, isp->isp_atioq, 1829 
isp->isp_osinfo.atiomap); 1830 bus_dma_tag_destroy(isp->isp_osinfo.atiodmat); 1831 isp->isp_atioq = NULL; 1832 } 1833 } 1834#endif 1835 if (isp->isp_result_dma != 0) { 1836 bus_dmamap_unload(isp->isp_osinfo.respdmat, 1837 isp->isp_osinfo.respmap); 1838 isp->isp_result_dma = 0; 1839 } 1840 if (isp->isp_result != NULL) { 1841 bus_dmamem_free(isp->isp_osinfo.respdmat, isp->isp_result, 1842 isp->isp_osinfo.respmap); 1843 bus_dma_tag_destroy(isp->isp_osinfo.respdmat); 1844 isp->isp_result = NULL; 1845 } 1846 if (isp->isp_rquest_dma != 0) { 1847 bus_dmamap_unload(isp->isp_osinfo.reqdmat, 1848 isp->isp_osinfo.reqmap); 1849 isp->isp_rquest_dma = 0; 1850 } 1851 if (isp->isp_rquest != NULL) { 1852 bus_dmamem_free(isp->isp_osinfo.reqdmat, isp->isp_rquest, 1853 isp->isp_osinfo.reqmap); 1854 bus_dma_tag_destroy(isp->isp_osinfo.reqdmat); 1855 isp->isp_rquest = NULL; 1856 } 1857} 1858 1859typedef struct { 1860 ispsoftc_t *isp; 1861 void *cmd_token; 1862 void *rq; /* original request */ 1863 int error; 1864 bus_size_t mapsize; 1865} mush_t; 1866 1867#define MUSHERR_NOQENTRIES -2 1868 1869#ifdef ISP_TARGET_MODE 1870static void tdma2_2(void *, bus_dma_segment_t *, int, bus_size_t, int); 1871static void tdma2(void *, bus_dma_segment_t *, int, int); 1872 1873static void 1874tdma2_2(void *arg, bus_dma_segment_t *dm_segs, int nseg, bus_size_t mapsize, int error) 1875{ 1876 mush_t *mp; 1877 mp = (mush_t *)arg; 1878 mp->mapsize = mapsize; 1879 tdma2(arg, dm_segs, nseg, error); 1880} 1881 1882static void 1883tdma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 1884{ 1885 mush_t *mp; 1886 ispsoftc_t *isp; 1887 struct ccb_scsiio *csio; 1888 isp_ddir_t ddir; 1889 ispreq_t *rq; 1890 1891 mp = (mush_t *) arg; 1892 if (error) { 1893 mp->error = error; 1894 return; 1895 } 1896 csio = mp->cmd_token; 1897 isp = mp->isp; 1898 rq = mp->rq; 1899 if (nseg) { 1900 if (isp->isp_osinfo.sixtyfourbit) { 1901 if (nseg >= ISP_NSEG64_MAX) { 1902 isp_prt(isp, ISP_LOGERR, "number of segments (%d) 
exceed maximum we can support (%d)", nseg, ISP_NSEG64_MAX); 1903 mp->error = EFAULT; 1904 return; 1905 } 1906 if (rq->req_header.rqs_entry_type == RQSTYPE_CTIO2) { 1907 rq->req_header.rqs_entry_type = RQSTYPE_CTIO3; 1908 } 1909 } else { 1910 if (nseg >= ISP_NSEG_MAX) { 1911 isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceed maximum we can support (%d)", nseg, ISP_NSEG_MAX); 1912 mp->error = EFAULT; 1913 return; 1914 } 1915 } 1916 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1917 bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREWRITE); 1918 ddir = ISP_TO_DEVICE; 1919 } else if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { 1920 bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREREAD); 1921 ddir = ISP_FROM_DEVICE; 1922 } else { 1923 dm_segs = NULL; 1924 nseg = 0; 1925 ddir = ISP_NOXFR; 1926 } 1927 } else { 1928 dm_segs = NULL; 1929 nseg = 0; 1930 ddir = ISP_NOXFR; 1931 } 1932 1933 error = isp_send_tgt_cmd(isp, rq, dm_segs, nseg, XS_XFRLEN(csio), ddir, &csio->sense_data, csio->sense_len); 1934 switch (error) { 1935 case CMD_EAGAIN: 1936 mp->error = MUSHERR_NOQENTRIES; 1937 case CMD_QUEUED: 1938 break; 1939 default: 1940 mp->error = EIO; 1941 } 1942} 1943#endif 1944 1945static void dma2_2(void *, bus_dma_segment_t *, int, bus_size_t, int); 1946static void dma2(void *, bus_dma_segment_t *, int, int); 1947 1948static void 1949dma2_2(void *arg, bus_dma_segment_t *dm_segs, int nseg, bus_size_t mapsize, int error) 1950{ 1951 mush_t *mp; 1952 mp = (mush_t *)arg; 1953 mp->mapsize = mapsize; 1954 dma2(arg, dm_segs, nseg, error); 1955} 1956 1957static void 1958dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 1959{ 1960 mush_t *mp; 1961 ispsoftc_t *isp; 1962 struct ccb_scsiio *csio; 1963 isp_ddir_t ddir; 1964 ispreq_t *rq; 1965 1966 mp = (mush_t *) arg; 1967 if (error) { 1968 mp->error = error; 1969 return; 1970 } 1971 csio = mp->cmd_token; 1972 isp = mp->isp; 1973 rq = mp->rq; 1974 if 
(nseg) { 1975 if (isp->isp_osinfo.sixtyfourbit) { 1976 if (nseg >= ISP_NSEG64_MAX) { 1977 isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceed maximum we can support (%d)", nseg, ISP_NSEG64_MAX); 1978 mp->error = EFAULT; 1979 return; 1980 } 1981 if (rq->req_header.rqs_entry_type == RQSTYPE_T2RQS) { 1982 rq->req_header.rqs_entry_type = RQSTYPE_T3RQS; 1983 } else if (rq->req_header.rqs_entry_type == RQSTYPE_REQUEST) { 1984 rq->req_header.rqs_entry_type = RQSTYPE_A64; 1985 } 1986 } else { 1987 if (nseg >= ISP_NSEG_MAX) { 1988 isp_prt(isp, ISP_LOGERR, "number of segments (%d) exceed maximum we can support (%d)", nseg, ISP_NSEG_MAX); 1989 mp->error = EFAULT; 1990 return; 1991 } 1992 } 1993 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1994 bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREREAD); 1995 ddir = ISP_FROM_DEVICE; 1996 } else if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { 1997 bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, BUS_DMASYNC_PREWRITE); 1998 ddir = ISP_TO_DEVICE; 1999 } else { 2000 ddir = ISP_NOXFR; 2001 } 2002 } else { 2003 dm_segs = NULL; 2004 nseg = 0; 2005 ddir = ISP_NOXFR; 2006 } 2007 2008 error = isp_send_cmd(isp, rq, dm_segs, nseg, XS_XFRLEN(csio), ddir, (ispds64_t *)csio->req_map); 2009 switch (error) { 2010 case CMD_EAGAIN: 2011 mp->error = MUSHERR_NOQENTRIES; 2012 break; 2013 case CMD_QUEUED: 2014 break; 2015 default: 2016 mp->error = EIO; 2017 break; 2018 } 2019} 2020 2021static int 2022isp_pci_dmasetup(ispsoftc_t *isp, struct ccb_scsiio *csio, void *ff) 2023{ 2024 mush_t mush, *mp; 2025 void (*eptr)(void *, bus_dma_segment_t *, int, int); 2026 void (*eptr2)(void *, bus_dma_segment_t *, int, bus_size_t, int); 2027 int error; 2028 2029 mp = &mush; 2030 mp->isp = isp; 2031 mp->cmd_token = csio; 2032 mp->rq = ff; 2033 mp->error = 0; 2034 mp->mapsize = 0; 2035 2036#ifdef ISP_TARGET_MODE 2037 if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) { 2038 eptr = tdma2; 2039 eptr2 = tdma2_2; 
2040 } else 2041#endif 2042 { 2043 eptr = dma2; 2044 eptr2 = dma2_2; 2045 } 2046 2047 2048 error = bus_dmamap_load_ccb(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, 2049 (union ccb *)csio, eptr, mp, 0); 2050 if (error == EINPROGRESS) { 2051 bus_dmamap_unload(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap); 2052 mp->error = EINVAL; 2053 isp_prt(isp, ISP_LOGERR, "deferred dma allocation not supported"); 2054 } else if (error && mp->error == 0) { 2055#ifdef DIAGNOSTIC 2056 isp_prt(isp, ISP_LOGERR, "error %d in dma mapping code", error); 2057#endif 2058 mp->error = error; 2059 } 2060 if (mp->error) { 2061 int retval = CMD_COMPLETE; 2062 if (mp->error == MUSHERR_NOQENTRIES) { 2063 retval = CMD_EAGAIN; 2064 } else if (mp->error == EFBIG) { 2065 csio->ccb_h.status = CAM_REQ_TOO_BIG; 2066 } else if (mp->error == EINVAL) { 2067 csio->ccb_h.status = CAM_REQ_INVALID; 2068 } else { 2069 csio->ccb_h.status = CAM_UNREC_HBA_ERROR; 2070 } 2071 return (retval); 2072 } 2073 return (CMD_QUEUED); 2074} 2075 2076static int 2077isp_pci_irqsetup(ispsoftc_t *isp) 2078{ 2079 2080 return (0); 2081} 2082 2083static void 2084isp_pci_dumpregs(ispsoftc_t *isp, const char *msg) 2085{ 2086 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp; 2087 if (msg) 2088 printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg); 2089 else 2090 printf("%s:\n", device_get_nameunit(isp->isp_dev)); 2091 if (IS_SCSI(isp)) 2092 printf(" biu_conf1=%x", ISP_READ(isp, BIU_CONF1)); 2093 else 2094 printf(" biu_csr=%x", ISP_READ(isp, BIU2100_CSR)); 2095 printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR), 2096 ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA)); 2097 printf("risc_hccr=%x\n", ISP_READ(isp, HCCR)); 2098 2099 2100 if (IS_SCSI(isp)) { 2101 ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE); 2102 printf(" cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n", 2103 ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS), 2104 ISP_READ(isp, CDMA_FIFO_STS)); 2105 printf(" ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n", 2106 
ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS), 2107 ISP_READ(isp, DDMA_FIFO_STS)); 2108 printf(" sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n", 2109 ISP_READ(isp, SXP_INTERRUPT), 2110 ISP_READ(isp, SXP_GROSS_ERR), 2111 ISP_READ(isp, SXP_PINS_CTRL)); 2112 ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE); 2113 } 2114 printf(" mbox regs: %x %x %x %x %x\n", 2115 ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1), 2116 ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3), 2117 ISP_READ(isp, OUTMAILBOX4)); 2118 printf(" PCI Status Command/Status=%x\n", 2119 pci_read_config(pcs->pci_dev, PCIR_COMMAND, 1)); 2120} 2121