/*	isp_pci.c revision 166895	*/
/*-
 *
 * Copyright (c) 1997-2006 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */
/*
 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
 * FreeBSD Version.
31 */ 32#include <sys/cdefs.h> 33__FBSDID("$FreeBSD: head/sys/dev/isp/isp_pci.c 166895 2007-02-23 05:42:41Z mjacob $"); 34 35#include <sys/param.h> 36#include <sys/systm.h> 37#include <sys/kernel.h> 38#include <sys/module.h> 39#if __FreeBSD_version >= 700000 40#include <sys/linker.h> 41#include <sys/firmware.h> 42#endif 43#include <sys/bus.h> 44#if __FreeBSD_version < 500000 45#include <pci/pcireg.h> 46#include <pci/pcivar.h> 47#include <machine/bus_memio.h> 48#include <machine/bus_pio.h> 49#else 50#include <sys/stdint.h> 51#include <dev/pci/pcireg.h> 52#include <dev/pci/pcivar.h> 53#endif 54#include <machine/bus.h> 55#include <machine/resource.h> 56#include <sys/rman.h> 57#include <sys/malloc.h> 58 59#include <dev/isp/isp_freebsd.h> 60 61#if __FreeBSD_version < 500000 62#define BUS_PROBE_DEFAULT 0 63#endif 64 65static uint32_t isp_pci_rd_reg(ispsoftc_t *, int); 66static void isp_pci_wr_reg(ispsoftc_t *, int, uint32_t); 67static uint32_t isp_pci_rd_reg_1080(ispsoftc_t *, int); 68static void isp_pci_wr_reg_1080(ispsoftc_t *, int, uint32_t); 69static uint32_t isp_pci_rd_reg_2400(ispsoftc_t *, int); 70static void isp_pci_wr_reg_2400(ispsoftc_t *, int, uint32_t); 71static int 72isp_pci_rd_isr(ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *); 73static int 74isp_pci_rd_isr_2300(ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *); 75static int 76isp_pci_rd_isr_2400(ispsoftc_t *, uint32_t *, uint16_t *, uint16_t *); 77static int isp_pci_mbxdma(ispsoftc_t *); 78static int 79isp_pci_dmasetup(ispsoftc_t *, XS_T *, ispreq_t *, uint32_t *, uint32_t); 80static void 81isp_pci_dmateardown(ispsoftc_t *, XS_T *, uint32_t); 82 83 84static void isp_pci_reset0(ispsoftc_t *); 85static void isp_pci_reset1(ispsoftc_t *); 86static void isp_pci_dumpregs(ispsoftc_t *, const char *); 87 88static struct ispmdvec mdvec = { 89 isp_pci_rd_isr, 90 isp_pci_rd_reg, 91 isp_pci_wr_reg, 92 isp_pci_mbxdma, 93 isp_pci_dmasetup, 94 isp_pci_dmateardown, 95 isp_pci_reset0, 96 isp_pci_reset1, 97 
isp_pci_dumpregs, 98 NULL, 99 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 100}; 101 102static struct ispmdvec mdvec_1080 = { 103 isp_pci_rd_isr, 104 isp_pci_rd_reg_1080, 105 isp_pci_wr_reg_1080, 106 isp_pci_mbxdma, 107 isp_pci_dmasetup, 108 isp_pci_dmateardown, 109 isp_pci_reset0, 110 isp_pci_reset1, 111 isp_pci_dumpregs, 112 NULL, 113 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 114}; 115 116static struct ispmdvec mdvec_12160 = { 117 isp_pci_rd_isr, 118 isp_pci_rd_reg_1080, 119 isp_pci_wr_reg_1080, 120 isp_pci_mbxdma, 121 isp_pci_dmasetup, 122 isp_pci_dmateardown, 123 isp_pci_reset0, 124 isp_pci_reset1, 125 isp_pci_dumpregs, 126 NULL, 127 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 128}; 129 130static struct ispmdvec mdvec_2100 = { 131 isp_pci_rd_isr, 132 isp_pci_rd_reg, 133 isp_pci_wr_reg, 134 isp_pci_mbxdma, 135 isp_pci_dmasetup, 136 isp_pci_dmateardown, 137 isp_pci_reset0, 138 isp_pci_reset1, 139 isp_pci_dumpregs 140}; 141 142static struct ispmdvec mdvec_2200 = { 143 isp_pci_rd_isr, 144 isp_pci_rd_reg, 145 isp_pci_wr_reg, 146 isp_pci_mbxdma, 147 isp_pci_dmasetup, 148 isp_pci_dmateardown, 149 isp_pci_reset0, 150 isp_pci_reset1, 151 isp_pci_dumpregs 152}; 153 154static struct ispmdvec mdvec_2300 = { 155 isp_pci_rd_isr_2300, 156 isp_pci_rd_reg, 157 isp_pci_wr_reg, 158 isp_pci_mbxdma, 159 isp_pci_dmasetup, 160 isp_pci_dmateardown, 161 isp_pci_reset0, 162 isp_pci_reset1, 163 isp_pci_dumpregs 164}; 165 166static struct ispmdvec mdvec_2400 = { 167 isp_pci_rd_isr_2400, 168 isp_pci_rd_reg_2400, 169 isp_pci_wr_reg_2400, 170 isp_pci_mbxdma, 171 isp_pci_dmasetup, 172 isp_pci_dmateardown, 173 isp_pci_reset0, 174 isp_pci_reset1, 175 NULL 176}; 177 178#ifndef PCIM_CMD_INVEN 179#define PCIM_CMD_INVEN 0x10 180#endif 181#ifndef PCIM_CMD_BUSMASTEREN 182#define PCIM_CMD_BUSMASTEREN 0x0004 183#endif 184#ifndef PCIM_CMD_PERRESPEN 185#define PCIM_CMD_PERRESPEN 0x0040 186#endif 187#ifndef PCIM_CMD_SEREN 188#define PCIM_CMD_SEREN 0x0100 189#endif 190#ifndef PCIM_CMD_INTX_DISABLE 191#define 
PCIM_CMD_INTX_DISABLE 0x0400 192#endif 193 194#ifndef PCIR_COMMAND 195#define PCIR_COMMAND 0x04 196#endif 197 198#ifndef PCIR_CACHELNSZ 199#define PCIR_CACHELNSZ 0x0c 200#endif 201 202#ifndef PCIR_LATTIMER 203#define PCIR_LATTIMER 0x0d 204#endif 205 206#ifndef PCIR_ROMADDR 207#define PCIR_ROMADDR 0x30 208#endif 209 210#ifndef PCI_VENDOR_QLOGIC 211#define PCI_VENDOR_QLOGIC 0x1077 212#endif 213 214#ifndef PCI_PRODUCT_QLOGIC_ISP1020 215#define PCI_PRODUCT_QLOGIC_ISP1020 0x1020 216#endif 217 218#ifndef PCI_PRODUCT_QLOGIC_ISP1080 219#define PCI_PRODUCT_QLOGIC_ISP1080 0x1080 220#endif 221 222#ifndef PCI_PRODUCT_QLOGIC_ISP10160 223#define PCI_PRODUCT_QLOGIC_ISP10160 0x1016 224#endif 225 226#ifndef PCI_PRODUCT_QLOGIC_ISP12160 227#define PCI_PRODUCT_QLOGIC_ISP12160 0x1216 228#endif 229 230#ifndef PCI_PRODUCT_QLOGIC_ISP1240 231#define PCI_PRODUCT_QLOGIC_ISP1240 0x1240 232#endif 233 234#ifndef PCI_PRODUCT_QLOGIC_ISP1280 235#define PCI_PRODUCT_QLOGIC_ISP1280 0x1280 236#endif 237 238#ifndef PCI_PRODUCT_QLOGIC_ISP2100 239#define PCI_PRODUCT_QLOGIC_ISP2100 0x2100 240#endif 241 242#ifndef PCI_PRODUCT_QLOGIC_ISP2200 243#define PCI_PRODUCT_QLOGIC_ISP2200 0x2200 244#endif 245 246#ifndef PCI_PRODUCT_QLOGIC_ISP2300 247#define PCI_PRODUCT_QLOGIC_ISP2300 0x2300 248#endif 249 250#ifndef PCI_PRODUCT_QLOGIC_ISP2312 251#define PCI_PRODUCT_QLOGIC_ISP2312 0x2312 252#endif 253 254#ifndef PCI_PRODUCT_QLOGIC_ISP2322 255#define PCI_PRODUCT_QLOGIC_ISP2322 0x2322 256#endif 257 258#ifndef PCI_PRODUCT_QLOGIC_ISP2422 259#define PCI_PRODUCT_QLOGIC_ISP2422 0x2422 260#endif 261 262#ifndef PCI_PRODUCT_QLOGIC_ISP2432 263#define PCI_PRODUCT_QLOGIC_ISP2432 0x2432 264#endif 265 266#ifndef PCI_PRODUCT_QLOGIC_ISP6312 267#define PCI_PRODUCT_QLOGIC_ISP6312 0x6312 268#endif 269 270#ifndef PCI_PRODUCT_QLOGIC_ISP6322 271#define PCI_PRODUCT_QLOGIC_ISP6322 0x6322 272#endif 273 274 275#define PCI_QLOGIC_ISP1020 \ 276 ((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC) 277 278#define PCI_QLOGIC_ISP1080 \ 279 
((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC) 280 281#define PCI_QLOGIC_ISP10160 \ 282 ((PCI_PRODUCT_QLOGIC_ISP10160 << 16) | PCI_VENDOR_QLOGIC) 283 284#define PCI_QLOGIC_ISP12160 \ 285 ((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC) 286 287#define PCI_QLOGIC_ISP1240 \ 288 ((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC) 289 290#define PCI_QLOGIC_ISP1280 \ 291 ((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC) 292 293#define PCI_QLOGIC_ISP2100 \ 294 ((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC) 295 296#define PCI_QLOGIC_ISP2200 \ 297 ((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC) 298 299#define PCI_QLOGIC_ISP2300 \ 300 ((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC) 301 302#define PCI_QLOGIC_ISP2312 \ 303 ((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC) 304 305#define PCI_QLOGIC_ISP2322 \ 306 ((PCI_PRODUCT_QLOGIC_ISP2322 << 16) | PCI_VENDOR_QLOGIC) 307 308#define PCI_QLOGIC_ISP2422 \ 309 ((PCI_PRODUCT_QLOGIC_ISP2422 << 16) | PCI_VENDOR_QLOGIC) 310 311#define PCI_QLOGIC_ISP2432 \ 312 ((PCI_PRODUCT_QLOGIC_ISP2432 << 16) | PCI_VENDOR_QLOGIC) 313 314#define PCI_QLOGIC_ISP6312 \ 315 ((PCI_PRODUCT_QLOGIC_ISP6312 << 16) | PCI_VENDOR_QLOGIC) 316 317#define PCI_QLOGIC_ISP6322 \ 318 ((PCI_PRODUCT_QLOGIC_ISP6322 << 16) | PCI_VENDOR_QLOGIC) 319 320/* 321 * Odd case for some AMI raid cards... We need to *not* attach to this. 
322 */ 323#define AMI_RAID_SUBVENDOR_ID 0x101e 324 325#define IO_MAP_REG 0x10 326#define MEM_MAP_REG 0x14 327 328#define PCI_DFLT_LTNCY 0x40 329#define PCI_DFLT_LNSZ 0x10 330 331static int isp_pci_probe (device_t); 332static int isp_pci_attach (device_t); 333static int isp_pci_detach (device_t); 334 335 336struct isp_pcisoftc { 337 ispsoftc_t pci_isp; 338 device_t pci_dev; 339 struct resource * pci_reg; 340 bus_space_tag_t pci_st; 341 bus_space_handle_t pci_sh; 342 void * ih; 343 int16_t pci_poff[_NREG_BLKS]; 344 bus_dma_tag_t dmat; 345 bus_dmamap_t *dmaps; 346}; 347 348 349static device_method_t isp_pci_methods[] = { 350 /* Device interface */ 351 DEVMETHOD(device_probe, isp_pci_probe), 352 DEVMETHOD(device_attach, isp_pci_attach), 353 DEVMETHOD(device_detach, isp_pci_detach), 354 { 0, 0 } 355}; 356static void isp_pci_intr(void *); 357 358static driver_t isp_pci_driver = { 359 "isp", isp_pci_methods, sizeof (struct isp_pcisoftc) 360}; 361static devclass_t isp_devclass; 362DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0); 363#if __FreeBSD_version < 700000 364extern ispfwfunc *isp_get_firmware_p; 365#endif 366 367static int 368isp_pci_probe(device_t dev) 369{ 370 switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) { 371 case PCI_QLOGIC_ISP1020: 372 device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter"); 373 break; 374 case PCI_QLOGIC_ISP1080: 375 device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter"); 376 break; 377 case PCI_QLOGIC_ISP1240: 378 device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter"); 379 break; 380 case PCI_QLOGIC_ISP1280: 381 device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter"); 382 break; 383 case PCI_QLOGIC_ISP10160: 384 device_set_desc(dev, "Qlogic ISP 10160 PCI SCSI Adapter"); 385 break; 386 case PCI_QLOGIC_ISP12160: 387 if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) { 388 return (ENXIO); 389 } 390 device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter"); 391 break; 392 case PCI_QLOGIC_ISP2100: 393 
device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter"); 394 break; 395 case PCI_QLOGIC_ISP2200: 396 device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter"); 397 break; 398 case PCI_QLOGIC_ISP2300: 399 device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter"); 400 break; 401 case PCI_QLOGIC_ISP2312: 402 device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter"); 403 break; 404 case PCI_QLOGIC_ISP2322: 405 device_set_desc(dev, "Qlogic ISP 2322 PCI FC-AL Adapter"); 406 break; 407 case PCI_QLOGIC_ISP2422: 408 device_set_desc(dev, "Qlogic ISP 2422 PCI FC-AL Adapter"); 409 break; 410 case PCI_QLOGIC_ISP2432: 411 device_set_desc(dev, "Qlogic ISP 2432 PCI FC-AL Adapter"); 412 break; 413 case PCI_QLOGIC_ISP6312: 414 device_set_desc(dev, "Qlogic ISP 6312 PCI FC-AL Adapter"); 415 break; 416 case PCI_QLOGIC_ISP6322: 417 device_set_desc(dev, "Qlogic ISP 6322 PCI FC-AL Adapter"); 418 break; 419 default: 420 return (ENXIO); 421 } 422 if (isp_announced == 0 && bootverbose) { 423 printf("Qlogic ISP Driver, FreeBSD Version %d.%d, " 424 "Core Version %d.%d\n", 425 ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR, 426 ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR); 427 isp_announced++; 428 } 429 /* 430 * XXXX: Here is where we might load the f/w module 431 * XXXX: (or increase a reference count to it). 
432 */ 433 return (BUS_PROBE_DEFAULT); 434} 435 436#if __FreeBSD_version < 500000 437static void 438isp_get_generic_options(device_t dev, ispsoftc_t *isp) 439{ 440 uint64_t wwn; 441 int bitmap, unit; 442 443 unit = device_get_unit(dev); 444 if (getenv_int("isp_disable", &bitmap)) { 445 if (bitmap & (1 << unit)) { 446 isp->isp_osinfo.disabled = 1; 447 return; 448 } 449 } 450 if (getenv_int("isp_no_fwload", &bitmap)) { 451 if (bitmap & (1 << unit)) 452 isp->isp_confopts |= ISP_CFG_NORELOAD; 453 } 454 if (getenv_int("isp_fwload", &bitmap)) { 455 if (bitmap & (1 << unit)) 456 isp->isp_confopts &= ~ISP_CFG_NORELOAD; 457 } 458 if (getenv_int("isp_no_nvram", &bitmap)) { 459 if (bitmap & (1 << unit)) 460 isp->isp_confopts |= ISP_CFG_NONVRAM; 461 } 462 if (getenv_int("isp_nvram", &bitmap)) { 463 if (bitmap & (1 << unit)) 464 isp->isp_confopts &= ~ISP_CFG_NONVRAM; 465 } 466 467 bitmap = 0; 468 (void) getenv_int("isp_debug", &bitmap); 469 if (bitmap) { 470 isp->isp_dblev = bitmap; 471 } else { 472 isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR; 473 } 474 if (bootverbose) { 475 isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO; 476 } 477 478 bitmap = 0; 479 if (getenv_int("role", &bitmap)) { 480 isp->isp_role = bitmap; 481 } else { 482 isp->isp_role = ISP_DEFAULT_ROLES; 483 } 484 485} 486 487static void 488isp_get_pci_options(device_t dev, int *m1, int *m2) 489{ 490 int bitmap; 491 int unit = device_get_unit(dev); 492 493 *m1 = PCIM_CMD_MEMEN; 494 *m2 = PCIM_CMD_PORTEN; 495 if (getenv_int("isp_mem_map", &bitmap)) { 496 if (bitmap & (1 << unit)) { 497 *m1 = PCIM_CMD_MEMEN; 498 *m2 = PCIM_CMD_PORTEN; 499 } 500 } 501 bitmap = 0; 502 if (getenv_int("isp_io_map", &bitmap)) { 503 if (bitmap & (1 << unit)) { 504 *m1 = PCIM_CMD_PORTEN; 505 *m2 = PCIM_CMD_MEMEN; 506 } 507 } 508} 509 510static void 511isp_get_specific_options(device_t dev, ispsoftc_t *isp) 512{ 513 514 callout_handle_init(&isp->isp_osinfo.ldt); 515 callout_handle_init(&isp->isp_osinfo.gdt); 516 517 if (IS_SCSI(isp)) { 518 return; 
519 } 520 521 if (getenv_int("isp_fcduplex", &bitmap)) { 522 if (bitmap & (1 << unit)) 523 isp->isp_confopts |= ISP_CFG_FULL_DUPLEX; 524 } 525 if (getenv_int("isp_no_fcduplex", &bitmap)) { 526 if (bitmap & (1 << unit)) 527 isp->isp_confopts &= ~ISP_CFG_FULL_DUPLEX; 528 } 529 if (getenv_int("isp_nport", &bitmap)) { 530 if (bitmap & (1 << unit)) 531 isp->isp_confopts |= ISP_CFG_NPORT; 532 } 533 534 /* 535 * Because the resource_*_value functions can neither return 536 * 64 bit integer values, nor can they be directly coerced 537 * to interpret the right hand side of the assignment as 538 * you want them to interpret it, we have to force WWN 539 * hint replacement to specify WWN strings with a leading 540 * 'w' (e..g w50000000aaaa0001). Sigh. 541 */ 542 if (getenv_quad("isp_portwwn", &wwn)) { 543 isp->isp_osinfo.default_port_wwn = wwn; 544 isp->isp_confopts |= ISP_CFG_OWNWWPN; 545 } 546 if (isp->isp_osinfo.default_port_wwn == 0) { 547 isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull; 548 } 549 550 if (getenv_quad("isp_nodewwn", &wwn)) { 551 isp->isp_osinfo.default_node_wwn = wwn; 552 isp->isp_confopts |= ISP_CFG_OWNWWNN; 553 } 554 if (isp->isp_osinfo.default_node_wwn == 0) { 555 isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull; 556 } 557 558 bitmap = 0; 559 (void) getenv_int("isp_fabric_hysteresis", &bitmap); 560 if (bitmap >= 0 && bitmap < 256) { 561 isp->isp_osinfo.hysteresis = bitmap; 562 } else { 563 isp->isp_osinfo.hysteresis = isp_fabric_hysteresis; 564 } 565 566 bitmap = 0; 567 (void) getenv_int("isp_loop_down_limit", &bitmap); 568 if (bitmap >= 0 && bitmap < 0xffff) { 569 isp->isp_osinfo.loop_down_limit = bitmap; 570 } else { 571 isp->isp_osinfo.loop_down_limit = isp_loop_down_limit; 572 } 573 574 bitmap = 0; 575 (void) getenv_int("isp_gone_device_time", &bitmap); 576 if (bitmap >= 0 && bitmap < 0xffff) { 577 isp->isp_osinfo.gone_device_time = bitmap; 578 } else { 579 isp->isp_osinfo.gone_device_time = isp_gone_device_time; 580 } 581#ifdef 
ISP_FW_CRASH_DUMP 582 bitmap = 0; 583 if (getenv_int("isp_fw_dump_enable", &bitmap)) { 584 if (bitmap & (1 << unit) { 585 size_t amt = 0; 586 if (IS_2200(isp)) { 587 amt = QLA2200_RISC_IMAGE_DUMP_SIZE; 588 } else if (IS_23XX(isp)) { 589 amt = QLA2300_RISC_IMAGE_DUMP_SIZE; 590 } 591 if (amt) { 592 FCPARAM(isp)->isp_dump_data = 593 malloc(amt, M_DEVBUF, M_WAITOK); 594 memset(FCPARAM(isp)->isp_dump_data, 0, amt); 595 } else { 596 device_printf(dev, 597 "f/w crash dumps not supported for card\n"); 598 } 599 } 600 } 601#endif 602} 603#else 604static void 605isp_get_generic_options(device_t dev, ispsoftc_t *isp) 606{ 607 int tval; 608 609 /* 610 * Figure out if we're supposed to skip this one. 611 */ 612 tval = 0; 613 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 614 "disable", &tval) == 0 && tval) { 615 device_printf(dev, "disabled at user request\n"); 616 isp->isp_osinfo.disabled = 1; 617 return; 618 } 619 620 tval = -1; 621 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 622 "role", &tval) == 0 && tval != -1) { 623 tval &= (ISP_ROLE_INITIATOR|ISP_ROLE_TARGET); 624 isp->isp_role = tval; 625 device_printf(dev, "setting role to 0x%x\n", isp->isp_role); 626 } else { 627#ifdef ISP_TARGET_MODE 628 isp->isp_role = ISP_ROLE_TARGET; 629#else 630 isp->isp_role = ISP_DEFAULT_ROLES; 631#endif 632 } 633 634 tval = 0; 635 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 636 "fwload_disable", &tval) == 0 && tval != 0) { 637 isp->isp_confopts |= ISP_CFG_NORELOAD; 638 } 639 tval = 0; 640 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 641 "ignore_nvram", &tval) == 0 && tval != 0) { 642 isp->isp_confopts |= ISP_CFG_NONVRAM; 643 } 644 645 tval = 0; 646 (void) resource_int_value(device_get_name(dev), device_get_unit(dev), 647 "debug", &tval); 648 if (tval) { 649 isp->isp_dblev = tval; 650 } else { 651 isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR; 652 } 653 if (bootverbose) { 654 isp->isp_dblev |= 
ISP_LOGCONFIG|ISP_LOGINFO; 655 } 656 657} 658 659static void 660isp_get_pci_options(device_t dev, int *m1, int *m2) 661{ 662 int tval; 663 /* 664 * Which we should try first - memory mapping or i/o mapping? 665 * 666 * We used to try memory first followed by i/o on alpha, otherwise 667 * the reverse, but we should just try memory first all the time now. 668 */ 669 *m1 = PCIM_CMD_MEMEN; 670 *m2 = PCIM_CMD_PORTEN; 671 672 tval = 0; 673 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 674 "prefer_iomap", &tval) == 0 && tval != 0) { 675 *m1 = PCIM_CMD_PORTEN; 676 *m2 = PCIM_CMD_MEMEN; 677 } 678 tval = 0; 679 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 680 "prefer_memmap", &tval) == 0 && tval != 0) { 681 *m1 = PCIM_CMD_MEMEN; 682 *m2 = PCIM_CMD_PORTEN; 683 } 684} 685 686static void 687isp_get_specific_options(device_t dev, ispsoftc_t *isp) 688{ 689 const char *sptr; 690 int tval; 691 692 isp->isp_osinfo.default_id = -1; 693 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 694 "iid", &tval) == 0) { 695 isp->isp_osinfo.default_id = tval; 696 isp->isp_confopts |= ISP_CFG_OWNLOOPID; 697 } 698 if (isp->isp_osinfo.default_id == -1) { 699 if (IS_FC(isp)) { 700 isp->isp_osinfo.default_id = 109; 701 } else { 702 isp->isp_osinfo.default_id = 7; 703 } 704 } 705 706 callout_handle_init(&isp->isp_osinfo.ldt); 707 callout_handle_init(&isp->isp_osinfo.gdt); 708 709 if (IS_SCSI(isp)) { 710 return; 711 } 712 713 tval = 0; 714 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 715 "fullduplex", &tval) == 0 && tval != 0) { 716 isp->isp_confopts |= ISP_CFG_FULL_DUPLEX; 717 } 718#ifdef ISP_FW_CRASH_DUMP 719 tval = 0; 720 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 721 "fw_dump_enable", &tval) == 0 && tval != 0) { 722 size_t amt = 0; 723 if (IS_2200(isp)) { 724 amt = QLA2200_RISC_IMAGE_DUMP_SIZE; 725 } else if (IS_23XX(isp)) { 726 amt = QLA2300_RISC_IMAGE_DUMP_SIZE; 727 } 728 if (amt) { 729 
FCPARAM(isp)->isp_dump_data = 730 malloc(amt, M_DEVBUF, M_WAITOK | M_ZERO); 731 } else { 732 device_printf(dev, 733 "f/w crash dumps not supported for this model\n"); 734 } 735 } 736#endif 737 sptr = 0; 738 if (resource_string_value(device_get_name(dev), device_get_unit(dev), 739 "topology", (const char **) &sptr) == 0 && sptr != 0) { 740 if (strcmp(sptr, "lport") == 0) { 741 isp->isp_confopts |= ISP_CFG_LPORT; 742 } else if (strcmp(sptr, "nport") == 0) { 743 isp->isp_confopts |= ISP_CFG_NPORT; 744 } else if (strcmp(sptr, "lport-only") == 0) { 745 isp->isp_confopts |= ISP_CFG_LPORT_ONLY; 746 } else if (strcmp(sptr, "nport-only") == 0) { 747 isp->isp_confopts |= ISP_CFG_NPORT_ONLY; 748 } 749 } 750 751 /* 752 * Because the resource_*_value functions can neither return 753 * 64 bit integer values, nor can they be directly coerced 754 * to interpret the right hand side of the assignment as 755 * you want them to interpret it, we have to force WWN 756 * hint replacement to specify WWN strings with a leading 757 * 'w' (e..g w50000000aaaa0001). Sigh. 
758 */ 759 sptr = 0; 760 tval = resource_string_value(device_get_name(dev), device_get_unit(dev), 761 "portwwn", (const char **) &sptr); 762 if (tval == 0 && sptr != 0 && *sptr++ == 'w') { 763 char *eptr = 0; 764 isp->isp_osinfo.default_port_wwn = strtouq(sptr, &eptr, 16); 765 if (eptr < sptr + 16 || isp->isp_osinfo.default_port_wwn == 0) { 766 device_printf(dev, "mangled portwwn hint '%s'\n", sptr); 767 isp->isp_osinfo.default_port_wwn = 0; 768 } else { 769 isp->isp_confopts |= ISP_CFG_OWNWWPN; 770 } 771 } 772 if (isp->isp_osinfo.default_port_wwn == 0) { 773 isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull; 774 } 775 776 sptr = 0; 777 tval = resource_string_value(device_get_name(dev), device_get_unit(dev), 778 "nodewwn", (const char **) &sptr); 779 if (tval == 0 && sptr != 0 && *sptr++ == 'w') { 780 char *eptr = 0; 781 isp->isp_osinfo.default_node_wwn = strtouq(sptr, &eptr, 16); 782 if (eptr < sptr + 16 || isp->isp_osinfo.default_node_wwn == 0) { 783 device_printf(dev, "mangled nodewwn hint '%s'\n", sptr); 784 isp->isp_osinfo.default_node_wwn = 0; 785 } else { 786 isp->isp_confopts |= ISP_CFG_OWNWWNN; 787 } 788 } 789 if (isp->isp_osinfo.default_node_wwn == 0) { 790 isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull; 791 } 792 793 794 tval = 0; 795 (void) resource_int_value(device_get_name(dev), device_get_unit(dev), 796 "hysteresis", &tval); 797 if (tval >= 0 && tval < 256) { 798 isp->isp_osinfo.hysteresis = tval; 799 } else { 800 isp->isp_osinfo.hysteresis = isp_fabric_hysteresis; 801 } 802 803 tval = -1; 804 (void) resource_int_value(device_get_name(dev), device_get_unit(dev), 805 "loop_down_limit", &tval); 806 if (tval >= 0 && tval < 0xffff) { 807 isp->isp_osinfo.loop_down_limit = tval; 808 } else { 809 isp->isp_osinfo.loop_down_limit = isp_loop_down_limit; 810 } 811 812 tval = -1; 813 (void) resource_int_value(device_get_name(dev), device_get_unit(dev), 814 "gone_device_time", &tval); 815 if (tval >= 0 && tval < 0xffff) { 816 
isp->isp_osinfo.gone_device_time = tval; 817 } else { 818 isp->isp_osinfo.gone_device_time = isp_gone_device_time; 819 } 820} 821#endif 822 823static int 824isp_pci_attach(device_t dev) 825{ 826 struct resource *regs, *irq; 827 int rtp, rgd, iqd, m1, m2; 828 uint32_t data, cmd, linesz, psize, basetype; 829 struct isp_pcisoftc *pcs; 830 ispsoftc_t *isp = NULL; 831 struct ispmdvec *mdvp; 832#if __FreeBSD_version >= 500000 833 int locksetup = 0; 834#endif 835 836 pcs = device_get_softc(dev); 837 if (pcs == NULL) { 838 device_printf(dev, "cannot get softc\n"); 839 return (ENOMEM); 840 } 841 memset(pcs, 0, sizeof (*pcs)); 842 pcs->pci_dev = dev; 843 isp = &pcs->pci_isp; 844 845 /* 846 * Get Generic Options 847 */ 848 isp_get_generic_options(dev, isp); 849 850 /* 851 * Check to see if options have us disabled 852 */ 853 if (isp->isp_osinfo.disabled) { 854 /* 855 * But return zero to preserve unit numbering 856 */ 857 return (0); 858 } 859 860 /* 861 * Get PCI options- which in this case are just mapping preferences. 862 */ 863 isp_get_pci_options(dev, &m1, &m2); 864 865 linesz = PCI_DFLT_LNSZ; 866 irq = regs = NULL; 867 rgd = rtp = iqd = 0; 868 869 cmd = pci_read_config(dev, PCIR_COMMAND, 2); 870 if (cmd & m1) { 871 rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT; 872 rgd = (m1 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG; 873 regs = bus_alloc_resource_any(dev, rtp, &rgd, RF_ACTIVE); 874 } 875 if (regs == NULL && (cmd & m2)) { 876 rtp = (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT; 877 rgd = (m2 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG; 878 regs = bus_alloc_resource_any(dev, rtp, &rgd, RF_ACTIVE); 879 } 880 if (regs == NULL) { 881 device_printf(dev, "unable to map any ports\n"); 882 goto bad; 883 } 884 if (bootverbose) { 885 device_printf(dev, "using %s space register mapping\n", 886 (rgd == IO_MAP_REG)? 
"I/O" : "Memory"); 887 } 888 pcs->pci_dev = dev; 889 pcs->pci_reg = regs; 890 pcs->pci_st = rman_get_bustag(regs); 891 pcs->pci_sh = rman_get_bushandle(regs); 892 893 pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF; 894 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF; 895 pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF; 896 pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF; 897 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF; 898 mdvp = &mdvec; 899 basetype = ISP_HA_SCSI_UNKNOWN; 900 psize = sizeof (sdparam); 901 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1020) { 902 mdvp = &mdvec; 903 basetype = ISP_HA_SCSI_UNKNOWN; 904 psize = sizeof (sdparam); 905 } 906 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1080) { 907 mdvp = &mdvec_1080; 908 basetype = ISP_HA_SCSI_1080; 909 psize = sizeof (sdparam); 910 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 911 ISP1080_DMA_REGS_OFF; 912 } 913 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1240) { 914 mdvp = &mdvec_1080; 915 basetype = ISP_HA_SCSI_1240; 916 psize = 2 * sizeof (sdparam); 917 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 918 ISP1080_DMA_REGS_OFF; 919 } 920 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1280) { 921 mdvp = &mdvec_1080; 922 basetype = ISP_HA_SCSI_1280; 923 psize = 2 * sizeof (sdparam); 924 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 925 ISP1080_DMA_REGS_OFF; 926 } 927 if (pci_get_devid(dev) == PCI_QLOGIC_ISP10160) { 928 mdvp = &mdvec_12160; 929 basetype = ISP_HA_SCSI_10160; 930 psize = sizeof (sdparam); 931 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 932 ISP1080_DMA_REGS_OFF; 933 } 934 if (pci_get_devid(dev) == PCI_QLOGIC_ISP12160) { 935 mdvp = &mdvec_12160; 936 basetype = ISP_HA_SCSI_12160; 937 psize = 2 * sizeof (sdparam); 938 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 939 ISP1080_DMA_REGS_OFF; 940 } 941 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2100) { 942 mdvp = &mdvec_2100; 943 basetype = ISP_HA_FC_2100; 944 psize = sizeof (fcparam); 945 pcs->pci_poff[MBOX_BLOCK >> 
_BLK_REG_SHFT] = 946 PCI_MBOX_REGS2100_OFF; 947 if (pci_get_revid(dev) < 3) { 948 /* 949 * XXX: Need to get the actual revision 950 * XXX: number of the 2100 FB. At any rate, 951 * XXX: lower cache line size for early revision 952 * XXX; boards. 953 */ 954 linesz = 1; 955 } 956 } 957 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2200) { 958 mdvp = &mdvec_2200; 959 basetype = ISP_HA_FC_2200; 960 psize = sizeof (fcparam); 961 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 962 PCI_MBOX_REGS2100_OFF; 963 } 964 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2300) { 965 mdvp = &mdvec_2300; 966 basetype = ISP_HA_FC_2300; 967 psize = sizeof (fcparam); 968 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 969 PCI_MBOX_REGS2300_OFF; 970 } 971 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2312 || 972 pci_get_devid(dev) == PCI_QLOGIC_ISP6312) { 973 mdvp = &mdvec_2300; 974 basetype = ISP_HA_FC_2312; 975 psize = sizeof (fcparam); 976 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 977 PCI_MBOX_REGS2300_OFF; 978 } 979 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2322 || 980 pci_get_devid(dev) == PCI_QLOGIC_ISP6322) { 981 mdvp = &mdvec_2300; 982 basetype = ISP_HA_FC_2322; 983 psize = sizeof (fcparam); 984 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 985 PCI_MBOX_REGS2300_OFF; 986 } 987 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2422 || 988 pci_get_devid(dev) == PCI_QLOGIC_ISP2432) { 989 mdvp = &mdvec_2400; 990 basetype = ISP_HA_FC_2400; 991 psize = sizeof (fcparam); 992 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 993 PCI_MBOX_REGS2400_OFF; 994 } 995 isp = &pcs->pci_isp; 996 isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT | M_ZERO); 997 if (isp->isp_param == NULL) { 998 device_printf(dev, "cannot allocate parameter data\n"); 999 goto bad; 1000 } 1001 isp->isp_mdvec = mdvp; 1002 isp->isp_type = basetype; 1003 isp->isp_revision = pci_get_revid(dev); 1004 isp->isp_dev = dev; 1005 1006 /* 1007 * Now that we know who we are (roughly) get/set specific options 1008 */ 1009 isp_get_specific_options(dev, isp); 1010 1011#if 
__FreeBSD_version >= 700000 1012 /* 1013 * Try and find firmware for this device. 1014 */ 1015 { 1016 char fwname[32]; 1017 unsigned int did = pci_get_device(dev); 1018 1019 /* 1020 * Map a few pci ids to fw names 1021 */ 1022 switch (did) { 1023 case PCI_PRODUCT_QLOGIC_ISP1020: 1024 did = 0x1040; 1025 break; 1026 case PCI_PRODUCT_QLOGIC_ISP1240: 1027 did = 0x1080; 1028 break; 1029 case PCI_PRODUCT_QLOGIC_ISP10160: 1030 case PCI_PRODUCT_QLOGIC_ISP12160: 1031 did = 0x12160; 1032 break; 1033 case PCI_PRODUCT_QLOGIC_ISP6312: 1034 case PCI_PRODUCT_QLOGIC_ISP2312: 1035 did = 0x2300; 1036 break; 1037 case PCI_PRODUCT_QLOGIC_ISP6322: 1038 did = 0x2322; 1039 break; 1040 case PCI_PRODUCT_QLOGIC_ISP2422: 1041 case PCI_PRODUCT_QLOGIC_ISP2432: 1042 did = 0x2400; 1043 break; 1044 default: 1045 break; 1046 } 1047 1048 isp->isp_osinfo.fw = NULL; 1049 if (isp->isp_role & ISP_ROLE_TARGET) { 1050 snprintf(fwname, sizeof (fwname), "isp_%04x_it", did); 1051 isp->isp_osinfo.fw = firmware_get(fwname); 1052 } 1053 if (isp->isp_osinfo.fw == NULL) { 1054 snprintf(fwname, sizeof (fwname), "isp_%04x", did); 1055 isp->isp_osinfo.fw = firmware_get(fwname); 1056 } 1057 if (isp->isp_osinfo.fw != NULL) { 1058 union { 1059 const void *fred; 1060 uint16_t *bob; 1061 } u; 1062 u.fred = isp->isp_osinfo.fw->data; 1063 isp->isp_mdvec->dv_ispfw = u.bob; 1064 } 1065 } 1066#else 1067 if (isp_get_firmware_p) { 1068 int device = (int) pci_get_device(dev); 1069#ifdef ISP_TARGET_MODE 1070 (*isp_get_firmware_p)(0, 1, device, &mdvp->dv_ispfw); 1071#else 1072 (*isp_get_firmware_p)(0, 0, device, &mdvp->dv_ispfw); 1073#endif 1074 } 1075#endif 1076 1077 /* 1078 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER 1079 * are set. 
1080 */ 1081 cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN | 1082 PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN; 1083 1084 if (IS_2300(isp)) { /* per QLogic errata */ 1085 cmd &= ~PCIM_CMD_INVEN; 1086 } 1087 1088 if (IS_2322(isp) || pci_get_devid(dev) == PCI_QLOGIC_ISP6312) { 1089 cmd &= ~PCIM_CMD_INTX_DISABLE; 1090 } 1091 1092#ifdef WE_KNEW_WHAT_WE_WERE_DOING 1093 if (IS_24XX(isp)) { 1094 int reg; 1095 1096 cmd &= ~PCIM_CMD_INTX_DISABLE; 1097 1098 /* 1099 * Is this a PCI-X card? If so, set max read byte count. 1100 */ 1101 if (pci_find_extcap(dev, PCIY_PCIX, ®) == 0) { 1102 uint16_t pxcmd; 1103 reg += 2; 1104 1105 pxcmd = pci_read_config(dev, reg, 2); 1106 pxcmd &= ~0xc; 1107 pxcmd |= 0x8; 1108 pci_write_config(dev, reg, 2, pxcmd); 1109 } 1110 1111 /* 1112 * Is this a PCI Express card? If so, set max read byte count. 1113 */ 1114 if (pci_find_extcap(dev, PCIY_EXPRESS, ®) == 0) { 1115 uint16_t pectl; 1116 1117 reg += 0x8; 1118 pectl = pci_read_config(dev, reg, 2); 1119 pectl &= ~0x7000; 1120 pectl |= 0x4000; 1121 pci_write_config(dev, reg, 2, pectl); 1122 } 1123 } 1124#else 1125 if (IS_24XX(isp)) { 1126 cmd &= ~PCIM_CMD_INTX_DISABLE; 1127 } 1128#endif 1129 1130 pci_write_config(dev, PCIR_COMMAND, cmd, 2); 1131 1132 /* 1133 * Make sure the Cache Line Size register is set sensibly. 1134 */ 1135 data = pci_read_config(dev, PCIR_CACHELNSZ, 1); 1136 if (data != linesz) { 1137 data = PCI_DFLT_LNSZ; 1138 isp_prt(isp, ISP_LOGCONFIG, "set PCI line size to %d", data); 1139 pci_write_config(dev, PCIR_CACHELNSZ, data, 1); 1140 } 1141 1142 /* 1143 * Make sure the Latency Timer is sane. 1144 */ 1145 data = pci_read_config(dev, PCIR_LATTIMER, 1); 1146 if (data < PCI_DFLT_LTNCY) { 1147 data = PCI_DFLT_LTNCY; 1148 isp_prt(isp, ISP_LOGCONFIG, "set PCI latency to %d", data); 1149 pci_write_config(dev, PCIR_LATTIMER, data, 1); 1150 } 1151 1152 /* 1153 * Make sure we've disabled the ROM. 
1154 */ 1155 data = pci_read_config(dev, PCIR_ROMADDR, 4); 1156 data &= ~1; 1157 pci_write_config(dev, PCIR_ROMADDR, data, 4); 1158 1159 iqd = 0; 1160 irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &iqd, 1161 RF_ACTIVE | RF_SHAREABLE); 1162 if (irq == NULL) { 1163 device_printf(dev, "could not allocate interrupt\n"); 1164 goto bad; 1165 } 1166 1167#if __FreeBSD_version >= 500000 1168 /* Make sure the lock is set up. */ 1169 mtx_init(&isp->isp_osinfo.lock, "isp", NULL, MTX_DEF); 1170 locksetup++; 1171#endif 1172 1173 if (bus_setup_intr(dev, irq, ISP_IFLAGS, isp_pci_intr, isp, &pcs->ih)) { 1174 device_printf(dev, "could not setup interrupt\n"); 1175 goto bad; 1176 } 1177 1178 /* 1179 * Last minute checks... 1180 */ 1181 if (IS_23XX(isp) || IS_24XX(isp)) { 1182 isp->isp_port = pci_get_function(dev); 1183 } 1184 1185 if (IS_23XX(isp)) { 1186 /* 1187 * Can't tell if ROM will hang on 'ABOUT FIRMWARE' command. 1188 */ 1189 isp->isp_touched = 1; 1190 } 1191 1192 /* 1193 * Make sure we're in reset state. 1194 */ 1195 ISP_LOCK(isp); 1196 isp_reset(isp); 1197 if (isp->isp_state != ISP_RESETSTATE) { 1198 ISP_UNLOCK(isp); 1199 goto bad; 1200 } 1201 isp_init(isp); 1202 if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_INITSTATE) { 1203 isp_uninit(isp); 1204 ISP_UNLOCK(isp); 1205 goto bad; 1206 } 1207 isp_attach(isp); 1208 if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_RUNSTATE) { 1209 isp_uninit(isp); 1210 ISP_UNLOCK(isp); 1211 goto bad; 1212 } 1213 /* 1214 * XXXX: Here is where we might unload the f/w module 1215 * XXXX: (or decrease the reference count to it). 
 */
	ISP_UNLOCK(isp);

	return (0);

bad:
	/*
	 * Attach failure path: tear down, in reverse order, whatever was
	 * set up above (interrupt handler, mutex, IRQ and register
	 * resources, HBA parameter storage).
	 */
	if (pcs && pcs->ih) {
		(void) bus_teardown_intr(dev, irq, pcs->ih);
	}

#if __FreeBSD_version >= 500000
	if (locksetup && isp) {
		mtx_destroy(&isp->isp_osinfo.lock);
	}
#endif

	if (irq) {
		(void) bus_release_resource(dev, SYS_RES_IRQ, iqd, irq);
	}

	if (regs) {
		(void) bus_release_resource(dev, rtp, rgd, regs);
	}

	if (pcs) {
		if (pcs->pci_isp.isp_param) {
#ifdef ISP_FW_CRASH_DUMP
			if (IS_FC(isp) && FCPARAM(isp)->isp_dump_data) {
				free(FCPARAM(isp)->isp_dump_data, M_DEVBUF);
			}
#endif
			free(pcs->pci_isp.isp_param, M_DEVBUF);
		}
	}

	/*
	 * XXXX: Here is where we might unload the f/w module
	 * XXXX: (or decrease the reference count to it).
	 */
	return (ENXIO);
}

/*
 * Device detach method.
 *
 * NOTE(review): this only disables chip interrupts -- it does not tear
 * down the interrupt handler, DMA resources or the CAM attachment.
 * Confirm that full detach is otherwise prevented for this driver.
 */
static int
isp_pci_detach(device_t dev)
{
	struct isp_pcisoftc *pcs;
	ispsoftc_t *isp;

	pcs = device_get_softc(dev);
	if (pcs == NULL) {
		return (ENXIO);
	}
	/* The ispsoftc_t is the leading member of the softc. */
	isp = (ispsoftc_t *) pcs;
	ISP_DISABLE_INTS(isp);
	return (0);
}

/*
 * PCI interrupt handler: count the interrupt, read and decode interrupt
 * status via the chip-specific ISR reader, and hand any real event to
 * the core interrupt service code.
 */
static void
isp_pci_intr(void *arg)
{
	ispsoftc_t *isp = arg;
	uint32_t isr;
	uint16_t sema, mbox;

	ISP_LOCK(isp);
	isp->isp_intcnt++;
	if (ISP_READ_ISR(isp, &isr, &sema, &mbox) == 0) {
		/* Nothing pending -- a shared or stray interrupt. */
		isp->isp_intbogus++;
	} else {
		isp_intr(isp, isr, sema, mbox);
	}
	ISP_UNLOCK(isp);
}


/*
 * Translate a virtual register offset into a bus-space offset, using
 * the per-block offsets recorded in pci_poff[] at attach time.
 */
#define IspVirt2Off(a, x) \
	(((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \
	_BLK_REG_SHFT] + ((x) & 0xfff))

/* Shorthand 16- and 32-bit bus-space accessors. */
#define BXR2(pcs, off) \
	bus_space_read_2(pcs->pci_st, pcs->pci_sh, off)
#define BXW2(pcs, off, v) \
	bus_space_write_2(pcs->pci_st, pcs->pci_sh, off, v)
#define BXR4(pcs, off) \
	bus_space_read_4(pcs->pci_st, pcs->pci_sh, off)
#define BXW4(pcs, off, v) \
	bus_space_write_4(pcs->pci_st,
pcs->pci_sh, off, v) 1305 1306 1307static __inline int 1308isp_pci_rd_debounced(ispsoftc_t *isp, int off, uint16_t *rp) 1309{ 1310 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 1311 uint32_t val0, val1; 1312 int i = 0; 1313 1314 do { 1315 val0 = BXR2(pcs, IspVirt2Off(isp, off)); 1316 val1 = BXR2(pcs, IspVirt2Off(isp, off)); 1317 } while (val0 != val1 && ++i < 1000); 1318 if (val0 != val1) { 1319 return (1); 1320 } 1321 *rp = val0; 1322 return (0); 1323} 1324 1325static int 1326isp_pci_rd_isr(ispsoftc_t *isp, uint32_t *isrp, 1327 uint16_t *semap, uint16_t *mbp) 1328{ 1329 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 1330 uint16_t isr, sema; 1331 1332 if (IS_2100(isp)) { 1333 if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) { 1334 return (0); 1335 } 1336 if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) { 1337 return (0); 1338 } 1339 } else { 1340 isr = BXR2(pcs, IspVirt2Off(isp, BIU_ISR)); 1341 sema = BXR2(pcs, IspVirt2Off(isp, BIU_SEMA)); 1342 } 1343 isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema); 1344 isr &= INT_PENDING_MASK(isp); 1345 sema &= BIU_SEMA_LOCK; 1346 if (isr == 0 && sema == 0) { 1347 return (0); 1348 } 1349 *isrp = isr; 1350 if ((*semap = sema) != 0) { 1351 if (IS_2100(isp)) { 1352 if (isp_pci_rd_debounced(isp, OUTMAILBOX0, mbp)) { 1353 return (0); 1354 } 1355 } else { 1356 *mbp = BXR2(pcs, IspVirt2Off(isp, OUTMAILBOX0)); 1357 } 1358 } 1359 return (1); 1360} 1361 1362static int 1363isp_pci_rd_isr_2300(ispsoftc_t *isp, uint32_t *isrp, 1364 uint16_t *semap, uint16_t *mbox0p) 1365{ 1366 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 1367 uint32_t hccr; 1368 uint32_t r2hisr; 1369 1370 if (!(BXR2(pcs, IspVirt2Off(isp, BIU_ISR) & BIU2100_ISR_RISC_INT))) { 1371 *isrp = 0; 1372 return (0); 1373 } 1374 r2hisr = BXR4(pcs, IspVirt2Off(pcs, BIU_R2HSTSLO)); 1375 isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr); 1376 if ((r2hisr & BIU_R2HST_INTR) == 0) { 1377 *isrp = 0; 1378 return (0); 1379 } 1380 switch (r2hisr & 
	    BIU_R2HST_ISTAT_MASK) {
	case ISPR2HST_ROM_MBX_OK:
	case ISPR2HST_ROM_MBX_FAIL:
	case ISPR2HST_MBX_OK:
	case ISPR2HST_MBX_FAIL:
	case ISPR2HST_ASYNC_EVENT:
		/* Mailbox/async completion: mailbox 0 is in the top half. */
		*isrp = r2hisr & 0xffff;
		*mbox0p = (r2hisr >> 16);
		*semap = 1;
		return (1);
	case ISPR2HST_RIO_16:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_RIO1;
		*semap = 1;
		return (1);
	case ISPR2HST_FPOST:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_CMD_CMPLT;
		*semap = 1;
		return (1);
	case ISPR2HST_FPOST_CTIO:
		*isrp = r2hisr & 0xffff;
		*mbox0p = ASYNC_CTIO_DONE;
		*semap = 1;
		return (1);
	case ISPR2HST_RSPQ_UPDATE:
		/* Response queue update only: no semaphore or mailbox. */
		*isrp = r2hisr & 0xffff;
		*mbox0p = 0;
		*semap = 0;
		return (1);
	default:
		hccr = ISP_READ(isp, HCCR);
		if (hccr & HCCR_PAUSE) {
			/*
			 * The RISC is paused: reset it and squelch
			 * further interrupts.
			 */
			ISP_WRITE(isp, HCCR, HCCR_RESET);
			isp_prt(isp, ISP_LOGERR,
			    "RISC paused at interrupt (%x->%x)", hccr,
			    ISP_READ(isp, HCCR));
			ISP_WRITE(isp, BIU_ICR, 0);
		} else {
			isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n",
			    r2hisr);
		}
		return (0);
	}
}

/*
 * Read interrupt status for 2400 class chips. Same structure as the
 * 2300 version but with the 2400 status register and status codes.
 */
static int
isp_pci_rd_isr_2400(ispsoftc_t *isp, uint32_t *isrp,
    uint16_t *semap, uint16_t *mbox0p)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	uint32_t r2hisr;

	r2hisr = BXR4(pcs, IspVirt2Off(pcs, BIU2400_R2HSTSLO));
	isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
	if ((r2hisr & BIU2400_R2HST_INTR) == 0) {
		*isrp = 0;
		return (0);
	}
	switch (r2hisr & BIU2400_R2HST_ISTAT_MASK) {
	case ISP2400R2HST_ROM_MBX_OK:
	case ISP2400R2HST_ROM_MBX_FAIL:
	case ISP2400R2HST_MBX_OK:
	case ISP2400R2HST_MBX_FAIL:
	case ISP2400R2HST_ASYNC_EVENT:
		*isrp = r2hisr & 0xffff;
		*mbox0p = (r2hisr >> 16);
		*semap = 1;
		return (1);
	case ISP2400R2HST_RSPQ_UPDATE:
	case ISP2400R2HST_ATIO_RSPQ_UPDATE:
	case ISP2400R2HST_ATIO_RQST_UPDATE:
		*isrp = r2hisr & 0xffff;
		*mbox0p = 0;
		*semap = 0;
		return (1);
	default:
		ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT);
		isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n", r2hisr);
		return (0);
	}
}

/*
 * Default register read (non-1080/2400 chips). SXP register access
 * requires flipping BIU_CONF1 to select the SXP bank around the read.
 */
static uint32_t
isp_pci_rd_reg(ispsoftc_t *isp, int regoff)
{
	uint32_t rv;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oldconf = 0;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oldconf | BIU_PCI_CONF1_SXP);
	}
	rv = BXR2(pcs, IspVirt2Off(isp, regoff));
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/* Restore the previous bank selection. */
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
	}
	return (rv);
}

/*
 * Default register write. Same SXP bank selection as the read side;
 * on the ISP2100 each write is followed by a read-back to flush it
 * (the 'junk' assignments are intentional).
 */
static void
isp_pci_wr_reg(ispsoftc_t *isp, int regoff, uint32_t val)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oldconf = 0;
	volatile int junk;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oldconf | BIU_PCI_CONF1_SXP);
		if (IS_2100(isp)) {
			junk = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		}
	}
	BXW2(pcs, IspVirt2Off(isp, regoff), val);
	if (IS_2100(isp)) {
		junk = BXR2(pcs, IspVirt2Off(isp, regoff));
	}
	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf);
		if (IS_2100(isp)) {
			junk = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		}
	}
}

/*
 * Register read for 1080/1240/12160 class chips. These parts have two
 * SXP banks and a DMA register block, all selected through BIU_CONF1,
 * so the appropriate selector bits are set around the access and the
 * prior BIU_CONF1 value restored afterwards.
 */
static uint32_t
isp_pci_rd_reg_1080(ispsoftc_t *isp, int regoff)
{
	uint32_t rv, oc = 0;
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
		uint32_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
	}
	rv = BXR2(pcs, IspVirt2Off(isp, regoff));
	if (oc) {
		/*
		 * NOTE(review): restoration is skipped when the saved
		 * BIU_CONF1 value happened to be zero -- presumably
		 * benign, but worth confirming.
		 */
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
	}
	return (rv);
}

/*
 * Register write for 1080/1240/12160 class chips. Same bank selection
 * as the read side, with a read-back after every write to flush it.
 */
static void
isp_pci_wr_reg_1080(ispsoftc_t *isp, int regoff, uint32_t val)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int oc = 0;
	volatile int junk;

	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK ||
	    (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) {
		uint32_t tc;
		/*
		 * We will assume that someone has paused the RISC processor.
		 */
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		tc = oc & ~BIU_PCI1080_CONF1_DMA;
		if (regoff & SXP_BANK1_SELECT)
			tc |= BIU_PCI1080_CONF1_SXP1;
		else
			tc |= BIU_PCI1080_CONF1_SXP0;
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc);
		junk = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
		oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1),
		    oc | BIU_PCI1080_CONF1_DMA);
		junk = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
	}
	BXW2(pcs, IspVirt2Off(isp, regoff), val);
	junk = BXR2(pcs, IspVirt2Off(isp, regoff));
	if (oc) {
		BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc);
		junk = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1));
	}
}

/*
 * Register read for 2400 class chips. Most registers are 32 bit reads
 * in the BIU block; mailbox registers remain 16 bit, and the old
 * SXP/RISC/DMA register blocks no longer exist (reads of those are
 * logged and return all-ones).
 */
static uint32_t
isp_pci_rd_reg_2400(ispsoftc_t *isp, int regoff)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	uint32_t rv;
	int block = regoff & _BLK_REG_MASK;

	switch (block) {
	case BIU_BLOCK:
		break;
	case MBOX_BLOCK:
		return (BXR2(pcs, IspVirt2Off(pcs, regoff)));
	case SXP_BLOCK:
		isp_prt(isp, ISP_LOGWARN, "SXP_BLOCK read at 0x%x", regoff);
		return (0xffffffff);
	case RISC_BLOCK:
		isp_prt(isp, ISP_LOGWARN, "RISC_BLOCK read at 0x%x", regoff);
		return (0xffffffff);
	case DMA_BLOCK:
		isp_prt(isp, ISP_LOGWARN, "DMA_BLOCK read at 0x%x", regoff);
		return (0xffffffff);
	default:
		isp_prt(isp, ISP_LOGWARN, "unknown block read at 0x%x", regoff);
		return (0xffffffff);
	}


	switch (regoff) {
	case BIU2400_FLASH_ADDR:
	case BIU2400_FLASH_DATA:
	case BIU2400_ICR:
	case BIU2400_ISR:
	case BIU2400_CSR:
	case BIU2400_REQINP:
	case BIU2400_REQOUTP:
	case BIU2400_RSPINP:
	case BIU2400_RSPOUTP:
	case BIU2400_PRI_RQINP:
	case BIU2400_PRI_RSPINP:
	case BIU2400_ATIO_RSPINP:
	case BIU2400_ATIO_REQINP:
	case BIU2400_HCCR:
	case BIU2400_GPIOD:
	case BIU2400_GPIOE:
	case BIU2400_HSEMA:
		rv = BXR4(pcs, IspVirt2Off(pcs, regoff));
		break;
	case BIU2400_R2HSTSLO:
		rv = BXR4(pcs, IspVirt2Off(pcs, regoff));
		break;
	case BIU2400_R2HSTSHI:
		/* The high half lives in the top 16 bits of the register. */
		rv = BXR4(pcs, IspVirt2Off(pcs, regoff)) >> 16;
		break;
	default:
		isp_prt(isp, ISP_LOGERR,
		    "isp_pci_rd_reg_2400: unknown offset %x", regoff);
		rv = 0xffffffff;
		break;
	}
	return (rv);
}

/*
 * Register write for 2400 class chips. Writes to defunct register
 * blocks are logged and dropped; every real write is followed by a
 * read-back to flush it to the chip.
 */
static void
isp_pci_wr_reg_2400(ispsoftc_t *isp, int regoff, uint32_t val)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp;
	int block = regoff & _BLK_REG_MASK;
	volatile int junk;

	switch (block) {
	case BIU_BLOCK:
		break;
	case MBOX_BLOCK:
		BXW2(pcs, IspVirt2Off(pcs, regoff), val);
		junk = BXR2(pcs, IspVirt2Off(pcs, regoff));
		return;
	case SXP_BLOCK:
		isp_prt(isp, ISP_LOGWARN, "SXP_BLOCK write at 0x%x", regoff);
		return;
	case RISC_BLOCK:
		isp_prt(isp, ISP_LOGWARN, "RISC_BLOCK write at 0x%x", regoff);
		return;
	case DMA_BLOCK:
		isp_prt(isp, ISP_LOGWARN, "DMA_BLOCK write at 0x%x", regoff);
		return;
	default:
		isp_prt(isp, ISP_LOGWARN, "unknown block write at 0x%x",
		    regoff);
		break;
	}

	switch (regoff) {
	case BIU2400_FLASH_ADDR:
	case BIU2400_FLASH_DATA:
	case BIU2400_ICR:
	case BIU2400_ISR:
	case BIU2400_CSR:
	case BIU2400_REQINP:
	case BIU2400_REQOUTP:
	case BIU2400_RSPINP:
	case BIU2400_RSPOUTP:
	case BIU2400_PRI_RQINP:
	case BIU2400_PRI_RSPINP:
	case BIU2400_ATIO_RSPINP:
	case BIU2400_ATIO_REQINP:
	case BIU2400_HCCR:
	case BIU2400_GPIOD:
	case BIU2400_GPIOE:
	case BIU2400_HSEMA:
		BXW4(pcs, IspVirt2Off(pcs, regoff), val);
		junk = BXR4(pcs, IspVirt2Off(pcs, regoff));
		break;
	default:
		isp_prt(isp, ISP_LOGERR,
		    "isp_pci_wr_reg_2400: bad offset 0x%x", regoff);
		break;
	}
}


/*
 * Argument/result bundle for the control-space dmamap load callback.
 */
struct imush {
	ispsoftc_t *isp;
	int error;
};

static void imc(void *, bus_dma_segment_t *, int, int);

/*
 * bus_dmamap_load() callback for the control area: record the bus
 * addresses of the request queue, result queue and (for FC) scratch
 * area, which were laid out contiguously in that order.
 */
static void
imc(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct imush *imushp = (struct imush *) arg;
	if (error) {
		imushp->error = error;
	} else {
		ispsoftc_t *isp = imushp->isp;
		bus_addr_t addr = segs->ds_addr;

		isp->isp_rquest_dma = addr;
		addr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
		isp->isp_result_dma = addr;
		if (IS_FC(isp)) {
			addr += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
			FCPARAM(isp)->isp_scdma = addr;
		}
	}
}

/*
 * Allocate and map the DMA resources this instance needs: a master DMA
 * tag for data transfers, per-command DMA maps, and a contiguous,
 * 32-bit addressable control area holding the request and result
 * queues (plus the FC scratch area).
 *
 * Returns 0 on success, 1 on failure. Entered with the ISP lock held;
 * the lock is dropped around the sleeping allocations below.
 */
static int
isp_pci_mbxdma(ispsoftc_t *isp)
{
	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
	caddr_t base;
	uint32_t len;
	int i, error, ns;
	bus_size_t slim;	/* segment size */
	bus_addr_t llim;	/* low limit of unavailable dma */
	bus_addr_t hlim;	/* high limit of unavailable dma */
	struct imush im;

	/*
	 * Already been here? If so, leave...
	 */
	if (isp->isp_rquest) {
		return (0);
	}

	if (isp->isp_maxcmds == 0) {
		isp_prt(isp, ISP_LOGERR, "maxcmds not set");
		return (1);
	}

	/*
	 * Pick DMA restrictions: Ultra2, FC and 1240 parts get 4GB
	 * segments and unrestricted addresses; older parts are limited
	 * to 32-bit addresses and 16MB segments.
	 */
	hlim = BUS_SPACE_MAXADDR;
	if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) {
		slim = (bus_size_t) (1ULL << 32);
		llim = BUS_SPACE_MAXADDR;
	} else {
		llim = BUS_SPACE_MAXADDR_32BIT;
		slim = (1 << 24);
	}

	/*
	 * XXX: We don't really support 64 bit target mode for parallel scsi yet
	 */
#ifdef	ISP_TARGET_MODE
	if (IS_SCSI(isp) && sizeof (bus_addr_t) > 4) {
		isp_prt(isp, ISP_LOGERR, "we cannot do DAC for SPI cards yet");
		return (1);
	}
#endif

	/* Drop the lock across the sleeping allocations below. */
	ISP_UNLOCK(isp);
	if (isp_dma_tag_create(BUS_DMA_ROOTARG(pcs->pci_dev), 1, slim, llim,
	    hlim, NULL, NULL, BUS_SPACE_MAXSIZE, ISP_NSEGS, slim, 0,
	    &pcs->dmat)) {
		isp_prt(isp, ISP_LOGERR, "could not create master dma tag");
		ISP_LOCK(isp);
		return (1);
	}


	/* Per-command bookkeeping: active command list and DMA maps. */
	len = sizeof (XS_T **) * isp->isp_maxcmds;
	isp->isp_xflist = (XS_T **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (isp->isp_xflist == NULL) {
		isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array");
		ISP_LOCK(isp);
		return (1);
	}
#ifdef	ISP_TARGET_MODE
	len = sizeof (void **) * isp->isp_maxcmds;
	isp->isp_tgtlist = (void **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
	if (isp->isp_tgtlist == NULL) {
		isp_prt(isp, ISP_LOGERR, "cannot alloc tgtlist array");
		ISP_LOCK(isp);
		return (1);
	}
#endif
	len = sizeof (bus_dmamap_t) * isp->isp_maxcmds;
	pcs->dmaps = (bus_dmamap_t *) malloc(len, M_DEVBUF, M_WAITOK);
	if (pcs->dmaps == NULL) {
		isp_prt(isp, ISP_LOGERR, "can't alloc dma map storage");
		free(isp->isp_xflist, M_DEVBUF);
#ifdef	ISP_TARGET_MODE
		free(isp->isp_tgtlist, M_DEVBUF);
#endif
		ISP_LOCK(isp);
		return (1);
	}

	/*
	 * Allocate and map the request, result queues, plus FC scratch
	 * area.
	 */
	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
	if (IS_FC(isp)) {
		len += ISP2100_SCRLEN;
	}

	ns = (len / PAGE_SIZE) + 1;
	/*
	 * Create a tag for the control spaces- force it to within 32 bits.
	 */
	if (isp_dma_tag_create(pcs->dmat, QENTRY_LEN, slim,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR,
	    NULL, NULL, len, ns, slim, 0, &isp->isp_cdmat)) {
		isp_prt(isp, ISP_LOGERR,
		    "cannot create a dma tag for control spaces");
		free(pcs->dmaps, M_DEVBUF);
		free(isp->isp_xflist, M_DEVBUF);
#ifdef	ISP_TARGET_MODE
		free(isp->isp_tgtlist, M_DEVBUF);
#endif
		ISP_LOCK(isp);
		return (1);
	}

	if (bus_dmamem_alloc(isp->isp_cdmat, (void **)&base, BUS_DMA_NOWAIT,
	    &isp->isp_cdmap) != 0) {
		isp_prt(isp, ISP_LOGERR,
		    "cannot allocate %d bytes of CCB memory", len);
		bus_dma_tag_destroy(isp->isp_cdmat);
		free(isp->isp_xflist, M_DEVBUF);
#ifdef	ISP_TARGET_MODE
		free(isp->isp_tgtlist, M_DEVBUF);
#endif
		free(pcs->dmaps, M_DEVBUF);
		ISP_LOCK(isp);
		return (1);
	}

	/* One DMA map per outstanding command. */
	for (i = 0; i < isp->isp_maxcmds; i++) {
		error = bus_dmamap_create(pcs->dmat, 0, &pcs->dmaps[i]);
		if (error) {
			isp_prt(isp, ISP_LOGERR,
			    "error %d creating per-cmd DMA maps", error);
			while (--i >= 0) {
				bus_dmamap_destroy(pcs->dmat, pcs->dmaps[i]);
			}
			goto bad;
		}
	}

	im.isp = isp;
	im.error = 0;
	bus_dmamap_load(isp->isp_cdmat, isp->isp_cdmap, base, len, imc, &im, 0);
	if (im.error) {
		isp_prt(isp, ISP_LOGERR,
		    "error %d loading dma map for control areas", im.error);
		goto bad;
	}

	/* Carve the control area into its kernel-virtual pieces. */
	isp->isp_rquest = base;
	base += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
	isp->isp_result = base;
	if (IS_FC(isp)) {
		base += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
		FCPARAM(isp)->isp_scratch = base;
	}
	ISP_LOCK(isp);
	return (0);

bad:
	/*
	 * Unwind the control-area allocations and per-command arrays.
	 * NOTE(review): the master tag (pcs->dmat) and the per-command
	 * maps created above are not destroyed here -- confirm whether
	 * that is intentional.
	 */
	bus_dmamem_free(isp->isp_cdmat, base, isp->isp_cdmap);
	bus_dma_tag_destroy(isp->isp_cdmat);
	free(isp->isp_xflist, M_DEVBUF);
#ifdef	ISP_TARGET_MODE
	free(isp->isp_tgtlist, M_DEVBUF);
#endif
	free(pcs->dmaps, M_DEVBUF);
	ISP_LOCK(isp);
	isp->isp_rquest = NULL;
	return (1);
}

/*
 * Scratch state handed to the bus_dmamap_load() callbacks used for
 * command data transfers (dma2/dma2_a64/dma_2400 and the target-mode
 * tdma_mk/tdma_mkfc variants).
 */
typedef struct {
	ispsoftc_t *isp;	/* HBA instance */
	void *cmd_token;	/* the CCB (struct ccb_scsiio *) */
	void *rq;		/* partially built request queue entry */
	uint32_t *nxtip;	/* in/out: next request queue index */
	uint32_t optr;		/* queue output pointer, for full check */
	int error;		/* out: errno or MUSHERR_NOQENTRIES */
} mush_t;

#define	MUSHERR_NOQENTRIES	-2

#ifdef	ISP_TARGET_MODE
/*
 * We need to handle DMA for target mode differently from initiator mode.
 *
 * DMA mapping and construction and submission of CTIO Request Entries
 * and rendezvous for completion are very tightly coupled because we start
 * out by knowing (per platform) how much data we have to move, but we
 * don't know, up front, how many DMA mapping segments will have to be used
 * to cover that data, so we don't know how many CTIO Request Entries we
 * will end up using. Further, for performance reasons we may want to
 * (on the last CTIO for Fibre Channel), send status too (if all went well).
 *
 * The standard vector still goes through isp_pci_dmasetup, but the callback
 * for the DMA mapping routines comes here instead with the whole transfer
 * mapped and a pointer to a partially filled in already allocated request
 * queue entry. We finish the job.
 */
static void tdma_mk(void *, bus_dma_segment_t *, int, int);
static void tdma_mkfc(void *, bus_dma_segment_t *, int, int);

#define	STATUS_WITH_DATA	1

/*
 * DMA map callback for parallel SCSI target mode: carve the mapped
 * transfer into as many CTIO request entries as needed (ISP_RQDSEG
 * data segments per CTIO). The saved syshandle and any SCSI status
 * are re-attached only to the final CTIO of the sequence, which the
 * caller (not this routine) hands to the firmware.
 */
static void
tdma_mk(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ccb_scsiio *csio;
	ispsoftc_t *isp;
	struct isp_pcisoftc *pcs;
	bus_dmamap_t *dp;
	ct_entry_t *cto, *qe;
	uint8_t scsi_status;
	uint32_t curi, nxti, handle;
	uint32_t sflags;
	int32_t resid;
	int nth_ctio, nctios, send_status;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	isp = mp->isp;
	csio = mp->cmd_token;
	cto = mp->rq;
	curi = isp->isp_reqidx;
	qe = (ct_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi);

	cto->ct_xfrlen = 0;
	cto->ct_seg_count = 0;
	cto->ct_header.rqs_entry_count = 1;
	MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));

	if (nseg == 0) {
		/* No data to move: a single status-only CTIO. */
		cto->ct_header.rqs_seqno = 1;
		isp_prt(isp, ISP_LOGTDEBUG1,
		    "CTIO[%x] lun%d iid%d tag %x flgs %x sts %x ssts %x res %d",
		    cto->ct_fwhandle, csio->ccb_h.target_lun, cto->ct_iid,
		    cto->ct_tag_val, cto->ct_flags, cto->ct_status,
		    cto->ct_scsi_status, cto->ct_resid);
		ISP_TDQE(isp, "tdma_mk[no data]", curi, cto);
		isp_put_ctio(isp, cto, qe);
		return;
	}

	nctios = nseg / ISP_RQDSEG;
	if (nseg % ISP_RQDSEG) {
		nctios++;
	}

	/*
	 * Save syshandle, and potentially any SCSI status, which we'll
	 * reinsert on the last CTIO we're going to send.
	 */
	handle = cto->ct_syshandle;
	cto->ct_syshandle = 0;
	cto->ct_header.rqs_seqno = 0;
	send_status = (cto->ct_flags & CT_SENDSTATUS) != 0;

	if (send_status) {
		sflags = cto->ct_flags & (CT_SENDSTATUS | CT_CCINCR);
		cto->ct_flags &= ~(CT_SENDSTATUS | CT_CCINCR);
		/*
		 * Preserve residual.
		 */
		resid = cto->ct_resid;

		/*
		 * Save actual SCSI status.
		 */
		scsi_status = cto->ct_scsi_status;

#ifndef	STATUS_WITH_DATA
		sflags |= CT_NO_DATA;
		/*
		 * We can't do a status at the same time as a data CTIO, so
		 * we need to synthesize an extra CTIO at this level.
		 */
		nctios++;
#endif
	} else {
		sflags = scsi_status = resid = 0;
	}

	cto->ct_resid = 0;
	cto->ct_scsi_status = 0;

	/* Sync the data buffers for the upcoming DMA. */
	pcs = (struct isp_pcisoftc *)isp;
	dp = &pcs->dmaps[isp_handle_index(handle)];
	if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE);
	}

	nxti = *mp->nxtip;

	for (nth_ctio = 0; nth_ctio < nctios; nth_ctio++) {
		int seglim;

		seglim = nseg;
		if (seglim) {
			int seg;

			if (seglim > ISP_RQDSEG)
				seglim = ISP_RQDSEG;

			for (seg = 0; seg < seglim; seg++, nseg--) {
				/*
				 * Unlike normal initiator commands, we don't
				 * do any swizzling here.
				 */
				cto->ct_dataseg[seg].ds_count = dm_segs->ds_len;
				cto->ct_dataseg[seg].ds_base = dm_segs->ds_addr;
				cto->ct_xfrlen += dm_segs->ds_len;
				dm_segs++;
			}
			cto->ct_seg_count = seg;
		} else {
			/*
			 * This case should only happen when we're sending an
			 * extra CTIO with final status.
			 */
			if (send_status == 0) {
				isp_prt(isp, ISP_LOGWARN,
				    "tdma_mk ran out of segments");
				mp->error = EINVAL;
				return;
			}
		}

		/*
		 * At this point, the fields ct_lun, ct_iid, ct_tagval,
		 * ct_tagtype, and ct_timeout have been carried over
		 * unchanged from what our caller had set.
		 *
		 * The dataseg fields and the seg_count fields we just got
		 * through setting. The data direction we've preserved all
		 * along and only clear it if we're now sending status.
		 */

		if (nth_ctio == nctios - 1) {
			/*
			 * We're the last in a sequence of CTIOs, so mark
			 * this CTIO and save the handle to the CCB such that
			 * when this CTIO completes we can free dma resources
			 * and do whatever else we need to do to finish the
			 * rest of the command. We *don't* give this to the
			 * firmware to work on- the caller will do that.
			 */
			cto->ct_syshandle = handle;
			cto->ct_header.rqs_seqno = 1;

			if (send_status) {
				cto->ct_scsi_status = scsi_status;
				cto->ct_flags |= sflags;
				cto->ct_resid = resid;
			}
			if (send_status) {
				isp_prt(isp, ISP_LOGTDEBUG1,
				    "CTIO[%x] lun%d iid %d tag %x ct_flags %x "
				    "scsi status %x resid %d",
				    cto->ct_fwhandle, csio->ccb_h.target_lun,
				    cto->ct_iid, cto->ct_tag_val, cto->ct_flags,
				    cto->ct_scsi_status, cto->ct_resid);
			} else {
				isp_prt(isp, ISP_LOGTDEBUG1,
				    "CTIO[%x] lun%d iid%d tag %x ct_flags 0x%x",
				    cto->ct_fwhandle, csio->ccb_h.target_lun,
				    cto->ct_iid, cto->ct_tag_val,
				    cto->ct_flags);
			}
			isp_put_ctio(isp, cto, qe);
			ISP_TDQE(isp, "last tdma_mk", curi, cto);
			if (nctios > 1) {
				MEMORYBARRIER(isp, SYNC_REQUEST,
				    curi, QENTRY_LEN);
			}
		} else {
			ct_entry_t *oqe = qe;

			/*
			 * Make sure syshandle fields are clean
			 */
			cto->ct_syshandle = 0;
			cto->ct_header.rqs_seqno = 0;

			isp_prt(isp, ISP_LOGTDEBUG1,
			    "CTIO[%x] lun%d for ID%d ct_flags 0x%x",
			    cto->ct_fwhandle, csio->ccb_h.target_lun,
			    cto->ct_iid, cto->ct_flags);

			/*
			 * Get a new CTIO
			 */
			qe = (ct_entry_t *)
			    ISP_QUEUE_ENTRY(isp->isp_rquest, nxti);
			nxti = ISP_NXT_QENTRY(nxti, RQUEST_QUEUE_LEN(isp));
			if (nxti == mp->optr) {
				isp_prt(isp, ISP_LOGTDEBUG0,
				    "Queue Overflow in tdma_mk");
				mp->error = MUSHERR_NOQENTRIES;
				return;
			}

			/*
			 * Now that we're done with the old CTIO,
			 * flush it out to the request queue.
			 */
			ISP_TDQE(isp, "dma_tgt_fc", curi, cto);
			isp_put_ctio(isp, cto, oqe);
			if (nth_ctio != 0) {
				MEMORYBARRIER(isp, SYNC_REQUEST, curi,
				    QENTRY_LEN);
			}
			curi = ISP_NXT_QENTRY(curi, RQUEST_QUEUE_LEN(isp));

			/*
			 * Reset some fields in the CTIO so we can reuse
			 * for the next one we'll flush to the request
			 * queue.
			 */
			cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
			cto->ct_header.rqs_entry_count = 1;
			cto->ct_header.rqs_flags = 0;
			cto->ct_status = 0;
			cto->ct_scsi_status = 0;
			cto->ct_xfrlen = 0;
			cto->ct_resid = 0;
			cto->ct_seg_count = 0;
			MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg));
		}
	}
	*mp->nxtip = nxti;
}

/*
 * We don't have to do multiple CTIOs here. Instead, we can just do
 * continuation segments as needed. This greatly simplifies the code
 * and improves performance.
 */
static void
tdma_mkfc(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	mush_t *mp;
	struct ccb_scsiio *csio;
	ispsoftc_t *isp;
	ct2_entry_t *cto, *qe;
	uint32_t curi, nxti;
	ispds_t *ds;
	ispds64_t *ds64;
	int segcnt, seglim;

	mp = (mush_t *) arg;
	if (error) {
		mp->error = error;
		return;
	}

	isp = mp->isp;
	csio = mp->cmd_token;
	cto = mp->rq;

	curi = isp->isp_reqidx;
	qe = (ct2_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi);

	if (nseg == 0) {
		/* A status-only CTIO2 must be a MODE1 entry. */
		if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE1) {
			isp_prt(isp, ISP_LOGWARN,
			    "dma2_tgt_fc, a status CTIO2 without MODE1 "
			    "set (0x%x)", cto->ct_flags);
			mp->error = EINVAL;
			return;
		}
		/*
		 * We preserve ct_lun, ct_iid, ct_rxid. We set the data
		 * flags to NO DATA and clear relative offset flags.
		 * We preserve the ct_resid and the response area.
		 */
		cto->ct_header.rqs_seqno = 1;
		cto->ct_seg_count = 0;
		cto->ct_reloff = 0;
		isp_prt(isp, ISP_LOGTDEBUG1,
		    "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts "
		    "0x%x res %d", cto->ct_rxid, csio->ccb_h.target_lun,
		    cto->ct_iid, cto->ct_flags, cto->ct_status,
		    cto->rsp.m1.ct_scsi_status, cto->ct_resid);
		if (FCPARAM(isp)->isp_2klogin) {
			/* 2K-login firmware uses the extended CTIO2 form. */
			isp_put_ctio2e(isp,
			    (ct2e_entry_t *)cto, (ct2e_entry_t *)qe);
		} else {
			isp_put_ctio2(isp, cto, qe);
		}
		ISP_TDQE(isp, "dma2_tgt_fc[no data]", curi, qe);
		return;
	}

	if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE0) {
		isp_prt(isp, ISP_LOGERR,
		    "dma2_tgt_fc, a data CTIO2 without MODE0 set "
		    "(0x%x)", cto->ct_flags);
		mp->error = EINVAL;
		return;
	}


	nxti = *mp->nxtip;

	/*
	 * Check to see if we need to do DAC addressing or not.
	 *
	 * Any address that's over the 4GB boundary causes this
	 * to happen.
	 */
	segcnt = nseg;
	if (sizeof (bus_addr_t) > 4) {
		for (segcnt = 0; segcnt < nseg; segcnt++) {
			uint64_t addr = dm_segs[segcnt].ds_addr;
			if (addr >= 0x100000000LL) {
				break;
			}
		}
	}
	if (segcnt != nseg) {
		/* At least one segment above 4GB: use 64-bit (CTIO3) form. */
		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO3;
		seglim = ISP_RQDSEG_T3;
		ds64 = &cto->rsp.m0.u.ct_dataseg64[0];
		ds = NULL;
	} else {
		seglim = ISP_RQDSEG_T2;
		ds64 = NULL;
		ds = &cto->rsp.m0.u.ct_dataseg[0];
	}
	cto->ct_seg_count = 0;

	/*
	 * Set up the CTIO2 data segments.
2266 */ 2267 for (segcnt = 0; cto->ct_seg_count < seglim && segcnt < nseg; 2268 cto->ct_seg_count++, segcnt++) { 2269 if (ds64) { 2270 ds64->ds_basehi = 2271 ((uint64_t) (dm_segs[segcnt].ds_addr) >> 32); 2272 ds64->ds_base = dm_segs[segcnt].ds_addr; 2273 ds64->ds_count = dm_segs[segcnt].ds_len; 2274 ds64++; 2275 } else { 2276 ds->ds_base = dm_segs[segcnt].ds_addr; 2277 ds->ds_count = dm_segs[segcnt].ds_len; 2278 ds++; 2279 } 2280 cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len; 2281#if __FreeBSD_version < 500000 2282 isp_prt(isp, ISP_LOGTDEBUG1, 2283 "isp_send_ctio2: ent0[%d]0x%llx:%llu", 2284 cto->ct_seg_count, (uint64_t)dm_segs[segcnt].ds_addr, 2285 (uint64_t)dm_segs[segcnt].ds_len); 2286#else 2287 isp_prt(isp, ISP_LOGTDEBUG1, 2288 "isp_send_ctio2: ent0[%d]0x%jx:%ju", 2289 cto->ct_seg_count, (uintmax_t)dm_segs[segcnt].ds_addr, 2290 (uintmax_t)dm_segs[segcnt].ds_len); 2291#endif 2292 } 2293 2294 while (segcnt < nseg) { 2295 uint32_t curip; 2296 int seg; 2297 ispcontreq_t local, *crq = &local, *qep; 2298 2299 qep = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); 2300 curip = nxti; 2301 nxti = ISP_NXT_QENTRY(curip, RQUEST_QUEUE_LEN(isp)); 2302 if (nxti == mp->optr) { 2303 ISP_UNLOCK(isp); 2304 isp_prt(isp, ISP_LOGTDEBUG0, 2305 "tdma_mkfc: request queue overflow"); 2306 mp->error = MUSHERR_NOQENTRIES; 2307 return; 2308 } 2309 cto->ct_header.rqs_entry_count++; 2310 MEMZERO((void *)crq, sizeof (*crq)); 2311 crq->req_header.rqs_entry_count = 1; 2312 if (cto->ct_header.rqs_entry_type == RQSTYPE_CTIO3) { 2313 seglim = ISP_CDSEG64; 2314 ds = NULL; 2315 ds64 = &((ispcontreq64_t *)crq)->req_dataseg[0]; 2316 crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT; 2317 } else { 2318 seglim = ISP_CDSEG; 2319 ds = &crq->req_dataseg[0]; 2320 ds64 = NULL; 2321 crq->req_header.rqs_entry_type = RQSTYPE_DATASEG; 2322 } 2323 for (seg = 0; segcnt < nseg && seg < seglim; 2324 segcnt++, seg++) { 2325 if (ds64) { 2326 ds64->ds_basehi = 2327 ((uint64_t) (dm_segs[segcnt].ds_addr) >> 
32); 2328 ds64->ds_base = dm_segs[segcnt].ds_addr; 2329 ds64->ds_count = dm_segs[segcnt].ds_len; 2330 ds64++; 2331 } else { 2332 ds->ds_base = dm_segs[segcnt].ds_addr; 2333 ds->ds_count = dm_segs[segcnt].ds_len; 2334 ds++; 2335 } 2336#if __FreeBSD_version < 500000 2337 isp_prt(isp, ISP_LOGTDEBUG1, 2338 "isp_send_ctio2: ent%d[%d]%llx:%llu", 2339 cto->ct_header.rqs_entry_count-1, seg, 2340 (uint64_t)dm_segs[segcnt].ds_addr, 2341 (uint64_t)dm_segs[segcnt].ds_len); 2342#else 2343 isp_prt(isp, ISP_LOGTDEBUG1, 2344 "isp_send_ctio2: ent%d[%d]%jx:%ju", 2345 cto->ct_header.rqs_entry_count-1, seg, 2346 (uintmax_t)dm_segs[segcnt].ds_addr, 2347 (uintmax_t)dm_segs[segcnt].ds_len); 2348#endif 2349 cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len; 2350 cto->ct_seg_count++; 2351 } 2352 MEMORYBARRIER(isp, SYNC_REQUEST, curip, QENTRY_LEN); 2353 isp_put_cont_req(isp, crq, qep); 2354 ISP_TDQE(isp, "cont entry", curi, qep); 2355 } 2356 2357 /* 2358 * No do final twiddling for the CTIO itself. 2359 */ 2360 cto->ct_header.rqs_seqno = 1; 2361 isp_prt(isp, ISP_LOGTDEBUG1, 2362 "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts 0x%x resid %d", 2363 cto->ct_rxid, csio->ccb_h.target_lun, (int) cto->ct_iid, 2364 cto->ct_flags, cto->ct_status, cto->rsp.m1.ct_scsi_status, 2365 cto->ct_resid); 2366 if (FCPARAM(isp)->isp_2klogin) { 2367 isp_put_ctio2e(isp, (ct2e_entry_t *)cto, (ct2e_entry_t *)qe); 2368 } else { 2369 isp_put_ctio2(isp, cto, qe); 2370 } 2371 ISP_TDQE(isp, "last dma2_tgt_fc", curi, qe); 2372 *mp->nxtip = nxti; 2373} 2374#endif 2375 2376static void dma_2400(void *, bus_dma_segment_t *, int, int); 2377static void dma2_a64(void *, bus_dma_segment_t *, int, int); 2378static void dma2(void *, bus_dma_segment_t *, int, int); 2379 2380static void 2381dma_2400(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 2382{ 2383 mush_t *mp; 2384 ispsoftc_t *isp; 2385 struct ccb_scsiio *csio; 2386 struct isp_pcisoftc *pcs; 2387 bus_dmamap_t *dp; 2388 bus_dma_segment_t *eseg; 2389 ispreqt7_t 
*rq; 2390 int seglim, datalen; 2391 uint32_t nxti; 2392 2393 mp = (mush_t *) arg; 2394 if (error) { 2395 mp->error = error; 2396 return; 2397 } 2398 2399 if (nseg < 1) { 2400 isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg); 2401 mp->error = EFAULT; 2402 return; 2403 } 2404 2405 csio = mp->cmd_token; 2406 isp = mp->isp; 2407 rq = mp->rq; 2408 pcs = (struct isp_pcisoftc *)mp->isp; 2409 dp = &pcs->dmaps[isp_handle_index(rq->req_handle)]; 2410 nxti = *mp->nxtip; 2411 2412 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2413 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD); 2414 } else { 2415 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE); 2416 } 2417 datalen = XS_XFRLEN(csio); 2418 2419 /* 2420 * We're passed an initial partially filled in entry that 2421 * has most fields filled in except for data transfer 2422 * related values. 2423 * 2424 * Our job is to fill in the initial request queue entry and 2425 * then to start allocating and filling in continuation entries 2426 * until we've covered the entire transfer. 
2427 */ 2428 2429 rq->req_header.rqs_entry_type = RQSTYPE_T7RQS; 2430 rq->req_dl = datalen; 2431 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2432 rq->req_alen_datadir = 0x2; 2433 } else { 2434 rq->req_alen_datadir = 0x1; 2435 } 2436 2437 eseg = dm_segs + nseg; 2438 2439 rq->req_dataseg.ds_base = DMA_LO32(dm_segs->ds_addr); 2440 rq->req_dataseg.ds_basehi = DMA_HI32(dm_segs->ds_addr); 2441 rq->req_dataseg.ds_count = dm_segs->ds_len; 2442 2443 datalen -= dm_segs->ds_len; 2444 2445 dm_segs++; 2446 rq->req_seg_count++; 2447 2448 while (datalen > 0 && dm_segs != eseg) { 2449 uint32_t onxti; 2450 ispcontreq64_t local, *crq = &local, *cqe; 2451 2452 cqe = (ispcontreq64_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); 2453 onxti = nxti; 2454 nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp)); 2455 if (nxti == mp->optr) { 2456 isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++"); 2457 mp->error = MUSHERR_NOQENTRIES; 2458 return; 2459 } 2460 rq->req_header.rqs_entry_count++; 2461 MEMZERO((void *)crq, sizeof (*crq)); 2462 crq->req_header.rqs_entry_count = 1; 2463 crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT; 2464 2465 seglim = 0; 2466 while (datalen > 0 && seglim < ISP_CDSEG64 && dm_segs != eseg) { 2467 crq->req_dataseg[seglim].ds_base = 2468 DMA_LO32(dm_segs->ds_addr); 2469 crq->req_dataseg[seglim].ds_basehi = 2470 DMA_HI32(dm_segs->ds_addr); 2471 crq->req_dataseg[seglim].ds_count = 2472 dm_segs->ds_len; 2473 rq->req_seg_count++; 2474 dm_segs++; 2475 seglim++; 2476 datalen -= dm_segs->ds_len; 2477 } 2478 if (isp->isp_dblev & ISP_LOGDEBUG1) { 2479 isp_print_bytes(isp, "Continuation", QENTRY_LEN, crq); 2480 } 2481 isp_put_cont64_req(isp, crq, cqe); 2482 MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN); 2483 } 2484 *mp->nxtip = nxti; 2485} 2486 2487static void 2488dma2_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 2489{ 2490 mush_t *mp; 2491 ispsoftc_t *isp; 2492 struct ccb_scsiio *csio; 2493 struct isp_pcisoftc *pcs; 2494 bus_dmamap_t *dp; 
2495 bus_dma_segment_t *eseg; 2496 ispreq64_t *rq; 2497 int seglim, datalen; 2498 uint32_t nxti; 2499 2500 mp = (mush_t *) arg; 2501 if (error) { 2502 mp->error = error; 2503 return; 2504 } 2505 2506 if (nseg < 1) { 2507 isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg); 2508 mp->error = EFAULT; 2509 return; 2510 } 2511 csio = mp->cmd_token; 2512 isp = mp->isp; 2513 rq = mp->rq; 2514 pcs = (struct isp_pcisoftc *)mp->isp; 2515 dp = &pcs->dmaps[isp_handle_index(rq->req_handle)]; 2516 nxti = *mp->nxtip; 2517 2518 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2519 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD); 2520 } else { 2521 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE); 2522 } 2523 datalen = XS_XFRLEN(csio); 2524 2525 /* 2526 * We're passed an initial partially filled in entry that 2527 * has most fields filled in except for data transfer 2528 * related values. 2529 * 2530 * Our job is to fill in the initial request queue entry and 2531 * then to start allocating and filling in continuation entries 2532 * until we've covered the entire transfer. 
2533 */ 2534 2535 if (IS_FC(isp)) { 2536 rq->req_header.rqs_entry_type = RQSTYPE_T3RQS; 2537 seglim = ISP_RQDSEG_T3; 2538 ((ispreqt3_t *)rq)->req_totalcnt = datalen; 2539 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2540 ((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_IN; 2541 } else { 2542 ((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_OUT; 2543 } 2544 } else { 2545 rq->req_header.rqs_entry_type = RQSTYPE_A64; 2546 if (csio->cdb_len > 12) { 2547 seglim = 0; 2548 } else { 2549 seglim = ISP_RQDSEG_A64; 2550 } 2551 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2552 rq->req_flags |= REQFLAG_DATA_IN; 2553 } else { 2554 rq->req_flags |= REQFLAG_DATA_OUT; 2555 } 2556 } 2557 2558 eseg = dm_segs + nseg; 2559 2560 while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) { 2561 if (IS_FC(isp)) { 2562 ispreqt3_t *rq3 = (ispreqt3_t *)rq; 2563 rq3->req_dataseg[rq3->req_seg_count].ds_base = 2564 DMA_LO32(dm_segs->ds_addr); 2565 rq3->req_dataseg[rq3->req_seg_count].ds_basehi = 2566 DMA_HI32(dm_segs->ds_addr); 2567 rq3->req_dataseg[rq3->req_seg_count].ds_count = 2568 dm_segs->ds_len; 2569 } else { 2570 rq->req_dataseg[rq->req_seg_count].ds_base = 2571 DMA_LO32(dm_segs->ds_addr); 2572 rq->req_dataseg[rq->req_seg_count].ds_basehi = 2573 DMA_HI32(dm_segs->ds_addr); 2574 rq->req_dataseg[rq->req_seg_count].ds_count = 2575 dm_segs->ds_len; 2576 } 2577 datalen -= dm_segs->ds_len; 2578 rq->req_seg_count++; 2579 dm_segs++; 2580 } 2581 2582 while (datalen > 0 && dm_segs != eseg) { 2583 uint32_t onxti; 2584 ispcontreq64_t local, *crq = &local, *cqe; 2585 2586 cqe = (ispcontreq64_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); 2587 onxti = nxti; 2588 nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp)); 2589 if (nxti == mp->optr) { 2590 isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++"); 2591 mp->error = MUSHERR_NOQENTRIES; 2592 return; 2593 } 2594 rq->req_header.rqs_entry_count++; 2595 MEMZERO((void *)crq, sizeof (*crq)); 2596 crq->req_header.rqs_entry_count = 
1; 2597 crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT; 2598 2599 seglim = 0; 2600 while (datalen > 0 && seglim < ISP_CDSEG64 && dm_segs != eseg) { 2601 crq->req_dataseg[seglim].ds_base = 2602 DMA_LO32(dm_segs->ds_addr); 2603 crq->req_dataseg[seglim].ds_basehi = 2604 DMA_HI32(dm_segs->ds_addr); 2605 crq->req_dataseg[seglim].ds_count = 2606 dm_segs->ds_len; 2607 rq->req_seg_count++; 2608 dm_segs++; 2609 seglim++; 2610 datalen -= dm_segs->ds_len; 2611 } 2612 if (isp->isp_dblev & ISP_LOGDEBUG1) { 2613 isp_print_bytes(isp, "Continuation", QENTRY_LEN, crq); 2614 } 2615 isp_put_cont64_req(isp, crq, cqe); 2616 MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN); 2617 } 2618 *mp->nxtip = nxti; 2619} 2620 2621static void 2622dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 2623{ 2624 mush_t *mp; 2625 ispsoftc_t *isp; 2626 struct ccb_scsiio *csio; 2627 struct isp_pcisoftc *pcs; 2628 bus_dmamap_t *dp; 2629 bus_dma_segment_t *eseg; 2630 ispreq_t *rq; 2631 int seglim, datalen; 2632 uint32_t nxti; 2633 2634 mp = (mush_t *) arg; 2635 if (error) { 2636 mp->error = error; 2637 return; 2638 } 2639 2640 if (nseg < 1) { 2641 isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg); 2642 mp->error = EFAULT; 2643 return; 2644 } 2645 csio = mp->cmd_token; 2646 isp = mp->isp; 2647 rq = mp->rq; 2648 pcs = (struct isp_pcisoftc *)mp->isp; 2649 dp = &pcs->dmaps[isp_handle_index(rq->req_handle)]; 2650 nxti = *mp->nxtip; 2651 2652 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2653 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD); 2654 } else { 2655 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE); 2656 } 2657 2658 datalen = XS_XFRLEN(csio); 2659 2660 /* 2661 * We're passed an initial partially filled in entry that 2662 * has most fields filled in except for data transfer 2663 * related values. 
2664 * 2665 * Our job is to fill in the initial request queue entry and 2666 * then to start allocating and filling in continuation entries 2667 * until we've covered the entire transfer. 2668 */ 2669 2670 if (IS_FC(isp)) { 2671 seglim = ISP_RQDSEG_T2; 2672 ((ispreqt2_t *)rq)->req_totalcnt = datalen; 2673 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2674 ((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_IN; 2675 } else { 2676 ((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_OUT; 2677 } 2678 } else { 2679 if (csio->cdb_len > 12) { 2680 seglim = 0; 2681 } else { 2682 seglim = ISP_RQDSEG; 2683 } 2684 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2685 rq->req_flags |= REQFLAG_DATA_IN; 2686 } else { 2687 rq->req_flags |= REQFLAG_DATA_OUT; 2688 } 2689 } 2690 2691 eseg = dm_segs + nseg; 2692 2693 while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) { 2694 if (IS_FC(isp)) { 2695 ispreqt2_t *rq2 = (ispreqt2_t *)rq; 2696 rq2->req_dataseg[rq2->req_seg_count].ds_base = 2697 DMA_LO32(dm_segs->ds_addr); 2698 rq2->req_dataseg[rq2->req_seg_count].ds_count = 2699 dm_segs->ds_len; 2700 } else { 2701 rq->req_dataseg[rq->req_seg_count].ds_base = 2702 DMA_LO32(dm_segs->ds_addr); 2703 rq->req_dataseg[rq->req_seg_count].ds_count = 2704 dm_segs->ds_len; 2705 } 2706 datalen -= dm_segs->ds_len; 2707 rq->req_seg_count++; 2708 dm_segs++; 2709 } 2710 2711 while (datalen > 0 && dm_segs != eseg) { 2712 uint32_t onxti; 2713 ispcontreq_t local, *crq = &local, *cqe; 2714 2715 cqe = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); 2716 onxti = nxti; 2717 nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp)); 2718 if (nxti == mp->optr) { 2719 isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++"); 2720 mp->error = MUSHERR_NOQENTRIES; 2721 return; 2722 } 2723 rq->req_header.rqs_entry_count++; 2724 MEMZERO((void *)crq, sizeof (*crq)); 2725 crq->req_header.rqs_entry_count = 1; 2726 crq->req_header.rqs_entry_type = RQSTYPE_DATASEG; 2727 2728 seglim = 0; 2729 while 
(datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) { 2730 crq->req_dataseg[seglim].ds_base = 2731 DMA_LO32(dm_segs->ds_addr); 2732 crq->req_dataseg[seglim].ds_count = 2733 dm_segs->ds_len; 2734 rq->req_seg_count++; 2735 dm_segs++; 2736 seglim++; 2737 datalen -= dm_segs->ds_len; 2738 } 2739 if (isp->isp_dblev & ISP_LOGDEBUG1) { 2740 isp_print_bytes(isp, "Continuation", QENTRY_LEN, crq); 2741 } 2742 isp_put_cont_req(isp, crq, cqe); 2743 MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN); 2744 } 2745 *mp->nxtip = nxti; 2746} 2747 2748/* 2749 * We enter with ISP_LOCK held 2750 */ 2751static int 2752isp_pci_dmasetup(ispsoftc_t *isp, struct ccb_scsiio *csio, ispreq_t *rq, 2753 uint32_t *nxtip, uint32_t optr) 2754{ 2755 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp; 2756 ispreq_t *qep; 2757 bus_dmamap_t *dp = NULL; 2758 mush_t mush, *mp; 2759 void (*eptr)(void *, bus_dma_segment_t *, int, int); 2760 2761 qep = (ispreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, isp->isp_reqidx); 2762#ifdef ISP_TARGET_MODE 2763 if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) { 2764 if (IS_FC(isp)) { 2765 eptr = tdma_mkfc; 2766 } else { 2767 eptr = tdma_mk; 2768 } 2769 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE || 2770 (csio->dxfer_len == 0)) { 2771 mp = &mush; 2772 mp->isp = isp; 2773 mp->cmd_token = csio; 2774 mp->rq = rq; /* really a ct_entry_t or ct2_entry_t */ 2775 mp->nxtip = nxtip; 2776 mp->optr = optr; 2777 mp->error = 0; 2778 ISPLOCK_2_CAMLOCK(isp); 2779 (*eptr)(mp, NULL, 0, 0); 2780 CAMLOCK_2_ISPLOCK(isp); 2781 goto mbxsync; 2782 } 2783 } else 2784#endif 2785 if (IS_24XX(isp)) { 2786 eptr = dma_2400; 2787 } else if (sizeof (bus_addr_t) > 4) { 2788 eptr = dma2_a64; 2789 } else { 2790 eptr = dma2; 2791 } 2792 2793 2794 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE || 2795 (csio->dxfer_len == 0)) { 2796 rq->req_seg_count = 1; 2797 goto mbxsync; 2798 } 2799 2800 /* 2801 * Do a virtual grapevine step to collect info for 2802 * the callback dma allocation 
that we have to use... 2803 */ 2804 mp = &mush; 2805 mp->isp = isp; 2806 mp->cmd_token = csio; 2807 mp->rq = rq; 2808 mp->nxtip = nxtip; 2809 mp->optr = optr; 2810 mp->error = 0; 2811 2812 ISPLOCK_2_CAMLOCK(isp); 2813 if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) { 2814 if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) { 2815 int error, s; 2816 dp = &pcs->dmaps[isp_handle_index(rq->req_handle)]; 2817 s = splsoftvm(); 2818 error = bus_dmamap_load(pcs->dmat, *dp, 2819 csio->data_ptr, csio->dxfer_len, eptr, mp, 0); 2820 if (error == EINPROGRESS) { 2821 bus_dmamap_unload(pcs->dmat, *dp); 2822 mp->error = EINVAL; 2823 isp_prt(isp, ISP_LOGERR, 2824 "deferred dma allocation not supported"); 2825 } else if (error && mp->error == 0) { 2826#ifdef DIAGNOSTIC 2827 isp_prt(isp, ISP_LOGERR, 2828 "error %d in dma mapping code", error); 2829#endif 2830 mp->error = error; 2831 } 2832 splx(s); 2833 } else { 2834 /* Pointer to physical buffer */ 2835 struct bus_dma_segment seg; 2836 seg.ds_addr = (bus_addr_t)(vm_offset_t)csio->data_ptr; 2837 seg.ds_len = csio->dxfer_len; 2838 (*eptr)(mp, &seg, 1, 0); 2839 } 2840 } else { 2841 struct bus_dma_segment *segs; 2842 2843 if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) { 2844 isp_prt(isp, ISP_LOGERR, 2845 "Physical segment pointers unsupported"); 2846 mp->error = EINVAL; 2847 } else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) { 2848 isp_prt(isp, ISP_LOGERR, 2849 "Virtual segment addresses unsupported"); 2850 mp->error = EINVAL; 2851 } else { 2852 /* Just use the segments provided */ 2853 segs = (struct bus_dma_segment *) csio->data_ptr; 2854 (*eptr)(mp, segs, csio->sglist_cnt, 0); 2855 } 2856 } 2857 CAMLOCK_2_ISPLOCK(isp); 2858 if (mp->error) { 2859 int retval = CMD_COMPLETE; 2860 if (mp->error == MUSHERR_NOQENTRIES) { 2861 retval = CMD_EAGAIN; 2862 } else if (mp->error == EFBIG) { 2863 XS_SETERR(csio, CAM_REQ_TOO_BIG); 2864 } else if (mp->error == EINVAL) { 2865 XS_SETERR(csio, CAM_REQ_INVALID); 2866 } else { 2867 XS_SETERR(csio, 
CAM_UNREC_HBA_ERROR); 2868 } 2869 return (retval); 2870 } 2871mbxsync: 2872 if (isp->isp_dblev & ISP_LOGDEBUG1) { 2873 isp_print_bytes(isp, "Request Queue Entry", QENTRY_LEN, rq); 2874 } 2875 switch (rq->req_header.rqs_entry_type) { 2876 case RQSTYPE_REQUEST: 2877 isp_put_request(isp, rq, qep); 2878 break; 2879 case RQSTYPE_CMDONLY: 2880 isp_put_extended_request(isp, (ispextreq_t *)rq, 2881 (ispextreq_t *)qep); 2882 break; 2883 case RQSTYPE_T2RQS: 2884 isp_put_request_t2(isp, (ispreqt2_t *) rq, (ispreqt2_t *) qep); 2885 break; 2886 case RQSTYPE_A64: 2887 case RQSTYPE_T3RQS: 2888 isp_put_request_t3(isp, (ispreqt3_t *) rq, (ispreqt3_t *) qep); 2889 break; 2890 case RQSTYPE_T7RQS: 2891 isp_put_request_t7(isp, (ispreqt7_t *) rq, (ispreqt7_t *) qep); 2892 break; 2893 } 2894 return (CMD_QUEUED); 2895} 2896 2897static void 2898isp_pci_dmateardown(ispsoftc_t *isp, XS_T *xs, uint32_t handle) 2899{ 2900 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp; 2901 bus_dmamap_t *dp = &pcs->dmaps[isp_handle_index(handle)]; 2902 if ((xs->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2903 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTREAD); 2904 } else { 2905 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTWRITE); 2906 } 2907 bus_dmamap_unload(pcs->dmat, *dp); 2908} 2909 2910 2911static void 2912isp_pci_reset0(ispsoftc_t *isp) 2913{ 2914 ISP_DISABLE_INTS(isp); 2915} 2916 2917static void 2918isp_pci_reset1(ispsoftc_t *isp) 2919{ 2920 if (!IS_24XX(isp)) { 2921 /* Make sure the BIOS is disabled */ 2922 isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS); 2923 } 2924 /* and enable interrupts */ 2925 ISP_ENABLE_INTS(isp); 2926} 2927 2928static void 2929isp_pci_dumpregs(ispsoftc_t *isp, const char *msg) 2930{ 2931 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp; 2932 if (msg) 2933 printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg); 2934 else 2935 printf("%s:\n", device_get_nameunit(isp->isp_dev)); 2936 if (IS_SCSI(isp)) 2937 printf(" biu_conf1=%x", ISP_READ(isp, BIU_CONF1)); 
2938 else 2939 printf(" biu_csr=%x", ISP_READ(isp, BIU2100_CSR)); 2940 printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR), 2941 ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA)); 2942 printf("risc_hccr=%x\n", ISP_READ(isp, HCCR)); 2943 2944 2945 if (IS_SCSI(isp)) { 2946 ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE); 2947 printf(" cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n", 2948 ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS), 2949 ISP_READ(isp, CDMA_FIFO_STS)); 2950 printf(" ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n", 2951 ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS), 2952 ISP_READ(isp, DDMA_FIFO_STS)); 2953 printf(" sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n", 2954 ISP_READ(isp, SXP_INTERRUPT), 2955 ISP_READ(isp, SXP_GROSS_ERR), 2956 ISP_READ(isp, SXP_PINS_CTRL)); 2957 ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE); 2958 } 2959 printf(" mbox regs: %x %x %x %x %x\n", 2960 ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1), 2961 ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3), 2962 ISP_READ(isp, OUTMAILBOX4)); 2963 printf(" PCI Status Command/Status=%x\n", 2964 pci_read_config(pcs->pci_dev, PCIR_COMMAND, 1)); 2965} 2966