isp_pci.c revision 134895
1/*- 2 * PCI specific probe and attach routines for Qlogic ISP SCSI adapters. 3 * FreeBSD Version. 4 * 5 * Copyright (c) 1997, 1998, 1999, 2000, 2001 by Matthew Jacob 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice immediately at the beginning of the file, without modification, 12 * this list of conditions, and the following disclaimer. 13 * 2. The name of the author may not be used to endorse or promote products 14 * derived from this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 20 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 */ 28 29#include <sys/cdefs.h> 30__FBSDID("$FreeBSD: head/sys/dev/isp/isp_pci.c 134895 2004-09-07 08:04:09Z mjacob $"); 31 32#include <sys/param.h> 33#include <sys/systm.h> 34#include <sys/kernel.h> 35#include <sys/module.h> 36#include <sys/bus.h> 37 38#include <dev/pci/pcireg.h> 39#include <dev/pci/pcivar.h> 40 41#include <machine/bus_memio.h> 42#include <machine/bus_pio.h> 43#include <machine/bus.h> 44#include <machine/resource.h> 45#include <sys/rman.h> 46#include <sys/malloc.h> 47 48#ifdef ISP_TARGET_MODE 49#ifdef PAE 50#error "PAE and ISP_TARGET_MODE not supported yet" 51#endif 52#endif 53 54#include <dev/isp/isp_freebsd.h> 55 56static u_int16_t isp_pci_rd_reg(struct ispsoftc *, int); 57static void isp_pci_wr_reg(struct ispsoftc *, int, u_int16_t); 58static u_int16_t isp_pci_rd_reg_1080(struct ispsoftc *, int); 59static void isp_pci_wr_reg_1080(struct ispsoftc *, int, u_int16_t); 60static int 61isp_pci_rd_isr(struct ispsoftc *, u_int16_t *, u_int16_t *, u_int16_t *); 62static int 63isp_pci_rd_isr_2300(struct ispsoftc *, u_int16_t *, u_int16_t *, u_int16_t *); 64static int isp_pci_mbxdma(struct ispsoftc *); 65static int 66isp_pci_dmasetup(struct ispsoftc *, XS_T *, ispreq_t *, u_int16_t *, u_int16_t); 67static void 68isp_pci_dmateardown(struct ispsoftc *, XS_T *, u_int16_t); 69 70static void isp_pci_reset1(struct ispsoftc *); 71static void isp_pci_dumpregs(struct ispsoftc *, const char *); 72 73static struct ispmdvec mdvec = { 74 isp_pci_rd_isr, 75 isp_pci_rd_reg, 76 isp_pci_wr_reg, 77 isp_pci_mbxdma, 78 isp_pci_dmasetup, 79 isp_pci_dmateardown, 80 NULL, 81 isp_pci_reset1, 82 isp_pci_dumpregs, 83 NULL, 84 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 85}; 86 87static struct ispmdvec mdvec_1080 = { 88 isp_pci_rd_isr, 89 isp_pci_rd_reg_1080, 90 isp_pci_wr_reg_1080, 91 isp_pci_mbxdma, 92 isp_pci_dmasetup, 93 isp_pci_dmateardown, 94 NULL, 95 isp_pci_reset1, 96 isp_pci_dumpregs, 97 NULL, 98 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 99}; 100 101static struct ispmdvec mdvec_12160 = { 102 isp_pci_rd_isr, 103 isp_pci_rd_reg_1080, 104 isp_pci_wr_reg_1080, 105 isp_pci_mbxdma, 106 isp_pci_dmasetup, 107 isp_pci_dmateardown, 108 NULL, 109 isp_pci_reset1, 110 isp_pci_dumpregs, 111 NULL, 112 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 113}; 114 115static struct ispmdvec mdvec_2100 = { 116 isp_pci_rd_isr, 117 isp_pci_rd_reg, 118 isp_pci_wr_reg, 119 isp_pci_mbxdma, 120 isp_pci_dmasetup, 121 isp_pci_dmateardown, 122 NULL, 123 isp_pci_reset1, 124 isp_pci_dumpregs 125}; 126 127static struct ispmdvec mdvec_2200 = { 128 isp_pci_rd_isr, 129 isp_pci_rd_reg, 130 isp_pci_wr_reg, 131 isp_pci_mbxdma, 132 isp_pci_dmasetup, 133 isp_pci_dmateardown, 134 NULL, 135 isp_pci_reset1, 136 isp_pci_dumpregs 137}; 138 139static struct ispmdvec mdvec_2300 = { 140 isp_pci_rd_isr_2300, 141 isp_pci_rd_reg, 142 isp_pci_wr_reg, 143 isp_pci_mbxdma, 144 isp_pci_dmasetup, 145 isp_pci_dmateardown, 146 NULL, 147 isp_pci_reset1, 148 isp_pci_dumpregs 149}; 150 151#ifndef PCIM_CMD_INVEN 152#define PCIM_CMD_INVEN 0x10 153#endif 154#ifndef PCIM_CMD_BUSMASTEREN 155#define PCIM_CMD_BUSMASTEREN 0x0004 156#endif 157#ifndef PCIM_CMD_PERRESPEN 158#define PCIM_CMD_PERRESPEN 0x0040 159#endif 160#ifndef PCIM_CMD_SEREN 161#define PCIM_CMD_SEREN 0x0100 162#endif 163 164#ifndef PCIR_COMMAND 165#define PCIR_COMMAND 0x04 166#endif 167 168#ifndef PCIR_CACHELNSZ 169#define PCIR_CACHELNSZ 0x0c 170#endif 171 172#ifndef PCIR_LATTIMER 173#define PCIR_LATTIMER 0x0d 174#endif 175 176#ifndef PCIR_ROMADDR 177#define PCIR_ROMADDR 0x30 178#endif 179 180#ifndef PCI_VENDOR_QLOGIC 181#define PCI_VENDOR_QLOGIC 0x1077 182#endif 183 184#ifndef PCI_PRODUCT_QLOGIC_ISP1020 185#define PCI_PRODUCT_QLOGIC_ISP1020 0x1020 186#endif 187 188#ifndef PCI_PRODUCT_QLOGIC_ISP1080 189#define PCI_PRODUCT_QLOGIC_ISP1080 0x1080 190#endif 191 192#ifndef PCI_PRODUCT_QLOGIC_ISP10160 193#define PCI_PRODUCT_QLOGIC_ISP10160 0x1016 194#endif 195 196#ifndef PCI_PRODUCT_QLOGIC_ISP12160 197#define PCI_PRODUCT_QLOGIC_ISP12160 0x1216 198#endif 199 200#ifndef PCI_PRODUCT_QLOGIC_ISP1240 201#define PCI_PRODUCT_QLOGIC_ISP1240 0x1240 202#endif 203 204#ifndef PCI_PRODUCT_QLOGIC_ISP1280 205#define PCI_PRODUCT_QLOGIC_ISP1280 0x1280 206#endif 207 208#ifndef PCI_PRODUCT_QLOGIC_ISP2100 209#define PCI_PRODUCT_QLOGIC_ISP2100 0x2100 210#endif 211 212#ifndef PCI_PRODUCT_QLOGIC_ISP2200 213#define PCI_PRODUCT_QLOGIC_ISP2200 0x2200 214#endif 215 216#ifndef PCI_PRODUCT_QLOGIC_ISP2300 217#define PCI_PRODUCT_QLOGIC_ISP2300 0x2300 218#endif 219 220#ifndef PCI_PRODUCT_QLOGIC_ISP2312 221#define PCI_PRODUCT_QLOGIC_ISP2312 0x2312 222#endif 223 224#define PCI_QLOGIC_ISP1020 \ 225 ((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC) 226 227#define PCI_QLOGIC_ISP1080 \ 228 ((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC) 229 230#define PCI_QLOGIC_ISP10160 \ 231 ((PCI_PRODUCT_QLOGIC_ISP10160 << 16) | PCI_VENDOR_QLOGIC) 232 233#define PCI_QLOGIC_ISP12160 \ 234 ((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC) 235 236#define PCI_QLOGIC_ISP1240 \ 237 ((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC) 238 239#define PCI_QLOGIC_ISP1280 \ 240 ((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC) 241 242#define PCI_QLOGIC_ISP2100 \ 243 ((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC) 244 245#define PCI_QLOGIC_ISP2200 \ 246 ((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC) 247 248#define PCI_QLOGIC_ISP2300 \ 249 ((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC) 250 251#define PCI_QLOGIC_ISP2312 \ 252 ((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC) 253 254/* 255 * Odd case for some AMI raid cards... We need to *not* attach to this. 256 */ 257#define AMI_RAID_SUBVENDOR_ID 0x101e 258 259#define IO_MAP_REG 0x10 260#define MEM_MAP_REG 0x14 261 262#define PCI_DFLT_LTNCY 0x40 263#define PCI_DFLT_LNSZ 0x10 264 265static int isp_pci_probe (device_t); 266static int isp_pci_attach (device_t); 267 268 269struct isp_pcisoftc { 270 struct ispsoftc pci_isp; 271 device_t pci_dev; 272 struct resource * pci_reg; 273 bus_space_tag_t pci_st; 274 bus_space_handle_t pci_sh; 275 void * ih; 276 int16_t pci_poff[_NREG_BLKS]; 277 bus_dma_tag_t dmat; 278 bus_dmamap_t *dmaps; 279}; 280extern ispfwfunc *isp_get_firmware_p; 281 282static device_method_t isp_pci_methods[] = { 283 /* Device interface */ 284 DEVMETHOD(device_probe, isp_pci_probe), 285 DEVMETHOD(device_attach, isp_pci_attach), 286 { 0, 0 } 287}; 288static void isp_pci_intr(void *); 289 290static driver_t isp_pci_driver = { 291 "isp", isp_pci_methods, sizeof (struct isp_pcisoftc) 292}; 293static devclass_t isp_devclass; 294DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0); 295 296static int 297isp_pci_probe(device_t dev) 298{ 299 switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) { 300 case PCI_QLOGIC_ISP1020: 301 device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter"); 302 break; 303 case PCI_QLOGIC_ISP1080: 304 device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter"); 305 break; 306 case PCI_QLOGIC_ISP1240: 307 device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter"); 308 break; 309 case PCI_QLOGIC_ISP1280: 310 device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter"); 311 break; 312 case PCI_QLOGIC_ISP10160: 313 device_set_desc(dev, "Qlogic ISP 10160 PCI SCSI Adapter"); 314 break; 315 case PCI_QLOGIC_ISP12160: 316 if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) { 317 return (ENXIO); 318 } 319 device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter"); 320 break; 321 case PCI_QLOGIC_ISP2100: 322 device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter"); 323 break; 324 case PCI_QLOGIC_ISP2200: 325 device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter"); 326 break; 327 case PCI_QLOGIC_ISP2300: 328 device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter"); 329 break; 330 case PCI_QLOGIC_ISP2312: 331 device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter"); 332 break; 333 default: 334 return (ENXIO); 335 } 336 if (isp_announced == 0 && bootverbose) { 337 printf("Qlogic ISP Driver, FreeBSD Version %d.%d, " 338 "Core Version %d.%d\n", 339 ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR, 340 ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR); 341 isp_announced++; 342 } 343 /* 344 * XXXX: Here is where we might load the f/w module 345 * XXXX: (or increase a reference count to it). 346 */ 347 return (0); 348} 349 350static int 351isp_pci_attach(device_t dev) 352{ 353 struct resource *regs, *irq; 354 int tval, rtp, rgd, iqd, m1, m2, isp_debug, role; 355 u_int32_t data, cmd, linesz, psize, basetype; 356 struct isp_pcisoftc *pcs; 357 struct ispsoftc *isp = NULL; 358 struct ispmdvec *mdvp; 359 const char *sptr; 360 int locksetup = 0; 361 362 /* 363 * Figure out if we're supposed to skip this one. 364 */ 365 366 tval = 0; 367 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 368 "disable", &tval) == 0 && tval) { 369 device_printf(dev, "device is disabled\n"); 370 /* but return 0 so the !$)$)*!$*) unit isn't reused */ 371 return (0); 372 } 373 374 role = -1; 375 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 376 "role", &role) == 0 && role != -1) { 377 role &= (ISP_ROLE_INITIATOR|ISP_ROLE_TARGET); 378 device_printf(dev, "setting role to 0x%x\n", role); 379 } else { 380#ifdef ISP_TARGET_MODE 381 role = ISP_ROLE_TARGET; 382#else 383 role = ISP_DEFAULT_ROLES; 384#endif 385 } 386 387 pcs = malloc(sizeof (struct isp_pcisoftc), M_DEVBUF, M_NOWAIT | M_ZERO); 388 if (pcs == NULL) { 389 device_printf(dev, "cannot allocate softc\n"); 390 return (ENOMEM); 391 } 392 393 /* 394 * Figure out which we should try first - memory mapping or i/o mapping? 395 */ 396#ifdef __alpha__ 397 m1 = PCIM_CMD_MEMEN; 398 m2 = PCIM_CMD_PORTEN; 399#else 400 m1 = PCIM_CMD_PORTEN; 401 m2 = PCIM_CMD_MEMEN; 402#endif 403 404 tval = 0; 405 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 406 "prefer_iomap", &tval) == 0 && tval != 0) { 407 m1 = PCIM_CMD_PORTEN; 408 m2 = PCIM_CMD_MEMEN; 409 } 410 tval = 0; 411 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 412 "prefer_memmap", &tval) == 0 && tval != 0) { 413 m1 = PCIM_CMD_MEMEN; 414 m2 = PCIM_CMD_PORTEN; 415 } 416 417 linesz = PCI_DFLT_LNSZ; 418 irq = regs = NULL; 419 rgd = rtp = iqd = 0; 420 421 cmd = pci_read_config(dev, PCIR_COMMAND, 1); 422 if (cmd & m1) { 423 rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT; 424 rgd = (m1 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG; 425 regs = bus_alloc_resource_any(dev, rtp, &rgd, RF_ACTIVE); 426 } 427 if (regs == NULL && (cmd & m2)) { 428 rtp = (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT; 429 rgd = (m2 == PCIM_CMD_MEMEN)? MEM_MAP_REG : IO_MAP_REG; 430 regs = bus_alloc_resource_any(dev, rtp, &rgd, RF_ACTIVE); 431 } 432 if (regs == NULL) { 433 device_printf(dev, "unable to map any ports\n"); 434 goto bad; 435 } 436 if (bootverbose) 437 device_printf(dev, "using %s space register mapping\n", 438 (rgd == IO_MAP_REG)? "I/O" : "Memory"); 439 pcs->pci_dev = dev; 440 pcs->pci_reg = regs; 441 pcs->pci_st = rman_get_bustag(regs); 442 pcs->pci_sh = rman_get_bushandle(regs); 443 444 pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF; 445 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF; 446 pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF; 447 pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF; 448 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF; 449 mdvp = &mdvec; 450 basetype = ISP_HA_SCSI_UNKNOWN; 451 psize = sizeof (sdparam); 452 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1020) { 453 mdvp = &mdvec; 454 basetype = ISP_HA_SCSI_UNKNOWN; 455 psize = sizeof (sdparam); 456 } 457 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1080) { 458 mdvp = &mdvec_1080; 459 basetype = ISP_HA_SCSI_1080; 460 psize = sizeof (sdparam); 461 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 462 ISP1080_DMA_REGS_OFF; 463 } 464 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1240) { 465 mdvp = &mdvec_1080; 466 basetype = ISP_HA_SCSI_1240; 467 psize = 2 * sizeof (sdparam); 468 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 469 ISP1080_DMA_REGS_OFF; 470 } 471 if (pci_get_devid(dev) == PCI_QLOGIC_ISP1280) { 472 mdvp = &mdvec_1080; 473 basetype = ISP_HA_SCSI_1280; 474 psize = 2 * sizeof (sdparam); 475 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 476 ISP1080_DMA_REGS_OFF; 477 } 478 if (pci_get_devid(dev) == PCI_QLOGIC_ISP10160) { 479 mdvp = &mdvec_12160; 480 basetype = ISP_HA_SCSI_10160; 481 psize = sizeof (sdparam); 482 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 483 ISP1080_DMA_REGS_OFF; 484 } 485 if (pci_get_devid(dev) == PCI_QLOGIC_ISP12160) { 486 mdvp = &mdvec_12160; 487 basetype = ISP_HA_SCSI_12160; 488 psize = 2 * sizeof (sdparam); 489 pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = 490 ISP1080_DMA_REGS_OFF; 491 } 492 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2100) { 493 mdvp = &mdvec_2100; 494 basetype = ISP_HA_FC_2100; 495 psize = sizeof (fcparam); 496 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 497 PCI_MBOX_REGS2100_OFF; 498 if (pci_get_revid(dev) < 3) { 499 /* 500 * XXX: Need to get the actual revision 501 * XXX: number of the 2100 FB. At any rate, 502 * XXX: lower cache line size for early revision 503 * XXX; boards. 504 */ 505 linesz = 1; 506 } 507 } 508 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2200) { 509 mdvp = &mdvec_2200; 510 basetype = ISP_HA_FC_2200; 511 psize = sizeof (fcparam); 512 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 513 PCI_MBOX_REGS2100_OFF; 514 } 515 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2300) { 516 mdvp = &mdvec_2300; 517 basetype = ISP_HA_FC_2300; 518 psize = sizeof (fcparam); 519 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 520 PCI_MBOX_REGS2300_OFF; 521 } 522 if (pci_get_devid(dev) == PCI_QLOGIC_ISP2312) { 523 mdvp = &mdvec_2300; 524 basetype = ISP_HA_FC_2312; 525 psize = sizeof (fcparam); 526 pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = 527 PCI_MBOX_REGS2300_OFF; 528 } 529 isp = &pcs->pci_isp; 530 isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT | M_ZERO); 531 if (isp->isp_param == NULL) { 532 device_printf(dev, "cannot allocate parameter data\n"); 533 goto bad; 534 } 535 isp->isp_mdvec = mdvp; 536 isp->isp_type = basetype; 537 isp->isp_revision = pci_get_revid(dev); 538 isp->isp_role = role; 539 isp->isp_dev = dev; 540 541 /* 542 * Try and find firmware for this device. 543 */ 544 545 if (isp_get_firmware_p) { 546 int device = (int) pci_get_device(dev); 547#ifdef ISP_TARGET_MODE 548 (*isp_get_firmware_p)(0, 1, device, &mdvp->dv_ispfw); 549#else 550 (*isp_get_firmware_p)(0, 0, device, &mdvp->dv_ispfw); 551#endif 552 } 553 554 /* 555 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER 556 * are set. 557 */ 558 cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN | 559 PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN; 560 if (IS_2300(isp)) { /* per QLogic errata */ 561 cmd &= ~PCIM_CMD_INVEN; 562 } 563 if (IS_23XX(isp)) { 564 /* 565 * Can't tell if ROM will hang on 'ABOUT FIRMWARE' command. 566 */ 567 isp->isp_touched = 1; 568 569 } 570 pci_write_config(dev, PCIR_COMMAND, cmd, 1); 571 572 /* 573 * Make sure the Cache Line Size register is set sensibly. 574 */ 575 data = pci_read_config(dev, PCIR_CACHELNSZ, 1); 576 if (data != linesz) { 577 data = PCI_DFLT_LNSZ; 578 isp_prt(isp, ISP_LOGCONFIG, "set PCI line size to %d", data); 579 pci_write_config(dev, PCIR_CACHELNSZ, data, 1); 580 } 581 582 /* 583 * Make sure the Latency Timer is sane. 584 */ 585 data = pci_read_config(dev, PCIR_LATTIMER, 1); 586 if (data < PCI_DFLT_LTNCY) { 587 data = PCI_DFLT_LTNCY; 588 isp_prt(isp, ISP_LOGCONFIG, "set PCI latency to %d", data); 589 pci_write_config(dev, PCIR_LATTIMER, data, 1); 590 } 591 592 /* 593 * Make sure we've disabled the ROM. 594 */ 595 data = pci_read_config(dev, PCIR_ROMADDR, 4); 596 data &= ~1; 597 pci_write_config(dev, PCIR_ROMADDR, data, 4); 598 599 iqd = 0; 600 irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &iqd, 601 RF_ACTIVE | RF_SHAREABLE); 602 if (irq == NULL) { 603 device_printf(dev, "could not allocate interrupt\n"); 604 goto bad; 605 } 606 607 tval = 0; 608 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 609 "fwload_disable", &tval) == 0 && tval != 0) { 610 isp->isp_confopts |= ISP_CFG_NORELOAD; 611 } 612 tval = 0; 613 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 614 "ignore_nvram", &tval) == 0 && tval != 0) { 615 isp->isp_confopts |= ISP_CFG_NONVRAM; 616 } 617 tval = 0; 618 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 619 "fullduplex", &tval) == 0 && tval != 0) { 620 isp->isp_confopts |= ISP_CFG_FULL_DUPLEX; 621 } 622#ifdef ISP_FW_CRASH_DUMP 623 tval = 0; 624 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 625 "fw_dump_enable", &tval) == 0 && tval != 0) { 626 size_t amt = 0; 627 if (IS_2200(isp)) { 628 amt = QLA2200_RISC_IMAGE_DUMP_SIZE; 629 } else if (IS_23XX(isp)) { 630 amt = QLA2300_RISC_IMAGE_DUMP_SIZE; 631 } 632 if (amt) { 633 FCPARAM(isp)->isp_dump_data = 634 malloc(amt, M_DEVBUF, M_WAITOK | M_ZERO); 635 } else { 636 device_printf(dev, 637 "f/w crash dumps not supported for this model\n"); 638 } 639 } 640#endif 641 642 sptr = 0; 643 if (resource_string_value(device_get_name(dev), device_get_unit(dev), 644 "topology", (const char **) &sptr) == 0 && sptr != 0) { 645 if (strcmp(sptr, "lport") == 0) { 646 isp->isp_confopts |= ISP_CFG_LPORT; 647 } else if (strcmp(sptr, "nport") == 0) { 648 isp->isp_confopts |= ISP_CFG_NPORT; 649 } else if (strcmp(sptr, "lport-only") == 0) { 650 isp->isp_confopts |= ISP_CFG_LPORT_ONLY; 651 } else if (strcmp(sptr, "nport-only") == 0) { 652 isp->isp_confopts |= ISP_CFG_NPORT_ONLY; 653 } 654 } 655 656 /* 657 * Because the resource_*_value functions can neither return 658 * 64 bit integer values, nor can they be directly coerced 659 * to interpret the right hand side of the assignment as 660 * you want them to interpret it, we have to force WWN 661 * hint replacement to specify WWN strings with a leading 662 * 'w' (e..g w50000000aaaa0001). Sigh. 663 */ 664 sptr = 0; 665 tval = resource_string_value(device_get_name(dev), device_get_unit(dev), 666 "portwwn", (const char **) &sptr); 667 if (tval == 0 && sptr != 0 && *sptr++ == 'w') { 668 char *eptr = 0; 669 isp->isp_osinfo.default_port_wwn = strtouq(sptr, &eptr, 16); 670 if (eptr < sptr + 16 || isp->isp_osinfo.default_port_wwn == 0) { 671 device_printf(dev, "mangled portwwn hint '%s'\n", sptr); 672 isp->isp_osinfo.default_port_wwn = 0; 673 } else { 674 isp->isp_confopts |= ISP_CFG_OWNWWPN; 675 } 676 } 677 if (isp->isp_osinfo.default_port_wwn == 0) { 678 isp->isp_osinfo.default_port_wwn = 0x400000007F000009ull; 679 } 680 681 sptr = 0; 682 tval = resource_string_value(device_get_name(dev), device_get_unit(dev), 683 "nodewwn", (const char **) &sptr); 684 if (tval == 0 && sptr != 0 && *sptr++ == 'w') { 685 char *eptr = 0; 686 isp->isp_osinfo.default_node_wwn = strtouq(sptr, &eptr, 16); 687 if (eptr < sptr + 16 || isp->isp_osinfo.default_node_wwn == 0) { 688 device_printf(dev, "mangled nodewwn hint '%s'\n", sptr); 689 isp->isp_osinfo.default_node_wwn = 0; 690 } else { 691 isp->isp_confopts |= ISP_CFG_OWNWWNN; 692 } 693 } 694 if (isp->isp_osinfo.default_node_wwn == 0) { 695 isp->isp_osinfo.default_node_wwn = 0x400000007F000009ull; 696 } 697 698 isp->isp_osinfo.default_id = -1; 699 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 700 "iid", &tval) == 0) { 701 isp->isp_osinfo.default_id = tval; 702 isp->isp_confopts |= ISP_CFG_OWNLOOPID; 703 } 704 if (isp->isp_osinfo.default_id == -1) { 705 if (IS_FC(isp)) { 706 isp->isp_osinfo.default_id = 109; 707 } else { 708 isp->isp_osinfo.default_id = 7; 709 } 710 } 711 712 isp_debug = 0; 713 (void) resource_int_value(device_get_name(dev), device_get_unit(dev), 714 "debug", &isp_debug); 715 716 /* Make sure the lock is set up. */ 717 mtx_init(&isp->isp_osinfo.lock, "isp", NULL, MTX_DEF); 718 locksetup++; 719 720 if (bus_setup_intr(dev, irq, ISP_IFLAGS, isp_pci_intr, isp, &pcs->ih)) { 721 device_printf(dev, "could not setup interrupt\n"); 722 goto bad; 723 } 724 725 /* 726 * Set up logging levels. 727 */ 728 if (isp_debug) { 729 isp->isp_dblev = isp_debug; 730 } else { 731 isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR; 732 } 733 if (bootverbose) 734 isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO; 735 736 /* 737 * Last minute checks... 738 */ 739 if (IS_2312(isp)) { 740 isp->isp_port = pci_get_function(dev); 741 } 742 743 /* 744 * Make sure we're in reset state. 745 */ 746 ISP_LOCK(isp); 747 isp_reset(isp); 748 if (isp->isp_state != ISP_RESETSTATE) { 749 ISP_UNLOCK(isp); 750 goto bad; 751 } 752 isp_init(isp); 753 if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_INITSTATE) { 754 isp_uninit(isp); 755 ISP_UNLOCK(isp); 756 goto bad; 757 } 758 isp_attach(isp); 759 if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_RUNSTATE) { 760 isp_uninit(isp); 761 ISP_UNLOCK(isp); 762 goto bad; 763 } 764 /* 765 * XXXX: Here is where we might unload the f/w module 766 * XXXX: (or decrease the reference count to it). 767 */ 768 ISP_UNLOCK(isp); 769 return (0); 770 771bad: 772 773 if (pcs && pcs->ih) { 774 (void) bus_teardown_intr(dev, irq, pcs->ih); 775 } 776 777 if (locksetup && isp) { 778 mtx_destroy(&isp->isp_osinfo.lock); 779 } 780 781 if (irq) { 782 (void) bus_release_resource(dev, SYS_RES_IRQ, iqd, irq); 783 } 784 785 786 if (regs) { 787 (void) bus_release_resource(dev, rtp, rgd, regs); 788 } 789 790 if (pcs) { 791 if (pcs->pci_isp.isp_param) 792 free(pcs->pci_isp.isp_param, M_DEVBUF); 793 free(pcs, M_DEVBUF); 794 } 795 796 /* 797 * XXXX: Here is where we might unload the f/w module 798 * XXXX: (or decrease the reference count to it). 799 */ 800 return (ENXIO); 801} 802 803static void 804isp_pci_intr(void *arg) 805{ 806 struct ispsoftc *isp = arg; 807 u_int16_t isr, sema, mbox; 808 809 ISP_LOCK(isp); 810 isp->isp_intcnt++; 811 if (ISP_READ_ISR(isp, &isr, &sema, &mbox) == 0) { 812 isp->isp_intbogus++; 813 } else { 814 int iok = isp->isp_osinfo.intsok; 815 isp->isp_osinfo.intsok = 0; 816 isp_intr(isp, isr, sema, mbox); 817 isp->isp_osinfo.intsok = iok; 818 } 819 ISP_UNLOCK(isp); 820} 821 822 823#define IspVirt2Off(a, x) \ 824 (((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \ 825 _BLK_REG_SHFT] + ((x) & 0xff)) 826 827#define BXR2(pcs, off) \ 828 bus_space_read_2(pcs->pci_st, pcs->pci_sh, off) 829#define BXW2(pcs, off, v) \ 830 bus_space_write_2(pcs->pci_st, pcs->pci_sh, off, v) 831 832 833static INLINE int 834isp_pci_rd_debounced(struct ispsoftc *isp, int off, u_int16_t *rp) 835{ 836 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 837 u_int16_t val0, val1; 838 int i = 0; 839 840 do { 841 val0 = BXR2(pcs, IspVirt2Off(isp, off)); 842 val1 = BXR2(pcs, IspVirt2Off(isp, off)); 843 } while (val0 != val1 && ++i < 1000); 844 if (val0 != val1) { 845 return (1); 846 } 847 *rp = val0; 848 return (0); 849} 850 851static int 852isp_pci_rd_isr(struct ispsoftc *isp, u_int16_t *isrp, 853 u_int16_t *semap, u_int16_t *mbp) 854{ 855 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 856 u_int16_t isr, sema; 857 858 if (IS_2100(isp)) { 859 if (isp_pci_rd_debounced(isp, BIU_ISR, &isr)) { 860 return (0); 861 } 862 if (isp_pci_rd_debounced(isp, BIU_SEMA, &sema)) { 863 return (0); 864 } 865 } else { 866 isr = BXR2(pcs, IspVirt2Off(isp, BIU_ISR)); 867 sema = BXR2(pcs, IspVirt2Off(isp, BIU_SEMA)); 868 } 869 isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema); 870 isr &= INT_PENDING_MASK(isp); 871 sema &= BIU_SEMA_LOCK; 872 if (isr == 0 && sema == 0) { 873 return (0); 874 } 875 *isrp = isr; 876 if ((*semap = sema) != 0) { 877 if (IS_2100(isp)) { 878 if (isp_pci_rd_debounced(isp, OUTMAILBOX0, mbp)) { 879 return (0); 880 } 881 } else { 882 *mbp = BXR2(pcs, IspVirt2Off(isp, OUTMAILBOX0)); 883 } 884 } 885 return (1); 886} 887 888static int 889isp_pci_rd_isr_2300(struct ispsoftc *isp, u_int16_t *isrp, 890 u_int16_t *semap, u_int16_t *mbox0p) 891{ 892 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 893 u_int32_t r2hisr; 894 895 if (!(BXR2(pcs, IspVirt2Off(isp, BIU_ISR) & BIU2100_ISR_RISC_INT))) { 896 *isrp = 0; 897 return (0); 898 } 899 r2hisr = bus_space_read_4(pcs->pci_st, pcs->pci_sh, 900 IspVirt2Off(pcs, BIU_R2HSTSLO)); 901 isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr); 902 if ((r2hisr & BIU_R2HST_INTR) == 0) { 903 *isrp = 0; 904 return (0); 905 } 906 switch (r2hisr & BIU_R2HST_ISTAT_MASK) { 907 case ISPR2HST_ROM_MBX_OK: 908 case ISPR2HST_ROM_MBX_FAIL: 909 case ISPR2HST_MBX_OK: 910 case ISPR2HST_MBX_FAIL: 911 case ISPR2HST_ASYNC_EVENT: 912 *isrp = r2hisr & 0xffff; 913 *mbox0p = (r2hisr >> 16); 914 *semap = 1; 915 return (1); 916 case ISPR2HST_RIO_16: 917 *isrp = r2hisr & 0xffff; 918 *mbox0p = ASYNC_RIO1; 919 *semap = 1; 920 return (1); 921 case ISPR2HST_FPOST: 922 *isrp = r2hisr & 0xffff; 923 *mbox0p = ASYNC_CMD_CMPLT; 924 *semap = 1; 925 return (1); 926 case ISPR2HST_FPOST_CTIO: 927 *isrp = r2hisr & 0xffff; 928 *mbox0p = ASYNC_CTIO_DONE; 929 *semap = 1; 930 return (1); 931 case ISPR2HST_RSPQ_UPDATE: 932 *isrp = r2hisr & 0xffff; 933 *mbox0p = 0; 934 *semap = 0; 935 return (1); 936 default: 937 return (0); 938 } 939} 940 941static u_int16_t 942isp_pci_rd_reg(struct ispsoftc *isp, int regoff) 943{ 944 u_int16_t rv; 945 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 946 int oldconf = 0; 947 948 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 949 /* 950 * We will assume that someone has paused the RISC processor. 951 */ 952 oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 953 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), 954 oldconf | BIU_PCI_CONF1_SXP); 955 } 956 rv = BXR2(pcs, IspVirt2Off(isp, regoff)); 957 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 958 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf); 959 } 960 return (rv); 961} 962 963static void 964isp_pci_wr_reg(struct ispsoftc *isp, int regoff, u_int16_t val) 965{ 966 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 967 int oldconf = 0; 968 969 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 970 /* 971 * We will assume that someone has paused the RISC processor. 972 */ 973 oldconf = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 974 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), 975 oldconf | BIU_PCI_CONF1_SXP); 976 } 977 BXW2(pcs, IspVirt2Off(isp, regoff), val); 978 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) { 979 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oldconf); 980 } 981} 982 983static u_int16_t 984isp_pci_rd_reg_1080(struct ispsoftc *isp, int regoff) 985{ 986 u_int16_t rv, oc = 0; 987 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 988 989 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK || 990 (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) { 991 u_int16_t tc; 992 /* 993 * We will assume that someone has paused the RISC processor. 994 */ 995 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 996 tc = oc & ~BIU_PCI1080_CONF1_DMA; 997 if (regoff & SXP_BANK1_SELECT) 998 tc |= BIU_PCI1080_CONF1_SXP1; 999 else 1000 tc |= BIU_PCI1080_CONF1_SXP0; 1001 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc); 1002 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) { 1003 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 1004 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), 1005 oc | BIU_PCI1080_CONF1_DMA); 1006 } 1007 rv = BXR2(pcs, IspVirt2Off(isp, regoff)); 1008 if (oc) { 1009 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc); 1010 } 1011 return (rv); 1012} 1013 1014static void 1015isp_pci_wr_reg_1080(struct ispsoftc *isp, int regoff, u_int16_t val) 1016{ 1017 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *) isp; 1018 int oc = 0; 1019 1020 if ((regoff & _BLK_REG_MASK) == SXP_BLOCK || 1021 (regoff & _BLK_REG_MASK) == (SXP_BLOCK|SXP_BANK1_SELECT)) { 1022 u_int16_t tc; 1023 /* 1024 * We will assume that someone has paused the RISC processor. 1025 */ 1026 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 1027 tc = oc & ~BIU_PCI1080_CONF1_DMA; 1028 if (regoff & SXP_BANK1_SELECT) 1029 tc |= BIU_PCI1080_CONF1_SXP1; 1030 else 1031 tc |= BIU_PCI1080_CONF1_SXP0; 1032 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), tc); 1033 } else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) { 1034 oc = BXR2(pcs, IspVirt2Off(isp, BIU_CONF1)); 1035 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), 1036 oc | BIU_PCI1080_CONF1_DMA); 1037 } 1038 BXW2(pcs, IspVirt2Off(isp, regoff), val); 1039 if (oc) { 1040 BXW2(pcs, IspVirt2Off(isp, BIU_CONF1), oc); 1041 } 1042} 1043 1044 1045struct imush { 1046 struct ispsoftc *isp; 1047 int error; 1048}; 1049 1050static void imc(void *, bus_dma_segment_t *, int, int); 1051 1052static void 1053imc(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1054{ 1055 struct imush *imushp = (struct imush *) arg; 1056 if (error) { 1057 imushp->error = error; 1058 } else { 1059 struct ispsoftc *isp =imushp->isp; 1060 bus_addr_t addr = segs->ds_addr; 1061 1062 isp->isp_rquest_dma = addr; 1063 addr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); 1064 isp->isp_result_dma = addr; 1065 if (IS_FC(isp)) { 1066 addr += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); 1067 FCPARAM(isp)->isp_scdma = addr; 1068 } 1069 } 1070} 1071 1072/* 1073 * Should be BUS_SPACE_MAXSIZE, but MAXPHYS is larger than BUS_SPACE_MAXSIZE 1074 */ 1075#define ISP_NSEGS ((MAXPHYS / PAGE_SIZE) + 1) 1076 1077static int 1078isp_pci_mbxdma(struct ispsoftc *isp) 1079{ 1080 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp; 1081 caddr_t base; 1082 u_int32_t len; 1083 int i, error, ns; 1084 bus_size_t alim, slim; 1085 struct imush im; 1086 1087 /* 1088 * Already been here? If so, leave... 1089 */ 1090 if (isp->isp_rquest) { 1091 return (0); 1092 } 1093 1094#ifdef ISP_DAC_SUPPORTED 1095 alim = BUS_SPACE_UNRESTRICTED; 1096#else 1097 alim = BUS_SPACE_MAXADDR_32BIT; 1098#endif 1099 if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) { 1100 slim = BUS_SPACE_MAXADDR_32BIT; 1101 } else { 1102 slim = BUS_SPACE_MAXADDR_24BIT; 1103 } 1104 1105 ISP_UNLOCK(isp); 1106 if (bus_dma_tag_create(NULL, 1, slim+1, alim, alim, 1107 NULL, NULL, BUS_SPACE_MAXSIZE, ISP_NSEGS, slim, 0, 1108 busdma_lock_mutex, &Giant, &pcs->dmat)) { 1109 isp_prt(isp, ISP_LOGERR, "could not create master dma tag"); 1110 ISP_LOCK(isp); 1111 return(1); 1112 } 1113 1114 1115 len = sizeof (XS_T **) * isp->isp_maxcmds; 1116 isp->isp_xflist = (XS_T **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO); 1117 if (isp->isp_xflist == NULL) { 1118 isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array"); 1119 ISP_LOCK(isp); 1120 return (1); 1121 } 1122#ifdef ISP_TARGET_MODE 1123 len = sizeof (void **) * isp->isp_maxcmds; 1124 isp->isp_tgtlist = (void **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO); 1125 if (isp->isp_tgtlist == NULL) { 1126 isp_prt(isp, ISP_LOGERR, "cannot alloc tgtlist array"); 1127 ISP_LOCK(isp); 1128 return (1); 1129 } 1130#endif 1131 len = sizeof (bus_dmamap_t) * isp->isp_maxcmds; 1132 pcs->dmaps = (bus_dmamap_t *) malloc(len, M_DEVBUF, M_WAITOK); 1133 if (pcs->dmaps == NULL) { 1134 isp_prt(isp, ISP_LOGERR, "can't alloc dma map storage"); 1135 free(isp->isp_xflist, M_DEVBUF); 1136#ifdef ISP_TARGET_MODE 1137 free(isp->isp_tgtlist, M_DEVBUF); 1138#endif 1139 ISP_LOCK(isp); 1140 return (1); 1141 } 1142 1143 /* 1144 * Allocate and map the request, result queues, plus FC scratch area. 1145 */ 1146 len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); 1147 len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); 1148 if (IS_FC(isp)) { 1149 len += ISP2100_SCRLEN; 1150 } 1151 1152 ns = (len / PAGE_SIZE) + 1; 1153 if (bus_dma_tag_create(pcs->dmat, QENTRY_LEN, slim+1, alim, alim, 1154 NULL, NULL, len, ns, slim, 0, busdma_lock_mutex, &Giant, 1155 &isp->isp_cdmat)) { 1156 isp_prt(isp, ISP_LOGERR, 1157 "cannot create a dma tag for control spaces"); 1158 free(pcs->dmaps, M_DEVBUF); 1159 free(isp->isp_xflist, M_DEVBUF); 1160#ifdef ISP_TARGET_MODE 1161 free(isp->isp_tgtlist, M_DEVBUF); 1162#endif 1163 ISP_LOCK(isp); 1164 return (1); 1165 } 1166 1167 if (bus_dmamem_alloc(isp->isp_cdmat, (void **)&base, BUS_DMA_NOWAIT, 1168 &isp->isp_cdmap) != 0) { 1169 isp_prt(isp, ISP_LOGERR, 1170 "cannot allocate %d bytes of CCB memory", len); 1171 bus_dma_tag_destroy(isp->isp_cdmat); 1172 free(isp->isp_xflist, M_DEVBUF); 1173#ifdef ISP_TARGET_MODE 1174 free(isp->isp_tgtlist, M_DEVBUF); 1175#endif 1176 free(pcs->dmaps, M_DEVBUF); 1177 ISP_LOCK(isp); 1178 return (1); 1179 } 1180 1181 for (i = 0; i < isp->isp_maxcmds; i++) { 1182 error = bus_dmamap_create(pcs->dmat, 0, &pcs->dmaps[i]); 1183 if (error) { 1184 isp_prt(isp, ISP_LOGERR, 1185 "error %d creating per-cmd DMA maps", error); 1186 while (--i >= 0) { 1187 bus_dmamap_destroy(pcs->dmat, pcs->dmaps[i]); 1188 } 1189 goto bad; 1190 } 1191 } 1192 1193 im.isp = isp; 1194 im.error = 0; 1195 bus_dmamap_load(isp->isp_cdmat, isp->isp_cdmap, base, len, imc, &im, 0); 1196 if (im.error) { 1197 isp_prt(isp, ISP_LOGERR, 1198 "error %d loading dma map for control areas", im.error); 1199 goto bad; 1200 } 1201 1202 isp->isp_rquest = base; 1203 base += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); 1204 isp->isp_result = base; 1205 if (IS_FC(isp)) { 1206 base += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); 1207 FCPARAM(isp)->isp_scratch = base; 1208 } 1209 ISP_LOCK(isp); 1210 return (0); 1211 1212bad: 1213 bus_dmamem_free(isp->isp_cdmat, base, isp->isp_cdmap); 1214 bus_dma_tag_destroy(isp->isp_cdmat); 1215 free(isp->isp_xflist, M_DEVBUF); 1216#ifdef ISP_TARGET_MODE 1217 free(isp->isp_tgtlist, M_DEVBUF); 1218#endif 1219 free(pcs->dmaps, M_DEVBUF); 1220 ISP_LOCK(isp); 1221 isp->isp_rquest = NULL; 1222 return (1); 1223} 1224 1225typedef struct { 1226 struct ispsoftc *isp; 1227 void *cmd_token; 1228 void *rq; 1229 u_int16_t *nxtip; 1230 u_int16_t optr; 1231 u_int error; 1232} mush_t; 1233 1234#define MUSHERR_NOQENTRIES -2 1235 1236#ifdef ISP_TARGET_MODE 1237/* 1238 * We need to handle DMA for target mode differently from initiator mode. 1239 * 1240 * DMA mapping and construction and submission of CTIO Request Entries 1241 * and rendevous for completion are very tightly coupled because we start 1242 * out by knowing (per platform) how much data we have to move, but we 1243 * don't know, up front, how many DMA mapping segments will have to be used 1244 * cover that data, so we don't know how many CTIO Request Entries we 1245 * will end up using. Further, for performance reasons we may want to 1246 * (on the last CTIO for Fibre Channel), send status too (if all went well). 1247 * 1248 * The standard vector still goes through isp_pci_dmasetup, but the callback 1249 * for the DMA mapping routines comes here instead with the whole transfer 1250 * mapped and a pointer to a partially filled in already allocated request 1251 * queue entry. We finish the job. 1252 */ 1253static void tdma_mk(void *, bus_dma_segment_t *, int, int); 1254static void tdma_mkfc(void *, bus_dma_segment_t *, int, int); 1255 1256#define STATUS_WITH_DATA 1 1257 1258static void 1259tdma_mk(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 1260{ 1261 mush_t *mp; 1262 struct ccb_scsiio *csio; 1263 struct ispsoftc *isp; 1264 struct isp_pcisoftc *pcs; 1265 bus_dmamap_t *dp; 1266 ct_entry_t *cto, *qe; 1267 u_int8_t scsi_status; 1268 u_int16_t curi, nxti, handle; 1269 u_int32_t sflags; 1270 int32_t resid; 1271 int nth_ctio, nctios, send_status; 1272 1273 mp = (mush_t *) arg; 1274 if (error) { 1275 mp->error = error; 1276 return; 1277 } 1278 1279 isp = mp->isp; 1280 csio = mp->cmd_token; 1281 cto = mp->rq; 1282 curi = isp->isp_reqidx; 1283 qe = (ct_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi); 1284 1285 cto->ct_xfrlen = 0; 1286 cto->ct_seg_count = 0; 1287 cto->ct_header.rqs_entry_count = 1; 1288 MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg)); 1289 1290 if (nseg == 0) { 1291 cto->ct_header.rqs_seqno = 1; 1292 isp_prt(isp, ISP_LOGTDEBUG1, 1293 "CTIO[%x] lun%d iid%d tag %x flgs %x sts %x ssts %x res %d", 1294 cto->ct_fwhandle, csio->ccb_h.target_lun, cto->ct_iid, 1295 cto->ct_tag_val, cto->ct_flags, cto->ct_status, 1296 cto->ct_scsi_status, cto->ct_resid); 1297 ISP_TDQE(isp, "tdma_mk[no data]", curi, cto); 1298 isp_put_ctio(isp, cto, qe); 1299 return; 1300 } 1301 1302 nctios = nseg / ISP_RQDSEG; 1303 if (nseg % ISP_RQDSEG) { 1304 nctios++; 1305 } 1306 1307 /* 1308 * Save syshandle, and potentially any SCSI status, which we'll 1309 * reinsert on the last CTIO we're going to send. 1310 */ 1311 1312 handle = cto->ct_syshandle; 1313 cto->ct_syshandle = 0; 1314 cto->ct_header.rqs_seqno = 0; 1315 send_status = (cto->ct_flags & CT_SENDSTATUS) != 0; 1316 1317 if (send_status) { 1318 sflags = cto->ct_flags & (CT_SENDSTATUS | CT_CCINCR); 1319 cto->ct_flags &= ~(CT_SENDSTATUS | CT_CCINCR); 1320 /* 1321 * Preserve residual. 1322 */ 1323 resid = cto->ct_resid; 1324 1325 /* 1326 * Save actual SCSI status. 1327 */ 1328 scsi_status = cto->ct_scsi_status; 1329 1330#ifndef STATUS_WITH_DATA 1331 sflags |= CT_NO_DATA; 1332 /* 1333 * We can't do a status at the same time as a data CTIO, so 1334 * we need to synthesize an extra CTIO at this level. 1335 */ 1336 nctios++; 1337#endif 1338 } else { 1339 sflags = scsi_status = resid = 0; 1340 } 1341 1342 cto->ct_resid = 0; 1343 cto->ct_scsi_status = 0; 1344 1345 pcs = (struct isp_pcisoftc *)isp; 1346 dp = &pcs->dmaps[isp_handle_index(handle)]; 1347 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1348 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD); 1349 } else { 1350 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE); 1351 } 1352 1353 nxti = *mp->nxtip; 1354 1355 for (nth_ctio = 0; nth_ctio < nctios; nth_ctio++) { 1356 int seglim; 1357 1358 seglim = nseg; 1359 if (seglim) { 1360 int seg; 1361 1362 if (seglim > ISP_RQDSEG) 1363 seglim = ISP_RQDSEG; 1364 1365 for (seg = 0; seg < seglim; seg++, nseg--) { 1366 /* 1367 * Unlike normal initiator commands, we don't 1368 * do any swizzling here. 1369 */ 1370 cto->ct_dataseg[seg].ds_count = dm_segs->ds_len; 1371 cto->ct_dataseg[seg].ds_base = dm_segs->ds_addr; 1372 cto->ct_xfrlen += dm_segs->ds_len; 1373 dm_segs++; 1374 } 1375 cto->ct_seg_count = seg; 1376 } else { 1377 /* 1378 * This case should only happen when we're sending an 1379 * extra CTIO with final status. 1380 */ 1381 if (send_status == 0) { 1382 isp_prt(isp, ISP_LOGWARN, 1383 "tdma_mk ran out of segments"); 1384 mp->error = EINVAL; 1385 return; 1386 } 1387 } 1388 1389 /* 1390 * At this point, the fields ct_lun, ct_iid, ct_tagval, 1391 * ct_tagtype, and ct_timeout have been carried over 1392 * unchanged from what our caller had set. 1393 * 1394 * The dataseg fields and the seg_count fields we just got 1395 * through setting. The data direction we've preserved all 1396 * along and only clear it if we're now sending status. 1397 */ 1398 1399 if (nth_ctio == nctios - 1) { 1400 /* 1401 * We're the last in a sequence of CTIOs, so mark 1402 * this CTIO and save the handle to the CCB such that 1403 * when this CTIO completes we can free dma resources 1404 * and do whatever else we need to do to finish the 1405 * rest of the command. We *don't* give this to the 1406 * firmware to work on- the caller will do that. 1407 */ 1408 1409 cto->ct_syshandle = handle; 1410 cto->ct_header.rqs_seqno = 1; 1411 1412 if (send_status) { 1413 cto->ct_scsi_status = scsi_status; 1414 cto->ct_flags |= sflags; 1415 cto->ct_resid = resid; 1416 } 1417 if (send_status) { 1418 isp_prt(isp, ISP_LOGTDEBUG1, 1419 "CTIO[%x] lun%d iid %d tag %x ct_flags %x " 1420 "scsi status %x resid %d", 1421 cto->ct_fwhandle, csio->ccb_h.target_lun, 1422 cto->ct_iid, cto->ct_tag_val, cto->ct_flags, 1423 cto->ct_scsi_status, cto->ct_resid); 1424 } else { 1425 isp_prt(isp, ISP_LOGTDEBUG1, 1426 "CTIO[%x] lun%d iid%d tag %x ct_flags 0x%x", 1427 cto->ct_fwhandle, csio->ccb_h.target_lun, 1428 cto->ct_iid, cto->ct_tag_val, 1429 cto->ct_flags); 1430 } 1431 isp_put_ctio(isp, cto, qe); 1432 ISP_TDQE(isp, "last tdma_mk", curi, cto); 1433 if (nctios > 1) { 1434 MEMORYBARRIER(isp, SYNC_REQUEST, 1435 curi, QENTRY_LEN); 1436 } 1437 } else { 1438 ct_entry_t *oqe = qe; 1439 1440 /* 1441 * Make sure syshandle fields are clean 1442 */ 1443 cto->ct_syshandle = 0; 1444 cto->ct_header.rqs_seqno = 0; 1445 1446 isp_prt(isp, ISP_LOGTDEBUG1, 1447 "CTIO[%x] lun%d for ID%d ct_flags 0x%x", 1448 cto->ct_fwhandle, csio->ccb_h.target_lun, 1449 cto->ct_iid, cto->ct_flags); 1450 1451 /* 1452 * Get a new CTIO 1453 */ 1454 qe = (ct_entry_t *) 1455 ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); 1456 nxti = ISP_NXT_QENTRY(nxti, RQUEST_QUEUE_LEN(isp)); 1457 if (nxti == mp->optr) { 1458 isp_prt(isp, ISP_LOGTDEBUG0, 1459 "Queue Overflow in tdma_mk"); 1460 mp->error = MUSHERR_NOQENTRIES; 1461 return; 1462 } 1463 1464 /* 1465 * Now that we're done with the old CTIO, 1466 * flush it out to the request queue. 1467 */ 1468 ISP_TDQE(isp, "dma_tgt_fc", curi, cto); 1469 isp_put_ctio(isp, cto, oqe); 1470 if (nth_ctio != 0) { 1471 MEMORYBARRIER(isp, SYNC_REQUEST, curi, 1472 QENTRY_LEN); 1473 } 1474 curi = ISP_NXT_QENTRY(curi, RQUEST_QUEUE_LEN(isp)); 1475 1476 /* 1477 * Reset some fields in the CTIO so we can reuse 1478 * for the next one we'll flush to the request 1479 * queue. 1480 */ 1481 cto->ct_header.rqs_entry_type = RQSTYPE_CTIO; 1482 cto->ct_header.rqs_entry_count = 1; 1483 cto->ct_header.rqs_flags = 0; 1484 cto->ct_status = 0; 1485 cto->ct_scsi_status = 0; 1486 cto->ct_xfrlen = 0; 1487 cto->ct_resid = 0; 1488 cto->ct_seg_count = 0; 1489 MEMZERO(cto->ct_dataseg, sizeof(cto->ct_dataseg)); 1490 } 1491 } 1492 *mp->nxtip = nxti; 1493} 1494 1495/* 1496 * We don't have to do multiple CTIOs here. Instead, we can just do 1497 * continuation segments as needed. This greatly simplifies the code 1498 * improves performance. 1499 */ 1500 1501static void 1502tdma_mkfc(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 1503{ 1504 mush_t *mp; 1505 struct ccb_scsiio *csio; 1506 struct ispsoftc *isp; 1507 ct2_entry_t *cto, *qe; 1508 u_int16_t curi, nxti; 1509 int segcnt; 1510 1511 mp = (mush_t *) arg; 1512 if (error) { 1513 mp->error = error; 1514 return; 1515 } 1516 1517 isp = mp->isp; 1518 csio = mp->cmd_token; 1519 cto = mp->rq; 1520 1521 curi = isp->isp_reqidx; 1522 qe = (ct2_entry_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, curi); 1523 1524 if (nseg == 0) { 1525 if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE1) { 1526 isp_prt(isp, ISP_LOGWARN, 1527 "dma2_tgt_fc, a status CTIO2 without MODE1 " 1528 "set (0x%x)", cto->ct_flags); 1529 mp->error = EINVAL; 1530 return; 1531 } 1532 /* 1533 * We preserve ct_lun, ct_iid, ct_rxid. We set the data 1534 * flags to NO DATA and clear relative offset flags. 1535 * We preserve the ct_resid and the response area. 1536 */ 1537 cto->ct_header.rqs_seqno = 1; 1538 cto->ct_seg_count = 0; 1539 cto->ct_reloff = 0; 1540 isp_prt(isp, ISP_LOGTDEBUG1, 1541 "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts " 1542 "0x%x res %d", cto->ct_rxid, csio->ccb_h.target_lun, 1543 cto->ct_iid, cto->ct_flags, cto->ct_status, 1544 cto->rsp.m1.ct_scsi_status, cto->ct_resid); 1545 isp_put_ctio2(isp, cto, qe); 1546 ISP_TDQE(isp, "dma2_tgt_fc[no data]", curi, qe); 1547 return; 1548 } 1549 1550 if ((cto->ct_flags & CT2_FLAG_MMASK) != CT2_FLAG_MODE0) { 1551 isp_prt(isp, ISP_LOGERR, 1552 "dma2_tgt_fc, a data CTIO2 without MODE0 set " 1553 "(0x%x)", cto->ct_flags); 1554 mp->error = EINVAL; 1555 return; 1556 } 1557 1558 1559 nxti = *mp->nxtip; 1560 1561 /* 1562 * Set up the CTIO2 data segments. 1563 */ 1564 for (segcnt = 0; cto->ct_seg_count < ISP_RQDSEG_T2 && segcnt < nseg; 1565 cto->ct_seg_count++, segcnt++) { 1566 cto->rsp.m0.ct_dataseg[cto->ct_seg_count].ds_base = 1567 dm_segs[segcnt].ds_addr; 1568 cto->rsp.m0.ct_dataseg[cto->ct_seg_count].ds_count = 1569 dm_segs[segcnt].ds_len; 1570 cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len; 1571 isp_prt(isp, ISP_LOGTDEBUG1, 1572 "isp_send_ctio2: ent0[%d]0x%llx:%lld", 1573 cto->ct_seg_count, (long long)dm_segs[segcnt].ds_addr, 1574 (long long)dm_segs[segcnt].ds_len); 1575 } 1576 1577 while (segcnt < nseg) { 1578 u_int16_t curip; 1579 int seg; 1580 ispcontreq_t local, *crq = &local, *qep; 1581 1582 qep = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); 1583 curip = nxti; 1584 nxti = ISP_NXT_QENTRY(curip, RQUEST_QUEUE_LEN(isp)); 1585 if (nxti == mp->optr) { 1586 ISP_UNLOCK(isp); 1587 isp_prt(isp, ISP_LOGTDEBUG0, 1588 "tdma_mkfc: request queue overflow"); 1589 mp->error = MUSHERR_NOQENTRIES; 1590 return; 1591 } 1592 cto->ct_header.rqs_entry_count++; 1593 MEMZERO((void *)crq, sizeof (*crq)); 1594 crq->req_header.rqs_entry_count = 1; 1595 crq->req_header.rqs_entry_type = RQSTYPE_DATASEG; 1596 for (seg = 0; segcnt < nseg && seg < ISP_CDSEG; 1597 segcnt++, seg++) { 1598 crq->req_dataseg[seg].ds_base = dm_segs[segcnt].ds_addr; 1599 crq->req_dataseg[seg].ds_count = dm_segs[segcnt].ds_len; 1600 isp_prt(isp, ISP_LOGTDEBUG1, 1601 "isp_send_ctio2: ent%d[%d]0x%llx:%lld", 1602 cto->ct_header.rqs_entry_count-1, seg, 1603 (long long) dm_segs[segcnt].ds_addr, 1604 (long long) dm_segs[segcnt].ds_len); 1605 cto->rsp.m0.ct_xfrlen += dm_segs[segcnt].ds_len; 1606 cto->ct_seg_count++; 1607 } 1608 MEMORYBARRIER(isp, SYNC_REQUEST, curip, QENTRY_LEN); 1609 isp_put_cont_req(isp, crq, qep); 1610 ISP_TDQE(isp, "cont entry", curi, qep); 1611 } 1612 1613 /* 1614 * No do final twiddling for the CTIO itself. 1615 */ 1616 cto->ct_header.rqs_seqno = 1; 1617 isp_prt(isp, ISP_LOGTDEBUG1, 1618 "CTIO2[%x] lun %d->iid%d flgs 0x%x sts 0x%x ssts 0x%x resid %d", 1619 cto->ct_rxid, csio->ccb_h.target_lun, (int) cto->ct_iid, 1620 cto->ct_flags, cto->ct_status, cto->rsp.m1.ct_scsi_status, 1621 cto->ct_resid); 1622 isp_put_ctio2(isp, cto, qe); 1623 ISP_TDQE(isp, "last dma2_tgt_fc", curi, qe); 1624 *mp->nxtip = nxti; 1625} 1626#endif 1627 1628static void dma2(void *, bus_dma_segment_t *, int, int); 1629 1630#ifdef PAE 1631static void 1632dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 1633{ 1634 mush_t *mp; 1635 struct ispsoftc *isp; 1636 struct ccb_scsiio *csio; 1637 struct isp_pcisoftc *pcs; 1638 bus_dmamap_t *dp; 1639 bus_dma_segment_t *eseg; 1640 ispreq64_t *rq; 1641 int seglim, datalen; 1642 u_int16_t nxti; 1643 1644 mp = (mush_t *) arg; 1645 if (error) { 1646 mp->error = error; 1647 return; 1648 } 1649 1650 if (nseg < 1) { 1651 isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg); 1652 mp->error = EFAULT; 1653 return; 1654 } 1655 csio = mp->cmd_token; 1656 isp = mp->isp; 1657 rq = mp->rq; 1658 pcs = (struct isp_pcisoftc *)mp->isp; 1659 dp = &pcs->dmaps[isp_handle_index(rq->req_handle)]; 1660 nxti = *mp->nxtip; 1661 1662 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1663 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD); 1664 } else { 1665 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE); 1666 } 1667 1668 datalen = XS_XFRLEN(csio); 1669 1670 /* 1671 * We're passed an initial partially filled in entry that 1672 * has most fields filled in except for data transfer 1673 * related values. 1674 * 1675 * Our job is to fill in the initial request queue entry and 1676 * then to start allocating and filling in continuation entries 1677 * until we've covered the entire transfer. 1678 */ 1679 1680 if (IS_FC(isp)) { 1681 seglim = ISP_RQDSEG_T3; 1682 ((ispreqt3_t *)rq)->req_totalcnt = datalen; 1683 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1684 ((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_IN; 1685 } else { 1686 ((ispreqt3_t *)rq)->req_flags |= REQFLAG_DATA_OUT; 1687 } 1688 } else { 1689 if (csio->cdb_len > 12) { 1690 seglim = 0; 1691 } else { 1692 seglim = ISP_RQDSEG_A64; 1693 } 1694 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1695 rq->req_flags |= REQFLAG_DATA_IN; 1696 } else { 1697 rq->req_flags |= REQFLAG_DATA_OUT; 1698 } 1699 } 1700 1701 eseg = dm_segs + nseg; 1702 1703 while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) { 1704 if (IS_FC(isp)) { 1705 ispreqt3_t *rq3 = (ispreqt3_t *)rq; 1706 rq3->req_dataseg[rq3->req_seg_count].ds_base = 1707 dm_segs->ds_addr; 1708 rq3->req_dataseg[rq3->req_seg_count].ds_count = 1709 dm_segs->ds_len; 1710 } else { 1711 rq->req_dataseg[rq->req_seg_count].ds_base = 1712 dm_segs->ds_addr; 1713 rq->req_dataseg[rq->req_seg_count].ds_count = 1714 dm_segs->ds_len; 1715 } 1716 datalen -= dm_segs->ds_len; 1717 rq->req_seg_count++; 1718 dm_segs++; 1719 } 1720 1721 while (datalen > 0 && dm_segs != eseg) { 1722 u_int16_t onxti; 1723 ispcontreq64_t local, *crq = &local, *cqe; 1724 1725 cqe = (ispcontreq64_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); 1726 onxti = nxti; 1727 nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp)); 1728 if (nxti == mp->optr) { 1729 isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++"); 1730 mp->error = MUSHERR_NOQENTRIES; 1731 return; 1732 } 1733 rq->req_header.rqs_entry_count++; 1734 MEMZERO((void *)crq, sizeof (*crq)); 1735 crq->req_header.rqs_entry_count = 1; 1736 crq->req_header.rqs_entry_type = RQSTYPE_A64_CONT; 1737 1738 seglim = 0; 1739 while (datalen > 0 && seglim < ISP_CDSEG64 && dm_segs != eseg) { 1740 crq->req_dataseg[seglim].ds_base = 1741 dm_segs->ds_addr; 1742 crq->req_dataseg[seglim].ds_count = 1743 dm_segs->ds_len; 1744 rq->req_seg_count++; 1745 dm_segs++; 1746 seglim++; 1747 datalen -= dm_segs->ds_len; 1748 } 1749 isp_put_cont64_req(isp, crq, cqe); 1750 MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN); 1751 } 1752 *mp->nxtip = nxti; 1753} 1754#else 1755static void 1756dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 1757{ 1758 mush_t *mp; 1759 struct ispsoftc *isp; 1760 struct ccb_scsiio *csio; 1761 struct isp_pcisoftc *pcs; 1762 bus_dmamap_t *dp; 1763 bus_dma_segment_t *eseg; 1764 ispreq_t *rq; 1765 int seglim, datalen; 1766 u_int16_t nxti; 1767 1768 mp = (mush_t *) arg; 1769 if (error) { 1770 mp->error = error; 1771 return; 1772 } 1773 1774 if (nseg < 1) { 1775 isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg); 1776 mp->error = EFAULT; 1777 return; 1778 } 1779 csio = mp->cmd_token; 1780 isp = mp->isp; 1781 rq = mp->rq; 1782 pcs = (struct isp_pcisoftc *)mp->isp; 1783 dp = &pcs->dmaps[isp_handle_index(rq->req_handle)]; 1784 nxti = *mp->nxtip; 1785 1786 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1787 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREREAD); 1788 } else { 1789 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_PREWRITE); 1790 } 1791 1792 datalen = XS_XFRLEN(csio); 1793 1794 /* 1795 * We're passed an initial partially filled in entry that 1796 * has most fields filled in except for data transfer 1797 * related values. 1798 * 1799 * Our job is to fill in the initial request queue entry and 1800 * then to start allocating and filling in continuation entries 1801 * until we've covered the entire transfer. 1802 */ 1803 1804 if (IS_FC(isp)) { 1805 seglim = ISP_RQDSEG_T2; 1806 ((ispreqt2_t *)rq)->req_totalcnt = datalen; 1807 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1808 ((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_IN; 1809 } else { 1810 ((ispreqt2_t *)rq)->req_flags |= REQFLAG_DATA_OUT; 1811 } 1812 } else { 1813 if (csio->cdb_len > 12) { 1814 seglim = 0; 1815 } else { 1816 seglim = ISP_RQDSEG; 1817 } 1818 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 1819 rq->req_flags |= REQFLAG_DATA_IN; 1820 } else { 1821 rq->req_flags |= REQFLAG_DATA_OUT; 1822 } 1823 } 1824 1825 eseg = dm_segs + nseg; 1826 1827 while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) { 1828 if (IS_FC(isp)) { 1829 ispreqt2_t *rq2 = (ispreqt2_t *)rq; 1830 rq2->req_dataseg[rq2->req_seg_count].ds_base = 1831 dm_segs->ds_addr; 1832 rq2->req_dataseg[rq2->req_seg_count].ds_count = 1833 dm_segs->ds_len; 1834 } else { 1835 rq->req_dataseg[rq->req_seg_count].ds_base = 1836 dm_segs->ds_addr; 1837 rq->req_dataseg[rq->req_seg_count].ds_count = 1838 dm_segs->ds_len; 1839 } 1840 datalen -= dm_segs->ds_len; 1841 rq->req_seg_count++; 1842 dm_segs++; 1843 } 1844 1845 while (datalen > 0 && dm_segs != eseg) { 1846 u_int16_t onxti; 1847 ispcontreq_t local, *crq = &local, *cqe; 1848 1849 cqe = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); 1850 onxti = nxti; 1851 nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp)); 1852 if (nxti == mp->optr) { 1853 isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++"); 1854 mp->error = MUSHERR_NOQENTRIES; 1855 return; 1856 } 1857 rq->req_header.rqs_entry_count++; 1858 MEMZERO((void *)crq, sizeof (*crq)); 1859 crq->req_header.rqs_entry_count = 1; 1860 crq->req_header.rqs_entry_type = RQSTYPE_DATASEG; 1861 1862 seglim = 0; 1863 while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) { 1864 crq->req_dataseg[seglim].ds_base = 1865 dm_segs->ds_addr; 1866 crq->req_dataseg[seglim].ds_count = 1867 dm_segs->ds_len; 1868 rq->req_seg_count++; 1869 dm_segs++; 1870 seglim++; 1871 datalen -= dm_segs->ds_len; 1872 } 1873 isp_put_cont_req(isp, crq, cqe); 1874 MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN); 1875 } 1876 *mp->nxtip = nxti; 1877} 1878#endif 1879 1880static int 1881isp_pci_dmasetup(struct ispsoftc *isp, struct ccb_scsiio *csio, ispreq_t *rq, 1882 u_int16_t *nxtip, u_int16_t optr) 1883{ 1884 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp; 1885 ispreq_t *qep; 1886 bus_dmamap_t *dp = NULL; 1887 mush_t mush, *mp; 1888 void (*eptr)(void *, bus_dma_segment_t *, int, int); 1889 1890 qep = (ispreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, isp->isp_reqidx); 1891#ifdef ISP_TARGET_MODE 1892 if (csio->ccb_h.func_code == XPT_CONT_TARGET_IO) { 1893 if (IS_FC(isp)) { 1894 eptr = tdma_mkfc; 1895 } else { 1896 eptr = tdma_mk; 1897 } 1898 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE || 1899 (csio->dxfer_len == 0)) { 1900 mp = &mush; 1901 mp->isp = isp; 1902 mp->cmd_token = csio; 1903 mp->rq = rq; /* really a ct_entry_t or ct2_entry_t */ 1904 mp->nxtip = nxtip; 1905 mp->optr = optr; 1906 mp->error = 0; 1907 (*eptr)(mp, NULL, 0, 0); 1908 goto mbxsync; 1909 } 1910 } else 1911#endif 1912 eptr = dma2; 1913 1914 1915 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE || 1916 (csio->dxfer_len == 0)) { 1917 rq->req_seg_count = 1; 1918 goto mbxsync; 1919 } 1920 1921 /* 1922 * Do a virtual grapevine step to collect info for 1923 * the callback dma allocation that we have to use... 1924 */ 1925 mp = &mush; 1926 mp->isp = isp; 1927 mp->cmd_token = csio; 1928 mp->rq = rq; 1929 mp->nxtip = nxtip; 1930 mp->optr = optr; 1931 mp->error = 0; 1932 1933 if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) { 1934 if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) { 1935 int error, s; 1936 dp = &pcs->dmaps[isp_handle_index(rq->req_handle)]; 1937 s = splsoftvm(); 1938 error = bus_dmamap_load(pcs->dmat, *dp, 1939 csio->data_ptr, csio->dxfer_len, eptr, mp, 0); 1940 if (error == EINPROGRESS) { 1941 bus_dmamap_unload(pcs->dmat, *dp); 1942 mp->error = EINVAL; 1943 isp_prt(isp, ISP_LOGERR, 1944 "deferred dma allocation not supported"); 1945 } else if (error && mp->error == 0) { 1946#ifdef DIAGNOSTIC 1947 isp_prt(isp, ISP_LOGERR, 1948 "error %d in dma mapping code", error); 1949#endif 1950 mp->error = error; 1951 } 1952 splx(s); 1953 } else { 1954 /* Pointer to physical buffer */ 1955 struct bus_dma_segment seg; 1956 seg.ds_addr = (bus_addr_t)(vm_offset_t)csio->data_ptr; 1957 seg.ds_len = csio->dxfer_len; 1958 (*eptr)(mp, &seg, 1, 0); 1959 } 1960 } else { 1961 struct bus_dma_segment *segs; 1962 1963 if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) { 1964 isp_prt(isp, ISP_LOGERR, 1965 "Physical segment pointers unsupported"); 1966 mp->error = EINVAL; 1967 } else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) { 1968 isp_prt(isp, ISP_LOGERR, 1969 "Virtual segment addresses unsupported"); 1970 mp->error = EINVAL; 1971 } else { 1972 /* Just use the segments provided */ 1973 segs = (struct bus_dma_segment *) csio->data_ptr; 1974 (*eptr)(mp, segs, csio->sglist_cnt, 0); 1975 } 1976 } 1977 if (mp->error) { 1978 int retval = CMD_COMPLETE; 1979 if (mp->error == MUSHERR_NOQENTRIES) { 1980 retval = CMD_EAGAIN; 1981 } else if (mp->error == EFBIG) { 1982 XS_SETERR(csio, CAM_REQ_TOO_BIG); 1983 } else if (mp->error == EINVAL) { 1984 XS_SETERR(csio, CAM_REQ_INVALID); 1985 } else { 1986 XS_SETERR(csio, CAM_UNREC_HBA_ERROR); 1987 } 1988 return (retval); 1989 } 1990mbxsync: 1991 switch (rq->req_header.rqs_entry_type) { 1992 case RQSTYPE_REQUEST: 1993 isp_put_request(isp, rq, qep); 1994 break; 1995 case RQSTYPE_CMDONLY: 1996 isp_put_extended_request(isp, (ispextreq_t *)rq, 1997 (ispextreq_t *)qep); 1998 break; 1999 case RQSTYPE_T2RQS: 2000 isp_put_request_t2(isp, (ispreqt2_t *) rq, (ispreqt2_t *) qep); 2001 break; 2002 case RQSTYPE_A64: 2003 case RQSTYPE_T3RQS: 2004 isp_put_request_t3(isp, (ispreqt3_t *) rq, (ispreqt3_t *) qep); 2005 break; 2006 } 2007 return (CMD_QUEUED); 2008} 2009 2010static void 2011isp_pci_dmateardown(struct ispsoftc *isp, XS_T *xs, u_int16_t handle) 2012{ 2013 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp; 2014 bus_dmamap_t *dp = &pcs->dmaps[isp_handle_index(handle)]; 2015 if ((xs->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 2016 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTREAD); 2017 } else { 2018 bus_dmamap_sync(pcs->dmat, *dp, BUS_DMASYNC_POSTWRITE); 2019 } 2020 bus_dmamap_unload(pcs->dmat, *dp); 2021} 2022 2023 2024static void 2025isp_pci_reset1(struct ispsoftc *isp) 2026{ 2027 /* Make sure the BIOS is disabled */ 2028 isp_pci_wr_reg(isp, HCCR, PCI_HCCR_CMD_BIOS); 2029 /* and enable interrupts */ 2030 ENABLE_INTS(isp); 2031} 2032 2033static void 2034isp_pci_dumpregs(struct ispsoftc *isp, const char *msg) 2035{ 2036 struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp; 2037 if (msg) 2038 printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg); 2039 else 2040 printf("%s:\n", device_get_nameunit(isp->isp_dev)); 2041 if (IS_SCSI(isp)) 2042 printf(" biu_conf1=%x", ISP_READ(isp, BIU_CONF1)); 2043 else 2044 printf(" biu_csr=%x", ISP_READ(isp, BIU2100_CSR)); 2045 printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR), 2046 ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA)); 2047 printf("risc_hccr=%x\n", ISP_READ(isp, HCCR)); 2048 2049 2050 if (IS_SCSI(isp)) { 2051 ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE); 2052 printf(" cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n", 2053 ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS), 2054 ISP_READ(isp, CDMA_FIFO_STS)); 2055 printf(" ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n", 2056 ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS), 2057 ISP_READ(isp, DDMA_FIFO_STS)); 2058 printf(" sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n", 2059 ISP_READ(isp, SXP_INTERRUPT), 2060 ISP_READ(isp, SXP_GROSS_ERR), 2061 ISP_READ(isp, SXP_PINS_CTRL)); 2062 ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE); 2063 } 2064 printf(" mbox regs: %x %x %x %x %x\n", 2065 ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1), 2066 ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3), 2067 ISP_READ(isp, OUTMAILBOX4)); 2068 printf(" PCI Status Command/Status=%x\n", 2069 pci_read_config(pcs->pci_dev, PCIR_COMMAND, 1)); 2070} 2071