/* $FreeBSD: head/sys/dev/isp/isp_sbus.c 100680 2002-07-25 16:02:09Z mjacob $ */
/*
 * SBus specific probe and attach routines for Qlogic ISP SCSI adapters.
 * FreeBSD Version.
 *
 * Copyright (c) 1997, 1998, 1999, 2000, 2001 by Matthew Jacob
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
28 */ 29 30 31#include <sys/param.h> 32#include <sys/systm.h> 33#include <sys/bus.h> 34#include <sys/kernel.h> 35#include <sys/resource.h> 36#include <machine/bus.h> 37#include <machine/ofw_machdep.h> 38#include <machine/resource.h> 39#include <sys/rman.h> 40#include <ofw/openfirm.h> 41#include <sparc64/sbus/sbusvar.h> 42 43#include <dev/isp/isp_freebsd.h> 44 45static u_int16_t isp_sbus_rd_reg(struct ispsoftc *, int); 46static void isp_sbus_wr_reg(struct ispsoftc *, int, u_int16_t); 47static int 48isp_sbus_rd_isr(struct ispsoftc *, u_int16_t *, u_int16_t *, u_int16_t *); 49static int isp_sbus_mbxdma(struct ispsoftc *); 50static int 51isp_sbus_dmasetup(struct ispsoftc *, XS_T *, ispreq_t *, u_int16_t *, u_int16_t); 52static void 53isp_sbus_dmateardown(struct ispsoftc *, XS_T *, u_int16_t); 54 55static void isp_sbus_reset1(struct ispsoftc *); 56static void isp_sbus_dumpregs(struct ispsoftc *, const char *); 57 58static struct ispmdvec mdvec = { 59 isp_sbus_rd_isr, 60 isp_sbus_rd_reg, 61 isp_sbus_wr_reg, 62 isp_sbus_mbxdma, 63 isp_sbus_dmasetup, 64 isp_sbus_dmateardown, 65 NULL, 66 isp_sbus_reset1, 67 isp_sbus_dumpregs, 68 NULL, 69 BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64 70}; 71 72static int isp_sbus_probe (device_t); 73static int isp_sbus_attach (device_t); 74 75 76struct isp_sbussoftc { 77 struct ispsoftc sbus_isp; 78 device_t sbus_dev; 79 struct resource * sbus_reg; 80 bus_space_tag_t sbus_st; 81 bus_space_handle_t sbus_sh; 82 void * ih; 83 int16_t sbus_poff[_NREG_BLKS]; 84 bus_dma_tag_t dmat; 85 bus_dmamap_t *dmaps; 86 sdparam sbus_param; 87 struct ispmdvec sbus_mdvec; 88 struct resource * sbus_ires; 89}; 90 91extern ispfwfunc *isp_get_firmware_p; 92 93static device_method_t isp_sbus_methods[] = { 94 /* Device interface */ 95 DEVMETHOD(device_probe, isp_sbus_probe), 96 DEVMETHOD(device_attach, isp_sbus_attach), 97 { 0, 0 } 98}; 99static void isp_sbus_intr(void *); 100 101static driver_t isp_sbus_driver = { 102 "isp", isp_sbus_methods, sizeof (struct isp_sbussoftc) 
103}; 104static devclass_t isp_devclass; 105DRIVER_MODULE(isp, sbus, isp_sbus_driver, isp_devclass, 0, 0); 106 107static int 108isp_sbus_probe(device_t dev) 109{ 110 int found = 0; 111 char *name = sbus_get_name(dev); 112 if (strcmp(name, "SUNW,isp") == 0 || 113 strcmp(name, "QLGC,isp") == 0 || 114 strcmp(name, "ptisp") == 0 || 115 strcmp(name, "PTI,ptisp") == 0) { 116 found++; 117 } 118 if (!found) 119 return (ENXIO); 120 121 if (isp_announced == 0 && bootverbose) { 122 printf("Qlogic ISP Driver, FreeBSD Version %d.%d, " 123 "Core Version %d.%d\n", 124 ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR, 125 ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR); 126 isp_announced++; 127 } 128 return (0); 129} 130 131static int 132isp_sbus_attach(device_t dev) 133{ 134 struct resource *regs; 135 int tval, iqd, isp_debug, role, rid, ispburst, freq; 136 struct isp_sbussoftc *sbs; 137 struct ispsoftc *isp = NULL; 138 int locksetup = 0; 139 140 /* 141 * Figure out if we're supposed to skip this one. 142 * If we are, we actually go to ISP_ROLE_NONE. 
143 */ 144 145 tval = 0; 146 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 147 "disable", &tval) == 0 && tval) { 148 device_printf(dev, "device is disabled\n"); 149 /* but return 0 so the !$)$)*!$*) unit isn't reused */ 150 return (0); 151 } 152 153 role = 0; 154 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 155 "role", &role) == 0 && 156 ((role & ~(ISP_ROLE_INITIATOR|ISP_ROLE_TARGET)) == 0)) { 157 device_printf(dev, "setting role to 0x%x\n", role); 158 } else { 159#ifdef ISP_TARGET_MODE 160 role = ISP_ROLE_INITIATOR|ISP_ROLE_TARGET; 161#else 162 role = ISP_DEFAULT_ROLES; 163#endif 164 } 165 166 sbs = malloc(sizeof (*sbs), M_DEVBUF, M_NOWAIT | M_ZERO); 167 if (sbs == NULL) { 168 device_printf(dev, "cannot allocate softc\n"); 169 return (ENOMEM); 170 } 171 172 regs = NULL; 173 iqd = 0; 174 175 rid = 0; 176 regs = 177 bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 0, ~0, 1, RF_ACTIVE); 178 if (regs == 0) { 179 device_printf(dev, "unable to map registers\n"); 180 goto bad; 181 } 182 sbs->sbus_dev = dev; 183 sbs->sbus_reg = regs; 184 sbs->sbus_st = rman_get_bustag(regs); 185 sbs->sbus_sh = rman_get_bushandle(regs); 186 sbs->sbus_mdvec = mdvec; 187 188 sbs->sbus_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF; 189 sbs->sbus_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = SBUS_MBOX_REGS_OFF; 190 sbs->sbus_poff[SXP_BLOCK >> _BLK_REG_SHFT] = SBUS_SXP_REGS_OFF; 191 sbs->sbus_poff[RISC_BLOCK >> _BLK_REG_SHFT] = SBUS_RISC_REGS_OFF; 192 sbs->sbus_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF; 193 isp = &sbs->sbus_isp; 194 isp->isp_mdvec = &sbs->sbus_mdvec; 195 isp->isp_bustype = ISP_BT_SBUS; 196 isp->isp_type = ISP_HA_SCSI_UNKNOWN; 197 isp->isp_param = &sbs->sbus_param; 198 isp->isp_revision = 0; /* XXX */ 199 isp->isp_role = role; 200 isp->isp_dev = dev; 201 202 freq = sbus_get_clockfreq(dev); 203 if (freq) { 204 /* 205 * Convert from HZ to MHz, rounding up. 
206 */ 207 freq = (freq + 500000)/1000000; 208 } else { 209 freq = 25000000; 210 } 211 sbs->sbus_mdvec.dv_clock = freq; 212 213 /* 214 * Now figure out what the proper burst sizes, etc., to use. 215 * Unfortunately, there is no ddi_dma_burstsizes here which 216 * walks up the tree finding the limiting burst size node (if 217 * any). We just use what's here for isp. 218 */ 219 ispburst = sbus_get_burstsz(dev); 220 if (ispburst == 0) { 221 ispburst = SBUS_BURST_32 - 1; 222 } 223 sbs->sbus_mdvec.dv_conf1 = 0; 224 if (ispburst & (1 << 5)) { 225 sbs->sbus_mdvec.dv_conf1 = BIU_SBUS_CONF1_FIFO_32; 226 } else if (ispburst & (1 << 4)) { 227 sbs->sbus_mdvec.dv_conf1 = BIU_SBUS_CONF1_FIFO_16; 228 } else if (ispburst & (1 << 3)) { 229 sbs->sbus_mdvec.dv_conf1 = 230 BIU_SBUS_CONF1_BURST8 | BIU_SBUS_CONF1_FIFO_8; 231 } 232 if (sbs->sbus_mdvec.dv_conf1) { 233 sbs->sbus_mdvec.dv_conf1 |= BIU_BURST_ENABLE; 234 } 235 236 /* 237 * Some early versions of the PTI SBus adapter 238 * would fail in trying to download (via poking) 239 * FW. We give up on them. 240 */ 241 if (strcmp("PTI,ptisp", sbus_get_name(dev)) == 0 || 242 strcmp("ptisp", sbus_get_name(dev)) == 0) { 243 sbs->sbus_mdvec.dv_ispfw = NULL; 244 } 245 /* 246 * Try and find firmware for this device. 
247 */ 248 249 if (isp_get_firmware_p) { 250 (*isp_get_firmware_p)(0, 0, 0x1000, &sbs->sbus_mdvec.dv_ispfw); 251 } 252 253 iqd = 0; 254 sbs->sbus_ires = bus_alloc_resource(dev, SYS_RES_IRQ, &iqd, 0, ~0, 255 1, RF_ACTIVE | RF_SHAREABLE); 256 if (sbs->sbus_ires == NULL) { 257 device_printf(dev, "could not allocate interrupt\n"); 258 goto bad; 259 } 260 261 tval = 0; 262 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 263 "fwload_disable", &tval) == 0 && tval != 0) { 264 isp->isp_confopts |= ISP_CFG_NORELOAD; 265 } 266 tval = 0; 267 if (resource_int_value(device_get_name(dev), device_get_unit(dev), 268 "ignore_nvram", &tval) == 0 && tval != 0) { 269 isp->isp_confopts |= ISP_CFG_NONVRAM; 270 } 271 272 isp_debug = 0; 273 (void) resource_int_value(device_get_name(dev), device_get_unit(dev), 274 "debug", &isp_debug); 275 276 /* Make sure the lock is set up. */ 277 mtx_init(&isp->isp_osinfo.lock, "isp", NULL, MTX_DEF); 278 locksetup++; 279 280 if (bus_setup_intr(dev, sbs->sbus_ires, ISP_IFLAGS, 281 isp_sbus_intr, isp, &sbs->ih)) { 282 device_printf(dev, "could not setup interrupt\n"); 283 goto bad; 284 } 285 286 /* 287 * Set up logging levels. 288 */ 289 if (isp_debug) { 290 isp->isp_dblev = isp_debug; 291 } else { 292 isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR; 293 } 294 if (bootverbose) 295 isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO; 296 297 /* 298 * Make sure we're in reset state. 299 */ 300 ISP_LOCK(isp); 301 isp_reset(isp); 302 if (isp->isp_state != ISP_RESETSTATE) { 303 ISP_UNLOCK(isp); 304 goto bad; 305 } 306 isp_init(isp); 307 if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_INITSTATE) { 308 isp_uninit(isp); 309 ISP_UNLOCK(isp); 310 goto bad; 311 } 312 isp_attach(isp); 313 if (isp->isp_role != ISP_ROLE_NONE && isp->isp_state != ISP_RUNSTATE) { 314 isp_uninit(isp); 315 ISP_UNLOCK(isp); 316 goto bad; 317 } 318 /* 319 * XXXX: Here is where we might unload the f/w module 320 * XXXX: (or decrease the reference count to it). 
321 */ 322 ISP_UNLOCK(isp); 323 return (0); 324 325bad: 326 327 if (sbs && sbs->ih) { 328 (void) bus_teardown_intr(dev, sbs->sbus_ires, sbs->ih); 329 } 330 331 if (locksetup && isp) { 332 mtx_destroy(&isp->isp_osinfo.lock); 333 } 334 335 if (sbs && sbs->sbus_ires) { 336 bus_release_resource(dev, SYS_RES_IRQ, iqd, sbs->sbus_ires); 337 } 338 339 340 if (regs) { 341 (void) bus_release_resource(dev, 0, 0, regs); 342 } 343 344 if (sbs) { 345 if (sbs->sbus_isp.isp_param) 346 free(sbs->sbus_isp.isp_param, M_DEVBUF); 347 free(sbs, M_DEVBUF); 348 } 349 350 /* 351 * XXXX: Here is where we might unload the f/w module 352 * XXXX: (or decrease the reference count to it). 353 */ 354 return (ENXIO); 355} 356 357static void 358isp_sbus_intr(void *arg) 359{ 360 struct ispsoftc *isp = arg; 361 u_int16_t isr, sema, mbox; 362 363 ISP_LOCK(isp); 364 isp->isp_intcnt++; 365 if (ISP_READ_ISR(isp, &isr, &sema, &mbox) == 0) { 366 isp->isp_intbogus++; 367 } else { 368 int iok = isp->isp_osinfo.intsok; 369 isp->isp_osinfo.intsok = 0; 370 isp_intr(isp, isr, sema, mbox); 371 isp->isp_osinfo.intsok = iok; 372 } 373 ISP_UNLOCK(isp); 374} 375 376#define IspVirt2Off(a, x) \ 377 (((struct isp_sbussoftc *)a)->sbus_poff[((x) & _BLK_REG_MASK) >> \ 378 _BLK_REG_SHFT] + ((x) & 0xff)) 379 380#define BXR2(sbc, off) \ 381 bus_space_read_2(sbc->sbus_st, sbc->sbus_sh, off) 382 383static int 384isp_sbus_rd_isr(struct ispsoftc *isp, u_int16_t *isrp, 385 u_int16_t *semap, u_int16_t *mbp) 386{ 387 struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp; 388 u_int16_t isr, sema; 389 390 isr = BXR2(sbc, IspVirt2Off(isp, BIU_ISR)); 391 sema = BXR2(sbc, IspVirt2Off(isp, BIU_SEMA)); 392 isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema); 393 isr &= INT_PENDING_MASK(isp); 394 sema &= BIU_SEMA_LOCK; 395 if (isr == 0 && sema == 0) { 396 return (0); 397 } 398 *isrp = isr; 399 if ((*semap = sema) != 0) { 400 *mbp = BXR2(sbc, IspVirt2Off(isp, OUTMAILBOX0)); 401 } 402 return (1); 403} 404 405static u_int16_t 
406isp_sbus_rd_reg(struct ispsoftc *isp, int regoff) 407{ 408 u_int16_t rval; 409 struct isp_sbussoftc *sbs = (struct isp_sbussoftc *) isp; 410 int offset = sbs->sbus_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT]; 411 offset += (regoff & 0xff); 412 rval = bus_space_read_2(sbs->sbus_st, sbs->sbus_sh, offset); 413 isp_prt(isp, ISP_LOGDEBUG3, 414 "isp_sbus_rd_reg(off %x) = %x", regoff, rval); 415 return (rval); 416} 417 418static void 419isp_sbus_wr_reg(struct ispsoftc *isp, int regoff, u_int16_t val) 420{ 421 struct isp_sbussoftc *sbs = (struct isp_sbussoftc *) isp; 422 int offset = sbs->sbus_poff[(regoff & _BLK_REG_MASK) >> _BLK_REG_SHFT]; 423 offset += (regoff & 0xff); 424 isp_prt(isp, ISP_LOGDEBUG3, 425 "isp_sbus_wr_reg(off %x) = %x", regoff, val); 426 bus_space_write_2(sbs->sbus_st, sbs->sbus_sh, offset, val); 427} 428 429struct imush { 430 struct ispsoftc *isp; 431 int error; 432}; 433 434static void imc(void *, bus_dma_segment_t *, int, int); 435 436static void 437imc(void *arg, bus_dma_segment_t *segs, int nseg, int error) 438{ 439 struct imush *imushp = (struct imush *) arg; 440 if (error) { 441 imushp->error = error; 442 } else { 443 struct ispsoftc *isp =imushp->isp; 444 bus_addr_t addr = segs->ds_addr; 445 446 isp->isp_rquest_dma = addr; 447 addr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); 448 isp->isp_result_dma = addr; 449 } 450} 451 452/* 453 * Should be BUS_SPACE_MAXSIZE, but MAXPHYS is larger than BUS_SPACE_MAXSIZE 454 */ 455#define ISP_NSEGS ((MAXPHYS / PAGE_SIZE) + 1) 456 457static int 458isp_sbus_mbxdma(struct ispsoftc *isp) 459{ 460 struct isp_sbussoftc *sbs = (struct isp_sbussoftc *)isp; 461 caddr_t base; 462 u_int32_t len; 463 int i, error, ns; 464 bus_size_t bl; 465 struct imush im; 466 467 /* 468 * Already been here? If so, leave... 
469 */ 470 if (isp->isp_rquest) { 471 return (0); 472 } 473 474 ISP_UNLOCK(isp); 475 bl = BUS_SPACE_MAXADDR_24BIT; 476 477 if (bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR, 478 BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE, 479 ISP_NSEGS, bl, 0, &sbs->dmat)) { 480 isp_prt(isp, ISP_LOGERR, "could not create master dma tag"); 481 ISP_LOCK(isp); 482 return(1); 483 } 484 485 486 len = sizeof (XS_T **) * isp->isp_maxcmds; 487 isp->isp_xflist = (XS_T **) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO); 488 if (isp->isp_xflist == NULL) { 489 isp_prt(isp, ISP_LOGERR, "cannot alloc xflist array"); 490 ISP_LOCK(isp); 491 return (1); 492 } 493 len = sizeof (bus_dmamap_t) * isp->isp_maxcmds; 494 sbs->dmaps = (bus_dmamap_t *) malloc(len, M_DEVBUF, M_WAITOK); 495 if (sbs->dmaps == NULL) { 496 isp_prt(isp, ISP_LOGERR, "can't alloc dma map storage"); 497 free(isp->isp_xflist, M_DEVBUF); 498 ISP_LOCK(isp); 499 return (1); 500 } 501 502 /* 503 * Allocate and map the request, result queues, plus FC scratch area. 
504 */ 505 len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); 506 len += ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp)); 507 508 ns = (len / PAGE_SIZE) + 1; 509 if (bus_dma_tag_create(sbs->dmat, QENTRY_LEN, 0, BUS_SPACE_MAXADDR, 510 BUS_SPACE_MAXADDR, NULL, NULL, len, ns, bl, 0, &isp->isp_cdmat)) { 511 isp_prt(isp, ISP_LOGERR, 512 "cannot create a dma tag for control spaces"); 513 free(sbs->dmaps, M_DEVBUF); 514 free(isp->isp_xflist, M_DEVBUF); 515 ISP_LOCK(isp); 516 return (1); 517 } 518 519 if (bus_dmamem_alloc(isp->isp_cdmat, (void **)&base, BUS_DMA_NOWAIT, 520 &isp->isp_cdmap) != 0) { 521 isp_prt(isp, ISP_LOGERR, 522 "cannot allocate %d bytes of CCB memory", len); 523 bus_dma_tag_destroy(isp->isp_cdmat); 524 free(isp->isp_xflist, M_DEVBUF); 525 free(sbs->dmaps, M_DEVBUF); 526 ISP_LOCK(isp); 527 return (1); 528 } 529 530 for (i = 0; i < isp->isp_maxcmds; i++) { 531 error = bus_dmamap_create(sbs->dmat, 0, &sbs->dmaps[i]); 532 if (error) { 533 isp_prt(isp, ISP_LOGERR, 534 "error %d creating per-cmd DMA maps", error); 535 while (--i >= 0) { 536 bus_dmamap_destroy(sbs->dmat, sbs->dmaps[i]); 537 } 538 goto bad; 539 } 540 } 541 542 im.isp = isp; 543 im.error = 0; 544 bus_dmamap_load(isp->isp_cdmat, isp->isp_cdmap, base, len, imc, &im, 0); 545 if (im.error) { 546 isp_prt(isp, ISP_LOGERR, 547 "error %d loading dma map for control areas", im.error); 548 goto bad; 549 } 550 551 isp->isp_rquest = base; 552 base += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp)); 553 ISP_LOCK(isp); 554 isp->isp_result = base; 555 return (0); 556 557bad: 558 bus_dmamem_free(isp->isp_cdmat, base, isp->isp_cdmap); 559 bus_dma_tag_destroy(isp->isp_cdmat); 560 free(isp->isp_xflist, M_DEVBUF); 561 free(sbs->dmaps, M_DEVBUF); 562 ISP_LOCK(isp); 563 isp->isp_rquest = NULL; 564 return (1); 565} 566 567typedef struct { 568 struct ispsoftc *isp; 569 void *cmd_token; 570 void *rq; 571 u_int16_t *nxtip; 572 u_int16_t optr; 573 u_int error; 574} mush_t; 575 576#define MUSHERR_NOQENTRIES -2 577 578 579static void dma2(void *, 
bus_dma_segment_t *, int, int); 580 581static void 582dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 583{ 584 mush_t *mp; 585 struct ispsoftc *isp; 586 struct ccb_scsiio *csio; 587 struct isp_sbussoftc *sbs; 588 bus_dmamap_t *dp; 589 bus_dma_segment_t *eseg; 590 ispreq_t *rq; 591 int seglim, datalen; 592 u_int16_t nxti; 593 594 mp = (mush_t *) arg; 595 if (error) { 596 mp->error = error; 597 return; 598 } 599 600 if (nseg < 1) { 601 isp_prt(mp->isp, ISP_LOGERR, "bad segment count (%d)", nseg); 602 mp->error = EFAULT; 603 return; 604 } 605 csio = mp->cmd_token; 606 isp = mp->isp; 607 rq = mp->rq; 608 sbs = (struct isp_sbussoftc *)mp->isp; 609 dp = &sbs->dmaps[isp_handle_index(rq->req_handle)]; 610 nxti = *mp->nxtip; 611 612 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 613 bus_dmamap_sync(sbs->dmat, *dp, BUS_DMASYNC_PREREAD); 614 } else { 615 bus_dmamap_sync(sbs->dmat, *dp, BUS_DMASYNC_PREWRITE); 616 } 617 618 datalen = XS_XFRLEN(csio); 619 620 /* 621 * We're passed an initial partially filled in entry that 622 * has most fields filled in except for data transfer 623 * related values. 624 * 625 * Our job is to fill in the initial request queue entry and 626 * then to start allocating and filling in continuation entries 627 * until we've covered the entire transfer. 
628 */ 629 630 if (csio->cdb_len > 12) { 631 seglim = 0; 632 } else { 633 seglim = ISP_RQDSEG; 634 } 635 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 636 rq->req_flags |= REQFLAG_DATA_IN; 637 } else { 638 rq->req_flags |= REQFLAG_DATA_OUT; 639 } 640 641 eseg = dm_segs + nseg; 642 643 while (datalen != 0 && rq->req_seg_count < seglim && dm_segs != eseg) { 644 rq->req_dataseg[rq->req_seg_count].ds_base = dm_segs->ds_addr; 645 rq->req_dataseg[rq->req_seg_count].ds_count = dm_segs->ds_len; 646 datalen -= dm_segs->ds_len; 647 rq->req_seg_count++; 648 dm_segs++; 649 } 650 651 while (datalen > 0 && dm_segs != eseg) { 652 u_int16_t onxti; 653 ispcontreq_t local, *crq = &local, *cqe; 654 655 cqe = (ispcontreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, nxti); 656 onxti = nxti; 657 nxti = ISP_NXT_QENTRY(onxti, RQUEST_QUEUE_LEN(isp)); 658 if (nxti == mp->optr) { 659 isp_prt(isp, ISP_LOGDEBUG0, "Request Queue Overflow++"); 660 mp->error = MUSHERR_NOQENTRIES; 661 return; 662 } 663 rq->req_header.rqs_entry_count++; 664 MEMZERO((void *)crq, sizeof (*crq)); 665 crq->req_header.rqs_entry_count = 1; 666 crq->req_header.rqs_entry_type = RQSTYPE_DATASEG; 667 668 seglim = 0; 669 while (datalen > 0 && seglim < ISP_CDSEG && dm_segs != eseg) { 670 crq->req_dataseg[seglim].ds_base = 671 dm_segs->ds_addr; 672 crq->req_dataseg[seglim].ds_count = 673 dm_segs->ds_len; 674 rq->req_seg_count++; 675 dm_segs++; 676 seglim++; 677 datalen -= dm_segs->ds_len; 678 } 679 isp_put_cont_req(isp, crq, cqe); 680 MEMORYBARRIER(isp, SYNC_REQUEST, onxti, QENTRY_LEN); 681 } 682 *mp->nxtip = nxti; 683} 684 685static int 686isp_sbus_dmasetup(struct ispsoftc *isp, struct ccb_scsiio *csio, ispreq_t *rq, 687 u_int16_t *nxtip, u_int16_t optr) 688{ 689 struct isp_sbussoftc *sbs = (struct isp_sbussoftc *)isp; 690 ispreq_t *qep; 691 bus_dmamap_t *dp = NULL; 692 mush_t mush, *mp; 693 void (*eptr)(void *, bus_dma_segment_t *, int, int); 694 695 qep = (ispreq_t *) ISP_QUEUE_ENTRY(isp->isp_rquest, isp->isp_reqidx); 696 
eptr = dma2; 697 698 699 if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE || 700 (csio->dxfer_len == 0)) { 701 rq->req_seg_count = 1; 702 goto mbxsync; 703 } 704 705 /* 706 * Do a virtual grapevine step to collect info for 707 * the callback dma allocation that we have to use... 708 */ 709 mp = &mush; 710 mp->isp = isp; 711 mp->cmd_token = csio; 712 mp->rq = rq; 713 mp->nxtip = nxtip; 714 mp->optr = optr; 715 mp->error = 0; 716 717 if ((csio->ccb_h.flags & CAM_SCATTER_VALID) == 0) { 718 if ((csio->ccb_h.flags & CAM_DATA_PHYS) == 0) { 719 int error, s; 720 dp = &sbs->dmaps[isp_handle_index(rq->req_handle)]; 721 s = splsoftvm(); 722 error = bus_dmamap_load(sbs->dmat, *dp, 723 csio->data_ptr, csio->dxfer_len, eptr, mp, 0); 724 if (error == EINPROGRESS) { 725 bus_dmamap_unload(sbs->dmat, *dp); 726 mp->error = EINVAL; 727 isp_prt(isp, ISP_LOGERR, 728 "deferred dma allocation not supported"); 729 } else if (error && mp->error == 0) { 730#ifdef DIAGNOSTIC 731 isp_prt(isp, ISP_LOGERR, 732 "error %d in dma mapping code", error); 733#endif 734 mp->error = error; 735 } 736 splx(s); 737 } else { 738 /* Pointer to physical buffer */ 739 struct bus_dma_segment seg; 740 seg.ds_addr = (bus_addr_t)csio->data_ptr; 741 seg.ds_len = csio->dxfer_len; 742 (*eptr)(mp, &seg, 1, 0); 743 } 744 } else { 745 struct bus_dma_segment *segs; 746 747 if ((csio->ccb_h.flags & CAM_DATA_PHYS) != 0) { 748 isp_prt(isp, ISP_LOGERR, 749 "Physical segment pointers unsupported"); 750 mp->error = EINVAL; 751 } else if ((csio->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) { 752 isp_prt(isp, ISP_LOGERR, 753 "Virtual segment addresses unsupported"); 754 mp->error = EINVAL; 755 } else { 756 /* Just use the segments provided */ 757 segs = (struct bus_dma_segment *) csio->data_ptr; 758 (*eptr)(mp, segs, csio->sglist_cnt, 0); 759 } 760 } 761 if (mp->error) { 762 int retval = CMD_COMPLETE; 763 if (mp->error == MUSHERR_NOQENTRIES) { 764 retval = CMD_EAGAIN; 765 } else if (mp->error == EFBIG) { 766 XS_SETERR(csio, 
CAM_REQ_TOO_BIG); 767 } else if (mp->error == EINVAL) { 768 XS_SETERR(csio, CAM_REQ_INVALID); 769 } else { 770 XS_SETERR(csio, CAM_UNREC_HBA_ERROR); 771 } 772 return (retval); 773 } 774mbxsync: 775 switch (rq->req_header.rqs_entry_type) { 776 case RQSTYPE_REQUEST: 777 isp_put_request(isp, rq, qep); 778 break; 779 case RQSTYPE_CMDONLY: 780 isp_put_extended_request(isp, (ispextreq_t *)rq, 781 (ispextreq_t *)qep); 782 break; 783 } 784 return (CMD_QUEUED); 785} 786 787static void 788isp_sbus_dmateardown(struct ispsoftc *isp, XS_T *xs, u_int16_t handle) 789{ 790 struct isp_sbussoftc *sbs = (struct isp_sbussoftc *)isp; 791 bus_dmamap_t *dp = &sbs->dmaps[isp_handle_index(handle)]; 792 if ((xs->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { 793 bus_dmamap_sync(sbs->dmat, *dp, BUS_DMASYNC_POSTREAD); 794 } else { 795 bus_dmamap_sync(sbs->dmat, *dp, BUS_DMASYNC_POSTWRITE); 796 } 797 bus_dmamap_unload(sbs->dmat, *dp); 798} 799 800 801static void 802isp_sbus_reset1(struct ispsoftc *isp) 803{ 804 /* enable interrupts */ 805 ENABLE_INTS(isp); 806} 807 808static void 809isp_sbus_dumpregs(struct ispsoftc *isp, const char *msg) 810{ 811 if (msg) 812 printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg); 813 else 814 printf("%s:\n", device_get_nameunit(isp->isp_dev)); 815 printf(" biu_conf1=%x", ISP_READ(isp, BIU_CONF1)); 816 printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR), 817 ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA)); 818 printf("risc_hccr=%x\n", ISP_READ(isp, HCCR)); 819 820 821 ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE); 822 printf(" cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n", 823 ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS), 824 ISP_READ(isp, CDMA_FIFO_STS)); 825 printf(" ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n", 826 ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS), 827 ISP_READ(isp, DDMA_FIFO_STS)); 828 printf(" sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n", 829 ISP_READ(isp, SXP_INTERRUPT), 830 ISP_READ(isp, SXP_GROSS_ERR), 831 
ISP_READ(isp, SXP_PINS_CTRL)); 832 ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE); 833 printf(" mbox regs: %x %x %x %x %x\n", 834 ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1), 835 ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3), 836 ISP_READ(isp, OUTMAILBOX4)); 837} 838