#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/bus.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <sys/rman.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_debug.h>

#include <cam/scsi/scsi_message.h>

#include <dev/advansys/adwvar.h>

/* Definitions for our use of the SIM private CCB area */
#define ccb_acb_ptr spriv_ptr0
#define ccb_adw_ptr spriv_ptr1

u_long adw_unit;

static __inline cam_status	adwccbstatus(union ccb*);
static __inline struct acb*	adwgetacb(struct adw_softc *adw);
static __inline void		adwfreeacb(struct adw_softc *adw,
					   struct acb *acb);

static void		adwmapmem(void *arg, bus_dma_segment_t *segs,
				  int nseg, int error);
static struct sg_map_node*
			adwallocsgmap(struct adw_softc *adw);
static int		adwallocacbs(struct adw_softc *adw);

static void		adwexecuteacb(void *arg, bus_dma_segment_t *dm_segs,
				      int nseg, int error);
static void		adw_action(struct cam_sim *sim, union ccb *ccb);
static void		adw_poll(struct cam_sim *sim);
static void		adw_async(void *callback_arg, u_int32_t code,
				  struct cam_path *path, void *arg);
static void		adwprocesserror(struct adw_softc *adw, struct acb *acb);
static void		adwtimeout(void *arg);
static void		adw_handle_device_reset(struct adw_softc *adw,
						u_int target);
static void		adw_handle_bus_reset(struct adw_softc *adw,
					     int initiated);

static __inline cam_status
adwccbstatus(union ccb* ccb)
{
	return (ccb->ccb_h.status & CAM_STATUS_MASK);
}

static __inline struct acb*
adwgetacb(struct adw_softc *adw)
{
	struct acb* acb;
	int s;

	s = splcam();
	if ((acb = SLIST_FIRST(&adw->free_acb_list)) != NULL) {
		SLIST_REMOVE_HEAD(&adw->free_acb_list, links);
	} else if (adw->num_acbs < adw->max_acbs) {
		adwallocacbs(adw);
		acb = SLIST_FIRST(&adw->free_acb_list);
		if (acb == NULL)
			printf("%s: Can't malloc ACB\n", adw_name(adw));
		else {
			SLIST_REMOVE_HEAD(&adw->free_acb_list, links);
		}
	}
	splx(s);

	return (acb);
}

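/*
 * Return an ACB to the free list.  If we were under a resource
 * shortage, arrange for the SIM queue to be released once this
 * command completes.
 */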
static __inline void
adwfreeacb(struct adw_softc *adw, struct acb *acb)
{
	int s;

	s = splcam();
	if ((acb->state & ACB_ACTIVE) != 0)
		LIST_REMOVE(&acb->ccb->ccb_h, sim_links.le);
	if ((acb->state & ACB_RELEASE_SIMQ) != 0)
		acb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
	else if ((adw->state & ADW_RESOURCE_SHORTAGE) != 0
	      && (acb->ccb->ccb_h.status & CAM_RELEASE_SIMQ) == 0) {
		acb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		adw->state &= ~ADW_RESOURCE_SHORTAGE;
	}
	acb->state = ACB_FREE;
	SLIST_INSERT_HEAD(&adw->free_acb_list, acb, links);
	splx(s);
}

static void
adwmapmem(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *busaddrp;

	busaddrp = (bus_addr_t *)arg;
	*busaddrp = segs->ds_addr;
}

static struct sg_map_node *
adwallocsgmap(struct adw_softc *adw)
{
	struct sg_map_node *sg_map;

	sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT);

	if (sg_map == NULL)
		return (NULL);

	/* Allocate S/G space for the next batch of ACBS */
	if (bus_dmamem_alloc(adw->sg_dmat, (void **)&sg_map->sg_vaddr,
			     BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) {
		free(sg_map, M_DEVBUF);
		return (NULL);
	}

	SLIST_INSERT_HEAD(&adw->sg_maps, sg_map, links);

	bus_dmamap_load(adw->sg_dmat, sg_map->sg_dmamap, sg_map->sg_vaddr,
			PAGE_SIZE, adwmapmem, &sg_map->sg_physaddr, /*flags*/0);

	bzero(sg_map->sg_vaddr, PAGE_SIZE);
	return (sg_map);
}

/*
 * Allocate another chunk of CCB's. Return count of entries added.
 * Assumed to be called at splcam().
 */
static int
adwallocacbs(struct adw_softc *adw)
{
	struct acb *next_acb;
	struct sg_map_node *sg_map;
	bus_addr_t busaddr;
	struct adw_sg_block *blocks;
	int newcount;
	int i;

	next_acb = &adw->acbs[adw->num_acbs];
	sg_map = adwallocsgmap(adw);

	if (sg_map == NULL)
		return (0);

	blocks = sg_map->sg_vaddr;
	busaddr = sg_map->sg_physaddr;

	newcount = (PAGE_SIZE / (ADW_SG_BLOCKCNT * sizeof(*blocks)));
	for (i = 0; adw->num_acbs < adw->max_acbs && i < newcount; i++) {
		int error;

		error = bus_dmamap_create(adw->buffer_dmat, /*flags*/0,
					  &next_acb->dmamap);
		if (error != 0)
			break;
		next_acb->queue.scsi_req_baddr = acbvtob(adw, next_acb);
		next_acb->queue.scsi_req_bo = acbvtobo(adw, next_acb);
		next_acb->queue.sense_baddr =
		    acbvtob(adw, next_acb) + offsetof(struct acb, sense_data);
		next_acb->sg_blocks = blocks;
		next_acb->sg_busaddr = busaddr;
		next_acb->state = ACB_FREE;
		SLIST_INSERT_HEAD(&adw->free_acb_list, next_acb, links);
		blocks += ADW_SG_BLOCKCNT;
		busaddr += ADW_SG_BLOCKCNT * sizeof(*blocks);
		next_acb++;
		adw->num_acbs++;
	}
	return (i);
}

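/*
 * Callback for bus_dmamap_load().  Fill in the ACB's scatter/gather
 * block chain from the returned segment list, sync the data buffer,
 * and hand the request to the controller.  Requests that failed to
 * map, or that were aborted while the mapping was in progress, are
 * completed here instead.
 */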
static void
adwexecuteacb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	struct acb *acb;
	union ccb *ccb;
	struct adw_softc *adw;
	int s;

	acb = (struct acb *)arg;
	ccb = acb->ccb;
	adw = (struct adw_softc *)ccb->ccb_h.ccb_adw_ptr;

	if (error != 0) {
		if (error != EFBIG)
			printf("%s: Unexpected error 0x%x returned from "
			       "bus_dmamap_load\n", adw_name(adw), error);
		if (ccb->ccb_h.status == CAM_REQ_INPROG) {
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
			ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN;
		}
		adwfreeacb(adw, acb);
		xpt_done(ccb);
		return;
	}

	if (nseg != 0) {
		bus_dmasync_op_t op;

		acb->queue.data_addr = dm_segs[0].ds_addr;
		acb->queue.data_cnt = ccb->csio.dxfer_len;
		if (nseg > 1) {
			struct adw_sg_block *sg_block;
			struct adw_sg_elm *sg;
			bus_addr_t sg_busaddr;
			u_int sg_index;
			bus_dma_segment_t *end_seg;

			end_seg = dm_segs + nseg;

			sg_busaddr = acb->sg_busaddr;
			sg_index = 0;
			/* Copy the segments into our SG list */
			for (sg_block = acb->sg_blocks;; sg_block++) {
				u_int i;

				sg = sg_block->sg_list;
				for (i = 0; i < ADW_NO_OF_SG_PER_BLOCK; i++) {
					if (dm_segs >= end_seg)
						break;

					sg->sg_addr = dm_segs->ds_addr;
					sg->sg_count = dm_segs->ds_len;
					sg++;
					dm_segs++;
				}
				sg_block->sg_cnt = i;
				sg_index += i;
				if (dm_segs == end_seg) {
					sg_block->sg_busaddr_next = 0;
					break;
				} else {
					sg_busaddr +=
					    sizeof(struct adw_sg_block);
					sg_block->sg_busaddr_next = sg_busaddr;
				}
			}
			acb->queue.sg_real_addr = acb->sg_busaddr;
		} else {
			acb->queue.sg_real_addr = 0;
		}

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;

		bus_dmamap_sync(adw->buffer_dmat, acb->dmamap, op);

	} else {
		acb->queue.data_addr = 0;
		acb->queue.data_cnt = 0;
		acb->queue.sg_real_addr = 0;
	}

	s = splcam();

	/*
	 * This is our last chance to check whether this
	 * CCB needs to be aborted.
	 */
	if (ccb->ccb_h.status != CAM_REQ_INPROG) {
		if (nseg != 0)
			bus_dmamap_unload(adw->buffer_dmat, acb->dmamap);
		adwfreeacb(adw, acb);
		xpt_done(ccb);
		splx(s);
		return;
	}

	acb->state |= ACB_ACTIVE;
	ccb->ccb_h.status |= CAM_SIM_QUEUED;
	LIST_INSERT_HEAD(&adw->pending_ccbs, &ccb->ccb_h, sim_links.le);
	ccb->ccb_h.timeout_ch =
	    timeout(adwtimeout, (caddr_t)acb,
		    (ccb->ccb_h.timeout * hz) / 1000);

	adw_send_acb(adw, acb, acbvtob(adw, acb));

	splx(s);
}

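/*
 * CAM entry point for new action requests.  Dispatch on the CCB
 * function code: XPT_SCSI_IO requests are mapped for DMA and queued
 * to the controller; the remaining function codes are serviced
 * inline.
 */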
static void
adw_action(struct cam_sim *sim, union ccb *ccb)
{
	struct adw_softc *adw;

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("adw_action\n"));

	adw = (struct adw_softc *)cam_sim_softc(sim);

	switch (ccb->ccb_h.func_code) {
	/* Common cases first */
	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
	{
		struct ccb_scsiio *csio;
		struct ccb_hdr *ccbh;
		struct acb *acb;

		csio = &ccb->csio;
		ccbh = &ccb->ccb_h;

		/* Max supported CDB length is 12 bytes */
		if (csio->cdb_len > 12) {
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			return;
		}

		if ((acb = adwgetacb(adw)) == NULL) {
			int s;

			s = splcam();
			adw->state |= ADW_RESOURCE_SHORTAGE;
			splx(s);
			xpt_freeze_simq(sim, /*count*/1);
			ccb->ccb_h.status = CAM_REQUEUE_REQ;
			xpt_done(ccb);
			return;
		}

		/* Link acb and ccb so we can find one from the other */
		acb->ccb = ccb;
		ccb->ccb_h.ccb_acb_ptr = acb;
		ccb->ccb_h.ccb_adw_ptr = adw;

		acb->queue.cntl = 0;
		acb->queue.target_cmd = 0;
		acb->queue.target_id = ccb->ccb_h.target_id;
		acb->queue.target_lun = ccb->ccb_h.target_lun;

		acb->queue.mflag = 0;
		acb->queue.sense_len =
			MIN(csio->sense_len, sizeof(acb->sense_data));
		acb->queue.cdb_len = csio->cdb_len;
		if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
			switch (csio->tag_action) {
			case MSG_SIMPLE_Q_TAG:
				acb->queue.scsi_cntl = ADW_QSC_SIMPLE_Q_TAG;
				break;
			case MSG_HEAD_OF_Q_TAG:
				acb->queue.scsi_cntl = ADW_QSC_HEAD_OF_Q_TAG;
				break;
			case MSG_ORDERED_Q_TAG:
				acb->queue.scsi_cntl = ADW_QSC_ORDERED_Q_TAG;
				break;
			default:
				acb->queue.scsi_cntl = ADW_QSC_NO_TAGMSG;
				break;
			}
		} else
			acb->queue.scsi_cntl = ADW_QSC_NO_TAGMSG;

		if ((ccb->ccb_h.flags & CAM_DIS_DISCONNECT) != 0)
			acb->queue.scsi_cntl |= ADW_QSC_NO_DISC;

		acb->queue.done_status = 0;
		acb->queue.scsi_status = 0;
		acb->queue.host_status = 0;
		acb->queue.sg_wk_ix = 0;
		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) == 0) {
				bcopy(csio->cdb_io.cdb_ptr,
				      acb->queue.cdb, csio->cdb_len);
			} else {
				/* I guess I could map it in... */
				ccb->ccb_h.status = CAM_REQ_INVALID;
				adwfreeacb(adw, acb);
				xpt_done(ccb);
				return;
			}
		} else {
			bcopy(csio->cdb_io.cdb_bytes,
			      acb->queue.cdb, csio->cdb_len);
		}

		/*
		 * If we have any data to send with this command,
		 * map it into bus space.
		 */
		if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
			if ((ccbh->flags & CAM_SCATTER_VALID) == 0) {
				/*
				 * We've been given a pointer
				 * to a single buffer.
				 */
				if ((ccbh->flags & CAM_DATA_PHYS) == 0) {
					int s;
					int error;

					s = splsoftvm();
					error =
					    bus_dmamap_load(adw->buffer_dmat,
							    acb->dmamap,
							    csio->data_ptr,
							    csio->dxfer_len,
							    adwexecuteacb,
							    acb, /*flags*/0);
					if (error == EINPROGRESS) {
						/*
						 * So as to maintain ordering,
						 * freeze the controller queue
						 * until our mapping is
						 * returned.
						 */
						xpt_freeze_simq(sim, 1);
						acb->state |= ACB_RELEASE_SIMQ;
					}
					splx(s);
				} else {
					struct bus_dma_segment seg;

					/* Pointer to physical buffer */
					seg.ds_addr =
					    (bus_addr_t)csio->data_ptr;
					seg.ds_len = csio->dxfer_len;
					adwexecuteacb(acb, &seg, 1, 0);
				}
			} else {
				struct bus_dma_segment *segs;

				if ((ccbh->flags & CAM_DATA_PHYS) != 0)
					panic("adw_action - Physical "
					      "segment pointers "
					      "unsupported");

				if ((ccbh->flags & CAM_SG_LIST_PHYS) == 0)
					panic("adw_action - Virtual "
					      "segment addresses "
					      "unsupported");

				/* Just use the segments provided */
				segs = (struct bus_dma_segment *)csio->data_ptr;
				adwexecuteacb(acb, segs, csio->sglist_cnt,
					      (csio->sglist_cnt < ADW_SGSIZE)
					      ? 0 : EFBIG);
			}
		} else {
			adwexecuteacb(acb, NULL, 0, 0);
		}
		break;
	}
	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
	{
		adw_idle_cmd_status_t status;

		status = adw_idle_cmd_send(adw, ADW_IDLE_CMD_DEVICE_RESET,
					   ccb->ccb_h.target_id);
		if (status == ADW_IDLE_CMD_SUCCESS) {
			ccb->ccb_h.status = CAM_REQ_CMP;
			if (bootverbose) {
				xpt_print_path(ccb->ccb_h.path);
				printf("BDR Delivered\n");
			}
		} else
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		xpt_done(ccb);
		break;
	}
	case XPT_ABORT:			/* Abort the specified CCB */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
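	/*
	 * Transfer negotiation settings live in the microcode's local
	 * RAM as per-target bitmasks (disconnection, WDTR, and SDTR
	 * enables), so applying new settings is a read-modify-write
	 * of the appropriate LRAM words.
	 */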
	case XPT_SET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings_scsi *scsi;
		struct ccb_trans_settings_spi *spi;
		struct ccb_trans_settings *cts;
		u_int target_mask;
		int s;

		cts = &ccb->cts;
		target_mask = 0x01 << ccb->ccb_h.target_id;

		s = splcam();
		scsi = &cts->proto_specific.scsi;
		spi = &cts->xport_specific.spi;
		if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
			u_int sdtrdone;

			sdtrdone = adw_lram_read_16(adw, ADW_MC_SDTR_DONE);
			if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
				u_int discenb;

				discenb =
				    adw_lram_read_16(adw, ADW_MC_DISC_ENABLE);

				if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
					discenb |= target_mask;
				else
					discenb &= ~target_mask;

				adw_lram_write_16(adw, ADW_MC_DISC_ENABLE,
						  discenb);
			}

			if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {

				if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
					adw->tagenb |= target_mask;
				else
					adw->tagenb &= ~target_mask;
			}

			if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
				u_int wdtrenb_orig;
				u_int wdtrenb;
				u_int wdtrdone;

				wdtrenb_orig =
				    adw_lram_read_16(adw, ADW_MC_WDTR_ABLE);
				wdtrenb = wdtrenb_orig;
				wdtrdone = adw_lram_read_16(adw,
							    ADW_MC_WDTR_DONE);
				switch (spi->bus_width) {
				case MSG_EXT_WDTR_BUS_32_BIT:
				case MSG_EXT_WDTR_BUS_16_BIT:
					wdtrenb |= target_mask;
					break;
				case MSG_EXT_WDTR_BUS_8_BIT:
				default:
					wdtrenb &= ~target_mask;
					break;
				}
				if (wdtrenb != wdtrenb_orig) {
					adw_lram_write_16(adw,
							  ADW_MC_WDTR_ABLE,
							  wdtrenb);
					wdtrdone &= ~target_mask;
					adw_lram_write_16(adw,
							  ADW_MC_WDTR_DONE,
							  wdtrdone);
					/* Wide negotiation forces async */
					sdtrdone &= ~target_mask;
					adw_lram_write_16(adw,
							  ADW_MC_SDTR_DONE,
							  sdtrdone);
				}
			}

			if (((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0)
			 || ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0)) {
				u_int sdtr_orig;
				u_int sdtr;
				u_int sdtrable_orig;
				u_int sdtrable;

				sdtr = adw_get_chip_sdtr(adw,
							 ccb->ccb_h.target_id);
				sdtr_orig = sdtr;
				sdtrable = adw_lram_read_16(adw,
							    ADW_MC_SDTR_ABLE);
				sdtrable_orig = sdtrable;

				if ((spi->valid
				   & CTS_SPI_VALID_SYNC_RATE) != 0) {

					sdtr =
					    adw_find_sdtr(adw,
							  spi->sync_period);
				}

				if ((spi->valid
				   & CTS_SPI_VALID_SYNC_OFFSET) != 0) {
					if (spi->sync_offset == 0)
						sdtr = ADW_MC_SDTR_ASYNC;
				}

				if (sdtr == ADW_MC_SDTR_ASYNC)
					sdtrable &= ~target_mask;
				else
					sdtrable |= target_mask;
				if (sdtr != sdtr_orig
				 || sdtrable != sdtrable_orig) {
					adw_set_chip_sdtr(adw,
							  ccb->ccb_h.target_id,
							  sdtr);
					sdtrdone &= ~target_mask;
					adw_lram_write_16(adw, ADW_MC_SDTR_ABLE,
							  sdtrable);
					adw_lram_write_16(adw, ADW_MC_SDTR_DONE,
							  sdtrdone);
				}
			}
		}
		splx(s);
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
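	/*
	 * User (default) settings are reported from the EEPROM-derived
	 * fields in the softc; current settings are read back from the
	 * chip's LRAM and per-target handshake configuration table.
	 */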
	case XPT_GET_TRAN_SETTINGS:
	/* Get default/user set transfer settings for the target */
	{
		struct ccb_trans_settings_scsi *scsi;
		struct ccb_trans_settings_spi *spi;
		struct ccb_trans_settings *cts;
		u_int target_mask;

		cts = &ccb->cts;
		target_mask = 0x01 << ccb->ccb_h.target_id;
		cts->protocol = PROTO_SCSI;
		cts->protocol_version = SCSI_REV_2;
		cts->transport = XPORT_SPI;
		cts->transport_version = 2;

		scsi = &cts->proto_specific.scsi;
		spi = &cts->xport_specific.spi;
		if (cts->type == CTS_TYPE_USER_SETTINGS) {
			u_int mc_sdtr;

			spi->flags = 0;
			if ((adw->user_discenb & target_mask) != 0)
				spi->flags |= CTS_SPI_FLAGS_DISC_ENB;

			if ((adw->user_tagenb & target_mask) != 0)
				scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;

			if ((adw->user_wdtr & target_mask) != 0)
				spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
			else
				spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;

			mc_sdtr = adw_get_user_sdtr(adw, ccb->ccb_h.target_id);
			spi->sync_period = adw_find_period(adw, mc_sdtr);
			if (spi->sync_period != 0)
				spi->sync_offset = 15; /* XXX ??? */
			else
				spi->sync_offset = 0;

		} else {
			u_int targ_tinfo;

			spi->flags = 0;
			if ((adw_lram_read_16(adw, ADW_MC_DISC_ENABLE)
			  & target_mask) != 0)
				spi->flags |= CTS_SPI_FLAGS_DISC_ENB;

			if ((adw->tagenb & target_mask) != 0)
				scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;

			targ_tinfo =
			    adw_lram_read_16(adw,
					     ADW_MC_DEVICE_HSHK_CFG_TABLE
					     + (2 * ccb->ccb_h.target_id));

			if ((targ_tinfo & ADW_HSHK_CFG_WIDE_XFR) != 0)
				spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
			else
				spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;

			spi->sync_period =
			    adw_hshk_cfg_period_factor(targ_tinfo);

			spi->sync_offset = targ_tinfo & ADW_HSHK_CFG_OFFSET;
			if (spi->sync_period == 0)
				spi->sync_offset = 0;

			if (spi->sync_offset == 0)
				spi->sync_period = 0;
		}

		spi->valid = CTS_SPI_VALID_SYNC_RATE
			   | CTS_SPI_VALID_SYNC_OFFSET
			   | CTS_SPI_VALID_BUS_WIDTH
			   | CTS_SPI_VALID_DISC;
		scsi->valid = CTS_SCSI_VALID_TQ;
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_CALC_GEOMETRY:
	{
		/*
		 * XXX Use Adaptec translation until I find out how to
		 *     get this information from the card.
		 */
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		xpt_done(ccb);
		break;
	}
	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
	{
		int failure;

		failure = adw_reset_bus(adw);
		if (failure != 0) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		} else {
			if (bootverbose) {
				xpt_print_path(adw->path);
				printf("Bus Reset Delivered\n");
			}
			ccb->ccb_h.status = CAM_REQ_CMP;
		}
		xpt_done(ccb);
		break;
	}
	case XPT_TERM_IO:		/* Terminate the I/O process */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_WIDE_16|PI_SDTR_ABLE|PI_TAG_ABLE;
		cpi->target_sprt = 0;
		cpi->hba_misc = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = ADW_MAX_TID;
		cpi->max_lun = ADW_MAX_LUN;
		cpi->initiator_id = adw->initiator_id;
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 3300;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "AdvanSys", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->transport = XPORT_SPI;
		cpi->transport_version = 2;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_2;
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
}

static void
adw_poll(struct cam_sim *sim)
{
	adw_intr(cam_sim_softc(sim));
}

static void
adw_async(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
{
}

struct adw_softc *
adw_alloc(device_t dev, struct resource *regs, int regs_type, int regs_id)
{
	struct adw_softc *adw;
	int i;

	/*
	 * Allocate a storage area for us
	 */
	adw = malloc(sizeof(struct adw_softc), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (adw == NULL) {
		printf("adw%d: cannot malloc!\n", device_get_unit(dev));
		return NULL;
	}
	LIST_INIT(&adw->pending_ccbs);
	SLIST_INIT(&adw->sg_maps);
	adw->device = dev;
	adw->unit = device_get_unit(dev);
	adw->regs_res_type = regs_type;
	adw->regs_res_id = regs_id;
	adw->regs = regs;
	adw->tag = rman_get_bustag(regs);
	adw->bsh = rman_get_bushandle(regs);
	i = adw->unit / 10;
	adw->name = malloc(sizeof("adw") + i + 1, M_DEVBUF, M_NOWAIT);
	if (adw->name == NULL) {
		printf("adw%d: cannot malloc name!\n", adw->unit);
		free(adw, M_DEVBUF);
		return NULL;
	}
	sprintf(adw->name, "adw%d", adw->unit);
	return(adw);
}

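/*
 * Tear down any resources allocated so far.  adw->init_level records
 * how far adw_init() progressed; each case falls through to undo the
 * earlier initialization steps.
 */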
void
adw_free(struct adw_softc *adw)
{
	switch (adw->init_level) {
	case 9:
	{
		struct sg_map_node *sg_map;

		while ((sg_map = SLIST_FIRST(&adw->sg_maps)) != NULL) {
			SLIST_REMOVE_HEAD(&adw->sg_maps, links);
			bus_dmamap_unload(adw->sg_dmat,
					  sg_map->sg_dmamap);
			bus_dmamem_free(adw->sg_dmat, sg_map->sg_vaddr,
					sg_map->sg_dmamap);
			free(sg_map, M_DEVBUF);
		}
		bus_dma_tag_destroy(adw->sg_dmat);
	}
	case 8:
		bus_dmamap_unload(adw->acb_dmat, adw->acb_dmamap);
	case 7:
		bus_dmamem_free(adw->acb_dmat, adw->acbs,
				adw->acb_dmamap);
		bus_dmamap_destroy(adw->acb_dmat, adw->acb_dmamap);
	case 6:
		bus_dma_tag_destroy(adw->acb_dmat);
	case 5:
		bus_dmamap_unload(adw->carrier_dmat, adw->carrier_dmamap);
	case 4:
		bus_dmamem_free(adw->carrier_dmat, adw->carriers,
				adw->carrier_dmamap);
		bus_dmamap_destroy(adw->carrier_dmat, adw->carrier_dmamap);
	case 3:
		bus_dma_tag_destroy(adw->carrier_dmat);
	case 2:
		bus_dma_tag_destroy(adw->buffer_dmat);
	case 1:
		bus_dma_tag_destroy(adw->parent_dmat);
	case 0:
		break;
	}

	if (adw->regs != NULL)
		bus_release_resource(adw->device,
				     adw->regs_res_type,
				     adw->regs_res_id,
				     adw->regs);

	if (adw->irq != NULL)
		bus_release_resource(adw->device,
				     adw->irq_res_type,
				     0, adw->irq);

	if (adw->sim != NULL) {
		if (adw->path != NULL) {
			xpt_async(AC_LOST_DEVICE, adw->path, NULL);
			xpt_free_path(adw->path);
		}
		xpt_bus_deregister(cam_sim_path(adw->sim));
		cam_sim_free(adw->sim, /*free_devq*/TRUE);
	}
	free(adw->name, M_DEVBUF);
	free(adw, M_DEVBUF);
}

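/*
 * Read and validate the controller's EEPROM configuration, size our
 * DMA tags and carrier/ACB pools from it, and bring the chip to a
 * runnable state.  Returns 0 on success or an errno value.
 */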
int
adw_init(struct adw_softc *adw)
{
	struct adw_eeprom eep_config;
	u_int tid;
	u_int i;
	u_int16_t checksum;
	u_int16_t scsicfg1;

	checksum = adw_eeprom_read(adw, &eep_config);
	bcopy(eep_config.serial_number, adw->serial_number,
	      sizeof(adw->serial_number));
	if (checksum != eep_config.checksum) {
		u_int16_t serial_number[3];

		adw->flags |= ADW_EEPROM_FAILED;
		printf("%s: EEPROM checksum failed.  Restoring Defaults\n",
		       adw_name(adw));

		/*
		 * Restore the default EEPROM settings.
		 * Assume the 6 byte board serial number that was read
		 * from EEPROM is correct even if the EEPROM checksum
		 * failed.
		 */
		bcopy(adw->default_eeprom, &eep_config, sizeof(eep_config));
		bcopy(adw->serial_number, eep_config.serial_number,
		      sizeof(serial_number));
		adw_eeprom_write(adw, &eep_config);
	}

	/* Pull eeprom information into our softc. */
	adw->bios_ctrl = eep_config.bios_ctrl;
	adw->user_wdtr = eep_config.wdtr_able;
	for (tid = 0; tid < ADW_MAX_TID; tid++) {
		u_int mc_sdtr;
		u_int16_t tid_mask;

		tid_mask = 0x1 << tid;
		if ((adw->features & ADW_ULTRA) != 0) {
			/*
			 * Ultra chips store sdtr and ultraenb
			 * bits in their seeprom, so we must
			 * construct valid mc_sdtr entries
			 * indirectly.
			 */
			if (eep_config.sync1.sync_enable & tid_mask) {
				if (eep_config.sync2.ultra_enable & tid_mask)
					mc_sdtr = ADW_MC_SDTR_20;
				else
					mc_sdtr = ADW_MC_SDTR_10;
			} else
				mc_sdtr = ADW_MC_SDTR_ASYNC;
		} else {
			switch (ADW_TARGET_GROUP(tid)) {
			case 3:
				mc_sdtr = eep_config.sync4.sdtr4;
				break;
			case 2:
				mc_sdtr = eep_config.sync3.sdtr3;
				break;
			case 1:
				mc_sdtr = eep_config.sync2.sdtr2;
				break;
			default: /* Shut up compiler */
			case 0:
				mc_sdtr = eep_config.sync1.sdtr1;
				break;
			}
			mc_sdtr >>= ADW_TARGET_GROUP_SHIFT(tid);
			mc_sdtr &= 0xFF;
		}
		adw_set_user_sdtr(adw, tid, mc_sdtr);
	}
	adw->user_tagenb = eep_config.tagqng_able;
	adw->user_discenb = eep_config.disc_enable;
	adw->max_acbs = eep_config.max_host_qng;
	adw->initiator_id = (eep_config.adapter_scsi_id & ADW_MAX_TID);

	/*
	 * Sanity check the number of host openings.
	 */
	if (adw->max_acbs > ADW_DEF_MAX_HOST_QNG)
		adw->max_acbs = ADW_DEF_MAX_HOST_QNG;
	else if (adw->max_acbs < ADW_DEF_MIN_HOST_QNG) {
		/* If the value is zero, assume it is uninitialized. */
		if (adw->max_acbs == 0)
			adw->max_acbs = ADW_DEF_MAX_HOST_QNG;
		else
			adw->max_acbs = ADW_DEF_MIN_HOST_QNG;
	}

	scsicfg1 = 0;
	if ((adw->features & ADW_ULTRA2) != 0) {
		switch (eep_config.termination_lvd) {
		default:
			printf("%s: Invalid EEPROM LVD Termination Settings.\n",
			       adw_name(adw));
			printf("%s: Reverting to Automatic LVD Termination\n",
			       adw_name(adw));
			/* FALLTHROUGH */
		case ADW_EEPROM_TERM_AUTO:
			break;
		case ADW_EEPROM_TERM_BOTH_ON:
			scsicfg1 |= ADW2_SCSI_CFG1_TERM_LVD_LO;
			/* FALLTHROUGH */
		case ADW_EEPROM_TERM_HIGH_ON:
			scsicfg1 |= ADW2_SCSI_CFG1_TERM_LVD_HI;
			/* FALLTHROUGH */
		case ADW_EEPROM_TERM_OFF:
			scsicfg1 |= ADW2_SCSI_CFG1_DIS_TERM_DRV;
			break;
		}
	}

	switch (eep_config.termination_se) {
	default:
		printf("%s: Invalid SE EEPROM Termination Settings.\n",
		       adw_name(adw));
		printf("%s: Reverting to Automatic SE Termination\n",
		       adw_name(adw));
		/* FALLTHROUGH */
	case ADW_EEPROM_TERM_AUTO:
		break;
	case ADW_EEPROM_TERM_BOTH_ON:
		scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_L;
		/* FALLTHROUGH */
	case ADW_EEPROM_TERM_HIGH_ON:
		scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_H;
		/* FALLTHROUGH */
	case ADW_EEPROM_TERM_OFF:
		scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_MANUAL;
		break;
	}
	printf("%s: SCSI ID %d, ", adw_name(adw), adw->initiator_id);

	/* DMA tag for mapping buffers into device visible space. */
	if (bus_dma_tag_create(
			/* parent	*/ adw->parent_dmat,
			/* alignment	*/ 1,
			/* boundary	*/ 0,
			/* lowaddr	*/ BUS_SPACE_MAXADDR_32BIT,
			/* highaddr	*/ BUS_SPACE_MAXADDR,
			/* filter	*/ NULL,
			/* filterarg	*/ NULL,
			/* maxsize	*/ MAXBSIZE,
			/* nsegments	*/ ADW_SGSIZE,
			/* maxsegsz	*/ BUS_SPACE_MAXSIZE_32BIT,
			/* flags	*/ BUS_DMA_ALLOCNOW,
			/* lockfunc	*/ busdma_lock_mutex,
			/* lockarg	*/ &Giant,
			&adw->buffer_dmat) != 0) {
		return (ENOMEM);
	}

	adw->init_level++;

	/* DMA tag for our ccb carrier structures */
	if (bus_dma_tag_create(
			/* parent	*/ adw->parent_dmat,
			/* alignment	*/ 0x10,
			/* boundary	*/ 0,
			/* lowaddr	*/ BUS_SPACE_MAXADDR_32BIT,
			/* highaddr	*/ BUS_SPACE_MAXADDR,
			/* filter	*/ NULL,
			/* filterarg	*/ NULL,
			/* maxsize	*/ (adw->max_acbs +
					    ADW_NUM_CARRIER_QUEUES + 1) *
					    sizeof(struct adw_carrier),
			/* nsegments	*/ 1,
			/* maxsegsz	*/ BUS_SPACE_MAXSIZE_32BIT,
			/* flags	*/ 0,
			/* lockfunc	*/ busdma_lock_mutex,
			/* lockarg	*/ &Giant,
			&adw->carrier_dmat) != 0) {
		return (ENOMEM);
	}

	adw->init_level++;

	/* Allocation for our ccb carrier structures */
	if (bus_dmamem_alloc(adw->carrier_dmat, (void **)&adw->carriers,
			     BUS_DMA_NOWAIT, &adw->carrier_dmamap) != 0) {
		return (ENOMEM);
	}

	adw->init_level++;

	/* And permanently map them */
	bus_dmamap_load(adw->carrier_dmat, adw->carrier_dmamap,
			adw->carriers,
			(adw->max_acbs + ADW_NUM_CARRIER_QUEUES + 1)
			 * sizeof(struct adw_carrier),
			adwmapmem, &adw->carrier_busbase, /*flags*/0);

	/* Clear them out. */
	bzero(adw->carriers, (adw->max_acbs + ADW_NUM_CARRIER_QUEUES + 1)
			     * sizeof(struct adw_carrier));

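	/*
	 * Carriers are linked by bus-relative offsets rather than
	 * kernel pointers because the RISC firmware walks the list
	 * using the physical addresses recorded here.
	 */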
	/* Setup our free carrier list */
	adw->free_carriers = adw->carriers;
	for (i = 0; i < adw->max_acbs + ADW_NUM_CARRIER_QUEUES; i++) {
		adw->carriers[i].carr_offset =
			carriervtobo(adw, &adw->carriers[i]);
		adw->carriers[i].carr_ba =
			carriervtob(adw, &adw->carriers[i]);
		adw->carriers[i].areq_ba = 0;
		adw->carriers[i].next_ba =
			carriervtobo(adw, &adw->carriers[i+1]);
	}
	/* Terminal carrier.  Never leaves the freelist */
	adw->carriers[i].carr_offset =
		carriervtobo(adw, &adw->carriers[i]);
	adw->carriers[i].carr_ba =
		carriervtob(adw, &adw->carriers[i]);
	adw->carriers[i].areq_ba = 0;
	adw->carriers[i].next_ba = ~0;

	adw->init_level++;

	/* DMA tag for our acb structures */
	if (bus_dma_tag_create(
			/* parent	*/ adw->parent_dmat,
			/* alignment	*/ 1,
			/* boundary	*/ 0,
			/* lowaddr	*/ BUS_SPACE_MAXADDR,
			/* highaddr	*/ BUS_SPACE_MAXADDR,
			/* filter	*/ NULL,
			/* filterarg	*/ NULL,
			/* maxsize	*/ adw->max_acbs * sizeof(struct acb),
			/* nsegments	*/ 1,
			/* maxsegsz	*/ BUS_SPACE_MAXSIZE_32BIT,
			/* flags	*/ 0,
			/* lockfunc	*/ busdma_lock_mutex,
			/* lockarg	*/ &Giant,
			&adw->acb_dmat) != 0) {
		return (ENOMEM);
	}

	adw->init_level++;

	/* Allocation for our ccbs */
	if (bus_dmamem_alloc(adw->acb_dmat, (void **)&adw->acbs,
			     BUS_DMA_NOWAIT, &adw->acb_dmamap) != 0)
		return (ENOMEM);

	adw->init_level++;

	/* And permanently map them */
	bus_dmamap_load(adw->acb_dmat, adw->acb_dmamap,
			adw->acbs,
			adw->max_acbs * sizeof(struct acb),
			adwmapmem, &adw->acb_busbase, /*flags*/0);

	/* Clear them out. */
	bzero(adw->acbs, adw->max_acbs * sizeof(struct acb));

	/* DMA tag for our S/G structures.  We allocate in page sized chunks */
	if (bus_dma_tag_create(
			/* parent	*/ adw->parent_dmat,
			/* alignment	*/ 1,
			/* boundary	*/ 0,
			/* lowaddr	*/ BUS_SPACE_MAXADDR,
			/* highaddr	*/ BUS_SPACE_MAXADDR,
			/* filter	*/ NULL,
			/* filterarg	*/ NULL,
			/* maxsize	*/ PAGE_SIZE,
			/* nsegments	*/ 1,
			/* maxsegsz	*/ BUS_SPACE_MAXSIZE_32BIT,
			/* flags	*/ 0,
			/* lockfunc	*/ busdma_lock_mutex,
			/* lockarg	*/ &Giant,
			&adw->sg_dmat) != 0) {
		return (ENOMEM);
	}

	adw->init_level++;

	/* Allocate our first batch of ccbs */
	if (adwallocacbs(adw) == 0)
		return (ENOMEM);

	if (adw_init_chip(adw, scsicfg1) != 0)
		return (ENXIO);

	printf("Queue Depth %d\n", adw->max_acbs);

	return (0);
}

/*
 * Attach all the sub-devices we can find
 */
int
adw_attach(struct adw_softc *adw)
{
	struct ccb_setasync csa;
	struct cam_devq *devq;
	int s;
	int error;

	error = 0;
	s = splcam();
	/* Hook up our interrupt handler */
	if ((error = bus_setup_intr(adw->device, adw->irq,
				    INTR_TYPE_CAM | INTR_ENTROPY, NULL,
				    adw_intr, adw, &adw->ih)) != 0) {
		device_printf(adw->device, "bus_setup_intr() failed: %d\n",
			      error);
		goto fail;
	}

	/* Start the Risc processor now that we are fully configured. */
	adw_outw(adw, ADW_RISC_CSR, ADW_RISC_CSR_RUN);

	/*
	 * Create the device queue for our SIM.
	 */
	devq = cam_simq_alloc(adw->max_acbs);
	if (devq == NULL)
		return (ENOMEM);

	/*
	 * Construct our SIM entry.
	 */
	adw->sim = cam_sim_alloc(adw_action, adw_poll, "adw", adw, adw->unit,
				 1, adw->max_acbs, devq);
	if (adw->sim == NULL) {
		error = ENOMEM;
		goto fail;
	}

	/*
	 * Register the bus.
	 */
	if (xpt_bus_register(adw->sim, 0) != CAM_SUCCESS) {
		cam_sim_free(adw->sim, /*free devq*/TRUE);
		error = ENOMEM;
		goto fail;
	}

	if (xpt_create_path(&adw->path, /*periph*/NULL, cam_sim_path(adw->sim),
			    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD)
	   == CAM_REQ_CMP) {
		xpt_setup_ccb(&csa.ccb_h, adw->path, /*priority*/5);
		csa.ccb_h.func_code = XPT_SASYNC_CB;
		csa.event_enable = AC_LOST_DEVICE;
		csa.callback = adw_async;
		csa.callback_arg = adw;
		xpt_action((union ccb *)&csa);
	}

fail:
	splx(s);
	return (error);
}

void
adw_intr(void *arg)
{
	struct adw_softc *adw;
	u_int int_stat;

	adw = (struct adw_softc *)arg;
	if ((adw_inw(adw, ADW_CTRL_REG) & ADW_CTRL_REG_HOST_INTR) == 0)
		return;

	/* Reading the register clears the interrupt. */
	int_stat = adw_inb(adw, ADW_INTR_STATUS_REG);

	if ((int_stat & ADW_INTR_STATUS_INTRB) != 0) {
		u_int intrb_code;

		/* Async Microcode Event */
		intrb_code = adw_lram_read_8(adw, ADW_MC_INTRB_CODE);
		switch (intrb_code) {
		case ADW_ASYNC_CARRIER_READY_FAILURE:
			/*
			 * The RISC missed our update of
			 * the commandq.
			 */
			if (LIST_FIRST(&adw->pending_ccbs) != NULL)
				adw_tickle_risc(adw, ADW_TICKLE_A);
			break;
		case ADW_ASYNC_SCSI_BUS_RESET_DET:
			/*
			 * The firmware detected a SCSI Bus reset.
			 */
			printf("Someone Reset the Bus\n");
			adw_handle_bus_reset(adw, /*initiated*/FALSE);
			break;
		case ADW_ASYNC_RDMA_FAILURE:
			/*
			 * Handle RDMA failure by resetting the
			 * SCSI Bus and chip.
			 */
#if 0 /* XXX */
			AdvResetChipAndSB(adv_dvc_varp);
#endif
			break;

		case ADW_ASYNC_HOST_SCSI_BUS_RESET:
			/*
			 * Host generated SCSI bus reset occurred.
			 */
			adw_handle_bus_reset(adw, /*initiated*/TRUE);
			break;
		default:
			printf("adw_intr: unknown async code 0x%x\n",
			       intrb_code);
			break;
		}
	}

	/*
	 * Run down the ResponseQ.
	 */
	while ((adw->responseq->next_ba & ADW_RQ_DONE) != 0) {
		struct adw_carrier *free_carrier;
		struct acb *acb;
		union ccb *ccb;

#if 0
		printf("0x%x, 0x%x, 0x%x, 0x%x\n",
		       adw->responseq->carr_offset,
		       adw->responseq->carr_ba,
		       adw->responseq->areq_ba,
		       adw->responseq->next_ba);
#endif
		/*
		 * The firmware copies the adw_scsi_req_q.acb_baddr
		 * field into the areq_ba field of the carrier.
		 */
		acb = acbbotov(adw, adw->responseq->areq_ba);

		/*
		 * The least significant four bits of the next_ba
		 * field are used as flags.  Mask them out and then
		 * advance through the list.
		 */
		free_carrier = adw->responseq;
		adw->responseq =
		    carrierbotov(adw, free_carrier->next_ba & ADW_NEXT_BA_MASK);
		free_carrier->next_ba = adw->free_carriers->carr_offset;
		adw->free_carriers = free_carrier;

		/* Process CCB */
		ccb = acb->ccb;
		untimeout(adwtimeout, acb, ccb->ccb_h.timeout_ch);
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
			bus_dmasync_op_t op;

			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
				op = BUS_DMASYNC_POSTREAD;
			else
				op = BUS_DMASYNC_POSTWRITE;
			bus_dmamap_sync(adw->buffer_dmat, acb->dmamap, op);
			bus_dmamap_unload(adw->buffer_dmat, acb->dmamap);
			ccb->csio.resid = acb->queue.data_cnt;
		} else
			ccb->csio.resid = 0;

		/* Common Cases inline... */
		if (acb->queue.host_status == QHSTA_NO_ERROR
		 && (acb->queue.done_status == QD_NO_ERROR
		  || acb->queue.done_status == QD_WITH_ERROR)) {
			ccb->csio.scsi_status = acb->queue.scsi_status;
			ccb->ccb_h.status = 0;
			switch (ccb->csio.scsi_status) {
			case SCSI_STATUS_OK:
				ccb->ccb_h.status |= CAM_REQ_CMP;
				break;
			case SCSI_STATUS_CHECK_COND:
			case SCSI_STATUS_CMD_TERMINATED:
				bcopy(&acb->sense_data, &ccb->csio.sense_data,
				      ccb->csio.sense_len);
				ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
				ccb->csio.sense_resid = acb->queue.sense_len;
				/* FALLTHROUGH */
			default:
				ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR
						  |  CAM_DEV_QFRZN;
				xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
				break;
			}
			adwfreeacb(adw, acb);
			xpt_done(ccb);
		} else {
			adwprocesserror(adw, acb);
		}
	}
}

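/*
 * Translate an ACB that completed with a non-trivial host or done
 * status into the corresponding CAM status code and complete the CCB.
 */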
static void
adwprocesserror(struct adw_softc *adw, struct acb *acb)
{
	union ccb *ccb;

	ccb = acb->ccb;
	if (acb->queue.done_status == QD_ABORTED_BY_HOST) {
		ccb->ccb_h.status = CAM_REQ_ABORTED;
	} else {

		switch (acb->queue.host_status) {
		case QHSTA_M_SEL_TIMEOUT:
			ccb->ccb_h.status = CAM_SEL_TIMEOUT;
			break;
		case QHSTA_M_SXFR_OFF_UFLW:
		case QHSTA_M_SXFR_OFF_OFLW:
		case QHSTA_M_DATA_OVER_RUN:
			ccb->ccb_h.status = CAM_DATA_RUN_ERR;
			break;
		case QHSTA_M_SXFR_DESELECTED:
		case QHSTA_M_UNEXPECTED_BUS_FREE:
			ccb->ccb_h.status = CAM_UNEXP_BUSFREE;
			break;
		case QHSTA_M_SCSI_BUS_RESET:
		case QHSTA_M_SCSI_BUS_RESET_UNSOL:
			ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
			break;
		case QHSTA_M_BUS_DEVICE_RESET:
			ccb->ccb_h.status = CAM_BDR_SENT;
			break;
		case QHSTA_M_QUEUE_ABORTED:
			/* BDR or Bus Reset */
			printf("Saw Queue Aborted\n");
			ccb->ccb_h.status = adw->last_reset;
			break;
		case QHSTA_M_SXFR_SDMA_ERR:
		case QHSTA_M_SXFR_SXFR_PERR:
		case QHSTA_M_RDMA_PERR:
			ccb->ccb_h.status = CAM_UNCOR_PARITY;
			break;
		case QHSTA_M_WTM_TIMEOUT:
		case QHSTA_M_SXFR_WD_TMO:
		{
			/* The SCSI bus hung in a phase */
			xpt_print_path(adw->path);
			printf("Watch Dog timer expired.  Resetting bus\n");
			adw_reset_bus(adw);
			break;
		}
		case QHSTA_M_SXFR_XFR_PH_ERR:
			ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
			break;
		case QHSTA_M_SXFR_UNKNOWN_ERROR:
			break;
		case QHSTA_M_BAD_CMPL_STATUS_IN:
			/* No command complete after a status message */
			ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
			break;
		case QHSTA_M_AUTO_REQ_SENSE_FAIL:
			ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
			break;
		case QHSTA_M_INVALID_DEVICE:
			ccb->ccb_h.status = CAM_PATH_INVALID;
			break;
		case QHSTA_M_NO_AUTO_REQ_SENSE:
			/*
			 * User didn't request sense, but we got a
			 * check condition.
			 */
			ccb->csio.scsi_status = acb->queue.scsi_status;
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			break;
		default:
			panic("%s: Unhandled Host status error %x",
			      adw_name(adw), acb->queue.host_status);
			/* NOTREACHED */
		}
	}
	if ((acb->state & ACB_RECOVERY_ACB) != 0) {
		if (ccb->ccb_h.status == CAM_SCSI_BUS_RESET
		 || ccb->ccb_h.status == CAM_BDR_SENT)
			ccb->ccb_h.status = CAM_CMD_TIMEOUT;
	}
	if (ccb->ccb_h.status != CAM_REQ_CMP) {
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
	}
	adwfreeacb(adw, acb);
	xpt_done(ccb);
}

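/*
 * Timeout recovery: first attempt a Bus Device Reset against the
 * offending target; if the idle command fails, escalate to a full
 * SCSI bus reset.
 */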
static void
adwtimeout(void *arg)
{
	struct acb *acb;
	union ccb *ccb;
	struct adw_softc *adw;
	adw_idle_cmd_status_t status;
	int target_id;
	int s;

	acb = (struct acb *)arg;
	ccb = acb->ccb;
	adw = (struct adw_softc *)ccb->ccb_h.ccb_adw_ptr;
	xpt_print_path(ccb->ccb_h.path);
	printf("ACB %p - timed out\n", (void *)acb);

	s = splcam();

	if ((acb->state & ACB_ACTIVE) == 0) {
		xpt_print_path(ccb->ccb_h.path);
		printf("ACB %p - timed out CCB already completed\n",
		       (void *)acb);
		splx(s);
		return;
	}

	acb->state |= ACB_RECOVERY_ACB;
	target_id = ccb->ccb_h.target_id;

	/* Attempt a BDR first */
	status = adw_idle_cmd_send(adw, ADW_IDLE_CMD_DEVICE_RESET,
				   ccb->ccb_h.target_id);
	splx(s);
	if (status == ADW_IDLE_CMD_SUCCESS) {
		printf("%s: BDR Delivered.  No longer in timeout\n",
		       adw_name(adw));
		adw_handle_device_reset(adw, target_id);
	} else {
		adw_reset_bus(adw);
		xpt_print_path(adw->path);
		printf("Bus Reset Delivered.  No longer in timeout\n");
	}
}

static void
adw_handle_device_reset(struct adw_softc *adw, u_int target)
{
	struct cam_path *path;
	cam_status error;

	error = xpt_create_path(&path, /*periph*/NULL, cam_sim_path(adw->sim),
				target, CAM_LUN_WILDCARD);

	if (error == CAM_REQ_CMP) {
		xpt_async(AC_SENT_BDR, path, NULL);
		xpt_free_path(path);
	}
	adw->last_reset = CAM_BDR_SENT;
}

static void
adw_handle_bus_reset(struct adw_softc *adw, int initiated)
{
	if (initiated) {
		/*
		 * The microcode currently sets the SCSI Bus Reset signal
		 * while handling the AscSendIdleCmd() IDLE_CMD_SCSI_RESET
		 * command above.  But the SCSI Bus Reset Hold Time in the
		 * microcode is not deterministic (it may in fact be for less
		 * than the SCSI Spec. minimum of 25 us).  Therefore on return
		 * the Adv Library sets the SCSI Bus Reset signal for
		 * ADW_SCSI_RESET_HOLD_TIME_US, which is defined to be greater
		 * than 25 us.
		 */
		u_int scsi_ctrl;

		scsi_ctrl = adw_inw(adw, ADW_SCSI_CTRL) & ~ADW_SCSI_CTRL_RSTOUT;
		adw_outw(adw, ADW_SCSI_CTRL, scsi_ctrl | ADW_SCSI_CTRL_RSTOUT);
		DELAY(ADW_SCSI_RESET_HOLD_TIME_US);
		adw_outw(adw, ADW_SCSI_CTRL, scsi_ctrl);

		/*
		 * We will perform the async notification when the
		 * SCSI Reset interrupt occurs.
		 */
	} else
		xpt_async(AC_BUS_RESET, adw->path, NULL);
	adw->last_reset = CAM_SCSI_BUS_RESET;
}
MODULE_DEPEND(adw, cam, 1, 1, 1);