#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_debug.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include <dev/advansys/advansys.h>

static void	adv_action(struct cam_sim *sim, union ccb *ccb);
static void	adv_execute_ccb(void *arg, bus_dma_segment_t *dm_segs,
				int nsegments, int error);
static void	adv_poll(struct cam_sim *sim);
static void	adv_run_doneq(struct adv_softc *adv);
static struct adv_ccb_info *
		adv_alloc_ccb_info(struct adv_softc *adv);
static void	adv_destroy_ccb_info(struct adv_softc *adv,
				     struct adv_ccb_info *cinfo);
static __inline struct adv_ccb_info *
		adv_get_ccb_info(struct adv_softc *adv);
static __inline void adv_free_ccb_info(struct adv_softc *adv,
				       struct adv_ccb_info *cinfo);
static __inline void adv_set_state(struct adv_softc *adv, adv_state state);
static __inline void adv_clear_state(struct adv_softc *adv, union ccb *ccb);
static void adv_clear_state_really(struct adv_softc *adv, union ccb *ccb);

static __inline struct adv_ccb_info *
adv_get_ccb_info(struct adv_softc *adv)
{
	struct adv_ccb_info *cinfo;
	int opri;

	opri = splcam();
	if ((cinfo = SLIST_FIRST(&adv->free_ccb_infos)) != NULL) {
		SLIST_REMOVE_HEAD(&adv->free_ccb_infos, links);
	} else {
		cinfo = adv_alloc_ccb_info(adv);
	}
	splx(opri);

	return (cinfo);
}

static __inline void
adv_free_ccb_info(struct adv_softc *adv, struct adv_ccb_info *cinfo)
{
	int opri;

	opri = splcam();
	cinfo->state = ACCB_FREE;
	SLIST_INSERT_HEAD(&adv->free_ccb_infos, cinfo, links);
	splx(opri);
}

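/*
 * The state helpers below keep a bitmask of conditions that block new work.
 * Setting the first bit freezes the SIM queue via xpt_freeze_simq(); once
 * adv_clear_state_really() has cleared the last bit, the completing CCB is
 * tagged with CAM_RELEASE_SIMQ so the XPT layer releases the queue again.
 */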
static __inline void
adv_set_state(struct adv_softc *adv, adv_state state)
{
	if (adv->state == 0)
		xpt_freeze_simq(adv->sim, /*count*/1);
	adv->state |= state;
}

static __inline void
adv_clear_state(struct adv_softc *adv, union ccb *ccb)
{
	if (adv->state != 0)
		adv_clear_state_really(adv, ccb);
}

static void
adv_clear_state_really(struct adv_softc *adv, union ccb *ccb)
{
	if ((adv->state & ADV_BUSDMA_BLOCK_CLEARED) != 0)
		adv->state &= ~(ADV_BUSDMA_BLOCK_CLEARED|ADV_BUSDMA_BLOCK);
	if ((adv->state & ADV_RESOURCE_SHORTAGE) != 0) {
		int openings;

		openings = adv->max_openings - adv->cur_active - ADV_MIN_FREE_Q;
		if (openings >= adv->openings_needed) {
			adv->state &= ~ADV_RESOURCE_SHORTAGE;
			adv->openings_needed = 0;
		}
	}

	if ((adv->state & ADV_IN_TIMEOUT) != 0) {
		struct adv_ccb_info *cinfo;

		cinfo = (struct adv_ccb_info *)ccb->ccb_h.ccb_cinfo_ptr;
		if ((cinfo->state & ACCB_RECOVERY_CCB) != 0) {
			struct ccb_hdr *ccb_h;

			/*
			 * We now traverse our list of pending CCBs
			 * and reinstate their timeouts.
			 */
			ccb_h = LIST_FIRST(&adv->pending_ccbs);
			while (ccb_h != NULL) {
				ccb_h->timeout_ch =
				    timeout(adv_timeout, (caddr_t)ccb_h,
					    (ccb_h->timeout * hz) / 1000);
				ccb_h = LIST_NEXT(ccb_h, sim_links.le);
			}
			adv->state &= ~ADV_IN_TIMEOUT;
			printf("%s: No longer in timeout\n", adv_name(adv));
		}
	}
	if (adv->state == 0)
		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
}

void
adv_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *physaddr;

	physaddr = (bus_addr_t *)arg;
	*physaddr = segs->ds_addr;
}

char *
adv_name(struct adv_softc *adv)
{
	static char name[10];

	snprintf(name, sizeof(name), "adv%d", adv->unit);
	return (name);
}

static void
adv_action(struct cam_sim *sim, union ccb *ccb)
{
	struct adv_softc *adv;

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("adv_action\n"));

	adv = (struct adv_softc *)cam_sim_softc(sim);

	switch (ccb->ccb_h.func_code) {
	/* Common cases first */
	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
	{
		struct ccb_hdr *ccb_h;
		struct ccb_scsiio *csio;
		struct adv_ccb_info *cinfo;

		ccb_h = &ccb->ccb_h;
		csio = &ccb->csio;
		cinfo = adv_get_ccb_info(adv);
		if (cinfo == NULL)
			panic("XXX Handle CCB info error!!!");

		ccb_h->ccb_cinfo_ptr = cinfo;
		cinfo->ccb = ccb;

		/* Only use S/G if there is a transfer */
		if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
			if ((ccb_h->flags & CAM_SCATTER_VALID) == 0) {
				/*
				 * We've been given a pointer
				 * to a single buffer.
				 */
				if ((ccb_h->flags & CAM_DATA_PHYS) == 0) {
					int s;
					int error;

					s = splsoftvm();
					error =
					    bus_dmamap_load(adv->buffer_dmat,
							    cinfo->dmamap,
							    csio->data_ptr,
							    csio->dxfer_len,
							    adv_execute_ccb,
							    csio, /*flags*/0);
					if (error == EINPROGRESS) {
						/*
						 * So as to maintain ordering,
						 * freeze the controller queue
						 * until our mapping is
						 * returned.
						 */
						adv_set_state(adv,
							      ADV_BUSDMA_BLOCK);
					}
					splx(s);
				} else {
					struct bus_dma_segment seg;

					/* Pointer to physical buffer */
					seg.ds_addr =
					    (bus_addr_t)csio->data_ptr;
					seg.ds_len = csio->dxfer_len;
					adv_execute_ccb(csio, &seg, 1, 0);
				}
			} else {
				struct bus_dma_segment *segs;

				if ((ccb_h->flags & CAM_DATA_PHYS) != 0)
					panic("adv_setup_data - Physical "
					      "segment pointers unsupported");

				if ((ccb_h->flags & CAM_SG_LIST_PHYS) == 0)
					panic("adv_setup_data - Virtual "
					      "segment addresses unsupported");

				/* Just use the segments provided */
				segs = (struct bus_dma_segment *)csio->data_ptr;
				adv_execute_ccb(ccb, segs, csio->sglist_cnt, 0);
			}
		} else {
			adv_execute_ccb(ccb, NULL, 0, 0);
		}
		break;
	}
	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
	case XPT_TARGET_IO:	/* Execute target I/O request */
	case XPT_ACCEPT_TARGET_IO:	/* Accept Host Target Mode CDB */
	case XPT_CONT_TARGET_IO:	/* Continue Host Target I/O Connection */
	case XPT_EN_LUN:		/* Enable LUN as a target */
	case XPT_ABORT:			/* Abort the specified CCB */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
#define	IS_CURRENT_SETTINGS(c)	(c->type == CTS_TYPE_CURRENT_SETTINGS)
#define	IS_USER_SETTINGS(c)	(c->type == CTS_TYPE_USER_SETTINGS)
	case XPT_SET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings_scsi *scsi;
		struct ccb_trans_settings_spi *spi;
		struct ccb_trans_settings *cts;
		target_bit_vector targ_mask;
		struct adv_transinfo *tconf;
		u_int update_type;
		int s;

		cts = &ccb->cts;
		targ_mask = ADV_TID_TO_TARGET_MASK(cts->ccb_h.target_id);
		update_type = 0;

		/*
		 * The user must specify which type of settings he wishes
		 * to change.
		 */
		if (IS_CURRENT_SETTINGS(cts) && !IS_USER_SETTINGS(cts)) {
			tconf = &adv->tinfo[cts->ccb_h.target_id].current;
			update_type |= ADV_TRANS_GOAL;
		} else if (IS_USER_SETTINGS(cts) && !IS_CURRENT_SETTINGS(cts)) {
			tconf = &adv->tinfo[cts->ccb_h.target_id].user;
			update_type |= ADV_TRANS_USER;
		} else {
			ccb->ccb_h.status = CAM_REQ_INVALID;
			break;
		}

		s = splcam();
		scsi = &cts->proto_specific.scsi;
		spi = &cts->xport_specific.spi;
		if ((update_type & ADV_TRANS_GOAL) != 0) {
			if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
				if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
					adv->disc_enable |= targ_mask;
				else
					adv->disc_enable &= ~targ_mask;
				adv_write_lram_8(adv, ADVV_DISC_ENABLE_B,
						 adv->disc_enable);
			}

			if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
				if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
					adv->cmd_qng_enabled |= targ_mask;
				else
					adv->cmd_qng_enabled &= ~targ_mask;
			}
		}

		if ((update_type & ADV_TRANS_USER) != 0) {
			if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
				if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
					adv->user_disc_enable |= targ_mask;
				else
					adv->user_disc_enable &= ~targ_mask;
			}

			if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
				if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
					adv->user_cmd_qng_enabled |= targ_mask;
				else
					adv->user_cmd_qng_enabled &= ~targ_mask;
			}
		}
		/*
		 * If the user specifies either the sync rate, or offset,
		 * but not both, the unspecified parameter defaults to its
		 * current value in transfer negotiations.
		 */
		if (((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0)
		 || ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0)) {
			/*
			 * If the user provided a sync rate but no offset,
			 * use the current offset.
			 */
			if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0)
				spi->sync_offset = tconf->offset;

			/*
			 * If the user provided an offset but no sync rate,
			 * use the current sync rate.
			 */
			if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0)
				spi->sync_period = tconf->period;

			adv_period_offset_to_sdtr(adv, &spi->sync_period,
						  &spi->sync_offset,
						  cts->ccb_h.target_id);

			adv_set_syncrate(adv, /*struct cam_path */NULL,
					 cts->ccb_h.target_id, spi->sync_period,
					 spi->sync_offset, update_type);
		}

		splx(s);
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	/* Get default/user set transfer settings for the target */
	{
		struct ccb_trans_settings_scsi *scsi;
		struct ccb_trans_settings_spi *spi;
		struct ccb_trans_settings *cts;
		struct adv_transinfo *tconf;
		target_bit_vector target_mask;
		int s;

		cts = &ccb->cts;
		target_mask = ADV_TID_TO_TARGET_MASK(cts->ccb_h.target_id);

		scsi = &cts->proto_specific.scsi;
		spi = &cts->xport_specific.spi;

		cts->protocol = PROTO_SCSI;
		cts->protocol_version = SCSI_REV_2;
		cts->transport = XPORT_SPI;
		cts->transport_version = 2;

		scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
		spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;

		s = splcam();
		if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
			tconf = &adv->tinfo[cts->ccb_h.target_id].current;
			if ((adv->disc_enable & target_mask) != 0)
				spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
			if ((adv->cmd_qng_enabled & target_mask) != 0)
				scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
		} else {
			tconf = &adv->tinfo[cts->ccb_h.target_id].user;
			if ((adv->user_disc_enable & target_mask) != 0)
				spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
			if ((adv->user_cmd_qng_enabled & target_mask) != 0)
				scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
		}
		spi->sync_period = tconf->period;
		spi->sync_offset = tconf->offset;
		splx(s);
		spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
		spi->valid = CTS_SPI_VALID_SYNC_RATE
			   | CTS_SPI_VALID_SYNC_OFFSET
			   | CTS_SPI_VALID_BUS_WIDTH
			   | CTS_SPI_VALID_DISC;
		scsi->valid = CTS_SCSI_VALID_TQ;
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_CALC_GEOMETRY:
	{
		int extended;

		extended = (adv->control & ADV_CNTL_BIOS_GT_1GB) != 0;
		cam_calc_geometry(&ccb->ccg, extended);
		xpt_done(ccb);
		break;
	}
	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
	{
		int s;

		s = splcam();
		adv_stop_execution(adv);
		adv_reset_bus(adv, /*initiate_reset*/TRUE);
		adv_start_execution(adv);
		splx(s);

		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_TERM_IO:		/* Terminate the I/O process */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1; /* XXX??? */
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE;
		cpi->target_sprt = 0;
		cpi->hba_misc = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = 7;
		cpi->max_lun = 7;
		cpi->initiator_id = adv->scsi_id;
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 3300;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "Advansys", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->ccb_h.status = CAM_REQ_CMP;
		cpi->transport = XPORT_SPI;
		cpi->transport_version = 2;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_2;
		xpt_done(ccb);
		break;
	}
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
}

/*
 * Currently, the output of bus_dmamap_load suits our needs just
 * fine, but should it change, we'd need to do something here.
 */
#define	adv_fixup_dmasegs(adv, dm_segs) (struct adv_sg_entry *)(dm_segs)

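/*
 * adv_execute_ccb() is used both as the bus_dmamap_load() callback for
 * virtually addressed single buffers and as a direct call for the
 * physical-address and pre-built S/G cases handled in adv_action().  When
 * the mapping is deferred (EINPROGRESS), adv_action() has already blocked
 * the controller queue so requests are still issued in order once the
 * callback runs.
 */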
static void
adv_execute_ccb(void *arg, bus_dma_segment_t *dm_segs,
		int nsegments, int error)
{
	struct ccb_scsiio *csio;
	struct ccb_hdr *ccb_h;
	struct cam_sim *sim;
	struct adv_softc *adv;
	struct adv_ccb_info *cinfo;
	struct adv_scsi_q scsiq;
	struct adv_sg_head sghead;
	int s;

	csio = (struct ccb_scsiio *)arg;
	ccb_h = &csio->ccb_h;
	sim = xpt_path_sim(ccb_h->path);
	adv = (struct adv_softc *)cam_sim_softc(sim);
	cinfo = (struct adv_ccb_info *)csio->ccb_h.ccb_cinfo_ptr;

	/*
	 * Setup our done routine to release the simq on
	 * the next ccb that completes.
	 */
	if ((adv->state & ADV_BUSDMA_BLOCK) != 0)
		adv->state |= ADV_BUSDMA_BLOCK_CLEARED;

	if ((ccb_h->flags & CAM_CDB_POINTER) != 0) {
		if ((ccb_h->flags & CAM_CDB_PHYS) == 0) {
			/* XXX Need phystovirt!!!! */
			/* How about pmap_kenter??? */
			scsiq.cdbptr = csio->cdb_io.cdb_ptr;
		} else {
			scsiq.cdbptr = csio->cdb_io.cdb_ptr;
		}
	} else {
		scsiq.cdbptr = csio->cdb_io.cdb_bytes;
	}
	/*
	 * Build up the request
	 */
	scsiq.q1.status = 0;
	scsiq.q1.q_no = 0;
	scsiq.q1.cntl = 0;
	scsiq.q1.sg_queue_cnt = 0;
	scsiq.q1.target_id = ADV_TID_TO_TARGET_MASK(ccb_h->target_id);
	scsiq.q1.target_lun = ccb_h->target_lun;
	scsiq.q1.sense_len = csio->sense_len;
	scsiq.q1.extra_bytes = 0;
	scsiq.q2.ccb_index = cinfo - adv->ccb_infos;
	scsiq.q2.target_ix = ADV_TIDLUN_TO_IX(ccb_h->target_id,
					      ccb_h->target_lun);
	scsiq.q2.flag = 0;
	scsiq.q2.cdb_len = csio->cdb_len;
	if ((ccb_h->flags & CAM_TAG_ACTION_VALID) != 0)
		scsiq.q2.tag_code = csio->tag_action;
	else
		scsiq.q2.tag_code = 0;
	scsiq.q2.vm_id = 0;

	if (nsegments != 0) {
		bus_dmasync_op_t op;

		scsiq.q1.data_addr = dm_segs->ds_addr;
		scsiq.q1.data_cnt = dm_segs->ds_len;
		if (nsegments > 1) {
			scsiq.q1.cntl |= QC_SG_HEAD;
			sghead.entry_cnt
			    = sghead.entry_to_copy
			    = nsegments;
			sghead.res = 0;
			sghead.sg_list = adv_fixup_dmasegs(adv, dm_segs);
			scsiq.sg_head = &sghead;
		} else {
			scsiq.sg_head = NULL;
		}
		if ((ccb_h->flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;
		bus_dmamap_sync(adv->buffer_dmat, cinfo->dmamap, op);
	} else {
		scsiq.q1.data_addr = 0;
		scsiq.q1.data_cnt = 0;
		scsiq.sg_head = NULL;
	}

	s = splcam();

	/*
	 * Last time we need to check if this SCB needs to
	 * be aborted.
	 */
	if (ccb_h->status != CAM_REQ_INPROG) {
		if (nsegments != 0)
			bus_dmamap_unload(adv->buffer_dmat, cinfo->dmamap);
		adv_clear_state(adv, (union ccb *)csio);
		adv_free_ccb_info(adv, cinfo);
		xpt_done((union ccb *)csio);
		splx(s);
		return;
	}

	if (adv_execute_scsi_queue(adv, &scsiq, csio->dxfer_len) != 0) {
		/* Temporary resource shortage */
		adv_set_state(adv, ADV_RESOURCE_SHORTAGE);
		if (nsegments != 0)
			bus_dmamap_unload(adv->buffer_dmat, cinfo->dmamap);
		csio->ccb_h.status = CAM_REQUEUE_REQ;
		adv_clear_state(adv, (union ccb *)csio);
		adv_free_ccb_info(adv, cinfo);
		xpt_done((union ccb *)csio);
		splx(s);
		return;
	}
	cinfo->state |= ACCB_ACTIVE;
	ccb_h->status |= CAM_SIM_QUEUED;
	LIST_INSERT_HEAD(&adv->pending_ccbs, ccb_h, sim_links.le);
	/* Schedule our timeout */
	ccb_h->timeout_ch =
	    timeout(adv_timeout, csio, (ccb_h->timeout * hz)/1000);
	splx(s);
}

static struct adv_ccb_info *
adv_alloc_ccb_info(struct adv_softc *adv)
{
	int error;
	struct adv_ccb_info *cinfo;

	cinfo = &adv->ccb_infos[adv->ccb_infos_allocated];
	cinfo->state = ACCB_FREE;
	error = bus_dmamap_create(adv->buffer_dmat, /*flags*/0,
				  &cinfo->dmamap);
	if (error != 0) {
		printf("%s: Unable to allocate CCB info "
		       "dmamap - error %d\n", adv_name(adv), error);
		return (NULL);
	}
	adv->ccb_infos_allocated++;
	return (cinfo);
}

static void
adv_destroy_ccb_info(struct adv_softc *adv, struct adv_ccb_info *cinfo)
{
	bus_dmamap_destroy(adv->buffer_dmat, cinfo->dmamap);
}

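/*
 * Timeout handling is two-staged: the first timeout on a CCB queues an
 * abort for it and re-arms a short (2 * hz) timeout; if that timeout also
 * expires, the abort is considered to have failed and the bus is reset.
 * The comment inside explains how other pending timeouts are suspended
 * while recovery is in progress.
 */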
void
adv_timeout(void *arg)
{
	int s;
	union ccb *ccb;
	struct adv_softc *adv;
	struct adv_ccb_info *cinfo;

	ccb = (union ccb *)arg;
	adv = (struct adv_softc *)xpt_path_sim(ccb->ccb_h.path)->softc;
	cinfo = (struct adv_ccb_info *)ccb->ccb_h.ccb_cinfo_ptr;

	xpt_print_path(ccb->ccb_h.path);
	printf("Timed out\n");

	s = splcam();
	/* Have we been taken care of already?? */
	if (cinfo == NULL || cinfo->state == ACCB_FREE) {
		splx(s);
		return;
	}

	adv_stop_execution(adv);

	if ((cinfo->state & ACCB_ABORT_QUEUED) == 0) {
		struct ccb_hdr *ccb_h;

		/*
		 * In order to simplify the recovery process, we ask the XPT
		 * layer to halt the queue of new transactions and we traverse
		 * the list of pending CCBs and remove their timeouts. This
		 * means that the driver attempts to clear only one error
		 * condition at a time. In general, timeouts that occur
		 * close together are related anyway, so there is no benefit
		 * in attempting to handle errors in parallel. Timeouts will
		 * be reinstated when the recovery process ends.
		 */
		adv_set_state(adv, ADV_IN_TIMEOUT);

		/* This CCB is the CCB representing our recovery actions */
		cinfo->state |= ACCB_RECOVERY_CCB|ACCB_ABORT_QUEUED;

		ccb_h = LIST_FIRST(&adv->pending_ccbs);
		while (ccb_h != NULL) {
			untimeout(adv_timeout, ccb_h, ccb_h->timeout_ch);
			ccb_h = LIST_NEXT(ccb_h, sim_links.le);
		}

		/* XXX Should send a BDR */
		/* Attempt an abort as our first tactic */
		xpt_print_path(ccb->ccb_h.path);
		printf("Attempting abort\n");
		adv_abort_ccb(adv, ccb->ccb_h.target_id,
			      ccb->ccb_h.target_lun, ccb,
			      CAM_CMD_TIMEOUT, /*queued_only*/FALSE);
		ccb->ccb_h.timeout_ch =
		    timeout(adv_timeout, ccb, 2 * hz);
	} else {
		/* Our attempt to perform an abort failed, go for a reset */
		xpt_print_path(ccb->ccb_h.path);
		printf("Resetting bus\n");
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
		adv_reset_bus(adv, /*initiate_reset*/TRUE);
	}
	adv_start_execution(adv);
	splx(s);
}

struct adv_softc *
adv_alloc(device_t dev, bus_space_tag_t tag, bus_space_handle_t bsh)
{
	struct adv_softc *adv = device_get_softc(dev);

	/*
	 * Allocate a storage area for us
	 */
	LIST_INIT(&adv->pending_ccbs);
	SLIST_INIT(&adv->free_ccb_infos);
	adv->dev = dev;
	adv->unit = device_get_unit(dev);
	adv->tag = tag;
	adv->bsh = bsh;

	return (adv);
}

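/*
 * adv->init_level records how far attach-time resource setup progressed so
 * that adv_free() can unwind only what was actually set up; the switch
 * below intentionally falls through from the highest level reached down to
 * zero.
 */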
void
adv_free(struct adv_softc *adv)
{
	switch (adv->init_level) {
	case 6:
	{
		struct adv_ccb_info *cinfo;

		while ((cinfo = SLIST_FIRST(&adv->free_ccb_infos)) != NULL) {
			SLIST_REMOVE_HEAD(&adv->free_ccb_infos, links);
			adv_destroy_ccb_info(adv, cinfo);
		}

		bus_dmamap_unload(adv->sense_dmat, adv->sense_dmamap);
	}
	case 5:
		bus_dmamem_free(adv->sense_dmat, adv->sense_buffers,
				adv->sense_dmamap);
	case 4:
		bus_dma_tag_destroy(adv->sense_dmat);
	case 3:
		bus_dma_tag_destroy(adv->buffer_dmat);
	case 2:
		bus_dma_tag_destroy(adv->parent_dmat);
	case 1:
		if (adv->ccb_infos != NULL)
			free(adv->ccb_infos, M_DEVBUF);
	case 0:
		break;
	}
}

int
adv_init(struct adv_softc *adv)
{
	struct adv_eeprom_config eeprom_config;
	int checksum, i;
	int max_sync;
	u_int16_t config_lsw;
	u_int16_t config_msw;

	adv_lib_init(adv);

	/*
	 * Stop script execution.
	 */
	adv_write_lram_16(adv, ADV_HALTCODE_W, 0x00FE);
	adv_stop_execution(adv);
	if (adv_stop_chip(adv) == 0 || adv_is_chip_halted(adv) == 0) {
		printf("adv%d: Unable to halt adapter. Initialization "
		       "failed\n", adv->unit);
		return (1);
	}
	ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);
	if (ADV_INW(adv, ADV_REG_PROG_COUNTER) != ADV_MCODE_START_ADDR) {
		printf("adv%d: Unable to set program counter. Initialization "
		       "failed\n", adv->unit);
		return (1);
	}

	config_msw = ADV_INW(adv, ADV_CONFIG_MSW);
	config_lsw = ADV_INW(adv, ADV_CONFIG_LSW);

	if ((config_msw & ADV_CFG_MSW_CLR_MASK) != 0) {
		config_msw &= ~ADV_CFG_MSW_CLR_MASK;
		/*
		 * XXX The Linux code flags this as an error,
		 * but what should we report to the user???
		 * It seems that clearing the config register
		 * makes this error recoverable.
		 */
		ADV_OUTW(adv, ADV_CONFIG_MSW, config_msw);
	}

	/* Suck in the configuration from the EEProm */
	checksum = adv_get_eeprom_config(adv, &eeprom_config);

	if (ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_AUTO_CONFIG) {
		/*
		 * XXX The Linux code sets a warning level for this
		 * condition, yet nothing of meaning is printed to
		 * the user.  What does this mean???
		 */
		if (adv->chip_version == 3) {
			if (eeprom_config.cfg_lsw != config_lsw)
				eeprom_config.cfg_lsw = config_lsw;
			if (eeprom_config.cfg_msw != config_msw) {
				eeprom_config.cfg_msw = config_msw;
			}
		}
	}
	if (checksum == eeprom_config.chksum) {

		/* Range/Sanity checking */
		if (eeprom_config.max_total_qng < ADV_MIN_TOTAL_QNG) {
			eeprom_config.max_total_qng = ADV_MIN_TOTAL_QNG;
		}
		if (eeprom_config.max_total_qng > ADV_MAX_TOTAL_QNG) {
			eeprom_config.max_total_qng = ADV_MAX_TOTAL_QNG;
		}
		if (eeprom_config.max_tag_qng > eeprom_config.max_total_qng) {
			eeprom_config.max_tag_qng = eeprom_config.max_total_qng;
		}
		if (eeprom_config.max_tag_qng < ADV_MIN_TAG_Q_PER_DVC) {
			eeprom_config.max_tag_qng = ADV_MIN_TAG_Q_PER_DVC;
		}
		adv->max_openings = eeprom_config.max_total_qng;
		adv->user_disc_enable = eeprom_config.disc_enable;
		adv->user_cmd_qng_enabled = eeprom_config.use_cmd_qng;
		adv->isa_dma_speed = EEPROM_DMA_SPEED(eeprom_config);
		adv->scsi_id = EEPROM_SCSIID(eeprom_config) & ADV_MAX_TID;
		EEPROM_SET_SCSIID(eeprom_config, adv->scsi_id);
		adv->control = eeprom_config.cntl;
		for (i = 0; i <= ADV_MAX_TID; i++) {
			u_int8_t sync_data;

			if ((eeprom_config.init_sdtr & (0x1 << i)) == 0)
				sync_data = 0;
			else
				sync_data = eeprom_config.sdtr_data[i];
			adv_sdtr_to_period_offset(adv,
						  sync_data,
						  &adv->tinfo[i].user.period,
						  &adv->tinfo[i].user.offset,
						  i);
		}
		config_lsw = eeprom_config.cfg_lsw;
		eeprom_config.cfg_msw = config_msw;
	} else {
		u_int8_t sync_data;

		printf("adv%d: Warning EEPROM Checksum mismatch. "
		       "Using default device parameters\n", adv->unit);

		/* Set reasonable defaults since we can't read the EEPROM */
		adv->isa_dma_speed = /*ADV_DEF_ISA_DMA_SPEED*/1;
		adv->max_openings = ADV_DEF_MAX_TOTAL_QNG;
		adv->disc_enable = TARGET_BIT_VECTOR_SET;
		adv->user_disc_enable = TARGET_BIT_VECTOR_SET;
		adv->cmd_qng_enabled = TARGET_BIT_VECTOR_SET;
		adv->user_cmd_qng_enabled = TARGET_BIT_VECTOR_SET;
		adv->scsi_id = 7;
		adv->control = 0xFFFF;

		if (adv->chip_version == ADV_CHIP_VER_PCI_ULTRA_3050)
			/* Default to no Ultra to support the 3030 */
			adv->control &= ~ADV_CNTL_SDTR_ENABLE_ULTRA;
		sync_data = ADV_DEF_SDTR_OFFSET | (ADV_DEF_SDTR_INDEX << 4);
		for (i = 0; i <= ADV_MAX_TID; i++) {
			adv_sdtr_to_period_offset(adv, sync_data,
						  &adv->tinfo[i].user.period,
						  &adv->tinfo[i].user.offset,
						  i);
		}
		config_lsw |= ADV_CFG_LSW_SCSI_PARITY_ON;
	}
	config_msw &= ~ADV_CFG_MSW_CLR_MASK;
	config_lsw |= ADV_CFG_LSW_HOST_INT_ON;
	if ((adv->type & (ADV_PCI|ADV_ULTRA)) == (ADV_PCI|ADV_ULTRA)
	 && (adv->control & ADV_CNTL_SDTR_ENABLE_ULTRA) == 0)
		/* 25ns or 10MHz */
		max_sync = 25;
	else
		/* Unlimited */
		max_sync = 0;
	for (i = 0; i <= ADV_MAX_TID; i++) {
		if (adv->tinfo[i].user.period < max_sync)
			adv->tinfo[i].user.period = max_sync;
	}

	if (adv_test_external_lram(adv) == 0) {
		if ((adv->type & (ADV_PCI|ADV_ULTRA)) == (ADV_PCI|ADV_ULTRA)) {
			eeprom_config.max_total_qng =
			    ADV_MAX_PCI_ULTRA_INRAM_TOTAL_QNG;
			eeprom_config.max_tag_qng =
			    ADV_MAX_PCI_ULTRA_INRAM_TAG_QNG;
		} else {
			eeprom_config.cfg_msw |= 0x0800;
			config_msw |= 0x0800;
			eeprom_config.max_total_qng =
			    ADV_MAX_PCI_INRAM_TOTAL_QNG;
			eeprom_config.max_tag_qng = ADV_MAX_INRAM_TAG_QNG;
		}
		adv->max_openings = eeprom_config.max_total_qng;
	}
	ADV_OUTW(adv, ADV_CONFIG_MSW, config_msw);
	ADV_OUTW(adv, ADV_CONFIG_LSW, config_lsw);
#if 0
	/*
	 * Don't write the eeprom data back for now.
	 * I'd rather not mess up the user's card.  We also don't
	 * fully sanitize the eeprom settings above for the write-back
	 * to be 100% correct.
	 */
	if (adv_set_eeprom_config(adv, &eeprom_config) != 0)
		printf("%s: WARNING! Failure writing to EEPROM.\n",
		       adv_name(adv));
#endif

	adv_set_chip_scsiid(adv, adv->scsi_id);
	if (adv_init_lram_and_mcode(adv))
		return (1);

	adv->disc_enable = adv->user_disc_enable;

	adv_write_lram_8(adv, ADVV_DISC_ENABLE_B, adv->disc_enable);
	for (i = 0; i <= ADV_MAX_TID; i++) {
		/*
		 * Start off in async mode.
		 */
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 i, /*period*/0, /*offset*/0,
				 ADV_TRANS_CUR);
		/*
		 * Enable the use of tagged commands on all targets.
		 * This allows the kernel driver to make up its own mind
		 * as it sees fit to tag queue instead of having the
		 * firmware try and second guess the tag_code settings.
		 */
		adv_write_lram_8(adv, ADVV_MAX_DVC_QNG_BEG + i,
				 adv->max_openings);
	}
	adv_write_lram_8(adv, ADVV_USE_TAGGED_QNG_B, TARGET_BIT_VECTOR_SET);
	adv_write_lram_8(adv, ADVV_CAN_TAGGED_QNG_B, TARGET_BIT_VECTOR_SET);
	printf("adv%d: AdvanSys %s Host Adapter, SCSI ID %d, queue depth %d\n",
	       adv->unit,
	       (adv->type & ADV_ULTRA) && (max_sync == 0) ? "Ultra SCSI"
							  : "SCSI",
	       adv->scsi_id, adv->max_openings);
	return (0);
}

void
adv_intr(void *arg)
{
	struct adv_softc *adv;
	u_int16_t chipstat;
	u_int16_t saved_ram_addr;
	u_int8_t ctrl_reg;
	u_int8_t saved_ctrl_reg;
	u_int8_t host_flag;

	adv = (struct adv_softc *)arg;

	chipstat = ADV_INW(adv, ADV_CHIP_STATUS);

	/* Is it for us? */
	if ((chipstat & (ADV_CSW_INT_PENDING|ADV_CSW_SCSI_RESET_LATCH)) == 0)
		return;

	ctrl_reg = ADV_INB(adv, ADV_CHIP_CTRL);
	saved_ctrl_reg = ctrl_reg & (~(ADV_CC_SCSI_RESET | ADV_CC_CHIP_RESET |
				       ADV_CC_SINGLE_STEP | ADV_CC_DIAG |
				       ADV_CC_TEST));

	if ((chipstat & (ADV_CSW_SCSI_RESET_LATCH|ADV_CSW_SCSI_RESET_ACTIVE))) {
		printf("Detected Bus Reset\n");
		adv_reset_bus(adv, /*initiate_reset*/FALSE);
		return;
	}

	if ((chipstat & ADV_CSW_INT_PENDING) != 0) {

		saved_ram_addr = ADV_INW(adv, ADV_LRAM_ADDR);
		host_flag = adv_read_lram_8(adv, ADVV_HOST_FLAG_B);
		adv_write_lram_8(adv, ADVV_HOST_FLAG_B,
				 host_flag | ADV_HOST_FLAG_IN_ISR);

		adv_ack_interrupt(adv);

		if ((chipstat & ADV_CSW_HALTED) != 0
		 && (ctrl_reg & ADV_CC_SINGLE_STEP) != 0) {
			adv_isr_chip_halted(adv);
			saved_ctrl_reg &= ~ADV_CC_HALT;
		} else {
			adv_run_doneq(adv);
		}
		ADV_OUTW(adv, ADV_LRAM_ADDR, saved_ram_addr);
#ifdef DIAGNOSTIC
		if (ADV_INW(adv, ADV_LRAM_ADDR) != saved_ram_addr)
			panic("adv_intr: Unable to set LRAM addr");
#endif
		adv_write_lram_8(adv, ADVV_HOST_FLAG_B, host_flag);
	}

	ADV_OUTB(adv, ADV_CHIP_CTRL, saved_ctrl_reg);
}
static void
adv_run_doneq(struct adv_softc *adv)
{
	struct adv_q_done_info scsiq;
	u_int doneq_head;
	u_int done_qno;

	doneq_head = adv_read_lram_16(adv, ADVV_DONE_Q_TAIL_W) & 0xFF;
	done_qno = adv_read_lram_8(adv, ADV_QNO_TO_QADDR(doneq_head)
				   + ADV_SCSIQ_B_FWD);
	while (done_qno != ADV_QLINK_END) {
		union ccb *ccb;
		struct adv_ccb_info *cinfo;
		u_int done_qaddr;
		u_int sg_queue_cnt;
		int aborted;

		done_qaddr = ADV_QNO_TO_QADDR(done_qno);

		/* Pull status from this request */
		sg_queue_cnt = adv_copy_lram_doneq(adv, done_qaddr, &scsiq,
						   adv->max_dma_count);

		/* Mark it as free */
		adv_write_lram_8(adv, done_qaddr + ADV_SCSIQ_B_STATUS,
				 scsiq.q_status & ~(QS_READY|QS_ABORTED));

		/* Process request based on retrieved info */
		if ((scsiq.cntl & QC_SG_HEAD) != 0) {
			u_int i;

			/*
			 * S/G based request.  Free all of the queue
			 * structures that contained S/G information.
			 */
			for (i = 0; i < sg_queue_cnt; i++) {
				done_qno = adv_read_lram_8(adv, done_qaddr
							   + ADV_SCSIQ_B_FWD);

#ifdef DIAGNOSTIC
				if (done_qno == ADV_QLINK_END) {
					panic("adv_qdone: Corrupted SG "
					      "list encountered");
				}
#endif
				done_qaddr = ADV_QNO_TO_QADDR(done_qno);

				/* Mark SG queue as free */
				adv_write_lram_8(adv, done_qaddr
						 + ADV_SCSIQ_B_STATUS, QS_FREE);
			}
		} else
			sg_queue_cnt = 0;
#ifdef DIAGNOSTIC
		if (adv->cur_active < (sg_queue_cnt + 1))
			panic("adv_qdone: Attempting to free more "
			      "queues than are active");
#endif
		adv->cur_active -= sg_queue_cnt + 1;

		aborted = (scsiq.q_status & QS_ABORTED) != 0;

		if ((scsiq.q_status != QS_DONE)
		 && (scsiq.q_status & QS_ABORTED) == 0)
			panic("adv_qdone: completed scsiq with unknown status");

		scsiq.remain_bytes += scsiq.extra_bytes;

		if ((scsiq.d3.done_stat == QD_WITH_ERROR) &&
		    (scsiq.d3.host_stat == QHSTA_M_DATA_OVER_RUN)) {
			if ((scsiq.cntl & (QC_DATA_IN|QC_DATA_OUT)) == 0) {
				scsiq.d3.done_stat = QD_NO_ERROR;
				scsiq.d3.host_stat = QHSTA_NO_ERROR;
			}
		}

		cinfo = &adv->ccb_infos[scsiq.d2.ccb_index];
		ccb = cinfo->ccb;
		ccb->csio.resid = scsiq.remain_bytes;
		adv_done(adv, ccb,
			 scsiq.d3.done_stat, scsiq.d3.host_stat,
			 scsiq.d3.scsi_stat, scsiq.q_no);

		doneq_head = done_qno;
		done_qno = adv_read_lram_8(adv, done_qaddr + ADV_SCSIQ_B_FWD);
	}
	adv_write_lram_16(adv, ADVV_DONE_Q_TAIL_W, doneq_head);
}

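/*
 * adv_done() finishes a CCB: it cancels the pending timeout, syncs and
 * unloads any data mapping, translates the firmware's done_stat/host_stat/
 * scsi_status triple into CAM status values (freezing the device queue on
 * errors), and hands the CCB back to the XPT layer.
 */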
void
adv_done(struct adv_softc *adv, union ccb *ccb, u_int done_stat,
	 u_int host_stat, u_int scsi_status, u_int q_no)
{
	struct adv_ccb_info *cinfo;

	cinfo = (struct adv_ccb_info *)ccb->ccb_h.ccb_cinfo_ptr;
	LIST_REMOVE(&ccb->ccb_h, sim_links.le);
	untimeout(adv_timeout, ccb, ccb->ccb_h.timeout_ch);
	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		bus_dmasync_op_t op;

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_POSTREAD;
		else
			op = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(adv->buffer_dmat, cinfo->dmamap, op);
		bus_dmamap_unload(adv->buffer_dmat, cinfo->dmamap);
	}

	switch (done_stat) {
	case QD_NO_ERROR:
		if (host_stat == QHSTA_NO_ERROR) {
			ccb->ccb_h.status = CAM_REQ_CMP;
			break;
		}
		xpt_print_path(ccb->ccb_h.path);
		printf("adv_done - queue done without error, "
		       "but host status non-zero(%x)\n", host_stat);
		/*FALLTHROUGH*/
	case QD_WITH_ERROR:
		switch (host_stat) {
		case QHSTA_M_TARGET_STATUS_BUSY:
		case QHSTA_M_BAD_QUEUE_FULL_OR_BUSY:
			/*
			 * Assume that if we were a tagged transaction
			 * the target reported queue full.  Otherwise,
			 * report busy.  The firmware really should just
			 * pass the original status back up to us even
			 * if it thinks the target was in error for
			 * returning this status as no other transactions
			 * from this initiator are in effect, but this
			 * ignores multi-initiator setups and there is
			 * evidence that the firmware gets its per-device
			 * transaction counts screwed up occasionally.
			 */
			ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
			if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0
			 && host_stat != QHSTA_M_TARGET_STATUS_BUSY)
				scsi_status = SCSI_STATUS_QUEUE_FULL;
			else
				scsi_status = SCSI_STATUS_BUSY;
			adv_abort_ccb(adv, ccb->ccb_h.target_id,
				      ccb->ccb_h.target_lun,
				      /*ccb*/NULL, CAM_REQUEUE_REQ,
				      /*queued_only*/TRUE);
			/*FALLTHROUGH*/
		case QHSTA_M_NO_AUTO_REQ_SENSE:
		case QHSTA_NO_ERROR:
			ccb->csio.scsi_status = scsi_status;
			switch (scsi_status) {
			case SCSI_STATUS_CHECK_COND:
			case SCSI_STATUS_CMD_TERMINATED:
				ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
				/* Structure copy */
				ccb->csio.sense_data =
				    adv->sense_buffers[q_no - 1];
				/* FALLTHROUGH */
			case SCSI_STATUS_BUSY:
			case SCSI_STATUS_RESERV_CONFLICT:
			case SCSI_STATUS_QUEUE_FULL:
			case SCSI_STATUS_COND_MET:
			case SCSI_STATUS_INTERMED:
			case SCSI_STATUS_INTERMED_COND_MET:
				ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
				break;
			case SCSI_STATUS_OK:
				ccb->ccb_h.status |= CAM_REQ_CMP;
				break;
			}
			break;
		case QHSTA_M_SEL_TIMEOUT:
			ccb->ccb_h.status = CAM_SEL_TIMEOUT;
			break;
		case QHSTA_M_DATA_OVER_RUN:
			ccb->ccb_h.status = CAM_DATA_RUN_ERR;
			break;
		case QHSTA_M_UNEXPECTED_BUS_FREE:
			ccb->ccb_h.status = CAM_UNEXP_BUSFREE;
			break;
		case QHSTA_M_BAD_BUS_PHASE_SEQ:
			ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
			break;
		case QHSTA_M_BAD_CMPL_STATUS_IN:
			/* No command complete after a status message */
			ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
			break;
		case QHSTA_D_EXE_SCSI_Q_BUSY_TIMEOUT:
		case QHSTA_M_WTM_TIMEOUT:
		case QHSTA_M_HUNG_REQ_SCSI_BUS_RESET:
			/* The SCSI bus hung in a phase */
			ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
			adv_reset_bus(adv, /*initiate_reset*/TRUE);
			break;
		case QHSTA_M_AUTO_REQ_SENSE_FAIL:
			ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
			break;
		case QHSTA_D_QDONE_SG_LIST_CORRUPTED:
		case QHSTA_D_ASC_DVC_ERROR_CODE_SET:
		case QHSTA_D_HOST_ABORT_FAILED:
		case QHSTA_D_EXE_SCSI_Q_FAILED:
		case QHSTA_D_ASPI_NO_BUF_POOL:
		case QHSTA_M_BAD_TAG_CODE:
		case QHSTA_D_LRAM_CMP_ERROR:
		case QHSTA_M_MICRO_CODE_ERROR_HALT:
		default:
			panic("%s: Unhandled Host status error %x",
			      adv_name(adv), host_stat);
			/* NOTREACHED */
		}
		break;

	case QD_ABORTED_BY_HOST:
		/* Don't clobber any, more explicit, error codes we've set */
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG)
			ccb->ccb_h.status = CAM_REQ_ABORTED;
		break;

	default:
		xpt_print_path(ccb->ccb_h.path);
		printf("adv_done - queue done with unknown status %x:%x\n",
		       done_stat, host_stat);
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		break;
	}
	adv_clear_state(adv, ccb);
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP
	 && (ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
	}
	adv_free_ccb_info(adv, cinfo);
	/*
	 * Null this out so that we catch driver bugs that cause a
	 * ccb to be completed twice.
	 */
	ccb->ccb_h.ccb_cinfo_ptr = NULL;
	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
	xpt_done(ccb);
}
/*
 * Function to poll for command completion when
 * interrupts are disabled (crash dumps)
 */
static void
adv_poll(struct cam_sim *sim)
{
	adv_intr(cam_sim_softc(sim));
}

/*
 * Attach all the sub-devices we can find
 */
int
adv_attach(adv)
	struct adv_softc *adv;
{
	struct ccb_setasync csa;
	struct cam_devq *devq;
	int max_sg;

	/*
	 * Allocate an array of ccb mapping structures.  We put the
	 * index of the ccb_info structure into the queue representing
	 * a transaction and use it for mapping the queue to the
	 * upper level SCSI transaction it represents.
	 */
	adv->ccb_infos = malloc(sizeof(*adv->ccb_infos) * adv->max_openings,
				M_DEVBUF, M_NOWAIT);

	if (adv->ccb_infos == NULL)
		return (ENOMEM);

	adv->init_level++;

	/*
	 * Create our DMA tags.  These tags define the kinds of device
	 * accessible memory allocations and memory mappings we will
	 * need to perform during normal operation.
	 *
	 * Unless we need to further restrict the allocation, we rely
	 * on the restrictions of the parent dmat, hence the common
	 * use of MAXADDR and MAXSIZE.
	 *
	 * The ASC boards use chains of "queues" (the transactional
	 * resources on the board) to represent long S/G lists.
	 * The first queue represents the command and holds a
	 * single address and data pair.  The queues that follow
	 * can each hold ADV_SG_LIST_PER_Q entries.  Given the
	 * total number of queues, we can express the largest
	 * transaction we can map.  We reserve a few queues for
	 * error recovery.  Take those into account as well.
	 *
	 * There is a way to take an interrupt to download the
	 * next batch of S/G entries if there are more than 255
	 * of them (the counter in the queue structure is a u_int8_t).
	 * We don't use this feature, so limit the S/G list size
	 * accordingly.
	 */
	max_sg = (adv->max_openings - ADV_MIN_FREE_Q - 1) * ADV_SG_LIST_PER_Q;
	if (max_sg > 255)
		max_sg = 255;
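	/*
	 * Illustrative arithmetic only (the actual values come from the
	 * driver headers): if ADV_SG_LIST_PER_Q were 7 and ADV_MIN_FREE_Q
	 * were 2, a board with 30 openings could chain
	 * (30 - 2 - 1) * 7 = 189 S/G entries, which is under the 255-entry
	 * cap imposed by the u_int8_t counter mentioned above.
	 */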

	/* DMA tag for mapping buffers into device visible space. */
	if (bus_dma_tag_create(
			/* parent	*/ adv->parent_dmat,
			/* alignment	*/ 1,
			/* boundary	*/ 0,
			/* lowaddr	*/ BUS_SPACE_MAXADDR,
			/* highaddr	*/ BUS_SPACE_MAXADDR,
			/* filter	*/ NULL,
			/* filterarg	*/ NULL,
			/* maxsize	*/ MAXPHYS,
			/* nsegments	*/ max_sg,
			/* maxsegsz	*/ BUS_SPACE_MAXSIZE_32BIT,
			/* flags	*/ BUS_DMA_ALLOCNOW,
			/* lockfunc	*/ busdma_lock_mutex,
			/* lockarg	*/ &Giant,
			&adv->buffer_dmat) != 0) {
		return (ENXIO);
	}
	adv->init_level++;

	/* DMA tag for our sense buffers */
	if (bus_dma_tag_create(
			/* parent	*/ adv->parent_dmat,
			/* alignment	*/ 1,
			/* boundary	*/ 0,
			/* lowaddr	*/ BUS_SPACE_MAXADDR,
			/* highaddr	*/ BUS_SPACE_MAXADDR,
			/* filter	*/ NULL,
			/* filterarg	*/ NULL,
			/* maxsize	*/ sizeof(struct scsi_sense_data) *
					   adv->max_openings,
			/* nsegments	*/ 1,
			/* maxsegsz	*/ BUS_SPACE_MAXSIZE_32BIT,
			/* flags	*/ 0,
			/* lockfunc	*/ busdma_lock_mutex,
			/* lockarg	*/ &Giant,
			&adv->sense_dmat) != 0) {
		return (ENXIO);
	}

	adv->init_level++;

	/* Allocation for our sense buffers */
	if (bus_dmamem_alloc(adv->sense_dmat, (void **)&adv->sense_buffers,
			     BUS_DMA_NOWAIT, &adv->sense_dmamap) != 0) {
		return (ENOMEM);
	}

	adv->init_level++;

	/* And permanently map them */
	bus_dmamap_load(adv->sense_dmat, adv->sense_dmamap,
			adv->sense_buffers,
			sizeof(struct scsi_sense_data) * adv->max_openings,
			adv_map, &adv->sense_physbase, /*flags*/0);

	adv->init_level++;

	/*
	 * Fire up the chip
	 */
	if (adv_start_chip(adv) != 1) {
		printf("adv%d: Unable to start on board processor. Aborting.\n",
		       adv->unit);
		return (ENXIO);
	}

	/*
	 * Create the device queue for our SIM.
	 */
	devq = cam_simq_alloc(adv->max_openings);
	if (devq == NULL)
		return (ENOMEM);

	/*
	 * Construct our SIM entry.
	 */
	adv->sim = cam_sim_alloc(adv_action, adv_poll, "adv", adv, adv->unit,