aic7xxx_osm.c revision 71717
1/* 2 * Bus independent FreeBSD shim for the aic7xxx based adaptec SCSI controllers 3 * 4 * Copyright (c) 1994-2001 Justin T. Gibbs. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions, and the following disclaimer, 12 * without modification. 13 * 2. The name of the author may not be used to endorse or promote products 14 * derived from this software without specific prior written permission. 15 * 16 * Alternatively, this software may be distributed under the terms of the 17 * GNU Public License ("GPL"). 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 23 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 29 * SUCH DAMAGE. 
30 * 31 * $Id$ 32 * 33 * $FreeBSD: head/sys/dev/aic7xxx/aic7xxx_osm.c 71717 2001-01-27 20:54:24Z gibbs $ 34 */ 35 36#include <dev/aic7xxx/aic7xxx_freebsd.h> 37#include <dev/aic7xxx/aic7xxx_inline.h> 38 39#ifndef AHC_TMODE_ENABLE 40#define AHC_TMODE_ENABLE 0 41#endif 42 43#define ccb_scb_ptr spriv_ptr0 44 45#ifdef AHC_DEBUG 46static int ahc_debug = AHC_DEBUG; 47#endif 48 49#if UNUSED 50static void ahc_dump_targcmd(struct target_cmd *cmd); 51#endif 52static void ahc_action(struct cam_sim *sim, union ccb *ccb); 53static void ahc_get_tran_settings(struct ahc_softc *ahc, 54 int our_id, char channel, 55 struct ccb_trans_settings *cts); 56static void ahc_async(void *callback_arg, uint32_t code, 57 struct cam_path *path, void *arg); 58static void ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs, 59 int nsegments, int error); 60static void ahc_poll(struct cam_sim *sim); 61static void ahc_setup_data(struct ahc_softc *ahc, struct cam_sim *sim, 62 struct ccb_scsiio *csio, struct scb *scb); 63static void ahc_abort_ccb(struct ahc_softc *ahc, struct cam_sim *sim, 64 union ccb *ccb); 65static int ahc_create_path(struct ahc_softc *ahc, 66 char channel, u_int target, u_int lun, 67 struct cam_path **path); 68 69static void ahc_set_recoveryscb(struct ahc_softc *ahc, struct scb *scb); 70 71static int 72ahc_create_path(struct ahc_softc *ahc, char channel, u_int target, 73 u_int lun, struct cam_path **path) 74{ 75 path_id_t path_id; 76 77 if (channel == 'B') 78 path_id = cam_sim_path(ahc->platform_data->sim_b); 79 else 80 path_id = cam_sim_path(ahc->platform_data->sim); 81 82 return (xpt_create_path(path, /*periph*/NULL, 83 path_id, target, lun)); 84} 85 86/* 87 * Attach all the sub-devices we can find 88 */ 89int 90ahc_attach(struct ahc_softc *ahc) 91{ 92 char ahc_info[256]; 93 struct ccb_setasync csa; 94 struct cam_devq *devq; 95 int bus_id; 96 int bus_id2; 97 struct cam_sim *sim; 98 struct cam_sim *sim2; 99 struct cam_path *path; 100 struct cam_path *path2; 101 long s; 102 int 
count; 103 int error; 104 105 count = 0; 106 sim = NULL; 107 sim2 = NULL; 108 109 ahc_controller_info(ahc, ahc_info); 110 printf("%s\n", ahc_info); 111 ahc_lock(ahc, &s); 112 /* Hook up our interrupt handler */ 113 if ((error = bus_setup_intr(ahc->dev_softc, ahc->platform_data->irq, 114 INTR_TYPE_CAM, ahc_platform_intr, ahc, 115 &ahc->platform_data->ih)) != 0) { 116 device_printf(ahc->dev_softc, "bus_setup_intr() failed: %d\n", 117 error); 118 goto fail; 119 } 120 121 /* 122 * Attach secondary channel first if the user has 123 * declared it the primary channel. 124 */ 125 if ((ahc->flags & AHC_CHANNEL_B_PRIMARY) != 0) { 126 bus_id = 1; 127 bus_id2 = 0; 128 } else { 129 bus_id = 0; 130 bus_id2 = 1; 131 } 132 133 /* 134 * Create the device queue for our SIM(s). 135 */ 136 devq = cam_simq_alloc(AHC_MAX_QUEUE); 137 if (devq == NULL) 138 goto fail; 139 140 /* 141 * Construct our first channel SIM entry 142 */ 143 sim = cam_sim_alloc(ahc_action, ahc_poll, "ahc", ahc, 144 device_get_unit(ahc->dev_softc), 145 1, AHC_MAX_QUEUE, devq); 146 if (sim == NULL) { 147 cam_simq_free(devq); 148 goto fail; 149 } 150 151 if (xpt_bus_register(sim, bus_id) != CAM_SUCCESS) { 152 cam_sim_free(sim, /*free_devq*/TRUE); 153 sim = NULL; 154 goto fail; 155 } 156 157 if (xpt_create_path(&path, /*periph*/NULL, 158 cam_sim_path(sim), CAM_TARGET_WILDCARD, 159 CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 160 xpt_bus_deregister(cam_sim_path(sim)); 161 cam_sim_free(sim, /*free_devq*/TRUE); 162 sim = NULL; 163 goto fail; 164 } 165 166 xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5); 167 csa.ccb_h.func_code = XPT_SASYNC_CB; 168 csa.event_enable = AC_LOST_DEVICE; 169 csa.callback = ahc_async; 170 csa.callback_arg = sim; 171 xpt_action((union ccb *)&csa); 172 count++; 173 174 if (ahc->features & AHC_TWIN) { 175 sim2 = cam_sim_alloc(ahc_action, ahc_poll, "ahc", 176 ahc, device_get_unit(ahc->dev_softc), 1, 177 AHC_MAX_QUEUE, devq); 178 179 if (sim2 == NULL) { 180 printf("ahc_attach: Unable to attach second " 181 
"bus due to resource shortage"); 182 goto fail; 183 } 184 185 if (xpt_bus_register(sim2, bus_id2) != CAM_SUCCESS) { 186 printf("ahc_attach: Unable to attach second " 187 "bus due to resource shortage"); 188 /* 189 * We do not want to destroy the device queue 190 * because the first bus is using it. 191 */ 192 cam_sim_free(sim2, /*free_devq*/FALSE); 193 goto fail; 194 } 195 196 if (xpt_create_path(&path2, /*periph*/NULL, 197 cam_sim_path(sim2), 198 CAM_TARGET_WILDCARD, 199 CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 200 xpt_bus_deregister(cam_sim_path(sim2)); 201 cam_sim_free(sim2, /*free_devq*/FALSE); 202 sim2 = NULL; 203 goto fail; 204 } 205 xpt_setup_ccb(&csa.ccb_h, path2, /*priority*/5); 206 csa.ccb_h.func_code = XPT_SASYNC_CB; 207 csa.event_enable = AC_LOST_DEVICE; 208 csa.callback = ahc_async; 209 csa.callback_arg = sim2; 210 xpt_action((union ccb *)&csa); 211 count++; 212 } 213 214fail: 215 if ((ahc->flags & AHC_CHANNEL_B_PRIMARY) != 0) { 216 ahc->platform_data->sim_b = sim; 217 ahc->platform_data->path_b = path; 218 ahc->platform_data->sim = sim2; 219 ahc->platform_data->path = path2; 220 } else { 221 ahc->platform_data->sim = sim; 222 ahc->platform_data->path = path; 223 ahc->platform_data->sim_b = sim2; 224 ahc->platform_data->path_b = path2; 225 } 226 ahc_unlock(ahc, &s); 227 228 if (count != 0) 229 /* We have to wait until after any system dumps... */ 230 ahc->platform_data->eh = 231 EVENTHANDLER_REGISTER(shutdown_final, ahc_shutdown, 232 ahc, SHUTDOWN_PRI_DEFAULT); 233 234 return (count); 235} 236 237/* 238 * Catch an interrupt from the adapter 239 */ 240void 241ahc_platform_intr(void *arg) 242{ 243 struct ahc_softc *ahc; 244 245 ahc = (struct ahc_softc *)arg; 246 ahc_intr(ahc); 247} 248 249/* 250 * We have an scb which has been processed by the 251 * adaptor, now we look to see how the operation 252 * went. 
253 */ 254void 255ahc_done(struct ahc_softc *ahc, struct scb *scb) 256{ 257 union ccb *ccb; 258 259 CAM_DEBUG(scb->io_ctx->ccb_h.path, CAM_DEBUG_TRACE, 260 ("ahc_done - scb %d\n", scb->hscb->tag)); 261 262 ccb = scb->io_ctx; 263 LIST_REMOVE(scb, pending_links); 264 if ((scb->flags & SCB_UNTAGGEDQ) != 0) { 265 struct scb_tailq *untagged_q; 266 267 untagged_q = &ahc->untagged_queues[ccb->ccb_h.target_id]; 268 TAILQ_REMOVE(untagged_q, scb, links.tqe); 269 scb->flags &= ~SCB_UNTAGGEDQ; 270 ahc_run_untagged_queue(ahc, untagged_q); 271 } 272 273 untimeout(ahc_timeout, (caddr_t)scb, ccb->ccb_h.timeout_ch); 274 275 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 276 bus_dmasync_op_t op; 277 278 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) 279 op = BUS_DMASYNC_POSTREAD; 280 else 281 op = BUS_DMASYNC_POSTWRITE; 282 bus_dmamap_sync(ahc->buffer_dmat, scb->dmamap, op); 283 bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap); 284 } 285 286 if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) { 287 if (ahc_get_transaction_status(scb) == CAM_REQ_INPROG) 288 ccb->ccb_h.status |= CAM_REQ_CMP; 289 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 290 ahc_free_scb(ahc, scb); 291 xpt_done(ccb); 292 return; 293 } 294 295 /* 296 * If the recovery SCB completes, we have to be 297 * out of our timeout. 298 */ 299 if ((scb->flags & SCB_RECOVERY_SCB) != 0) { 300 struct scb *list_scb; 301 302 /* 303 * We were able to complete the command successfully, 304 * so reinstate the timeouts for all other pending 305 * commands. 
306 */ 307 LIST_FOREACH(list_scb, &ahc->pending_scbs, pending_links) { 308 union ccb *ccb; 309 uint64_t time; 310 311 ccb = list_scb->io_ctx; 312 if (ccb->ccb_h.timeout == CAM_TIME_INFINITY) 313 continue; 314 315 time = ccb->ccb_h.timeout; 316 time *= hz; 317 time /= 1000; 318 ccb->ccb_h.timeout_ch = 319 timeout(ahc_timeout, list_scb, time); 320 } 321 322 if (ahc_get_transaction_status(scb) == CAM_BDR_SENT 323 || ahc_get_transaction_status(scb) == CAM_REQ_ABORTED) 324 ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT); 325 ahc_print_path(ahc, scb); 326 printf("no longer in timeout, status = %x\n", 327 ccb->ccb_h.status); 328 } 329 330 /* Don't clobber any existing error state */ 331 if (ahc_get_transaction_status(scb) == CAM_REQ_INPROG) { 332 ccb->ccb_h.status |= CAM_REQ_CMP; 333 } else if ((scb->flags & SCB_SENSE) != 0) { 334 /* 335 * We performed autosense retrieval. 336 * 337 * Zero any sense not transferred by the 338 * device. The SCSI spec mandates that any 339 * untransfered data should be assumed to be 340 * zero. Complete the 'bounce' of sense information 341 * through buffers accessible via bus-space by 342 * copying it into the clients csio. 
343 */ 344 memset(&ccb->csio.sense_data, 0, sizeof(ccb->csio.sense_data)); 345 memcpy(&ccb->csio.sense_data, 346 ahc_get_sense_buf(ahc, scb), 347 (scb->sg_list->len & AHC_SG_LEN_MASK) 348 - ccb->csio.sense_resid); 349 scb->io_ctx->ccb_h.status |= CAM_AUTOSNS_VALID; 350 } 351 ccb->ccb_h.status &= ~CAM_SIM_QUEUED; 352 ahc_free_scb(ahc, scb); 353 xpt_done(ccb); 354} 355 356static void 357ahc_action(struct cam_sim *sim, union ccb *ccb) 358{ 359 struct ahc_softc *ahc; 360 struct tmode_lstate *lstate; 361 u_int target_id; 362 u_int our_id; 363 long s; 364 365 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ahc_action\n")); 366 367 ahc = (struct ahc_softc *)cam_sim_softc(sim); 368 369 target_id = ccb->ccb_h.target_id; 370 our_id = SIM_SCSI_ID(ahc, sim); 371 372 switch (ccb->ccb_h.func_code) { 373 /* Common cases first */ 374 case XPT_ACCEPT_TARGET_IO: /* Accept Host Target Mode CDB */ 375 case XPT_CONT_TARGET_IO:/* Continue Host Target I/O Connection*/ 376 { 377 struct tmode_tstate *tstate; 378 cam_status status; 379 380 status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate, 381 &lstate, TRUE); 382 383 if (status != CAM_REQ_CMP) { 384 if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) { 385 /* Response from the black hole device */ 386 tstate = NULL; 387 lstate = ahc->black_hole; 388 } else { 389 ccb->ccb_h.status = status; 390 xpt_done(ccb); 391 break; 392 } 393 } 394 if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { 395 396 ahc_lock(ahc, &s); 397 SLIST_INSERT_HEAD(&lstate->accept_tios, &ccb->ccb_h, 398 sim_links.sle); 399 ccb->ccb_h.status = CAM_REQ_INPROG; 400 if ((ahc->flags & AHC_TQINFIFO_BLOCKED) != 0) 401 ahc_run_tqinfifo(ahc, /*paused*/FALSE); 402 ahc_unlock(ahc, &s); 403 break; 404 } 405 406 /* 407 * The target_id represents the target we attempt to 408 * select. In target mode, this is the initiator of 409 * the original command. 
410 */ 411 our_id = target_id; 412 target_id = ccb->csio.init_id; 413 /* FALLTHROUGH */ 414 } 415 case XPT_SCSI_IO: /* Execute the requested I/O operation */ 416 case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ 417 { 418 struct scb *scb; 419 struct hardware_scb *hscb; 420 421 if ((ahc->flags & AHC_INITIATORROLE) == 0 422 && (ccb->ccb_h.func_code == XPT_SCSI_IO 423 || ccb->ccb_h.func_code == XPT_RESET_DEV)) { 424 ccb->ccb_h.status = CAM_PROVIDE_FAIL; 425 xpt_done(ccb); 426 } 427 428 /* 429 * get an scb to use. 430 */ 431 ahc_lock(ahc, &s); 432 if ((scb = ahc_get_scb(ahc)) == NULL) { 433 434 xpt_freeze_simq(sim, /*count*/1); 435 ahc->flags |= AHC_RESOURCE_SHORTAGE; 436 ahc_unlock(ahc, &s); 437 ccb->ccb_h.status = CAM_REQUEUE_REQ; 438 xpt_done(ccb); 439 return; 440 } 441 ahc_unlock(ahc, &s); 442 443 hscb = scb->hscb; 444 445 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_SUBTRACE, 446 ("start scb(%p)\n", scb)); 447 scb->io_ctx = ccb; 448 /* 449 * So we can find the SCB when an abort is requested 450 */ 451 ccb->ccb_h.ccb_scb_ptr = scb; 452 453 /* 454 * Put all the arguments for the xfer in the scb 455 */ 456 hscb->control = 0; 457 hscb->scsiid = BUILD_SCSIID(ahc, sim, target_id, our_id); 458 hscb->lun = ccb->ccb_h.target_lun; 459 if (ccb->ccb_h.func_code == XPT_RESET_DEV) { 460 hscb->cdb_len = 0; 461 scb->flags |= SCB_DEVICE_RESET; 462 hscb->control |= MK_MESSAGE; 463 ahc_execute_scb(scb, NULL, 0, 0); 464 } else { 465 if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) { 466 struct target_data *tdata; 467 468 tdata = &hscb->shared_data.tdata; 469 if (ahc->pending_device == lstate) { 470 scb->flags |= SCB_TARGET_IMMEDIATE; 471 ahc->pending_device = NULL; 472 } 473 hscb->control |= TARGET_SCB; 474 tdata->target_phases = IDENTIFY_SEEN; 475 if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) { 476 tdata->target_phases |= SPHASE_PENDING; 477 tdata->scsi_status = 478 ccb->csio.scsi_status; 479 } 480 tdata->initiator_tag = ccb->csio.tag_id; 481 } 482 if (ccb->ccb_h.flags & 
CAM_TAG_ACTION_VALID) 483 hscb->control |= ccb->csio.tag_action; 484 485 ahc_setup_data(ahc, sim, &ccb->csio, scb); 486 } 487 break; 488 } 489 case XPT_NOTIFY_ACK: 490 case XPT_IMMED_NOTIFY: 491 { 492 struct tmode_tstate *tstate; 493 struct tmode_lstate *lstate; 494 cam_status status; 495 496 status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate, 497 &lstate, TRUE); 498 499 if (status != CAM_REQ_CMP) { 500 ccb->ccb_h.status = status; 501 xpt_done(ccb); 502 break; 503 } 504 SLIST_INSERT_HEAD(&lstate->immed_notifies, &ccb->ccb_h, 505 sim_links.sle); 506 ccb->ccb_h.status = CAM_REQ_INPROG; 507 ahc_send_lstate_events(ahc, lstate); 508 break; 509 } 510 case XPT_EN_LUN: /* Enable LUN as a target */ 511 ahc_handle_en_lun(ahc, sim, ccb); 512 xpt_done(ccb); 513 break; 514 case XPT_ABORT: /* Abort the specified CCB */ 515 { 516 ahc_abort_ccb(ahc, sim, ccb); 517 break; 518 } 519 case XPT_SET_TRAN_SETTINGS: 520 { 521#ifdef AHC_NEW_TRAN_SETTINGS 522 struct ahc_devinfo devinfo; 523 struct ccb_trans_settings *cts; 524 struct ccb_trans_settings_scsi *scsi; 525 struct ccb_trans_settings_spi *spi; 526 struct ahc_initiator_tinfo *tinfo; 527 struct tmode_tstate *tstate; 528 uint16_t *discenable; 529 uint16_t *tagenable; 530 u_int update_type; 531 532 cts = &ccb->cts; 533 scsi = &cts->proto_specific.scsi; 534 spi = &cts->xport_specific.spi; 535 ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim), 536 cts->ccb_h.target_id, 537 cts->ccb_h.target_lun, 538 SIM_CHANNEL(ahc, sim), 539 ROLE_UNKNOWN); 540 tinfo = ahc_fetch_transinfo(ahc, devinfo.channel, 541 devinfo.our_scsiid, 542 devinfo.target, &tstate); 543 update_type = 0; 544 if (cts->type == CTS_TYPE_CURRENT_SETTINGS) { 545 update_type |= AHC_TRANS_GOAL; 546 discenable = &tstate->discenable; 547 tagenable = &tstate->tagenable; 548 tinfo->current.protocol_version = 549 cts->protocol_version; 550 tinfo->current.transport_version = 551 cts->transport_version; 552 tinfo->goal.protocol_version = 553 cts->protocol_version; 554 
tinfo->goal.transport_version = 555 cts->transport_version; 556 } else if (cts->type == CTS_TYPE_USER_SETTINGS) { 557 update_type |= AHC_TRANS_USER; 558 discenable = &ahc->user_discenable; 559 tagenable = &ahc->user_tagenable; 560 tinfo->user.protocol_version = 561 cts->protocol_version; 562 tinfo->user.transport_version = 563 cts->transport_version; 564 } else { 565 ccb->ccb_h.status = CAM_REQ_INVALID; 566 xpt_done(ccb); 567 break; 568 } 569 570 ahc_lock(ahc, &s); 571 572 if ((spi->valid & CTS_SPI_VALID_DISC) != 0) { 573 if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) 574 *discenable |= devinfo.target_mask; 575 else 576 *discenable &= ~devinfo.target_mask; 577 } 578 579 if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { 580 if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) 581 *tagenable |= devinfo.target_mask; 582 else 583 *tagenable &= ~devinfo.target_mask; 584 } 585 586 if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) { 587 ahc_validate_width(ahc, /*tinfo limit*/NULL, 588 &spi->bus_width, ROLE_UNKNOWN); 589 ahc_set_width(ahc, &devinfo, spi->bus_width, 590 update_type, /*paused*/FALSE); 591 } 592 593 if ((spi->valid & CTS_SPI_VALID_PPR_OPTIONS) == 0) { 594 if (update_type == AHC_TRANS_USER) 595 spi->ppr_options = tinfo->user.ppr_options; 596 else 597 spi->ppr_options = tinfo->goal.ppr_options; 598 } 599 600 if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0) { 601 if (update_type == AHC_TRANS_USER) 602 spi->sync_offset = tinfo->user.offset; 603 else 604 spi->sync_offset = tinfo->goal.offset; 605 } 606 607 if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) { 608 if (update_type == AHC_TRANS_USER) 609 spi->sync_period = tinfo->user.period; 610 else 611 spi->sync_period = tinfo->goal.period; 612 } 613 614 if (((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0) 615 || ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0)) { 616 struct ahc_syncrate *syncrate; 617 u_int maxsync; 618 619 if ((ahc->features & AHC_ULTRA2) != 0) 620 maxsync = AHC_SYNCRATE_DT; 621 else if ((ahc->features & 
AHC_ULTRA) != 0) 622 maxsync = AHC_SYNCRATE_ULTRA; 623 else 624 maxsync = AHC_SYNCRATE_FAST; 625 626 syncrate = ahc_find_syncrate(ahc, &spi->sync_period, 627 &spi->ppr_options, 628 maxsync); 629 ahc_validate_offset(ahc, /*tinfo limit*/NULL, 630 syncrate, &spi->sync_offset, 631 spi->bus_width, ROLE_UNKNOWN); 632 633 /* We use a period of 0 to represent async */ 634 if (spi->sync_offset == 0) { 635 spi->sync_period = 0; 636 spi->ppr_options = 0; 637 } 638 639 ahc_set_syncrate(ahc, &devinfo, syncrate, 640 spi->sync_period, spi->sync_offset, 641 spi->ppr_options, update_type, 642 /*paused*/FALSE); 643 } 644 ahc_unlock(ahc, &s); 645 ccb->ccb_h.status = CAM_REQ_CMP; 646 xpt_done(ccb); 647#else 648 struct ahc_devinfo devinfo; 649 struct ccb_trans_settings *cts; 650 struct ahc_initiator_tinfo *tinfo; 651 struct tmode_tstate *tstate; 652 uint16_t *discenable; 653 uint16_t *tagenable; 654 u_int update_type; 655 long s; 656 657 cts = &ccb->cts; 658 ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim), 659 cts->ccb_h.target_id, 660 cts->ccb_h.target_lun, 661 SIM_CHANNEL(ahc, sim), 662 ROLE_UNKNOWN); 663 tinfo = ahc_fetch_transinfo(ahc, devinfo.channel, 664 devinfo.our_scsiid, 665 devinfo.target, &tstate); 666 update_type = 0; 667 if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) { 668 update_type |= AHC_TRANS_GOAL; 669 discenable = &tstate->discenable; 670 tagenable = &tstate->tagenable; 671 } else if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) { 672 update_type |= AHC_TRANS_USER; 673 discenable = &ahc->user_discenable; 674 tagenable = &ahc->user_tagenable; 675 } else { 676 ccb->ccb_h.status = CAM_REQ_INVALID; 677 xpt_done(ccb); 678 break; 679 } 680 681 ahc_lock(ahc, &s); 682 683 if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) { 684 if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) 685 *discenable |= devinfo.target_mask; 686 else 687 *discenable &= ~devinfo.target_mask; 688 } 689 690 if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) { 691 if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) 692 
*tagenable |= devinfo.target_mask; 693 else 694 *tagenable &= ~devinfo.target_mask; 695 } 696 697 if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) { 698 ahc_validate_width(ahc, /*tinfo limit*/NULL, 699 &cts->bus_width, ROLE_UNKNOWN); 700 ahc_set_width(ahc, &devinfo, cts->bus_width, 701 update_type, /*paused*/FALSE); 702 } 703 704 if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0) { 705 if (update_type == AHC_TRANS_USER) 706 cts->sync_offset = tinfo->user.offset; 707 else 708 cts->sync_offset = tinfo->goal.offset; 709 } 710 711 if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0) { 712 if (update_type == AHC_TRANS_USER) 713 cts->sync_period = tinfo->user.period; 714 else 715 cts->sync_period = tinfo->goal.period; 716 } 717 718 if (((cts->valid & CCB_TRANS_SYNC_RATE_VALID) != 0) 719 || ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0)) { 720 struct ahc_syncrate *syncrate; 721 u_int ppr_options; 722 u_int maxsync; 723 724 if ((ahc->features & AHC_ULTRA2) != 0) 725 maxsync = AHC_SYNCRATE_DT; 726 else if ((ahc->features & AHC_ULTRA) != 0) 727 maxsync = AHC_SYNCRATE_ULTRA; 728 else 729 maxsync = AHC_SYNCRATE_FAST; 730 731 ppr_options = 0; 732 if (cts->sync_period <= 9) 733 ppr_options = MSG_EXT_PPR_DT_REQ; 734 735 syncrate = ahc_find_syncrate(ahc, &cts->sync_period, 736 &ppr_options, 737 maxsync); 738 ahc_validate_offset(ahc, /*tinfo limit*/NULL, 739 syncrate, &cts->sync_offset, 740 MSG_EXT_WDTR_BUS_8_BIT, 741 ROLE_UNKNOWN); 742 743 /* We use a period of 0 to represent async */ 744 if (cts->sync_offset == 0) { 745 cts->sync_period = 0; 746 ppr_options = 0; 747 } 748 749 if (ppr_options == MSG_EXT_PPR_DT_REQ 750 && tinfo->user.transport_version >= 3) { 751 tinfo->goal.transport_version = 752 tinfo->user.transport_version; 753 tinfo->current.transport_version = 754 tinfo->user.transport_version; 755 } 756 757 ahc_set_syncrate(ahc, &devinfo, syncrate, 758 cts->sync_period, cts->sync_offset, 759 ppr_options, update_type, 760 /*paused*/FALSE); 761 } 762 ahc_unlock(ahc, &s); 
763 ccb->ccb_h.status = CAM_REQ_CMP; 764 xpt_done(ccb); 765#endif 766 break; 767 } 768 case XPT_GET_TRAN_SETTINGS: 769 /* Get default/user set transfer settings for the target */ 770 { 771 772 ahc_lock(ahc, &s); 773 ahc_get_tran_settings(ahc, SIM_SCSI_ID(ahc, sim), 774 SIM_CHANNEL(ahc, sim), &ccb->cts); 775 ahc_unlock(ahc, &s); 776 xpt_done(ccb); 777 break; 778 } 779 case XPT_CALC_GEOMETRY: 780 { 781 struct ccb_calc_geometry *ccg; 782 uint32_t size_mb; 783 uint32_t secs_per_cylinder; 784 int extended; 785 786 ccg = &ccb->ccg; 787 size_mb = ccg->volume_size 788 / ((1024L * 1024L) / ccg->block_size); 789 extended = SIM_IS_SCSIBUS_B(ahc, sim) 790 ? ahc->flags & AHC_EXTENDED_TRANS_B 791 : ahc->flags & AHC_EXTENDED_TRANS_A; 792 793 if (size_mb > 1024 && extended) { 794 ccg->heads = 255; 795 ccg->secs_per_track = 63; 796 } else { 797 ccg->heads = 64; 798 ccg->secs_per_track = 32; 799 } 800 secs_per_cylinder = ccg->heads * ccg->secs_per_track; 801 ccg->cylinders = ccg->volume_size / secs_per_cylinder; 802 ccb->ccb_h.status = CAM_REQ_CMP; 803 xpt_done(ccb); 804 break; 805 } 806 case XPT_RESET_BUS: /* Reset the specified SCSI bus */ 807 { 808 int found; 809 810 ahc_lock(ahc, &s); 811 found = ahc_reset_channel(ahc, SIM_CHANNEL(ahc, sim), 812 /*initiate reset*/TRUE); 813 ahc_unlock(ahc, &s); 814 if (bootverbose) { 815 xpt_print_path(SIM_PATH(ahc, sim)); 816 printf("SCSI bus reset delivered. " 817 "%d SCBs aborted.\n", found); 818 } 819 ccb->ccb_h.status = CAM_REQ_CMP; 820 xpt_done(ccb); 821 break; 822 } 823 case XPT_TERM_IO: /* Terminate the I/O process */ 824 /* XXX Implement */ 825 ccb->ccb_h.status = CAM_REQ_INVALID; 826 xpt_done(ccb); 827 break; 828 case XPT_PATH_INQ: /* Path routing inquiry */ 829 { 830 struct ccb_pathinq *cpi = &ccb->cpi; 831 832 cpi->version_num = 1; /* XXX??? 
*/ 833 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE; 834 if ((ahc->features & AHC_WIDE) != 0) 835 cpi->hba_inquiry |= PI_WIDE_16; 836 if ((ahc->features & AHC_TARGETMODE) != 0) { 837 cpi->target_sprt = PIT_PROCESSOR 838 | PIT_DISCONNECT 839 | PIT_TERM_IO; 840 } else { 841 cpi->target_sprt = 0; 842 } 843 cpi->hba_misc = 0; 844 cpi->hba_eng_cnt = 0; 845 cpi->max_target = (ahc->features & AHC_WIDE) ? 15 : 7; 846 cpi->max_lun = AHC_NUM_LUNS - 1; 847 if (SIM_IS_SCSIBUS_B(ahc, sim)) { 848 cpi->initiator_id = ahc->our_id_b; 849 if ((ahc->flags & AHC_RESET_BUS_B) == 0) 850 cpi->hba_misc |= PIM_NOBUSRESET; 851 } else { 852 cpi->initiator_id = ahc->our_id; 853 if ((ahc->flags & AHC_RESET_BUS_A) == 0) 854 cpi->hba_misc |= PIM_NOBUSRESET; 855 } 856 cpi->bus_id = cam_sim_bus(sim); 857 cpi->base_transfer_speed = 3300; 858 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 859 strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN); 860 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); 861 cpi->unit_number = cam_sim_unit(sim); 862#ifdef AHC_NEW_TRAN_SETTINGS 863 cpi->protocol = PROTO_SCSI; 864 cpi->protocol_version = SCSI_REV_2; 865 cpi->transport = XPORT_SPI; 866 cpi->transport_version = 2; 867 cpi->xport_specific.spi.ppr_options = SID_SPI_CLOCK_ST; 868 if ((ahc->features & AHC_DT) != 0) { 869 cpi->transport_version = 3; 870 cpi->xport_specific.spi.ppr_options = 871 SID_SPI_CLOCK_DT_ST; 872 } 873#endif 874 cpi->ccb_h.status = CAM_REQ_CMP; 875 xpt_done(ccb); 876 break; 877 } 878 default: 879 ccb->ccb_h.status = CAM_PROVIDE_FAIL; 880 xpt_done(ccb); 881 break; 882 } 883} 884 885static void 886ahc_get_tran_settings(struct ahc_softc *ahc, int our_id, char channel, 887 struct ccb_trans_settings *cts) 888{ 889#ifdef AHC_NEW_TRAN_SETTINGS 890 struct ahc_devinfo devinfo; 891 struct ccb_trans_settings_scsi *scsi; 892 struct ccb_trans_settings_spi *spi; 893 struct ahc_initiator_tinfo *targ_info; 894 struct tmode_tstate *tstate; 895 struct ahc_transinfo *tinfo; 896 897 scsi = &cts->proto_specific.scsi; 
898 spi = &cts->xport_specific.spi; 899 ahc_compile_devinfo(&devinfo, our_id, 900 cts->ccb_h.target_id, 901 cts->ccb_h.target_lun, 902 channel, ROLE_UNKNOWN); 903 targ_info = ahc_fetch_transinfo(ahc, devinfo.channel, 904 devinfo.our_scsiid, 905 devinfo.target, &tstate); 906 907 if (cts->type == CTS_TYPE_CURRENT_SETTINGS) 908 tinfo = &targ_info->current; 909 else 910 tinfo = &targ_info->user; 911 912 scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; 913 spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB; 914 if (cts->type == CTS_TYPE_USER_SETTINGS) { 915 if ((ahc->user_discenable & devinfo.target_mask) != 0) 916 spi->flags |= CTS_SPI_FLAGS_DISC_ENB; 917 918 if ((ahc->user_tagenable & devinfo.target_mask) != 0) 919 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; 920 } else { 921 if ((tstate->discenable & devinfo.target_mask) != 0) 922 spi->flags |= CTS_SPI_FLAGS_DISC_ENB; 923 924 if ((tstate->tagenable & devinfo.target_mask) != 0) 925 scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; 926 } 927 cts->protocol_version = tinfo->protocol_version; 928 cts->transport_version = tinfo->transport_version; 929 930 spi->sync_period = tinfo->period; 931 spi->sync_offset = tinfo->offset; 932 spi->bus_width = tinfo->width; 933 spi->ppr_options = tinfo->ppr_options; 934 935 cts->protocol = PROTO_SCSI; 936 cts->transport = XPORT_SPI; 937 spi->valid = CTS_SPI_VALID_SYNC_RATE 938 | CTS_SPI_VALID_SYNC_OFFSET 939 | CTS_SPI_VALID_BUS_WIDTH 940 | CTS_SPI_VALID_PPR_OPTIONS; 941 942 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) { 943 scsi->valid = CTS_SCSI_VALID_TQ; 944 spi->valid |= CTS_SPI_VALID_DISC; 945 } else { 946 scsi->valid = 0; 947 } 948 949 cts->ccb_h.status = CAM_REQ_CMP; 950#else 951 struct ahc_devinfo devinfo; 952 struct ahc_initiator_tinfo *targ_info; 953 struct tmode_tstate *tstate; 954 struct ahc_transinfo *tinfo; 955 long s; 956 957 ahc_compile_devinfo(&devinfo, our_id, 958 cts->ccb_h.target_id, 959 cts->ccb_h.target_lun, 960 channel, ROLE_UNKNOWN); 961 targ_info = ahc_fetch_transinfo(ahc, devinfo.channel, 962 
devinfo.our_scsiid, 963 devinfo.target, &tstate); 964 965 if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) 966 tinfo = &targ_info->current; 967 else 968 tinfo = &targ_info->user; 969 970 ahc_lock(ahc, &s); 971 972 cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB); 973 if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) == 0) { 974 if ((ahc->user_discenable & devinfo.target_mask) != 0) 975 cts->flags |= CCB_TRANS_DISC_ENB; 976 977 if ((ahc->user_tagenable & devinfo.target_mask) != 0) 978 cts->flags |= CCB_TRANS_TAG_ENB; 979 } else { 980 if ((tstate->discenable & devinfo.target_mask) != 0) 981 cts->flags |= CCB_TRANS_DISC_ENB; 982 983 if ((tstate->tagenable & devinfo.target_mask) != 0) 984 cts->flags |= CCB_TRANS_TAG_ENB; 985 } 986 cts->sync_period = tinfo->period; 987 cts->sync_offset = tinfo->offset; 988 cts->bus_width = tinfo->width; 989 990 ahc_unlock(ahc, &s); 991 992 cts->valid = CCB_TRANS_SYNC_RATE_VALID 993 | CCB_TRANS_SYNC_OFFSET_VALID 994 | CCB_TRANS_BUS_WIDTH_VALID; 995 996 if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) 997 cts->valid |= CCB_TRANS_DISC_VALID|CCB_TRANS_TQ_VALID; 998 999 cts->ccb_h.status = CAM_REQ_CMP; 1000#endif 1001} 1002 1003static void 1004ahc_async(void *callback_arg, uint32_t code, struct cam_path *path, void *arg) 1005{ 1006 struct ahc_softc *ahc; 1007 struct cam_sim *sim; 1008 1009 sim = (struct cam_sim *)callback_arg; 1010 ahc = (struct ahc_softc *)cam_sim_softc(sim); 1011 switch (code) { 1012 case AC_LOST_DEVICE: 1013 { 1014 struct ahc_devinfo devinfo; 1015 long s; 1016 1017 ahc_compile_devinfo(&devinfo, SIM_SCSI_ID(ahc, sim), 1018 xpt_path_target_id(path), 1019 xpt_path_lun_id(path), 1020 SIM_CHANNEL(ahc, sim), 1021 ROLE_UNKNOWN); 1022 1023 /* 1024 * Revert to async/narrow transfers 1025 * for the next device. 
 */
        ahc_lock(ahc, &s);
        /*
         * Revert this device to narrow/async transfers, updating both
         * the goal and the current settings.
         */
        ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
                      AHC_TRANS_GOAL|AHC_TRANS_CUR, /*paused*/FALSE);
        ahc_set_syncrate(ahc, &devinfo, /*syncrate*/NULL,
                         /*period*/0, /*offset*/0, /*ppr_options*/0,
                         AHC_TRANS_GOAL|AHC_TRANS_CUR,
                         /*paused*/FALSE);
        ahc_unlock(ahc, &s);
        break;
    }
    default:
        break;
    }
}

/*
 * bus_dma callback used to finish setting up an SCB once its data
 * buffer (if any) has been mapped:
 *   - on mapping error, fail the transaction and return the SCB;
 *   - copy the DMA segments into the SCB's SG list and sync the map;
 *   - fill in the transfer-rate fields of the hardware SCB;
 *   - arm the CAM timeout and hand the SCB to the controller.
 * Also called directly with nsegments == 0 for no-data commands.
 * "error" is the status reported by bus_dmamap_load().
 */
static void
ahc_execute_scb(void *arg, bus_dma_segment_t *dm_segs, int nsegments,
                int error)
{
    struct scb *scb;
    union ccb *ccb;
    struct ahc_softc *ahc;
    struct ahc_initiator_tinfo *tinfo;
    struct tmode_tstate *tstate;
    u_int mask;
    long s;

    scb = (struct scb *)arg;
    ccb = scb->io_ctx;
    ahc = scb->ahc_softc;

    if (error != 0) {
        /* Mapping failed: fail the CCB and release our resources. */
        if (error == EFBIG)
            ahc_set_transaction_status(scb, CAM_REQ_TOO_BIG);
        else
            ahc_set_transaction_status(scb, CAM_REQ_CMP_ERR);
        if (nsegments != 0)
            bus_dmamap_unload(ahc->buffer_dmat, scb->dmamap);
        ahc_lock(ahc, &s);
        ahc_free_scb(ahc, scb);
        ahc_unlock(ahc, &s);
        xpt_done(ccb);
        return;
    }
    if (nsegments != 0) {
        struct ahc_dma_seg *sg;
        bus_dma_segment_t *end_seg;
        bus_dmasync_op_t op;

        end_seg = dm_segs + nsegments;

        /* Copy the segments into our SG list */
        sg = scb->sg_list;
        while (dm_segs < end_seg) {
            sg->addr = dm_segs->ds_addr;
/* XXX Add in the 5th byte of the address later. */
            sg->len = dm_segs->ds_len;
            sg++;
            dm_segs++;
        }

        /*
         * Note where to find the SG entries in bus space.
         * We also set the full residual flag which the
         * sequencer will clear as soon as a data transfer
         * occurs.
         */
        scb->hscb->sgptr = scb->sg_list_phys | SG_FULL_RESID;

        if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
            op = BUS_DMASYNC_PREREAD;
        else
            op = BUS_DMASYNC_PREWRITE;

        bus_dmamap_sync(ahc->buffer_dmat, scb->dmamap, op);

        if (ccb->ccb_h.func_code == XPT_CONT_TARGET_IO) {
            struct target_data *tdata;

            /* Target-mode continue I/O: record the data phase. */
            tdata = &scb->hscb->shared_data.tdata;
            tdata->target_phases |= DPHASE_PENDING;
            if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
                tdata->data_phase = P_DATAOUT;
            else
                tdata->data_phase = P_DATAIN;

            /*
             * If the transfer is of an odd length and in the
             * "in" direction (scsi->HostBus), then it may
             * trigger a bug in the 'WideODD' feature of
             * non-Ultra2 chips.  Force the total data-length
             * to be even by adding an extra, 1 byte, SG,
             * element.  We do this even if we are not currently
             * negotiated wide as negotiation could occur before
             * this command is executed.
             */
            if ((ahc->bugs & AHC_TMODE_WIDEODD_BUG) != 0
             && (ccb->csio.dxfer_len & 0x1) != 0
             && (ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {

                nsegments++;
                if (nsegments > AHC_NSEG) {
                    /* No room for the pad segment; fail the request. */
                    ahc_set_transaction_status(scb,
                        CAM_REQ_TOO_BIG);
                    bus_dmamap_unload(ahc->buffer_dmat,
                                      scb->dmamap);
                    ahc_lock(ahc, &s);
                    ahc_free_scb(ahc, scb);
                    ahc_unlock(ahc, &s);
                    xpt_done(ccb);
                    return;
                }
                /* One-byte pad segment pointing at the bug buffer. */
                sg->addr = ahc->dma_bug_buf;
                sg->len = 1;
                sg++;
            }
        }
        /* Flag the final SG element so the sequencer stops there. */
        sg--;
        sg->len |= AHC_DMA_LAST_SEG;

        /* Copy the first SG into the "current" data pointer area */
        scb->hscb->dataptr = scb->sg_list->addr;
        scb->hscb->datacnt = scb->sg_list->len;
    } else {
        /* No data phase for this command. */
        scb->hscb->sgptr = SG_LIST_NULL;
        scb->hscb->dataptr = 0;
        scb->hscb->datacnt = 0;
    }

    scb->sg_count = nsegments;

    ahc_lock(ahc, &s);

    /*
     * Last time we need to check if this SCB needs to
     * be aborted.
     */
    if (ahc_get_transaction_status(scb) != CAM_REQ_INPROG) {
        if (nsegments != 0)
            bus_dmamap_unload(ahc->buffer_dmat,
                              scb->dmamap);
        ahc_free_scb(ahc, scb);
        ahc_unlock(ahc, &s);
        xpt_done(ccb);
        return;
    }

    /* Fetch the negotiated transfer parameters for this I_T nexus. */
    tinfo = ahc_fetch_transinfo(ahc, SCSIID_CHANNEL(ahc, scb->hscb->scsiid),
                                SCSIID_OUR_ID(scb->hscb->scsiid),
                                SCSIID_TARGET(ahc, scb->hscb->scsiid),
                                &tstate);

    mask = SCB_GET_TARGET_MASK(ahc, scb);
    scb->hscb->scsirate = tinfo->scsirate;
    scb->hscb->scsioffset = tinfo->current.offset;
    if ((tstate->ultraenb & mask) != 0)
        scb->hscb->control |= ULTRAENB;

    if ((tstate->discenable & mask) != 0
     && (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) == 0)
        scb->hscb->control |= DISCENB;

    /* If CAM asked for (re)negotiation and we have a goal, do it. */
    if ((ccb->ccb_h.flags & CAM_NEGOTIATE) != 0
     && (tinfo->goal.width != 0
      || tinfo->goal.period != 0
      || tinfo->goal.ppr_options != 0)) {
        scb->flags |= SCB_NEGOTIATE;
        scb->hscb->control |= MK_MESSAGE;
    }

    LIST_INSERT_HEAD(&ahc->pending_scbs, scb, pending_links);

    ccb->ccb_h.status |= CAM_SIM_QUEUED;

    if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
        uint64_t time;

        /* CAM_TIME_DEFAULT maps to a 5 second timeout. */
        if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT)
            ccb->ccb_h.timeout = 5 * 1000;

        /* Convert milliseconds to ticks (64-bit to avoid overflow). */
        time = ccb->ccb_h.timeout;
        time *= hz;
        time /= 1000;
        ccb->ccb_h.timeout_ch =
            timeout(ahc_timeout, (caddr_t)scb, time);
    }

    /*
     * We only allow one untagged transaction
     * per target in the initiator role unless
     * we are storing a full busy target *lun*
     * table in SCB space.
     */
    if ((scb->hscb->control & (TARGET_SCB|TAG_ENB)) == 0
     && (ahc->flags & AHC_SCB_BTT) == 0) {
        struct scb_tailq *untagged_q;

        untagged_q = &(ahc->untagged_queues[ccb->ccb_h.target_id]);
        TAILQ_INSERT_TAIL(untagged_q, scb, links.tqe);
        scb->flags |= SCB_UNTAGGEDQ;
        /*
         * If another untagged command for this target is already
         * in flight, leave the SCB queued; it will be started when
         * it reaches the head of the untagged queue.
         */
        if (TAILQ_FIRST(untagged_q) != scb) {
            ahc_unlock(ahc, &s);
            return;
        }
    }
    scb->flags |= SCB_ACTIVE;

    if ((scb->flags & SCB_TARGET_IMMEDIATE) != 0) {
        /*
         * Immediate target-mode continuation: point the sequencer
         * at this SCB directly instead of going through the qinfifo.
         */
        pause_sequencer(ahc);
        if ((ahc->flags & AHC_PAGESCBS) == 0)
            ahc_outb(ahc, SCBPTR, scb->hscb->tag);
        ahc_outb(ahc, SCB_TAG, scb->hscb->tag);
        ahc_outb(ahc, RETURN_1, CONT_MSG_LOOP);
        unpause_sequencer(ahc);
    } else {
        ahc_queue_scb(ahc, scb);
    }

    ahc_unlock(ahc, &s);
}

/* Polled-mode entry point: run the interrupt handler for this SIM. */
static void
ahc_poll(struct cam_sim *sim)
{
    ahc_intr(cam_sim_softc(sim));
}

/*
 * Prepare an SCB from a CAM SCSI I/O request: copy the CDB into the
 * hardware SCB, then map any data buffer, invoking ahc_execute_scb()
 * either directly or as the bus_dmamap_load() callback.
 */
static void
ahc_setup_data(struct ahc_softc *ahc, struct cam_sim *sim,
               struct ccb_scsiio *csio, struct scb *scb)
{
    struct hardware_scb *hscb;
    struct ccb_hdr *ccb_h;

    hscb = scb->hscb;
    ccb_h = &csio->ccb_h;

    if (ccb_h->func_code == XPT_SCSI_IO) {
        hscb->cdb_len = csio->cdb_len;
        if ((ccb_h->flags & CAM_CDB_POINTER) != 0) {

            /*
             * CDBs larger than our cdb32 area, or physical CDB
             * pointers, are unsupported; fail the request.
             */
            if (hscb->cdb_len > sizeof(hscb->cdb32)
             || (ccb_h->flags & CAM_CDB_PHYS) != 0) {
                u_long s;

                ahc_set_transaction_status(scb,
                                           CAM_REQ_INVALID);
                ahc_lock(ahc, &s);
                ahc_free_scb(ahc, scb);
                ahc_unlock(ahc, &s);
                xpt_done((union ccb *)csio);
                return;
            }
            if (hscb->cdb_len > 12) {
                /* Large CDBs live in the separate cdb32 area. */
                memcpy(hscb->cdb32,
                       csio->cdb_io.cdb_ptr,
                       hscb->cdb_len);
                scb->flags |= SCB_CDB32_PTR;
            } else {
                memcpy(hscb->shared_data.cdb,
                       csio->cdb_io.cdb_ptr,
                       hscb->cdb_len);
            }
        } else {
            /* CDB bytes are embedded in the CCB itself. */
            if (hscb->cdb_len > 12) {
                memcpy(hscb->cdb32, csio->cdb_io.cdb_bytes,
                       hscb->cdb_len);
                scb->flags |= SCB_CDB32_PTR;
            } else {
                memcpy(hscb->shared_data.cdb,
                       csio->cdb_io.cdb_bytes,
                       hscb->cdb_len);
            }
        }
    }

    /* Only use S/G if there is a transfer */
    if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
        if ((ccb_h->flags & CAM_SCATTER_VALID) == 0) {
            /* We've been given a pointer to a single buffer */
            if ((ccb_h->flags & CAM_DATA_PHYS) == 0) {
                int s;
                int error;

                s = splsoftvm();
                error = bus_dmamap_load(ahc->buffer_dmat,
                                        scb->dmamap,
                                        csio->data_ptr,
                                        csio->dxfer_len,
                                        ahc_execute_scb,
                                        scb, /*flags*/0);
                if (error == EINPROGRESS) {
                    /*
                     * So as to maintain ordering,
                     * freeze the controller queue
                     * until our mapping is
                     * returned.
                     */
                    xpt_freeze_simq(sim,
                                    /*count*/1);
                    scb->io_ctx->ccb_h.status |=
                        CAM_RELEASE_SIMQ;
                }
                splx(s);
            } else {
                struct bus_dma_segment seg;

                /* Pointer to physical buffer */
                if (csio->dxfer_len > AHC_MAXTRANSFER_SIZE)
                    panic("ahc_setup_data - Transfer size "
                          "larger than can device max");

                seg.ds_addr = (bus_addr_t)csio->data_ptr;
                seg.ds_len = csio->dxfer_len;
                ahc_execute_scb(scb, &seg, 1, 0);
            }
        } else {
            struct bus_dma_segment *segs;

            if ((ccb_h->flags & CAM_DATA_PHYS) != 0)
                panic("ahc_setup_data - Physical segment "
                      "pointers unsupported");

            if ((ccb_h->flags & CAM_SG_LIST_PHYS) == 0)
                panic("ahc_setup_data - Virtual segment "
                      "addresses unsupported");

            /* Just use the segments provided */
            segs = (struct bus_dma_segment *)csio->data_ptr;
            ahc_execute_scb(scb, segs, csio->sglist_cnt, 0);
        }
    } else {
        /* No data to transfer; run the completion path directly. */
        ahc_execute_scb(scb, NULL, 0, 0);
    }
}

/*
 * Mark an SCB as the recovery SCB (at most one recovery attempt is in
 * flight at a time): freeze the SIM queue so no new CCBs arrive, and
 * cancel the timeouts on all pending SCBs while recovery proceeds.
 */
static void
ahc_set_recoveryscb(struct ahc_softc *ahc, struct scb *scb) {

    if ((scb->flags & SCB_RECOVERY_SCB) == 0) {
        struct scb *list_scb;

        scb->flags |= SCB_RECOVERY_SCB;

        /*
         *
 Take all queued, but not sent SCBs out of the equation.
         * Also ensure that no new CCBs are queued to us while we
         * try to fix this problem.
         */
        if ((scb->io_ctx->ccb_h.status & CAM_RELEASE_SIMQ) == 0) {
            xpt_freeze_simq(SCB_GET_SIM(ahc, scb), /*count*/1);
            scb->io_ctx->ccb_h.status |= CAM_RELEASE_SIMQ;
        }

        /*
         * Go through all of our pending SCBs and remove
         * any scheduled timeouts for them.  We will reschedule
         * them after we've successfully fixed this problem.
         */
        LIST_FOREACH(list_scb, &ahc->pending_scbs, pending_links) {
            union ccb *ccb;

            ccb = list_scb->io_ctx;
            untimeout(ahc_timeout, list_scb, ccb->ccb_h.timeout_ch);
        }
    }
}

/*
 * Callout handler for an SCB that has exceeded its CAM timeout.
 * Pauses the controller, dumps diagnostic state, then attempts
 * escalating recovery:
 *   - if another SCB is active on the bus, defer to its (longer)
 *     timeout and reschedule ourselves;
 *   - otherwise queue or message a Bus Device Reset to the target;
 *   - if this SCB was already being recovered (SCB_DEVICE_RESET or
 *     SCB_ABORT set), fall through to a full channel bus reset.
 */
void
ahc_timeout(void *arg)
{
    struct scb *scb;
    struct ahc_softc *ahc;
    long s;
    int found;
    u_int last_phase;
    int target;
    int lun;
    int i;
    char channel;

    scb = (struct scb *)arg;
    ahc = (struct ahc_softc *)scb->ahc_softc;

    ahc_lock(ahc, &s);

    /* Pause the sequencer and drain any completions already posted. */
    ahc_pause_and_flushwork(ahc);

    if ((scb->flags & SCB_ACTIVE) == 0) {
        /* Previous timeout took care of me already */
        printf("%s: Timedout SCB already complete. "
               "Interrupts may not be functioning.\n", ahc_name(ahc));
        unpause_sequencer(ahc);
        ahc_unlock(ahc, &s);
        return;
    }

    target = SCB_GET_TARGET(ahc, scb);
    channel = SCB_GET_CHANNEL(ahc, scb);
    lun = SCB_GET_LUN(scb);

    ahc_print_path(ahc, scb);
    printf("SCB 0x%x - timed out ", scb->hscb->tag);
    /*
     * Take a snapshot of the bus state and print out
     * some information so we can track down driver bugs.
     */
    last_phase = ahc_inb(ahc, LASTPHASE);

    /* Map the phase code to text; phase_table ends in a catch-all. */
    for (i = 0; i < num_phases; i++) {
        if (last_phase == phase_table[i].phase)
            break;
    }
    printf("%s", phase_table[i].phasemsg);

    printf(", SEQADDR == 0x%x\n",
           ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8));

    /*
     * NOTE(review): STACK is read eight times below to print four
     * 16-bit values -- presumably each read pops/advances the
     * sequencer stack; confirm against the chip reference before
     * modifying.
     */
    printf("STACK == 0x%x, 0x%x, 0x%x, 0x%x\n",
           ahc_inb(ahc, STACK) | (ahc_inb(ahc, STACK) << 8),
           ahc_inb(ahc, STACK) | (ahc_inb(ahc, STACK) << 8),
           ahc_inb(ahc, STACK) | (ahc_inb(ahc, STACK) << 8),
           ahc_inb(ahc, STACK) | (ahc_inb(ahc, STACK) << 8));

    printf("SXFRCTL0 == 0x%x\n", ahc_inb(ahc, SXFRCTL0));

    ahc_dump_card_state(ahc);
    if (scb->sg_count > 0) {
        for (i = 0; i < scb->sg_count; i++) {
            printf("sg[%d] - Addr 0x%x : Length %d\n",
                   i,
                   scb->sg_list[i].addr,
                   scb->sg_list[i].len & AHC_SG_LEN_MASK);
        }
    }
    if (scb->flags & (SCB_DEVICE_RESET|SCB_ABORT)) {
        /*
         * Been down this road before.
         * Do a full bus reset.
         */
bus_reset:
        ahc_set_transaction_status(scb, CAM_CMD_TIMEOUT);
        found = ahc_reset_channel(ahc, channel, /*Initiate Reset*/TRUE);
        printf("%s: Issued Channel %c Bus Reset. "
               "%d SCBs aborted\n", ahc_name(ahc), channel, found);
    } else {
        /*
         * If we are a target, transition to bus free and report
         * the timeout.
         *
         * The target/initiator that is holding up the bus may not
         * be the same as the one that triggered this timeout
         * (different commands have different timeout lengths).
         * If the bus is idle and we are acting as the initiator
         * for this request, queue a BDR message to the timed out
         * target.  Otherwise, if the timed out transaction is
         * active:
         *   Initiator transaction:
         *      Stuff the message buffer with a BDR message and assert
         *      ATN in the hopes that the target will let go of the bus
         *      and go to the mesgout phase.  If this fails, we'll
         *      get another timeout 2 seconds later which will attempt
         *      a bus reset.
         *
         *   Target transaction:
         *      Transition to BUS FREE and report the error.
         *      It's good to be the target!
         */
        u_int active_scb_index;
        u_int saved_scbptr;

        saved_scbptr = ahc_inb(ahc, SCBPTR);
        active_scb_index = ahc_inb(ahc, SCB_TAG);

        if (last_phase != P_BUSFREE
         && (ahc_inb(ahc, SEQ_FLAGS) & IDENTIFY_SEEN) != 0
         && (active_scb_index < ahc->scb_data->numscbs)) {
            struct scb *active_scb;

            /*
             * If the active SCB is not us, assume that
             * the active SCB has a longer timeout than
             * the timedout SCB, and wait for the active
             * SCB to timeout.
             */
            active_scb = ahc_lookup_scb(ahc, active_scb_index);
            if (active_scb != scb) {
                struct ccb_hdr *ccbh;
                uint64_t newtimeout;

                ahc_print_path(ahc, scb);
                printf("Other SCB Timeout%s",
                       (scb->flags & SCB_OTHERTCL_TIMEOUT) != 0
                       ? " again\n" : "\n");
                scb->flags |= SCB_OTHERTCL_TIMEOUT;
                /* Reschedule with the larger of the two timeouts. */
                newtimeout =
                    MAX(active_scb->io_ctx->ccb_h.timeout,
                        scb->io_ctx->ccb_h.timeout);
                newtimeout *= hz;
                newtimeout /= 1000;
                /* NOTE(review): ccbh is assigned but never used. */
                ccbh = &scb->io_ctx->ccb_h;
                scb->io_ctx->ccb_h.timeout_ch =
                    timeout(ahc_timeout, scb, newtimeout);
                ahc_unlock(ahc, &s);
                return;
            }

            /* It's us */
            if ((scb->hscb->control & TARGET_SCB) != 0) {

                /*
                 * Send back any queued up transactions
                 * and properly record the error condition.
                 */
                ahc_freeze_devq(ahc, scb);
                ahc_set_transaction_status(scb,
                                           CAM_CMD_TIMEOUT);
                ahc_freeze_scb(scb);
                ahc_done(ahc, scb);

                /* Will clear us from the bus */
                restart_sequencer(ahc);
                ahc_unlock(ahc, &s);
                return;
            }

            /*
             * Initiator transaction still on the bus: stuff a BDR
             * message into the message buffer and assert ATN.
             */
            ahc_set_recoveryscb(ahc, active_scb);
            ahc_outb(ahc, MSG_OUT, HOST_MSG);
            ahc_outb(ahc, SCSISIGO, last_phase|ATNO);
            ahc_print_path(ahc, active_scb);
            printf("BDR message in message buffer\n");
            active_scb->flags |= SCB_DEVICE_RESET;
            /* Give the BDR two seconds before escalating. */
            active_scb->io_ctx->ccb_h.timeout_ch =
                timeout(ahc_timeout, (caddr_t)active_scb, 2 * hz);
            unpause_sequencer(ahc);
        } else {
            int disconnected;

            /* XXX Shouldn't panic.  Just punt instead */
            if ((scb->hscb->control & TARGET_SCB) != 0)
                panic("Timed-out target SCB but bus idle");

            if (last_phase != P_BUSFREE
             && (ahc_inb(ahc, SSTAT0) & TARGET) != 0) {
                /* XXX What happened to the SCB? */
                /* Hung target selection.  Goto busfree */
                printf("%s: Hung target selection\n",
                       ahc_name(ahc));
                restart_sequencer(ahc);
                ahc_unlock(ahc, &s);
                return;
            }

            /* Still in the qinfifo means the device never saw it. */
            if (ahc_search_qinfifo(ahc, target, channel, lun,
                                   scb->hscb->tag, ROLE_INITIATOR,
                                   /*status*/0, SEARCH_COUNT) > 0) {
                disconnected = FALSE;
            } else {
                disconnected = TRUE;
            }

            if (disconnected) {

                ahc_set_recoveryscb(ahc, scb);
                /*
                 * Actually re-queue this SCB in an attempt
                 * to select the device before it reconnects.
                 * In either case (selection or reselection),
                 * we will now issue a target reset to the
                 * timed-out device.
                 *
                 * Set the MK_MESSAGE control bit indicating
                 * that we desire to send a message.  We
                 * also set the disconnected flag since
                 * in the paging case there is no guarantee
                 * that our SCB control byte matches the
                 * version on the card.  We don't want the
                 * sequencer to abort the command thinking
                 * an unsolicited reselection occurred.
                 */
                scb->hscb->control |= MK_MESSAGE|DISCONNECTED;
                scb->flags |= SCB_DEVICE_RESET;

                /*
                 * Remove any cached copy of this SCB in the
                 * disconnected list in preparation for the
                 * queuing of our abort SCB.  We use the
                 * same element in the SCB, SCB_NEXT, for
                 * both the qinfifo and the disconnected list.
                 */
                ahc_search_disc_list(ahc, target, channel,
                                     lun, scb->hscb->tag,
                                     /*stop_on_first*/TRUE,
                                     /*remove*/TRUE,
                                     /*save_state*/FALSE);

                /*
                 * In the non-paging case, the sequencer will
                 * never re-reference the in-core SCB.
                 * To make sure we are notified during
                 * reselection, set the MK_MESSAGE flag in
                 * the card's copy of the SCB.
                 */
                if ((ahc->flags & AHC_PAGESCBS) == 0) {
                    ahc_outb(ahc, SCBPTR, scb->hscb->tag);
                    ahc_outb(ahc, SCB_CONTROL,
                             ahc_inb(ahc, SCB_CONTROL)
                            | MK_MESSAGE);
                }

                /*
                 * Clear out any entries in the QINFIFO first
                 * so we are the next SCB for this target
                 * to run.
                 */
                ahc_search_qinfifo(ahc,
                                   SCB_GET_TARGET(ahc, scb),
                                   channel, SCB_GET_LUN(scb),
                                   SCB_LIST_NULL,
                                   ROLE_INITIATOR,
                                   CAM_REQUEUE_REQ,
                                   SEARCH_COMPLETE);
                ahc_print_path(ahc, scb);
                printf("Queuing a BDR SCB\n");
                ahc_qinfifo_requeue_tail(ahc, scb);
                ahc_outb(ahc, SCBPTR, saved_scbptr);
                /* Give the BDR two seconds before escalating. */
                scb->io_ctx->ccb_h.timeout_ch =
                    timeout(ahc_timeout, (caddr_t)scb, 2 * hz);
                unpause_sequencer(ahc);
            } else {
                /* Go "immediately" to the bus reset */
                /* This shouldn't happen */
                ahc_set_recoveryscb(ahc, scb);
                ahc_print_path(ahc, scb);
                printf("SCB %d: Immediate reset.  "
                       "Flags = 0x%x\n", scb->hscb->tag,
                       scb->flags);
                goto bus_reset;
            }
        }
    }
    ahc_unlock(ahc, &s);
}

/*
 * Handle an XPT_ABORT CCB.  Target-mode CCBs still sitting on our
 * accept/notify lists can be plucked off and completed as aborted;
 * anything already in flight (including SCSI I/O) is not yet
 * supported and is answered with CAM_UA_ABORT.
 */
static void
ahc_abort_ccb(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb)
{
    union ccb *abort_ccb;

    abort_ccb = ccb->cab.abort_ccb;
    switch (abort_ccb->ccb_h.func_code) {
    case XPT_ACCEPT_TARGET_IO:
    case XPT_IMMED_NOTIFY:
    case XPT_CONT_TARGET_IO:
    {
        struct tmode_tstate *tstate;
        struct tmode_lstate *lstate;
        struct ccb_hdr_slist *list;
        cam_status status;

        status = ahc_find_tmode_devs(ahc, sim, abort_ccb, &tstate,
                                     &lstate, TRUE);

        if (status != CAM_REQ_CMP) {
            ccb->ccb_h.status = status;
            break;
        }

        /* Pick the lun-state list the CCB would be parked on. */
        if (abort_ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO)
            list = &lstate->accept_tios;
        else if (abort_ccb->ccb_h.func_code == XPT_IMMED_NOTIFY)
            list = &lstate->immed_notifies;
        else
            list = NULL;

        if (list != NULL) {
            struct ccb_hdr *curelm;
            int found;

            /* Singly-linked list: unlink by hand. */
            curelm = SLIST_FIRST(list);
            found = 0;
            if (curelm == &abort_ccb->ccb_h) {
                found = 1;
                SLIST_REMOVE_HEAD(list, sim_links.sle);
            } else {
                while(curelm != NULL) {
                    struct ccb_hdr *nextelm;

                    nextelm =
                        SLIST_NEXT(curelm, sim_links.sle);

                    if (nextelm == &abort_ccb->ccb_h) {
                        found = 1;
                        SLIST_NEXT(curelm,
                                   sim_links.sle) =
                            SLIST_NEXT(nextelm,
                                       sim_links.sle);
                        break;
                    }
                    curelm = nextelm;
                }
            }

            if (found) {
                abort_ccb->ccb_h.status = CAM_REQ_ABORTED;
                xpt_done(abort_ccb);
                ccb->ccb_h.status = CAM_REQ_CMP;
            } else {
                xpt_print_path(abort_ccb->ccb_h.path);
                printf("Not found\n");
                ccb->ccb_h.status = CAM_PATH_INVALID;
            }
            break;
        }
        /* FALLTHROUGH */
    }
    case XPT_SCSI_IO:
        /* XXX Fully implement the hard ones */
        ccb->ccb_h.status = CAM_UA_ABORT;
        break;
    default:
        ccb->ccb_h.status = CAM_REQ_INVALID;
        break;
    }
    xpt_done(ccb);
}

/*
 * Broadcast an async event (transfer negotiation change, BDR sent,
 * or bus reset) to the CAM layer for the given channel/target/lun.
 * For AC_TRANSFER_NEG the current transfer settings are fetched and
 * passed along as the event argument.
 */
void
ahc_send_async(struct ahc_softc *ahc, char channel, u_int target,
               u_int lun, ac_code code)
{
    struct ccb_trans_settings cts;
    struct cam_path *path;
    void *arg;
    int error;

    arg = NULL;
    error = ahc_create_path(ahc, channel, target, lun, &path);

    if (error != CAM_REQ_CMP)
        return;

    switch (code) {
    case AC_TRANSFER_NEG:
        /*
         * NOTE(review): cts is only partially initialized here;
         * presumably ahc_get_tran_settings() fills in the rest --
         * verify before relying on other cts fields.
         */
#ifdef AHC_NEW_TRAN_SETTINGS
        cts.type = CTS_TYPE_CURRENT_SETTINGS;
#else
        cts.flags = CCB_TRANS_CURRENT_SETTINGS;
#endif
        cts.ccb_h.path = path;
        cts.ccb_h.target_id = target;
        cts.ccb_h.target_lun = lun;
        ahc_get_tran_settings(ahc, channel == 'A' ? ahc->our_id
                                                  : ahc->our_id_b,
                              channel, &cts);
        arg = &cts;
        break;
    case AC_SENT_BDR:
    case AC_BUS_RESET:
        break;
    default:
        panic("ahc_send_async: Unexpected async event");
    }
    xpt_async(code, path, arg);
    xpt_free_path(path);
}

/* Platform hook for tagged-queuing changes; nothing to do on FreeBSD. */
void
ahc_platform_set_tags(struct ahc_softc *ahc,
                      struct ahc_devinfo *devinfo, int enable)
{
}

/*
 * Allocate the zero-initialized FreeBSD platform data for this softc.
 * Returns 0 on success or ENOMEM on allocation failure.
 */
int
ahc_platform_alloc(struct ahc_softc *ahc, void *platform_arg)
{
    ahc->platform_data = malloc(sizeof(struct ahc_platform_data), M_DEVBUF,
                                M_NOWAIT | M_ZERO);
    if (ahc->platform_data == NULL)
        return (ENOMEM);
    return (0);
}

/*
 * Release all FreeBSD platform resources: register and IRQ bus
 * resources, the CAM SIM/path pairs for both channels, the shutdown
 * event handler, and finally the platform data itself.
 */
void
ahc_platform_free(struct ahc_softc *ahc)
{
    struct ahc_platform_data *pdata;

    pdata = ahc->platform_data;
    if (pdata != NULL) {
        device_printf(ahc->dev_softc, "Platform free\n");
        if (pdata->regs != NULL)
            bus_release_resource(ahc->dev_softc,
                                 pdata->regs_res_type,
                                 pdata->regs_res_id,
                                 pdata->regs);

        if (pdata->irq != NULL)
            bus_release_resource(ahc->dev_softc,
                                 pdata->irq_res_type,
                                 0, pdata->irq);

        /* Tear down channel B first, then the primary channel. */
        if (pdata->sim_b != NULL) {
            xpt_async(AC_LOST_DEVICE, pdata->path_b, NULL);
            xpt_free_path(pdata->path_b);
            xpt_bus_deregister(cam_sim_path(pdata->sim_b));
            cam_sim_free(pdata->sim_b, /*free_devq*/TRUE);
        }
        if (pdata->sim != NULL) {
            xpt_async(AC_LOST_DEVICE, pdata->path, NULL);
            xpt_free_path(pdata->path);
            xpt_bus_deregister(cam_sim_path(pdata->sim));
            cam_sim_free(pdata->sim, /*free_devq*/TRUE);
        }
        if (pdata->eh != NULL)
            EVENTHANDLER_DEREGISTER(shutdown_final, pdata->eh);
        free(ahc->platform_data, M_DEVBUF);
    }
}

/* Softc ordering comparator; equality keeps probe order unchanged. */
int
ahc_softc_comp(struct ahc_softc *lahc, struct ahc_softc *rahc)
{
    /* We don't sort softcs under FreeBSD so report equal always */
    return (0);
}

/*
 * Newbus detach method: disconnect the interrupt handler and free
 * the controller's software state.
 */
int
ahc_detach(device_t dev)
{
    struct ahc_softc *ahc;
    u_long s;

    device_printf(dev, "detaching device\n");
    ahc = device_get_softc(dev);
    ahc_lock(ahc, &s);
    bus_teardown_intr(dev, ahc->platform_data->irq, ahc->platform_data->ih);
    ahc_unlock(ahc, &s);
    ahc_free(ahc);
    return (0);
}

#if UNUSED
/*
 * Debug helper: hex-dump the bytes of a received target-mode command
 * block, eight values per line.
 */
static void
ahc_dump_targcmd(struct target_cmd *cmd)
{
    uint8_t *byte;
    uint8_t *last_byte;
    int i;

    byte = &cmd->initiator_channel;
    /* Debugging info for received commands */
    last_byte = &cmd[1].initiator_channel;

    i = 0;
    while (byte < last_byte) {
        if (i == 0)
            printf("\t");
        printf("%#x", *byte++);
        i++;
        if (i == 8) {
            printf("\n");
            i = 0;
        } else {
            printf(", ");
        }
    }
}
#endif