/* aic7xxx.c, FreeBSD revision 66986 */
1/* 2 * Core routines and tables shareable across OS platforms. 3 * 4 * Copyright (c) 1994, 1995, 1996, 1997, 1998, 1999, 2000 Justin T. Gibbs. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions, and the following disclaimer, 12 * without modification. 13 * 2. The name of the author may not be used to endorse or promote products 14 * derived from this software without specific prior written permission. 15 * 16 * Alternatively, this software may be distributed under the terms of the 17 * GNU Public License ("GPL"). 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 23 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 29 * SUCH DAMAGE. 
*
 * $Id: //depot/src/aic7xxx/aic7xxx.c#9 $
 *
 * $FreeBSD: head/sys/dev/aic7xxx/aic7xxx.c 66986 2000-10-11 23:46:34Z gibbs $
 */

#ifdef __linux__
#include "aic7xxx_linux.h"
#include "aic7xxx_inline.h"
#include "aicasm/aicasm_insformat.h"
#endif

#ifdef __FreeBSD__
#include <dev/aic7xxx/aic7xxx_freebsd.h>
#include <dev/aic7xxx/aic7xxx_inline.h>
#include <dev/aic7xxx/aicasm/aicasm_insformat.h>
#endif

/****************************** Softc Data ************************************/
/* List of every controller instance managed by this driver core. */
struct ahc_softc_tailq ahc_tailq = TAILQ_HEAD_INITIALIZER(ahc_tailq);

/***************************** Lookup Tables **********************************/
/* Printable chip names, indexed by the softc's chip type. */
char *ahc_chip_names[] =
{
        "NONE",
        "aic7770",
        "aic7850",
        "aic7855",
        "aic7859",
        "aic7860",
        "aic7870",
        "aic7880",
        "aic7895",
        "aic7895C",
        "aic7890/91",
        "aic7896/97",
        "aic7892",
        "aic7899"
};
const u_int num_chip_names = NUM_ELEMENTS(ahc_chip_names);

/*
 * Hardware error codes (bits of the ERROR register) and the message
 * printed for each by ahc_handle_brkadrint().
 */
struct hard_error_entry hard_error[] = {
        { ILLHADDR,     "Illegal Host Access" },
        { ILLSADDR,     "Illegal Sequencer Address referrenced" },
        { ILLOPCODE,    "Illegal Opcode in sequencer program" },
        { SQPARERR,     "Sequencer Parity Error" },
        { DPARERR,      "Data-path Parity Error" },
        { MPARERR,      "Scratch or SCB Memory Parity Error" },
        { PCIERRSTAT,   "PCI Error detected" },
        { CIOPARERR,    "CIOBUS Parity Error" },
};
const u_int num_errors = NUM_ELEMENTS(hard_error);

/*
 * Bus phase -> message to queue on a parity error in that phase,
 * plus a printable description of the phase.
 */
struct phase_table_entry phase_table[] =
{
        { P_DATAOUT,    MSG_NOOP,               "in Data-out phase"     },
        { P_DATAIN,     MSG_INITIATOR_DET_ERR,  "in Data-in phase"      },
        { P_DATAOUT_DT, MSG_NOOP,               "in DT Data-out phase"  },
        { P_DATAIN_DT,  MSG_INITIATOR_DET_ERR,  "in DT Data-in phase"   },
        { P_COMMAND,    MSG_NOOP,               "in Command phase"      },
        { P_MESGOUT,    MSG_NOOP,               "in Message-out phase"  },
        { P_STATUS,     MSG_INITIATOR_DET_ERR,  "in Status phase"       },
        { P_MESGIN,     MSG_PARITY_ERROR,       "in Message-in phase"   },
        { P_BUSFREE,    MSG_NOOP,               "while idle"            },
        { 0,            MSG_NOOP,               "in unknown phase"      }
};

/*
 * In most cases we only wish to iterate over real phases, so
 * exclude the last element from the count.
 */
const u_int num_phases = NUM_ELEMENTS(phase_table) - 1;

/*
 * Valid SCSIRATE values.  (p. 3-17)
 * Provides a mapping of transfer periods in ns to the proper value to
 * stick in the scsixfer reg.
 */
struct ahc_syncrate ahc_syncrates[] =
{
      /* ultra2    fast/ultra  period    rate */
        { 0x42,      0x000,      9,     "80.0" },
        { 0x03,      0x000,     10,     "40.0" },
        { 0x04,      0x000,     11,     "33.0" },
        { 0x05,      0x100,     12,     "20.0" },
        { 0x06,      0x110,     15,     "16.0" },
        { 0x07,      0x120,     18,     "13.4" },
        { 0x08,      0x000,     25,     "10.0" },
        { 0x19,      0x010,     31,     "8.0"  },
        { 0x1a,      0x020,     37,     "6.67" },
        { 0x1b,      0x030,     43,     "5.7"  },
        { 0x1c,      0x040,     50,     "5.0"  },
        { 0x00,      0x050,     56,     "4.4"  },
        { 0x00,      0x060,     62,     "4.0"  },
        { 0x00,      0x070,     68,     "3.6"  },
        { 0x00,      0x000,      0,     NULL   }
};

/* Our Sequencer Program */
#include "aic7xxx_seq.h"

/**************************** Function Declarations ***************************/
static struct tmode_tstate*
                ahc_alloc_tstate(struct ahc_softc *ahc,
                                 u_int scsi_id, char channel);
static void     ahc_free_tstate(struct ahc_softc *ahc,
                                u_int scsi_id, char channel, int force);
static struct ahc_syncrate*
                ahc_devlimited_syncrate(struct ahc_softc *ahc,
                                        u_int *period,
                                        u_int *ppr_options);
static void     ahc_update_target_msg_request(struct ahc_softc *ahc,
                                              struct ahc_devinfo *devinfo,
                                              struct ahc_initiator_tinfo *tinfo,
                                              int force, int paused);
static void     ahc_update_pending_syncrates(struct ahc_softc *ahc);
static void     ahc_fetch_devinfo(struct ahc_softc *ahc,
                                  struct ahc_devinfo *devinfo);
static void     ahc_scb_devinfo(struct ahc_softc *ahc,
                                struct ahc_devinfo *devinfo,
                                struct scb *scb);
static void     ahc_setup_initiator_msgout(struct ahc_softc *ahc,
                                           struct ahc_devinfo *devinfo,
                                           struct scb *scb);
static void     ahc_build_transfer_msg(struct ahc_softc *ahc,
                                       struct ahc_devinfo *devinfo);
static void     ahc_construct_sdtr(struct ahc_softc *ahc,
                                   struct ahc_devinfo *devinfo,
                                   u_int period, u_int offset);
static void     ahc_construct_wdtr(struct ahc_softc *ahc,
                                   struct ahc_devinfo *devinfo,
                                   u_int bus_width);
static void     ahc_construct_ppr(struct ahc_softc *ahc,
                                  struct ahc_devinfo *devinfo,
                                  u_int period, u_int offset,
                                  u_int bus_width, u_int ppr_options);
static void     ahc_clear_msg_state(struct ahc_softc *ahc);
static void     ahc_handle_message_phase(struct ahc_softc *ahc);
static int      ahc_sent_msg(struct ahc_softc *ahc,
                             u_int msgtype, int full);
static int      ahc_parse_msg(struct ahc_softc *ahc,
                              struct ahc_devinfo *devinfo);
static int      ahc_handle_msg_reject(struct ahc_softc *ahc,
                                      struct ahc_devinfo *devinfo);
static void     ahc_handle_ign_wide_residue(struct ahc_softc *ahc,
                                            struct ahc_devinfo *devinfo);
static void     ahc_handle_devreset(struct ahc_softc *ahc,
                                    struct ahc_devinfo *devinfo,
                                    cam_status status, char *message,
                                    int verbose_level);

static bus_dmamap_callback_t    ahc_dmamap_cb;
static int      ahc_init_scbdata(struct ahc_softc *ahc);
static void     ahc_fini_scbdata(struct ahc_softc *ahc);

static void     ahc_busy_tcl(struct ahc_softc *ahc,
                             u_int tcl, u_int busyid);
static int      ahc_match_scb(struct ahc_softc *ahc, struct scb *scb,
                              int target, char channel, int lun,
                              u_int tag, role_t role);

static u_int    ahc_rem_scb_from_disc_list(struct ahc_softc *ahc,
                                           u_int prev, u_int scbptr);
static void     ahc_add_curscb_to_free_list(struct ahc_softc *ahc);
static u_int    ahc_rem_wscb(struct ahc_softc *ahc,
                             u_int scbpos, u_int prev);
static int      ahc_abort_scbs(struct ahc_softc *ahc, int target,
                               char channel, int lun, u_int tag,
                               role_t role, uint32_t status);
static void     ahc_reset_current_bus(struct ahc_softc *ahc);
static void
ahc_calc_residual(struct scb *scb); 201#ifdef AHC_DUMP_SEQ 202static void ahc_dumpseq(struct ahc_softc *ahc); 203#endif 204static void ahc_loadseq(struct ahc_softc *ahc); 205static int ahc_check_patch(struct ahc_softc *ahc, 206 struct patch **start_patch, 207 u_int start_instr, u_int *skip_addr); 208static void ahc_download_instr(struct ahc_softc *ahc, 209 u_int instrptr, uint8_t *dconsts); 210#ifdef AHC_TARGET_MODE 211static void ahc_queue_lstate_event(struct ahc_softc *ahc, 212 struct tmode_lstate *lstate, 213 u_int initiator_id, 214 u_int event_type, 215 u_int event_arg); 216static void ahc_update_scsiid(struct ahc_softc *ahc, 217 u_int targid_mask); 218static int ahc_handle_target_cmd(struct ahc_softc *ahc, 219 struct target_cmd *cmd); 220#endif 221/************************* Sequencer Execution Control ************************/ 222/* 223 * Restart the sequencer program from address zero 224 */ 225void 226restart_sequencer(struct ahc_softc *ahc) 227{ 228 pause_sequencer(ahc); 229 ahc_outb(ahc, SCSISIGO, 0); /* De-assert BSY */ 230 ahc_outb(ahc, MSG_OUT, MSG_NOOP); /* No message to send */ 231 ahc_outb(ahc, SXFRCTL1, ahc_inb(ahc, SXFRCTL1) & ~BITBUCKET); 232 /* 233 * Ensure that the sequencer's idea of TQINPOS 234 * matches our own. The sequencer increments TQINPOS 235 * only after it sees a DMA complete and a reset could 236 * occur before the increment leaving the kernel to believe 237 * the command arrived but the sequencer to not. 
238 */ 239 ahc_outb(ahc, TQINPOS, ahc->tqinfifonext); 240 241 /* Always allow reselection */ 242 ahc_outb(ahc, SCSISEQ, 243 ahc_inb(ahc, SCSISEQ_TEMPLATE) & (ENSELI|ENRSELI|ENAUTOATNP)); 244 if ((ahc->features & AHC_CMD_CHAN) != 0) { 245 /* Ensure that no DMA operations are in progress */ 246 ahc_outb(ahc, CCSGCTL, 0); 247 ahc_outb(ahc, CCSCBCTL, 0); 248 } 249 ahc_outb(ahc, MWI_RESIDUAL, 0); 250 ahc_outb(ahc, SEQCTL, FASTMODE|SEQRESET); 251 unpause_sequencer(ahc); 252} 253 254/************************* Input/Output Queues ********************************/ 255void 256ahc_run_qoutfifo(struct ahc_softc *ahc) 257{ 258 struct scb *scb; 259 u_int scb_index; 260 261 while (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL) { 262 263 scb_index = ahc->qoutfifo[ahc->qoutfifonext]; 264 if ((ahc->qoutfifonext & 0x03) == 0x03) { 265 u_int modnext; 266 267 /* 268 * Clear 32bits of QOUTFIFO at a time 269 * so that we don't clobber an incomming 270 * byte DMA to the array on architectures 271 * that only support 32bit load and store 272 * operations. 273 */ 274 modnext = ahc->qoutfifonext & ~0x3; 275 *((uint32_t *)(&ahc->qoutfifo[modnext])) = 0xFFFFFFFFUL; 276 } 277 ahc->qoutfifonext++; 278 279 scb = ahc_lookup_scb(ahc, scb_index); 280 if (scb == NULL) { 281 printf("%s: WARNING no command for scb %d " 282 "(cmdcmplt)\nQOUTPOS = %d\n", 283 ahc_name(ahc), scb_index, 284 ahc->qoutfifonext - 1); 285 continue; 286 } 287 288 /* 289 * Save off the residual 290 * if there is one. 
291 */ 292 if (ahc_check_residual(scb) != 0) 293 ahc_calc_residual(scb); 294 else 295 ahc_set_residual(scb, 0); 296 ahc_done(ahc, scb); 297 } 298} 299 300void 301ahc_run_untagged_queues(struct ahc_softc *ahc) 302{ 303 int i; 304 305 for (i = 0; i < 16; i++) 306 ahc_run_untagged_queue(ahc, &ahc->untagged_queues[i]); 307} 308 309void 310ahc_run_untagged_queue(struct ahc_softc *ahc, struct scb_tailq *queue) 311{ 312 struct scb *scb; 313 314 if (ahc->untagged_queue_lock != 0) 315 return; 316 317 if ((scb = TAILQ_FIRST(queue)) != NULL 318 && (scb->flags & SCB_ACTIVE) == 0) { 319 scb->flags |= SCB_ACTIVE; 320 ahc_queue_scb(ahc, scb); 321 } 322} 323 324/************************* Interrupt Handling *********************************/ 325void 326ahc_handle_brkadrint(struct ahc_softc *ahc) 327{ 328 /* 329 * We upset the sequencer :-( 330 * Lookup the error message 331 */ 332 int i, error, num_errors; 333 334 error = ahc_inb(ahc, ERROR); 335 num_errors = sizeof(hard_error)/sizeof(hard_error[0]); 336 for (i = 0; error != 1 && i < num_errors; i++) 337 error >>= 1; 338 panic("%s: brkadrint, %s at seqaddr = 0x%x\n", 339 ahc_name(ahc), hard_error[i].errmesg, 340 ahc_inb(ahc, SEQADDR0) | 341 (ahc_inb(ahc, SEQADDR1) << 8)); 342 343 /* Tell everyone that this HBA is no longer availible */ 344 ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, ALL_CHANNELS, 345 CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN, 346 CAM_NO_HBA); 347} 348 349void 350ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat) 351{ 352 struct scb *scb; 353 struct ahc_devinfo devinfo; 354 355 ahc_fetch_devinfo(ahc, &devinfo); 356 357 /* 358 * Clear the upper byte that holds SEQINT status 359 * codes and clear the SEQINT bit. We will unpause 360 * the sequencer, if appropriate, after servicing 361 * the request. 
*/
        ahc_outb(ahc, CLRINT, CLRSEQINT);
        switch (intstat & SEQINT_MASK) {
        case BAD_STATUS:
        {
                u_int scb_index;
                struct hardware_scb *hscb;

                /*
                 * Set the default return value to 0 (don't
                 * send sense).  The sense code will change
                 * this if needed.
                 */
                ahc_outb(ahc, RETURN_1, 0);

                /*
                 * The sequencer will notify us when a command
                 * has an error that would be of interest to
                 * the kernel.  This allows us to leave the sequencer
                 * running in the common case of command completes
                 * without error.  The sequencer will already have
                 * dma'd the SCB back up to us, so we can reference
                 * the in kernel copy directly.
                 */
                scb_index = ahc_inb(ahc, SCB_TAG);
                scb = ahc_lookup_scb(ahc, scb_index);
                if (scb == NULL) {
                        printf("%s:%c:%d: ahc_intr - referenced scb "
                               "not valid during seqint 0x%x scb(%d)\n",
                               ahc_name(ahc), devinfo.channel,
                               devinfo.target, intstat, scb_index);
                        goto unpause;
                }

                hscb = scb->hscb;

                /* Don't want to clobber the original sense code */
                if ((scb->flags & SCB_SENSE) != 0) {
                        /*
                         * Clear the SCB_SENSE Flag and have
                         * the sequencer do a normal command
                         * complete.
                         */
                        scb->flags &= ~SCB_SENSE;
                        ahc_set_transaction_status(scb, CAM_AUTOSENSE_FAIL);
                        break;
                }
                ahc_set_transaction_status(scb, CAM_SCSI_STATUS_ERROR);
                /* Freeze the queue until the client sees the error. */
                ahc_freeze_devq(ahc, scb);
                ahc_freeze_scb(scb);
                ahc_set_scsi_status(scb, hscb->shared_data.status.scsi_status);
                switch (hscb->shared_data.status.scsi_status) {
                case SCSI_STATUS_OK:
                        printf("%s: Interrupted for staus of 0???\n",
                               ahc_name(ahc));
                        break;
                case SCSI_STATUS_CMD_TERMINATED:
                case SCSI_STATUS_CHECK_COND:
#ifdef AHC_DEBUG
                        if (ahc_debug & AHC_SHOWSENSE) {
                                ahc_print_path(ahc, scb);
                                printf("SCB %d: requests Check Status\n",
                                       scb->hscb->tag);
                        }
#endif

                        /*
                         * Build a REQUEST SENSE command in place over
                         * the SCB's CDB area and have the sequencer
                         * run it (RETURN_1 = SEND_SENSE).
                         */
                        if (ahc_perform_autosense(scb)) {
                                struct ahc_dma_seg *sg;
                                struct scsi_sense *sc;
                                struct ahc_initiator_tinfo *targ_info;
                                struct tmode_tstate *tstate;
                                struct ahc_transinfo *tinfo;

                                targ_info =
                                    ahc_fetch_transinfo(ahc,
                                                        devinfo.channel,
                                                        devinfo.our_scsiid,
                                                        devinfo.target,
                                                        &tstate);
                                tinfo = &targ_info->current;
                                sg = scb->sg_list;
                                sc = (struct scsi_sense *)
                                     (&hscb->shared_data.cdb);
                                /*
                                 * Save off the residual if there is one.
                                 */
                                if (ahc_check_residual(scb))
                                        ahc_calc_residual(scb);
                                else
                                        ahc_set_residual(scb, 0);
#ifdef AHC_DEBUG
                                if (ahc_debug & AHC_SHOWSENSE) {
                                        ahc_print_path(ahc, scb);
                                        printf("Sending Sense\n");
                                }
#endif
                                sg->addr = ahc->scb_data->sense_busaddr
                                   + (hscb->tag*sizeof(struct scsi_sense_data));
                                sg->len = ahc_get_sense_bufsize(ahc, scb);
                                sg->len |= AHC_DMA_LAST_SEG;

                                sc->opcode = REQUEST_SENSE;
                                sc->byte2 = 0;
                                /* Pre-SCSI-3 devices encode the LUN in byte2 */
                                if (tinfo->protocol_version <= SCSI_REV_2
                                 && SCB_GET_LUN(scb) < 8)
                                        sc->byte2 = SCB_GET_LUN(scb) << 5;
                                sc->unused[0] = 0;
                                sc->unused[1] = 0;
                                sc->length = sg->len;
                                sc->control = 0;

                                /*
                                 * Would be nice to preserve DISCENB here,
                                 * but due to the way we manage busy targets,
                                 * we can't.
                                 */
                                hscb->control = 0;

                                /*
                                 * This request sense could be because the
                                 * the device lost power or in some other
                                 * way has lost our transfer negotiations.
                                 * Renegotiate if appropriate.  Unit attention
                                 * errors will be reported before any data
                                 * phases occur.
                                 */
                                if (ahc_get_residual(scb)
                                 == ahc_get_transfer_length(scb)) {
                                        ahc_update_target_msg_request(ahc,
                                                              &devinfo,
                                                              targ_info,
                                                              /*force*/TRUE,
                                                              /*paused*/TRUE);
                                }
                                hscb->cdb_len = sizeof(*sc);
                                hscb->dataptr = sg->addr;
                                hscb->datacnt = sg->len;
                                hscb->sgptr = scb->sg_list_phys | SG_FULL_RESID;
                                scb->sg_count = 1;
                                scb->flags |= SCB_SENSE;
                                ahc_outb(ahc, RETURN_1, SEND_SENSE);

#ifdef __FreeBSD__
                                /*
                                 * Ensure we have enough time to actually
                                 * retrieve the sense.
                                 */
                                untimeout(ahc_timeout, (caddr_t)scb,
                                          scb->io_ctx->ccb_h.timeout_ch);
                                scb->io_ctx->ccb_h.timeout_ch =
                                    timeout(ahc_timeout, (caddr_t)scb, 5 * hz);
#endif
                        }
                        break;
                default:
                        break;
                }
                break;
        }
        case NO_MATCH:
        {
                /* Ensure we don't leave the selection hardware on */
                ahc_outb(ahc, SCSISEQ,
                         ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP));

                printf("%s:%c:%d: no active SCB for reconnecting "
                       "target - issuing BUS DEVICE RESET\n",
                       ahc_name(ahc), devinfo.channel, devinfo.target);
                printf("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, "
                       "ARG_1 == 0x%x ARG_2 = 0x%x, SEQ_FLAGS == 0x%x\n",
                       ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN),
                       ahc_inb(ahc, ARG_1), ahc_inb(ahc, ARG_2),
                       ahc_inb(ahc, SEQ_FLAGS));
                printf("SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, "
                       "SCB_TAG == 0x%x\n",
                       ahc_inb(ahc, SCB_SCSIID), ahc_inb(ahc, SCB_LUN),
                       ahc_inb(ahc, SCB_TAG));
                /* Queue a BDR message and raise ATN to deliver it. */
                ahc->msgout_buf[0] = MSG_BUS_DEV_RESET;
                ahc->msgout_len = 1;
                ahc->msgout_index = 0;
                ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
                ahc_outb(ahc, MSG_OUT, HOST_MSG);
                ahc_outb(ahc, SCSISIGO,
                         ahc_inb(ahc, LASTPHASE) | ATNO);
                break;
        }
        case SEND_REJECT:
        {
                u_int rejbyte = ahc_inb(ahc, ACCUM);
                printf("%s:%c:%d: Warning - unknown message received from "
                       "target (0x%x). Rejecting\n",
                       ahc_name(ahc), devinfo.channel, devinfo.target, rejbyte);
                break;
        }
        case NO_IDENT:
        {
                /*
                 * The reconnecting target either did not send an identify
                 * message, or did, but we didn't find an SCB to match and
                 * before it could respond to our ATN/abort, it hit a dataphase.
                 * The only safe thing to do is to blow it away with a bus
                 * reset.
                 */
                int found;

                printf("%s:%c:%d: Target did not send an IDENTIFY message. "
                       "LASTPHASE = 0x%x, SAVED_SCSIID == 0x%x\n",
                       ahc_name(ahc), devinfo.channel, devinfo.target,
                       ahc_inb(ahc, LASTPHASE), ahc_inb(ahc, SAVED_SCSIID));
                found = ahc_reset_channel(ahc, devinfo.channel,
                                          /*initiate reset*/TRUE);
                printf("%s: Issued Channel %c Bus Reset. "
                       "%d SCBs aborted\n", ahc_name(ahc), devinfo.channel,
                       found);
                return;
        }
        case IGN_WIDE_RES:
                ahc_handle_ign_wide_residue(ahc, &devinfo);
                break;
        case BAD_PHASE:
        {
                u_int lastphase;

                lastphase = ahc_inb(ahc, LASTPHASE);
                if (lastphase == P_BUSFREE) {
                        printf("%s:%c:%d: Missed busfree. Curphase = 0x%x\n",
                               ahc_name(ahc), devinfo.channel, devinfo.target,
                               ahc_inb(ahc, SCSISIGI));
                        restart_sequencer(ahc);
                        return;
                } else {
                        printf("%s:%c:%d: unknown scsi bus phase %x. "
                               "Attempting to continue\n",
                               ahc_name(ahc), devinfo.channel, devinfo.target,
                               ahc_inb(ahc, SCSISIGI));
                }
                break;
        }
        case HOST_MSG_LOOP:
        {
                /*
                 * The sequencer has encountered a message phase
                 * that requires host assistance for completion.
                 * While handling the message phase(s), we will be
                 * notified by the sequencer after each byte is
                 * transfered so we can track bus phase changes.
608 * 609 * If this is the first time we've seen a HOST_MSG_LOOP 610 * interrupt, initialize the state of the host message 611 * loop. 612 */ 613 if (ahc->msg_type == MSG_TYPE_NONE) { 614 u_int bus_phase; 615 616 bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK; 617 if (bus_phase != P_MESGIN 618 && bus_phase != P_MESGOUT) { 619 printf("ahc_intr: HOST_MSG_LOOP bad " 620 "phase 0x%x\n", 621 bus_phase); 622 /* 623 * Probably transitioned to bus free before 624 * we got here. Just punt the message. 625 */ 626 ahc_clear_intstat(ahc); 627 restart_sequencer(ahc); 628 return; 629 } 630 631 if (devinfo.role == ROLE_INITIATOR) { 632 struct scb *scb; 633 u_int scb_index; 634 635 scb_index = ahc_inb(ahc, SCB_TAG); 636 scb = ahc_lookup_scb(ahc, scb_index); 637 638 if (scb == NULL) 639 panic("HOST_MSG_LOOP with " 640 "invalid SCB %x\n", scb_index); 641 642 if (bus_phase == P_MESGOUT) 643 ahc_setup_initiator_msgout(ahc, 644 &devinfo, 645 scb); 646 else { 647 ahc->msg_type = 648 MSG_TYPE_INITIATOR_MSGIN; 649 ahc->msgin_index = 0; 650 } 651 } else { 652 if (bus_phase == P_MESGOUT) { 653 ahc->msg_type = 654 MSG_TYPE_TARGET_MSGOUT; 655 ahc->msgin_index = 0; 656 } 657#if AHC_TARGET_MODE 658 else 659 /* XXX Ever executed??? */ 660 ahc_setup_target_msgin(ahc, &devinfo); 661#endif 662 } 663 } 664 665 ahc_handle_message_phase(ahc); 666 break; 667 } 668 case PERR_DETECTED: 669 { 670 /* 671 * If we've cleared the parity error interrupt 672 * but the sequencer still believes that SCSIPERR 673 * is true, it must be that the parity error is 674 * for the currently presented byte on the bus, 675 * and we are not in a phase (data-in) where we will 676 * eventually ack this byte. Ack the byte and 677 * throw it away in the hope that the target will 678 * take us to message out to deliver the appropriate 679 * error message. 
*/
                if ((intstat & SCSIINT) == 0
                 && (ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0) {
                        u_int curphase;

                        /*
                         * The hardware will only let you ack bytes
                         * if the expected phase in SCSISIGO matches
                         * the current phase.  Make sure this is
                         * currently the case.
                         */
                        curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
                        ahc_outb(ahc, LASTPHASE, curphase);
                        ahc_outb(ahc, SCSISIGO, curphase);
                        ahc_inb(ahc, SCSIDATL);
                }
                break;
        }
        case DATA_OVERRUN:
        {
                /*
                 * When the sequencer detects an overrun, it
                 * places the controller in "BITBUCKET" mode
                 * and allows the target to complete its transfer.
                 * Unfortunately, none of the counters get updated
                 * when the controller is in this mode, so we have
                 * no way of knowing how large the overrun was.
                 */
                u_int scbindex = ahc_inb(ahc, SCB_TAG);
                u_int lastphase = ahc_inb(ahc, LASTPHASE);
                u_int i;

                /*
                 * NOTE(review): scb is dereferenced below without a NULL
                 * check, unlike the other cases in this handler which
                 * verify the ahc_lookup_scb() result — confirm SCB_TAG is
                 * always valid when DATA_OVERRUN is posted.
                 */
                scb = ahc_lookup_scb(ahc, scbindex);
                for (i = 0; i < num_phases; i++) {
                        if (lastphase == phase_table[i].phase)
                                break;
                }
                ahc_print_path(ahc, scb);
                printf("data overrun detected %s."
                       " Tag == 0x%x.\n",
                       phase_table[i].phasemsg,
                       scb->hscb->tag);
                ahc_print_path(ahc, scb);
                printf("%s seen Data Phase. Length = %ld. NumSGs = %d.\n",
                       ahc_inb(ahc, SEQ_FLAGS) & DPHASE ? "Have" : "Haven't",
                       ahc_get_transfer_length(scb), scb->sg_count);
                if (scb->sg_count > 0) {
                        for (i = 0; i < scb->sg_count; i++) {
                                printf("sg[%d] - Addr 0x%x : Length %d\n",
                                       i,
                                       scb->sg_list[i].addr,
                                       scb->sg_list[i].len & AHC_SG_LEN_MASK);
                        }
                }
                /*
                 * Set this and it will take effect when the
                 * target does a command complete.
                 */
                ahc_freeze_devq(ahc, scb);
                ahc_set_transaction_status(scb, CAM_DATA_RUN_ERR);
                ahc_freeze_scb(scb);
                break;
        }
        case TRACEPOINT:
        {
                break;
        }
        case TRACEPOINT2:
        {
                break;
        }
        default:
                printf("ahc_intr: seqint, "
                       "intstat == 0x%x, scsisigi = 0x%x\n",
                       intstat, ahc_inb(ahc, SCSISIGI));
                break;
        }
unpause:
        /*
         * The sequencer is paused immediately on
         * a SEQINT, so we should restart it when
         * we're done.
         */
        unpause_sequencer(ahc);
}

/*
 * Service a SCSI bus interrupt (SCSIINT): bus resets, parity errors,
 * unexpected busfrees, and selection timeouts.
 */
void
ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat)
{
        u_int  scb_index;
        u_int  status;
        struct scb *scb;
        char   cur_channel;
        char   intr_channel;

        /* Make sure the sequencer is in a safe location. */
        ahc_clear_critical_section(ahc);

        if ((ahc->features & AHC_TWIN) != 0
         && ((ahc_inb(ahc, SBLKCTL) & SELBUSB) != 0))
                cur_channel = 'B';
        else
                cur_channel = 'A';
        intr_channel = cur_channel;

        status = ahc_inb(ahc, SSTAT1);
        if (status == 0) {
                if ((ahc->features & AHC_TWIN) != 0) {
                        /* Try the other channel */
                        ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB);
                        status = ahc_inb(ahc, SSTAT1);
                        ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB);
                        intr_channel = (cur_channel == 'A') ? 'B' : 'A';
                }
                if (status == 0) {
                        printf("%s: Spurious SCSI interrupt\n", ahc_name(ahc));
                        ahc_outb(ahc, CLRINT, CLRSCSIINT);
                        unpause_sequencer(ahc);
                        return;
                }
        }

        /* Only trust SCB_TAG once an identify has been seen. */
        scb_index = ahc_inb(ahc, SCB_TAG);
        scb = ahc_lookup_scb(ahc, scb_index);
        if (scb != NULL
         && (ahc_inb(ahc, SEQ_FLAGS) & IDENTIFY_SEEN) == 0)
                scb = NULL;

        if ((status & SCSIRSTI) != 0) {
                printf("%s: Someone reset channel %c\n",
                       ahc_name(ahc), intr_channel);
                ahc_reset_channel(ahc, intr_channel, /* Initiate Reset */FALSE);
        } else if ((status & SCSIPERR) != 0) {
                /*
                 * Determine the bus phase and queue an appropriate message.
                 * SCSIPERR is latched true as soon as a parity error
                 * occurs.  If the sequencer acked the transfer that
                 * caused the parity error and the currently presented
                 * transfer on the bus has correct parity, SCSIPERR will
                 * be cleared by CLRSCSIPERR.  Use this to determine if
                 * we should look at the last phase the sequencer recorded,
                 * or the current phase presented on the bus.
                 */
                u_int mesg_out;
                u_int curphase;
                u_int errorphase;
                u_int lastphase;
                u_int scsirate;
                u_int i;
                u_int sstat2;

                lastphase = ahc_inb(ahc, LASTPHASE);
                curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
                sstat2 = ahc_inb(ahc, SSTAT2);
                ahc_outb(ahc, CLRSINT1, CLRSCSIPERR);
                /*
                 * For all phases save DATA, the sequencer won't
                 * automatically ack a byte that has a parity error
                 * in it.  So the only way that the current phase
                 * could be 'data-in' is if the parity error is for
                 * an already acked byte in the data phase.  During
                 * synchronous data-in transfers, we may actually
                 * ack bytes before latching the current phase in
                 * LASTPHASE, leading to the discrepancy between
                 * curphase and lastphase.
                 */
                if ((ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0
                 || curphase == P_DATAIN || curphase == P_DATAIN_DT)
                        errorphase = curphase;
                else
                        errorphase = lastphase;

                for (i = 0; i < num_phases; i++) {
                        if (errorphase == phase_table[i].phase)
                                break;
                }
                mesg_out = phase_table[i].mesg_out;
                if (scb != NULL)
                        ahc_print_path(ahc, scb);
                else
                        printf("%s:%c:%d: ", ahc_name(ahc),
                               intr_channel,
                               SCSIID_TARGET(ahc, ahc_inb(ahc, SAVED_SCSIID)));
                scsirate = ahc_inb(ahc, SCSIRATE);
                printf("parity error detected %s. "
                       "SEQADDR(0x%x) SCSIRATE(0x%x)\n",
                       phase_table[i].phasemsg,
                       ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8),
                       scsirate);

                if ((ahc->features & AHC_DT) != 0) {

                        if ((sstat2 & CRCVALERR) != 0)
                                printf("\tCRC Value Mismatch\n");
                        if ((sstat2 & CRCENDERR) != 0)
                                printf("\tNo terminal CRC packet recevied\n");
                        if ((sstat2 & CRCREQERR) != 0)
                                printf("\tIllegal CRC packet request\n");
                        if ((sstat2 & DUAL_EDGE_ERR) != 0)
                                printf("\tUnexpected %sDT Data Phase\n",
                                       (scsirate & SINGLE_EDGE) ? "" : "non-");
                }

                /*
                 * We've set the hardware to assert ATN if we
                 * get a parity error on "in" phases, so all we
                 * need to do is stuff the message buffer with
                 * the appropriate message.  "In" phases have set
                 * mesg_out to something other than MSG_NOP.
                 */
                if (mesg_out != MSG_NOOP) {
                        if (ahc->msg_type != MSG_TYPE_NONE)
                                ahc->send_msg_perror = TRUE;
                        else
                                ahc_outb(ahc, MSG_OUT, mesg_out);
                }
                ahc_outb(ahc, CLRINT, CLRSCSIINT);
                unpause_sequencer(ahc);
        } else if ((status & BUSFREE) != 0
                && (ahc_inb(ahc, SIMODE1) & ENBUSFREE) != 0) {
                /*
                 * First look at what phase we were last in.
                 * If its message out, chances are pretty good
                 * that the busfree was in response to one of
                 * our abort requests.
905 */ 906 u_int lastphase = ahc_inb(ahc, LASTPHASE); 907 u_int saved_scsiid = ahc_inb(ahc, SAVED_SCSIID); 908 u_int saved_lun = ahc_inb(ahc, SAVED_LUN); 909 u_int target = SCSIID_TARGET(ahc, saved_scsiid); 910 u_int initiator_role_id = SCSIID_OUR_ID(saved_scsiid); 911 char channel = SCSIID_CHANNEL(ahc, saved_scsiid); 912 int printerror = 1; 913 914 ahc_outb(ahc, SCSISEQ, 915 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP)); 916 if (lastphase == P_MESGOUT) { 917 u_int message; 918 u_int tag; 919 920 message = ahc->msgout_buf[ahc->msgout_index - 1]; 921 tag = SCB_LIST_NULL; 922 switch (message) { 923 case MSG_ABORT_TAG: 924 tag = scb->hscb->tag; 925 /* FALLTRHOUGH */ 926 case MSG_ABORT: 927 ahc_print_path(ahc, scb); 928 printf("SCB %d - Abort %s Completed.\n", 929 scb->hscb->tag, tag == SCB_LIST_NULL ? 930 "" : "Tag"); 931 ahc_abort_scbs(ahc, target, channel, 932 saved_lun, tag, 933 ROLE_INITIATOR, 934 CAM_REQ_ABORTED); 935 printerror = 0; 936 break; 937 case MSG_BUS_DEV_RESET: 938 { 939 struct ahc_devinfo devinfo; 940#ifdef __FreeBSD__ 941 /* 942 * Don't mark the user's request for this BDR 943 * as completing with CAM_BDR_SENT. CAM3 944 * specifies CAM_REQ_CMP. 
 */
			if (scb != NULL
			 && scb->io_ctx->ccb_h.func_code== XPT_RESET_DEV
			 && ahc_match_scb(ahc, scb, target, channel,
					  CAM_LUN_WILDCARD,
					  SCB_LIST_NULL,
					  ROLE_INITIATOR)) {
				ahc_set_transaction_status(scb, CAM_REQ_CMP);
			}
#endif
			ahc_compile_devinfo(&devinfo,
					    initiator_role_id,
					    target,
					    CAM_LUN_WILDCARD,
					    channel,
					    ROLE_INITIATOR);
			ahc_handle_devreset(ahc, &devinfo,
					    CAM_BDR_SENT,
					    "Bus Device Reset",
					    /*verbose_level*/0);
			printerror = 0;
			break;
		}
		default:
			break;
		}
	}
	if (printerror != 0) {
		u_int i;

		if (scb != NULL) {
			u_int tag;

			/* Only report a tag if this was a tagged command. */
			if ((scb->hscb->control & TAG_ENB) != 0)
				tag = scb->hscb->tag;
			else
				tag = SCB_LIST_NULL;
			ahc_print_path(ahc, scb);
			ahc_abort_scbs(ahc, target, channel,
				       SCB_GET_LUN(scb), tag,
				       ROLE_INITIATOR,
				       CAM_UNEXP_BUSFREE);
		} else {
			/*
			 * We had not fully identified this connection,
			 * so we cannot abort anything.
			 */
			printf("%s: ", ahc_name(ahc));
		}
		/* Map the last bus phase to a printable description. */
		for (i = 0; i < num_phases; i++) {
			if (lastphase == phase_table[i].phase)
				break;
		}
		printf("Unexpected busfree %s\n"
		       "SEQADDR == 0x%x\n",
		       phase_table[i].phasemsg, ahc_inb(ahc, SEQADDR0)
		       | (ahc_inb(ahc, SEQADDR1) << 8));
	}
	/*
	 * Drop any partially-delivered message state, mask further
	 * busfree interrupts, ack the interrupt, and restart the
	 * sequencer from its idle loop.
	 */
	ahc_clear_msg_state(ahc);
	ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE);
	ahc_outb(ahc, CLRSINT1, CLRBUSFREE|CLRSCSIPERR);
	ahc_outb(ahc, CLRINT, CLRSCSIINT);
	restart_sequencer(ahc);
	} else if ((status & SELTO) != 0) {
		u_int scbptr;

		/* The SCB at the head of the waiting queue timed out. */
		scbptr = ahc_inb(ahc, WAITING_SCBH);
		ahc_outb(ahc, SCBPTR, scbptr);
		scb_index = ahc_inb(ahc, SCB_TAG);

		scb = ahc_lookup_scb(ahc, scb_index);
		if (scb == NULL) {
			printf("%s: ahc_intr - referenced scb not "
			       "valid during SELTO scb(%d, %d)\n",
			       ahc_name(ahc), scbptr, scb_index);
		} else {
			ahc_set_transaction_status(scb, CAM_SEL_TIMEOUT);
			ahc_freeze_devq(ahc, scb);
		}
		/* Stop the selection */
		ahc_outb(ahc, SCSISEQ, 0);

		/* No more pending messages */
		ahc_clear_msg_state(ahc);

		/*
		 * Although the driver does not care about the
		 * 'Selection in Progress' status bit, the busy
		 * LED does.  SELINGO is only cleared by a successful
		 * selection, so we must manually clear it to ensure
		 * the LED turns off just in case no future successful
		 * selections occur (e.g. no devices on the bus).
		 */
		ahc_outb(ahc, CLRSINT0, CLRSELINGO);

		/* Clear interrupt state */
		ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRBUSFREE|CLRSCSIPERR);
		ahc_outb(ahc, CLRINT, CLRSCSIINT);
		restart_sequencer(ahc);
	} else {
		/* Unrecognized SCSI interrupt; clear it and carry on. */
		ahc_print_path(ahc, scb);
		printf("Unknown SCSIINT. Status = 0x%x\n", status);
		ahc_outb(ahc, CLRSINT1, status);
		ahc_outb(ahc, CLRINT, CLRSCSIINT);
		unpause_sequencer(ahc);
	}
}

/*
 * Single-step the sequencer until its program counter is outside of
 * every registered critical section.  Called before the host touches
 * sequencer-owned state so that data structures shared with the
 * sequencer are never modified mid-update.
 */
void
ahc_clear_critical_section(struct ahc_softc *ahc)
{
	int	stepping;
	u_int	simode0;
	u_int	simode1;

	if (ahc->num_critical_sections == 0)
		return;

	stepping = FALSE;
	simode0 = 0;
	simode1 = 0;
	for (;;) {
		struct cs *cs;
		u_int	seqaddr;
		u_int	i;

		/* Current sequencer program counter. */
		seqaddr = ahc_inb(ahc, SEQADDR0)
			| (ahc_inb(ahc, SEQADDR1) << 8);

		cs = ahc->critical_sections;
		for (i = 0; i < ahc->num_critical_sections; i++, cs++) {

			if (cs->begin < seqaddr && cs->end >= seqaddr)
				break;
		}

		/* Not inside any critical section; we are done. */
		if (i == ahc->num_critical_sections)
			break;

		if (stepping == FALSE) {

			/*
			 * Disable all interrupt sources so that the
			 * sequencer will not be stuck by a pausing
			 * interrupt condition while we attempt to
			 * leave a critical section.
			 */
			simode0 = ahc_inb(ahc, SIMODE0);
			ahc_outb(ahc, SIMODE0, 0);
			simode1 = ahc_inb(ahc, SIMODE1);
			ahc_outb(ahc, SIMODE1, 0);
			ahc_outb(ahc, CLRINT, CLRSCSIINT);
			ahc_outb(ahc, SEQCTL, ahc_inb(ahc, SEQCTL) | STEP);
			stepping = TRUE;
		}
		/* Execute one instruction and wait for the re-pause. */
		ahc_outb(ahc, HCNTRL, ahc->unpause);
		do {
			ahc_delay(200);
		} while (!sequencer_paused(ahc));
	}
	if (stepping) {
		/* Restore interrupt masks and leave single-step mode. */
		ahc_outb(ahc, SIMODE0, simode0);
		ahc_outb(ahc, SIMODE1, simode1);
		ahc_outb(ahc, SEQCTL, ahc_inb(ahc, SEQCTL) & ~STEP);
	}
}

/*
 * Clear any pending interrupt status.
 */
void
ahc_clear_intstat(struct ahc_softc *ahc)
{
	/* Clear any interrupt conditions this may have caused */
	ahc_outb(ahc, CLRSINT0, CLRSELDO|CLRSELDI|CLRSELINGO);
	ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRATNO|CLRSCSIRSTI
				|CLRBUSFREE|CLRSCSIPERR|CLRPHASECHG|
				CLRREQINIT);
	ahc_outb(ahc, CLRINT, CLRSCSIINT);
}

/**************************** Debugging Routines ******************************/
/*
 * Dump the contents of an SCB (hardware SCB fields, the first 12 CDB
 * bytes, and the S/G list) to the console for debugging.
 */
void
ahc_print_scb(struct scb *scb)
{
	int i;

	struct hardware_scb *hscb = scb->hscb;

	printf("scb:%p control:0x%x scsiid:0x%x lun:%d cdb_len:%d\n",
	       scb,
	       hscb->control,
	       hscb->scsiid,
	       hscb->lun,
	       hscb->cdb_len);
	i = 0;
	/*
	 * NOTE(review): the multiple i++ side effects within a single
	 * printf argument list are unsequenced in C, so the byte order
	 * printed is formally unspecified — consider explicit indices.
	 */
	printf("Shared Data: %#02x %#02x %#02x %#02x\n",
	       hscb->shared_data.cdb[i++],
	       hscb->shared_data.cdb[i++],
	       hscb->shared_data.cdb[i++],
	       hscb->shared_data.cdb[i++]);
	printf(" %#02x %#02x %#02x %#02x\n",
	       hscb->shared_data.cdb[i++],
	       hscb->shared_data.cdb[i++],
	       hscb->shared_data.cdb[i++],
	       hscb->shared_data.cdb[i++]);
	printf(" %#02x %#02x %#02x %#02x\n",
	       hscb->shared_data.cdb[i++],
	       hscb->shared_data.cdb[i++],
	       hscb->shared_data.cdb[i++],
	       hscb->shared_data.cdb[i++]);
	printf(" dataptr:%#x datacnt:%#x sgptr:%#x tag:%#x\n",
	       hscb->dataptr,
	       hscb->datacnt,
	       hscb->sgptr,
	       hscb->tag);
	if (scb->sg_count > 0) {
		for (i = 0; i < scb->sg_count; i++) {
			printf("sg[%d] - Addr 0x%x : Length %d\n",
			       i,
			       scb->sg_list[i].addr,
			       scb->sg_list[i].len);
		}
	}
}

/************************* Transfer Negotiation *******************************/
/*
 * Allocate per target mode instance (ID we respond to as a target)
 * transfer negotiation data structures.
 *
 * Returns the new tstate, or NULL if the M_NOWAIT allocation fails.
 * Panics if a non-master tstate is already registered for scsi_id.
 */
static struct tmode_tstate *
ahc_alloc_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel)
{
	struct tmode_tstate *master_tstate;
	struct tmode_tstate *tstate;
	int i;

	master_tstate = ahc->enabled_targets[ahc->our_id];
	/* Channel B ids live in the upper half of enabled_targets[]. */
	if (channel == 'B') {
		scsi_id += 8;
		master_tstate = ahc->enabled_targets[ahc->our_id_b + 8];
	}
	if (ahc->enabled_targets[scsi_id] != NULL
	 && ahc->enabled_targets[scsi_id] != master_tstate)
		panic("%s: ahc_alloc_tstate - Target already allocated",
		      ahc_name(ahc));
	tstate = malloc(sizeof(*tstate), M_DEVBUF, M_NOWAIT);
	if (tstate == NULL)
		return (NULL);

	/*
	 * If we have allocated a master tstate, copy user settings from
	 * the master tstate (taken from SRAM or the EEPROM) for this
	 * channel, but reset our current and goal settings to async/narrow
	 * until an initiator talks to us.
	 */
	if (master_tstate != NULL) {
		memcpy(tstate, master_tstate, sizeof(*tstate));
		memset(tstate->enabled_luns, 0, sizeof(tstate->enabled_luns));
		tstate->ultraenb = 0;
		for (i = 0; i < 16; i++) {
			memset(&tstate->transinfo[i].current, 0,
			       sizeof(tstate->transinfo[i].current));
			memset(&tstate->transinfo[i].goal, 0,
			       sizeof(tstate->transinfo[i].goal));
		}
	} else
		memset(tstate, 0, sizeof(*tstate));
	ahc->enabled_targets[scsi_id] = tstate;
	return (tstate);
}

/*
 * Free per target mode instance (ID we respond to as a target)
 * transfer negotiation data structures.
 */
static void
ahc_free_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel, int force)
{
	struct tmode_tstate *tstate;

	/* Don't clean up the entry for our initiator role */
	if ((ahc->flags & AHC_INITIATORMODE) != 0
	 && ((channel == 'B' && scsi_id == ahc->our_id_b)
	  || (channel == 'A' && scsi_id == ahc->our_id))
	 && force == FALSE)
		return;

	/* Channel B ids live in the upper half of enabled_targets[]. */
	if (channel == 'B')
		scsi_id += 8;
	tstate = ahc->enabled_targets[scsi_id];
	if (tstate != NULL)
		free(tstate, M_DEVBUF);
	/* Clear the slot so a stale pointer is never reused. */
	ahc->enabled_targets[scsi_id] = NULL;
}

/*
 * Called when we have an active connection to a target on the bus,
 * this function finds the nearest syncrate to the input period limited
 * by the capabilities of the bus connectivity of the target.
 */
struct ahc_syncrate *
ahc_devlimited_syncrate(struct ahc_softc *ahc, u_int *period,
			u_int *ppr_options) {
	u_int	maxsync;

	if ((ahc->features & AHC_ULTRA2) != 0) {
		/*
		 * LVD bus with no expander active allows DT; otherwise
		 * fall back to Ultra and strip the DT request.
		 */
		if ((ahc_inb(ahc, SBLKCTL) & ENAB40) != 0
		 && (ahc_inb(ahc, SSTAT2) & EXP_ACTIVE) == 0) {
			maxsync = AHC_SYNCRATE_DT;
		} else {
			maxsync = AHC_SYNCRATE_ULTRA;
			/* Can't do DT on an SE bus */
			*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
		}
	} else if ((ahc->features & AHC_ULTRA) != 0) {
		maxsync = AHC_SYNCRATE_ULTRA;
	} else {
		maxsync = AHC_SYNCRATE_FAST;
	}
	return (ahc_find_syncrate(ahc, period, ppr_options, maxsync));
}

/*
 * Look up the valid period to SCSIRATE conversion in our table.
 * Return the period and offset that should be sent to the target
 * if this was the beginning of an SDTR.
 */
struct ahc_syncrate *
ahc_find_syncrate(struct ahc_softc *ahc, u_int *period,
		  u_int *ppr_options, u_int maxsync)
{
	struct ahc_syncrate *syncrate;

	/* Controllers without DT support never negotiate DT. */
	if ((ahc->features & AHC_DT) == 0)
		*ppr_options &= ~MSG_EXT_PPR_DT_REQ;

	/* Walk the rate table starting from the fastest allowed entry. */
	for (syncrate = &ahc_syncrates[maxsync];
	     syncrate->rate != NULL;
	     syncrate++) {

		/*
		 * The Ultra2 table doesn't go as low
		 * as for the Fast/Ultra cards.
		 */
		if ((ahc->features & AHC_ULTRA2) != 0
		 && (syncrate->sxfr_u2 == 0))
			break;

		/* Skip any DT entries if DT is not available */
		if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0
		 && (syncrate->sxfr_u2 & DT_SXFR) != 0)
			continue;

		if (*period <= syncrate->period) {
			/*
			 * When responding to a target that requests
			 * sync, the requested rate may fall between
			 * two rates that we can output, but still be
			 * a rate that we can receive.  Because of this,
			 * we want to respond to the target with
			 * the same rate that it sent to us even
			 * if the period we use to send data to it
			 * is lower.  Only lower the response period
			 * if we must.
			 */
			if (syncrate == &ahc_syncrates[maxsync])
				*period = syncrate->period;

			/*
			 * At some speeds, we only support
			 * ST transfers.
			 */
			if ((syncrate->sxfr_u2 & ST_SXFR) != 0)
				*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
			break;
		}
	}

	if ((*period == 0)
	 || (syncrate->rate == NULL)
	 || ((ahc->features & AHC_ULTRA2) != 0
	  && (syncrate->sxfr_u2 == 0))) {
		/* Use asynchronous transfers. */
		*period = 0;
		syncrate = NULL;
		*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
	}
	return (syncrate);
}

/*
 * Convert from an entry in our syncrate table to the SCSI equivalent
 * sync "period" factor.
 */
u_int
ahc_find_period(struct ahc_softc *ahc, u_int scsirate, u_int maxsync)
{
	struct ahc_syncrate *syncrate;

	/* Mask off everything but the rate bits for this chip family. */
	if ((ahc->features & AHC_ULTRA2) != 0)
		scsirate &= SXFR_ULTRA2;
	else
		scsirate &= SXFR;

	syncrate = &ahc_syncrates[maxsync];
	while (syncrate->rate != NULL) {

		if ((ahc->features & AHC_ULTRA2) != 0) {
			/* sxfr_u2 == 0 marks the end of the Ultra2 table. */
			if (syncrate->sxfr_u2 == 0)
				break;
			else if (scsirate == (syncrate->sxfr_u2 & SXFR_ULTRA2))
				return (syncrate->period);
		} else if (scsirate == (syncrate->sxfr & SXFR)) {
			return (syncrate->period);
		}
		syncrate++;
	}
	return (0); /* async */
}

/*
 * Truncate the given synchronous offset to a value the
 * current adapter type and syncrate are capable of.
1370 */ 1371void 1372ahc_validate_offset(struct ahc_softc *ahc, struct ahc_syncrate *syncrate, 1373 u_int *offset, int wide) 1374{ 1375 u_int maxoffset; 1376 1377 /* Limit offset to what we can do */ 1378 if (syncrate == NULL) { 1379 maxoffset = 0; 1380 } else if ((ahc->features & AHC_ULTRA2) != 0) { 1381 maxoffset = MAX_OFFSET_ULTRA2; 1382 } else { 1383 if (wide) 1384 maxoffset = MAX_OFFSET_16BIT; 1385 else 1386 maxoffset = MAX_OFFSET_8BIT; 1387 } 1388 *offset = MIN(*offset, maxoffset); 1389} 1390 1391/* 1392 * Truncate the given transfer width parameter to a value the 1393 * current adapter type is capable of. 1394 */ 1395void 1396ahc_validate_width(struct ahc_softc *ahc, u_int *bus_width) 1397{ 1398 switch (*bus_width) { 1399 default: 1400 if (ahc->features & AHC_WIDE) { 1401 /* Respond Wide */ 1402 *bus_width = MSG_EXT_WDTR_BUS_16_BIT; 1403 break; 1404 } 1405 /* FALLTHROUGH */ 1406 case MSG_EXT_WDTR_BUS_8_BIT: 1407 bus_width = MSG_EXT_WDTR_BUS_8_BIT; 1408 break; 1409 } 1410} 1411 1412/* 1413 * Update the bitmask of targets for which the controller should 1414 * negotiate with at the next convenient oportunity. This currently 1415 * means the next time we send the initial identify messages for 1416 * a new transaction. 
 */
static void
ahc_update_target_msg_request(struct ahc_softc *ahc,
			      struct ahc_devinfo *devinfo,
			      struct ahc_initiator_tinfo *tinfo,
			      int force, int paused)
{
	u_int targ_msg_req_orig;

	targ_msg_req_orig = ahc->targ_msg_req;
	/*
	 * Request negotiation whenever current and goal settings differ,
	 * or unconditionally (force) when the goal is non-default.
	 */
	if (tinfo->current.period != tinfo->goal.period
	 || tinfo->current.width != tinfo->goal.width
	 || tinfo->current.offset != tinfo->goal.offset
	 || (force
	  && (tinfo->goal.period != 0
	   || tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT)))
		ahc->targ_msg_req |= devinfo->target_mask;
	else
		ahc->targ_msg_req &= ~devinfo->target_mask;

	if (ahc->targ_msg_req != targ_msg_req_orig) {
		/* Update the message request bit for this target */
		/*
		 * The sequencer must be paused while we touch its
		 * scratch RAM; skip the pause if the caller already
		 * holds it paused.
		 */
		if (!paused)
			pause_sequencer(ahc);

		ahc_outb(ahc, TARGET_MSG_REQUEST,
			 ahc->targ_msg_req & 0xFF);
		ahc_outb(ahc, TARGET_MSG_REQUEST + 1,
			 (ahc->targ_msg_req >> 8) & 0xFF);

		if (!paused)
			unpause_sequencer(ahc);
	}
}

/*
 * Update the user/goal/current tables of synchronous negotiation
 * parameters as well as, in the case of a current or active update,
 * any data structures on the host controller.  In the case of an
 * active update, the specified target is currently talking to us on
 * the bus, so the transfer parameter update must take effect
 * immediately.
 */
void
ahc_set_syncrate(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
		 struct ahc_syncrate *syncrate, u_int period,
		 u_int offset, u_int ppr_options, u_int type, int paused)
{
	struct	ahc_initiator_tinfo *tinfo;
	struct	tmode_tstate *tstate;
	u_int	old_period;
	u_int	old_offset;
	u_int	old_ppr;
	int	active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE;

	/* A NULL syncrate means asynchronous transfers. */
	if (syncrate == NULL) {
		period = 0;
		offset = 0;
	}

	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
				    devinfo->target, &tstate);
	old_period = tinfo->current.period;
	old_offset = tinfo->current.offset;
	old_ppr    = tinfo->current.ppr_options;

	if ((type & AHC_TRANS_CUR) != 0
	 && (old_period != period
	  || old_offset != offset
	  || old_ppr != ppr_options)) {
		u_int	scsirate;

		scsirate = tinfo->scsirate;
		if ((ahc->features & AHC_ULTRA2) != 0) {

			/* Rebuild the rate field for Ultra2 parts. */
			scsirate &= ~(SXFR_ULTRA2|SINGLE_EDGE|ENABLE_CRC);
			if (syncrate != NULL) {
				scsirate |= syncrate->sxfr_u2;
				if ((ppr_options & MSG_EXT_PPR_DT_REQ) != 0)
					scsirate |= ENABLE_CRC;
				else
					scsirate |= SINGLE_EDGE;
			}
		} else {

			scsirate &= ~(SXFR|SOFS);
			/*
			 * Ensure Ultra mode is set properly for
			 * this target.
			 */
			tstate->ultraenb &= ~devinfo->target_mask;
			if (syncrate != NULL) {
				if (syncrate->sxfr & ULTRA_SXFR) {
					tstate->ultraenb |=
						devinfo->target_mask;
				}
				scsirate |= syncrate->sxfr & SXFR;
				scsirate |= offset & SOFS;
			}
			if (active) {
				u_int sxfrctl0;

				/* Mirror the ultra setting into FAST20. */
				sxfrctl0 = ahc_inb(ahc, SXFRCTL0);
				sxfrctl0 &= ~FAST20;
				if (tstate->ultraenb & devinfo->target_mask)
					sxfrctl0 |= FAST20;
				ahc_outb(ahc, SXFRCTL0, sxfrctl0);
			}
		}
		if (active) {
			/* Target is on the bus; update hardware now. */
			ahc_outb(ahc, SCSIRATE, scsirate);
			if ((ahc->features & AHC_ULTRA2) != 0)
				ahc_outb(ahc, SCSIOFFSET, offset);
		}

		tinfo->scsirate = scsirate;
		tinfo->current.period = period;
		tinfo->current.offset = offset;
		tinfo->current.ppr_options = ppr_options;

		/* Update the syncrates in any pending scbs */
		ahc_update_pending_syncrates(ahc);

		ahc_send_async(ahc, devinfo->channel, devinfo->target,
			       CAM_LUN_WILDCARD, AC_TRANSFER_NEG);
		if (bootverbose) {
			if (offset != 0) {
				printf("%s: target %d synchronous at %sMHz%s, "
				       "offset = 0x%x\n", ahc_name(ahc),
				       devinfo->target, syncrate->rate,
				       (ppr_options & MSG_EXT_PPR_DT_REQ)
				       ? " DT" : "", offset);
			} else {
				printf("%s: target %d using "
				       "asynchronous transfers\n",
				       ahc_name(ahc), devinfo->target);
			}
		}
	}

	if ((type & AHC_TRANS_GOAL) != 0) {
		tinfo->goal.period = period;
		tinfo->goal.offset = offset;
		tinfo->goal.ppr_options = ppr_options;
	}

	if ((type & AHC_TRANS_USER) != 0) {
		tinfo->user.period = period;
		tinfo->user.offset = offset;
		tinfo->user.ppr_options = ppr_options;
	}

	/* Re-evaluate whether further negotiation is needed. */
	ahc_update_target_msg_request(ahc, devinfo, tinfo,
				      /*force*/FALSE,
				      paused);
}

/*
 * Update the user/goal/current tables of wide negotiation
 * parameters as well as, in the case of a current or active update,
 * any data structures on the host controller.  In the case of an
 * active update, the specified target is currently talking to us on
 * the bus, so the transfer parameter update must take effect
 * immediately.
 */
void
ahc_set_width(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
	      u_int width, u_int type, int paused)
{
	struct ahc_initiator_tinfo *tinfo;
	struct tmode_tstate *tstate;
	u_int  oldwidth;
	int    active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE;

	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
				    devinfo->target, &tstate);
	oldwidth = tinfo->current.width;

	if ((type & AHC_TRANS_CUR) != 0 && oldwidth != width) {
		u_int	scsirate;

		scsirate = tinfo->scsirate;
		scsirate &= ~WIDEXFER;
		if (width == MSG_EXT_WDTR_BUS_16_BIT)
			scsirate |= WIDEXFER;

		tinfo->scsirate = scsirate;

		if (active)
			ahc_outb(ahc, SCSIRATE, scsirate);

		tinfo->current.width = width;

		ahc_send_async(ahc, devinfo->channel, devinfo->target,
			       CAM_LUN_WILDCARD, AC_TRANSFER_NEG);
		if (bootverbose) {
			printf("%s: target %d using %dbit transfers\n",
			       ahc_name(ahc), devinfo->target,
			       8 * (0x01 << width));
		}
	}
	if ((type & AHC_TRANS_GOAL) != 0)
		tinfo->goal.width = width;
	if ((type & AHC_TRANS_USER) != 0)
		tinfo->user.width = width;

	/* Re-evaluate whether further negotiation is needed. */
	ahc_update_target_msg_request(ahc, devinfo, tinfo,
				      /*force*/FALSE, paused);
}

/*
 * Update the current state of tagged queuing for a given target.
 */
void
ahc_set_tags(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, int enable)
{
	struct ahc_initiator_tinfo *tinfo;
	struct tmode_tstate *tstate;
	uint16_t orig_tagenable;

	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
				    devinfo->target, &tstate);

	orig_tagenable = tstate->tagenable;
	if (enable)
		tstate->tagenable |= devinfo->target_mask;
	else
		tstate->tagenable &= ~devinfo->target_mask;

	/* Only notify the platform layer on an actual change. */
	if (orig_tagenable != tstate->tagenable) {
		ahc_platform_set_tags(ahc, devinfo, enable);
		ahc_send_async(ahc, devinfo->channel, devinfo->target,
			       devinfo->lun, AC_TRANSFER_NEG);
	}

}

/*
 * When the transfer settings for a connection change, update any
 * in-transit SCBs to contain the new data so the hardware will
 * be set correctly during future (re)selections.
 */
static void
ahc_update_pending_syncrates(struct ahc_softc *ahc)
{
	struct	scb *pending_scb;
	int	pending_scb_count;
	int	i;
	u_int	saved_scbptr;

	/*
	 * Traverse the pending SCB list and ensure that all of the
	 * SCBs there have the proper settings.
	 */
	pending_scb_count = 0;
	LIST_FOREACH(pending_scb, &ahc->pending_scbs, pending_links) {
		struct ahc_devinfo devinfo;
		struct hardware_scb *pending_hscb;
		struct ahc_initiator_tinfo *tinfo;
		struct tmode_tstate *tstate;

		ahc_scb_devinfo(ahc, &devinfo, pending_scb);
		tinfo = ahc_fetch_transinfo(ahc, devinfo.channel,
					    devinfo.our_scsiid,
					    devinfo.target, &tstate);
		pending_hscb = pending_scb->hscb;
		pending_hscb->control &= ~ULTRAENB;
		if ((tstate->ultraenb & devinfo.target_mask) != 0)
			pending_hscb->control |= ULTRAENB;
		pending_hscb->scsirate = tinfo->scsirate;
		pending_hscb->scsioffset = tinfo->current.offset;
		pending_scb_count++;
	}

	if (pending_scb_count == 0)
		return;

	/* Preserve the current SCB pointer while we walk card SCBs. */
	saved_scbptr = ahc_inb(ahc, SCBPTR);
	/* Ensure that the hscbs down on the card match the new information */
	for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
		struct hardware_scb *pending_hscb;
		u_int control;
		u_int scb_tag;

		ahc_outb(ahc, SCBPTR, i);
		scb_tag = ahc_inb(ahc, SCB_TAG);
		pending_scb = ahc_lookup_scb(ahc, scb_tag);
		if (pending_scb == NULL)
			continue;

		pending_hscb = pending_scb->hscb;
		control = ahc_inb(ahc, SCB_CONTROL);
		control &= ~ULTRAENB;
		if ((pending_hscb->control & ULTRAENB) != 0)
			control |= ULTRAENB;
		ahc_outb(ahc, SCB_CONTROL, control);
		ahc_outb(ahc, SCB_SCSIRATE, pending_hscb->scsirate);
		ahc_outb(ahc, SCB_SCSIOFFSET, pending_hscb->scsioffset);
	}
	ahc_outb(ahc, SCBPTR, saved_scbptr);
}

/**************************** Pathing Information *****************************/
/*
 * Determine the devinfo for the current bus connection by reading
 * role and identity information directly from the controller.
 */
static void
ahc_fetch_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
{
	u_int	saved_scsiid;
	role_t	role;
	int	our_id;

	if (ahc_inb(ahc, SSTAT0) & TARGET)
		role = ROLE_TARGET;
	else
		role = ROLE_INITIATOR;

	if (role == ROLE_TARGET
	 && (ahc->features & AHC_MULTI_TID) != 0
	 && (ahc_inb(ahc, SEQ_FLAGS) & CMDPHASE_PENDING) != 0) {
		/* We were selected, so pull our id from TARGIDIN */
		our_id = ahc_inb(ahc, TARGIDIN) & OID;
	} else if ((ahc->features & AHC_ULTRA2) != 0)
		our_id = ahc_inb(ahc, SCSIID_ULTRA2) & OID;
	else
		our_id = ahc_inb(ahc, SCSIID) & OID;

	saved_scsiid = ahc_inb(ahc, SAVED_SCSIID);
	ahc_compile_devinfo(devinfo,
			    our_id,
			    SCSIID_TARGET(ahc, saved_scsiid),
			    ahc_inb(ahc, SAVED_LUN),
			    SCSIID_CHANNEL(ahc, saved_scsiid),
			    role);
}

/*
 * Fill in a devinfo structure from its component parts.  The
 * target_offset/target_mask pair indexes per-target state; channel B
 * targets occupy offsets 8-15.
 */
void
ahc_compile_devinfo(struct ahc_devinfo *devinfo, u_int our_id, u_int target,
		    u_int lun, char channel, role_t role)
{
	devinfo->our_scsiid = our_id;
	devinfo->target = target;
	devinfo->lun = lun;
	devinfo->target_offset = target;
	devinfo->channel = channel;
	devinfo->role = role;
	if (channel == 'B')
		devinfo->target_offset += 8;
	devinfo->target_mask = (0x01 << devinfo->target_offset);
}

/*
 * Build a devinfo structure describing the connection an SCB refers to.
 */
static void
ahc_scb_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
		struct scb *scb)
{
	role_t	role;
	int	our_id;

	our_id = SCSIID_OUR_ID(scb->hscb->scsiid);
	role = ROLE_INITIATOR;
	if ((scb->hscb->control & TARGET_SCB) != 0)
		role = ROLE_TARGET;
	ahc_compile_devinfo(devinfo, our_id, SCB_GET_TARGET(ahc, scb),
			    SCB_GET_LUN(scb), SCB_GET_CHANNEL(ahc, scb), role);
}


/************************ Message Phase Processing ****************************/
/*
 * When an initiator transaction with the MK_MESSAGE flag either reconnects
 * or enters the initial message out phase, we are interrupted.  Fill our
 * outgoing message buffer with the appropriate message and begin handling
 * the message phase(s) manually.
 */
static void
ahc_setup_initiator_msgout(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
			   struct scb *scb)
{
	/*
	 * To facilitate adding multiple messages together,
	 * each routine should increment the index and len
	 * variables instead of setting them explicitly.
	 */
	ahc->msgout_index = 0;
	ahc->msgout_len = 0;

	if ((scb->flags & SCB_DEVICE_RESET) == 0
	 && ahc_inb(ahc, MSG_OUT) == MSG_IDENTIFYFLAG) {
		u_int identify_msg;

		identify_msg = MSG_IDENTIFYFLAG | SCB_GET_LUN(scb);
		if ((scb->hscb->control & DISCENB) != 0)
			identify_msg |= MSG_IDENTIFY_DISCFLAG;
		ahc->msgout_buf[ahc->msgout_index++] = identify_msg;
		ahc->msgout_len++;

		/* Tagged commands follow identify with the queue message. */
		if ((scb->hscb->control & TAG_ENB) != 0) {
			ahc->msgout_buf[ahc->msgout_index++] =
				scb->hscb->control & (TAG_ENB|SCB_TAG_TYPE);
			ahc->msgout_buf[ahc->msgout_index++] = scb->hscb->tag;
			ahc->msgout_len += 2;
		}
	}

	if (scb->flags & SCB_DEVICE_RESET) {
		ahc->msgout_buf[ahc->msgout_index++] = MSG_BUS_DEV_RESET;
		ahc->msgout_len++;
		ahc_print_path(ahc, scb);
		printf("Bus Device Reset Message Sent\n");
	} else if ((scb->flags & SCB_ABORT) != 0) {
		/* Tagged aborts must use ABORT_TAG to name the command. */
		if ((scb->hscb->control & TAG_ENB) != 0)
			ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT_TAG;
		else
			ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT;
		ahc->msgout_len++;
		ahc_print_path(ahc, scb);
		printf("Abort Message Sent\n");
	} else if ((ahc->targ_msg_req & devinfo->target_mask) != 0
		|| (scb->flags & SCB_NEGOTIATE) != 0) {
		ahc_build_transfer_msg(ahc, devinfo);
	} else {
		printf("ahc_intr: AWAITING_MSG for an SCB that "
		       "does not have a waiting message\n");
		printf("SCSIID = %x, target_mask = %x\n", scb->hscb->scsiid,
		       devinfo->target_mask);
		panic("SCB = %d, SCB Control = %x, MSG_OUT = %x "
		      "SCB flags = %x", scb->hscb->tag, scb->hscb->control,
		      ahc_inb(ahc, MSG_OUT), scb->flags);
	}

	/*
	 * Clear the MK_MESSAGE flag from the SCB so we aren't
	 * asked to send this message again.
	 */
	ahc_outb(ahc, SCB_CONTROL, ahc_inb(ahc, SCB_CONTROL) & ~MK_MESSAGE);
	ahc->msgout_index = 0;
	ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
}
/*
 * Build an appropriate transfer negotiation message for the
 * currently active target.
 */
static void
ahc_build_transfer_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
{
	/*
	 * We need to initiate transfer negotiations.
	 * If our current and goal settings are identical,
	 * we want to renegotiate due to a check condition.
	 */
	struct	ahc_initiator_tinfo *tinfo;
	struct	tmode_tstate *tstate;
	struct	ahc_syncrate *rate;
	int	dowide;
	int	dosync;
	int	doppr;
	int	use_ppr;
	u_int	period;
	u_int	ppr_options;
	u_int	offset;

	tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
				    devinfo->target, &tstate);
	dowide = tinfo->current.width != tinfo->goal.width;
	dosync = tinfo->current.period != tinfo->goal.period;
	doppr = tinfo->current.ppr_options != tinfo->goal.ppr_options;

	/* Current == goal: renegotiate anything with a non-default goal. */
	if (!dowide && !dosync && !doppr) {
		dowide = tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT;
		dosync = tinfo->goal.period != 0;
		doppr = tinfo->goal.ppr_options != 0;
	}

	if (!dowide && !dosync && !doppr) {
		panic("ahc_intr: AWAITING_MSG for negotiation, "
		      "but no negotiation needed\n");
	}

	use_ppr = (tinfo->current.transport_version >= 3) || doppr;
	/*
	 * Both the PPR message and SDTR message require the
	 * goal syncrate to be limited to what the target device
	 * is capable of handling (based on whether an LVD->SE
	 * expander is on the bus), so combine these two cases.
	 * Regardless, guarantee that if we are using WDTR and SDTR
	 * messages that WDTR comes first.
	 */
	if (use_ppr || (dosync && !dowide)) {

		period = tinfo->goal.period;
		ppr_options = tinfo->goal.ppr_options;
		if (use_ppr == 0)
			ppr_options = 0;
		/* Limit the rate/offset to what the device path allows. */
		rate = ahc_devlimited_syncrate(ahc, &period, &ppr_options);
		offset = tinfo->goal.offset;
		ahc_validate_offset(ahc, rate, &offset,
				    tinfo->current.width);
		if (use_ppr) {
			ahc_construct_ppr(ahc, devinfo, period, offset,
					  tinfo->goal.width, ppr_options);
		} else {
			ahc_construct_sdtr(ahc, devinfo, period, offset);
		}
	} else {
		/* Wide negotiation happens first; sync follows later. */
		ahc_construct_wdtr(ahc, devinfo, tinfo->goal.width);
	}
}

/*
 * Build a synchronous negotiation message in our message
 * buffer based on the input parameters.
 */
static void
ahc_construct_sdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
		   u_int period, u_int offset)
{
	/* Five-byte SDTR extended message. */
	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED;
	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_SDTR_LEN;
	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_SDTR;
	ahc->msgout_buf[ahc->msgout_index++] = period;
	ahc->msgout_buf[ahc->msgout_index++] = offset;
	ahc->msgout_len += 5;
	if (bootverbose) {
		printf("(%s:%c:%d:%d): Sending SDTR period %x, offset %x\n",
		       ahc_name(ahc), devinfo->channel, devinfo->target,
		       devinfo->lun, period, offset);
	}
}

/*
 * Build a wide negotiation message in our message
 * buffer based on the input parameters.
 */
static void
ahc_construct_wdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
		   u_int bus_width)
{
	/* Four-byte WDTR extended message. */
	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED;
	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_WDTR_LEN;
	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_WDTR;
	ahc->msgout_buf[ahc->msgout_index++] = bus_width;
	ahc->msgout_len += 4;
	if (bootverbose) {
		printf("(%s:%c:%d:%d): Sending WDTR %x\n",
		       ahc_name(ahc), devinfo->channel, devinfo->target,
		       devinfo->lun, bus_width);
	}
}

/*
 * Build a parallel protocol request message in our message
 * buffer based on the input parameters.
 */
static void
ahc_construct_ppr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
		  u_int period, u_int offset, u_int bus_width,
		  u_int ppr_options)
{
	/* Eight-byte PPR extended message; byte 4 is reserved (0). */
	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXTENDED;
	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_PPR_LEN;
	ahc->msgout_buf[ahc->msgout_index++] = MSG_EXT_PPR;
	ahc->msgout_buf[ahc->msgout_index++] = period;
	ahc->msgout_buf[ahc->msgout_index++] = 0;
	ahc->msgout_buf[ahc->msgout_index++] = offset;
	ahc->msgout_buf[ahc->msgout_index++] = bus_width;
	ahc->msgout_buf[ahc->msgout_index++] = ppr_options;
	ahc->msgout_len += 8;
	if (bootverbose) {
		printf("(%s:%c:%d:%d): Sending PPR bus_width %x, period %x, "
		       "offset %x, ppr_options %x\n", ahc_name(ahc),
		       devinfo->channel, devinfo->target, devinfo->lun,
		       bus_width, period, offset, ppr_options);
	}
}

/*
 * Clear any active message state.
 */
static void
ahc_clear_msg_state(struct ahc_softc *ahc)
{
	ahc->msgout_len = 0;
	ahc->msgin_index = 0;
	ahc->msg_type = MSG_TYPE_NONE;
	/* Tell the sequencer we have nothing pending to send. */
	ahc_outb(ahc, MSG_OUT, MSG_NOOP);
}

/*
 * Manual message loop handler.
 */
static void
ahc_handle_message_phase(struct ahc_softc *ahc)
{
	struct	ahc_devinfo devinfo;
	u_int	bus_phase;
	int	end_session;

	ahc_fetch_devinfo(ahc, &devinfo);
	end_session = FALSE;
	bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;

reswitch:
	switch (ahc->msg_type) {
	case MSG_TYPE_INITIATOR_MSGOUT:
	{
		int lastbyte;
		int phasemis;
		int msgdone;

		if (ahc->msgout_len == 0)
			panic("HOST_MSG_LOOP interrupt with no active message");

		phasemis = bus_phase != P_MESGOUT;
		if (phasemis) {
			if (bus_phase == P_MESGIN) {
				/*
				 * Change gears and see if
				 * this messages is of interest to
				 * us or should be passed back to
				 * the sequencer.
				 */
				ahc_outb(ahc, CLRSINT1, CLRATNO);
				ahc->send_msg_perror = FALSE;
				ahc->msg_type = MSG_TYPE_INITIATOR_MSGIN;
				ahc->msgin_index = 0;
				goto reswitch;
			}
			end_session = TRUE;
			break;
		}

		if (ahc->send_msg_perror) {
			/* Drop ATN and send MESSAGE PARITY ERROR instead. */
			ahc_outb(ahc, CLRSINT1, CLRATNO);
			ahc_outb(ahc, CLRSINT1, CLRREQINIT);
			ahc_outb(ahc, SCSIDATL, MSG_PARITY_ERROR);
			break;
		}

		msgdone = ahc->msgout_index == ahc->msgout_len;
		if (msgdone) {
			/*
			 * The target has requested a retry.
			 * Re-assert ATN, reset our message index to
			 * 0, and try again.
			 */
			ahc->msgout_index = 0;
			ahc_outb(ahc, SCSISIGO, ahc_inb(ahc, SCSISIGO) | ATNO);
		}

		lastbyte = ahc->msgout_index == (ahc->msgout_len - 1);
		if (lastbyte) {
			/* Last byte is signified by dropping ATN */
			ahc_outb(ahc, CLRSINT1, CLRATNO);
		}

		/*
		 * Clear our interrupt status and present
		 * the next byte on the bus.
		 */
		ahc_outb(ahc, CLRSINT1, CLRREQINIT);
		ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]);
		break;
	}
	case MSG_TYPE_INITIATOR_MSGIN:
	{
		int phasemis;
		int message_done;

		phasemis = bus_phase != P_MESGIN;

		if (phasemis) {
			ahc->msgin_index = 0;
			/*
			 * A transition to message out means the target
			 * wants our response (or a parity-error retry).
			 */
			if (bus_phase == P_MESGOUT
			 && (ahc->send_msg_perror == TRUE
			  || (ahc->msgout_len != 0
			   && ahc->msgout_index == 0))) {
				ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
				goto reswitch;
			}
			end_session = TRUE;
			break;
		}

		/* Pull the byte in without acking it */
		ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIBUSL);

		message_done = ahc_parse_msg(ahc, &devinfo);

		if (message_done) {
			/*
			 * Clear our incoming message buffer in case there
			 * is another message following this one.
			 */
			ahc->msgin_index = 0;

			/*
			 * If this message elicited a response,
			 * assert ATN so the target takes us to the
			 * message out phase.
			 */
			if (ahc->msgout_len != 0)
				ahc_outb(ahc, SCSISIGO,
					 ahc_inb(ahc, SCSISIGO) | ATNO);
		} else
			ahc->msgin_index++;

		/* Ack the byte */
		ahc_outb(ahc, CLRSINT1, CLRREQINIT);
		ahc_inb(ahc, SCSIDATL);
		break;
	}
	case MSG_TYPE_TARGET_MSGIN:
	{
		int msgdone;
		int msgout_request;

		if (ahc->msgout_len == 0)
			panic("Target MSGIN with no active message");

		/*
		 * If we interrupted a mesgout session, the initiator
		 * will not know this until our first REQ.  So, we
		 * only honor mesgout requests after we've sent our
		 * first byte.
		 */
		if ((ahc_inb(ahc, SCSISIGI) & ATNI) != 0
		 && ahc->msgout_index > 0)
			msgout_request = TRUE;
		else
			msgout_request = FALSE;

		if (msgout_request) {

			/*
			 * Change gears and see if
			 * this messages is of interest to
			 * us or should be passed back to
			 * the sequencer.
			 */
			ahc->msg_type = MSG_TYPE_TARGET_MSGOUT;
			ahc_outb(ahc, SCSISIGO, P_MESGOUT | BSYO);
			ahc->msgin_index = 0;
			/* Dummy read to REQ for first byte */
			ahc_inb(ahc, SCSIDATL);
			ahc_outb(ahc, SXFRCTL0,
				 ahc_inb(ahc, SXFRCTL0) | SPIOEN);
			break;
		}

		msgdone = ahc->msgout_index == ahc->msgout_len;
		if (msgdone) {
			ahc_outb(ahc, SXFRCTL0,
				 ahc_inb(ahc, SXFRCTL0) & ~SPIOEN);
			end_session = TRUE;
			break;
		}

		/*
		 * Present the next byte on the bus.
		 */
		ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) | SPIOEN);
		ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]);
		break;
	}
	case MSG_TYPE_TARGET_MSGOUT:
	{
		int lastbyte;
		int msgdone;

		/*
		 * The initiator signals that this is
		 * the last byte by dropping ATN.
		 */
		lastbyte = (ahc_inb(ahc, SCSISIGI) & ATNI) == 0;

		/*
		 * Read the latched byte, but turn off SPIOEN first
		 * so that we don't inadvertently cause a REQ for the
		 * next byte.
		 */
		ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) & ~SPIOEN);
		ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIDATL);
		msgdone = ahc_parse_msg(ahc, &devinfo);
		if (msgdone == MSGLOOP_TERMINATED) {
			/*
			 * The message is *really* done in that it caused
			 * us to go to bus free.  The sequencer has already
			 * been reset at this point, so pull the ejection
			 * handle.
			 */
			return;
		}

		ahc->msgin_index++;

		/*
		 * XXX Read spec about initiator dropping ATN too soon
		 *     and use msgdone to detect it.
		 */
		if (msgdone == MSGLOOP_MSGCOMPLETE) {
			ahc->msgin_index = 0;

			/*
			 * If this message elicited a response, transition
			 * to the Message in phase and send it.
			 */
			if (ahc->msgout_len != 0) {
				ahc_outb(ahc, SCSISIGO, P_MESGIN | BSYO);
				ahc_outb(ahc, SXFRCTL0,
					 ahc_inb(ahc, SXFRCTL0) | SPIOEN);
				ahc->msg_type = MSG_TYPE_TARGET_MSGIN;
				ahc->msgin_index = 0;
				break;
			}
		}

		if (lastbyte)
			end_session = TRUE;
		else {
			/* Ask for the next byte. */
			ahc_outb(ahc, SXFRCTL0,
				 ahc_inb(ahc, SXFRCTL0) | SPIOEN);
		}

		break;
	}
	default:
		panic("Unknown REQINIT message type");
	}

	if (end_session) {
		ahc_clear_msg_state(ahc);
		ahc_outb(ahc, RETURN_1, EXIT_MSG_LOOP);
	} else
		ahc_outb(ahc, RETURN_1, CONT_MSG_LOOP);
}

/*
 * See if we sent a particular extended message to the target.
 * If "full" is true, return true only if the target saw the full
 * message.  If "full" is false, return true if the target saw at
 * least the first byte of the message.
2257 */ 2258static int 2259ahc_sent_msg(struct ahc_softc *ahc, u_int msgtype, int full) 2260{ 2261 int found; 2262 u_int index; 2263 2264 found = FALSE; 2265 index = 0; 2266 2267 while (index < ahc->msgout_len) { 2268 if (ahc->msgout_buf[index] == MSG_EXTENDED) { 2269 2270 /* Found a candidate */ 2271 if (ahc->msgout_buf[index+2] == msgtype) { 2272 u_int end_index; 2273 2274 end_index = index + 1 2275 + ahc->msgout_buf[index + 1]; 2276 if (full) { 2277 if (ahc->msgout_index > end_index) 2278 found = TRUE; 2279 } else if (ahc->msgout_index > index) 2280 found = TRUE; 2281 } 2282 break; 2283 } else if (ahc->msgout_buf[index] >= MSG_SIMPLE_Q_TAG 2284 && ahc->msgout_buf[index] <= MSG_IGN_WIDE_RESIDUE) { 2285 2286 /* Skip tag type and tag id or residue param*/ 2287 index += 2; 2288 } else { 2289 /* Single byte message */ 2290 index++; 2291 } 2292 } 2293 return (found); 2294} 2295 2296/* 2297 * Wait for a complete incoming message, parse it, and respond accordingly. 2298 */ 2299static int 2300ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) 2301{ 2302 struct ahc_initiator_tinfo *tinfo; 2303 struct tmode_tstate *tstate; 2304 int reject; 2305 int done; 2306 int response; 2307 u_int targ_scsirate; 2308 2309 done = MSGLOOP_IN_PROG; 2310 response = FALSE; 2311 reject = FALSE; 2312 tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid, 2313 devinfo->target, &tstate); 2314 targ_scsirate = tinfo->scsirate; 2315 2316 /* 2317 * Parse as much of the message as is available, 2318 * rejecting it if we don't support it. When 2319 * the entire message is available and has been 2320 * handled, return MSGLOOP_MSGCOMPLETE, indicating 2321 * that we have parsed an entire message. 2322 * 2323 * In the case of extended messages, we accept the length 2324 * byte outright and perform more checking once we know the 2325 * extended message type. 
2326 */ 2327 switch (ahc->msgin_buf[0]) { 2328 case MSG_MESSAGE_REJECT: 2329 response = ahc_handle_msg_reject(ahc, devinfo); 2330 /* FALLTHROUGH */ 2331 case MSG_NOOP: 2332 done = MSGLOOP_MSGCOMPLETE; 2333 break; 2334 case MSG_EXTENDED: 2335 { 2336 /* Wait for enough of the message to begin validation */ 2337 if (ahc->msgin_index < 2) 2338 break; 2339 switch (ahc->msgin_buf[2]) { 2340 case MSG_EXT_SDTR: 2341 { 2342 struct ahc_syncrate *syncrate; 2343 u_int period; 2344 u_int ppr_options; 2345 u_int offset; 2346 u_int saved_offset; 2347 2348 if (ahc->msgin_buf[1] != MSG_EXT_SDTR_LEN) { 2349 reject = TRUE; 2350 break; 2351 } 2352 2353 /* 2354 * Wait until we have both args before validating 2355 * and acting on this message. 2356 * 2357 * Add one to MSG_EXT_SDTR_LEN to account for 2358 * the extended message preamble. 2359 */ 2360 if (ahc->msgin_index < (MSG_EXT_SDTR_LEN + 1)) 2361 break; 2362 2363 period = ahc->msgin_buf[3]; 2364 ppr_options = 0; 2365 saved_offset = offset = ahc->msgin_buf[4]; 2366 syncrate = ahc_devlimited_syncrate(ahc, &period, 2367 &ppr_options); 2368 ahc_validate_offset(ahc, syncrate, &offset, 2369 targ_scsirate & WIDEXFER); 2370 if (bootverbose) { 2371 printf("(%s:%c:%d:%d): Received " 2372 "SDTR period %x, offset %x\n\t" 2373 "Filtered to period %x, offset %x\n", 2374 ahc_name(ahc), devinfo->channel, 2375 devinfo->target, devinfo->lun, 2376 ahc->msgin_buf[3], saved_offset, 2377 period, offset); 2378 } 2379 ahc_set_syncrate(ahc, devinfo, 2380 syncrate, period, 2381 offset, ppr_options, 2382 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, 2383 /*paused*/TRUE); 2384 2385 /* 2386 * See if we initiated Sync Negotiation 2387 * and didn't have to fall down to async 2388 * transfers. 
2389 */ 2390 if (ahc_sent_msg(ahc, MSG_EXT_SDTR, /*full*/TRUE)) { 2391 /* We started it */ 2392 if (saved_offset != offset) { 2393 /* Went too low - force async */ 2394 reject = TRUE; 2395 } 2396 } else { 2397 /* 2398 * Send our own SDTR in reply 2399 */ 2400 if (bootverbose) { 2401 printf("(%s:%c:%d:%d): Target " 2402 "Initiated SDTR\n", 2403 ahc_name(ahc), devinfo->channel, 2404 devinfo->target, devinfo->lun); 2405 } 2406 ahc->msgout_index = 0; 2407 ahc->msgout_len = 0; 2408 ahc_construct_sdtr(ahc, devinfo, 2409 period, offset); 2410 ahc->msgout_index = 0; 2411 response = TRUE; 2412 } 2413 done = MSGLOOP_MSGCOMPLETE; 2414 break; 2415 } 2416 case MSG_EXT_WDTR: 2417 { 2418 u_int bus_width; 2419 u_int saved_width; 2420 u_int sending_reply; 2421 2422 sending_reply = FALSE; 2423 if (ahc->msgin_buf[1] != MSG_EXT_WDTR_LEN) { 2424 reject = TRUE; 2425 break; 2426 } 2427 2428 /* 2429 * Wait until we have our arg before validating 2430 * and acting on this message. 2431 * 2432 * Add one to MSG_EXT_WDTR_LEN to account for 2433 * the extended message preamble. 2434 */ 2435 if (ahc->msgin_index < (MSG_EXT_WDTR_LEN + 1)) 2436 break; 2437 2438 bus_width = ahc->msgin_buf[3]; 2439 saved_width = bus_width; 2440 ahc_validate_width(ahc, &bus_width); 2441 if (bootverbose) { 2442 printf("(%s:%c:%d:%d): Received WDTR " 2443 "%x filtered to %x\n", 2444 ahc_name(ahc), devinfo->channel, 2445 devinfo->target, devinfo->lun, 2446 saved_width, bus_width); 2447 } 2448 2449 if (ahc_sent_msg(ahc, MSG_EXT_WDTR, /*full*/TRUE)) { 2450 /* 2451 * Don't send a WDTR back to the 2452 * target, since we asked first. 2453 * If the width went higher than our 2454 * request, reject it. 2455 */ 2456 if (saved_width > bus_width) { 2457 reject = TRUE; 2458 printf("(%s:%c:%d:%d): requested %dBit " 2459 "transfers. 
Rejecting...\n", 2460 ahc_name(ahc), devinfo->channel, 2461 devinfo->target, devinfo->lun, 2462 8 * (0x01 << bus_width)); 2463 bus_width = 0; 2464 } 2465 } else { 2466 /* 2467 * Send our own WDTR in reply 2468 */ 2469 if (bootverbose) { 2470 printf("(%s:%c:%d:%d): Target " 2471 "Initiated WDTR\n", 2472 ahc_name(ahc), devinfo->channel, 2473 devinfo->target, devinfo->lun); 2474 } 2475 ahc->msgout_index = 0; 2476 ahc->msgout_len = 0; 2477 ahc_construct_wdtr(ahc, devinfo, bus_width); 2478 ahc->msgout_index = 0; 2479 response = TRUE; 2480 sending_reply = TRUE; 2481 } 2482 ahc_set_width(ahc, devinfo, bus_width, 2483 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, 2484 /*paused*/TRUE); 2485 /* After a wide message, we are async */ 2486 ahc_set_syncrate(ahc, devinfo, 2487 /*syncrate*/NULL, /*period*/0, 2488 /*offset*/0, /*ppr_options*/0, 2489 AHC_TRANS_ACTIVE, /*paused*/TRUE); 2490 if (sending_reply == FALSE && reject == FALSE) { 2491 2492 if (tinfo->goal.period) { 2493 ahc->msgout_index = 0; 2494 ahc->msgout_len = 0; 2495 ahc_build_transfer_msg(ahc, devinfo); 2496 ahc->msgout_index = 0; 2497 response = TRUE; 2498 } 2499 } 2500 done = MSGLOOP_MSGCOMPLETE; 2501 break; 2502 } 2503 case MSG_EXT_PPR: 2504 { 2505 struct ahc_syncrate *syncrate; 2506 u_int period; 2507 u_int offset; 2508 u_int bus_width; 2509 u_int ppr_options; 2510 u_int saved_width; 2511 u_int saved_offset; 2512 u_int saved_ppr_options; 2513 2514 if (ahc->msgin_buf[1] != MSG_EXT_PPR_LEN) { 2515 reject = TRUE; 2516 break; 2517 } 2518 2519 /* 2520 * Wait until we have all args before validating 2521 * and acting on this message. 2522 * 2523 * Add one to MSG_EXT_PPR_LEN to account for 2524 * the extended message preamble. 
2525 */ 2526 if (ahc->msgin_index < (MSG_EXT_PPR_LEN + 1)) 2527 break; 2528 2529 period = ahc->msgin_buf[3]; 2530 offset = ahc->msgin_buf[5]; 2531 bus_width = ahc->msgin_buf[6]; 2532 saved_width = bus_width; 2533 ppr_options = ahc->msgin_buf[7]; 2534 /* 2535 * According to the spec, a DT only 2536 * period factor with no DT option 2537 * set implies async. 2538 */ 2539 if ((ppr_options & MSG_EXT_PPR_DT_REQ) == 0 2540 && period == 9) 2541 offset = 0; 2542 saved_ppr_options = ppr_options; 2543 saved_offset = offset; 2544 2545 /* 2546 * Mask out any options we don't support 2547 * on any controller. Transfer options are 2548 * only available if we are negotiating wide. 2549 */ 2550 ppr_options &= MSG_EXT_PPR_DT_REQ; 2551 if (bus_width == 0) 2552 ppr_options = 0; 2553 2554 ahc_validate_width(ahc, &bus_width); 2555 syncrate = ahc_devlimited_syncrate(ahc, &period, 2556 &ppr_options); 2557 ahc_validate_offset(ahc, syncrate, &offset, bus_width); 2558 2559 if (ahc_sent_msg(ahc, MSG_EXT_PPR, /*full*/TRUE)) { 2560 /* 2561 * If we are unable to do any of the 2562 * requested options (we went too low), 2563 * then we'll have to reject the message. 
2564 */ 2565 if (saved_width > bus_width 2566 || saved_offset != offset 2567 || saved_ppr_options != ppr_options) { 2568 reject = TRUE; 2569 period = 0; 2570 offset = 0; 2571 bus_width = 0; 2572 ppr_options = 0; 2573 syncrate = NULL; 2574 } 2575 } else { 2576 printf("Target Initated PPR detected!\n"); 2577 response = TRUE; 2578 } 2579 if (bootverbose) { 2580 printf("(%s:%c:%d:%d): Received PPR width %x, " 2581 "period %x, offset %x,options %x\n" 2582 "\tFiltered to width %x, period %x, " 2583 "offset %x, options %x\n", 2584 ahc_name(ahc), devinfo->channel, 2585 devinfo->target, devinfo->lun, 2586 ahc->msgin_buf[3], saved_width, 2587 saved_offset, saved_ppr_options, 2588 bus_width, period, offset, ppr_options); 2589 } 2590 ahc_set_width(ahc, devinfo, bus_width, 2591 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, 2592 /*paused*/TRUE); 2593 ahc_set_syncrate(ahc, devinfo, 2594 syncrate, period, 2595 offset, ppr_options, 2596 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, 2597 /*paused*/TRUE); 2598 break; 2599 } 2600 default: 2601 /* Unknown extended message. Reject it. */ 2602 reject = TRUE; 2603 break; 2604 } 2605 break; 2606 } 2607 case MSG_BUS_DEV_RESET: 2608 ahc_handle_devreset(ahc, devinfo, 2609 CAM_BDR_SENT, 2610 "Bus Device Reset Received", 2611 /*verbose_level*/0); 2612 restart_sequencer(ahc); 2613 done = MSGLOOP_TERMINATED; 2614 break; 2615 case MSG_ABORT_TAG: 2616 case MSG_ABORT: 2617 case MSG_CLEAR_QUEUE: 2618#ifdef AHC_TARGET_MODE 2619 /* Target mode messages */ 2620 if (devinfo->role != ROLE_TARGET) { 2621 reject = TRUE; 2622 break; 2623 } 2624 ahc_abort_scbs(ahc, devinfo->target, devinfo->channel, 2625 devinfo->lun, 2626 ahc->msgin_buf[0] == MSG_ABORT_TAG 2627 ? 
SCB_LIST_NULL 2628 : ahc_inb(ahc, INITIATOR_TAG), 2629 ROLE_TARGET, CAM_REQ_ABORTED); 2630 2631 tstate = ahc->enabled_targets[devinfo->our_scsiid]; 2632 if (tstate != NULL) { 2633 struct tmode_lstate* lstate; 2634 2635 lstate = tstate->enabled_luns[devinfo->lun]; 2636 if (lstate != NULL) { 2637 ahc_queue_lstate_event(ahc, lstate, 2638 devinfo->our_scsiid, 2639 ahc->msgin_buf[0], 2640 /*arg*/0); 2641 ahc_send_lstate_events(ahc, lstate); 2642 } 2643 } 2644 done = MSGLOOP_MSGCOMPLETE; 2645 break; 2646#endif 2647 case MSG_TERM_IO_PROC: 2648 default: 2649 reject = TRUE; 2650 break; 2651 } 2652 2653 if (reject) { 2654 /* 2655 * Setup to reject the message. 2656 */ 2657 ahc->msgout_index = 0; 2658 ahc->msgout_len = 1; 2659 ahc->msgout_buf[0] = MSG_MESSAGE_REJECT; 2660 done = MSGLOOP_MSGCOMPLETE; 2661 response = TRUE; 2662 } 2663 2664 if (done != MSGLOOP_IN_PROG && !response) 2665 /* Clear the outgoing message buffer */ 2666 ahc->msgout_len = 0; 2667 2668 return (done); 2669} 2670 2671/* 2672 * Process a message reject message. 2673 */ 2674static int 2675ahc_handle_msg_reject(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) 2676{ 2677 /* 2678 * What we care about here is if we had an 2679 * outstanding SDTR or WDTR message for this 2680 * target. If we did, this is a signal that 2681 * the target is refusing negotiation. 2682 */ 2683 struct scb *scb; 2684 struct ahc_initiator_tinfo *tinfo; 2685 struct tmode_tstate *tstate; 2686 u_int scb_index; 2687 u_int last_msg; 2688 int response = 0; 2689 2690 scb_index = ahc_inb(ahc, SCB_TAG); 2691 scb = ahc_lookup_scb(ahc, scb_index); 2692 tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, 2693 devinfo->our_scsiid, 2694 devinfo->target, &tstate); 2695 /* Might be necessary */ 2696 last_msg = ahc_inb(ahc, LAST_MSG); 2697 2698 if (ahc_sent_msg(ahc, MSG_EXT_PPR, /*full*/FALSE)) { 2699 /* 2700 * Target does not support the PPR message. 2701 * Attempt to negotiate SPI-2 style. 
2702 */ 2703 if (bootverbose) { 2704 printf("(%s:%c:%d:%d): PPR Rejected. " 2705 "Trying WDTR/SDTR\n", 2706 ahc_name(ahc), devinfo->channel, 2707 devinfo->target, devinfo->lun); 2708 } 2709 tinfo->goal.ppr_options = 0; 2710 tinfo->current.transport_version = 2; 2711 tinfo->goal.transport_version = 2; 2712 ahc->msgout_index = 0; 2713 ahc->msgout_len = 0; 2714 ahc_build_transfer_msg(ahc, devinfo); 2715 ahc->msgout_index = 0; 2716 response = 1; 2717 } else if (ahc_sent_msg(ahc, MSG_EXT_WDTR, /*full*/FALSE)) { 2718 2719 /* note 8bit xfers */ 2720 printf("(%s:%c:%d:%d): refuses WIDE negotiation. Using " 2721 "8bit transfers\n", ahc_name(ahc), 2722 devinfo->channel, devinfo->target, devinfo->lun); 2723 ahc_set_width(ahc, devinfo, MSG_EXT_WDTR_BUS_8_BIT, 2724 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, 2725 /*paused*/TRUE); 2726 /* 2727 * No need to clear the sync rate. If the target 2728 * did not accept the command, our syncrate is 2729 * unaffected. If the target started the negotiation, 2730 * but rejected our response, we already cleared the 2731 * sync rate before sending our WDTR. 2732 */ 2733 if (tinfo->goal.period) { 2734 2735 /* Start the sync negotiation */ 2736 ahc->msgout_index = 0; 2737 ahc->msgout_len = 0; 2738 ahc_build_transfer_msg(ahc, devinfo); 2739 ahc->msgout_index = 0; 2740 response = 1; 2741 } 2742 } else if (ahc_sent_msg(ahc, MSG_EXT_SDTR, /*full*/FALSE)) { 2743 /* note asynch xfers and clear flag */ 2744 ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL, /*period*/0, 2745 /*offset*/0, /*ppr_options*/0, 2746 AHC_TRANS_ACTIVE|AHC_TRANS_GOAL, 2747 /*paused*/TRUE); 2748 printf("(%s:%c:%d:%d): refuses synchronous negotiation. " 2749 "Using asynchronous transfers\n", 2750 ahc_name(ahc), devinfo->channel, 2751 devinfo->target, devinfo->lun); 2752 } else if ((scb->hscb->control & MSG_SIMPLE_Q_TAG) != 0) { 2753 2754 printf("(%s:%c:%d:%d): refuses tagged commands. 
Performing " 2755 "non-tagged I/O\n", ahc_name(ahc), 2756 devinfo->channel, devinfo->target, devinfo->lun); 2757 ahc_set_tags(ahc, devinfo, FALSE); 2758 2759 /* 2760 * Resend the identify for this CCB as the target 2761 * may believe that the selection is invalid otherwise. 2762 */ 2763 ahc_outb(ahc, SCB_CONTROL, 2764 ahc_inb(ahc, SCB_CONTROL) & ~MSG_SIMPLE_Q_TAG); 2765 scb->hscb->control &= ~MSG_SIMPLE_Q_TAG; 2766 ahc_set_transaction_tag(scb, /*enabled*/FALSE, 2767 /*type*/MSG_SIMPLE_Q_TAG); 2768 ahc_outb(ahc, MSG_OUT, MSG_IDENTIFYFLAG); 2769 ahc_outb(ahc, SCSISIGO, ahc_inb(ahc, SCSISIGO) | ATNO); 2770 2771 /* 2772 * This transaction is now at the head of 2773 * the untagged queue for this target. 2774 */ 2775 if ((ahc->features & AHC_SCB_BTT) == 0) { 2776 struct scb_tailq *untagged_q; 2777 2778 untagged_q = &(ahc->untagged_queues[devinfo->target]); 2779 TAILQ_INSERT_HEAD(untagged_q, scb, links.tqe); 2780 } 2781 ahc_busy_tcl(ahc, BUILD_TCL(scb->hscb->scsiid, devinfo->lun), 2782 scb->hscb->tag); 2783 2784 /* 2785 * Requeue all tagged commands for this target 2786 * currently in our possession so they can be 2787 * converted to untagged commands. 2788 */ 2789 ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb), 2790 SCB_GET_CHANNEL(ahc, scb), 2791 SCB_GET_LUN(scb), /*tag*/SCB_LIST_NULL, 2792 ROLE_INITIATOR, CAM_REQUEUE_REQ, 2793 SEARCH_COMPLETE); 2794 } else { 2795 /* 2796 * Otherwise, we ignore it. 2797 */ 2798 printf("%s:%c:%d: Message reject for %x -- ignored\n", 2799 ahc_name(ahc), devinfo->channel, devinfo->target, 2800 last_msg); 2801 } 2802 return (response); 2803} 2804 2805/* 2806 * Process an ignore wide residue message. 2807 */ 2808static void 2809ahc_handle_ign_wide_residue(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) 2810{ 2811 u_int scb_index; 2812 struct scb *scb; 2813 2814 scb_index = ahc_inb(ahc, SCB_TAG); 2815 scb = ahc_lookup_scb(ahc, scb_index); 2816 /* 2817 * XXX Actually check data direction in the sequencer? 
2818 * Perhaps add datadir to some spare bits in the hscb? 2819 */ 2820 if ((ahc_inb(ahc, SEQ_FLAGS) & DPHASE) == 0 2821 || ahc_get_transfer_dir(scb) != CAM_DIR_IN) { 2822 /* 2823 * Ignore the message if we haven't 2824 * seen an appropriate data phase yet. 2825 */ 2826 } else { 2827 /* 2828 * If the residual occurred on the last 2829 * transfer and the transfer request was 2830 * expected to end on an odd count, do 2831 * nothing. Otherwise, subtract a byte 2832 * and update the residual count accordingly. 2833 */ 2834 uint32_t sgptr; 2835 2836 sgptr = ahc_inb(ahc, SCB_RESIDUAL_SGPTR); 2837 if ((sgptr & SG_LIST_NULL) != 0 2838 && ahc_inb(ahc, DATA_COUNT_ODD) == 1) { 2839 /* 2840 * If the residual occurred on the last 2841 * transfer and the transfer request was 2842 * expected to end on an odd count, do 2843 * nothing. 2844 */ 2845 } else { 2846 struct ahc_dma_seg *sg; 2847 uint32_t data_cnt; 2848 uint32_t data_addr; 2849 2850 /* Pull in the rest of the sgptr */ 2851 sgptr |= (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 3) << 24) 2852 | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 2) << 16) 2853 | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 1) << 8); 2854 sgptr &= SG_PTR_MASK; 2855 data_cnt = (ahc_inb(ahc, SCB_RESIDUAL_DATACNT+2) << 16) 2856 | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT+1) << 8) 2857 | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT)); 2858 2859 data_addr = (ahc_inb(ahc, SHADDR + 3) << 24) 2860 | (ahc_inb(ahc, SHADDR + 2) << 16) 2861 | (ahc_inb(ahc, SHADDR + 1) << 8) 2862 | (ahc_inb(ahc, SHADDR)); 2863 2864 data_cnt += 1; 2865 data_addr -= 1; 2866 2867 sg = ahc_sg_bus_to_virt(scb, sgptr); 2868 /* 2869 * The residual sg ptr points to the next S/G 2870 * to load so we must go back one. 2871 */ 2872 sg--; 2873 if (sg != scb->sg_list 2874 && (sg->len & AHC_SG_LEN_MASK) < data_cnt) { 2875 2876 sg--; 2877 data_cnt = 1 | (sg->len & AHC_DMA_LAST_SEG); 2878 data_addr = sg->addr 2879 + (sg->len & AHC_SG_LEN_MASK) - 1; 2880 2881 /* 2882 * Increment sg so it points to the 2883 * "next" sg. 
2884 */ 2885 sg++; 2886 sgptr = ahc_sg_virt_to_bus(scb, sg); 2887 ahc_outb(ahc, SCB_RESIDUAL_SGPTR + 3, 2888 sgptr >> 24); 2889 ahc_outb(ahc, SCB_RESIDUAL_SGPTR + 2, 2890 sgptr >> 16); 2891 ahc_outb(ahc, SCB_RESIDUAL_SGPTR + 1, 2892 sgptr >> 8); 2893 ahc_outb(ahc, SCB_RESIDUAL_SGPTR, sgptr); 2894 } 2895 2896/* XXX What about high address byte??? */ 2897 ahc_outb(ahc, SCB_RESIDUAL_DATACNT + 3, data_cnt >> 24); 2898 ahc_outb(ahc, SCB_RESIDUAL_DATACNT + 2, data_cnt >> 16); 2899 ahc_outb(ahc, SCB_RESIDUAL_DATACNT + 1, data_cnt >> 8); 2900 ahc_outb(ahc, SCB_RESIDUAL_DATACNT, data_cnt); 2901 2902/* XXX Perhaps better to just keep the saved address in sram */ 2903 if ((ahc->features & AHC_ULTRA2) != 0) { 2904 ahc_outb(ahc, HADDR + 3, data_addr >> 24); 2905 ahc_outb(ahc, HADDR + 2, data_addr >> 16); 2906 ahc_outb(ahc, HADDR + 1, data_addr >> 8); 2907 ahc_outb(ahc, HADDR, data_addr); 2908 ahc_outb(ahc, DFCNTRL, PRELOADEN); 2909 ahc_outb(ahc, SXFRCTL0, 2910 ahc_inb(ahc, SXFRCTL0) | CLRCHN); 2911 } else { 2912 ahc_outb(ahc, SHADDR + 3, data_addr >> 24); 2913 ahc_outb(ahc, SHADDR + 2, data_addr >> 16); 2914 ahc_outb(ahc, SHADDR + 1, data_addr >> 8); 2915 ahc_outb(ahc, SHADDR, data_addr); 2916 } 2917 } 2918 } 2919} 2920 2921/* 2922 * Handle the effects of issuing a bus device reset message. 2923 */ 2924static void 2925ahc_handle_devreset(struct ahc_softc *ahc, struct ahc_devinfo *devinfo, 2926 cam_status status, char *message, int verbose_level) 2927{ 2928#ifdef AHC_TARGET_MODE 2929 struct tmode_tstate* tstate; 2930 u_int lun; 2931#endif 2932 int found; 2933 2934 found = ahc_abort_scbs(ahc, devinfo->target, devinfo->channel, 2935 CAM_LUN_WILDCARD, SCB_LIST_NULL, devinfo->role, 2936 status); 2937 2938#ifdef AHC_TARGET_MODE 2939 /* 2940 * Send an immediate notify ccb to all target mode peripheral 2941 * drivers affected by this action. 
2942 */ 2943 tstate = ahc->enabled_targets[devinfo->our_scsiid]; 2944 if (tstate != NULL) { 2945 for (lun = 0; lun <= 7; lun++) { 2946 struct tmode_lstate* lstate; 2947 2948 lstate = tstate->enabled_luns[lun]; 2949 if (lstate == NULL) 2950 continue; 2951 2952 ahc_queue_lstate_event(ahc, lstate, devinfo->our_scsiid, 2953 MSG_BUS_DEV_RESET, /*arg*/0); 2954 ahc_send_lstate_events(ahc, lstate); 2955 } 2956 } 2957#endif 2958 2959 /* 2960 * Go back to async/narrow transfers and renegotiate. 2961 */ 2962 ahc_set_width(ahc, devinfo, MSG_EXT_WDTR_BUS_8_BIT, 2963 AHC_TRANS_CUR, /*paused*/TRUE); 2964 ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL, 2965 /*period*/0, /*offset*/0, /*ppr_options*/0, 2966 AHC_TRANS_CUR, /*paused*/TRUE); 2967 2968 ahc_send_async(ahc, devinfo->channel, devinfo->target, 2969 CAM_LUN_WILDCARD, AC_SENT_BDR); 2970 2971 if (message != NULL 2972 && (verbose_level <= bootverbose)) 2973 printf("%s: %s on %c:%d. %d SCBs aborted\n", ahc_name(ahc), 2974 message, devinfo->channel, devinfo->target, found); 2975} 2976 2977#ifdef AHC_TARGET_MODE 2978void 2979ahc_setup_target_msgin(struct ahc_softc *ahc, struct ahc_devinfo *devinfo) 2980{ 2981 /* 2982 * To facilitate adding multiple messages together, 2983 * each routine should increment the index and len 2984 * variables instead of setting them explicitly. 2985 */ 2986 ahc->msgout_index = 0; 2987 ahc->msgout_len = 0; 2988 2989 if ((ahc->targ_msg_req & devinfo->target_mask) != 0) 2990 ahc_build_transfer_msg(ahc, devinfo); 2991 else 2992 panic("ahc_intr: AWAITING target message with no message"); 2993 2994 ahc->msgout_index = 0; 2995 ahc->msg_type = MSG_TYPE_TARGET_MSGIN; 2996} 2997#endif 2998/**************************** Initialization **********************************/ 2999/* 3000 * Allocate a controller structure for a new device 3001 * and perform initial initialization. 
3002 */ 3003struct ahc_softc * 3004ahc_alloc(void *platform_arg, char *name) 3005{ 3006 struct ahc_softc *ahc; 3007 int i; 3008 3009 ahc = malloc(sizeof(*ahc), M_DEVBUF, M_NOWAIT); 3010 if (!ahc) { 3011 printf("aic7xxx: cannot malloc softc!\n"); 3012 free(name, M_DEVBUF); 3013 return NULL; 3014 } 3015 memset(ahc, 0, sizeof(*ahc)); 3016 LIST_INIT(&ahc->pending_scbs); 3017 /* We don't know our unit number until the OSM sets it */ 3018 ahc->name = name; 3019 for (i = 0; i < 16; i++) 3020 TAILQ_INIT(&ahc->untagged_queues[i]); 3021 if (ahc_platform_alloc(ahc, platform_arg) != 0) { 3022 ahc_free(ahc); 3023 ahc = NULL; 3024 } 3025 return (ahc); 3026} 3027 3028int 3029ahc_softc_init(struct ahc_softc *ahc, struct ahc_probe_config *config) 3030{ 3031 3032 ahc->chip = config->chip; 3033 ahc->features = config->features; 3034 ahc->bugs = config->bugs; 3035 ahc->flags = config->flags; 3036 ahc->channel = config->channel; 3037 ahc->unpause = (ahc_inb(ahc, HCNTRL) & IRQMS) | INTEN; 3038 ahc->description = config->description; 3039 /* The IRQMS bit is only valid on VL and EISA chips */ 3040 if ((ahc->chip & AHC_PCI) != 0) 3041 ahc->unpause &= ~IRQMS; 3042 ahc->pause = ahc->unpause | PAUSE; 3043 /* XXX The shared scb data stuff should be deprecated */ 3044 if (ahc->scb_data == NULL) { 3045 ahc->scb_data = malloc(sizeof(*ahc->scb_data), 3046 M_DEVBUF, M_NOWAIT); 3047 if (ahc->scb_data == NULL) 3048 return (ENOMEM); 3049 memset(ahc->scb_data, 0, sizeof(*ahc->scb_data)); 3050 } 3051 3052 return (0); 3053} 3054 3055void 3056ahc_softc_insert(struct ahc_softc *ahc) 3057{ 3058 struct ahc_softc *list_ahc; 3059 3060#ifdef AHC_SUPPORT_PCI 3061 /* 3062 * Second Function PCI devices need to inherit some 3063 * settings from function 0. We assume that function 0 3064 * will always be found prior to function 1. 
3065 */ 3066 if ((ahc->chip & AHC_BUS_MASK) == AHC_PCI 3067 && ahc_get_pci_function(ahc->dev_softc) == 1) { 3068 TAILQ_FOREACH(list_ahc, &ahc_tailq, links) { 3069 ahc_dev_softc_t list_pci; 3070 ahc_dev_softc_t pci; 3071 3072 list_pci = list_ahc->dev_softc; 3073 pci = ahc->dev_softc; 3074 if (ahc_get_pci_bus(list_pci) == ahc_get_pci_bus(pci) 3075 && ahc_get_pci_slot(list_pci) == ahc_get_pci_slot(pci) 3076 && ahc_get_pci_function(list_pci) == 0) { 3077 ahc->flags &= ~AHC_BIOS_ENABLED; 3078 ahc->flags |= 3079 list_ahc->flags & AHC_BIOS_ENABLED; 3080 ahc->flags &= ~AHC_CHANNEL_B_PRIMARY; 3081 ahc->flags |= 3082 list_ahc->flags & AHC_CHANNEL_B_PRIMARY; 3083 break; 3084 } 3085 } 3086 } 3087#endif 3088 3089 /* 3090 * Insertion sort into our list of softcs. 3091 */ 3092 list_ahc = TAILQ_FIRST(&ahc_tailq); 3093 while (list_ahc != NULL 3094 && ahc_softc_comp(list_ahc, ahc) <= 0) 3095 list_ahc = TAILQ_NEXT(list_ahc, links); 3096 if (list_ahc != NULL) 3097 TAILQ_INSERT_BEFORE(list_ahc, ahc, links); 3098 else 3099 TAILQ_INSERT_TAIL(&ahc_tailq, ahc, links); 3100 ahc->init_level++; 3101} 3102 3103void 3104ahc_set_unit(struct ahc_softc *ahc, int unit) 3105{ 3106 ahc->unit = unit; 3107} 3108 3109void 3110ahc_set_name(struct ahc_softc *ahc, char *name) 3111{ 3112 if (ahc->name != NULL) 3113 free(ahc->name, M_DEVBUF); 3114 ahc->name = name; 3115} 3116 3117void 3118ahc_free(struct ahc_softc *ahc) 3119{ 3120 ahc_fini_scbdata(ahc); 3121 switch (ahc->init_level) { 3122 case 4: 3123 ahc_shutdown(ahc); 3124 TAILQ_REMOVE(&ahc_tailq, ahc, links); 3125 /* FALLTHROUGH */ 3126 case 3: 3127 ahc_dmamap_unload(ahc, ahc->shared_data_dmat, 3128 ahc->shared_data_dmamap); 3129 /* FALLTHROUGH */ 3130 case 2: 3131 ahc_dmamem_free(ahc, ahc->shared_data_dmat, ahc->qoutfifo, 3132 ahc->shared_data_dmamap); 3133 ahc_dmamap_destroy(ahc, ahc->shared_data_dmat, 3134 ahc->shared_data_dmamap); 3135 /* FALLTHROUGH */ 3136 case 1: 3137#ifndef __linux__ 3138 ahc_dma_tag_destroy(ahc, ahc->buffer_dmat); 3139#endif 
3140 break; 3141 } 3142 3143 ahc_platform_free(ahc); 3144#if XXX 3145 for () { 3146 ahc_free_tstate(struct ahc_softc *ahc, u_int scsi_id, 3147 char channel, int force); 3148 } 3149#endif 3150 if (ahc->name != NULL) 3151 free(ahc->name, M_DEVBUF); 3152 free(ahc, M_DEVBUF); 3153 return; 3154} 3155 3156void 3157ahc_shutdown(void *arg) 3158{ 3159 struct ahc_softc *ahc; 3160 int i; 3161 3162 ahc = (struct ahc_softc *)arg; 3163 3164 /* This will reset most registers to 0, but not all */ 3165 ahc_reset(ahc); 3166 ahc_outb(ahc, SCSISEQ, 0); 3167 ahc_outb(ahc, SXFRCTL0, 0); 3168 ahc_outb(ahc, DSPCISTATUS, 0); 3169 3170 for (i = TARG_SCSIRATE; i < HA_274_BIOSCTRL; i++) 3171 ahc_outb(ahc, i, 0); 3172} 3173 3174/* 3175 * Reset the controller and record some information about it 3176 * that is only available just after a reset. 3177 */ 3178int 3179ahc_reset(struct ahc_softc *ahc) 3180{ 3181 u_int sblkctl; 3182 u_int sxfrctl1_a, sxfrctl1_b; 3183 int wait; 3184 3185 /* 3186 * Preserve the value of the SXFRCTL1 register for all channels. 3187 * It contains settings that affect termination and we don't want 3188 * to disturb the integrity of the bus. 3189 */ 3190 pause_sequencer(ahc); 3191 sxfrctl1_b = 0; 3192 if ((ahc->chip & AHC_CHIPID_MASK) == AHC_AIC7770) { 3193 u_int sblkctl; 3194 3195 /* 3196 * Save channel B's settings in case this chip 3197 * is setup for TWIN channel operation. 3198 */ 3199 sblkctl = ahc_inb(ahc, SBLKCTL); 3200 ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB); 3201 sxfrctl1_b = ahc_inb(ahc, SXFRCTL1); 3202 ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB); 3203 } 3204 sxfrctl1_a = ahc_inb(ahc, SXFRCTL1); 3205 3206 ahc_outb(ahc, HCNTRL, CHIPRST | ahc->pause); 3207 3208 /* 3209 * Ensure that the reset has finished 3210 */ 3211 wait = 1000; 3212 do { 3213 ahc_delay(1000); 3214 } while (--wait && !(ahc_inb(ahc, HCNTRL) & CHIPRSTACK)); 3215 3216 if (wait == 0) { 3217 printf("%s: WARNING - Failed chip reset! 
" 3218 "Trying to initialize anyway.\n", ahc_name(ahc)); 3219 ahc_outb(ahc, HCNTRL, ahc->pause); 3220 } 3221 3222 /* Determine channel configuration */ 3223 sblkctl = ahc_inb(ahc, SBLKCTL) & (SELBUSB|SELWIDE); 3224 /* No Twin Channel PCI cards */ 3225 if ((ahc->chip & AHC_PCI) != 0) 3226 sblkctl &= ~SELBUSB; 3227 switch (sblkctl) { 3228 case 0: 3229 /* Single Narrow Channel */ 3230 break; 3231 case 2: 3232 /* Wide Channel */ 3233 ahc->features |= AHC_WIDE; 3234 break; 3235 case 8: 3236 /* Twin Channel */ 3237 ahc->features |= AHC_TWIN; 3238 break; 3239 default: 3240 printf(" Unsupported adapter type. Ignoring\n"); 3241 return(-1); 3242 } 3243 3244 /* 3245 * Reload sxfrctl1. 3246 * 3247 * We must always initialize STPWEN to 1 before we 3248 * restore the saved values. STPWEN is initialized 3249 * to a tri-state condition which can only be cleared 3250 * by turning it on. 3251 */ 3252 if ((ahc->features & AHC_TWIN) != 0) { 3253 u_int sblkctl; 3254 3255 sblkctl = ahc_inb(ahc, SBLKCTL); 3256 ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB); 3257 ahc_outb(ahc, SXFRCTL1, sxfrctl1_b); 3258 ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB); 3259 } 3260 ahc_outb(ahc, SXFRCTL1, sxfrctl1_a); 3261 3262#ifdef AHC_DUMP_SEQ 3263 if (ahc->init_level == 0) 3264 ahc_dumpseq(ahc); 3265#endif 3266 3267 return (0); 3268} 3269 3270/* 3271 * Determine the number of SCBs available on the controller 3272 */ 3273int 3274ahc_probe_scbs(struct ahc_softc *ahc) { 3275 int i; 3276 3277 for (i = 0; i < AHC_SCB_MAX; i++) { 3278 ahc_outb(ahc, SCBPTR, i); 3279 ahc_outb(ahc, SCB_BASE, i); 3280 if (ahc_inb(ahc, SCB_BASE) != i) 3281 break; 3282 ahc_outb(ahc, SCBPTR, 0); 3283 if (ahc_inb(ahc, SCB_BASE) != 0) 3284 break; 3285 } 3286 return (i); 3287} 3288 3289void 3290ahc_init_probe_config(struct ahc_probe_config *probe_config) 3291{ 3292 probe_config->description = NULL; 3293 probe_config->channel = 'A'; 3294 probe_config->channel_b = 'B'; 3295 probe_config->chip = AHC_NONE; 3296 probe_config->features = AHC_FENONE; 
3297 probe_config->bugs = AHC_BUGNONE; 3298 probe_config->flags = AHC_FNONE; 3299} 3300 3301static void 3302ahc_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 3303{ 3304 bus_addr_t *baddr; 3305 3306 baddr = (bus_addr_t *)arg; 3307 *baddr = segs->ds_addr; 3308} 3309 3310static int 3311ahc_init_scbdata(struct ahc_softc *ahc) 3312{ 3313 struct scb_data *scb_data; 3314 int i; 3315 3316 scb_data = ahc->scb_data; 3317 SLIST_INIT(&scb_data->free_scbs); 3318 SLIST_INIT(&scb_data->sg_maps); 3319 3320 /* Allocate SCB resources */ 3321 scb_data->scbarray = 3322 (struct scb *)malloc(sizeof(struct scb) * AHC_SCB_MAX, 3323 M_DEVBUF, M_NOWAIT); 3324 if (scb_data->scbarray == NULL) 3325 return (ENOMEM); 3326 memset(scb_data->scbarray, 0, sizeof(struct scb) * AHC_SCB_MAX); 3327 3328 /* Determine the number of hardware SCBs and initialize them */ 3329 3330 scb_data->maxhscbs = ahc_probe_scbs(ahc); 3331 /* SCB 0 heads the free list */ 3332 ahc_outb(ahc, FREE_SCBH, 0); 3333 for (i = 0; i < ahc->scb_data->maxhscbs; i++) { 3334 ahc_outb(ahc, SCBPTR, i); 3335 3336 /* Clear the control byte. */ 3337 ahc_outb(ahc, SCB_CONTROL, 0); 3338 3339 /* Set the next pointer */ 3340 ahc_outb(ahc, SCB_NEXT, i+1); 3341 3342 /* Make the tag number invalid */ 3343 ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL); 3344 } 3345 3346 /* Make sure that the last SCB terminates the free list */ 3347 ahc_outb(ahc, SCBPTR, i-1); 3348 ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL); 3349 3350 /* Ensure we clear the 0 SCB's control byte. */ 3351 ahc_outb(ahc, SCBPTR, 0); 3352 ahc_outb(ahc, SCB_CONTROL, 0); 3353 3354 scb_data->maxhscbs = i; 3355 3356 if (ahc->scb_data->maxhscbs == 0) 3357 panic("%s: No SCB space found", ahc_name(ahc)); 3358 3359 /* 3360 * Create our DMA tags. These tags define the kinds of device 3361 * accessible memory allocations and memory mappings we will 3362 * need to perform during normal operation. 
3363 * 3364 * Unless we need to further restrict the allocation, we rely 3365 * on the restrictions of the parent dmat, hence the common 3366 * use of MAXADDR and MAXSIZE. 3367 */ 3368 3369 /* DMA tag for our hardware scb structures */ 3370 if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1, 3371 /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR, 3372 /*highaddr*/BUS_SPACE_MAXADDR, 3373 /*filter*/NULL, /*filterarg*/NULL, 3374 AHC_SCB_MAX * sizeof(struct hardware_scb), 3375 /*nsegments*/1, 3376 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, 3377 /*flags*/0, &scb_data->hscb_dmat) != 0) { 3378 goto error_exit; 3379 } 3380 3381 scb_data->init_level++; 3382 3383 /* Allocation for our ccbs */ 3384 if (ahc_dmamem_alloc(ahc, scb_data->hscb_dmat, 3385 (void **)&scb_data->hscbs, 3386 BUS_DMA_NOWAIT, &scb_data->hscb_dmamap) != 0) { 3387 goto error_exit; 3388 } 3389 3390 scb_data->init_level++; 3391 3392 /* And permanently map them */ 3393 ahc_dmamap_load(ahc, scb_data->hscb_dmat, scb_data->hscb_dmamap, 3394 scb_data->hscbs, 3395 AHC_SCB_MAX * sizeof(struct hardware_scb), 3396 ahc_dmamap_cb, &scb_data->hscb_busaddr, /*flags*/0); 3397 3398 scb_data->init_level++; 3399 3400 /* DMA tag for our sense buffers */ 3401 if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1, 3402 /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR, 3403 /*highaddr*/BUS_SPACE_MAXADDR, 3404 /*filter*/NULL, /*filterarg*/NULL, 3405 AHC_SCB_MAX * sizeof(struct scsi_sense_data), 3406 /*nsegments*/1, 3407 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, 3408 /*flags*/0, &scb_data->sense_dmat) != 0) { 3409 goto error_exit; 3410 } 3411 3412 scb_data->init_level++; 3413 3414 /* Allocate them */ 3415 if (ahc_dmamem_alloc(ahc, scb_data->sense_dmat, 3416 (void **)&scb_data->sense, 3417 BUS_DMA_NOWAIT, &scb_data->sense_dmamap) != 0) { 3418 goto error_exit; 3419 } 3420 3421 scb_data->init_level++; 3422 3423 /* And permanently map them */ 3424 ahc_dmamap_load(ahc, scb_data->sense_dmat, scb_data->sense_dmamap, 3425 scb_data->sense, 3426 
AHC_SCB_MAX * sizeof(struct scsi_sense_data), 3427 ahc_dmamap_cb, &scb_data->sense_busaddr, /*flags*/0); 3428 3429 scb_data->init_level++; 3430 3431 /* DMA tag for our S/G structures. We allocate in page sized chunks */ 3432 if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1, 3433 /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR, 3434 /*highaddr*/BUS_SPACE_MAXADDR, 3435 /*filter*/NULL, /*filterarg*/NULL, 3436 PAGE_SIZE, /*nsegments*/1, 3437 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, 3438 /*flags*/0, &scb_data->sg_dmat) != 0) { 3439 goto error_exit; 3440 } 3441 3442 scb_data->init_level++; 3443 3444 /* Perform initial CCB allocation */ 3445 memset(scb_data->hscbs, 0, AHC_SCB_MAX * sizeof(struct hardware_scb)); 3446 ahc_alloc_scbs(ahc); 3447 3448 if (scb_data->numscbs == 0) { 3449 printf("%s: ahc_init_scbdata - " 3450 "Unable to allocate initial scbs\n", 3451 ahc_name(ahc)); 3452 goto error_exit; 3453 } 3454 3455 /* 3456 * Tell the sequencer which SCB will be the next one it receives. 3457 */ 3458 ahc->next_queued_scb = ahc_get_scb(ahc); 3459 ahc_outb(ahc, NEXT_QUEUED_SCB, ahc->next_queued_scb->hscb->tag); 3460 3461 /* 3462 * Note that we were successfull 3463 */ 3464 return (0); 3465 3466error_exit: 3467 3468 return (ENOMEM); 3469} 3470 3471static void 3472ahc_fini_scbdata(struct ahc_softc *ahc) 3473{ 3474 struct scb_data *scb_data; 3475 3476 scb_data = ahc->scb_data; 3477 3478 switch (scb_data->init_level) { 3479 default: 3480 case 7: 3481 { 3482 struct sg_map_node *sg_map; 3483 3484 while ((sg_map = SLIST_FIRST(&scb_data->sg_maps))!= NULL) { 3485 SLIST_REMOVE_HEAD(&scb_data->sg_maps, links); 3486 ahc_dmamap_unload(ahc, scb_data->sg_dmat, 3487 sg_map->sg_dmamap); 3488 ahc_dmamem_free(ahc, scb_data->sg_dmat, 3489 sg_map->sg_vaddr, 3490 sg_map->sg_dmamap); 3491 free(sg_map, M_DEVBUF); 3492 } 3493 ahc_dma_tag_destroy(ahc, scb_data->sg_dmat); 3494 } 3495 case 6: 3496 ahc_dmamap_unload(ahc, scb_data->sense_dmat, 3497 scb_data->sense_dmamap); 3498 case 5: 3499 
ahc_dmamem_free(ahc, scb_data->sense_dmat, scb_data->sense, 3500 scb_data->sense_dmamap); 3501 ahc_dmamap_destroy(ahc, scb_data->sense_dmat, 3502 scb_data->sense_dmamap); 3503 case 4: 3504 ahc_dma_tag_destroy(ahc, scb_data->sense_dmat); 3505 case 3: 3506 ahc_dmamap_unload(ahc, scb_data->hscb_dmat, 3507 scb_data->hscb_dmamap); 3508 case 2: 3509 ahc_dmamem_free(ahc, scb_data->hscb_dmat, scb_data->hscbs, 3510 scb_data->hscb_dmamap); 3511 ahc_dmamap_destroy(ahc, scb_data->hscb_dmat, 3512 scb_data->hscb_dmamap); 3513 case 1: 3514 ahc_dma_tag_destroy(ahc, scb_data->hscb_dmat); 3515 break; 3516 } 3517 if (scb_data->scbarray != NULL) 3518 free(scb_data->scbarray, M_DEVBUF); 3519} 3520 3521void 3522ahc_alloc_scbs(struct ahc_softc *ahc) 3523{ 3524 struct scb_data *scb_data; 3525 struct scb *next_scb; 3526 struct sg_map_node *sg_map; 3527 bus_addr_t physaddr; 3528 struct ahc_dma_seg *segs; 3529 int newcount; 3530 int i; 3531 3532 scb_data = ahc->scb_data; 3533 if (scb_data->numscbs >= AHC_SCB_MAX) 3534 /* Can't allocate any more */ 3535 return; 3536 3537 next_scb = &scb_data->scbarray[scb_data->numscbs]; 3538 3539 sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT); 3540 3541 if (sg_map == NULL) 3542 return; 3543 3544 /* Allocate S/G space for the next batch of SCBS */ 3545 if (ahc_dmamem_alloc(ahc, scb_data->sg_dmat, 3546 (void **)&sg_map->sg_vaddr, 3547 BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) { 3548 free(sg_map, M_DEVBUF); 3549 return; 3550 } 3551 3552 SLIST_INSERT_HEAD(&scb_data->sg_maps, sg_map, links); 3553 3554 ahc_dmamap_load(ahc, scb_data->sg_dmat, sg_map->sg_dmamap, 3555 sg_map->sg_vaddr, PAGE_SIZE, ahc_dmamap_cb, 3556 &sg_map->sg_physaddr, /*flags*/0); 3557 3558 segs = sg_map->sg_vaddr; 3559 physaddr = sg_map->sg_physaddr; 3560 3561 newcount = (PAGE_SIZE / (AHC_NSEG * sizeof(struct ahc_dma_seg))); 3562 for (i = 0; scb_data->numscbs < AHC_SCB_MAX && i < newcount; i++) { 3563 struct scb_platform_data *pdata; 3564#ifndef __linux__ 3565 int error; 3566#endif 3567 
pdata = (struct scb_platform_data *)malloc(sizeof(*pdata), 3568 M_DEVBUF, M_NOWAIT); 3569 if (pdata == NULL) 3570 break; 3571 next_scb->platform_data = pdata; 3572 next_scb->sg_list = segs; 3573 /* 3574 * The sequencer always starts with the second entry. 3575 * The first entry is embedded in the scb. 3576 */ 3577 next_scb->sg_list_phys = physaddr + sizeof(struct ahc_dma_seg); 3578 next_scb->ahc_softc = ahc; 3579 next_scb->flags = SCB_FREE; 3580#ifndef __linux__ 3581 error = ahc_dmamap_create(ahc, ahc->buffer_dmat, /*flags*/0, 3582 &next_scb->dmamap); 3583 if (error != 0) 3584 break; 3585#endif 3586 next_scb->hscb = &scb_data->hscbs[scb_data->numscbs]; 3587 next_scb->hscb->tag = ahc->scb_data->numscbs; 3588 next_scb->cdb32_busaddr = 3589 ahc_hscb_busaddr(ahc, next_scb->hscb->tag) 3590 + offsetof(struct hardware_scb, cdb32); 3591 SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs, 3592 next_scb, links.sle); 3593 segs += AHC_NSEG; 3594 physaddr += (AHC_NSEG * sizeof(struct ahc_dma_seg)); 3595 next_scb++; 3596 ahc->scb_data->numscbs++; 3597 } 3598} 3599 3600void 3601ahc_controller_info(struct ahc_softc *ahc, char *buf) 3602{ 3603 int len; 3604 3605 len = sprintf(buf, "%s: ", ahc_chip_names[ahc->chip & AHC_CHIPID_MASK]); 3606 buf += len; 3607 if ((ahc->features & AHC_TWIN) != 0) 3608 len = sprintf(buf, "Twin Channel, A SCSI Id=%d, " 3609 "B SCSI Id=%d, primary %c, ", 3610 ahc->our_id, ahc->our_id_b, 3611 ahc->flags & AHC_CHANNEL_B_PRIMARY ? 
'B': 'A'); 3612 else { 3613 const char *type; 3614 3615 if ((ahc->features & AHC_WIDE) != 0) { 3616 type = "Wide"; 3617 } else { 3618 type = "Single"; 3619 } 3620 len = sprintf(buf, "%s Channel %c, SCSI Id=%d, ", 3621 type, ahc->channel, ahc->our_id); 3622 } 3623 buf += len; 3624 3625 if (ahc->flags & AHC_PAGESCBS) 3626 sprintf(buf, "%d/%d SCBs", 3627 ahc->scb_data->maxhscbs, AHC_SCB_MAX); 3628 else 3629 sprintf(buf, "%d SCBs", ahc->scb_data->maxhscbs); 3630} 3631 3632/* 3633 * Start the board, ready for normal operation 3634 */ 3635int 3636ahc_init(struct ahc_softc *ahc) 3637{ 3638 int max_targ; 3639 int i; 3640 int term; 3641 u_int scsi_conf; 3642 u_int scsiseq_template; 3643 u_int ultraenb; 3644 u_int discenable; 3645 u_int tagenable; 3646 size_t driver_data_size; 3647 uint32_t physaddr; 3648 3649#ifdef AHC_PRINT_SRAM 3650 printf("Scratch Ram:"); 3651 for (i = 0x20; i < 0x5f; i++) { 3652 if (((i % 8) == 0) && (i != 0)) { 3653 printf ("\n "); 3654 } 3655 printf (" 0x%x", ahc_inb(ahc, i)); 3656 } 3657 if ((ahc->features & AHC_MORE_SRAM) != 0) { 3658 for (i = 0x70; i < 0x7f; i++) { 3659 if (((i % 8) == 0) && (i != 0)) { 3660 printf ("\n "); 3661 } 3662 printf (" 0x%x", ahc_inb(ahc, i)); 3663 } 3664 } 3665 printf ("\n"); 3666#endif 3667 max_targ = 15; 3668 3669 /* 3670 * Assume we have a board at this stage and it has been reset. 3671 */ 3672 if ((ahc->flags & AHC_USEDEFAULTS) != 0) 3673 ahc->our_id = ahc->our_id_b = 7; 3674 3675 /* 3676 * Default to allowing initiator operations. 3677 */ 3678 ahc->flags |= AHC_INITIATORMODE; 3679 3680 if ((ahc->flags & AHC_TARGETMODE) != 0) { 3681 /* 3682 * Although we have space for both the initiator and 3683 * target roles on ULTRA2 chips, we currently disable 3684 * the initiator role to allow multi-scsi-id target mode 3685 * configurations. We can only respond on the same SCSI 3686 * ID as our initiator role if we allow initiator operation. 
3687 * At some point, we should add a configuration knob to 3688 * allow both roles to be loaded. 3689 */ 3690 ahc->flags &= ~AHC_INITIATORMODE; 3691 } 3692 3693#ifndef __linux__ 3694 /* DMA tag for mapping buffers into device visible space. */ 3695 if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1, 3696 /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR, 3697 /*highaddr*/BUS_SPACE_MAXADDR, 3698 /*filter*/NULL, /*filterarg*/NULL, 3699 /*maxsize*/MAXBSIZE, /*nsegments*/AHC_NSEG, 3700 /*maxsegsz*/AHC_MAXTRANSFER_SIZE, 3701 /*flags*/BUS_DMA_ALLOCNOW, 3702 &ahc->buffer_dmat) != 0) { 3703 return (ENOMEM); 3704 } 3705#endif 3706 3707 ahc->init_level++; 3708 3709 /* 3710 * DMA tag for our command fifos and other data in system memory 3711 * the card's sequencer must be able to access. For initiator 3712 * roles, we need to allocate space for the the qinfifo and qoutfifo. 3713 * The qinfifo and qoutfifo are composed of 256 1 byte elements. 3714 * When providing for the target mode role, we must additionally 3715 * provide space for the incoming target command fifo and an extra 3716 * byte to deal with a dma bug in some chip versions. 
3717 */ 3718 driver_data_size = 2 * 256 * sizeof(uint8_t); 3719 if ((ahc->flags & AHC_TARGETMODE) != 0) 3720 driver_data_size += AHC_TMODE_CMDS * sizeof(struct target_cmd) 3721 + /*DMA WideOdd Bug Buffer*/1; 3722 if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1, 3723 /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR, 3724 /*highaddr*/BUS_SPACE_MAXADDR, 3725 /*filter*/NULL, /*filterarg*/NULL, 3726 driver_data_size, 3727 /*nsegments*/1, 3728 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, 3729 /*flags*/0, &ahc->shared_data_dmat) != 0) { 3730 return (ENOMEM); 3731 } 3732 3733 ahc->init_level++; 3734 3735 /* Allocation of driver data */ 3736 if (ahc_dmamem_alloc(ahc, ahc->shared_data_dmat, 3737 (void **)&ahc->qoutfifo, 3738 BUS_DMA_NOWAIT, &ahc->shared_data_dmamap) != 0) { 3739 return (ENOMEM); 3740 } 3741 3742 ahc->init_level++; 3743 3744 /* And permanently map it in */ 3745 ahc_dmamap_load(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap, 3746 ahc->qoutfifo, driver_data_size, ahc_dmamap_cb, 3747 &ahc->shared_data_busaddr, /*flags*/0); 3748 3749 if ((ahc->flags & AHC_TARGETMODE) != 0) { 3750 ahc->targetcmds = (struct target_cmd *)ahc->qoutfifo; 3751 ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[256]; 3752 ahc->dma_bug_buf = ahc->shared_data_busaddr 3753 + driver_data_size - 1; 3754 /* All target command blocks start out invalid. 
*/ 3755 for (i = 0; i < AHC_TMODE_CMDS; i++) 3756 ahc->targetcmds[i].cmd_valid = 0; 3757 ahc->tqinfifonext = 1; 3758 ahc_outb(ahc, KERNEL_TQINPOS, ahc->tqinfifonext - 1); 3759 ahc_outb(ahc, TQINPOS, ahc->tqinfifonext); 3760 ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[256]; 3761 } 3762 ahc->qinfifo = &ahc->qoutfifo[256]; 3763 3764 ahc->init_level++; 3765 3766 /* Allocate SCB data now that buffer_dmat is initialized */ 3767 if (ahc->scb_data->maxhscbs == 0) 3768 if (ahc_init_scbdata(ahc) != 0) 3769 return (ENOMEM); 3770 3771 /* 3772 * Allocate a tstate to house information for our 3773 * initiator presence on the bus as well as the user 3774 * data for any target mode initiator. 3775 */ 3776 if (ahc_alloc_tstate(ahc, ahc->our_id, 'A') == NULL) { 3777 printf("%s: unable to allocate tmode_tstate. " 3778 "Failing attach\n", ahc_name(ahc)); 3779 return (-1); 3780 } 3781 3782 if ((ahc->features & AHC_TWIN) != 0) { 3783 if (ahc_alloc_tstate(ahc, ahc->our_id_b, 'B') == NULL) { 3784 printf("%s: unable to allocate tmode_tstate. " 3785 "Failing attach\n", ahc_name(ahc)); 3786 return (-1); 3787 } 3788 } 3789 3790 ahc_outb(ahc, SEQ_FLAGS, 0); 3791 3792 if (ahc->scb_data->maxhscbs < AHC_SCB_MAX) { 3793 ahc->flags |= AHC_PAGESCBS; 3794 } else { 3795 ahc->flags &= ~AHC_PAGESCBS; 3796 } 3797 3798#ifdef AHC_DEBUG 3799 if (ahc_debug & AHC_SHOWMISC) { 3800 printf("%s: hardware scb %d bytes; kernel scb %d bytes; " 3801 "ahc_dma %d bytes\n", 3802 ahc_name(ahc), 3803 sizeof(struct hardware_scb), 3804 sizeof(struct scb), 3805 sizeof(struct ahc_dma_seg)); 3806 } 3807#endif /* AHC_DEBUG */ 3808 3809 /* Set the SCSI Id, SXFRCTL0, SXFRCTL1, and SIMODE1, for both channels*/ 3810 if (ahc->features & AHC_TWIN) { 3811 3812 /* 3813 * The device is gated to channel B after a chip reset, 3814 * so set those values first 3815 */ 3816 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) | SELBUSB); 3817 term = (ahc->flags & AHC_TERM_ENB_B) != 0 ? 
STPWEN : 0; 3818 ahc_outb(ahc, SCSIID, ahc->our_id_b); 3819 scsi_conf = ahc_inb(ahc, SCSICONF + 1); 3820 ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL)) 3821 |term|ENSTIMER|ACTNEGEN); 3822 ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR); 3823 ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN); 3824 3825 if ((scsi_conf & RESET_SCSI) != 0 3826 && (ahc->flags & AHC_INITIATORMODE) != 0) 3827 ahc->flags |= AHC_RESET_BUS_B; 3828 3829 /* Select Channel A */ 3830 ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~SELBUSB); 3831 } 3832 term = (ahc->flags & AHC_TERM_ENB_A) != 0 ? STPWEN : 0; 3833 if ((ahc->features & AHC_ULTRA2) != 0) 3834 ahc_outb(ahc, SCSIID_ULTRA2, ahc->our_id); 3835 else 3836 ahc_outb(ahc, SCSIID, ahc->our_id); 3837 scsi_conf = ahc_inb(ahc, SCSICONF); 3838 ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL)) 3839 |term 3840 |ENSTIMER|ACTNEGEN); 3841 ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR); 3842 ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN); 3843 3844 if ((scsi_conf & RESET_SCSI) != 0 3845 && (ahc->flags & AHC_INITIATORMODE) != 0) 3846 ahc->flags |= AHC_RESET_BUS_A; 3847 3848 /* 3849 * Look at the information that board initialization or 3850 * the board bios has left us. 3851 */ 3852 ultraenb = 0; 3853 tagenable = ALL_TARGETS_MASK; 3854 3855 /* Grab the disconnection disable table and invert it for our needs */ 3856 if (ahc->flags & AHC_USEDEFAULTS) { 3857 printf("%s: Host Adapter Bios disabled. 
Using default SCSI " 3858 "device parameters\n", ahc_name(ahc)); 3859 ahc->flags |= AHC_EXTENDED_TRANS_A|AHC_EXTENDED_TRANS_B| 3860 AHC_TERM_ENB_A|AHC_TERM_ENB_B; 3861 discenable = ALL_TARGETS_MASK; 3862 if ((ahc->features & AHC_ULTRA) != 0) 3863 ultraenb = ALL_TARGETS_MASK; 3864 } else { 3865 discenable = ~((ahc_inb(ahc, DISC_DSB + 1) << 8) 3866 | ahc_inb(ahc, DISC_DSB)); 3867 if ((ahc->features & (AHC_ULTRA|AHC_ULTRA2)) != 0) 3868 ultraenb = (ahc_inb(ahc, ULTRA_ENB + 1) << 8) 3869 | ahc_inb(ahc, ULTRA_ENB); 3870 } 3871 3872 if ((ahc->features & (AHC_WIDE|AHC_TWIN)) == 0) 3873 max_targ = 7; 3874 3875 for (i = 0; i <= max_targ; i++) { 3876 struct ahc_initiator_tinfo *tinfo; 3877 struct tmode_tstate *tstate; 3878 u_int our_id; 3879 u_int target_id; 3880 char channel; 3881 3882 channel = 'A'; 3883 our_id = ahc->our_id; 3884 target_id = i; 3885 if (i > 7 && (ahc->features & AHC_TWIN) != 0) { 3886 channel = 'B'; 3887 our_id = ahc->our_id_b; 3888 target_id = i % 8; 3889 } 3890 tinfo = ahc_fetch_transinfo(ahc, channel, our_id, 3891 target_id, &tstate); 3892 /* Default to async narrow across the board */ 3893 memset(tinfo, 0, sizeof(*tinfo)); 3894 if (ahc->flags & AHC_USEDEFAULTS) { 3895 if ((ahc->features & AHC_WIDE) != 0) 3896 tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT; 3897 3898 /* 3899 * These will be truncated when we determine the 3900 * connection type we have with the target. 3901 */ 3902 tinfo->user.period = ahc_syncrates->period; 3903 tinfo->user.offset = ~0; 3904 } else { 3905 u_int scsirate; 3906 uint16_t mask; 3907 3908 /* Take the settings leftover in scratch RAM. */ 3909 scsirate = ahc_inb(ahc, TARG_SCSIRATE + i); 3910 mask = (0x01 << i); 3911 if ((ahc->features & AHC_ULTRA2) != 0) { 3912 u_int offset; 3913 u_int maxsync; 3914 3915 if ((scsirate & SOFS) == 0x0F) { 3916 /* 3917 * Haven't negotiated yet, 3918 * so the format is different. 3919 */ 3920 scsirate = (scsirate & SXFR) >> 4 3921 | (ultraenb & mask) 3922 ? 
0x08 : 0x0 3923 | (scsirate & WIDEXFER); 3924 offset = MAX_OFFSET_ULTRA2; 3925 } else 3926 offset = ahc_inb(ahc, TARG_OFFSET + i); 3927 maxsync = AHC_SYNCRATE_ULTRA2; 3928 if ((ahc->features & AHC_DT) != 0) 3929 maxsync = AHC_SYNCRATE_DT; 3930 tinfo->user.period = 3931 ahc_find_period(ahc, scsirate, maxsync); 3932 if (offset == 0) 3933 tinfo->user.period = 0; 3934 else 3935 tinfo->user.offset = ~0; 3936 if ((scsirate & SXFR_ULTRA2) <= 8/*10MHz*/ 3937 && (ahc->features & AHC_DT) != 0) 3938 tinfo->user.ppr_options = 3939 MSG_EXT_PPR_DT_REQ; 3940 } else if ((scsirate & SOFS) != 0) { 3941 tinfo->user.period = 3942 ahc_find_period(ahc, scsirate, 3943 (ultraenb & mask) 3944 ? AHC_SYNCRATE_ULTRA 3945 : AHC_SYNCRATE_FAST); 3946 if (tinfo->user.period != 0) 3947 tinfo->user.offset = ~0; 3948 } 3949 if ((scsirate & WIDEXFER) != 0 3950 && (ahc->features & AHC_WIDE) != 0) 3951 tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT; 3952 tinfo->user.protocol_version = 4; 3953 if ((ahc->features & AHC_DT) != 0) 3954 tinfo->user.transport_version = 3; 3955 else 3956 tinfo->user.transport_version = 2; 3957 tinfo->goal.protocol_version = 2; 3958 tinfo->goal.transport_version = 2; 3959 tinfo->current.protocol_version = 2; 3960 tinfo->current.transport_version = 2; 3961 } 3962 tstate->ultraenb = ultraenb; 3963 tstate->discenable = discenable; 3964 tstate->tagenable = 0; /* Wait until the XPT says its okay */ 3965 } 3966 ahc->user_discenable = discenable; 3967 ahc->user_tagenable = tagenable; 3968 3969 /* There are no untagged SCBs active yet. */ 3970 for (i = 0; i < 16; i++) { 3971 ahc_index_busy_tcl(ahc, BUILD_TCL(i << 4, 0), /*unbusy*/TRUE); 3972 if ((ahc->features & AHC_SCB_BTT) != 0) { 3973 int lun; 3974 3975 /* 3976 * The SCB based BTT allows an entry per 3977 * target and lun pair. 
3978 */ 3979 for (lun = 1; lun < AHC_NUM_LUNS; lun++) { 3980 ahc_index_busy_tcl(ahc, 3981 BUILD_TCL(i << 4, lun), 3982 /*unbusy*/TRUE); 3983 } 3984 } 3985 } 3986 3987 /* All of our queues are empty */ 3988 for (i = 0; i < 256; i++) 3989 ahc->qoutfifo[i] = SCB_LIST_NULL; 3990 3991 for (i = 0; i < 256; i++) 3992 ahc->qinfifo[i] = SCB_LIST_NULL; 3993 3994 if ((ahc->features & AHC_MULTI_TID) != 0) { 3995 ahc_outb(ahc, TARGID, 0); 3996 ahc_outb(ahc, TARGID + 1, 0); 3997 } 3998 3999 /* 4000 * Tell the sequencer where it can find our arrays in memory. 4001 */ 4002 physaddr = ahc->scb_data->hscb_busaddr; 4003 ahc_outb(ahc, HSCB_ADDR, physaddr & 0xFF); 4004 ahc_outb(ahc, HSCB_ADDR + 1, (physaddr >> 8) & 0xFF); 4005 ahc_outb(ahc, HSCB_ADDR + 2, (physaddr >> 16) & 0xFF); 4006 ahc_outb(ahc, HSCB_ADDR + 3, (physaddr >> 24) & 0xFF); 4007 4008 physaddr = ahc->shared_data_busaddr; 4009 ahc_outb(ahc, SHARED_DATA_ADDR, physaddr & 0xFF); 4010 ahc_outb(ahc, SHARED_DATA_ADDR + 1, (physaddr >> 8) & 0xFF); 4011 ahc_outb(ahc, SHARED_DATA_ADDR + 2, (physaddr >> 16) & 0xFF); 4012 ahc_outb(ahc, SHARED_DATA_ADDR + 3, (physaddr >> 24) & 0xFF); 4013 4014 /* 4015 * Initialize the group code to command length table. 4016 * This overrides the values in TARG_SCSIRATE, so only 4017 * setup the table after we have processed that information. 
4018 */ 4019 ahc_outb(ahc, CMDSIZE_TABLE, 5); 4020 ahc_outb(ahc, CMDSIZE_TABLE + 1, 9); 4021 ahc_outb(ahc, CMDSIZE_TABLE + 2, 9); 4022 ahc_outb(ahc, CMDSIZE_TABLE + 3, 0); 4023 ahc_outb(ahc, CMDSIZE_TABLE + 4, 15); 4024 ahc_outb(ahc, CMDSIZE_TABLE + 5, 11); 4025 ahc_outb(ahc, CMDSIZE_TABLE + 6, 0); 4026 ahc_outb(ahc, CMDSIZE_TABLE + 7, 0); 4027 4028 /* Tell the sequencer of our initial queue positions */ 4029 ahc_outb(ahc, KERNEL_QINPOS, 0); 4030 ahc_outb(ahc, QINPOS, 0); 4031 ahc_outb(ahc, QOUTPOS, 0); 4032 4033 /* Don't have any special messages to send to targets */ 4034 ahc_outb(ahc, TARGET_MSG_REQUEST, 0); 4035 ahc_outb(ahc, TARGET_MSG_REQUEST + 1, 0); 4036 4037 /* 4038 * Use the built in queue management registers 4039 * if they are available. 4040 */ 4041 if ((ahc->features & AHC_QUEUE_REGS) != 0) { 4042 ahc_outb(ahc, QOFF_CTLSTA, SCB_QSIZE_256); 4043 ahc_outb(ahc, SDSCB_QOFF, 0); 4044 ahc_outb(ahc, SNSCB_QOFF, 0); 4045 ahc_outb(ahc, HNSCB_QOFF, 0); 4046 } 4047 4048 4049 /* We don't have any waiting selections */ 4050 ahc_outb(ahc, WAITING_SCBH, SCB_LIST_NULL); 4051 4052 /* Our disconnection list is empty too */ 4053 ahc_outb(ahc, DISCONNECTED_SCBH, SCB_LIST_NULL); 4054 4055 /* Message out buffer starts empty */ 4056 ahc_outb(ahc, MSG_OUT, MSG_NOOP); 4057 4058 /* 4059 * Setup the allowed SCSI Sequences based on operational mode. 4060 * If we are a target, we'll enalbe select in operations once 4061 * we've had a lun enabled. 4062 */ 4063 scsiseq_template = ENSELO|ENAUTOATNO|ENAUTOATNP; 4064 if ((ahc->flags & AHC_INITIATORMODE) != 0) 4065 scsiseq_template |= ENRSELI; 4066 ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq_template); 4067 4068 /* 4069 * Load the Sequencer program and Enable the adapter 4070 * in "fast" mode. 
 */
	if (bootverbose)
		printf("%s: Downloading Sequencer Program...",
		       ahc_name(ahc));

	ahc_loadseq(ahc);

	if ((ahc->features & AHC_ULTRA2) != 0) {
		int wait;

		/*
		 * Wait for up to 500ms for our transceivers
		 * to settle.  If the adapter does not have
		 * a cable attached, the transceivers may
		 * never settle, so don't complain if we
		 * fail here.
		 */
		pause_sequencer(ahc);
		for (wait = 5000;
		     (ahc_inb(ahc, SBLKCTL) & (ENAB40|ENAB20)) == 0 && wait;
		     wait--)
			ahc_delay(100);
		unpause_sequencer(ahc);
	}
	return (0);
}

/************************** Busy Target Table *********************************/
/*
 * Return the untagged transaction id for a given target/channel lun.
 * Optionally, clear the entry.
 *
 * With SCB-based busy-target tables (AHC_SCB_BTT) the table lives in
 * SCB space: SCBPTR selects the lun's page and SCB_64_BTT is indexed
 * by target; SCBPTR is saved and restored around the access.
 * Otherwise a per-target byte array in scratch RAM (BUSY_TARGETS)
 * holds the entries.
 */
u_int
ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl, int unbusy)
{
	u_int scbid;
	u_int target_offset;

	if ((ahc->features & AHC_SCB_BTT) != 0) {
		u_int saved_scbptr;

		saved_scbptr = ahc_inb(ahc, SCBPTR);
		ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
		scbid = ahc_inb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl));
		if (unbusy)
			ahc_outb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl),
				 SCB_LIST_NULL);
		ahc_outb(ahc, SCBPTR, saved_scbptr);
	} else {
		target_offset = TCL_TARGET_OFFSET(tcl);
		scbid = ahc_inb(ahc, BUSY_TARGETS + target_offset);
		if (unbusy)
			ahc_outb(ahc, BUSY_TARGETS + target_offset,
				 SCB_LIST_NULL);
	}

	return (scbid);
}

/*
 * Record "scbid" as the active untagged transaction for the
 * target/channel/lun encoded in "tcl".  Storage layout matches
 * ahc_index_busy_tcl() above.
 */
void
ahc_busy_tcl(struct ahc_softc *ahc, u_int tcl, u_int scbid)
{
	u_int target_offset;

	if ((ahc->features & AHC_SCB_BTT) != 0) {
		u_int saved_scbptr;

		saved_scbptr = ahc_inb(ahc, SCBPTR);
		ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
		ahc_outb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl), scbid);
		ahc_outb(ahc, SCBPTR, saved_scbptr);
	} else {
		target_offset =
TCL_TARGET_OFFSET(tcl);
		ahc_outb(ahc, BUSY_TARGETS + target_offset, scbid);
	}
}

/************************** SCB and SCB queue management **********************/
/*
 * Return non-zero if "scb" matches the given target/channel/lun/tag
 * tuple.  ALL_CHANNELS, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD and
 * SCB_LIST_NULL act as wildcards for their respective fields.  When
 * target-mode support is compiled in, "role" selects which tag
 * namespace (initiator SCB tag vs. target-mode tag_id) is compared.
 */
int
ahc_match_scb(struct ahc_softc *ahc, struct scb *scb, int target,
	      char channel, int lun, u_int tag, role_t role)
{
	int targ = SCB_GET_TARGET(ahc, scb);
	char chan = SCB_GET_CHANNEL(ahc, scb);
	int slun = SCB_GET_LUN(scb);
	int match;

	match = ((chan == channel) || (channel == ALL_CHANNELS));
	if (match != 0)
		match = ((targ == target) || (target == CAM_TARGET_WILDCARD));
	if (match != 0)
		match = ((lun == slun) || (lun == CAM_LUN_WILDCARD));
	if (match != 0) {
#if AHC_TARGET_MODE
		int group;

		group = XPT_FC_GROUP(scb->io_ctx->ccb_h.func_code);
		if (role == ROLE_INITIATOR) {
			match = (group == XPT_FC_GROUP_COMMON)
			      && ((tag == scb->hscb->tag)
			       || (tag == SCB_LIST_NULL));
		} else if (role == ROLE_TARGET) {
			match = (group == XPT_FC_GROUP_TMODE)
			      && ((tag == scb->io_ctx->csio.tag_id)
			       || (tag == SCB_LIST_NULL));
		}
#else /* !AHC_TARGET_MODE */
		match = ((tag == scb->hscb->tag) || (tag == SCB_LIST_NULL));
#endif /* AHC_TARGET_MODE */
	}

	return match;
}

/*
 * Complete, with CAM_REQUEUE_REQ status, every qinfifo entry destined
 * for the same target/channel/lun as "scb", then have the platform
 * layer freeze the device queue so no new transactions start for
 * that device.
 */
void
ahc_freeze_devq(struct ahc_softc *ahc, struct scb *scb)
{
	int target;
	char channel;
	int lun;

	target = SCB_GET_TARGET(ahc, scb);
	lun = SCB_GET_LUN(scb);
	channel = SCB_GET_CHANNEL(ahc, scb);

	ahc_search_qinfifo(ahc, target, channel, lun,
			   /*tag*/SCB_LIST_NULL, ROLE_UNKNOWN,
			   CAM_REQUEUE_REQ, SEARCH_COMPLETE);

	ahc_platform_freeze_devq(ahc, scb);
}

/*
 * Re-link "scb" into the qinfifo behind "prev_scb".  With no
 * predecessor, the sequencer is told directly (NEXT_QUEUED_SCB)
 * that this SCB is the next one to fetch.
 */
void
ahc_qinfifo_requeue(struct ahc_softc *ahc, struct scb *prev_scb,
		    struct scb *scb)
{
	if (prev_scb == NULL)
		ahc_outb(ahc, NEXT_QUEUED_SCB, scb->hscb->tag);
	else
		prev_scb->hscb->next = scb->hscb->tag;
	ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag;
	scb->hscb->next = ahc->next_queued_scb->hscb->tag;
}

/*
 * Number of SCBs queued to the card but not yet consumed by the
 * sequencer: the driver's next-in position minus the sequencer's
 * current position.
 */
int
ahc_qinfifo_count(struct ahc_softc *ahc)
{
	u_int8_t qinpos;	/* NOTE(review): rest of file spells this uint8_t */

	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
		qinpos = ahc_inb(ahc, SNSCB_QOFF);
		/*
		 * NOTE(review): SNSCB_QOFF is written straight back with
		 * the value just read — presumably the read has latching
		 * side effects; confirm against the register reference.
		 */
		ahc_outb(ahc, SNSCB_QOFF, qinpos);
	} else
		qinpos = ahc_inb(ahc, QINPOS);
	return (ahc->qinfifonext - qinpos);
}

/*
 * Scan the qinfifo, the waiting-for-selection list, and the untagged
 * holding queues for SCBs matching target/channel/lun/tag/role.
 * Depending on "action", each match is counted (SEARCH_COUNT),
 * unlinked (SEARCH_REMOVE), or completed with "status"
 * (SEARCH_COMPLETE).  Returns the number of matching SCBs.
 */
int
ahc_search_qinfifo(struct ahc_softc *ahc, int target, char channel,
		   int lun, u_int tag, role_t role, uint32_t status,
		   ahc_search_action action)
{
	struct scb *scb;
	struct scb *prev_scb;
	uint8_t qinstart;
	uint8_t qinpos;
	uint8_t qintail;
	uint8_t next, prev;
	uint8_t curscbptr;
	int found;
	int maxtarget;
	int i;
	int have_qregs;

	qintail = ahc->qinfifonext;
	have_qregs = (ahc->features & AHC_QUEUE_REGS) != 0;
	if (have_qregs) {
		qinstart = ahc_inb(ahc, SNSCB_QOFF);
		ahc_outb(ahc, SNSCB_QOFF, qinstart);
	} else
		qinstart = ahc_inb(ahc, QINPOS);
	qinpos = qinstart;

	/*
	 * If the next qinfifo SCB does not match the
	 * entry in our qinfifo, the sequencer is in
	 * the process of dmaing down the SCB that just
	 * precedes qinstart.  So, start our search in
	 * the qinfifo back by an entry.  The sequencer
	 * is smart enough to check after the SCB dma
	 * completes to ensure that the newly DMAed
	 * SCB is still relevant.
	 */
	next = ahc_inb(ahc, NEXT_QUEUED_SCB);
	if (qinstart == qintail) {
		if (next != ahc->next_queued_scb->hscb->tag)
			qinpos--;
	} else if (next != ahc->qinfifo[qinstart]) {
		qinpos--;
	}

	found = 0;
	prev_scb = NULL;

	if (action == SEARCH_COMPLETE) {
		/*
		 * Don't attempt to run any queued untagged transactions
		 * until we are done with the abort process.
4279 */ 4280 ahc_freeze_untagged_queues(ahc); 4281 } 4282 4283 /* 4284 * Start with an empty queue. Entries that are not chosen 4285 * for removal will be re-added to the queue as we go. 4286 */ 4287 ahc->qinfifonext = qinpos; 4288 ahc_outb(ahc, NEXT_QUEUED_SCB, ahc->next_queued_scb->hscb->tag); 4289 4290 while (qinpos != qintail) { 4291 scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinpos]); 4292 if (ahc_match_scb(ahc, scb, target, channel, lun, tag, role)) { 4293 /* 4294 * We found an scb that needs to be acted on. 4295 */ 4296 found++; 4297 switch (action) { 4298 case SEARCH_COMPLETE: 4299 { 4300 cam_status ostat; 4301 4302 ostat = ahc_get_transaction_status(scb); 4303 if (ostat == CAM_REQ_INPROG) 4304 ahc_set_transaction_status(scb, 4305 status); 4306 ahc_freeze_scb(scb); 4307 if ((scb->flags & SCB_ACTIVE) == 0) 4308 printf("Inactive SCB in qinfifo\n"); 4309 ahc_done(ahc, scb); 4310 4311 /* FALLTHROUGH */ 4312 case SEARCH_REMOVE: 4313 /* 4314 * The sequencer increments its position in 4315 * the qinfifo as soon as it determines that 4316 * an SCB needs to be DMA'ed down to the card. 4317 * So, if we are aborting a command that is 4318 * still in the process of being DMAed, we 4319 * must move the sequencer's qinfifo pointer 4320 * back as well. 4321 */ 4322 if (qinpos == (qinstart - 1)) { 4323 if (have_qregs) { 4324 ahc_outb(ahc, SNSCB_QOFF, 4325 qinpos); 4326 } else { 4327 ahc_outb(ahc, QINPOS, qinpos); 4328 } 4329 } 4330 break; 4331 } 4332 case SEARCH_COUNT: 4333 ahc_qinfifo_requeue(ahc, prev_scb, scb); 4334 prev_scb = scb; 4335 break; 4336 } 4337 } else { 4338 ahc_qinfifo_requeue(ahc, prev_scb, scb); 4339 prev_scb = scb; 4340 } 4341 qinpos++; 4342 } 4343 4344 if ((ahc->features & AHC_QUEUE_REGS) != 0) { 4345 ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext); 4346 } else { 4347 ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext); 4348 } 4349 4350 /* 4351 * Search waiting for selection list. 
4352 */ 4353 curscbptr = ahc_inb(ahc, SCBPTR); 4354 next = ahc_inb(ahc, WAITING_SCBH); /* Start at head of list. */ 4355 prev = SCB_LIST_NULL; 4356 4357 while (next != SCB_LIST_NULL) { 4358 uint8_t scb_index; 4359 4360 ahc_outb(ahc, SCBPTR, next); 4361 scb_index = ahc_inb(ahc, SCB_TAG); 4362 if (scb_index >= ahc->scb_data->numscbs) { 4363 panic("Waiting List inconsistency. " 4364 "SCB index == %d, yet numscbs == %d.", 4365 scb_index, ahc->scb_data->numscbs); 4366 } 4367 scb = ahc_lookup_scb(ahc, scb_index); 4368 if (ahc_match_scb(ahc, scb, target, channel, 4369 lun, SCB_LIST_NULL, role)) { 4370 /* 4371 * We found an scb that needs to be acted on. 4372 */ 4373 found++; 4374 switch (action) { 4375 case SEARCH_COMPLETE: 4376 { 4377 cam_status ostat; 4378 4379 next = ahc_rem_wscb(ahc, next, prev); 4380 ostat = ahc_get_transaction_status(scb); 4381 if (ostat == CAM_REQ_INPROG) 4382 ahc_set_transaction_status(scb, 4383 status); 4384 ahc_freeze_scb(scb); 4385 if ((scb->flags & SCB_ACTIVE) == 0) 4386 printf("Inactive SCB in Waiting List\n"); 4387 ahc_done(ahc, scb); 4388 break; 4389 } 4390 case SEARCH_COUNT: 4391 prev = next; 4392 next = ahc_inb(ahc, SCB_NEXT); 4393 break; 4394 case SEARCH_REMOVE: 4395 next = ahc_rem_wscb(ahc, next, prev); 4396 break; 4397 } 4398 } else { 4399 4400 prev = next; 4401 next = ahc_inb(ahc, SCB_NEXT); 4402 } 4403 } 4404 ahc_outb(ahc, SCBPTR, curscbptr); 4405 4406 /* 4407 * And lastly, the untagged holding queues. 
4408 */ 4409 i = 0; 4410 if ((ahc->flags & AHC_SCB_BTT) == 0) { 4411 4412 maxtarget = 16; 4413 if (target != CAM_TARGET_WILDCARD) { 4414 4415 i = target; 4416 if (channel == 'B') 4417 i += 8; 4418 maxtarget = i + 1; 4419 } 4420 } else { 4421 maxtarget = 0; 4422 } 4423 4424 for (; i < maxtarget; i++) { 4425 struct scb_tailq *untagged_q; 4426 struct scb *next_scb; 4427 4428 untagged_q = &(ahc->untagged_queues[i]); 4429 next_scb = TAILQ_FIRST(untagged_q); 4430 while (next_scb != NULL) { 4431 4432 scb = next_scb; 4433 next_scb = TAILQ_NEXT(scb, links.tqe); 4434 4435 /* 4436 * The head of the list may be the currently 4437 * active untagged command for a device. 4438 * We're only searching for commands that 4439 * have not been started. A transaction 4440 * marked active but still in the qinfifo 4441 * is removed by the qinfifo scanning code 4442 * above. 4443 */ 4444 if ((scb->flags & SCB_ACTIVE) != 0) 4445 continue; 4446 4447 if (ahc_match_scb(ahc, scb, target, channel, 4448 lun, SCB_LIST_NULL, role)) { 4449 /* 4450 * We found an scb that needs to be acted on. 
 */
			found++;
			switch (action) {
			case SEARCH_COMPLETE:
			{
				cam_status ostat;

				/* Only overwrite the status if the caller
				 * hasn't already modified it. */
				ostat = ahc_get_transaction_status(scb);
				if (ostat == CAM_REQ_INPROG)
					ahc_set_transaction_status(scb,
								   status);
				ahc_freeze_scb(scb);
				if ((scb->flags & SCB_ACTIVE) == 0)
					printf("Inactive SCB in untaggedQ\n");
				ahc_done(ahc, scb);
				break;
			}
			case SEARCH_REMOVE:
				TAILQ_REMOVE(untagged_q, scb,
					     links.tqe);
				break;
			case SEARCH_COUNT:
				break;
			}
		}
	}

	if (action == SEARCH_COMPLETE)
		ahc_release_untagged_queues(ahc);
	return (found);
}

/*
 * Walk the sequencer's disconnected SCB list looking for SCBs that
 * match target/channel/lun/tag.  Matching entries are counted and,
 * if 'remove' is set, unlinked from the list.  'stop_on_first'
 * terminates the scan at the first match.  Returns the match count.
 * The caller must have the sequencer paused; SCBPTR is restored only
 * when 'save_state' is non-zero.
 */
int
ahc_search_disc_list(struct ahc_softc *ahc, int target, char channel,
		     int lun, u_int tag, int stop_on_first, int remove,
		     int save_state)
{
	struct scb *scbp;
	u_int next;
	u_int prev;
	u_int count;
	u_int active_scb;

	count = 0;
	next = ahc_inb(ahc, DISCONNECTED_SCBH);
	prev = SCB_LIST_NULL;

	if (save_state) {
		/* restore this when we're done */
		active_scb = ahc_inb(ahc, SCBPTR);
	} else
		/* Silence compiler */
		active_scb = SCB_LIST_NULL;

	while (next != SCB_LIST_NULL) {
		u_int scb_index;

		ahc_outb(ahc, SCBPTR, next);
		scb_index = ahc_inb(ahc, SCB_TAG);
		if (scb_index >= ahc->scb_data->numscbs) {
			panic("Disconnected List inconsistency. "
			      "SCB index == %d, yet numscbs == %d.",
			      scb_index, ahc->scb_data->numscbs);
		}

		/* A node linked to itself means corrupted list state. */
		if (next == prev) {
			panic("Disconnected List Loop. "
			      "cur SCBPTR == %x, prev SCBPTR == %x.",
			      next, prev);
		}
		scbp = ahc_lookup_scb(ahc, scb_index);
		if (ahc_match_scb(ahc, scbp, target, channel, lun,
				  tag, ROLE_INITIATOR)) {
			count++;
			if (remove) {
				/* prev stays put; the removed node's
				 * successor becomes the new 'next'. */
				next =
				    ahc_rem_scb_from_disc_list(ahc, prev, next);
			} else {
				prev = next;
				next = ahc_inb(ahc, SCB_NEXT);
			}
			if (stop_on_first)
				break;
		} else {
			prev = next;
			next = ahc_inb(ahc, SCB_NEXT);
		}
	}
	if (save_state)
		ahc_outb(ahc, SCBPTR, active_scb);
	return (count);
}

/*
 * Remove an SCB from the on chip list of disconnected transactions.
 * This is empty/unused if we are not performing SCB paging.
 * Returns the successor of the removed entry so callers can continue
 * their traversal.
 */
static u_int
ahc_rem_scb_from_disc_list(struct ahc_softc *ahc, u_int prev, u_int scbptr)
{
	u_int next;

	ahc_outb(ahc, SCBPTR, scbptr);
	next = ahc_inb(ahc, SCB_NEXT);

	/* Clear the control byte so the sequencer ignores this slot. */
	ahc_outb(ahc, SCB_CONTROL, 0);

	ahc_add_curscb_to_free_list(ahc);

	/* Unlink: patch the predecessor, or the list head if first. */
	if (prev != SCB_LIST_NULL) {
		ahc_outb(ahc, SCBPTR, prev);
		ahc_outb(ahc, SCB_NEXT, next);
	} else
		ahc_outb(ahc, DISCONNECTED_SCBH, next);

	return (next);
}

/*
 * Add the SCB as selected by SCBPTR onto the on chip list of
 * free hardware SCBs.  This list is empty/unused if we are not
 * performing SCB paging.
 */
static void
ahc_add_curscb_to_free_list(struct ahc_softc *ahc)
{
	/*
	 * Invalidate the tag so that our abort
	 * routines don't think it's active.
	 */
	ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL);

	/* Push onto the head of the hardware free list. */
	ahc_outb(ahc, SCB_NEXT, ahc_inb(ahc, FREE_SCBH));
	ahc_outb(ahc, FREE_SCBH, ahc_inb(ahc, SCBPTR));
}

/*
 * Manipulate the waiting for selection list and return the
 * scb that follows the one that we remove.
 */
static u_int
ahc_rem_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev)
{
	u_int curscb, next;

	/*
	 * Select the SCB we want to abort and
	 * pull the next pointer out of it.
	 */
	curscb = ahc_inb(ahc, SCBPTR);
	ahc_outb(ahc, SCBPTR, scbpos);
	next = ahc_inb(ahc, SCB_NEXT);

	/* Clear the necessary fields */
	ahc_outb(ahc, SCB_CONTROL, 0);

	ahc_add_curscb_to_free_list(ahc);

	/* update the waiting list */
	if (prev == SCB_LIST_NULL) {
		/* First in the list */
		ahc_outb(ahc, WAITING_SCBH, next);

		/*
		 * Ensure we aren't attempting to perform
		 * selection for this entry.
		 */
		ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
	} else {
		/*
		 * Select the scb that pointed to us
		 * and update its next pointer.
		 */
		ahc_outb(ahc, SCBPTR, prev);
		ahc_outb(ahc, SCB_NEXT, next);
	}

	/*
	 * Point us back at the original scb position.
	 */
	ahc_outb(ahc, SCBPTR, curscb);
	return next;
}

/******************************** Error Handling ******************************/
/*
 * Abort all SCBs that match the given description (target/channel/lun/tag),
 * setting their status to the passed in status if the status has not already
 * been modified from CAM_REQ_INPROG.  This routine assumes that the sequencer
 * is paused before it is called.
 */
int
ahc_abort_scbs(struct ahc_softc *ahc, int target, char channel,
	       int lun, u_int tag, role_t role, uint32_t status)
{
	struct scb *scbp;
	struct scb *scbp_next;
	u_int active_scb;
	int i, j;
	int maxtarget;
	int minlun;
	int maxlun;

	int found;

	/*
	 * Don't attempt to run any queued untagged transactions
	 * until we are done with the abort process.
 */
	ahc_freeze_untagged_queues(ahc);

	/* restore this when we're done */
	active_scb = ahc_inb(ahc, SCBPTR);

	/* First flush matching commands still sitting in the qinfifo. */
	found = ahc_search_qinfifo(ahc, target, channel, lun, SCB_LIST_NULL,
				   role, CAM_REQUEUE_REQ, SEARCH_COMPLETE);

	/*
	 * Clean out the busy target table for any untagged commands.
	 */
	i = 0;
	maxtarget = 16;
	if (target != CAM_TARGET_WILDCARD) {
		i = target;
		if (channel == 'B')
			i += 8;
		maxtarget = i + 1;
	}

	if (lun == CAM_LUN_WILDCARD) {

		/*
		 * Unless we are using an SCB based
		 * busy targets table, there is only
		 * one table entry for all luns of
		 * a target.
		 */
		minlun = 0;
		maxlun = 1;
		if ((ahc->flags & AHC_SCB_BTT) != 0)
			maxlun = AHC_NUM_LUNS;
	} else {
		minlun = lun;
		maxlun = lun + 1;
	}

	for (;i < maxtarget; i++) {
		for (j = minlun;j < maxlun; j++)
			ahc_index_busy_tcl(ahc, BUILD_TCL(i << 4, j),
					   /*unbusy*/TRUE);
	}

	/*
	 * Go through the disconnected list and remove any entries we
	 * have queued for completion, 0'ing their control byte too.
	 * We save the active SCB and restore it ourselves, so there
	 * is no reason for this search to restore it too.
	 */
	ahc_search_disc_list(ahc, target, channel, lun, tag,
			     /*stop_on_first*/FALSE, /*remove*/TRUE,
			     /*save_state*/FALSE);

	/*
	 * Go through the hardware SCB array looking for commands that
	 * were active but not on any list.
	 */
	for(i = 0; i < ahc->scb_data->maxhscbs; i++) {
		u_int scbid;

		ahc_outb(ahc, SCBPTR, i);
		scbid = ahc_inb(ahc, SCB_TAG);
		scbp = ahc_lookup_scb(ahc, scbid);
		if (scbp != NULL
		 && ahc_match_scb(ahc, scbp, target, channel, lun, tag, role))
			ahc_add_curscb_to_free_list(ahc);
	}

	/*
	 * Go through the pending CCB list and look for
	 * commands for this target that are still active.
	 * These are other tagged commands that were
	 * disconnected when the reset occurred.
	 */
	scbp_next = LIST_FIRST(&ahc->pending_scbs);
	while (scbp_next != NULL) {
		scbp = scbp_next;
		/* Fetch the successor first; ahc_done() unlinks scbp. */
		scbp_next = LIST_NEXT(scbp, pending_links);
		if (ahc_match_scb(ahc, scbp, target, channel, lun, tag, role)) {
			cam_status ostat;

			ostat = ahc_get_transaction_status(scbp);
			if (ostat == CAM_REQ_INPROG)
				ahc_set_transaction_status(scbp, status);
			ahc_freeze_scb(scbp);
			if ((scbp->flags & SCB_ACTIVE) == 0)
				printf("Inactive SCB on pending list\n");
			ahc_done(ahc, scbp);
			found++;
		}
	}
	ahc_outb(ahc, SCBPTR, active_scb);
	ahc_platform_abort_scbs(ahc, target, channel, lun, tag, role, status);
	ahc_release_untagged_queues(ahc);
	return found;
}

/*
 * Assert SCSIRSTO long enough to reset the current bus, with reset
 * interrupts masked for the duration so we don't take our own reset.
 */
static void
ahc_reset_current_bus(struct ahc_softc *ahc)
{
	uint8_t scsiseq;

	ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENSCSIRST);
	scsiseq = ahc_inb(ahc, SCSISEQ);
	ahc_outb(ahc, SCSISEQ, scsiseq | SCSIRSTO);
	ahc_delay(AHC_BUSRESET_DELAY);
	/* Turn off the bus reset */
	ahc_outb(ahc, SCSISEQ, scsiseq & ~SCSIRSTO);

	ahc_clear_intstat(ahc);

	/* Re-enable reset interrupts */
	ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) | ENSCSIRST);
}

/*
 * Reset the given channel (optionally initiating the bus reset
 * ourselves), abort all affected transactions, and notify the
 * transport layer.  Returns the number of aborted SCBs.
 */
int
ahc_reset_channel(struct ahc_softc *ahc, char channel, int initiate_reset)
{
	struct ahc_devinfo devinfo;
	u_int initiator, target, max_scsiid;
	u_int sblkctl;
	int found;
	int restart_needed;
	char cur_channel;

	ahc->pending_device = NULL;

	ahc_compile_devinfo(&devinfo,
			    CAM_TARGET_WILDCARD,
			    CAM_TARGET_WILDCARD,
			    CAM_LUN_WILDCARD,
			    channel, ROLE_UNKNOWN);
	pause_sequencer(ahc);

	/* Make sure the sequencer is in a safe location.
*/ 4796 ahc_clear_critical_section(ahc); 4797 4798 /* 4799 * Run our command complete fifos to ensure that we perform 4800 * completion processing on any commands that 'completed' 4801 * before the reset occurred. 4802 */ 4803 ahc_run_qoutfifo(ahc); 4804#if AHC_TARGET_MODE 4805 if ((ahc->flags & AHC_TARGETMODE) != 0) { 4806 ahc_run_tqinfifo(ahc, /*paused*/TRUE); 4807 } 4808#endif 4809 4810 /* 4811 * Reset the bus if we are initiating this reset 4812 */ 4813 sblkctl = ahc_inb(ahc, SBLKCTL); 4814 cur_channel = 'A'; 4815 if ((ahc->features & AHC_TWIN) != 0 4816 && ((sblkctl & SELBUSB) != 0)) 4817 cur_channel = 'B'; 4818 if (cur_channel != channel) { 4819 /* Case 1: Command for another bus is active 4820 * Stealthily reset the other bus without 4821 * upsetting the current bus. 4822 */ 4823 ahc_outb(ahc, SBLKCTL, sblkctl ^ SELBUSB); 4824 ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE); 4825 ahc_outb(ahc, SCSISEQ, 4826 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP)); 4827 if (initiate_reset) 4828 ahc_reset_current_bus(ahc); 4829 ahc_clear_intstat(ahc); 4830 ahc_outb(ahc, SBLKCTL, sblkctl); 4831 restart_needed = FALSE; 4832 } else { 4833 /* Case 2: A command from this bus is active or we're idle */ 4834 ahc_clear_msg_state(ahc); 4835 ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE); 4836 ahc_outb(ahc, SCSISEQ, 4837 ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP)); 4838 if (initiate_reset) 4839 ahc_reset_current_bus(ahc); 4840 ahc_clear_intstat(ahc); 4841 restart_needed = TRUE; 4842 } 4843 4844 /* 4845 * Clean up all the state information for the 4846 * pending transactions on this bus. 4847 */ 4848 found = ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, channel, 4849 CAM_LUN_WILDCARD, SCB_LIST_NULL, 4850 ROLE_UNKNOWN, CAM_SCSI_BUS_RESET); 4851 4852 max_scsiid = (ahc->features & AHC_WIDE) ? 15 : 7; 4853 4854#ifdef AHC_TARGET_MODE 4855 /* 4856 * Send an immediate notify ccb to all target more peripheral 4857 * drivers affected by this action. 
 */
	for (target = 0; target <= max_scsiid; target++) {
		struct tmode_tstate* tstate;
		u_int lun;

		tstate = ahc->enabled_targets[target];
		if (tstate == NULL)
			continue;
		for (lun = 0; lun <= 7; lun++) {
			struct tmode_lstate* lstate;

			lstate = tstate->enabled_luns[lun];
			if (lstate == NULL)
				continue;

			ahc_queue_lstate_event(ahc, lstate, CAM_TARGET_WILDCARD,
					       EVENT_TYPE_BUS_RESET, /*arg*/0);
			ahc_send_lstate_events(ahc, lstate);
		}
	}
#endif
	/* Notify the XPT that a bus reset occurred */
	ahc_send_async(ahc, devinfo.channel, CAM_TARGET_WILDCARD,
		       CAM_LUN_WILDCARD, AC_BUS_RESET);

	/*
	 * Revert to async/narrow transfers until we renegotiate.
	 */
	for (target = 0; target <= max_scsiid; target++) {

		if (ahc->enabled_targets[target] == NULL)
			continue;
		for (initiator = 0; initiator <= max_scsiid; initiator++) {
			struct ahc_devinfo devinfo;

			ahc_compile_devinfo(&devinfo, target, initiator,
					    CAM_LUN_WILDCARD,
					    channel, ROLE_UNKNOWN);
			ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
				      AHC_TRANS_CUR, /*paused*/TRUE);
			ahc_set_syncrate(ahc, &devinfo, /*syncrate*/NULL,
					 /*period*/0, /*offset*/0,
					 /*ppr_options*/0, AHC_TRANS_CUR,
					 /*paused*/TRUE);
		}
	}

	if (restart_needed)
		restart_sequencer(ahc);
	else
		unpause_sequencer(ahc);
	return found;
}


/***************************** Residual Processing ****************************/
/*
 * Calculate the residual for a just completed SCB.
 */
static void
ahc_calc_residual(struct scb *scb)
{
	struct hardware_scb *hscb;
	struct status_pkt *spkt;
	uint32_t resid;

	/*
	 * 5 cases.
	 * 1) No residual.
	 *    SG_RESID_VALID clear in sgptr.
	 * 2) Transferless command
	 * 3) Never performed any transfers.
	 *    sgptr has SG_FULL_RESID set.
	 * 4) No residual but target did not
	 *    save data pointers after the
	 *    last transfer, so sgptr was
	 *    never updated.
	 * 5) We have a partial residual.
	 *    Use residual_sgptr to determine
	 *    where we are.
	 */

	hscb = scb->hscb;
	if ((hscb->sgptr & SG_RESID_VALID) == 0)
		/* Case 1 */
		return;
	/* Consume the valid bit so we never process this residual twice. */
	hscb->sgptr &= ~SG_RESID_VALID;

	if ((hscb->sgptr & SG_LIST_NULL) != 0)
		/* Case 2 */
		return;

	spkt = &hscb->shared_data.status;
	if ((hscb->sgptr & SG_FULL_RESID) != 0) {
		/* Case 3 */
		resid = ahc_get_transfer_length(scb);
	} else if ((spkt->residual_sg_ptr & SG_LIST_NULL) != 0) {
		/* Case 4 */
		return;
	} else if ((spkt->residual_sg_ptr & ~SG_PTR_MASK) != 0) {
		panic("Bogus resid sgptr value 0x%x\n", spkt->residual_sg_ptr);
	} else {
		struct ahc_dma_seg *sg;

		/*
		 * Remainder of the SG where the transfer
		 * stopped.
		 */
		resid = spkt->residual_datacnt & AHC_SG_LEN_MASK;
		sg = ahc_sg_bus_to_virt(scb,
					spkt->residual_sg_ptr & SG_PTR_MASK);

		/* The residual sg_ptr always points to the next sg */
		sg--;

		/*
		 * Add up the contents of all residual
		 * SG segments that are after the SG where
		 * the transfer stopped.
		 */
		while ((sg->len & AHC_DMA_LAST_SEG) == 0) {
			sg++;
			resid += sg->len & AHC_SG_LEN_MASK;
		}
	}
	if ((scb->flags & SCB_SENSE) == 0)
		ahc_set_residual(scb, resid);
	else
		ahc_set_sense_residual(scb, resid);

	/*
	 * NOTE(review): this block references 'ahc' and 'ahc_debug',
	 * neither of which is in scope in this function (the only
	 * parameter is 'scb'), so it presumably fails to compile when
	 * AHC_DEBUG is defined -- confirm and plumb the softc through.
	 */
#ifdef AHC_DEBUG
	if (ahc_debug & AHC_SHOWMISC) {
		ahc_print_path(ahc, scb);
		printf("Handled Residual of %d bytes\n", resid);
	}
#endif
}

/******************************* Target Mode **********************************/
#ifdef AHC_TARGET_MODE
/*
 * Add a target mode event to this lun's queue
 */
static void
ahc_queue_lstate_event(struct ahc_softc *ahc, struct tmode_lstate *lstate,
		       u_int initiator_id, u_int event_type, u_int event_arg)
{
	struct ahc_tmode_event *event;
	int pending;

	xpt_freeze_devq(lstate->path, /*count*/1);
	/* Number of events currently buffered in the ring. */
	if (lstate->event_w_idx >= lstate->event_r_idx)
		pending = lstate->event_w_idx - lstate->event_r_idx;
	else
		pending = AHC_TMODE_EVENT_BUFFER_SIZE + 1
			- (lstate->event_r_idx - lstate->event_w_idx);

	if (event_type == EVENT_TYPE_BUS_RESET
	 || event_type == MSG_BUS_DEV_RESET) {
		/*
		 * Any earlier events are irrelevant, so reset our buffer.
		 * This has the effect of allowing us to deal with reset
		 * floods (an external device holding down the reset line)
		 * without losing the event that is really interesting.
 */
		lstate->event_r_idx = 0;
		lstate->event_w_idx = 0;
		xpt_release_devq(lstate->path, pending, /*runqueue*/FALSE);
	}

	/* Ring full: drop the oldest event to make room for this one. */
	if (pending == AHC_TMODE_EVENT_BUFFER_SIZE) {
		xpt_print_path(lstate->path);
		printf("immediate event %x:%x lost\n",
		       lstate->event_buffer[lstate->event_r_idx].event_type,
		       lstate->event_buffer[lstate->event_r_idx].event_arg);
		lstate->event_r_idx++;
		if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
			lstate->event_r_idx = 0;
		xpt_release_devq(lstate->path, /*count*/1, /*runqueue*/FALSE);
	}

	event = &lstate->event_buffer[lstate->event_w_idx];
	event->initiator_id = initiator_id;
	event->event_type = event_type;
	event->event_arg = event_arg;
	lstate->event_w_idx++;
	if (lstate->event_w_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
		lstate->event_w_idx = 0;
}

/*
 * Send any target mode events queued up waiting
 * for immediate notify resources.
 */
void
ahc_send_lstate_events(struct ahc_softc *ahc, struct tmode_lstate *lstate)
{
	struct ccb_hdr *ccbh;
	struct ccb_immed_notify *inot;

	/* Drain events while both an event and a notify CCB are available. */
	while (lstate->event_r_idx != lstate->event_w_idx
	    && (ccbh = SLIST_FIRST(&lstate->immed_notifies)) != NULL) {
		struct ahc_tmode_event *event;

		event = &lstate->event_buffer[lstate->event_r_idx];
		SLIST_REMOVE_HEAD(&lstate->immed_notifies, sim_links.sle);
		inot = (struct ccb_immed_notify *)ccbh;
		switch (event->event_type) {
		case EVENT_TYPE_BUS_RESET:
			ccbh->status = CAM_SCSI_BUS_RESET|CAM_DEV_QFRZN;
			break;
		default:
			ccbh->status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
			inot->message_args[0] = event->event_type;
			inot->message_args[1] = event->event_arg;
			break;
		}
		inot->initiator_id = event->initiator_id;
		inot->sense_len = 0;
		xpt_done((union ccb *)inot);
		lstate->event_r_idx++;
		if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
			lstate->event_r_idx = 0;
	}
}
#endif

/******************** Sequencer Program Patching/Download *********************/

#ifdef AHC_DUMP_SEQ
/*
 * Read the sequencer program back out of SEQRAM and print each
 * 32-bit instruction word.  Debug aid only.
 */
void
ahc_dumpseq(struct ahc_softc* ahc)
{
	int i;
	int max_prog;

	/* Program store size varies by chip generation. */
	if ((ahc->chip & AHC_BUS_MASK) < AHC_PCI)
		max_prog = 448;
	else if ((ahc->features & AHC_ULTRA2) != 0)
		max_prog = 768;
	else
		max_prog = 512;

	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
	ahc_outb(ahc, SEQADDR0, 0);
	ahc_outb(ahc, SEQADDR1, 0);
	for (i = 0; i < max_prog; i++) {
		uint8_t ins_bytes[4];

		ahc_insb(ahc, SEQRAM, ins_bytes, 4);
		printf("0x%08x\n", ins_bytes[0] << 24
				 | ins_bytes[1] << 16
				 | ins_bytes[2] << 8
				 | ins_bytes[3]);
	}
}
#endif

/*
 * Download the sequencer program, applying firmware patches and
 * recording the critical section boundaries of the instructions
 * that actually get downloaded.
 */
static void
ahc_loadseq(struct ahc_softc *ahc)
{
	struct cs cs_table[num_critical_sections];
	u_int begin_set[num_critical_sections];
	u_int end_set[num_critical_sections];
	struct patch *cur_patch;
	u_int cs_count;
	u_int cur_cs;
	u_int i;
	int downloaded;
	u_int skip_addr;
	u_int sg_prefetch_cnt;
	uint8_t download_consts[7];

	/*
	 * Start out with 0 critical sections
	 * that apply to this firmware load.
 */
	cs_count = 0;
	cur_cs = 0;
	memset(begin_set, 0, sizeof(begin_set));
	memset(end_set, 0, sizeof(end_set));

	/* Setup downloadable constant table */
	download_consts[QOUTFIFO_OFFSET] = 0;
	if (ahc->targetcmds != NULL)
		download_consts[QOUTFIFO_OFFSET] += 32;
	download_consts[QINFIFO_OFFSET] = download_consts[QOUTFIFO_OFFSET] + 1;
	download_consts[CACHESIZE_MASK] = ahc->pci_cachesize - 1;
	download_consts[INVERTED_CACHESIZE_MASK] = ~(ahc->pci_cachesize - 1);
	/* Prefetch at least two S/G segments' worth of data at a time. */
	sg_prefetch_cnt = ahc->pci_cachesize;
	if (sg_prefetch_cnt < (2 * sizeof(struct ahc_dma_seg)))
		sg_prefetch_cnt = 2 * sizeof(struct ahc_dma_seg);
	download_consts[SG_PREFETCH_CNT] = sg_prefetch_cnt;
	download_consts[SG_PREFETCH_ALIGN_MASK] = ~(sg_prefetch_cnt - 1);
	download_consts[SG_PREFETCH_ADDR_MASK] = (sg_prefetch_cnt - 1);

	cur_patch = patches;
	downloaded = 0;
	skip_addr = 0;
	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
	ahc_outb(ahc, SEQADDR0, 0);
	ahc_outb(ahc, SEQADDR1, 0);

	for (i = 0; i < sizeof(seqprog)/4; i++) {
		if (ahc_check_patch(ahc, &cur_patch, i, &skip_addr) == 0) {
			/*
			 * Don't download this instruction as it
			 * is in a patch that was removed.
			 */
			continue;
		}
		/*
		 * Move through the CS table until we find a CS
		 * that might apply to this instruction.
		 */
		for (; cur_cs < num_critical_sections; cur_cs++) {
			if (critical_sections[cur_cs].end <= i) {
				if (begin_set[cs_count] == TRUE
				 && end_set[cs_count] == FALSE) {
					cs_table[cs_count].end = downloaded;
					end_set[cs_count] = TRUE;
					cs_count++;
				}
				continue;
			}
			if (critical_sections[cur_cs].begin <= i
			 && begin_set[cs_count] == FALSE) {
				/* Record boundaries in *downloaded*
				 * (post-patch) instruction addresses. */
				cs_table[cs_count].begin = downloaded;
				begin_set[cs_count] = TRUE;
			}
			break;
		}
		ahc_download_instr(ahc, i, download_consts);
		downloaded++;
	}

	ahc->num_critical_sections = cs_count;
	if (cs_count != 0) {

		cs_count *= sizeof(struct cs);
		ahc->critical_sections = malloc(cs_count, M_DEVBUF, M_NOWAIT);
		if (ahc->critical_sections == NULL)
			panic("ahc_loadseq: Could not malloc");
		memcpy(ahc->critical_sections, cs_table, cs_count);
	}
	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE);
	restart_sequencer(ahc);

	if (bootverbose)
		printf(" %d instructions downloaded\n", downloaded);
}

/*
 * Walk the patch table for patches beginning at start_instr,
 * accepting or rejecting each via its patch_func.  Updates
 * *start_patch and *skip_addr; returns 0 if the instruction at
 * start_instr should be skipped, 1 if it should be downloaded.
 */
static int
ahc_check_patch(struct ahc_softc *ahc, struct patch **start_patch,
		u_int start_instr, u_int *skip_addr)
{
	struct patch *cur_patch;
	struct patch *last_patch;
	u_int num_patches;

	num_patches = sizeof(patches)/sizeof(struct patch);
	last_patch = &patches[num_patches];
	cur_patch = *start_patch;

	while (cur_patch < last_patch && start_instr == cur_patch->begin) {

		if (cur_patch->patch_func(ahc) == 0) {

			/* Start rejecting code */
			*skip_addr = start_instr + cur_patch->skip_instr;
			cur_patch += cur_patch->skip_patch;
		} else {
			/* Accepted this patch.  Advance to the next
			 * one and wait for our instruction pointer to
			 * hit this point.
			 */
			cur_patch++;
		}
	}

	*start_patch = cur_patch;
	if (start_instr < *skip_addr)
		/* Still skipping */
		return (0);

	return (1);
}

/*
 * Download a single sequencer instruction, substituting downloadable
 * constants, relocating branch targets around removed patches, and
 * applying chip-specific encoding (parity on Ultra2, compressed
 * format on older sequencers).
 */
static void
ahc_download_instr(struct ahc_softc *ahc, u_int instrptr, uint8_t *dconsts)
{
	union ins_formats instr;
	struct ins_format1 *fmt1_ins;
	struct ins_format3 *fmt3_ins;
	u_int opcode;

	/* Structure copy */
	instr = *(union ins_formats*)&seqprog[instrptr * 4];

	/* seqprog is stored little-endian; byte-swap on BE hosts. */
#if BYTE_ORDER == BIG_ENDIAN
	opcode = instr.format.bytes[0];
	instr.format.bytes[0] = instr.format.bytes[3];
	instr.format.bytes[3] = opcode;
	opcode = instr.format.bytes[1];
	instr.format.bytes[1] = instr.format.bytes[2];
	instr.format.bytes[2] = opcode;
#endif

	fmt1_ins = &instr.format1;
	fmt3_ins = NULL;

	/* Pull the opcode */
	opcode = instr.format1.opcode;
	switch (opcode) {
	case AIC_OP_JMP:
	case AIC_OP_JC:
	case AIC_OP_JNC:
	case AIC_OP_CALL:
	case AIC_OP_JNE:
	case AIC_OP_JNZ:
	case AIC_OP_JE:
	case AIC_OP_JZ:
	{
		struct patch *cur_patch;
		int address_offset;
		u_int address;
		u_int skip_addr;
		u_int i;

		fmt3_ins = &instr.format3;
		address_offset = 0;
		address = fmt3_ins->address;
		cur_patch = patches;
		skip_addr = 0;

		/*
		 * Count instructions skipped by rejected patches below
		 * the branch target so the target can be relocated.
		 */
		for (i = 0; i < address;) {

			ahc_check_patch(ahc, &cur_patch, i, &skip_addr);

			if (skip_addr > i) {
				int end_addr;

				end_addr = MIN(address, skip_addr);
				address_offset += end_addr - i;
				i = skip_addr;
			} else {
				i++;
			}
		}
		address -= address_offset;
		fmt3_ins->address = address;
		/* FALLTHROUGH */
	}
	case AIC_OP_OR:
	case AIC_OP_AND:
	case AIC_OP_XOR:
	case AIC_OP_ADD:
	case AIC_OP_ADC:
	case AIC_OP_BMOV:
		/* The assembler sets parity to flag a downloadable
		 * constant reference in the immediate field. */
		if (fmt1_ins->parity != 0) {
			fmt1_ins->immediate = dconsts[fmt1_ins->immediate];
		}
5320 fmt1_ins->parity = 0; 5321 /* FALLTHROUGH */ 5322 case AIC_OP_ROL: 5323 if ((ahc->features & AHC_ULTRA2) != 0) { 5324 int i, count; 5325 5326 /* Calculate odd parity for the instruction */ 5327 for (i = 0, count = 0; i < 31; i++) { 5328 uint32_t mask; 5329 5330 mask = 0x01 << i; 5331 if ((instr.integer & mask) != 0) 5332 count++; 5333 } 5334 if ((count & 0x01) == 0) 5335 instr.format1.parity = 1; 5336 } else { 5337 /* Compress the instruction for older sequencers */ 5338 if (fmt3_ins != NULL) { 5339 instr.integer = 5340 fmt3_ins->immediate 5341 | (fmt3_ins->source << 8) 5342 | (fmt3_ins->address << 16) 5343 | (fmt3_ins->opcode << 25); 5344 } else { 5345 instr.integer = 5346 fmt1_ins->immediate 5347 | (fmt1_ins->source << 8) 5348 | (fmt1_ins->destination << 16) 5349 | (fmt1_ins->ret << 24) 5350 | (fmt1_ins->opcode << 25); 5351 } 5352 } 5353#if BYTE_ORDER == BIG_ENDIAN 5354 opcode = instr.format.bytes[0]; 5355 instr.format.bytes[0] = instr.format.bytes[3]; 5356 instr.format.bytes[3] = opcode; 5357 opcode = instr.format.bytes[1]; 5358 instr.format.bytes[1] = instr.format.bytes[2]; 5359 instr.format.bytes[2] = opcode; 5360#endif 5361 ahc_outsb(ahc, SEQRAM, instr.bytes, 4); 5362 break; 5363 default: 5364 panic("Unknown opcode encountered in seq program"); 5365 break; 5366 } 5367} 5368 5369void 5370ahc_dump_card_state(struct ahc_softc *ahc) 5371{ 5372 struct scb *scb; 5373 struct scb_tailq *untagged_q; 5374 int target; 5375 int maxtarget; 5376 int i; 5377 uint8_t qinpos; 5378 uint8_t qintail; 5379 uint8_t qoutpos; 5380 uint8_t scb_index; 5381 uint8_t saved_scbptr; 5382 5383 saved_scbptr = ahc_inb(ahc, SCBPTR); 5384 5385 printf("SCB count = %d\n", ahc->scb_data->numscbs); 5386 /* QINFIFO */ 5387 printf("QINFIFO entries: "); 5388 qinpos = ahc_inb(ahc, QINPOS); 5389 qintail = ahc->qinfifonext; 5390 while (qinpos != qintail) { 5391 printf("%d ", ahc->qinfifo[qinpos]); 5392 qinpos++; 5393 } 5394 printf("\n"); 5395 5396 printf("Waiting Queue entries: "); 5397 scb_index = 
ahc_inb(ahc, WAITING_SCBH); 5398 i = 0; 5399 while (scb_index != SCB_LIST_NULL && i++ < 256) { 5400 ahc_outb(ahc, SCBPTR, scb_index); 5401 printf("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG)); 5402 scb_index = ahc_inb(ahc, SCB_NEXT); 5403 } 5404 printf("\n"); 5405 5406 printf("Disconnected Queue entries: "); 5407 scb_index = ahc_inb(ahc, DISCONNECTED_SCBH); 5408 i = 0; 5409 while (scb_index != SCB_LIST_NULL && i++ < 256) { 5410 ahc_outb(ahc, SCBPTR, scb_index); 5411 printf("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG)); 5412 scb_index = ahc_inb(ahc, SCB_NEXT); 5413 } 5414 printf("\n"); 5415 5416 printf("QOUTFIFO entries: "); 5417 qoutpos = ahc->qoutfifonext; 5418 i = 0; 5419 while (ahc->qoutfifo[qoutpos] != SCB_LIST_NULL && i++ < 256) { 5420 printf("%d ", ahc->qoutfifo[qoutpos]); 5421 qoutpos++; 5422 } 5423 printf("\n"); 5424 5425 printf("Sequencer Free SCB List: "); 5426 scb_index = ahc_inb(ahc, FREE_SCBH); 5427 i = 0; 5428 while (scb_index != SCB_LIST_NULL && i++ < 256) { 5429 ahc_outb(ahc, SCBPTR, scb_index); 5430 printf("%d ", scb_index); 5431 scb_index = ahc_inb(ahc, SCB_NEXT); 5432 } 5433 printf("\n"); 5434 5435 printf("Pending list: "); 5436 i = 0; 5437 LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) { 5438 if (i++ > 256) 5439 break; 5440 printf("%d ", scb->hscb->tag); 5441 } 5442 printf("\n"); 5443 5444 printf("Kernel Free SCB list: "); 5445 i = 0; 5446 SLIST_FOREACH(scb, &ahc->scb_data->free_scbs, links.sle) { 5447 if (i++ > 256) 5448 break; 5449 printf("%d ", scb->hscb->tag); 5450 } 5451 printf("\n"); 5452 5453 maxtarget = (ahc->features & (AHC_WIDE|AHC_TWIN)) ? 
15 : 7; 5454 for (target = 0; target <= maxtarget; target++) { 5455 untagged_q = &ahc->untagged_queues[0]; 5456 if (TAILQ_FIRST(untagged_q) == NULL) 5457 continue; 5458 printf("Untagged Q(%d): ", target); 5459 i = 0; 5460 TAILQ_FOREACH(scb, untagged_q, links.tqe) { 5461 if (i++ > 256) 5462 break; 5463 printf("%d ", scb->hscb->tag); 5464 } 5465 printf("\n"); 5466 } 5467 5468 ahc_platform_dump_card_state(ahc); 5469 ahc_outb(ahc, SCBPTR, saved_scbptr); 5470} 5471 5472/************************* Target Mode ****************************************/ 5473#ifdef AHC_TARGET_MODE 5474cam_status 5475ahc_find_tmode_devs(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb, 5476 struct tmode_tstate **tstate, struct tmode_lstate **lstate, 5477 int notfound_failure) 5478{ 5479 u_int our_id; 5480 5481 /* 5482 * If we are not configured for target mode, someone 5483 * is really confused to be sending this to us. 5484 */ 5485 if ((ahc->flags & AHC_TARGETMODE) == 0) 5486 return (CAM_REQ_INVALID); 5487 5488 /* Range check target and lun */ 5489 5490 /* 5491 * Handle the 'black hole' device that sucks up 5492 * requests to unattached luns on enabled targets. 5493 */ 5494 if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD 5495 && ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) { 5496 *tstate = NULL; 5497 *lstate = ahc->black_hole; 5498 } else { 5499 u_int max_id; 5500 5501 if (cam_sim_bus(sim) == 0) 5502 our_id = ahc->our_id; 5503 else 5504 our_id = ahc->our_id_b; 5505 5506 max_id = (ahc->features & AHC_WIDE) ? 15 : 7; 5507 if (ccb->ccb_h.target_id > max_id) 5508 return (CAM_TID_INVALID); 5509 5510 if (ccb->ccb_h.target_lun > 7) 5511 return (CAM_LUN_INVALID); 5512 5513 if (ccb->ccb_h.target_id != our_id) { 5514 if ((ahc->features & AHC_MULTI_TID) != 0) { 5515 /* 5516 * Only allow additional targets if 5517 * the initiator role is disabled. 5518 * The hardware cannot handle a re-select-in 5519 * on the initiator id during a re-select-out 5520 * on a different target id. 
5521 */ 5522 if ((ahc->flags & AHC_INITIATORMODE) != 0) 5523 return (CAM_TID_INVALID); 5524 } else { 5525 /* 5526 * Only allow our target id to change 5527 * if the initiator role is not configured 5528 * and there are no enabled luns which 5529 * are attached to the currently registered 5530 * scsi id. 5531 */ 5532 if ((ahc->flags & AHC_INITIATORMODE) != 0 5533 || ahc->enabled_luns > 0) 5534 return (CAM_TID_INVALID); 5535 } 5536 } 5537 5538 *tstate = ahc->enabled_targets[ccb->ccb_h.target_id]; 5539 *lstate = NULL; 5540 if (*tstate != NULL) 5541 *lstate = 5542 (*tstate)->enabled_luns[ccb->ccb_h.target_lun]; 5543 } 5544 5545 if (notfound_failure != 0 && *lstate == NULL) 5546 return (CAM_PATH_INVALID); 5547 5548 return (CAM_REQ_CMP); 5549} 5550 5551void 5552ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb) 5553{ 5554 struct tmode_tstate *tstate; 5555 struct tmode_lstate *lstate; 5556 struct ccb_en_lun *cel; 5557 cam_status status; 5558 u_int target; 5559 u_int lun; 5560 u_int target_mask; 5561 u_long s; 5562 char channel; 5563 5564 status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate, &lstate, 5565 /* notfound_failure*/FALSE); 5566 5567 if (status != CAM_REQ_CMP) { 5568 ccb->ccb_h.status = status; 5569 return; 5570 } 5571 5572 cel = &ccb->cel; 5573 target = ccb->ccb_h.target_id; 5574 lun = ccb->ccb_h.target_lun; 5575 channel = SIM_CHANNEL(ahc, sim); 5576 target_mask = 0x01 << target; 5577 if (channel == 'B') 5578 target_mask <<= 8; 5579 5580 if (cel->enable != 0) { 5581 u_int scsiseq; 5582 5583 /* Are we already enabled?? */ 5584 if (lstate != NULL) { 5585 xpt_print_path(ccb->ccb_h.path); 5586 printf("Lun already enabled\n"); 5587 ccb->ccb_h.status = CAM_LUN_ALRDY_ENA; 5588 return; 5589 } 5590 5591 if (cel->grp6_len != 0 5592 || cel->grp7_len != 0) { 5593 /* 5594 * Don't (yet?) support vendor 5595 * specific commands. 
			 */
			ccb->ccb_h.status = CAM_REQ_INVALID;
			printf("Non-zero Group Codes\n");
			return;
		}

		/*
		 * Seems to be okay.
		 * Setup our data structures.
		 */
		if (target != CAM_TARGET_WILDCARD && tstate == NULL) {
			tstate = ahc_alloc_tstate(ahc, target, channel);
			if (tstate == NULL) {
				xpt_print_path(ccb->ccb_h.path);
				printf("Couldn't allocate tstate\n");
				ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
				return;
			}
		}
		lstate = malloc(sizeof(*lstate), M_DEVBUF, M_NOWAIT);
		if (lstate == NULL) {
			xpt_print_path(ccb->ccb_h.path);
			printf("Couldn't allocate lstate\n");
			ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
			return;
		}
		memset(lstate, 0, sizeof(*lstate));
		status = xpt_create_path(&lstate->path, /*periph*/NULL,
					 xpt_path_path_id(ccb->ccb_h.path),
					 xpt_path_target_id(ccb->ccb_h.path),
					 xpt_path_lun_id(ccb->ccb_h.path));
		if (status != CAM_REQ_CMP) {
			free(lstate, M_DEVBUF);
			xpt_print_path(ccb->ccb_h.path);
			printf("Couldn't allocate path\n");
			ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
			return;
		}
		SLIST_INIT(&lstate->accept_tios);
		SLIST_INIT(&lstate->immed_notifies);
		/*
		 * Register the new lun with the hardware while the
		 * sequencer is paused so it cannot race with us.
		 */
		ahc_lock(ahc, &s);
		pause_sequencer(ahc);
		if (target != CAM_TARGET_WILDCARD) {
			tstate->enabled_luns[lun] = lstate;
			ahc->enabled_luns++;

			if ((ahc->features & AHC_MULTI_TID) != 0) {
				u_int targid_mask;

				/*
				 * Multi-TID capable chips select-in via the
				 * 16-bit TARGID mask; add this target's bit.
				 */
				targid_mask = ahc_inb(ahc, TARGID)
					    | (ahc_inb(ahc, TARGID + 1) << 8);

				targid_mask |= target_mask;
				ahc_outb(ahc, TARGID, targid_mask);
				ahc_outb(ahc, TARGID+1, (targid_mask >> 8));

				ahc_update_scsiid(ahc, targid_mask);
			} else {
				u_int our_id;
				char  channel;

				channel = SIM_CHANNEL(ahc, sim);
				our_id = SIM_SCSI_ID(ahc, sim);

				/*
				 * This can only happen if selections
				 * are not enabled
				 */
				if (target != our_id) {
					u_int sblkctl;
					char  cur_channel;
					int   swap;

					/*
					 * Non-multi-TID chips respond only to
					 * the single ID in SCSIID.  On twin
					 * channel adapters we may have to bank
					 * switch (via SELBUSB in SBLKCTL) to
					 * reach the other channel's SCSIID.
					 */
					sblkctl = ahc_inb(ahc, SBLKCTL);
					cur_channel = (sblkctl & SELBUSB)
						    ? 'B' : 'A';
					if ((ahc->features & AHC_TWIN) == 0)
						cur_channel = 'A';
					swap = cur_channel != channel;
					if (channel == 'A')
						ahc->our_id = target;
					else
						ahc->our_id_b = target;

					if (swap)
						ahc_outb(ahc, SBLKCTL,
							 sblkctl ^ SELBUSB);

					ahc_outb(ahc, SCSIID, target);

					/* Restore the original bank. */
					if (swap)
						ahc_outb(ahc, SBLKCTL, sblkctl);
				}
			}
		} else
			ahc->black_hole = lstate;
		/* Allow select-in operations */
		if (ahc->black_hole != NULL && ahc->enabled_luns > 0) {
			/*
			 * Set ENSELI in both the template (used on sequencer
			 * restarts) and the live SCSISEQ register.
			 */
			scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE);
			scsiseq |= ENSELI;
			ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq);
			scsiseq = ahc_inb(ahc, SCSISEQ);
			scsiseq |= ENSELI;
			ahc_outb(ahc, SCSISEQ, scsiseq);
		}
		unpause_sequencer(ahc);
		ahc_unlock(ahc, &s);
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_print_path(ccb->ccb_h.path);
		printf("Lun now enabled for target mode\n");
	} else {
		/* Disable path: tear down an enabled lun. */
		struct scb *scb;
		int i, empty;

		if (lstate == NULL) {
			ccb->ccb_h.status = CAM_LUN_INVALID;
			return;
		}

		ahc_lock(ahc, &s);

		/*
		 * Refuse to disable while any target-mode work (CTIOs,
		 * ATIOs, or immediate notifies) is still outstanding
		 * for this path.
		 */
		ccb->ccb_h.status = CAM_REQ_CMP;
		LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) {
			struct ccb_hdr *ccbh;

			ccbh = &scb->io_ctx->ccb_h;
			if (ccbh->func_code == XPT_CONT_TARGET_IO
			 && !xpt_path_comp(ccbh->path, ccb->ccb_h.path)){
				printf("CTIO pending\n");
				ccb->ccb_h.status = CAM_REQ_INVALID;
				ahc_unlock(ahc, &s);
				return;
			}
		}

		if (SLIST_FIRST(&lstate->accept_tios) != NULL) {
			printf("ATIOs pending\n");
			ccb->ccb_h.status = CAM_REQ_INVALID;
		}

		if (SLIST_FIRST(&lstate->immed_notifies) != NULL) {
			printf("INOTs pending\n");
			ccb->ccb_h.status = CAM_REQ_INVALID;
		}

		if (ccb->ccb_h.status != CAM_REQ_CMP) {
			ahc_unlock(ahc, &s);
			return;
		}

		xpt_print_path(ccb->ccb_h.path);
		printf("Target mode disabled\n");
		xpt_free_path(lstate->path);
		free(lstate, M_DEVBUF);

		pause_sequencer(ahc);
		/* Can we clean up the target too? */
		if (target != CAM_TARGET_WILDCARD) {
			tstate->enabled_luns[lun] = NULL;
			ahc->enabled_luns--;
			for (empty = 1, i = 0; i < 8; i++)
				if (tstate->enabled_luns[i] != NULL) {
					empty = 0;
					break;
				}

			if (empty) {
				ahc_free_tstate(ahc, target, channel,
						/*force*/FALSE);
				if (ahc->features & AHC_MULTI_TID) {
					u_int targid_mask;

					/* Drop this target's select-in bit. */
					targid_mask = ahc_inb(ahc, TARGID)
						    | (ahc_inb(ahc, TARGID + 1)
						       << 8);

					targid_mask &= ~target_mask;
					ahc_outb(ahc, TARGID, targid_mask);
					ahc_outb(ahc, TARGID+1,
						 (targid_mask >> 8));
					ahc_update_scsiid(ahc, targid_mask);
				}
			}
		} else {

			ahc->black_hole = NULL;

			/*
			 * We can't allow selections without
			 * our black hole device.
			 */
			empty = TRUE;
		}
		if (ahc->enabled_luns == 0) {
			/* Disallow select-in */
			u_int scsiseq;

			scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE);
			scsiseq &= ~ENSELI;
			ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq);
			scsiseq = ahc_inb(ahc, SCSISEQ);
			scsiseq &= ~ENSELI;
			ahc_outb(ahc, SCSISEQ, scsiseq);
		}
		unpause_sequencer(ahc);
		ahc_unlock(ahc, &s);
	}
}

/*
 * Ensure that the "our id" (OID) field of SCSIID refers to an ID that is
 * actually enabled in the TARGID selection-enable mask, so the chip does
 * not respond to selections on an ID we no longer serve.  Only valid on
 * controllers with the AHC_MULTI_TID feature (enforced by the panic below).
 */
static void
ahc_update_scsiid(struct ahc_softc *ahc, u_int targid_mask)
{
	u_int scsiid_mask;
	u_int scsiid;

	if ((ahc->features & AHC_MULTI_TID) == 0)
		panic("ahc_update_scsiid called on non-multitid unit\n");

	/*
	 * Since we will rely on the TARGID mask
	 * for selection enables, ensure that OID
	 * in SCSIID is not set to some other ID
	 * that we don't want to allow selections on.
	 */
	if ((ahc->features & AHC_ULTRA2) != 0)
		scsiid = ahc_inb(ahc, SCSIID_ULTRA2);
	else
		scsiid = ahc_inb(ahc, SCSIID);
	scsiid_mask = 0x1 << (scsiid & OID);
	if ((targid_mask & scsiid_mask) == 0) {
		u_int our_id;

		/*
		 * Current OID is not in the enabled mask; fall back to
		 * the lowest enabled ID, or our initiator ID if the
		 * mask is empty.  ffs counts from 1.
		 */
		our_id = ffs(targid_mask);
		if (our_id == 0)
			our_id = ahc->our_id;
		else
			our_id--;
		scsiid &= TID;
		scsiid |= our_id;
	}
	if ((ahc->features & AHC_ULTRA2) != 0)
		ahc_outb(ahc, SCSIID_ULTRA2, scsiid);
	else
		ahc_outb(ahc, SCSIID, scsiid);
}

/*
 * Drain the target-mode incoming command queue, handing each valid entry
 * to ahc_handle_target_cmd().  Stops early if a command cannot be
 * consumed (no ATIO available).  "paused" tells us whether the caller
 * already holds the sequencer paused.
 */
void
ahc_run_tqinfifo(struct ahc_softc *ahc, int paused)
{
	struct target_cmd *cmd;

	/*
	 * If the card supports auto-access pause,
	 * we can access the card directly regardless
	 * of whether it is paused or not.
	 */
	if ((ahc->features & AHC_AUTOPAUSE) != 0)
		paused = TRUE;

	while ((cmd = &ahc->targetcmds[ahc->tqinfifonext])->cmd_valid != 0) {

		/*
		 * Only advance through the queue if we
		 * have the resources to process the command.
		 */
		if (ahc_handle_target_cmd(ahc, cmd) != 0)
			break;

		ahc->tqinfifonext++;
		cmd->cmd_valid = 0;

		/*
		 * Lazily update our position in the target mode incoming
		 * command queue as seen by the sequencer.
		 */
		if ((ahc->tqinfifonext & (HOST_TQINPOS - 1)) == 1) {
			if ((ahc->features & AHC_HS_MAILBOX) != 0) {
				u_int hs_mailbox;

				/*
				 * HS_MAILBOX can be updated without
				 * pausing the sequencer.
				 */
				hs_mailbox = ahc_inb(ahc, HS_MAILBOX);
				hs_mailbox &= ~HOST_TQINPOS;
				hs_mailbox |= ahc->tqinfifonext & HOST_TQINPOS;
				ahc_outb(ahc, HS_MAILBOX, hs_mailbox);
			} else {
				/*
				 * KERNEL_TQINPOS lives in sequencer space;
				 * pause around the write unless the caller
				 * (or AUTOPAUSE) already did.
				 */
				if (!paused)
					pause_sequencer(ahc);
				ahc_outb(ahc, KERNEL_TQINPOS,
					 ahc->tqinfifonext & HOST_TQINPOS);
				if (!paused)
					unpause_sequencer(ahc);
			}
		}
	}
}

/*
 * Route one incoming target-mode command to whichever peripheral driver
 * has the addressed lun enabled (or to the "black hole" device for
 * disabled luns), packaging it into a queued accept-tio CCB.
 *
 * Returns 0 once the command has been dispatched via xpt_done(), or
 * non-zero (and sets AHC_TQINFIFO_BLOCKED) when no ATIO is available,
 * in which case the caller must stop draining and retry later.
 */
static int
ahc_handle_target_cmd(struct ahc_softc *ahc, struct target_cmd *cmd)
{
	struct	  tmode_tstate *tstate;
	struct	  tmode_lstate *lstate;
	struct	  ccb_accept_tio *atio;
	uint8_t *byte;
	int	  initiator;
	int	  target;
	int	  lun;

	initiator = SCSIID_TARGET(ahc, cmd->scsiid);
	target = SCSIID_OUR_ID(cmd->scsiid);
	lun    = (cmd->identify & MSG_IDENTIFY_LUNMASK);

	byte = cmd->bytes;
	tstate = ahc->enabled_targets[target];
	lstate = NULL;
	if (tstate != NULL)
		lstate = tstate->enabled_luns[lun];

	/*
	 * Commands for disabled luns go to the black hole driver.
	 */
	if (lstate == NULL)
		lstate = ahc->black_hole;

	atio = (struct ccb_accept_tio*)SLIST_FIRST(&lstate->accept_tios);
	if (atio == NULL) {
		ahc->flags |= AHC_TQINFIFO_BLOCKED;
		/*
		 * Wait for more ATIOs from the peripheral driver for this lun.
		 */
		return (1);
	} else
		ahc->flags &= ~AHC_TQINFIFO_BLOCKED;
#if 0
	printf("Incoming command from %d for %d:%d%s\n",
	       initiator, target, lun,
	       lstate == ahc->black_hole ? "(Black Holed)" : "");
#endif
	SLIST_REMOVE_HEAD(&lstate->accept_tios, sim_links.sle);

	if (lstate == ahc->black_hole) {
		/* Fill in the wildcards */
		atio->ccb_h.target_id = target;
		atio->ccb_h.target_lun = lun;
	}

	/*
	 * Package it up and send it off to
	 * whomever has this lun enabled.
	 */
	atio->sense_len = 0;
	atio->init_id = initiator;
	/*
	 * NOTE(review): 0xFF in the first byte appears to mark "no tag
	 * message present" — confirm against the sequencer's command
	 * packing format.
	 */
	if (byte[0] != 0xFF) {
		/* Tag was included */
		atio->tag_action = *byte++;
		atio->tag_id = *byte++;
		atio->ccb_h.flags = CAM_TAG_ACTION_VALID;
	} else {
		atio->ccb_h.flags = 0;
	}
	byte++;

	/* Okay.  Now determine the cdb size based on the command code */
	switch (*byte >> CMD_GROUP_CODE_SHIFT) {
	case 0:
		atio->cdb_len = 6;
		break;
	case 1:
	case 2:
		atio->cdb_len = 10;
		break;
	case 4:
		atio->cdb_len = 16;
		break;
	case 5:
		atio->cdb_len = 12;
		break;
	case 3:
	default:
		/* Only copy the opcode. */
		atio->cdb_len = 1;
		printf("Reserved or VU command code type encountered\n");
		break;
	}

	memcpy(atio->cdb_io.cdb_bytes, byte, atio->cdb_len);

	atio->ccb_h.status |= CAM_CDB_RECVD;

	if ((cmd->identify & MSG_IDENTIFY_DISCFLAG) == 0) {
		/*
		 * We weren't allowed to disconnect.
		 * We're hanging on the bus until a
		 * continue target I/O comes in response
		 * to this accept tio.
		 */
#if 0
		printf("Received Immediate Command %d:%d:%d - %p\n",
		       initiator, target, lun, ahc->pending_device);
#endif
		ahc->pending_device = lstate;
		ahc_freeze_ccb((union ccb *)atio);
		atio->ccb_h.flags |= CAM_DIS_DISCONNECT;
	}
	xpt_done((union ccb*)atio);
	return (0);
}

#endif