/*-
 * CAM SCSI interface for the Advanced Systems Inc.
 * Second Generation SCSI controllers.
 *
 * Product specific probe and attach routines can be found in:
 *
 * adw_pci.c	ABP[3]940UW, ABP950UW, ABP3940U2W
 *
 * Copyright (c) 1998, 1999, 2000 Justin Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Ported from:
 * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
 *
 * Copyright (c) 1995-1998 Advanced System Products, Inc.
 * All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that redistributions of source
 * code retain the above copyright notice and this comment without
 * modification.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/dev/advansys/adwcam.c 335137 2018-06-14 14:45:08Z mav $");

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/bus.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <sys/rman.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_debug.h>

#include <cam/scsi/scsi_message.h>

#include <dev/advansys/adwvar.h>

/* Definitions for our use of the SIM private CCB area */
#define ccb_acb_ptr spriv_ptr0
#define ccb_adw_ptr spriv_ptr1

static __inline struct acb*	adwgetacb(struct adw_softc *adw);
static __inline void		adwfreeacb(struct adw_softc *adw,
					   struct acb *acb);

static void		adwmapmem(void *arg, bus_dma_segment_t *segs,
				  int nseg, int error);
static struct sg_map_node*
			adwallocsgmap(struct adw_softc *adw);
static int		adwallocacbs(struct adw_softc *adw);

static void		adwexecuteacb(void *arg, bus_dma_segment_t *dm_segs,
				      int nseg, int error);
static void		adw_action(struct cam_sim *sim, union ccb *ccb);
static void		adw_intr_locked(struct adw_softc *adw);
static void		adw_poll(struct cam_sim *sim);
static void		adw_async(void *callback_arg, u_int32_t code,
				  struct cam_path *path, void *arg);
static void		adwprocesserror(struct adw_softc *adw, struct acb *acb);
static void		adwtimeout(void *arg);
static void		adw_handle_device_reset(struct adw_softc *adw,
						u_int target);
static void		adw_handle_bus_reset(struct adw_softc *adw,
					     int initiated);

static __inline struct acb*
adwgetacb(struct adw_softc *adw)
{
	struct acb *acb;

	if (!dumping)
		mtx_assert(&adw->lock, MA_OWNED);
	if ((acb = SLIST_FIRST(&adw->free_acb_list)) != NULL) {
		SLIST_REMOVE_HEAD(&adw->free_acb_list, links);
	} else if (adw->num_acbs < adw->max_acbs) {
		adwallocacbs(adw);
		acb = SLIST_FIRST(&adw->free_acb_list);
		if (acb == NULL)
			device_printf(adw->device, "Can't malloc ACB\n");
		else {
			SLIST_REMOVE_HEAD(&adw->free_acb_list, links);
		}
	}

	return (acb);
}

static __inline void
adwfreeacb(struct adw_softc *adw, struct acb *acb)
{

	if (!dumping)
		mtx_assert(&adw->lock, MA_OWNED);
	if ((acb->state & ACB_ACTIVE) != 0)
		LIST_REMOVE(&acb->ccb->ccb_h, sim_links.le);
	if ((acb->state & ACB_RELEASE_SIMQ) != 0)
		acb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
	else if ((adw->state & ADW_RESOURCE_SHORTAGE) != 0
	      && (acb->ccb->ccb_h.status & CAM_RELEASE_SIMQ) == 0) {
		acb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		adw->state &= ~ADW_RESOURCE_SHORTAGE;
	}
	acb->state = ACB_FREE;
	SLIST_INSERT_HEAD(&adw->free_acb_list, acb, links);
}

static void
adwmapmem(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *busaddrp;

	busaddrp = (bus_addr_t *)arg;
	*busaddrp = segs->ds_addr;
}
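/*
 * Allocate, map, and zero one page of bus-dma memory to hold the next
 * batch of S/G blocks.  The mapping is recorded on the softc's sg_maps
 * list so it can be unloaded and freed again in adw_free().
 */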
static struct sg_map_node *
adwallocsgmap(struct adw_softc *adw)
{
	struct sg_map_node *sg_map;

	sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT);

	if (sg_map == NULL)
		return (NULL);

	/* Allocate S/G space for the next batch of ACBs */
	if (bus_dmamem_alloc(adw->sg_dmat, (void **)&sg_map->sg_vaddr,
			     BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) {
		free(sg_map, M_DEVBUF);
		return (NULL);
	}

	SLIST_INSERT_HEAD(&adw->sg_maps, sg_map, links);

	bus_dmamap_load(adw->sg_dmat, sg_map->sg_dmamap, sg_map->sg_vaddr,
			PAGE_SIZE, adwmapmem, &sg_map->sg_physaddr, /*flags*/0);

	bzero(sg_map->sg_vaddr, PAGE_SIZE);
	return (sg_map);
}

/*
 * Allocate another chunk of ACBs.  Return count of entries added.
 */
static int
adwallocacbs(struct adw_softc *adw)
{
	struct acb *next_acb;
	struct sg_map_node *sg_map;
	bus_addr_t busaddr;
	struct adw_sg_block *blocks;
	int newcount;
	int i;

	next_acb = &adw->acbs[adw->num_acbs];
	sg_map = adwallocsgmap(adw);

	if (sg_map == NULL)
		return (0);

	blocks = sg_map->sg_vaddr;
	busaddr = sg_map->sg_physaddr;

	newcount = (PAGE_SIZE / (ADW_SG_BLOCKCNT * sizeof(*blocks)));
	for (i = 0; adw->num_acbs < adw->max_acbs && i < newcount; i++) {
		int error;

		error = bus_dmamap_create(adw->buffer_dmat, /*flags*/0,
					  &next_acb->dmamap);
		if (error != 0)
			break;
		next_acb->queue.scsi_req_baddr = acbvtob(adw, next_acb);
		next_acb->queue.scsi_req_bo = acbvtobo(adw, next_acb);
		next_acb->queue.sense_baddr =
		    acbvtob(adw, next_acb) + offsetof(struct acb, sense_data);
		next_acb->sg_blocks = blocks;
		next_acb->sg_busaddr = busaddr;
		next_acb->state = ACB_FREE;
		callout_init_mtx(&next_acb->timer, &adw->lock, 0);
		SLIST_INSERT_HEAD(&adw->free_acb_list, next_acb, links);
		blocks += ADW_SG_BLOCKCNT;
		busaddr += ADW_SG_BLOCKCNT * sizeof(*blocks);
		next_acb++;
		adw->num_acbs++;
	}
	return (i);
}
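/*
 * bus_dmamap_load_ccb() callback: record the data segments in the ACB,
 * chaining S/G blocks when there is more than one segment, then hand
 * the request to the RISC engine unless the CCB was aborted while the
 * mapping was deferred.
 */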
static void
adwexecuteacb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	struct acb *acb;
	union ccb *ccb;
	struct adw_softc *adw;

	acb = (struct acb *)arg;
	ccb = acb->ccb;
	adw = (struct adw_softc *)ccb->ccb_h.ccb_adw_ptr;

	if (!dumping)
		mtx_assert(&adw->lock, MA_OWNED);
	if (error != 0) {
		if (error != EFBIG)
			device_printf(adw->device, "Unexpected error 0x%x "
			    "returned from bus_dmamap_load\n", error);
		if (ccb->ccb_h.status == CAM_REQ_INPROG) {
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
			ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN;
		}
		adwfreeacb(adw, acb);
		xpt_done(ccb);
		return;
	}

	if (nseg != 0) {
		bus_dmasync_op_t op;

		acb->queue.data_addr = dm_segs[0].ds_addr;
		acb->queue.data_cnt = ccb->csio.dxfer_len;
		if (nseg > 1) {
			struct adw_sg_block *sg_block;
			struct adw_sg_elm *sg;
			bus_addr_t sg_busaddr;
			u_int sg_index;
			bus_dma_segment_t *end_seg;

			end_seg = dm_segs + nseg;

			sg_busaddr = acb->sg_busaddr;
			sg_index = 0;
			/* Copy the segments into our SG list */
			for (sg_block = acb->sg_blocks;; sg_block++) {
				u_int i;

				sg = sg_block->sg_list;
				for (i = 0; i < ADW_NO_OF_SG_PER_BLOCK; i++) {
					if (dm_segs >= end_seg)
						break;

					sg->sg_addr = dm_segs->ds_addr;
					sg->sg_count = dm_segs->ds_len;
					sg++;
					dm_segs++;
				}
				sg_block->sg_cnt = i;
				sg_index += i;
				if (dm_segs == end_seg) {
					sg_block->sg_busaddr_next = 0;
					break;
				} else {
					sg_busaddr +=
					    sizeof(struct adw_sg_block);
					sg_block->sg_busaddr_next = sg_busaddr;
				}
			}
			acb->queue.sg_real_addr = acb->sg_busaddr;
		} else {
			acb->queue.sg_real_addr = 0;
		}

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;

		bus_dmamap_sync(adw->buffer_dmat, acb->dmamap, op);

	} else {
		acb->queue.data_addr = 0;
		acb->queue.data_cnt = 0;
		acb->queue.sg_real_addr = 0;
	}

	/*
	 * Last time we need to check if this CCB needs to
	 * be aborted.
	 */
	if (ccb->ccb_h.status != CAM_REQ_INPROG) {
		if (nseg != 0)
			bus_dmamap_unload(adw->buffer_dmat, acb->dmamap);
		adwfreeacb(adw, acb);
		xpt_done(ccb);
		return;
	}

	acb->state |= ACB_ACTIVE;
	ccb->ccb_h.status |= CAM_SIM_QUEUED;
	LIST_INSERT_HEAD(&adw->pending_ccbs, &ccb->ccb_h, sim_links.le);
	callout_reset_sbt(&acb->timer, SBT_1MS * ccb->ccb_h.timeout, 0,
	    adwtimeout, acb, 0);

	adw_send_acb(adw, acb, acbvtob(adw, acb));
}

static void
adw_action(struct cam_sim *sim, union ccb *ccb)
{
	struct adw_softc *adw;

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("adw_action\n"));

	adw = (struct adw_softc *)cam_sim_softc(sim);
	if (!dumping)
		mtx_assert(&adw->lock, MA_OWNED);

	switch (ccb->ccb_h.func_code) {
	/* Common cases first */
	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
	{
		struct ccb_scsiio *csio;
		struct acb *acb;
		int error;

		csio = &ccb->csio;

		/* Max supported CDB length is 12 bytes */
		if (csio->cdb_len > 12) {
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			return;
		}

		if ((acb = adwgetacb(adw)) == NULL) {
			adw->state |= ADW_RESOURCE_SHORTAGE;
			xpt_freeze_simq(sim, /*count*/1);
			ccb->ccb_h.status = CAM_REQUEUE_REQ;
			xpt_done(ccb);
			return;
		}

		/* Link acb and ccb so we can find one from the other */
		acb->ccb = ccb;
		ccb->ccb_h.ccb_acb_ptr = acb;
		ccb->ccb_h.ccb_adw_ptr = adw;

		acb->queue.cntl = 0;
		acb->queue.target_cmd = 0;
		acb->queue.target_id = ccb->ccb_h.target_id;
		acb->queue.target_lun = ccb->ccb_h.target_lun;

		acb->queue.mflag = 0;
		acb->queue.sense_len =
			MIN(csio->sense_len, sizeof(acb->sense_data));
		acb->queue.cdb_len = csio->cdb_len;
		if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
			switch (csio->tag_action) {
			case MSG_SIMPLE_Q_TAG:
				acb->queue.scsi_cntl = ADW_QSC_SIMPLE_Q_TAG;
				break;
			case MSG_HEAD_OF_Q_TAG:
				acb->queue.scsi_cntl = ADW_QSC_HEAD_OF_Q_TAG;
				break;
			case MSG_ORDERED_Q_TAG:
				acb->queue.scsi_cntl = ADW_QSC_ORDERED_Q_TAG;
				break;
			default:
				acb->queue.scsi_cntl = ADW_QSC_NO_TAGMSG;
				break;
			}
		} else
			acb->queue.scsi_cntl = ADW_QSC_NO_TAGMSG;

		if ((ccb->ccb_h.flags & CAM_DIS_DISCONNECT) != 0)
			acb->queue.scsi_cntl |= ADW_QSC_NO_DISC;

		acb->queue.done_status = 0;
		acb->queue.scsi_status = 0;
		acb->queue.host_status = 0;
		acb->queue.sg_wk_ix = 0;
		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) == 0) {
				bcopy(csio->cdb_io.cdb_ptr,
				      acb->queue.cdb, csio->cdb_len);
			} else {
				/* I guess I could map it in... */
				ccb->ccb_h.status = CAM_REQ_INVALID;
				adwfreeacb(adw, acb);
				xpt_done(ccb);
				return;
			}
		} else {
			bcopy(csio->cdb_io.cdb_bytes,
			      acb->queue.cdb, csio->cdb_len);
		}

		error = bus_dmamap_load_ccb(adw->buffer_dmat,
					    acb->dmamap,
					    ccb,
					    adwexecuteacb,
					    acb, /*flags*/0);
		if (error == EINPROGRESS) {
			/*
			 * So as to maintain ordering, freeze the controller
			 * queue until our mapping is returned.
			 */
			xpt_freeze_simq(sim, 1);
			acb->state |= ACB_RELEASE_SIMQ;
		}
		break;
	}
	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
	{
		adw_idle_cmd_status_t status;

		status = adw_idle_cmd_send(adw, ADW_IDLE_CMD_DEVICE_RESET,
					   ccb->ccb_h.target_id);
		if (status == ADW_IDLE_CMD_SUCCESS) {
			ccb->ccb_h.status = CAM_REQ_CMP;
			if (bootverbose) {
				xpt_print_path(ccb->ccb_h.path);
				printf("BDR Delivered\n");
			}
		} else
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		xpt_done(ccb);
		break;
	}
	case XPT_ABORT:			/* Abort the specified CCB */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	case XPT_SET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings_scsi *scsi;
		struct ccb_trans_settings_spi *spi;
		struct ccb_trans_settings *cts;
		u_int target_mask;

		cts = &ccb->cts;
		target_mask = 0x01 << ccb->ccb_h.target_id;

		scsi = &cts->proto_specific.scsi;
		spi = &cts->xport_specific.spi;
		if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
			u_int sdtrdone;

			sdtrdone = adw_lram_read_16(adw, ADW_MC_SDTR_DONE);
			if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
				u_int discenb;

				discenb =
				    adw_lram_read_16(adw, ADW_MC_DISC_ENABLE);

				if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
					discenb |= target_mask;
				else
					discenb &= ~target_mask;

				adw_lram_write_16(adw, ADW_MC_DISC_ENABLE,
						  discenb);
			}

			if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {

				if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
					adw->tagenb |= target_mask;
				else
					adw->tagenb &= ~target_mask;
			}

			if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
				u_int wdtrenb_orig;
				u_int wdtrenb;
				u_int wdtrdone;

				wdtrenb_orig =
				    adw_lram_read_16(adw, ADW_MC_WDTR_ABLE);
				wdtrenb = wdtrenb_orig;
				wdtrdone = adw_lram_read_16(adw,
							    ADW_MC_WDTR_DONE);
				switch (spi->bus_width) {
				case MSG_EXT_WDTR_BUS_32_BIT:
				case MSG_EXT_WDTR_BUS_16_BIT:
					wdtrenb |= target_mask;
					break;
				case MSG_EXT_WDTR_BUS_8_BIT:
				default:
					wdtrenb &= ~target_mask;
					break;
				}
				if (wdtrenb != wdtrenb_orig) {
					adw_lram_write_16(adw,
							  ADW_MC_WDTR_ABLE,
							  wdtrenb);
					wdtrdone &= ~target_mask;
					adw_lram_write_16(adw,
							  ADW_MC_WDTR_DONE,
							  wdtrdone);
					/* Wide negotiation forces async */
					sdtrdone &= ~target_mask;
					adw_lram_write_16(adw,
							  ADW_MC_SDTR_DONE,
							  sdtrdone);
				}
			}

			if (((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0)
			 || ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0)) {
				u_int sdtr_orig;
				u_int sdtr;
				u_int sdtrable_orig;
				u_int sdtrable;

				sdtr = adw_get_chip_sdtr(adw,
							 ccb->ccb_h.target_id);
				sdtr_orig = sdtr;
				sdtrable = adw_lram_read_16(adw,
							    ADW_MC_SDTR_ABLE);
				sdtrable_orig = sdtrable;

				if ((spi->valid
				   & CTS_SPI_VALID_SYNC_RATE) != 0) {

					sdtr =
					    adw_find_sdtr(adw,
							  spi->sync_period);
				}

				if ((spi->valid
				   & CTS_SPI_VALID_SYNC_OFFSET) != 0) {
					if (spi->sync_offset == 0)
						sdtr = ADW_MC_SDTR_ASYNC;
				}

				if (sdtr == ADW_MC_SDTR_ASYNC)
					sdtrable &= ~target_mask;
				else
					sdtrable |= target_mask;
				if (sdtr != sdtr_orig
				 || sdtrable != sdtrable_orig) {
					adw_set_chip_sdtr(adw,
							  ccb->ccb_h.target_id,
							  sdtr);
					sdtrdone &= ~target_mask;
					adw_lram_write_16(adw, ADW_MC_SDTR_ABLE,
							  sdtrable);
					adw_lram_write_16(adw, ADW_MC_SDTR_DONE,
							  sdtrdone);
				}
			}
		}
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	/* Get default/user set transfer settings for the target */
	{
		struct ccb_trans_settings_scsi *scsi;
		struct ccb_trans_settings_spi *spi;
		struct ccb_trans_settings *cts;
		u_int target_mask;

		cts = &ccb->cts;
		target_mask = 0x01 << ccb->ccb_h.target_id;
		cts->protocol = PROTO_SCSI;
		cts->protocol_version = SCSI_REV_2;
		cts->transport = XPORT_SPI;
		cts->transport_version = 2;

		scsi = &cts->proto_specific.scsi;
		spi = &cts->xport_specific.spi;
		if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
			u_int mc_sdtr;

			spi->flags = 0;
			if ((adw->user_discenb & target_mask) != 0)
				spi->flags |= CTS_SPI_FLAGS_DISC_ENB;

			if ((adw->user_tagenb & target_mask) != 0)
				scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;

			if ((adw->user_wdtr & target_mask) != 0)
				spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
			else
				spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;

			mc_sdtr = adw_get_user_sdtr(adw, ccb->ccb_h.target_id);
			spi->sync_period = adw_find_period(adw, mc_sdtr);
			if (spi->sync_period != 0)
				spi->sync_offset = 15; /* XXX ??? */
			else
				spi->sync_offset = 0;

		} else {
			u_int targ_tinfo;

			spi->flags = 0;
			if ((adw_lram_read_16(adw, ADW_MC_DISC_ENABLE)
			  & target_mask) != 0)
				spi->flags |= CTS_SPI_FLAGS_DISC_ENB;

			if ((adw->tagenb & target_mask) != 0)
				scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;

			targ_tinfo =
			    adw_lram_read_16(adw,
					     ADW_MC_DEVICE_HSHK_CFG_TABLE
					     + (2 * ccb->ccb_h.target_id));

			if ((targ_tinfo & ADW_HSHK_CFG_WIDE_XFR) != 0)
				spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
			else
				spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;

			spi->sync_period =
			    adw_hshk_cfg_period_factor(targ_tinfo);

			spi->sync_offset = targ_tinfo & ADW_HSHK_CFG_OFFSET;
			if (spi->sync_period == 0)
				spi->sync_offset = 0;

			if (spi->sync_offset == 0)
				spi->sync_period = 0;
		}

		spi->valid = CTS_SPI_VALID_SYNC_RATE
			   | CTS_SPI_VALID_SYNC_OFFSET
			   | CTS_SPI_VALID_BUS_WIDTH
			   | CTS_SPI_VALID_DISC;
		scsi->valid = CTS_SCSI_VALID_TQ;
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_CALC_GEOMETRY:
	{
		/*
		 * XXX Use Adaptec translation until I find out how to
		 *     get this information from the card.
		 */
		cam_calc_geometry(&ccb->ccg, /*extended*/1);
		xpt_done(ccb);
		break;
	}
	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
	{
		int failure;

		failure = adw_reset_bus(adw);
		if (failure != 0) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		} else {
			if (bootverbose) {
				xpt_print_path(adw->path);
				printf("Bus Reset Delivered\n");
			}
			ccb->ccb_h.status = CAM_REQ_CMP;
		}
		xpt_done(ccb);
		break;
	}
	case XPT_TERM_IO:		/* Terminate the I/O process */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_WIDE_16|PI_SDTR_ABLE|PI_TAG_ABLE;
		cpi->target_sprt = 0;
		cpi->hba_misc = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = ADW_MAX_TID;
		cpi->max_lun = ADW_MAX_LUN;
		cpi->initiator_id = adw->initiator_id;
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 3300;
		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "AdvanSys", HBA_IDLEN);
		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->transport = XPORT_SPI;
		cpi->transport_version = 2;
		cpi->protocol = PROTO_SCSI;
		cpi->protocol_version = SCSI_REV_2;
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
}

static void
adw_poll(struct cam_sim *sim)
{
	adw_intr_locked(cam_sim_softc(sim));
}

static void
adw_async(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
{
}

struct adw_softc *
adw_alloc(device_t dev, struct resource *regs, int regs_type, int regs_id)
{
	struct adw_softc *adw;

	adw = device_get_softc(dev);
	LIST_INIT(&adw->pending_ccbs);
	SLIST_INIT(&adw->sg_maps);
	mtx_init(&adw->lock, "adw", NULL, MTX_DEF);
	adw->device = dev;
	adw->regs_res_type = regs_type;
	adw->regs_res_id = regs_id;
	adw->regs = regs;
	return(adw);
}

void
adw_free(struct adw_softc *adw)
{
	switch (adw->init_level) {
	case 9:
	{
		struct sg_map_node *sg_map;

		while ((sg_map = SLIST_FIRST(&adw->sg_maps)) != NULL) {
			SLIST_REMOVE_HEAD(&adw->sg_maps, links);
			bus_dmamap_unload(adw->sg_dmat,
					  sg_map->sg_dmamap);
			bus_dmamem_free(adw->sg_dmat, sg_map->sg_vaddr,
					sg_map->sg_dmamap);
			free(sg_map, M_DEVBUF);
		}
		bus_dma_tag_destroy(adw->sg_dmat);
	}
	case 8:
		bus_dmamap_unload(adw->acb_dmat, adw->acb_dmamap);
	case 7:
		bus_dmamem_free(adw->acb_dmat, adw->acbs,
				adw->acb_dmamap);
	case 6:
		bus_dma_tag_destroy(adw->acb_dmat);
	case 5:
		bus_dmamap_unload(adw->carrier_dmat, adw->carrier_dmamap);
	case 4:
		bus_dmamem_free(adw->carrier_dmat, adw->carriers,
				adw->carrier_dmamap);
	case 3:
		bus_dma_tag_destroy(adw->carrier_dmat);
	case 2:
		bus_dma_tag_destroy(adw->buffer_dmat);
	case 1:
		bus_dma_tag_destroy(adw->parent_dmat);
	case 0:
		break;
	}

	if (adw->regs != NULL)
		bus_release_resource(adw->device,
				     adw->regs_res_type,
				     adw->regs_res_id,
				     adw->regs);

	if (adw->irq != NULL)
		bus_release_resource(adw->device,
				     adw->irq_res_type,
				     0, adw->irq);

	if (adw->sim != NULL) {
		if (adw->path != NULL) {
			xpt_async(AC_LOST_DEVICE, adw->path, NULL);
			xpt_free_path(adw->path);
		}
		xpt_bus_deregister(cam_sim_path(adw->sim));
		cam_sim_free(adw->sim, /*free_devq*/TRUE);
	}
	mtx_destroy(&adw->lock);
}
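/*
 * Bring the controller to a state where it can accept requests: read
 * the configuration out of EEPROM (falling back to defaults on a
 * checksum failure), create the DMA tags and allocations for data
 * buffers, carriers, ACBs and S/G lists, and initialize the chip.
 */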
int
adw_init(struct adw_softc *adw)
{
	struct adw_eeprom eep_config;
	u_int tid;
	u_int i;
	u_int16_t checksum;
	u_int16_t scsicfg1;

	checksum = adw_eeprom_read(adw, &eep_config);
	bcopy(eep_config.serial_number, adw->serial_number,
	      sizeof(adw->serial_number));
	if (checksum != eep_config.checksum) {
		u_int16_t serial_number[3];

		adw->flags |= ADW_EEPROM_FAILED;
		device_printf(adw->device,
		    "EEPROM checksum failed.  Restoring Defaults\n");

		/*
		 * Restore the default EEPROM settings.
		 * Assume the 6 byte board serial number that was read
		 * from EEPROM is correct even if the EEPROM checksum
		 * failed.
		 */
		bcopy(adw->default_eeprom, &eep_config, sizeof(eep_config));
		bcopy(adw->serial_number, eep_config.serial_number,
		      sizeof(serial_number));
		adw_eeprom_write(adw, &eep_config);
	}

	/* Pull eeprom information into our softc. */
	adw->bios_ctrl = eep_config.bios_ctrl;
	adw->user_wdtr = eep_config.wdtr_able;
	for (tid = 0; tid < ADW_MAX_TID; tid++) {
		u_int mc_sdtr;
		u_int16_t tid_mask;

		tid_mask = 0x1 << tid;
		if ((adw->features & ADW_ULTRA) != 0) {
			/*
			 * Ultra chips store sdtr and ultraenb
			 * bits in their seeprom, so we must
			 * construct valid mc_sdtr entries
			 * indirectly.
			 */
			if (eep_config.sync1.sync_enable & tid_mask) {
				if (eep_config.sync2.ultra_enable & tid_mask)
					mc_sdtr = ADW_MC_SDTR_20;
				else
					mc_sdtr = ADW_MC_SDTR_10;
			} else
				mc_sdtr = ADW_MC_SDTR_ASYNC;
		} else {
			switch (ADW_TARGET_GROUP(tid)) {
			case 3:
				mc_sdtr = eep_config.sync4.sdtr4;
				break;
			case 2:
				mc_sdtr = eep_config.sync3.sdtr3;
				break;
			case 1:
				mc_sdtr = eep_config.sync2.sdtr2;
				break;
			default: /* Shut up compiler */
			case 0:
				mc_sdtr = eep_config.sync1.sdtr1;
				break;
			}
			mc_sdtr >>= ADW_TARGET_GROUP_SHIFT(tid);
			mc_sdtr &= 0xFF;
		}
		adw_set_user_sdtr(adw, tid, mc_sdtr);
	}
	adw->user_tagenb = eep_config.tagqng_able;
	adw->user_discenb = eep_config.disc_enable;
	adw->max_acbs = eep_config.max_host_qng;
	adw->initiator_id = (eep_config.adapter_scsi_id & ADW_MAX_TID);

	/*
	 * Sanity check the number of host openings.
	 */
	if (adw->max_acbs > ADW_DEF_MAX_HOST_QNG)
		adw->max_acbs = ADW_DEF_MAX_HOST_QNG;
	else if (adw->max_acbs < ADW_DEF_MIN_HOST_QNG) {
		/* If the value is zero, assume it is uninitialized. */
		if (adw->max_acbs == 0)
			adw->max_acbs = ADW_DEF_MAX_HOST_QNG;
		else
			adw->max_acbs = ADW_DEF_MIN_HOST_QNG;
	}

	scsicfg1 = 0;
	if ((adw->features & ADW_ULTRA2) != 0) {
		switch (eep_config.termination_lvd) {
		default:
			device_printf(adw->device,
			    "Invalid EEPROM LVD Termination Settings.\n");
			device_printf(adw->device,
			    "Reverting to Automatic LVD Termination\n");
			/* FALLTHROUGH */
		case ADW_EEPROM_TERM_AUTO:
			break;
		case ADW_EEPROM_TERM_BOTH_ON:
			scsicfg1 |= ADW2_SCSI_CFG1_TERM_LVD_LO;
			/* FALLTHROUGH */
		case ADW_EEPROM_TERM_HIGH_ON:
			scsicfg1 |= ADW2_SCSI_CFG1_TERM_LVD_HI;
			/* FALLTHROUGH */
		case ADW_EEPROM_TERM_OFF:
			scsicfg1 |= ADW2_SCSI_CFG1_DIS_TERM_DRV;
			break;
		}
	}

	switch (eep_config.termination_se) {
	default:
		device_printf(adw->device,
		    "Invalid SE EEPROM Termination Settings.\n");
		device_printf(adw->device,
		    "Reverting to Automatic SE Termination\n");
		/* FALLTHROUGH */
	case ADW_EEPROM_TERM_AUTO:
		break;
	case ADW_EEPROM_TERM_BOTH_ON:
		scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_L;
		/* FALLTHROUGH */
	case ADW_EEPROM_TERM_HIGH_ON:
		scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_H;
		/* FALLTHROUGH */
	case ADW_EEPROM_TERM_OFF:
		scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_MANUAL;
		break;
	}
	device_printf(adw->device, "SCSI ID %d, ", adw->initiator_id);

	/* DMA tag for mapping buffers into device visible space. */
	if (bus_dma_tag_create(
			/* parent	*/ adw->parent_dmat,
			/* alignment	*/ 1,
			/* boundary	*/ 0,
			/* lowaddr	*/ BUS_SPACE_MAXADDR_32BIT,
			/* highaddr	*/ BUS_SPACE_MAXADDR,
			/* filter	*/ NULL,
			/* filterarg	*/ NULL,
			/* maxsize	*/ DFLTPHYS,
			/* nsegments	*/ ADW_SGSIZE,
			/* maxsegsz	*/ BUS_SPACE_MAXSIZE_32BIT,
			/* flags	*/ BUS_DMA_ALLOCNOW,
			/* lockfunc	*/ busdma_lock_mutex,
			/* lockarg	*/ &adw->lock,
			&adw->buffer_dmat) != 0) {
		return (ENOMEM);
	}

	adw->init_level++;

	/* DMA tag for our ccb carrier structures */
	if (bus_dma_tag_create(
			/* parent	*/ adw->parent_dmat,
			/* alignment	*/ 0x10,
			/* boundary	*/ 0,
			/* lowaddr	*/ BUS_SPACE_MAXADDR_32BIT,
			/* highaddr	*/ BUS_SPACE_MAXADDR,
			/* filter	*/ NULL,
			/* filterarg	*/ NULL,
			/* maxsize	*/ (adw->max_acbs +
					    ADW_NUM_CARRIER_QUEUES + 1) *
					    sizeof(struct adw_carrier),
			/* nsegments	*/ 1,
			/* maxsegsz	*/ BUS_SPACE_MAXSIZE_32BIT,
			/* flags	*/ 0,
			/* lockfunc	*/ NULL,
			/* lockarg	*/ NULL,
			&adw->carrier_dmat) != 0) {
		return (ENOMEM);
	}

	adw->init_level++;

	/* Allocation for our ccb carrier structures */
	if (bus_dmamem_alloc(adw->carrier_dmat, (void **)&adw->carriers,
			     BUS_DMA_NOWAIT, &adw->carrier_dmamap) != 0) {
		return (ENOMEM);
	}

	adw->init_level++;

	/* And permanently map them */
	bus_dmamap_load(adw->carrier_dmat, adw->carrier_dmamap,
			adw->carriers,
			(adw->max_acbs + ADW_NUM_CARRIER_QUEUES + 1)
			 * sizeof(struct adw_carrier),
			adwmapmem, &adw->carrier_busbase, /*flags*/0);

	/* Clear them out. */
	bzero(adw->carriers, (adw->max_acbs + ADW_NUM_CARRIER_QUEUES + 1)
			     * sizeof(struct adw_carrier));

	/* Setup our free carrier list */
	adw->free_carriers = adw->carriers;
	for (i = 0; i < adw->max_acbs + ADW_NUM_CARRIER_QUEUES; i++) {
		adw->carriers[i].carr_offset =
			carriervtobo(adw, &adw->carriers[i]);
		adw->carriers[i].carr_ba =
			carriervtob(adw, &adw->carriers[i]);
		adw->carriers[i].areq_ba = 0;
		adw->carriers[i].next_ba =
			carriervtobo(adw, &adw->carriers[i+1]);
	}
	/* Terminal carrier.  Never leaves the freelist */
	adw->carriers[i].carr_offset =
		carriervtobo(adw, &adw->carriers[i]);
	adw->carriers[i].carr_ba =
		carriervtob(adw, &adw->carriers[i]);
	adw->carriers[i].areq_ba = 0;
	adw->carriers[i].next_ba = ~0;

	adw->init_level++;

	/* DMA tag for our acb structures */
	if (bus_dma_tag_create(
			/* parent	*/ adw->parent_dmat,
			/* alignment	*/ 1,
			/* boundary	*/ 0,
			/* lowaddr	*/ BUS_SPACE_MAXADDR,
			/* highaddr	*/ BUS_SPACE_MAXADDR,
			/* filter	*/ NULL,
			/* filterarg	*/ NULL,
			/* maxsize	*/ adw->max_acbs * sizeof(struct acb),
			/* nsegments	*/ 1,
			/* maxsegsz	*/ BUS_SPACE_MAXSIZE_32BIT,
			/* flags	*/ 0,
			/* lockfunc	*/ NULL,
			/* lockarg	*/ NULL,
			&adw->acb_dmat) != 0) {
		return (ENOMEM);
	}

	adw->init_level++;

	/* Allocation for our ccbs */
	if (bus_dmamem_alloc(adw->acb_dmat, (void **)&adw->acbs,
			     BUS_DMA_NOWAIT, &adw->acb_dmamap) != 0)
		return (ENOMEM);

	adw->init_level++;

	/* And permanently map them */
	bus_dmamap_load(adw->acb_dmat, adw->acb_dmamap,
			adw->acbs,
			adw->max_acbs * sizeof(struct acb),
			adwmapmem, &adw->acb_busbase, /*flags*/0);

	/* Clear them out. */
	bzero(adw->acbs, adw->max_acbs * sizeof(struct acb));

	/* DMA tag for our S/G structures.  We allocate in page sized chunks */
	if (bus_dma_tag_create(
			/* parent	*/ adw->parent_dmat,
			/* alignment	*/ 1,
			/* boundary	*/ 0,
			/* lowaddr	*/ BUS_SPACE_MAXADDR,
			/* highaddr	*/ BUS_SPACE_MAXADDR,
			/* filter	*/ NULL,
			/* filterarg	*/ NULL,
			/* maxsize	*/ PAGE_SIZE,
			/* nsegments	*/ 1,
			/* maxsegsz	*/ BUS_SPACE_MAXSIZE_32BIT,
			/* flags	*/ 0,
			/* lockfunc	*/ NULL,
			/* lockarg	*/ NULL,
			&adw->sg_dmat) != 0) {
		return (ENOMEM);
	}

	adw->init_level++;

	/* Allocate our first batch of ccbs */
	mtx_lock(&adw->lock);
	if (adwallocacbs(adw) == 0) {
		mtx_unlock(&adw->lock);
		return (ENOMEM);
	}

	if (adw_init_chip(adw, scsicfg1) != 0) {
		mtx_unlock(&adw->lock);
		return (ENXIO);
	}

	printf("Queue Depth %d\n", adw->max_acbs);
	mtx_unlock(&adw->lock);

	return (0);
}

/*
 * Attach all the sub-devices we can find
 */
int
adw_attach(struct adw_softc *adw)
{
	struct ccb_setasync csa;
	struct cam_devq *devq;
	int error;

	/* Hook up our interrupt handler */
	error = bus_setup_intr(adw->device, adw->irq,
	    INTR_TYPE_CAM | INTR_ENTROPY | INTR_MPSAFE, NULL, adw_intr, adw,
	    &adw->ih);
	if (error != 0) {
		device_printf(adw->device, "bus_setup_intr() failed: %d\n",
			      error);
		return (error);
	}

	/* Start the Risc processor now that we are fully configured. */
	adw_outw(adw, ADW_RISC_CSR, ADW_RISC_CSR_RUN);

	/*
	 * Create the device queue for our SIM.
	 */
	devq = cam_simq_alloc(adw->max_acbs);
	if (devq == NULL)
		return (ENOMEM);

	/*
	 * Construct our SIM entry.
	 */
	adw->sim = cam_sim_alloc(adw_action, adw_poll, "adw", adw,
	    device_get_unit(adw->device), &adw->lock, 1, adw->max_acbs, devq);
	if (adw->sim == NULL)
		return (ENOMEM);

	/*
	 * Register the bus.
	 */
	mtx_lock(&adw->lock);
	if (xpt_bus_register(adw->sim, adw->device, 0) != CAM_SUCCESS) {
		cam_sim_free(adw->sim, /*free devq*/TRUE);
		error = ENOMEM;
		goto fail;
	}

	if (xpt_create_path(&adw->path, /*periph*/NULL, cam_sim_path(adw->sim),
			    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD)
	    == CAM_REQ_CMP) {
		xpt_setup_ccb(&csa.ccb_h, adw->path, /*priority*/5);
		csa.ccb_h.func_code = XPT_SASYNC_CB;
		csa.event_enable = AC_LOST_DEVICE;
		csa.callback = adw_async;
		csa.callback_arg = adw;
		xpt_action((union ccb *)&csa);
	}

fail:
	mtx_unlock(&adw->lock);
	return (error);
}

void
adw_intr(void *arg)
{
	struct adw_softc *adw;

	adw = arg;
	mtx_lock(&adw->lock);
	adw_intr_locked(adw);
	mtx_unlock(&adw->lock);
}

void
adw_intr_locked(struct adw_softc *adw)
{
	u_int int_stat;

	if ((adw_inw(adw, ADW_CTRL_REG) & ADW_CTRL_REG_HOST_INTR) == 0)
		return;

	/* Reading the register clears the interrupt. */
	int_stat = adw_inb(adw, ADW_INTR_STATUS_REG);

	if ((int_stat & ADW_INTR_STATUS_INTRB) != 0) {
		u_int intrb_code;

		/* Async Microcode Event */
		intrb_code = adw_lram_read_8(adw, ADW_MC_INTRB_CODE);
		switch (intrb_code) {
		case ADW_ASYNC_CARRIER_READY_FAILURE:
			/*
			 * The RISC missed our update of
			 * the commandq.
			 */
			if (LIST_FIRST(&adw->pending_ccbs) != NULL)
				adw_tickle_risc(adw, ADW_TICKLE_A);
			break;
		case ADW_ASYNC_SCSI_BUS_RESET_DET:
			/*
			 * The firmware detected a SCSI Bus reset.
			 */
			device_printf(adw->device, "Someone Reset the Bus\n");
			adw_handle_bus_reset(adw, /*initiated*/FALSE);
			break;
		case ADW_ASYNC_RDMA_FAILURE:
			/*
			 * Handle RDMA failure by resetting the
			 * SCSI Bus and chip.
			 */
#if 0 /* XXX */
			AdvResetChipAndSB(adv_dvc_varp);
#endif
			break;

		case ADW_ASYNC_HOST_SCSI_BUS_RESET:
			/*
			 * Host generated SCSI bus reset occurred.
			 */
			adw_handle_bus_reset(adw, /*initiated*/TRUE);
			break;
		default:
			printf("adw_intr: unknown async code 0x%x\n",
			       intrb_code);
			break;
		}
	}

	/*
	 * Run down the response queue, completing each finished request.
	 */
	while ((adw->responseq->next_ba & ADW_RQ_DONE) != 0) {
		struct adw_carrier *free_carrier;
		struct acb *acb;
		union ccb *ccb;

#if 0
		printf("0x%x, 0x%x, 0x%x, 0x%x\n",
		       adw->responseq->carr_offset,
		       adw->responseq->carr_ba,
		       adw->responseq->areq_ba,
		       adw->responseq->next_ba);
#endif
		/*
		 * The firmware copies the adw_scsi_req_q.acb_baddr
		 * field into the areq_ba field of the carrier.
		 */
		acb = acbbotov(adw, adw->responseq->areq_ba);

		/*
		 * The least significant four bits of the next_ba
		 * field are used as flags.  Mask them out and then
		 * advance through the list.
		 */
		free_carrier = adw->responseq;
		adw->responseq =
		    carrierbotov(adw, free_carrier->next_ba & ADW_NEXT_BA_MASK);
		free_carrier->next_ba = adw->free_carriers->carr_offset;
		adw->free_carriers = free_carrier;

		/* Process CCB */
		ccb = acb->ccb;
		callout_stop(&acb->timer);
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
			bus_dmasync_op_t op;

			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
				op = BUS_DMASYNC_POSTREAD;
			else
				op = BUS_DMASYNC_POSTWRITE;
			bus_dmamap_sync(adw->buffer_dmat, acb->dmamap, op);
			bus_dmamap_unload(adw->buffer_dmat, acb->dmamap);
			ccb->csio.resid = acb->queue.data_cnt;
		} else
			ccb->csio.resid = 0;

		/* Common Cases inline... */
		if (acb->queue.host_status == QHSTA_NO_ERROR
		 && (acb->queue.done_status == QD_NO_ERROR
		  || acb->queue.done_status == QD_WITH_ERROR)) {
			ccb->csio.scsi_status = acb->queue.scsi_status;
			ccb->ccb_h.status = 0;
			switch (ccb->csio.scsi_status) {
			case SCSI_STATUS_OK:
				ccb->ccb_h.status |= CAM_REQ_CMP;
				break;
			case SCSI_STATUS_CHECK_COND:
			case SCSI_STATUS_CMD_TERMINATED:
				bcopy(&acb->sense_data, &ccb->csio.sense_data,
				      ccb->csio.sense_len);
				ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
				ccb->csio.sense_resid = acb->queue.sense_len;
				/* FALLTHROUGH */
			default:
				ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR
						  |  CAM_DEV_QFRZN;
				xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
				break;
			}
			adwfreeacb(adw, acb);
			xpt_done(ccb);
		} else {
			adwprocesserror(adw, acb);
		}
	}
}
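/*
 * Map the firmware's done_status/host_status codes for a request that
 * did not complete cleanly onto the corresponding CAM status values,
 * freezing the device queue where CAM expects it.
 */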
static void
adwprocesserror(struct adw_softc *adw, struct acb *acb)
{
	union ccb *ccb;

	ccb = acb->ccb;
	if (acb->queue.done_status == QD_ABORTED_BY_HOST) {
		ccb->ccb_h.status = CAM_REQ_ABORTED;
	} else {

		switch (acb->queue.host_status) {
		case QHSTA_M_SEL_TIMEOUT:
			ccb->ccb_h.status = CAM_SEL_TIMEOUT;
			break;
		case QHSTA_M_SXFR_OFF_UFLW:
		case QHSTA_M_SXFR_OFF_OFLW:
		case QHSTA_M_DATA_OVER_RUN:
			ccb->ccb_h.status = CAM_DATA_RUN_ERR;
			break;
		case QHSTA_M_SXFR_DESELECTED:
		case QHSTA_M_UNEXPECTED_BUS_FREE:
			ccb->ccb_h.status = CAM_UNEXP_BUSFREE;
			break;
		case QHSTA_M_SCSI_BUS_RESET:
		case QHSTA_M_SCSI_BUS_RESET_UNSOL:
			ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
			break;
		case QHSTA_M_BUS_DEVICE_RESET:
			ccb->ccb_h.status = CAM_BDR_SENT;
			break;
		case QHSTA_M_QUEUE_ABORTED:
			/* BDR or Bus Reset */
			xpt_print_path(adw->path);
			printf("Saw Queue Aborted\n");
			ccb->ccb_h.status = adw->last_reset;
			break;
		case QHSTA_M_SXFR_SDMA_ERR:
		case QHSTA_M_SXFR_SXFR_PERR:
		case QHSTA_M_RDMA_PERR:
			ccb->ccb_h.status = CAM_UNCOR_PARITY;
			break;
		case QHSTA_M_WTM_TIMEOUT:
		case QHSTA_M_SXFR_WD_TMO:
		{
			/* The SCSI bus hung in a phase */
			xpt_print_path(adw->path);
			printf("Watch Dog timer expired.  Resetting bus\n");
			adw_reset_bus(adw);
			break;
		}
		case QHSTA_M_SXFR_XFR_PH_ERR:
			ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
			break;
		case QHSTA_M_SXFR_UNKNOWN_ERROR:
			break;
		case QHSTA_M_BAD_CMPL_STATUS_IN:
			/* No command complete after a status message */
			ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
			break;
		case QHSTA_M_AUTO_REQ_SENSE_FAIL:
			ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
			break;
		case QHSTA_M_INVALID_DEVICE:
			ccb->ccb_h.status = CAM_PATH_INVALID;
			break;
		case QHSTA_M_NO_AUTO_REQ_SENSE:
			/*
			 * User didn't request sense, but we got a
			 * check condition.
			 */
			ccb->csio.scsi_status = acb->queue.scsi_status;
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			break;
		default:
			panic("%s: Unhandled Host status error %x",
			      device_get_nameunit(adw->device),
			      acb->queue.host_status);
			/* NOTREACHED */
		}
	}
	if ((acb->state & ACB_RECOVERY_ACB) != 0) {
		if (ccb->ccb_h.status == CAM_SCSI_BUS_RESET
		 || ccb->ccb_h.status == CAM_BDR_SENT)
			ccb->ccb_h.status = CAM_CMD_TIMEOUT;
	}
	if (ccb->ccb_h.status != CAM_REQ_CMP) {
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
	}
	adwfreeacb(adw, acb);
	xpt_done(ccb);
}

static void
adwtimeout(void *arg)
{
	struct acb *acb;
	union ccb *ccb;
	struct adw_softc *adw;
	adw_idle_cmd_status_t status;
	int target_id;

	acb = (struct acb *)arg;
	ccb = acb->ccb;
	adw = (struct adw_softc *)ccb->ccb_h.ccb_adw_ptr;
	xpt_print_path(ccb->ccb_h.path);
	printf("ACB %p - timed out\n", (void *)acb);

	mtx_assert(&adw->lock, MA_OWNED);

	if ((acb->state & ACB_ACTIVE) == 0) {
		xpt_print_path(ccb->ccb_h.path);
		printf("ACB %p - timed out CCB already completed\n",
		       (void *)acb);
		return;
	}

	acb->state |= ACB_RECOVERY_ACB;
	target_id = ccb->ccb_h.target_id;

	/* Attempt a BDR first */
	status = adw_idle_cmd_send(adw, ADW_IDLE_CMD_DEVICE_RESET,
				   ccb->ccb_h.target_id);
	if (status == ADW_IDLE_CMD_SUCCESS) {
		device_printf(adw->device,
		    "BDR Delivered.  No longer in timeout\n");
		adw_handle_device_reset(adw, target_id);
	} else {
		adw_reset_bus(adw);
		xpt_print_path(adw->path);
		printf("Bus Reset Delivered.  No longer in timeout\n");
	}
}

static void
adw_handle_device_reset(struct adw_softc *adw, u_int target)
{
	struct cam_path *path;
	cam_status error;

	error = xpt_create_path(&path, /*periph*/NULL, cam_sim_path(adw->sim),
				target, CAM_LUN_WILDCARD);

	if (error == CAM_REQ_CMP) {
		xpt_async(AC_SENT_BDR, path, NULL);
		xpt_free_path(path);
	}
	adw->last_reset = CAM_BDR_SENT;
}

static void
adw_handle_bus_reset(struct adw_softc *adw, int initiated)
{
	if (initiated) {
		/*
		 * The microcode currently sets the SCSI Bus Reset signal
		 * while handling the AscSendIdleCmd() IDLE_CMD_SCSI_RESET
		 * command above.  But the SCSI Bus Reset Hold Time in the
		 * microcode is not deterministic (it may in fact be for less
		 * than the SCSI Spec. minimum of 25 us).  Therefore on return
		 * the Adv Library sets the SCSI Bus Reset signal for
		 * ADW_SCSI_RESET_HOLD_TIME_US, which is defined to be greater
		 * than 25 us.
		 */
		u_int scsi_ctrl;

		scsi_ctrl = adw_inw(adw, ADW_SCSI_CTRL) & ~ADW_SCSI_CTRL_RSTOUT;
		adw_outw(adw, ADW_SCSI_CTRL, scsi_ctrl | ADW_SCSI_CTRL_RSTOUT);
		DELAY(ADW_SCSI_RESET_HOLD_TIME_US);
		adw_outw(adw, ADW_SCSI_CTRL, scsi_ctrl);

		/*
		 * We will perform the async notification when the
		 * SCSI Reset interrupt occurs.
		 */
	} else
		xpt_async(AC_BUS_RESET, adw->path, NULL);
	adw->last_reset = CAM_SCSI_BUS_RESET;
}
MODULE_DEPEND(adw, cam, 1, 1, 1);