adwcam.c revision 57679
/*
 * CAM SCSI interface for the Advanced Systems Inc.
 * Second Generation SCSI controllers.
 *
 * Product specific probe and attach routines can be found in:
 *
 * adw_pci.c	ABP[3]940UW, ABP950UW, ABP3940U2W
 *
 * Copyright (c) 1998, 1999, 2000 Justin Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/dev/advansys/adwcam.c 57679 2000-03-02 00:08:35Z gibbs $
 */
/*
 * Ported from:
 * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
 *
 * Copyright (c) 1995-1998 Advanced System Products, Inc.
 * All Rights Reserved.
41 * 42 * Redistribution and use in source and binary forms, with or without 43 * modification, are permitted provided that redistributions of source 44 * code retain the above copyright notice and this comment without 45 * modification. 46 */ 47#include <stddef.h> /* For offsetof */ 48 49#include <sys/param.h> 50#include <sys/systm.h> 51#include <sys/kernel.h> 52#include <sys/malloc.h> 53#include <sys/bus.h> 54 55#include <machine/bus_pio.h> 56#include <machine/bus_memio.h> 57#include <machine/bus.h> 58#include <machine/clock.h> 59#include <machine/resource.h> 60 61#include <sys/rman.h> 62 63#include <cam/cam.h> 64#include <cam/cam_ccb.h> 65#include <cam/cam_sim.h> 66#include <cam/cam_xpt_sim.h> 67#include <cam/cam_debug.h> 68 69#include <cam/scsi/scsi_message.h> 70 71#include <dev/advansys/adwvar.h> 72 73/* Definitions for our use of the SIM private CCB area */ 74#define ccb_acb_ptr spriv_ptr0 75#define ccb_adw_ptr spriv_ptr1 76 77#define MIN(a, b) (((a) < (b)) ? (a) : (b)) 78 79u_long adw_unit; 80 81static __inline cam_status adwccbstatus(union ccb*); 82static __inline struct acb* adwgetacb(struct adw_softc *adw); 83static __inline void adwfreeacb(struct adw_softc *adw, 84 struct acb *acb); 85 86static void adwmapmem(void *arg, bus_dma_segment_t *segs, 87 int nseg, int error); 88static struct sg_map_node* 89 adwallocsgmap(struct adw_softc *adw); 90static int adwallocacbs(struct adw_softc *adw); 91 92static void adwexecuteacb(void *arg, bus_dma_segment_t *dm_segs, 93 int nseg, int error); 94static void adw_action(struct cam_sim *sim, union ccb *ccb); 95static void adw_poll(struct cam_sim *sim); 96static void adw_async(void *callback_arg, u_int32_t code, 97 struct cam_path *path, void *arg); 98static void adwprocesserror(struct adw_softc *adw, struct acb *acb); 99static void adwtimeout(void *arg); 100static void adw_handle_device_reset(struct adw_softc *adw, 101 u_int target); 102static void adw_handle_bus_reset(struct adw_softc *adw, 103 int initiated); 104 
/* Return only the CAM status bits of a CCB, masking off flag bits. */
static __inline cam_status
adwccbstatus(union ccb* ccb)
{
	return (ccb->ccb_h.status & CAM_STATUS_MASK);
}

/*
 * Pull an ACB off the free list, growing the pool (bounded by
 * adw->max_acbs) when the list is empty.  Returns NULL when no ACB
 * can be provided.  Runs at splcam() to protect the free list from
 * the interrupt path.
 */
static __inline struct acb*
adwgetacb(struct adw_softc *adw)
{
	struct acb* acb;
	int s;

	s = splcam();
	if ((acb = SLIST_FIRST(&adw->free_acb_list)) != NULL) {
		SLIST_REMOVE_HEAD(&adw->free_acb_list, links);
	} else if (adw->num_acbs < adw->max_acbs) {
		/* Grow the pool by (up to) one page worth of ACBs. */
		adwallocacbs(adw);
		acb = SLIST_FIRST(&adw->free_acb_list);
		if (acb == NULL)
			printf("%s: Can't malloc ACB\n", adw_name(adw));
		else {
			SLIST_REMOVE_HEAD(&adw->free_acb_list, links);
		}
	}
	splx(s);

	return (acb);
}

/*
 * Return an ACB to the free list.  If the SIM queue was frozen due to
 * a resource shortage, arrange for it to be released by tagging the
 * completing CCB with CAM_RELEASE_SIMQ.
 */
static __inline void
adwfreeacb(struct adw_softc *adw, struct acb *acb)
{
	int s;

	s = splcam();
	/* An active ACB is still on the pending CCB list; unlink it. */
	if ((acb->state & ACB_ACTIVE) != 0)
		LIST_REMOVE(&acb->ccb->ccb_h, sim_links.le);
	if ((acb->state & ACB_RELEASE_SIMQ) != 0)
		acb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
	else if ((adw->state & ADW_RESOURCE_SHORTAGE) != 0
	      && (acb->ccb->ccb_h.status & CAM_RELEASE_SIMQ) == 0) {
		acb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		adw->state &= ~ADW_RESOURCE_SHORTAGE;
	}
	acb->state = ACB_FREE;
	SLIST_INSERT_HEAD(&adw->free_acb_list, acb, links);
	splx(s);
}

/*
 * bus_dmamap_load() callback used for the driver's contiguous control
 * structures: stores the single segment's bus address through arg.
 * Assumes a one-segment, error-free mapping (nseg/error are ignored —
 * the control allocations below are single-segment by construction).
 */
static void
adwmapmem(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *busaddrp;

	busaddrp = (bus_addr_t *)arg;
	*busaddrp = segs->ds_addr;
}

/*
 * Allocate and map one page of scatter/gather blocks, linking the
 * bookkeeping node onto adw->sg_maps so it can be released later in
 * adw_free().  Returns NULL on allocation failure.
 */
static struct sg_map_node *
adwallocsgmap(struct adw_softc *adw)
{
	struct sg_map_node *sg_map;

	sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT);

	if (sg_map == NULL)
		return (NULL);

	/* Allocate S/G space for the next batch of ACBS */
	if (bus_dmamem_alloc(adw->sg_dmat, (void **)&sg_map->sg_vaddr,
			     BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) {
		free(sg_map, M_DEVBUF);
		return (NULL);
	}

	SLIST_INSERT_HEAD(&adw->sg_maps, sg_map, links);
bus_dmamap_load(adw->sg_dmat, sg_map->sg_dmamap, sg_map->sg_vaddr, 183 PAGE_SIZE, adwmapmem, &sg_map->sg_physaddr, /*flags*/0); 184 185 bzero(sg_map->sg_vaddr, PAGE_SIZE); 186 return (sg_map); 187} 188 189/* 190 * Allocate another chunk of CCB's. Return count of entries added. 191 * Assumed to be called at splcam(). 192 */ 193static int 194adwallocacbs(struct adw_softc *adw) 195{ 196 struct acb *next_acb; 197 struct sg_map_node *sg_map; 198 bus_addr_t busaddr; 199 struct adw_sg_block *blocks; 200 int newcount; 201 int i; 202 203 next_acb = &adw->acbs[adw->num_acbs]; 204 sg_map = adwallocsgmap(adw); 205 206 if (sg_map == NULL) 207 return (0); 208 209 blocks = sg_map->sg_vaddr; 210 busaddr = sg_map->sg_physaddr; 211 212 newcount = (PAGE_SIZE / (ADW_SG_BLOCKCNT * sizeof(*blocks))); 213 for (i = 0; adw->num_acbs < adw->max_acbs && i < newcount; i++) { 214 int error; 215 216 error = bus_dmamap_create(adw->buffer_dmat, /*flags*/0, 217 &next_acb->dmamap); 218 if (error != 0) 219 break; 220 next_acb->queue.scsi_req_baddr = acbvtob(adw, next_acb); 221 next_acb->queue.scsi_req_bo = acbvtobo(adw, next_acb); 222 next_acb->queue.sense_baddr = 223 acbvtob(adw, next_acb) + offsetof(struct acb, sense_data); 224 next_acb->sg_blocks = blocks; 225 next_acb->sg_busaddr = busaddr; 226 next_acb->state = ACB_FREE; 227 SLIST_INSERT_HEAD(&adw->free_acb_list, next_acb, links); 228 blocks += ADW_SG_BLOCKCNT; 229 busaddr += ADW_SG_BLOCKCNT * sizeof(*blocks); 230 next_acb++; 231 adw->num_acbs++; 232 } 233 return (i); 234} 235 236static void 237adwexecuteacb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 238{ 239 struct acb *acb; 240 union ccb *ccb; 241 struct adw_softc *adw; 242 int s; 243 244 acb = (struct acb *)arg; 245 ccb = acb->ccb; 246 adw = (struct adw_softc *)ccb->ccb_h.ccb_adw_ptr; 247 248 if (error != 0) { 249 if (error != EFBIG) 250 printf("%s: Unexepected error 0x%x returned from " 251 "bus_dmamap_load\n", adw_name(adw), error); 252 if (ccb->ccb_h.status == 
	    CAM_REQ_INPROG) {
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
			ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN;
		}
		adwfreeacb(adw, acb);
		xpt_done(ccb);
		return;
	}

	if (nseg != 0) {
		bus_dmasync_op_t op;

		/* First segment always lives in the request itself. */
		acb->queue.data_addr = dm_segs[0].ds_addr;
		acb->queue.data_cnt = ccb->csio.dxfer_len;
		if (nseg > 1) {
			struct adw_sg_block *sg_block;
			struct adw_sg_elm *sg;
			bus_addr_t sg_busaddr;
			u_int sg_index;
			bus_dma_segment_t *end_seg;

			end_seg = dm_segs + nseg;

			sg_busaddr = acb->sg_busaddr;
			sg_index = 0;
			/* Copy the segments into our SG list */
			for (sg_block = acb->sg_blocks;; sg_block++) {
				u_int i;

				sg = sg_block->sg_list;
				for (i = 0; i < ADW_NO_OF_SG_PER_BLOCK; i++) {
					if (dm_segs >= end_seg)
						break;

					sg->sg_addr = dm_segs->ds_addr;
					sg->sg_count = dm_segs->ds_len;
					sg++;
					dm_segs++;
				}
				sg_block->sg_cnt = i;
				sg_index += i;
				if (dm_segs == end_seg) {
					/* Terminate the S/G block chain. */
					sg_block->sg_busaddr_next = 0;
					break;
				} else {
					/* Link to the next S/G block. */
					sg_busaddr +=
					    sizeof(struct adw_sg_block);
					sg_block->sg_busaddr_next = sg_busaddr;
				}
			}
			acb->queue.sg_real_addr = acb->sg_busaddr;
		} else {
			acb->queue.sg_real_addr = 0;
		}

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;

		bus_dmamap_sync(adw->buffer_dmat, acb->dmamap, op);

	} else {
		/* No data phase for this command. */
		acb->queue.data_addr = 0;
		acb->queue.data_cnt = 0;
		acb->queue.sg_real_addr = 0;
	}

	s = splcam();

	/*
	 * Last time we need to check if this CCB needs to
	 * be aborted.
	 */
	if (ccb->ccb_h.status != CAM_REQ_INPROG) {
		if (nseg != 0)
			bus_dmamap_unload(adw->buffer_dmat, acb->dmamap);
		adwfreeacb(adw, acb);
		xpt_done(ccb);
		splx(s);
		return;
	}

	acb->state |= ACB_ACTIVE;
	ccb->ccb_h.status |= CAM_SIM_QUEUED;
	LIST_INSERT_HEAD(&adw->pending_ccbs, &ccb->ccb_h, sim_links.le);
	/* Arm the per-command timeout before handing off to the chip. */
	ccb->ccb_h.timeout_ch =
	    timeout(adwtimeout, (caddr_t)acb,
		    (ccb->ccb_h.timeout * hz) / 1000);

	adw_send_acb(adw, acb, acbvtob(adw, acb));

	splx(s);
}

/*
 * Main CAM action entry point for this SIM.  Dispatches on the CCB
 * function code; XPT_SCSI_IO is the hot path.
 */
static void
adw_action(struct cam_sim *sim, union ccb *ccb)
{
	struct	adw_softc *adw;

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("adw_action\n"));

	adw = (struct adw_softc *)cam_sim_softc(sim);

	switch (ccb->ccb_h.func_code) {
	/* Common cases first */
	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
	{
		struct	ccb_scsiio *csio;
		struct	ccb_hdr *ccbh;
		struct	acb *acb;

		csio = &ccb->csio;
		ccbh = &ccb->ccb_h;

		/* Max supported CDB length is 12 bytes */
		if (csio->cdb_len > 12) {
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			return;
		}

		if ((acb = adwgetacb(adw)) == NULL) {
			int s;

			/*
			 * Out of ACBs: freeze the SIM queue and ask CAM
			 * to requeue this request later.
			 */
			s = splcam();
			adw->state |= ADW_RESOURCE_SHORTAGE;
			splx(s);
			xpt_freeze_simq(sim, /*count*/1);
			ccb->ccb_h.status = CAM_REQUEUE_REQ;
			xpt_done(ccb);
			return;
		}

		/* Link acb and ccb so we can find one from the other */
		acb->ccb = ccb;
		ccb->ccb_h.ccb_acb_ptr = acb;
		ccb->ccb_h.ccb_adw_ptr = adw;

		acb->queue.cntl = 0;
		acb->queue.target_cmd = 0;
		acb->queue.target_id = ccb->ccb_h.target_id;
		acb->queue.target_lun = ccb->ccb_h.target_lun;

		acb->queue.mflag = 0;
		/* Never ask for more sense data than we can hold. */
		acb->queue.sense_len =
			MIN(csio->sense_len, sizeof(acb->sense_data));
		acb->queue.cdb_len = csio->cdb_len;
		if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
			switch (csio->tag_action) {
			case
			    MSG_SIMPLE_Q_TAG:
				acb->queue.scsi_cntl = ADW_QSC_SIMPLE_Q_TAG;
				break;
			case MSG_HEAD_OF_Q_TAG:
				acb->queue.scsi_cntl = ADW_QSC_HEAD_OF_Q_TAG;
				break;
			case MSG_ORDERED_Q_TAG:
				acb->queue.scsi_cntl = ADW_QSC_ORDERED_Q_TAG;
				break;
			default:
				acb->queue.scsi_cntl = ADW_QSC_NO_TAGMSG;
				break;
			}
		} else
			acb->queue.scsi_cntl = ADW_QSC_NO_TAGMSG;

		if ((ccb->ccb_h.flags & CAM_DIS_DISCONNECT) != 0)
			acb->queue.scsi_cntl |= ADW_QSC_NO_DISC;

		acb->queue.done_status = 0;
		acb->queue.scsi_status = 0;
		acb->queue.host_status = 0;
		acb->queue.sg_wk_ix = 0;
		/* Copy the CDB into the ACB; physical CDBs are unsupported. */
		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) == 0) {
				bcopy(csio->cdb_io.cdb_ptr,
				      acb->queue.cdb, csio->cdb_len);
			} else {
				/* I guess I could map it in... */
				ccb->ccb_h.status = CAM_REQ_INVALID;
				adwfreeacb(adw, acb);
				xpt_done(ccb);
				return;
			}
		} else {
			bcopy(csio->cdb_io.cdb_bytes,
			      acb->queue.cdb, csio->cdb_len);
		}

		/*
		 * If we have any data to send with this command,
		 * map it into bus space.
		 */
		if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
			if ((ccbh->flags & CAM_SCATTER_VALID) == 0) {
				/*
				 * We've been given a pointer
				 * to a single buffer.
				 */
				if ((ccbh->flags & CAM_DATA_PHYS) == 0) {
					int s;
					int error;

					/*
					 * Map the virtual buffer; the
					 * adwexecuteacb callback finishes
					 * the submission (possibly later,
					 * if the load is deferred).
					 */
					s = splsoftvm();
					error =
					    bus_dmamap_load(adw->buffer_dmat,
							    acb->dmamap,
							    csio->data_ptr,
							    csio->dxfer_len,
							    adwexecuteacb,
							    acb, /*flags*/0);
					if (error == EINPROGRESS) {
						/*
						 * So as to maintain ordering,
						 * freeze the controller queue
						 * until our mapping is
						 * returned.
469 */ 470 xpt_freeze_simq(sim, 1); 471 acb->state |= CAM_RELEASE_SIMQ; 472 } 473 splx(s); 474 } else { 475 struct bus_dma_segment seg; 476 477 /* Pointer to physical buffer */ 478 seg.ds_addr = 479 (bus_addr_t)csio->data_ptr; 480 seg.ds_len = csio->dxfer_len; 481 adwexecuteacb(acb, &seg, 1, 0); 482 } 483 } else { 484 struct bus_dma_segment *segs; 485 486 if ((ccbh->flags & CAM_DATA_PHYS) != 0) 487 panic("adw_action - Physical " 488 "segment pointers " 489 "unsupported"); 490 491 if ((ccbh->flags&CAM_SG_LIST_PHYS)==0) 492 panic("adw_action - Virtual " 493 "segment addresses " 494 "unsupported"); 495 496 /* Just use the segments provided */ 497 segs = (struct bus_dma_segment *)csio->data_ptr; 498 adwexecuteacb(acb, segs, csio->sglist_cnt, 499 (csio->sglist_cnt < ADW_SGSIZE) 500 ? 0 : EFBIG); 501 } 502 } else { 503 adwexecuteacb(acb, NULL, 0, 0); 504 } 505 break; 506 } 507 case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ 508 { 509 adw_idle_cmd_status_t status; 510 511 status = adw_idle_cmd_send(adw, ADW_IDLE_CMD_DEVICE_RESET, 512 ccb->ccb_h.target_id); 513 if (status == ADW_IDLE_CMD_SUCCESS) { 514 ccb->ccb_h.status = CAM_REQ_CMP; 515 if (bootverbose) { 516 xpt_print_path(ccb->ccb_h.path); 517 printf("BDR Delivered\n"); 518 } 519 } else 520 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 521 xpt_done(ccb); 522 break; 523 } 524 case XPT_ABORT: /* Abort the specified CCB */ 525 /* XXX Implement */ 526 ccb->ccb_h.status = CAM_REQ_INVALID; 527 xpt_done(ccb); 528 break; 529 case XPT_SET_TRAN_SETTINGS: 530 { 531 struct ccb_trans_settings *cts; 532 u_int target_mask; 533 int s; 534 535 cts = &ccb->cts; 536 target_mask = 0x01 << ccb->ccb_h.target_id; 537 538 s = splcam(); 539 if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) { 540 u_int sdtrdone; 541 542 sdtrdone = adw_lram_read_16(adw, ADW_MC_SDTR_DONE); 543 if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) { 544 u_int discenb; 545 546 discenb = 547 adw_lram_read_16(adw, ADW_MC_DISC_ENABLE); 548 549 if ((cts->flags & 
				    CCB_TRANS_DISC_ENB) != 0)
					discenb |= target_mask;
				else
					discenb &= ~target_mask;

				adw_lram_write_16(adw, ADW_MC_DISC_ENABLE,
						  discenb);
			}

			if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {

				/* Tag queuing enable is tracked in the softc. */
				if ((cts->flags & CCB_TRANS_TAG_ENB) != 0)
					adw->tagenb |= target_mask;
				else
					adw->tagenb &= ~target_mask;
			}

			if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) {
				u_int wdtrenb_orig;
				u_int wdtrenb;
				u_int wdtrdone;

				wdtrenb_orig =
				    adw_lram_read_16(adw, ADW_MC_WDTR_ABLE);
				wdtrenb = wdtrenb_orig;
				wdtrdone = adw_lram_read_16(adw,
							    ADW_MC_WDTR_DONE);
				switch (cts->bus_width) {
				case MSG_EXT_WDTR_BUS_32_BIT:
				case MSG_EXT_WDTR_BUS_16_BIT:
					wdtrenb |= target_mask;
					break;
				case MSG_EXT_WDTR_BUS_8_BIT:
				default:
					wdtrenb &= ~target_mask;
					break;
				}
				if (wdtrenb != wdtrenb_orig) {
					/*
					 * Clearing the "done" bit forces a
					 * fresh WDTR negotiation.
					 */
					adw_lram_write_16(adw,
							  ADW_MC_WDTR_ABLE,
							  wdtrenb);
					wdtrdone &= ~target_mask;
					adw_lram_write_16(adw,
							  ADW_MC_WDTR_DONE,
							  wdtrdone);
					/* Wide negotiation forces async */
					sdtrdone &= ~target_mask;
					adw_lram_write_16(adw,
							  ADW_MC_SDTR_DONE,
							  sdtrdone);
				}
			}

			if (((cts->valid & CCB_TRANS_SYNC_RATE_VALID) != 0)
			 || ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0)) {
				u_int sdtr_orig;
				u_int sdtr;
				u_int sdtrable_orig;
				u_int sdtrable;

				sdtr = adw_get_chip_sdtr(adw,
							 ccb->ccb_h.target_id);
				sdtr_orig = sdtr;
				sdtrable = adw_lram_read_16(adw,
							    ADW_MC_SDTR_ABLE);
				sdtrable_orig = sdtrable;

				if ((cts->valid
				   & CCB_TRANS_SYNC_RATE_VALID) != 0) {

					sdtr =
					    adw_find_sdtr(adw,
							  cts->sync_period);
				}

				if ((cts->valid
				   & CCB_TRANS_SYNC_OFFSET_VALID) != 0) {
					/* Zero offset means async transfers. */
					if (cts->sync_offset == 0)
						sdtr = ADW_MC_SDTR_ASYNC;
				}

				if (sdtr == ADW_MC_SDTR_ASYNC)
					sdtrable &= ~target_mask;
				else
					sdtrable |= target_mask;
				if (sdtr != sdtr_orig
				 || sdtrable !=
				    sdtrable_orig) {
					/*
					 * Apply the new sync settings and
					 * force renegotiation by clearing
					 * the "done" bit for this target.
					 */
					adw_set_chip_sdtr(adw,
							  ccb->ccb_h.target_id,
							  sdtr);
					sdtrdone &= ~target_mask;
					adw_lram_write_16(adw, ADW_MC_SDTR_ABLE,
							  sdtrable);
					adw_lram_write_16(adw, ADW_MC_SDTR_DONE,
							  sdtrdone);

				}
			}
		}
		splx(s);
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	/* Get default/user set transfer settings for the target */
	{
		struct	ccb_trans_settings *cts;
		u_int	target_mask;

		cts = &ccb->cts;
		target_mask = 0x01 << ccb->ccb_h.target_id;
		if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) {
			u_int mc_sdtr;

			/* Report the user (EEPROM derived) settings. */
			cts->flags = 0;
			if ((adw->user_discenb & target_mask) != 0)
				cts->flags |= CCB_TRANS_DISC_ENB;

			if ((adw->user_tagenb & target_mask) != 0)
				cts->flags |= CCB_TRANS_TAG_ENB;

			if ((adw->user_wdtr & target_mask) != 0)
				cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
			else
				cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;

			mc_sdtr = adw_get_user_sdtr(adw, ccb->ccb_h.target_id);
			cts->sync_period = adw_find_period(adw, mc_sdtr);
			if (cts->sync_period != 0)
				cts->sync_offset = 15; /* XXX ???
	    */
			else
				cts->sync_offset = 0;

			cts->valid = CCB_TRANS_SYNC_RATE_VALID
				   | CCB_TRANS_SYNC_OFFSET_VALID
				   | CCB_TRANS_BUS_WIDTH_VALID
				   | CCB_TRANS_DISC_VALID
				   | CCB_TRANS_TQ_VALID;
			ccb->ccb_h.status = CAM_REQ_CMP;
		} else {
			u_int targ_tinfo;

			/* Report the currently negotiated settings. */
			cts->flags = 0;
			if ((adw_lram_read_16(adw, ADW_MC_DISC_ENABLE)
			  & target_mask) != 0)
				cts->flags |= CCB_TRANS_DISC_ENB;

			if ((adw->tagenb & target_mask) != 0)
				cts->flags |= CCB_TRANS_TAG_ENB;

			targ_tinfo =
			    adw_lram_read_16(adw,
					     ADW_MC_DEVICE_HSHK_CFG_TABLE
					     + (2 * ccb->ccb_h.target_id));

			if ((targ_tinfo & ADW_HSHK_CFG_WIDE_XFR) != 0)
				cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
			else
				cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;

			cts->sync_period =
			    adw_hshk_cfg_period_factor(targ_tinfo);

			cts->sync_offset = targ_tinfo & ADW_HSHK_CFG_OFFSET;
			/* Period and offset must be consistent: both or neither. */
			if (cts->sync_period == 0)
				cts->sync_offset = 0;

			if (cts->sync_offset == 0)
				cts->sync_period = 0;
		}
		cts->valid = CCB_TRANS_SYNC_RATE_VALID
			   | CCB_TRANS_SYNC_OFFSET_VALID
			   | CCB_TRANS_BUS_WIDTH_VALID
			   | CCB_TRANS_DISC_VALID
			   | CCB_TRANS_TQ_VALID;
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_CALC_GEOMETRY:
	{
		struct	  ccb_calc_geometry *ccg;
		u_int32_t size_mb;
		u_int32_t secs_per_cylinder;
		int	  extended;

		/*
		 * XXX	Use Adaptec translation until I find out how to
		 *	get this information from the card.
		 */
		ccg = &ccb->ccg;
		size_mb = ccg->volume_size
			/ ((1024L * 1024L) / ccg->block_size);
		extended = 1;

		/* >1GB volumes get the extended 255/63 translation. */
		if (size_mb > 1024 && extended) {
			ccg->heads = 255;
			ccg->secs_per_track = 63;
		} else {
			ccg->heads = 64;
			ccg->secs_per_track = 32;
		}
		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
	{
		int failure;

		failure = adw_reset_bus(adw);
		if (failure != 0) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		} else {
			if (bootverbose) {
				xpt_print_path(adw->path);
				printf("Bus Reset Delivered\n");
			}
			ccb->ccb_h.status = CAM_REQ_CMP;
		}
		xpt_done(ccb);
		break;
	}
	case XPT_TERM_IO:		/* Terminate the I/O process */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_WIDE_16|PI_SDTR_ABLE|PI_TAG_ABLE;
		cpi->target_sprt = 0;
		cpi->hba_misc = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = ADW_MAX_TID;
		cpi->max_lun = ADW_MAX_LUN;
		cpi->initiator_id = adw->initiator_id;
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 3300;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "AdvanSys", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
}

/* Polled-mode entry point: run the interrupt handler by hand. */
static void
adw_poll(struct cam_sim *sim)
{
	adw_intr(cam_sim_softc(sim));
}

/*
 * Async event callback stub.  AC_LOST_DEVICE is registered in
 * adw_attach(), but no action is currently taken on any event.
 */
static void
adw_async(void *callback_arg, u_int32_t code, struct cam_path *path,
void *arg) 817{ 818} 819 820struct adw_softc * 821adw_alloc(device_t dev, struct resource *regs, int regs_type, int regs_id) 822{ 823 struct adw_softc *adw; 824 int i; 825 826 /* 827 * Allocate a storage area for us 828 */ 829 adw = malloc(sizeof(struct adw_softc), M_DEVBUF, M_NOWAIT); 830 if (adw == NULL) { 831 printf("adw%d: cannot malloc!\n", device_get_unit(dev)); 832 return NULL; 833 } 834 bzero(adw, sizeof(struct adw_softc)); 835 LIST_INIT(&adw->pending_ccbs); 836 SLIST_INIT(&adw->sg_maps); 837 adw->device = dev; 838 adw->unit = device_get_unit(dev); 839 adw->regs_res_type = regs_type; 840 adw->regs_res_id = regs_id; 841 adw->regs = regs; 842 adw->tag = rman_get_bustag(regs); 843 adw->bsh = rman_get_bushandle(regs); 844 i = adw->unit / 10; 845 adw->name = malloc(sizeof("adw") + i + 1, M_DEVBUF, M_NOWAIT); 846 if (adw->name == NULL) { 847 printf("adw%d: cannot malloc name!\n", adw->unit); 848 free(adw, M_DEVBUF); 849 return NULL; 850 } 851 sprintf(adw->name, "adw%d", adw->unit); 852 return(adw); 853} 854 855void 856adw_free(struct adw_softc *adw) 857{ 858 switch (adw->init_level) { 859 case 9: 860 { 861 struct sg_map_node *sg_map; 862 863 while ((sg_map = SLIST_FIRST(&adw->sg_maps)) != NULL) { 864 SLIST_REMOVE_HEAD(&adw->sg_maps, links); 865 bus_dmamap_unload(adw->sg_dmat, 866 sg_map->sg_dmamap); 867 bus_dmamem_free(adw->sg_dmat, sg_map->sg_vaddr, 868 sg_map->sg_dmamap); 869 free(sg_map, M_DEVBUF); 870 } 871 bus_dma_tag_destroy(adw->sg_dmat); 872 } 873 case 8: 874 bus_dmamap_unload(adw->acb_dmat, adw->acb_dmamap); 875 case 7: 876 bus_dmamem_free(adw->acb_dmat, adw->acbs, 877 adw->acb_dmamap); 878 bus_dmamap_destroy(adw->acb_dmat, adw->acb_dmamap); 879 case 6: 880 bus_dma_tag_destroy(adw->acb_dmat); 881 case 5: 882 bus_dmamap_unload(adw->carrier_dmat, adw->carrier_dmamap); 883 case 4: 884 bus_dmamem_free(adw->carrier_dmat, adw->carriers, 885 adw->carrier_dmamap); 886 bus_dmamap_destroy(adw->carrier_dmat, adw->carrier_dmamap); 887 case 3: 888 
		bus_dma_tag_destroy(adw->carrier_dmat);
	/* FALLTHROUGH */
	case 2:
		bus_dma_tag_destroy(adw->buffer_dmat);
	/* FALLTHROUGH */
	case 1:
		bus_dma_tag_destroy(adw->parent_dmat);
	/* FALLTHROUGH */
	case 0:
		break;
	}
	free(adw->name, M_DEVBUF);
	free(adw, M_DEVBUF);
}

/*
 * Read the controller EEPROM, populate the softc with the user
 * configuration, and set up all DMA tags/memory (carriers, ACBs,
 * S/G pages).  Returns 0 on success or an errno; adw->init_level is
 * bumped after each stage so adw_free() can unwind a partial init.
 */
int
adw_init(struct adw_softc *adw)
{
	struct	  adw_eeprom eep_config;
	u_int	  tid;
	u_int	  i;
	u_int16_t checksum;
	u_int16_t scsicfg1;

	checksum = adw_eeprom_read(adw, &eep_config);
	bcopy(eep_config.serial_number, adw->serial_number,
	      sizeof(adw->serial_number));
	if (checksum != eep_config.checksum) {
		u_int16_t serial_number[3];

		adw->flags |= ADW_EEPROM_FAILED;
		printf("%s: EEPROM checksum failed.  Restoring Defaults\n",
		       adw_name(adw));

		/*
		 * Restore the default EEPROM settings.
		 * Assume the 6 byte board serial number that was read
		 * from EEPROM is correct even if the EEPROM checksum
		 * failed.
		 */
		bcopy(adw->default_eeprom, &eep_config, sizeof(eep_config));
		bcopy(adw->serial_number, eep_config.serial_number,
		      sizeof(serial_number));
		adw_eeprom_write(adw, &eep_config);
	}

	/* Pull eeprom information into our softc. */
	adw->bios_ctrl = eep_config.bios_ctrl;
	adw->user_wdtr = eep_config.wdtr_able;
	for (tid = 0; tid < ADW_MAX_TID; tid++) {
		u_int	  mc_sdtr;
		u_int16_t tid_mask;

		tid_mask = 0x1 << tid;
		if ((adw->features & ADW_ULTRA) != 0) {
			/*
			 * Ultra chips store sdtr and ultraenb
			 * bits in their seeprom, so we must
			 * construct valid mc_sdtr entries for
			 * indirectly.
			 */
			if (eep_config.sync1.sync_enable & tid_mask) {
				if (eep_config.sync2.ultra_enable & tid_mask)
					mc_sdtr = ADW_MC_SDTR_20;
				else
					mc_sdtr = ADW_MC_SDTR_10;
			} else
				mc_sdtr = ADW_MC_SDTR_ASYNC;
		} else {
			/*
			 * Non-Ultra chips store four targets' SDTR
			 * settings per 16-bit EEPROM word; pick the
			 * word for this target's group and shift its
			 * byte into place.
			 */
			switch (ADW_TARGET_GROUP(tid)) {
			case 3:
				mc_sdtr = eep_config.sync4.sdtr4;
				break;
			case 2:
				mc_sdtr = eep_config.sync3.sdtr3;
				break;
			case 1:
				mc_sdtr = eep_config.sync2.sdtr2;
				break;
			default: /* Shut up compiler */
			case 0:
				mc_sdtr = eep_config.sync1.sdtr1;
				break;
			}
			mc_sdtr >>= ADW_TARGET_GROUP_SHIFT(tid);
			mc_sdtr &= 0xFF;
		}
		adw_set_user_sdtr(adw, tid, mc_sdtr);
	}
	adw->user_tagenb = eep_config.tagqng_able;
	adw->user_discenb = eep_config.disc_enable;
	adw->max_acbs = eep_config.max_host_qng;
	adw->initiator_id = (eep_config.adapter_scsi_id & ADW_MAX_TID);

	/*
	 * Sanity check the number of host openings.
	 */
	if (adw->max_acbs > ADW_DEF_MAX_HOST_QNG)
		adw->max_acbs = ADW_DEF_MAX_HOST_QNG;
	else if (adw->max_acbs < ADW_DEF_MIN_HOST_QNG) {
		/* If the value is zero, assume it is uninitialized.
		 */
		if (adw->max_acbs == 0)
			adw->max_acbs = ADW_DEF_MAX_HOST_QNG;
		else
			adw->max_acbs = ADW_DEF_MIN_HOST_QNG;
	}

	/*
	 * Translate the EEPROM termination settings into SCSI_CFG1
	 * register bits.  The switches below fall through on purpose:
	 * "both on" implies "high on" implies manual control.
	 */
	scsicfg1 = 0;
	if ((adw->features & ADW_ULTRA2) != 0) {
		switch (eep_config.termination_lvd) {
		default:
			printf("%s: Invalid EEPROM LVD Termination Settings.\n",
			       adw_name(adw));
			printf("%s: Reverting to Automatic LVD Termination\n",
			       adw_name(adw));
			/* FALLTHROUGH */
		case ADW_EEPROM_TERM_AUTO:
			break;
		case ADW_EEPROM_TERM_BOTH_ON:
			scsicfg1 |= ADW2_SCSI_CFG1_TERM_LVD_LO;
			/* FALLTHROUGH */
		case ADW_EEPROM_TERM_HIGH_ON:
			scsicfg1 |= ADW2_SCSI_CFG1_TERM_LVD_HI;
			/* FALLTHROUGH */
		case ADW_EEPROM_TERM_OFF:
			scsicfg1 |= ADW2_SCSI_CFG1_DIS_TERM_DRV;
			break;
		}
	}

	switch (eep_config.termination_se) {
	default:
		printf("%s: Invalid SE EEPROM Termination Settings.\n",
		       adw_name(adw));
		printf("%s: Reverting to Automatic SE Termination\n",
		       adw_name(adw));
		/* FALLTHROUGH */
	case ADW_EEPROM_TERM_AUTO:
		break;
	case ADW_EEPROM_TERM_BOTH_ON:
		scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_L;
		/* FALLTHROUGH */
	case ADW_EEPROM_TERM_HIGH_ON:
		scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_H;
		/* FALLTHROUGH */
	case ADW_EEPROM_TERM_OFF:
		scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_MANUAL;
		break;
	}
	printf("%s: SCSI ID %d, ", adw_name(adw), adw->initiator_id);

	/* DMA tag for mapping buffers into device visible space.
	 */
	if (bus_dma_tag_create(adw->parent_dmat, /*alignment*/1, /*boundary*/0,
			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       /*maxsize*/MAXBSIZE, /*nsegments*/ADW_SGSIZE,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/BUS_DMA_ALLOCNOW,
			       &adw->buffer_dmat) != 0) {
		return (ENOMEM);
	}

	adw->init_level++;

	/* DMA tag for our ccb carrier structures */
	if (bus_dma_tag_create(adw->parent_dmat, /*alignment*/0x10,
			       /*boundary*/0,
			       /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       (adw->max_acbs + ADW_NUM_CARRIER_QUEUES + 1)
				* sizeof(struct adw_carrier),
			       /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &adw->carrier_dmat) != 0) {
		return (ENOMEM);
	}

	adw->init_level++;

	/* Allocation for our ccb carrier structures */
	if (bus_dmamem_alloc(adw->carrier_dmat, (void **)&adw->carriers,
			     BUS_DMA_NOWAIT, &adw->carrier_dmamap) != 0) {
		return (ENOMEM);
	}

	adw->init_level++;

	/* And permanently map them */
	bus_dmamap_load(adw->carrier_dmat, adw->carrier_dmamap,
			adw->carriers,
			(adw->max_acbs + ADW_NUM_CARRIER_QUEUES + 1)
			* sizeof(struct adw_carrier),
			adwmapmem, &adw->carrier_busbase, /*flags*/0);

	/* Clear them out.
	 */
	bzero(adw->carriers, (adw->max_acbs + ADW_NUM_CARRIER_QUEUES + 1)
			     * sizeof(struct adw_carrier));

	/* Setup our free carrier list */
	adw->free_carriers = adw->carriers;
	for (i = 0; i < adw->max_acbs + ADW_NUM_CARRIER_QUEUES; i++) {
		adw->carriers[i].carr_offset =
		    carriervtobo(adw, &adw->carriers[i]);
		adw->carriers[i].carr_ba =
		    carriervtob(adw, &adw->carriers[i]);
		adw->carriers[i].areq_ba = 0;
		/* Chain each carrier to the next via its bus offset. */
		adw->carriers[i].next_ba =
		    carriervtobo(adw, &adw->carriers[i+1]);
	}
	/* Terminal carrier.  Never leaves the freelist */
	adw->carriers[i].carr_offset =
	    carriervtobo(adw, &adw->carriers[i]);
	adw->carriers[i].carr_ba =
	    carriervtob(adw, &adw->carriers[i]);
	adw->carriers[i].areq_ba = 0;
	adw->carriers[i].next_ba = ~0;

	adw->init_level++;

	/* DMA tag for our acb structures */
	if (bus_dma_tag_create(adw->parent_dmat, /*alignment*/1, /*boundary*/0,
			       /*lowaddr*/BUS_SPACE_MAXADDR,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       adw->max_acbs * sizeof(struct acb),
			       /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &adw->acb_dmat) != 0) {
		return (ENOMEM);
	}

	adw->init_level++;

	/* Allocation for our ccbs */
	if (bus_dmamem_alloc(adw->acb_dmat, (void **)&adw->acbs,
			     BUS_DMA_NOWAIT, &adw->acb_dmamap) != 0)
		return (ENOMEM);

	adw->init_level++;

	/* And permanently map them */
	bus_dmamap_load(adw->acb_dmat, adw->acb_dmamap,
			adw->acbs,
			adw->max_acbs * sizeof(struct acb),
			adwmapmem, &adw->acb_busbase, /*flags*/0);

	/* Clear them out. */
	bzero(adw->acbs, adw->max_acbs * sizeof(struct acb));

	/* DMA tag for our S/G structures.
We allocate in page sized chunks */ 1137 if (bus_dma_tag_create(adw->parent_dmat, /*alignment*/1, /*boundary*/0, 1138 /*lowaddr*/BUS_SPACE_MAXADDR, 1139 /*highaddr*/BUS_SPACE_MAXADDR, 1140 /*filter*/NULL, /*filterarg*/NULL, 1141 PAGE_SIZE, /*nsegments*/1, 1142 /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, 1143 /*flags*/0, &adw->sg_dmat) != 0) { 1144 return (ENOMEM); 1145 } 1146 1147 adw->init_level++; 1148 1149 /* Allocate our first batch of ccbs */ 1150 if (adwallocacbs(adw) == 0) 1151 return (ENOMEM); 1152 1153 if (adw_init_chip(adw, scsicfg1) != 0) 1154 return (ENXIO); 1155 1156 printf("Queue Depth %d\n", adw->max_acbs); 1157 1158 return (0); 1159} 1160 1161/* 1162 * Attach all the sub-devices we can find 1163 */ 1164int 1165adw_attach(struct adw_softc *adw) 1166{ 1167 struct ccb_setasync csa; 1168 struct cam_devq *devq; 1169 int s; 1170 int error; 1171 1172 error = 0; 1173 s = splcam(); 1174 /* Hook up our interrupt handler */ 1175 if ((error = bus_setup_intr(adw->device, adw->irq, INTR_TYPE_CAM, 1176 adw_intr, adw, &adw->ih)) != 0) { 1177 device_printf(adw->device, "bus_setup_intr() failed: %d\n", 1178 error); 1179 goto fail; 1180 } 1181 1182 /* Start the Risc processor now that we are fully configured. */ 1183 adw_outw(adw, ADW_RISC_CSR, ADW_RISC_CSR_RUN); 1184 1185 /* 1186 * Create the device queue for our SIM. 1187 */ 1188 devq = cam_simq_alloc(adw->max_acbs); 1189 if (devq == NULL) 1190 return (ENOMEM); 1191 1192 /* 1193 * Construct our SIM entry. 1194 */ 1195 adw->sim = cam_sim_alloc(adw_action, adw_poll, "adw", adw, adw->unit, 1196 1, adw->max_acbs, devq); 1197 if (adw->sim == NULL) { 1198 error = ENOMEM; 1199 goto fail; 1200 } 1201 1202 /* 1203 * Register the bus. 
1204 */ 1205 if (xpt_bus_register(adw->sim, 0) != CAM_SUCCESS) { 1206 cam_sim_free(adw->sim, /*free devq*/TRUE); 1207 error = ENOMEM; 1208 goto fail; 1209 } 1210 1211 if (xpt_create_path(&adw->path, /*periph*/NULL, cam_sim_path(adw->sim), 1212 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) 1213 == CAM_REQ_CMP) { 1214 xpt_setup_ccb(&csa.ccb_h, adw->path, /*priority*/5); 1215 csa.ccb_h.func_code = XPT_SASYNC_CB; 1216 csa.event_enable = AC_LOST_DEVICE; 1217 csa.callback = adw_async; 1218 csa.callback_arg = adw; 1219 xpt_action((union ccb *)&csa); 1220 } 1221 1222fail: 1223 splx(s); 1224 return (error); 1225} 1226 1227void 1228adw_intr(void *arg) 1229{ 1230 struct adw_softc *adw; 1231 u_int int_stat; 1232 1233 adw = (struct adw_softc *)arg; 1234 if ((adw_inw(adw, ADW_CTRL_REG) & ADW_CTRL_REG_HOST_INTR) == 0) 1235 return; 1236 1237 /* Reading the register clears the interrupt. */ 1238 int_stat = adw_inb(adw, ADW_INTR_STATUS_REG); 1239 1240 if ((int_stat & ADW_INTR_STATUS_INTRB) != 0) { 1241 u_int intrb_code; 1242 1243 /* Async Microcode Event */ 1244 intrb_code = adw_lram_read_8(adw, ADW_MC_INTRB_CODE); 1245 switch (intrb_code) { 1246 case ADW_ASYNC_CARRIER_READY_FAILURE: 1247 /* 1248 * The RISC missed our update of 1249 * the commandq. 1250 */ 1251 if (LIST_FIRST(&adw->pending_ccbs) != NULL) 1252 adw_tickle_risc(adw, ADW_TICKLE_A); 1253 break; 1254 case ADW_ASYNC_SCSI_BUS_RESET_DET: 1255 /* 1256 * The firmware detected a SCSI Bus reset. 1257 */ 1258 printf("Someone Reset the Bus\n"); 1259 adw_handle_bus_reset(adw, /*initiated*/FALSE); 1260 break; 1261 case ADW_ASYNC_RDMA_FAILURE: 1262 /* 1263 * Handle RDMA failure by resetting the 1264 * SCSI Bus and chip. 1265 */ 1266#if XXX 1267 AdvResetChipAndSB(adv_dvc_varp); 1268#endif 1269 break; 1270 1271 case ADW_ASYNC_HOST_SCSI_BUS_RESET: 1272 /* 1273 * Host generated SCSI bus reset occurred. 
1274 */ 1275 adw_handle_bus_reset(adw, /*initiated*/TRUE); 1276 break; 1277 default: 1278 printf("adw_intr: unknown async code 0x%x\n", 1279 intrb_code); 1280 break; 1281 } 1282 } 1283 1284 /* 1285 * Run down the RequestQ. 1286 */ 1287 while ((adw->responseq->next_ba & ADW_RQ_DONE) != 0) { 1288 struct adw_carrier *free_carrier; 1289 struct acb *acb; 1290 union ccb *ccb; 1291 1292#if 0 1293 printf("0x%x, 0x%x, 0x%x, 0x%x\n", 1294 adw->responseq->carr_offset, 1295 adw->responseq->carr_ba, 1296 adw->responseq->areq_ba, 1297 adw->responseq->next_ba); 1298#endif 1299 /* 1300 * The firmware copies the adw_scsi_req_q.acb_baddr 1301 * field into the areq_ba field of the carrier. 1302 */ 1303 acb = acbbotov(adw, adw->responseq->areq_ba); 1304 1305 /* 1306 * The least significant four bits of the next_ba 1307 * field are used as flags. Mask them out and then 1308 * advance through the list. 1309 */ 1310 free_carrier = adw->responseq; 1311 adw->responseq = 1312 carrierbotov(adw, free_carrier->next_ba & ADW_NEXT_BA_MASK); 1313 free_carrier->next_ba = adw->free_carriers->carr_offset; 1314 adw->free_carriers = free_carrier; 1315 1316 /* Process CCB */ 1317 ccb = acb->ccb; 1318 untimeout(adwtimeout, acb, ccb->ccb_h.timeout_ch); 1319 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 1320 bus_dmasync_op_t op; 1321 1322 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) 1323 op = BUS_DMASYNC_POSTREAD; 1324 else 1325 op = BUS_DMASYNC_POSTWRITE; 1326 bus_dmamap_sync(adw->buffer_dmat, acb->dmamap, op); 1327 bus_dmamap_unload(adw->buffer_dmat, acb->dmamap); 1328 ccb->csio.resid = acb->queue.data_cnt; 1329 } else 1330 ccb->csio.resid = 0; 1331 1332 /* Common Cases inline... 
*/ 1333 if (acb->queue.host_status == QHSTA_NO_ERROR 1334 && (acb->queue.done_status == QD_NO_ERROR 1335 || acb->queue.done_status == QD_WITH_ERROR)) { 1336 ccb->csio.scsi_status = acb->queue.scsi_status; 1337 ccb->ccb_h.status = 0; 1338 switch (ccb->csio.scsi_status) { 1339 case SCSI_STATUS_OK: 1340 ccb->ccb_h.status |= CAM_REQ_CMP; 1341 break; 1342 case SCSI_STATUS_CHECK_COND: 1343 case SCSI_STATUS_CMD_TERMINATED: 1344 bcopy(&acb->sense_data, &ccb->csio.sense_data, 1345 ccb->csio.sense_len); 1346 ccb->ccb_h.status |= CAM_AUTOSNS_VALID; 1347 ccb->csio.sense_resid = acb->queue.sense_len; 1348 /* FALLTHROUGH */ 1349 default: 1350 ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR 1351 | CAM_DEV_QFRZN; 1352 xpt_freeze_devq(ccb->ccb_h.path, /*count*/1); 1353 break; 1354 } 1355 adwfreeacb(adw, acb); 1356 xpt_done(ccb); 1357 } else { 1358 adwprocesserror(adw, acb); 1359 } 1360 } 1361} 1362 1363static void 1364adwprocesserror(struct adw_softc *adw, struct acb *acb) 1365{ 1366 union ccb *ccb; 1367 1368 ccb = acb->ccb; 1369 if (acb->queue.done_status == QD_ABORTED_BY_HOST) { 1370 ccb->ccb_h.status = CAM_REQ_ABORTED; 1371 } else { 1372 1373 switch (acb->queue.host_status) { 1374 case QHSTA_M_SEL_TIMEOUT: 1375 ccb->ccb_h.status = CAM_SEL_TIMEOUT; 1376 break; 1377 case QHSTA_M_SXFR_OFF_UFLW: 1378 case QHSTA_M_SXFR_OFF_OFLW: 1379 case QHSTA_M_DATA_OVER_RUN: 1380 ccb->ccb_h.status = CAM_DATA_RUN_ERR; 1381 break; 1382 case QHSTA_M_SXFR_DESELECTED: 1383 case QHSTA_M_UNEXPECTED_BUS_FREE: 1384 ccb->ccb_h.status = CAM_UNEXP_BUSFREE; 1385 break; 1386 case QHSTA_M_SCSI_BUS_RESET: 1387 case QHSTA_M_SCSI_BUS_RESET_UNSOL: 1388 ccb->ccb_h.status = CAM_SCSI_BUS_RESET; 1389 break; 1390 case QHSTA_M_BUS_DEVICE_RESET: 1391 ccb->ccb_h.status = CAM_BDR_SENT; 1392 break; 1393 case QHSTA_M_QUEUE_ABORTED: 1394 /* BDR or Bus Reset */ 1395 printf("Saw Queue Aborted\n"); 1396 ccb->ccb_h.status = adw->last_reset; 1397 break; 1398 case QHSTA_M_SXFR_SDMA_ERR: 1399 case QHSTA_M_SXFR_SXFR_PERR: 1400 case 
QHSTA_M_RDMA_PERR: 1401 ccb->ccb_h.status = CAM_UNCOR_PARITY; 1402 break; 1403 case QHSTA_M_WTM_TIMEOUT: 1404 case QHSTA_M_SXFR_WD_TMO: 1405 { 1406 /* The SCSI bus hung in a phase */ 1407 xpt_print_path(adw->path); 1408 printf("Watch Dog timer expired. Reseting bus\n"); 1409 adw_reset_bus(adw); 1410 break; 1411 } 1412 case QHSTA_M_SXFR_XFR_PH_ERR: 1413 ccb->ccb_h.status = CAM_SEQUENCE_FAIL; 1414 break; 1415 case QHSTA_M_SXFR_UNKNOWN_ERROR: 1416 break; 1417 case QHSTA_M_BAD_CMPL_STATUS_IN: 1418 /* No command complete after a status message */ 1419 ccb->ccb_h.status = CAM_SEQUENCE_FAIL; 1420 break; 1421 case QHSTA_M_AUTO_REQ_SENSE_FAIL: 1422 ccb->ccb_h.status = CAM_AUTOSENSE_FAIL; 1423 break; 1424 case QHSTA_M_INVALID_DEVICE: 1425 ccb->ccb_h.status = CAM_PATH_INVALID; 1426 break; 1427 case QHSTA_M_NO_AUTO_REQ_SENSE: 1428 /* 1429 * User didn't request sense, but we got a 1430 * check condition. 1431 */ 1432 ccb->csio.scsi_status = acb->queue.scsi_status; 1433 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; 1434 break; 1435 default: 1436 panic("%s: Unhandled Host status error %x", 1437 adw_name(adw), acb->queue.host_status); 1438 /* NOTREACHED */ 1439 } 1440 } 1441 if ((acb->state & ACB_RECOVERY_ACB) != 0) { 1442 if (ccb->ccb_h.status == CAM_SCSI_BUS_RESET 1443 || ccb->ccb_h.status == CAM_BDR_SENT) 1444 ccb->ccb_h.status = CAM_CMD_TIMEOUT; 1445 } 1446 if (ccb->ccb_h.status != CAM_REQ_CMP) { 1447 xpt_freeze_devq(ccb->ccb_h.path, /*count*/1); 1448 ccb->ccb_h.status |= CAM_DEV_QFRZN; 1449 } 1450 adwfreeacb(adw, acb); 1451 xpt_done(ccb); 1452} 1453 1454static void 1455adwtimeout(void *arg) 1456{ 1457 struct acb *acb; 1458 union ccb *ccb; 1459 struct adw_softc *adw; 1460 adw_idle_cmd_status_t status; 1461 int target_id; 1462 int s; 1463 1464 acb = (struct acb *)arg; 1465 ccb = acb->ccb; 1466 adw = (struct adw_softc *)ccb->ccb_h.ccb_adw_ptr; 1467 xpt_print_path(ccb->ccb_h.path); 1468 printf("ACB %p - timed out\n", (void *)acb); 1469 1470 s = splcam(); 1471 1472 if ((acb->state & 
ACB_ACTIVE) == 0) { 1473 xpt_print_path(ccb->ccb_h.path); 1474 printf("ACB %p - timed out CCB already completed\n", 1475 (void *)acb); 1476 splx(s); 1477 return; 1478 } 1479 1480 acb->state |= ACB_RECOVERY_ACB; 1481 target_id = ccb->ccb_h.target_id; 1482 1483 /* Attempt a BDR first */ 1484 status = adw_idle_cmd_send(adw, ADW_IDLE_CMD_DEVICE_RESET, 1485 ccb->ccb_h.target_id); 1486 splx(s); 1487 if (status == ADW_IDLE_CMD_SUCCESS) { 1488 printf("%s: BDR Delivered. No longer in timeout\n", 1489 adw_name(adw)); 1490 adw_handle_device_reset(adw, target_id); 1491 } else { 1492 adw_reset_bus(adw); 1493 xpt_print_path(adw->path); 1494 printf("Bus Reset Delivered. No longer in timeout\n"); 1495 } 1496} 1497 1498static void 1499adw_handle_device_reset(struct adw_softc *adw, u_int target) 1500{ 1501 struct cam_path *path; 1502 cam_status error; 1503 1504 error = xpt_create_path(&path, /*periph*/NULL, cam_sim_path(adw->sim), 1505 target, CAM_LUN_WILDCARD); 1506 1507 if (error == CAM_REQ_CMP) { 1508 xpt_async(AC_SENT_BDR, path, NULL); 1509 xpt_free_path(path); 1510 } 1511 adw->last_reset = CAM_BDR_SENT; 1512} 1513 1514static void 1515adw_handle_bus_reset(struct adw_softc *adw, int initiated) 1516{ 1517 if (initiated) { 1518 /* 1519 * The microcode currently sets the SCSI Bus Reset signal 1520 * while handling the AscSendIdleCmd() IDLE_CMD_SCSI_RESET 1521 * command above. But the SCSI Bus Reset Hold Time in the 1522 * microcode is not deterministic (it may in fact be for less 1523 * than the SCSI Spec. minimum of 25 us). Therefore on return 1524 * the Adv Library sets the SCSI Bus Reset signal for 1525 * ADW_SCSI_RESET_HOLD_TIME_US, which is defined to be greater 1526 * than 25 us. 
1527 */ 1528 u_int scsi_ctrl; 1529 1530 scsi_ctrl = adw_inw(adw, ADW_SCSI_CTRL) & ~ADW_SCSI_CTRL_RSTOUT; 1531 adw_outw(adw, ADW_SCSI_CTRL, scsi_ctrl | ADW_SCSI_CTRL_RSTOUT); 1532 DELAY(ADW_SCSI_RESET_HOLD_TIME_US); 1533 adw_outw(adw, ADW_SCSI_CTRL, scsi_ctrl); 1534 1535 /* 1536 * We will perform the async notification when the 1537 * SCSI Reset interrupt occurs. 1538 */ 1539 } else 1540 xpt_async(AC_BUS_RESET, adw->path, NULL); 1541 adw->last_reset = CAM_SCSI_BUS_RESET; 1542} 1543