adwcam.c revision 119418
1/* 2 * CAM SCSI interface for the the Advanced Systems Inc. 3 * Second Generation SCSI controllers. 4 * 5 * Product specific probe and attach routines can be found in: 6 * 7 * adw_pci.c ABP[3]940UW, ABP950UW, ABP3940U2W 8 * 9 * Copyright (c) 1998, 1999, 2000 Justin Gibbs. 10 * All rights reserved. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions, and the following disclaimer, 17 * without modification. 18 * 2. The name of the author may not be used to endorse or promote products 19 * derived from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 25 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 31 * SUCH DAMAGE. 32 */ 33/* 34 * Ported from: 35 * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters 36 * 37 * Copyright (c) 1995-1998 Advanced System Products, Inc. 38 * All Rights Reserved. 39 * 40 * Redistribution and use in source and binary forms, with or without 41 * modification, are permitted provided that redistributions of source 42 * code retain the above copyright notice and this comment without 43 * modification. 
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/advansys/adwcam.c 119418 2003-08-24 17:55:58Z obrien $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bus.h>

#include <machine/bus_pio.h>
#include <machine/bus_memio.h>
#include <machine/bus.h>
#include <machine/resource.h>

#include <sys/rman.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_debug.h>

#include <cam/scsi/scsi_message.h>

#include <dev/advansys/adwvar.h>

/* Definitions for our use of the SIM private CCB area */
#define ccb_acb_ptr spriv_ptr0
#define ccb_adw_ptr spriv_ptr1

u_long adw_unit;

static __inline cam_status adwccbstatus(union ccb*);
static __inline struct acb* adwgetacb(struct adw_softc *adw);
static __inline void adwfreeacb(struct adw_softc *adw,
                                struct acb *acb);

static void adwmapmem(void *arg, bus_dma_segment_t *segs,
                      int nseg, int error);
static struct sg_map_node*
            adwallocsgmap(struct adw_softc *adw);
static int  adwallocacbs(struct adw_softc *adw);

static void adwexecuteacb(void *arg, bus_dma_segment_t *dm_segs,
                          int nseg, int error);
static void adw_action(struct cam_sim *sim, union ccb *ccb);
static void adw_poll(struct cam_sim *sim);
static void adw_async(void *callback_arg, u_int32_t code,
                      struct cam_path *path, void *arg);
static void adwprocesserror(struct adw_softc *adw, struct acb *acb);
static void adwtimeout(void *arg);
static void adw_handle_device_reset(struct adw_softc *adw,
                                    u_int target);
static void adw_handle_bus_reset(struct adw_softc *adw,
                                 int initiated);

/* Extract the CAM status field from a CCB's status word. */
static __inline cam_status
adwccbstatus(union ccb* ccb)
{
    return (ccb->ccb_h.status & CAM_STATUS_MASK);
}

/*
 * Pop an ACB (Adapter Control Block) from the softc's freelist,
 * growing the pool (up to max_acbs) on demand.  Returns NULL only
 * when the freelist is empty and the pool cannot be grown.
 * Runs at splcam() to protect the freelist from the interrupt path.
 */
static __inline struct acb*
adwgetacb(struct adw_softc *adw)
{
    struct acb* acb;
    int s;

    s = splcam();
    if ((acb = SLIST_FIRST(&adw->free_acb_list)) != NULL) {
        SLIST_REMOVE_HEAD(&adw->free_acb_list, links);
    } else if (adw->num_acbs < adw->max_acbs) {
        /* Freelist empty but pool not at its limit: allocate more. */
        adwallocacbs(adw);
        acb = SLIST_FIRST(&adw->free_acb_list);
        if (acb == NULL)
            printf("%s: Can't malloc ACB\n", adw_name(adw));
        else {
            SLIST_REMOVE_HEAD(&adw->free_acb_list, links);
        }
    }
    splx(s);

    return (acb);
}

/*
 * Return an ACB to the freelist.  If the ACB was active, unlink its
 * CCB from the pending list.  If a SIM queue release was requested
 * (either on this ACB or because the controller hit a resource
 * shortage), propagate CAM_RELEASE_SIMQ to the completing CCB so the
 * XPT layer unfreezes the queue.
 */
static __inline void
adwfreeacb(struct adw_softc *adw, struct acb *acb)
{
    int s;

    s = splcam();
    if ((acb->state & ACB_ACTIVE) != 0)
        LIST_REMOVE(&acb->ccb->ccb_h, sim_links.le);
    if ((acb->state & ACB_RELEASE_SIMQ) != 0)
        acb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
    else if ((adw->state & ADW_RESOURCE_SHORTAGE) != 0
          && (acb->ccb->ccb_h.status & CAM_RELEASE_SIMQ) == 0) {
        /* Freeing this ACB relieves the shortage; release the queue. */
        acb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
        adw->state &= ~ADW_RESOURCE_SHORTAGE;
    }
    acb->state = ACB_FREE;
    SLIST_INSERT_HEAD(&adw->free_acb_list, acb, links);
    splx(s);
}

/*
 * busdma callback: record the bus address of a single-segment
 * static mapping into the bus_addr_t pointed to by arg.
 */
static void
adwmapmem(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
    bus_addr_t *busaddrp;

    busaddrp = (bus_addr_t *)arg;
    *busaddrp = segs->ds_addr;
}

/*
 * Allocate and permanently map one PAGE_SIZE chunk of DMA-able
 * scatter/gather block storage, linking it onto the softc's sg_maps
 * list so adw_free() can tear it down later.  Returns NULL on
 * allocation failure.
 */
static struct sg_map_node *
adwallocsgmap(struct adw_softc *adw)
{
    struct sg_map_node *sg_map;

    sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT);

    if (sg_map == NULL)
        return (NULL);

    /* Allocate S/G space for the next batch of ACBS */
    if (bus_dmamem_alloc(adw->sg_dmat, (void **)&sg_map->sg_vaddr,
                         BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) {
        free(sg_map, M_DEVBUF);
        return (NULL);
    }

    SLIST_INSERT_HEAD(&adw->sg_maps, sg_map, links);

    bus_dmamap_load(adw->sg_dmat, sg_map->sg_dmamap, sg_map->sg_vaddr,
                    PAGE_SIZE, adwmapmem, &sg_map->sg_physaddr, /*flags*/0);

    bzero(sg_map->sg_vaddr, PAGE_SIZE);
    return (sg_map);
}

188/* 189 * Allocate another chunk of CCB's. Return count of entries added. 190 * Assumed to be called at splcam(). 191 */ 192static int 193adwallocacbs(struct adw_softc *adw) 194{ 195 struct acb *next_acb; 196 struct sg_map_node *sg_map; 197 bus_addr_t busaddr; 198 struct adw_sg_block *blocks; 199 int newcount; 200 int i; 201 202 next_acb = &adw->acbs[adw->num_acbs]; 203 sg_map = adwallocsgmap(adw); 204 205 if (sg_map == NULL) 206 return (0); 207 208 blocks = sg_map->sg_vaddr; 209 busaddr = sg_map->sg_physaddr; 210 211 newcount = (PAGE_SIZE / (ADW_SG_BLOCKCNT * sizeof(*blocks))); 212 for (i = 0; adw->num_acbs < adw->max_acbs && i < newcount; i++) { 213 int error; 214 215 error = bus_dmamap_create(adw->buffer_dmat, /*flags*/0, 216 &next_acb->dmamap); 217 if (error != 0) 218 break; 219 next_acb->queue.scsi_req_baddr = acbvtob(adw, next_acb); 220 next_acb->queue.scsi_req_bo = acbvtobo(adw, next_acb); 221 next_acb->queue.sense_baddr = 222 acbvtob(adw, next_acb) + offsetof(struct acb, sense_data); 223 next_acb->sg_blocks = blocks; 224 next_acb->sg_busaddr = busaddr; 225 next_acb->state = ACB_FREE; 226 SLIST_INSERT_HEAD(&adw->free_acb_list, next_acb, links); 227 blocks += ADW_SG_BLOCKCNT; 228 busaddr += ADW_SG_BLOCKCNT * sizeof(*blocks); 229 next_acb++; 230 adw->num_acbs++; 231 } 232 return (i); 233} 234 235static void 236adwexecuteacb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) 237{ 238 struct acb *acb; 239 union ccb *ccb; 240 struct adw_softc *adw; 241 int s; 242 243 acb = (struct acb *)arg; 244 ccb = acb->ccb; 245 adw = (struct adw_softc *)ccb->ccb_h.ccb_adw_ptr; 246 247 if (error != 0) { 248 if (error != EFBIG) 249 printf("%s: Unexepected error 0x%x returned from " 250 "bus_dmamap_load\n", adw_name(adw), error); 251 if (ccb->ccb_h.status == CAM_REQ_INPROG) { 252 xpt_freeze_devq(ccb->ccb_h.path, /*count*/1); 253 ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN; 254 } 255 adwfreeacb(adw, acb); 256 xpt_done(ccb); 257 return; 258 } 259 260 if (nseg != 
0) { 261 bus_dmasync_op_t op; 262 263 acb->queue.data_addr = dm_segs[0].ds_addr; 264 acb->queue.data_cnt = ccb->csio.dxfer_len; 265 if (nseg > 1) { 266 struct adw_sg_block *sg_block; 267 struct adw_sg_elm *sg; 268 bus_addr_t sg_busaddr; 269 u_int sg_index; 270 bus_dma_segment_t *end_seg; 271 272 end_seg = dm_segs + nseg; 273 274 sg_busaddr = acb->sg_busaddr; 275 sg_index = 0; 276 /* Copy the segments into our SG list */ 277 for (sg_block = acb->sg_blocks;; sg_block++) { 278 u_int i; 279 280 sg = sg_block->sg_list; 281 for (i = 0; i < ADW_NO_OF_SG_PER_BLOCK; i++) { 282 if (dm_segs >= end_seg) 283 break; 284 285 sg->sg_addr = dm_segs->ds_addr; 286 sg->sg_count = dm_segs->ds_len; 287 sg++; 288 dm_segs++; 289 } 290 sg_block->sg_cnt = i; 291 sg_index += i; 292 if (dm_segs == end_seg) { 293 sg_block->sg_busaddr_next = 0; 294 break; 295 } else { 296 sg_busaddr += 297 sizeof(struct adw_sg_block); 298 sg_block->sg_busaddr_next = sg_busaddr; 299 } 300 } 301 acb->queue.sg_real_addr = acb->sg_busaddr; 302 } else { 303 acb->queue.sg_real_addr = 0; 304 } 305 306 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) 307 op = BUS_DMASYNC_PREREAD; 308 else 309 op = BUS_DMASYNC_PREWRITE; 310 311 bus_dmamap_sync(adw->buffer_dmat, acb->dmamap, op); 312 313 } else { 314 acb->queue.data_addr = 0; 315 acb->queue.data_cnt = 0; 316 acb->queue.sg_real_addr = 0; 317 } 318 319 s = splcam(); 320 321 /* 322 * Last time we need to check if this CCB needs to 323 * be aborted. 
324 */ 325 if (ccb->ccb_h.status != CAM_REQ_INPROG) { 326 if (nseg != 0) 327 bus_dmamap_unload(adw->buffer_dmat, acb->dmamap); 328 adwfreeacb(adw, acb); 329 xpt_done(ccb); 330 splx(s); 331 return; 332 } 333 334 acb->state |= ACB_ACTIVE; 335 ccb->ccb_h.status |= CAM_SIM_QUEUED; 336 LIST_INSERT_HEAD(&adw->pending_ccbs, &ccb->ccb_h, sim_links.le); 337 ccb->ccb_h.timeout_ch = 338 timeout(adwtimeout, (caddr_t)acb, 339 (ccb->ccb_h.timeout * hz) / 1000); 340 341 adw_send_acb(adw, acb, acbvtob(adw, acb)); 342 343 splx(s); 344} 345 346static void 347adw_action(struct cam_sim *sim, union ccb *ccb) 348{ 349 struct adw_softc *adw; 350 351 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("adw_action\n")); 352 353 adw = (struct adw_softc *)cam_sim_softc(sim); 354 355 switch (ccb->ccb_h.func_code) { 356 /* Common cases first */ 357 case XPT_SCSI_IO: /* Execute the requested I/O operation */ 358 { 359 struct ccb_scsiio *csio; 360 struct ccb_hdr *ccbh; 361 struct acb *acb; 362 363 csio = &ccb->csio; 364 ccbh = &ccb->ccb_h; 365 366 /* Max supported CDB length is 12 bytes */ 367 if (csio->cdb_len > 12) { 368 ccb->ccb_h.status = CAM_REQ_INVALID; 369 xpt_done(ccb); 370 return; 371 } 372 373 if ((acb = adwgetacb(adw)) == NULL) { 374 int s; 375 376 s = splcam(); 377 adw->state |= ADW_RESOURCE_SHORTAGE; 378 splx(s); 379 xpt_freeze_simq(sim, /*count*/1); 380 ccb->ccb_h.status = CAM_REQUEUE_REQ; 381 xpt_done(ccb); 382 return; 383 } 384 385 /* Link acb and ccb so we can find one from the other */ 386 acb->ccb = ccb; 387 ccb->ccb_h.ccb_acb_ptr = acb; 388 ccb->ccb_h.ccb_adw_ptr = adw; 389 390 acb->queue.cntl = 0; 391 acb->queue.target_cmd = 0; 392 acb->queue.target_id = ccb->ccb_h.target_id; 393 acb->queue.target_lun = ccb->ccb_h.target_lun; 394 395 acb->queue.mflag = 0; 396 acb->queue.sense_len = 397 MIN(csio->sense_len, sizeof(acb->sense_data)); 398 acb->queue.cdb_len = csio->cdb_len; 399 if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) { 400 switch (csio->tag_action) { 401 case 
MSG_SIMPLE_Q_TAG: 402 acb->queue.scsi_cntl = ADW_QSC_SIMPLE_Q_TAG; 403 break; 404 case MSG_HEAD_OF_Q_TAG: 405 acb->queue.scsi_cntl = ADW_QSC_HEAD_OF_Q_TAG; 406 break; 407 case MSG_ORDERED_Q_TAG: 408 acb->queue.scsi_cntl = ADW_QSC_ORDERED_Q_TAG; 409 break; 410 default: 411 acb->queue.scsi_cntl = ADW_QSC_NO_TAGMSG; 412 break; 413 } 414 } else 415 acb->queue.scsi_cntl = ADW_QSC_NO_TAGMSG; 416 417 if ((ccb->ccb_h.flags & CAM_DIS_DISCONNECT) != 0) 418 acb->queue.scsi_cntl |= ADW_QSC_NO_DISC; 419 420 acb->queue.done_status = 0; 421 acb->queue.scsi_status = 0; 422 acb->queue.host_status = 0; 423 acb->queue.sg_wk_ix = 0; 424 if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { 425 if ((ccb->ccb_h.flags & CAM_CDB_PHYS) == 0) { 426 bcopy(csio->cdb_io.cdb_ptr, 427 acb->queue.cdb, csio->cdb_len); 428 } else { 429 /* I guess I could map it in... */ 430 ccb->ccb_h.status = CAM_REQ_INVALID; 431 adwfreeacb(adw, acb); 432 xpt_done(ccb); 433 return; 434 } 435 } else { 436 bcopy(csio->cdb_io.cdb_bytes, 437 acb->queue.cdb, csio->cdb_len); 438 } 439 440 /* 441 * If we have any data to send with this command, 442 * map it into bus space. 443 */ 444 if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 445 if ((ccbh->flags & CAM_SCATTER_VALID) == 0) { 446 /* 447 * We've been given a pointer 448 * to a single buffer. 449 */ 450 if ((ccbh->flags & CAM_DATA_PHYS) == 0) { 451 int s; 452 int error; 453 454 s = splsoftvm(); 455 error = 456 bus_dmamap_load(adw->buffer_dmat, 457 acb->dmamap, 458 csio->data_ptr, 459 csio->dxfer_len, 460 adwexecuteacb, 461 acb, /*flags*/0); 462 if (error == EINPROGRESS) { 463 /* 464 * So as to maintain ordering, 465 * freeze the controller queue 466 * until our mapping is 467 * returned. 
468 */ 469 xpt_freeze_simq(sim, 1); 470 acb->state |= CAM_RELEASE_SIMQ; 471 } 472 splx(s); 473 } else { 474 struct bus_dma_segment seg; 475 476 /* Pointer to physical buffer */ 477 seg.ds_addr = 478 (bus_addr_t)csio->data_ptr; 479 seg.ds_len = csio->dxfer_len; 480 adwexecuteacb(acb, &seg, 1, 0); 481 } 482 } else { 483 struct bus_dma_segment *segs; 484 485 if ((ccbh->flags & CAM_DATA_PHYS) != 0) 486 panic("adw_action - Physical " 487 "segment pointers " 488 "unsupported"); 489 490 if ((ccbh->flags&CAM_SG_LIST_PHYS)==0) 491 panic("adw_action - Virtual " 492 "segment addresses " 493 "unsupported"); 494 495 /* Just use the segments provided */ 496 segs = (struct bus_dma_segment *)csio->data_ptr; 497 adwexecuteacb(acb, segs, csio->sglist_cnt, 498 (csio->sglist_cnt < ADW_SGSIZE) 499 ? 0 : EFBIG); 500 } 501 } else { 502 adwexecuteacb(acb, NULL, 0, 0); 503 } 504 break; 505 } 506 case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ 507 { 508 adw_idle_cmd_status_t status; 509 510 status = adw_idle_cmd_send(adw, ADW_IDLE_CMD_DEVICE_RESET, 511 ccb->ccb_h.target_id); 512 if (status == ADW_IDLE_CMD_SUCCESS) { 513 ccb->ccb_h.status = CAM_REQ_CMP; 514 if (bootverbose) { 515 xpt_print_path(ccb->ccb_h.path); 516 printf("BDR Delivered\n"); 517 } 518 } else 519 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 520 xpt_done(ccb); 521 break; 522 } 523 case XPT_ABORT: /* Abort the specified CCB */ 524 /* XXX Implement */ 525 ccb->ccb_h.status = CAM_REQ_INVALID; 526 xpt_done(ccb); 527 break; 528 case XPT_SET_TRAN_SETTINGS: 529 { 530 struct ccb_trans_settings *cts; 531 u_int target_mask; 532 int s; 533 534 cts = &ccb->cts; 535 target_mask = 0x01 << ccb->ccb_h.target_id; 536 537 s = splcam(); 538 if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) { 539 u_int sdtrdone; 540 541 sdtrdone = adw_lram_read_16(adw, ADW_MC_SDTR_DONE); 542 if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) { 543 u_int discenb; 544 545 discenb = 546 adw_lram_read_16(adw, ADW_MC_DISC_ENABLE); 547 548 if ((cts->flags & 
CCB_TRANS_DISC_ENB) != 0) 549 discenb |= target_mask; 550 else 551 discenb &= ~target_mask; 552 553 adw_lram_write_16(adw, ADW_MC_DISC_ENABLE, 554 discenb); 555 } 556 557 if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) { 558 559 if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) 560 adw->tagenb |= target_mask; 561 else 562 adw->tagenb &= ~target_mask; 563 } 564 565 if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) { 566 u_int wdtrenb_orig; 567 u_int wdtrenb; 568 u_int wdtrdone; 569 570 wdtrenb_orig = 571 adw_lram_read_16(adw, ADW_MC_WDTR_ABLE); 572 wdtrenb = wdtrenb_orig; 573 wdtrdone = adw_lram_read_16(adw, 574 ADW_MC_WDTR_DONE); 575 switch (cts->bus_width) { 576 case MSG_EXT_WDTR_BUS_32_BIT: 577 case MSG_EXT_WDTR_BUS_16_BIT: 578 wdtrenb |= target_mask; 579 break; 580 case MSG_EXT_WDTR_BUS_8_BIT: 581 default: 582 wdtrenb &= ~target_mask; 583 break; 584 } 585 if (wdtrenb != wdtrenb_orig) { 586 adw_lram_write_16(adw, 587 ADW_MC_WDTR_ABLE, 588 wdtrenb); 589 wdtrdone &= ~target_mask; 590 adw_lram_write_16(adw, 591 ADW_MC_WDTR_DONE, 592 wdtrdone); 593 /* Wide negotiation forces async */ 594 sdtrdone &= ~target_mask; 595 adw_lram_write_16(adw, 596 ADW_MC_SDTR_DONE, 597 sdtrdone); 598 } 599 } 600 601 if (((cts->valid & CCB_TRANS_SYNC_RATE_VALID) != 0) 602 || ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0)) { 603 u_int sdtr_orig; 604 u_int sdtr; 605 u_int sdtrable_orig; 606 u_int sdtrable; 607 608 sdtr = adw_get_chip_sdtr(adw, 609 ccb->ccb_h.target_id); 610 sdtr_orig = sdtr; 611 sdtrable = adw_lram_read_16(adw, 612 ADW_MC_SDTR_ABLE); 613 sdtrable_orig = sdtrable; 614 615 if ((cts->valid 616 & CCB_TRANS_SYNC_RATE_VALID) != 0) { 617 618 sdtr = 619 adw_find_sdtr(adw, 620 cts->sync_period); 621 } 622 623 if ((cts->valid 624 & CCB_TRANS_SYNC_OFFSET_VALID) != 0) { 625 if (cts->sync_offset == 0) 626 sdtr = ADW_MC_SDTR_ASYNC; 627 } 628 629 if (sdtr == ADW_MC_SDTR_ASYNC) 630 sdtrable &= ~target_mask; 631 else 632 sdtrable |= target_mask; 633 if (sdtr != sdtr_orig 634 || sdtrable != 
sdtrable_orig) { 635 adw_set_chip_sdtr(adw, 636 ccb->ccb_h.target_id, 637 sdtr); 638 sdtrdone &= ~target_mask; 639 adw_lram_write_16(adw, ADW_MC_SDTR_ABLE, 640 sdtrable); 641 adw_lram_write_16(adw, ADW_MC_SDTR_DONE, 642 sdtrdone); 643 644 } 645 } 646 } 647 splx(s); 648 ccb->ccb_h.status = CAM_REQ_CMP; 649 xpt_done(ccb); 650 break; 651 } 652 case XPT_GET_TRAN_SETTINGS: 653 /* Get default/user set transfer settings for the target */ 654 { 655 struct ccb_trans_settings *cts; 656 u_int target_mask; 657 658 cts = &ccb->cts; 659 target_mask = 0x01 << ccb->ccb_h.target_id; 660 if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) { 661 u_int mc_sdtr; 662 663 cts->flags = 0; 664 if ((adw->user_discenb & target_mask) != 0) 665 cts->flags |= CCB_TRANS_DISC_ENB; 666 667 if ((adw->user_tagenb & target_mask) != 0) 668 cts->flags |= CCB_TRANS_TAG_ENB; 669 670 if ((adw->user_wdtr & target_mask) != 0) 671 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 672 else 673 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 674 675 mc_sdtr = adw_get_user_sdtr(adw, ccb->ccb_h.target_id); 676 cts->sync_period = adw_find_period(adw, mc_sdtr); 677 if (cts->sync_period != 0) 678 cts->sync_offset = 15; /* XXX ??? 
*/ 679 else 680 cts->sync_offset = 0; 681 682 cts->valid = CCB_TRANS_SYNC_RATE_VALID 683 | CCB_TRANS_SYNC_OFFSET_VALID 684 | CCB_TRANS_BUS_WIDTH_VALID 685 | CCB_TRANS_DISC_VALID 686 | CCB_TRANS_TQ_VALID; 687 ccb->ccb_h.status = CAM_REQ_CMP; 688 } else { 689 u_int targ_tinfo; 690 691 cts->flags = 0; 692 if ((adw_lram_read_16(adw, ADW_MC_DISC_ENABLE) 693 & target_mask) != 0) 694 cts->flags |= CCB_TRANS_DISC_ENB; 695 696 if ((adw->tagenb & target_mask) != 0) 697 cts->flags |= CCB_TRANS_TAG_ENB; 698 699 targ_tinfo = 700 adw_lram_read_16(adw, 701 ADW_MC_DEVICE_HSHK_CFG_TABLE 702 + (2 * ccb->ccb_h.target_id)); 703 704 if ((targ_tinfo & ADW_HSHK_CFG_WIDE_XFR) != 0) 705 cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT; 706 else 707 cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT; 708 709 cts->sync_period = 710 adw_hshk_cfg_period_factor(targ_tinfo); 711 712 cts->sync_offset = targ_tinfo & ADW_HSHK_CFG_OFFSET; 713 if (cts->sync_period == 0) 714 cts->sync_offset = 0; 715 716 if (cts->sync_offset == 0) 717 cts->sync_period = 0; 718 } 719 cts->valid = CCB_TRANS_SYNC_RATE_VALID 720 | CCB_TRANS_SYNC_OFFSET_VALID 721 | CCB_TRANS_BUS_WIDTH_VALID 722 | CCB_TRANS_DISC_VALID 723 | CCB_TRANS_TQ_VALID; 724 ccb->ccb_h.status = CAM_REQ_CMP; 725 xpt_done(ccb); 726 break; 727 } 728 case XPT_CALC_GEOMETRY: 729 { 730 /* 731 * XXX Use Adaptec translation until I find out how to 732 * get this information from the card. 
733 */ 734 cam_calc_geometry(&ccb->ccg, /*extended*/1); 735 xpt_done(ccb); 736 break; 737 } 738 case XPT_RESET_BUS: /* Reset the specified SCSI bus */ 739 { 740 int failure; 741 742 failure = adw_reset_bus(adw); 743 if (failure != 0) { 744 ccb->ccb_h.status = CAM_REQ_CMP_ERR; 745 } else { 746 if (bootverbose) { 747 xpt_print_path(adw->path); 748 printf("Bus Reset Delivered\n"); 749 } 750 ccb->ccb_h.status = CAM_REQ_CMP; 751 } 752 xpt_done(ccb); 753 break; 754 } 755 case XPT_TERM_IO: /* Terminate the I/O process */ 756 /* XXX Implement */ 757 ccb->ccb_h.status = CAM_REQ_INVALID; 758 xpt_done(ccb); 759 break; 760 case XPT_PATH_INQ: /* Path routing inquiry */ 761 { 762 struct ccb_pathinq *cpi = &ccb->cpi; 763 764 cpi->version_num = 1; 765 cpi->hba_inquiry = PI_WIDE_16|PI_SDTR_ABLE|PI_TAG_ABLE; 766 cpi->target_sprt = 0; 767 cpi->hba_misc = 0; 768 cpi->hba_eng_cnt = 0; 769 cpi->max_target = ADW_MAX_TID; 770 cpi->max_lun = ADW_MAX_LUN; 771 cpi->initiator_id = adw->initiator_id; 772 cpi->bus_id = cam_sim_bus(sim); 773 cpi->base_transfer_speed = 3300; 774 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 775 strncpy(cpi->hba_vid, "AdvanSys", HBA_IDLEN); 776 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); 777 cpi->unit_number = cam_sim_unit(sim); 778 cpi->ccb_h.status = CAM_REQ_CMP; 779 xpt_done(ccb); 780 break; 781 } 782 default: 783 ccb->ccb_h.status = CAM_REQ_INVALID; 784 xpt_done(ccb); 785 break; 786 } 787} 788 789static void 790adw_poll(struct cam_sim *sim) 791{ 792 adw_intr(cam_sim_softc(sim)); 793} 794 795static void 796adw_async(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg) 797{ 798} 799 800struct adw_softc * 801adw_alloc(device_t dev, struct resource *regs, int regs_type, int regs_id) 802{ 803 struct adw_softc *adw; 804 int i; 805 806 /* 807 * Allocate a storage area for us 808 */ 809 adw = malloc(sizeof(struct adw_softc), M_DEVBUF, M_NOWAIT | M_ZERO); 810 if (adw == NULL) { 811 printf("adw%d: cannot malloc!\n", device_get_unit(dev)); 812 
return NULL; 813 } 814 LIST_INIT(&adw->pending_ccbs); 815 SLIST_INIT(&adw->sg_maps); 816 adw->device = dev; 817 adw->unit = device_get_unit(dev); 818 adw->regs_res_type = regs_type; 819 adw->regs_res_id = regs_id; 820 adw->regs = regs; 821 adw->tag = rman_get_bustag(regs); 822 adw->bsh = rman_get_bushandle(regs); 823 i = adw->unit / 10; 824 adw->name = malloc(sizeof("adw") + i + 1, M_DEVBUF, M_NOWAIT); 825 if (adw->name == NULL) { 826 printf("adw%d: cannot malloc name!\n", adw->unit); 827 free(adw, M_DEVBUF); 828 return NULL; 829 } 830 sprintf(adw->name, "adw%d", adw->unit); 831 return(adw); 832} 833 834void 835adw_free(struct adw_softc *adw) 836{ 837 switch (adw->init_level) { 838 case 9: 839 { 840 struct sg_map_node *sg_map; 841 842 while ((sg_map = SLIST_FIRST(&adw->sg_maps)) != NULL) { 843 SLIST_REMOVE_HEAD(&adw->sg_maps, links); 844 bus_dmamap_unload(adw->sg_dmat, 845 sg_map->sg_dmamap); 846 bus_dmamem_free(adw->sg_dmat, sg_map->sg_vaddr, 847 sg_map->sg_dmamap); 848 free(sg_map, M_DEVBUF); 849 } 850 bus_dma_tag_destroy(adw->sg_dmat); 851 } 852 case 8: 853 bus_dmamap_unload(adw->acb_dmat, adw->acb_dmamap); 854 case 7: 855 bus_dmamem_free(adw->acb_dmat, adw->acbs, 856 adw->acb_dmamap); 857 bus_dmamap_destroy(adw->acb_dmat, adw->acb_dmamap); 858 case 6: 859 bus_dma_tag_destroy(adw->acb_dmat); 860 case 5: 861 bus_dmamap_unload(adw->carrier_dmat, adw->carrier_dmamap); 862 case 4: 863 bus_dmamem_free(adw->carrier_dmat, adw->carriers, 864 adw->carrier_dmamap); 865 bus_dmamap_destroy(adw->carrier_dmat, adw->carrier_dmamap); 866 case 3: 867 bus_dma_tag_destroy(adw->carrier_dmat); 868 case 2: 869 bus_dma_tag_destroy(adw->buffer_dmat); 870 case 1: 871 bus_dma_tag_destroy(adw->parent_dmat); 872 case 0: 873 break; 874 } 875 free(adw->name, M_DEVBUF); 876 free(adw, M_DEVBUF); 877} 878 879int 880adw_init(struct adw_softc *adw) 881{ 882 struct adw_eeprom eep_config; 883 u_int tid; 884 u_int i; 885 u_int16_t checksum; 886 u_int16_t scsicfg1; 887 888 checksum = 
adw_eeprom_read(adw, &eep_config); 889 bcopy(eep_config.serial_number, adw->serial_number, 890 sizeof(adw->serial_number)); 891 if (checksum != eep_config.checksum) { 892 u_int16_t serial_number[3]; 893 894 adw->flags |= ADW_EEPROM_FAILED; 895 printf("%s: EEPROM checksum failed. Restoring Defaults\n", 896 adw_name(adw)); 897 898 /* 899 * Restore the default EEPROM settings. 900 * Assume the 6 byte board serial number that was read 901 * from EEPROM is correct even if the EEPROM checksum 902 * failed. 903 */ 904 bcopy(adw->default_eeprom, &eep_config, sizeof(eep_config)); 905 bcopy(adw->serial_number, eep_config.serial_number, 906 sizeof(serial_number)); 907 adw_eeprom_write(adw, &eep_config); 908 } 909 910 /* Pull eeprom information into our softc. */ 911 adw->bios_ctrl = eep_config.bios_ctrl; 912 adw->user_wdtr = eep_config.wdtr_able; 913 for (tid = 0; tid < ADW_MAX_TID; tid++) { 914 u_int mc_sdtr; 915 u_int16_t tid_mask; 916 917 tid_mask = 0x1 << tid; 918 if ((adw->features & ADW_ULTRA) != 0) { 919 /* 920 * Ultra chips store sdtr and ultraenb 921 * bits in their seeprom, so we must 922 * construct valid mc_sdtr entries for 923 * indirectly. 
924 */ 925 if (eep_config.sync1.sync_enable & tid_mask) { 926 if (eep_config.sync2.ultra_enable & tid_mask) 927 mc_sdtr = ADW_MC_SDTR_20; 928 else 929 mc_sdtr = ADW_MC_SDTR_10; 930 } else 931 mc_sdtr = ADW_MC_SDTR_ASYNC; 932 } else { 933 switch (ADW_TARGET_GROUP(tid)) { 934 case 3: 935 mc_sdtr = eep_config.sync4.sdtr4; 936 break; 937 case 2: 938 mc_sdtr = eep_config.sync3.sdtr3; 939 break; 940 case 1: 941 mc_sdtr = eep_config.sync2.sdtr2; 942 break; 943 default: /* Shut up compiler */ 944 case 0: 945 mc_sdtr = eep_config.sync1.sdtr1; 946 break; 947 } 948 mc_sdtr >>= ADW_TARGET_GROUP_SHIFT(tid); 949 mc_sdtr &= 0xFF; 950 } 951 adw_set_user_sdtr(adw, tid, mc_sdtr); 952 } 953 adw->user_tagenb = eep_config.tagqng_able; 954 adw->user_discenb = eep_config.disc_enable; 955 adw->max_acbs = eep_config.max_host_qng; 956 adw->initiator_id = (eep_config.adapter_scsi_id & ADW_MAX_TID); 957 958 /* 959 * Sanity check the number of host openings. 960 */ 961 if (adw->max_acbs > ADW_DEF_MAX_HOST_QNG) 962 adw->max_acbs = ADW_DEF_MAX_HOST_QNG; 963 else if (adw->max_acbs < ADW_DEF_MIN_HOST_QNG) { 964 /* If the value is zero, assume it is uninitialized. 
*/ 965 if (adw->max_acbs == 0) 966 adw->max_acbs = ADW_DEF_MAX_HOST_QNG; 967 else 968 adw->max_acbs = ADW_DEF_MIN_HOST_QNG; 969 } 970 971 scsicfg1 = 0; 972 if ((adw->features & ADW_ULTRA2) != 0) { 973 switch (eep_config.termination_lvd) { 974 default: 975 printf("%s: Invalid EEPROM LVD Termination Settings.\n", 976 adw_name(adw)); 977 printf("%s: Reverting to Automatic LVD Termination\n", 978 adw_name(adw)); 979 /* FALLTHROUGH */ 980 case ADW_EEPROM_TERM_AUTO: 981 break; 982 case ADW_EEPROM_TERM_BOTH_ON: 983 scsicfg1 |= ADW2_SCSI_CFG1_TERM_LVD_LO; 984 /* FALLTHROUGH */ 985 case ADW_EEPROM_TERM_HIGH_ON: 986 scsicfg1 |= ADW2_SCSI_CFG1_TERM_LVD_HI; 987 /* FALLTHROUGH */ 988 case ADW_EEPROM_TERM_OFF: 989 scsicfg1 |= ADW2_SCSI_CFG1_DIS_TERM_DRV; 990 break; 991 } 992 } 993 994 switch (eep_config.termination_se) { 995 default: 996 printf("%s: Invalid SE EEPROM Termination Settings.\n", 997 adw_name(adw)); 998 printf("%s: Reverting to Automatic SE Termination\n", 999 adw_name(adw)); 1000 /* FALLTHROUGH */ 1001 case ADW_EEPROM_TERM_AUTO: 1002 break; 1003 case ADW_EEPROM_TERM_BOTH_ON: 1004 scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_L; 1005 /* FALLTHROUGH */ 1006 case ADW_EEPROM_TERM_HIGH_ON: 1007 scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_H; 1008 /* FALLTHROUGH */ 1009 case ADW_EEPROM_TERM_OFF: 1010 scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_MANUAL; 1011 break; 1012 } 1013 printf("%s: SCSI ID %d, ", adw_name(adw), adw->initiator_id); 1014 1015 /* DMA tag for mapping buffers into device visible space. 
*/ 1016 if (bus_dma_tag_create( 1017 /* parent */ adw->parent_dmat, 1018 /* alignment */ 1, 1019 /* boundary */ 0, 1020 /* lowaddr */ BUS_SPACE_MAXADDR_32BIT, 1021 /* highaddr */ BUS_SPACE_MAXADDR, 1022 /* filter */ NULL, 1023 /* filterarg */ NULL, 1024 /* maxsize */ MAXBSIZE, 1025 /* nsegments */ ADW_SGSIZE, 1026 /* maxsegsz */ BUS_SPACE_MAXSIZE_32BIT, 1027 /* flags */ BUS_DMA_ALLOCNOW, 1028 /* lockfunc */ busdma_lock_mutex, 1029 /* lockarg */ &Giant, 1030 &adw->buffer_dmat) != 0) { 1031 return (ENOMEM); 1032 } 1033 1034 adw->init_level++; 1035 1036 /* DMA tag for our ccb carrier structures */ 1037 if (bus_dma_tag_create( 1038 /* parent */ adw->parent_dmat, 1039 /* alignment */ 0x10, 1040 /* boundary */ 0, 1041 /* lowaddr */ BUS_SPACE_MAXADDR_32BIT, 1042 /* highaddr */ BUS_SPACE_MAXADDR, 1043 /* filter */ NULL, 1044 /* filterarg */ NULL, 1045 /* maxsize */ (adw->max_acbs + 1046 ADW_NUM_CARRIER_QUEUES + 1) * 1047 sizeof(struct adw_carrier), 1048 /* nsegments */ 1, 1049 /* maxsegsz */ BUS_SPACE_MAXSIZE_32BIT, 1050 /* flags */ 0, 1051 /* lockfunc */ busdma_lock_mutex, 1052 /* lockarg */ &Giant, 1053 &adw->carrier_dmat) != 0) { 1054 return (ENOMEM); 1055 } 1056 1057 adw->init_level++; 1058 1059 /* Allocation for our ccb carrier structures */ 1060 if (bus_dmamem_alloc(adw->carrier_dmat, (void **)&adw->carriers, 1061 BUS_DMA_NOWAIT, &adw->carrier_dmamap) != 0) { 1062 return (ENOMEM); 1063 } 1064 1065 adw->init_level++; 1066 1067 /* And permanently map them */ 1068 bus_dmamap_load(adw->carrier_dmat, adw->carrier_dmamap, 1069 adw->carriers, 1070 (adw->max_acbs + ADW_NUM_CARRIER_QUEUES + 1) 1071 * sizeof(struct adw_carrier), 1072 adwmapmem, &adw->carrier_busbase, /*flags*/0); 1073 1074 /* Clear them out. 
*/ 1075 bzero(adw->carriers, (adw->max_acbs + ADW_NUM_CARRIER_QUEUES + 1) 1076 * sizeof(struct adw_carrier)); 1077 1078 /* Setup our free carrier list */ 1079 adw->free_carriers = adw->carriers; 1080 for (i = 0; i < adw->max_acbs + ADW_NUM_CARRIER_QUEUES; i++) { 1081 adw->carriers[i].carr_offset = 1082 carriervtobo(adw, &adw->carriers[i]); 1083 adw->carriers[i].carr_ba = 1084 carriervtob(adw, &adw->carriers[i]); 1085 adw->carriers[i].areq_ba = 0; 1086 adw->carriers[i].next_ba = 1087 carriervtobo(adw, &adw->carriers[i+1]); 1088 } 1089 /* Terminal carrier. Never leaves the freelist */ 1090 adw->carriers[i].carr_offset = 1091 carriervtobo(adw, &adw->carriers[i]); 1092 adw->carriers[i].carr_ba = 1093 carriervtob(adw, &adw->carriers[i]); 1094 adw->carriers[i].areq_ba = 0; 1095 adw->carriers[i].next_ba = ~0; 1096 1097 adw->init_level++; 1098 1099 /* DMA tag for our acb structures */ 1100 if (bus_dma_tag_create( 1101 /* parent */ adw->parent_dmat, 1102 /* alignment */ 1, 1103 /* boundary */ 0, 1104 /* lowaddr */ BUS_SPACE_MAXADDR, 1105 /* highaddr */ BUS_SPACE_MAXADDR, 1106 /* filter */ NULL, 1107 /* filterarg */ NULL, 1108 /* maxsize */ adw->max_acbs * sizeof(struct acb), 1109 /* nsegments */ 1, 1110 /* maxsegsz */ BUS_SPACE_MAXSIZE_32BIT, 1111 /* flags */ 0, 1112 /* lockfunc */ busdma_lock_mutex, 1113 /* lockarg */ &Giant, 1114 &adw->acb_dmat) != 0) { 1115 return (ENOMEM); 1116 } 1117 1118 adw->init_level++; 1119 1120 /* Allocation for our ccbs */ 1121 if (bus_dmamem_alloc(adw->acb_dmat, (void **)&adw->acbs, 1122 BUS_DMA_NOWAIT, &adw->acb_dmamap) != 0) 1123 return (ENOMEM); 1124 1125 adw->init_level++; 1126 1127 /* And permanently map them */ 1128 bus_dmamap_load(adw->acb_dmat, adw->acb_dmamap, 1129 adw->acbs, 1130 adw->max_acbs * sizeof(struct acb), 1131 adwmapmem, &adw->acb_busbase, /*flags*/0); 1132 1133 /* Clear them out. */ 1134 bzero(adw->acbs, adw->max_acbs * sizeof(struct acb)); 1135 1136 /* DMA tag for our S/G structures. 
We allocate in page sized chunks */ 1137 if (bus_dma_tag_create( 1138 /* parent */ adw->parent_dmat, 1139 /* alignment */ 1, 1140 /* boundary */ 0, 1141 /* lowaddr */ BUS_SPACE_MAXADDR, 1142 /* highaddr */ BUS_SPACE_MAXADDR, 1143 /* filter */ NULL, 1144 /* filterarg */ NULL, 1145 /* maxsize */ PAGE_SIZE, 1146 /* nsegments */ 1, 1147 /* maxsegsz */ BUS_SPACE_MAXSIZE_32BIT, 1148 /* flags */ 0, 1149 /* lockfunc */ busdma_lock_mutex, 1150 /* lockarg */ &Giant, 1151 &adw->sg_dmat) != 0) { 1152 return (ENOMEM); 1153 } 1154 1155 adw->init_level++; 1156 1157 /* Allocate our first batch of ccbs */ 1158 if (adwallocacbs(adw) == 0) 1159 return (ENOMEM); 1160 1161 if (adw_init_chip(adw, scsicfg1) != 0) 1162 return (ENXIO); 1163 1164 printf("Queue Depth %d\n", adw->max_acbs); 1165 1166 return (0); 1167} 1168 1169/* 1170 * Attach all the sub-devices we can find 1171 */ 1172int 1173adw_attach(struct adw_softc *adw) 1174{ 1175 struct ccb_setasync csa; 1176 struct cam_devq *devq; 1177 int s; 1178 int error; 1179 1180 error = 0; 1181 s = splcam(); 1182 /* Hook up our interrupt handler */ 1183 if ((error = bus_setup_intr(adw->device, adw->irq, 1184 INTR_TYPE_CAM | INTR_ENTROPY, adw_intr, 1185 adw, &adw->ih)) != 0) { 1186 device_printf(adw->device, "bus_setup_intr() failed: %d\n", 1187 error); 1188 goto fail; 1189 } 1190 1191 /* Start the Risc processor now that we are fully configured. */ 1192 adw_outw(adw, ADW_RISC_CSR, ADW_RISC_CSR_RUN); 1193 1194 /* 1195 * Create the device queue for our SIM. 1196 */ 1197 devq = cam_simq_alloc(adw->max_acbs); 1198 if (devq == NULL) 1199 return (ENOMEM); 1200 1201 /* 1202 * Construct our SIM entry. 1203 */ 1204 adw->sim = cam_sim_alloc(adw_action, adw_poll, "adw", adw, adw->unit, 1205 1, adw->max_acbs, devq); 1206 if (adw->sim == NULL) { 1207 error = ENOMEM; 1208 goto fail; 1209 } 1210 1211 /* 1212 * Register the bus. 
1213 */ 1214 if (xpt_bus_register(adw->sim, 0) != CAM_SUCCESS) { 1215 cam_sim_free(adw->sim, /*free devq*/TRUE); 1216 error = ENOMEM; 1217 goto fail; 1218 } 1219 1220 if (xpt_create_path(&adw->path, /*periph*/NULL, cam_sim_path(adw->sim), 1221 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) 1222 == CAM_REQ_CMP) { 1223 xpt_setup_ccb(&csa.ccb_h, adw->path, /*priority*/5); 1224 csa.ccb_h.func_code = XPT_SASYNC_CB; 1225 csa.event_enable = AC_LOST_DEVICE; 1226 csa.callback = adw_async; 1227 csa.callback_arg = adw; 1228 xpt_action((union ccb *)&csa); 1229 } 1230 1231fail: 1232 splx(s); 1233 return (error); 1234} 1235 1236void 1237adw_intr(void *arg) 1238{ 1239 struct adw_softc *adw; 1240 u_int int_stat; 1241 1242 adw = (struct adw_softc *)arg; 1243 if ((adw_inw(adw, ADW_CTRL_REG) & ADW_CTRL_REG_HOST_INTR) == 0) 1244 return; 1245 1246 /* Reading the register clears the interrupt. */ 1247 int_stat = adw_inb(adw, ADW_INTR_STATUS_REG); 1248 1249 if ((int_stat & ADW_INTR_STATUS_INTRB) != 0) { 1250 u_int intrb_code; 1251 1252 /* Async Microcode Event */ 1253 intrb_code = adw_lram_read_8(adw, ADW_MC_INTRB_CODE); 1254 switch (intrb_code) { 1255 case ADW_ASYNC_CARRIER_READY_FAILURE: 1256 /* 1257 * The RISC missed our update of 1258 * the commandq. 1259 */ 1260 if (LIST_FIRST(&adw->pending_ccbs) != NULL) 1261 adw_tickle_risc(adw, ADW_TICKLE_A); 1262 break; 1263 case ADW_ASYNC_SCSI_BUS_RESET_DET: 1264 /* 1265 * The firmware detected a SCSI Bus reset. 1266 */ 1267 printf("Someone Reset the Bus\n"); 1268 adw_handle_bus_reset(adw, /*initiated*/FALSE); 1269 break; 1270 case ADW_ASYNC_RDMA_FAILURE: 1271 /* 1272 * Handle RDMA failure by resetting the 1273 * SCSI Bus and chip. 1274 */ 1275#if XXX 1276 AdvResetChipAndSB(adv_dvc_varp); 1277#endif 1278 break; 1279 1280 case ADW_ASYNC_HOST_SCSI_BUS_RESET: 1281 /* 1282 * Host generated SCSI bus reset occurred. 
1283 */ 1284 adw_handle_bus_reset(adw, /*initiated*/TRUE); 1285 break; 1286 default: 1287 printf("adw_intr: unknown async code 0x%x\n", 1288 intrb_code); 1289 break; 1290 } 1291 } 1292 1293 /* 1294 * Run down the RequestQ. 1295 */ 1296 while ((adw->responseq->next_ba & ADW_RQ_DONE) != 0) { 1297 struct adw_carrier *free_carrier; 1298 struct acb *acb; 1299 union ccb *ccb; 1300 1301#if 0 1302 printf("0x%x, 0x%x, 0x%x, 0x%x\n", 1303 adw->responseq->carr_offset, 1304 adw->responseq->carr_ba, 1305 adw->responseq->areq_ba, 1306 adw->responseq->next_ba); 1307#endif 1308 /* 1309 * The firmware copies the adw_scsi_req_q.acb_baddr 1310 * field into the areq_ba field of the carrier. 1311 */ 1312 acb = acbbotov(adw, adw->responseq->areq_ba); 1313 1314 /* 1315 * The least significant four bits of the next_ba 1316 * field are used as flags. Mask them out and then 1317 * advance through the list. 1318 */ 1319 free_carrier = adw->responseq; 1320 adw->responseq = 1321 carrierbotov(adw, free_carrier->next_ba & ADW_NEXT_BA_MASK); 1322 free_carrier->next_ba = adw->free_carriers->carr_offset; 1323 adw->free_carriers = free_carrier; 1324 1325 /* Process CCB */ 1326 ccb = acb->ccb; 1327 untimeout(adwtimeout, acb, ccb->ccb_h.timeout_ch); 1328 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 1329 bus_dmasync_op_t op; 1330 1331 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) 1332 op = BUS_DMASYNC_POSTREAD; 1333 else 1334 op = BUS_DMASYNC_POSTWRITE; 1335 bus_dmamap_sync(adw->buffer_dmat, acb->dmamap, op); 1336 bus_dmamap_unload(adw->buffer_dmat, acb->dmamap); 1337 ccb->csio.resid = acb->queue.data_cnt; 1338 } else 1339 ccb->csio.resid = 0; 1340 1341 /* Common Cases inline... 
*/ 1342 if (acb->queue.host_status == QHSTA_NO_ERROR 1343 && (acb->queue.done_status == QD_NO_ERROR 1344 || acb->queue.done_status == QD_WITH_ERROR)) { 1345 ccb->csio.scsi_status = acb->queue.scsi_status; 1346 ccb->ccb_h.status = 0; 1347 switch (ccb->csio.scsi_status) { 1348 case SCSI_STATUS_OK: 1349 ccb->ccb_h.status |= CAM_REQ_CMP; 1350 break; 1351 case SCSI_STATUS_CHECK_COND: 1352 case SCSI_STATUS_CMD_TERMINATED: 1353 bcopy(&acb->sense_data, &ccb->csio.sense_data, 1354 ccb->csio.sense_len); 1355 ccb->ccb_h.status |= CAM_AUTOSNS_VALID; 1356 ccb->csio.sense_resid = acb->queue.sense_len; 1357 /* FALLTHROUGH */ 1358 default: 1359 ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR 1360 | CAM_DEV_QFRZN; 1361 xpt_freeze_devq(ccb->ccb_h.path, /*count*/1); 1362 break; 1363 } 1364 adwfreeacb(adw, acb); 1365 xpt_done(ccb); 1366 } else { 1367 adwprocesserror(adw, acb); 1368 } 1369 } 1370} 1371 1372static void 1373adwprocesserror(struct adw_softc *adw, struct acb *acb) 1374{ 1375 union ccb *ccb; 1376 1377 ccb = acb->ccb; 1378 if (acb->queue.done_status == QD_ABORTED_BY_HOST) { 1379 ccb->ccb_h.status = CAM_REQ_ABORTED; 1380 } else { 1381 1382 switch (acb->queue.host_status) { 1383 case QHSTA_M_SEL_TIMEOUT: 1384 ccb->ccb_h.status = CAM_SEL_TIMEOUT; 1385 break; 1386 case QHSTA_M_SXFR_OFF_UFLW: 1387 case QHSTA_M_SXFR_OFF_OFLW: 1388 case QHSTA_M_DATA_OVER_RUN: 1389 ccb->ccb_h.status = CAM_DATA_RUN_ERR; 1390 break; 1391 case QHSTA_M_SXFR_DESELECTED: 1392 case QHSTA_M_UNEXPECTED_BUS_FREE: 1393 ccb->ccb_h.status = CAM_UNEXP_BUSFREE; 1394 break; 1395 case QHSTA_M_SCSI_BUS_RESET: 1396 case QHSTA_M_SCSI_BUS_RESET_UNSOL: 1397 ccb->ccb_h.status = CAM_SCSI_BUS_RESET; 1398 break; 1399 case QHSTA_M_BUS_DEVICE_RESET: 1400 ccb->ccb_h.status = CAM_BDR_SENT; 1401 break; 1402 case QHSTA_M_QUEUE_ABORTED: 1403 /* BDR or Bus Reset */ 1404 printf("Saw Queue Aborted\n"); 1405 ccb->ccb_h.status = adw->last_reset; 1406 break; 1407 case QHSTA_M_SXFR_SDMA_ERR: 1408 case QHSTA_M_SXFR_SXFR_PERR: 1409 case 
QHSTA_M_RDMA_PERR: 1410 ccb->ccb_h.status = CAM_UNCOR_PARITY; 1411 break; 1412 case QHSTA_M_WTM_TIMEOUT: 1413 case QHSTA_M_SXFR_WD_TMO: 1414 { 1415 /* The SCSI bus hung in a phase */ 1416 xpt_print_path(adw->path); 1417 printf("Watch Dog timer expired. Reseting bus\n"); 1418 adw_reset_bus(adw); 1419 break; 1420 } 1421 case QHSTA_M_SXFR_XFR_PH_ERR: 1422 ccb->ccb_h.status = CAM_SEQUENCE_FAIL; 1423 break; 1424 case QHSTA_M_SXFR_UNKNOWN_ERROR: 1425 break; 1426 case QHSTA_M_BAD_CMPL_STATUS_IN: 1427 /* No command complete after a status message */ 1428 ccb->ccb_h.status = CAM_SEQUENCE_FAIL; 1429 break; 1430 case QHSTA_M_AUTO_REQ_SENSE_FAIL: 1431 ccb->ccb_h.status = CAM_AUTOSENSE_FAIL; 1432 break; 1433 case QHSTA_M_INVALID_DEVICE: 1434 ccb->ccb_h.status = CAM_PATH_INVALID; 1435 break; 1436 case QHSTA_M_NO_AUTO_REQ_SENSE: 1437 /* 1438 * User didn't request sense, but we got a 1439 * check condition. 1440 */ 1441 ccb->csio.scsi_status = acb->queue.scsi_status; 1442 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; 1443 break; 1444 default: 1445 panic("%s: Unhandled Host status error %x", 1446 adw_name(adw), acb->queue.host_status); 1447 /* NOTREACHED */ 1448 } 1449 } 1450 if ((acb->state & ACB_RECOVERY_ACB) != 0) { 1451 if (ccb->ccb_h.status == CAM_SCSI_BUS_RESET 1452 || ccb->ccb_h.status == CAM_BDR_SENT) 1453 ccb->ccb_h.status = CAM_CMD_TIMEOUT; 1454 } 1455 if (ccb->ccb_h.status != CAM_REQ_CMP) { 1456 xpt_freeze_devq(ccb->ccb_h.path, /*count*/1); 1457 ccb->ccb_h.status |= CAM_DEV_QFRZN; 1458 } 1459 adwfreeacb(adw, acb); 1460 xpt_done(ccb); 1461} 1462 1463static void 1464adwtimeout(void *arg) 1465{ 1466 struct acb *acb; 1467 union ccb *ccb; 1468 struct adw_softc *adw; 1469 adw_idle_cmd_status_t status; 1470 int target_id; 1471 int s; 1472 1473 acb = (struct acb *)arg; 1474 ccb = acb->ccb; 1475 adw = (struct adw_softc *)ccb->ccb_h.ccb_adw_ptr; 1476 xpt_print_path(ccb->ccb_h.path); 1477 printf("ACB %p - timed out\n", (void *)acb); 1478 1479 s = splcam(); 1480 1481 if ((acb->state & 
ACB_ACTIVE) == 0) { 1482 xpt_print_path(ccb->ccb_h.path); 1483 printf("ACB %p - timed out CCB already completed\n", 1484 (void *)acb); 1485 splx(s); 1486 return; 1487 } 1488 1489 acb->state |= ACB_RECOVERY_ACB; 1490 target_id = ccb->ccb_h.target_id; 1491 1492 /* Attempt a BDR first */ 1493 status = adw_idle_cmd_send(adw, ADW_IDLE_CMD_DEVICE_RESET, 1494 ccb->ccb_h.target_id); 1495 splx(s); 1496 if (status == ADW_IDLE_CMD_SUCCESS) { 1497 printf("%s: BDR Delivered. No longer in timeout\n", 1498 adw_name(adw)); 1499 adw_handle_device_reset(adw, target_id); 1500 } else { 1501 adw_reset_bus(adw); 1502 xpt_print_path(adw->path); 1503 printf("Bus Reset Delivered. No longer in timeout\n"); 1504 } 1505} 1506 1507static void 1508adw_handle_device_reset(struct adw_softc *adw, u_int target) 1509{ 1510 struct cam_path *path; 1511 cam_status error; 1512 1513 error = xpt_create_path(&path, /*periph*/NULL, cam_sim_path(adw->sim), 1514 target, CAM_LUN_WILDCARD); 1515 1516 if (error == CAM_REQ_CMP) { 1517 xpt_async(AC_SENT_BDR, path, NULL); 1518 xpt_free_path(path); 1519 } 1520 adw->last_reset = CAM_BDR_SENT; 1521} 1522 1523static void 1524adw_handle_bus_reset(struct adw_softc *adw, int initiated) 1525{ 1526 if (initiated) { 1527 /* 1528 * The microcode currently sets the SCSI Bus Reset signal 1529 * while handling the AscSendIdleCmd() IDLE_CMD_SCSI_RESET 1530 * command above. But the SCSI Bus Reset Hold Time in the 1531 * microcode is not deterministic (it may in fact be for less 1532 * than the SCSI Spec. minimum of 25 us). Therefore on return 1533 * the Adv Library sets the SCSI Bus Reset signal for 1534 * ADW_SCSI_RESET_HOLD_TIME_US, which is defined to be greater 1535 * than 25 us. 
1536 */ 1537 u_int scsi_ctrl; 1538 1539 scsi_ctrl = adw_inw(adw, ADW_SCSI_CTRL) & ~ADW_SCSI_CTRL_RSTOUT; 1540 adw_outw(adw, ADW_SCSI_CTRL, scsi_ctrl | ADW_SCSI_CTRL_RSTOUT); 1541 DELAY(ADW_SCSI_RESET_HOLD_TIME_US); 1542 adw_outw(adw, ADW_SCSI_CTRL, scsi_ctrl); 1543 1544 /* 1545 * We will perform the async notification when the 1546 * SCSI Reset interrupt occurs. 1547 */ 1548 } else 1549 xpt_async(AC_BUS_RESET, adw->path, NULL); 1550 adw->last_reset = CAM_SCSI_BUS_RESET; 1551} 1552