advansys.c revision 46581
/*
 * Generic driver for the Advanced Systems Inc. SCSI controllers
 * Product specific probe and attach routines can be found in:
 *
 * i386/isa/adv_isa.c	ABP5140, ABP542, ABP5150, ABP842, ABP852
 * i386/eisa/adv_eisa.c	ABP742, ABP752
 * pci/adv_pci.c	ABP920, ABP930, ABP930U, ABP930UA, ABP940, ABP940U,
 *			ABP940UA, ABP950, ABP960, ABP960U, ABP960UA,
 *			ABP970, ABP970U
 *
 * Copyright (c) 1996-1998 Justin Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $Id: advansys.c,v 1.9 1999/04/19 21:27:35 gibbs Exp $
 */
/*
 * Ported from:
 * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
 *
 * Copyright (c) 1995-1997 Advanced System Products, Inc.
 * All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that redistributions of source
 * code retain the above copyright notice and this comment without
 * modification.
 */
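
/*
 * Driver layout: adv_attach() allocates DMA resources and registers the
 * CAM SIM, adv_action() accepts CCBs from CAM, adv_execute_ccb() queues
 * mapped requests to the chip, adv_intr() and adv_run_doneq() reap
 * completions into adv_done(), and adv_timeout() performs error recovery.
 *
 * A bus front-end (see the probe files listed above) is expected to use
 * these entry points roughly as sketched here; interrupt hookup and all
 * other bus-specific glue is omitted:
 *
 *	struct adv_softc *adv;
 *
 *	adv = adv_alloc(unit, tag, bsh);
 *	if (adv == NULL || adv_init(adv) != 0)
 *		...fail the probe...
 *	...register adv_intr() as the interrupt handler...
 *	adv_attach(adv);
 */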

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/buf.h>
#include <sys/kernel.h>

#include <machine/bus_pio.h>
#include <machine/bus.h>
#include <machine/clock.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_debug.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include <dev/advansys/advansys.h>

u_long adv_unit;

static void	adv_action(struct cam_sim *sim, union ccb *ccb);
static void	adv_execute_ccb(void *arg, bus_dma_segment_t *dm_segs,
				int nsegments, int error);
static void	adv_poll(struct cam_sim *sim);
static void	adv_run_doneq(struct adv_softc *adv);
static struct adv_ccb_info *
		adv_alloc_ccb_info(struct adv_softc *adv);
static void	adv_destroy_ccb_info(struct adv_softc *adv,
				     struct adv_ccb_info *cinfo);
static __inline struct adv_ccb_info *
		adv_get_ccb_info(struct adv_softc *adv);
static __inline void adv_free_ccb_info(struct adv_softc *adv,
				       struct adv_ccb_info *cinfo);


struct adv_softc *advsoftcs[NADV];   /* XXX Config should handle this */

static __inline struct adv_ccb_info *
adv_get_ccb_info(struct adv_softc *adv)
{
	struct adv_ccb_info *cinfo;
	int opri;

	opri = splcam();
	if ((cinfo = SLIST_FIRST(&adv->free_ccb_infos)) != NULL) {
		SLIST_REMOVE_HEAD(&adv->free_ccb_infos, links);
	} else {
		cinfo = adv_alloc_ccb_info(adv);
	}
	splx(opri);

	return (cinfo);
}

static __inline void
adv_free_ccb_info(struct adv_softc *adv, struct adv_ccb_info *cinfo)
{
	int opri;

	opri = splcam();
	cinfo->state = ACCB_FREE;
	SLIST_INSERT_HEAD(&adv->free_ccb_infos, cinfo, links);
	splx(opri);
}

void
adv_map(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t* physaddr;

	physaddr = (bus_addr_t*)arg;
	*physaddr = segs->ds_addr;
}

char *
adv_name(struct adv_softc *adv)
{
	static char name[10];

	snprintf(name, sizeof(name), "adv%d", adv->unit);
	return (name);
}
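
/*
 * adv_action() is the single entry point through which CAM submits CCBs
 * to this SIM.  XPT_SCSI_IO requests are mapped for DMA, deferring to
 * the bus_dma callback when the mapping cannot complete immediately;
 * the remaining function codes are serviced inline.
 */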
static void
adv_action(struct cam_sim *sim, union ccb *ccb)
{
	struct adv_softc *adv;

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("adv_action\n"));

	adv = (struct adv_softc *)cam_sim_softc(sim);

	switch (ccb->ccb_h.func_code) {
	/* Common cases first */
	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
	{
		struct ccb_hdr *ccb_h;
		struct ccb_scsiio *csio;
		struct adv_ccb_info *cinfo;

		ccb_h = &ccb->ccb_h;
		csio = &ccb->csio;
		cinfo = adv_get_ccb_info(adv);
		if (cinfo == NULL)
			panic("XXX Handle CCB info error!!!");

		ccb_h->ccb_cinfo_ptr = cinfo;

		/* Only use S/G if there is a transfer */
		if ((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
			if ((ccb_h->flags & CAM_SCATTER_VALID) == 0) {
				/*
				 * We've been given a pointer
				 * to a single buffer
				 */
				if ((ccb_h->flags & CAM_DATA_PHYS) == 0) {
					int s;
					int error;

					s = splsoftvm();
					error =
					    bus_dmamap_load(adv->buffer_dmat,
							    cinfo->dmamap,
							    csio->data_ptr,
							    csio->dxfer_len,
							    adv_execute_ccb,
							    csio, /*flags*/0);
					if (error == EINPROGRESS) {
						/*
						 * So as to maintain ordering,
						 * freeze the controller queue
						 * until our mapping is
						 * returned.
						 */
						xpt_freeze_simq(adv->sim,
								/*count*/1);
						cinfo->state |=
						    ACCB_RELEASE_SIMQ;
					}
					splx(s);
				} else {
					struct bus_dma_segment seg;

					/* Pointer to physical buffer */
					seg.ds_addr =
					    (bus_addr_t)csio->data_ptr;
					seg.ds_len = csio->dxfer_len;
					adv_execute_ccb(csio, &seg, 1, 0);
				}
			} else {
				struct bus_dma_segment *segs;

				if ((ccb_h->flags & CAM_DATA_PHYS) != 0)
					panic("adv_setup_data - Physical "
					      "segment pointers unsupported");

				if ((ccb_h->flags & CAM_SG_LIST_PHYS) == 0)
					panic("adv_setup_data - Virtual "
					      "segment addresses unsupported");

				/* Just use the segments provided */
				segs = (struct bus_dma_segment *)csio->data_ptr;
				adv_execute_ccb(ccb, segs, csio->sglist_cnt, 0);
			}
		} else {
			adv_execute_ccb(ccb, NULL, 0, 0);
		}
		break;
	}
	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
	case XPT_TARGET_IO:	/* Execute target I/O request */
	case XPT_ACCEPT_TARGET_IO:	/* Accept Host Target Mode CDB */
	case XPT_CONT_TARGET_IO:	/* Continue Host Target I/O Connection */
	case XPT_EN_LUN:	/* Enable LUN as a target */
	case XPT_ABORT:		/* Abort the specified CCB */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
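	/*
	 * XPT_SET_TRAN_SETTINGS updates either the active ("current")
	 * negotiation parameters or the stored user defaults for one
	 * target; callers must select exactly one of the two settings
	 * types per request.
	 */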
	case XPT_SET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings *cts;
		target_bit_vector targ_mask;
		struct adv_transinfo *tconf;
		u_int update_type;
		int s;

		cts = &ccb->cts;
		targ_mask = ADV_TID_TO_TARGET_MASK(cts->ccb_h.target_id);
		update_type = 0;

		/*
		 * The user must specify which type of settings he wishes
		 * to change.
		 */
		if (((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0)
		 && ((cts->flags & CCB_TRANS_USER_SETTINGS) == 0)) {
			tconf = &adv->tinfo[cts->ccb_h.target_id].current;
			update_type |= ADV_TRANS_GOAL;
		} else if (((cts->flags & CCB_TRANS_USER_SETTINGS) != 0)
			&& ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) == 0)) {
			tconf = &adv->tinfo[cts->ccb_h.target_id].user;
			update_type |= ADV_TRANS_USER;
		} else {
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			break;
		}

		s = splcam();

		if ((update_type & ADV_TRANS_GOAL) != 0) {
			if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
				if ((cts->flags & CCB_TRANS_DISC_ENB) != 0)
					adv->disc_enable |= targ_mask;
				else
					adv->disc_enable &= ~targ_mask;
				adv_write_lram_8(adv, ADVV_DISC_ENABLE_B,
						 adv->disc_enable);
			}

			if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
				if ((cts->flags & CCB_TRANS_TAG_ENB) != 0)
					adv->cmd_qng_enabled |= targ_mask;
				else
					adv->cmd_qng_enabled &= ~targ_mask;
			}
		}

		if ((update_type & ADV_TRANS_USER) != 0) {
			if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
				if ((cts->flags & CCB_TRANS_DISC_ENB) != 0)
					adv->user_disc_enable |= targ_mask;
				else
					adv->user_disc_enable &= ~targ_mask;
			}

			if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
				if ((cts->flags & CCB_TRANS_TAG_ENB) != 0)
					adv->user_cmd_qng_enabled |= targ_mask;
				else
					adv->user_cmd_qng_enabled &= ~targ_mask;
			}
		}

		/*
		 * If the user specifies either the sync rate or the offset,
		 * but not both, the unspecified parameter defaults to its
		 * current value in transfer negotiations.
		 */
		if (((cts->valid & CCB_TRANS_SYNC_RATE_VALID) != 0)
		 || ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0)) {
			/*
			 * If the user provided a sync rate but no offset,
			 * use the current offset.
			 */
			if ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) == 0)
				cts->sync_offset = tconf->offset;

			/*
			 * If the user provided an offset but no sync rate,
			 * use the current sync rate.
			 */
			if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) == 0)
				cts->sync_period = tconf->period;

			adv_period_offset_to_sdtr(adv, &cts->sync_period,
						  &cts->sync_offset,
						  cts->ccb_h.target_id);

			adv_set_syncrate(adv, /*struct cam_path */NULL,
					 cts->ccb_h.target_id, cts->sync_period,
					 cts->sync_offset, update_type);
		}

		splx(s);
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	/* Get default/user set transfer settings for the target */
	{
		struct ccb_trans_settings *cts;
		struct adv_transinfo *tconf;
		target_bit_vector target_mask;
		int s;

		cts = &ccb->cts;
		target_mask = ADV_TID_TO_TARGET_MASK(cts->ccb_h.target_id);

		cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);

		s = splcam();
		if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) {
			tconf = &adv->tinfo[cts->ccb_h.target_id].current;
			if ((adv->disc_enable & target_mask) != 0)
				cts->flags |= CCB_TRANS_DISC_ENB;
			if ((adv->cmd_qng_enabled & target_mask) != 0)
				cts->flags |= CCB_TRANS_TAG_ENB;
		} else {
			tconf = &adv->tinfo[cts->ccb_h.target_id].user;
			if ((adv->user_disc_enable & target_mask) != 0)
				cts->flags |= CCB_TRANS_DISC_ENB;
			if ((adv->user_cmd_qng_enabled & target_mask) != 0)
				cts->flags |= CCB_TRANS_TAG_ENB;
		}

		cts->sync_period = tconf->period;
		cts->sync_offset = tconf->offset;
		splx(s);

		cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
		cts->valid = CCB_TRANS_SYNC_RATE_VALID
			   | CCB_TRANS_SYNC_OFFSET_VALID
			   | CCB_TRANS_BUS_WIDTH_VALID
			   | CCB_TRANS_DISC_VALID
			   | CCB_TRANS_TQ_VALID;
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
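	/*
	 * XPT_CALC_GEOMETRY derives a BIOS-style CHS geometry from the
	 * volume size.  Worked example (numbers assumed for illustration):
	 * a 4GB disk with 512 byte blocks has volume_size = 8388608, so
	 * size_mb = 8388608 / ((1024 * 1024) / 512) = 4096.  Since that
	 * exceeds 1024MB, the extended translation (255 heads, 63 sectors
	 * per track) applies when the BIOS ">1GB" option is set, giving
	 * 8388608 / (255 * 63) = 522 cylinders.
	 */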
	case XPT_CALC_GEOMETRY:
	{
		struct ccb_calc_geometry *ccg;
		u_int32_t size_mb;
		u_int32_t secs_per_cylinder;
		int extended;

		ccg = &ccb->ccg;
		size_mb = ccg->volume_size
			/ ((1024L * 1024L) / ccg->block_size);
		extended = (adv->control & ADV_CNTL_BIOS_GT_1GB) != 0;

		if (size_mb > 1024 && extended) {
			ccg->heads = 255;
			ccg->secs_per_track = 63;
		} else {
			ccg->heads = 64;
			ccg->secs_per_track = 32;
		}
		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
	{
		int s;

		s = splcam();
		adv_stop_execution(adv);
		adv_reset_bus(adv);
		adv_start_execution(adv);
		splx(s);

		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_TERM_IO:		/* Terminate the I/O process */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1; /* XXX??? */
		cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE;
		cpi->target_sprt = 0;
		cpi->hba_misc = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = 7;
		cpi->max_lun = 7;
		cpi->initiator_id = adv->scsi_id;
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 3300;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "Advansys", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
}

/*
 * Currently, the output of bus_dmamap_load suits our needs just
 * fine, but should it change, we'd need to do something here.
 */
#define adv_fixup_dmasegs(adv, dm_segs) (struct adv_sg_entry *)(dm_segs)
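
/*
 * adv_execute_ccb() serves both as the bus_dmamap_load() callback for
 * deferred mappings and as a direct call when no mapping is required.
 * It packages the request and any S/G segments into an adv_scsi_q,
 * checks one last time for an abort, and hands the queue to the
 * firmware via adv_execute_scsi_queue().
 */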
static void
adv_execute_ccb(void *arg, bus_dma_segment_t *dm_segs,
		int nsegments, int error)
{
	struct ccb_scsiio *csio;
	struct ccb_hdr *ccb_h;
	struct cam_sim *sim;
	struct adv_softc *adv;
	struct adv_ccb_info *cinfo;
	struct adv_scsi_q scsiq;
	struct adv_sg_head sghead;
	int s;

	csio = (struct ccb_scsiio *)arg;
	ccb_h = &csio->ccb_h;
	sim = xpt_path_sim(ccb_h->path);
	adv = (struct adv_softc *)cam_sim_softc(sim);
	cinfo = (struct adv_ccb_info *)csio->ccb_h.ccb_cinfo_ptr;

	if ((ccb_h->flags & CAM_CDB_POINTER) != 0) {
		if ((ccb_h->flags & CAM_CDB_PHYS) == 0) {
			/* XXX Need phystovirt!!!! */
			/* How about pmap_kenter??? */
			scsiq.cdbptr = csio->cdb_io.cdb_ptr;
		} else {
			scsiq.cdbptr = csio->cdb_io.cdb_ptr;
		}
	} else {
		scsiq.cdbptr = csio->cdb_io.cdb_bytes;
	}
	/*
	 * Build up the request
	 */
	scsiq.q1.status = 0;
	scsiq.q1.q_no = 0;
	scsiq.q1.cntl = 0;
	scsiq.q1.sg_queue_cnt = 0;
	scsiq.q1.target_id = ADV_TID_TO_TARGET_MASK(ccb_h->target_id);
	scsiq.q1.target_lun = ccb_h->target_lun;
	scsiq.q1.sense_len = csio->sense_len;
	scsiq.q1.extra_bytes = 0;
	scsiq.q2.ccb_ptr = (u_int32_t)csio;
	scsiq.q2.target_ix = ADV_TIDLUN_TO_IX(ccb_h->target_id,
					      ccb_h->target_lun);
	scsiq.q2.flag = 0;
	scsiq.q2.cdb_len = csio->cdb_len;
	if ((ccb_h->flags & CAM_TAG_ACTION_VALID) != 0)
		scsiq.q2.tag_code = csio->tag_action;
	else
		scsiq.q2.tag_code = 0;
	scsiq.q2.vm_id = 0;

	if (nsegments != 0) {
		bus_dmasync_op_t op;

		scsiq.q1.data_addr = dm_segs->ds_addr;
		scsiq.q1.data_cnt = dm_segs->ds_len;
		if (nsegments > 1) {
			scsiq.q1.cntl |= QC_SG_HEAD;
			sghead.entry_cnt
			    = sghead.entry_to_copy
			    = nsegments;
			sghead.res = 0;
			sghead.sg_list = adv_fixup_dmasegs(adv, dm_segs);
			scsiq.sg_head = &sghead;
		} else {
			scsiq.sg_head = NULL;
		}
		if ((ccb_h->flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;
		bus_dmamap_sync(adv->buffer_dmat, cinfo->dmamap, op);
	} else {
		scsiq.q1.data_addr = 0;
		scsiq.q1.data_cnt = 0;
		scsiq.sg_head = NULL;
	}

	s = splcam();

	/*
	 * Last time we need to check if this SCB needs to
	 * be aborted.
	 */
	if (ccb_h->status != CAM_REQ_INPROG) {
		if (nsegments != 0) {
			bus_dmamap_unload(adv->buffer_dmat, cinfo->dmamap);
		}
		if ((cinfo->state & ACCB_RELEASE_SIMQ) != 0) {
			ccb_h->status |= CAM_RELEASE_SIMQ;
		}
		adv_free_ccb_info(adv, cinfo);
		xpt_done((union ccb *)csio);
		splx(s);
		return;
	}

	if (adv_execute_scsi_queue(adv, &scsiq, csio->dxfer_len) != 0) {
		/* Temporary resource shortage */
		if (nsegments != 0) {
			bus_dmamap_unload(adv->buffer_dmat, cinfo->dmamap);
		}
		ccb_h->status = CAM_REQUEUE_REQ;
		if ((cinfo->state & ACCB_RELEASE_SIMQ) != 0)
			ccb_h->status |= CAM_RELEASE_SIMQ;

		/* Unfreeze when resources are available */
		xpt_freeze_simq(adv->sim, /*count*/1);

		adv_free_ccb_info(adv, cinfo);
		xpt_done((union ccb *)csio);
		splx(s);
		return;
	}
	cinfo->state |= ACCB_ACTIVE;
	ccb_h->status |= CAM_SIM_QUEUED;
	LIST_INSERT_HEAD(&adv->pending_ccbs, ccb_h, sim_links.le);
	/* Schedule our timeout; ccb_h->timeout is in milliseconds */
	ccb_h->timeout_ch =
	    timeout(adv_timeout, csio, (ccb_h->timeout * hz)/1000);
	splx(s);
}

static struct adv_ccb_info *
adv_alloc_ccb_info(struct adv_softc *adv)
{
	int error;
	struct adv_ccb_info *cinfo;

	cinfo = malloc(sizeof(*cinfo), M_DEVBUF, M_NOWAIT);
	if (cinfo == NULL) {
		printf("%s: Can't malloc CCB info\n", adv_name(adv));
		return (NULL);
	}
	cinfo->state = ACCB_FREE;
	error = bus_dmamap_create(adv->buffer_dmat, /*flags*/0,
				  &cinfo->dmamap);
	if (error != 0) {
		printf("%s: Unable to allocate CCB info "
		       "dmamap - error %d\n", adv_name(adv), error);
		free(cinfo, M_DEVBUF);
		cinfo = NULL;
	}
	return (cinfo);
}

static void
adv_destroy_ccb_info(struct adv_softc *adv, struct adv_ccb_info *cinfo)
{
	bus_dmamap_destroy(adv->buffer_dmat, cinfo->dmamap);
	free(cinfo, M_DEVBUF);
}
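
/*
 * Per-command timeout handling is two-staged: the first expiration
 * freezes the SIM queue, cancels the timers on all pending commands,
 * and queues an abort for the victim CCB with a fresh 2 second timer;
 * if that abort also times out, the bus is reset.
 */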
void
adv_timeout(void *arg)
{
	int s;
	union ccb *ccb;
	struct adv_softc *adv;
	struct adv_ccb_info *cinfo;

	ccb = (union ccb *)arg;
	adv = (struct adv_softc *)xpt_path_sim(ccb->ccb_h.path)->softc;
	cinfo = (struct adv_ccb_info *)ccb->ccb_h.ccb_cinfo_ptr;

	xpt_print_path(ccb->ccb_h.path);
	printf("Timed out\n");

	s = splcam();
	/* Have we been taken care of already?? */
	if (cinfo == NULL || cinfo->state == ACCB_FREE) {
		splx(s);
		return;
	}

	adv_stop_execution(adv);

	if ((cinfo->state & ACCB_ABORT_QUEUED) == 0) {
		struct ccb_hdr *ccb_h;

		/*
		 * In order to simplify the recovery process, we ask the XPT
		 * layer to halt the queue of new transactions and we traverse
		 * the list of pending CCBs and remove their timeouts. This
		 * means that the driver attempts to clear only one error
		 * condition at a time.  In general, timeouts that occur
		 * close together are related anyway, so there is no benefit
		 * in attempting to handle errors in parallel.  Timeouts will
		 * be reinstated when the recovery process ends.
		 */
		if ((cinfo->state & ACCB_RELEASE_SIMQ) == 0) {
			xpt_freeze_simq(adv->sim, /*count*/1);
			cinfo->state |= ACCB_RELEASE_SIMQ;
		}

		/* This CCB is the CCB representing our recovery actions */
		cinfo->state |= ACCB_RECOVERY_CCB|ACCB_ABORT_QUEUED;

		ccb_h = LIST_FIRST(&adv->pending_ccbs);
		while (ccb_h != NULL) {
			untimeout(adv_timeout, ccb_h, ccb_h->timeout_ch);
			ccb_h = LIST_NEXT(ccb_h, sim_links.le);
		}

		/* XXX Should send a BDR */
		/* Attempt an abort as our first tack */
		xpt_print_path(ccb->ccb_h.path);
		printf("Attempting abort\n");
		adv_abort_ccb(adv, ccb->ccb_h.target_id,
			      ccb->ccb_h.target_lun, ccb,
			      CAM_CMD_TIMEOUT, /*queued_only*/FALSE);
		ccb->ccb_h.timeout_ch =
		    timeout(adv_timeout, ccb, 2 * hz);
	} else {
		/* Our attempt to perform an abort failed, go for a reset */
		xpt_print_path(ccb->ccb_h.path);
		printf("Resetting bus\n");
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
		adv_reset_bus(adv);
	}
	adv_start_execution(adv);
	splx(s);
}

struct adv_softc *
adv_alloc(int unit, bus_space_tag_t tag, bus_space_handle_t bsh)
{
	struct adv_softc *adv;

	if (unit >= NADV) {
		printf("adv: unit number (%d) too high\n", unit);
		return NULL;
	}

	/*
	 * Allocate a storage area for us
	 */
	if (advsoftcs[unit]) {
		printf("adv%d: memory already allocated\n", unit);
		return NULL;
	}

	adv = malloc(sizeof(struct adv_softc), M_DEVBUF, M_NOWAIT);
	if (!adv) {
		printf("adv%d: cannot malloc!\n", unit);
		return NULL;
	}
	bzero(adv, sizeof(struct adv_softc));
	LIST_INIT(&adv->pending_ccbs);
	SLIST_INIT(&adv->free_ccb_infos);
	advsoftcs[unit] = adv;
	adv->unit = unit;
	adv->tag = tag;
	adv->bsh = bsh;

	return(adv);
}

void
adv_free(struct adv_softc *adv)
{
	switch (adv->init_level) {
	case 5:
	{
		struct adv_ccb_info *cinfo;

		while ((cinfo = SLIST_FIRST(&adv->free_ccb_infos)) != NULL) {
			SLIST_REMOVE_HEAD(&adv->free_ccb_infos, links);
			adv_destroy_ccb_info(adv, cinfo);
		}

		bus_dmamap_unload(adv->sense_dmat, adv->sense_dmamap);
		/* FALLTHROUGH */
	}
	case 4:
		bus_dmamem_free(adv->sense_dmat, adv->sense_buffers,
				adv->sense_dmamap);
		/* FALLTHROUGH */
	case 3:
		bus_dma_tag_destroy(adv->sense_dmat);
		/* FALLTHROUGH */
	case 2:
		bus_dma_tag_destroy(adv->buffer_dmat);
		/* FALLTHROUGH */
	case 1:
		bus_dma_tag_destroy(adv->parent_dmat);
		/* FALLTHROUGH */
	case 0:
		break;
	}
	free(adv, M_DEVBUF);
}
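
/*
 * adv_init() brings the adapter to a known state: it halts the
 * microcode, reads the EEPROM configuration (substituting conservative
 * defaults if the checksum fails), initializes local RAM and the
 * microcode, and leaves every target in async mode with tagged queuing
 * permitted so that later negotiation can ramp transfer settings up.
 */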
int
adv_init(struct adv_softc *adv)
{
	struct adv_eeprom_config eeprom_config;
	int checksum, i;
	u_int16_t config_lsw;
	u_int16_t config_msw;

	adv_reset_chip_and_scsi_bus(adv);
	adv_lib_init(adv);

	/*
	 * Stop script execution.
	 */
	adv_write_lram_16(adv, ADV_HALTCODE_W, 0x00FE);
	adv_stop_execution(adv);
	if (adv_is_chip_halted(adv) == 0) {
		printf("adv%d: Unable to halt adapter. Initialization "
		       "failed\n", adv->unit);
		return (1);
	}
	ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR);
	if (ADV_INW(adv, ADV_REG_PROG_COUNTER) != ADV_MCODE_START_ADDR) {
		printf("adv%d: Unable to set program counter. Initialization "
		       "failed\n", adv->unit);
		return (1);
	}

	config_msw = ADV_INW(adv, ADV_CONFIG_MSW);
	config_lsw = ADV_INW(adv, ADV_CONFIG_LSW);

	if ((config_msw & ADV_CFG_MSW_CLR_MASK) != 0) {
		config_msw &= (~(ADV_CFG_MSW_CLR_MASK));
		/*
		 * XXX The Linux code flags this as an error,
		 * but what should we report to the user???
		 * It seems that clearing the config register
		 * makes this error recoverable.
		 */
		ADV_OUTW(adv, ADV_CONFIG_MSW, config_msw);
	}

	/* Suck in the configuration from the EEProm */
	checksum = adv_get_eeprom_config(adv, &eeprom_config);

	eeprom_config.cfg_msw &= (~(ADV_CFG_MSW_CLR_MASK));

	if (ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_AUTO_CONFIG) {
		/*
		 * XXX The Linux code sets a warning level for this
		 * condition, yet nothing of meaning is printed to
		 * the user.  What does this mean???
		 */
		if (adv->chip_version == 3) {
			if (eeprom_config.cfg_lsw != config_lsw) {
				eeprom_config.cfg_lsw =
						ADV_INW(adv, ADV_CONFIG_LSW);
			}
			if (eeprom_config.cfg_msw != config_msw) {
				eeprom_config.cfg_msw =
						ADV_INW(adv, ADV_CONFIG_MSW);
			}
		}
	}
	eeprom_config.cfg_lsw |= ADV_CFG_LSW_HOST_INT_ON;
	if (adv_test_external_lram(adv) == 0) {
		/*
		 * XXX What about non PCI cards with no
		 * external LRAM????
		 */
		if ((adv->type & (ADV_PCI|ADV_ULTRA)) == (ADV_PCI|ADV_ULTRA)) {
			eeprom_config.max_total_qng =
			    ADV_MAX_PCI_ULTRA_INRAM_TOTAL_QNG;
			eeprom_config.max_tag_qng =
			    ADV_MAX_PCI_ULTRA_INRAM_TAG_QNG;
		} else {
			eeprom_config.cfg_msw |= 0x0800;
			config_msw |= 0x0800;
			ADV_OUTW(adv, ADV_CONFIG_MSW, config_msw);
			eeprom_config.max_total_qng =
			    ADV_MAX_PCI_INRAM_TOTAL_QNG;
			eeprom_config.max_tag_qng = ADV_MAX_INRAM_TAG_QNG;
		}
		adv->max_openings = eeprom_config.max_total_qng;
	}
	if (checksum == eeprom_config.chksum) {
		/* Range/Sanity checking */
		if (eeprom_config.max_total_qng < ADV_MIN_TOTAL_QNG) {
			eeprom_config.max_total_qng = ADV_MIN_TOTAL_QNG;
		}
		if (eeprom_config.max_total_qng > ADV_MAX_TOTAL_QNG) {
			eeprom_config.max_total_qng = ADV_MAX_TOTAL_QNG;
		}
		if (eeprom_config.max_tag_qng > eeprom_config.max_total_qng) {
			eeprom_config.max_tag_qng = eeprom_config.max_total_qng;
		}
		if (eeprom_config.max_tag_qng < ADV_MIN_TAG_Q_PER_DVC) {
			eeprom_config.max_tag_qng = ADV_MIN_TAG_Q_PER_DVC;
		}
		adv->max_openings = eeprom_config.max_total_qng;

		adv->user_disc_enable = eeprom_config.disc_enable;
		adv->user_cmd_qng_enabled = eeprom_config.use_cmd_qng;
		adv->isa_dma_speed = EEPROM_DMA_SPEED(eeprom_config);
		adv->scsi_id = EEPROM_SCSIID(eeprom_config) & ADV_MAX_TID;
		EEPROM_SET_SCSIID(eeprom_config, adv->scsi_id);
		adv->control = eeprom_config.cntl;
		for (i = 0; i <= ADV_MAX_TID; i++)
			adv_sdtr_to_period_offset(adv,
						  eeprom_config.sdtr_data[i],
						  &adv->tinfo[i].user.period,
						  &adv->tinfo[i].user.offset,
						  i);
	} else {
		u_int8_t sync_data;

		printf("adv%d: Warning EEPROM Checksum mismatch. "
		       "Using default device parameters\n", adv->unit);

		/* Set reasonable defaults since we can't read the EEPROM */
		adv->isa_dma_speed = /*ADV_DEF_ISA_DMA_SPEED*/1;
		adv->max_openings = ADV_DEF_MAX_TOTAL_QNG;
		adv->disc_enable = TARGET_BIT_VECTOR_SET;
		adv->user_disc_enable = TARGET_BIT_VECTOR_SET;
		adv->cmd_qng_enabled = TARGET_BIT_VECTOR_SET;
		adv->user_cmd_qng_enabled = TARGET_BIT_VECTOR_SET;
		adv->scsi_id = 7;

		sync_data = ADV_DEF_SDTR_OFFSET | (ADV_DEF_SDTR_INDEX << 4);
		for (i = 0; i <= ADV_MAX_TID; i++)
			adv_sdtr_to_period_offset(adv, sync_data,
						  &adv->tinfo[i].user.period,
						  &adv->tinfo[i].user.offset,
						  i);
	}

	/* Commit the (possibly adjusted) configuration back to the EEPROM */
	if (adv_set_eeprom_config(adv, &eeprom_config) != 0)
		printf("%s: WARNING! Failure writing to EEPROM.\n",
		       adv_name(adv));

	adv_set_chip_scsiid(adv, adv->scsi_id);
	if (adv_init_lram_and_mcode(adv))
		return (1);

	adv->disc_enable = adv->user_disc_enable;

	adv_write_lram_8(adv, ADVV_DISC_ENABLE_B, adv->disc_enable);
	for (i = 0; i <= ADV_MAX_TID; i++) {
		/*
		 * Start off in async mode.
		 */
		adv_set_syncrate(adv, /*struct cam_path */NULL,
				 i, /*period*/0, /*offset*/0,
				 ADV_TRANS_CUR);
		/*
		 * Enable the use of tagged commands on all targets.
		 * This allows the kernel driver to make up its own mind
		 * as it sees fit to tag queue instead of having the
		 * firmware try to second guess the tag_code settings.
		 */
		adv_write_lram_8(adv, ADVV_MAX_DVC_QNG_BEG + i,
				 adv->max_openings);
	}
	adv_write_lram_8(adv, ADVV_USE_TAGGED_QNG_B, TARGET_BIT_VECTOR_SET);
	adv_write_lram_8(adv, ADVV_CAN_TAGGED_QNG_B, TARGET_BIT_VECTOR_SET);
	printf("adv%d: AdvanSys %s Host Adapter, SCSI ID %d, queue depth %d\n",
	       adv->unit, (adv->type & ADV_ULTRA) ? "Ultra SCSI" : "SCSI",
	       adv->scsi_id, adv->max_openings);
	return (0);
}

/*
 * Interrupt handler.  The LRAM address register and the firmware's
 * host flag byte are saved and restored around the body so that
 * interrupt-time LRAM accesses do not disturb an access that was in
 * progress when the interrupt arrived.
 */
void
adv_intr(void *arg)
{
	struct adv_softc *adv;
	u_int16_t chipstat;
	u_int16_t saved_ram_addr;
	u_int8_t  ctrl_reg;
	u_int8_t  saved_ctrl_reg;
	u_int8_t  host_flag;

	adv = (struct adv_softc *)arg;

	ctrl_reg = ADV_INB(adv, ADV_CHIP_CTRL);
	saved_ctrl_reg = ctrl_reg & (~(ADV_CC_SCSI_RESET | ADV_CC_CHIP_RESET |
				       ADV_CC_SINGLE_STEP | ADV_CC_DIAG |
				       ADV_CC_TEST));

	if ((chipstat = ADV_INW(adv, ADV_CHIP_STATUS)) & ADV_CSW_INT_PENDING) {

		saved_ram_addr = ADV_INW(adv, ADV_LRAM_ADDR);
		host_flag = adv_read_lram_8(adv, ADVV_HOST_FLAG_B);
		adv_write_lram_8(adv, ADVV_HOST_FLAG_B,
				 host_flag | ADV_HOST_FLAG_IN_ISR);

		adv_ack_interrupt(adv);

		if ((chipstat & ADV_CSW_HALTED)
		    && (ctrl_reg & ADV_CC_SINGLE_STEP)) {
			adv_isr_chip_halted(adv);
			saved_ctrl_reg &= ~ADV_CC_HALT;
		} else {
			adv_run_doneq(adv);
		}
		ADV_OUTW(adv, ADV_LRAM_ADDR, saved_ram_addr);
#ifdef DIAGNOSTIC
		if (ADV_INW(adv, ADV_LRAM_ADDR) != saved_ram_addr)
			panic("adv_intr: Unable to set LRAM addr");
#endif
		adv_write_lram_8(adv, ADVV_HOST_FLAG_B, host_flag);
	}

	ADV_OUTB(adv, ADV_CHIP_CTRL, saved_ctrl_reg);
}

/*
 * Walk the done queue the firmware maintains in LRAM, freeing each
 * completed queue entry (and any S/G continuation entries) before
 * passing the result to adv_done().
 */
void
adv_run_doneq(struct adv_softc *adv)
{
	struct adv_q_done_info scsiq;
	u_int doneq_head;
	u_int done_qno;

	doneq_head = adv_read_lram_16(adv, ADVV_DONE_Q_TAIL_W) & 0xFF;
	done_qno = adv_read_lram_8(adv, ADV_QNO_TO_QADDR(doneq_head)
				   + ADV_SCSIQ_B_FWD);
	while (done_qno != ADV_QLINK_END) {
		union ccb* ccb;
		u_int done_qaddr;
		u_int sg_queue_cnt;
		int aborted;

		done_qaddr = ADV_QNO_TO_QADDR(done_qno);

		/* Pull status from this request */
		sg_queue_cnt = adv_copy_lram_doneq(adv, done_qaddr, &scsiq,
						   adv->max_dma_count);

		/* Mark it as free */
		adv_write_lram_8(adv, done_qaddr + ADV_SCSIQ_B_STATUS,
				 scsiq.q_status & ~(QS_READY|QS_ABORTED));

		/* Process request based on retrieved info */
		if ((scsiq.cntl & QC_SG_HEAD) != 0) {
			u_int i;

			/*
			 * S/G based request.  Free all of the queue
			 * structures that contained S/G information.
			 */
			for (i = 0; i < sg_queue_cnt; i++) {
				done_qno = adv_read_lram_8(adv, done_qaddr
							   + ADV_SCSIQ_B_FWD);

#ifdef DIAGNOSTIC
				if (done_qno == ADV_QLINK_END) {
					panic("adv_qdone: Corrupted SG "
					      "list encountered");
				}
#endif
				done_qaddr = ADV_QNO_TO_QADDR(done_qno);

				/* Mark SG queue as free */
				adv_write_lram_8(adv, done_qaddr
						 + ADV_SCSIQ_B_STATUS, QS_FREE);
			}
		} else
			sg_queue_cnt = 0;
#ifdef DIAGNOSTIC
		if (adv->cur_active < (sg_queue_cnt + 1))
			panic("adv_qdone: Attempting to free more "
			      "queues than are active");
#endif
		adv->cur_active -= sg_queue_cnt + 1;

		aborted = (scsiq.q_status & QS_ABORTED) != 0;

		if ((scsiq.q_status != QS_DONE)
		 && (scsiq.q_status & QS_ABORTED) == 0)
			panic("adv_qdone: completed scsiq with unknown status");

		scsiq.remain_bytes += scsiq.extra_bytes;

		if ((scsiq.d3.done_stat == QD_WITH_ERROR) &&
		    (scsiq.d3.host_stat == QHSTA_M_DATA_OVER_RUN)) {
			if ((scsiq.cntl & (QC_DATA_IN|QC_DATA_OUT)) == 0) {
				scsiq.d3.done_stat = QD_NO_ERROR;
				scsiq.d3.host_stat = QHSTA_NO_ERROR;
			}
		}

		ccb = (union ccb *)scsiq.d2.ccb_ptr;
		ccb->csio.resid = scsiq.remain_bytes;
		adv_done(adv, (union ccb *)scsiq.d2.ccb_ptr,
			 scsiq.d3.done_stat, scsiq.d3.host_stat,
			 scsiq.d3.scsi_stat, scsiq.q_no);

		doneq_head = done_qno;
		done_qno = adv_read_lram_8(adv, done_qaddr + ADV_SCSIQ_B_FWD);
	}
	adv_write_lram_16(adv, ADVV_DONE_Q_TAIL_W, doneq_head);
}

/*
 * Final per-request completion processing: detach the CCB from the
 * pending list, cancel its timeout, sync and unload the data map,
 * translate the firmware's done/host/SCSI status triple into a CAM
 * status, and hand the CCB back to the XPT layer.
 */
void
adv_done(struct adv_softc *adv, union ccb *ccb, u_int done_stat,
	 u_int host_stat, u_int scsi_status, u_int q_no)
{
	struct adv_ccb_info *cinfo;

	cinfo = (struct adv_ccb_info *)ccb->ccb_h.ccb_cinfo_ptr;
	/*
	 * Null this out so that we catch driver bugs that cause a
	 * ccb to be completed twice.
	 */
	ccb->ccb_h.ccb_cinfo_ptr = NULL;

	LIST_REMOVE(&ccb->ccb_h, sim_links.le);
	untimeout(adv_timeout, ccb, ccb->ccb_h.timeout_ch);
	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		bus_dmasync_op_t op;

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_POSTREAD;
		else
			op = BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(adv->buffer_dmat, cinfo->dmamap, op);
		bus_dmamap_unload(adv->buffer_dmat, cinfo->dmamap);
	}

	switch (done_stat) {
	case QD_NO_ERROR:
		if (host_stat == QHSTA_NO_ERROR) {
			ccb->ccb_h.status = CAM_REQ_CMP;
			break;
		}
		xpt_print_path(ccb->ccb_h.path);
		printf("adv_done - queue done without error, "
		       "but host status non-zero(%x)\n", host_stat);
		/*FALLTHROUGH*/
	case QD_WITH_ERROR:
		switch (host_stat) {
		case QHSTA_M_TARGET_STATUS_BUSY:
		case QHSTA_M_BAD_QUEUE_FULL_OR_BUSY:
			/*
			 * Assume that if we were a tagged transaction
			 * the target reported queue full.  Otherwise,
			 * report busy.  The firmware really should just
			 * pass the original status back up to us even
			 * if it thinks the target was in error for
			 * returning this status as no other transactions
			 * from this initiator are in effect, but this
			 * ignores multi-initiator setups and there is
			 * evidence that the firmware gets its per-device
			 * transaction counts screwed up occasionally.
			 */
			ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
			if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0
			 && host_stat != QHSTA_M_TARGET_STATUS_BUSY)
				scsi_status = SCSI_STATUS_QUEUE_FULL;
			else
				scsi_status = SCSI_STATUS_BUSY;
			adv_abort_ccb(adv, ccb->ccb_h.target_id,
				      ccb->ccb_h.target_lun,
				      /*ccb*/NULL, CAM_REQUEUE_REQ,
				      /*queued_only*/TRUE);
			/*FALLTHROUGH*/
		case QHSTA_M_NO_AUTO_REQ_SENSE:
		case QHSTA_NO_ERROR:
			ccb->csio.scsi_status = scsi_status;
			switch (scsi_status) {
			case SCSI_STATUS_CHECK_COND:
			case SCSI_STATUS_CMD_TERMINATED:
				ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
				/* Structure copy */
				ccb->csio.sense_data =
				    adv->sense_buffers[q_no - 1];
				/* FALLTHROUGH */
			case SCSI_STATUS_BUSY:
			case SCSI_STATUS_RESERV_CONFLICT:
			case SCSI_STATUS_QUEUE_FULL:
			case SCSI_STATUS_COND_MET:
			case SCSI_STATUS_INTERMED:
			case SCSI_STATUS_INTERMED_COND_MET:
				ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
				break;
			case SCSI_STATUS_OK:
				ccb->ccb_h.status |= CAM_REQ_CMP;
				break;
			}
			break;
		case QHSTA_M_SEL_TIMEOUT:
			ccb->ccb_h.status = CAM_SEL_TIMEOUT;
			break;
		case QHSTA_M_DATA_OVER_RUN:
			ccb->ccb_h.status = CAM_DATA_RUN_ERR;
			break;
		case QHSTA_M_UNEXPECTED_BUS_FREE:
			ccb->ccb_h.status = CAM_UNEXP_BUSFREE;
			break;
		case QHSTA_M_BAD_BUS_PHASE_SEQ:
			ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
			break;
		case QHSTA_M_BAD_CMPL_STATUS_IN:
			/* No command complete after a status message */
			ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
			break;
		case QHSTA_D_EXE_SCSI_Q_BUSY_TIMEOUT:
		case QHSTA_M_WTM_TIMEOUT:
		case QHSTA_M_HUNG_REQ_SCSI_BUS_RESET:
			/* The SCSI bus hung in a phase */
			ccb->ccb_h.status = CAM_SEQUENCE_FAIL;
			adv_reset_bus(adv);
			break;
		case QHSTA_M_AUTO_REQ_SENSE_FAIL:
			ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
			break;
		case QHSTA_D_QDONE_SG_LIST_CORRUPTED:
		case QHSTA_D_ASC_DVC_ERROR_CODE_SET:
		case QHSTA_D_HOST_ABORT_FAILED:
		case QHSTA_D_EXE_SCSI_Q_FAILED:
		case QHSTA_D_ASPI_NO_BUF_POOL:
		case QHSTA_M_BAD_TAG_CODE:
		case QHSTA_D_LRAM_CMP_ERROR:
		case QHSTA_M_MICRO_CODE_ERROR_HALT:
		default:
			panic("%s: Unhandled Host status error %x",
			      adv_name(adv), host_stat);
			/* NOTREACHED */
		}
		break;

	case QD_ABORTED_BY_HOST:
		/* Don't clobber a more explicit error code we've already set */
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG)
			ccb->ccb_h.status = CAM_REQ_ABORTED;
		break;

	default:
		xpt_print_path(ccb->ccb_h.path);
		printf("adv_done - queue done with unknown status %x:%x\n",
		       done_stat, host_stat);
		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		break;
	}
	if ((cinfo->state & ACCB_RELEASE_SIMQ) != 0)
		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
	else if (adv->openings_needed > 0) {
		int openings;

		openings = adv->max_openings - adv->cur_active - ADV_MIN_FREE_Q;
		if (openings >= adv->openings_needed) {
			ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
			adv->openings_needed = 0;
		}
	}
	if ((cinfo->state & ACCB_RECOVERY_CCB) != 0) {
		/*
		 * We now traverse our list of pending CCBs and reinstate
		 * their timeouts.
		 */
		struct ccb_hdr *ccb_h;

		ccb_h = LIST_FIRST(&adv->pending_ccbs);
		while (ccb_h != NULL) {
			ccb_h->timeout_ch =
			    timeout(adv_timeout, (caddr_t)ccb_h,
				    (ccb_h->timeout * hz) / 1000);
			ccb_h = LIST_NEXT(ccb_h, sim_links.le);
		}
		printf("%s: No longer in timeout\n", adv_name(adv));
	}
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP
	 && (ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
		xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
	}
	adv_free_ccb_info(adv, cinfo);
	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
	xpt_done(ccb);
}

/*
 * Function to poll for command completion when
 * interrupts are disabled (crash dumps)
 */
static void
adv_poll(struct cam_sim *sim)
{
	adv_intr(cam_sim_softc(sim));
}
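
/*
 * adv->init_level records how far the allocations below have
 * progressed.  adv_free() switches on the same counter and falls
 * through the matching teardown steps, so the two routines must be
 * kept in step when resources are added or removed.
 */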
/*
 * Attach all the sub-devices we can find
 */
int
adv_attach(struct adv_softc *adv)
{
	struct ccb_setasync csa;
	struct cam_devq *devq;

	/*
	 * Create our DMA tags.  These tags define the kinds of device
	 * accessible memory allocations and memory mappings we will
	 * need to perform during normal operation.
	 *
	 * Unless we need to further restrict the allocation, we rely
	 * on the restrictions of the parent dmat, hence the common
	 * use of MAXADDR and MAXSIZE.
	 */

	/* DMA tag for mapping buffers into device visible space. */
	if (bus_dma_tag_create(adv->parent_dmat, /*alignment*/0, /*boundary*/0,
			       /*lowaddr*/BUS_SPACE_MAXADDR,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       /*maxsize*/MAXBSIZE,
			       /*nsegments*/ADV_MAX_SG_LIST,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/BUS_DMA_ALLOCNOW,
			       &adv->buffer_dmat) != 0) {
		goto error_exit;
	}
	adv->init_level++;

	/* DMA tag for our sense buffers */
	if (bus_dma_tag_create(adv->parent_dmat, /*alignment*/0, /*boundary*/0,
			       /*lowaddr*/BUS_SPACE_MAXADDR,
			       /*highaddr*/BUS_SPACE_MAXADDR,
			       /*filter*/NULL, /*filterarg*/NULL,
			       sizeof(struct scsi_sense_data)*adv->max_openings,
			       /*nsegments*/1,
			       /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
			       /*flags*/0, &adv->sense_dmat) != 0) {
		goto error_exit;
	}

	adv->init_level++;

	/* Allocation for our sense buffers */
	if (bus_dmamem_alloc(adv->sense_dmat, (void **)&adv->sense_buffers,
			     BUS_DMA_NOWAIT, &adv->sense_dmamap) != 0) {
		goto error_exit;
	}

	adv->init_level++;

	/* And permanently map them */
	bus_dmamap_load(adv->sense_dmat, adv->sense_dmamap,
			adv->sense_buffers,
			sizeof(struct scsi_sense_data)*adv->max_openings,
			adv_map, &adv->sense_physbase, /*flags*/0);

	adv->init_level++;

	/*
	 * Fire up the chip
	 */
	if (adv_start_chip(adv) != 1) {
		printf("adv%d: Unable to start on board processor. Aborting.\n",
		       adv->unit);
		return (0);
	}

	/*
	 * Create the device queue for our SIM.
	 */
	devq = cam_simq_alloc(adv->max_openings);
	if (devq == NULL)
		return (0);

	/*
	 * Construct our SIM entry.
	 */
	adv->sim = cam_sim_alloc(adv_action, adv_poll, "adv", adv, adv->unit,
				 1, adv->max_openings, devq);
	if (adv->sim == NULL)
		return (0);

	/*
	 * Register the bus.
	 *
	 * XXX Twin Channel EISA Cards???
	 */
	if (xpt_bus_register(adv->sim, 0) != CAM_SUCCESS) {
		cam_sim_free(adv->sim, /*free devq*/TRUE);
		return (0);
	}

	if (xpt_create_path(&adv->path, /*periph*/NULL, cam_sim_path(adv->sim),
			    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD)
	    == CAM_REQ_CMP) {
		xpt_setup_ccb(&csa.ccb_h, adv->path, /*priority*/5);
		csa.ccb_h.func_code = XPT_SASYNC_CB;
		csa.event_enable = AC_FOUND_DEVICE|AC_LOST_DEVICE;
		csa.callback = advasync;
		csa.callback_arg = adv;
		xpt_action((union ccb *)&csa);
	}
	return (1);

error_exit:
	return (0);
}