 */
/*
 * Ported from:
 * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters
 *
 * Copyright (c) 1995-1998 Advanced System Products, Inc.
 * All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that redistributions of source
 * code retain the above copyright notice and this comment without
 * modification.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/bus.h>

#include <machine/bus_pio.h>
#include <machine/bus_memio.h>
#include <machine/bus.h>
#include <machine/resource.h>

#include <sys/rman.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_debug.h>

#include <cam/scsi/scsi_message.h>

#include <dev/advansys/adwvar.h>

/* Definitions for our use of the SIM private CCB area */
#define ccb_acb_ptr spriv_ptr0
#define ccb_adw_ptr spriv_ptr1

u_long adw_unit;

static __inline cam_status	adwccbstatus(union ccb*);
static __inline struct acb*	adwgetacb(struct adw_softc *adw);
static __inline void		adwfreeacb(struct adw_softc *adw,
					   struct acb *acb);

static void		adwmapmem(void *arg, bus_dma_segment_t *segs,
				  int nseg, int error);
static struct sg_map_node*
			adwallocsgmap(struct adw_softc *adw);
static int		adwallocacbs(struct adw_softc *adw);

static void		adwexecuteacb(void *arg, bus_dma_segment_t *dm_segs,
				      int nseg, int error);
static void		adw_action(struct cam_sim *sim, union ccb *ccb);
static void		adw_poll(struct cam_sim *sim);
static void		adw_async(void *callback_arg, u_int32_t code,
				  struct cam_path *path, void *arg);
static void		adwprocesserror(struct adw_softc *adw, struct acb *acb);
static void		adwtimeout(void *arg);
static void		adw_handle_device_reset(struct adw_softc *adw,
						u_int target);
static void		adw_handle_bus_reset(struct adw_softc *adw,
					     int initiated);

static __inline cam_status
adwccbstatus(union ccb* ccb)
{
	return (ccb->ccb_h.status & CAM_STATUS_MASK);
}

static __inline struct acb*
adwgetacb(struct adw_softc *adw)
{
	struct acb* acb;
	int	    s;

	s = splcam();
	if ((acb = SLIST_FIRST(&adw->free_acb_list)) != NULL) {
		SLIST_REMOVE_HEAD(&adw->free_acb_list, links);
	} else if (adw->num_acbs < adw->max_acbs) {
		adwallocacbs(adw);
		acb = SLIST_FIRST(&adw->free_acb_list);
		if (acb == NULL)
			printf("%s: Can't malloc ACB\n", adw_name(adw));
		else {
			SLIST_REMOVE_HEAD(&adw->free_acb_list, links);
		}
	}
	splx(s);

	return (acb);
}

static __inline void
adwfreeacb(struct adw_softc *adw, struct acb *acb)
{
	int s;

	s = splcam();
	if ((acb->state & ACB_ACTIVE) != 0)
		LIST_REMOVE(&acb->ccb->ccb_h, sim_links.le);
	if ((acb->state & ACB_RELEASE_SIMQ) != 0)
		acb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
	else if ((adw->state & ADW_RESOURCE_SHORTAGE) != 0
	      && (acb->ccb->ccb_h.status & CAM_RELEASE_SIMQ) == 0) {
		acb->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		adw->state &= ~ADW_RESOURCE_SHORTAGE;
	}
	acb->state = ACB_FREE;
	SLIST_INSERT_HEAD(&adw->free_acb_list, acb, links);
	splx(s);
}

static void
adwmapmem(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *busaddrp;

	busaddrp = (bus_addr_t *)arg;
	*busaddrp = segs->ds_addr;
}
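
/*
 * Illustrative sketch (added commentary, not part of the original
 * driver): adwmapmem() is the bus_dmamap_load() callback used for the
 * "permanent" single-segment mappings in this file (the carrier, ACB,
 * and S/G pools).  The caller passes the address of a bus_addr_t as the
 * callback argument and the callback records the lone segment's bus
 * address there.  The variable names below are hypothetical; compare
 * the real usage in adwallocsgmap() just after this.
 */
#if 0
	bus_addr_t physaddr;

	/*
	 * Load a wired, physically contiguous buffer and capture its
	 * bus address via the adwmapmem() callback.
	 */
	bus_dmamap_load(dmat, dmamap, vaddr, PAGE_SIZE,
			adwmapmem, &physaddr, /*flags*/0);
#endif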

static struct sg_map_node *
adwallocsgmap(struct adw_softc *adw)
{
	struct sg_map_node *sg_map;

	sg_map = malloc(sizeof(*sg_map), M_DEVBUF, M_NOWAIT);

	if (sg_map == NULL)
		return (NULL);

	/* Allocate S/G space for the next batch of ACBs */
	if (bus_dmamem_alloc(adw->sg_dmat, (void **)&sg_map->sg_vaddr,
			     BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) {
		free(sg_map, M_DEVBUF);
		return (NULL);
	}

	SLIST_INSERT_HEAD(&adw->sg_maps, sg_map, links);

	bus_dmamap_load(adw->sg_dmat, sg_map->sg_dmamap, sg_map->sg_vaddr,
			PAGE_SIZE, adwmapmem, &sg_map->sg_physaddr, /*flags*/0);

	bzero(sg_map->sg_vaddr, PAGE_SIZE);
	return (sg_map);
}

/*
 * Allocate another chunk of ACBs.  Return count of entries added.
 * Assumed to be called at splcam().
 */
static int
adwallocacbs(struct adw_softc *adw)
{
	struct acb *next_acb;
	struct sg_map_node *sg_map;
	bus_addr_t busaddr;
	struct adw_sg_block *blocks;
	int newcount;
	int i;

	next_acb = &adw->acbs[adw->num_acbs];
	sg_map = adwallocsgmap(adw);

	if (sg_map == NULL)
		return (0);

	blocks = sg_map->sg_vaddr;
	busaddr = sg_map->sg_physaddr;

	newcount = (PAGE_SIZE / (ADW_SG_BLOCKCNT * sizeof(*blocks)));
	for (i = 0; adw->num_acbs < adw->max_acbs && i < newcount; i++) {
		int error;

		error = bus_dmamap_create(adw->buffer_dmat, /*flags*/0,
					  &next_acb->dmamap);
		if (error != 0)
			break;
		next_acb->queue.scsi_req_baddr = acbvtob(adw, next_acb);
		next_acb->queue.scsi_req_bo = acbvtobo(adw, next_acb);
		next_acb->queue.sense_baddr =
		    acbvtob(adw, next_acb) + offsetof(struct acb, sense_data);
		next_acb->sg_blocks = blocks;
		next_acb->sg_busaddr = busaddr;
		next_acb->state = ACB_FREE;
		SLIST_INSERT_HEAD(&adw->free_acb_list, next_acb, links);
		blocks += ADW_SG_BLOCKCNT;
		busaddr += ADW_SG_BLOCKCNT * sizeof(*blocks);
		next_acb++;
		adw->num_acbs++;
	}
	return (i);
}

static void
adwexecuteacb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
{
	struct	acb *acb;
	union	ccb *ccb;
	struct	adw_softc *adw;
	int	s;

	acb = (struct acb *)arg;
	ccb = acb->ccb;
	adw = (struct adw_softc *)ccb->ccb_h.ccb_adw_ptr;

	if (error != 0) {
		if (error != EFBIG)
			printf("%s: Unexpected error 0x%x returned from "
			       "bus_dmamap_load\n", adw_name(adw), error);
		if (ccb->ccb_h.status == CAM_REQ_INPROG) {
			xpt_freeze_devq(ccb->ccb_h.path, /*count*/1);
			ccb->ccb_h.status = CAM_REQ_TOO_BIG|CAM_DEV_QFRZN;
		}
		adwfreeacb(adw, acb);
		xpt_done(ccb);
		return;
	}

	if (nseg != 0) {
		bus_dmasync_op_t op;

		acb->queue.data_addr = dm_segs[0].ds_addr;
		acb->queue.data_cnt = ccb->csio.dxfer_len;
		if (nseg > 1) {
			struct adw_sg_block *sg_block;
			struct adw_sg_elm *sg;
			bus_addr_t sg_busaddr;
			u_int sg_index;
			bus_dma_segment_t *end_seg;

			end_seg = dm_segs + nseg;

			sg_busaddr = acb->sg_busaddr;
			sg_index = 0;
			/* Copy the segments into our SG list */
			for (sg_block = acb->sg_blocks;; sg_block++) {
				u_int i;

				sg = sg_block->sg_list;
				for (i = 0; i < ADW_NO_OF_SG_PER_BLOCK; i++) {
					if (dm_segs >= end_seg)
						break;

					sg->sg_addr = dm_segs->ds_addr;
					sg->sg_count = dm_segs->ds_len;
					sg++;
					dm_segs++;
				}
				sg_block->sg_cnt = i;
				sg_index += i;
				if (dm_segs == end_seg) {
					sg_block->sg_busaddr_next = 0;
					break;
				} else {
					sg_busaddr +=
					    sizeof(struct adw_sg_block);
					sg_block->sg_busaddr_next = sg_busaddr;
				}
			}
			acb->queue.sg_real_addr = acb->sg_busaddr;
		} else {
			acb->queue.sg_real_addr = 0;
		}

		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			op = BUS_DMASYNC_PREREAD;
		else
			op = BUS_DMASYNC_PREWRITE;

		bus_dmamap_sync(adw->buffer_dmat, acb->dmamap, op);

	} else {
		acb->queue.data_addr = 0;
		acb->queue.data_cnt = 0;
		acb->queue.sg_real_addr = 0;
	}

	s = splcam();

	/*
	 * Last chance to check whether this CCB needs to be aborted.
	 */
	if (ccb->ccb_h.status != CAM_REQ_INPROG) {
		if (nseg != 0)
			bus_dmamap_unload(adw->buffer_dmat, acb->dmamap);
		adwfreeacb(adw, acb);
		xpt_done(ccb);
		splx(s);
		return;
	}

	acb->state |= ACB_ACTIVE;
	ccb->ccb_h.status |= CAM_SIM_QUEUED;
	LIST_INSERT_HEAD(&adw->pending_ccbs, &ccb->ccb_h, sim_links.le);
	ccb->ccb_h.timeout_ch =
	    timeout(adwtimeout, (caddr_t)acb,
		    (ccb->ccb_h.timeout * hz) / 1000);

	adw_send_acb(adw, acb, acbvtob(adw, acb));

	splx(s);
}
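
/*
 * Layout note (added commentary, not original text): for transfers with
 * more than one segment, adwexecuteacb() above builds a chain of
 * adw_sg_block structures, ADW_NO_OF_SG_PER_BLOCK elements per block.
 * Each block's sg_busaddr_next holds the bus address of the next block
 * and the final block terminates the chain with sg_busaddr_next == 0;
 * queue.sg_real_addr points the firmware at the first block.
 */
#if 0
	/*
	 * Hypothetical host-side walk of such a chain.  The blocks for
	 * one ACB are contiguous in host memory, so blk++ follows the
	 * same order as the sg_busaddr_next bus-address links.
	 */
	struct adw_sg_block *blk;
	u_int total;

	total = 0;
	for (blk = acb->sg_blocks; ; blk++) {
		u_int j;

		for (j = 0; j < blk->sg_cnt; j++)
			total += blk->sg_list[j].sg_count;
		if (blk->sg_busaddr_next == 0)
			break;
	}
#endif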

static void
adw_action(struct cam_sim *sim, union ccb *ccb)
{
	struct	adw_softc *adw;

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("adw_action\n"));

	adw = (struct adw_softc *)cam_sim_softc(sim);

	switch (ccb->ccb_h.func_code) {
	/* Common cases first */
	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
	{
		struct	ccb_scsiio *csio;
		struct	ccb_hdr *ccbh;
		struct	acb *acb;

		csio = &ccb->csio;
		ccbh = &ccb->ccb_h;

		/* Max supported CDB length is 12 bytes */
		if (csio->cdb_len > 12) {
			ccb->ccb_h.status = CAM_REQ_INVALID;
			xpt_done(ccb);
			return;
		}

		if ((acb = adwgetacb(adw)) == NULL) {
			int s;

			s = splcam();
			adw->state |= ADW_RESOURCE_SHORTAGE;
			splx(s);
			xpt_freeze_simq(sim, /*count*/1);
			ccb->ccb_h.status = CAM_REQUEUE_REQ;
			xpt_done(ccb);
			return;
		}

		/* Link acb and ccb so we can find one from the other */
		acb->ccb = ccb;
		ccb->ccb_h.ccb_acb_ptr = acb;
		ccb->ccb_h.ccb_adw_ptr = adw;

		acb->queue.cntl = 0;
		acb->queue.target_cmd = 0;
		acb->queue.target_id = ccb->ccb_h.target_id;
		acb->queue.target_lun = ccb->ccb_h.target_lun;

		acb->queue.mflag = 0;
		acb->queue.sense_len =
			MIN(csio->sense_len, sizeof(acb->sense_data));
		acb->queue.cdb_len = csio->cdb_len;
		if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
			switch (csio->tag_action) {
			case MSG_SIMPLE_Q_TAG:
				acb->queue.scsi_cntl = ADW_QSC_SIMPLE_Q_TAG;
				break;
			case MSG_HEAD_OF_Q_TAG:
				acb->queue.scsi_cntl = ADW_QSC_HEAD_OF_Q_TAG;
				break;
			case MSG_ORDERED_Q_TAG:
				acb->queue.scsi_cntl = ADW_QSC_ORDERED_Q_TAG;
				break;
			default:
				acb->queue.scsi_cntl = ADW_QSC_NO_TAGMSG;
				break;
			}
		} else
			acb->queue.scsi_cntl = ADW_QSC_NO_TAGMSG;

		if ((ccb->ccb_h.flags & CAM_DIS_DISCONNECT) != 0)
			acb->queue.scsi_cntl |= ADW_QSC_NO_DISC;

		acb->queue.done_status = 0;
		acb->queue.scsi_status = 0;
		acb->queue.host_status = 0;
		acb->queue.sg_wk_ix = 0;
		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) == 0) {
				bcopy(csio->cdb_io.cdb_ptr,
				      acb->queue.cdb, csio->cdb_len);
			} else {
				/* I guess I could map it in... */
				ccb->ccb_h.status = CAM_REQ_INVALID;
				adwfreeacb(adw, acb);
				xpt_done(ccb);
				return;
			}
		} else {
			bcopy(csio->cdb_io.cdb_bytes,
			      acb->queue.cdb, csio->cdb_len);
		}

		/*
		 * If we have any data to send with this command,
		 * map it into bus space.
		 */
		if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
			if ((ccbh->flags & CAM_SCATTER_VALID) == 0) {
				/*
				 * We've been given a pointer
				 * to a single buffer.
				 */
				if ((ccbh->flags & CAM_DATA_PHYS) == 0) {
					int s;
					int error;

					s = splsoftvm();
					error =
					    bus_dmamap_load(adw->buffer_dmat,
							    acb->dmamap,
							    csio->data_ptr,
							    csio->dxfer_len,
							    adwexecuteacb,
							    acb, /*flags*/0);
					if (error == EINPROGRESS) {
						/*
						 * So as to maintain ordering,
						 * freeze the controller queue
						 * until our mapping is
						 * returned.
						 */
						xpt_freeze_simq(sim, 1);
						acb->state |= ACB_RELEASE_SIMQ;
					}
					splx(s);
				} else {
					struct bus_dma_segment seg;

					/* Pointer to physical buffer */
					seg.ds_addr =
					    (bus_addr_t)csio->data_ptr;
					seg.ds_len = csio->dxfer_len;
					adwexecuteacb(acb, &seg, 1, 0);
				}
			} else {
				struct bus_dma_segment *segs;

				if ((ccbh->flags & CAM_DATA_PHYS) != 0)
					panic("adw_action - Physical "
					      "segment pointers "
					      "unsupported");

				if ((ccbh->flags & CAM_SG_LIST_PHYS) == 0)
					panic("adw_action - Virtual "
					      "segment addresses "
					      "unsupported");

				/* Just use the segments provided */
				segs = (struct bus_dma_segment *)csio->data_ptr;
				adwexecuteacb(acb, segs, csio->sglist_cnt,
					      (csio->sglist_cnt < ADW_SGSIZE)
					      ? 0 : EFBIG);
			}
		} else {
			adwexecuteacb(acb, NULL, 0, 0);
		}
		break;
	}
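
	/*
	 * Added summary of the data-mapping cases handled above
	 * (commentary only, derived from the code):
	 *
	 *   CAM_SCATTER_VALID  CAM_DATA_PHYS  action
	 *   -----------------  -------------  -----------------------------
	 *   clear              clear          bus_dmamap_load() the buffer;
	 *                                     completion may be deferred
	 *                                     (EINPROGRESS), so the SIM
	 *                                     queue is frozen to preserve
	 *                                     ordering.
	 *   clear              set            buffer address is already
	 *                                     physical; build one segment
	 *                                     and call adwexecuteacb()
	 *                                     directly.
	 *   set                set            unsupported (panic).
	 *   set                clear          the S/G list entries must
	 *                                     hold physical addresses
	 *                                     (CAM_SG_LIST_PHYS); the list
	 *                                     is used as-is.
	 */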
	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
	{
		adw_idle_cmd_status_t status;

		status = adw_idle_cmd_send(adw, ADW_IDLE_CMD_DEVICE_RESET,
					   ccb->ccb_h.target_id);
		if (status == ADW_IDLE_CMD_SUCCESS) {
			ccb->ccb_h.status = CAM_REQ_CMP;
			if (bootverbose) {
				xpt_print_path(ccb->ccb_h.path);
				printf("BDR Delivered\n");
			}
		} else
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		xpt_done(ccb);
		break;
	}
	case XPT_ABORT:			/* Abort the specified CCB */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	case XPT_SET_TRAN_SETTINGS:
	{
		struct	ccb_trans_settings *cts;
		u_int	target_mask;
		int	s;

		cts = &ccb->cts;
		target_mask = 0x01 << ccb->ccb_h.target_id;

		s = splcam();
		if ((cts->flags & CCB_TRANS_CURRENT_SETTINGS) != 0) {
			u_int sdtrdone;

			sdtrdone = adw_lram_read_16(adw, ADW_MC_SDTR_DONE);
			if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
				u_int discenb;

				discenb =
				    adw_lram_read_16(adw, ADW_MC_DISC_ENABLE);

				if ((cts->flags & CCB_TRANS_DISC_ENB) != 0)
					discenb |= target_mask;
				else
					discenb &= ~target_mask;

				adw_lram_write_16(adw, ADW_MC_DISC_ENABLE,
						  discenb);
			}

			if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {

				if ((cts->flags & CCB_TRANS_TAG_ENB) != 0)
					adw->tagenb |= target_mask;
				else
					adw->tagenb &= ~target_mask;
			}

			if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) {
				u_int wdtrenb_orig;
				u_int wdtrenb;
				u_int wdtrdone;

				wdtrenb_orig =
				    adw_lram_read_16(adw, ADW_MC_WDTR_ABLE);
				wdtrenb = wdtrenb_orig;
				wdtrdone = adw_lram_read_16(adw,
							    ADW_MC_WDTR_DONE);
				switch (cts->bus_width) {
				case MSG_EXT_WDTR_BUS_32_BIT:
				case MSG_EXT_WDTR_BUS_16_BIT:
					wdtrenb |= target_mask;
					break;
				case MSG_EXT_WDTR_BUS_8_BIT:
				default:
					wdtrenb &= ~target_mask;
					break;
				}
				if (wdtrenb != wdtrenb_orig) {
					adw_lram_write_16(adw,
							  ADW_MC_WDTR_ABLE,
							  wdtrenb);
					wdtrdone &= ~target_mask;
					adw_lram_write_16(adw,
							  ADW_MC_WDTR_DONE,
							  wdtrdone);
					/* Wide negotiation forces async */
					sdtrdone &= ~target_mask;
					adw_lram_write_16(adw,
							  ADW_MC_SDTR_DONE,
							  sdtrdone);
				}
			}

			if (((cts->valid & CCB_TRANS_SYNC_RATE_VALID) != 0)
			 || ((cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) != 0)) {
				u_int sdtr_orig;
				u_int sdtr;
				u_int sdtrable_orig;
				u_int sdtrable;

				sdtr = adw_get_chip_sdtr(adw,
							 ccb->ccb_h.target_id);
				sdtr_orig = sdtr;
				sdtrable = adw_lram_read_16(adw,
							    ADW_MC_SDTR_ABLE);
				sdtrable_orig = sdtrable;

				if ((cts->valid
				   & CCB_TRANS_SYNC_RATE_VALID) != 0) {

					sdtr =
					    adw_find_sdtr(adw,
							  cts->sync_period);
				}

				if ((cts->valid
				   & CCB_TRANS_SYNC_OFFSET_VALID) != 0) {
					if (cts->sync_offset == 0)
						sdtr = ADW_MC_SDTR_ASYNC;
				}

				if (sdtr == ADW_MC_SDTR_ASYNC)
					sdtrable &= ~target_mask;
				else
					sdtrable |= target_mask;
				if (sdtr != sdtr_orig
				 || sdtrable != sdtrable_orig) {
					adw_set_chip_sdtr(adw,
							  ccb->ccb_h.target_id,
							  sdtr);
					sdtrdone &= ~target_mask;
					adw_lram_write_16(adw, ADW_MC_SDTR_ABLE,
							  sdtrable);
					adw_lram_write_16(adw, ADW_MC_SDTR_DONE,
							  sdtrdone);

				}
			}
		}
		splx(s);
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
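
	/*
	 * Added note (commentary, not original text): in the case above,
	 * the *_ABLE LRAM words advertise what the firmware may negotiate,
	 * while the *_DONE words record what has already been negotiated.
	 * Clearing a target's bit in ADW_MC_WDTR_DONE or ADW_MC_SDTR_DONE
	 * therefore forces a fresh negotiation on the next command; a
	 * width change also clears the SDTR_DONE bit because wide
	 * negotiation resets any sync agreement.
	 */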
	case XPT_GET_TRAN_SETTINGS:
	/* Get default/user set transfer settings for the target */
	{
		struct	ccb_trans_settings *cts;
		u_int	target_mask;

		cts = &ccb->cts;
		target_mask = 0x01 << ccb->ccb_h.target_id;
		if ((cts->flags & CCB_TRANS_USER_SETTINGS) != 0) {
			u_int mc_sdtr;

			cts->flags = 0;
			if ((adw->user_discenb & target_mask) != 0)
				cts->flags |= CCB_TRANS_DISC_ENB;

			if ((adw->user_tagenb & target_mask) != 0)
				cts->flags |= CCB_TRANS_TAG_ENB;

			if ((adw->user_wdtr & target_mask) != 0)
				cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
			else
				cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;

			mc_sdtr = adw_get_user_sdtr(adw, ccb->ccb_h.target_id);
			cts->sync_period = adw_find_period(adw, mc_sdtr);
			if (cts->sync_period != 0)
				cts->sync_offset = 15; /* XXX ??? */
			else
				cts->sync_offset = 0;

			cts->valid = CCB_TRANS_SYNC_RATE_VALID
				   | CCB_TRANS_SYNC_OFFSET_VALID
				   | CCB_TRANS_BUS_WIDTH_VALID
				   | CCB_TRANS_DISC_VALID
				   | CCB_TRANS_TQ_VALID;
			ccb->ccb_h.status = CAM_REQ_CMP;
		} else {
			u_int targ_tinfo;

			cts->flags = 0;
			if ((adw_lram_read_16(adw, ADW_MC_DISC_ENABLE)
			  & target_mask) != 0)
				cts->flags |= CCB_TRANS_DISC_ENB;

			if ((adw->tagenb & target_mask) != 0)
				cts->flags |= CCB_TRANS_TAG_ENB;

			targ_tinfo =
			    adw_lram_read_16(adw,
					     ADW_MC_DEVICE_HSHK_CFG_TABLE
					     + (2 * ccb->ccb_h.target_id));

			if ((targ_tinfo & ADW_HSHK_CFG_WIDE_XFR) != 0)
				cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
			else
				cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;

			cts->sync_period =
			    adw_hshk_cfg_period_factor(targ_tinfo);

			cts->sync_offset = targ_tinfo & ADW_HSHK_CFG_OFFSET;
			if (cts->sync_period == 0)
				cts->sync_offset = 0;

			if (cts->sync_offset == 0)
				cts->sync_period = 0;
		}
		cts->valid = CCB_TRANS_SYNC_RATE_VALID
			   | CCB_TRANS_SYNC_OFFSET_VALID
			   | CCB_TRANS_BUS_WIDTH_VALID
			   | CCB_TRANS_DISC_VALID
			   | CCB_TRANS_TQ_VALID;
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	case XPT_CALC_GEOMETRY:
	{
		struct	  ccb_calc_geometry *ccg;
		u_int32_t size_mb;
		u_int32_t secs_per_cylinder;
		int	  extended;

		/*
		 * XXX Use Adaptec translation until I find out how to
		 *     get this information from the card.
		 */
		ccg = &ccb->ccg;
		size_mb = ccg->volume_size
			/ ((1024L * 1024L) / ccg->block_size);
		extended = 1;

		if (size_mb > 1024 && extended) {
			ccg->heads = 255;
			ccg->secs_per_track = 63;
		} else {
			ccg->heads = 64;
			ccg->secs_per_track = 32;
		}
		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
		ccb->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
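
	/*
	 * Worked example of the translation above (added commentary): a
	 * 2GB disk with 512-byte blocks has volume_size = 4194304 blocks,
	 * so size_mb = 4194304 / (1048576 / 512) = 2048.  Since 2048 is
	 * greater than 1024, extended translation selects 255 heads and
	 * 63 sectors per track, giving secs_per_cylinder = 16065 and
	 * cylinders = 4194304 / 16065 = 261.
	 */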
	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
	{
		int failure;

		failure = adw_reset_bus(adw);
		if (failure != 0) {
			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
		} else {
			if (bootverbose) {
				xpt_print_path(adw->path);
				printf("Bus Reset Delivered\n");
			}
			ccb->ccb_h.status = CAM_REQ_CMP;
		}
		xpt_done(ccb);
		break;
	}
	case XPT_TERM_IO:		/* Terminate the I/O process */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1;
		cpi->hba_inquiry = PI_WIDE_16|PI_SDTR_ABLE|PI_TAG_ABLE;
		cpi->target_sprt = 0;
		cpi->hba_misc = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = ADW_MAX_TID;
		cpi->max_lun = ADW_MAX_LUN;
		cpi->initiator_id = adw->initiator_id;
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 3300;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "AdvanSys", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->ccb_h.status = CAM_REQ_CMP;
		xpt_done(ccb);
		break;
	}
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		break;
	}
}

static void
adw_poll(struct cam_sim *sim)
{
	adw_intr(cam_sim_softc(sim));
}

static void
adw_async(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
{
}

struct adw_softc *
adw_alloc(device_t dev, struct resource *regs, int regs_type, int regs_id)
{
	struct	adw_softc *adw;
	int	i;

	/*
	 * Allocate a storage area for us
	 */
	adw = malloc(sizeof(struct adw_softc), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (adw == NULL) {
		printf("adw%d: cannot malloc!\n", device_get_unit(dev));
		return NULL;
	}
	LIST_INIT(&adw->pending_ccbs);
	SLIST_INIT(&adw->sg_maps);
	adw->device = dev;
	adw->unit = device_get_unit(dev);
	adw->regs_res_type = regs_type;
	adw->regs_res_id = regs_id;
	adw->regs = regs;
	adw->tag = rman_get_bustag(regs);
	adw->bsh = rman_get_bushandle(regs);
	i = adw->unit / 10;
	adw->name = malloc(sizeof("adw") + i + 1, M_DEVBUF, M_NOWAIT);
	if (adw->name == NULL) {
		printf("adw%d: cannot malloc name!\n", adw->unit);
		free(adw, M_DEVBUF);
		return NULL;
	}
	sprintf(adw->name, "adw%d", adw->unit);
	return(adw);
}
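
/*
 * Hypothetical front-end usage sketch (added commentary, not from this
 * file): a bus attachment such as the PCI front end would allocate the
 * register resource and then wrap adw_init()/adw_attach() with
 * adw_alloc()/adw_free().  The variable names are illustrative, and a
 * real front end must also set up adw->parent_dmat and adw->irq before
 * calling adw_init() and adw_attach().
 */
#if 0
	struct adw_softc *adw;
	struct resource *regs;
	int rid = 0;

	regs = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
				  0, ~0, 1, RF_ACTIVE);
	if (regs == NULL)
		return (ENXIO);
	adw = adw_alloc(dev, regs, SYS_RES_MEMORY, rid);
	if (adw == NULL)
		return (ENOMEM);
	if (adw_init(adw) != 0 || adw_attach(adw) != 0) {
		adw_free(adw);
		return (ENXIO);
	}
#endif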

void
adw_free(struct adw_softc *adw)
{
	switch (adw->init_level) {
	case 9:
	{
		struct sg_map_node *sg_map;

		while ((sg_map = SLIST_FIRST(&adw->sg_maps)) != NULL) {
			SLIST_REMOVE_HEAD(&adw->sg_maps, links);
			bus_dmamap_unload(adw->sg_dmat,
					  sg_map->sg_dmamap);
			bus_dmamem_free(adw->sg_dmat, sg_map->sg_vaddr,
					sg_map->sg_dmamap);
			free(sg_map, M_DEVBUF);
		}
		bus_dma_tag_destroy(adw->sg_dmat);
	}
	case 8:
		bus_dmamap_unload(adw->acb_dmat, adw->acb_dmamap);
	case 7:
		bus_dmamem_free(adw->acb_dmat, adw->acbs,
				adw->acb_dmamap);
		bus_dmamap_destroy(adw->acb_dmat, adw->acb_dmamap);
	case 6:
		bus_dma_tag_destroy(adw->acb_dmat);
	case 5:
		bus_dmamap_unload(adw->carrier_dmat, adw->carrier_dmamap);
	case 4:
		bus_dmamem_free(adw->carrier_dmat, adw->carriers,
				adw->carrier_dmamap);
		bus_dmamap_destroy(adw->carrier_dmat, adw->carrier_dmamap);
	case 3:
		bus_dma_tag_destroy(adw->carrier_dmat);
	case 2:
		bus_dma_tag_destroy(adw->buffer_dmat);
	case 1:
		bus_dma_tag_destroy(adw->parent_dmat);
	case 0:
		break;
	}
	free(adw->name, M_DEVBUF);
	free(adw, M_DEVBUF);
}

int
adw_init(struct adw_softc *adw)
{
	struct	  adw_eeprom eep_config;
	u_int	  tid;
	u_int	  i;
	u_int16_t checksum;
	u_int16_t scsicfg1;

	checksum = adw_eeprom_read(adw, &eep_config);
	bcopy(eep_config.serial_number, adw->serial_number,
	      sizeof(adw->serial_number));
	if (checksum != eep_config.checksum) {
		u_int16_t serial_number[3];

		adw->flags |= ADW_EEPROM_FAILED;
		printf("%s: EEPROM checksum failed.  Restoring Defaults\n",
		       adw_name(adw));

		/*
		 * Restore the default EEPROM settings.
		 * Assume the 6 byte board serial number that was read
		 * from EEPROM is correct even if the EEPROM checksum
		 * failed.
		 */
		bcopy(adw->default_eeprom, &eep_config, sizeof(eep_config));
		bcopy(adw->serial_number, eep_config.serial_number,
		      sizeof(serial_number));
		adw_eeprom_write(adw, &eep_config);
	}

	/* Pull eeprom information into our softc. */
	adw->bios_ctrl = eep_config.bios_ctrl;
	adw->user_wdtr = eep_config.wdtr_able;
	for (tid = 0; tid < ADW_MAX_TID; tid++) {
		u_int	  mc_sdtr;
		u_int16_t tid_mask;

		tid_mask = 0x1 << tid;
		if ((adw->features & ADW_ULTRA) != 0) {
			/*
			 * Ultra chips store sdtr and ultraenb
			 * bits in their seeprom, so we must
			 * construct valid mc_sdtr entries
			 * indirectly.
			 */
			if (eep_config.sync1.sync_enable & tid_mask) {
				if (eep_config.sync2.ultra_enable & tid_mask)
					mc_sdtr = ADW_MC_SDTR_20;
				else
					mc_sdtr = ADW_MC_SDTR_10;
			} else
				mc_sdtr = ADW_MC_SDTR_ASYNC;
		} else {
			switch (ADW_TARGET_GROUP(tid)) {
			case 3:
				mc_sdtr = eep_config.sync4.sdtr4;
				break;
			case 2:
				mc_sdtr = eep_config.sync3.sdtr3;
				break;
			case 1:
				mc_sdtr = eep_config.sync2.sdtr2;
				break;
			default: /* Shut up compiler */
			case 0:
				mc_sdtr = eep_config.sync1.sdtr1;
				break;
			}
			mc_sdtr >>= ADW_TARGET_GROUP_SHIFT(tid);
			mc_sdtr &= 0xFF;
		}
		adw_set_user_sdtr(adw, tid, mc_sdtr);
	}
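
	/*
	 * Added truth table for the Ultra-chip branch above: the EEPROM
	 * stores per-target sync_enable and ultra_enable bits rather
	 * than SDTR codes, and they map to microcode SDTR values as
	 * follows:
	 *
	 *   sync_enable  ultra_enable  mc_sdtr
	 *   -----------  ------------  -----------------
	 *   0            x             ADW_MC_SDTR_ASYNC
	 *   1            0             ADW_MC_SDTR_10
	 *   1            1             ADW_MC_SDTR_20
	 */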

	adw->user_tagenb = eep_config.tagqng_able;
	adw->user_discenb = eep_config.disc_enable;
	adw->max_acbs = eep_config.max_host_qng;
	adw->initiator_id = (eep_config.adapter_scsi_id & ADW_MAX_TID);

	/*
	 * Sanity check the number of host openings.
	 */
	if (adw->max_acbs > ADW_DEF_MAX_HOST_QNG)
		adw->max_acbs = ADW_DEF_MAX_HOST_QNG;
	else if (adw->max_acbs < ADW_DEF_MIN_HOST_QNG) {
		/* If the value is zero, assume it is uninitialized. */
		if (adw->max_acbs == 0)
			adw->max_acbs = ADW_DEF_MAX_HOST_QNG;
		else
			adw->max_acbs = ADW_DEF_MIN_HOST_QNG;
	}

	scsicfg1 = 0;
	if ((adw->features & ADW_ULTRA2) != 0) {
		switch (eep_config.termination_lvd) {
		default:
			printf("%s: Invalid EEPROM LVD Termination Settings.\n",
			       adw_name(adw));
			printf("%s: Reverting to Automatic LVD Termination\n",
			       adw_name(adw));
			/* FALLTHROUGH */
		case ADW_EEPROM_TERM_AUTO:
			break;
		case ADW_EEPROM_TERM_BOTH_ON:
			scsicfg1 |= ADW2_SCSI_CFG1_TERM_LVD_LO;
			/* FALLTHROUGH */
		case ADW_EEPROM_TERM_HIGH_ON:
			scsicfg1 |= ADW2_SCSI_CFG1_TERM_LVD_HI;
			/* FALLTHROUGH */
		case ADW_EEPROM_TERM_OFF:
			scsicfg1 |= ADW2_SCSI_CFG1_DIS_TERM_DRV;
			break;
		}
	}

	switch (eep_config.termination_se) {
	default:
		printf("%s: Invalid SE EEPROM Termination Settings.\n",
		       adw_name(adw));
		printf("%s: Reverting to Automatic SE Termination\n",
		       adw_name(adw));
		/* FALLTHROUGH */
	case ADW_EEPROM_TERM_AUTO:
		break;
	case ADW_EEPROM_TERM_BOTH_ON:
		scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_L;
		/* FALLTHROUGH */
	case ADW_EEPROM_TERM_HIGH_ON:
		scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_H;
		/* FALLTHROUGH */
	case ADW_EEPROM_TERM_OFF:
		scsicfg1 |= ADW_SCSI_CFG1_TERM_CTL_MANUAL;
		break;
	}
	printf("%s: SCSI ID %d, ", adw_name(adw), adw->initiator_id);

	/* DMA tag for mapping buffers into device visible space. */
	if (bus_dma_tag_create(
			/* parent	*/ adw->parent_dmat,
			/* alignment	*/ 1,
			/* boundary	*/ 0,
			/* lowaddr	*/ BUS_SPACE_MAXADDR_32BIT,
			/* highaddr	*/ BUS_SPACE_MAXADDR,
			/* filter	*/ NULL,
			/* filterarg	*/ NULL,
			/* maxsize	*/ MAXBSIZE,
			/* nsegments	*/ ADW_SGSIZE,
			/* maxsegsz	*/ BUS_SPACE_MAXSIZE_32BIT,
			/* flags	*/ BUS_DMA_ALLOCNOW,
			&adw->buffer_dmat) != 0) {
		return (ENOMEM);
	}

	adw->init_level++;

	/* DMA tag for our ccb carrier structures */
	if (bus_dma_tag_create(
			/* parent	*/ adw->parent_dmat,
			/* alignment	*/ 0x10,
			/* boundary	*/ 0,
			/* lowaddr	*/ BUS_SPACE_MAXADDR_32BIT,
			/* highaddr	*/ BUS_SPACE_MAXADDR,
			/* filter	*/ NULL,
			/* filterarg	*/ NULL,
			/* maxsize	*/ (adw->max_acbs +
					    ADW_NUM_CARRIER_QUEUES + 1) *
					    sizeof(struct adw_carrier),
			/* nsegments	*/ 1,
			/* maxsegsz	*/ BUS_SPACE_MAXSIZE_32BIT,
			/* flags	*/ 0,
			&adw->carrier_dmat) != 0) {
		return (ENOMEM);
	}

	adw->init_level++;

	/* Allocation for our ccb carrier structures */
	if (bus_dmamem_alloc(adw->carrier_dmat, (void **)&adw->carriers,
			     BUS_DMA_NOWAIT, &adw->carrier_dmamap) != 0) {
		return (ENOMEM);
	}

	adw->init_level++;

	/* And permanently map them */
	bus_dmamap_load(adw->carrier_dmat, adw->carrier_dmamap,
			adw->carriers,
			(adw->max_acbs + ADW_NUM_CARRIER_QUEUES + 1)
			 * sizeof(struct adw_carrier),
			adwmapmem, &adw->carrier_busbase, /*flags*/0);

	/* Clear them out. */
	bzero(adw->carriers, (adw->max_acbs + ADW_NUM_CARRIER_QUEUES + 1)
			     * sizeof(struct adw_carrier));

	/* Setup our free carrier list */
	adw->free_carriers = adw->carriers;
	for (i = 0; i < adw->max_acbs + ADW_NUM_CARRIER_QUEUES; i++) {
		adw->carriers[i].carr_offset =
			carriervtobo(adw, &adw->carriers[i]);
		adw->carriers[i].carr_ba =
			carriervtob(adw, &adw->carriers[i]);
		adw->carriers[i].areq_ba = 0;
		adw->carriers[i].next_ba =
			carriervtobo(adw, &adw->carriers[i+1]);
	}
	/* Terminal carrier.  Never leaves the freelist */
	adw->carriers[i].carr_offset =
		carriervtobo(adw, &adw->carriers[i]);
	adw->carriers[i].carr_ba =
		carriervtob(adw, &adw->carriers[i]);
	adw->carriers[i].areq_ba = 0;
	adw->carriers[i].next_ba = ~0;
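
	/*
	 * Added note on the carrier freelist built above: carriers are
	 * linked through their next_ba fields using the carr_offset
	 * (bus-address offset) of the following carrier, so the same
	 * list can be followed by both the host and the firmware.  The
	 * terminal carrier's next_ba of ~0 marks the end of the list and
	 * never leaves the freelist; adw_intr() below pushes completed
	 * carriers back by pointing their next_ba at the current
	 * freelist head's carr_offset.
	 */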

	adw->init_level++;

	/* DMA tag for our acb structures */
	if (bus_dma_tag_create(
			/* parent	*/ adw->parent_dmat,
			/* alignment	*/ 1,
			/* boundary	*/ 0,
			/* lowaddr	*/ BUS_SPACE_MAXADDR,
			/* highaddr	*/ BUS_SPACE_MAXADDR,
			/* filter	*/ NULL,
			/* filterarg	*/ NULL,
			/* maxsize	*/ adw->max_acbs * sizeof(struct acb),
			/* nsegments	*/ 1,
			/* maxsegsz	*/ BUS_SPACE_MAXSIZE_32BIT,
			/* flags	*/ 0,
			&adw->acb_dmat) != 0) {
		return (ENOMEM);
	}

	adw->init_level++;

	/* Allocation for our ccbs */
	if (bus_dmamem_alloc(adw->acb_dmat, (void **)&adw->acbs,
			     BUS_DMA_NOWAIT, &adw->acb_dmamap) != 0)
		return (ENOMEM);

	adw->init_level++;

	/* And permanently map them */
	bus_dmamap_load(adw->acb_dmat, adw->acb_dmamap,
			adw->acbs,
			adw->max_acbs * sizeof(struct acb),
			adwmapmem, &adw->acb_busbase, /*flags*/0);

	/* Clear them out. */
	bzero(adw->acbs, adw->max_acbs * sizeof(struct acb));

	/* DMA tag for our S/G structures.  We allocate in page sized chunks */
	if (bus_dma_tag_create(
			/* parent	*/ adw->parent_dmat,
			/* alignment	*/ 1,
			/* boundary	*/ 0,
			/* lowaddr	*/ BUS_SPACE_MAXADDR,
			/* highaddr	*/ BUS_SPACE_MAXADDR,
			/* filter	*/ NULL,
			/* filterarg	*/ NULL,
			/* maxsize	*/ PAGE_SIZE,
			/* nsegments	*/ 1,
			/* maxsegsz	*/ BUS_SPACE_MAXSIZE_32BIT,
			/* flags	*/ 0,
			&adw->sg_dmat) != 0) {
		return (ENOMEM);
	}

	adw->init_level++;

	/* Allocate our first batch of ccbs */
	if (adwallocacbs(adw) == 0)
		return (ENOMEM);

	if (adw_init_chip(adw, scsicfg1) != 0)
		return (ENXIO);

	printf("Queue Depth %d\n", adw->max_acbs);

	return (0);
}

/*
 * Attach all the sub-devices we can find
 */
int
adw_attach(struct adw_softc *adw)
{
	struct ccb_setasync csa;
	struct cam_devq *devq;
	int s;
	int error;

	error = 0;
	s = splcam();
	/* Hook up our interrupt handler */
	if ((error = bus_setup_intr(adw->device, adw->irq,
				    INTR_TYPE_CAM | INTR_ENTROPY, adw_intr,
				    adw, &adw->ih)) != 0) {
		device_printf(adw->device, "bus_setup_intr() failed: %d\n",
			      error);
		goto fail;
	}

	/* Start the Risc processor now that we are fully configured. */
	adw_outw(adw, ADW_RISC_CSR, ADW_RISC_CSR_RUN);

	/*
	 * Create the device queue for our SIM.
	 */
	devq = cam_simq_alloc(adw->max_acbs);
	if (devq == NULL) {
		error = ENOMEM;
		goto fail;
	}

	/*
	 * Construct our SIM entry.
	 */
	adw->sim = cam_sim_alloc(adw_action, adw_poll, "adw", adw, adw->unit,
				 1, adw->max_acbs, devq);
	if (adw->sim == NULL) {
		error = ENOMEM;
		goto fail;
	}

	/*
	 * Register the bus.
	 */
	if (xpt_bus_register(adw->sim, 0) != CAM_SUCCESS) {
		cam_sim_free(adw->sim, /*free devq*/TRUE);
		error = ENOMEM;
		goto fail;
	}

	if (xpt_create_path(&adw->path, /*periph*/NULL, cam_sim_path(adw->sim),
			    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD)
	   == CAM_REQ_CMP) {
		xpt_setup_ccb(&csa.ccb_h, adw->path, /*priority*/5);
		csa.ccb_h.func_code = XPT_SASYNC_CB;
		csa.event_enable = AC_LOST_DEVICE;
		csa.callback = adw_async;
		csa.callback_arg = adw;
		xpt_action((union ccb *)&csa);
	}

fail:
	splx(s);
	return (error);
}

void
adw_intr(void *arg)
{
	struct	adw_softc *adw;
	u_int	int_stat;

	adw = (struct adw_softc *)arg;
	if ((adw_inw(adw, ADW_CTRL_REG) & ADW_CTRL_REG_HOST_INTR) == 0)
		return;

	/* Reading the register clears the interrupt. */
	int_stat = adw_inb(adw, ADW_INTR_STATUS_REG);

	if ((int_stat & ADW_INTR_STATUS_INTRB) != 0) {
		u_int intrb_code;

		/* Async Microcode Event */
		intrb_code = adw_lram_read_8(adw, ADW_MC_INTRB_CODE);
		switch (intrb_code) {
		case ADW_ASYNC_CARRIER_READY_FAILURE:
			/*
			 * The RISC missed our update of
			 * the commandq.
			 */
			if (LIST_FIRST(&adw->pending_ccbs) != NULL)
				adw_tickle_risc(adw, ADW_TICKLE_A);
			break;
		case ADW_ASYNC_SCSI_BUS_RESET_DET:
			/*
			 * The firmware detected a SCSI Bus reset.
			 */
			printf("Someone Reset the Bus\n");
			adw_handle_bus_reset(adw, /*initiated*/FALSE);
			break;
		case ADW_ASYNC_RDMA_FAILURE:
			/*
			 * Handle RDMA failure by resetting the
			 * SCSI Bus and chip.
			 */
#if XXX
			AdvResetChipAndSB(adv_dvc_varp);
#endif
			break;

		case ADW_ASYNC_HOST_SCSI_BUS_RESET:
			/*
			 * Host generated SCSI bus reset occurred.
			 */
			adw_handle_bus_reset(adw, /*initiated*/TRUE);
			break;
		default:
			printf("adw_intr: unknown async code 0x%x\n",
			       intrb_code);
			break;
		}
	}

	/*
	 * Run down the response queue.
	 */
	while ((adw->responseq->next_ba & ADW_RQ_DONE) != 0) {
		struct adw_carrier *free_carrier;
		struct acb *acb;
		union ccb *ccb;

#if 0
		printf("0x%x, 0x%x, 0x%x, 0x%x\n",
		       adw->responseq->carr_offset,
		       adw->responseq->carr_ba,
		       adw->responseq->areq_ba,
		       adw->responseq->next_ba);
#endif
		/*
		 * The firmware copies the adw_scsi_req_q.acb_baddr
		 * field into the areq_ba field of the carrier.
		 */
		acb = acbbotov(adw, adw->responseq->areq_ba);

		/*
		 * The least significant four bits of the next_ba
		 * field are used as flags.  Mask them out and then
		 * advance through the list.
		 */
		free_carrier = adw->responseq;
		adw->responseq =
		    carrierbotov(adw, free_carrier->next_ba & ADW_NEXT_BA_MASK);
		free_carrier->next_ba = adw->free_carriers->carr_offset;
		adw->free_carriers = free_carrier;

		/* Process CCB */
		ccb = acb->ccb;
		untimeout(adwtimeout, acb, ccb->ccb_h.timeout_ch);
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {