Lines Matching refs:scb

128 struct scb *scb);
131 struct scb *scb);
167 struct scb *scb);
180 struct scb *scb, u_int col_idx);
182 struct scb *scb);
185 struct scb *prev_scb,
186 struct scb *scb);
216 struct scb *scb);
218 struct scb *scb);
239 struct scb *scb);
246 struct scb *scb);
248 struct scb *scb);
257 static int ahd_match_scb(struct ahd_softc *ahd, struct scb *scb,
406 ahd_sg_setup(struct ahd_softc *ahd, struct scb *scb,
409 scb->sg_count++;
430 ahd_setup_scb_common(struct ahd_softc *ahd, struct scb *scb)
433 scb->crc_retry_count = 0;
434 if ((scb->flags & SCB_PACKETIZED) != 0) {
436 scb->hscb->task_attribute = scb->hscb->control & SCB_TAG_TYPE;
438 if (ahd_get_transfer_length(scb) & 0x01)
439 scb->hscb->task_attribute = SCB_XFERLEN_ODD;
441 scb->hscb->task_attribute = 0;
444 if (scb->hscb->cdb_len <= MAX_CDB_LEN_WITH_SENSE_ADDR
445 || (scb->hscb->cdb_len & SCB_CDB_LEN_PTR) != 0)
446 scb->hscb->shared_data.idata.cdb_plus_saddr.sense_addr =
447 ahd_htole32(scb->sense_busaddr);
451 ahd_setup_data_scb(struct ahd_softc *ahd, struct scb *scb)
459 sg = (struct ahd_dma64_seg *)scb->sg_list;
460 scb->hscb->dataptr = sg->addr;
461 scb->hscb->datacnt = sg->len;
466 sg = (struct ahd_dma_seg *)scb->sg_list;
467 dataptr_words = (uint32_t*)&scb->hscb->dataptr;
474 scb->hscb->dataptr |= ahd_htole64(high_addr << 8);
476 scb->hscb->datacnt = sg->len;
484 scb->hscb->sgptr = ahd_htole32(scb->sg_list_busaddr|SG_FULL_RESID);
488 ahd_setup_noxfer_scb(struct ahd_softc *ahd, struct scb *scb)
490 scb->hscb->sgptr = ahd_htole32(SG_LIST_NULL);
491 scb->hscb->dataptr = 0;
492 scb->hscb->datacnt = 0;
497 ahd_sg_bus_to_virt(struct ahd_softc *ahd, struct scb *scb, uint32_t sg_busaddr)
502 sg_offset = sg_busaddr - (scb->sg_list_busaddr - ahd_sg_size(ahd));
503 return ((uint8_t *)scb->sg_list + sg_offset);
507 ahd_sg_virt_to_bus(struct ahd_softc *ahd, struct scb *scb, void *sg)
512 sg_offset = ((uint8_t *)sg - (uint8_t *)scb->sg_list)
515 return (scb->sg_list_busaddr + sg_offset);
519 ahd_sync_scb(struct ahd_softc *ahd, struct scb *scb, int op)
522 scb->hscb_map->dmamap,
523 /*offset*/(uint8_t*)scb->hscb - scb->hscb_map->vaddr,
524 /*len*/sizeof(*scb->hscb), op);
528 ahd_sync_sglist(struct ahd_softc *ahd, struct scb *scb, int op)
530 if (scb->sg_count == 0)
534 scb->sg_map->dmamap,
535 /*offset*/scb->sg_list_busaddr - ahd_sg_size(ahd),
536 /*len*/ahd_sg_size(ahd) * scb->sg_count, op);
540 ahd_sync_sense(struct ahd_softc *ahd, struct scb *scb, int op)
543 scb->sense_map->dmamap,
544 /*offset*/scb->sense_busaddr,
787 struct scb *
790 struct scb* scb;
794 scb = ahd->scb_data.scbindex[tag];
795 if (scb != NULL)
796 ahd_sync_scb(ahd, scb,
798 return (scb);
802 ahd_swap_with_next_hscb(struct ahd_softc *ahd, struct scb *scb)
813 * When we are called to queue "an arbitrary scb",
823 memcpy(q_hscb, scb->hscb, sizeof(*scb->hscb));
825 q_hscb->next_hscb_busaddr = scb->hscb->hscb_busaddr;
828 ahd->next_queued_hscb = scb->hscb;
829 ahd->next_queued_hscb_map = scb->hscb_map;
830 scb->hscb = q_hscb;
831 scb->hscb_map = q_hscb_map;
834 ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = scb;
841 ahd_queue_scb(struct ahd_softc *ahd, struct scb *scb)
843 ahd_swap_with_next_hscb(ahd, scb);
845 if (SCBID_IS_NULL(SCB_GET_TAG(scb)))
847 SCB_GET_TAG(scb));
852 ahd->qinfifo[AHD_QIN_WRAP(ahd->qinfifonext)] = SCB_GET_TAG(scb);
855 if (scb->sg_count != 0)
856 ahd_setup_data_scb(ahd, scb);
858 ahd_setup_noxfer_scb(ahd, scb);
859 ahd_setup_scb_common(ahd, scb);
865 ahd_sync_scb(ahd, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
871 host_dataptr = ahd_le64toh(scb->hscb->dataptr);
874 SCB_GET_TAG(scb), scb->hscb->scsiid,
875 ahd_le32toh(scb->hscb->hscb_busaddr),
878 ahd_le32toh(scb->hscb->datacnt));
1092 ahd_update_residual(struct ahd_softc *ahd, struct scb *scb)
1096 sgptr = ahd_le32toh(scb->hscb->sgptr);
1098 ahd_calc_residual(ahd, scb);
1102 ahd_complete_scb(struct ahd_softc *ahd, struct scb *scb)
1106 sgptr = ahd_le32toh(scb->hscb->sgptr);
1108 ahd_handle_scb_status(ahd, scb);
1110 ahd_done(ahd, scb);
1189 struct scb *scb;
1208 scb = ahd_lookup_scb(ahd, scbid);
1209 if (scb == NULL) {
1226 if (ahd_scb_active_in_fifo(ahd, scb) == 0)
1229 ahd_run_data_fifo(ahd, scb);
1282 ahd_complete_scb(ahd, scb);
1332 scb = ahd_lookup_scb(ahd, scbid);
1333 if (scb == NULL) {
1338 hscb_ptr = (uint8_t *)scb->hscb;
1342 ahd_complete_scb(ahd, scb);
1353 scb = ahd_lookup_scb(ahd, scbid);
1354 if (scb == NULL) {
1360 ahd_complete_scb(ahd, scb);
1370 scb = ahd_lookup_scb(ahd, scbid);
1371 if (scb == NULL) {
1377 ahd_complete_scb(ahd, scb);
1395 ahd_scb_active_in_fifo(struct ahd_softc *ahd, struct scb *scb)
1404 if (ahd_get_scbptr(ahd) != SCB_GET_TAG(scb)
1424 ahd_run_data_fifo(struct ahd_softc *ahd, struct scb *scb)
1578 sg = ahd_sg_bus_to_virt(ahd, scb, sgptr);
1585 sg = ahd_sg_bus_to_virt(ahd, scb, sgptr);
1661 struct scb *scb;
1675 scb = ahd_lookup_scb(ahd, scb_index);
1676 if (scb == NULL) {
1677 printk("%s: WARNING no command for scb %d "
1683 ahd_handle_scb_status(ahd, scb);
1685 ahd_done(ahd, scb);
1727 ahd_dump_sglist(struct scb *scb)
1731 if (scb->sg_count > 0) {
1732 if ((scb->ahd_softc->flags & AHD_64BIT_ADDRESSING) != 0) {
1735 sg_list = (struct ahd_dma64_seg*)scb->sg_list;
1736 for (i = 0; i < scb->sg_count; i++) {
1751 sg_list = (struct ahd_dma_seg*)scb->sg_list;
1752 for (i = 0; i < scb->sg_count; i++) {
1801 struct scb *scb;
1807 scb = ahd_lookup_scb(ahd, scbid);
1808 if (scb == NULL) {
1816 ahd_outb(ahd, SAVED_SCSIID, scb->hscb->scsiid);
1817 ahd_outb(ahd, SAVED_LUN, scb->hscb->lun);
1851 struct scb *scb;
1855 scb = ahd_lookup_scb(ahd, scbid);
1856 if (scb != NULL)
1857 ahd_print_path(ahd, scb);
1867 struct scb *scb;
1871 scb = ahd_lookup_scb(ahd, scbid);
1872 if (scb == NULL) {
1877 ahd_outq(ahd, HADDR, scb->sense_busaddr);
1906 struct scb *scb;
1921 scb = ahd_lookup_scb(ahd, scbid);
1922 if (scb == NULL) {
1929 ahd_compile_devinfo(&devinfo, SCB_GET_OUR_ID(scb),
1930 SCB_GET_TARGET(ahd, scb),
1931 SCB_GET_LUN(scb),
1932 SCB_GET_CHANNEL(ahd, scb),
1947 scb->hscb->control &= ~(TAG_ENB|SCB_TAG_TYPE);
1948 scb->hscb->control |= MK_MESSAGE;
1949 ahd_outb(ahd, SCB_CONTROL, scb->hscb->control);
1951 ahd_outb(ahd, SAVED_SCSIID, scb->hscb->scsiid);
1959 scb->flags &= ~SCB_PACKETIZED;
1960 scb->flags |= SCB_ABORT|SCB_EXTERNAL_RESET;
1961 ahd_freeze_devq(ahd, scb);
1962 ahd_set_transaction_status(scb, CAM_REQUEUE_REQ);
1963 ahd_freeze_scb(scb);
1980 ahd_print_path(ahd, scb);
1992 struct scb *scb;
2002 scb = ahd_lookup_scb(ahd, scb_index);
2003 if (scb == NULL) {
2059 struct scb *scb;
2079 scb = ahd_lookup_scb(ahd, scb_index);
2084 scb);
2100 scb);
2194 struct scb *scb;
2201 scb = ahd_lookup_scb(ahd, scbindex);
2205 ahd_print_path(ahd, scb);
2208 SCB_GET_TAG(scb));
2209 ahd_print_path(ahd, scb);
2214 ahd_get_transfer_length(scb), scb->sg_count);
2215 ahd_dump_sglist(scb);
2223 ahd_freeze_devq(ahd, scb);
2224 ahd_set_transaction_status(scb, CAM_DATA_RUN_ERR);
2225 ahd_freeze_scb(scb);
2231 struct scb *scb;
2239 scb = ahd_lookup_scb(ahd, scbid);
2240 if (scb != NULL
2241 && (scb->flags & SCB_RECOVERY_SCB) != 0)
2246 ahd_search_qinfifo(ahd, SCB_GET_TARGET(ahd, scb),
2247 SCB_GET_CHANNEL(ahd, scb),
2248 SCB_GET_LUN(scb), SCB_GET_TAG(scb),
2258 struct scb *scb;
2261 scb = ahd_lookup_scb(ahd, scbid);
2262 if (scb != NULL) {
2267 ahd_print_path(ahd, scb);
2269 scb->hscb->task_management);
2273 switch (scb->hscb->task_management) {
2275 tag = SCB_GET_TAG(scb);
2279 lun = scb->hscb->lun;
2281 ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb),
2286 lun = scb->hscb->lun;
2292 ahd_scb_devinfo(ahd, &devinfo, scb);
2312 struct scb *scb;
2319 scb = ahd_lookup_scb(ahd, scbid);
2320 if (scb != NULL) {
2325 ahd_print_path(ahd, scb);
2339 ahd_search_qinfifo(ahd, SCB_GET_TARGET(ahd, scb),
2340 SCB_GET_CHANNEL(ahd, scb),
2341 SCB_GET_LUN(scb), SCB_GET_TAG(scb),
2375 struct scb *scb;
2416 scb = ahd_lookup_scb(ahd, scbid);
2417 if (scb != NULL
2419 scb = NULL;
2480 scb = ahd_lookup_scb(ahd, scbid);
2481 if (scb == NULL) {
2482 printk("%s: ahd_intr - referenced scb not "
2483 "valid during SELTO scb(0x%x)\n",
2490 ahd_print_path(ahd, scb);
2495 ahd_scb_devinfo(ahd, &devinfo, scb);
2496 ahd_set_transaction_status(scb, CAM_SEL_TIMEOUT);
2497 ahd_freeze_devq(ahd, scb);
2570 scb = ahd_lookup_scb(ahd, scbid);
2571 if (scb == NULL) {
2577 packetized = (scb->flags & SCB_PACKETIZED) != 0;
2657 struct scb *scb;
2667 scb = NULL;
2706 scb = ahd_lookup_scb(ahd, scbid);
2707 if (scb != NULL && SCB_IS_SILENT(scb))
2802 if (scb == NULL) {
2833 if (scb != NULL && msg_out == INITIATOR_ERROR)
2834 scb->flags |= SCB_TRANSMISSION_ERROR;
2891 struct scb *scb;
2915 scb = ahd_lookup_scb(ahd, scbid);
2916 if (scb == NULL)
2957 if (scb->crc_retry_count < AHD_MAX_LQ_CRC_ERRORS) {
2958 if (SCB_IS_SILENT(scb) == FALSE) {
2959 ahd_print_path(ahd, scb);
2963 scb->crc_retry_count++;
2965 ahd_set_transaction_status(scb, CAM_UNCOR_PARITY);
2966 ahd_freeze_scb(scb);
2967 ahd_freeze_devq(ahd, scb);
2991 struct scb *scb;
2994 scb = ahd_lookup_scb(ahd, scbid);
2995 ahd_print_path(ahd, scb);
2998 ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb), 'A',
2999 SCB_GET_LUN(scb), SCB_GET_TAG(scb),
3018 struct scb *scb;
3043 scb = ahd_lookup_scb(ahd, scbid);
3044 if (scb != NULL
3046 scb = NULL;
3058 if (scb == NULL) {
3066 ahd_print_path(ahd, scb);
3068 SCB_GET_TAG(scb),
3072 tag = SCB_GET_TAG(scb);
3074 if ((scb->flags & SCB_EXTERNAL_RESET) != 0) {
3087 tag = SCB_GET_TAG(scb);
3088 saved_lun = scb->hscb->lun;
3142 if (scb != NULL) {
3149 ahd_freeze_devq(ahd, scb);
3150 ahd_qinfifo_requeue_tail(ahd, scb);
3168 if (scb != NULL) {
3175 ahd_freeze_devq(ahd, scb);
3176 ahd_qinfifo_requeue_tail(ahd, scb);
3194 if (scb != NULL) {
3201 ahd_freeze_devq(ahd, scb);
3202 ahd_qinfifo_requeue_tail(ahd, scb);
3231 if (scb != NULL && printerror != 0
3235 ahd_freeze_devq(ahd, scb);
3236 ahd_set_transaction_status(scb, CAM_REQUEUE_REQ);
3237 ahd_freeze_scb(scb);
3239 ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb),
3240 SCB_GET_CHANNEL(ahd, scb),
3241 SCB_GET_LUN(scb), SCB_LIST_NULL,
3248 ahd_done(ahd, scb);
3256 if (scb != NULL) {
3259 if ((scb->hscb->control & TAG_ENB) != 0)
3260 tag = SCB_GET_TAG(scb);
3263 ahd_print_path(ahd, scb);
3265 SCB_GET_LUN(scb), tag,
3292 struct scb *scb;
3301 scb = ahd_lookup_scb(ahd, scbid);
3315 scb = NULL;
3316 } else if (scb == NULL) {
3325 ahd_set_transaction_status(scb, CAM_SEQUENCE_FAIL);
3327 ahd_print_path(ahd, scb);
3338 ahd_print_path(ahd, scb);
3341 ahd_print_path(ahd, scb);
3368 if (scb == NULL) {
3375 ahd_print_path(ahd, scb);
3376 scb->flags |= SCB_ABORT;
3565 ahd_print_scb(struct scb *scb)
3570 hscb = scb->hscb;
3571 printk("scb:%p control:0x%x scsiid:0x%x lun:%d cdb_len:%d\n",
3572 (void *)scb,
3585 SCB_GET_TAG(scb));
3586 ahd_dump_sglist(scb);
4188 struct scb *pending_scb;
4343 struct scb *scb)
4348 our_id = SCSIID_OUR_ID(scb->hscb->scsiid);
4350 if ((scb->hscb->control & TARGET_SCB) != 0)
4352 ahd_compile_devinfo(devinfo, our_id, SCB_GET_TARGET(ahd, scb),
4353 SCB_GET_LUN(scb), SCB_GET_CHANNEL(ahd, scb), role);
4366 struct scb *scb)
4389 } else if (scb == NULL) {
4398 if ((scb->flags & SCB_DEVICE_RESET) == 0
4399 && (scb->flags & SCB_PACKETIZED) == 0
4403 identify_msg = MSG_IDENTIFYFLAG | SCB_GET_LUN(scb);
4404 if ((scb->hscb->control & DISCENB) != 0)
4409 if ((scb->hscb->control & TAG_ENB) != 0) {
4411 scb->hscb->control & (TAG_ENB|SCB_TAG_TYPE);
4412 ahd->msgout_buf[ahd->msgout_index++] = SCB_GET_TAG(scb);
4417 if (scb->flags & SCB_DEVICE_RESET) {
4420 ahd_print_path(ahd, scb);
4430 } else if ((scb->flags & SCB_ABORT) != 0) {
4432 if ((scb->hscb->control & TAG_ENB) != 0) {
4438 ahd_print_path(ahd, scb);
4440 (scb->hscb->control & TAG_ENB) != 0 ? " Tag" : "");
4449 } else if ((scb->flags & (SCB_AUTO_NEGOTIATE|SCB_NEGOTIATE)) != 0) {
4462 printk("SCSIID = %x, target_mask = %x\n", scb->hscb->scsiid,
4465 "SCB flags = %x", SCB_GET_TAG(scb), scb->hscb->control,
4467 scb->flags);
4476 scb->hscb->control &= ~MK_MESSAGE;
5492 struct scb *scb;
5500 scb = ahd_lookup_scb(ahd, scb_index);
5580 } else if ((scb->hscb->control & SIMPLE_QUEUE_TAG) != 0) {
5584 tag_type = (scb->hscb->control & SIMPLE_QUEUE_TAG);
5590 ahd_set_tags(ahd, scb->io_ctx, devinfo, AHD_QUEUE_NONE);
5598 ahd_set_tags(ahd, scb->io_ctx, devinfo, AHD_QUEUE_BASIC);
5608 scb->hscb->control &= mask;
5609 ahd_set_transaction_tag(scb, /*enabled*/FALSE,
5613 ahd_busy_tcl(ahd, BUILD_TCL(scb->hscb->scsiid, devinfo->lun),
5614 SCB_GET_TAG(scb));
5621 ahd_search_qinfifo(ahd, SCB_GET_TARGET(ahd, scb),
5622 SCB_GET_CHANNEL(ahd, scb),
5623 SCB_GET_LUN(scb), /*tag*/SCB_LIST_NULL,
5658 struct scb *scb;
5661 scb = ahd_lookup_scb(ahd, scb_index);
5667 || ahd_get_transfer_dir(scb) != CAM_DIR_IN) {
5715 sg = ahd_sg_bus_to_virt(ahd, scb, sgptr);
5723 if (sg != scb->sg_list
5742 sgptr = ahd_sg_virt_to_bus(ahd, scb,
5748 sg = ahd_sg_bus_to_virt(ahd, scb, sgptr);
5756 if (sg != scb->sg_list
5775 sgptr = ahd_sg_virt_to_bus(ahd, scb,
5807 struct scb *scb;
5819 scb = ahd_lookup_scb(ahd, scb_index);
5830 ahd_print_path(ahd, scb);
5854 sg = ahd_sg_bus_to_virt(ahd, scb, sgptr);
5866 sg = ahd_sg_bus_to_virt(ahd, scb, sgptr);
5952 struct scb *scb)
5963 if (scb != NULL && (scb->flags & SCB_AUTO_NEGOTIATE) != 0)
6075 printk("%s: scb size = 0x%x, hscb size = 0x%x\n",
6076 ahd_name(ahd), (u_int)sizeof(struct scb),
6385 /* DMA tag for our hardware scb structures */
6452 static struct scb *
6455 struct scb *scb;
6460 LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) {
6461 if (SCB_GET_TAG(scb) == tag)
6462 return (scb);
6468 TAILQ_FOREACH(scb, &ahd->scb_data.free_scbs, links.tqe) {
6469 struct scb *list_scb;
6471 list_scb = scb;
6482 LIST_FOREACH(scb, &ahd->scb_data.any_dev_free_scb_list, links.le) {
6483 if (SCB_GET_TAG(scb) == tag)
6484 return (scb);
6609 ahd_add_col_list(struct ahd_softc *ahd, struct scb *scb, u_int col_idx)
6613 struct scb *first_scb;
6615 scb->flags |= SCB_ON_COL_LIST;
6616 AHD_SET_SCB_COL_IDX(scb, col_idx);
6621 LIST_INSERT_AFTER(first_scb, scb, collision_links);
6623 LIST_INSERT_HEAD(free_list, scb, collision_links);
6624 TAILQ_INSERT_TAIL(free_tailq, scb, links.tqe);
6629 ahd_rem_col_list(struct ahd_softc *ahd, struct scb *scb)
6633 struct scb *first_scb;
6636 scb->flags &= ~SCB_ON_COL_LIST;
6637 col_idx = AHD_GET_SCB_COL_IDX(ahd, scb);
6641 if (first_scb == scb) {
6642 struct scb *next_scb;
6649 next_scb = LIST_NEXT(scb, collision_links);
6651 TAILQ_INSERT_AFTER(free_tailq, scb,
6654 TAILQ_REMOVE(free_tailq, scb, links.tqe);
6656 LIST_REMOVE(scb, collision_links);
6660 * Get a free scb. If there are none, see if we can allocate a new SCB.
6662 struct scb *
6665 struct scb *scb;
6670 TAILQ_FOREACH(scb, &ahd->scb_data.free_scbs, links.tqe) {
6671 if (AHD_GET_SCB_COL_IDX(ahd, scb) != col_idx) {
6672 ahd_rem_col_list(ahd, scb);
6676 if ((scb = LIST_FIRST(&ahd->scb_data.any_dev_free_scb_list)) == NULL) {
6683 LIST_REMOVE(scb, links.le);
6685 && (scb->col_scb != NULL)
6686 && (scb->col_scb->flags & SCB_ACTIVE) == 0) {
6687 LIST_REMOVE(scb->col_scb, links.le);
6688 ahd_add_col_list(ahd, scb->col_scb, col_idx);
6691 scb->flags |= SCB_ACTIVE;
6692 return (scb);
6699 ahd_free_scb(struct ahd_softc *ahd, struct scb *scb)
6702 scb->flags = SCB_FLAG_NONE;
6703 scb->hscb->control = 0;
6704 ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = NULL;
6706 if (scb->col_scb == NULL) {
6712 scb, links.le);
6713 } else if ((scb->col_scb->flags & SCB_ON_COL_LIST) != 0) {
6720 ahd_rem_col_list(ahd, scb->col_scb);
6722 scb, links.le);
6724 scb->col_scb, links.le);
6725 } else if ((scb->col_scb->flags
6727 && (scb->col_scb->hscb->control & TAG_ENB) != 0) {
6734 ahd_add_col_list(ahd, scb,
6735 AHD_GET_SCB_COL_IDX(ahd, scb->col_scb));
6744 scb, links.le);
6747 ahd_platform_scb_free(ahd, scb);
6754 struct scb *next_scb;
6907 * The first entry is embedded in the scb.
7946 ahd_match_scb(struct ahd_softc *ahd, struct scb *scb, int target,
7949 int targ = SCB_GET_TARGET(ahd, scb);
7950 char chan = SCB_GET_CHANNEL(ahd, scb);
7951 int slun = SCB_GET_LUN(scb);
7963 group = XPT_FC_GROUP(scb->io_ctx->ccb_h.func_code);
7966 && ((tag == SCB_GET_TAG(scb))
7970 && ((tag == scb->io_ctx->csio.tag_id)
7974 match = ((tag == SCB_GET_TAG(scb)) || (tag == SCB_LIST_NULL));
7982 ahd_freeze_devq(struct ahd_softc *ahd, struct scb *scb)
7988 target = SCB_GET_TARGET(ahd, scb);
7989 lun = SCB_GET_LUN(scb);
7990 channel = SCB_GET_CHANNEL(ahd, scb);
7996 ahd_platform_freeze_devq(ahd, scb);
8000 ahd_qinfifo_requeue_tail(struct ahd_softc *ahd, struct scb *scb)
8002 struct scb *prev_scb;
8016 ahd_qinfifo_requeue(ahd, prev_scb, scb);
8022 ahd_qinfifo_requeue(struct ahd_softc *ahd, struct scb *prev_scb,
8023 struct scb *scb)
8028 busaddr = ahd_le32toh(scb->hscb->hscb_busaddr);
8031 prev_scb->hscb->next_hscb_busaddr = scb->hscb->hscb_busaddr;
8035 ahd->qinfifo[AHD_QIN_WRAP(ahd->qinfifonext)] = SCB_GET_TAG(scb);
8037 scb->hscb->next_hscb_busaddr = ahd->next_queued_hscb->hscb_busaddr;
8038 ahd_sync_scb(ahd, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
8062 struct scb *scb;
8076 LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) {
8085 ahd_done_with_status(struct ahd_softc *ahd, struct scb *scb, uint32_t status)
8090 ostat = ahd_get_transaction_status(scb);
8092 ahd_set_transaction_status(scb, status);
8093 cstat = ahd_get_transaction_status(scb);
8095 ahd_freeze_scb(scb);
8096 ahd_done(ahd, scb);
8104 struct scb *scb;
8105 struct scb *mk_msg_scb;
8106 struct scb *prev_scb;
8156 scb = ahd_lookup_scb(ahd, ahd->qinfifo[qinpos]);
8157 if (scb == NULL) {
8163 if (ahd_match_scb(ahd, scb, target, channel, lun, tag, role)) {
8165 * We found an scb that needs to be acted on.
8170 if ((scb->flags & SCB_ACTIVE) == 0)
8172 ahd_done_with_status(ahd, scb, status);
8180 ahd_qinfifo_requeue(ahd, prev_scb, scb);
8181 prev_scb = scb;
8185 ahd_qinfifo_requeue(ahd, prev_scb, scb);
8186 prev_scb = scb;
8228 scb = ahd_lookup_scb(ahd, scbid);
8229 if (scb == NULL) {
8236 if (ahd_match_scb(ahd, scb, target, channel, CAM_LUN_WILDCARD,
8246 printk(" %d ( ", SCB_GET_TARGET(ahd, scb));
8251 SCB_GET_TARGET(ahd, scb));
8261 * We found an scb that needs to be acted on.
8274 printk("Removing MK_MSG scb\n");
8292 printk(" 0x%x", SCB_GET_TAG(scb));
8301 && ahd_match_scb(ahd, scb, target, channel, CAM_LUN_WILDCARD,
8306 * queue with a pending MK_MESSAGE scb, we
8307 * must queue the MK_MESSAGE scb.
8335 struct scb *scb;
8354 scb = ahd_lookup_scb(ahd, scbid);
8355 if (scb == NULL) {
8363 if (ahd_match_scb(ahd, scb, target, channel,
8371 if ((scb->flags & SCB_ACTIVE) == 0)
8373 ahd_done_with_status(ahd, scb, status);
8433 * scb that follows the one that we remove.
8490 struct scb *scbp;
8491 struct scb *scbp_next;
8821 ahd_handle_scsi_status(struct ahd_softc *ahd, struct scb *scb)
8835 hscb = scb->hscb;
8845 ahd_freeze_devq(ahd, scb);
8846 ahd_freeze_scb(scb);
8854 if ((scb->flags & SCB_SENSE) != 0) {
8859 scb->flags &= ~SCB_SENSE;
8860 ahd_set_transaction_status(scb, CAM_AUTOSENSE_FAIL);
8861 ahd_done(ahd, scb);
8864 ahd_set_transaction_status(scb, CAM_SCSI_STATUS_ERROR);
8865 ahd_set_scsi_status(scb, hscb->shared_data.istatus.scsi_status);
8871 ahd_sync_sense(ahd, scb, BUS_DMASYNC_POSTREAD);
8872 siu = (struct scsi_status_iu_header *)scb->sense_data;
8873 ahd_set_scsi_status(scb, siu->status);
8876 ahd_print_path(ahd, scb);
8878 SCB_GET_TAG(scb), siu->status);
8886 ahd_print_path(ahd, scb);
8915 ahd_set_transaction_status(scb,
8919 scb->flags |= SCB_PKT_SENSE;
8925 ahd_done(ahd, scb);
8939 ahd_print_path(ahd, scb);
8941 SCB_GET_TAG(scb));
8945 if (ahd_perform_autosense(scb) == 0)
8948 ahd_compile_devinfo(&devinfo, SCB_GET_OUR_ID(scb),
8949 SCB_GET_TARGET(ahd, scb),
8950 SCB_GET_LUN(scb),
8951 SCB_GET_CHANNEL(ahd, scb),
8959 sg = scb->sg_list;
8964 ahd_update_residual(ahd, scb);
8967 ahd_print_path(ahd, scb);
8971 scb->sg_count = 0;
8972 sg = ahd_sg_setup(ahd, scb, sg, ahd_get_sense_bufaddr(ahd, scb),
8973 ahd_get_sense_bufsize(ahd, scb),
8978 && SCB_GET_LUN(scb) < 8)
8979 sc->byte2 = SCB_GET_LUN(scb) << 5;
8982 sc->length = ahd_get_sense_bufsize(ahd, scb);
9002 if (ahd_get_residual(scb) == ahd_get_transfer_length(scb)) {
9009 scb->flags &=
9011 scb->flags |= SCB_AUTO_NEGOTIATE;
9014 ahd_setup_data_scb(ahd, scb);
9015 scb->flags |= SCB_SENSE;
9016 ahd_queue_scb(ahd, scb);
9024 ahd_done(ahd, scb);
9030 ahd_handle_scb_status(struct ahd_softc *ahd, struct scb *scb)
9032 if (scb->hscb->shared_data.istatus.scsi_status != 0) {
9033 ahd_handle_scsi_status(ahd, scb);
9035 ahd_calc_residual(ahd, scb);
9036 ahd_done(ahd, scb);
9044 ahd_calc_residual(struct ahd_softc *ahd, struct scb *scb)
9068 hscb = scb->hscb;
9089 resid = ahd_get_transfer_length(scb);
9094 ahd_print_path(ahd, scb);
9096 SCB_GET_TAG(scb));
9097 ahd_freeze_devq(ahd, scb);
9098 ahd_set_transaction_status(scb, CAM_DATA_RUN_ERR);
9099 ahd_freeze_scb(scb);
9112 sg = ahd_sg_bus_to_virt(ahd, scb, resid_sgptr & SG_PTR_MASK);
9127 if ((scb->flags & SCB_SENSE) == 0)
9128 ahd_set_residual(scb, resid);
9130 ahd_set_sense_residual(scb, resid);
9134 ahd_print_path(ahd, scb);
9136 (scb->flags & SCB_SENSE) ? "Sense " : "", resid);
9634 struct scb *scb;
9717 LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) {
9720 cur_col = printk("\n%3d FIFO_USE[0x%x] ", SCB_GET_TAG(scb),
9722 ahd_set_scbptr(ahd, SCB_GET_TAG(scb));
9732 TAILQ_FOREACH(scb, &ahd->scb_data.free_scbs, links.tqe) {
9733 struct scb *list_scb;
9735 list_scb = scb;
9742 LIST_FOREACH(scb, &ahd->scb_data.any_dev_free_scb_list, links.le) {
9745 printk("%d ", SCB_GET_TAG(scb));
9791 struct scb *fifo_scb;
10426 struct scb *scb;
10437 LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) {
10440 ccbh = &scb->io_ctx->ccb_h;
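
The listing above is a raw cross-reference of every line in the driver core that touches struct scb. One idiom it surfaces repeatedly (lines 497-515, 5715, 5854, 9112) is translating a scatter-gather element between the controller's bus-address view and the host's virtual-address view by working with byte offsets into a single DMA-mapped array. What follows is a minimal, standalone sketch of that idiom, not the driver's own code: the names sg_block, sg_bus_to_virt and sg_virt_to_bus are hypothetical, and the sketch omits the one-element bias the real helpers appear to apply (the ahd_sg_size(ahd) adjustment visible at lines 502 and 535).

/* Illustrative sketch only: bus <-> virtual translation for a DMA-mapped
 * scatter-gather array, using byte offsets from a common base. */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct sg_element {
	uint64_t addr;   /* bus address of the data segment */
	uint32_t len;    /* segment length and flag bits */
};

struct sg_block {
	struct sg_element *virt;    /* kernel-virtual base of the array */
	uint64_t           busaddr; /* bus (DMA) base of the same array */
};

/* Convert a bus address reported by the controller into a host pointer. */
static struct sg_element *
sg_bus_to_virt(struct sg_block *blk, uint64_t sg_busaddr)
{
	size_t sg_offset = (size_t)(sg_busaddr - blk->busaddr);

	return (struct sg_element *)((uint8_t *)blk->virt + sg_offset);
}

/* Convert a host pointer back into the bus address the controller expects. */
static uint64_t
sg_virt_to_bus(struct sg_block *blk, struct sg_element *sg)
{
	size_t sg_offset = (size_t)((uint8_t *)sg - (uint8_t *)blk->virt);

	return (blk->busaddr + sg_offset);
}

int main(void)
{
	struct sg_element elems[4] = { { 0, 0 } };
	struct sg_block blk = { .virt = elems, .busaddr = 0x10000000ULL };

	/* Round-trip the third element through both translations. */
	uint64_t bus = sg_virt_to_bus(&blk, &elems[2]);
	struct sg_element *back = sg_bus_to_virt(&blk, bus);

	printf("bus=0x%llx round_trip_ok=%d\n",
	       (unsigned long long)bus, back == &elems[2]);
	return 0;
}

Because both translations reduce to the same byte offset, a value that round-trips through sg_virt_to_bus and sg_bus_to_virt comes back to the original pointer, which is the property the residual-calculation and data-FIFO paths in the listing (lines 1578, 5854, 9112) appear to rely on when they locate an S/G entry from a bus pointer recorded in the hardware SCB.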