Lines Matching refs:dcb in /netgear-WNDR4500v2-V1.0.0.60_1.0.38/src/linux/linux-2.6/drivers/scsi/

229 	struct DeviceCtlBlk *dcb;
269 struct list_head list; /* next/prev ptrs for the dcb list */
301 struct list_head dcb_list; /* head of going dcb list */
377 static u8 start_scsi(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
381 static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
389 static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
391 static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
394 struct DeviceCtlBlk *dcb);
716 /* find supplied dcb and then select the next one */
735 static void free_tag(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
738 dcb->tag_mask &= ~(1 << srb->tag_number); /* free tag mask */
777 static void srb_waiting_insert(struct DeviceCtlBlk *dcb,
781 srb->cmd->pid, dcb->target_id, dcb->target_lun, srb);
782 list_add(&srb->list, &dcb->srb_waiting_list);
786 static void srb_waiting_append(struct DeviceCtlBlk *dcb,
790 srb->cmd->pid, dcb->target_id, dcb->target_lun, srb);
791 list_add_tail(&srb->list, &dcb->srb_waiting_list);
795 static void srb_going_append(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
798 srb->cmd->pid, dcb->target_id, dcb->target_lun, srb);
799 list_add_tail(&srb->list, &dcb->srb_going_list);
803 static void srb_going_remove(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
808 srb->cmd->pid, dcb->target_id, dcb->target_lun, srb);
810 list_for_each_entry_safe(i, tmp, &dcb->srb_going_list, list)
818 static void srb_waiting_remove(struct DeviceCtlBlk *dcb,
824 srb->cmd->pid, dcb->target_id, dcb->target_lun, srb);
826 list_for_each_entry_safe(i, tmp, &dcb->srb_waiting_list, list)
834 static void srb_going_to_waiting_move(struct DeviceCtlBlk *dcb,
839 srb->cmd->pid, dcb->target_id, dcb->target_lun, srb);
840 list_move(&srb->list, &dcb->srb_waiting_list);
844 static void srb_waiting_to_going_move(struct DeviceCtlBlk *dcb,
849 srb->cmd->pid, dcb->target_id, dcb->target_lun, srb);
850 list_move(&srb->list, &dcb->srb_going_list);
876 struct DeviceCtlBlk *dcb;
891 * Find the starting dcb. Need to find it again in the list
894 list_for_each_entry(dcb, dcb_list_head, list)
895 if (dcb == acb->dcb_run_robin) {
896 start = dcb;
907 * Loop over the dcb, but we start somewhere (potentially) in
920 /* move to next dcb */
960 struct DeviceCtlBlk *dcb = srb->dcb;
962 if (dcb->max_command <= list_size(&dcb->srb_going_list) ||
965 srb_waiting_append(dcb, srb);
970 if (!start_scsi(acb, dcb, srb))
971 srb_going_append(dcb, srb);
973 srb_waiting_insert(dcb, srb);
979 static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
984 cmd->pid, dcb->target_id, dcb->target_lun);
986 srb->dcb = dcb;
1014 srb->sg_count = pci_map_sg(dcb->acb->dev, sl, cmd->use_sg,
1040 if (dcb->sync_period & WIDE_SYNC &&
1046 srb->sg_bus_addr = pci_map_single(dcb->acb->dev,
1057 pci_map_single(dcb->acb->dev, cmd->request_buffer,
1061 if (dcb->sync_period & WIDE_SYNC && srb->total_xfer_length % 2)
1097 struct DeviceCtlBlk *dcb;
1122 dcb = find_dcb(acb, cmd->device->id, cmd->device->lun);
1123 if (!dcb) {
1145 build_srb(cmd, dcb, srb);
1147 if (!list_empty(&dcb->srb_waiting_list)) {
1149 srb_waiting_append(dcb, srb);
1203 struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
1208 if (!dcb)
1209 dcb = acb->active_dcb;
1210 if (!srb && dcb)
1211 srb = dcb->active_srb;
1284 struct DeviceCtlBlk *dcb;
1288 list_for_each_entry(dcb, &acb->dcb_list, list) {
1291 dcb->sync_mode &= ~(SYNC_NEGO_DONE + WIDE_NEGO_DONE);
1292 dcb->sync_period = 0;
1293 dcb->sync_offset = 0;
1295 dcb->dev_mode = eeprom->target[dcb->target_id].cfg0;
1296 period_index = eeprom->target[dcb->target_id].period & 0x07;
1297 dcb->min_nego_period = clock_period[period_index];
1298 if (!(dcb->dev_mode & NTC_DO_WIDE_NEGO)
1300 dcb->sync_mode &= ~WIDE_NEGO_ENABLE;
1380 struct DeviceCtlBlk *dcb;
1385 dcb = find_dcb(acb, cmd->device->id, cmd->device->lun);
1386 if (!dcb) {
1391 srb = find_cmd(cmd, &dcb->srb_waiting_list);
1393 srb_waiting_remove(dcb, srb);
1396 free_tag(dcb, srb);
1402 srb = find_cmd(cmd, &dcb->srb_going_list);
1413 static void build_sdtr(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
1424 if (!(dcb->dev_mode & NTC_DO_SYNC_NEGO)) {
1425 dcb->sync_offset = 0;
1426 dcb->min_nego_period = 200 >> 2;
1427 } else if (dcb->sync_offset == 0)
1428 dcb->sync_offset = SYNC_NEGO_OFFSET;
1433 *ptr++ = dcb->min_nego_period; /* Transfer period (in 4ns) */
1434 *ptr++ = dcb->sync_offset; /* Transfer period (max. REQ/ACK dist) */
1441 static void build_wdtr(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
1444 u8 wide = ((dcb->dev_mode & NTC_DO_WIDE_NEGO) &
1465 static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb,
1472 srb->cmd->pid, dcb->target_id, dcb->target_lun, srb);
1518 DC395x_write8(acb, TRM_S1040_SCSI_TARGETID, dcb->target_id);
1519 DC395x_write8(acb, TRM_S1040_SCSI_SYNC, dcb->sync_period);
1520 DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, dcb->sync_offset);
1523 identify_message = dcb->identify_msg;
1532 && (((dcb->sync_mode & WIDE_NEGO_ENABLE)
1533 && !(dcb->sync_mode & WIDE_NEGO_DONE))
1534 || ((dcb->sync_mode & SYNC_NEGO_ENABLE)
1535 && !(dcb->sync_mode & SYNC_NEGO_DONE)))
1536 && (dcb->target_lun == 0)) {
1542 if (dcb->sync_mode & WIDE_NEGO_ENABLE
1543 && dcb->inquiry7 & SCSI_INQ_WBUS16) {
1544 build_wdtr(acb, dcb, srb);
1548 if (dcb->sync_mode & SYNC_NEGO_ENABLE
1549 && dcb->inquiry7 & SCSI_INQ_SYNC) {
1550 build_sdtr(acb, dcb, srb);
1553 if (dcb->sync_mode & WIDE_NEGO_ENABLE
1554 && dcb->inquiry7 & SCSI_INQ_WBUS16) {
1555 build_wdtr(acb, dcb, srb);
1566 if ((dcb->sync_mode & EN_TAG_QUEUEING)
1571 while (tag_mask & dcb->tag_mask
1572 && tag_number <= dcb->max_command) {
1576 if (tag_number >= dcb->max_command) {
1589 dcb->tag_mask |= tag_mask;
1602 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, (dcb->target_lun << 5));
1623 srb->cmd->pid, dcb->target_id, dcb->target_lun);
1625 free_tag(dcb, srb);
1635 dcb->active_srb = srb;
1636 acb->active_dcb = dcb;
1675 struct DeviceCtlBlk *dcb;
1716 dcb = acb->active_dcb;
1717 if (!dcb) {
1723 srb = dcb->active_srb;
1724 if (dcb->flag & ABORT_DEV_) {
1861 struct DeviceCtlBlk *dcb;
1876 dcb = acb->active_dcb;
1878 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, (dcb->target_lun << 5));
1942 pci_dma_sync_single_for_device(srb->dcb->
2004 struct DeviceCtlBlk *dcb = srb->dcb;
2050 if (dcb->sync_period & WIDE_SYNC)
2057 (dcb->sync_period & WIDE_SYNC) ? "words" : "bytes",
2080 if (d_left_counter == 1 && dcb->sync_period & WIDE_SYNC
2108 (dcb->sync_period & WIDE_SYNC) ? 2 : 1;
2190 << ((srb->dcb->sync_period & WIDE_SYNC) ? 1 :
2197 (srb->dcb->sync_period & WIDE_SYNC) ? "words" : "bytes",
2214 (srb->dcb->sync_period & WIDE_SYNC) ?
2217 if (srb->dcb->sync_period & WIDE_SYNC)
2262 if (fc == 0x40 && (srb->dcb->sync_period & WIDE_SYNC)) {
2323 struct DeviceCtlBlk *dcb = srb->dcb;
2346 dump_register_info(acb, dcb, srb);
2406 if (srb->dcb->sync_period & WIDE_SYNC)
2442 if (srb->dcb->sync_period & WIDE_SYNC) {
2469 if (dcb->sync_period & WIDE_SYNC) {
2552 srb->dcb->target_id, srb->dcb->target_lun);
2557 struct DeviceCtlBlk *dcb, u8 tag)
2564 if (!(dcb->tag_mask & (1 << tag)))
2567 dcb->tag_mask, tag);
2569 if (list_empty(&dcb->srb_going_list))
2571 list_for_each_entry(i, &dcb->srb_going_list, list) {
2581 srb->cmd->pid, srb->dcb->target_id, srb->dcb->target_lun);
2582 if (dcb->flag & ABORT_DEV_) {
2590 memcpy(srb->msgin_buf, dcb->active_srb->msgin_buf, acb->msg_len);
2591 srb->state |= dcb->active_srb->state;
2593 dcb->active_srb = srb;
2600 dcb->active_srb = srb;
2610 struct DeviceCtlBlk *dcb)
2612 DC395x_write8(acb, TRM_S1040_SCSI_TARGETID, dcb->target_id);
2613 DC395x_write8(acb, TRM_S1040_SCSI_SYNC, dcb->sync_period);
2614 DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, dcb->sync_offset);
2615 set_xfer_rate(acb, dcb);
2622 struct DeviceCtlBlk *dcb = srb->dcb;
2624 dcb->target_id, dcb->target_lun);
2626 dcb->sync_mode &= ~(SYNC_NEGO_ENABLE);
2627 dcb->sync_mode |= SYNC_NEGO_DONE;
2628 /*dcb->sync_period &= 0; */
2629 dcb->sync_offset = 0;
2630 dcb->min_nego_period = 200 >> 2; /* 200ns <=> 5 MHz */
2632 reprogram_regs(acb, dcb);
2633 if ((dcb->sync_mode & WIDE_NEGO_ENABLE)
2634 && !(dcb->sync_mode & WIDE_NEGO_DONE)) {
2635 build_wdtr(acb, dcb, srb);
2645 struct DeviceCtlBlk *dcb = srb->dcb;
2650 dcb->target_id, srb->msgin_buf[3] << 2,
2657 if (!(dcb->dev_mode & NTC_DO_SYNC_NEGO))
2658 dcb->sync_offset = 0;
2659 else if (dcb->sync_offset == 0)
2660 dcb->sync_offset = srb->msgin_buf[4];
2661 if (srb->msgin_buf[4] > dcb->sync_offset)
2662 srb->msgin_buf[4] = dcb->sync_offset;
2664 dcb->sync_offset = srb->msgin_buf[4];
2667 || dcb->min_nego_period >
2675 dcb->sync_period &= 0xf0;
2676 dcb->sync_period |= ALT_SYNC | bval;
2677 dcb->min_nego_period = srb->msgin_buf[3];
2679 if (dcb->sync_period & WIDE_SYNC)
2686 dcb->target_id, (fact == 500) ? "Wide16" : "",
2687 dcb->min_nego_period << 2, dcb->sync_offset,
2688 (fact / dcb->min_nego_period),
2689 ((fact % dcb->min_nego_period) * 10 +
2690 dcb->min_nego_period / 2) / dcb->min_nego_period);
2700 dcb->sync_mode |= SYNC_NEGO_DONE;
2702 if ((dcb->sync_mode & WIDE_NEGO_ENABLE)
2703 && !(dcb->sync_mode & WIDE_NEGO_DONE)) {
2704 build_wdtr(acb, dcb, srb);
2710 dcb->sync_mode |= SYNC_NEGO_DONE | SYNC_NEGO_ENABLE;
2712 reprogram_regs(acb, dcb);
2719 struct DeviceCtlBlk *dcb = srb->dcb;
2720 dprintkdbg(DBG_1, "msgin_set_nowide: <%02i>\n", dcb->target_id);
2722 dcb->sync_period &= ~WIDE_SYNC;
2723 dcb->sync_mode &= ~(WIDE_NEGO_ENABLE);
2724 dcb->sync_mode |= WIDE_NEGO_DONE;
2726 reprogram_regs(acb, dcb);
2727 if ((dcb->sync_mode & SYNC_NEGO_ENABLE)
2728 && !(dcb->sync_mode & SYNC_NEGO_DONE)) {
2729 build_sdtr(acb, dcb, srb);
2737 struct DeviceCtlBlk *dcb = srb->dcb;
2738 u8 wide = (dcb->dev_mode & NTC_DO_WIDE_NEGO
2740 dprintkdbg(DBG_1, "msgin_set_wide: <%02i>\n", dcb->target_id);
2748 dcb->target_id);
2755 dcb->sync_mode |= (WIDE_NEGO_ENABLE | WIDE_NEGO_DONE);
2757 dcb->sync_period |= WIDE_SYNC;
2759 dcb->sync_period &= ~WIDE_SYNC;
2761 /*dcb->sync_mode &= ~(WIDE_NEGO_ENABLE+WIDE_NEGO_DONE); */
2764 (8 << srb->msgin_buf[3]), dcb->target_id);
2765 reprogram_regs(acb, dcb);
2766 if ((dcb->sync_mode & SYNC_NEGO_ENABLE)
2767 && !(dcb->sync_mode & SYNC_NEGO_DONE)) {
2768 build_sdtr(acb, dcb, srb);
2790 struct DeviceCtlBlk *dcb = acb->active_dcb;
2805 msgin_qtag(acb, dcb,
2869 srb->cmd->pid, dcb->target_id,
2870 dcb->target_lun);
2871 dcb->flag |= ABORT_DEV_;
2880 srb->msgout_buf[0] = dcb->identify_msg;
2926 static void set_xfer_rate(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb)
2931 if (dcb->identify_msg & 0x07)
2935 current_sync_offset = dcb->sync_offset;
2940 if (i->target_id == dcb->target_id) {
2941 i->sync_period = dcb->sync_period;
2942 i->sync_offset = dcb->sync_offset;
2943 i->sync_mode = dcb->sync_mode;
2944 i->min_nego_period = dcb->min_nego_period;
2951 struct DeviceCtlBlk *dcb = acb->active_dcb;
2954 if (!dcb) {
2965 srb = dcb->active_srb;
2975 dcb->target_id, dcb->target_lun);
2979 dcb->flag &= ~ABORT_DEV_;
3005 dcb->target_id, dcb->target_lun);
3012 free_tag(dcb, srb);
3013 srb_going_to_waiting_move(dcb, srb);
3036 free_tag(dcb, srb);
3037 dcb->active_srb = NULL;
3039 srb_done(acb, dcb, srb);
3047 struct DeviceCtlBlk *dcb = acb->active_dcb;
3058 if (dcb) { /* Arbitration lost but Reselection win */
3059 srb = dcb->active_srb;
3070 srb->cmd->pid, dcb->target_id,
3071 dcb->target_lun, rsel_tar_lun_id,
3077 free_tag(dcb, srb);
3078 srb_going_to_waiting_move(dcb, srb);
3090 dcb = find_dcb(acb, id, lun);
3091 if (!dcb) {
3097 acb->active_dcb = dcb;
3099 if (!(dcb->dev_mode & NTC_DO_DISCONNECT))
3102 dcb->target_id, dcb->target_lun);
3104 if (dcb->sync_mode & EN_TAG_QUEUEING /*&& !arblostflag */) {
3106 dcb->active_srb = srb;
3109 srb = dcb->active_srb;
3116 dcb->target_id, dcb->target_lun);
3119 dcb->active_srb = srb;
3122 if (dcb->flag & ABORT_DEV_) {
3133 dprintkdbg(DBG_0, "reselect: select <%i>\n", dcb->target_id);
3135 DC395x_write8(acb, TRM_S1040_SCSI_TARGETID, dcb->target_id); /* target ID */
3136 DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, dcb->sync_offset); /* offset */
3137 DC395x_write8(acb, TRM_S1040_SCSI_SYNC, dcb->sync_period); /* sync period, wide */
3154 static void disc_tagq_set(struct DeviceCtlBlk *dcb, struct ScsiInqData *ptr)
3159 && (dcb->dev_mode & NTC_DO_TAG_QUEUEING) &&
3160 /*(dcb->dev_mode & NTC_DO_DISCONNECT) */
3161 /* ((dcb->dev_type == TYPE_DISK)
3162 || (dcb->dev_type == TYPE_MOD)) && */
3164 if (dcb->max_command == 1)
3165 dcb->max_command =
3166 dcb->acb->tag_max_num;
3167 dcb->sync_mode |= EN_TAG_QUEUEING;
3168 /*dcb->tag_mask = 0; */
3170 dcb->max_command = 1;
3175 static void add_dev(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
3179 dcb->dev_type = bval1;
3181 disc_tagq_set(dcb, ptr);
3236 static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
3264 cmd->cmnd[0], dcb->target_id,
3265 dcb->target_lun, status, acb->scan_devices);
3270 cmd->cmnd[0], dcb->target_id,
3271 dcb->target_lun, status, acb->scan_devices);
3276 cmd->cmnd[0], dcb->target_id,
3277 dcb->target_lun, status, acb->scan_devices);
3282 cmd->cmnd[0], dcb->target_id,
3283 dcb->target_lun, status, acb->scan_devices);
3288 cmd->cmnd[0], dcb->target_id,
3289 dcb->target_lun, status, acb->scan_devices);
3331 request_sense(acb, dcb, srb);
3334 tempcnt = (u8)list_size(&dcb->srb_going_list);
3336 dcb->target_id, dcb->target_lun, tempcnt);
3339 dcb->max_command = tempcnt;
3340 free_tag(dcb, srb);
3341 srb_going_to_waiting_move(dcb, srb);
3409 dcb->inquiry7 = ptr->Flags;
3416 if (!dcb->init_tcq_flag) {
3417 add_dev(acb, dcb, ptr);
3418 dcb->init_tcq_flag = 1;
3441 srb_going_remove(dcb, srb);
3461 struct DeviceCtlBlk *dcb;
3464 list_for_each_entry(dcb, &acb->dcb_list, list) {
3469 list_for_each_entry_safe(srb, tmp, &dcb->srb_going_list, list) {
3478 srb_going_remove(dcb, srb);
3479 free_tag(dcb, srb);
3490 if (!list_empty(&dcb->srb_going_list))
3493 dcb->target_id, dcb->target_lun);
3494 if (dcb->tag_mask)
3497 dcb->target_id, dcb->target_lun,
3498 dcb->tag_mask);
3501 list_for_each_entry_safe(srb, tmp, &dcb->srb_waiting_list, list) {
3508 srb_waiting_remove(dcb, srb);
3519 if (!list_empty(&dcb->srb_waiting_list))
3521 list_size(&dcb->srb_waiting_list), dcb->target_id,
3522 dcb->target_lun);
3523 dcb->flag &= ~ABORT_DEV_;
3611 static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
3644 if (start_scsi(acb, dcb, srb)) { /* Should only happen, if sb. else grabs the bus */
3647 srb->cmd->pid, dcb->target_id, dcb->target_lun);
3648 srb_going_to_waiting_move(dcb, srb);
3672 struct DeviceCtlBlk *dcb;
3674 dcb = kmalloc(sizeof(struct DeviceCtlBlk), GFP_ATOMIC);
3676 if (!dcb)
3678 dcb->acb = NULL;
3679 INIT_LIST_HEAD(&dcb->srb_going_list);
3680 INIT_LIST_HEAD(&dcb->srb_waiting_list);
3681 dcb->active_srb = NULL;
3682 dcb->tag_mask = 0;
3683 dcb->max_command = 1;
3684 dcb->target_id = target;
3685 dcb->target_lun = lun;
3687 dcb->identify_msg =
3688 IDENTIFY(dcb->dev_mode & NTC_DO_DISCONNECT, lun);
3690 dcb->identify_msg = IDENTIFY(0, lun);
3692 dcb->dev_mode = eeprom->target[target].cfg0;
3693 dcb->inquiry7 = 0;
3694 dcb->sync_mode = 0;
3695 dcb->min_nego_period = clock_period[period_index];
3696 dcb->sync_period = 0;
3697 dcb->sync_offset = 0;
3698 dcb->flag = 0;
3701 if ((dcb->dev_mode & NTC_DO_WIDE_NEGO)
3703 dcb->sync_mode |= WIDE_NEGO_ENABLE;
3706 if (dcb->dev_mode & NTC_DO_SYNC_NEGO)
3708 dcb->sync_mode |= SYNC_NEGO_ENABLE;
3710 if (dcb->target_lun != 0) {
3714 if (p->target_id == dcb->target_id)
3718 dcb->target_id, dcb->target_lun,
3720 dcb->sync_mode = p->sync_mode;
3721 dcb->sync_period = p->sync_period;
3722 dcb->min_nego_period = p->min_nego_period;
3723 dcb->sync_offset = p->sync_offset;
3724 dcb->inquiry7 = p->inquiry7;
3726 return dcb;
3734 * @dcb: A newly created and intialised device instance to add.
3737 struct DeviceCtlBlk *dcb)
3740 dcb->acb = acb;
3744 acb->dcb_run_robin = dcb;
3747 list_add_tail(&dcb->list, &acb->dcb_list);
3750 acb->dcb_map[dcb->target_id] |= (1 << dcb->target_lun);
3751 acb->children[dcb->target_id][dcb->target_lun] = dcb;
3762 * @dcb: A device that has previously been added to the adapter.
3765 struct DeviceCtlBlk *dcb)
3770 dcb->target_id, dcb->target_lun);
3773 if (acb->active_dcb == dcb)
3775 if (acb->dcb_run_robin == dcb)
3776 acb->dcb_run_robin = dcb_get_next(&acb->dcb_list, dcb);
3780 if (dcb == i) {
3786 acb->dcb_map[dcb->target_id] &= ~(1 << dcb->target_lun);
3787 acb->children[dcb->target_id][dcb->target_lun] = NULL;
3788 dcb->acb = NULL;
3797 * @dcb: A device that has previously been added to the adapter.
3800 struct DeviceCtlBlk *dcb)
3802 if (list_size(&dcb->srb_going_list) > 1) {
3805 dcb->target_id, dcb->target_lun,
3806 list_size(&dcb->srb_going_list));
3809 adapter_remove_device(acb, dcb);
3810 kfree(dcb);
3822 struct DeviceCtlBlk *dcb;
3827 list_for_each_entry_safe(dcb, tmp, &acb->dcb_list, list)
3828 adapter_remove_and_free_device(acb, dcb);
3842 struct DeviceCtlBlk *dcb;
3844 dcb = device_alloc(acb, scsi_device->id, scsi_device->lun);
3845 if (!dcb)
3847 adapter_add_device(acb, dcb);
3862 struct DeviceCtlBlk *dcb = find_dcb(acb, scsi_device->id, scsi_device->lun);
3863 if (dcb)
3864 adapter_remove_and_free_device(acb, dcb);
4484 "size{acb=0x%04x dcb=0x%04x srb=0x%04x}\n",
4568 struct DeviceCtlBlk *dcb;
4613 list_for_each_entry(dcb, &acb->dcb_list, list) {
4615 SPRINTF("%02i %02i %02i ", dev, dcb->target_id,
4616 dcb->target_lun);
4617 YESNO(dcb->dev_mode & NTC_DO_PARITY_CHK);
4618 YESNO(dcb->sync_offset);
4619 YESNO(dcb->sync_period & WIDE_SYNC);
4620 YESNO(dcb->dev_mode & NTC_DO_DISCONNECT);
4621 YESNO(dcb->dev_mode & NTC_DO_SEND_START);
4622 YESNO(dcb->sync_mode & EN_TAG_QUEUEING);
4623 nego_period = clock_period[dcb->sync_period & 0x07] << 2;
4624 if (dcb->sync_offset)
4627 SPRINTF(" (%03i ns)", (dcb->min_nego_period << 2));
4629 if (dcb->sync_offset & 0x0f) {
4634 (dcb->sync_offset & 0x0f));
4639 SPRINTF(" %02i\n", dcb->max_command);
4648 list_for_each_entry(dcb, &acb->dcb_list, list) {
4650 if (!list_empty(&dcb->srb_waiting_list))
4652 dcb->target_id, dcb->target_lun,
4653 list_size(&dcb->srb_waiting_list));
4654 list_for_each_entry(srb, &dcb->srb_waiting_list, list)
4656 if (!list_empty(&dcb->srb_going_list))
4658 dcb->target_id, dcb->target_lun,
4659 list_size(&dcb->srb_going_list));
4660 list_for_each_entry(srb, &dcb->srb_going_list, list)
4662 if (!list_empty(&dcb->srb_waiting_list) || !list_empty(&dcb->srb_going_list))
4668 list_for_each_entry(dcb, &acb->dcb_list, list) {
4669 SPRINTF("%p -> ", dcb);