Lines matching refs: scmd (drivers/scsi/scsi_error.c, the Linux SCSI error-handling core). Each entry below is the source line number followed by the matching source line.

60 static int scsi_eh_try_stu(struct scsi_cmnd *scmd);
80 * Schedule SCSI EH without scmd.
149 struct scsi_cmnd *scmd =
151 struct scsi_device *sdev = scmd->device;
158 scmd_printk(KERN_INFO, scmd,
164 scmd_printk(KERN_INFO, scmd,
166 rtn = scsi_try_to_abort_cmd(shost->hostt, scmd);
169 scmd_printk(KERN_INFO, scmd,
175 set_host_byte(scmd, DID_TIME_OUT);
178 scmd_printk(KERN_INFO, scmd,
185 list_del_init(&scmd->eh_entry);
198 if (!scsi_noretry_cmd(scmd) &&
199 scsi_cmd_retry_allowed(scmd) &&
200 scsi_eh_should_retry_cmd(scmd)) {
202 scmd_printk(KERN_WARNING, scmd,
204 scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY);
207 scmd_printk(KERN_WARNING, scmd,
209 scsi_finish_command(scmd);
215 list_del_init(&scmd->eh_entry);
218 scsi_eh_scmd_add(scmd);
223 * @scmd: scmd to abort.
228 scsi_abort_command(struct scsi_cmnd *scmd)
230 struct scsi_device *sdev = scmd->device;
239 if (scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED) {
244 scmd_printk(KERN_INFO, scmd,
246 BUG_ON(delayed_work_pending(&scmd->abort_work));
253 BUG_ON(!list_empty(&scmd->eh_entry));
254 list_add_tail(&scmd->eh_entry, &shost->eh_abort_list);
257 scmd->eh_eflags |= SCSI_EH_ABORT_SCHEDULED;
259 scmd_printk(KERN_INFO, scmd, "abort scheduled\n"));
260 queue_delayed_work(shost->tmf_work_q, &scmd->abort_work, HZ / 100);
266 * @scmd: scmd to run eh on.
272 static void scsi_eh_reset(struct scsi_cmnd *scmd)
274 if (!blk_rq_is_passthrough(scsi_cmd_to_rq(scmd))) {
275 struct scsi_driver *sdrv = scsi_cmd_to_driver(scmd);
277 sdrv->eh_reset(scmd);
283 struct scsi_cmnd *scmd = container_of(head, typeof(*scmd), rcu);
284 struct Scsi_Host *shost = scmd->device->host;
296 * @scmd: scmd to run eh on.
298 void scsi_eh_scmd_add(struct scsi_cmnd *scmd)
300 struct Scsi_Host *shost = scmd->device->host;
305 WARN_ON_ONCE(!test_bit(SCMD_STATE_INFLIGHT, &scmd->state));
315 scsi_eh_reset(scmd);
316 list_add_tail(&scmd->eh_entry, &shost->eh_cmd_q);
322 call_rcu_hurry(&scmd->rcu, scsi_eh_inc_host_failed);
337 struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req);
338 struct Scsi_Host *host = scmd->device->host;
340 trace_scsi_dispatch_cmd_timeout(scmd);
341 scsi_log_completion(scmd, TIMEOUT_ERROR);
343 atomic_inc(&scmd->device->iotmo_cnt);
348 switch (host->hostt->eh_timed_out(scmd)) {
360 * *scmd.
362 if (test_and_set_bit(SCMD_STATE_COMPLETE, &scmd->state))
364 atomic_inc(&scmd->device->iodone_cnt);
365 if (scsi_abort_command(scmd) != SUCCESS) {
366 set_host_byte(scmd, DID_TIME_OUT);
367 scsi_eh_scmd_add(scmd);
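
The timeout path above consults the LLD's optional eh_timed_out() hook first and only falls back to scsi_abort_command()/scsi_eh_scmd_add() when the driver does not claim the command. Below is a minimal sketch of such a hook, assuming the enum scsi_timeout_action return convention of recent kernels; the mydrv_* names and the per-command flag are hypothetical.

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

/* Hypothetical per-command driver data stored after struct scsi_cmnd. */
struct mydrv_cmd_priv {
	bool firmware_owns_cmd;
};

/*
 * Sketch of a .eh_timed_out hook: keep the block-layer timer running while
 * the firmware still owns the command, otherwise let the midlayer run its
 * normal abort / error-handler escalation, as the timeout handler above does.
 */
static enum scsi_timeout_action mydrv_eh_timed_out(struct scsi_cmnd *scmd)
{
	struct mydrv_cmd_priv *priv = scsi_cmd_priv(scmd);

	if (priv->firmware_owns_cmd)
		return SCSI_EH_RESET_TIMER;

	return SCSI_EH_NOT_HANDLED;
}
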
405 struct scsi_cmnd *scmd;
413 list_for_each_entry(scmd, work_q, eh_entry) {
414 if (scmd->device == sdev) {
416 if (scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED)
530 * @scmd: Cmd to have sense checked.
539 enum scsi_disposition scsi_check_sense(struct scsi_cmnd *scmd)
541 struct request *req = scsi_cmd_to_rq(scmd);
542 struct scsi_device *sdev = scmd->device;
545 if (!scsi_command_normalize_sense(scmd, &sshdr))
562 if (scmd->cmnd[0] == TEST_UNIT_READY &&
563 scmd->submitter != SUBMITTED_BY_SCSI_ERROR_HANDLER)
577 if (scmd->sense_buffer[2] & 0xe0)
586 (scmd->sense_buffer[8] == 0x4) &&
587 (scmd->sense_buffer[11] & 0xe0))
611 set_scsi_ml_byte(scmd, SCSIML_STAT_DL_TIMEOUT);
632 if (scmd->device->expecting_cc_ua) {
640 scmd->device->expecting_cc_ua = 0;
649 if (scmd->device->sdev_target->expecting_lun_change &&
662 if (scmd->device->allow_restart &&
675 set_scsi_ml_byte(scmd, SCSIML_STAT_NOSPC);
683 set_scsi_ml_byte(scmd, SCSIML_STAT_TGT_FAILURE);
690 set_scsi_ml_byte(scmd, SCSIML_STAT_MED_ERROR);
696 if (scmd->device->retry_hwerror)
699 set_scsi_ml_byte(scmd, SCSIML_STAT_TGT_FAILURE);
709 set_scsi_ml_byte(scmd, SCSIML_STAT_TGT_FAILURE);
715 set_scsi_ml_byte(scmd, SCSIML_STAT_DL_TIMEOUT);
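
scsi_check_sense() above relies on scsi_command_normalize_sense() to turn the raw sense buffer into a struct scsi_sense_hdr before branching on the sense key and ASC/ASCQ. A minimal sketch of the same decoding step using the exported scsi_normalize_sense() helper; the example_* function name is illustrative only.

#include <linux/types.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>

/*
 * Sketch: classify a raw sense buffer the way callers of
 * scsi_command_normalize_sense() do. Returns true only for a
 * valid UNIT ATTENTION condition.
 */
static bool example_sense_is_unit_attention(const u8 *sense, int sense_len)
{
	struct scsi_sense_hdr sshdr;

	if (!scsi_normalize_sense(sense, sense_len, &sshdr))
		return false;			/* no valid sense data */

	return sshdr.sense_key == UNIT_ATTENTION;
}
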
782 * @scmd: SCSI cmd to examine.
790 static enum scsi_disposition scsi_eh_completed_normally(struct scsi_cmnd *scmd)
796 if (host_byte(scmd->result) == DID_RESET) {
803 return scsi_check_sense(scmd);
805 if (host_byte(scmd->result) != DID_OK)
812 switch (get_status_byte(scmd)) {
814 scsi_handle_queue_ramp_up(scmd->device);
815 if (scmd->sense_buffer && SCSI_SENSE_VALID(scmd))
822 scsi_check_sense(scmd);
827 return scsi_check_sense(scmd);
836 if (scmd->cmnd[0] == TEST_UNIT_READY)
843 scsi_handle_queue_full(scmd->device);
855 * @scmd: Cmd that is done.
857 void scsi_eh_done(struct scsi_cmnd *scmd)
861 SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
862 "%s result: %x\n", __func__, scmd->result));
864 eh_action = scmd->device->host->eh_action;
871 * @scmd: SCSI cmd to send host reset.
873 static enum scsi_disposition scsi_try_host_reset(struct scsi_cmnd *scmd)
877 struct Scsi_Host *host = scmd->device->host;
886 rtn = hostt->eh_host_reset_handler(scmd);
892 scsi_report_bus_reset(host, scmd_channel(scmd));
901 * @scmd: SCSI cmd to send bus reset.
903 static enum scsi_disposition scsi_try_bus_reset(struct scsi_cmnd *scmd)
907 struct Scsi_Host *host = scmd->device->host;
910 SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
916 rtn = hostt->eh_bus_reset_handler(scmd);
922 scsi_report_bus_reset(host, scmd_channel(scmd));
937 * @scmd: SCSI cmd used to send a target reset
945 static enum scsi_disposition scsi_try_target_reset(struct scsi_cmnd *scmd)
949 struct Scsi_Host *host = scmd->device->host;
955 rtn = hostt->eh_target_reset_handler(scmd);
958 __starget_for_each_device(scsi_target(scmd->device), NULL,
968 * @scmd: SCSI cmd used to send BDR
976 static enum scsi_disposition scsi_try_bus_device_reset(struct scsi_cmnd *scmd)
979 const struct scsi_host_template *hostt = scmd->device->host->hostt;
984 rtn = hostt->eh_device_reset_handler(scmd);
986 __scsi_report_device_reset(scmd->device, NULL);
993 * @scmd: SCSI cmd used to send a target reset
1008 scsi_try_to_abort_cmd(const struct scsi_host_template *hostt, struct scsi_cmnd *scmd)
1013 return hostt->eh_abort_handler(scmd);
1016 static void scsi_abort_eh_cmnd(struct scsi_cmnd *scmd)
1018 if (scsi_try_to_abort_cmd(scmd->device->host->hostt, scmd) != SUCCESS)
1019 if (scsi_try_bus_device_reset(scmd) != SUCCESS)
1020 if (scsi_try_target_reset(scmd) != SUCCESS)
1021 if (scsi_try_bus_reset(scmd) != SUCCESS)
1022 scsi_try_host_reset(scmd);
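
scsi_abort_eh_cmnd() escalates through the optional scsi_host_template recovery handlers in a fixed order: abort, device reset, target reset, bus reset, host reset. A skeletal wiring of those hooks is sketched below; every mydrv_* handler is a hypothetical stub, and mandatory template fields such as .queuecommand are omitted.

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

/* Hypothetical stubs: a real LLD would talk to its hardware here and
 * return SUCCESS, FAILED or FAST_IO_FAIL as appropriate. */
static int mydrv_eh_abort(struct scsi_cmnd *scmd)		{ return FAILED; }
static int mydrv_eh_device_reset(struct scsi_cmnd *scmd)	{ return FAILED; }
static int mydrv_eh_target_reset(struct scsi_cmnd *scmd)	{ return FAILED; }
static int mydrv_eh_bus_reset(struct scsi_cmnd *scmd)		{ return FAILED; }
static int mydrv_eh_host_reset(struct scsi_cmnd *scmd)		{ return SUCCESS; }

static const struct scsi_host_template mydrv_template = {
	.name			 = "mydrv",
	/* Tried in this order by the error handler: */
	.eh_abort_handler	 = mydrv_eh_abort,
	.eh_device_reset_handler = mydrv_eh_device_reset,
	.eh_target_reset_handler = mydrv_eh_target_reset,
	.eh_bus_reset_handler	 = mydrv_eh_bus_reset,
	.eh_host_reset_handler	 = mydrv_eh_host_reset,
};
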
1027 * @scmd: SCSI command structure to hijack
1037 * and cmnd buffers to read @sense_bytes into @scmd->sense_buffer.
1039 void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
1042 struct scsi_device *sdev = scmd->device;
1051 ses->cmd_len = scmd->cmd_len;
1052 ses->data_direction = scmd->sc_data_direction;
1053 ses->sdb = scmd->sdb;
1054 ses->result = scmd->result;
1055 ses->resid_len = scmd->resid_len;
1056 ses->underflow = scmd->underflow;
1057 ses->prot_op = scmd->prot_op;
1058 ses->eh_eflags = scmd->eh_eflags;
1060 scmd->prot_op = SCSI_PROT_NORMAL;
1061 scmd->eh_eflags = 0;
1062 memcpy(ses->cmnd, scmd->cmnd, sizeof(ses->cmnd));
1063 memset(scmd->cmnd, 0, sizeof(scmd->cmnd));
1064 memset(&scmd->sdb, 0, sizeof(scmd->sdb));
1065 scmd->result = 0;
1066 scmd->resid_len = 0;
1069 scmd->sdb.length = min_t(unsigned, SCSI_SENSE_BUFFERSIZE,
1071 sg_init_one(&ses->sense_sgl, scmd->sense_buffer,
1072 scmd->sdb.length);
1073 scmd->sdb.table.sgl = &ses->sense_sgl;
1074 scmd->sc_data_direction = DMA_FROM_DEVICE;
1075 scmd->sdb.table.nents = scmd->sdb.table.orig_nents = 1;
1076 scmd->cmnd[0] = REQUEST_SENSE;
1077 scmd->cmnd[4] = scmd->sdb.length;
1078 scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
1080 scmd->sc_data_direction = DMA_NONE;
1082 BUG_ON(cmnd_size > sizeof(scmd->cmnd));
1083 memcpy(scmd->cmnd, cmnd, cmnd_size);
1084 scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
1088 scmd->underflow = 0;
1091 scmd->cmnd[1] = (scmd->cmnd[1] & 0x1f) |
1098 memset(scmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1104 * @scmd: SCSI command structure to restore
1109 void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses)
1114 scmd->cmd_len = ses->cmd_len;
1115 memcpy(scmd->cmnd, ses->cmnd, sizeof(ses->cmnd));
1116 scmd->sc_data_direction = ses->data_direction;
1117 scmd->sdb = ses->sdb;
1118 scmd->result = ses->result;
1119 scmd->resid_len = ses->resid_len;
1120 scmd->underflow = ses->underflow;
1121 scmd->prot_op = ses->prot_op;
1122 scmd->eh_eflags = ses->eh_eflags;
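
scsi_eh_prep_cmnd() and scsi_eh_restore_cmnd() are the exported pair that lets error-handling code reuse a failed command's slot for a recovery CDB and then put everything back, which is exactly how scsi_send_eh_cmnd() below uses them. A hedged sketch of the pattern; issue_and_wait() is a placeholder for whatever driver- or transport-specific mechanism actually runs the command.

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_proto.h>

/*
 * Sketch only: wrap a TEST UNIT READY in the failed command's slot and
 * restore the original command afterwards.
 */
static int example_probe_with_tur(struct scsi_cmnd *scmd,
				  int (*issue_and_wait)(struct scsi_cmnd *))
{
	static unsigned char tur_cdb[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
	struct scsi_eh_save ses;
	int ret;

	scsi_eh_prep_cmnd(scmd, &ses, tur_cdb, sizeof(tur_cdb), 0); /* hijack scmd */
	ret = issue_and_wait(scmd);				    /* placeholder */
	scsi_eh_restore_cmnd(scmd, &ses);			    /* undo hijack */

	return ret;
}
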
1128 * @scmd: SCSI command structure to hijack
1140 static enum scsi_disposition scsi_send_eh_cmnd(struct scsi_cmnd *scmd,
1143 struct scsi_device *sdev = scmd->device;
1152 scsi_eh_prep_cmnd(scmd, &ses, cmnd, cmnd_size, sense_bytes);
1155 scsi_log_send(scmd);
1156 scmd->submitter = SUBMITTED_BY_SCSI_ERROR_HANDLER;
1157 scmd->flags |= SCMD_LAST;
1176 rtn = shost->hostt->queuecommand(shost, scmd);
1183 scsi_eh_restore_cmnd(scmd, &ses);
1199 scsi_log_completion(scmd, rtn);
1201 SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
1215 rtn = scsi_eh_completed_normally(scmd);
1216 SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
1232 scsi_abort_eh_cmnd(scmd);
1236 scsi_eh_restore_cmnd(scmd, &ses);
1243 * @scmd: SCSI cmd for request sense.
1250 static enum scsi_disposition scsi_request_sense(struct scsi_cmnd *scmd)
1252 return scsi_send_eh_cmnd(scmd, NULL, 0, scmd->device->eh_timeout, ~0);
1256 scsi_eh_action(struct scsi_cmnd *scmd, enum scsi_disposition rtn)
1258 if (!blk_rq_is_passthrough(scsi_cmd_to_rq(scmd))) {
1259 struct scsi_driver *sdrv = scsi_cmd_to_driver(scmd);
1261 rtn = sdrv->eh_action(scmd, rtn);
1268 * @scmd: Original SCSI cmd that eh has finished.
1278 void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, struct list_head *done_q)
1280 list_move_tail(&scmd->eh_entry, done_q);
1307 struct scsi_cmnd *scmd, *next;
1315 list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
1316 if ((scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED) ||
1317 SCSI_SENSE_VALID(scmd))
1320 shost = scmd->device->host;
1323 scmd_printk(KERN_INFO, scmd,
1328 if (!scsi_status_is_check_condition(scmd->result))
1337 SCSI_LOG_ERROR_RECOVERY(2, scmd_printk(KERN_INFO, scmd,
1340 rtn = scsi_request_sense(scmd);
1344 SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
1345 "sense requested, result %x\n", scmd->result));
1346 SCSI_LOG_ERROR_RECOVERY(3, scsi_print_sense(scmd));
1348 rtn = scsi_decide_disposition(scmd);
1363 if (scmd->allowed == SCSI_CMD_RETRIES_NO_LIMIT)
1364 scmd->retries = scmd->allowed = 1;
1366 scmd->retries = scmd->allowed;
1370 scsi_eh_finish_cmd(scmd, done_q);
1379 * @scmd: &scsi_cmnd to send TUR
1384 static int scsi_eh_tur(struct scsi_cmnd *scmd)
1391 rtn = scsi_send_eh_cmnd(scmd, tur_command, 6,
1392 scmd->device->eh_timeout, 0);
1394 SCSI_LOG_ERROR_RECOVERY(3, scmd_printk(KERN_INFO, scmd,
1426 struct scsi_cmnd *scmd, *next;
1431 scmd = list_entry(cmd_list->next, struct scsi_cmnd, eh_entry);
1432 sdev = scmd->device;
1446 finish_cmds = !scsi_device_online(scmd->device) ||
1447 (try_stu && !scsi_eh_try_stu(scmd) &&
1448 !scsi_eh_tur(scmd)) ||
1449 !scsi_eh_tur(scmd);
1451 list_for_each_entry_safe(scmd, next, cmd_list, eh_entry)
1452 if (scmd->device == sdev) {
1455 scsi_eh_action(scmd, SUCCESS) == SUCCESS))
1456 scsi_eh_finish_cmd(scmd, done_q);
1458 list_move_tail(&scmd->eh_entry, work_q);
1466 * @scmd: &scsi_cmnd to send START_UNIT
1471 static int scsi_eh_try_stu(struct scsi_cmnd *scmd)
1475 if (scmd->device->allow_restart) {
1480 rtn = scsi_send_eh_cmnd(scmd, stu_command, 6,
1481 scmd->device->eh_timeout, 0);
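
For reference, the two 6-byte probe CDBs that scsi_eh_tur() and scsi_eh_try_stu() pass to scsi_send_eh_cmnd() are a plain TEST UNIT READY and a START STOP UNIT with the START bit set; the listing omits the local arrays, so the sketch below reconstructs them (the example_* helper is hypothetical).

#include <linux/string.h>
#include <scsi/scsi_proto.h>

/* Sketch: build the error handler's probe CDBs. */
static void example_fill_probe_cdbs(unsigned char tur[6], unsigned char stu[6])
{
	memset(tur, 0, 6);
	tur[0] = TEST_UNIT_READY;	/* opcode 0x00, all other bytes zero */

	memset(stu, 0, 6);
	stu[0] = START_STOP;		/* opcode 0x1b */
	stu[4] = 1;			/* START = 1: spin the unit up */
}
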
1504 struct scsi_cmnd *scmd, *stu_scmd, *next;
1517 list_for_each_entry(scmd, work_q, eh_entry)
1518 if (scmd->device == sdev && SCSI_SENSE_VALID(scmd) &&
1519 scsi_check_sense(scmd) == FAILED) {
1520 stu_scmd = scmd;
1535 list_for_each_entry_safe(scmd, next,
1537 if (scmd->device == sdev &&
1538 scsi_eh_action(scmd, SUCCESS) == SUCCESS)
1539 scsi_eh_finish_cmd(scmd, done_q);
1570 struct scsi_cmnd *scmd, *bdr_scmd, *next;
1584 list_for_each_entry(scmd, work_q, eh_entry)
1585 if (scmd->device == sdev) {
1586 bdr_scmd = scmd;
1601 list_for_each_entry_safe(scmd, next,
1603 if (scmd->device == sdev &&
1604 scsi_eh_action(scmd, rtn) != FAILED)
1605 scsi_eh_finish_cmd(scmd,
1638 struct scsi_cmnd *next, *scmd;
1653 scmd = list_entry(tmp_list.next, struct scsi_cmnd, eh_entry);
1654 id = scmd_id(scmd);
1660 rtn = scsi_try_target_reset(scmd);
1667 list_for_each_entry_safe(scmd, next, &tmp_list, eh_entry) {
1668 if (scmd_id(scmd) != id)
1672 list_move_tail(&scmd->eh_entry, &check_list);
1674 scsi_eh_finish_cmd(scmd, done_q);
1677 list_move(&scmd->eh_entry, work_q);
1694 struct scsi_cmnd *scmd, *chan_scmd, *next;
1717 list_for_each_entry(scmd, work_q, eh_entry) {
1718 if (channel == scmd_channel(scmd)) {
1719 chan_scmd = scmd;
1736 list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
1737 if (channel == scmd_channel(scmd)) {
1739 scsi_eh_finish_cmd(scmd,
1742 list_move_tail(&scmd->eh_entry,
1766 struct scsi_cmnd *scmd, *next;
1771 scmd = list_entry(work_q->next,
1779 rtn = scsi_try_host_reset(scmd);
1783 list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
1784 scsi_eh_finish_cmd(scmd, done_q);
1804 struct scsi_cmnd *scmd, *next;
1807 list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
1808 sdev_printk(KERN_INFO, scmd->device, "Device offlined - "
1810 sdev = scmd->device;
1816 scsi_eh_finish_cmd(scmd, done_q);
1823 * @scmd: SCSI cmd to examine.
1825 bool scsi_noretry_cmd(struct scsi_cmnd *scmd)
1827 struct request *req = scsi_cmd_to_rq(scmd);
1829 switch (host_byte(scmd->result)) {
1839 if (get_status_byte(scmd) == SAM_STAT_RESERVATION_CONFLICT)
1847 if (scsi_ml_byte(scmd->result) == SCSIML_STAT_DL_TIMEOUT)
1850 if (!scsi_status_is_check_condition(scmd->result))
1866 * @scmd: SCSI cmd to examine.
1878 enum scsi_disposition scsi_decide_disposition(struct scsi_cmnd *scmd)
1886 if (!scsi_device_online(scmd->device)) {
1887 SCSI_LOG_ERROR_RECOVERY(5, scmd_printk(KERN_INFO, scmd,
1896 switch (host_byte(scmd->result)) {
1903 scmd->result &= 0xff00ffff;
1911 if (scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED) {
1912 set_host_byte(scmd, DID_TIME_OUT);
1958 if (get_status_byte(scmd) == SAM_STAT_RESERVATION_CONFLICT)
1974 if ((scmd->cmnd[0] == TEST_UNIT_READY ||
1975 scmd->cmnd[0] == INQUIRY)) {
1989 switch (get_status_byte(scmd)) {
1991 scsi_handle_queue_full(scmd->device);
2006 if (scmd->cmnd[0] == REPORT_LUNS)
2007 scmd->device->sdev_target->expecting_lun_change = 0;
2008 scsi_handle_queue_ramp_up(scmd->device);
2009 if (scmd->sense_buffer && SCSI_SENSE_VALID(scmd))
2016 scsi_check_sense(scmd);
2023 rtn = scsi_check_sense(scmd);
2041 sdev_printk(KERN_INFO, scmd->device,
2043 set_scsi_ml_byte(scmd, SCSIML_STAT_RESV_CONFLICT);
2054 if (scsi_cmd_retry_allowed(scmd) && !scsi_noretry_cmd(scmd)) {
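
scsi_noretry_cmd() and scsi_decide_disposition() key off the packed scmd->result word: the SAM status in the low byte, the midlayer byte above it, and the host byte above that. Below is a small sketch of reading those fields with the accessors from scsi_cmnd.h; the example_* name is hypothetical and the check is deliberately simplified.

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_proto.h>

/*
 * Sketch: report whether a completed command failed at the transport level
 * (host byte) or at the device (SAM status byte), mirroring the first checks
 * made by scsi_decide_disposition().
 */
static bool example_cmd_failed(struct scsi_cmnd *scmd)
{
	if (host_byte(scmd->result) != DID_OK)
		return true;			/* transport-level failure */

	return get_status_byte(scmd) == SAM_STAT_CHECK_CONDITION ||
	       get_status_byte(scmd) == SAM_STAT_TASK_SET_FULL;
}
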
2084 struct scsi_cmnd *scmd;
2090 scmd = blk_mq_rq_to_pdu(req);
2092 scmd->cmnd[0] = ALLOW_MEDIUM_REMOVAL;
2093 scmd->cmnd[1] = 0;
2094 scmd->cmnd[2] = 0;
2095 scmd->cmnd[3] = 0;
2096 scmd->cmnd[4] = SCSI_REMOVAL_PREVENT;
2097 scmd->cmnd[5] = 0;
2098 scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
2099 scmd->allowed = 5;
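
The cluster above (from scsi_eh_lock_door()) builds a PREVENT ALLOW MEDIUM REMOVAL CDB (opcode 0x1e) in a pre-allocated request. For comparison, the same CDB can be issued from userspace through the SG_IO ioctl; this is only a sketch, and /dev/sr0 is an example device node.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

int main(void)
{
	unsigned char cdb[6] = { 0x1e, 0, 0, 0, 1, 0 };	/* PREVENT = 1 */
	unsigned char sense[32] = { 0 };
	struct sg_io_hdr hdr;
	int fd = open("/dev/sr0", O_RDWR | O_NONBLOCK);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&hdr, 0, sizeof(hdr));
	hdr.interface_id = 'S';
	hdr.dxfer_direction = SG_DXFER_NONE;	/* no data phase */
	hdr.cmdp = cdb;
	hdr.cmd_len = sizeof(cdb);
	hdr.sbp = sense;
	hdr.mx_sb_len = sizeof(sense);
	hdr.timeout = 10000;			/* milliseconds */

	if (ioctl(fd, SG_IO, &hdr) < 0)
		perror("SG_IO");

	close(fd);
	return 0;
}
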
2198 struct scsi_cmnd *scmd, *next;
2200 list_for_each_entry_safe(scmd, next, done_q, eh_entry) {
2201 struct scsi_device *sdev = scmd->device;
2203 list_del_init(&scmd->eh_entry);
2204 if (scsi_device_online(sdev) && !scsi_noretry_cmd(scmd) &&
2205 scsi_cmd_retry_allowed(scmd) &&
2206 scsi_eh_should_retry_cmd(scmd)) {
2208 scmd_printk(KERN_INFO, scmd,
2211 scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY);
2216 * scsi_eh_get_sense), scmd->result is already
2219 if (!scmd->result &&
2220 !(scmd->flags & SCMD_FORCE_EH_SUCCESS))
2221 scmd->result |= (DID_TIME_OUT << 16);
2223 scmd_printk(KERN_INFO, scmd,
2226 scsi_finish_command(scmd);
2440 struct scsi_cmnd *scmd;
2464 scmd = (struct scsi_cmnd *)(rq + 1);
2465 scsi_init_command(dev, scmd);
2467 scmd->submitter = SUBMITTED_BY_SCSI_RESET_IOCTL;
2468 scmd->flags |= SCMD_LAST;
2469 memset(&scmd->sdb, 0, sizeof(scmd->sdb));
2471 scmd->cmd_len = 0;
2473 scmd->sc_data_direction = DMA_BIDIRECTIONAL;
2484 rtn = scsi_try_bus_device_reset(scmd);
2489 rtn = scsi_try_target_reset(scmd);
2494 rtn = scsi_try_bus_reset(scmd);
2499 rtn = scsi_try_host_reset(scmd);
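
The final cluster is scsi_ioctl_reset(), which backs the SG_SCSI_RESET ioctl and escalates through the same scsi_try_*_reset() helpers shown above. A userspace sketch of the corresponding request follows; the device path is only an example.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

int main(void)
{
	int level = SG_SCSI_RESET_DEVICE;	/* or _TARGET, _BUS, _HOST */
	int fd = open("/dev/sg0", O_RDWR);	/* example device node */

	if (fd < 0 || ioctl(fd, SG_SCSI_RESET, &level) < 0) {
		perror("SG_SCSI_RESET");
		if (fd >= 0)
			close(fd);
		return 1;
	}

	close(fd);
	return 0;
}
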