Lines Matching defs:se_cmd

The matches below are the places where the iSCSI target code touches the struct se_cmd descriptor embedded in each command (cmd->se_cmd); each entry is prefixed with its line number in the source file.

836 	const bool do_put = cmd->se_cmd.se_tfo != NULL;
864 * Perform the kref_put now if se_cmd has already been setup by
869 target_put_sess_cmd(&cmd->se_cmd);
905 if (ent >= cmd->se_cmd.t_data_nents) {
910 sg = &cmd->se_cmd.t_data_sg[ent];
940 for_each_sg(cmd->se_cmd.t_data_sg, sg,
941 cmd->se_cmd.t_data_nents, i) {
991 u32 iov_count = max(1UL, DIV_ROUND_UP(cmd->se_cmd.data_length, PAGE_SIZE));
1189 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
1191 __target_init_cmd(&cmd->se_cmd, &iscsi_ops,
1202 target_get_sess_cmd(&cmd->se_cmd, true);
1204 cmd->se_cmd.tag = (__force u32)cmd->init_task_tag;
1205 cmd->sense_reason = target_cmd_init_cdb(&cmd->se_cmd, cdb,
1220 cmd->sense_reason = transport_lookup_cmd_lun(&cmd->se_cmd);
1224 cmd->sense_reason = target_cmd_parse_cdb(&cmd->se_cmd);
1271 target_put_sess_cmd(&cmd->se_cmd);
1287 target_put_sess_cmd(&cmd->se_cmd);
1302 cmd->sense_reason = transport_generic_new_cmd(&cmd->se_cmd);
1321 u32 length = min(cmd->se_cmd.data_length - cmd->write_data_done,
1325 cmd->se_cmd.data_length, cmd->write_data_done,
1348 target_put_sess_cmd(&cmd->se_cmd);
1486 struct se_cmd *se_cmd;
1509 se_cmd = &cmd->se_cmd;
1512 if ((be32_to_cpu(hdr->offset) + payload_length) > cmd->se_cmd.data_length) {
1515 cmd->se_cmd.data_length);
1525 transport_send_check_condition_and_sense(&cmd->se_cmd,
1536 WARN_ON(se_cmd->t_state != TRANSPORT_WRITE_PENDING);
1537 if (!(se_cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE))
1562 if (se_cmd->transport_state & CMD_T_ABORTED) {
1628 payload_length = min_t(u32, cmd->se_cmd.data_length,
1717 target_execute_cmd(&cmd->se_cmd);
2049 __target_init_cmd(&cmd->se_cmd, &iscsi_ops,
2055 target_get_sess_cmd(&cmd->se_cmd, true);
2070 ret = core_tmr_alloc_req(&cmd->se_cmd, cmd->tmr_req, tcm_function,
2076 cmd->tmr_req->se_tmr_req = cmd->se_cmd.se_tmr_req;
2085 se_tmr = cmd->se_cmd.se_tmr_req;
2091 ret = transport_lookup_tmr_lun(&cmd->se_cmd);
2154 target_put_sess_cmd(&cmd->se_cmd);
2168 return transport_generic_handle_tmr(&cmd->se_cmd);
2179 target_put_sess_cmd(&cmd->se_cmd);
2638 BUG_ON(cmd->write_data_done > cmd->se_cmd.data_length);
2639 rx_size = min(cmd->se_cmd.data_length - cmd->write_data_done, length);
2723 if (cmd->write_data_done == cmd->se_cmd.data_length) {
2824 if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
2826 hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
2827 } else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
2829 hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
2834 int_to_scsilun(cmd->se_cmd.orig_fe_lun,
2880 if ((datain.offset + datain.length) > cmd->se_cmd.data_length) {
2884 cmd->se_cmd.data_length);
2894 (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE))
2914 eodr = (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ?
3120 int_to_scsilun(cmd->se_cmd.orig_fe_lun,
3188 if (new_data_end > cmd->se_cmd.data_length)
3189 xfer_len = cmd->se_cmd.data_length - offset;
3198 if (new_data_end > cmd->se_cmd.data_length)
3199 xfer_len = cmd->se_cmd.data_length - offset;
3211 if (cmd->r2t_offset == cmd->se_cmd.data_length)
3256 if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
3258 hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
3259 } else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
3261 hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
3264 hdr->cmd_status = cmd->se_cmd.scsi_status;
3274 cmd->init_task_tag, cmd->stat_sn, cmd->se_cmd.scsi_status,
3275 cmd->se_cmd.scsi_status, conn->cid);
3291 if (cmd->se_cmd.sense_buffer &&
3292 ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
3293 (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
3294 put_unaligned_be16(cmd->se_cmd.scsi_sense_length, cmd->sense_buffer);
3295 cmd->se_cmd.scsi_sense_length += sizeof (__be16);
3297 padding = -(cmd->se_cmd.scsi_sense_length) & 3;
3298 hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
3300 data_buf_len = cmd->se_cmd.scsi_sense_length + padding;
3304 cmd->se_cmd.scsi_sense_length, 0, padding);
3311 cmd->se_cmd.scsi_sense_length);
3339 struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
4214 struct se_cmd *se_cmd = &cmd->se_cmd;
4216 if (!se_cmd->se_tfo)
4219 spin_lock_irq(&se_cmd->t_state_lock);
4220 if (se_cmd->transport_state & CMD_T_ABORTED) {
4221 if (!(se_cmd->transport_state & CMD_T_TAS))
4230 se_cmd->transport_state |= CMD_T_FABRIC_STOP;
4233 if (cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) {
4238 spin_unlock_irq(&se_cmd->t_state_lock);
4239 target_complete_cmd(&cmd->se_cmd, SAM_STAT_TASK_ABORTED);
4242 spin_unlock_irq(&se_cmd->t_state_lock);