Lines Matching refs:io

15 #define scsi_io_printf(io, fmt, ...) \
16 efc_log_debug(io->efct, "[%s]" SCSI_IOFMT fmt, \
17 io->node->display_name, io->instance_index,\
18 io->init_task_tag, io->tgt_task_tag, io->hw_tag, ##__VA_ARGS__)
23 #define scsi_io_trace(io, fmt, ...) \
25 if (EFCT_LOG_ENABLE_SCSI_TRACE(io->efct)) \
26 scsi_io_printf(io, fmt, ##__VA_ARGS__); \
34 struct efct_io *io;
41 io = efct_io_pool_io_alloc(efct->xport->io_pool);
42 if (!io) {
49 kref_init(&io->ref);
50 io->release = _efct_scsi_io_free;
53 io->efct = efct;
54 io->node = node;
58 io->io_type = EFCT_IO_TYPE_IO;
59 io->display_name = "scsi_io";
61 io->cmd_ini = false;
62 io->cmd_tgt = true;
65 INIT_LIST_HEAD(&io->list_entry);
67 list_add(&io->list_entry, &node->active_ios);
71 return io;
77 struct efct_io *io = container_of(arg, struct efct_io, ref);
78 struct efct *efct = io->efct;
79 struct efct_node *node = io->node;
82 scsi_io_trace(io, "freeing io 0x%p %s\n", io, io->display_name);
84 if (io->io_free) {
90 list_del_init(&io->list_entry);
94 io->node = NULL;
95 efct_io_pool_io_free(efct->xport->io_pool, io);
99 efct_scsi_io_free(struct efct_io *io)
101 scsi_io_trace(io, "freeing io 0x%p %s\n", io, io->display_name);
102 WARN_ON(!refcount_read(&io->ref.refcount));
103 kref_put(&io->ref, io->release);
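
The allocation and free paths above follow the usual kref pattern: efct_scsi_io_alloc() initializes the reference count and registers _efct_scsi_io_free() as the release routine, and efct_scsi_io_free() drops the initial reference with kref_put(), which runs the release and returns the io to the pool. Below is a minimal sketch of that ownership pattern; struct demo_io and its helpers are hypothetical stand-ins for the driver's types (the driver returns the io to efct_io_pool rather than kfree(), and manipulates the active list under a lock).

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/slab.h>

/* Hypothetical stand-in for the driver's io object. */
struct demo_io {
	struct kref ref;		/* lifetime of this io */
	struct list_head list_entry;	/* linkage on the owner's active list */
	void (*release)(struct kref *ref);
};

/* Release routine: runs when the last reference is dropped. */
static void demo_io_release(struct kref *ref)
{
	struct demo_io *io = container_of(ref, struct demo_io, ref);

	/* Unlink from the active list, then give the io back. */
	list_del_init(&io->list_entry);
	kfree(io);			/* the driver frees to its io pool instead */
}

static struct demo_io *demo_io_alloc(struct list_head *active_list)
{
	struct demo_io *io = kzalloc(sizeof(*io), GFP_KERNEL);

	if (!io)
		return NULL;

	kref_init(&io->ref);		/* refcount starts at 1 */
	io->release = demo_io_release;
	INIT_LIST_HEAD(&io->list_entry);
	list_add(&io->list_entry, active_list);
	return io;
}

static void demo_io_free(struct demo_io *io)
{
	/* Drop the initial reference; release runs if it was the last one. */
	kref_put(&io->ref, io->release);
}
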
111 struct efct_io *io = app;
116 if (!io || !io->efct) {
121 scsi_io_trace(io, "status x%x ext_status x%x\n", status, ext_status);
123 efct = io->efct;
125 io->transferred += length;
127 if (!io->scsi_tgt_cb) {
133 cb = io->scsi_tgt_cb;
136 io->scsi_tgt_cb = NULL;
142 if (status == 0 && io->auto_resp)
195 cb(io, scsi_stat, flags, io->scsi_tgt_cb_arg);
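
efct_target_io_cb() above copies io->scsi_tgt_cb into a local, clears the field, and only then invokes the callback, so a completion can be delivered at most once even if the handler re-enters the dispatch path. A small illustration of that take-then-clear pattern; all names here are hypothetical.

#include <linux/stddef.h>

typedef void (*demo_scsi_cb_t)(void *cmd, int status, void *arg);

struct demo_cmd {
	demo_scsi_cb_t scsi_tgt_cb;
	void *scsi_tgt_cb_arg;
};

static void demo_cmd_complete(struct demo_cmd *cmd, int status)
{
	demo_scsi_cb_t cb;

	if (!cmd->scsi_tgt_cb)
		return;			/* already completed */

	cb = cmd->scsi_tgt_cb;		/* take the callback ... */
	cmd->scsi_tgt_cb = NULL;	/* ... and clear it before calling */

	cb(cmd, status, cmd->scsi_tgt_cb_arg);
}
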
229 static void efc_log_sgl(struct efct_io *io)
231 struct efct_hw_io *hio = io->hio;
237 scsi_io_trace(io, "def_sgl at 0x%x 0x%08x\n",
244 scsi_io_trace(io, "SGL %2d 0x%08x 0x%08x 0x%08x 0x%08x\n",
256 struct efct_io *io = arg;
258 if (io) {
259 efct_hw_done_t cb = io->hw_cb;
261 if (!io->hw_cb)
264 io->hw_cb = NULL;
265 (cb)(io->hio, 0, SLI4_FC_WCQE_STATUS_DISPATCH_ERROR, 0, io);
270 efct_scsi_io_dispatch_hw_io(struct efct_io *io, struct efct_hw_io *hio)
273 struct efct *efct = io->efct;
278 io->hio = hio;
279 if (io->cmd_tgt)
280 io->tgt_task_tag = hio->indicator;
281 else if (io->cmd_ini)
282 io->init_task_tag = hio->indicator;
283 io->hw_tag = hio->reqtag;
285 hio->eq = io->hw_priv;
288 switch (io->wq_steering) {
300 switch (io->io_type) {
302 rc = efct_scsi_build_sgls(&efct->hw, io->hio,
303 io->sgl, io->sgl_count, io->hio_type);
308 efc_log_sgl(io);
310 if (io->app_id)
311 io->iparam.fcp_tgt.app_id = io->app_id;
313 io->iparam.fcp_tgt.vpi = io->node->vpi;
314 io->iparam.fcp_tgt.rpi = io->node->rpi;
315 io->iparam.fcp_tgt.s_id = io->node->port_fc_id;
316 io->iparam.fcp_tgt.d_id = io->node->node_fc_id;
317 io->iparam.fcp_tgt.xmit_len = io->wire_len;
319 rc = efct_hw_io_send(&io->efct->hw, io->hio_type, io->hio,
320 &io->iparam, io->hw_cb, io);
323 scsi_io_printf(io, "Unknown IO type=%d\n", io->io_type);
331 efct_scsi_io_dispatch_no_hw_io(struct efct_io *io)
335 switch (io->io_type) {
339 hio_to_abort = io->io_to_abort->hio;
354 scsi_io_printf(io, "IO: not active\n");
355 ((efct_hw_done_t)io->hw_cb)(io->hio, 0,
356 SLI4_FC_WCQE_STATUS_SUCCESS, 0, io);
362 scsi_io_printf(io, "aborting\n");
363 rc = efct_hw_io_abort(&io->efct->hw, hio_to_abort,
364 io->send_abts, io->hw_cb, io);
367 efct_hw_done_t cb = io->hw_cb;
371 scsi_io_printf(io, "Failed to abort IO rc=%d\n",
374 cb(io->hio, 0, status, 0, io);
381 scsi_io_printf(io, "Unknown IO type=%d\n", io->io_type);
392 struct efct_io *io = NULL;
400 io = list_first_entry(&xport->io_pending_list, struct efct_io,
402 list_del_init(&io->io_pending_link);
405 if (!io) {
410 if (io->io_type == EFCT_IO_TYPE_ABORT) {
419 list_add(&io->io_pending_link, &xport->io_pending_list);
420 io = NULL;
422 hio->eq = io->hw_priv;
429 if (!io)
438 status = efct_scsi_io_dispatch_no_hw_io(io);
440 status = efct_scsi_io_dispatch_hw_io(io, hio);
449 efct_scsi_check_pending_async_cb, io)) {
454 return io;
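
The block above (efct_scsi_dispatch_pending()) pops the first io off xport->io_pending_list and, if no HW io can be paired with it, puts it back at the front of the list and bails out; note that list_add() takes the new entry first and the list head second. A sketch of that pop/requeue flow under a pending-list lock; the types and the try_get_hw_io() helper are hypothetical stand-ins for the driver's HW io allocator.

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_hw_io;			/* opaque stand-in for the HW io */

struct demo_pend_io {
	struct list_head io_pending_link;
	struct demo_hw_io *hio;
};

struct demo_xport {
	spinlock_t io_pending_lock;
	struct list_head io_pending_list;
};

/* Hypothetical: returns a free HW io, or NULL if none are available. */
struct demo_hw_io *try_get_hw_io(void);

static struct demo_pend_io *demo_dispatch_one_pending(struct demo_xport *xport)
{
	struct demo_pend_io *io;
	unsigned long flags;

	spin_lock_irqsave(&xport->io_pending_lock, flags);

	io = list_first_entry_or_null(&xport->io_pending_list,
				      struct demo_pend_io, io_pending_link);
	if (io) {
		list_del_init(&io->io_pending_link);

		io->hio = try_get_hw_io();
		if (!io->hio) {
			/* No HW io free: requeue at the *front* of the list. */
			list_add(&io->io_pending_link, &xport->io_pending_list);
			io = NULL;
		}
	}

	spin_unlock_irqrestore(&xport->io_pending_lock, flags);
	return io;			/* caller dispatches it outside the lock */
}
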
461 struct efct_io *io = NULL;
490 list_for_each_entry(io, &xport->io_pending_list, io_pending_link) {
491 if (io->io_type == EFCT_IO_TYPE_ABORT && io->io_to_abort->hio) {
496 list_del_init(&io->io_pending_link);
505 if (efct_scsi_io_dispatch_no_hw_io(io)) {
507 efct_scsi_check_pending_async_cb, io)) {
517 efct_scsi_io_dispatch(struct efct_io *io, void *cb)
520 struct efct *efct = io->efct;
524 io->hw_cb = cb;
530 if (io->hio)
531 return efct_scsi_io_dispatch_hw_io(io, io->hio);
545 if (io->low_latency) {
546 INIT_LIST_HEAD(&io->io_pending_link);
547 list_add(&io->io_pending_link, &xport->io_pending_list);
549 INIT_LIST_HEAD(&io->io_pending_link);
550 list_add_tail(&io->io_pending_link,
567 hio = efct_hw_io_alloc(&io->efct->hw);
571 INIT_LIST_HEAD(&io->io_pending_link);
572 list_add_tail(&io->io_pending_link, &xport->io_pending_list);
581 return efct_scsi_io_dispatch_hw_io(io, hio);
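
efct_scsi_io_dispatch() above reuses an already-attached HW io when io->hio is set; otherwise it queues the io, placing low-latency requests at the head of xport->io_pending_list and everything else at the tail, and dispatches immediately if efct_hw_io_alloc() can supply a HW io. A compact sketch of just the head-vs-tail queueing decision, with hypothetical types; the driver does this under the pending-list lock.

#include <linux/list.h>
#include <linux/types.h>

struct demo_req {
	struct list_head io_pending_link;
	bool low_latency;
};

/* Queue a request: low-latency requests jump to the head of the list. */
static void demo_queue_request(struct demo_req *req, struct list_head *pending)
{
	INIT_LIST_HEAD(&req->io_pending_link);

	if (req->low_latency)
		list_add(&req->io_pending_link, pending);	/* head */
	else
		list_add_tail(&req->io_pending_link, pending);	/* tail */
}
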
585 efct_scsi_io_dispatch_abort(struct efct_io *io, void *cb)
587 struct efct *efct = io->efct;
591 io->hw_cb = cb;
601 INIT_LIST_HEAD(&io->io_pending_link);
602 list_add_tail(&io->io_pending_link, &xport->io_pending_list);
614 return efct_scsi_io_dispatch_no_hw_io(io);
618 efct_scsi_xfer_data(struct efct_io *io, u32 flags,
626 io->sgl_count = sgl_count;
628 efct = io->efct;
630 scsi_io_trace(io, "%s wire_len %llu\n",
634 io->hio_type = type;
636 io->scsi_tgt_cb = cb;
637 io->scsi_tgt_cb_arg = arg;
639 residual = io->exp_xfer_len - io->transferred;
640 io->wire_len = (xwire_len < residual) ? xwire_len : residual;
641 residual = (xwire_len - io->wire_len);
643 memset(&io->iparam, 0, sizeof(io->iparam));
644 io->iparam.fcp_tgt.ox_id = io->init_task_tag;
645 io->iparam.fcp_tgt.offset = io->transferred;
646 io->iparam.fcp_tgt.cs_ctl = io->cs_ctl;
647 io->iparam.fcp_tgt.timeout = io->timeout;
653 ((io->transferred + io->wire_len) == io->exp_xfer_len) &&
655 io->iparam.fcp_tgt.flags |= SLI4_IO_AUTO_GOOD_RESPONSE;
656 io->auto_resp = true;
658 io->auto_resp = false;
662 io->xfer_req = io->wire_len;
667 io->transferred += residual;
672 struct efct_scsi_sgl *sgl_ptr = &io->sgl[sgl_count - 1];
683 io->sgl_count--;
690 io->low_latency = (flags & EFCT_SCSI_LOW_LATENCY) != 0;
691 io->wq_steering = (flags & EFCT_SCSI_WQ_STEERING_MASK) >>
693 io->wq_class = (flags & EFCT_SCSI_WQ_CLASS_MASK) >>
707 return efct_scsi_io_dispatch(io, efct_target_io_cb);
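
In efct_scsi_xfer_data() above, the length put on the wire is the requested transfer clamped to the remaining expected transfer (exp_xfer_len minus what has already been transferred), and an automatic good response is armed only when this transfer completes the command. A small sketch of that arithmetic; the field names mirror the listing, but the struct and helper are hypothetical.

#include <linux/types.h>

struct demo_xfer {
	u64 exp_xfer_len;	/* total the initiator expects */
	u64 transferred;	/* completed so far */
	u64 wire_len;		/* length of this transfer */
	bool auto_resp;		/* send FCP_RSP automatically on completion */
};

static void demo_setup_xfer(struct demo_xfer *x, u64 xwire_len, bool want_auto)
{
	u64 residual = x->exp_xfer_len - x->transferred;

	/* Never send more than the initiator still expects. */
	x->wire_len = (xwire_len < residual) ? xwire_len : residual;

	/* Auto good response only if this transfer finishes the command. */
	x->auto_resp = want_auto &&
		       (x->transferred + x->wire_len == x->exp_xfer_len);
}
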
711 efct_scsi_send_rd_data(struct efct_io *io, u32 flags,
715 return efct_scsi_xfer_data(io, flags, sgl, sgl_count,
717 enable_tsend_auto_resp(io->efct), cb, arg);
721 efct_scsi_recv_wr_data(struct efct_io *io, u32 flags,
725 return efct_scsi_xfer_data(io, flags, sgl, sgl_count, len,
727 enable_treceive_auto_resp(io->efct), cb, arg);
731 efct_scsi_send_resp(struct efct_io *io, u32 flags,
744 efct = io->efct;
753 residual = io->exp_xfer_len - io->transferred;
756 io->wire_len = 0;
757 io->hio_type = EFCT_HW_IO_TARGET_RSP;
759 io->scsi_tgt_cb = cb;
760 io->scsi_tgt_cb_arg = arg;
762 memset(&io->iparam, 0, sizeof(io->iparam));
763 io->iparam.fcp_tgt.ox_id = io->init_task_tag;
764 io->iparam.fcp_tgt.offset = 0;
765 io->iparam.fcp_tgt.cs_ctl = io->cs_ctl;
766 io->iparam.fcp_tgt.timeout = io->timeout;
769 io->low_latency = (flags & EFCT_SCSI_LOW_LATENCY) != 0;
770 io->wq_steering = (flags & EFCT_SCSI_WQ_STEERING_MASK) >>
772 io->wq_class = (flags & EFCT_SCSI_WQ_CLASS_MASK) >>
776 struct fcp_resp_with_ext *fcprsp = io->rspbuf.virt;
784 sns_data = (u8 *)io->rspbuf.virt + sizeof(*fcprsp);
790 io->wire_len += sizeof(*fcprsp);
821 io->wire_len += sense_data_length;
824 io->sgl[0].addr = io->rspbuf.phys;
825 io->sgl[0].dif_addr = 0;
826 io->sgl[0].len = io->wire_len;
827 io->sgl_count = 1;
831 io->iparam.fcp_tgt.flags |= SLI4_IO_AUTO_GOOD_RESPONSE;
833 return efct_scsi_io_dispatch(io, efct_target_io_cb);
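
efct_scsi_send_resp() above builds the FCP response (and any sense data) in io->rspbuf and then describes that buffer with a single scatter-gather entry before dispatching it as an EFCT_HW_IO_TARGET_RSP. Below is a sketch of pointing one SGL entry at a DMA response buffer; the struct names are hypothetical stand-ins for the driver's types.

#include <linux/types.h>

/* Hypothetical DMA buffer descriptor (virtual + bus address + size). */
struct demo_dma_buf {
	void *virt;
	dma_addr_t phys;
	size_t size;
};

struct demo_sgl_entry {
	dma_addr_t addr;
	dma_addr_t dif_addr;
	u64 len;
};

struct demo_rsp_io {
	struct demo_dma_buf rspbuf;
	struct demo_sgl_entry sgl[1];
	u32 sgl_count;
	u64 wire_len;
};

/* Describe the already-built response buffer with a single SGL entry. */
static void demo_attach_rsp_sgl(struct demo_rsp_io *io, u64 rsp_len)
{
	io->wire_len = rsp_len;			/* FCP_RSP plus sense data */
	io->sgl[0].addr = io->rspbuf.phys;
	io->sgl[0].dif_addr = 0;
	io->sgl[0].len = io->wire_len;
	io->sgl_count = 1;
}
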
840 struct efct_io *io = app;
844 efct = io->efct;
848 io_error_log(io, "s=%#x x=%#x\n", status, ext_status);
854 if (io->bls_cb) {
855 efct_scsi_io_cb_t bls_cb = io->bls_cb;
856 void *bls_cb_arg = io->bls_cb_arg;
858 io->bls_cb = NULL;
859 io->bls_cb_arg = NULL;
862 bls_cb(io, bls_status, 0, bls_cb_arg);
870 efct_target_send_bls_resp(struct efct_io *io,
873 struct efct_node *node = io->node;
874 struct sli_bls_params *bls = &io->iparam.bls;
880 memset(&io->iparam, 0, sizeof(io->iparam));
881 bls->ox_id = io->init_task_tag;
882 bls->rx_id = io->abort_rx_id;
883 bls->vpi = io->node->vpi;
884 bls->rpi = io->node->rpi;
886 bls->d_id = io->node->node_fc_id;
894 /* generic io fields have already been populated */
897 io->io_type = EFCT_IO_TYPE_BLS_RESP;
898 io->display_name = "bls_rsp";
899 io->hio_type = EFCT_HW_BLS_ACC;
900 io->bls_cb = cb;
901 io->bls_cb_arg = arg;
905 efct_target_bls_resp_cb, io);
912 struct efct_io *io = app;
914 efct_scsi_io_free(io);
919 efct_bls_send_rjt(struct efct_io *io, struct fc_frame_header *hdr)
921 struct efct_node *node = io->node;
922 struct sli_bls_params *bls = &io->iparam.bls;
928 io->io_type = EFCT_IO_TYPE_BLS_RESP;
929 io->display_name = "ba_rjt";
930 io->hio_type = EFCT_HW_BLS_RJT;
931 io->init_task_tag = be16_to_cpu(hdr->fh_ox_id);
934 memset(&io->iparam, 0, sizeof(io->iparam));
937 bls->vpi = io->node->vpi;
938 bls->rpi = io->node->rpi;
940 bls->d_id = io->node->node_fc_id;
948 io);
951 efct_scsi_io_free(io);
952 io = NULL;
954 return io;
958 efct_scsi_send_tmf_resp(struct efct_io *io,
970 io->wire_len = 0;
994 io->hio_type = EFCT_HW_IO_TARGET_RSP;
996 io->scsi_tgt_cb = cb;
997 io->scsi_tgt_cb_arg = arg;
999 if (io->tmf_cmd == EFCT_SCSI_TMF_ABORT_TASK) {
1000 rc = efct_target_send_bls_resp(io, cb, arg);
1005 fcprsp = io->rspbuf.virt;
1016 io->wire_len = sizeof(*fcprsp);
1021 io->sgl[0].addr = io->rspbuf.phys;
1022 io->sgl[0].dif_addr = 0;
1023 io->sgl[0].len = io->wire_len;
1024 io->sgl_count = 1;
1026 memset(&io->iparam, 0, sizeof(io->iparam));
1027 io->iparam.fcp_tgt.ox_id = io->init_task_tag;
1028 io->iparam.fcp_tgt.offset = 0;
1029 io->iparam.fcp_tgt.cs_ctl = io->cs_ctl;
1030 io->iparam.fcp_tgt.timeout = io->timeout;
1032 rc = efct_scsi_io_dispatch(io, efct_target_io_cb);
1041 struct efct_io *io = app;
1047 efct = io->efct;
1049 if (!io->abort_cb)
1052 abort_cb = io->abort_cb;
1053 abort_cb_arg = io->abort_cb_arg;
1055 io->abort_cb = NULL;
1056 io->abort_cb_arg = NULL;
1084 abort_cb(io->io_to_abort, scsi_status, 0, abort_cb_arg);
1088 kref_put(&io->io_to_abort->ref, io->io_to_abort->release);
1090 efct_io_pool_io_free(efct->xport->io_pool, io);
1097 efct_scsi_tgt_abort_io(struct efct_io *io, efct_scsi_io_cb_t cb, void *arg)
1104 efct = io->efct;
1108 if (kref_get_unless_zero(&io->ref) == 0) {
1110 scsi_io_printf(io, "command no longer active\n");
1122 kref_put(&io->ref, io->release);
1129 abort_io->node = io->node;
1134 abort_io->io_to_abort = io;
1142 kref_put(&io->ref, io->release);
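
efct_scsi_tgt_abort_io() above first takes an extra reference on the io to be aborted with kref_get_unless_zero(); if that fails the command has already completed and there is nothing to abort, and if any later step fails the extra reference is dropped again. A minimal sketch of that guard, using a hypothetical issue_abort() step in place of the driver's abort-io allocation and dispatch.

#include <linux/errno.h>
#include <linux/kref.h>

struct demo_abortable {
	struct kref ref;
	void (*release)(struct kref *ref);
};

/* Hypothetical: builds and sends the abort; returns 0 on success. */
int issue_abort(struct demo_abortable *io);

static int demo_abort(struct demo_abortable *io)
{
	int rc;

	/* Only abort commands that are still alive. */
	if (!kref_get_unless_zero(&io->ref))
		return -ENOENT;		/* command no longer active */

	rc = issue_abort(io);
	if (rc)
		kref_put(&io->ref, io->release);	/* undo our reference */

	return rc;
}
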
1147 efct_scsi_io_complete(struct efct_io *io)
1149 if (io->io_free) {
1150 efc_log_debug(io->efct, "completion for non-busy io tag 0x%x\n",
1151 io->tag);
1155 scsi_io_trace(io, "freeing io 0x%p %s\n", io, io->display_name);
1156 kref_put(&io->ref, io->release);