Lines matching references to ioctx in the SRP target driver (ib_srpt.c); each fragment is prefixed with its line number in that file.

666 struct srpt_ioctx *ioctx;
668 ioctx = kzalloc(ioctx_size, GFP_KERNEL);
669 if (!ioctx)
672 ioctx->buf = kmem_cache_alloc(buf_cache, GFP_KERNEL);
673 if (!ioctx->buf)
676 ioctx->dma = ib_dma_map_single(sdev->device, ioctx->buf,
678 if (ib_dma_mapping_error(sdev->device, ioctx->dma))
681 return ioctx;
684 kmem_cache_free(buf_cache, ioctx->buf);
686 kfree(ioctx);
694 * @ioctx: I/O context pointer.
698 static void srpt_free_ioctx(struct srpt_device *sdev, struct srpt_ioctx *ioctx,
702 if (!ioctx)
705 ib_dma_unmap_single(sdev->device, ioctx->dma,
707 kmem_cache_free(buf_cache, ioctx->buf);
708 kfree(ioctx);
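
Lines 666-708 are the I/O context constructor and destructor: allocate the context structure, allocate its data buffer from a kmem cache, DMA-map the buffer, and unwind in reverse order on failure (or on free). A minimal sketch of that pattern against the driver's private types; the function signature, the dma_size and dir parameters, and the goto labels are assumptions filled in around the listed lines.

	/*
	 * Sketch only: the signature, "dma_size"/"dir" and the error labels are
	 * assumed; the kzalloc/kmem_cache_alloc/ib_dma_map_single sequence and
	 * the unwind calls come from the listing.
	 */
	static struct srpt_ioctx *srpt_alloc_ioctx(struct srpt_device *sdev,
			int ioctx_size, struct kmem_cache *buf_cache,
			int dma_size, enum dma_data_direction dir)
	{
		struct srpt_ioctx *ioctx;

		ioctx = kzalloc(ioctx_size, GFP_KERNEL);
		if (!ioctx)
			goto err;

		ioctx->buf = kmem_cache_alloc(buf_cache, GFP_KERNEL);
		if (!ioctx->buf)
			goto err_free_ioctx;

		ioctx->dma = ib_dma_map_single(sdev->device, ioctx->buf,
					       dma_size, dir);
		if (ib_dma_mapping_error(sdev->device, ioctx->dma))
			goto err_free_buf;

		return ioctx;

	err_free_buf:
		kmem_cache_free(buf_cache, ioctx->buf);
	err_free_ioctx:
		kfree(ioctx);
	err:
		return NULL;
	}

The destructor at 698-708 reverses the same three steps: ib_dma_unmap_single(), kmem_cache_free(), kfree().
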
779 * @ioctx: Send I/O context.
785 static enum srpt_command_state srpt_set_cmd_state(struct srpt_send_ioctx *ioctx,
790 previous = ioctx->state;
792 ioctx->state = new;
799 * @ioctx: Send I/O context.
805 static bool srpt_test_and_set_cmd_state(struct srpt_send_ioctx *ioctx,
811 WARN_ON(!ioctx);
815 previous = ioctx->state;
817 ioctx->state = new;
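
Lines 779-817 belong to the two command-state helpers: an unconditional setter that returns the previous state, and a compare-and-set variant used when the transition must only happen from a specific state. A sketch of both, assuming the setter refuses to overwrite SRPT_STATE_DONE and that the compare-and-set takes an expected "old" state; neither detail is visible in the listed lines.

	/* Sketch: the SRPT_STATE_DONE guard and the "old" parameter are assumed. */
	static enum srpt_command_state srpt_set_cmd_state(struct srpt_send_ioctx *ioctx,
							  enum srpt_command_state new)
	{
		enum srpt_command_state previous;

		previous = ioctx->state;
		if (previous != SRPT_STATE_DONE)
			ioctx->state = new;

		return previous;
	}

	static bool srpt_test_and_set_cmd_state(struct srpt_send_ioctx *ioctx,
						enum srpt_command_state old,
						enum srpt_command_state new)
	{
		enum srpt_command_state previous;

		WARN_ON(!ioctx);

		previous = ioctx->state;
		if (previous == old)
			ioctx->state = new;

		return previous == old;
	}
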
826 * @ioctx: Receive I/O context pointer.
829 struct srpt_recv_ioctx *ioctx)
835 list.addr = ioctx->ioctx.dma + ioctx->ioctx.offset;
839 ioctx->ioctx.cqe.done = srpt_recv_done;
840 wr.wr_cqe = &ioctx->ioctx.cqe;
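
Lines 826-840 assemble a receive work request whose completion routine is srpt_recv_done(), addressed at the context's DMA-mapped buffer. A sketch of how such a request is posted; the SGE length and lkey, the use_srq branch and the ib_post_srq_recv()/ib_post_recv() calls are assumptions, while the address and wr_cqe setup come from the listing.

	static int srpt_post_recv(struct srpt_device *sdev, struct srpt_rdma_ch *ch,
				  struct srpt_recv_ioctx *ioctx)
	{
		struct ib_sge list;
		struct ib_recv_wr wr;

		list.addr = ioctx->ioctx.dma + ioctx->ioctx.offset;
		list.length = srp_max_req_size;		/* assumed */
		list.lkey = sdev->lkey;			/* assumed */

		ioctx->ioctx.cqe.done = srpt_recv_done;
		wr.wr_cqe = &ioctx->ioctx.cqe;
		wr.next = NULL;
		wr.sg_list = &list;
		wr.num_sge = 1;

		if (sdev->use_srq)			/* assumed */
			return ib_post_srq_recv(sdev->srq, &wr, NULL);
		return ib_post_recv(ch->qp, &wr, NULL);
	}
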
895 static int srpt_alloc_rw_ctxs(struct srpt_send_ioctx *ioctx,
899 enum dma_data_direction dir = target_reverse_dma_direction(&ioctx->cmd);
900 struct srpt_rdma_ch *ch = ioctx->ch;
906 ioctx->rw_ctxs = &ioctx->s_rw_ctx;
908 ioctx->rw_ctxs = kmalloc_array(nbufs, sizeof(*ioctx->rw_ctxs),
910 if (!ioctx->rw_ctxs)
914 for (i = ioctx->n_rw_ctx; i < nbufs; i++, db++) {
915 struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];
932 ioctx->n_rdma += ret;
933 ioctx->n_rw_ctx++;
952 struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];
958 if (ioctx->rw_ctxs != &ioctx->s_rw_ctx)
959 kfree(ioctx->rw_ctxs);
964 struct srpt_send_ioctx *ioctx)
966 enum dma_data_direction dir = target_reverse_dma_direction(&ioctx->cmd);
969 for (i = 0; i < ioctx->n_rw_ctx; i++) {
970 struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];
977 if (ioctx->rw_ctxs != &ioctx->s_rw_ctx)
978 kfree(ioctx->rw_ctxs);
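
Lines 895-978 cover the per-command RDMA read/write context bookkeeping: a command with a single buffer descriptor uses the embedded s_rw_ctx, multi-descriptor commands get a kmalloc_array() of contexts, and every initialized context adds to n_rdma (send-queue budget) and n_rw_ctx (cleanup count). The teardown side is short enough to sketch in full; the rdma_rw_ctx_destroy() and target_free_sgl() calls, and the ctx->rw/sg/nents field names, are assumptions about the elided lines inside the loop.

	static void srpt_free_rw_ctxs(struct srpt_rdma_ch *ch,
				      struct srpt_send_ioctx *ioctx)
	{
		enum dma_data_direction dir = target_reverse_dma_direction(&ioctx->cmd);
		int i;

		for (i = 0; i < ioctx->n_rw_ctx; i++) {
			struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];

			/* Assumed teardown of the R/W context and its scatterlist. */
			rdma_rw_ctx_destroy(&ctx->rw, ch->qp, ch->sport->port,
					    ctx->sg, ctx->nents, dir);
			target_free_sgl(ctx->sg, ctx->nents);
		}

		/* Only free the array form; the embedded context is part of ioctx. */
		if (ioctx->rw_ctxs != &ioctx->s_rw_ctx)
			kfree(ioctx->rw_ctxs);
	}
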
1002 * @ioctx: I/O context that will be used for responding to the initiator.
1013 * This function initializes ioctx->nrbuf and ioctx->r_bufs.
1019 struct srpt_send_ioctx *ioctx,
1042 ioctx->cmd.data_direction = *dir;
1049 return srpt_alloc_rw_ctxs(ioctx, db, 1, sg, sg_cnt);
1067 return srpt_alloc_rw_ctxs(ioctx, idb->desc_list, nbufs,
1094 ioctx->recv_ioctx = recv_ioctx;
1099 sg_init_one(&ioctx->imm_sg, data, len);
1100 *sg = &ioctx->imm_sg;
1226 struct srpt_send_ioctx *ioctx;
1235 ioctx = ch->ioctx_ring[tag];
1236 BUG_ON(ioctx->ch != ch);
1237 ioctx->state = SRPT_STATE_NEW;
1238 WARN_ON_ONCE(ioctx->recv_ioctx);
1239 ioctx->n_rdma = 0;
1240 ioctx->n_rw_ctx = 0;
1241 ioctx->queue_status_only = false;
1246 memset(&ioctx->cmd, 0, sizeof(ioctx->cmd));
1247 memset(&ioctx->sense_data, 0, sizeof(ioctx->sense_data));
1248 ioctx->cmd.map_tag = tag;
1249 ioctx->cmd.map_cpu = cpu;
1251 return ioctx;
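
Lines 1226-1251 reset a send context taken from the per-channel ring before it carries a new SCSI command. A sketch of the surrounding function, assuming the tag is allocated from the target-core session tag pool with sbitmap_queue_get(); the listing only shows the ring indexing and the field resets.

	static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
	{
		struct srpt_send_ioctx *ioctx;
		unsigned int cpu;
		int tag;

		tag = sbitmap_queue_get(&ch->sess->sess_tag_pool, &cpu);	/* assumed */
		if (tag < 0)
			return NULL;

		ioctx = ch->ioctx_ring[tag];
		BUG_ON(ioctx->ch != ch);
		ioctx->state = SRPT_STATE_NEW;
		WARN_ON_ONCE(ioctx->recv_ioctx);
		ioctx->n_rdma = 0;
		ioctx->n_rw_ctx = 0;
		ioctx->queue_status_only = false;

		/* Clear the target-core command before reuse. */
		memset(&ioctx->cmd, 0, sizeof(ioctx->cmd));
		memset(&ioctx->sense_data, 0, sizeof(ioctx->sense_data));
		ioctx->cmd.map_tag = tag;
		ioctx->cmd.map_cpu = cpu;

		return ioctx;
	}
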
1256 * @ioctx: I/O context associated with the SCSI command.
1258 static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
1262 BUG_ON(!ioctx);
1269 state = ioctx->state;
1272 ioctx->state = SRPT_STATE_DATA_IN;
1276 ioctx->state = SRPT_STATE_DONE;
1285 ioctx->state, ioctx->cmd.tag);
1298 pr_debug("tag %#llx: RDMA read error\n", ioctx->cmd.tag);
1299 transport_generic_request_failure(&ioctx->cmd,
1307 transport_generic_free_cmd(&ioctx->cmd, 0);
1310 transport_generic_free_cmd(&ioctx->cmd, 0);
1333 struct srpt_send_ioctx *ioctx =
1336 WARN_ON(ioctx->n_rdma <= 0);
1337 atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
1338 ioctx->n_rdma = 0;
1341 pr_info("RDMA_READ for ioctx 0x%p failed with status %d\n",
1342 ioctx, wc->status);
1343 srpt_abort_cmd(ioctx);
1347 if (srpt_test_and_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA,
1349 target_execute_cmd(&ioctx->cmd);
1352 __LINE__, ioctx->state);
1358 * @ioctx: I/O context associated with the SRP_CMD request. The response will
1359 * be built in the buffer ioctx->buf points at and hence this function will
1371 struct srpt_send_ioctx *ioctx, u64 tag,
1374 struct se_cmd *cmd = &ioctx->cmd;
1386 srp_rsp = ioctx->ioctx.buf;
1389 sense_data = ioctx->sense_data;
1390 sense_data_len = ioctx->cmd.scsi_sense_length;
1391 WARN_ON(sense_data_len > sizeof(ioctx->sense_data));
1442 * @ioctx: I/O context in which the SRP_RSP response will be built.
1453 struct srpt_send_ioctx *ioctx,
1463 srp_rsp = ioctx->ioctx.buf;
1481 struct srpt_send_ioctx *ioctx = container_of(cmd,
1484 return target_put_sess_cmd(&ioctx->cmd);
1507 srp_cmd = recv_ioctx->ioctx.buf + recv_ioctx->ioctx.offset;
1598 srp_tsk = recv_ioctx->ioctx.buf + recv_ioctx->ioctx.offset;
1639 recv_ioctx->ioctx.dma,
1640 recv_ioctx->ioctx.offset + srp_max_req_size,
1643 srp_cmd = recv_ioctx->ioctx.buf + recv_ioctx->ioctx.offset;
1698 struct srpt_recv_ioctx *ioctx =
1699 container_of(wc->wr_cqe, struct srpt_recv_ioctx, ioctx.cqe);
1707 ioctx->byte_len = wc->byte_len;
1708 srpt_handle_new_iu(ch, ioctx);
1710 pr_info_ratelimited("receiving failed for ioctx %p with status %d\n",
1711 ioctx, wc->status);
1759 struct srpt_send_ioctx *ioctx =
1760 container_of(wc->wr_cqe, struct srpt_send_ioctx, ioctx.cqe);
1763 state = srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
1768 atomic_add(1 + ioctx->n_rdma, &ch->sq_wr_avail);
1771 pr_info("sending response for ioctx 0x%p failed with status %d\n",
1772 ioctx, wc->status);
1775 transport_generic_free_cmd(&ioctx->cmd, 0);
1778 ioctx->ioctx.index);
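
Lines 1333-1352, 1698-1711 and 1759-1778 are completion handlers that all recover their I/O context from the work completion via container_of() on the embedded ib_cqe. A sketch of the send-completion case; the cq->cq_context channel lookup and the exact message on the late-completion branch are assumptions, while the state change, the sq_wr_avail credit return and the transport_generic_free_cmd() call come from the listing.

	static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc)
	{
		struct srpt_rdma_ch *ch = cq->cq_context;	/* assumed */
		struct srpt_send_ioctx *ioctx =
			container_of(wc->wr_cqe, struct srpt_send_ioctx, ioctx.cqe);
		enum srpt_command_state state;

		state = srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);

		/* One send-queue slot for the SEND plus those used for RDMA writes. */
		atomic_add(1 + ioctx->n_rdma, &ch->sq_wr_avail);

		if (wc->status != IB_WC_SUCCESS)
			pr_info("sending response for ioctx 0x%p failed with status %d\n",
				ioctx, wc->status);

		if (state != SRPT_STATE_DONE)
			transport_generic_free_cmd(&ioctx->cmd, 0);
		else
			pr_err("IB completion received too late for ioctx %u\n",
			       ioctx->ioctx.index);	/* message text assumed */
	}
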
2747 struct srpt_send_ioctx *ioctx =
2749 struct srpt_rdma_ch *ch = ioctx->ch;
2751 struct ib_cqe *cqe = &ioctx->rdma_cqe;
2755 if (ioctx->recv_ioctx) {
2756 srpt_set_cmd_state(ioctx, SRPT_STATE_DATA_IN);
2757 target_execute_cmd(&ioctx->cmd);
2761 new_state = srpt_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA);
2764 if (atomic_sub_return(ioctx->n_rdma, &ch->sq_wr_avail) < 0) {
2766 __func__, ioctx->n_rdma);
2772 for (i = ioctx->n_rw_ctx - 1; i >= 0; i--) {
2773 struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];
2783 __func__, ret, ioctx->n_rdma,
2790 atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
2814 struct srpt_send_ioctx *ioctx =
2816 struct srpt_rdma_ch *ch = ioctx->ch;
2824 state = ioctx->state;
2828 ioctx->state = SRPT_STATE_CMD_RSP_SENT;
2831 ioctx->state = SRPT_STATE_MGMT_RSP_SENT;
2835 ch, ioctx->ioctx.index, ioctx->state);
2843 if (ioctx->cmd.data_direction == DMA_FROM_DEVICE &&
2844 ioctx->cmd.data_length &&
2845 !ioctx->queue_status_only) {
2846 for (i = ioctx->n_rw_ctx - 1; i >= 0; i--) {
2847 struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];
2855 resp_len = srpt_build_cmd_rsp(ch, ioctx, ioctx->cmd.tag,
2860 resp_len = srpt_build_tskmgmt_rsp(ch, ioctx, srp_tm_status,
2861 ioctx->cmd.tag);
2866 if (unlikely(atomic_sub_return(1 + ioctx->n_rdma,
2869 __func__, ioctx->n_rdma);
2873 ib_dma_sync_single_for_device(sdev->device, ioctx->ioctx.dma, resp_len,
2876 sge.addr = ioctx->ioctx.dma;
2880 ioctx->ioctx.cqe.done = srpt_send_done;
2882 send_wr.wr_cqe = &ioctx->ioctx.cqe;
2891 __func__, ioctx->cmd.tag, ret);
2898 atomic_add(1 + ioctx->n_rdma, &ch->sq_wr_avail);
2900 srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
2901 target_put_sess_cmd(&ioctx->cmd);
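
Lines 2814-2901 build the SRP response IU (srpt_build_cmd_rsp() or srpt_build_tskmgmt_rsp()), reserve send-queue slots, and hand the IU to the HCA. A sketch of the posting step, factored into a hypothetical helper for illustration; the SGE length/lkey, the IB_WR_SEND opcode and the signaling flag are assumptions, while the DMA sync, the wr_cqe hookup and the failure unwind mirror the listed lines.

	static int srpt_post_response(struct srpt_device *sdev, struct srpt_rdma_ch *ch,
				      struct srpt_send_ioctx *ioctx, int resp_len)
	{
		struct ib_sge sge;
		struct ib_send_wr send_wr;
		int ret;

		ib_dma_sync_single_for_device(sdev->device, ioctx->ioctx.dma, resp_len,
					      DMA_TO_DEVICE);

		sge.addr = ioctx->ioctx.dma;
		sge.length = resp_len;			/* assumed */
		sge.lkey = sdev->lkey;			/* assumed */

		ioctx->ioctx.cqe.done = srpt_send_done;

		memset(&send_wr, 0, sizeof(send_wr));
		send_wr.wr_cqe = &ioctx->ioctx.cqe;
		send_wr.sg_list = &sge;
		send_wr.num_sge = 1;
		send_wr.opcode = IB_WR_SEND;		/* assumed */
		send_wr.send_flags = IB_SEND_SIGNALED;	/* assumed */

		ret = ib_post_send(ch->qp, &send_wr, NULL);
		if (ret) {
			pr_err("%s: sending response for tag %llu failed (%d)\n",
			       __func__, ioctx->cmd.tag, ret);
			/* Give back the reserved send-queue slots, as in the listing. */
			atomic_add(1 + ioctx->n_rdma, &ch->sq_wr_avail);
			srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
			target_put_sess_cmd(&ioctx->cmd);
		}
		return ret;
	}
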
2923 struct srpt_send_ioctx *ioctx = container_of(cmd,
2925 struct srpt_rdma_ch *ch = ioctx->ch;
2932 struct srpt_send_ioctx *ioctx;
2934 ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
2935 BUG_ON(ioctx->sense_data != cmd->sense_buffer);
2939 ioctx->queue_status_only = true;
3341 struct srpt_send_ioctx *ioctx = container_of(se_cmd,
3343 struct srpt_rdma_ch *ch = ioctx->ch;
3344 struct srpt_recv_ioctx *recv_ioctx = ioctx->recv_ioctx;
3346 WARN_ON_ONCE(ioctx->state != SRPT_STATE_DONE &&
3347 !(ioctx->cmd.transport_state & CMD_T_ABORTED));
3351 ioctx->recv_ioctx = NULL;
3355 if (ioctx->n_rw_ctx) {
3356 srpt_free_rw_ctxs(ch, ioctx);
3357 ioctx->n_rw_ctx = 0;
3381 struct srpt_send_ioctx *ioctx;
3383 ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
3384 return ioctx->state;