Lines matching references to rsp

165 static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp);
195 static inline bool nvmet_rdma_need_data_in(struct nvmet_rdma_rsp *rsp)
197 return nvme_is_write(rsp->req.cmd) &&
198 rsp->req.transfer_len &&
199 !(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
202 static inline bool nvmet_rdma_need_data_out(struct nvmet_rdma_rsp *rsp)
204 return !nvme_is_write(rsp->req.cmd) &&
205 rsp->req.transfer_len &&
206 !rsp->req.cqe->status &&
207 !(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
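
Restoring the enclosing braces around the matches at 195-207 gives the two predicates that decide whether a command needs an RDMA READ (data in from the host) or an RDMA WRITE (data out to the host); the bodies are the matched lines, only the function framing is filled in:

static inline bool nvmet_rdma_need_data_in(struct nvmet_rdma_rsp *rsp)
{
	/* host-to-controller transfer that did not arrive as inline data */
	return nvme_is_write(rsp->req.cmd) &&
		rsp->req.transfer_len &&
		!(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
}

static inline bool nvmet_rdma_need_data_out(struct nvmet_rdma_rsp *rsp)
{
	/* controller-to-host transfer, only when the command did not fail */
	return !nvme_is_write(rsp->req.cmd) &&
		rsp->req.transfer_len &&
		!rsp->req.cqe->status &&
		!(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
}
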
213 struct nvmet_rdma_rsp *rsp;
217 rsp = list_first_entry_or_null(&queue->free_rsps,
219 if (likely(rsp))
220 list_del(&rsp->free_list);
223 if (unlikely(!rsp)) {
226 rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
227 if (unlikely(!rsp))
229 ret = nvmet_rdma_alloc_rsp(queue->dev, rsp);
231 kfree(rsp);
235 rsp->allocated = true;
238 return rsp;
242 nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
246 if (unlikely(rsp->allocated)) {
247 nvmet_rdma_free_rsp(rsp->queue->dev, rsp);
248 kfree(rsp);
252 spin_lock_irqsave(&rsp->queue->rsps_lock, flags);
253 list_add_tail(&rsp->free_list, &rsp->queue->free_rsps);
254 spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
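
The matches at 213-254 sketch the rsp pool: a per-queue free list with a kzalloc fallback when the pool runs dry. A hedged reconstruction, assuming the driver's local types and that the same rsps_lock shown on the put side (252-254) also guards the list pop on the get side:

static struct nvmet_rdma_rsp *
nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_rsp *rsp;
	unsigned long flags;

	spin_lock_irqsave(&queue->rsps_lock, flags);
	rsp = list_first_entry_or_null(&queue->free_rsps,
			struct nvmet_rdma_rsp, free_list);
	if (likely(rsp))
		list_del(&rsp->free_list);
	spin_unlock_irqrestore(&queue->rsps_lock, flags);

	if (unlikely(!rsp)) {
		int ret;

		/* pre-allocated pool exhausted: fall back to a heap rsp */
		rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
		if (unlikely(!rsp))
			return NULL;
		ret = nvmet_rdma_alloc_rsp(queue->dev, rsp);
		if (unlikely(ret)) {
			kfree(rsp);
			return NULL;
		}
		rsp->allocated = true;
	}

	return rsp;
}

static void nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
{
	unsigned long flags;

	if (unlikely(rsp->allocated)) {
		/* slow-path rsp: release it instead of recycling */
		nvmet_rdma_free_rsp(rsp->queue->dev, rsp);
		kfree(rsp);
		return;
	}

	spin_lock_irqsave(&rsp->queue->rsps_lock, flags);
	list_add_tail(&rsp->free_list, &rsp->queue->free_rsps);
	spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
}
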
465 struct nvmet_rdma_rsp *rsp = &queue->rsps[i];
467 ret = nvmet_rdma_alloc_rsp(ndev, rsp);
471 list_add_tail(&rsp->free_list, &queue->free_rsps);
478 struct nvmet_rdma_rsp *rsp = &queue->rsps[i];
480 list_del(&rsp->free_list);
481 nvmet_rdma_free_rsp(ndev, rsp);
494 struct nvmet_rdma_rsp *rsp = &queue->rsps[i];
496 list_del(&rsp->free_list);
497 nvmet_rdma_free_rsp(ndev, rsp);
526 struct nvmet_rdma_rsp *rsp;
529 rsp = list_entry(queue->rsp_wr_wait_list.next,
531 list_del(&rsp->wait_list);
534 ret = nvmet_rdma_execute_command(rsp);
538 list_add(&rsp->wait_list, &queue->rsp_wr_wait_list);
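
The matches at 526-538 come from the handler that retries commands parked because the send queue had no free work requests. A hedged sketch; the enclosing while loop, the rsp_wr_wait_lock, and the early break are assumptions inferred from the re-queue at 538:

static void nvmet_rdma_process_wr_wait_list(struct nvmet_rdma_queue *queue)
{
	spin_lock(&queue->rsp_wr_wait_lock);
	while (!list_empty(&queue->rsp_wr_wait_list)) {
		struct nvmet_rdma_rsp *rsp;
		bool ret;

		rsp = list_entry(queue->rsp_wr_wait_list.next,
				struct nvmet_rdma_rsp, wait_list);
		list_del(&rsp->wait_list);

		spin_unlock(&queue->rsp_wr_wait_lock);
		ret = nvmet_rdma_execute_command(rsp);
		spin_lock(&queue->rsp_wr_wait_lock);

		if (!ret) {
			/* still no room: put it back at the head and stop */
			list_add(&rsp->wait_list, &queue->rsp_wr_wait_list);
			break;
		}
	}
	spin_unlock(&queue->rsp_wr_wait_lock);
}
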
634 static int nvmet_rdma_rw_ctx_init(struct nvmet_rdma_rsp *rsp, u64 addr, u32 key,
637 struct rdma_cm_id *cm_id = rsp->queue->cm_id;
638 struct nvmet_req *req = &rsp->req;
642 ret = rdma_rw_ctx_signature_init(&rsp->rw, cm_id->qp,
647 ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
654 static void nvmet_rdma_rw_ctx_destroy(struct nvmet_rdma_rsp *rsp)
656 struct rdma_cm_id *cm_id = rsp->queue->cm_id;
657 struct nvmet_req *req = &rsp->req;
660 rdma_rw_ctx_destroy_signature(&rsp->rw, cm_id->qp,
665 rdma_rw_ctx_destroy(&rsp->rw, cm_id->qp, cm_id->port_num,
669 static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
671 struct nvmet_rdma_queue *queue = rsp->queue;
673 atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
675 if (rsp->n_rdma)
676 nvmet_rdma_rw_ctx_destroy(rsp);
678 if (rsp->req.sg != rsp->cmd->inline_sg)
679 nvmet_req_free_sgls(&rsp->req);
684 nvmet_rdma_put_rsp(rsp);
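
From 669-684, releasing a response gives back its send-queue budget (one SEND plus n_rdma R/W work requests), tears down any rdma_rw context, frees non-inline SGLs, and recycles the rsp. A hedged reconstruction; the wait-list re-drain before put_rsp is an assumption, the rest follows the matched lines:

static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
{
	struct nvmet_rdma_queue *queue = rsp->queue;

	/* one SEND WR plus the RDMA R/W WRs this command consumed */
	atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);

	if (rsp->n_rdma)
		nvmet_rdma_rw_ctx_destroy(rsp);

	if (rsp->req.sg != rsp->cmd->inline_sg)
		nvmet_req_free_sgls(&rsp->req);

	/* assumption: commands parked for WR space can run now */
	if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list)))
		nvmet_rdma_process_wr_wait_list(queue);

	nvmet_rdma_put_rsp(rsp);
}
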
703 struct nvmet_rdma_rsp *rsp =
707 nvmet_rdma_release_rsp(rsp);
719 struct nvmet_rdma_rsp *rsp =
721 struct rdma_cm_id *cm_id = rsp->queue->cm_id;
724 if (rsp->invalidate_rkey) {
725 rsp->send_wr.opcode = IB_WR_SEND_WITH_INV;
726 rsp->send_wr.ex.invalidate_rkey = rsp->invalidate_rkey;
728 rsp->send_wr.opcode = IB_WR_SEND;
731 if (nvmet_rdma_need_data_out(rsp)) {
732 if (rsp->req.metadata_len)
733 first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
734 cm_id->port_num, &rsp->write_cqe, NULL);
736 first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
737 cm_id->port_num, NULL, &rsp->send_wr);
739 first_wr = &rsp->send_wr;
742 nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd);
744 ib_dma_sync_single_for_device(rsp->queue->dev->device,
745 rsp->send_sge.addr, rsp->send_sge.length,
750 nvmet_rdma_release_rsp(rsp);
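
The matches at 719-750 are the response path: pick SEND vs. SEND_WITH_INV, chain any RDMA WRITE work requests in front of the SEND (or route the WRITE completion through write_cqe when metadata is involved), repost the receive buffer, sync the response SGE, and post. A hedged sketch; the container_of, the DMA direction, and the final ib_post_send of first_wr are assumptions inferred from 744-750:

static void nvmet_rdma_queue_response(struct nvmet_req *req)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(req, struct nvmet_rdma_rsp, req);
	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
	struct ib_send_wr *first_wr;

	if (rsp->invalidate_rkey) {
		/* piggy-back remote invalidation of the host's rkey */
		rsp->send_wr.opcode = IB_WR_SEND_WITH_INV;
		rsp->send_wr.ex.invalidate_rkey = rsp->invalidate_rkey;
	} else {
		rsp->send_wr.opcode = IB_WR_SEND;
	}

	if (nvmet_rdma_need_data_out(rsp)) {
		if (rsp->req.metadata_len)
			/* WRITE completes into write_cqe; SEND is issued later */
			first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
					cm_id->port_num, &rsp->write_cqe, NULL);
		else
			/* chain the RDMA WRITEs in front of the SEND */
			first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
					cm_id->port_num, NULL, &rsp->send_wr);
	} else {
		first_wr = &rsp->send_wr;
	}

	nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd);

	ib_dma_sync_single_for_device(rsp->queue->dev->device,
			rsp->send_sge.addr, rsp->send_sge.length,
			DMA_TO_DEVICE);

	if (unlikely(ib_post_send(cm_id->qp, first_wr, NULL)))
		nvmet_rdma_release_rsp(rsp);
}
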
756 struct nvmet_rdma_rsp *rsp =
761 WARN_ON(rsp->n_rdma <= 0);
762 atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
763 rsp->n_rdma = 0;
766 nvmet_rdma_rw_ctx_destroy(rsp);
767 nvmet_req_uninit(&rsp->req);
768 nvmet_rdma_release_rsp(rsp);
777 if (rsp->req.metadata_len)
778 status = nvmet_rdma_check_pi_status(rsp->rw.reg->mr);
779 nvmet_rdma_rw_ctx_destroy(rsp);
782 nvmet_req_complete(&rsp->req, status);
784 rsp->req.execute(&rsp->req);
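
756-784 belong to the completion handler for the RDMA READ that fetched host data: return the R/W work-request budget, unwind on a failed READ, verify PI when metadata is present, and only then execute the command. A hedged sketch; how the rsp and queue are recovered from the work completion is an assumption, and error reporting on the queue is omitted:

static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, read_cqe);
	struct nvmet_rdma_queue *queue = wc->qp->qp_context;
	u16 status = 0;

	WARN_ON(rsp->n_rdma <= 0);
	atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
	rsp->n_rdma = 0;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		/* the RDMA READ failed: drop the command without executing it */
		nvmet_rdma_rw_ctx_destroy(rsp);
		nvmet_req_uninit(&rsp->req);
		nvmet_rdma_release_rsp(rsp);
		return;
	}

	if (rsp->req.metadata_len)
		status = nvmet_rdma_check_pi_status(rsp->rw.reg->mr);
	nvmet_rdma_rw_ctx_destroy(rsp);

	if (unlikely(status))
		nvmet_req_complete(&rsp->req, status);
	else
		rsp->req.execute(&rsp->req);
}
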
789 struct nvmet_rdma_rsp *rsp =
792 struct rdma_cm_id *cm_id = rsp->queue->cm_id;
798 WARN_ON(rsp->n_rdma <= 0);
799 atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
800 rsp->n_rdma = 0;
803 nvmet_rdma_rw_ctx_destroy(rsp);
804 nvmet_req_uninit(&rsp->req);
805 nvmet_rdma_release_rsp(rsp);
819 status = nvmet_rdma_check_pi_status(rsp->rw.reg->mr);
821 rsp->req.cqe->status = cpu_to_le16(status << 1);
822 nvmet_rdma_rw_ctx_destroy(rsp);
824 if (unlikely(ib_post_send(cm_id->qp, &rsp->send_wr, NULL))) {
826 nvmet_rdma_release_rsp(rsp);
830 static void nvmet_rdma_use_inline_sg(struct nvmet_rdma_rsp *rsp, u32 len,
837 sg = rsp->cmd->inline_sg;
850 rsp->req.sg = rsp->cmd->inline_sg;
851 rsp->req.sg_cnt = sg_count;
854 static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp)
856 struct nvme_sgl_desc *sgl = &rsp->req.cmd->common.dptr.sgl;
860 if (!nvme_is_write(rsp->req.cmd)) {
861 rsp->req.error_loc =
866 if (off + len > rsp->queue->dev->inline_data_size) {
875 nvmet_rdma_use_inline_sg(rsp, len, off);
876 rsp->flags |= NVMET_RDMA_REQ_INLINE_DATA;
877 rsp->req.transfer_len += len;
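
854-877 map an inline (in-capsule) data SGL: only writes may carry inline data, the offset plus length must fit the negotiated inline_data_size, and on success the request is pointed at the command's inline scatterlist. A hedged sketch; the exact status codes on the two error returns are assumptions, the rest follows the matched lines:

static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp)
{
	struct nvme_sgl_desc *sgl = &rsp->req.cmd->common.dptr.sgl;
	u64 off = le64_to_cpu(sgl->addr);
	u32 len = le32_to_cpu(sgl->length);

	if (!nvme_is_write(rsp->req.cmd)) {
		/* inline data is only valid for host-to-controller transfers */
		rsp->req.error_loc =
			offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;	/* assumed code */
	}

	if (off + len > rsp->queue->dev->inline_data_size)
		return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR; /* assumed code */

	/* no data? nothing to map */
	if (!len)
		return 0;

	nvmet_rdma_use_inline_sg(rsp, len, off);
	rsp->flags |= NVMET_RDMA_REQ_INLINE_DATA;
	rsp->req.transfer_len += len;
	return 0;
}
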
881 static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
889 rsp->req.transfer_len = get_unaligned_le24(sgl->length);
892 if (!rsp->req.transfer_len)
895 if (rsp->req.metadata_len)
896 nvmet_rdma_set_sig_attrs(&rsp->req, &sig_attrs);
898 ret = nvmet_req_alloc_sgls(&rsp->req);
902 ret = nvmet_rdma_rw_ctx_init(rsp, addr, key, &sig_attrs);
905 rsp->n_rdma += ret;
908 rsp->invalidate_rkey = key;
913 rsp->req.transfer_len = 0;
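
881-913 map a keyed SGL, i.e. a host buffer to be moved by RDMA READ/WRITE: take the transfer length from the descriptor, allocate SGLs, build the rdma_rw context (with signature attributes when metadata/PI is in play), account the work requests in n_rdma, and remember the rkey if the host asked for remote invalidation. A hedged sketch; the parameter list, the address/key extraction, and the error label and status are assumptions:

static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
		struct nvme_keyed_sgl_desc *sgl, bool invalidate)
{
	u64 addr = le64_to_cpu(sgl->addr);
	u32 key = get_unaligned_le32(sgl->key);
	struct ib_sig_attrs sig_attrs;
	int ret;

	rsp->req.transfer_len = get_unaligned_le24(sgl->length);

	/* no data command? nothing to set up */
	if (!rsp->req.transfer_len)
		return 0;

	if (rsp->req.metadata_len)
		nvmet_rdma_set_sig_attrs(&rsp->req, &sig_attrs);

	ret = nvmet_req_alloc_sgls(&rsp->req);
	if (unlikely(ret < 0))
		goto error_out;

	ret = nvmet_rdma_rw_ctx_init(rsp, addr, key, &sig_attrs);
	if (unlikely(ret < 0))
		goto error_out;
	rsp->n_rdma += ret;	/* one rdma_rw context may need several WRs */

	if (invalidate)
		rsp->invalidate_rkey = key;

	return 0;

error_out:
	rsp->req.transfer_len = 0;
	return NVME_SC_INTERNAL;	/* assumed status */
}
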
917 static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp)
919 struct nvme_keyed_sgl_desc *sgl = &rsp->req.cmd->common.dptr.ksgl;
925 return nvmet_rdma_map_sgl_inline(rsp);
928 rsp->req.error_loc =
935 return nvmet_rdma_map_sgl_keyed(rsp, sgl, true);
937 return nvmet_rdma_map_sgl_keyed(rsp, sgl, false);
940 rsp->req.error_loc =
946 rsp->req.error_loc = offsetof(struct nvme_common_command, dptr);
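
917-946 dispatch on the SGL descriptor type carried in the command: an offset/inline descriptor goes to the in-capsule path, a keyed descriptor goes to the RDMA path with or without remote invalidation, and anything else is rejected with error_loc pointing at dptr. A hedged sketch; the specific NVME_SGL_FMT_* constants and the returned status codes are assumptions inferred from the calls at 925, 935 and 937:

static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp)
{
	struct nvme_keyed_sgl_desc *sgl = &rsp->req.cmd->common.dptr.ksgl;

	switch (sgl->type >> 4) {
	case NVME_SGL_FMT_DATA_DESC:		/* in-capsule data */
		switch (sgl->type & 0xf) {
		case NVME_SGL_FMT_OFFSET:
			return nvmet_rdma_map_sgl_inline(rsp);
		default:
			rsp->req.error_loc =
				offsetof(struct nvme_common_command, dptr);
			return NVME_SC_INVALID_FIELD | NVME_SC_DNR; /* assumed */
		}
	case NVME_KEY_SGL_FMT_DATA_DESC:	/* host memory, keyed */
		switch (sgl->type & 0xf) {
		case NVME_SGL_FMT_INVALIDATE:
			return nvmet_rdma_map_sgl_keyed(rsp, sgl, true);
		case NVME_SGL_FMT_ADDRESS:
			return nvmet_rdma_map_sgl_keyed(rsp, sgl, false);
		default:
			rsp->req.error_loc =
				offsetof(struct nvme_common_command, dptr);
			return NVME_SC_INVALID_FIELD | NVME_SC_DNR; /* assumed */
		}
	default:
		rsp->req.error_loc = offsetof(struct nvme_common_command, dptr);
		return NVME_SC_SGL_INVALID_TYPE | NVME_SC_DNR; /* assumed */
	}
}
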
951 static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp)
953 struct nvmet_rdma_queue *queue = rsp->queue;
955 if (unlikely(atomic_sub_return(1 + rsp->n_rdma,
958 1 + rsp->n_rdma, queue->idx,
960 atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
964 if (nvmet_rdma_need_data_in(rsp)) {
965 if (rdma_rw_ctx_post(&rsp->rw, queue->qp,
966 queue->cm_id->port_num, &rsp->read_cqe, NULL))
967 nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR);
969 rsp->req.execute(&rsp->req);
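
951-969 implement the send-queue flow control: atomically reserve 1 + n_rdma work requests from sq_wr_avail, back off (so the caller can park the command on rsp_wr_wait_list) if that underflows, otherwise either post the RDMA READ for data-in commands or execute immediately. A hedged sketch; the debug message text and the final return values are assumptions:

static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp)
{
	struct nvmet_rdma_queue *queue = rsp->queue;

	if (unlikely(atomic_sub_return(1 + rsp->n_rdma,
			&queue->sq_wr_avail) < 0)) {
		/* not enough send-queue WRs: undo the reservation and bail */
		pr_debug("not enough send queue WRs (needed %d) on queue %d\n",
				1 + rsp->n_rdma, queue->idx);
		atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
		return false;
	}

	if (nvmet_rdma_need_data_in(rsp)) {
		/* fetch the host data first; execution continues in read_cqe */
		if (rdma_rw_ctx_post(&rsp->rw, queue->qp,
				queue->cm_id->port_num, &rsp->read_cqe, NULL))
			nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR);
	} else {
		rsp->req.execute(&rsp->req);
	}

	return true;
}
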
1012 struct nvmet_rdma_rsp *rsp;
1031 rsp = nvmet_rdma_get_rsp(queue);
1032 if (unlikely(!rsp)) {
1041 rsp->queue = queue;
1042 rsp->cmd = cmd;
1043 rsp->flags = 0;
1044 rsp->req.cmd = cmd->nvme_cmd;
1045 rsp->req.port = queue->port;
1046 rsp->n_rdma = 0;
1047 rsp->invalidate_rkey = 0;
1054 list_add_tail(&rsp->wait_list, &queue->rsp_wait_list);
1056 nvmet_rdma_put_rsp(rsp);
1061 nvmet_rdma_handle_command(queue, rsp);
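
1012-1061 are from the receive completion path: grab an rsp for the freshly received command capsule, initialize it, and either park it on rsp_wait_list while the queue is still connecting, give it back under memory pressure, or hand it to nvmet_rdma_handle_command. A hedged sketch reduced to the matched lines; the completion-status check, the container_of for the command, and the queue-state test (including the NVMET_RDMA_Q_* states and their locking) are assumptions:

static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_cmd *cmd =
		container_of(wc->wr_cqe, struct nvmet_rdma_cmd, cqe);
	struct nvmet_rdma_queue *queue = wc->qp->qp_context;
	struct nvmet_rdma_rsp *rsp;

	if (unlikely(wc->status != IB_WC_SUCCESS))
		return;		/* (error handling omitted in this sketch) */

	cmd->queue = queue;
	rsp = nvmet_rdma_get_rsp(queue);
	if (unlikely(!rsp)) {
		/* memory pressure: drop silently and let the host retry */
		nvmet_rdma_post_recv(queue->dev, cmd);
		return;
	}
	rsp->queue = queue;
	rsp->cmd = cmd;
	rsp->flags = 0;
	rsp->req.cmd = cmd->nvme_cmd;
	rsp->req.port = queue->port;
	rsp->n_rdma = 0;
	rsp->invalidate_rkey = 0;

	if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) {
		/* assumption: connecting queues park the rsp, others drop it */
		if (queue->state == NVMET_RDMA_Q_CONNECTING)
			list_add_tail(&rsp->wait_list, &queue->rsp_wait_list);
		else
			nvmet_rdma_put_rsp(rsp);
		return;
	}

	nvmet_rdma_handle_command(queue, rsp);
}
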
1663 struct nvmet_rdma_rsp *rsp;
1665 rsp = list_first_entry(&queue->rsp_wait_list,
1668 list_del(&rsp->wait_list);
1669 nvmet_rdma_put_rsp(rsp);
2004 struct nvmet_rdma_rsp *rsp =
2006 struct rdma_cm_id *req_cm_id = rsp->queue->cm_id;