Lines matching refs:rq (references to the symbol rq, a struct request *, in the NVMe-over-RDMA host driver; the function names place this in drivers/nvme/host/rdma.c)

154 static void nvme_rdma_complete_rq(struct request *rq);
286 struct request *rq, unsigned int hctx_idx)
288 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
294 struct request *rq, unsigned int hctx_idx,
298 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
302 nvme_req(rq)->ctrl = &ctrl->ctrl;
309 req->metadata_sgl = (void *)nvme_req(rq) +
314 nvme_req(rq)->cmd = req->sqe.data;
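
The hits at 286–314 fall in nvme_rdma_exit_request() and nvme_rdma_init_request(), the blk-mq ->exit_request/->init_request callbacks. The pattern that repeats throughout this listing is blk_mq_rq_to_pdu(): the driver PDU (struct nvme_rdma_request) lives in the same allocation, directly behind struct request. A condensed sketch of the init callback, reconstructed from mainline drivers/nvme/host/rdma.c (field layout and helper names vary across kernel versions):

    static int nvme_rdma_init_request(struct blk_mq_tag_set *set,
                    struct request *rq, unsigned int hctx_idx,
                    unsigned int numa_node)
    {
            struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(set->driver_data);
            struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
            int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
            struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx];

            nvme_req(rq)->ctrl = &ctrl->ctrl;
            req->sqe.data = kzalloc(sizeof(struct nvme_command), GFP_KERNEL);
            if (!req->sqe.data)
                    return -ENOMEM;

            /* the metadata SGL sits behind the nvme_request and the data SGL */
            if (queue->pi_support)
                    req->metadata_sgl = (void *)nvme_req(rq) +
                                        sizeof(struct nvme_request) +
                                        NVME_RDMA_DATA_SGL_SIZE;

            req->queue = queue;
            nvme_req(rq)->cmd = req->sqe.data;
            return 0;
    }
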
1165 struct request *rq = blk_mq_rq_from_pdu(req);
1169 if (!nvme_try_complete_req(rq, req->status, req->result))
1170 nvme_rdma_complete_rq(rq);
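
1165–1170 are nvme_rdma_end_request(); the forward declaration at 154 exists because this caller precedes the definition of nvme_rdma_complete_rq() at 2116. blk_mq_rq_from_pdu() is the inverse accessor, and nvme_try_complete_req() returning false means blk-mq could not complete the request remotely, so the driver completes it in place. Roughly, per mainline:

    static void nvme_rdma_end_request(struct nvme_rdma_request *req)
    {
            struct request *rq = blk_mq_rq_from_pdu(req);

            /* the send and recv completions each hold a ref; finish on last put */
            if (!refcount_dec_and_test(&req->ref))
                    return;
            if (!nvme_try_complete_req(rq, req->status, req->result))
                    nvme_rdma_complete_rq(rq);
    }
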
1221 static void nvme_rdma_dma_unmap_req(struct ib_device *ibdev, struct request *rq)
1223 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
1225 if (blk_integrity_rq(rq)) {
1227 req->metadata_sgl->nents, rq_dma_dir(rq));
1233 rq_dma_dir(rq));
1238 struct request *rq)
1240 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
1245 if (!blk_rq_nr_phys_segments(rq))
1256 nvme_rdma_dma_unmap_req(ibdev, rq);
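
1221–1256 are the teardown side: nvme_rdma_dma_unmap_req() drops the integrity scatterlist first (only when blk_integrity_rq() reports attached metadata) and then the data scatterlist, while nvme_rdma_unmap_data() bails out early for requests with no physical segments before ever reaching it. A sketch of the unmap helper, per mainline:

    static void nvme_rdma_dma_unmap_req(struct ib_device *ibdev,
                    struct request *rq)
    {
            struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);

            if (blk_integrity_rq(rq)) {
                    ib_dma_unmap_sg(ibdev, req->metadata_sgl->sg_table.sgl,
                                    req->metadata_sgl->nents, rq_dma_dir(rq));
                    sg_free_table_chained(&req->metadata_sgl->sg_table,
                                          NVME_INLINE_METADATA_SG_CNT);
            }

            ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl,
                            req->data_sgl.nents, rq_dma_dir(rq));
            sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT);
    }
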
1418 struct request *rq = blk_mq_rq_from_pdu(req);
1419 struct nvme_ns *ns = rq->q->queuedata;
1420 struct bio *bio = rq->bio;
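
1418–1420 sit in the T10-PI path, nvme_rdma_map_sg_pi(): the namespace hangs off rq->q->queuedata and the request's first bio selects the integrity profile used to program the signature MR. Only the rq-derived preamble is sketched here; the signature-attribute setup and MR registration that follow are elided:

    struct request *rq = blk_mq_rq_from_pdu(req);
    struct nvme_ns *ns = rq->q->queuedata;  /* namespace behind this queue */
    struct bio *bio = rq->bio;              /* carries the integrity profile */
    /* ... ib_map_mr_sg_pi() and ib_sig_attrs setup elided ... */
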
1472 static int nvme_rdma_dma_map_req(struct ib_device *ibdev, struct request *rq,
1475 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
1480 blk_rq_nr_phys_segments(rq), req->data_sgl.sg_table.sgl,
1485 req->data_sgl.nents = blk_rq_map_sg(rq->q, rq,
1489 req->data_sgl.nents, rq_dma_dir(rq));
1495 if (blk_integrity_rq(rq)) {
1499 blk_rq_count_integrity_sg(rq->q, rq->bio),
1507 req->metadata_sgl->nents = blk_rq_map_integrity_sg(rq->q,
1508 rq->bio, req->metadata_sgl->sg_table.sgl);
1512 rq_dma_dir(rq));
1526 rq_dma_dir(rq));
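
1472–1526 are nvme_rdma_dma_map_req(): allocate a chained sg_table sized by blk_rq_nr_phys_segments(), collapse the request's bio_vecs into it with blk_rq_map_sg(), then hand it to the HCA with ib_dma_map_sg(); the same sequence repeats for the integrity scatterlist when blk_integrity_rq() is true, and the final hit (1526) is the unwind path. Condensed from mainline, integrity branch elided:

    static int nvme_rdma_dma_map_req(struct ib_device *ibdev, struct request *rq,
                    int *count, int *pi_count)
    {
            struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
            int ret;

            req->data_sgl.sg_table.sgl = (struct scatterlist *)(req + 1);
            ret = sg_alloc_table_chained(&req->data_sgl.sg_table,
                            blk_rq_nr_phys_segments(rq),
                            req->data_sgl.sg_table.sgl, NVME_INLINE_SG_CNT);
            if (ret)
                    return -ENOMEM;

            req->data_sgl.nents = blk_rq_map_sg(rq->q, rq,
                                                req->data_sgl.sg_table.sgl);

            *count = ib_dma_map_sg(ibdev, req->data_sgl.sg_table.sgl,
                                   req->data_sgl.nents, rq_dma_dir(rq));
            if (unlikely(*count <= 0)) {
                    ret = -EIO;
                    goto out_free_table;
            }

            /* ... integrity scatterlist mapping (blk_integrity_rq() case) ... */
            return 0;

    out_free_table:
            sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT);
            return ret;
    }
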
1533 struct request *rq, struct nvme_command *c)
1535 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
1546 if (!blk_rq_nr_phys_segments(rq))
1549 ret = nvme_rdma_dma_map_req(ibdev, rq, &count, &pi_count);
1559 if (rq_data_dir(rq) == WRITE && nvme_rdma_queue_idx(queue) &&
1561 blk_rq_payload_bytes(rq) <=
1581 nvme_rdma_dma_unmap_req(ibdev, rq);
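
1533–1581 are nvme_rdma_map_data(), which chooses the SGL descriptor type for the command; 1581 is its unwind back through nvme_rdma_dma_unmap_req(). The decision core after the function's preamble, paraphrased from mainline (the original unwinds with goto):

    /* zero-length command: encode a null SGL, no DMA mapping needed */
    if (!blk_rq_nr_phys_segments(rq))
            return nvme_rdma_set_sg_null(c);

    ret = nvme_rdma_dma_map_req(ibdev, rq, &count, &pi_count);
    if (unlikely(ret))
            return ret;

    /* small writes on I/O queues may ride inline in the command capsule;
     * nvme_rdma_queue_idx() != 0 excludes the admin queue */
    if (rq_data_dir(rq) == WRITE && nvme_rdma_queue_idx(queue) &&
        queue->ctrl->use_inline_data &&
        blk_rq_payload_bytes(rq) <= nvme_rdma_inline_data_size(queue))
            return nvme_rdma_map_sg_inline(queue, req, c, count);

    /* otherwise fall through to MR-based mapping (elided) */
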
1700 struct request *rq;
1703 rq = nvme_find_rq(nvme_rdma_tagset(queue), cqe->command_id);
1704 if (!rq) {
1711 req = blk_mq_rq_to_pdu(rq);
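
1700–1711 are the receive side, in the CQE handler (nvme_rdma_process_nvme_rsp() in mainline): nvme_find_rq() maps the completion's command_id back to a request and rejects stale or corrupted IDs, in which case the safest move is error recovery rather than touching memory an unknown request may own. Sketch:

    rq = nvme_find_rq(nvme_rdma_tagset(queue), cqe->command_id);
    if (!rq) {
            /* bogus command_id: tear the association down, don't crash */
            dev_err(queue->ctrl->ctrl.device,
                    "got bad command_id %#x on QP %#x\n",
                    cqe->command_id, queue->qp->qp_num);
            nvme_rdma_error_recovery(queue->ctrl);
            return;
    }
    req = blk_mq_rq_to_pdu(rq);

    req->status = cqe->status;
    req->result = cqe->result;
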
1949 static void nvme_rdma_complete_timed_out(struct request *rq)
1951 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
1955 nvmf_complete_timed_out_request(rq);
1958 static enum blk_eh_timer_return nvme_rdma_timeout(struct request *rq)
1960 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
1968 rq->tag, nvme_cid(rq), cmd->common.opcode,
1985 nvme_rdma_complete_timed_out(rq);
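
1949–1985 are the timeout path. nvme_rdma_timeout() logs tag, CID and opcode (the hit at 1968); if the controller is not LIVE, no recovery is coming to reap the request, so nvme_rdma_complete_timed_out() stops the queue (fencing the HCA off the buffers) before nvmf_complete_timed_out_request() completes it by hand. A skeleton, with helper names as in recent mainline (older kernels read ctrl->ctrl.state directly):

    static enum blk_eh_timer_return nvme_rdma_timeout(struct request *rq)
    {
            struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
            struct nvme_rdma_queue *queue = req->queue;
            struct nvme_rdma_ctrl *ctrl = queue->ctrl;

            dev_warn(ctrl->ctrl.device, "I/O tag %d (%04x) QID %d timeout\n",
                     rq->tag, nvme_cid(rq), nvme_rdma_queue_idx(queue));

            if (nvme_ctrl_state(&ctrl->ctrl) != NVME_CTRL_LIVE) {
                    /* no recovery will reap this request: complete it here */
                    nvme_rdma_complete_timed_out(rq);
                    return BLK_EH_DONE;
            }

            /* controller is live: trigger recovery and re-arm the timer */
            nvme_rdma_error_recovery(ctrl);
            return BLK_EH_RESET_TIMER;
    }
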
2002 struct request *rq = bd->rq;
2003 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
2005 struct nvme_command *c = nvme_req(rq)->cmd;
2011 WARN_ON_ONCE(rq->tag < 0);
2013 if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
2014 return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);
2028 ret = nvme_setup_cmd(ns, rq);
2032 nvme_start_request(rq);
2043 err = nvme_rdma_map_data(queue, rq, c);
2063 nvme_rdma_unmap_data(queue, rq);
2066 ret = nvme_host_path_error(rq);
2071 nvme_cleanup_cmd(rq);
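
2002–2071 are nvme_rdma_queue_rq(), the blk-mq ->queue_rq() hook, and contain most of the submit-side hits: the tag sanity check (2011), the readiness gate, SQE construction, timeout-clock start, payload mapping, and the error unwind at 2063–2071. An abbreviated skeleton per mainline, with SQE DMA mapping and the RDMA send posting elided:

    static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
                    const struct blk_mq_queue_data *bd)
    {
            struct nvme_ns *ns = hctx->queue->queuedata;
            struct nvme_rdma_queue *queue = hctx->driver_data;
            struct request *rq = bd->rq;
            struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
            struct nvme_command *c = nvme_req(rq)->cmd;
            bool queue_ready = test_bit(NVME_RDMA_Q_LIVE, &queue->flags);
            blk_status_t ret;
            int err;

            WARN_ON_ONCE(rq->tag < 0);

            if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
                    return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);

            ret = nvme_setup_cmd(ns, rq);   /* build the SQE from the request */
            if (ret)
                    return ret;

            nvme_start_request(rq);         /* blk-mq timeout clock starts */

            err = nvme_rdma_map_data(queue, rq, c);
            if (unlikely(err < 0))
                    goto out_err;

            /* ... DMA-sync the SQE and post the RDMA send ... */
            return BLK_STS_OK;

    out_err:
            if (err == -EIO)
                    ret = nvme_host_path_error(rq); /* may trigger failover */
            else if (err == -ENOMEM || err == -EAGAIN)
                    ret = BLK_STS_RESOURCE;
            else
                    ret = BLK_STS_IOERR;
            nvme_cleanup_cmd(rq);
            return ret;
    }
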
2087 struct request *rq = blk_mq_rq_from_pdu(req);
2094 nvme_req(rq)->status = NVME_SC_INVALID_PI;
2101 nvme_req(rq)->status = NVME_SC_GUARD_CHECK;
2104 nvme_req(rq)->status = NVME_SC_REFTAG_CHECK;
2107 nvme_req(rq)->status = NVME_SC_APPTAG_CHECK;
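
2087–2107 are nvme_rdma_check_pi_status(): after a transfer through a signature MR, ib_check_mr_status() is queried with IB_MR_CHECK_SIG_STATUS, and any guard/reftag/apptag violation is translated into the corresponding NVMe status code on nvme_req(rq)->status so the core reports a proper integrity error. Condensed from mainline:

    struct request *rq = blk_mq_rq_from_pdu(req);
    struct ib_mr_status mr_status;

    if (ib_check_mr_status(req->mr, IB_MR_CHECK_SIG_STATUS, &mr_status)) {
            nvme_req(rq)->status = NVME_SC_INVALID_PI;  /* query itself failed */
            return;
    }

    if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
            switch (mr_status.sig_err.err_type) {
            case IB_SIG_BAD_GUARD:
                    nvme_req(rq)->status = NVME_SC_GUARD_CHECK;
                    break;
            case IB_SIG_BAD_REFTAG:
                    nvme_req(rq)->status = NVME_SC_REFTAG_CHECK;
                    break;
            case IB_SIG_BAD_APPTAG:
                    nvme_req(rq)->status = NVME_SC_APPTAG_CHECK;
                    break;
            }
    }
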
2116 static void nvme_rdma_complete_rq(struct request *rq)
2118 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
2125 nvme_rdma_unmap_data(queue, rq);
2128 nvme_complete_rq(rq);
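
2116–2128 close the loop: nvme_rdma_complete_rq() runs the PI check when a signature MR was used, unmaps the data (the inverse of map_data at 1533), unmaps the SQE, and only then hands the request to the core via nvme_complete_rq(). Per mainline:

    static void nvme_rdma_complete_rq(struct request *rq)
    {
            struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
            struct nvme_rdma_queue *queue = req->queue;
            struct ib_device *ibdev = queue->device->dev;

            if (req->use_sig_mr)
                    nvme_rdma_check_pi_status(req);

            nvme_rdma_unmap_data(queue, rq);        /* inverse of map_data */
            ib_dma_unmap_single(ibdev, req->sqe.dma,
                                sizeof(struct nvme_command), DMA_TO_DEVICE);
            nvme_complete_rq(rq);
    }
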