Lines matching defs:req in the NVMe PCI host driver

231 struct nvme_request req;
420 struct request *req, unsigned int hctx_idx,
423 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
425 nvme_req(req)->ctrl = set->driver_data;
426 nvme_req(req)->cmd = &iod->cmd;
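
The matches at 420-426 come from the blk-mq .init_request callback, which wires each request's driver PDU to the controller and to the NVMe command embedded in it. A minimal sketch of that hook; the callback name and the struct nvme_iod layout are assumed to follow the driver's conventions:

	/* Sketch of the .init_request hook suggested by lines 420-426. */
	static int nvme_pci_init_request(struct blk_mq_tag_set *set,
			struct request *req, unsigned int hctx_idx,
			unsigned int numa_node)
	{
		struct nvme_iod *iod = blk_mq_rq_to_pdu(req);

		/* Point the generic nvme_request at this controller and at
		 * the command buffer living in the per-request PDU. */
		nvme_req(req)->ctrl = set->driver_data;
		nvme_req(req)->cmd = &iod->cmd;
		return 0;
	}
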
507 static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req,
510 struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
513 avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg);
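
Lines 507-513 are the SGL-versus-PRP heuristic: the payload size divided by the segment count gives an average segment size, and SGLs are only worth using when that average is large enough. A sketch of that check, assuming the driver's sgl_threshold module parameter and the nvme_ctrl_sgl_supported() helper; the exact ordering of the tests is not taken from the listing:

	/* Sketch of the heuristic implied by lines 507-513: prefer SGLs only
	 * when the controller supports them, the queue is an I/O queue, and
	 * the average segment size clears the module-parameter threshold. */
	static inline bool nvme_pci_use_sgls(struct nvme_dev *dev,
					     struct request *req, int nseg)
	{
		struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
		unsigned int avg_seg_size;

		avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg);

		if (!nvme_ctrl_sgl_supported(&dev->ctrl))
			return false;
		if (!nvmeq->qid)	/* admin commands stay on PRPs */
			return false;
		if (!sgl_threshold || avg_seg_size < sgl_threshold)
			return false;
		return true;
	}
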
524 static void nvme_free_prps(struct nvme_dev *dev, struct request *req)
527 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
540 static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
542 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
546 rq_dma_dir(req));
552 dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);
561 nvme_free_prps(dev, req);
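
Lines 540-561 are the data-unmap path. A simplified sketch of its shape, assuming an iod->dma_len field that marks the single-bvec fast path; the PRP-list bookkeeping beyond the nvme_free_prps() call is omitted:

	static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
	{
		struct nvme_iod *iod = blk_mq_rq_to_pdu(req);

		if (iod->dma_len) {
			/* The single-segment fast path mapped one bvec
			 * directly, so one unmap call undoes it. */
			dma_unmap_page(dev->dev, iod->first_dma, iod->dma_len,
				       rq_dma_dir(req));
			return;
		}

		/* General path: tear down the scatter/gather mapping and
		 * return any PRP-list pages chained for this command. */
		dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);
		nvme_free_prps(dev, req);
	}
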
580 struct request *req, struct nvme_rw_command *cmnd)
582 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
584 int length = blk_rq_payload_bytes(req);
660 nvme_free_prps(dev, req);
665 blk_rq_payload_bytes(req), iod->sgt.nents);
686 struct request *req, struct nvme_rw_command *cmd)
688 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
731 struct request *req, struct nvme_rw_command *cmnd,
734 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
738 iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
752 struct request *req, struct nvme_rw_command *cmnd,
755 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
757 iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
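
Lines 731-757 are the two single-bvec fast paths: the bio_vec is DMA-mapped directly with dma_map_bvec() and its address written straight into the command, skipping the scatterlist. A sketch of the PRP variant; the offset arithmetic around NVME_CTRL_PAGE_SIZE is reconstructed from how PRP entries work, not taken from the listing:

	static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev,
			struct request *req, struct nvme_rw_command *cmnd,
			struct bio_vec *bv)
	{
		struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
		unsigned int offset = bv->bv_offset & (NVME_CTRL_PAGE_SIZE - 1);
		unsigned int first_prp_len = NVME_CTRL_PAGE_SIZE - offset;

		iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
		if (dma_mapping_error(dev->dev, iod->first_dma))
			return BLK_STS_RESOURCE;
		iod->dma_len = bv->bv_len;

		/* PRP1 points at the mapped bvec; PRP2 is only needed when
		 * the transfer crosses into a second controller page. */
		cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma);
		if (bv->bv_len > first_prp_len)
			cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len);
		else
			cmnd->dptr.prp2 = 0;
		return BLK_STS_OK;
	}

The SGL variant at 752-757 performs the same dma_map_bvec() call but describes the buffer with a single data-block SGL descriptor instead of PRP entries.
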
769 static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
772 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
776 if (blk_rq_nr_phys_segments(req) == 1) {
777 struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
778 struct bio_vec bv = req_bvec(req);
782 return nvme_setup_prp_simple(dev, req,
787 return nvme_setup_sgl_simple(dev, req,
796 sg_init_table(iod->sgt.sgl, blk_rq_nr_phys_segments(req));
797 iod->sgt.orig_nents = blk_rq_map_sg(req->q, req, iod->sgt.sgl);
801 rc = dma_map_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req),
809 if (nvme_pci_use_sgls(dev, req, iod->sgt.nents))
810 ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw);
812 ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
818 dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);
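
Lines 769-818 are the top-level data-mapping dispatch: single-segment requests take one of the fast paths above, everything else is built into a scatter/gather table and handed to either the SGL or the PRP builder. A sketch of that flow; the gating conditions for the fast paths and the iod_mempool usage are simplified assumptions around the calls visible in the listing:

	static blk_status_t nvme_map_data(struct nvme_dev *dev,
			struct request *req, struct nvme_command *cmnd)
	{
		struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
		blk_status_t ret = BLK_STS_RESOURCE;
		int rc;

		if (blk_rq_nr_phys_segments(req) == 1) {
			struct bio_vec bv = req_bvec(req);

			/* Fast paths: a lone bvec can be described in the
			 * command without an external PRP list or SGL. */
			if (!nvme_pci_use_sgls(dev, req, 1))
				return nvme_setup_prp_simple(dev, req,
							     &cmnd->rw, &bv);
			return nvme_setup_sgl_simple(dev, req, &cmnd->rw, &bv);
		}

		iod->dma_len = 0;
		iod->sgt.sgl = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
		if (!iod->sgt.sgl)
			return BLK_STS_RESOURCE;
		sg_init_table(iod->sgt.sgl, blk_rq_nr_phys_segments(req));
		iod->sgt.orig_nents = blk_rq_map_sg(req->q, req, iod->sgt.sgl);
		if (!iod->sgt.orig_nents)
			goto out_free_sg;

		rc = dma_map_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req),
				     DMA_ATTR_NO_WARN);
		if (rc)
			goto out_free_sg;

		/* Pick the descriptor format with the heuristic above. */
		if (nvme_pci_use_sgls(dev, req, iod->sgt.nents))
			ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw);
		else
			ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
		if (ret != BLK_STS_OK)
			goto out_unmap_sg;
		return BLK_STS_OK;

	out_unmap_sg:
		dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);
	out_free_sg:
		mempool_free(iod->sgt.sgl, dev->iod_mempool);
		return ret;
	}
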
824 static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req,
827 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
829 iod->meta_dma = dma_map_bvec(dev->dev, rq_integrity_vec(req),
830 rq_dma_dir(req), 0);
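
Lines 824-830 map the integrity (metadata) buffer: the request's single integrity bvec is DMA-mapped and its address stored in the command's metadata pointer. A sketch, assuming rw.metadata is the destination field:

	static blk_status_t nvme_map_metadata(struct nvme_dev *dev,
			struct request *req, struct nvme_command *cmnd)
	{
		struct nvme_iod *iod = blk_mq_rq_to_pdu(req);

		iod->meta_dma = dma_map_bvec(dev->dev, rq_integrity_vec(req),
					     rq_dma_dir(req), 0);
		if (dma_mapping_error(dev->dev, iod->meta_dma))
			return BLK_STS_IOERR;
		cmnd->rw.metadata = cpu_to_le64(iod->meta_dma);
		return BLK_STS_OK;
	}
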
837 static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req)
839 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
846 ret = nvme_setup_cmd(req->q->queuedata, req);
850 if (blk_rq_nr_phys_segments(req)) {
851 ret = nvme_map_data(dev, req, &iod->cmd);
856 if (blk_integrity_rq(req)) {
857 ret = nvme_map_metadata(dev, req, &iod->cmd);
862 nvme_start_request(req);
865 nvme_unmap_data(dev, req);
867 nvme_cleanup_cmd(req);
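
Lines 837-867 are the preparation sequence: build the NVMe command, map data and metadata, then mark the request started, with error unwinding through the unmap/cleanup calls visible in the listing. A sketch of that order:

	static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req)
	{
		struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
		blk_status_t ret;

		/* (the driver also resets per-request iod state here) */

		ret = nvme_setup_cmd(req->q->queuedata, req);
		if (ret)
			return ret;

		if (blk_rq_nr_phys_segments(req)) {
			ret = nvme_map_data(dev, req, &iod->cmd);
			if (ret)
				goto out_free_cmd;
		}

		if (blk_integrity_rq(req)) {
			ret = nvme_map_metadata(dev, req, &iod->cmd);
			if (ret)
				goto out_unmap_data;
		}

		nvme_start_request(req);
		return BLK_STS_OK;
	out_unmap_data:
		nvme_unmap_data(dev, req);
	out_free_cmd:
		nvme_cleanup_cmd(req);
		return ret;
	}
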
879 struct request *req = bd->rq;
880 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
890 if (unlikely(!nvme_check_ready(&dev->ctrl, req, true)))
891 return nvme_fail_nonready_command(&dev->ctrl, req);
893 ret = nvme_prep_rq(dev, req);
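
Lines 879-893 are the .queue_rq fast path: check that the controller can accept the command, prepare it, then copy it into the submission queue and ring the doorbell. A sketch; the function name and the sq_lock / nvme_sq_copy_cmd() / nvme_write_sq_db() step are named from the driver but sit outside the listed lines:

	static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
					  const struct blk_mq_queue_data *bd)
	{
		struct nvme_queue *nvmeq = hctx->driver_data;
		struct nvme_dev *dev = nvmeq->dev;
		struct request *req = bd->rq;
		struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
		blk_status_t ret;

		if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
			return BLK_STS_IOERR;

		if (unlikely(!nvme_check_ready(&dev->ctrl, req, true)))
			return nvme_fail_nonready_command(&dev->ctrl, req);

		ret = nvme_prep_rq(dev, req);
		if (unlikely(ret))
			return ret;

		/* Copy the prepared command into the SQ and ring the
		 * doorbell, batching writes until bd->last. */
		spin_lock(&nvmeq->sq_lock);
		nvme_sq_copy_cmd(nvmeq, &iod->cmd);
		nvme_write_sq_db(nvmeq, bd->last);
		spin_unlock(&nvmeq->sq_lock);
		return BLK_STS_OK;
	}
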
907 struct request *req = rq_list_pop(rqlist);
908 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
916 static bool nvme_prep_rq_batch(struct nvme_queue *nvmeq, struct request *req)
924 if (unlikely(!nvme_check_ready(&nvmeq->dev->ctrl, req, true)))
927 return nvme_prep_rq(nvmeq->dev, req) == BLK_STS_OK;
932 struct request *req, *next, *prev = NULL;
935 rq_list_for_each_safe(rqlist, req, next) {
936 struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
938 if (!nvme_prep_rq_batch(nvmeq, req)) {
939 /* detach 'req' and add to remainder list */
940 rq_list_move(rqlist, &requeue_list, req, prev);
942 req = prev;
943 if (!req)
947 if (!next || req->mq_hctx != next->mq_hctx) {
949 req->rq_next = NULL;
954 prev = req;
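
Lines 932-954 are the batched .queue_rqs walk: each request is prepared with nvme_prep_rq_batch() (lines 916-927), failures are moved onto a requeue list, and the accumulated run is flushed whenever the hardware context changes or the list ends. A reconstruction of that loop around the listed lines; the nvme_submit_cmds() flush (which pops the run with rq_list_pop(), per lines 907-908) and the final requeue handoff are assumptions about what sits between them:

	static void nvme_queue_rqs(struct request **rqlist)
	{
		struct request *req, *next, *prev = NULL;
		struct request *requeue_list = NULL;

		rq_list_for_each_safe(rqlist, req, next) {
			struct nvme_queue *nvmeq = req->mq_hctx->driver_data;

			if (!nvme_prep_rq_batch(nvmeq, req)) {
				/* detach 'req' and add to remainder list */
				rq_list_move(rqlist, &requeue_list, req, prev);

				req = prev;
				if (!req)
					continue;
			}

			if (!next || req->mq_hctx != next->mq_hctx) {
				/* end of a same-queue run: detach it and
				 * submit the whole batch to this queue */
				req->rq_next = NULL;
				nvme_submit_cmds(nvmeq, rqlist);
				*rqlist = next;
				prev = NULL;
			} else
				prev = req;
		}

		/* whatever failed preparation goes back for requeue */
		*rqlist = requeue_list;
	}
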
960 static __always_inline void nvme_pci_unmap_rq(struct request *req)
962 struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
965 if (blk_integrity_rq(req)) {
966 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
969 rq_integrity_vec(req)->bv_len, rq_dma_dir(req));
972 if (blk_rq_nr_phys_segments(req))
973 nvme_unmap_data(dev, req);
976 static void nvme_pci_complete_rq(struct request *req)
978 nvme_pci_unmap_rq(req);
979 nvme_complete_rq(req);
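
Lines 960-979 are the completion-side teardown: undo the metadata and data DMA mappings, then hand the request to the core NVMe completion path. A sketch following the listed calls:

	static __always_inline void nvme_pci_unmap_rq(struct request *req)
	{
		struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
		struct nvme_dev *dev = nvmeq->dev;

		if (blk_integrity_rq(req)) {
			struct nvme_iod *iod = blk_mq_rq_to_pdu(req);

			dma_unmap_page(dev->dev, iod->meta_dma,
				       rq_integrity_vec(req)->bv_len,
				       rq_dma_dir(req));
		}

		if (blk_rq_nr_phys_segments(req))
			nvme_unmap_data(dev, req);
	}

	static void nvme_pci_complete_rq(struct request *req)
	{
		nvme_pci_unmap_rq(req);
		nvme_complete_rq(req);
	}
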
1016 struct request *req;
1030 req = nvme_find_rq(nvme_queue_tagset(nvmeq), command_id);
1031 if (unlikely(!req)) {
1038 trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail);
1039 if (!nvme_try_complete_req(req, cqe->status, cqe->result) &&
1040 !blk_mq_add_to_batch(req, iob, nvme_req(req)->status,
1042 nvme_pci_complete_rq(req);
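
Lines 1016-1042 are the CQE handler: the command_id in the completion entry selects the request from the queue's tagset, and finished requests are added to the iopoll completion batch when possible. A sketch; AEN handling and the exact warning text are simplified, the function name is assumed, and nvme_pci_complete_batch stands for the driver's batch-completion callback:

	static inline void nvme_handle_cqe(struct nvme_queue *nvmeq,
					   struct io_comp_batch *iob, u16 idx)
	{
		struct nvme_completion *cqe = &nvmeq->cqes[idx];
		__u16 command_id = READ_ONCE(cqe->command_id);
		struct request *req;

		/* (async-event completions are filtered out before this) */

		req = nvme_find_rq(nvme_queue_tagset(nvmeq), command_id);
		if (unlikely(!req)) {
			dev_warn(nvmeq->dev->ctrl.device,
				 "invalid id %d completed on queue %d\n",
				 command_id, le16_to_cpu(cqe->sq_id));
			return;
		}

		trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail);
		if (!nvme_try_complete_req(req, cqe->status, cqe->result) &&
		    !blk_mq_add_to_batch(req, iob, nvme_req(req)->status,
					 nvme_pci_complete_batch))
			nvme_pci_complete_rq(req);
	}
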
1217 static enum rq_end_io_ret abort_endio(struct request *req, blk_status_t error)
1219 struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
1222 "Abort status: 0x%x", nvme_req(req)->status);
1224 blk_mq_free_request(req);
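
Lines 1217-1224 are the completion callback for the Abort command itself: log the abort status and free the abort request. A sketch; the abort_limit accounting is an assumption about the surrounding controller state:

	static enum rq_end_io_ret abort_endio(struct request *req,
					      blk_status_t error)
	{
		struct nvme_queue *nvmeq = req->mq_hctx->driver_data;

		dev_warn(nvmeq->dev->ctrl.device,
			 "Abort status: 0x%x", nvme_req(req)->status);
		/* allow another abort to be issued on this controller */
		atomic_inc(&nvmeq->dev->ctrl.abort_limit);
		blk_mq_free_request(req);
		return RQ_END_IO_NONE;
	}
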
1279 static enum blk_eh_timer_return nvme_timeout(struct request *req)
1281 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
1282 struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
1311 nvme_poll(req->mq_hctx, NULL);
1315 if (blk_mq_rq_state(req) != MQ_RQ_IN_FLIGHT) {
1318 req->tag, nvme_cid(req), nvmeq->qid);
1335 req->tag, nvme_cid(req), nvmeq->qid);
1336 nvme_req(req)->flags |= NVME_REQ_CANCELLED;
1350 opcode = nvme_req(req)->cmd->common.opcode;
1354 req->tag, nvme_cid(req), opcode,
1356 nvme_req(req)->flags |= NVME_REQ_CANCELLED;
1367 cmd.abort.cid = nvme_cid(req);
1372 req->tag, nvme_cid(req), opcode, nvme_get_opcode_str(opcode),
1373 nvmeq->qid, blk_op_str(req_op(req)), req_op(req),
1374 blk_rq_bytes(req));
1389 * The aborted req will be completed on receiving the abort req.
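
Lines 1350-1374 build and fire an Abort admin command for the stuck request. The helper below is hypothetical packaging of that step (in the driver it lives inline in nvme_timeout()), shown only to make the cid/sqid wiring from the listing concrete:

	/* Hypothetical helper: issue an Abort for 'stuck_req' on 'nvmeq'.
	 * Only the cid/sqid assignments are taken from the listing. */
	static int nvme_pci_send_abort(struct nvme_dev *dev,
				       struct nvme_queue *nvmeq,
				       struct request *stuck_req)
	{
		struct nvme_command cmd = { };
		struct request *abort_req;

		cmd.abort.opcode = nvme_admin_abort_cmd;
		cmd.abort.cid = nvme_cid(stuck_req);
		cmd.abort.sqid = cpu_to_le16(nvmeq->qid);

		abort_req = blk_mq_alloc_request(dev->ctrl.admin_q,
						 nvme_req_op(&cmd),
						 BLK_MQ_REQ_NOWAIT);
		if (IS_ERR(abort_req))
			return PTR_ERR(abort_req);
		nvme_init_request(abort_req, &cmd);

		abort_req->end_io = abort_endio;
		abort_req->end_io_data = NULL;
		blk_execute_rq_nowait(abort_req, false);
		return 0;
	}
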
2383 static enum rq_end_io_ret nvme_del_queue_end(struct request *req,
2386 struct nvme_queue *nvmeq = req->end_io_data;
2388 blk_mq_free_request(req);
2393 static enum rq_end_io_ret nvme_del_cq_end(struct request *req,
2396 struct nvme_queue *nvmeq = req->end_io_data;
2401 return nvme_del_queue_end(req, error);
2407 struct request *req;
2413 req = blk_mq_alloc_request(q, nvme_req_op(&cmd), BLK_MQ_REQ_NOWAIT);
2414 if (IS_ERR(req))
2415 return PTR_ERR(req);
2416 nvme_init_request(req, &cmd);
2419 req->end_io = nvme_del_cq_end;
2421 req->end_io = nvme_del_queue_end;
2422 req->end_io_data = nvmeq;
2425 blk_execute_rq_nowait(req, false);
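
Lines 2407-2425 are the asynchronous queue-deletion submission: a Delete SQ or Delete CQ admin command is allocated without blocking, given the matching end_io callback, and executed nowait so all queues can be torn down in parallel during shutdown. A sketch; the function name and the delete_done completion field are assumptions about struct nvme_queue:

	static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
	{
		struct request_queue *q = nvmeq->dev->ctrl.admin_q;
		struct request *req;
		struct nvme_command cmd = { };

		cmd.delete_queue.opcode = opcode;
		cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid);

		req = blk_mq_alloc_request(q, nvme_req_op(&cmd),
					   BLK_MQ_REQ_NOWAIT);
		if (IS_ERR(req))
			return PTR_ERR(req);
		nvme_init_request(req, &cmd);

		/* Deleting a CQ chains into deleting its SQ via the end_io
		 * callbacks; end_io_data carries the queue being deleted. */
		if (opcode == nvme_admin_delete_cq)
			req->end_io = nvme_del_cq_end;
		else
			req->end_io = nvme_del_queue_end;
		req->end_io_data = nvmeq;

		init_completion(&nvmeq->delete_done);
		blk_execute_rq_nowait(req, false);
		return 0;
	}
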