Lines matching references to rq (struct request) in the NVMe/TCP host driver, drivers/nvme/host/tcp.c:

269 struct request *rq;
274 rq = blk_mq_rq_from_pdu(req);
276 return rq_data_dir(rq) == WRITE && req->data_len &&
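
The blk_mq_rq_from_pdu()/blk_mq_rq_to_pdu() pairs seen throughout this listing work because blk-mq allocates the driver's per-command PDU directly behind struct request (sized by the tag set's cmd_size). A minimal sketch of the round trip; the function name is illustrative:

    #include <linux/blk-mq.h>

    /* Sketch: both conversions are constant-time pointer arithmetic,
     * roughly rq + 1 and pdu - 1. */
    static bool pdu_roundtrip_is_identity(struct request *rq)
    {
            void *pdu = blk_mq_rq_to_pdu(rq);

            return blk_mq_rq_from_pdu(pdu) == rq;
    }
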
311 struct request *rq = blk_mq_rq_from_pdu(req);
317 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
318 vec = &rq->special_vec;
320 size = blk_rq_payload_bytes(rq);
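
The RQF_SPECIAL_PAYLOAD branch at lines 317-320 exists because requests such as discards carry their payload in the single rq->special_vec bvec rather than in the bio chain, and blk_rq_payload_bytes() already accounts for that. A hedged sketch of the same decision (the helper name is illustrative and the bvec handling is simplified):

    #include <linux/blk-mq.h>

    /* Sketch: pick the bvec array and payload size for a request. */
    static struct bio_vec *payload_vec(struct request *rq, unsigned int *bytes)
    {
            *bytes = blk_rq_payload_bytes(rq);  /* special_vec length or blk_rq_bytes() */

            if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
                    return &rq->special_vec;    /* e.g. a discard range descriptor */

            return rq->bio->bi_io_vec;          /* bvec array of the current bio (sketch only) */
    }
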
505 struct request *rq, unsigned int hctx_idx)
507 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
513 struct request *rq, unsigned int hctx_idx,
517 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
531 nvme_req(rq)->ctrl = &ctrl->ctrl;
532 nvme_req(rq)->cmd = &pdu->cmd;
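
Lines 505-532 come from the driver's .init_request callback. The wiring at 531-532 works because the core's struct nvme_request (drivers/nvme/host/nvme.h) must sit at the start of the transport PDU: nvme_req() is just blk_mq_rq_to_pdu() cast, and nvme_req(rq)->cmd is aimed at the command image the transport will put on the wire, so nvme_setup_cmd() later writes the SQE straight into it. A hedged illustration of that layout (the struct name is made up):

    /* Sketch of the PDU layout assumed by nvme_req(rq). */
    struct my_transport_request {
            struct nvme_request     req;    /* must be first: nvme_req(rq) aliases the PDU */
            /* ... transport-private state, including the on-the-wire command PDU ... */
    };
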
587 struct request *rq;
589 rq = nvme_find_rq(nvme_tcp_tagset(queue), cqe->command_id);
590 if (!rq) {
598 req = blk_mq_rq_to_pdu(rq);
602 if (!nvme_try_complete_req(rq, req->status, cqe->result))
603 nvme_complete_rq(rq);
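
Lines 587-603 are the CQE completion path. nvme_cid() (line 694) packs the blk-mq tag into the command identifier (with a generation counter in the upper bits on recent kernels), and nvme_find_rq() reverses that lookup, so a completion can be matched to its request without per-queue tracking. A hedged sketch of the pattern, using the real nvme host-core helpers but with error reporting trimmed:

    #include <linux/nvme.h>
    #include "nvme.h"       /* drivers/nvme/host: nvme_find_rq(), nvme_try_complete_req() */

    static void complete_from_cqe(struct blk_mq_tags *tags, struct nvme_completion *cqe)
    {
            struct request *rq = nvme_find_rq(tags, cqe->command_id);

            if (!rq)
                    return;         /* stale or bogus command id */

            /* If the remote completion path declined, finish the request here. */
            if (!nvme_try_complete_req(rq, cqe->status, cqe->result))
                    nvme_complete_rq(rq);
    }
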
612 struct request *rq;
614 rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
615 if (!rq) {
622 if (!blk_rq_payload_bytes(rq)) {
625 nvme_tcp_queue_id(queue), rq->tag);
635 nvme_tcp_queue_id(queue), rq->tag);
669 struct request *rq = blk_mq_rq_from_pdu(req);
694 data->command_id = nvme_cid(rq);
703 struct request *rq;
707 rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
708 if (!rq) {
714 req = blk_mq_rq_to_pdu(rq);
719 rq->tag, r2t_length);
726 rq->tag, r2t_length, req->data_len, req->data_sent);
733 rq->tag, r2t_offset, req->data_sent);
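
The dev_err() fragments at lines 719-733 correspond to the R2T sanity checks: the controller may only solicit data that is non-empty, fits inside the request payload, and starts where the host left off. A hedged sketch of those checks:

    #include <linux/errno.h>
    #include <linux/types.h>

    /* Sketch: validate an R2T (ready-to-transfer) solicitation against the
     * request's payload size and the amount of data already sent. */
    static int r2t_is_sane(u32 r2t_offset, u32 r2t_length,
                           u32 data_len, u32 data_sent)
    {
            if (!r2t_length)
                    return -EPROTO;         /* empty solicitation */
            if (data_sent + r2t_length > data_len)
                    return -EPROTO;         /* exceeds the payload */
            if (r2t_offset < data_sent)
                    return -EPROTO;         /* overlaps data already sent */
            return 0;
    }
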
798 static inline void nvme_tcp_end_request(struct request *rq, u16 status)
802 if (!nvme_try_complete_req(rq, cpu_to_le16(status << 1), res))
803 nvme_complete_rq(rq);
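
nvme_tcp_end_request() (lines 798-803) shifts the status left by one because the 16-bit completion status word keeps the phase tag in bit 0 and the status code in bits 15:1; nvme_try_complete_req() performs the matching right shift when it records nvme_req(rq)->status. A hedged sketch of such a transport end-request helper:

    #include <linux/nvme.h>
    #include "nvme.h"       /* drivers/nvme/host core helpers */

    /* Sketch: finish a request with a driver-generated NVMe status code.
     * The status occupies bits 15:1 of the completion status word (bit 0
     * is the phase tag), hence the "<< 1". */
    static void end_request_sketch(struct request *rq, u16 status)
    {
            union nvme_result res = { };

            if (!nvme_try_complete_req(rq, cpu_to_le16(status << 1), res))
                    nvme_complete_rq(rq);
    }
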
810 struct request *rq =
812 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
831 nvme_tcp_queue_id(queue), rq->tag);
851 nvme_tcp_queue_id(queue), rq->tag);
866 nvme_tcp_end_request(rq,
897 struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
899 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
910 struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
912 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
914 nvme_tcp_end_request(rq, le16_to_cpu(req->status));
2435 static void nvme_tcp_complete_timed_out(struct request *rq)
2437 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2441 nvmf_complete_timed_out_request(rq);
2444 static enum blk_eh_timer_return nvme_tcp_timeout(struct request *rq)
2446 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2454 rq->tag, nvme_cid(rq), pdu->hdr.type, cmd->common.opcode,
2471 nvme_tcp_complete_timed_out(rq);
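
Lines 2435-2471 are the blk-mq timeout hook. The shape is common to the fabrics transports: if the controller is live, hand the problem to error recovery and re-arm the timer; if it is already tearing down or reconnecting, fail the request on the spot so teardown cannot get stuck waiting for it. A hedged sketch:

    #include <linux/blk-mq.h>
    #include "fabrics.h"    /* drivers/nvme/host: nvmf_complete_timed_out_request() */

    static enum blk_eh_timer_return timeout_sketch(struct request *rq, bool ctrl_live)
    {
            if (ctrl_live) {
                    /* kick controller reset / error recovery here */
                    return BLK_EH_RESET_TIMER;
            }

            /* completes rq with NVME_SC_HOST_ABORTED_CMD if still in flight */
            nvmf_complete_timed_out_request(rq);
            return BLK_EH_DONE;
    }
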
2484 struct request *rq)
2486 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2492 if (!blk_rq_nr_phys_segments(rq))
2494 else if (rq_data_dir(rq) == WRITE &&
2504 struct request *rq)
2506 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2512 ret = nvme_setup_cmd(ns, rq);
2523 req->data_len = blk_rq_nr_phys_segments(rq) ?
2524 blk_rq_payload_bytes(rq) : 0;
2525 req->curr_bio = rq->bio;
2527 nvme_tcp_init_iter(req, rq_data_dir(rq));
2529 if (rq_data_dir(rq) == WRITE &&
2546 ret = nvme_tcp_map_data(queue, rq);
2548 nvme_cleanup_cmd(rq);
2570 struct request *rq = bd->rq;
2571 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2575 if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
2576 return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);
2578 ret = nvme_tcp_setup_cmd_pdu(ns, rq);
2582 nvme_start_request(rq);
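
The final block (lines 2570-2582) is the .queue_rq entry point. Stripped of the TCP-specific PDU handling, the flow is the standard fabrics one: readiness check, command setup, nvme_start_request(), then hand-off to the transport's send path. A simplified, hedged sketch; the PDU and data mapping shown at lines 2484-2548 are folded into the nvme_setup_cmd() call here:

    #include <linux/blk-mq.h>
    #include "nvme.h"       /* drivers/nvme/host core helpers */

    static blk_status_t queue_rq_sketch(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
                                        struct request *rq, bool queue_ready)
    {
            blk_status_t ret;

            if (!nvme_check_ready(ctrl, rq, queue_ready))
                    return nvme_fail_nonready_command(ctrl, rq);

            ret = nvme_setup_cmd(ns, rq);   /* fills nvme_req(rq)->cmd from rq */
            if (unlikely(ret))
                    return ret;

            nvme_start_request(rq);         /* starts the blk-mq timeout clock */

            /* ... queue the command PDU on the transport's send path ... */
            return BLK_STS_OK;
    }
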