Lines matching "rqst" in net/sunrpc/xprtrdma/rpc_rdma.c (each entry is the source line number followed by the matching line)

127 				struct rpc_rqst *rqst)
129 struct xdr_buf *xdr = &rqst->rq_snd_buf;
159 struct rpc_rqst *rqst)
161 return rqst->rq_rcv_buf.buflen <= r_xprt->rx_ep->re_max_inline_recv;
170 const struct rpc_rqst *rqst)
172 const struct xdr_buf *buf = &rqst->rq_rcv_buf;
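
The three matches above come from the transport's inline-eligibility helpers: rpcrdma_args_inline() (around line 127) tests whether the call in rq_snd_buf fits under the inline send threshold, rpcrdma_results_inline() (line 161) compares rq_rcv_buf.buflen against re_max_inline_recv, and rpcrdma_nonpayload_inline() (line 170) asks the same question of the reply minus its data payload. Below is a minimal user-space sketch of that comparison pattern; the struct shapes and field names are stand-ins modeled on the kernel's, not the real definitions, and the kernel's args_inline also handles a forced-read-chunk case that is elided here.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Stand-ins for the kernel's xdr_buf and rpcrdma_ep (assumed shapes). */
    struct xdr_buf_model {
        size_t len;     /* bytes queued to send */
        size_t buflen;  /* capacity reserved for the received reply */
    };

    struct ep_model {
        size_t max_inline_send; /* negotiated inline send threshold */
        size_t max_inline_recv; /* negotiated inline receive threshold */
    };

    /* Shape of rpcrdma_args_inline(): does the call fit in one inline Send? */
    static bool args_inline(const struct ep_model *ep,
                            const struct xdr_buf_model *snd)
    {
        return snd->len <= ep->max_inline_send;
    }

    /* Shape of rpcrdma_results_inline(): can the whole reply arrive inline? */
    static bool results_inline(const struct ep_model *ep,
                               const struct xdr_buf_model *rcv)
    {
        return rcv->buflen <= ep->max_inline_recv;
    }

    int main(void)
    {
        struct ep_model ep = { .max_inline_send = 4096, .max_inline_recv = 4096 };
        struct xdr_buf_model snd = { .len = 512 }, rcv = { .buflen = 8192 };

        printf("args inline: %d, results inline: %d\n",
               args_inline(&ep, &snd), results_inline(&ep, &rcv));
        return 0;
    }
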
335 struct rpc_rqst *rqst,
347 pos = rqst->rq_snd_buf.head[0].iov_len;
351 nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_snd_buf, pos,
364 trace_xprtrdma_chunk_read(rqst->rq_task, pos, mr, nsegs);
392 struct rpc_rqst *rqst,
406 nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf,
407 rqst->rq_rcv_buf.head[0].iov_len,
428 trace_xprtrdma_chunk_write(rqst->rq_task, mr, nsegs);
435 if (xdr_pad_size(rqst->rq_rcv_buf.page_len)) {
439 trace_xprtrdma_chunk_wp(rqst->rq_task, ep->re_write_pad_mr,
470 struct rpc_rqst *rqst,
486 nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf, 0, wtype, seg);
506 trace_xprtrdma_chunk_reply(rqst->rq_task, mr, nsegs);
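
Lines 335-506 are the three chunk encoders, and the interesting difference among them is the starting offset each passes to rpcrdma_convert_iovs(): the Read list starts at rq_snd_buf.head[0].iov_len (line 347) so the RPC/RDMA and RPC headers still travel inline, the Write list starts at rq_rcv_buf.head[0].iov_len (line 407) so only the reply's data payload is offered for direct placement, and the Reply chunk passes offset 0 (line 486) because the entire reply body lands in the chunk. The toy sketch below illustrates that offset convention with an assumed flat model of xdr_buf rather than real iovec conversion.

    #include <stddef.h>
    #include <stdio.h>

    /* Assumed flat model of an xdr_buf: head iovec, page list, tail iovec. */
    struct buf_model {
        size_t head_len;
        size_t page_len;
        size_t tail_len;
    };

    /*
     * Models the offset convention above: bytes before "pos" stay inline in
     * the RDMA Send; bytes from "pos" onward would be handed to
     * rpcrdma_convert_iovs() to become registered chunk segments.
     */
    static void split_at(const char *which, const struct buf_model *b, size_t pos)
    {
        size_t total = b->head_len + b->page_len + b->tail_len;
        size_t inline_bytes = pos < total ? pos : total;

        printf("%s: %zu bytes inline, %zu bytes in chunks\n",
               which, inline_bytes, total - inline_bytes);
    }

    int main(void)
    {
        struct buf_model call  = { .head_len = 128, .page_len = 65536 };
        struct buf_model reply = { .head_len = 128, .page_len = 65536,
                                   .tail_len = 16 };

        split_at("Read list", &call, call.head_len);    /* pos = head[0].iov_len */
        split_at("Write list", &reply, reply.head_len); /* payload after the head */
        split_at("Reply chunk", &reply, 0);             /* the entire reply */
        return 0;
    }
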
843 * @rqst: RPC request to be marshaled
845 * For the RPC in "rqst", this function:
860 rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
862 struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
865 struct xdr_buf *buf = &rqst->rq_snd_buf;
870 if (unlikely(rqst->rq_rcv_buf.flags & XDRBUF_SPARSE_PAGES)) {
871 ret = rpcrdma_alloc_sparse_pages(&rqst->rq_rcv_buf);
878 rqst);
885 *p++ = rqst->rq_xid;
894 &rqst->rq_cred->cr_auth->au_flags);
905 if (rpcrdma_results_inline(r_xprt, rqst))
907 else if ((ddp_allowed && rqst->rq_rcv_buf.flags & XDRBUF_READ) &&
908 rpcrdma_nonpayload_inline(r_xprt, rqst))
927 if (rpcrdma_args_inline(r_xprt, rqst)) {
962 ret = rpcrdma_encode_read_list(r_xprt, req, rqst, rtype);
965 ret = rpcrdma_encode_write_list(r_xprt, req, rqst, wtype);
968 ret = rpcrdma_encode_reply_chunk(r_xprt, req, rqst, wtype);
981 trace_xprtrdma_marshal_failed(rqst, ret);
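
Within rpcrdma_marshal_req() (lines 860-981), lines 905-908 pick the reply's chunk type: inline when rpcrdma_results_inline() says the whole reply fits, a Write list when direct data placement is allowed for a READ-style payload and the rest of the reply is inline-sized, and a Reply chunk otherwise; lines 962-968 then emit the Read list, Write list, and Reply chunk. The condensed sketch below shows that selection ladder; the constant names follow the kernel's enum rpcrdma_chunktype, but the predicates are reduced to plain booleans here.

    #include <stdbool.h>
    #include <stdio.h>

    /* Constant names follow the kernel's enum rpcrdma_chunktype; the values
     * here are illustrative only. */
    enum chunktype { noch, readch, writech, replych };

    /*
     * Condensed form of the wtype selection in rpcrdma_marshal_req():
     * prefer inline, then a Write list covering just the data payload when
     * direct data placement (DDP) is permitted, else a full Reply chunk.
     */
    static enum chunktype pick_wtype(bool results_inline, bool ddp_allowed,
                                     bool has_read_payload, bool nonpayload_inline)
    {
        if (results_inline)
            return noch;
        if (ddp_allowed && has_read_payload && nonpayload_inline)
            return writech;
        return replych;
    }

    int main(void)
    {
        /* A READ-style payload with DDP allowed selects a Write list. */
        printf("wtype = %d (expect %d)\n",
               pick_wtype(false, true, true, true), writech);
        return 0;
    }

The fixed encode order afterwards (Read list at line 962, Write list at 965, Reply chunk at 968) matches the positional layout of the RPC/RDMA header: a receiver parses the three lists in exactly that sequence.
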
1022 * rpcrdma_inline_fixup - Scatter inline received data into rqst's iovecs
1023 * @rqst: controlling RPC request
1040 rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
1051 rqst->rq_rcv_buf.head[0].iov_base = srcp;
1052 rqst->rq_private_buf.head[0].iov_base = srcp;
1057 curlen = rqst->rq_rcv_buf.head[0].iov_len;
1063 ppages = rqst->rq_rcv_buf.pages +
1064 (rqst->rq_rcv_buf.page_base >> PAGE_SHIFT);
1065 page_base = offset_in_page(rqst->rq_rcv_buf.page_base);
1067 if (copy_len && rqst->rq_rcv_buf.page_len) {
1070 pagelist_len = rqst->rq_rcv_buf.page_len;
1106 rqst->rq_rcv_buf.tail[0].iov_base = srcp;
1107 rqst->rq_private_buf.tail[0].iov_base = srcp;
1111 trace_xprtrdma_fixup(rqst, fixup_copy_count);
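
rpcrdma_inline_fixup() (lines 1040-1111) scatters a contiguous inline reply into the receive buffer in three steps, and avoids copying where it can: the head (lines 1051-1052) and tail (lines 1106-1107) iovecs are simply repointed at the receive buffer, while only the page list in between is memcpy'd. At the call site (line 1285), the pad argument writelist & 3 is the XDR round-up of the write-list payload. A standalone sketch of that scatter order follows, with simplified types; the real code also handles page_base offsets and per-page copies, which are collapsed into one flat region here.

    #include <stdio.h>
    #include <string.h>

    struct iovec_model { char *base; size_t len; };

    /* Simplified receive buffer: head iovec, one flat page area, tail iovec. */
    struct rcvbuf_model {
        struct iovec_model head;
        char *pages;
        size_t page_len;
        struct iovec_model tail;
    };

    /*
     * Scatter srcp (the inline reply) into the buffer in the same order as
     * rpcrdma_inline_fixup(): head first, then the page area, then the tail.
     * Head and tail are repointed at the receive buffer (no copy); only the
     * page area is copied.
     */
    static size_t inline_fixup(struct rcvbuf_model *buf, char *srcp,
                               size_t copy_len)
    {
        size_t copied = 0, curlen;

        /* Head: zero-copy pointer fixup, as at lines 1051-1052. */
        curlen = buf->head.len < copy_len ? buf->head.len : copy_len;
        buf->head.base = srcp;
        srcp += curlen;
        copy_len -= curlen;

        /* Page area: memcpy the payload, as the pagelist loop does. */
        if (copy_len && buf->page_len) {
            curlen = buf->page_len < copy_len ? buf->page_len : copy_len;
            memcpy(buf->pages, srcp, curlen);
            copied += curlen;
            srcp += curlen;
            copy_len -= curlen;
        }

        /* Tail: another pointer fixup, as at lines 1106-1107. */
        buf->tail.base = srcp;
        buf->tail.len = copy_len;

        return copied;  /* bytes memcpy'd, cf. fixup_copy_count at line 1111 */
    }

    int main(void)
    {
        static char reply[] = "hdr-bytespayload-bytes!tail";
        static char pagebuf[16];
        struct rcvbuf_model buf = {
            .head = { .len = 9 }, .pages = pagebuf, .page_len = 14,
        };

        printf("copied %zu bytes\n", inline_fixup(&buf, reply, strlen(reply)));
        return 0;
    }
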
1263 struct rpc_rqst *rqst)
1281 /* Build the RPC reply's Payload stream in rqst->rq_rcv_buf */
1285 rpcrdma_inline_fixup(rqst, base, rpclen, writelist & 3);
1318 struct rpc_rqst *rqst)
1332 trace_xprtrdma_err_vers(rqst, p, p + 1);
1335 trace_xprtrdma_err_chunk(rqst);
1338 trace_xprtrdma_err_unrecognized(rqst, p);
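
rpcrdma_decode_error() (lines 1318-1338) handles an RDMA_ERROR reply: an ERR_VERS body carries the peer's lowest and highest supported protocol versions (hence the two words p and p + 1 passed to the tracepoint on line 1332), ERR_CHUNK means the peer could not parse the chunk lists, and anything else is traced as unrecognized. Here is a hedged user-space sketch of parsing such an error body; the numeric codes and layout follow RFC 8166's rpc_rdma_errcode, not a copy of the kernel's decoder.

    #include <arpa/inet.h>  /* ntohl(), htonl() */
    #include <stdint.h>
    #include <stdio.h>

    /* Error codes from RFC 8166 (the kernel's ERR_VERS and ERR_CHUNK). */
    enum { RDMA_ERR_VERS = 1, RDMA_ERR_CHUNK = 2 };

    /*
     * Decode an RDMA_ERROR body: one errcode word, then, for ERR_VERS only,
     * the lowest and highest protocol versions the peer supports.
     */
    static int decode_error_body(const uint32_t *p, size_t nwords)
    {
        if (nwords < 1)
            return -1;
        switch (ntohl(p[0])) {
        case RDMA_ERR_VERS:
            if (nwords < 3)
                return -1;
            fprintf(stderr, "peer supports versions %u..%u\n",
                    ntohl(p[1]), ntohl(p[2]));
            return 0;
        case RDMA_ERR_CHUNK:
            fprintf(stderr, "peer could not parse our chunk lists\n");
            return 0;
        default:
            fprintf(stderr, "unrecognized error code %u\n", ntohl(p[0]));
            return -1;
        }
    }

    int main(void)
    {
        uint32_t body[] = { htonl(RDMA_ERR_VERS), htonl(1), htonl(1) };

        return decode_error_body(body, 3) ? 1 : 0;
    }
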
1345 * rpcrdma_unpin_rqst - Release rqst without completing it
1355 struct rpc_rqst *rqst = rep->rr_rqst;
1356 struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
1362 xprt_unpin_rqst(rqst);
1367 * rpcrdma_complete_rqst - Pass completed rqst back to RPC
1371 * while @rqst is still pinned to ensure the rep, rqst, and
1378 struct rpc_rqst *rqst = rep->rr_rqst;
1383 status = rpcrdma_decode_msg(r_xprt, rep, rqst);
1389 status = rpcrdma_decode_error(r_xprt, rep, rqst);
1399 xprt_complete_rqst(rqst->rq_task, status);
1400 xprt_unpin_rqst(rqst);
1407 rqst->rq_task->tk_status = status;
1433 struct rpc_rqst *rqst;
1464 rqst = xprt_lookup_rqst(xprt, rep->rr_xid);
1465 if (!rqst)
1467 xprt_pin_rqst(rqst);
1479 req = rpcr_to_rdmar(rqst);
1483 rep->rr_rqst = rqst;
1485 trace_xprtrdma_reply(rqst->rq_task, rep, credits);
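
The final group (lines 1433-1485) is the reply handler: it matches the incoming reply to a pending rqst by XID with xprt_lookup_rqst() (line 1464), drops the reply if no request matches, and otherwise pins the rqst with xprt_pin_rqst() (line 1467) so it cannot be retired while the reply is decoded; rpcrdma_complete_rqst() (lines 1378-1400) later hands the result to the RPC layer via xprt_complete_rqst() and releases the pin. A minimal model of that lookup-pin-complete lifecycle follows; the struct, the flat lookup array, and the pin counter are illustrative stand-ins, not the SUNRPC data structures.

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct rqst_model {
        uint32_t xid;
        int pinned;  /* models the xprt_pin_rqst()/xprt_unpin_rqst() pair */
        int status;  /* models the value passed to xprt_complete_rqst() */
    };

    /* Models xprt_lookup_rqst(): find the pending request for this XID. */
    static struct rqst_model *lookup_rqst(struct rqst_model *tbl, size_t n,
                                          uint32_t xid)
    {
        for (size_t i = 0; i < n; i++)
            if (tbl[i].xid == xid)
                return &tbl[i];
        return NULL;  /* no match: the reply is dropped, as in the kernel */
    }

    int main(void)
    {
        struct rqst_model pending[] = { { .xid = 0x1234 }, { .xid = 0x5678 } };
        struct rqst_model *rqst;

        /* 1. Match the reply to a request by XID (xprt_lookup_rqst). */
        rqst = lookup_rqst(pending, 2, 0x5678);
        if (!rqst)
            return 1;

        /* 2. Pin it so it stays valid while the reply is decoded
         *    (xprt_pin_rqst). */
        rqst->pinned++;

        /* 3. Hand the result back and drop the pin (xprt_complete_rqst,
         *    then xprt_unpin_rqst). */
        rqst->status = 0;
        rqst->pinned--;

        printf("xid 0x%x completed, status %d\n", rqst->xid, rqst->status);
        return 0;
    }
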