Search scope: /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/net/sunrpc/xprtrdma/

Lines Matching defs:rqst

170 rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
173 struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
174 struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_task->tk_xprt);
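
These fragments come from the RPC-over-RDMA marshaling code (rpc_rdma.c in mainline 2.6.36). rpcrdma_create_chunks() (source lines 170-174) walks an xdr_buf and emits one chunk entry in the RPC/RDMA header per registered memory segment; rpcrdma_marshal_req() later calls it with either rq_snd_buf or rq_rcv_buf (source lines 507-513). A minimal user-space sketch of that per-segment shape; struct seg, struct chunk, and register_segment() are illustrative stand-ins, not the kernel's types:

    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative stand-ins for the kernel's segment and chunk encoding. */
    struct seg   { void *addr; size_t len; };
    struct chunk { uint32_t handle; uint32_t length; uint64_t offset; };

    /* Hypothetical registration hook: returns an RDMA handle for a segment. */
    static uint32_t register_segment(const struct seg *s) { (void)s; return 42; }

    /* Emit one chunk entry per segment, mirroring the loop structure of
     * rpcrdma_create_chunks(); the kernel returns the resulting header
     * length, here we just return the entry count. */
    static int create_chunks(const struct seg *segs, int nsegs, struct chunk *out)
    {
        int i;
        for (i = 0; i < nsegs; i++) {
            out[i].handle = register_segment(&segs[i]);
            out[i].length = (uint32_t)segs[i].len;
            out[i].offset = (uint64_t)(uintptr_t)segs[i].addr;
        }
        return i;
    }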
293 rpcrdma_inline_pullup(struct rpc_rqst *rqst, int pad)
298 struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
300 destp = rqst->rq_svec[0].iov_base;
301 curlen = rqst->rq_svec[0].iov_len;
308 if (pad < 0 || rqst->rq_slen - curlen < RPCRDMA_INLINE_PAD_THRESH)
312 __func__, pad, destp, rqst->rq_slen, curlen);
314 copy_len = rqst->rq_snd_buf.page_len;
316 if (rqst->rq_snd_buf.tail[0].iov_len) {
317 curlen = rqst->rq_snd_buf.tail[0].iov_len;
318 if (destp + copy_len != rqst->rq_snd_buf.tail[0].iov_base) {
320 rqst->rq_snd_buf.tail[0].iov_base, curlen);
325 rqst->rq_svec[0].iov_len += curlen;
329 npages = PAGE_ALIGN(rqst->rq_snd_buf.page_base+copy_len) >> PAGE_SHIFT;
332 curlen = PAGE_SIZE - rqst->rq_snd_buf.page_base;
339 srcp = kmap_atomic(rqst->rq_snd_buf.pages[i],
342 memcpy(destp, srcp+rqst->rq_snd_buf.page_base, curlen);
346 rqst->rq_svec[0].iov_len += curlen;
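
rpcrdma_inline_pullup() (source lines 293-346) gathers the send buffer into one contiguous region so it can go out as inline data: the tail iovec is parked after where the page data will land, then each page is copied down behind the head. A compilable user-space sketch of that copy pattern, assuming a simplified struct buf model of the xdr_buf; the pad/threshold early-exit at source line 308 and the kmap_atomic() page mapping are omitted:

    #include <string.h>
    #include <stddef.h>

    #define PAGE_SIZE 4096

    /* Simplified model of the xdr_buf pieces these fragments touch. */
    struct buf {
        char  *head;   size_t head_len;             /* rq_svec[0]           */
        char **pages;  size_t page_base, page_len;  /* rq_snd_buf page list */
        char  *tail;   size_t tail_len;             /* rq_snd_buf.tail[0]   */
    };

    static void inline_pullup(struct buf *b)
    {
        char *destp = b->head + b->head_len;
        size_t copy_len = b->page_len, base = b->page_base;
        size_t curlen, i = 0;

        /* Park the tail after where the page data will land (lines 316-325). */
        if (b->tail_len) {
            if (b->tail != destp + copy_len)
                memmove(destp + copy_len, b->tail, b->tail_len);
            b->head_len += b->tail_len;
        }

        /* Copy page data down behind the head; only the first page
         * starts at a non-zero offset (lines 329-346). */
        while (copy_len) {
            curlen = PAGE_SIZE - base;
            if (curlen > copy_len)
                curlen = copy_len;
            memcpy(destp, b->pages[i++] + base, curlen);
            destp += curlen;
            b->head_len += curlen;
            copy_len -= curlen;
            base = 0;
        }
        b->page_len = 0;
    }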
368 rpcrdma_marshal_req(struct rpc_rqst *rqst)
370 struct rpc_xprt *xprt = rqst->rq_task->tk_xprt;
372 struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
382 base = rqst->rq_svec[0].iov_base;
383 rpclen = rqst->rq_svec[0].iov_len;
388 headerp->rm_xid = rqst->rq_xid;
412 if (rqst->rq_rcv_buf.buflen <= RPCRDMA_INLINE_READ_THRESHOLD(rqst))
414 else if (rqst->rq_rcv_buf.page_len == 0)
416 else if (rqst->rq_rcv_buf.flags & XDRBUF_READ)
435 if (rqst->rq_snd_buf.len <= RPCRDMA_INLINE_WRITE_THRESHOLD(rqst))
437 else if (rqst->rq_snd_buf.page_len == 0)
451 __func__, rqst->rq_rcv_buf.len, rqst->rq_snd_buf.len);
465 padlen = rpcrdma_inline_pullup(rqst,
466 RPCRDMA_INLINE_PAD_VALUE(rqst));
471 htonl(RPCRDMA_INLINE_PAD_VALUE(rqst));
485 rpclen = rqst->rq_svec[0].iov_len;
507 hdrlen = rpcrdma_create_chunks(rqst,
508 &rqst->rq_snd_buf, headerp, rtype);
512 hdrlen = rpcrdma_create_chunks(rqst,
513 &rqst->rq_rcv_buf, headerp, wtype);
549 req->rl_send_iov[3].length = rqst->rq_slen - rpclen;
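
The rpcrdma_marshal_req() fragments (source lines 368-549) include the two threshold decisions that pick a chunk type: the receive buffer selects the write/reply chunk type (lines 412-416) and the send buffer selects the read chunk type (lines 435-437), each falling back to inline when the buffer fits under the negotiated threshold. A condensed sketch of those two decisions; the enum values are named after the kernel's chunk types, but the fixed thresholds are stand-ins for the per-transport RPCRDMA_INLINE_READ_THRESHOLD()/RPCRDMA_INLINE_WRITE_THRESHOLD() macros:

    #include <stddef.h>

    enum chunktype { rpcrdma_noch, rpcrdma_readch, rpcrdma_areadch,
                     rpcrdma_writech, rpcrdma_replych };

    /* Illustrative fixed thresholds; the kernel derives these per transport. */
    #define INLINE_READ_THRESHOLD  1024
    #define INLINE_WRITE_THRESHOLD 1024

    struct xdrbuf_model { size_t buflen, len, page_len; int is_read; };

    /* wtype decision, lines 412-416: small replies stay inline, page-less
     * replies use a single reply chunk, read-class ops use write chunks. */
    static enum chunktype choose_wtype(const struct xdrbuf_model *rcv)
    {
        if (rcv->buflen <= INLINE_READ_THRESHOLD)
            return rpcrdma_noch;
        else if (rcv->page_len == 0)
            return rpcrdma_replych;
        else if (rcv->is_read)        /* models the XDRBUF_READ flag test */
            return rpcrdma_writech;
        else
            return rpcrdma_replych;
    }

    /* rtype decision, lines 435-437, applied to the send buffer. */
    static enum chunktype choose_rtype(const struct xdrbuf_model *snd)
    {
        if (snd->len <= INLINE_WRITE_THRESHOLD)
            return rpcrdma_noch;
        else if (snd->page_len == 0)
            return rpcrdma_areadch;
        else
            return rpcrdma_readch;
    }

The chosen type is then fed back into rpcrdma_create_chunks() with the matching buffer (source lines 507-513).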
605 rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
610 curlen = rqst->rq_rcv_buf.head[0].iov_len;
613 rqst->rq_rcv_buf.head[0].iov_len = curlen;
620 rqst->rq_rcv_buf.head[0].iov_base = srcp;
626 rpcx_to_rdmax(rqst->rq_xprt)->rx_stats.fixup_copy_count += olen;
627 if (copy_len && rqst->rq_rcv_buf.page_len) {
628 npages = PAGE_ALIGN(rqst->rq_rcv_buf.page_base +
629 rqst->rq_rcv_buf.page_len) >> PAGE_SHIFT;
632 curlen = PAGE_SIZE - rqst->rq_rcv_buf.page_base;
640 destp = kmap_atomic(rqst->rq_rcv_buf.pages[i],
643 memcpy(destp + rqst->rq_rcv_buf.page_base,
647 flush_dcache_page(rqst->rq_rcv_buf.pages[i]);
654 rqst->rq_rcv_buf.page_len = olen - copy_len;
656 rqst->rq_rcv_buf.page_len = 0;
658 if (copy_len && rqst->rq_rcv_buf.tail[0].iov_len) {
660 if (curlen > rqst->rq_rcv_buf.tail[0].iov_len)
661 curlen = rqst->rq_rcv_buf.tail[0].iov_len;
662 if (rqst->rq_rcv_buf.tail[0].iov_base != srcp)
663 memmove(rqst->rq_rcv_buf.tail[0].iov_base, srcp, curlen);
666 rqst->rq_rcv_buf.tail[0].iov_len = curlen;
669 rqst->rq_rcv_buf.tail[0].iov_len = 0;
673 unsigned char *p = rqst->rq_rcv_buf.tail[0].iov_base;
675 p[rqst->rq_rcv_buf.tail[0].iov_len++] = 0;
684 rqst->rq_private_buf = rqst->rq_rcv_buf;
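
rpcrdma_inline_fixup() (source lines 605-684) is the receive-side inverse of the pullup: a contiguous inline reply is scattered back into the head, pages, and tail of rq_rcv_buf. A compilable sketch of the same scatter, using the same simplified buffer model as the pullup sketch; note the kernel avoids the head copy by repointing head[0].iov_base at the receive buffer (line 620) and maps pages with kmap_atomic():

    #include <string.h>
    #include <stddef.h>

    #define PAGE_SIZE 4096

    struct rbuf {                                   /* models rq_rcv_buf */
        char  *head;   size_t head_len;
        char **pages;  size_t page_base, page_len;
        char  *tail;   size_t tail_len;
    };

    static void inline_fixup(struct rbuf *b, char *srcp, size_t copy_len)
    {
        size_t curlen, base, want, i;

        /* Head first (lines 610-620). */
        curlen = b->head_len < copy_len ? b->head_len : copy_len;
        memcpy(b->head, srcp, curlen);
        b->head_len = curlen;
        srcp += curlen;
        copy_len -= curlen;

        /* Page data next, honoring the first page's offset (lines 627-656). */
        if (copy_len && b->page_len) {
            want = b->page_len < copy_len ? b->page_len : copy_len;
            base = b->page_base;
            b->page_len = want;
            for (i = 0; want; i++) {
                curlen = PAGE_SIZE - base;
                if (curlen > want)
                    curlen = want;
                memcpy(b->pages[i] + base, srcp, curlen);
                srcp += curlen;
                copy_len -= curlen;
                want -= curlen;
                base = 0;
            }
        } else {
            b->page_len = 0;
        }

        /* The remainder goes to the tail; move only if it is not
         * already in place (lines 658-671). */
        if (copy_len && b->tail_len) {
            curlen = copy_len < b->tail_len ? copy_len : b->tail_len;
            if (b->tail != srcp)
                memmove(b->tail, srcp, curlen);
            b->tail_len = curlen;
        } else {
            b->tail_len = 0;
        }
    }

The kernel finishes by mirroring rq_rcv_buf into rq_private_buf (line 684) so the generic RPC code sees the fixed-up buffer.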
731 struct rpc_rqst *rqst;
759 rqst = xprt_lookup_rqst(xprt, headerp->rm_xid);
760 if (rqst == NULL) {
775 req = rpcr_to_rdmar(rqst);
779 __func__, rep, req, rqst, headerp->rm_xid);
825 rpcrdma_inline_fixup(rqst, (char *)iptr, rep->rr_len, rdmalen);
881 __func__, xprt, rqst, status);
882 xprt_complete_rqst(rqst->rq_task, status);
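
The final group (source lines 731-882) is the reply path: the handler pairs the incoming reply with a pending rpc_rqst by XID via xprt_lookup_rqst() (line 759), runs rpcrdma_inline_fixup() on the inline payload (line 825), and finishes with xprt_complete_rqst() (line 882). A minimal sketch of that lookup-then-complete shape; the fixed-size pending table and function names here are hypothetical stand-ins for the transport's request tracking:

    #include <stdint.h>
    #include <stddef.h>

    struct pending { uint32_t xid; int in_use; /* ... reply buffers ... */ };

    #define NPENDING 16
    static struct pending table[NPENDING];

    /* Stand-in for xprt_lookup_rqst(): match the reply header's XID
     * against a pending request (line 759). */
    static struct pending *lookup_rqst(uint32_t xid)
    {
        for (int i = 0; i < NPENDING; i++)
            if (table[i].in_use && table[i].xid == xid)
                return &table[i];
        return NULL;            /* line 760: an unmatched reply is dropped */
    }

    static void reply_handler(uint32_t xid, char *data, size_t len)
    {
        struct pending *rqst = lookup_rqst(xid);
        if (rqst == NULL)
            return;             /* nobody is waiting for this XID */
        (void)data; (void)len;  /* ... inline fixup would go here (line 825) */
        rqst->in_use = 0;       /* analogue of xprt_complete_rqst(), line 882 */
    }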