Lines matching refs:ctxt in net/sunrpc/xprtrdma/svc_rdma_recvfrom.c

87  * from rqstp::rq_pages into ctxt::rc_pages. The consumed elements of
122 struct svc_rdma_recv_ctxt *ctxt;
126 ctxt = kzalloc_node(sizeof(*ctxt), GFP_KERNEL, node);
127 if (!ctxt)
137 svc_rdma_recv_cid_init(rdma, &ctxt->rc_cid);
138 pcl_init(&ctxt->rc_call_pcl);
139 pcl_init(&ctxt->rc_read_pcl);
140 pcl_init(&ctxt->rc_write_pcl);
141 pcl_init(&ctxt->rc_reply_pcl);
143 ctxt->rc_recv_wr.next = NULL;
144 ctxt->rc_recv_wr.wr_cqe = &ctxt->rc_cqe;
145 ctxt->rc_recv_wr.sg_list = &ctxt->rc_recv_sge;
146 ctxt->rc_recv_wr.num_sge = 1;
147 ctxt->rc_cqe.done = svc_rdma_wc_receive;
148 ctxt->rc_recv_sge.addr = addr;
149 ctxt->rc_recv_sge.length = rdma->sc_max_req_size;
150 ctxt->rc_recv_sge.lkey = rdma->sc_pd->local_dma_lkey;
151 ctxt->rc_recv_buf = buffer;
152 svc_rdma_cc_init(rdma, &ctxt->rc_cc);
153 return ctxt;
158 kfree(ctxt);
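The listing omits lines 128-136 of svc_rdma_recv_ctxt_alloc() because they never name ctxt. A hedged reconstruction of that gap, assuming it allocates the inline receive buffer and DMA-maps it for device writes; the locals buffer, addr, and node are the ones the shown lines consume, and the error-label names are guesses:

        /* Assumed shape of the elided lines: size the buffer for the
         * largest inline request and map it so the HCA can write the
         * incoming message into it.
         */
        buffer = kmalloc_node(rdma->sc_max_req_size, GFP_KERNEL, node);
        if (!buffer)
                goto fail1;
        addr = ib_dma_map_single(rdma->sc_pd->device, buffer,
                                 rdma->sc_max_req_size, DMA_FROM_DEVICE);
        if (ib_dma_mapping_error(rdma->sc_pd->device, addr))
                goto fail2;

svc_rdma_recv_ctxt_destroy() at lines 166-169 is the inverse: it unmaps the SGE with ib_dma_unmap_single() and then frees rc_recv_buf and the ctxt itself.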
164 struct svc_rdma_recv_ctxt *ctxt)
166 ib_dma_unmap_single(rdma->sc_pd->device, ctxt->rc_recv_sge.addr,
167 ctxt->rc_recv_sge.length, DMA_FROM_DEVICE);
168 kfree(ctxt->rc_recv_buf);
169 kfree(ctxt);
179 struct svc_rdma_recv_ctxt *ctxt;
183 ctxt = llist_entry(node, struct svc_rdma_recv_ctxt, rc_node);
184 svc_rdma_recv_ctxt_destroy(rdma, ctxt);
196 struct svc_rdma_recv_ctxt *ctxt;
203 ctxt = llist_entry(node, struct svc_rdma_recv_ctxt, rc_node);
204 ctxt->rc_page_count = 0;
205 return ctxt;
211 * @ctxt: object to return to the free list
215 struct svc_rdma_recv_ctxt *ctxt)
217 svc_rdma_cc_release(rdma, &ctxt->rc_cc, DMA_FROM_DEVICE);
222 release_pages(ctxt->rc_pages, ctxt->rc_page_count);
224 pcl_free(&ctxt->rc_call_pcl);
225 pcl_free(&ctxt->rc_read_pcl);
226 pcl_free(&ctxt->rc_write_pcl);
227 pcl_free(&ctxt->rc_reply_pcl);
229 llist_add(&ctxt->rc_node, &rdma->sc_recv_ctxts);
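Lines 183-229 are the two halves of a recycling scheme: svc_rdma_recv_ctxt_get() pops a node off rdma->sc_recv_ctxts and resets rc_page_count, while svc_rdma_recv_ctxt_put() releases the per-RPC resources and pushes the node back with llist_add(). A minimal userspace sketch of the same intrusive pattern; it is single-threaded and hand-rolled, unlike the kernel's lock-free llist, and every name in it is illustrative:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
        struct node *next;
};

struct recv_ctxt {
        struct node link;       /* analogous to rc_node */
        int page_count;         /* analogous to rc_page_count */
};

static struct node *free_list;  /* analogous to rdma->sc_recv_ctxts */

static void put_ctxt(struct recv_ctxt *c)
{
        c->link.next = free_list;       /* like llist_add(), line 229 */
        free_list = &c->link;
}

static struct recv_ctxt *get_ctxt(void)
{
        struct node *n = free_list;     /* like llist_del_first() */

        if (!n)
                return NULL;            /* caller allocates a fresh one */
        free_list = n->next;

        /* llist_entry() is container_of(): recover the enclosing
         * object from the embedded node (lines 183, 203). */
        struct recv_ctxt *c = (struct recv_ctxt *)
                ((char *)n - offsetof(struct recv_ctxt, link));
        c->page_count = 0;              /* reset, as line 204 does */
        return c;
}

int main(void)
{
        struct recv_ctxt *a = calloc(1, sizeof(*a));

        if (!a)
                return 1;
        put_ctxt(a);
        struct recv_ctxt *b = get_ctxt();
        printf("recycled: %s\n", a == b ? "yes" : "no");
        free(b);
        return 0;
}

The point of the intrusive node is that the list costs one pointer embedded in the ctxt and no separate allocation per list membership.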
243 struct svc_rdma_recv_ctxt *ctxt = vctxt;
247 if (ctxt)
248 svc_rdma_recv_ctxt_put(rdma, ctxt);
255 struct svc_rdma_recv_ctxt *ctxt;
264 ctxt = svc_rdma_recv_ctxt_get(rdma);
265 if (!ctxt)
268 trace_svcrdma_post_recv(&ctxt->rc_cid);
269 ctxt->rc_recv_wr.next = recv_chain;
270 recv_chain = &ctxt->rc_recv_wr;
284 ctxt = container_of(bad_wr, struct svc_rdma_recv_ctxt,
287 svc_rdma_recv_ctxt_put(rdma, ctxt);
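Lines 264-287 come from the Receive-posting path. A hedged sketch of the complete pattern; the function name, the wanted parameter, and everything not visible in the listing (the empty-chain check, tracing, accounting) are assumptions:

static bool svc_rdma_refresh_recvs_sketch(struct svcxprt_rdma *rdma,
                                          unsigned int wanted)
{
        const struct ib_recv_wr *bad_wr;
        struct ib_recv_wr *recv_chain = NULL;
        struct svc_rdma_recv_ctxt *ctxt;
        unsigned int i;
        int ret;

        /* Link the WRs so a single ib_post_recv() call posts the
         * whole batch (lines 269-270). */
        for (i = 0; i < wanted; i++) {
                ctxt = svc_rdma_recv_ctxt_get(rdma);
                if (!ctxt)
                        break;
                ctxt->rc_recv_wr.next = recv_chain;
                recv_chain = &ctxt->rc_recv_wr;
        }
        if (!recv_chain)
                return false;

        ret = ib_post_recv(rdma->sc_qp, recv_chain, &bad_wr);
        if (ret) {
                /* The provider rejected the chain starting at bad_wr;
                 * return each unposted ctxt to the free list
                 * (lines 284-287). */
                while (bad_wr) {
                        ctxt = container_of(bad_wr,
                                            struct svc_rdma_recv_ctxt,
                                            rc_recv_wr);
                        bad_wr = bad_wr->next;
                        svc_rdma_recv_ctxt_put(rdma, ctxt);
                }
                return false;
        }
        return true;
}

Chaining the WRs means one doorbell covers the whole batch; the bad_wr out-parameter identifies the first WR the provider did not accept, so only those ctxts need to be reclaimed.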
311 struct svc_rdma_recv_ctxt *ctxt;
313 ctxt = svc_rdma_recv_ctxt_alloc(rdma);
314 if (!ctxt)
316 llist_add(&ctxt->rc_node, &rdma->sc_recv_ctxts);
332 struct svc_rdma_recv_ctxt *ctxt;
337 ctxt = container_of(cqe, struct svc_rdma_recv_ctxt, rc_cqe);
341 trace_svcrdma_wc_recv(wc, &ctxt->rc_cid);
357 ctxt->rc_byte_len = wc->byte_len;
360 list_add_tail(&ctxt->rc_list, &rdma->sc_rq_dto_q);
370 trace_svcrdma_wc_recv_flush(wc, &ctxt->rc_cid);
372 trace_svcrdma_wc_recv_err(wc, &ctxt->rc_cid);
374 svc_rdma_recv_ctxt_put(rdma, ctxt);
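Lines 332-374 are the Receive completion handler that line 147 registered via rc_cqe.done. A condensed sketch of its control flow; the status test, the sc_rq_dto_lock critical section, and the transport wake-up are not in the listing and are assumptions here:

static void svc_rdma_wc_receive_sketch(struct ib_cq *cq, struct ib_wc *wc)
{
        struct svcxprt_rdma *rdma = cq->cq_context;
        struct svc_rdma_recv_ctxt *ctxt;

        /* Recover the ctxt from the embedded ib_cqe (line 337). */
        ctxt = container_of(wc->wr_cqe, struct svc_rdma_recv_ctxt,
                            rc_cqe);

        if (wc->status != IB_WC_SUCCESS)
                goto flushed;
        trace_svcrdma_wc_recv(wc, &ctxt->rc_cid);

        /* Success: record the payload length and queue the ctxt for
         * svc_rdma_recvfrom() (assumption: under sc_rq_dto_lock, with
         * an XPT_DATA wake-up that is not shown in the listing). */
        ctxt->rc_byte_len = wc->byte_len;
        list_add_tail(&ctxt->rc_list, &rdma->sc_rq_dto_q);
        return;

flushed:
        if (wc->status == IB_WC_WR_FLUSH_ERR)
                trace_svcrdma_wc_recv_flush(wc, &ctxt->rc_cid);
        else
                trace_svcrdma_wc_recv_err(wc, &ctxt->rc_cid);
        svc_rdma_recv_ctxt_put(rdma, ctxt);
}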
385 struct svc_rdma_recv_ctxt *ctxt;
387 while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_read_complete_q))) {
388 list_del(&ctxt->rc_list);
389 svc_rdma_recv_ctxt_put(rdma, ctxt);
391 while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_rq_dto_q))) {
392 list_del(&ctxt->rc_list);
393 svc_rdma_recv_ctxt_put(rdma, ctxt);
398 struct svc_rdma_recv_ctxt *ctxt)
402 arg->head[0].iov_base = ctxt->rc_recv_buf;
403 arg->head[0].iov_len = ctxt->rc_byte_len;
408 arg->buflen = ctxt->rc_byte_len;
409 arg->len = ctxt->rc_byte_len;
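Lines 402-409 point rqstp->rq_arg directly at the receive buffer so the RPC layer decodes in place, without a copy. A reconstruction; the elided lines 404-407 are assumed to clear the tail iovec and the page fields:

static void svc_rdma_build_arg_xdr_sketch(struct svc_rqst *rqstp,
                                          struct svc_rdma_recv_ctxt *ctxt)
{
        struct xdr_buf *arg = &rqstp->rq_arg;

        /* The entire inline message sits in head[0]. */
        arg->head[0].iov_base = ctxt->rc_recv_buf;
        arg->head[0].iov_len = ctxt->rc_byte_len;
        /* Assumption for the unshown lines 404-407: no tail, and no
         * page-based payload yet. */
        arg->tail[0].iov_base = NULL;
        arg->tail[0].iov_len = 0;
        arg->page_len = 0;
        arg->page_base = 0;
        arg->buflen = ctxt->rc_byte_len;
        arg->len = ctxt->rc_byte_len;
}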
599 struct svc_rdma_recv_ctxt *ctxt)
605 ctxt->rc_inv_rkey = 0;
611 pcl_for_each_chunk(chunk, &ctxt->rc_call_pcl) {
619 pcl_for_each_chunk(chunk, &ctxt->rc_read_pcl) {
627 pcl_for_each_chunk(chunk, &ctxt->rc_write_pcl) {
635 pcl_for_each_chunk(chunk, &ctxt->rc_reply_pcl) {
643 ctxt->rc_inv_rkey = inv_rkey;
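Lines 599-643 decide whether the server may reply with Send With Invalidate: that is only safe when every chunk segment the client advertised was registered under a single rkey. A hedged sketch of the test over one chunk list; rs_handle as the segment's rkey field is taken from the kernel's struct svc_rdma_segment, and the early return leaves rc_inv_rkey zero, meaning "do not invalidate":

static void svc_rdma_get_inv_rkey_sketch(struct svcxprt_rdma *rdma,
                                         struct svc_rdma_recv_ctxt *ctxt)
{
        struct svc_rdma_segment *segment;
        struct svc_rdma_chunk *chunk;
        u32 inv_rkey = 0;

        ctxt->rc_inv_rkey = 0;

        /* Assumption: the real function first bails out unless the
         * peer supports Remote Invalidation. */
        pcl_for_each_chunk(chunk, &ctxt->rc_call_pcl) {
                pcl_for_each_segment(segment, chunk) {
                        if (inv_rkey == 0)
                                inv_rkey = segment->rs_handle;
                        else if (inv_rkey != segment->rs_handle)
                                return; /* mixed rkeys: leave it 0 */
                }
        }

        /* The same two-level loop repeats for rc_read_pcl,
         * rc_write_pcl, and rc_reply_pcl (lines 619-635). */

        ctxt->rc_inv_rkey = inv_rkey;
}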
779 struct svc_rdma_recv_ctxt *ctxt)
781 struct svc_rdma_chunk *chunk = pcl_first_chunk(&ctxt->rc_read_pcl);
816 struct svc_rdma_recv_ctxt *ctxt)
820 buf->len += ctxt->rc_readbytes;
821 buf->buflen += ctxt->rc_readbytes;
824 buf->head[0].iov_len = min_t(size_t, PAGE_SIZE, ctxt->rc_readbytes);
826 buf->page_len = ctxt->rc_readbytes - buf->head[0].iov_len;
835 struct svc_rdma_recv_ctxt *ctxt)
839 buf->len += ctxt->rc_readbytes;
840 buf->buflen += ctxt->rc_readbytes;
843 buf->head[0].iov_len = min_t(size_t, PAGE_SIZE, ctxt->rc_readbytes);
845 buf->page_len = ctxt->rc_readbytes - buf->head[0].iov_len;
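Lines 820-845 apply the same split in two of the read-completion variants: the xdr_buf grows by rc_readbytes, of which at most one page's worth stays in head[0] and the remainder is accounted as page_len. A standalone arithmetic check of that split, with PAGE_SIZE hardcoded here for illustration:

#include <stdio.h>

#define PAGE_SIZE 4096UL

static void split(unsigned long readbytes)
{
        /* min_t(size_t, PAGE_SIZE, readbytes), as on lines 824/843 */
        unsigned long head = readbytes < PAGE_SIZE ? readbytes : PAGE_SIZE;

        printf("readbytes=%5lu head=%4lu page_len=%lu\n",
               readbytes, head, readbytes - head);
}

int main(void)
{
        split(200);     /* short read: all of it fits in head[0] */
        split(10000);   /* long read: 4096 in head[0], rest in pages */
        return 0;
}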
849 struct svc_rdma_recv_ctxt *ctxt)
856 release_pages(rqstp->rq_respages, ctxt->rc_page_count);
857 for (i = 0; i < ctxt->rc_page_count; i++)
858 rqstp->rq_pages[i] = ctxt->rc_pages[i];
863 rqstp->rq_respages = &rqstp->rq_pages[ctxt->rc_page_count];
867 * pages in ctxt::rc_pages a second time.
869 ctxt->rc_page_count = 0;
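Lines 856-869 hand page ownership back from the ctxt to the svc_rqst. Assembled into one hedged sequence; the surrounding function and any rq_next_page bookkeeping are not visible in the listing:

        unsigned int i;

        /* Drop the preallocated response pages, then substitute the
         * pages that now hold the pulled Read payload. */
        release_pages(rqstp->rq_respages, ctxt->rc_page_count);
        for (i = 0; i < ctxt->rc_page_count; i++)
                rqstp->rq_pages[i] = ctxt->rc_pages[i];
        rqstp->rq_respages = &rqstp->rq_pages[ctxt->rc_page_count];

        /* The rqstp owns the pages now. Zeroing the count keeps a
         * later svc_rdma_recv_ctxt_put() from releasing the pages in
         * ctxt::rc_pages a second time (the comment at line 867). */
        ctxt->rc_page_count = 0;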
875 rqstp->rq_arg = ctxt->rc_saved_arg;
876 if (pcl_is_empty(&ctxt->rc_call_pcl)) {
877 if (ctxt->rc_read_pcl.cl_count == 1)
878 svc_rdma_read_complete_one(rqstp, ctxt);
880 svc_rdma_read_complete_multiple(rqstp, ctxt);
882 svc_rdma_read_complete_pzrc(rqstp, ctxt);
885 trace_svcrdma_read_finished(&ctxt->rc_cid);
901 * when there are no remaining ctxts to process.
903 * The next ctxt is removed from the "receive" lists.
905 * - If the ctxt completes a Receive, then construct the Call
921 struct svc_rdma_recv_ctxt *ctxt;
933 ctxt = svc_rdma_next_recv_ctxt(&rdma_xprt->sc_read_complete_q);
934 if (ctxt) {
935 list_del(&ctxt->rc_list);
938 svc_rdma_read_complete(rqstp, ctxt);
941 ctxt = svc_rdma_next_recv_ctxt(&rdma_xprt->sc_rq_dto_q);
942 if (ctxt)
943 list_del(&ctxt->rc_list);
951 if (!ctxt)
956 ctxt->rc_recv_sge.addr, ctxt->rc_byte_len,
958 svc_rdma_build_arg_xdr(rqstp, ctxt);
960 ret = svc_rdma_xdr_decode_req(&rqstp->rq_arg, ctxt);
966 if (svc_rdma_is_reverse_direction_reply(xprt, ctxt))
969 svc_rdma_get_inv_rkey(rdma_xprt, ctxt);
971 if (!pcl_is_empty(&ctxt->rc_read_pcl) ||
972 !pcl_is_empty(&ctxt->rc_call_pcl))
976 rqstp->rq_xprt_ctxt = ctxt;
983 svc_rdma_send_error(rdma_xprt, ctxt, ret);
984 svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
993 ctxt->rc_saved_arg = rqstp->rq_arg;
995 ret = svc_rdma_process_read_list(rdma_xprt, rqstp, ctxt);
998 svc_rdma_send_error(rdma_xprt, ctxt, ret);
999 svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
1006 svc_rdma_handle_bc_reply(rqstp, ctxt);
1008 svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
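Lines 921-1008 all come from svc_rdma_recvfrom(). A condensed, hedged sketch of the order in which those fragments execute; the list locking, tracing, address copying, and exact label names are assumptions, and the error handling is simplified:

static int svc_rdma_recvfrom_sketch(struct svc_rqst *rqstp)
{
        struct svc_xprt *xprt = rqstp->rq_xprt;
        struct svcxprt_rdma *rdma_xprt =
                container_of(xprt, struct svcxprt_rdma, sc_xprt);
        struct svc_rdma_recv_ctxt *ctxt;
        int ret;

        /* Deferred Read completions take priority (lines 933-938). */
        ctxt = svc_rdma_next_recv_ctxt(&rdma_xprt->sc_read_complete_q);
        if (ctxt) {
                list_del(&ctxt->rc_list);
                svc_rdma_read_complete(rqstp, ctxt);
                goto complete;
        }

        /* Otherwise take the next received message; with nothing
         * queued there is no Call to return (line 951). */
        ctxt = svc_rdma_next_recv_ctxt(&rdma_xprt->sc_rq_dto_q);
        if (!ctxt)
                return 0;
        list_del(&ctxt->rc_list);

        /* Make the device's DMA writes visible before parsing. */
        ib_dma_sync_single_for_cpu(rdma_xprt->sc_pd->device,
                                   ctxt->rc_recv_sge.addr,
                                   ctxt->rc_byte_len, DMA_FROM_DEVICE);
        svc_rdma_build_arg_xdr(rqstp, ctxt);

        ret = svc_rdma_xdr_decode_req(&rqstp->rq_arg, ctxt);
        if (ret < 0)
                goto out_err;
        if (svc_rdma_is_reverse_direction_reply(xprt, ctxt))
                goto out_backchannel;
        svc_rdma_get_inv_rkey(rdma_xprt, ctxt);

        /* Calls carrying Read or call chunks suspend here until the
         * RDMA Reads complete (lines 971-999). */
        if (!pcl_is_empty(&ctxt->rc_read_pcl) ||
            !pcl_is_empty(&ctxt->rc_call_pcl))
                goto out_readlist;

complete:
        rqstp->rq_xprt_ctxt = ctxt;
        return rqstp->rq_arg.len;

out_readlist:
        /* Save the partially built Call so it can be restored after
         * the Reads finish (line 993). */
        ctxt->rc_saved_arg = rqstp->rq_arg;
        ret = svc_rdma_process_read_list(rdma_xprt, rqstp, ctxt);
        if (ret < 0)
                goto out_err;
        return 0;

out_backchannel:
        svc_rdma_handle_bc_reply(rqstp, ctxt);
        svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
        return 0;

out_err:
        svc_rdma_send_error(rdma_xprt, ctxt, ret);
        svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
        return 0;
}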