Lines matching refs:rdma (each match below is prefixed with its line number in the source file):

98 #include <rdma/ib_verbs.h>
99 #include <rdma/rdma_cm.h>
119 svc_rdma_recv_ctxt_alloc(struct svcxprt_rdma *rdma)
121 int node = ibdev_to_node(rdma->sc_cm_id->device);
129 buffer = kmalloc_node(rdma->sc_max_req_size, GFP_KERNEL, node);
132 addr = ib_dma_map_single(rdma->sc_pd->device, buffer,
133 rdma->sc_max_req_size, DMA_FROM_DEVICE);
134 if (ib_dma_mapping_error(rdma->sc_pd->device, addr))
137 svc_rdma_recv_cid_init(rdma, &ctxt->rc_cid);
149 ctxt->rc_recv_sge.length = rdma->sc_max_req_size;
150 ctxt->rc_recv_sge.lkey = rdma->sc_pd->local_dma_lkey;
152 svc_rdma_cc_init(rdma, &ctxt->rc_cc);
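
The matches at 119-152 all fall inside the receive-context constructor. A minimal sketch of that path, reassembled from the matched lines and using the headers already shown at 98-99; the context layout is abbreviated, and the fields not named in the matched lines (rc_recv_buf, rc_recv_wr, rc_cqe, rc_byte_len) are assumed names:

/* Abbreviated; the full layout lives in linux/sunrpc/svc_rdma.h. */
struct svc_rdma_recv_ctxt {
	struct llist_node		rc_node;
	struct list_head		rc_list;
	struct ib_recv_wr		rc_recv_wr;	/* assumed */
	struct ib_cqe			rc_cqe;		/* assumed */
	struct ib_sge			rc_recv_sge;
	void				*rc_recv_buf;	/* assumed */
	struct rpc_rdma_cid		rc_cid;
	struct svc_rdma_chunk_ctxt	rc_cc;
	u32				rc_byte_len;	/* assumed */
};

static struct svc_rdma_recv_ctxt *
svc_rdma_recv_ctxt_alloc(struct svcxprt_rdma *rdma)
{
	int node = ibdev_to_node(rdma->sc_cm_id->device);
	struct svc_rdma_recv_ctxt *ctxt;
	dma_addr_t addr;
	void *buffer;

	/* Allocate on the NUMA node closest to the device */
	ctxt = kzalloc_node(sizeof(*ctxt), GFP_KERNEL, node);
	if (!ctxt)
		goto fail0;
	buffer = kmalloc_node(rdma->sc_max_req_size, GFP_KERNEL, node);
	if (!buffer)
		goto fail1;
	addr = ib_dma_map_single(rdma->sc_pd->device, buffer,
				 rdma->sc_max_req_size, DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(rdma->sc_pd->device, addr))
		goto fail2;

	svc_rdma_recv_cid_init(rdma, &ctxt->rc_cid);

	/* Wire the Receive WR to its single SGE and completion entry */
	ctxt->rc_recv_wr.next = NULL;
	ctxt->rc_recv_wr.wr_cqe = &ctxt->rc_cqe;
	ctxt->rc_recv_wr.sg_list = &ctxt->rc_recv_sge;
	ctxt->rc_recv_wr.num_sge = 1;
	ctxt->rc_recv_sge.addr = addr;
	ctxt->rc_recv_sge.length = rdma->sc_max_req_size;
	ctxt->rc_recv_sge.lkey = rdma->sc_pd->local_dma_lkey;
	ctxt->rc_recv_buf = buffer;
	svc_rdma_cc_init(rdma, &ctxt->rc_cc);
	return ctxt;

fail2:
	kfree(buffer);
fail1:
	kfree(ctxt);
fail0:
	return NULL;
}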
163 static void svc_rdma_recv_ctxt_destroy(struct svcxprt_rdma *rdma,
166 ib_dma_unmap_single(rdma->sc_pd->device, ctxt->rc_recv_sge.addr,
174 * @rdma: svcxprt_rdma being torn down
177 void svc_rdma_recv_ctxts_destroy(struct svcxprt_rdma *rdma)
182 while ((node = llist_del_first(&rdma->sc_recv_ctxts))) {
184 svc_rdma_recv_ctxt_destroy(rdma, ctxt);
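
The teardown at 163-184 undoes the constructor step for step: unmap with the same device, address, length, and direction, free the buffer and the context, and drain the free list one node at a time. A sketch under the abbreviated layout above:

static void svc_rdma_recv_ctxt_destroy(struct svcxprt_rdma *rdma,
				       struct svc_rdma_recv_ctxt *ctxt)
{
	ib_dma_unmap_single(rdma->sc_pd->device, ctxt->rc_recv_sge.addr,
			    ctxt->rc_recv_sge.length, DMA_FROM_DEVICE);
	kfree(ctxt->rc_recv_buf);
	kfree(ctxt);
}

void svc_rdma_recv_ctxts_destroy(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;
	struct llist_node *node;

	while ((node = llist_del_first(&rdma->sc_recv_ctxts))) {
		ctxt = llist_entry(node, struct svc_rdma_recv_ctxt, rc_node);
		svc_rdma_recv_ctxt_destroy(rdma, ctxt);
	}
}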
190 * @rdma: controlling svcxprt_rdma
194 struct svc_rdma_recv_ctxt *svc_rdma_recv_ctxt_get(struct svcxprt_rdma *rdma)
199 node = llist_del_first(&rdma->sc_recv_ctxts);
210 * @rdma: controlling svcxprt_rdma
214 void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
217 svc_rdma_cc_release(rdma, &ctxt->rc_cc, DMA_FROM_DEVICE);
229 llist_add(&ctxt->rc_node, &rdma->sc_recv_ctxts);
244 struct svcxprt_rdma *rdma =
248 svc_rdma_recv_ctxt_put(rdma, ctxt);
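
Lines 190-229 are the lock-free recycling pair: contexts come off and go back onto the sc_recv_ctxts llist, so no spinlock is needed on this hot path; the fragment at 244-248 shows per-RPC release funneling through the same put. A sketch; the fallback allocation when the list runs empty is an assumption the matched lines do not confirm:

struct svc_rdma_recv_ctxt *svc_rdma_recv_ctxt_get(struct svcxprt_rdma *rdma)
{
	struct llist_node *node;

	node = llist_del_first(&rdma->sc_recv_ctxts);
	if (!node)
		/* assumed: fall back to a fresh allocation */
		return svc_rdma_recv_ctxt_alloc(rdma);
	return llist_entry(node, struct svc_rdma_recv_ctxt, rc_node);
}

void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
			    struct svc_rdma_recv_ctxt *ctxt)
{
	/* Release R/W chunk resources before recycling the context */
	svc_rdma_cc_release(rdma, &ctxt->rc_cc, DMA_FROM_DEVICE);
	llist_add(&ctxt->rc_node, &rdma->sc_recv_ctxts);
}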
251 static bool svc_rdma_refresh_recvs(struct svcxprt_rdma *rdma,
259 if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags))
264 ctxt = svc_rdma_recv_ctxt_get(rdma);
271 rdma->sc_pending_recvs++;
276 ret = ib_post_recv(rdma->sc_qp, recv_chain, &bad_wr);
282 trace_svcrdma_rq_post_err(rdma, ret);
287 svc_rdma_recv_ctxt_put(rdma, ctxt);
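
Lines 251-287 batch Receive posting: contexts are pulled off the free list, their work requests are chained together, and the whole chain goes to the HCA in one ib_post_recv call; on failure, every WR from bad_wr onward is unwound back to the free list. A sketch, with the rc_recv_wr chaining an assumption:

static bool svc_rdma_refresh_recvs(struct svcxprt_rdma *rdma,
				   unsigned int wanted)
{
	const struct ib_recv_wr *bad_wr;
	struct svc_rdma_recv_ctxt *ctxt;
	struct ib_recv_wr *recv_chain;
	int ret;

	if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags))
		return false;

	recv_chain = NULL;
	while (wanted--) {
		ctxt = svc_rdma_recv_ctxt_get(rdma);
		if (!ctxt)
			break;
		/* Push this WR onto the front of the chain */
		ctxt->rc_recv_wr.next = recv_chain;
		recv_chain = &ctxt->rc_recv_wr;
		rdma->sc_pending_recvs++;
	}
	if (!recv_chain)
		return false;

	ret = ib_post_recv(rdma->sc_qp, recv_chain, &bad_wr);
	if (ret)
		goto err_free;
	return true;

err_free:
	trace_svcrdma_rq_post_err(rdma, ret);
	while (bad_wr) {
		ctxt = container_of(bad_wr, struct svc_rdma_recv_ctxt,
				    rc_recv_wr);
		bad_wr = bad_wr->next;
		svc_rdma_recv_ctxt_put(rdma, ctxt);
	}
	return false;
}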
296 * @rdma: fresh svcxprt_rdma
302 bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma)
309 total = (rdma->sc_max_requests * 2) + rdma->sc_recv_batch;
313 ctxt = svc_rdma_recv_ctxt_alloc(rdma);
316 llist_add(&ctxt->rc_node, &rdma->sc_recv_ctxts);
319 return svc_rdma_refresh_recvs(rdma, rdma->sc_max_requests);
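
Lines 302-319 show the provisioning arithmetic: two contexts per credit (one for the posted Receive, one for an RPC still in process) plus one spare batch, then an initial post of sc_max_requests Receives. Reassembled:

bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;
	unsigned int total;

	total = (rdma->sc_max_requests * 2) + rdma->sc_recv_batch;
	while (total--) {
		ctxt = svc_rdma_recv_ctxt_alloc(rdma);
		if (!ctxt)
			return false;
		llist_add(&ctxt->rc_node, &rdma->sc_recv_ctxts);
	}
	return svc_rdma_refresh_recvs(rdma, rdma->sc_max_requests);
}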
330 struct svcxprt_rdma *rdma = cq->cq_context;
334 rdma->sc_pending_recvs--;
352 if (rdma->sc_pending_recvs < rdma->sc_max_requests)
353 if (!svc_rdma_refresh_recvs(rdma, rdma->sc_recv_batch))
359 spin_lock(&rdma->sc_rq_dto_lock);
360 list_add_tail(&ctxt->rc_list, &rdma->sc_rq_dto_q);
362 set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags);
363 spin_unlock(&rdma->sc_rq_dto_lock);
364 if (!test_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags))
365 svc_xprt_enqueue(&rdma->sc_xprt);
374 svc_rdma_recv_ctxt_put(rdma, ctxt);
375 svc_xprt_deferred_close(&rdma->sc_xprt);
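
Lines 330-375 are the Receive completion handler: decrement the posted count, refill the queue in sc_recv_batch increments while below sc_max_requests, then queue the context on the dto list and wake a server thread. A flush status or refill failure recycles the context and defers a transport close. A sketch; the handler name and the wc-to-context recovery via rc_cqe are assumptions:

static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
{
	struct svcxprt_rdma *rdma = cq->cq_context;
	struct svc_rdma_recv_ctxt *ctxt;

	rdma->sc_pending_recvs--;

	ctxt = container_of(wc->wr_cqe, struct svc_rdma_recv_ctxt, rc_cqe);
	if (wc->status != IB_WC_SUCCESS)
		goto flushed;

	/* Top up the Receive queue before handing off the payload */
	if (rdma->sc_pending_recvs < rdma->sc_max_requests)
		if (!svc_rdma_refresh_recvs(rdma, rdma->sc_recv_batch))
			goto flushed;

	ctxt->rc_byte_len = wc->byte_len;

	spin_lock(&rdma->sc_rq_dto_lock);
	list_add_tail(&ctxt->rc_list, &rdma->sc_rq_dto_q);
	/* Set XPT_DATA while the lock still orders it after the list add */
	set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags);
	spin_unlock(&rdma->sc_rq_dto_lock);
	if (!test_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags))
		svc_xprt_enqueue(&rdma->sc_xprt);
	return;

flushed:
	svc_rdma_recv_ctxt_put(rdma, ctxt);
	svc_xprt_deferred_close(&rdma->sc_xprt);
}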
380 * @rdma: svcxprt_rdma being shut down
383 void svc_rdma_flush_recv_queues(struct svcxprt_rdma *rdma)
387 while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_read_complete_q))) {
389 svc_rdma_recv_ctxt_put(rdma, ctxt);
391 while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_rq_dto_q))) {
393 svc_rdma_recv_ctxt_put(rdma, ctxt);
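
Lines 383-393 drain both deferred-work queues at shutdown so every outstanding context returns to the free list before svc_rdma_recv_ctxts_destroy runs. A sketch; the list_del of each entry is an assumption implied by the queue walk:

void svc_rdma_flush_recv_queues(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_read_complete_q))) {
		list_del(&ctxt->rc_list);
		svc_rdma_recv_ctxt_put(rdma, ctxt);
	}
	while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_rq_dto_q))) {
		list_del(&ctxt->rc_list);
		svc_rdma_recv_ctxt_put(rdma, ctxt);
	}
}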
598 static void svc_rdma_get_inv_rkey(struct svcxprt_rdma *rdma,
607 if (!rdma->sc_snd_w_inv)
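
Lines 598-607 gate Send With Invalidate: when the peer has not negotiated remote invalidation (sc_snd_w_inv is clear), no rkey is selected and plain Sends are used. A sketch of just that guard; the rc_inv_rkey field and the elided chunk walk are assumptions:

static void svc_rdma_get_inv_rkey(struct svcxprt_rdma *rdma,
				  struct svc_rdma_recv_ctxt *ctxt)
{
	ctxt->rc_inv_rkey = 0;	/* assumed field: 0 means no invalidation */

	if (!rdma->sc_snd_w_inv)
		return;
	/* ... otherwise pick a single rkey from the received chunks ... */
}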
728 static void svc_rdma_send_error(struct svcxprt_rdma *rdma,
734 sctxt = svc_rdma_send_ctxt_get(rdma);
737 svc_rdma_send_error_msg(rdma, sctxt, rctxt, status);
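
Lines 728-737 are the error path: reserve a Send context and, if one is available, hand it to svc_rdma_send_error_msg to transmit the RPC/RDMA error. Reassembled (the status parameter type and the silent drop when no Send context is available are assumptions):

static void svc_rdma_send_error(struct svcxprt_rdma *rdma,
				struct svc_rdma_recv_ctxt *rctxt,
				int status)
{
	struct svc_rdma_send_ctxt *sctxt;

	sctxt = svc_rdma_send_ctxt_get(rdma);
	if (!sctxt)
		return;	/* assumed: nothing to send with, drop silently */
	svc_rdma_send_error_msg(rdma, sctxt, rctxt, status);
}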