Directory: /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/net/sunrpc/xprtrdma/

Lines Matching refs:xprt

57 static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt);
60 static void svc_rdma_detach(struct svc_xprt *xprt);
61 static void svc_rdma_free(struct svc_xprt *xprt);
62 static int svc_rdma_has_wspace(struct svc_xprt *xprt);
63 static void rq_cq_reap(struct svcxprt_rdma *xprt);
64 static void sq_cq_reap(struct svcxprt_rdma *xprt);
92 struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
102 ctxt->xprt = xprt;
106 atomic_inc(&xprt->sc_ctxt_used);
112 struct svcxprt_rdma *xprt = ctxt->xprt;
121 if (ctxt->sge[i].lkey == xprt->sc_dma_lkey) {
122 atomic_dec(&xprt->sc_dma_used);
123 ib_dma_unmap_single(xprt->sc_cm_id->device,
133 struct svcxprt_rdma *xprt;
137 xprt = ctxt->xprt;
143 atomic_dec(&xprt->sc_ctxt_used);
176 struct svc_xprt *xprt = context;
179 set_bit(XPT_CLOSE, &xprt->xpt_flags);
185 struct svc_xprt *xprt = context;
206 set_bit(XPT_CLOSE, &xprt->xpt_flags);
222 struct svcxprt_rdma *xprt;
227 xprt = list_entry(dto_xprt_q.next,
229 list_del_init(&xprt->sc_dto_q);
232 rq_cq_reap(xprt);
233 sq_cq_reap(xprt);
235 svc_xprt_put(&xprt->sc_xprt);
249 struct svcxprt_rdma *xprt = cq_context;
253 if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount)==0)
261 set_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags);
268 if (list_empty(&xprt->sc_dto_q)) {
269 svc_xprt_get(&xprt->sc_xprt);
270 list_add_tail(&xprt->sc_dto_q, &dto_xprt_q);
286 static void rq_cq_reap(struct svcxprt_rdma *xprt)
292 if (!test_and_clear_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags))
295 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
298 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
306 set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
308 svc_xprt_put(&xprt->sc_xprt);
311 spin_lock_bh(&xprt->sc_rq_dto_lock);
312 list_add_tail(&ctxt->dto_q, &xprt->sc_rq_dto_q);
313 spin_unlock_bh(&xprt->sc_rq_dto_lock);
314 svc_xprt_put(&xprt->sc_xprt);
320 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
326 if (!test_bit(RDMAXPRT_CONN_PENDING, &xprt->sc_flags))
327 svc_xprt_enqueue(&xprt->sc_xprt);
333 static void process_context(struct svcxprt_rdma *xprt,
341 svc_rdma_put_frmr(xprt, ctxt->frmr);
355 svc_rdma_put_frmr(xprt, ctxt->frmr);
356 spin_lock_bh(&xprt->sc_rq_dto_lock);
357 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
359 &xprt->sc_read_complete_q);
360 spin_unlock_bh(&xprt->sc_rq_dto_lock);
361 svc_xprt_enqueue(&xprt->sc_xprt);
379 static void sq_cq_reap(struct svcxprt_rdma *xprt)
383 struct ib_cq *cq = xprt->sc_sq_cq;
386 if (!test_and_clear_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags))
389 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
394 set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
397 atomic_dec(&xprt->sc_sq_count);
398 wake_up(&xprt->sc_send_wait);
402 process_context(xprt, ctxt);
404 svc_xprt_put(&xprt->sc_xprt);
413 struct svcxprt_rdma *xprt = cq_context;
417 if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount)==0)
425 set_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags);
432 if (list_empty(&xprt->sc_dto_q)) {
433 svc_xprt_get(&xprt->sc_xprt);
434 list_add_tail(&xprt->sc_dto_q, &dto_xprt_q);
488 int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
498 ctxt = svc_rdma_get_context(xprt);
501 for (sge_no = 0; buflen < xprt->sc_max_req_size; sge_no++) {
502 BUG_ON(sge_no >= xprt->sc_max_sge);
505 pa = ib_dma_map_single(xprt->sc_cm_id->device,
508 if (ib_dma_mapping_error(xprt->sc_cm_id->device, pa))
510 atomic_inc(&xprt->sc_dma_used);
513 ctxt->sge[sge_no].lkey = xprt->sc_dma_lkey;
522 svc_xprt_get(&xprt->sc_xprt);
523 ret = ib_post_recv(xprt->sc_qp, &recv_wr, &bad_recv_wr);
527 svc_xprt_put(&xprt->sc_xprt);
542 * This function creates a new xprt for the new connection and enqueues it on
543 * the accept queue for the listening xprt. When the listen thread is kicked, it
544 * will call the recvfrom method on the listen xprt which will accept the new
596 struct svcxprt_rdma *xprt = cma_id->context;
601 dprintk("svcrdma: Connect request on cma_id=%p, xprt = %p, "
609 dprintk("svcrdma: Connection completed on LISTEN xprt=%p, "
610 "cm_id=%p\n", xprt, cma_id);
614 dprintk("svcrdma: Device removal xprt=%p, cm_id=%p\n",
615 xprt, cma_id);
616 if (xprt)
617 set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
632 struct svc_xprt *xprt = cma_id->context;
634 container_of(xprt, struct svcxprt_rdma, sc_xprt);
638 svc_xprt_get(xprt);
639 dprintk("svcrdma: Connection completed on DTO xprt=%p, "
640 "cm_id=%p\n", xprt, cma_id);
642 svc_xprt_enqueue(xprt);
645 dprintk("svcrdma: Disconnect on DTO xprt=%p, cm_id=%p\n",
646 xprt, cma_id);
647 if (xprt) {
648 set_bit(XPT_CLOSE, &xprt->xpt_flags);
649 svc_xprt_enqueue(xprt);
650 svc_xprt_put(xprt);
654 dprintk("svcrdma: Device removal cma_id=%p, xprt = %p, "
655 "event=%d\n", cma_id, xprt, event->event);
656 if (xprt) {
657 set_bit(XPT_CLOSE, &xprt->xpt_flags);
658 svc_xprt_enqueue(xprt);
678 struct svc_xprt *xprt;
689 xprt = &cma_xprt->sc_xprt;
727 static struct svc_rdma_fastreg_mr *rdma_alloc_frmr(struct svcxprt_rdma *xprt)
737 mr = ib_alloc_fast_reg_mr(xprt->sc_pd, RPCSVC_MAXPAGES);
741 pl = ib_alloc_fast_reg_page_list(xprt->sc_cm_id->device,
759 static void rdma_dealloc_frmr_q(struct svcxprt_rdma *xprt)
763 while (!list_empty(&xprt->sc_frmr_q)) {
764 frmr = list_entry(xprt->sc_frmr_q.next,
792 static void frmr_unmap_dma(struct svcxprt_rdma *xprt,
800 atomic_dec(&xprt->sc_dma_used);
829 static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
841 listen_rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt);
842 clear_bit(XPT_CONN, &xprt->xpt_flags);
1104 static void svc_rdma_detach(struct svc_xprt *xprt)
1107 container_of(xprt, struct svcxprt_rdma, sc_xprt);
1108 dprintk("svc: svc_rdma_detach(%p)\n", xprt);
1177 static void svc_rdma_free(struct svc_xprt *xprt)
1180 container_of(xprt, struct svcxprt_rdma, sc_xprt);
1185 static int svc_rdma_has_wspace(struct svc_xprt *xprt)
1188 container_of(xprt, struct svcxprt_rdma, sc_xprt);
1218 int svc_rdma_fastreg(struct svcxprt_rdma *xprt,
1239 return svc_rdma_send(xprt, &fastreg_wr);
1242 int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
1249 if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
1259 spin_lock_bh(&xprt->sc_lock);
1260 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
1261 spin_unlock_bh(&xprt->sc_lock);
1265 sq_cq_reap(xprt);
1268 wait_event(xprt->sc_send_wait,
1269 atomic_read(&xprt->sc_sq_count) <
1270 xprt->sc_sq_depth);
1271 if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
1277 svc_xprt_get(&xprt->sc_xprt);
1280 atomic_add(wr_count, &xprt->sc_sq_count);
1281 ret = ib_post_send(xprt->sc_qp, wr, &bad_wr);
1283 set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
1284 atomic_sub(wr_count, &xprt->sc_sq_count);
1286 svc_xprt_put(&xprt->sc_xprt);
1289 ret, atomic_read(&xprt->sc_sq_count),
1290 xprt->sc_sq_depth);
1292 spin_unlock_bh(&xprt->sc_lock);
1294 wake_up(&xprt->sc_send_wait);
1300 void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
1315 length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va);
1318 sge.addr = ib_dma_map_single(xprt->sc_cm_id->device,
1320 if (ib_dma_mapping_error(xprt->sc_cm_id->device, sge.addr)) {
1324 atomic_inc(&xprt->sc_dma_used);
1325 sge.lkey = xprt->sc_dma_lkey;
1328 ctxt = svc_rdma_get_context(xprt);
1342 ret = svc_rdma_send(xprt, &err_wr);
1346 ib_dma_unmap_single(xprt->sc_cm_id->device,
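
The comment excerpt at lines 542-544 above describes the CONNECT_REQUEST path: a new xprt is created for the incoming connection, queued on the accept queue of the listening xprt, and the listener is then enqueued so a server thread will call its accept method. The sketch below illustrates that pattern. It is a simplified reconstruction, not a quote of the file: the name handle_connect_req_sketch() is invented, rdma_create_xprt() and the sc_accept_q/sc_lock fields are assumed to match this file's definitions, error handling and connection-parameter negotiation are omitted, and it relies on the headers the file already includes (linux/sunrpc/svc_rdma.h, rdma/rdma_cm.h).

/*
 * Sketch of CONNECT_REQUEST handling on a listening xprt (assumed names,
 * simplified; see the comment at lines 542-544 above).
 */
static void handle_connect_req_sketch(struct rdma_cm_id *new_cma_id)
{
	/* The listener's cm_id carries the listening xprt in its context. */
	struct svcxprt_rdma *listen_xprt = new_cma_id->context;
	struct svcxprt_rdma *newxprt;

	/* Create a new, non-listening xprt for this connection. */
	newxprt = rdma_create_xprt(listen_xprt->sc_xprt.xpt_server, 0);
	if (!newxprt)
		return;
	newxprt->sc_cm_id = new_cma_id;
	new_cma_id->context = newxprt;

	/* Enqueue the new xprt on the listener's accept queue. */
	spin_lock_bh(&listen_xprt->sc_lock);
	list_add_tail(&newxprt->sc_accept_q, &listen_xprt->sc_accept_q);
	spin_unlock_bh(&listen_xprt->sc_lock);

	/*
	 * Kick the listener: a server thread sees XPT_CONN and calls
	 * svc_rdma_accept() (line 829), which dequeues and accepts the
	 * new connection.
	 */
	set_bit(XPT_CONN, &listen_xprt->sc_xprt.xpt_flags);
	svc_xprt_enqueue(&listen_xprt->sc_xprt);
}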
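
The svc_rdma_send() fragments at lines 1242-1294 above show the send-queue flow control on the xprt: if posting the work requests would exceed sc_sq_depth, the sender reaps the send CQ and sleeps on sc_send_wait until sc_sq_count drops; otherwise it takes transport references, bumps sc_sq_count and posts, rolling everything back and marking the xprt closed if ib_post_send() fails. The condensed sketch below restates that loop; the name svc_rdma_send_sketch() is invented, statistics and sanity checks are dropped, and details not visible in the matched lines (such as taking one reference per posted WR) are assumptions rather than quotes.

/*
 * Condensed sketch of the svc_rdma_send() flow-control loop (lines
 * 1242-1294 above). Field names follow this file; details omitted.
 */
int svc_rdma_send_sketch(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
{
	struct ib_send_wr *bad_wr, *n_wr;
	int wr_count = 1;
	int i, ret;

	if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
		return -ENOTCONN;

	/* Count the chained work requests being posted. */
	for (n_wr = wr->next; n_wr; n_wr = n_wr->next)
		wr_count++;

	while (1) {
		spin_lock_bh(&xprt->sc_lock);
		if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
			/* SQ full: reap completions, then wait for room. */
			spin_unlock_bh(&xprt->sc_lock);
			sq_cq_reap(xprt);
			wait_event(xprt->sc_send_wait,
				   atomic_read(&xprt->sc_sq_count) <
				   xprt->sc_sq_depth);
			if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
				return -ENOTCONN;
			continue;
		}
		/* Reference the xprt per WR; sq_cq_reap() drops them later. */
		for (i = 0; i < wr_count; i++)
			svc_xprt_get(&xprt->sc_xprt);
		atomic_add(wr_count, &xprt->sc_sq_count);
		ret = ib_post_send(xprt->sc_qp, wr, &bad_wr);
		if (ret) {
			/* Post failed: mark the xprt closed and roll back. */
			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
			atomic_sub(wr_count, &xprt->sc_sq_count);
			for (i = 0; i < wr_count; i++)
				svc_xprt_put(&xprt->sc_xprt);
		}
		spin_unlock_bh(&xprt->sc_lock);
		if (ret)
			wake_up(&xprt->sc_send_wait);
		break;
	}
	return ret;
}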