Lines Matching defs:ep

48 static void frwr_cid_init(struct rpcrdma_ep *ep,
53 cid->ci_queue_id = ep->re_attr.send_cq->res.id;
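These fragments appear to come from the FRWR memory-registration code in the Linux NFS/RDMA client (frwr_ops.c, judging by the function and field names); the number in front of each match is its source line number. Lines 48 and 53 are from frwr_cid_init(), which stamps an MR's completion ID with the send CQ's resource ID so tracepoints can correlate completions with their queue. A hedged sketch of the whole helper; everything outside the two matched lines is assumed from the mainline layout:

    static void frwr_cid_init(struct rpcrdma_ep *ep,
                              struct rpcrdma_mr *mr)
    {
            struct rpc_rdma_cid *cid = &mr->mr_cid;

            /* Matched line 53: the queue half of the completion ID */
            cid->ci_queue_id = ep->re_attr.send_cq->res.id;
            /* Assumed: the other half comes from the MR's own resource ID */
            cid->ci_completion_id = mr->mr_ibmr->res.id;
    }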
123 struct rpcrdma_ep *ep = r_xprt->rx_ep;
124 unsigned int depth = ep->re_max_fr_depth;
129 ibdev_to_node(ep->re_id->device));
133 frmr = ib_alloc_mr(ep->re_pd, ep->re_mrtype, depth);
142 frwr_cid_init(ep, mr);
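Lines 123-142 are from the per-MR setup path (frwr_mr_init() in mainline). The ep fields supply the allocation parameters: the scatterlist is sized to the registration depth and placed on the device's NUMA node, and the ib_mr is allocated against the endpoint's PD with the negotiated MR type. A hedged sketch; the error handling and GFP flags are assumptions, only the ep references are from the listing:

    int frwr_mr_init(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr *mr)
    {
            struct rpcrdma_ep *ep = r_xprt->rx_ep;             /* 123 */
            unsigned int depth = ep->re_max_fr_depth;          /* 124 */
            struct scatterlist *sg;
            struct ib_mr *frmr;

            /* Allocate the sg table on the RDMA device's NUMA node (129) */
            sg = kcalloc_node(depth, sizeof(*sg), GFP_KERNEL,
                              ibdev_to_node(ep->re_id->device));
            if (!sg)
                    return -ENOMEM;

            /* One FRWR MR per rpcrdma_mr, sized to the negotiated depth (133) */
            frmr = ib_alloc_mr(ep->re_pd, ep->re_mrtype, depth);
            if (IS_ERR(frmr)) {
                    kfree(sg);
                    return PTR_ERR(frmr);
            }

            mr->mr_xprt = r_xprt;
            mr->mr_ibmr = frmr;
            frwr_cid_init(ep, mr);                             /* 142 */

            sg_init_table(sg, depth);
            mr->mr_sg = sg;
            return 0;
    }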
156 * @ep: endpoint to fill in
160 * ep->re_attr
161 * ep->re_max_requests
162 * ep->re_max_rdma_segs
163 * ep->re_max_fr_depth
164 * ep->re_mrtype
171 int frwr_query_device(struct rpcrdma_ep *ep, const struct ib_device *device)
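Lines 156-164 are fragments of the kernel-doc for frwr_query_device(), and line 171 is its signature. Reassembled (wording outside the matched fragments is assumed), the documented contract is:

    /**
     * frwr_query_device - Prepare a transport endpoint for FRWR
     * @ep: endpoint to fill in
     * @device: RDMA device to query
     *
     * On success, sets:
     *	ep->re_attr
     *	ep->re_max_requests
     *	ep->re_max_rdma_segs
     *	ep->re_max_fr_depth
     *	ep->re_mrtype
     */
    int frwr_query_device(struct rpcrdma_ep *ep, const struct ib_device *device);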
190 ep->re_attr.cap.max_send_sge = max_sge;
191 ep->re_attr.cap.max_recv_sge = 1;
193 ep->re_mrtype = IB_MR_TYPE_MEM_REG;
195 ep->re_mrtype = IB_MR_TYPE_SG_GAPS;
202 ep->re_max_fr_depth = attrs->max_sge_rd;
204 ep->re_max_fr_depth = attrs->max_fast_reg_page_list_len;
205 if (ep->re_max_fr_depth > RPCRDMA_MAX_DATA_SEGS)
206 ep->re_max_fr_depth = RPCRDMA_MAX_DATA_SEGS;
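Lines 190-206 size the send/recv SGEs, pick the MR type, and derive the fast-registration depth. Only the assignments appear in the listing; the branch conditions in this sketch are assumptions, and the device-quirk predicate is written as a placeholder:

    ep->re_attr.cap.max_send_sge = max_sge;                          /* 190 */
    ep->re_attr.cap.max_recv_sge = 1;                                /* 191 */

    /* Default registration type; prefer SG_GAPS when the device offers it */
    ep->re_mrtype = IB_MR_TYPE_MEM_REG;                              /* 193 */
    if (attrs->kernel_cap_flags & IBK_SG_GAPS_REG)                   /* assumed */
            ep->re_mrtype = IB_MR_TYPE_SG_GAPS;                      /* 195 */

    /* Pick a depth the device can honor ... */
    if (device_prefers_small_mrs)                                    /* placeholder quirk test */
            ep->re_max_fr_depth = attrs->max_sge_rd;                 /* 202 */
    else
            ep->re_max_fr_depth = attrs->max_fast_reg_page_list_len; /* 204 */

    /* ... then cap it at the transport's own per-chunk limit */
    if (ep->re_max_fr_depth > RPCRDMA_MAX_DATA_SEGS)                 /* 205 */
            ep->re_max_fr_depth = RPCRDMA_MAX_DATA_SEGS;             /* 206 */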
222 if (ep->re_max_fr_depth < RPCRDMA_MAX_DATA_SEGS) {
223 delta = RPCRDMA_MAX_DATA_SEGS - ep->re_max_fr_depth;
226 delta -= ep->re_max_fr_depth;
235 if (ep->re_max_requests > max_qp_wr)
236 ep->re_max_requests = max_qp_wr;
237 ep->re_attr.cap.max_send_wr = ep->re_max_requests * depth;
238 if (ep->re_attr.cap.max_send_wr > max_qp_wr) {
239 ep->re_max_requests = max_qp_wr / depth;
240 if (!ep->re_max_requests)
242 ep->re_attr.cap.max_send_wr = ep->re_max_requests * depth;
244 ep->re_attr.cap.max_send_wr += RPCRDMA_BACKWARD_WRS;
245 ep->re_attr.cap.max_send_wr += 1; /* for ib_drain_sq */
246 ep->re_attr.cap.max_recv_wr = ep->re_max_requests;
247 ep->re_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
248 ep->re_attr.cap.max_recv_wr += RPCRDMA_MAX_RECV_BATCH;
249 ep->re_attr.cap.max_recv_wr += 1; /* for ib_drain_rq */
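Lines 222-249 budget work requests per connection. If a maximal payload does not fit in one fast-registered MR, the per-request WR depth grows by one register/invalidate pair per additional chunk (222-226); the credit limit and send queue size are then fitted under the device's max_qp_wr (235-242); and both queues finally get headroom for backchannel traffic and for the drain WR (244-249). A worked example under stated, illustrative assumptions (none of these values come from the listing):

    /* Assume RPCRDMA_MAX_DATA_SEGS = 256 (1 MB / 4 KB pages),
     * ep->re_max_fr_depth = 128, device max_qp_wr = 4096, and a base
     * per-request depth of 7 WRs (all assumed).
     *
     * 222-226: delta = 256 - 128 = 128; one loop pass adds a reg/inv
     *          pair (depth 7 -> 9) and delta drops to 0, ending the loop.
     * 235-242: max_send_wr = re_max_requests * 9; if that exceeds 4096,
     *          re_max_requests is recomputed as 4096 / 9 = 455 and the
     *          product taken again (455 * 9 = 4095).
     * 244-249: both queues then add RPCRDMA_BACKWARD_WRS slots plus one
     *          slot each so ib_drain_sq()/ib_drain_rq() can post their
     *          sentinel work request.
     */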
251 ep->re_max_rdma_segs =
252 DIV_ROUND_UP(RPCRDMA_MAX_DATA_SEGS, ep->re_max_fr_depth);
254 ep->re_max_rdma_segs += 2;
255 if (ep->re_max_rdma_segs > RPCRDMA_MAX_HDR_SEGS)
256 ep->re_max_rdma_segs = RPCRDMA_MAX_HDR_SEGS;
263 if ((ep->re_max_rdma_segs * ep->re_max_fr_depth) < RPCRDMA_MAX_SEGS)
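Lines 251-263 derive how many RDMA segments the transport header may carry: DIV_ROUND_UP of the maximum data segments by the registration depth, plus two segments (head and tail of a Reply chunk), clamped to RPCRDMA_MAX_HDR_SEGS; line 263 then rejects a device whose combined limits cannot convey the largest supported payload. Continuing the illustrative numbers above:

    /* DIV_ROUND_UP(256, 128) = 2; + 2 head/tail segments = 4, which is
     * below RPCRDMA_MAX_HDR_SEGS, so no clamping occurs.  The line 263
     * sanity check then requires 4 * 128 = 512 >= RPCRDMA_MAX_SEGS
     * (constant values assumed) before the device is accepted.
     */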
289 struct rpcrdma_ep *ep = r_xprt->rx_ep;
295 if (nsegs > ep->re_max_fr_depth)
296 nsegs = ep->re_max_fr_depth;
303 if (ep->re_mrtype == IB_MR_TYPE_SG_GAPS)
312 dma_nents = ib_dma_map_sg(ep->re_id->device, mr->mr_sg, mr->mr_nents,
316 mr->mr_device = ep->re_id->device;
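Lines 289-316 are from the registration path (frwr_map() in mainline): the segment count is clamped to the registration depth, the scatterlist-building loop only crosses page gaps when the MR type is SG_GAPS, and the pages are DMA-mapped against the device that owns the connection, which is then recorded for later unmapping. A hedged sketch of the matched portion; the loop details, gap test, and error label are assumptions:

    struct rpcrdma_ep *ep = r_xprt->rx_ep;                         /* 289 */
    int i, dma_nents;

    /* Never build a scatterlist longer than the registration depth (295-296) */
    if (nsegs > ep->re_max_fr_depth)
            nsegs = ep->re_max_fr_depth;

    for (i = 0; i < nsegs;) {
            sg_set_page(&mr->mr_sg[i], seg->mr_page,
                        seg->mr_len, seg->mr_offset);
            ++seg;
            ++i;
            /* SG_GAPS MRs may span discontiguous pages; keep going (303) */
            if (ep->re_mrtype == IB_MR_TYPE_SG_GAPS)
                    continue;
            /* Otherwise stop at the first page gap (test assumed) */
            if ((i < nsegs && seg->mr_offset) ||
                offset_in_page(seg->mr_offset + seg->mr_len))
                    break;
    }
    mr->mr_nents = i;

    /* Map against the device owning the connection, and remember it (312, 316) */
    dma_nents = ib_dma_map_sg(ep->re_id->device, mr->mr_sg, mr->mr_nents,
                              mr->mr_dir);
    if (!dma_nents)
            goto out_dmamap_err;
    mr->mr_device = ep->re_id->device;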
386 struct rpcrdma_ep *ep = r_xprt->rx_ep;
406 if ((kref_read(&req->rl_kref) > 1) || num_wrs > ep->re_send_count) {
408 ep->re_send_count = min_t(unsigned int, ep->re_send_batch,
409 num_wrs - ep->re_send_count);
412 ep->re_send_count -= num_wrs;
416 ret = ib_post_send(ep->re_id->qp, post_wr, NULL);
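Lines 386-416 are the post-send path (frwr_send() in mainline). The matched branch implements completion moderation: the Send is flagged IB_SEND_SIGNALED only when the request is still referenced elsewhere or the unsignaled budget (re_send_count) has run out, in which case the budget is replenished up to re_send_batch; otherwise the budget is simply decremented and the completion left unsignaled. A hedged sketch around the matched lines:

    if ((kref_read(&req->rl_kref) > 1) || num_wrs > ep->re_send_count) {   /* 406 */
            send_wr->send_flags |= IB_SEND_SIGNALED;                       /* assumed */
            ep->re_send_count = min_t(unsigned int, ep->re_send_batch,     /* 408-409 */
                                      num_wrs - ep->re_send_count);
    } else {
            send_wr->send_flags &= ~IB_SEND_SIGNALED;                      /* assumed */
            ep->re_send_count -= num_wrs;                                  /* 412 */
    }

    ret = ib_post_send(ep->re_id->qp, post_wr, NULL);                      /* 416 */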
499 struct rpcrdma_ep *ep = r_xprt->rx_ep;
544 rc = ib_post_send(ep->re_id->qp, first, &bad_wr);
560 rpcrdma_force_disconnect(ep);
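Lines 499-560 are from the synchronous invalidation path (frwr_unmap_sync() in mainline): a chain of LOCAL_INV work requests is posted and waited on, and if ib_post_send() fails the only safe recovery is to force a disconnect, since the peer must not be left holding still-valid rkeys. A hedged sketch of the matched tail of that function:

    rc = ib_post_send(ep->re_id->qp, first, &bad_wr);        /* 544 */

    /* ... wait for the last LOCAL_INV to complete (details omitted) ... */

    if (!rc)
            return;

    /* Unwind any unposted WRs, then drop the connection (560) */
    rpcrdma_force_disconnect(ep);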
605 struct rpcrdma_ep *ep = r_xprt->rx_ep;
644 rc = ib_post_send(ep->re_id->qp, first, NULL);
660 rpcrdma_force_disconnect(ep);
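Lines 605-660 are the asynchronous variant (frwr_unmap_async() in mainline): the same LOCAL_INV chain is posted, but the final WR's completion handler retires the RPC instead of a waiter; on a post failure, the connection is again forced down. A hedged sketch of the matched lines:

    rc = ib_post_send(ep->re_id->qp, first, NULL);           /* 644 */
    if (!rc)
            return;

    /* The reply cannot be retired until invalidation is done; give up
     * on the connection instead (660).
     */
    rpcrdma_force_disconnect(ep);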
671 struct rpcrdma_ep *ep = r_xprt->rx_ep;
679 ep->re_write_pad_mr = mr;
682 seg.mr_page = virt_to_page(ep->re_write_pad);
683 seg.mr_offset = offset_in_page(ep->re_write_pad);
695 return ib_post_send(ep->re_id->qp, &mr->mr_regwr.wr, NULL);
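Lines 671-695 register the "write pad" MR (frwr_wp_create() in mainline), a small buffer in the endpoint that the server can use to pad unaligned Write chunks. A hedged sketch; the helper name, pad length, and xid value are assumptions, only the ep references are from the listing:

    struct rpcrdma_ep *ep = r_xprt->rx_ep;                    /* 671 */
    struct rpcrdma_mr_seg seg;
    struct rpcrdma_mr *mr;

    mr = rpcrdma_mr_get(r_xprt);                              /* helper assumed */
    if (!mr)
            return -EAGAIN;
    ep->re_write_pad_mr = mr;                                 /* 679 */

    seg.mr_len = XDR_UNIT;                                    /* length assumed */
    seg.mr_page = virt_to_page(ep->re_write_pad);             /* 682 */
    seg.mr_offset = offset_in_page(ep->re_write_pad);         /* 683 */
    if (IS_ERR(frwr_map(r_xprt, &seg, 1, true, xdr_zero, mr)))
            return -EIO;

    /* Post the REG_MR work request that makes the pad remotely writable (695) */
    return ib_post_send(ep->re_id->qp, &mr->mr_regwr.wr, NULL);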