Lines matching defs:ibmr (struct rds_ib_mr *ibmr). The matches below appear to come from net/rds/ib_frmr.c, the RDS fast-registration (FRMR) memory-region code in the Linux kernel; each entry is the file line number followed by the matching source line.

36 rds_transition_frwr_state(struct rds_ib_mr *ibmr,
40 if (cmpxchg(&ibmr->u.frmr.fr_state,
43 /* enforce order of ibmr->u.frmr.fr_state update
47 atomic_dec(&ibmr->ic->i_fastreg_inuse_count);
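The matches at lines 36-47 are the MR state-transition helper. A condensed reconstruction of the whole function, assuming the FRMR_IS_FREE/FRMR_IS_INUSE/FRMR_IS_STALE states seen later in this listing; declarations not shown in the matches are filled in and approximate:

static void rds_transition_frwr_state(struct rds_ib_mr *ibmr,
                                      enum rds_ib_fr_state old_state,
                                      enum rds_ib_fr_state new_state)
{
        /* only the caller whose cmpxchg wins performs the side effect */
        if (cmpxchg(&ibmr->u.frmr.fr_state,
                    old_state, new_state) == old_state &&
            old_state == FRMR_IS_INUSE) {
                /* enforce order of ibmr->u.frmr.fr_state update
                 * before decrementing i_fastreg_inuse_count
                 */
                smp_mb__before_atomic();
                atomic_dec(&ibmr->ic->i_fastreg_inuse_count);
        }
}

Leaving the INUSE state is the only transition with a side effect: the connection-wide i_fastreg_inuse_count is dropped, and the barrier keeps the state store ordered before the atomic decrement.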
57 struct rds_ib_mr *ibmr = NULL;
66 ibmr = rds_ib_try_reuse_ibmr(pool);
67 if (ibmr)
68 return ibmr;
70 ibmr = kzalloc_node(sizeof(*ibmr), GFP_KERNEL,
72 if (!ibmr) {
77 frmr = &ibmr->u.frmr;
86 ibmr->pool = pool;
98 return ibmr;
101 kfree(ibmr);
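Lines 57-101 are the allocation path: reuse a pooled MR if one is available, otherwise allocate a new rds_ib_mr on the device's NUMA node and back it with a hardware fast-registration MR. A sketch of that flow; the pool selection, the ib_alloc_mr() arguments and the error label are assumptions filled in around the matched lines:

        ibmr = rds_ib_try_reuse_ibmr(pool);     /* prefer a recycled MR */
        if (ibmr)
                return ibmr;

        ibmr = kzalloc_node(sizeof(*ibmr), GFP_KERNEL,
                            rdsibdev_to_node(rds_ibdev));
        if (!ibmr) {
                err = -ENOMEM;
                goto out;
        }

        frmr = &ibmr->u.frmr;
        frmr->mr = ib_alloc_mr(rds_ibdev->pd, IB_MR_TYPE_MEM_REG,
                               pool->max_pages);        /* HW fast-reg MR */
        if (IS_ERR(frmr->mr)) {
                err = PTR_ERR(frmr->mr);
                goto out;
        }

        ibmr->pool = pool;
        frmr->fr_state = FRMR_IS_FREE;
        return ibmr;

out:
        kfree(ibmr);
        return ERR_PTR(err);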
106 static void rds_ib_free_frmr(struct rds_ib_mr *ibmr, bool drop)
108 struct rds_ib_mr_pool *pool = ibmr->pool;
111 llist_add(&ibmr->llnode, &pool->drop_list);
113 llist_add(&ibmr->llnode, &pool->free_list);
114 atomic_add(ibmr->sg_len, &pool->free_pinned);
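Lines 106-114 return an MR to its pool. MRs flagged for dropping go on the pool's drop_list (to be invalidated and destroyed by the flush worker), reusable ones on the free_list, and the pinned-page accounting is bumped so the pool knows when to flush. A minimal sketch; the flush trigger that follows in the real function is omitted:

static void rds_ib_free_frmr(struct rds_ib_mr *ibmr, bool drop)
{
        struct rds_ib_mr_pool *pool = ibmr->pool;

        if (drop)
                llist_add(&ibmr->llnode, &pool->drop_list);
        else
                llist_add(&ibmr->llnode, &pool->free_list);

        /* these pages stay pinned until the pool is flushed */
        atomic_add(ibmr->sg_len, &pool->free_pinned);
}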
123 static int rds_ib_post_reg_frmr(struct rds_ib_mr *ibmr)
125 struct rds_ib_frmr *frmr = &ibmr->u.frmr;
129 while (atomic_dec_return(&ibmr->ic->i_fastreg_wrs) <= 0) {
130 atomic_inc(&ibmr->ic->i_fastreg_wrs);
134 ret = ib_map_mr_sg_zbva(frmr->mr, ibmr->sg, ibmr->sg_dma_len,
136 if (unlikely(ret != ibmr->sg_dma_len))
143 atomic_inc(&ibmr->ic->i_fastreg_inuse_count);
150 ib_update_fast_reg_key(frmr->mr, ibmr->remap_count++);
154 reg_wr.wr.wr_id = (unsigned long)(void *)ibmr;
164 ret = ib_post_send(ibmr->ic->i_cm_id->qp, &reg_wr.wr, NULL);
167 rds_transition_frwr_state(ibmr, FRMR_IS_INUSE, FRMR_IS_STALE);
169 atomic_inc(&ibmr->ic->i_fastreg_wrs);
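Lines 123-169 post the fast registration. The pattern: reserve one of the connection's fast-reg work-request credits, map the DMA'd scatterlist into the MR, refresh the rkey, and post an IB_WR_REG_MR; if the post fails the MR is marked stale and the credit is returned. A condensed sketch, with field and flag choices outside the matched lines treated as approximations:

        struct ib_reg_wr reg_wr;
        int ret, off = 0;

        /* take a fast-reg WR credit; spin until one is available */
        while (atomic_dec_return(&ibmr->ic->i_fastreg_wrs) <= 0) {
                atomic_inc(&ibmr->ic->i_fastreg_wrs);
                cpu_relax();
        }

        ret = ib_map_mr_sg_zbva(frmr->mr, ibmr->sg, ibmr->sg_dma_len,
                                &off, PAGE_SIZE);
        if (unlikely(ret != ibmr->sg_dma_len))
                return ret < 0 ? ret : -EINVAL;

        /* the MR counts as in flight until its completion arrives */
        atomic_inc(&ibmr->ic->i_fastreg_inuse_count);

        ib_update_fast_reg_key(frmr->mr, ibmr->remap_count++);
        frmr->fr_state = FRMR_IS_INUSE;

        memset(&reg_wr, 0, sizeof(reg_wr));
        reg_wr.wr.wr_id = (unsigned long)(void *)ibmr;
        reg_wr.wr.opcode = IB_WR_REG_MR;
        reg_wr.wr.num_sge = 0;
        reg_wr.wr.send_flags = IB_SEND_SIGNALED;
        reg_wr.mr = frmr->mr;
        reg_wr.key = frmr->mr->rkey;
        reg_wr.access = IB_ACCESS_LOCAL_WRITE |
                        IB_ACCESS_REMOTE_READ |
                        IB_ACCESS_REMOTE_WRITE;

        ret = ib_post_send(ibmr->ic->i_cm_id->qp, &reg_wr.wr, NULL);
        if (unlikely(ret)) {
                /* never reached the HW: undo the state and the credit */
                rds_transition_frwr_state(ibmr, FRMR_IS_INUSE, FRMR_IS_STALE);
                atomic_inc(&ibmr->ic->i_fastreg_wrs);
        }
        return ret;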
189 struct rds_ib_mr *ibmr,
193 struct rds_ib_frmr *frmr = &ibmr->u.frmr;
198 /* We want to teardown old ibmr values here and fill it up with
201 rds_ib_teardown_mr(ibmr);
203 ibmr->sg = sg;
204 ibmr->sg_len = sg_len;
205 ibmr->sg_dma_len = 0;
207 WARN_ON(ibmr->sg_dma_len);
208 ibmr->sg_dma_len = ib_dma_map_sg(dev, ibmr->sg, ibmr->sg_len,
210 if (unlikely(!ibmr->sg_dma_len)) {
220 for (i = 0; i < ibmr->sg_dma_len; ++i) {
221 unsigned int dma_len = sg_dma_len(&ibmr->sg[i]);
222 u64 dma_addr = sg_dma_address(&ibmr->sg[i]);
233 if (i < ibmr->sg_dma_len - 1)
243 if (frmr->dma_npages > ibmr->pool->max_pages) {
248 ret = rds_ib_post_reg_frmr(ibmr);
252 if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL)
260 ib_dma_unmap_sg(rds_ibdev->dev, ibmr->sg, ibmr->sg_len,
262 ibmr->sg_dma_len = 0;
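Lines 189-262 do the mapping work before the post: tear down whatever the MR described before, DMA-map the new scatterlist, walk it enforcing that only the first segment may start and only the last may end off a page boundary, count the pages against the pool limit, and finally post the registration; any failure unmaps the scatterlist again. A sketch of the alignment walk (roughly lines 220-243); the local variables and the page arithmetic outside the matched lines are assumptions:

        int i;
        u32 len = 0;

        frmr->sg_byte_len = 0;
        frmr->dma_npages = 0;

        for (i = 0; i < ibmr->sg_dma_len; ++i) {
                unsigned int dma_len = sg_dma_len(&ibmr->sg[i]);
                u64 dma_addr = sg_dma_address(&ibmr->sg[i]);

                frmr->sg_byte_len += dma_len;
                if (dma_addr & ~PAGE_MASK) {
                        if (i > 0)      /* only the first entry may start mid-page */
                                goto out_unmap;
                        ++frmr->dma_npages;
                }

                if ((dma_addr + dma_len) & ~PAGE_MASK) {
                        if (i < ibmr->sg_dma_len - 1)   /* only the last may end mid-page */
                                goto out_unmap;
                        ++frmr->dma_npages;
                }

                len += dma_len;
        }
        frmr->dma_npages += len >> PAGE_SHIFT;

        if (frmr->dma_npages > ibmr->pool->max_pages) {
                ret = -EMSGSIZE;
                goto out_unmap;
        }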
266 static int rds_ib_post_inv(struct rds_ib_mr *ibmr)
269 struct rds_ib_frmr *frmr = &ibmr->u.frmr;
270 struct rdma_cm_id *i_cm_id = ibmr->ic->i_cm_id;
279 while (atomic_dec_return(&ibmr->ic->i_fastreg_wrs) <= 0) {
280 atomic_inc(&ibmr->ic->i_fastreg_wrs);
288 s_wr->wr_id = (unsigned long)(void *)ibmr;
295 rds_transition_frwr_state(ibmr, FRMR_IS_INUSE, FRMR_IS_STALE);
301 atomic_inc(&ibmr->ic->i_fastreg_wrs);
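Lines 266-301 post the local invalidate that revokes the rkey once the upper layer is done with the mapping. It mirrors the registration path: take a WR credit, post an IB_WR_LOCAL_INV carrying the MR's current rkey, and on a posting failure mark the MR stale and give the credit back. A condensed sketch; the fr_wr/fr_inv bookkeeping fields and the wait for the invalidate completion present upstream are assumptions and partly omitted:

        struct ib_send_wr *s_wr = &frmr->fr_wr;
        int ret = 0;

        if (!i_cm_id || !i_cm_id->qp || !frmr->mr)
                return -EINVAL;
        if (frmr->fr_state != FRMR_IS_INUSE)
                return 0;

        while (atomic_dec_return(&ibmr->ic->i_fastreg_wrs) <= 0) {
                atomic_inc(&ibmr->ic->i_fastreg_wrs);
                cpu_relax();
        }

        frmr->fr_inv = true;
        memset(s_wr, 0, sizeof(*s_wr));
        s_wr->wr_id = (unsigned long)(void *)ibmr;
        s_wr->opcode = IB_WR_LOCAL_INV;
        s_wr->ex.invalidate_rkey = frmr->mr->rkey;
        s_wr->send_flags = IB_SEND_SIGNALED;

        ret = ib_post_send(i_cm_id->qp, s_wr, NULL);
        if (unlikely(ret)) {
                /* the rkey is still live; the MR cannot be reused as-is */
                rds_transition_frwr_state(ibmr, FRMR_IS_INUSE, FRMR_IS_STALE);
                frmr->fr_inv = false;
                atomic_inc(&ibmr->ic->i_fastreg_wrs);
        }
        return ret;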
324 struct rds_ib_mr *ibmr = (void *)(unsigned long)wc->wr_id;
325 struct rds_ib_frmr *frmr = &ibmr->u.frmr;
328 rds_transition_frwr_state(ibmr, FRMR_IS_INUSE, FRMR_IS_STALE);
340 rds_transition_frwr_state(ibmr, FRMR_IS_INUSE, FRMR_IS_FREE);
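Lines 324-340 sit in the send-completion handler for these work requests (rds_ib_mr_cqe_handler() upstream). The wr_id posted above is cast back to the rds_ib_mr; an error completion pushes the MR from INUSE to STALE, and a completed invalidate moves it from INUSE to FREE so it can be recycled. A sketch of the handler body, with the wake-ups and the error reporting filled in as assumptions:

        struct rds_ib_mr *ibmr = (void *)(unsigned long)wc->wr_id;
        struct rds_ib_frmr *frmr = &ibmr->u.frmr;

        if (wc->status != IB_WC_SUCCESS) {
                /* a failed reg or inv leaves the MR unusable until re-registered */
                rds_transition_frwr_state(ibmr, FRMR_IS_INUSE, FRMR_IS_STALE);
                if (rds_conn_up(ic->conn))
                        rds_ib_conn_error(ic->conn,
                                          "frmr completion status %u\n",
                                          wc->status);
        }

        if (frmr->fr_inv) {
                /* successful LOCAL_INV: the MR is free for reuse */
                rds_transition_frwr_state(ibmr, FRMR_IS_INUSE, FRMR_IS_FREE);
                frmr->fr_inv = false;
                wake_up(&frmr->fr_inv_done);
        }

        if (frmr->fr_reg) {
                frmr->fr_reg = false;
                wake_up(&frmr->fr_reg_done);
        }

        /* hand the WR credit back to the next poster */
        atomic_inc(&ibmr->ic->i_fastreg_wrs);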
360 struct rds_ib_mr *ibmr, *next;
366 list_for_each_entry(ibmr, list, unmap_list) {
367 if (ibmr->sg_dma_len) {
368 ret2 = rds_ib_post_inv(ibmr);
378 list_for_each_entry_safe(ibmr, next, list, unmap_list) {
379 *unpinned += ibmr->sg_len;
380 frmr = &ibmr->u.frmr;
381 __rds_ib_teardown_mr(ibmr);
387 if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL)
391 list_del(&ibmr->unmap_list);
394 kfree(ibmr);
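Lines 360-394 belong to the pool flush: the first loop (lines 366-368) posts an invalidate for every MR on the unmap list that still has a DMA mapping, and the second loop tears the mappings down, counts the unpinned pages, and destroys MRs that are stale or in excess of what the pool wants to keep. A sketch of that second loop; the freed/free_goal bookkeeping around the matched lines is an assumption:

        list_for_each_entry_safe(ibmr, next, list, unmap_list) {
                *unpinned += ibmr->sg_len;
                frmr = &ibmr->u.frmr;
                __rds_ib_teardown_mr(ibmr);

                /* destroy while still under the free goal, or if the MR
                 * went stale; MRs still marked in-use are left alone
                 */
                if (freed < free_goal || frmr->fr_state == FRMR_IS_STALE) {
                        if (frmr->fr_state == FRMR_IS_INUSE)
                                continue;

                        if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL)
                                rds_ib_stats_inc(s_ib_rdma_mr_8k_free);
                        else
                                rds_ib_stats_inc(s_ib_rdma_mr_1m_free);

                        list_del(&ibmr->unmap_list);
                        if (frmr->mr)
                                ib_dereg_mr(frmr->mr);
                        kfree(ibmr);
                        freed++;
                }
        }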
406 struct rds_ib_mr *ibmr = NULL;
416 if (ibmr)
417 rds_ib_free_frmr(ibmr, true);
418 ibmr = rds_ib_alloc_frmr(rds_ibdev, nents);
419 if (IS_ERR(ibmr))
420 return ibmr;
421 frmr = &ibmr->u.frmr;
424 ibmr->ic = ic;
425 ibmr->device = rds_ibdev;
426 ret = rds_ib_map_frmr(rds_ibdev, ibmr->pool, ibmr, sg, nents);
430 rds_ib_free_frmr(ibmr, false);
431 ibmr = ERR_PTR(ret);
434 return ibmr;
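Lines 406-434 are the registration entry point used by the RDS RDMA map path. It keeps allocating until it gets an MR that is actually in the FREE state (a stale one pulled from the pool is pushed straight onto the drop list), binds the MR to the connection and device, and maps/registers the scatterlist; a mapping failure returns the MR to the pool and hands an ERR_PTR back to the caller. A sketch of that flow, with the rkey out-parameter treated as an assumption:

        do {
                if (ibmr)
                        rds_ib_free_frmr(ibmr, true);   /* stale: drop it, try again */
                ibmr = rds_ib_alloc_frmr(rds_ibdev, nents);
                if (IS_ERR(ibmr))
                        return ibmr;
                frmr = &ibmr->u.frmr;
        } while (frmr->fr_state != FRMR_IS_FREE);

        ibmr->ic = ic;
        ibmr->device = rds_ibdev;
        ret = rds_ib_map_frmr(rds_ibdev, ibmr->pool, ibmr, sg, nents);
        if (ret == 0) {
                *key = frmr->mr->rkey;
        } else {
                rds_ib_free_frmr(ibmr, false);
                ibmr = ERR_PTR(ret);
        }

        return ibmr;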
437 void rds_ib_free_frmr_list(struct rds_ib_mr *ibmr)
439 struct rds_ib_mr_pool *pool = ibmr->pool;
440 struct rds_ib_frmr *frmr = &ibmr->u.frmr;
443 llist_add(&ibmr->llnode, &pool->drop_list);
445 llist_add(&ibmr->llnode, &pool->free_list);
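Lines 437-445 close the loop: when an MR comes back from the upper layers it is queued for the flush worker, onto the drop list if its state says it can no longer be trusted, otherwise onto the free list for reuse. A minimal sketch of the whole function; only the condition between the two llist_add() calls is filled in as an assumption:

void rds_ib_free_frmr_list(struct rds_ib_mr *ibmr)
{
        struct rds_ib_mr_pool *pool = ibmr->pool;
        struct rds_ib_frmr *frmr = &ibmr->u.frmr;

        /* stale MRs must be destroyed, everything else can be recycled */
        if (frmr->fr_state == FRMR_IS_STALE)
                llist_add(&ibmr->llnode, &pool->drop_list);
        else
                llist_add(&ibmr->llnode, &pool->free_list);
}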