Search scope: /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/net/rds/

Lines matching refs:ibmr (each ibmr is a struct rds_iw_mr *, the fastreg memory-region handle used by the RDS iWARP transport)

80 static int rds_iw_init_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
82 struct rds_iw_mr *ibmr,
84 static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
88 static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr);
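These four forward declarations are the pool's fastreg lifecycle hooks: create, map, return-to-pool, destroy. Line 82 is the continuation line of the rds_iw_map_fastreg prototype. A plausible reconstruction of all four, with the scatterlist parameters of the mapping hook inferred from the call at line 624:

    static int  rds_iw_init_fastreg(struct rds_iw_mr_pool *pool,
                                    struct rds_iw_mr *ibmr);
    static int  rds_iw_map_fastreg(struct rds_iw_mr_pool *pool,
                                   struct rds_iw_mr *ibmr,
                                   struct scatterlist *sg, unsigned int nents);
    static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool,
                                    struct rds_iw_mr *ibmr);
    static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool,
                                       struct rds_iw_mr *ibmr);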
381 struct rds_iw_mr *ibmr = NULL;
386 ibmr = list_entry(pool->clean_list.next, struct rds_iw_mr, mapping.m_list);
387 list_del_init(&ibmr->mapping.m_list);
391 return ibmr;
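Lines 381-391 are the MR-reuse fast path: pop the first fully invalidated MR off the pool's clean_list. A minimal sketch, assuming the list is guarded by a pool spinlock (the name list_lock is an assumption):

    static inline struct rds_iw_mr *rds_iw_reuse_fmr(struct rds_iw_mr_pool *pool)
    {
            struct rds_iw_mr *ibmr = NULL;
            unsigned long flags;

            spin_lock_irqsave(&pool->list_lock, flags);   /* lock name assumed */
            if (!list_empty(&pool->clean_list)) {
                    /* Take the first clean (already invalidated) MR */
                    ibmr = list_entry(pool->clean_list.next,
                                      struct rds_iw_mr, mapping.m_list);
                    list_del_init(&ibmr->mapping.m_list);
            }
            spin_unlock_irqrestore(&pool->list_lock, flags);

            return ibmr;
    }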
397 struct rds_iw_mr *ibmr = NULL;
401 ibmr = rds_iw_reuse_fmr(pool);
402 if (ibmr)
403 return ibmr;
429 ibmr = kzalloc(sizeof(*ibmr), GFP_KERNEL);
430 if (!ibmr) {
435 spin_lock_init(&ibmr->mapping.m_lock);
436 INIT_LIST_HEAD(&ibmr->mapping.m_list);
437 ibmr->mapping.m_mr = ibmr;
439 err = rds_iw_init_fastreg(pool, ibmr);
444 return ibmr;
447 if (ibmr) {
448 rds_iw_destroy_fastreg(pool, ibmr);
449 kfree(ibmr);
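Lines 397-449 trace rds_iw_alloc_mr: try to recycle from the clean list first, otherwise kzalloc a fresh rds_iw_mr, initialize its embedded mapping, and hand it to rds_iw_init_fastreg. A sketch of that flow; the pool-limit logic between lines 403 and 429 is elided and the error-label name is an assumption:

    static struct rds_iw_mr *rds_iw_alloc_mr(struct rds_iw_device *rds_iwdev)
    {
            struct rds_iw_mr_pool *pool = rds_iwdev->mr_pool;
            struct rds_iw_mr *ibmr = NULL;
            int err;

            /* Fast path: recycle an already-invalidated MR from the clean list */
            ibmr = rds_iw_reuse_fmr(pool);
            if (ibmr)
                    return ibmr;

            /* Slow path: allocate and initialize a fresh MR */
            ibmr = kzalloc(sizeof(*ibmr), GFP_KERNEL);
            if (!ibmr) {
                    err = -ENOMEM;
                    goto out_fail;                    /* label name assumed */
            }

            spin_lock_init(&ibmr->mapping.m_lock);
            INIT_LIST_HEAD(&ibmr->mapping.m_list);
            ibmr->mapping.m_mr = ibmr;                /* mapping back-pointer */

            err = rds_iw_init_fastreg(pool, ibmr);
            if (err)
                    goto out_fail;

            return ibmr;

    out_fail:
            if (ibmr) {
                    rds_iw_destroy_fastreg(pool, ibmr);
                    kfree(ibmr);
            }
            return ERR_PTR(err);
    }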
457 struct rds_iw_mr *ibmr = trans_private;
458 struct rds_iw_device *rds_iwdev = ibmr->device;
462 ib_dma_sync_sg_for_cpu(rds_iwdev->dev, ibmr->mapping.m_sg.list,
463 ibmr->mapping.m_sg.dma_len, DMA_BIDIRECTIONAL);
466 ib_dma_sync_sg_for_device(rds_iwdev->dev, ibmr->mapping.m_sg.list,
467 ibmr->mapping.m_sg.dma_len, DMA_BIDIRECTIONAL);
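Lines 457-467 are rds_iw_sync_mr, which bounces DMA ownership of the mapped scatterlist between CPU and device. A sketch, assuming the direction argument carries the standard dma_data_direction values:

    static void rds_iw_sync_mr(void *trans_private, int direction)
    {
            struct rds_iw_mr *ibmr = trans_private;
            struct rds_iw_device *rds_iwdev = ibmr->device;

            switch (direction) {
            case DMA_FROM_DEVICE:
                    /* Hand ownership of the mapped pages to the CPU */
                    ib_dma_sync_sg_for_cpu(rds_iwdev->dev, ibmr->mapping.m_sg.list,
                            ibmr->mapping.m_sg.dma_len, DMA_BIDIRECTIONAL);
                    break;
            case DMA_TO_DEVICE:
                    /* Hand ownership back to the device */
                    ib_dma_sync_sg_for_device(rds_iwdev->dev, ibmr->mapping.m_sg.list,
                            ibmr->mapping.m_sg.dma_len, DMA_BIDIRECTIONAL);
                    break;
            }
    }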
491 struct rds_iw_mr *ibmr, *next;
513 * actually members of an ibmr (ibmr->mapping). They either
528 list_for_each_entry_safe(ibmr, next, &kill_list, mapping.m_list) {
530 list_del(&ibmr->mapping.m_list);
531 rds_iw_destroy_fastreg(pool, ibmr);
532 kfree(ibmr);
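Lines 491-532 belong to the pool flush path: doomed mappings are spliced onto a local kill_list so the pool lock can be dropped before the slow teardown, and, as the comment at line 513 notes, each list entry is really the mapping embedded in an ibmr. A sketch of that splice-then-destroy pattern (lock name and the exact lists spliced are assumptions):

    unsigned long flags;
    LIST_HEAD(kill_list);

    /* Move doomed mappings to a private list under the lock */
    spin_lock_irqsave(&pool->list_lock, flags);       /* lock name assumed */
    list_splice_init(&pool->dirty_list, &kill_list);  /* clean_list too when
                                                         freeing everything */
    spin_unlock_irqrestore(&pool->list_lock, flags);

    /* Each entry is the mapping embedded in an ibmr (cf. line 513) */
    list_for_each_entry_safe(ibmr, next, &kill_list, mapping.m_list) {
            list_del(&ibmr->mapping.m_list);
            rds_iw_destroy_fastreg(pool, ibmr);
            kfree(ibmr);
    }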
560 struct rds_iw_mr *ibmr = trans_private;
561 struct rds_iw_mr_pool *pool = ibmr->device->mr_pool;
563 rdsdebug("RDS/IW: free_mr nents %u\n", ibmr->mapping.m_sg.len);
568 rds_iw_free_fastreg(pool, ibmr);
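Lines 560-568 show rds_iw_free_mr returning an MR to the pool rather than destroying it. A sketch; the flush trigger after the return-to-pool step is an assumption suggested by the free_pinned accounting at line 813:

    void rds_iw_free_mr(void *trans_private, int invalidate)
    {
            struct rds_iw_mr *ibmr = trans_private;
            struct rds_iw_mr_pool *pool = ibmr->device->mr_pool;

            rdsdebug("RDS/IW: free_mr nents %u\n", ibmr->mapping.m_sg.len);

            /* Return the MR to the pool rather than destroying it */
            rds_iw_free_fastreg(pool, ibmr);

            /* Assumed: kick an async pool flush once too much memory is
             * pinned by MRs parked on the dirty list */
            if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned)
                    queue_work(rds_wq, &pool->flush_worker);
    }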
602 struct rds_iw_mr *ibmr = NULL;
617 ibmr = rds_iw_alloc_mr(rds_iwdev);
618 if (IS_ERR(ibmr))
619 return ibmr;
621 ibmr->cm_id = cm_id;
622 ibmr->device = rds_iwdev;
624 ret = rds_iw_map_fastreg(rds_iwdev->mr_pool, ibmr, sg, nents);
626 *key_ret = ibmr->mr->rkey;
632 if (ibmr)
633 rds_iw_free_mr(ibmr, 0);
634 ibmr = ERR_PTR(ret);
636 return ibmr;
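Lines 602-636 are the registration entry point: allocate (or reuse) an MR, bind it to the connection's cm_id and device, map the caller's scatterlist through the fastreg machinery, and hand the rkey back through key_ret. A sketch of that sequence; the device/cm_id lookup between lines 602 and 617 is elided, and the rds_sock parameter is an assumption:

    void *rds_iw_get_mr(struct scatterlist *sg, unsigned long nents,
                        struct rds_sock *rs, u32 *key_ret)  /* rs param assumed */
    {
            struct rds_iw_device *rds_iwdev;
            struct rdma_cm_id *cm_id;
            struct rds_iw_mr *ibmr = NULL;
            int ret;

            /* ... lookup of rds_iwdev and cm_id from the socket elided ... */

            ibmr = rds_iw_alloc_mr(rds_iwdev);
            if (IS_ERR(ibmr))
                    return ibmr;

            ibmr->cm_id = cm_id;
            ibmr->device = rds_iwdev;

            ret = rds_iw_map_fastreg(rds_iwdev->mr_pool, ibmr, sg, nents);
            if (ret == 0) {
                    *key_ret = ibmr->mr->rkey;
                    return ibmr;
            }

            /* Error path: give the MR back and report the failure */
            rds_iw_free_mr(ibmr, 0);
            return ERR_PTR(ret);
    }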
662 struct rds_iw_mr *ibmr)
686 ibmr->page_list = page_list;
687 ibmr->mr = mr;
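Lines 662-687 belong to rds_iw_init_fastreg, which provisions the verbs resources an MR needs for fast registration: an ib_mr allocated for fastreg use plus its page list. A sketch, assuming a pool->device back-pointer and a pool-wide message-size limit:

    static int rds_iw_init_fastreg(struct rds_iw_mr_pool *pool,
                                   struct rds_iw_mr *ibmr)
    {
            struct rds_iw_device *rds_iwdev = pool->device;   /* field assumed */
            struct ib_fast_reg_page_list *page_list;
            struct ib_mr *mr;

            mr = ib_alloc_fast_reg_mr(rds_iwdev->pd, pool->max_message_size);
            if (IS_ERR(mr))                      /* size field assumed */
                    return PTR_ERR(mr);

            page_list = ib_alloc_fast_reg_page_list(rds_iwdev->dev,
                                                    pool->max_message_size);
            if (IS_ERR(page_list)) {
                    ib_dereg_mr(mr);
                    return PTR_ERR(page_list);
            }

            ibmr->page_list = page_list;
            ibmr->mr = mr;
            return 0;
    }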
693 struct rds_iw_mr *ibmr = mapping->m_mr;
703 ib_update_fast_reg_key(ibmr->mr, ibmr->remap_count++);
704 mapping->m_rkey = ibmr->mr->rkey;
711 f_wr.wr.fast_reg.page_list = ibmr->page_list;
721 ret = ib_post_send(ibmr->cm_id->qp, &f_wr, &failed_wr);
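Lines 693-721 post the actual IB_WR_FAST_REG_MR work request: bump the MR's key so stale remote references fault, record the new rkey in the mapping, point the WR at the MR's page list, and post it on the connection's QP. A sketch; the page_list_len source field and the access flags are assumptions:

    static int rds_iw_rdma_build_fastreg(struct rds_iw_mapping *mapping)
    {
            struct rds_iw_mr *ibmr = mapping->m_mr;
            struct ib_send_wr f_wr, *failed_wr;
            int ret;

            /* New key generation: stale remote references now fault */
            ib_update_fast_reg_key(ibmr->mr, ibmr->remap_count++);
            mapping->m_rkey = ibmr->mr->rkey;

            memset(&f_wr, 0, sizeof(f_wr));
            f_wr.opcode = IB_WR_FAST_REG_MR;
            f_wr.wr.fast_reg.rkey = mapping->m_rkey;
            f_wr.wr.fast_reg.page_list = ibmr->page_list;
            f_wr.wr.fast_reg.page_list_len = mapping->m_sg.dma_len;  /* assumed */
            f_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
            f_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE |  /* flags */
                            IB_ACCESS_REMOTE_READ |                  /* assumed */
                            IB_ACCESS_REMOTE_WRITE;
            f_wr.send_flags = IB_SEND_SIGNALED;

            failed_wr = &f_wr;
            ret = ib_post_send(ibmr->cm_id->qp, &f_wr, &failed_wr);
            if (ret)
                    printk(KERN_WARNING "RDS/IW: fastreg post_send failed %d\n", ret);
            return ret;
    }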
729 static int rds_iw_rdma_fastreg_inv(struct rds_iw_mr *ibmr)
734 if (!ibmr->cm_id->qp || !ibmr->mr)
740 s_wr.ex.invalidate_rkey = ibmr->mr->rkey;
744 ret = ib_post_send(ibmr->cm_id->qp, &s_wr, &failed_wr);
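Lines 729-744 are the inverse operation: an IB_WR_LOCAL_INV work request that invalidates the MR's current rkey, after which the MR can be parked on the clean list for reuse. A sketch under the same assumptions:

    static int rds_iw_rdma_fastreg_inv(struct rds_iw_mr *ibmr)
    {
            struct ib_send_wr s_wr, *failed_wr;
            int ret = 0;

            /* Nothing to invalidate without a QP or a registered MR */
            if (!ibmr->cm_id->qp || !ibmr->mr)
                    return 0;

            memset(&s_wr, 0, sizeof(s_wr));
            s_wr.opcode = IB_WR_LOCAL_INV;
            s_wr.ex.invalidate_rkey = ibmr->mr->rkey;
            s_wr.send_flags = IB_SEND_SIGNALED;

            failed_wr = &s_wr;
            ret = ib_post_send(ibmr->cm_id->qp, &s_wr, &failed_wr);
            if (ret)
                    printk(KERN_WARNING "RDS/IW: local-inv post_send failed %d\n", ret);
            return ret;
    }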
755 struct rds_iw_mr *ibmr,
760 struct rds_iw_mapping *mapping = &ibmr->mapping;
779 ibmr->page_list->page_list[i] = dma_pages[i];
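Lines 755-779 sit inside rds_iw_map_fastreg: once the scatterlist has been DMA-mapped, each device address is copied into the fastreg page list that the work request at line 711 will reference. Roughly, with the dma_pages array and the dma_npages bound partly assumed from the surrounding code:

    /* Copy the DMA-mapped page addresses into the fastreg page list */
    for (i = 0; i < mapping->m_sg.dma_npages; ++i)    /* bound assumed */
            ibmr->page_list->page_list[i] = dma_pages[i];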
797 struct rds_iw_mr *ibmr)
802 if (!ibmr->mapping.m_sg.dma_len)
805 ret = rds_iw_rdma_fastreg_inv(ibmr);
812 list_add_tail(&ibmr->mapping.m_list, &pool->dirty_list);
813 atomic_add(ibmr->mapping.m_sg.len, &pool->free_pinned);
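Lines 797-813 show why the pool keeps a dirty list: a freed MR is first invalidated, then queued on dirty_list while its pinned-page count is added to free_pinned so the pool can decide when to flush. A sketch; the locking around the list is an assumption:

    static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool,
                                    struct rds_iw_mr *ibmr)
    {
            unsigned long flags;

            if (!ibmr->mapping.m_sg.dma_len)
                    return;   /* nothing mapped, nothing to return */

            if (rds_iw_rdma_fastreg_inv(ibmr))
                    return;   /* invalidate failed; do not recycle the MR */

            /* Park the mapping on the dirty list and account pinned pages */
            spin_lock_irqsave(&pool->list_lock, flags);   /* lock name assumed */
            list_add_tail(&ibmr->mapping.m_list, &pool->dirty_list);
            atomic_add(ibmr->mapping.m_sg.len, &pool->free_pinned);
            spin_unlock_irqrestore(&pool->list_lock, flags);
    }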
863 struct rds_iw_mr *ibmr)
865 if (ibmr->page_list)
866 ib_free_fast_reg_page_list(ibmr->page_list);
867 if (ibmr->mr)
868 ib_dereg_mr(ibmr->mr);
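Lines 863-868 are the terminal step of the lifecycle: release the fastreg page list and deregister the MR itself. The matched lines cover essentially the whole function; assembled:

    static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool,
                                       struct rds_iw_mr *ibmr)
    {
            if (ibmr->page_list)
                    ib_free_fast_reg_page_list(ibmr->page_list);
            if (ibmr->mr)
                    ib_dereg_mr(ibmr->mr);
    }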