Lines Matching defs:srp_dev

235 iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
237 if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
258 ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
280 ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
308 new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
316 if (rdma_cap_opa_ah(target->srp_host->srp_dev->dev,
499 struct srp_device *dev = target->srp_host->srp_dev;
526 struct srp_device *dev = target->srp_host->srp_dev;
639 struct srp_device *dev = target->srp_host->srp_dev;
721 target->srp_host->srp_dev->dev,
784 ret = ib_query_port(host->srp_dev->dev, host->port, &attr);
790 dev_name(&host->srp_dev->dev->dev), subnet_timeout);
907 memcpy(ipi + 8, &target->srp_host->srp_dev->dev->node_guid, 8);
966 struct srp_device *dev = target->srp_host->srp_dev;
984 struct srp_device *srp_dev = target->srp_host->srp_dev;
985 struct ib_device *ibdev = srp_dev->dev;
990 if (srp_dev->use_fast_reg) {
1181 struct srp_device *dev = target->srp_host->srp_dev;
1426 struct srp_device *dev = target->srp_host->srp_dev;
1555 struct srp_device *dev = target->srp_host->srp_dev;
1593 struct srp_device *dev = ch->target->srp_host->srp_dev;
1653 dev = target->srp_host->srp_dev;
1996 struct ib_device *dev = target->srp_host->srp_dev->dev;
2064 struct ib_device *dev = target->srp_host->srp_dev->dev;
2179 dev = target->srp_host->srp_dev->dev;
2719 struct ib_device *dev = target->srp_host->srp_dev->dev;
2988 dev_name(&target->srp_host->srp_dev->dev->dev));
3117 if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dev.parent))
3690 struct srp_device *srp_dev = host->srp_dev;
3691 struct ib_device *ibdev = srp_dev->dev;
3710 target_host->virt_boundary_mask = ~srp_dev->mr_page_mask;
3718 target->lkey = host->srp_dev->pd->local_dma_lkey;
3719 target->global_rkey = host->srp_dev->global_rkey;
3758 if (!srp_dev->has_fr && !target->allow_ext_sg &&
3764 if (srp_dev->use_fast_reg) {
3768 max_sectors_per_mr = srp_dev->max_pages_per_mr <<
3769 (ilog2(srp_dev->mr_page_size) - 9);
3790 srp_dev->max_pages_per_mr - 1) /
3791 srp_dev->max_pages_per_mr;
3794 target->scsi_host->max_sectors, srp_dev->max_pages_per_mr, srp_dev->mr_page_size,
3933 return sysfs_emit(buf, "%s\n", dev_name(&host->srp_dev->dev->dev));
3966 host->srp_dev = device;
3988 struct srp_device *srp_dev = client_data;
3991 list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
4002 struct srp_device *srp_dev;
4010 srp_dev = kzalloc(sizeof(*srp_dev), GFP_KERNEL);
4011 if (!srp_dev)
4020 srp_dev->mr_page_size = 1 << mr_page_shift;
4021 srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1);
4023 do_div(max_pages_per_mr, srp_dev->mr_page_size);
4025 attr->max_mr_size, srp_dev->mr_page_size,
4027 srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
4030 srp_dev->has_fr = (attr->device_cap_flags &
4032 if (!never_register && !srp_dev->has_fr)
4035 attr->max_mr_size >= 2 * srp_dev->mr_page_size)
4036 srp_dev->use_fast_reg = srp_dev->has_fr;
4038 if (never_register || !register_always || !srp_dev->has_fr)
4041 if (srp_dev->use_fast_reg) {
4042 srp_dev->max_pages_per_mr =
4043 min_t(u32, srp_dev->max_pages_per_mr,
4046 srp_dev->mr_max_size = srp_dev->mr_page_size *
4047 srp_dev->max_pages_per_mr;
4051 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
4053 INIT_LIST_HEAD(&srp_dev->dev_list);
4055 srp_dev->dev = device;
4056 srp_dev->pd = ib_alloc_pd(device, flags);
4057 if (IS_ERR(srp_dev->pd)) {
4058 int ret = PTR_ERR(srp_dev->pd);
4060 kfree(srp_dev);
4065 srp_dev->global_rkey = srp_dev->pd->unsafe_global_rkey;
4066 WARN_ON_ONCE(srp_dev->global_rkey == 0);
4070 host = srp_add_port(srp_dev, p);
4072 list_add_tail(&host->list, &srp_dev->dev_list);
4075 ib_set_client_data(device, &srp_client, srp_dev);
4081 struct srp_device *srp_dev;
4085 srp_dev = client_data;
4087 list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
4113 ib_dealloc_pd(srp_dev->pd);
4115 kfree(srp_dev);
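
The matches at lines 235, 237 and 258 show the standard ibverbs DMA helper triple applied to the port's ib_device. Below is a minimal sketch of that map/check/unmap pattern, assuming a caller-supplied buffer and direction; srp_map_buf() and srp_unmap_buf() are hypothetical helper names for illustration only, not functions from the driver.

    #include <linux/errno.h>
    #include <rdma/ib_verbs.h>

    /*
     * Hypothetical helpers illustrating the ib_dma_map_single() /
     * ib_dma_mapping_error() / ib_dma_unmap_single() pattern used for the
     * information-unit buffers in the matches above; not part of ib_srp.c.
     */
    static int srp_map_buf(struct ib_device *ibdev, void *buf, size_t size,
    		       enum dma_data_direction dir, u64 *dma)
    {
    	/* Map the buffer so the HCA can DMA to or from it. */
    	*dma = ib_dma_map_single(ibdev, buf, size, dir);
    	if (ib_dma_mapping_error(ibdev, *dma))
    		return -ENOMEM;
    	return 0;
    }

    static void srp_unmap_buf(struct ib_device *ibdev, u64 dma, size_t size,
    			  enum dma_data_direction dir)
    {
    	ib_dma_unmap_single(ibdev, dma, size, dir);
    }

The driver stores the returned bus address in iu->dma and passes the same ib_device (host->srp_dev->dev) to all three helpers, which is why srp_dev appears in each of the matched lines.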
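
The matches from line 4010 onward come from the driver's ib_client add callback, which sizes the memory-registration parameters from the device attributes and allocates the protection domain. The following is a condensed, approximate reconstruction built around those fragments: warnings, debug prints and the port loop are trimmed, the never_register/register_always module parameters are stubbed in locally, and the helper name is hypothetical, so read it as a sketch rather than the exact driver code.

    #include <linux/bitops.h>
    #include <linux/bug.h>
    #include <linux/err.h>
    #include <linux/list.h>
    #include <linux/math64.h>
    #include <linux/minmax.h>
    #include <linux/slab.h>
    #include <rdma/ib_verbs.h>

    #include "ib_srp.h"		/* struct srp_device, SRP_MAX_PAGES_PER_MR */

    /* Local stand-ins for the module parameters defined in ib_srp.c. */
    static bool never_register;
    static bool register_always = true;

    static struct srp_device *srp_device_setup_sketch(struct ib_device *device)
    {
    	struct ib_device_attr *attr = &device->attrs;
    	struct srp_device *srp_dev;
    	u64 max_pages_per_mr;
    	unsigned int flags = 0;
    	int mr_page_shift;

    	srp_dev = kzalloc(sizeof(*srp_dev), GFP_KERNEL);
    	if (!srp_dev)
    		return ERR_PTR(-ENOMEM);

    	/* Smallest page size the HCA supports, but at least 4 KiB. */
    	mr_page_shift = max(12, ffs(attr->page_size_cap) - 1);
    	srp_dev->mr_page_size = 1 << mr_page_shift;
    	srp_dev->mr_page_mask = ~((u64)srp_dev->mr_page_size - 1);

    	/* Number of such pages that fit in the largest supported MR. */
    	max_pages_per_mr = attr->max_mr_size;
    	do_div(max_pages_per_mr, srp_dev->mr_page_size);
    	srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
    					  max_pages_per_mr);

    	/* Fast registration is used only if the HCA advertises it. */
    	srp_dev->has_fr = (attr->device_cap_flags &
    			   IB_DEVICE_MEM_MGT_EXTENSIONS);
    	srp_dev->use_fast_reg = !never_register && srp_dev->has_fr &&
    		attr->max_mr_size >= 2 * srp_dev->mr_page_size;

    	if (srp_dev->use_fast_reg)
    		srp_dev->max_pages_per_mr =
    			min_t(u32, srp_dev->max_pages_per_mr,
    			      attr->max_fast_reg_page_list_len);
    	srp_dev->mr_max_size = srp_dev->mr_page_size *
    			       srp_dev->max_pages_per_mr;

    	/* Fall back to a device-wide rkey when registration is skipped. */
    	if (never_register || !register_always || !srp_dev->has_fr)
    		flags |= IB_PD_UNSAFE_GLOBAL_RKEY;

    	INIT_LIST_HEAD(&srp_dev->dev_list);
    	srp_dev->dev = device;
    	srp_dev->pd = ib_alloc_pd(device, flags);
    	if (IS_ERR(srp_dev->pd)) {
    		int ret = PTR_ERR(srp_dev->pd);

    		kfree(srp_dev);
    		return ERR_PTR(ret);
    	}

    	if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
    		srp_dev->global_rkey = srp_dev->pd->unsafe_global_rkey;
    		WARN_ON_ONCE(srp_dev->global_rkey == 0);
    	}

    	return srp_dev;
    }

This mirrors the structure visible in the matches at lines 4020-4066: page size and mask derivation, the fast-registration capability check, the IB_PD_UNSAFE_GLOBAL_RKEY fallback, and caching pd->unsafe_global_rkey as srp_dev->global_rkey.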