Directory: /freebsd-12-stable/sys/ofed/drivers/infiniband/core/

Lines Matching refs:cma_dev
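Each entry below gives the line number in the source file followed by the matching source line. Given the directory and the identifiers involved (rdma_cm IDs, cma_attach_to_dev, ib_set_client_data), the file is almost certainly cma.c, the RDMA connection manager, and the matches trace the full lifecycle of struct cma_device: one instance is created per ib_device in cma_add_one(), reference-counted while rdma_cm IDs are attached to it, exposed through per-port default-GID-type sysctls, and torn down in cma_remove_one().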

217 void cma_ref_dev(struct cma_device *cma_dev)
219 atomic_inc(&cma_dev->refcount);
225 struct cma_device *cma_dev;
230 list_for_each_entry(cma_dev, &dev_list, list)
231 if (filter(cma_dev->device, cookie)) {
232 found_cma_dev = cma_dev;
242 int cma_get_default_gid_type(struct cma_device *cma_dev,
245 if (port < rdma_start_port(cma_dev->device) ||
246 port > rdma_end_port(cma_dev->device))
249 return cma_dev->default_gid_type[port - rdma_start_port(cma_dev->device)];
252 int cma_set_default_gid_type(struct cma_device *cma_dev,
258 if (port < rdma_start_port(cma_dev->device) ||
259 port > rdma_end_port(cma_dev->device))
262 supported_gids = roce_gid_type_mask_support(cma_dev->device, port);
267 cma_dev->default_gid_type[port - rdma_start_port(cma_dev->device)] =
273 struct ib_device *cma_get_ib_dev(struct cma_device *cma_dev)
275 return cma_dev->device;
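
The accessors above (lines 217-275) and the setup/teardown code further down reveal the shape of struct cma_device. The sketch below is a reconstruction from those references only, not the authoritative definition; every field name appears somewhere in the listing, but the exact types and the field order are inferred.

    /* Reconstructed sketch of struct cma_device (kernel context assumed). */
    struct cma_device {
        struct list_head       list;              /* linkage on the global dev_list (lines 230, 4290, 4370) */
        struct ib_device      *device;            /* underlying ib_device (lines 275, 4256) */
        struct completion      comp;              /* completed when refcount reaches zero (lines 479, 4284, 4359) */
        atomic_t               refcount;          /* one reference per attached rdma_cm ID (lines 219, 478, 4285) */
        struct list_head       id_list;           /* rdma_cm IDs bound to this device (lines 464, 4286, 4340) */
        struct sysctl_ctx_list sysctl_ctx;        /* FreeBSD sysctl context for the per-port GID-type knobs (lines 4254, 4374) */
        enum ib_gid_type      *default_gid_type;  /* per-port defaults, indexed by port - rdma_start_port() (lines 249, 4257) */
    };
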
291 struct cma_device *cma_dev;
456 struct cma_device *cma_dev)
458 cma_ref_dev(cma_dev);
459 id_priv->cma_dev = cma_dev;
461 id_priv->id.device = cma_dev->device;
463 rdma_node_get_transport(cma_dev->device->node_type);
464 list_add_tail(&id_priv->list, &cma_dev->id_list);
468 struct cma_device *cma_dev)
470 _cma_attach_to_dev(id_priv, cma_dev);
472 cma_dev->default_gid_type[id_priv->id.port_num -
473 rdma_start_port(cma_dev->device)];
476 void cma_deref_dev(struct cma_device *cma_dev)
478 if (atomic_dec_and_test(&cma_dev->refcount))
479 complete(&cma_dev->comp);
494 cma_deref_dev(id_priv->cma_dev);
495 id_priv->cma_dev = NULL;
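
Lines 456-495 show how an rdma_cm ID is bound to, and later released from, a cma_device. The sketch below stitches the matching lines into complete functions; the name cma_release_dev, the left-hand side of the transport assignment, the list_del and all locking are assumptions, since only lines that mention cma_dev appear in the listing.

    static void _cma_attach_to_dev(struct rdma_id_private *id_priv,
                                   struct cma_device *cma_dev)
    {
        cma_ref_dev(cma_dev);                   /* atomic_inc(&cma_dev->refcount), line 219 */
        id_priv->cma_dev = cma_dev;
        id_priv->id.device = cma_dev->device;
        id_priv->id.route.addr.dev_addr.transport =     /* left-hand side assumed */
            rdma_node_get_transport(cma_dev->device->node_type);
        list_add_tail(&id_priv->list, &cma_dev->id_list);
    }

    static void cma_release_dev(struct rdma_id_private *id_priv)   /* name assumed */
    {
        list_del(&id_priv->list);               /* assumed counterpart of the list_add_tail above */
        cma_deref_dev(id_priv->cma_dev);        /* complete(&cma_dev->comp) on the last reference */
        id_priv->cma_dev = NULL;
    }

cma_attach_to_dev() (lines 468-473) is a thin wrapper around the first function that additionally seeds the ID's GID type from cma_dev->default_gid_type[port - rdma_start_port(device)].
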
605 struct cma_device *cma_dev;
622 cma_dev = listen_id_priv->cma_dev;
625 if (rdma_is_port_valid(cma_dev->device, port)) {
626 gidp = rdma_protocol_roce(cma_dev->device, port) ?
629 ret = cma_validate_port(cma_dev->device, port,
630 rdma_protocol_ib(cma_dev->device, port) ?
640 list_for_each_entry(cma_dev, &dev_list, list) {
641 for (port = 1; port <= cma_dev->device->phys_port_cnt; ++port) {
643 listen_id_priv->cma_dev == cma_dev &&
647 gidp = rdma_protocol_roce(cma_dev->device, port) ?
650 ret = cma_validate_port(cma_dev->device, port,
651 rdma_protocol_ib(cma_dev->device, port) ?
653 cma_dev->default_gid_type[port - 1],
664 cma_attach_to_dev(id_priv, cma_dev);
675 struct cma_device *cma_dev, *cur_dev;
682 cma_dev = NULL;
699 cma_dev = cur_dev;
705 if (!cma_dev && (gid.global.subnet_prefix ==
707 cma_dev = cur_dev;
715 if (!cma_dev)
719 cma_attach_to_dev(id_priv, cma_dev);
884 BUG_ON(id_priv->cma_dev->device != id_priv->id.device);
1650 if (cma_any_addr(cma_src_addr(id_priv)) && !id_priv->cma_dev)
1683 if (rdma_cap_ib_mcast(id_priv->cma_dev->device,
1724 if (id_priv->cma_dev) {
2293 struct cma_device *cma_dev)
2300 if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1))
2314 _cma_attach_to_dev(dev_id_priv, cma_dev);
2323 ret, cma_dev->device->name);
2328 struct cma_device *cma_dev;
2332 list_for_each_entry(cma_dev, &dev_list, list)
2333 cma_listen_on_dev(id_priv, cma_dev);
2680 struct cma_device *cma_dev, *cur_dev;
2687 cma_dev = NULL;
2694 if (!cma_dev)
2695 cma_dev = cur_dev;
2700 cma_dev = cur_dev;
2706 if (!cma_dev) {
2714 ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid, NULL);
2718 ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey);
2723 (rdma_protocol_ib(cma_dev->device, p)) ?
2729 cma_attach_to_dev(id_priv, cma_dev);
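
The two device-acquisition paths above (lines 605-719 and 2680-2729) follow the same pattern: walk the global dev_list, match the address being resolved against a device/port GID, preferring an exact match but falling back to a subnet-prefix or first-usable device (lines 699-707, 2694-2700), and bind to the first hit with cma_attach_to_dev(), which takes the device reference; if nothing matches, resolution fails (lines 715, 2706).
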
2749 if (!status && !id_priv->cma_dev)
2783 if (!id_priv->cma_dev) {
2813 if (!id_priv->cma_dev) {
3288 if (id_priv->cma_dev)
3825 id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
3826 rdma_start_port(id_priv->cma_dev->device)];
4068 gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
4069 rdma_start_port(id_priv->cma_dev->device)];
4186 BUG_ON(id_priv->cma_dev->device != id->device);
4220 struct cma_device *cma_dev = arg1;
4226 cma_get_default_gid_type(cma_dev, port)), sizeof(buf));
4238 cma_set_default_gid_type(cma_dev, port, error);
4246 struct cma_device *cma_dev;
4250 cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL);
4251 if (!cma_dev)
4254 sysctl_ctx_init(&cma_dev->sysctl_ctx);
4256 cma_dev->device = device;
4257 cma_dev->default_gid_type = kcalloc(device->phys_port_cnt,
4258 sizeof(*cma_dev->default_gid_type),
4260 if (!cma_dev->default_gid_type) {
4261 kfree(cma_dev);
4280 cma_dev->default_gid_type[i - rdma_start_port(device)] =
4284 init_completion(&cma_dev->comp);
4285 atomic_set(&cma_dev->refcount, 1);
4286 INIT_LIST_HEAD(&cma_dev->id_list);
4287 ib_set_client_data(device, &cma_client, cma_dev);
4290 list_add_tail(&cma_dev->list, &dev_list);
4292 cma_listen_on_dev(id_priv, cma_dev);
4300 (void) SYSCTL_ADD_PROC(&cma_dev->sysctl_ctx,
4303 cma_dev, i, &sysctl_cma_default_roce_mode, "A",
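
Lines 4246-4303 are device registration. The sketch below condenses them into one function; the error-path details, the kcalloc flag, the per-port default value, the sysctl node arguments and the iteration over existing wildcard listeners are assumptions or are elided, since the listing only shows lines that mention cma_dev.

    static void cma_add_one(struct ib_device *device)
    {
        struct cma_device *cma_dev;
        unsigned int i;

        cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL);
        if (!cma_dev)
            return;

        sysctl_ctx_init(&cma_dev->sysctl_ctx);
        cma_dev->device = device;
        cma_dev->default_gid_type = kcalloc(device->phys_port_cnt,
                                            sizeof(*cma_dev->default_gid_type),
                                            GFP_KERNEL);    /* flag assumed */
        if (!cma_dev->default_gid_type) {
            kfree(cma_dev);
            return;
        }
        for (i = rdma_start_port(device); i <= rdma_end_port(device); ++i)
            cma_dev->default_gid_type[i - rdma_start_port(device)] =
                IB_GID_TYPE_IB;                 /* default value assumed */

        init_completion(&cma_dev->comp);
        atomic_set(&cma_dev->refcount, 1);      /* dropped in cma_remove_one() */
        INIT_LIST_HEAD(&cma_dev->id_list);
        ib_set_client_data(device, &cma_client, cma_dev);

        list_add_tail(&cma_dev->list, &dev_list);
        /* Existing wildcard listeners then get cma_listen_on_dev(id_priv, cma_dev),
         * and one sysctl (handler sysctl_cma_default_roce_mode) is registered per port. */
    }

The initial reference count of 1 pairs with the cma_deref_dev() in cma_process_remove() below.
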
4334 static void cma_process_remove(struct cma_device *cma_dev)
4340 while (!list_empty(&cma_dev->id_list)) {
4341 id_priv = list_entry(cma_dev->id_list.next,
4358 cma_deref_dev(cma_dev);
4359 wait_for_completion(&cma_dev->comp);
4364 struct cma_device *cma_dev = client_data;
4366 if (!cma_dev)
4370 list_del(&cma_dev->list);
4373 cma_process_remove(cma_dev);
4374 sysctl_ctx_free(&cma_dev->sysctl_ctx);
4375 kfree(cma_dev->default_gid_type);
4376 kfree(cma_dev);
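
Lines 4334-4376 are device removal: every rdma_cm ID still attached is flushed, the initial reference from cma_add_one() is dropped, and the completion guarantees the structure is not freed while another thread still holds a reference. In the sketch below, the name cma_remove_one and its signature follow the ib_client remove-callback convention, the rdma_id_private type name is inferred, and the per-ID teardown inside the loop and all locking are elided.

    static void cma_process_remove(struct cma_device *cma_dev)
    {
        struct rdma_id_private *id_priv;

        while (!list_empty(&cma_dev->id_list)) {
            id_priv = list_entry(cma_dev->id_list.next,
                                 struct rdma_id_private, list);
            /* ... detach id_priv and notify its owner (elided) ... */
        }

        cma_deref_dev(cma_dev);                 /* drop the reference taken in cma_add_one() */
        wait_for_completion(&cma_dev->comp);    /* wait for every remaining holder */
    }

    static void cma_remove_one(struct ib_device *device, void *client_data)
    {
        struct cma_device *cma_dev = client_data;

        if (!cma_dev)
            return;

        list_del(&cma_dev->list);               /* off the global dev_list */
        cma_process_remove(cma_dev);
        sysctl_ctx_free(&cma_dev->sysctl_ctx);
        kfree(cma_dev->default_gid_type);
        kfree(cma_dev);
    }

The wait_for_completion() here pairs with the complete() in cma_deref_dev() (line 479), so the cma_device outlives every rdma_cm ID that still references it.
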