Lines Matching defs:ibdev in /freebsd-13-stable/sys/dev/mlx4/mlx4_ib/

87 static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init);
137 struct mlx4_ib_dev *ibdev = to_mdev(device);
141 dev = mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port_num);
145 if (mlx4_is_bonded(ibdev->dev)) {
167 struct mlx4_ib_dev *ibdev,
172 struct mlx4_dev *dev = ibdev->dev;
200 struct mlx4_ib_dev *ibdev,
205 struct mlx4_dev *dev = ibdev->dev;
247 struct mlx4_ib_dev *ibdev,
250 if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
251 return mlx4_ib_update_gids_v1_v2(gids, ibdev, port_num);
253 return mlx4_ib_update_gids_v1(gids, ibdev, port_num);
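The three fragments above (source lines 247-253) are a capability-based dispatch: GID table updates go through the RoCE v1/v2 helper only when the HCA advertises MLX4_DEV_CAP_FLAG2_ROCE_V1_V2. A minimal reconstruction from those fragments (the type of the gids argument is inferred from context):

    static int mlx4_ib_update_gids(struct gid_entry *gids,
                                   struct mlx4_ib_dev *ibdev,
                                   u8 port_num)
    {
            /* Prefer the v1/v2 update path on RoCE v2 capable HCAs. */
            if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
                    return mlx4_ib_update_gids_v1_v2(gids, ibdev, port_num);

            return mlx4_ib_update_gids_v1(gids, ibdev, port_num);
    }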
263 struct mlx4_ib_dev *ibdev = to_mdev(device);
264 struct mlx4_ib_iboe *iboe = &ibdev->iboe;
328 ret = mlx4_ib_update_gids(gids, ibdev, port_num);
341 struct mlx4_ib_dev *ibdev = to_mdev(device);
342 struct mlx4_ib_iboe *iboe = &ibdev->iboe;
386 ret = mlx4_ib_update_gids(gids, ibdev, port_num);
392 int mlx4_ib_gid_index_to_real_index(struct mlx4_ib_dev *ibdev,
395 struct mlx4_ib_iboe *iboe = &ibdev->iboe;
408 if (mlx4_is_bonded(ibdev->dev))
411 if (!rdma_cap_roce_gid_table(&ibdev->ib_dev, port_num))
414 ret = ib_get_cached_gid(&ibdev->ib_dev, port_num, index, &gid, &attr);
439 static int mlx4_ib_query_device(struct ib_device *ibdev,
443 struct mlx4_ib_dev *dev = to_mdev(ibdev);
477 err = mlx4_MAD_IFC(to_mdev(ibdev), MLX4_MAD_IFC_IGNORE_KEYS,
596 static int ib_link_query_port(struct ib_device *ibdev, u8 port,
614 if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
617 err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
633 props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port];
634 props->max_msg_sz = to_mdev(ibdev)->dev->caps.max_msg_sz;
635 props->pkey_tbl_len = to_mdev(ibdev)->dev->caps.pkey_table_len[port];
666 err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port,
691 static int eth_link_query_port(struct ib_device *ibdev, u8 port,
695 struct mlx4_ib_dev *mdev = to_mdev(ibdev);
750 int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
757 err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ?
758 ib_link_query_port(ibdev, port, props, netw_view) :
759 eth_link_query_port(ibdev, port, props, netw_view);
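Source lines 750-759 show the port-query split: a single entry point picks the InfiniBand or Ethernet implementation based on the port's link layer. A hedged sketch of that dispatch (the props setup is an assumption; it is not visible in the listing):

    int __mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
                             struct ib_port_attr *props, int netw_view)
    {
            /* Assumed: clear caller-provided attrs before filling them. */
            memset(props, 0, sizeof(*props));

            return mlx4_ib_port_link_layer(ibdev, port) ==
                            IB_LINK_LAYER_INFINIBAND ?
                    ib_link_query_port(ibdev, port, props, netw_view) :
                    eth_link_query_port(ibdev, port, props, netw_view);
    }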
764 static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
768 return __mlx4_ib_query_port(ibdev, port, props, 0);
771 int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
777 struct mlx4_ib_dev *dev = to_mdev(ibdev);
827 static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
832 if (rdma_protocol_ib(ibdev, port))
833 return __mlx4_ib_query_gid(ibdev, port, index, gid, 0);
835 if (!rdma_protocol_roce(ibdev, port))
838 if (!rdma_cap_roce_gid_table(ibdev, port))
841 ret = ib_get_cached_gid(ibdev, port, index, gid, NULL);
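Lines 827-841 sketch the GID query path: native IB ports use the MAD-backed __mlx4_ib_query_gid(), while RoCE ports must have a cached GID table and read from it. A reconstruction under those assumptions (the error codes and the post-processing of ret are not visible in the listing):

    static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port,
                                 int index, union ib_gid *gid)
    {
            if (rdma_protocol_ib(ibdev, port))
                    return __mlx4_ib_query_gid(ibdev, port, index, gid, 0);

            if (!rdma_protocol_roce(ibdev, port))
                    return -ENODEV;         /* errno assumed */

            if (!rdma_cap_roce_gid_table(ibdev, port))
                    return -ENODEV;         /* errno assumed */

            return ib_get_cached_gid(ibdev, port, index, gid, NULL);
    }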
850 static int mlx4_ib_query_sl2vl(struct ib_device *ibdev, u8 port, u64 *sl2vl_tbl)
859 if (mlx4_is_slave(to_mdev(ibdev)->dev)) {
873 if (mlx4_is_mfunc(to_mdev(ibdev)->dev))
876 err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
910 int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
927 if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
930 err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
943 static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
945 return __mlx4_ib_query_pkey(ibdev, port, index, pkey, 0);
948 static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
960 if (mlx4_is_slave(to_mdev(ibdev)->dev))
963 spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags);
964 memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
965 spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags);
971 mailbox = mlx4_alloc_cmd_mailbox(to_mdev(ibdev)->dev);
976 mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
979 mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);
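Lines 948-979 outline mlx4_ib_modify_device(): the node description is updated locally under sm_lock, then pushed to firmware through a command mailbox on a best-effort basis. A hedged reconstruction (the mask check, the command opcode, and the ignore-on-failure semantics are assumptions consistent with the visible fragments):

    static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
                                     struct ib_device_modify *props)
    {
            struct mlx4_cmd_mailbox *mailbox;
            unsigned long flags;

            if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
                    return 0;
            if (mlx4_is_slave(to_mdev(ibdev)->dev))
                    return -EOPNOTSUPP;     /* errno assumed */

            /* Update the cached node description under the SM lock. */
            spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags);
            memcpy(ibdev->node_desc, props->node_desc,
                   IB_DEVICE_NODE_DESC_MAX);
            spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags);

            /* Best effort: tell firmware; ignore mailbox/cmd failures. */
            mailbox = mlx4_alloc_cmd_mailbox(to_mdev(ibdev)->dev);
            if (IS_ERR(mailbox))
                    return 0;

            memcpy(mailbox->buf, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
            mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
                     MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A,
                     MLX4_CMD_NATIVE);
            mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);

            return 0;
    }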
1010 static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
1013 struct mlx4_ib_dev *mdev = to_mdev(ibdev);
1028 err = mlx4_ib_query_port(ibdev, port, &attr);
1040 mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
1044 static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
1047 struct mlx4_ib_dev *dev = to_mdev(ibdev);
1056 if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) {
1072 err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar);
1081 if (ibdev->uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION)
1087 mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
1221 static struct ib_pd *mlx4_ib_alloc_pd(struct ib_device *ibdev,
1232 err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn);
1240 mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
1256 static struct ib_xrcd *mlx4_ib_alloc_xrcd(struct ib_device *ibdev,
1264 if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
1271 err = mlx4_xrcd_alloc(to_mdev(ibdev)->dev, &xrcd->xrcdn);
1275 xrcd->pd = ib_alloc_pd(ibdev, 0);
1282 xrcd->cq = ib_create_cq(ibdev, NULL, NULL, xrcd, &cq_attr);
1293 mlx4_xrcd_free(to_mdev(ibdev)->dev, xrcd->xrcdn);
1332 static void mlx4_ib_delete_counters_table(struct mlx4_ib_dev *ibdev,
1341 mlx4_counter_free(ibdev->dev, counter->index);
2135 static struct rdma_hw_stats *mlx4_ib_alloc_hw_stats(struct ib_device *ibdev,
2138 struct mlx4_ib_dev *dev = to_mdev(ibdev);
2149 static int mlx4_ib_get_hw_stats(struct ib_device *ibdev,
2153 struct mlx4_ib_dev *dev = to_mdev(ibdev);
2174 static int __mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev,
2184 if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT)
2207 static void mlx4_ib_fill_diag_counters(struct mlx4_ib_dev *ibdev,
2220 if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT) {
2235 static int mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev)
2237 struct mlx4_ib_diag_counters *diag = ibdev->diag_counters;
2240 bool per_port = !!(ibdev->dev->caps.flags2 &
2243 if (mlx4_is_slave(ibdev->dev))
2251 ret = __mlx4_ib_alloc_diag_counters(ibdev, &diag[i].name,
2257 mlx4_ib_fill_diag_counters(ibdev, diag[i].name,
2261 ibdev->ib_dev.get_hw_stats = mlx4_ib_get_hw_stats;
2262 ibdev->ib_dev.alloc_hw_stats = mlx4_ib_alloc_hw_stats;
2275 static void mlx4_ib_diag_cleanup(struct mlx4_ib_dev *ibdev)
2280 kfree(ibdev->diag_counters[i].offset);
2281 kfree(ibdev->diag_counters[i].name);
2286 static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
2296 atomic64_set(&ibdev->iboe.mac[port - 1], new_smac);
2299 if (!mlx4_is_mfunc(ibdev->dev))
2302 mutex_lock(&ibdev->qp1_proxy_lock[port - 1]);
2303 qp = ibdev->qp1_proxy[port - 1];
2314 new_smac_index = mlx4_register_mac(ibdev->dev, port, new_smac);
2320 if (mlx4_update_qp(ibdev->dev, qp->mqp.qpn, MLX4_UPDATE_QP_SMAC,
2335 mlx4_unregister_mac(ibdev->dev, port, release_mac);
2338 mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
2341 static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
2350 iboe = &ibdev->iboe;
2353 mlx4_foreach_ib_transport_port(port, ibdev->dev) {
2356 mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);
2367 mlx4_ib_update_qps(ibdev, dev, update_qps_port);
2374 struct mlx4_ib_dev *ibdev;
2379 ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
2380 mlx4_ib_scan_netdevs(ibdev, dev, event);
2385 static void init_pkeys(struct mlx4_ib_dev *ibdev)
2391 if (mlx4_is_master(ibdev->dev)) {
2392 for (slave = 0; slave <= ibdev->dev->persist->num_vfs;
2394 for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
2396 i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
2398 ibdev->pkeys.virt2phys_pkey[slave][port - 1][i] =
2400 (slave == mlx4_master_func_num(ibdev->dev) || !i) ? i :
2401 ibdev->dev->phys_caps.pkey_phys_table_len[port] - 1;
2402 mlx4_sync_pkey_table(ibdev->dev, slave, port, i,
2403 ibdev->pkeys.virt2phys_pkey[slave][port - 1][i]);
2408 for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
2410 i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
2412 ibdev->pkeys.phys_pkey_cache[port-1][i] =
2418 static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
2422 ibdev->eq_table = kcalloc(dev->caps.num_comp_vectors,
2423 sizeof(ibdev->eq_table[0]), GFP_KERNEL);
2424 if (!ibdev->eq_table)
2432 ibdev->eq_table[eq] = total_eqs;
2434 &ibdev->eq_table[eq]))
2437 ibdev->eq_table[eq] = -1;
2442 ibdev->eq_table[i++] = -1)
2446 ibdev->ib_dev.num_comp_vectors = eq;
2449 static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
2452 int total_eqs = ibdev->ib_dev.num_comp_vectors;
2455 if (!ibdev->eq_table)
2459 ibdev->ib_dev.num_comp_vectors = 0;
2462 mlx4_release_eq(dev, ibdev->eq_table[i]);
2464 kfree(ibdev->eq_table);
2465 ibdev->eq_table = NULL;
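The fragments at 2418-2465 show how the driver claims dedicated event queues: it allocates a table sized by dev->caps.num_comp_vectors, tries mlx4_assign_eq() for each slot, records -1 on failure, and advertises only the vectors it actually obtained; the free path releases each assigned EQ and resets the count. A condensed sketch of the allocation side (the per-port iteration and mlx4_assign_eq()'s port argument are assumptions):

    static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev,
                                  struct mlx4_ib_dev *ibdev)
    {
            int i, eq = 0, total_eqs = 0;

            ibdev->eq_table = kcalloc(dev->caps.num_comp_vectors,
                                      sizeof(ibdev->eq_table[0]),
                                      GFP_KERNEL);
            if (!ibdev->eq_table)
                    return;         /* keep the default vectors */

            /* Simplified: the real loop walks ports and their EQs. */
            for (total_eqs = 0; total_eqs < dev->caps.num_comp_vectors;
                 total_eqs++) {
                    ibdev->eq_table[eq] = total_eqs;
                    if (!mlx4_assign_eq(dev, 1 /* port assumed */,
                                        &ibdev->eq_table[eq]))
                            eq++;
                    else
                            ibdev->eq_table[eq] = -1;
            }

            /* Mark the unused tail, then advertise what we got. */
            for (i = eq; i < dev->caps.num_comp_vectors;
                 ibdev->eq_table[i++] = -1)
                    ;
            ibdev->ib_dev.num_comp_vectors = eq;
    }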
2468 static int mlx4_port_immutable(struct ib_device *ibdev, u8 port_num,
2472 struct mlx4_ib_dev *mdev = to_mdev(ibdev);
2475 err = mlx4_ib_query_port(ibdev, port_num, &attr);
2482 if (mlx4_ib_port_link_layer(ibdev, port_num) == IB_LINK_LAYER_INFINIBAND) {
2510 struct mlx4_ib_dev *ibdev;
2531 ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev);
2532 if (!ibdev) {
2538 iboe = &ibdev->iboe;
2540 if (mlx4_pd_alloc(dev, &ibdev->priv_pdn))
2543 if (mlx4_uar_alloc(dev, &ibdev->priv_uar))
2546 ibdev->uar_map = ioremap((phys_addr_t) ibdev->priv_uar.pfn << PAGE_SHIFT,
2548 if (!ibdev->uar_map)
2550 MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);
2552 ibdev->dev = dev;
2553 ibdev->bond_next_port = 0;
2555 strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
2556 ibdev->ib_dev.owner = THIS_MODULE;
2557 ibdev->ib_dev.node_type = RDMA_NODE_IB_CA;
2558 ibdev->ib_dev.local_dma_lkey = dev->caps.reserved_lkey;
2559 ibdev->num_ports = num_ports;
2560 ibdev->ib_dev.phys_port_cnt = mlx4_is_bonded(dev) ?
2561 1 : ibdev->num_ports;
2562 ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
2563 ibdev->ib_dev.dma_device = &dev->persist->pdev->dev;
2564 ibdev->ib_dev.get_netdev = mlx4_ib_get_netdev;
2565 ibdev->ib_dev.add_gid = mlx4_ib_add_gid;
2566 ibdev->ib_dev.del_gid = mlx4_ib_del_gid;
2569 ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION;
2571 ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION;
2573 ibdev->ib_dev.uverbs_cmd_mask =
2599 ibdev->ib_dev.query_device = mlx4_ib_query_device;
2600 ibdev->ib_dev.query_port = mlx4_ib_query_port;
2601 ibdev->ib_dev.get_link_layer = mlx4_ib_port_link_layer;
2602 ibdev->ib_dev.query_gid = mlx4_ib_query_gid;
2603 ibdev->ib_dev.query_pkey = mlx4_ib_query_pkey;
2604 ibdev->ib_dev.modify_device = mlx4_ib_modify_device;
2605 ibdev->ib_dev.modify_port = mlx4_ib_modify_port;
2606 ibdev->ib_dev.alloc_ucontext = mlx4_ib_alloc_ucontext;
2607 ibdev->ib_dev.dealloc_ucontext = mlx4_ib_dealloc_ucontext;
2608 ibdev->ib_dev.mmap = mlx4_ib_mmap;
2609 ibdev->ib_dev.alloc_pd = mlx4_ib_alloc_pd;
2610 ibdev->ib_dev.dealloc_pd = mlx4_ib_dealloc_pd;
2611 ibdev->ib_dev.create_ah = mlx4_ib_create_ah;
2612 ibdev->ib_dev.query_ah = mlx4_ib_query_ah;
2613 ibdev->ib_dev.destroy_ah = mlx4_ib_destroy_ah;
2614 ibdev->ib_dev.create_srq = mlx4_ib_create_srq;
2615 ibdev->ib_dev.modify_srq = mlx4_ib_modify_srq;
2616 ibdev->ib_dev.query_srq = mlx4_ib_query_srq;
2617 ibdev->ib_dev.destroy_srq = mlx4_ib_destroy_srq;
2618 ibdev->ib_dev.post_srq_recv = mlx4_ib_post_srq_recv;
2619 ibdev->ib_dev.create_qp = mlx4_ib_create_qp;
2620 ibdev->ib_dev.modify_qp = mlx4_ib_modify_qp;
2621 ibdev->ib_dev.query_qp = mlx4_ib_query_qp;
2622 ibdev->ib_dev.destroy_qp = mlx4_ib_destroy_qp;
2623 ibdev->ib_dev.post_send = mlx4_ib_post_send;
2624 ibdev->ib_dev.post_recv = mlx4_ib_post_recv;
2625 ibdev->ib_dev.create_cq = mlx4_ib_create_cq;
2626 ibdev->ib_dev.modify_cq = mlx4_ib_modify_cq;
2627 ibdev->ib_dev.resize_cq = mlx4_ib_resize_cq;
2628 ibdev->ib_dev.destroy_cq = mlx4_ib_destroy_cq;
2629 ibdev->ib_dev.poll_cq = mlx4_ib_poll_cq;
2630 ibdev->ib_dev.req_notify_cq = mlx4_ib_arm_cq;
2631 ibdev->ib_dev.get_dma_mr = mlx4_ib_get_dma_mr;
2632 ibdev->ib_dev.reg_user_mr = mlx4_ib_reg_user_mr;
2633 ibdev->ib_dev.rereg_user_mr = mlx4_ib_rereg_user_mr;
2634 ibdev->ib_dev.dereg_mr = mlx4_ib_dereg_mr;
2635 ibdev->ib_dev.alloc_mr = mlx4_ib_alloc_mr;
2636 ibdev->ib_dev.map_mr_sg = mlx4_ib_map_mr_sg;
2637 ibdev->ib_dev.attach_mcast = mlx4_ib_mcg_attach;
2638 ibdev->ib_dev.detach_mcast = mlx4_ib_mcg_detach;
2639 ibdev->ib_dev.process_mad = mlx4_ib_process_mad;
2640 ibdev->ib_dev.get_port_immutable = mlx4_port_immutable;
2641 ibdev->ib_dev.get_dev_fw_str = get_fw_ver_str;
2643 if (!mlx4_is_slave(ibdev->dev)) {
2644 ibdev->ib_dev.alloc_fmr = mlx4_ib_fmr_alloc;
2645 ibdev->ib_dev.map_phys_fmr = mlx4_ib_map_phys_fmr;
2646 ibdev->ib_dev.unmap_fmr = mlx4_ib_unmap_fmr;
2647 ibdev->ib_dev.dealloc_fmr = mlx4_ib_fmr_dealloc;
2652 ibdev->ib_dev.alloc_mw = mlx4_ib_alloc_mw;
2653 ibdev->ib_dev.dealloc_mw = mlx4_ib_dealloc_mw;
2655 ibdev->ib_dev.uverbs_cmd_mask |=
2661 ibdev->ib_dev.alloc_xrcd = mlx4_ib_alloc_xrcd;
2662 ibdev->ib_dev.dealloc_xrcd = mlx4_ib_dealloc_xrcd;
2663 ibdev->ib_dev.uverbs_cmd_mask |=
2669 ibdev->steering_support = MLX4_STEERING_MODE_DEVICE_MANAGED;
2670 ibdev->ib_dev.create_flow = mlx4_ib_create_flow;
2671 ibdev->ib_dev.destroy_flow = mlx4_ib_destroy_flow;
2673 ibdev->ib_dev.uverbs_ex_cmd_mask |=
2678 ibdev->ib_dev.uverbs_ex_cmd_mask |=
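Source lines 2599-2680 are the classic pre-ib_device_ops registration pattern: every verb is a function pointer assigned directly on struct ib_device, mandatory verbs unconditionally and optional ones (FMR, memory windows, XRC, flow steering) only when the HCA capability flags allow, with uverbs_cmd_mask widened to match. A sketch of the XRC branch in that style (the add-time gate and the exact mask bits are assumptions consistent with lines 1264 and 2661-2663):

    if (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) {
            ibdev->ib_dev.alloc_xrcd   = mlx4_ib_alloc_xrcd;
            ibdev->ib_dev.dealloc_xrcd = mlx4_ib_dealloc_xrcd;
            ibdev->ib_dev.uverbs_cmd_mask |=
                    (1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
                    (1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
    }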
2683 mlx4_ib_alloc_eqs(dev, ibdev);
2687 if (init_node_data(ibdev))
2689 mlx4_init_sl2vl_tbl(ibdev);
2691 for (i = 0; i < ibdev->num_ports; ++i) {
2692 mutex_init(&ibdev->counters_table[i].mutex);
2693 INIT_LIST_HEAD(&ibdev->counters_table[i].counters_list);
2696 num_req_counters = mlx4_is_bonded(dev) ? 1 : ibdev->num_ports;
2698 mutex_init(&ibdev->qp1_proxy_lock[i]);
2700 if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
2702 err = mlx4_counter_alloc(ibdev->dev, &counter_index);
2718 mlx4_counter_free(ibdev->dev, counter_index);
2724 &ibdev->counters_table[i].counters_list);
2725 ibdev->counters_table[i].default_counter = counter_index;
2730 for (i = 1; i < ibdev->num_ports ; ++i) {
2739 &ibdev->counters_table[i].counters_list);
2740 ibdev->counters_table[i].default_counter =
2747 spin_lock_init(&ibdev->sm_lock);
2748 mutex_init(&ibdev->cap_mask_mutex);
2749 INIT_LIST_HEAD(&ibdev->qp_list);
2750 spin_lock_init(&ibdev->reset_flow_resource_lock);
2752 if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED &&
2754 ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS;
2755 err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count,
2757 &ibdev->steer_qpn_base, 0);
2761 ibdev->ib_uc_qpns_bitmap =
2762 kmalloc(BITS_TO_LONGS(ibdev->steer_qpn_count) *
2765 if (!ibdev->ib_uc_qpns_bitmap) {
2771 bitmap_zero(ibdev->ib_uc_qpns_bitmap, ibdev->steer_qpn_count);
2774 dev, ibdev->steer_qpn_base,
2775 ibdev->steer_qpn_base +
2776 ibdev->steer_qpn_count - 1);
2781 for (j = 1; j <= ibdev->dev->caps.num_ports; j++)
2782 atomic64_set(&iboe->mac[j - 1], ibdev->dev->caps.def_mac[j]);
2784 if (mlx4_ib_alloc_diag_counters(ibdev))
2787 if (ib_register_device(&ibdev->ib_dev, NULL))
2790 if (mlx4_ib_mad_init(ibdev))
2793 if (mlx4_ib_init_sriov(ibdev))
2815 if (device_create_file(&ibdev->ib_dev.dev,
2820 ibdev->ib_active = true;
2822 if (mlx4_is_mfunc(ibdev->dev))
2823 init_pkeys(ibdev);
2826 if (mlx4_is_master(ibdev->dev)) {
2828 if (j == mlx4_master_func_num(ibdev->dev))
2830 if (mlx4_is_slave_active(ibdev->dev, j))
2831 do_slave_init(ibdev, j, 1);
2834 return ibdev;
2837 if (ibdev->iboe.nb.notifier_call) {
2838 if (unregister_netdevice_notifier(&ibdev->iboe.nb))
2840 ibdev->iboe.nb.notifier_call = NULL;
2844 mlx4_ib_close_sriov(ibdev);
2847 mlx4_ib_mad_cleanup(ibdev);
2850 ib_unregister_device(&ibdev->ib_dev);
2853 mlx4_ib_diag_cleanup(ibdev);
2856 kfree(ibdev->ib_uc_qpns_bitmap);
2859 if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
2860 mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
2861 ibdev->steer_qpn_count);
2863 for (i = 0; i < ibdev->num_ports; ++i)
2864 mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[i]);
2867 iounmap(ibdev->uar_map);
2870 mlx4_uar_free(dev, &ibdev->priv_uar);
2873 mlx4_pd_free(dev, ibdev->priv_pdn);
2876 ib_dealloc_device(&ibdev->ib_dev);
2946 struct mlx4_ib_dev *ibdev = ibdev_ptr;
2949 ibdev->ib_active = false;
2952 mlx4_ib_close_sriov(ibdev);
2953 mlx4_ib_mad_cleanup(ibdev);
2954 ib_unregister_device(&ibdev->ib_dev);
2955 mlx4_ib_diag_cleanup(ibdev);
2956 if (ibdev->iboe.nb.notifier_call) {
2957 if (unregister_netdevice_notifier(&ibdev->iboe.nb))
2959 ibdev->iboe.nb.notifier_call = NULL;
2962 if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) {
2963 mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
2964 ibdev->steer_qpn_count);
2965 kfree(ibdev->ib_uc_qpns_bitmap);
2968 iounmap(ibdev->uar_map);
2969 for (p = 0; p < ibdev->num_ports; ++p)
2970 mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[p]);
2975 mlx4_ib_free_eqs(dev, ibdev);
2977 mlx4_uar_free(dev, &ibdev->priv_uar);
2978 mlx4_pd_free(dev, ibdev->priv_pdn);
2979 ib_dealloc_device(&ibdev->ib_dev);
2982 static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
2985 struct mlx4_dev *dev = ibdev->dev;
3017 dm[i]->dev = ibdev;
3020 spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
3021 if (!ibdev->sriov.is_going_down) {
3023 queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work);
3024 spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
3026 spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
3035 static void mlx4_ib_handle_catas_error(struct mlx4_ib_dev *ibdev)
3048 /* Go over the qp list residing on that ibdev, sync with create/destroy qp. */
3049 spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);
3051 list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
3092 spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
3100 struct mlx4_ib_dev *ibdev = ew->ib_dev;
3106 spin_lock_bh(&ibdev->iboe.lock);
3108 struct net_device *curr_netdev = ibdev->iboe.netdevs[i];
3122 spin_unlock_bh(&ibdev->iboe.lock);
3124 ibev.device = &ibdev->ib_dev;
3157 void mlx4_sched_ib_sl2vl_update_work(struct mlx4_ib_dev *ibdev,
3166 ew->ib_dev = ibdev;
3177 struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr);
3189 ew->ib_dev = ibdev;
3201 if (p > ibdev->num_ports)
3204 rdma_port_get_link_layer(&ibdev->ib_dev, p) ==
3207 mlx4_ib_invalidate_all_guid_record(ibdev, p);
3208 if (ibdev->dev->flags & MLX4_FLAG_SECURE_HOST &&
3209 !(ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT))
3210 mlx4_sched_ib_sl2vl_update_work(ibdev, p);
3216 if (p > ibdev->num_ports)
3222 ibdev->ib_active = false;
3224 mlx4_ib_handle_catas_error(ibdev);
3236 ew->ib_dev = ibdev;
3246 do_slave_init(ibdev, p, 1);
3250 for (i = 1; i <= ibdev->num_ports; i++) {
3251 if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
3253 mlx4_ib_slave_alias_guid_event(ibdev,
3264 for (i = 1; i <= ibdev->num_ports; i++) {
3265 if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
3267 mlx4_ib_slave_alias_guid_event(ibdev,
3273 do_slave_init(ibdev, p, 0);
3281 ibev.element.port_num = mlx4_is_bonded(ibdev->dev) ? 1 : (u8)p;