Lines Matching defs:mdev
(Symbol-definition search output. The number starting each match is its line number in the searched file, apparently drivers/infiniband/hw/mlx5/main.c in the Linux kernel; matches that break off mid-expression are statements that continue on the next source line.)

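Several of the matches (e.g. lines 174, 255, 489, 1426, 1487, and 1557) share one access pattern: resolve the mlx5_core_dev that currently owns a port via mlx5_ib_get_native_port_mdev(), fall back to the master device when the port is not yet affiliated, and release the reference when done. Below is a minimal sketch of that shape, not a verbatim excerpt; it assumes the driver's mlx5_ib_put_native_port_mdev() release helper (absent from the matches, presumably because its body defines no mdev variable) and the mlx5_query_hca_vport_pkey() call visible at line 1497.

static int sketch_query_pkey_on_port(struct mlx5_ib_dev *dev, u32 port_num,
				     u16 *pkey)
{
	struct mlx5_core_dev *mdev;
	bool put_mdev = true;
	u32 mdev_port_num;
	int err;

	mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
	if (!mdev) {
		/* Port not affiliated yet: query the master device, port 1 */
		put_mdev = false;
		mdev = dev->mdev;
		mdev_port_num = 1;
	}

	err = mlx5_query_hca_vport_pkey(mdev, 0, mdev_port_num, 0, 0, pkey);

	if (put_mdev)
		mlx5_ib_put_native_port_mdev(dev, port_num);

	return err;
}

The matched lines, in file order: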
109 int port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type);
170 struct mlx5_core_dev *mdev;
174 mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
175 if (!mdev)
184 if (ndev->dev.parent == mdev->device)
200 struct net_device *lag_ndev = mlx5_lag_get_roce_netdev(mdev);
253 struct mlx5_core_dev *mdev;
255 mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
256 if (!mdev)
259 ndev = mlx5_lag_get_roce_netdev(mdev);
281 struct mlx5_core_dev *mdev = NULL;
285 if (!mlx5_core_mp_enabled(ibdev->mdev) ||
289 return ibdev->mdev;
299 mdev = mpi->mdev;
308 return mdev;
318 if (!mlx5_core_mp_enabled(ibdev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
480 struct mlx5_core_dev *mdev;
489 mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
490 if (!mdev) {
495 mdev = dev->mdev;
505 err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN,
508 err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN,
521 if (!dev->is_rep && dev->mdev->roce.roce_en) {
526 props->gid_tbl_len = MLX5_CAP_ROCE(dev->mdev,
528 mlx5_query_nic_vport_qkey_viol_cntr(mdev, &qkey_viol_cntr);
532 props->max_msg_sz = 1 << MLX5_CAP_GEN(dev->mdev, log_max_msg);
606 return mlx5_core_roce_gid_set(dev->mdev, index, roce_version,
645 return cpu_to_be16(MLX5_CAP_ROCE(dev->mdev, r_roce_min_src_udp_port));
650 if (MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_IB)
651 return !MLX5_CAP_GEN(dev->mdev, ib_virt);
678 u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations);
680 MLX5_CAP_ATOMIC(dev->mdev, atomic_req_8B_endianness_mode);
698 u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);
707 struct mlx5_core_dev *mdev = dev->mdev;
717 err = mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
721 err = mlx5_query_nic_vport_system_image_guid(mdev, &tmp);
739 struct mlx5_core_dev *mdev = dev->mdev;
747 *max_pkeys = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev,
767 return mlx5_core_query_vendor_id(dev->mdev, vendor_id);
785 err = mlx5_query_hca_vport_node_guid(dev->mdev, &tmp);
789 err = mlx5_query_nic_vport_node_guid(dev->mdev, &tmp);
815 return mlx5_core_access_reg(dev->mdev, &in, sizeof(in), node_desc,
820 static void fill_esw_mgr_reg_c0(struct mlx5_core_dev *mdev,
823 struct mlx5_eswitch *esw = mdev->priv.eswitch;
824 u16 vport = mlx5_eswitch_manager_vport(mdev);
837 struct mlx5_core_dev *mdev = dev->mdev;
842 u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);
843 bool raw_support = !mlx5_core_mp_enabled(mdev);
869 props->fw_ver = ((u64)fw_rev_maj(dev->mdev) << 32) |
870 (fw_rev_min(dev->mdev) << 16) |
871 fw_rev_sub(dev->mdev);
877 if (MLX5_CAP_GEN(mdev, pkv))
879 if (MLX5_CAP_GEN(mdev, qkv))
881 if (MLX5_CAP_GEN(mdev, apm))
883 if (MLX5_CAP_GEN(mdev, xrc))
885 if (MLX5_CAP_GEN(mdev, imaicl)) {
888 props->max_mw = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
893 if (!MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
895 if (MLX5_CAP_GEN(mdev, sho)) {
904 if (MLX5_CAP_GEN(mdev, block_lb_mc))
907 if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) && raw_support) {
908 if (MLX5_CAP_ETH(mdev, csum_cap)) {
914 if (MLX5_CAP_ETH(dev->mdev, vlan_cap))
919 max_tso = MLX5_CAP_ETH(mdev, max_lso_cap);
950 if (MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) {
955 if (MLX5_CAP_GEN(dev->mdev, rq_delay_drop) &&
956 MLX5_CAP_GEN(dev->mdev, general_notification_event) &&
960 if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) &&
961 MLX5_CAP_IPOIB_ENHANCED(mdev, csum_cap))
964 if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
965 MLX5_CAP_ETH(dev->mdev, scatter_fcs) &&
972 if (MLX5_CAP_DEV_MEM(mdev, memic)) {
974 MLX5_CAP_DEV_MEM(mdev, max_memic_size);
977 if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS))
980 if (MLX5_CAP_GEN(mdev, end_pad))
983 props->vendor_part_id = mdev->pdev->device;
984 props->hw_ver = mdev->pdev->revision;
988 props->max_qp = 1 << MLX5_CAP_GEN(mdev, log_max_qp);
989 props->max_qp_wr = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
990 max_rq_sg = MLX5_CAP_GEN(mdev, max_wqe_sz_rq) /
992 max_sq_desc = min_t(int, MLX5_CAP_GEN(mdev, max_wqe_sz_sq), 512);
999 props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
1000 props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
1001 props->max_mr = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
1002 props->max_pd = 1 << MLX5_CAP_GEN(mdev, log_max_pd);
1003 props->max_qp_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_req_qp);
1004 props->max_qp_init_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_res_qp);
1005 props->max_srq = 1 << MLX5_CAP_GEN(mdev, log_max_srq);
1006 props->max_srq_wr = (1 << MLX5_CAP_GEN(mdev, log_max_srq_sz)) - 1;
1007 props->local_ca_ack_delay = MLX5_CAP_GEN(mdev, local_ca_ack_delay);
1011 1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size);
1015 MLX5_CAP_GEN(mdev, max_sgl_for_optimized_performance);
1018 props->max_mcast_grp = 1 << MLX5_CAP_GEN(mdev, log_max_mcg);
1019 props->max_mcast_qp_attach = MLX5_CAP_GEN(mdev, max_qp_mcg);
1023 props->hca_core_clock = MLX5_CAP_GEN(mdev, device_frequency_khz);
1049 if (mlx5_core_is_vf(mdev))
1055 1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt);
1057 1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt_size);
1060 1 << MLX5_CAP_GEN(dev->mdev, log_max_rq);
1063 if (MLX5_CAP_GEN(mdev, tag_matching)) {
1065 (1 << MLX5_CAP_GEN(mdev, log_tag_matching_list_sz)) - 1;
1067 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
1071 if (MLX5_CAP_GEN(mdev, tag_matching) &&
1072 MLX5_CAP_GEN(mdev, rndv_offload_rc)) {
1077 if (MLX5_CAP_GEN(dev->mdev, cq_moderation)) {
1087 if (MLX5_CAP_GEN(dev->mdev, cqe_compression)) {
1089 MLX5_CAP_GEN(dev->mdev,
1096 if (MLX5_CAP_GEN(dev->mdev, mini_cqe_resp_stride_index))
1104 if (MLX5_CAP_QOS(mdev, packet_pacing) &&
1105 MLX5_CAP_GEN(mdev, qos)) {
1107 MLX5_CAP_QOS(mdev, packet_pacing_max_rate);
1109 MLX5_CAP_QOS(mdev, packet_pacing_min_rate);
1112 if (MLX5_CAP_QOS(mdev, packet_pacing_burst_bound) &&
1113 MLX5_CAP_QOS(mdev, packet_pacing_typical_size))
1122 if (MLX5_CAP_ETH(mdev, multi_pkt_send_wqe))
1126 if (MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe))
1137 if (MLX5_CAP_GEN(mdev, cqe_compression_128))
1141 if (MLX5_CAP_GEN(mdev, cqe_128_always))
1143 if (MLX5_CAP_GEN(mdev, qp_packet_based))
1152 if (MLX5_CAP_ETH(mdev, swp)) {
1156 if (MLX5_CAP_ETH(mdev, swp_csum))
1160 if (MLX5_CAP_ETH(mdev, swp_lso))
1173 if (MLX5_CAP_GEN(mdev, striding_rq)) {
1178 if (MLX5_CAP_GEN(dev->mdev, ext_stride_num_range))
1195 if (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan))
1198 if (MLX5_CAP_ETH(mdev, tunnel_stateless_geneve_rx))
1201 if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre))
1204 if (MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_gre))
1207 if (MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_udp))
1216 MLX5_CAP_GEN(mdev, log_max_dci_stream_channels);
1219 MLX5_CAP_GEN(mdev, log_max_dci_errored_streams);
1226 struct mlx5_eswitch *esw = mdev->priv.eswitch;
1230 if (mlx5_eswitch_mode(mdev) == MLX5_ESWITCH_OFFLOADS &&
1232 fill_esw_mgr_reg_c0(mdev, &resp);
1334 struct mlx5_core_dev *mdev = dev->mdev;
1350 err = mlx5_query_hca_vport_context(mdev, 0, port, 0, rep);
1361 props->gid_tbl_len = mlx5_get_gid_table_len(MLX5_CAP_GEN(mdev, gid_table_size));
1362 props->max_msg_sz = 1 << MLX5_CAP_GEN(mdev, log_max_msg);
1363 props->pkey_tbl_len = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev, pkey_table_size));
1372 err = mlx5_query_ib_port_oper(mdev, &ib_link_width_oper,
1379 mlx5_query_port_max_mtu(mdev, &max_mtu, port);
1383 mlx5_query_port_oper_mtu(mdev, &oper_mtu, port);
1387 err = mlx5_query_port_vl_hw_cap(mdev, &vl_hw_cap, port);
1423 struct mlx5_core_dev *mdev;
1426 mdev = mlx5_ib_get_native_port_mdev(dev, port, NULL);
1427 if (!mdev) {
1431 mdev = dev->mdev;
1435 count = mlx5_core_reserved_gids_count(mdev);
1463 struct mlx5_core_dev *mdev = dev->mdev;
1470 return mlx5_query_hca_vport_gid(mdev, 0, port, 0, index, gid);
1482 struct mlx5_core_dev *mdev;
1487 mdev = mlx5_ib_get_native_port_mdev(dev, port, &mdev_port_num);
1488 if (!mdev) {
1493 mdev = dev->mdev;
1497 err = mlx5_query_hca_vport_pkey(mdev, 0, mdev_port_num, 0,
1539 err = mlx5_core_access_reg(dev->mdev, &in, sizeof(in), &out,
1553 struct mlx5_core_dev *mdev;
1557 mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
1558 if (!mdev)
1561 err = mlx5_query_hca_vport_context(mdev, 0, mdev_port_num, 0, &ctx);
1574 err = mlx5_core_modify_hca_vport_context(mdev, 0, mdev_port_num,
1601 if (MLX5_CAP_GEN(dev->mdev, ib_virt) && is_ib) {
1616 err = mlx5_set_port_caps(dev->mdev, port, tmp);
1668 MLX5_CAP_GEN(dev->mdev, uar_4k) ? "yes" : "no",
1684 err = mlx5_cmd_uar_alloc(dev->mdev, &bfregi->sys_pages[i],
1699 if (mlx5_cmd_uar_dealloc(dev->mdev, bfregi->sys_pages[i],
1716 mlx5_cmd_uar_dealloc(dev->mdev, bfregi->sys_pages[i],
1733 err = mlx5_nic_vport_update_local_lb(dev->mdev, true);
1754 mlx5_nic_vport_update_local_lb(dev->mdev, false);
1767 if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
1770 err = mlx5_cmd_alloc_transport_domain(dev->mdev, tdn, uid);
1774 if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
1775 (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
1776 !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
1785 if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
1788 mlx5_cmd_dealloc_transport_domain(dev->mdev, tdn, uid);
1790 if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
1791 (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
1792 !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
1806 if (MLX5_CAP_GEN(dev->mdev, dump_fill_mkey)) {
1812 resp->qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
1814 resp->bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev,
1817 resp->max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
1818 resp->max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
1819 resp->max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
1820 resp->max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
1821 resp->max_srq_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
1823 resp->log_uar_size = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
1825 resp->num_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
1826 MLX5_CAP_GEN(dev->mdev,
1835 mlx5_query_min_inline(dev->mdev, &resp->eth_min_inline);
1839 if (dev->mdev->clock_info)
1856 if (MLX5_CAP_GEN(dev->mdev, ece_support))
1859 if (rt_supported(MLX5_CAP_GEN(dev->mdev, sq_ts_format)) &&
1860 rt_supported(MLX5_CAP_GEN(dev->mdev, rq_ts_format)) &&
1861 rt_supported(MLX5_CAP_ROCE(dev->mdev, qp_ts_format)))
1867 if (MLX5_CAP_GEN(dev->mdev, drain_sigerr))
1969 (__u8)MLX5_CAP_GEN(dev->mdev, cqe_version),
1987 u32 port = mlx5_core_native_port_num(dev->mdev) - 1;
2061 fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ? MLX5_UARS_IN_PAGE : 1;
2063 return (dev->mdev->bar_addr >> PAGE_SHIFT) + uar_idx / fw_uars_per_page;
2071 fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ?
2074 return (dev->mdev->bar_addr + (uar_idx / fw_uars_per_page) * PAGE_SIZE);
2134 if (!dev->mdev->clock_info)
2138 virt_to_page(dev->mdev->clock_info));
2161 mlx5_cmd_uar_dealloc(dev->mdev, mentry->page_idx,
2240 err = mlx5_cmd_uar_alloc(dev->mdev, &uar_index,
2270 mlx5_cmd_uar_dealloc(dev->mdev, idx, context->devx_uid);
2362 pfn = (dev->mdev->iseg_base +
2394 err = mlx5_cmd_exec_inout(to_mdev(ibdev)->mdev, alloc_pd, in, out);
2403 mlx5_cmd_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn, uid);
2413 struct mlx5_ib_dev *mdev = to_mdev(pd->device);
2416 return mlx5_cmd_dealloc_pd(mdev->mdev, mpd->pdn, mpd->uid);
2434 err = mlx5_cmd_attach_mcg(dev->mdev, gid, ibqp->qp_num, uid);
2450 err = mlx5_cmd_detach_mcg(dev->mdev, gid, ibqp->qp_num, uid);
2466 dev->mdev->rev_id = dev->mdev->pdev->revision;
2477 return sysfs_emit(buf, "%d\n", dev->mdev->priv.fw_pages);
2487 return sysfs_emit(buf, "%d\n", atomic_read(&dev->mdev->priv.reg_pages));
2497 return sysfs_emit(buf, "MT%d\n", dev->mdev->pdev->device);
2507 return sysfs_emit(buf, "%x\n", dev->mdev->rev_id);
2518 dev->mdev->board_id);
2785 if (MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_IB)
2789 if (!MLX5_CAP_GEN(dev->mdev, ib_virt)) {
2793 err = mlx5_query_hca_vport_context(dev->mdev, 0, port, 0,
2837 if (!MLX5_CAP_GEN(dev->mdev, xrc))
2850 ret = mlx5_cmd_xrcd_alloc(dev->mdev, &devr->xrcdn0, 0);
2854 ret = mlx5_cmd_xrcd_alloc(dev->mdev, &devr->xrcdn1, 0);
2890 mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn1, 0);
2892 mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn0, 0);
2916 mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn1, 0);
2917 mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn0, 0);
2927 u8 l3_type_cap = MLX5_CAP_ROCE(dev->mdev, l3_type);
2928 u8 roce_version_cap = MLX5_CAP_ROCE(dev->mdev, roce_version);
2929 bool raw_support = !mlx5_core_mp_enabled(dev->mdev);
2970 err = mlx5_query_hca_vport_context(dev->mdev, 0, port_num, 0,
3008 fw_rev_maj(dev->mdev), fw_rev_min(dev->mdev),
3009 fw_rev_sub(dev->mdev));
3014 struct mlx5_core_dev *mdev = dev->mdev;
3015 struct mlx5_flow_namespace *ns = mlx5_get_flow_namespace(mdev,
3020 if (!ns || !mlx5_lag_is_active(mdev))
3023 err = mlx5_cmd_create_vport_lag(mdev);
3034 dev->lag_ports = mlx5_lag_get_num_ports(mdev);
3039 mlx5_cmd_destroy_vport_lag(mdev);
3045 struct mlx5_core_dev *mdev = dev->mdev;
3053 mlx5_cmd_destroy_vport_lag(mdev);
3104 mlx5_blocking_notifier_register(dev->mdev, &roce->mdev_nb);
3105 mlx5_core_uplink_netdev_event_replay(dev->mdev);
3112 mlx5_blocking_notifier_unregister(dev->mdev, &roce->mdev_nb);
3121 err = mlx5_nic_vport_enable_roce(dev->mdev);
3134 mlx5_nic_vport_disable_roce(dev->mdev);
3143 mlx5_nic_vport_disable_roce(dev->mdev);
3153 return mlx5_rdma_rn_get_params(to_mdev(device)->mdev, device, params);
3198 u32 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
3206 mlx5_core_mp_event_replay(ibdev->mdev,
3209 mlx5_core_mp_event_replay(mpi->mdev,
3225 mlx5_notifier_unregister(mpi->mdev, &mpi->mdev_events);
3247 err = mlx5_nic_vport_unaffiliate_multiport(mpi->mdev);
3263 u32 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
3282 err = mlx5_nic_vport_affiliate_multiport(ibdev->mdev, mpi->mdev);
3289 mlx5_notifier_register(mpi->mdev, &mpi->mdev_events);
3293 key = mpi->mdev->priv.adev_idx;
3294 mlx5_core_mp_event_replay(mpi->mdev,
3297 mlx5_core_mp_event_replay(ibdev->mdev,
3310 u32 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
3317 if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
3320 err = mlx5_query_nic_vport_system_image_guid(dev->mdev,
3325 err = mlx5_nic_vport_enable_roce(dev->mdev);
3338 mlx5_nic_vport_disable_roce(dev->mdev);
3343 mpi->mdev = dev->mdev;
3354 (mlx5_core_native_port_num(mpi->mdev) - 1) == i) {
3359 dev_dbg(mpi->mdev->device,
3378 u32 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
3383 if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
3408 mlx5_nic_vport_disable_roce(dev->mdev);
3548 return (MLX5_CAP_GEN_64(dev->mdev, general_obj_types) &
3566 err = mlx5_cmd_uar_alloc(dev->mdev, &uar_index, c->devx_uid);
3584 mlx5_cmd_uar_dealloc(dev->mdev, uar_index, c->devx_uid);
3710 struct mlx5_core_dev *mdev = dev->mdev;
3716 dev->ib_dev.dev.parent = mdev->device;
3750 dev->ib_dev.num_comp_vectors = mlx5_comp_vectors_max(mdev);
3760 dev->dm.dev = mdev;
3771 struct mlx5_ib_dev *mdev = to_mdev(dev);
3774 ret = mlx5_ib_test_wc(mdev);
3775 mlx5_ib_dbg(mdev, "Write-Combining %s",
3776 mdev->wc_support ? "supported" : "not supported");
3879 struct mlx5_core_dev *mdev = dev->mdev;
3885 log_doorbell_bar_size = MLX5_CAP_DEV_VDPA_EMULATION(mdev,
3887 log_doorbell_stride = MLX5_CAP_DEV_VDPA_EMULATION(mdev,
3889 var_table->hw_start_addr = dev->mdev->bar_addr +
3890 MLX5_CAP64_DEV_VDPA_EMULATION(mdev,
3909 struct mlx5_core_dev *mdev = dev->mdev;
3912 if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) &&
3917 if (mlx5_core_is_pf(mdev))
3920 dev->umr_fence = mlx5_get_umr_fence(MLX5_CAP_GEN(mdev, umr_fence));
3922 if (MLX5_CAP_GEN(mdev, imaicl))
3925 if (MLX5_CAP_GEN(mdev, xrc))
3928 if (MLX5_CAP_DEV_MEM(mdev, memic) ||
3929 MLX5_CAP_GEN_64(dev->mdev, general_obj_types) &
3942 if ((MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
3943 (MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) ||
3944 MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
3947 if (MLX5_CAP_GEN_64(dev->mdev, general_obj_types) &
3996 struct mlx5_core_dev *mdev = dev->mdev;
4002 port_type_cap = MLX5_CAP_GEN(mdev, port_type);
4008 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
4026 struct mlx5_core_dev *mdev = dev->mdev;
4031 port_type_cap = MLX5_CAP_GEN(mdev, port_type);
4037 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
4045 mlx5_core_native_port_num(dev->mdev) - 1);
4052 mlx5_core_native_port_num(dev->mdev) - 1);
4057 dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev);
4058 return PTR_ERR_OR_ZERO(dev->mdev->priv.uar);
4063 mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);
4070 err = mlx5_alloc_bfreg(dev->mdev, &dev->bfreg, false, false);
4074 err = mlx5_alloc_bfreg(dev->mdev, &dev->fp_bfreg, false, true);
4076 mlx5_free_bfreg(dev->mdev, &dev->bfreg);
4083 mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
4084 mlx5_free_bfreg(dev->mdev, &dev->bfreg);
4091 if (!mlx5_lag_is_active(dev->mdev))
4095 return ib_register_device(&dev->ib_dev, name, &dev->mdev->pdev->dev);
4141 root = debugfs_create_dir("delay_drop", mlx5_debugfs_get_dev_root(dev->mdev));
4169 mlx5_notifier_register(dev->mdev, &dev->mdev_events);
4179 mlx5_notifier_unregister(dev->mdev, &dev->mdev_events);
4355 struct mlx5_core_dev *mdev = idev->mdev;
4365 mpi->mdev = mdev;
4366 err = mlx5_query_nic_vport_system_image_guid(mdev,
4387 dev_dbg(mdev->device,
4414 struct mlx5_core_dev *mdev = idev->mdev;
4420 port_type_cap = MLX5_CAP_GEN(mdev, port_type);
4423 num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
4424 MLX5_CAP_GEN(mdev, num_vhca_ports));
4435 dev->mdev = mdev;
4438 if (ll == IB_LINK_LAYER_ETHERNET && !mlx5_get_roce_state(mdev))
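Most of the remaining matches read device limits and feature bits out of firmware capability fields through the MLX5_CAP_GEN()/MLX5_CAP_ETH()/MLX5_CAP_ROCE() accessors, and many of those fields are log2-encoded, which is why 1 << MLX5_CAP_GEN(...) expressions recur throughout (e.g. lines 988, 1000, and 1812). A minimal sketch of the convention, reusing two capability fields that appear verbatim above; the helper name is hypothetical, and the usual mlx5 driver headers are assumed.

static void sketch_fill_qp_limits(struct mlx5_ib_dev *dev,
				  struct ib_device_attr *props)
{
	struct mlx5_core_dev *mdev = dev->mdev;

	/* Count-like caps store log2 of the limit: 2^log_max_qp QPs */
	props->max_qp = 1 << MLX5_CAP_GEN(mdev, log_max_qp);

	/* Size-like caps expand to 2^log - 1, the largest usable value */
	props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
}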