Lines matching refs:port_num (the recurring usage patterns in these hits are sketched after the listing)

106 mlx5_ib_port_link_layer(struct ib_device *device, u32 port_num)
115 u32 port_num,
122 ret = ibdev->ops.query_port(ibdev, port_num, &attr);
131 u32 *port_num)
143 *port_num = i + 1;
155 *port_num = i + 1;
169 u32 port_num = roce->native_port_num;
174 mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
209 roce = mlx5_get_rep_roce(ibdev, ndev, upper, &port_num);
218 if (get_port_state(&ibdev->ib_dev, port_num,
234 ibev.element.port_num = port_num;
244 mlx5_ib_put_native_port_mdev(ibdev, port_num);
249 u32 port_num)
255 mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
265 read_lock(&ibdev->port[port_num - 1].roce.netdev_lock);
266 ndev = ibdev->port[port_num - 1].roce.netdev;
268 read_unlock(&ibdev->port[port_num - 1].roce.netdev_lock);
271 mlx5_ib_put_native_port_mdev(ibdev, port_num);
311 void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *ibdev, u32 port_num)
314 port_num);
321 port = &ibdev->port[port_num - 1];
324 mpi = ibdev->port[port_num - 1].mp.mpi;
475 static int mlx5_query_port_roce(struct ib_device *device, u32 port_num,
489 mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
497 port_num = 1;
541 ndev = mlx5_ib_get_netdev(device, port_num);
568 mlx5_ib_put_native_port_mdev(dev, port_num);
572 int set_roce_addr(struct mlx5_ib_dev *dev, u32 port_num,
609 port_num);
621 return set_roce_addr(to_mdev(attr->device), attr->port_num,
630 ret = set_roce_addr(to_mdev(attr->device), attr->port_num,
1549 static int set_port_caps_atomic(struct mlx5_ib_dev *dev, u32 port_num, u32 mask,
1557 mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
1578 mlx5_ib_put_native_port_mdev(dev, port_num);
2650 ibev->element.port_num = port;
2710 ibev.element.port_num = (u8)(unsigned long)work->param;
2726 if (!rdma_is_port_valid(&ibdev->ib_dev, ibev.element.port_num)) {
2727 mlx5_ib_warn(ibdev, "warning: event on port %d\n", ibev.element.port_num);
2956 static int mlx5_port_immutable(struct ib_device *ibdev, u32 port_num,
2961 enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, port_num);
2965 err = ib_query_port(ibdev, port_num, &attr);
2970 err = mlx5_query_hca_vport_context(dev->mdev, 0, port_num, 0,
2984 static int mlx5_port_rep_immutable(struct ib_device *ibdev, u32 port_num,
2992 err = ib_query_port(ibdev, port_num, &attr);
3099 static void mlx5_mdev_netdev_track(struct mlx5_ib_dev *dev, u32 port_num)
3101 struct mlx5_roce *roce = &dev->port[port_num].roce;
3108 static void mlx5_mdev_netdev_untrack(struct mlx5_ib_dev *dev, u32 port_num)
3110 struct mlx5_roce *roce = &dev->port[port_num].roce;
3146 static int mlx5_ib_rn_get_params(struct ib_device *device, u32 port_num,
3198 u32 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
3199 struct mlx5_ib_port *port = &ibdev->port[port_num];
3213 mlx5_ib_cleanup_cong_debugfs(ibdev, port_num);
3227 mlx5_mdev_netdev_untrack(ibdev, port_num);
3249 mlx5_ib_dbg(ibdev, "unaffiliated port %u\n", port_num + 1);
3255 port_num + 1);
3257 ibdev->port[port_num].roce.last_port_state = IB_PORT_DOWN;
3263 u32 port_num = mlx5_core_native_port_num(mpi->mdev) - 1;
3269 spin_lock(&ibdev->port[port_num].mp.mpi_lock);
3270 if (ibdev->port[port_num].mp.mpi) {
3272 port_num + 1);
3273 spin_unlock(&ibdev->port[port_num].mp.mpi_lock);
3277 ibdev->port[port_num].mp.mpi = mpi;
3280 spin_unlock(&ibdev->port[port_num].mp.mpi_lock);
3286 mlx5_mdev_netdev_track(ibdev, port_num);
3291 mlx5_ib_init_cong_debugfs(ibdev, port_num);
3310 u32 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
3312 port_num + 1);
3334 if (i == port_num) {
3378 u32 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
3380 port_num + 1);
3390 if (i == port_num) {
3394 mlx5_ib_dbg(dev, "unbinding port_num: %u\n",
3999 u32 port_num = 0;
4008 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
4011 mlx5_mdev_netdev_track(dev, port_num);
4020 mlx5_mdev_netdev_untrack(dev, port_num);
4029 u32 port_num;
4037 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
4038 mlx5_mdev_netdev_untrack(dev, port_num);