mlx5_ib_main.c revision 341948
/*-
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/dev/mlx5/mlx5_ib/mlx5_ib_main.c 341948 2018-12-12 12:30:51Z hselasky $
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#if defined(CONFIG_X86)
#include <asm/pat.h>
#endif
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/fs.h>
#undef inode
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <dev/mlx5/port.h>
#include <dev/mlx5/vport.h>
#include <linux/list.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <linux/in.h>
#include <linux/etherdevice.h>
#include <dev/mlx5/fs.h>
#include "mlx5_ib.h"

#define DRIVER_NAME "mlx5ib"
#ifndef DRIVER_VERSION
#define DRIVER_VERSION "3.4.2"
#endif
#define DRIVER_RELDATE "July 2018"

MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DEPEND(mlx5ib, linuxkpi, 1, 1, 1);
MODULE_DEPEND(mlx5ib, mlx5, 1, 1, 1);
MODULE_DEPEND(mlx5ib, ibcore, 1, 1, 1);
MODULE_VERSION(mlx5ib, 1);

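/*
 * prof_sel is accepted here only for backward compatibility; profile
 * selection itself now lives in the mlx5_core module (see the parameter
 * description below).
 */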
static int deprecated_prof_sel = 2;
module_param_named(prof_sel, deprecated_prof_sel, int, 0444);
MODULE_PARM_DESC(prof_sel, "profile selector. Deprecated here. Moved to module mlx5_core");

static const char mlx5_version[] =
        DRIVER_NAME ": Mellanox Connect-IB Infiniband driver "
        DRIVER_VERSION " (" DRIVER_RELDATE ")\n";

enum {
        MLX5_ATOMIC_SIZE_QP_8BYTES = 1 << 3,
};

static enum rdma_link_layer
mlx5_port_type_cap_to_rdma_ll(int port_type_cap)
{
        switch (port_type_cap) {
        case MLX5_CAP_PORT_TYPE_IB:
                return IB_LINK_LAYER_INFINIBAND;
        case MLX5_CAP_PORT_TYPE_ETH:
                return IB_LINK_LAYER_ETHERNET;
        default:
                return IB_LINK_LAYER_UNSPECIFIED;
        }
}

static enum rdma_link_layer
mlx5_ib_port_link_layer(struct ib_device *device, u8 port_num)
{
        struct mlx5_ib_dev *dev = to_mdev(device);
        int port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type);

        return mlx5_port_type_cap_to_rdma_ll(port_type_cap);
}

static bool mlx5_netdev_match(struct net_device *ndev,
                              struct mlx5_core_dev *mdev,
                              const char *dname)
{
        return ndev->if_type == IFT_ETHER &&
               ndev->if_dname != NULL &&
               strcmp(ndev->if_dname, dname) == 0 &&
               ndev->if_softc != NULL &&
               *(struct mlx5_core_dev **)ndev->if_softc == mdev;
}

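/*
 * Netdev notifier: tracks the mlx5en ("mce") interface that backs this
 * IB device and turns link up/down transitions into IB port events.
 * There is no bonding/upper-device handling here, so "upper" below is
 * always NULL and only the tracked netdev itself can trigger an event.
 */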
static int mlx5_netdev_event(struct notifier_block *this,
                             unsigned long event, void *ptr)
{
        struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
        struct mlx5_ib_dev *ibdev = container_of(this, struct mlx5_ib_dev,
                                                 roce.nb);

        switch (event) {
        case NETDEV_REGISTER:
        case NETDEV_UNREGISTER:
                write_lock(&ibdev->roce.netdev_lock);
                /* check if network interface belongs to mlx5en */
                if (mlx5_netdev_match(ndev, ibdev->mdev, "mce"))
                        ibdev->roce.netdev = (event == NETDEV_UNREGISTER) ?
                                             NULL : ndev;
                write_unlock(&ibdev->roce.netdev_lock);
                break;

        case NETDEV_UP:
        case NETDEV_DOWN: {
                struct net_device *upper = NULL;

                if ((upper == ndev || (!upper && ndev == ibdev->roce.netdev))
                    && ibdev->ib_active) {
                        struct ib_event ibev = {0};

                        ibev.device = &ibdev->ib_dev;
                        ibev.event = (event == NETDEV_UP) ?
                                     IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
                        ibev.element.port_num = 1;
                        ib_dispatch_event(&ibev);
                }
                break;
        }

        default:
                break;
        }

        return NOTIFY_DONE;
}

static struct net_device *mlx5_ib_get_netdev(struct ib_device *device,
                                             u8 port_num)
{
        struct mlx5_ib_dev *ibdev = to_mdev(device);
        struct net_device *ndev;

        /* Ensure ndev does not disappear before we invoke dev_hold()
         */
        read_lock(&ibdev->roce.netdev_lock);
        ndev = ibdev->roce.netdev;
        if (ndev)
                dev_hold(ndev);
        read_unlock(&ibdev->roce.netdev_lock);

        return ndev;
}

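/*
 * Map the operational Ethernet protocol (a single PTYS bit) onto the
 * IB (width, speed) pair whose product matches the link rate, e.g.
 * 40GbE is reported as 4X/QDR (4 lanes x 10 Gb/s) and 100GbE as
 * 4X/EDR (4 lanes x 25 Gb/s).
 */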
static int translate_eth_proto_oper(u32 eth_proto_oper, u8 *active_speed,
                                    u8 *active_width)
{
        switch (eth_proto_oper) {
        case MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII):
        case MLX5E_PROT_MASK(MLX5E_1000BASE_KX):
        case MLX5E_PROT_MASK(MLX5E_100BASE_TX):
        case MLX5E_PROT_MASK(MLX5E_1000BASE_T):
                *active_width = IB_WIDTH_1X;
                *active_speed = IB_SPEED_SDR;
                break;
        case MLX5E_PROT_MASK(MLX5E_10GBASE_T):
        case MLX5E_PROT_MASK(MLX5E_10GBASE_CX4):
        case MLX5E_PROT_MASK(MLX5E_10GBASE_KX4):
        case MLX5E_PROT_MASK(MLX5E_10GBASE_KR):
        case MLX5E_PROT_MASK(MLX5E_10GBASE_CR):
        case MLX5E_PROT_MASK(MLX5E_10GBASE_SR):
        case MLX5E_PROT_MASK(MLX5E_10GBASE_ER):
                *active_width = IB_WIDTH_1X;
                *active_speed = IB_SPEED_QDR;
                break;
        case MLX5E_PROT_MASK(MLX5E_25GBASE_CR):
        case MLX5E_PROT_MASK(MLX5E_25GBASE_KR):
        case MLX5E_PROT_MASK(MLX5E_25GBASE_SR):
                *active_width = IB_WIDTH_1X;
                *active_speed = IB_SPEED_EDR;
                break;
        case MLX5E_PROT_MASK(MLX5E_40GBASE_CR4):
        case MLX5E_PROT_MASK(MLX5E_40GBASE_KR4):
        case MLX5E_PROT_MASK(MLX5E_40GBASE_SR4):
        case MLX5E_PROT_MASK(MLX5E_40GBASE_LR4):
                *active_width = IB_WIDTH_4X;
                *active_speed = IB_SPEED_QDR;
                break;
        case MLX5E_PROT_MASK(MLX5E_50GBASE_CR2):
        case MLX5E_PROT_MASK(MLX5E_50GBASE_KR2):
        case MLX5E_PROT_MASK(MLX5E_50GBASE_SR2):
                *active_width = IB_WIDTH_1X;
                *active_speed = IB_SPEED_HDR;
                break;
        case MLX5E_PROT_MASK(MLX5E_56GBASE_R4):
                *active_width = IB_WIDTH_4X;
                *active_speed = IB_SPEED_FDR;
                break;
        case MLX5E_PROT_MASK(MLX5E_100GBASE_CR4):
        case MLX5E_PROT_MASK(MLX5E_100GBASE_SR4):
        case MLX5E_PROT_MASK(MLX5E_100GBASE_KR4):
        case MLX5E_PROT_MASK(MLX5E_100GBASE_LR4):
                *active_width = IB_WIDTH_4X;
                *active_speed = IB_SPEED_EDR;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
                                struct ib_port_attr *props)
{
        struct mlx5_ib_dev *dev = to_mdev(device);
        struct net_device *ndev;
        enum ib_mtu ndev_ib_mtu;
        u16 qkey_viol_cntr;
        u32 eth_prot_oper;
        int err;

        memset(props, 0, sizeof(*props));

        /* Possible bad flows are checked before filling out props so in case
         * of an error it will still be zeroed out.
         */
        err = mlx5_query_port_eth_proto_oper(dev->mdev, &eth_prot_oper, port_num);
        if (err)
                return err;

        translate_eth_proto_oper(eth_prot_oper, &props->active_speed,
                                 &props->active_width);

        props->port_cap_flags |= IB_PORT_CM_SUP;
        props->port_cap_flags |= IB_PORT_IP_BASED_GIDS;

        props->gid_tbl_len = MLX5_CAP_ROCE(dev->mdev,
                                           roce_address_table_size);
        props->max_mtu = IB_MTU_4096;
        props->max_msg_sz = 1 << MLX5_CAP_GEN(dev->mdev, log_max_msg);
        props->pkey_tbl_len = 1;
        props->state = IB_PORT_DOWN;
        props->phys_state = 3;

        mlx5_query_nic_vport_qkey_viol_cntr(dev->mdev, &qkey_viol_cntr);
        props->qkey_viol_cntr = qkey_viol_cntr;

        ndev = mlx5_ib_get_netdev(device, port_num);
        if (!ndev)
                return 0;

        if (netif_running(ndev) && netif_carrier_ok(ndev)) {
                props->state      = IB_PORT_ACTIVE;
                props->phys_state = 5;
        }

        ndev_ib_mtu = iboe_get_mtu(ndev->if_mtu);

        dev_put(ndev);

        props->active_mtu = min(props->max_mtu, ndev_ib_mtu);
        return 0;
}

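/*
 * Pack a GID and its attributes into the firmware roce_addr_layout:
 * source MAC, optional VLAN, RoCE version (v1 for the plain IB GID
 * type, v2 for UDP-encapsulated) and the L3 address.  For RoCE v2 a
 * v4-mapped GID is stored as a plain IPv4 address in the last four
 * bytes of the field.
 */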
static void ib_gid_to_mlx5_roce_addr(const union ib_gid *gid,
                                     const struct ib_gid_attr *attr,
                                     void *mlx5_addr)
{
#define MLX5_SET_RA(p, f, v) MLX5_SET(roce_addr_layout, p, f, v)
        char *mlx5_addr_l3_addr = MLX5_ADDR_OF(roce_addr_layout, mlx5_addr,
                                               source_l3_address);
        void *mlx5_addr_mac = MLX5_ADDR_OF(roce_addr_layout, mlx5_addr,
                                           source_mac_47_32);
        u16 vlan_id;

        if (!gid)
                return;
        ether_addr_copy(mlx5_addr_mac, IF_LLADDR(attr->ndev));

        vlan_id = rdma_vlan_dev_vlan_id(attr->ndev);
        if (vlan_id != 0xffff) {
                MLX5_SET_RA(mlx5_addr, vlan_valid, 1);
                MLX5_SET_RA(mlx5_addr, vlan_id, vlan_id);
        }

        switch (attr->gid_type) {
        case IB_GID_TYPE_IB:
                MLX5_SET_RA(mlx5_addr, roce_version, MLX5_ROCE_VERSION_1);
                break;
        case IB_GID_TYPE_ROCE_UDP_ENCAP:
                MLX5_SET_RA(mlx5_addr, roce_version, MLX5_ROCE_VERSION_2);
                break;

        default:
                WARN_ON(true);
        }

        if (attr->gid_type != IB_GID_TYPE_IB) {
                if (ipv6_addr_v4mapped((void *)gid))
                        MLX5_SET_RA(mlx5_addr, roce_l3_type,
                                    MLX5_ROCE_L3_TYPE_IPV4);
                else
                        MLX5_SET_RA(mlx5_addr, roce_l3_type,
                                    MLX5_ROCE_L3_TYPE_IPV6);
        }

        if ((attr->gid_type == IB_GID_TYPE_IB) ||
            !ipv6_addr_v4mapped((void *)gid))
                memcpy(mlx5_addr_l3_addr, gid, sizeof(*gid));
        else
                memcpy(&mlx5_addr_l3_addr[12], &gid->raw[12], 4);
}

static int set_roce_addr(struct ib_device *device, u8 port_num,
                         unsigned int index,
                         const union ib_gid *gid,
                         const struct ib_gid_attr *attr)
{
        struct mlx5_ib_dev *dev = to_mdev(device);
        u32  in[MLX5_ST_SZ_DW(set_roce_address_in)]  = {0};
        u32 out[MLX5_ST_SZ_DW(set_roce_address_out)] = {0};
        void *in_addr = MLX5_ADDR_OF(set_roce_address_in, in, roce_address);
        enum rdma_link_layer ll = mlx5_ib_port_link_layer(device, port_num);

        if (ll != IB_LINK_LAYER_ETHERNET)
                return -EINVAL;

        ib_gid_to_mlx5_roce_addr(gid, attr, in_addr);

        MLX5_SET(set_roce_address_in, in, roce_address_index, index);
        MLX5_SET(set_roce_address_in, in, opcode, MLX5_CMD_OP_SET_ROCE_ADDRESS);
        return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
}

static int mlx5_ib_add_gid(struct ib_device *device, u8 port_num,
                           unsigned int index, const union ib_gid *gid,
                           const struct ib_gid_attr *attr,
                           __always_unused void **context)
{
        return set_roce_addr(device, port_num, index, gid, attr);
}

static int mlx5_ib_del_gid(struct ib_device *device, u8 port_num,
                           unsigned int index, __always_unused void **context)
{
        return set_roce_addr(device, port_num, index, NULL, NULL);
}

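/*
 * RoCE v2 traffic is UDP-encapsulated and the source port must come
 * from the device-advertised range, so report its lower bound.  A
 * return value of 0 means the GID at this index is absent or is not a
 * RoCE v2 GID.
 */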
__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
                               int index)
{
        struct ib_gid_attr attr;
        union ib_gid gid;

        if (ib_get_cached_gid(&dev->ib_dev, port_num, index, &gid, &attr))
                return 0;

        if (!attr.ndev)
                return 0;

        dev_put(attr.ndev);

        if (attr.gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
                return 0;

        return cpu_to_be16(MLX5_CAP_ROCE(dev->mdev, r_roce_min_src_udp_port));
}

int mlx5_get_roce_gid_type(struct mlx5_ib_dev *dev, u8 port_num,
                           int index, enum ib_gid_type *gid_type)
{
        struct ib_gid_attr attr;
        union ib_gid gid;
        int ret;

        ret = ib_get_cached_gid(&dev->ib_dev, port_num, index, &gid, &attr);
        if (ret)
                return ret;

        if (!attr.ndev)
                return -ENODEV;

        dev_put(attr.ndev);

        *gid_type = attr.gid_type;

        return 0;
}

static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
{
        if (MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_IB)
                return !MLX5_CAP_GEN(dev->mdev, ib_virt);
        return 0;
}

enum {
        MLX5_VPORT_ACCESS_METHOD_MAD,
        MLX5_VPORT_ACCESS_METHOD_HCA,
        MLX5_VPORT_ACCESS_METHOD_NIC,
};

static int mlx5_get_vport_access_method(struct ib_device *ibdev)
{
        if (mlx5_use_mad_ifc(to_mdev(ibdev)))
                return MLX5_VPORT_ACCESS_METHOD_MAD;

        if (mlx5_ib_port_link_layer(ibdev, 1) ==
            IB_LINK_LAYER_ETHERNET)
                return MLX5_VPORT_ACCESS_METHOD_NIC;

        return MLX5_VPORT_ACCESS_METHOD_HCA;
}

static void get_atomic_caps(struct mlx5_ib_dev *dev,
                            struct ib_device_attr *props)
{
        u8 tmp;
        u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations);
        u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);
        u8 atomic_req_8B_endianness_mode =
                MLX5_CAP_ATOMIC(dev->mdev, atomic_req_8B_endianess_mode);

        /* Check if HW supports 8 bytes standard atomic operations and capable
         * of host endianness respond
         */
        tmp = MLX5_ATOMIC_OPS_CMP_SWAP | MLX5_ATOMIC_OPS_FETCH_ADD;
        if (((atomic_operations & tmp) == tmp) &&
            (atomic_size_qp & MLX5_ATOMIC_SIZE_QP_8BYTES) &&
            (atomic_req_8B_endianness_mode)) {
                props->atomic_cap = IB_ATOMIC_HCA;
        } else {
                props->atomic_cap = IB_ATOMIC_NONE;
        }
}

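/*
 * The query helpers below share one dispatch scheme: the MAD interface
 * for bare-metal IB ports without ib_virt, HCA vport commands for
 * virtualized IB, and NIC vport commands when the port is Ethernet
 * (RoCE).
 */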
static int mlx5_query_system_image_guid(struct ib_device *ibdev,
                                        __be64 *sys_image_guid)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_core_dev *mdev = dev->mdev;
        u64 tmp;
        int err;

        switch (mlx5_get_vport_access_method(ibdev)) {
        case MLX5_VPORT_ACCESS_METHOD_MAD:
                return mlx5_query_mad_ifc_system_image_guid(ibdev,
                                                            sys_image_guid);

        case MLX5_VPORT_ACCESS_METHOD_HCA:
                err = mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
                break;

        case MLX5_VPORT_ACCESS_METHOD_NIC:
                err = mlx5_query_nic_vport_system_image_guid(mdev, &tmp);
                break;

        default:
                return -EINVAL;
        }

        if (!err)
                *sys_image_guid = cpu_to_be64(tmp);

        return err;

}

static int mlx5_query_max_pkeys(struct ib_device *ibdev,
                                u16 *max_pkeys)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_core_dev *mdev = dev->mdev;

        switch (mlx5_get_vport_access_method(ibdev)) {
        case MLX5_VPORT_ACCESS_METHOD_MAD:
                return mlx5_query_mad_ifc_max_pkeys(ibdev, max_pkeys);

        case MLX5_VPORT_ACCESS_METHOD_HCA:
        case MLX5_VPORT_ACCESS_METHOD_NIC:
                *max_pkeys = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev,
                                                pkey_table_size));
                return 0;

        default:
                return -EINVAL;
        }
}

static int mlx5_query_vendor_id(struct ib_device *ibdev,
                                u32 *vendor_id)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);

        switch (mlx5_get_vport_access_method(ibdev)) {
        case MLX5_VPORT_ACCESS_METHOD_MAD:
                return mlx5_query_mad_ifc_vendor_id(ibdev, vendor_id);

        case MLX5_VPORT_ACCESS_METHOD_HCA:
        case MLX5_VPORT_ACCESS_METHOD_NIC:
                return mlx5_core_query_vendor_id(dev->mdev, vendor_id);

        default:
                return -EINVAL;
        }
}

static int mlx5_query_node_guid(struct mlx5_ib_dev *dev,
                                __be64 *node_guid)
{
        u64 tmp;
        int err;

        switch (mlx5_get_vport_access_method(&dev->ib_dev)) {
        case MLX5_VPORT_ACCESS_METHOD_MAD:
                return mlx5_query_mad_ifc_node_guid(dev, node_guid);

        case MLX5_VPORT_ACCESS_METHOD_HCA:
                err = mlx5_query_hca_vport_node_guid(dev->mdev, &tmp);
                break;

        case MLX5_VPORT_ACCESS_METHOD_NIC:
                err = mlx5_query_nic_vport_node_guid(dev->mdev, &tmp);
                break;

        default:
                return -EINVAL;
        }

        if (!err)
                *node_guid = cpu_to_be64(tmp);

        return err;
}

struct mlx5_reg_node_desc {
        u8 desc[IB_DEVICE_NODE_DESC_MAX];
};

static int mlx5_query_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
{
        struct mlx5_reg_node_desc in;

        if (mlx5_use_mad_ifc(dev))
                return mlx5_query_mad_ifc_node_desc(dev, node_desc);

        memset(&in, 0, sizeof(in));

        return mlx5_core_access_reg(dev->mdev, &in, sizeof(in), node_desc,
                                    sizeof(struct mlx5_reg_node_desc),
                                    MLX5_REG_NODE_DESC, 0, 0);
}

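/*
 * mlx5_ib_query_device() follows the extended uverbs convention: "uhw"
 * carries vendor-specific input/output, the response is built up
 * incrementally, and response_length only grows for fields the
 * caller's output buffer can actually hold (the field_avail() checks
 * below), so older user libraries keep working against newer kernels.
 */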
static int mlx5_ib_query_device(struct ib_device *ibdev,
                                struct ib_device_attr *props,
                                struct ib_udata *uhw)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_core_dev *mdev = dev->mdev;
        int err = -ENOMEM;
        int max_rq_sg;
        int max_sq_sg;
        u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);
        struct mlx5_ib_query_device_resp resp = {};
        size_t resp_len;
        u64 max_tso;

        resp_len = sizeof(resp.comp_mask) + sizeof(resp.response_length);
        if (uhw->outlen && uhw->outlen < resp_len)
                return -EINVAL;
        else
                resp.response_length = resp_len;

        if (uhw->inlen && !ib_is_udata_cleared(uhw, 0, uhw->inlen))
                return -EINVAL;

        memset(props, 0, sizeof(*props));
        err = mlx5_query_system_image_guid(ibdev,
                                           &props->sys_image_guid);
        if (err)
                return err;

        err = mlx5_query_max_pkeys(ibdev, &props->max_pkeys);
        if (err)
                return err;

        err = mlx5_query_vendor_id(ibdev, &props->vendor_id);
        if (err)
                return err;

        props->fw_ver = ((u64)fw_rev_maj(dev->mdev) << 32) |
                (fw_rev_min(dev->mdev) << 16) |
                fw_rev_sub(dev->mdev);
        props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
                IB_DEVICE_PORT_ACTIVE_EVENT |
                IB_DEVICE_SYS_IMAGE_GUID |
                IB_DEVICE_RC_RNR_NAK_GEN;

        if (MLX5_CAP_GEN(mdev, pkv))
                props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
        if (MLX5_CAP_GEN(mdev, qkv))
                props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
        if (MLX5_CAP_GEN(mdev, apm))
                props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
        if (MLX5_CAP_GEN(mdev, xrc))
                props->device_cap_flags |= IB_DEVICE_XRC;
        if (MLX5_CAP_GEN(mdev, imaicl)) {
                props->device_cap_flags |= IB_DEVICE_MEM_WINDOW |
                                           IB_DEVICE_MEM_WINDOW_TYPE_2B;
                props->max_mw = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
                /* We support 'Gappy' memory registration too */
                props->device_cap_flags |= IB_DEVICE_SG_GAPS_REG;
        }
        props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
        if (MLX5_CAP_GEN(mdev, sho)) {
                props->device_cap_flags |= IB_DEVICE_SIGNATURE_HANDOVER;
                /* At this stage no support for signature handover */
                props->sig_prot_cap = IB_PROT_T10DIF_TYPE_1 |
                                      IB_PROT_T10DIF_TYPE_2 |
                                      IB_PROT_T10DIF_TYPE_3;
                props->sig_guard_cap = IB_GUARD_T10DIF_CRC |
                                       IB_GUARD_T10DIF_CSUM;
        }
        if (MLX5_CAP_GEN(mdev, block_lb_mc))
                props->device_cap_flags |= IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;

        if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads)) {
                if (MLX5_CAP_ETH(mdev, csum_cap))
                        props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;

                if (field_avail(typeof(resp), tso_caps, uhw->outlen)) {
                        max_tso = MLX5_CAP_ETH(mdev, max_lso_cap);
                        if (max_tso) {
                                resp.tso_caps.max_tso = 1 << max_tso;
                                resp.tso_caps.supported_qpts |=
                                        1 << IB_QPT_RAW_PACKET;
                                resp.response_length += sizeof(resp.tso_caps);
                        }
                }

                if (field_avail(typeof(resp), rss_caps, uhw->outlen)) {
                        resp.rss_caps.rx_hash_function =
                                MLX5_RX_HASH_FUNC_TOEPLITZ;
                        resp.rss_caps.rx_hash_fields_mask =
                                MLX5_RX_HASH_SRC_IPV4 |
                                MLX5_RX_HASH_DST_IPV4 |
                                MLX5_RX_HASH_SRC_IPV6 |
                                MLX5_RX_HASH_DST_IPV6 |
                                MLX5_RX_HASH_SRC_PORT_TCP |
                                MLX5_RX_HASH_DST_PORT_TCP |
                                MLX5_RX_HASH_SRC_PORT_UDP |
                                MLX5_RX_HASH_DST_PORT_UDP;
                        resp.response_length += sizeof(resp.rss_caps);
                }
        } else {
                if (field_avail(typeof(resp), tso_caps, uhw->outlen))
                        resp.response_length += sizeof(resp.tso_caps);
                if (field_avail(typeof(resp), rss_caps, uhw->outlen))
                        resp.response_length += sizeof(resp.rss_caps);
        }

        if (MLX5_CAP_GEN(mdev, ipoib_ipoib_offloads)) {
                props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
                props->device_cap_flags |= IB_DEVICE_UD_TSO;
        }

        if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
            MLX5_CAP_ETH(dev->mdev, scatter_fcs))
                props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS;

        if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS))
                props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;

        props->vendor_part_id = mdev->pdev->device;
        props->hw_ver = mdev->pdev->revision;

        props->max_mr_size = ~0ull;
        props->page_size_cap = ~(min_page_size - 1);
        props->max_qp = 1 << MLX5_CAP_GEN(mdev, log_max_qp);
        props->max_qp_wr = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
        max_rq_sg = MLX5_CAP_GEN(mdev, max_wqe_sz_rq) /
                     sizeof(struct mlx5_wqe_data_seg);
        max_sq_sg = (MLX5_CAP_GEN(mdev, max_wqe_sz_sq) -
                     sizeof(struct mlx5_wqe_ctrl_seg)) /
                     sizeof(struct mlx5_wqe_data_seg);
        props->max_sge = min(max_rq_sg, max_sq_sg);
        props->max_sge_rd = MLX5_MAX_SGE_RD;
        props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
        props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
        props->max_mr = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
        props->max_pd = 1 << MLX5_CAP_GEN(mdev, log_max_pd);
        props->max_qp_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_req_qp);
        props->max_qp_init_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_res_qp);
        props->max_srq = 1 << MLX5_CAP_GEN(mdev, log_max_srq);
        props->max_srq_wr = (1 << MLX5_CAP_GEN(mdev, log_max_srq_sz)) - 1;
        props->local_ca_ack_delay = MLX5_CAP_GEN(mdev, local_ca_ack_delay);
        props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
        props->max_srq_sge = max_rq_sg - 1;
        props->max_fast_reg_page_list_len =
                1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size);
        get_atomic_caps(dev, props);
        props->masked_atomic_cap = IB_ATOMIC_NONE;
        props->max_mcast_grp = 1 << MLX5_CAP_GEN(mdev, log_max_mcg);
        props->max_mcast_qp_attach = MLX5_CAP_GEN(mdev, max_qp_mcg);
        props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
                                           props->max_mcast_grp;
        props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */
        props->hca_core_clock = MLX5_CAP_GEN(mdev, device_frequency_khz);
        props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
        if (MLX5_CAP_GEN(mdev, pg))
                props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING;
        props->odp_caps = dev->odp_caps;
#endif

        if (MLX5_CAP_GEN(mdev, cd))
                props->device_cap_flags |= IB_DEVICE_CROSS_CHANNEL;

        if (!mlx5_core_is_pf(mdev))
                props->device_cap_flags |= IB_DEVICE_VIRTUAL_FUNCTION;

        if (mlx5_ib_port_link_layer(ibdev, 1) ==
            IB_LINK_LAYER_ETHERNET) {
                props->rss_caps.max_rwq_indirection_tables =
                        1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt);
                props->rss_caps.max_rwq_indirection_table_size =
                        1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt_size);
                props->rss_caps.supported_qpts = 1 << IB_QPT_RAW_PACKET;
                props->max_wq_type_rq =
                        1 << MLX5_CAP_GEN(dev->mdev, log_max_rq);
        }

        if (uhw->outlen) {
                err = ib_copy_to_udata(uhw, &resp, resp.response_length);

                if (err)
                        return err;
        }

        return 0;
}

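/*
 * The active link width read from PTYS is a bitmask using the encoding
 * below; translate_active_width() folds it into the values ib_port_attr
 * expects.  2X is rejected because IB_WIDTH_* has no encoding for it.
 */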
enum mlx5_ib_width {
        MLX5_IB_WIDTH_1X  = 1 << 0,
        MLX5_IB_WIDTH_2X  = 1 << 1,
        MLX5_IB_WIDTH_4X  = 1 << 2,
        MLX5_IB_WIDTH_8X  = 1 << 3,
        MLX5_IB_WIDTH_12X = 1 << 4
};

static int translate_active_width(struct ib_device *ibdev, u8 active_width,
                                  u8 *ib_width)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        int err = 0;

        if (active_width & MLX5_IB_WIDTH_1X) {
                *ib_width = IB_WIDTH_1X;
        } else if (active_width & MLX5_IB_WIDTH_2X) {
                mlx5_ib_dbg(dev, "active_width %d is not supported by IB spec\n",
                            (int)active_width);
                err = -EINVAL;
        } else if (active_width & MLX5_IB_WIDTH_4X) {
                *ib_width = IB_WIDTH_4X;
        } else if (active_width & MLX5_IB_WIDTH_8X) {
                *ib_width = IB_WIDTH_8X;
        } else if (active_width & MLX5_IB_WIDTH_12X) {
                *ib_width = IB_WIDTH_12X;
        } else {
                mlx5_ib_dbg(dev, "Invalid active_width %d\n",
                            (int)active_width);
                err = -EINVAL;
        }

        return err;
}

enum ib_max_vl_num {
        __IB_MAX_VL_0    = 1,
        __IB_MAX_VL_0_1  = 2,
        __IB_MAX_VL_0_3  = 3,
        __IB_MAX_VL_0_7  = 4,
        __IB_MAX_VL_0_14 = 5,
};

enum mlx5_vl_hw_cap {
        MLX5_VL_HW_0    = 1,
        MLX5_VL_HW_0_1  = 2,
        MLX5_VL_HW_0_2  = 3,
        MLX5_VL_HW_0_3  = 4,
        MLX5_VL_HW_0_4  = 5,
        MLX5_VL_HW_0_5  = 6,
        MLX5_VL_HW_0_6  = 7,
        MLX5_VL_HW_0_7  = 8,
        MLX5_VL_HW_0_14 = 15
};

static int translate_max_vl_num(struct ib_device *ibdev, u8 vl_hw_cap,
                                u8 *max_vl_num)
{
        switch (vl_hw_cap) {
        case MLX5_VL_HW_0:
                *max_vl_num = __IB_MAX_VL_0;
                break;
        case MLX5_VL_HW_0_1:
                *max_vl_num = __IB_MAX_VL_0_1;
                break;
        case MLX5_VL_HW_0_3:
                *max_vl_num = __IB_MAX_VL_0_3;
                break;
        case MLX5_VL_HW_0_7:
                *max_vl_num = __IB_MAX_VL_0_7;
                break;
        case MLX5_VL_HW_0_14:
                *max_vl_num = __IB_MAX_VL_0_14;
                break;

        default:
                return -EINVAL;
        }

        return 0;
}

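/*
 * Native IB port query: the HCA vport context supplies the SMA-visible
 * fields (LID, SM LID, state, violation counters), while the PTYS,
 * PMTU and PVLC access registers supply link width/speed, MTU limits
 * and the VL capability.
 */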
static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
                               struct ib_port_attr *props)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_core_dev *mdev = dev->mdev;
        u32 *rep;
        int replen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
        struct mlx5_ptys_reg *ptys;
        struct mlx5_pmtu_reg *pmtu;
        struct mlx5_pvlc_reg pvlc;
        void *ctx;
        int err;

        rep = mlx5_vzalloc(replen);
        ptys = kzalloc(sizeof(*ptys), GFP_KERNEL);
        pmtu = kzalloc(sizeof(*pmtu), GFP_KERNEL);
        if (!rep || !ptys || !pmtu) {
                err = -ENOMEM;
                goto out;
        }

        memset(props, 0, sizeof(*props));

        err = mlx5_query_hca_vport_context(mdev, port, 0, rep, replen);
        if (err)
                goto out;

        ctx = MLX5_ADDR_OF(query_hca_vport_context_out, rep, hca_vport_context);

        props->lid = MLX5_GET(hca_vport_context, ctx, lid);
        props->lmc = MLX5_GET(hca_vport_context, ctx, lmc);
        props->sm_lid = MLX5_GET(hca_vport_context, ctx, sm_lid);
        props->sm_sl = MLX5_GET(hca_vport_context, ctx, sm_sl);
        props->state = MLX5_GET(hca_vport_context, ctx, vport_state);
        props->phys_state = MLX5_GET(hca_vport_context, ctx,
                                     port_physical_state);
        props->port_cap_flags = MLX5_GET(hca_vport_context, ctx, cap_mask1);
        props->gid_tbl_len = mlx5_get_gid_table_len(MLX5_CAP_GEN(mdev, gid_table_size));
        props->max_msg_sz = 1 << MLX5_CAP_GEN(mdev, log_max_msg);
        props->pkey_tbl_len = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev, pkey_table_size));
        props->bad_pkey_cntr = MLX5_GET(hca_vport_context, ctx,
                                        pkey_violation_counter);
        props->qkey_viol_cntr = MLX5_GET(hca_vport_context, ctx,
                                         qkey_violation_counter);
        props->subnet_timeout = MLX5_GET(hca_vport_context, ctx,
                                         subnet_timeout);
        props->init_type_reply = MLX5_GET(hca_vport_context, ctx,
                                          init_type_reply);
        props->grh_required = MLX5_GET(hca_vport_context, ctx, grh_required);

        ptys->proto_mask |= MLX5_PTYS_IB;
        ptys->local_port = port;
        err = mlx5_core_access_ptys(mdev, ptys, 0);
        if (err)
                goto out;

        err = translate_active_width(ibdev, ptys->ib_link_width_oper,
                                     &props->active_width);
        if (err)
                goto out;

        props->active_speed = (u8)ptys->ib_proto_oper;

        pmtu->local_port = port;
        err = mlx5_core_access_pmtu(mdev, pmtu, 0);
        if (err)
                goto out;

        props->max_mtu = pmtu->max_mtu;
        props->active_mtu = pmtu->oper_mtu;

        memset(&pvlc, 0, sizeof(pvlc));
        pvlc.local_port = port;
        err = mlx5_core_access_pvlc(mdev, &pvlc, 0);
        if (err)
                goto out;

        err = translate_max_vl_num(ibdev, pvlc.vl_hw_cap,
                                   &props->max_vl_num);
out:
        kvfree(rep);
        kfree(ptys);
        kfree(pmtu);
        return err;
}

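/*
 * Non-static on purpose: other parts of the driver re-query port
 * attributes through this dispatcher.
 */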
int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
                       struct ib_port_attr *props)
{
        switch (mlx5_get_vport_access_method(ibdev)) {
        case MLX5_VPORT_ACCESS_METHOD_MAD:
                return mlx5_query_mad_ifc_port(ibdev, port, props);

        case MLX5_VPORT_ACCESS_METHOD_HCA:
                return mlx5_query_hca_port(ibdev, port, props);

        case MLX5_VPORT_ACCESS_METHOD_NIC:
                return mlx5_query_port_roce(ibdev, port, props);

        default:
                return -EINVAL;
        }
}

static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
                             union ib_gid *gid)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_core_dev *mdev = dev->mdev;

        switch (mlx5_get_vport_access_method(ibdev)) {
        case MLX5_VPORT_ACCESS_METHOD_MAD:
                return mlx5_query_mad_ifc_gids(ibdev, port, index, gid);

        case MLX5_VPORT_ACCESS_METHOD_HCA:
                return mlx5_query_hca_vport_gid(mdev, port, 0, index, gid);

        default:
                return -EINVAL;
        }

}

static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
                              u16 *pkey)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_core_dev *mdev = dev->mdev;

        switch (mlx5_get_vport_access_method(ibdev)) {
        case MLX5_VPORT_ACCESS_METHOD_MAD:
                return mlx5_query_mad_ifc_pkey(ibdev, port, index, pkey);

        case MLX5_VPORT_ACCESS_METHOD_HCA:
        case MLX5_VPORT_ACCESS_METHOD_NIC:
                return mlx5_query_hca_vport_pkey(mdev, 0, port, 0, index,
                                                 pkey);
        default:
                return -EINVAL;
        }
}

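/*
 * Only node description changes are supported.  The new string is
 * pushed to firmware first (letting it raise trap 144 towards the SM)
 * and is mirrored into ibdev->node_desc only if that write succeeds.
 */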
static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask,
                                 struct ib_device_modify *props)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_reg_node_desc in;
        struct mlx5_reg_node_desc out;
        int err;

        if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
                return -EOPNOTSUPP;

        if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
                return 0;

        /*
         * If possible, pass node desc to FW, so it can generate
         * a 144 trap.  If cmd fails, just ignore.
         */
        memcpy(&in, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
        err = mlx5_core_access_reg(dev->mdev, &in, sizeof(in), &out,
                                   sizeof(out), MLX5_REG_NODE_DESC, 0, 1);
        if (err)
                return err;

        memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX);

        return err;
}

static int mlx5_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
                               struct ib_port_modify *props)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct ib_port_attr attr;
        u32 tmp;
        int err;

        mutex_lock(&dev->cap_mask_mutex);

        err = mlx5_ib_query_port(ibdev, port, &attr);
        if (err)
                goto out;

        tmp = (attr.port_cap_flags | props->set_port_cap_mask) &
                ~props->clr_port_cap_mask;

        err = mlx5_set_port_caps(dev->mdev, port, tmp);

out:
        mutex_unlock(&dev->cap_mask_mutex);
        return err;
}

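/*
 * User context setup.  Two request layouts are accepted: the original
 * mlx5_ib_alloc_ucontext_req ("v0") and the extended _v2 struct; the
 * version is inferred from the request length, and any trailing bytes
 * beyond the known v2 fields must be zeroed.  The bulk of the work is
 * carving out UAR pages and their blue-flame ("uuar") sub-registers
 * for this context.
 */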
static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
                                                  struct ib_udata *udata)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_ib_alloc_ucontext_req_v2 req = {};
        struct mlx5_ib_alloc_ucontext_resp resp = {};
        struct mlx5_ib_ucontext *context;
        struct mlx5_uuar_info *uuari;
        struct mlx5_uar *uars;
        int gross_uuars;
        int num_uars;
        int ver;
        int uuarn;
        int err;
        int i;
        size_t reqlen;
        size_t min_req_v2 = offsetof(struct mlx5_ib_alloc_ucontext_req_v2,
                                     max_cqe_version);

        if (!dev->ib_active)
                return ERR_PTR(-EAGAIN);

        if (udata->inlen < sizeof(struct ib_uverbs_cmd_hdr))
                return ERR_PTR(-EINVAL);

        reqlen = udata->inlen - sizeof(struct ib_uverbs_cmd_hdr);
        if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
                ver = 0;
        else if (reqlen >= min_req_v2)
                ver = 2;
        else
                return ERR_PTR(-EINVAL);

        err = ib_copy_from_udata(&req, udata, min(reqlen, sizeof(req)));
        if (err)
                return ERR_PTR(err);

        if (req.flags)
                return ERR_PTR(-EINVAL);

        if (req.total_num_uuars > MLX5_MAX_UUARS)
                return ERR_PTR(-ENOMEM);

        if (req.total_num_uuars == 0)
                return ERR_PTR(-EINVAL);

        if (req.comp_mask || req.reserved0 || req.reserved1 || req.reserved2)
                return ERR_PTR(-EOPNOTSUPP);

        if (reqlen > sizeof(req) &&
            !ib_is_udata_cleared(udata, sizeof(req),
                                 reqlen - sizeof(req)))
                return ERR_PTR(-EOPNOTSUPP);

        req.total_num_uuars = ALIGN(req.total_num_uuars,
                                    MLX5_NON_FP_BF_REGS_PER_PAGE);
        if (req.num_low_latency_uuars > req.total_num_uuars - 1)
                return ERR_PTR(-EINVAL);

        num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE;
        gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE;
        resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
        if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf))
                resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
        resp.cache_line_size = cache_line_size();
        resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
        resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
        resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
        resp.max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
        resp.max_srq_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
        resp.cqe_version = min_t(__u8,
                                 (__u8)MLX5_CAP_GEN(dev->mdev, cqe_version),
                                 req.max_cqe_version);
        resp.response_length = min(offsetof(typeof(resp), response_length) +
                                   sizeof(resp.response_length), udata->outlen);

        context = kzalloc(sizeof(*context), GFP_KERNEL);
        if (!context)
                return ERR_PTR(-ENOMEM);

        uuari = &context->uuari;
        mutex_init(&uuari->lock);
        uars = kcalloc(num_uars, sizeof(*uars), GFP_KERNEL);
        if (!uars) {
                err = -ENOMEM;
                goto out_ctx;
        }

        uuari->bitmap = kcalloc(BITS_TO_LONGS(gross_uuars),
                                sizeof(*uuari->bitmap),
                                GFP_KERNEL);
        if (!uuari->bitmap) {
                err = -ENOMEM;
                goto out_uar_ctx;
        }
        /*
         * clear all fast path uuars
         */
        for (i = 0; i < gross_uuars; i++) {
                uuarn = i & 3;
                if (uuarn == 2 || uuarn == 3)
                        set_bit(i, uuari->bitmap);
        }

        uuari->count = kcalloc(gross_uuars, sizeof(*uuari->count), GFP_KERNEL);
        if (!uuari->count) {
                err = -ENOMEM;
                goto out_bitmap;
        }

        for (i = 0; i < num_uars; i++) {
                err = mlx5_cmd_alloc_uar(dev->mdev, &uars[i].index);
                if (err)
                        goto out_count;
        }

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
        context->ibucontext.invalidate_range = &mlx5_ib_invalidate_range;
#endif
        if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain)) {
                err = mlx5_alloc_transport_domain(dev->mdev,
                                                  &context->tdn);
                if (err)
                        goto out_uars;
        }

        INIT_LIST_HEAD(&context->vma_private_list);
        INIT_LIST_HEAD(&context->db_page_list);
        mutex_init(&context->db_page_mutex);

        resp.tot_uuars = req.total_num_uuars;
        resp.num_ports = MLX5_CAP_GEN(dev->mdev, num_ports);

        if (field_avail(typeof(resp), cqe_version, udata->outlen))
                resp.response_length += sizeof(resp.cqe_version);

        if (field_avail(typeof(resp), cmds_supp_uhw, udata->outlen)) {
                resp.cmds_supp_uhw |= MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE |
                                      MLX5_USER_CMDS_SUPP_UHW_CREATE_AH;
                resp.response_length += sizeof(resp.cmds_supp_uhw);
        }

        /*
         * We don't want to expose information from the PCI bar that is located
         * after 4096 bytes, so if the arch only supports larger pages, let's
         * pretend we don't support reading the HCA's core clock.  This is also
         * forced by mmap function.
         */
        if (PAGE_SIZE <= 4096 &&
            field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) {
                resp.comp_mask |=
                        MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET;
                resp.hca_core_clock_offset =
                        offsetof(struct mlx5_init_seg, internal_timer_h) %
                        PAGE_SIZE;
                resp.response_length += sizeof(resp.hca_core_clock_offset) +
                                        sizeof(resp.reserved2);
        }

        err = ib_copy_to_udata(udata, &resp, resp.response_length);
        if (err)
                goto out_td;

        uuari->ver = ver;
        uuari->num_low_latency_uuars = req.num_low_latency_uuars;
        uuari->uars = uars;
        uuari->num_uars = num_uars;
        context->cqe_version = resp.cqe_version;

        return &context->ibucontext;

out_td:
        if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
                mlx5_dealloc_transport_domain(dev->mdev, context->tdn);

out_uars:
        for (i--; i >= 0; i--)
                mlx5_cmd_free_uar(dev->mdev, uars[i].index);
out_count:
        kfree(uuari->count);

out_bitmap:
        kfree(uuari->bitmap);

out_uar_ctx:
        kfree(uars);

out_ctx:
        kfree(context);
        return ERR_PTR(err);
}

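/*
 * Teardown mirrors mlx5_ib_alloc_ucontext(): drop the transport domain
 * first, then free every UAR page, then the bookkeeping arrays.
 */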
static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
        struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
        struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
        struct mlx5_uuar_info *uuari = &context->uuari;
        int i;

        if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
                mlx5_dealloc_transport_domain(dev->mdev, context->tdn);

        for (i = 0; i < uuari->num_uars; i++) {
                if (mlx5_cmd_free_uar(dev->mdev, uuari->uars[i].index))
                        mlx5_ib_warn(dev, "failed to free UAR 0x%x\n", uuari->uars[i].index);
        }

        kfree(uuari->count);
        kfree(uuari->bitmap);
        kfree(uuari->uars);
        kfree(context);

        return 0;
}

static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev, int index)
{
        return (pci_resource_start(dev->mdev->pdev, 0) >> PAGE_SHIFT) + index;
}

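/*
 * The mmap page offset (vm_pgoff) is an encoded cookie, not a file
 * offset: the command is kept above MLX5_IB_MMAP_CMD_SHIFT and the
 * argument (e.g. a UAR index) in the bits below it.  Assuming the
 * usual shift of 8, a vm_pgoff of 0x102 decodes to command 1, index 2.
 */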
1300331769Shselasky	 */
1301331769Shselasky	mlx5_ib_vma_priv_data = (struct mlx5_ib_vma_private_data *)area->vm_private_data;
1302331769Shselasky
1303331769Shselasky	/* Set the vma context pointer to NULL in the mlx5_ib driver's
1304331769Shselasky	 * private data, to protect against a race condition in
1305331769Shselasky	 * mlx5_ib_disassociate_ucontext().
1306331769Shselasky	 */
1307331769Shselasky	mlx5_ib_vma_priv_data->vma = NULL;
1308331769Shselasky	list_del(&mlx5_ib_vma_priv_data->list);
1309331769Shselasky	kfree(mlx5_ib_vma_priv_data);
1310331769Shselasky}
1311331769Shselasky
1312331769Shselaskystatic const struct vm_operations_struct mlx5_ib_vm_ops = {
1313331769Shselasky	.open = mlx5_ib_vma_open,
1314331769Shselasky	.close = mlx5_ib_vma_close
1315331769Shselasky};
1316331769Shselasky
1317331769Shselaskystatic int mlx5_ib_set_vma_data(struct vm_area_struct *vma,
1318331769Shselasky				struct mlx5_ib_ucontext *ctx)
1319331769Shselasky{
1320331769Shselasky	struct mlx5_ib_vma_private_data *vma_prv;
1321331769Shselasky	struct list_head *vma_head = &ctx->vma_private_list;
1322331769Shselasky
1323331769Shselasky	vma_prv = kzalloc(sizeof(*vma_prv), GFP_KERNEL);
1324331769Shselasky	if (!vma_prv)
1325331769Shselasky		return -ENOMEM;
1326331769Shselasky
1327331769Shselasky	vma_prv->vma = vma;
1328331769Shselasky	vma->vm_private_data = vma_prv;
1329331769Shselasky	vma->vm_ops = &mlx5_ib_vm_ops;
1330331769Shselasky
1331331769Shselasky	list_add(&vma_prv->list, vma_head);
1332331769Shselasky
1333331769Shselasky	return 0;
1334331769Shselasky}
1335331769Shselasky
1336331769Shselaskystatic inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
1337331769Shselasky{
1338331769Shselasky	switch (cmd) {
1339331769Shselasky	case MLX5_IB_MMAP_WC_PAGE:
1340331769Shselasky		return "WC";
1341331769Shselasky	case MLX5_IB_MMAP_REGULAR_PAGE:
1342331769Shselasky		return "best effort WC";
1343331769Shselasky	case MLX5_IB_MMAP_NC_PAGE:
1344331769Shselasky		return "NC";
1345331769Shselasky	default:
1346331769Shselasky		return NULL;
1347331769Shselasky	}
1348331769Shselasky}
1349331769Shselasky
1350331769Shselaskystatic int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
1351331769Shselasky		    struct vm_area_struct *vma,
1352322810Shselasky		    struct mlx5_ib_ucontext *context)
1353322810Shselasky{
1354331769Shselasky	struct mlx5_uuar_info *uuari = &context->uuari;
1355331769Shselasky	int err;
1356322810Shselasky	unsigned long idx;
1357331769Shselasky	phys_addr_t pfn, pa;
1358331769Shselasky	pgprot_t prot;
1359322810Shselasky
1360331769Shselasky	switch (cmd) {
1361331769Shselasky	case MLX5_IB_MMAP_WC_PAGE:
1362331769Shselasky/* Some architectures don't support WC memory */
1363331769Shselasky#if defined(CONFIG_X86)
1364331769Shselasky		if (!pat_enabled())
1365331769Shselasky			return -EPERM;
1366331769Shselasky#elif !(defined(CONFIG_PPC) || (defined(CONFIG_ARM) && defined(CONFIG_MMU)))
1367331769Shselasky		return -EPERM;
1368331769Shselasky#endif
1369331769Shselasky		/* fall through */
1370331769Shselasky	case MLX5_IB_MMAP_REGULAR_PAGE:
1371331769Shselasky		/* For MLX5_IB_MMAP_REGULAR_PAGE do the best effort to get WC */
1372331769Shselasky		prot = pgprot_writecombine(vma->vm_page_prot);
1373331769Shselasky		break;
1374331769Shselasky	case MLX5_IB_MMAP_NC_PAGE:
1375331769Shselasky		prot = pgprot_noncached(vma->vm_page_prot);
1376331769Shselasky		break;
1377331769Shselasky	default:
1378322810Shselasky		return -EINVAL;
1379322810Shselasky	}
1380322810Shselasky
1381331769Shselasky	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
1382331769Shselasky return -EINVAL; 1383331769Shselasky 1384322810Shselasky idx = get_index(vma->vm_pgoff); 1385331769Shselasky if (idx >= uuari->num_uars) 1386322810Shselasky return -EINVAL; 1387322810Shselasky 1388322810Shselasky pfn = uar_index2pfn(dev, uuari->uars[idx].index); 1389331769Shselasky mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn); 1390322810Shselasky 1391322810Shselasky vma->vm_page_prot = prot; 1392331769Shselasky err = io_remap_pfn_range(vma, vma->vm_start, pfn, 1393331769Shselasky PAGE_SIZE, vma->vm_page_prot); 1394331769Shselasky if (err) { 1395331769Shselasky mlx5_ib_err(dev, "io_remap_pfn_range failed with error=%d, vm_start=0x%llx, pfn=%pa, mmap_cmd=%s\n", 1396331769Shselasky err, (unsigned long long)vma->vm_start, &pfn, mmap_cmd2str(cmd)); 1397322810Shselasky return -EAGAIN; 1398322810Shselasky } 1399322810Shselasky 1400331769Shselasky pa = pfn << PAGE_SHIFT; 1401331769Shselasky mlx5_ib_dbg(dev, "mapped %s at 0x%llx, PA %pa\n", mmap_cmd2str(cmd), 1402331769Shselasky (unsigned long long)vma->vm_start, &pa); 1403322810Shselasky 1404331769Shselasky return mlx5_ib_set_vma_data(vma, context); 1405322810Shselasky} 1406322810Shselasky 1407322810Shselaskystatic int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma) 1408322810Shselasky{ 1409322810Shselasky struct mlx5_ib_ucontext *context = to_mucontext(ibcontext); 1410322810Shselasky struct mlx5_ib_dev *dev = to_mdev(ibcontext->device); 1411322810Shselasky unsigned long command; 1412331769Shselasky phys_addr_t pfn; 1413322810Shselasky 1414322810Shselasky command = get_command(vma->vm_pgoff); 1415322810Shselasky switch (command) { 1416331769Shselasky case MLX5_IB_MMAP_WC_PAGE: 1417331769Shselasky case MLX5_IB_MMAP_NC_PAGE: 1418322810Shselasky case MLX5_IB_MMAP_REGULAR_PAGE: 1419331769Shselasky return uar_mmap(dev, command, vma, context); 1420322810Shselasky 1421331769Shselasky case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES: 1422331769Shselasky return -ENOSYS; 1423322810Shselasky 1424331769Shselasky case MLX5_IB_MMAP_CORE_CLOCK: 1425331769Shselasky if (vma->vm_end - vma->vm_start != PAGE_SIZE) 1426331769Shselasky return -EINVAL; 1427322810Shselasky 1428331769Shselasky if (vma->vm_flags & VM_WRITE) 1429331769Shselasky return -EPERM; 1430331769Shselasky 1431331769Shselasky /* Don't expose to user-space information it shouldn't have */ 1432331769Shselasky if (PAGE_SIZE > 4096) 1433331769Shselasky return -EOPNOTSUPP; 1434331769Shselasky 1435331769Shselasky vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 1436331769Shselasky pfn = (dev->mdev->iseg_base + 1437331769Shselasky offsetof(struct mlx5_init_seg, internal_timer_h)) >> 1438331769Shselasky PAGE_SHIFT; 1439331769Shselasky if (io_remap_pfn_range(vma, vma->vm_start, pfn, 1440331769Shselasky PAGE_SIZE, vma->vm_page_prot)) 1441331769Shselasky return -EAGAIN; 1442331769Shselasky 1443331769Shselasky mlx5_ib_dbg(dev, "mapped internal timer at 0x%llx, PA 0x%llx\n", 1444331769Shselasky (unsigned long long)vma->vm_start, 1445331769Shselasky (unsigned long long)pfn << PAGE_SHIFT); 1446322810Shselasky break; 1447322810Shselasky 1448322810Shselasky default: 1449322810Shselasky return -EINVAL; 1450322810Shselasky } 1451322810Shselasky 1452322810Shselasky return 0; 1453322810Shselasky} 1454322810Shselasky 1455322810Shselaskystatic struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev, 1456322810Shselasky struct ib_ucontext *context, 1457322810Shselasky struct ib_udata *udata) 1458322810Shselasky{ 1459322810Shselasky struct mlx5_ib_alloc_pd_resp resp; 
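	/*
	 * PD allocation is a thin wrapper around the firmware command:
	 * mlx5_core_alloc_pd() hands back a protection-domain number
	 * (pdn), and for user contexts that pdn is copied out through
	 * udata so user space can reference the PD in its own commands.
	 * If the copy faults, the pdn is released before returning.
	 *
	 * A minimal user-side sketch of the path that lands here,
	 * assuming standard libibverbs (not part of this file):
	 *
	 *	struct ibv_context *ctx = ibv_open_device(device);
	 *	struct ibv_pd *pd = ibv_alloc_pd(ctx);
	 *	...
	 *	ibv_dealloc_pd(pd);
	 */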
1460322810Shselasky struct mlx5_ib_pd *pd; 1461322810Shselasky int err; 1462322810Shselasky 1463322810Shselasky pd = kmalloc(sizeof(*pd), GFP_KERNEL); 1464322810Shselasky if (!pd) 1465322810Shselasky return ERR_PTR(-ENOMEM); 1466322810Shselasky 1467322810Shselasky err = mlx5_core_alloc_pd(to_mdev(ibdev)->mdev, &pd->pdn); 1468322810Shselasky if (err) { 1469322810Shselasky kfree(pd); 1470322810Shselasky return ERR_PTR(err); 1471322810Shselasky } 1472322810Shselasky 1473322810Shselasky if (context) { 1474322810Shselasky resp.pdn = pd->pdn; 1475322810Shselasky if (ib_copy_to_udata(udata, &resp, sizeof(resp))) { 1476322810Shselasky mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn); 1477322810Shselasky kfree(pd); 1478322810Shselasky return ERR_PTR(-EFAULT); 1479322810Shselasky } 1480322810Shselasky } 1481322810Shselasky 1482322810Shselasky return &pd->ibpd; 1483322810Shselasky} 1484322810Shselasky 1485322810Shselaskystatic int mlx5_ib_dealloc_pd(struct ib_pd *pd) 1486322810Shselasky{ 1487322810Shselasky struct mlx5_ib_dev *mdev = to_mdev(pd->device); 1488322810Shselasky struct mlx5_ib_pd *mpd = to_mpd(pd); 1489322810Shselasky 1490322810Shselasky mlx5_core_dealloc_pd(mdev->mdev, mpd->pdn); 1491322810Shselasky kfree(mpd); 1492322810Shselasky 1493322810Shselasky return 0; 1494322810Shselasky} 1495322810Shselasky 1496331769Shselaskyenum { 1497331769Shselasky MATCH_CRITERIA_ENABLE_OUTER_BIT, 1498331769Shselasky MATCH_CRITERIA_ENABLE_MISC_BIT, 1499331769Shselasky MATCH_CRITERIA_ENABLE_INNER_BIT 1500331769Shselasky}; 1501331769Shselasky 1502331769Shselasky#define HEADER_IS_ZERO(match_criteria, headers) \ 1503331769Shselasky !(memchr_inv(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \ 1504331769Shselasky 0, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \ 1505331769Shselasky 1506331769Shselaskystatic u8 get_match_criteria_enable(u32 *match_criteria) 1507331769Shselasky{ 1508331769Shselasky u8 match_criteria_enable; 1509331769Shselasky 1510331769Shselasky match_criteria_enable = 1511331769Shselasky (!HEADER_IS_ZERO(match_criteria, outer_headers)) << 1512331769Shselasky MATCH_CRITERIA_ENABLE_OUTER_BIT; 1513331769Shselasky match_criteria_enable |= 1514331769Shselasky (!HEADER_IS_ZERO(match_criteria, misc_parameters)) << 1515331769Shselasky MATCH_CRITERIA_ENABLE_MISC_BIT; 1516331769Shselasky match_criteria_enable |= 1517331769Shselasky (!HEADER_IS_ZERO(match_criteria, inner_headers)) << 1518331769Shselasky MATCH_CRITERIA_ENABLE_INNER_BIT; 1519331769Shselasky 1520331769Shselasky return match_criteria_enable; 1521331769Shselasky} 1522331769Shselasky 1523331769Shselaskystatic void set_proto(void *outer_c, void *outer_v, u8 mask, u8 val) 1524331769Shselasky{ 1525331769Shselasky MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_protocol, mask); 1526331769Shselasky MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_protocol, val); 1527331769Shselasky} 1528331769Shselasky 1529331769Shselaskystatic void set_tos(void *outer_c, void *outer_v, u8 mask, u8 val) 1530331769Shselasky{ 1531331769Shselasky MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_ecn, mask); 1532331769Shselasky MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_ecn, val); 1533331769Shselasky MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_dscp, mask >> 2); 1534331769Shselasky MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_dscp, val >> 2); 1535331769Shselasky} 1536331769Shselasky 1537331769Shselasky#define LAST_ETH_FIELD vlan_tag 1538331769Shselasky#define LAST_IB_FIELD sl 1539331769Shselasky#define LAST_IPV4_FIELD tos 1540331769Shselasky#define LAST_IPV6_FIELD 
traffic_class 1541331769Shselasky#define LAST_TCP_UDP_FIELD src_port 1542331769Shselasky 1543331769Shselasky/* Field is the last supported field */ 1544331769Shselasky#define FIELDS_NOT_SUPPORTED(filter, field)\ 1545331769Shselasky memchr_inv((void *)&filter.field +\ 1546331769Shselasky sizeof(filter.field), 0,\ 1547331769Shselasky sizeof(filter) -\ 1548331769Shselasky offsetof(typeof(filter), field) -\ 1549331769Shselasky sizeof(filter.field)) 1550331769Shselasky 1551331769Shselaskystatic int parse_flow_attr(u32 *match_c, u32 *match_v, 1552331769Shselasky const union ib_flow_spec *ib_spec) 1553331769Shselasky{ 1554331769Shselasky void *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_c, 1555331769Shselasky outer_headers); 1556331769Shselasky void *outer_headers_v = MLX5_ADDR_OF(fte_match_param, match_v, 1557331769Shselasky outer_headers); 1558331769Shselasky void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c, 1559331769Shselasky misc_parameters); 1560331769Shselasky void *misc_params_v = MLX5_ADDR_OF(fte_match_param, match_v, 1561331769Shselasky misc_parameters); 1562331769Shselasky 1563331769Shselasky switch (ib_spec->type) { 1564331769Shselasky case IB_FLOW_SPEC_ETH: 1565331769Shselasky if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD)) 1566331769Shselasky return -ENOTSUPP; 1567331769Shselasky 1568331769Shselasky ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c, 1569331769Shselasky dmac_47_16), 1570331769Shselasky ib_spec->eth.mask.dst_mac); 1571331769Shselasky ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v, 1572331769Shselasky dmac_47_16), 1573331769Shselasky ib_spec->eth.val.dst_mac); 1574331769Shselasky 1575331769Shselasky ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c, 1576331769Shselasky smac_47_16), 1577331769Shselasky ib_spec->eth.mask.src_mac); 1578331769Shselasky ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v, 1579331769Shselasky smac_47_16), 1580331769Shselasky ib_spec->eth.val.src_mac); 1581331769Shselasky 1582331769Shselasky if (ib_spec->eth.mask.vlan_tag) { 1583331769Shselasky MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, 1584331769Shselasky cvlan_tag, 1); 1585331769Shselasky MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, 1586331769Shselasky cvlan_tag, 1); 1587331769Shselasky 1588331769Shselasky MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, 1589331769Shselasky first_vid, ntohs(ib_spec->eth.mask.vlan_tag)); 1590331769Shselasky MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, 1591331769Shselasky first_vid, ntohs(ib_spec->eth.val.vlan_tag)); 1592331769Shselasky 1593331769Shselasky MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, 1594331769Shselasky first_cfi, 1595331769Shselasky ntohs(ib_spec->eth.mask.vlan_tag) >> 12); 1596331769Shselasky MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, 1597331769Shselasky first_cfi, 1598331769Shselasky ntohs(ib_spec->eth.val.vlan_tag) >> 12); 1599331769Shselasky 1600331769Shselasky MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, 1601331769Shselasky first_prio, 1602331769Shselasky ntohs(ib_spec->eth.mask.vlan_tag) >> 13); 1603331769Shselasky MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, 1604331769Shselasky first_prio, 1605331769Shselasky ntohs(ib_spec->eth.val.vlan_tag) >> 13); 1606331769Shselasky } 1607331769Shselasky MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, 1608331769Shselasky ethertype, ntohs(ib_spec->eth.mask.ether_type)); 1609331769Shselasky MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, 1610331769Shselasky 
			 ethertype, ntohs(ib_spec->eth.val.ether_type));
1611331769Shselasky		break;
1612331769Shselasky	case IB_FLOW_SPEC_IPV4:
1613331769Shselasky		if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD))
1614331769Shselasky			return -ENOTSUPP;
1615331769Shselasky
1616331769Shselasky		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
1617331769Shselasky			 ethertype, 0xffff);
1618331769Shselasky		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
1619331769Shselasky			 ethertype, ETH_P_IP);
1620331769Shselasky
1621331769Shselasky		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
1622331769Shselasky				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
1623331769Shselasky		       &ib_spec->ipv4.mask.src_ip,
1624331769Shselasky		       sizeof(ib_spec->ipv4.mask.src_ip));
1625331769Shselasky		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
1626331769Shselasky				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
1627331769Shselasky		       &ib_spec->ipv4.val.src_ip,
1628331769Shselasky		       sizeof(ib_spec->ipv4.val.src_ip));
1629331769Shselasky		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
1630331769Shselasky				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
1631331769Shselasky		       &ib_spec->ipv4.mask.dst_ip,
1632331769Shselasky		       sizeof(ib_spec->ipv4.mask.dst_ip));
1633331769Shselasky		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
1634331769Shselasky				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
1635331769Shselasky		       &ib_spec->ipv4.val.dst_ip,
1636331769Shselasky		       sizeof(ib_spec->ipv4.val.dst_ip));
1637331769Shselasky
1638331769Shselasky		set_tos(outer_headers_c, outer_headers_v,
1639331769Shselasky			ib_spec->ipv4.mask.tos, ib_spec->ipv4.val.tos);
1640331769Shselasky
1641331769Shselasky		set_proto(outer_headers_c, outer_headers_v,
1642331769Shselasky			  ib_spec->ipv4.mask.proto, ib_spec->ipv4.val.proto);
1643331769Shselasky		break;
1644331769Shselasky	case IB_FLOW_SPEC_IPV6:
1645331769Shselasky		if (FIELDS_NOT_SUPPORTED(ib_spec->ipv6.mask, LAST_IPV6_FIELD))
1646331769Shselasky			return -ENOTSUPP;
1647331769Shselasky
1648331769Shselasky		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
1649331769Shselasky			 ethertype, 0xffff);
1650331769Shselasky		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
1651331769Shselasky			 ethertype, ETH_P_IPV6);
1652331769Shselasky
1653331769Shselasky		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
1654331769Shselasky				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
1655331769Shselasky		       &ib_spec->ipv6.mask.src_ip,
1656331769Shselasky		       sizeof(ib_spec->ipv6.mask.src_ip));
1657331769Shselasky		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
1658331769Shselasky				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
1659331769Shselasky		       &ib_spec->ipv6.val.src_ip,
1660331769Shselasky		       sizeof(ib_spec->ipv6.val.src_ip));
1661331769Shselasky		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
1662331769Shselasky				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
1663331769Shselasky		       &ib_spec->ipv6.mask.dst_ip,
1664331769Shselasky		       sizeof(ib_spec->ipv6.mask.dst_ip));
1665331769Shselasky		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
1666331769Shselasky				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
1667331769Shselasky		       &ib_spec->ipv6.val.dst_ip,
1668331769Shselasky		       sizeof(ib_spec->ipv6.val.dst_ip));
1669331769Shselasky
1670331769Shselasky		set_tos(outer_headers_c, outer_headers_v,
1671331769Shselasky			ib_spec->ipv6.mask.traffic_class,
1672331769Shselasky			ib_spec->ipv6.val.traffic_class);
1673331769Shselasky
1674331769Shselasky		set_proto(outer_headers_c, outer_headers_v,
1675331769Shselasky			  ib_spec->ipv6.mask.next_hdr,
1676331769Shselasky			  ib_spec->ipv6.val.next_hdr);
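		/*
		 * Note: the ethertype match above must carry the L2
		 * ethertype for IPv6 (ETH_P_IPV6, 0x86dd); the IP protocol
		 * number IPPROTO_IPV6 (41) describes IPv6-in-IP
		 * encapsulation and would never match native IPv6 frames.
		 * Unlike the addresses, traffic class and next header, the
		 * IPv6 flow label lives in the misc_parameters section of
		 * fte_match_param, which is why it is set below through
		 * fte_match_set_misc rather than fte_match_set_lyr_2_4.
		 */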
1677331769Shselasky 1678331769Shselasky MLX5_SET(fte_match_set_misc, misc_params_c, 1679331769Shselasky outer_ipv6_flow_label, 1680331769Shselasky ntohl(ib_spec->ipv6.mask.flow_label)); 1681331769Shselasky MLX5_SET(fte_match_set_misc, misc_params_v, 1682331769Shselasky outer_ipv6_flow_label, 1683331769Shselasky ntohl(ib_spec->ipv6.val.flow_label)); 1684331769Shselasky break; 1685331769Shselasky case IB_FLOW_SPEC_TCP: 1686331769Shselasky if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask, 1687331769Shselasky LAST_TCP_UDP_FIELD)) 1688331769Shselasky return -ENOTSUPP; 1689331769Shselasky 1690331769Shselasky MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol, 1691331769Shselasky 0xff); 1692331769Shselasky MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol, 1693331769Shselasky IPPROTO_TCP); 1694331769Shselasky 1695331769Shselasky MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_sport, 1696331769Shselasky ntohs(ib_spec->tcp_udp.mask.src_port)); 1697331769Shselasky MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_sport, 1698331769Shselasky ntohs(ib_spec->tcp_udp.val.src_port)); 1699331769Shselasky 1700331769Shselasky MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_dport, 1701331769Shselasky ntohs(ib_spec->tcp_udp.mask.dst_port)); 1702331769Shselasky MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_dport, 1703331769Shselasky ntohs(ib_spec->tcp_udp.val.dst_port)); 1704331769Shselasky break; 1705331769Shselasky case IB_FLOW_SPEC_UDP: 1706331769Shselasky if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask, 1707331769Shselasky LAST_TCP_UDP_FIELD)) 1708331769Shselasky return -ENOTSUPP; 1709331769Shselasky 1710331769Shselasky MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol, 1711331769Shselasky 0xff); 1712331769Shselasky MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol, 1713331769Shselasky IPPROTO_UDP); 1714331769Shselasky 1715331769Shselasky MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_sport, 1716331769Shselasky ntohs(ib_spec->tcp_udp.mask.src_port)); 1717331769Shselasky MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_sport, 1718331769Shselasky ntohs(ib_spec->tcp_udp.val.src_port)); 1719331769Shselasky 1720331769Shselasky MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_dport, 1721331769Shselasky ntohs(ib_spec->tcp_udp.mask.dst_port)); 1722331769Shselasky MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_dport, 1723331769Shselasky ntohs(ib_spec->tcp_udp.val.dst_port)); 1724331769Shselasky break; 1725331769Shselasky default: 1726331769Shselasky return -EINVAL; 1727331769Shselasky } 1728331769Shselasky 1729331769Shselasky return 0; 1730331769Shselasky} 1731331769Shselasky 1732331769Shselasky/* If a flow could catch both multicast and unicast packets, 1733331769Shselasky * it won't fall into the multicast flow steering table and this rule 1734331769Shselasky * could steal other multicast packets. 
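 * Hence the check below: an IB_FLOW_ATTR_NORMAL attribute counts as
 * multicast-only when its first spec is a full-sized Ethernet spec
 * whose destination MAC is a multicast address in both the mask and
 * the value.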
1735331769Shselasky */ 1736331769Shselaskystatic bool flow_is_multicast_only(struct ib_flow_attr *ib_attr) 1737331769Shselasky{ 1738331769Shselasky struct ib_flow_spec_eth *eth_spec; 1739331769Shselasky 1740331769Shselasky if (ib_attr->type != IB_FLOW_ATTR_NORMAL || 1741331769Shselasky ib_attr->size < sizeof(struct ib_flow_attr) + 1742331769Shselasky sizeof(struct ib_flow_spec_eth) || 1743331769Shselasky ib_attr->num_of_specs < 1) 1744331769Shselasky return false; 1745331769Shselasky 1746331769Shselasky eth_spec = (struct ib_flow_spec_eth *)(ib_attr + 1); 1747331769Shselasky if (eth_spec->type != IB_FLOW_SPEC_ETH || 1748331769Shselasky eth_spec->size != sizeof(*eth_spec)) 1749331769Shselasky return false; 1750331769Shselasky 1751331769Shselasky return is_multicast_ether_addr(eth_spec->mask.dst_mac) && 1752331769Shselasky is_multicast_ether_addr(eth_spec->val.dst_mac); 1753331769Shselasky} 1754331769Shselasky 1755331769Shselaskystatic bool is_valid_attr(const struct ib_flow_attr *flow_attr) 1756331769Shselasky{ 1757331769Shselasky union ib_flow_spec *ib_spec = (union ib_flow_spec *)(flow_attr + 1); 1758331769Shselasky bool has_ipv4_spec = false; 1759331769Shselasky bool eth_type_ipv4 = true; 1760331769Shselasky unsigned int spec_index; 1761331769Shselasky 1762331769Shselasky /* Validate that ethertype is correct */ 1763331769Shselasky for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) { 1764331769Shselasky if (ib_spec->type == IB_FLOW_SPEC_ETH && 1765331769Shselasky ib_spec->eth.mask.ether_type) { 1766331769Shselasky if (!((ib_spec->eth.mask.ether_type == htons(0xffff)) && 1767331769Shselasky ib_spec->eth.val.ether_type == htons(ETH_P_IP))) 1768331769Shselasky eth_type_ipv4 = false; 1769331769Shselasky } else if (ib_spec->type == IB_FLOW_SPEC_IPV4) { 1770331769Shselasky has_ipv4_spec = true; 1771331769Shselasky } 1772331769Shselasky ib_spec = (void *)ib_spec + ib_spec->size; 1773331769Shselasky } 1774331769Shselasky return !has_ipv4_spec || eth_type_ipv4; 1775331769Shselasky} 1776331769Shselasky 1777331769Shselaskystatic void put_flow_table(struct mlx5_ib_dev *dev, 1778331769Shselasky struct mlx5_ib_flow_prio *prio, bool ft_added) 1779331769Shselasky{ 1780331769Shselasky prio->refcount -= !!ft_added; 1781331769Shselasky if (!prio->refcount) { 1782331769Shselasky mlx5_destroy_flow_table(prio->flow_table); 1783331769Shselasky prio->flow_table = NULL; 1784331769Shselasky } 1785331769Shselasky} 1786331769Shselasky 1787331769Shselaskystatic int mlx5_ib_destroy_flow(struct ib_flow *flow_id) 1788331769Shselasky{ 1789331769Shselasky struct mlx5_ib_dev *dev = to_mdev(flow_id->qp->device); 1790331769Shselasky struct mlx5_ib_flow_handler *handler = container_of(flow_id, 1791331769Shselasky struct mlx5_ib_flow_handler, 1792331769Shselasky ibflow); 1793331769Shselasky struct mlx5_ib_flow_handler *iter, *tmp; 1794331769Shselasky 1795331769Shselasky mutex_lock(&dev->flow_db.lock); 1796331769Shselasky 1797331769Shselasky list_for_each_entry_safe(iter, tmp, &handler->list, list) { 1798331769Shselasky mlx5_del_flow_rule(iter->rule); 1799331769Shselasky put_flow_table(dev, iter->prio, true); 1800331769Shselasky list_del(&iter->list); 1801331769Shselasky kfree(iter); 1802331769Shselasky } 1803331769Shselasky 1804331769Shselasky mlx5_del_flow_rule(handler->rule); 1805331769Shselasky put_flow_table(dev, handler->prio, true); 1806331769Shselasky mutex_unlock(&dev->flow_db.lock); 1807331769Shselasky 1808331769Shselasky kfree(handler); 1809331769Shselasky 1810331769Shselasky return 0; 
1811331769Shselasky} 1812331769Shselasky 1813331769Shselaskystatic int ib_prio_to_core_prio(unsigned int priority, bool dont_trap) 1814331769Shselasky{ 1815331769Shselasky priority *= 2; 1816331769Shselasky if (!dont_trap) 1817331769Shselasky priority++; 1818331769Shselasky return priority; 1819331769Shselasky} 1820331769Shselasky 1821331769Shselaskyenum flow_table_type { 1822331769Shselasky MLX5_IB_FT_RX, 1823331769Shselasky MLX5_IB_FT_TX 1824331769Shselasky}; 1825331769Shselasky 1826331769Shselasky#define MLX5_FS_MAX_TYPES 10 1827331769Shselasky#define MLX5_FS_MAX_ENTRIES 32000UL 1828331769Shselaskystatic struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev, 1829331769Shselasky struct ib_flow_attr *flow_attr, 1830331769Shselasky enum flow_table_type ft_type) 1831331769Shselasky{ 1832331769Shselasky bool dont_trap = flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP; 1833331769Shselasky struct mlx5_flow_namespace *ns = NULL; 1834331769Shselasky struct mlx5_ib_flow_prio *prio; 1835331769Shselasky struct mlx5_flow_table *ft; 1836331769Shselasky int num_entries; 1837331769Shselasky int num_groups; 1838331769Shselasky int priority; 1839331769Shselasky int err = 0; 1840331769Shselasky 1841331769Shselasky if (flow_attr->type == IB_FLOW_ATTR_NORMAL) { 1842331769Shselasky if (flow_is_multicast_only(flow_attr) && 1843331769Shselasky !dont_trap) 1844331769Shselasky priority = MLX5_IB_FLOW_MCAST_PRIO; 1845331769Shselasky else 1846331769Shselasky priority = ib_prio_to_core_prio(flow_attr->priority, 1847331769Shselasky dont_trap); 1848331769Shselasky ns = mlx5_get_flow_namespace(dev->mdev, 1849331769Shselasky MLX5_FLOW_NAMESPACE_BYPASS); 1850331769Shselasky num_entries = MLX5_FS_MAX_ENTRIES; 1851331769Shselasky num_groups = MLX5_FS_MAX_TYPES; 1852331769Shselasky prio = &dev->flow_db.prios[priority]; 1853331769Shselasky } else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT || 1854331769Shselasky flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) { 1855331769Shselasky ns = mlx5_get_flow_namespace(dev->mdev, 1856331769Shselasky MLX5_FLOW_NAMESPACE_LEFTOVERS); 1857331769Shselasky build_leftovers_ft_param("bypass", &priority, 1858331769Shselasky &num_entries, 1859331769Shselasky &num_groups); 1860331769Shselasky prio = &dev->flow_db.prios[MLX5_IB_FLOW_LEFTOVERS_PRIO]; 1861331769Shselasky } else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) { 1862331769Shselasky if (!MLX5_CAP_FLOWTABLE(dev->mdev, 1863331769Shselasky allow_sniffer_and_nic_rx_shared_tir)) 1864331769Shselasky return ERR_PTR(-ENOTSUPP); 1865331769Shselasky 1866331769Shselasky ns = mlx5_get_flow_namespace(dev->mdev, ft_type == MLX5_IB_FT_RX ? 
1867331769Shselasky MLX5_FLOW_NAMESPACE_SNIFFER_RX : 1868331769Shselasky MLX5_FLOW_NAMESPACE_SNIFFER_TX); 1869331769Shselasky 1870331769Shselasky prio = &dev->flow_db.sniffer[ft_type]; 1871331769Shselasky priority = 0; 1872331769Shselasky num_entries = 1; 1873331769Shselasky num_groups = 1; 1874331769Shselasky } 1875331769Shselasky 1876331769Shselasky if (!ns) 1877331769Shselasky return ERR_PTR(-ENOTSUPP); 1878331769Shselasky 1879331769Shselasky ft = prio->flow_table; 1880331769Shselasky if (!ft) { 1881331769Shselasky ft = mlx5_create_auto_grouped_flow_table(ns, priority, "bypass", 1882331769Shselasky num_entries, 1883331769Shselasky num_groups); 1884331769Shselasky 1885331769Shselasky if (!IS_ERR(ft)) { 1886331769Shselasky prio->refcount = 0; 1887331769Shselasky prio->flow_table = ft; 1888331769Shselasky } else { 1889331769Shselasky err = PTR_ERR(ft); 1890331769Shselasky } 1891331769Shselasky } 1892331769Shselasky 1893331769Shselasky return err ? ERR_PTR(err) : prio; 1894331769Shselasky} 1895331769Shselasky 1896331769Shselaskystatic struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev, 1897331769Shselasky struct mlx5_ib_flow_prio *ft_prio, 1898331769Shselasky const struct ib_flow_attr *flow_attr, 1899331769Shselasky struct mlx5_flow_destination *dst) 1900331769Shselasky{ 1901331769Shselasky struct mlx5_flow_table *ft = ft_prio->flow_table; 1902331769Shselasky struct mlx5_ib_flow_handler *handler; 1903331769Shselasky struct mlx5_flow_spec *spec; 1904331769Shselasky const void *ib_flow = (const void *)flow_attr + sizeof(*flow_attr); 1905331769Shselasky unsigned int spec_index; 1906331769Shselasky u32 action; 1907331769Shselasky int err = 0; 1908331769Shselasky 1909331769Shselasky if (!is_valid_attr(flow_attr)) 1910331769Shselasky return ERR_PTR(-EINVAL); 1911331769Shselasky 1912331769Shselasky spec = mlx5_vzalloc(sizeof(*spec)); 1913331769Shselasky handler = kzalloc(sizeof(*handler), GFP_KERNEL); 1914331769Shselasky if (!handler || !spec) { 1915331769Shselasky err = -ENOMEM; 1916331769Shselasky goto free; 1917331769Shselasky } 1918331769Shselasky 1919331769Shselasky INIT_LIST_HEAD(&handler->list); 1920331769Shselasky 1921331769Shselasky for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) { 1922331769Shselasky err = parse_flow_attr(spec->match_criteria, 1923331769Shselasky spec->match_value, ib_flow); 1924331769Shselasky if (err < 0) 1925331769Shselasky goto free; 1926331769Shselasky 1927331769Shselasky ib_flow += ((union ib_flow_spec *)ib_flow)->size; 1928331769Shselasky } 1929331769Shselasky 1930331769Shselasky spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria); 1931331769Shselasky action = dst ? 
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST : 1932331769Shselasky MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO; 1933331769Shselasky handler->rule = mlx5_add_flow_rule(ft, spec->match_criteria_enable, 1934331769Shselasky spec->match_criteria, 1935331769Shselasky spec->match_value, 1936331769Shselasky action, 1937331769Shselasky MLX5_FS_DEFAULT_FLOW_TAG, 1938331769Shselasky dst); 1939331769Shselasky 1940331769Shselasky if (IS_ERR(handler->rule)) { 1941331769Shselasky err = PTR_ERR(handler->rule); 1942331769Shselasky goto free; 1943331769Shselasky } 1944331769Shselasky 1945331769Shselasky ft_prio->refcount++; 1946331769Shselasky handler->prio = ft_prio; 1947331769Shselasky 1948331769Shselasky ft_prio->flow_table = ft; 1949331769Shselaskyfree: 1950331769Shselasky if (err) 1951331769Shselasky kfree(handler); 1952331769Shselasky kvfree(spec); 1953331769Shselasky return err ? ERR_PTR(err) : handler; 1954331769Shselasky} 1955331769Shselasky 1956331769Shselaskystatic struct mlx5_ib_flow_handler *create_dont_trap_rule(struct mlx5_ib_dev *dev, 1957331769Shselasky struct mlx5_ib_flow_prio *ft_prio, 1958331769Shselasky struct ib_flow_attr *flow_attr, 1959331769Shselasky struct mlx5_flow_destination *dst) 1960331769Shselasky{ 1961331769Shselasky struct mlx5_ib_flow_handler *handler_dst = NULL; 1962331769Shselasky struct mlx5_ib_flow_handler *handler = NULL; 1963331769Shselasky 1964331769Shselasky handler = create_flow_rule(dev, ft_prio, flow_attr, NULL); 1965331769Shselasky if (!IS_ERR(handler)) { 1966331769Shselasky handler_dst = create_flow_rule(dev, ft_prio, 1967331769Shselasky flow_attr, dst); 1968331769Shselasky if (IS_ERR(handler_dst)) { 1969331769Shselasky mlx5_del_flow_rule(handler->rule); 1970331769Shselasky ft_prio->refcount--; 1971331769Shselasky kfree(handler); 1972331769Shselasky handler = handler_dst; 1973331769Shselasky } else { 1974331769Shselasky list_add(&handler_dst->list, &handler->list); 1975331769Shselasky } 1976331769Shselasky } 1977331769Shselasky 1978331769Shselasky return handler; 1979331769Shselasky} 1980331769Shselaskyenum { 1981331769Shselasky LEFTOVERS_MC, 1982331769Shselasky LEFTOVERS_UC, 1983331769Shselasky}; 1984331769Shselasky 1985331769Shselaskystatic struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *dev, 1986331769Shselasky struct mlx5_ib_flow_prio *ft_prio, 1987331769Shselasky struct ib_flow_attr *flow_attr, 1988331769Shselasky struct mlx5_flow_destination *dst) 1989331769Shselasky{ 1990331769Shselasky struct mlx5_ib_flow_handler *handler_ucast = NULL; 1991331769Shselasky struct mlx5_ib_flow_handler *handler = NULL; 1992331769Shselasky 1993331769Shselasky static struct { 1994331769Shselasky struct ib_flow_attr flow_attr; 1995331769Shselasky struct ib_flow_spec_eth eth_flow; 1996331769Shselasky } leftovers_specs[] = { 1997331769Shselasky [LEFTOVERS_MC] = { 1998331769Shselasky .flow_attr = { 1999331769Shselasky .num_of_specs = 1, 2000331769Shselasky .size = sizeof(leftovers_specs[0]) 2001331769Shselasky }, 2002331769Shselasky .eth_flow = { 2003331769Shselasky .type = IB_FLOW_SPEC_ETH, 2004331769Shselasky .size = sizeof(struct ib_flow_spec_eth), 2005331769Shselasky .mask = {.dst_mac = {0x1} }, 2006331769Shselasky .val = {.dst_mac = {0x1} } 2007331769Shselasky } 2008331769Shselasky }, 2009331769Shselasky [LEFTOVERS_UC] = { 2010331769Shselasky .flow_attr = { 2011331769Shselasky .num_of_specs = 1, 2012331769Shselasky .size = sizeof(leftovers_specs[0]) 2013331769Shselasky }, 2014331769Shselasky .eth_flow = { 2015331769Shselasky .type = IB_FLOW_SPEC_ETH, 
2016331769Shselasky .size = sizeof(struct ib_flow_spec_eth), 2017331769Shselasky .mask = {.dst_mac = {0x1} }, 2018331769Shselasky .val = {.dst_mac = {} } 2019331769Shselasky } 2020331769Shselasky } 2021331769Shselasky }; 2022331769Shselasky 2023331769Shselasky handler = create_flow_rule(dev, ft_prio, 2024331769Shselasky &leftovers_specs[LEFTOVERS_MC].flow_attr, 2025331769Shselasky dst); 2026331769Shselasky if (!IS_ERR(handler) && 2027331769Shselasky flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT) { 2028331769Shselasky handler_ucast = create_flow_rule(dev, ft_prio, 2029331769Shselasky &leftovers_specs[LEFTOVERS_UC].flow_attr, 2030331769Shselasky dst); 2031331769Shselasky if (IS_ERR(handler_ucast)) { 2032331769Shselasky mlx5_del_flow_rule(handler->rule); 2033331769Shselasky ft_prio->refcount--; 2034331769Shselasky kfree(handler); 2035331769Shselasky handler = handler_ucast; 2036331769Shselasky } else { 2037331769Shselasky list_add(&handler_ucast->list, &handler->list); 2038331769Shselasky } 2039331769Shselasky } 2040331769Shselasky 2041331769Shselasky return handler; 2042331769Shselasky} 2043331769Shselasky 2044331769Shselaskystatic struct mlx5_ib_flow_handler *create_sniffer_rule(struct mlx5_ib_dev *dev, 2045331769Shselasky struct mlx5_ib_flow_prio *ft_rx, 2046331769Shselasky struct mlx5_ib_flow_prio *ft_tx, 2047331769Shselasky struct mlx5_flow_destination *dst) 2048331769Shselasky{ 2049331769Shselasky struct mlx5_ib_flow_handler *handler_rx; 2050331769Shselasky struct mlx5_ib_flow_handler *handler_tx; 2051331769Shselasky int err; 2052331769Shselasky static const struct ib_flow_attr flow_attr = { 2053331769Shselasky .num_of_specs = 0, 2054331769Shselasky .size = sizeof(flow_attr) 2055331769Shselasky }; 2056331769Shselasky 2057331769Shselasky handler_rx = create_flow_rule(dev, ft_rx, &flow_attr, dst); 2058331769Shselasky if (IS_ERR(handler_rx)) { 2059331769Shselasky err = PTR_ERR(handler_rx); 2060331769Shselasky goto err; 2061331769Shselasky } 2062331769Shselasky 2063331769Shselasky handler_tx = create_flow_rule(dev, ft_tx, &flow_attr, dst); 2064331769Shselasky if (IS_ERR(handler_tx)) { 2065331769Shselasky err = PTR_ERR(handler_tx); 2066331769Shselasky goto err_tx; 2067331769Shselasky } 2068331769Shselasky 2069331769Shselasky list_add(&handler_tx->list, &handler_rx->list); 2070331769Shselasky 2071331769Shselasky return handler_rx; 2072331769Shselasky 2073331769Shselaskyerr_tx: 2074331769Shselasky mlx5_del_flow_rule(handler_rx->rule); 2075331769Shselasky ft_rx->refcount--; 2076331769Shselasky kfree(handler_rx); 2077331769Shselaskyerr: 2078331769Shselasky return ERR_PTR(err); 2079331769Shselasky} 2080331769Shselasky 2081331769Shselaskystatic struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp, 2082331769Shselasky struct ib_flow_attr *flow_attr, 2083331769Shselasky int domain) 2084331769Shselasky{ 2085331769Shselasky struct mlx5_ib_dev *dev = to_mdev(qp->device); 2086331769Shselasky struct mlx5_ib_qp *mqp = to_mqp(qp); 2087331769Shselasky struct mlx5_ib_flow_handler *handler = NULL; 2088331769Shselasky struct mlx5_flow_destination *dst = NULL; 2089331769Shselasky struct mlx5_ib_flow_prio *ft_prio_tx = NULL; 2090331769Shselasky struct mlx5_ib_flow_prio *ft_prio; 2091331769Shselasky int err; 2092331769Shselasky 2093331769Shselasky if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO) 2094331769Shselasky return ERR_PTR(-ENOSPC); 2095331769Shselasky 2096331769Shselasky if (domain != IB_FLOW_DOMAIN_USER || 2097331769Shselasky flow_attr->port > MLX5_CAP_GEN(dev->mdev, num_ports) || 2098331769Shselasky 
(flow_attr->flags & ~IB_FLOW_ATTR_FLAGS_DONT_TRAP)) 2099331769Shselasky return ERR_PTR(-EINVAL); 2100331769Shselasky 2101331769Shselasky dst = kzalloc(sizeof(*dst), GFP_KERNEL); 2102331769Shselasky if (!dst) 2103331769Shselasky return ERR_PTR(-ENOMEM); 2104331769Shselasky 2105331769Shselasky mutex_lock(&dev->flow_db.lock); 2106331769Shselasky 2107331769Shselasky ft_prio = get_flow_table(dev, flow_attr, MLX5_IB_FT_RX); 2108331769Shselasky if (IS_ERR(ft_prio)) { 2109331769Shselasky err = PTR_ERR(ft_prio); 2110331769Shselasky goto unlock; 2111331769Shselasky } 2112331769Shselasky if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) { 2113331769Shselasky ft_prio_tx = get_flow_table(dev, flow_attr, MLX5_IB_FT_TX); 2114331769Shselasky if (IS_ERR(ft_prio_tx)) { 2115331769Shselasky err = PTR_ERR(ft_prio_tx); 2116331769Shselasky ft_prio_tx = NULL; 2117331769Shselasky goto destroy_ft; 2118331769Shselasky } 2119331769Shselasky } 2120331769Shselasky 2121331769Shselasky dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR; 2122331769Shselasky if (mqp->flags & MLX5_IB_QP_RSS) 2123331769Shselasky dst->tir_num = mqp->rss_qp.tirn; 2124331769Shselasky else 2125331769Shselasky dst->tir_num = mqp->raw_packet_qp.rq.tirn; 2126331769Shselasky 2127331769Shselasky if (flow_attr->type == IB_FLOW_ATTR_NORMAL) { 2128331769Shselasky if (flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) { 2129331769Shselasky handler = create_dont_trap_rule(dev, ft_prio, 2130331769Shselasky flow_attr, dst); 2131331769Shselasky } else { 2132331769Shselasky handler = create_flow_rule(dev, ft_prio, flow_attr, 2133331769Shselasky dst); 2134331769Shselasky } 2135331769Shselasky } else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT || 2136331769Shselasky flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) { 2137331769Shselasky handler = create_leftovers_rule(dev, ft_prio, flow_attr, 2138331769Shselasky dst); 2139331769Shselasky } else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) { 2140331769Shselasky handler = create_sniffer_rule(dev, ft_prio, ft_prio_tx, dst); 2141331769Shselasky } else { 2142331769Shselasky err = -EINVAL; 2143331769Shselasky goto destroy_ft; 2144331769Shselasky } 2145331769Shselasky 2146331769Shselasky if (IS_ERR(handler)) { 2147331769Shselasky err = PTR_ERR(handler); 2148331769Shselasky handler = NULL; 2149331769Shselasky goto destroy_ft; 2150331769Shselasky } 2151331769Shselasky 2152331769Shselasky mutex_unlock(&dev->flow_db.lock); 2153331769Shselasky kfree(dst); 2154331769Shselasky 2155331769Shselasky return &handler->ibflow; 2156331769Shselasky 2157331769Shselaskydestroy_ft: 2158331769Shselasky put_flow_table(dev, ft_prio, false); 2159331769Shselasky if (ft_prio_tx) 2160331769Shselasky put_flow_table(dev, ft_prio_tx, false); 2161331769Shselaskyunlock: 2162331769Shselasky mutex_unlock(&dev->flow_db.lock); 2163331769Shselasky kfree(dst); 2164331769Shselasky kfree(handler); 2165331769Shselasky return ERR_PTR(err); 2166331769Shselasky} 2167331769Shselasky 2168322810Shselaskystatic int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) 2169322810Shselasky{ 2170322810Shselasky struct mlx5_ib_dev *dev = to_mdev(ibqp->device); 2171322810Shselasky int err; 2172322810Shselasky 2173331769Shselasky err = mlx5_core_attach_mcg(dev->mdev, gid, ibqp->qp_num); 2174322810Shselasky if (err) 2175322810Shselasky mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n", 2176322810Shselasky ibqp->qp_num, gid->raw); 2177322810Shselasky 2178322810Shselasky return err; 2179322810Shselasky} 2180322810Shselasky 2181322810Shselaskystatic int 
mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) 2182322810Shselasky{ 2183322810Shselasky struct mlx5_ib_dev *dev = to_mdev(ibqp->device); 2184322810Shselasky int err; 2185322810Shselasky 2186331769Shselasky err = mlx5_core_detach_mcg(dev->mdev, gid, ibqp->qp_num); 2187322810Shselasky if (err) 2188322810Shselasky mlx5_ib_warn(dev, "failed detaching QPN 0x%x, MGID %pI6\n", 2189322810Shselasky ibqp->qp_num, gid->raw); 2190322810Shselasky 2191322810Shselasky return err; 2192322810Shselasky} 2193322810Shselasky 2194322810Shselaskystatic int init_node_data(struct mlx5_ib_dev *dev) 2195322810Shselasky{ 2196322810Shselasky int err; 2197322810Shselasky 2198322810Shselasky err = mlx5_query_node_desc(dev, dev->ib_dev.node_desc); 2199322810Shselasky if (err) 2200322810Shselasky return err; 2201322810Shselasky 2202322810Shselasky return mlx5_query_node_guid(dev, &dev->ib_dev.node_guid); 2203322810Shselasky} 2204322810Shselasky 2205322810Shselaskystatic ssize_t show_fw_pages(struct device *device, struct device_attribute *attr, 2206322810Shselasky char *buf) 2207322810Shselasky{ 2208322810Shselasky struct mlx5_ib_dev *dev = 2209322810Shselasky container_of(device, struct mlx5_ib_dev, ib_dev.dev); 2210322810Shselasky 2211322810Shselasky return sprintf(buf, "%lld\n", (long long)dev->mdev->priv.fw_pages); 2212322810Shselasky} 2213322810Shselasky 2214322810Shselaskystatic ssize_t show_reg_pages(struct device *device, 2215322810Shselasky struct device_attribute *attr, char *buf) 2216322810Shselasky{ 2217322810Shselasky struct mlx5_ib_dev *dev = 2218322810Shselasky container_of(device, struct mlx5_ib_dev, ib_dev.dev); 2219322810Shselasky 2220322810Shselasky return sprintf(buf, "%d\n", atomic_read(&dev->mdev->priv.reg_pages)); 2221322810Shselasky} 2222322810Shselasky 2223322810Shselaskystatic ssize_t show_hca(struct device *device, struct device_attribute *attr, 2224322810Shselasky char *buf) 2225322810Shselasky{ 2226322810Shselasky struct mlx5_ib_dev *dev = 2227322810Shselasky container_of(device, struct mlx5_ib_dev, ib_dev.dev); 2228322810Shselasky return sprintf(buf, "MT%d\n", dev->mdev->pdev->device); 2229322810Shselasky} 2230322810Shselasky 2231322810Shselaskystatic ssize_t show_rev(struct device *device, struct device_attribute *attr, 2232322810Shselasky char *buf) 2233322810Shselasky{ 2234322810Shselasky struct mlx5_ib_dev *dev = 2235322810Shselasky container_of(device, struct mlx5_ib_dev, ib_dev.dev); 2236331769Shselasky return sprintf(buf, "%x\n", dev->mdev->pdev->revision); 2237322810Shselasky} 2238322810Shselasky 2239322810Shselaskystatic ssize_t show_board(struct device *device, struct device_attribute *attr, 2240322810Shselasky char *buf) 2241322810Shselasky{ 2242322810Shselasky struct mlx5_ib_dev *dev = 2243322810Shselasky container_of(device, struct mlx5_ib_dev, ib_dev.dev); 2244322810Shselasky return sprintf(buf, "%.*s\n", MLX5_BOARD_ID_LEN, 2245322810Shselasky dev->mdev->board_id); 2246322810Shselasky} 2247322810Shselasky 2248322810Shselaskystatic DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL); 2249322810Shselaskystatic DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL); 2250322810Shselaskystatic DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL); 2251322810Shselaskystatic DEVICE_ATTR(fw_pages, S_IRUGO, show_fw_pages, NULL); 2252322810Shselaskystatic DEVICE_ATTR(reg_pages, S_IRUGO, show_reg_pages, NULL); 2253322810Shselasky 2254322810Shselaskystatic struct device_attribute *mlx5_class_attributes[] = { 2255322810Shselasky &dev_attr_hw_rev, 2256322810Shselasky &dev_attr_hca_type, 
2257322810Shselasky	&dev_attr_board_id,
2258322810Shselasky	&dev_attr_fw_pages,
2259322810Shselasky	&dev_attr_reg_pages,
2260322810Shselasky};
2261322810Shselasky
2262331769Shselaskystatic void pkey_change_handler(struct work_struct *work)
2263331769Shselasky{
2264331769Shselasky	struct mlx5_ib_port_resources *ports =
2265331769Shselasky		container_of(work, struct mlx5_ib_port_resources,
2266331769Shselasky			     pkey_change_work);
2267331769Shselasky
2268331769Shselasky	mutex_lock(&ports->devr->mutex);
2269331769Shselasky	mlx5_ib_gsi_pkey_change(ports->gsi);
2270331769Shselasky	mutex_unlock(&ports->devr->mutex);
2271331769Shselasky}
2272331769Shselasky
2273322810Shselaskystatic void mlx5_ib_handle_internal_error(struct mlx5_ib_dev *ibdev)
2274322810Shselasky{
2275322810Shselasky	struct mlx5_ib_qp *mqp;
2276322810Shselasky	struct mlx5_ib_cq *send_mcq, *recv_mcq;
2277322810Shselasky	struct mlx5_core_cq *mcq;
2278322810Shselasky	struct list_head cq_armed_list;
2279322810Shselasky	unsigned long flags_qp;
2280322810Shselasky	unsigned long flags_cq;
2281322810Shselasky	unsigned long flags;
2282322810Shselasky
2283322810Shselasky	INIT_LIST_HEAD(&cq_armed_list);
2284322810Shselasky
2285322810Shselasky	/* Go over the qp list residing on that ibdev; sync with create/destroy qp. */
2286322810Shselasky	spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);
2287322810Shselasky	list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
2288322810Shselasky		spin_lock_irqsave(&mqp->sq.lock, flags_qp);
2289322810Shselasky		if (mqp->sq.tail != mqp->sq.head) {
2290322810Shselasky			send_mcq = to_mcq(mqp->ibqp.send_cq);
2291322810Shselasky			spin_lock_irqsave(&send_mcq->lock, flags_cq);
2292322810Shselasky			if (send_mcq->mcq.comp &&
2293322810Shselasky			    mqp->ibqp.send_cq->comp_handler) {
2294322810Shselasky				if (!send_mcq->mcq.reset_notify_added) {
2295322810Shselasky					send_mcq->mcq.reset_notify_added = 1;
2296322810Shselasky					list_add_tail(&send_mcq->mcq.reset_notify,
2297322810Shselasky						      &cq_armed_list);
2298322810Shselasky				}
2299322810Shselasky			}
2300322810Shselasky			spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
2301322810Shselasky		}
2302322810Shselasky		spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
2303322810Shselasky		spin_lock_irqsave(&mqp->rq.lock, flags_qp);
2304322810Shselasky		/* no handling is needed for SRQ */
2305322810Shselasky		if (!mqp->ibqp.srq) {
2306322810Shselasky			if (mqp->rq.tail != mqp->rq.head) {
2307322810Shselasky				recv_mcq = to_mcq(mqp->ibqp.recv_cq);
2308322810Shselasky				spin_lock_irqsave(&recv_mcq->lock, flags_cq);
2309322810Shselasky				if (recv_mcq->mcq.comp &&
2310322810Shselasky				    mqp->ibqp.recv_cq->comp_handler) {
2311322810Shselasky					if (!recv_mcq->mcq.reset_notify_added) {
2312322810Shselasky						recv_mcq->mcq.reset_notify_added = 1;
2313322810Shselasky						list_add_tail(&recv_mcq->mcq.reset_notify,
2314322810Shselasky							      &cq_armed_list);
2315322810Shselasky					}
2316322810Shselasky				}
2317322810Shselasky				spin_unlock_irqrestore(&recv_mcq->lock,
2318322810Shselasky						       flags_cq);
2319322810Shselasky			}
2320322810Shselasky		}
2321322810Shselasky		spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
2322322810Shselasky	}
2323322810Shselasky	/* At this point all inflight post-send operations are accounted for; the
2324322810Shselasky	 * lock/unlock cycles above synchronized with any posters. Now we need to arm all involved CQs.
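	 * Arming here means invoking each collected CQ's completion handler
	 * once (mcq->comp() below), so that consumers re-poll those CQs and
	 * observe the flushed-in-error state instead of waiting on hardware,
	 * which is in a fatal state, to generate events.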
2325322810Shselasky */ 2326322810Shselasky list_for_each_entry(mcq, &cq_armed_list, reset_notify) { 2327322810Shselasky mcq->comp(mcq); 2328322810Shselasky } 2329322810Shselasky spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags); 2330322810Shselasky} 2331322810Shselasky 2332322810Shselaskystatic void mlx5_ib_event(struct mlx5_core_dev *dev, void *context, 2333322810Shselasky enum mlx5_dev_event event, unsigned long param) 2334322810Shselasky{ 2335322810Shselasky struct mlx5_ib_dev *ibdev = (struct mlx5_ib_dev *)context; 2336322810Shselasky struct ib_event ibev; 2337331769Shselasky bool fatal = false; 2338341922Shselasky u8 port = (u8)param; 2339322810Shselasky 2340322810Shselasky switch (event) { 2341322810Shselasky case MLX5_DEV_EVENT_SYS_ERROR: 2342322810Shselasky ibev.event = IB_EVENT_DEVICE_FATAL; 2343322810Shselasky mlx5_ib_handle_internal_error(ibdev); 2344331769Shselasky fatal = true; 2345322810Shselasky break; 2346322810Shselasky 2347322810Shselasky case MLX5_DEV_EVENT_PORT_UP: 2348322810Shselasky case MLX5_DEV_EVENT_PORT_DOWN: 2349322810Shselasky case MLX5_DEV_EVENT_PORT_INITIALIZED: 2350331769Shselasky /* In RoCE, port up/down events are handled in 2351331769Shselasky * mlx5_netdev_event(). 2352331769Shselasky */ 2353331769Shselasky if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) == 2354331769Shselasky IB_LINK_LAYER_ETHERNET) 2355331769Shselasky return; 2356331769Shselasky 2357331769Shselasky ibev.event = (event == MLX5_DEV_EVENT_PORT_UP) ? 2358331769Shselasky IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR; 2359322810Shselasky break; 2360322810Shselasky 2361322810Shselasky case MLX5_DEV_EVENT_LID_CHANGE: 2362322810Shselasky ibev.event = IB_EVENT_LID_CHANGE; 2363322810Shselasky break; 2364322810Shselasky 2365322810Shselasky case MLX5_DEV_EVENT_PKEY_CHANGE: 2366322810Shselasky ibev.event = IB_EVENT_PKEY_CHANGE; 2367331769Shselasky 2368331769Shselasky schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work); 2369322810Shselasky break; 2370322810Shselasky 2371322810Shselasky case MLX5_DEV_EVENT_GUID_CHANGE: 2372322810Shselasky ibev.event = IB_EVENT_GID_CHANGE; 2373322810Shselasky break; 2374322810Shselasky 2375322810Shselasky case MLX5_DEV_EVENT_CLIENT_REREG: 2376322810Shselasky ibev.event = IB_EVENT_CLIENT_REREGISTER; 2377322810Shselasky break; 2378322810Shselasky 2379322810Shselasky default: 2380337100Shselasky /* unsupported event */ 2381337100Shselasky return; 2382322810Shselasky } 2383322810Shselasky 2384322810Shselasky ibev.device = &ibdev->ib_dev; 2385322810Shselasky ibev.element.port_num = port; 2386322810Shselasky 2387341922Shselasky if (!rdma_is_port_valid(&ibdev->ib_dev, port)) { 2388337100Shselasky mlx5_ib_warn(ibdev, "warning: event(%d) on port %d\n", event, port); 2389322810Shselasky return; 2390322810Shselasky } 2391322810Shselasky 2392322810Shselasky if (ibdev->ib_active) 2393322810Shselasky ib_dispatch_event(&ibev); 2394331769Shselasky 2395331769Shselasky if (fatal) 2396331769Shselasky ibdev->ib_active = false; 2397322810Shselasky} 2398322810Shselasky 2399322810Shselaskystatic void get_ext_port_caps(struct mlx5_ib_dev *dev) 2400322810Shselasky{ 2401322810Shselasky int port; 2402322810Shselasky 2403322810Shselasky for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++) 2404322810Shselasky mlx5_query_ext_port_caps(dev, port); 2405322810Shselasky} 2406322810Shselasky 2407322810Shselaskystatic int get_port_caps(struct mlx5_ib_dev *dev) 2408322810Shselasky{ 2409322810Shselasky struct ib_device_attr *dprops = NULL; 2410322810Shselasky struct 
ib_port_attr *pprops = NULL; 2411322810Shselasky int err = -ENOMEM; 2412322810Shselasky int port; 2413331769Shselasky struct ib_udata uhw = {.inlen = 0, .outlen = 0}; 2414322810Shselasky 2415322810Shselasky pprops = kmalloc(sizeof(*pprops), GFP_KERNEL); 2416322810Shselasky if (!pprops) 2417322810Shselasky goto out; 2418322810Shselasky 2419322810Shselasky dprops = kmalloc(sizeof(*dprops), GFP_KERNEL); 2420322810Shselasky if (!dprops) 2421322810Shselasky goto out; 2422322810Shselasky 2423331769Shselasky err = mlx5_ib_query_device(&dev->ib_dev, dprops, &uhw); 2424322810Shselasky if (err) { 2425322810Shselasky mlx5_ib_warn(dev, "query_device failed %d\n", err); 2426322810Shselasky goto out; 2427322810Shselasky } 2428322810Shselasky 2429322810Shselasky for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++) { 2430322810Shselasky err = mlx5_ib_query_port(&dev->ib_dev, port, pprops); 2431322810Shselasky if (err) { 2432322810Shselasky mlx5_ib_warn(dev, "query_port %d failed %d\n", 2433322810Shselasky port, err); 2434322810Shselasky break; 2435322810Shselasky } 2436331769Shselasky dev->mdev->port_caps[port - 1].pkey_table_len = 2437331769Shselasky dprops->max_pkeys; 2438331769Shselasky dev->mdev->port_caps[port - 1].gid_table_len = 2439331769Shselasky pprops->gid_tbl_len; 2440322810Shselasky mlx5_ib_dbg(dev, "pkey_table_len %d, gid_table_len %d\n", 2441322810Shselasky dprops->max_pkeys, pprops->gid_tbl_len); 2442322810Shselasky } 2443322810Shselasky 2444322810Shselaskyout: 2445322810Shselasky kfree(pprops); 2446322810Shselasky kfree(dprops); 2447322810Shselasky 2448322810Shselasky return err; 2449322810Shselasky} 2450322810Shselasky 2451322810Shselaskystatic void destroy_umrc_res(struct mlx5_ib_dev *dev) 2452322810Shselasky{ 2453322810Shselasky int err; 2454322810Shselasky 2455322810Shselasky err = mlx5_mr_cache_cleanup(dev); 2456322810Shselasky if (err) 2457322810Shselasky mlx5_ib_warn(dev, "mr cache cleanup failed\n"); 2458322810Shselasky 2459331769Shselasky mlx5_ib_destroy_qp(dev->umrc.qp); 2460331769Shselasky ib_free_cq(dev->umrc.cq); 2461322810Shselasky ib_dealloc_pd(dev->umrc.pd); 2462322810Shselasky} 2463322810Shselasky 2464322810Shselaskyenum { 2465322810Shselasky MAX_UMR_WR = 128, 2466322810Shselasky}; 2467322810Shselasky 2468322810Shselaskystatic int create_umr_res(struct mlx5_ib_dev *dev) 2469322810Shselasky{ 2470331769Shselasky struct ib_qp_init_attr *init_attr = NULL; 2471331769Shselasky struct ib_qp_attr *attr = NULL; 2472322810Shselasky struct ib_pd *pd; 2473331769Shselasky struct ib_cq *cq; 2474331769Shselasky struct ib_qp *qp; 2475322810Shselasky int ret; 2476322810Shselasky 2477331769Shselasky attr = kzalloc(sizeof(*attr), GFP_KERNEL); 2478331769Shselasky init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL); 2479331769Shselasky if (!attr || !init_attr) { 2480331769Shselasky ret = -ENOMEM; 2481331769Shselasky goto error_0; 2482331769Shselasky } 2483331769Shselasky 2484331769Shselasky pd = ib_alloc_pd(&dev->ib_dev, 0); 2485322810Shselasky if (IS_ERR(pd)) { 2486322810Shselasky mlx5_ib_dbg(dev, "Couldn't create PD for sync UMR QP\n"); 2487322810Shselasky ret = PTR_ERR(pd); 2488322810Shselasky goto error_0; 2489322810Shselasky } 2490322810Shselasky 2491331769Shselasky cq = ib_alloc_cq(&dev->ib_dev, NULL, 128, 0, IB_POLL_SOFTIRQ); 2492331769Shselasky if (IS_ERR(cq)) { 2493331769Shselasky mlx5_ib_dbg(dev, "Couldn't create CQ for sync UMR QP\n"); 2494331769Shselasky ret = PTR_ERR(cq); 2495331769Shselasky goto error_2; 2496322810Shselasky } 2497322810Shselasky 2498331769Shselasky 
init_attr->send_cq = cq; 2499331769Shselasky init_attr->recv_cq = cq; 2500331769Shselasky init_attr->sq_sig_type = IB_SIGNAL_ALL_WR; 2501331769Shselasky init_attr->cap.max_send_wr = MAX_UMR_WR; 2502331769Shselasky init_attr->cap.max_send_sge = 1; 2503331769Shselasky init_attr->qp_type = MLX5_IB_QPT_REG_UMR; 2504331769Shselasky init_attr->port_num = 1; 2505331769Shselasky qp = mlx5_ib_create_qp(pd, init_attr, NULL); 2506331769Shselasky if (IS_ERR(qp)) { 2507331769Shselasky mlx5_ib_dbg(dev, "Couldn't create sync UMR QP\n"); 2508331769Shselasky ret = PTR_ERR(qp); 2509331769Shselasky goto error_3; 2510331769Shselasky } 2511331769Shselasky qp->device = &dev->ib_dev; 2512331769Shselasky qp->real_qp = qp; 2513331769Shselasky qp->uobject = NULL; 2514331769Shselasky qp->qp_type = MLX5_IB_QPT_REG_UMR; 2515331769Shselasky 2516331769Shselasky attr->qp_state = IB_QPS_INIT; 2517331769Shselasky attr->port_num = 1; 2518331769Shselasky ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_PKEY_INDEX | 2519331769Shselasky IB_QP_PORT, NULL); 2520331769Shselasky if (ret) { 2521331769Shselasky mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n"); 2522331769Shselasky goto error_4; 2523331769Shselasky } 2524331769Shselasky 2525331769Shselasky memset(attr, 0, sizeof(*attr)); 2526331769Shselasky attr->qp_state = IB_QPS_RTR; 2527331769Shselasky attr->path_mtu = IB_MTU_256; 2528331769Shselasky 2529331769Shselasky ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL); 2530331769Shselasky if (ret) { 2531331769Shselasky mlx5_ib_dbg(dev, "Couldn't modify umr QP to rtr\n"); 2532331769Shselasky goto error_4; 2533331769Shselasky } 2534331769Shselasky 2535331769Shselasky memset(attr, 0, sizeof(*attr)); 2536331769Shselasky attr->qp_state = IB_QPS_RTS; 2537331769Shselasky ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL); 2538331769Shselasky if (ret) { 2539331769Shselasky mlx5_ib_dbg(dev, "Couldn't modify umr QP to rts\n"); 2540331769Shselasky goto error_4; 2541331769Shselasky } 2542331769Shselasky 2543331769Shselasky dev->umrc.qp = qp; 2544331769Shselasky dev->umrc.cq = cq; 2545322810Shselasky dev->umrc.pd = pd; 2546322810Shselasky 2547331769Shselasky sema_init(&dev->umrc.sem, MAX_UMR_WR); 2548322810Shselasky ret = mlx5_mr_cache_init(dev); 2549322810Shselasky if (ret) { 2550322810Shselasky mlx5_ib_warn(dev, "mr cache init failed %d\n", ret); 2551322810Shselasky goto error_4; 2552322810Shselasky } 2553322810Shselasky 2554331769Shselasky kfree(attr); 2555331769Shselasky kfree(init_attr); 2556331769Shselasky 2557322810Shselasky return 0; 2558322810Shselasky 2559322810Shselaskyerror_4: 2560331769Shselasky mlx5_ib_destroy_qp(qp); 2561331769Shselasky 2562331769Shselaskyerror_3: 2563331769Shselasky ib_free_cq(cq); 2564331769Shselasky 2565331769Shselaskyerror_2: 2566322810Shselasky ib_dealloc_pd(pd); 2567331769Shselasky 2568322810Shselaskyerror_0: 2569331769Shselasky kfree(attr); 2570331769Shselasky kfree(init_attr); 2571322810Shselasky return ret; 2572322810Shselasky} 2573322810Shselasky 2574322810Shselaskystatic int create_dev_resources(struct mlx5_ib_resources *devr) 2575322810Shselasky{ 2576322810Shselasky struct ib_srq_init_attr attr; 2577322810Shselasky struct mlx5_ib_dev *dev; 2578331769Shselasky struct ib_cq_init_attr cq_attr = {.cqe = 1}; 2579331769Shselasky int port; 2580322810Shselasky int ret = 0; 2581322810Shselasky 2582322810Shselasky dev = container_of(devr, struct mlx5_ib_dev, devr); 2583322810Shselasky 2584331769Shselasky mutex_init(&devr->mutex); 2585331769Shselasky 2586322810Shselasky devr->p0 = 
	devr->p0 = mlx5_ib_alloc_pd(&dev->ib_dev, NULL, NULL);
	if (IS_ERR(devr->p0)) {
		ret = PTR_ERR(devr->p0);
		goto error0;
	}
	devr->p0->device = &dev->ib_dev;
	devr->p0->uobject = NULL;
	atomic_set(&devr->p0->usecnt, 0);

	devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, &cq_attr, NULL, NULL);
	if (IS_ERR(devr->c0)) {
		ret = PTR_ERR(devr->c0);
		goto error1;
	}
	devr->c0->device = &dev->ib_dev;
	devr->c0->uobject = NULL;
	devr->c0->comp_handler = NULL;
	devr->c0->event_handler = NULL;
	devr->c0->cq_context = NULL;
	atomic_set(&devr->c0->usecnt, 0);

	devr->x0 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
	if (IS_ERR(devr->x0)) {
		ret = PTR_ERR(devr->x0);
		goto error2;
	}
	devr->x0->device = &dev->ib_dev;
	devr->x0->inode = NULL;
	atomic_set(&devr->x0->usecnt, 0);
	mutex_init(&devr->x0->tgt_qp_mutex);
	INIT_LIST_HEAD(&devr->x0->tgt_qp_list);

	devr->x1 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
	if (IS_ERR(devr->x1)) {
		ret = PTR_ERR(devr->x1);
		goto error3;
	}
	devr->x1->device = &dev->ib_dev;
	devr->x1->inode = NULL;
	atomic_set(&devr->x1->usecnt, 0);
	mutex_init(&devr->x1->tgt_qp_mutex);
	INIT_LIST_HEAD(&devr->x1->tgt_qp_list);

	memset(&attr, 0, sizeof(attr));
	attr.attr.max_sge = 1;
	attr.attr.max_wr = 1;
	attr.srq_type = IB_SRQT_XRC;
	attr.ext.xrc.cq = devr->c0;
	attr.ext.xrc.xrcd = devr->x0;

	devr->s0 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
	if (IS_ERR(devr->s0)) {
		ret = PTR_ERR(devr->s0);
		goto error4;
	}
	devr->s0->device = &dev->ib_dev;
	devr->s0->pd = devr->p0;
	devr->s0->uobject = NULL;
	devr->s0->event_handler = NULL;
	devr->s0->srq_context = NULL;
	devr->s0->srq_type = IB_SRQT_XRC;
	devr->s0->ext.xrc.xrcd = devr->x0;
	devr->s0->ext.xrc.cq = devr->c0;
	atomic_inc(&devr->s0->ext.xrc.xrcd->usecnt);
	atomic_inc(&devr->s0->ext.xrc.cq->usecnt);
	atomic_inc(&devr->p0->usecnt);
	atomic_set(&devr->s0->usecnt, 0);

	memset(&attr, 0, sizeof(attr));
	attr.attr.max_sge = 1;
	attr.attr.max_wr = 1;
	attr.srq_type = IB_SRQT_BASIC;
	devr->s1 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
	if (IS_ERR(devr->s1)) {
		ret = PTR_ERR(devr->s1);
		goto error5;
	}
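	/* s1 is a basic (non-XRC) SRQ on the same PD and completion queue. */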
	devr->s1->device = &dev->ib_dev;
	devr->s1->pd = devr->p0;
	devr->s1->uobject = NULL;
	devr->s1->event_handler = NULL;
	devr->s1->srq_context = NULL;
	devr->s1->srq_type = IB_SRQT_BASIC;
	devr->s1->ext.xrc.cq = devr->c0;
	atomic_inc(&devr->p0->usecnt);
	atomic_set(&devr->s1->usecnt, 0);

	for (port = 0; port < ARRAY_SIZE(devr->ports); ++port) {
		INIT_WORK(&devr->ports[port].pkey_change_work,
			  pkey_change_handler);
		devr->ports[port].devr = devr;
	}

	return 0;

error5:
	mlx5_ib_destroy_srq(devr->s0);
error4:
	mlx5_ib_dealloc_xrcd(devr->x1);
error3:
	mlx5_ib_dealloc_xrcd(devr->x0);
error2:
	mlx5_ib_destroy_cq(devr->c0);
error1:
	mlx5_ib_dealloc_pd(devr->p0);
error0:
	return ret;
}

static void destroy_dev_resources(struct mlx5_ib_resources *devr)
{
	struct mlx5_ib_dev *dev =
		container_of(devr, struct mlx5_ib_dev, devr);
	int port;

	mlx5_ib_destroy_srq(devr->s1);
	mlx5_ib_destroy_srq(devr->s0);
	mlx5_ib_dealloc_xrcd(devr->x0);
	mlx5_ib_dealloc_xrcd(devr->x1);
	mlx5_ib_destroy_cq(devr->c0);
	mlx5_ib_dealloc_pd(devr->p0);

	/* Make sure no change P_Key work items are still executing */
	for (port = 0; port < dev->num_ports; ++port)
		cancel_work_sync(&devr->ports[port].pkey_change_work);
}

static u32 get_core_cap_flags(struct ib_device *ibdev)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, 1);
	u8 l3_type_cap = MLX5_CAP_ROCE(dev->mdev, l3_type);
	u8 roce_version_cap = MLX5_CAP_ROCE(dev->mdev, roce_version);
	u32 ret = 0;

	if (ll == IB_LINK_LAYER_INFINIBAND)
		return RDMA_CORE_PORT_IBA_IB;

	if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV4_CAP))
		return 0;

	if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV6_CAP))
		return 0;

	if (roce_version_cap & MLX5_ROCE_VERSION_1_CAP)
		ret |= RDMA_CORE_PORT_IBA_ROCE;

	if (roce_version_cap & MLX5_ROCE_VERSION_2_CAP)
		ret |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	return ret;
}

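/*
 * Report the port attributes the IB core is allowed to cache for the
 * lifetime of the device: table sizes, the core capability flags
 * derived above and the maximum MAD size.
 */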
static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num,
			       struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	err = mlx5_ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->core_cap_flags = get_core_cap_flags(ibdev);
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}

static void get_dev_fw_str(struct ib_device *ibdev, char *str,
			   size_t str_len)
{
	struct mlx5_ib_dev *dev =
		container_of(ibdev, struct mlx5_ib_dev, ib_dev);
	snprintf(str, str_len, "%d.%d.%04d", fw_rev_maj(dev->mdev),
		 fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev));
}

static int mlx5_roce_lag_init(struct mlx5_ib_dev *dev)
{
	return 0;
}

static void mlx5_roce_lag_cleanup(struct mlx5_ib_dev *dev)
{
}

static void mlx5_remove_roce_notifier(struct mlx5_ib_dev *dev)
{
	if (dev->roce.nb.notifier_call) {
		unregister_netdevice_notifier(&dev->roce.nb);
		dev->roce.nb.notifier_call = NULL;
	}
}

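/*
 * RoCE bring-up, FreeBSD style: scan every VNET's interface list for the
 * mlx5en ("mce") netdev backed by this core device, then register a
 * netdevice notifier so later link events keep dev->roce.netdev current.
 */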
static int mlx5_enable_roce(struct mlx5_ib_dev *dev)
{
	VNET_ITERATOR_DECL(vnet_iter);
	struct net_device *idev;
	int err;

	/* Check if mlx5en net device already exists */
	VNET_LIST_RLOCK();
	VNET_FOREACH(vnet_iter) {
		IFNET_RLOCK();
		CURVNET_SET_QUIET(vnet_iter);
		TAILQ_FOREACH(idev, &V_ifnet, if_link) {
			/* check if network interface belongs to mlx5en */
			if (!mlx5_netdev_match(idev, dev->mdev, "mce"))
				continue;
			write_lock(&dev->roce.netdev_lock);
			dev->roce.netdev = idev;
			write_unlock(&dev->roce.netdev_lock);
		}
		CURVNET_RESTORE();
		IFNET_RUNLOCK();
	}
	VNET_LIST_RUNLOCK();

	dev->roce.nb.notifier_call = mlx5_netdev_event;
	err = register_netdevice_notifier(&dev->roce.nb);
	if (err) {
		dev->roce.nb.notifier_call = NULL;
		return err;
	}

	err = mlx5_nic_vport_enable_roce(dev->mdev);
	if (err)
		goto err_unregister_netdevice_notifier;

	err = mlx5_roce_lag_init(dev);
	if (err)
		goto err_disable_roce;

	return 0;

err_disable_roce:
	mlx5_nic_vport_disable_roce(dev->mdev);

err_unregister_netdevice_notifier:
	mlx5_remove_roce_notifier(dev);
	return err;
}

static void mlx5_disable_roce(struct mlx5_ib_dev *dev)
{
	mlx5_roce_lag_cleanup(dev);
	mlx5_nic_vport_disable_roce(dev->mdev);
}

static void mlx5_ib_dealloc_q_port_counter(struct mlx5_ib_dev *dev, u8 port_num)
{
	mlx5_vport_dealloc_q_counter(dev->mdev,
				     MLX5_INTERFACE_PROTOCOL_IB,
				     dev->port[port_num].q_cnt_id);
	dev->port[port_num].q_cnt_id = 0;
}

static void mlx5_ib_dealloc_q_counters(struct mlx5_ib_dev *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_ports; i++)
		mlx5_ib_dealloc_q_port_counter(dev, i);
}

static int mlx5_ib_alloc_q_counters(struct mlx5_ib_dev *dev)
{
	int i;
	int ret;

	for (i = 0; i < dev->num_ports; i++) {
		ret = mlx5_vport_alloc_q_counter(dev->mdev,
						 MLX5_INTERFACE_PROTOCOL_IB,
						 &dev->port[i].q_cnt_id);
		if (ret) {
			mlx5_ib_warn(dev,
				     "couldn't allocate queue counter for port %d, err %d\n",
				     i + 1, ret);
			goto dealloc_counters;
		}
	}

	return 0;

dealloc_counters:
	while (--i >= 0)
		mlx5_ib_dealloc_q_port_counter(dev, i);

	return ret;
}

static const char * const names[] = {
	"rx_write_requests",
	"rx_read_requests",
	"rx_atomic_requests",
	"out_of_buffer",
	"out_of_sequence",
	"duplicate_request",
	"rnr_nak_retry_err",
	"packet_seq_err",
	"implied_nak_seq_err",
	"local_ack_timeout_err",
};

static const size_t stats_offsets[] = {
	MLX5_BYTE_OFF(query_q_counter_out, rx_write_requests),
	MLX5_BYTE_OFF(query_q_counter_out, rx_read_requests),
	MLX5_BYTE_OFF(query_q_counter_out, rx_atomic_requests),
	MLX5_BYTE_OFF(query_q_counter_out, out_of_buffer),
	MLX5_BYTE_OFF(query_q_counter_out, out_of_sequence),
	MLX5_BYTE_OFF(query_q_counter_out, duplicate_request),
	MLX5_BYTE_OFF(query_q_counter_out, rnr_nak_retry_err),
	MLX5_BYTE_OFF(query_q_counter_out, packet_seq_err),
	MLX5_BYTE_OFF(query_q_counter_out, implied_nak_seq_err),
	MLX5_BYTE_OFF(query_q_counter_out, local_ack_timeout_err),
};

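/*
 * names[] and stats_offsets[] must stay in sync: entry i of the
 * rdma_hw_stats array is read from the QUERY_Q_COUNTER output mailbox
 * field at stats_offsets[i].  The BUILD_BUG_ON below enforces equal
 * array sizes.
 */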
static struct rdma_hw_stats *mlx5_ib_alloc_hw_stats(struct ib_device *ibdev,
						    u8 port_num)
{
	BUILD_BUG_ON(ARRAY_SIZE(names) != ARRAY_SIZE(stats_offsets));

	/* We support only per port stats */
	if (port_num == 0)
		return NULL;

	return rdma_alloc_hw_stats_struct(names, ARRAY_SIZE(names),
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}

static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
				struct rdma_hw_stats *stats,
				u8 port, int index)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	int outlen = MLX5_ST_SZ_BYTES(query_q_counter_out);
	void *out;
	__be32 val;
	int ret;
	int i;

	if (!port || !stats)
		return -ENOSYS;

	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	ret = mlx5_vport_query_q_counter(dev->mdev,
					 dev->port[port - 1].q_cnt_id, 0,
					 out, outlen);
	if (ret)
		goto free;

	for (i = 0; i < ARRAY_SIZE(names); i++) {
		val = *(__be32 *)(out + stats_offsets[i]);
		stats->value[i] = (u64)be32_to_cpu(val);
	}
free:
	kvfree(out);
	return ARRAY_SIZE(names);
}

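/*
 * Attach entry point, called by mlx5_core for each probed device:
 * allocate the ib_device, fill in the verbs method table and register
 * with the IB core.  Returns the context pointer later handed back to
 * mlx5_ib_remove(), or NULL on failure.
 */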
static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
{
	struct mlx5_ib_dev *dev;
	enum rdma_link_layer ll;
	int port_type_cap;
	int err;
	int i;

	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);

	if ((ll == IB_LINK_LAYER_ETHERNET) && !MLX5_CAP_GEN(mdev, roce))
		return NULL;

	dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev));
	if (!dev)
		return NULL;

	dev->mdev = mdev;

	dev->port = kcalloc(MLX5_CAP_GEN(mdev, num_ports), sizeof(*dev->port),
			    GFP_KERNEL);
	if (!dev->port)
		goto err_dealloc;

	rwlock_init(&dev->roce.netdev_lock);
	err = get_port_caps(dev);
	if (err)
		goto err_free_port;

	if (mlx5_use_mad_ifc(dev))
		get_ext_port_caps(dev);

	MLX5_INIT_DOORBELL_LOCK(&dev->uar_lock);

	snprintf(dev->ib_dev.name, IB_DEVICE_NAME_MAX, "mlx5_%d",
		 device_get_unit(mdev->pdev->dev.bsddev));
	dev->ib_dev.owner = THIS_MODULE;
	dev->ib_dev.node_type = RDMA_NODE_IB_CA;
	dev->ib_dev.local_dma_lkey = 0 /* not supported for now */;
	dev->num_ports = MLX5_CAP_GEN(mdev, num_ports);
	dev->ib_dev.phys_port_cnt = dev->num_ports;
	dev->ib_dev.num_comp_vectors =
		dev->mdev->priv.eq_table.num_comp_vectors;
	dev->ib_dev.dma_device = &mdev->pdev->dev;

	dev->ib_dev.uverbs_abi_ver = MLX5_IB_UVERBS_ABI_VERSION;
	dev->ib_dev.uverbs_cmd_mask =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_CREATE_AH) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_AH) |
		(1ull << IB_USER_VERBS_CMD_REG_MR) |
		(1ull << IB_USER_VERBS_CMD_REREG_MR) |
		(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
		(1ull << IB_USER_VERBS_CMD_CREATE_QP) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
		(1ull << IB_USER_VERBS_CMD_QUERY_QP) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
		(1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) |
		(1ull << IB_USER_VERBS_CMD_OPEN_QP);
	dev->ib_dev.uverbs_ex_cmd_mask =
		(1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ) |
		(1ull << IB_USER_VERBS_EX_CMD_CREATE_QP);

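	/*
	 * Base verbs method table.  Optional methods (memory windows, XRC,
	 * flow steering, RSS work queues, VF control, HW counters) are
	 * hooked up further down only when the firmware advertises the
	 * corresponding capability.
	 */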
	dev->ib_dev.query_device = mlx5_ib_query_device;
	dev->ib_dev.query_port = mlx5_ib_query_port;
	dev->ib_dev.get_link_layer = mlx5_ib_port_link_layer;
	if (ll == IB_LINK_LAYER_ETHERNET)
		dev->ib_dev.get_netdev = mlx5_ib_get_netdev;
	dev->ib_dev.query_gid = mlx5_ib_query_gid;
	dev->ib_dev.add_gid = mlx5_ib_add_gid;
	dev->ib_dev.del_gid = mlx5_ib_del_gid;
	dev->ib_dev.query_pkey = mlx5_ib_query_pkey;
	dev->ib_dev.modify_device = mlx5_ib_modify_device;
	dev->ib_dev.modify_port = mlx5_ib_modify_port;
	dev->ib_dev.alloc_ucontext = mlx5_ib_alloc_ucontext;
	dev->ib_dev.dealloc_ucontext = mlx5_ib_dealloc_ucontext;
	dev->ib_dev.mmap = mlx5_ib_mmap;
	dev->ib_dev.alloc_pd = mlx5_ib_alloc_pd;
	dev->ib_dev.dealloc_pd = mlx5_ib_dealloc_pd;
	dev->ib_dev.create_ah = mlx5_ib_create_ah;
	dev->ib_dev.query_ah = mlx5_ib_query_ah;
	dev->ib_dev.destroy_ah = mlx5_ib_destroy_ah;
	dev->ib_dev.create_srq = mlx5_ib_create_srq;
	dev->ib_dev.modify_srq = mlx5_ib_modify_srq;
	dev->ib_dev.query_srq = mlx5_ib_query_srq;
	dev->ib_dev.destroy_srq = mlx5_ib_destroy_srq;
	dev->ib_dev.post_srq_recv = mlx5_ib_post_srq_recv;
	dev->ib_dev.create_qp = mlx5_ib_create_qp;
	dev->ib_dev.modify_qp = mlx5_ib_modify_qp;
	dev->ib_dev.query_qp = mlx5_ib_query_qp;
	dev->ib_dev.destroy_qp = mlx5_ib_destroy_qp;
	dev->ib_dev.post_send = mlx5_ib_post_send;
	dev->ib_dev.post_recv = mlx5_ib_post_recv;
	dev->ib_dev.create_cq = mlx5_ib_create_cq;
	dev->ib_dev.modify_cq = mlx5_ib_modify_cq;
	dev->ib_dev.resize_cq = mlx5_ib_resize_cq;
	dev->ib_dev.destroy_cq = mlx5_ib_destroy_cq;
	dev->ib_dev.poll_cq = mlx5_ib_poll_cq;
	dev->ib_dev.req_notify_cq = mlx5_ib_arm_cq;
	dev->ib_dev.get_dma_mr = mlx5_ib_get_dma_mr;
	dev->ib_dev.reg_user_mr = mlx5_ib_reg_user_mr;
	dev->ib_dev.rereg_user_mr = mlx5_ib_rereg_user_mr;
	dev->ib_dev.reg_phys_mr = mlx5_ib_reg_phys_mr;
	dev->ib_dev.dereg_mr = mlx5_ib_dereg_mr;
	dev->ib_dev.attach_mcast = mlx5_ib_mcg_attach;
	dev->ib_dev.detach_mcast = mlx5_ib_mcg_detach;
	dev->ib_dev.process_mad = mlx5_ib_process_mad;
	dev->ib_dev.alloc_mr = mlx5_ib_alloc_mr;
	dev->ib_dev.map_mr_sg = mlx5_ib_map_mr_sg;
	dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status;
	dev->ib_dev.get_port_immutable = mlx5_port_immutable;
	dev->ib_dev.get_dev_fw_str = get_dev_fw_str;
	if (mlx5_core_is_pf(mdev)) {
		dev->ib_dev.get_vf_config = mlx5_ib_get_vf_config;
		dev->ib_dev.set_vf_link_state = mlx5_ib_set_vf_link_state;
		dev->ib_dev.get_vf_stats = mlx5_ib_get_vf_stats;
		dev->ib_dev.set_vf_guid = mlx5_ib_set_vf_guid;
	}

	mlx5_ib_internal_fill_odp_caps(dev);

	if (MLX5_CAP_GEN(mdev, imaicl)) {
		dev->ib_dev.alloc_mw = mlx5_ib_alloc_mw;
		dev->ib_dev.dealloc_mw = mlx5_ib_dealloc_mw;
		dev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_ALLOC_MW) |
			(1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
	}

	if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt) &&
	    MLX5_CAP_GEN(dev->mdev, retransmission_q_counters)) {
		dev->ib_dev.get_hw_stats = mlx5_ib_get_hw_stats;
		dev->ib_dev.alloc_hw_stats = mlx5_ib_alloc_hw_stats;
	}

	if (MLX5_CAP_GEN(mdev, xrc)) {
		dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd;
		dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd;
		dev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
			(1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
	}

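	/*
	 * Flow steering and RSS work-queue verbs are only exposed when
	 * port 1 runs an Ethernet (RoCE) link layer.
	 */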
	if (mlx5_ib_port_link_layer(&dev->ib_dev, 1) ==
	    IB_LINK_LAYER_ETHERNET) {
		dev->ib_dev.create_flow = mlx5_ib_create_flow;
		dev->ib_dev.destroy_flow = mlx5_ib_destroy_flow;
		dev->ib_dev.create_wq = mlx5_ib_create_wq;
		dev->ib_dev.modify_wq = mlx5_ib_modify_wq;
		dev->ib_dev.destroy_wq = mlx5_ib_destroy_wq;
		dev->ib_dev.create_rwq_ind_table = mlx5_ib_create_rwq_ind_table;
		dev->ib_dev.destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table;
		dev->ib_dev.uverbs_ex_cmd_mask |=
			(1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW) |
			(1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) |
			(1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) |
			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) |
			(1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) |
			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL);
	}
	err = init_node_data(dev);
	if (err)
		goto err_free_port;

	mutex_init(&dev->flow_db.lock);
	mutex_init(&dev->cap_mask_mutex);
	INIT_LIST_HEAD(&dev->qp_list);
	spin_lock_init(&dev->reset_flow_resource_lock);

	if (ll == IB_LINK_LAYER_ETHERNET) {
		err = mlx5_enable_roce(dev);
		if (err)
			goto err_free_port;
	}

	err = create_dev_resources(&dev->devr);
	if (err)
		goto err_disable_roce;

	err = mlx5_ib_odp_init_one(dev);
	if (err)
		goto err_rsrc;

	err = mlx5_ib_alloc_q_counters(dev);
	if (err)
		goto err_odp;

	err = ib_register_device(&dev->ib_dev, NULL);
	if (err)
		goto err_q_cnt;

	err = create_umr_res(dev);
	if (err)
		goto err_dev;

	for (i = 0; i < ARRAY_SIZE(mlx5_class_attributes); i++) {
		err = device_create_file(&dev->ib_dev.dev,
					 mlx5_class_attributes[i]);
		if (err)
			goto err_umrc;
	}

	err = mlx5_ib_init_congestion(dev);
	if (err)
		goto err_umrc;

	dev->ib_active = true;

	return dev;

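	/* Error unwind: release everything in reverse order of setup. */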
err_umrc:
	destroy_umrc_res(dev);

err_dev:
	ib_unregister_device(&dev->ib_dev);

err_q_cnt:
	mlx5_ib_dealloc_q_counters(dev);

err_odp:
	mlx5_ib_odp_remove_one(dev);

err_rsrc:
	destroy_dev_resources(&dev->devr);

err_disable_roce:
	if (ll == IB_LINK_LAYER_ETHERNET) {
		mlx5_disable_roce(dev);
		mlx5_remove_roce_notifier(dev);
	}

err_free_port:
	kfree(dev->port);

err_dealloc:
	ib_dealloc_device((struct ib_device *)dev);

	return NULL;
}

static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
{
	struct mlx5_ib_dev *dev = context;
	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev, 1);

	mlx5_ib_cleanup_congestion(dev);
	mlx5_remove_roce_notifier(dev);
	ib_unregister_device(&dev->ib_dev);
	mlx5_ib_dealloc_q_counters(dev);
	destroy_umrc_res(dev);
	mlx5_ib_odp_remove_one(dev);
	destroy_dev_resources(&dev->devr);
	if (ll == IB_LINK_LAYER_ETHERNET)
		mlx5_disable_roce(dev);
	kfree(dev->port);
	ib_dealloc_device(&dev->ib_dev);
}

static struct mlx5_interface mlx5_ib_interface = {
	.add = mlx5_ib_add,
	.remove = mlx5_ib_remove,
	.event = mlx5_ib_event,
	.protocol = MLX5_INTERFACE_PROTOCOL_IB,
};

static int __init mlx5_ib_init(void)
{
	int err;

	if (deprecated_prof_sel != 2)
		pr_warn("prof_sel is deprecated for mlx5_ib, set it for mlx5_core\n");

	err = mlx5_ib_odp_init();
	if (err)
		return err;

	err = mlx5_register_interface(&mlx5_ib_interface);
	if (err)
		goto clean_odp;

	return err;

clean_odp:
	mlx5_ib_odp_cleanup();
	return err;
}

static void __exit mlx5_ib_cleanup(void)
{
	mlx5_unregister_interface(&mlx5_ib_interface);
	mlx5_ib_odp_cleanup();
}

static void
mlx5_ib_show_version(void __unused *arg)
{

	printf("%s", mlx5_version);
}
SYSINIT(mlx5_ib_show_version, SI_SUB_DRIVERS, SI_ORDER_ANY, mlx5_ib_show_version, NULL);

module_init_order(mlx5_ib_init, SI_ORDER_THIRD);
module_exit_order(mlx5_ib_cleanup, SI_ORDER_THIRD);