mlx5_ib_main.c revision 337099
/*-
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/dev/mlx5/mlx5_ib/mlx5_ib_main.c 337099 2018-08-02 08:37:44Z hselasky $
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#if defined(CONFIG_X86)
#include <asm/pat.h>
#endif
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/fs.h>
#undef inode
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <dev/mlx5/port.h>
#include <dev/mlx5/vport.h>
#include <linux/list.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <linux/in.h>
#include <linux/etherdevice.h>
#include <dev/mlx5/fs.h>
#include "mlx5_ib.h"

#define DRIVER_NAME "mlx5_ib"
#define DRIVER_VERSION "3.4.1-BETA"
#define DRIVER_RELDATE "October 2017"

MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DEPEND(mlx5ib, linuxkpi, 1, 1, 1);
MODULE_DEPEND(mlx5ib, mlx5, 1, 1, 1);
MODULE_DEPEND(mlx5ib, ibcore, 1, 1, 1);
MODULE_VERSION(mlx5ib, 1);

static int deprecated_prof_sel = 2;
module_param_named(prof_sel, deprecated_prof_sel, int, 0444);
MODULE_PARM_DESC(prof_sel, "profile selector. Deprecated here. Moved to module mlx5_core");

static char mlx5_version[] =
	DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
	DRIVER_VERSION " (" DRIVER_RELDATE ")\n";

enum {
	MLX5_ATOMIC_SIZE_QP_8BYTES = 1 << 3,
};

static enum rdma_link_layer
mlx5_port_type_cap_to_rdma_ll(int port_type_cap)
{
	switch (port_type_cap) {
	case MLX5_CAP_PORT_TYPE_IB:
		return IB_LINK_LAYER_INFINIBAND;
	case MLX5_CAP_PORT_TYPE_ETH:
		return IB_LINK_LAYER_ETHERNET;
	default:
		return IB_LINK_LAYER_UNSPECIFIED;
	}
}

static enum rdma_link_layer
mlx5_ib_port_link_layer(struct ib_device *device, u8 port_num)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	int port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type);

	return mlx5_port_type_cap_to_rdma_ll(port_type_cap);
}

static bool mlx5_netdev_match(struct net_device *ndev,
			      struct mlx5_core_dev *mdev,
			      const char *dname)
{
	return ndev->if_type == IFT_ETHER &&
	    ndev->if_dname != NULL &&
	    strcmp(ndev->if_dname, dname) == 0 &&
	    ndev->if_softc != NULL &&
	    *(struct mlx5_core_dev **)ndev->if_softc == mdev;
}

static int mlx5_netdev_event(struct notifier_block *this,
			     unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct mlx5_ib_dev *ibdev = container_of(this, struct mlx5_ib_dev,
						 roce.nb);

	switch (event) {
	case NETDEV_REGISTER:
	case NETDEV_UNREGISTER:
		write_lock(&ibdev->roce.netdev_lock);
		/* check if network interface belongs to mlx5en */
		if (mlx5_netdev_match(ndev, ibdev->mdev, "mce"))
			ibdev->roce.netdev = (event == NETDEV_UNREGISTER) ?
					     NULL : ndev;
		write_unlock(&ibdev->roce.netdev_lock);
		break;

	case NETDEV_UP:
	case NETDEV_DOWN: {
		struct net_device *upper = NULL;

		if ((upper == ndev || (!upper && ndev == ibdev->roce.netdev))
		    && ibdev->ib_active) {
			struct ib_event ibev = {0};

			ibev.device = &ibdev->ib_dev;
			ibev.event = (event == NETDEV_UP) ?
				     IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
			ibev.element.port_num = 1;
			ib_dispatch_event(&ibev);
		}
		break;
	}

	default:
		break;
	}

	return NOTIFY_DONE;
}

static struct net_device *mlx5_ib_get_netdev(struct ib_device *device,
					     u8 port_num)
{
	struct mlx5_ib_dev *ibdev = to_mdev(device);
	struct net_device *ndev;

	/* Ensure ndev does not disappear before we invoke dev_hold()
	 */
	read_lock(&ibdev->roce.netdev_lock);
	ndev = ibdev->roce.netdev;
	if (ndev)
		dev_hold(ndev);
	read_unlock(&ibdev->roce.netdev_lock);

	return ndev;
}

static int translate_eth_proto_oper(u32 eth_proto_oper, u8 *active_speed,
				    u8 *active_width)
{
	switch (eth_proto_oper) {
	case MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII):
	case MLX5E_PROT_MASK(MLX5E_1000BASE_KX):
	case MLX5E_PROT_MASK(MLX5E_100BASE_TX):
	case MLX5E_PROT_MASK(MLX5E_1000BASE_T):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_SDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_10GBASE_T):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_CX4):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_KX4):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_KR):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_CR):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_SR):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_ER):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_QDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_25GBASE_CR):
	case MLX5E_PROT_MASK(MLX5E_25GBASE_KR):
	case MLX5E_PROT_MASK(MLX5E_25GBASE_SR):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_EDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_40GBASE_CR4):
	case MLX5E_PROT_MASK(MLX5E_40GBASE_KR4):
	case MLX5E_PROT_MASK(MLX5E_40GBASE_SR4):
	case MLX5E_PROT_MASK(MLX5E_40GBASE_LR4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_QDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_50GBASE_CR2):
	case MLX5E_PROT_MASK(MLX5E_50GBASE_KR2):
	case MLX5E_PROT_MASK(MLX5E_50GBASE_SR2):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_HDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_56GBASE_R4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_FDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_100GBASE_CR4):
	case MLX5E_PROT_MASK(MLX5E_100GBASE_SR4):
	case MLX5E_PROT_MASK(MLX5E_100GBASE_KR4):
	case MLX5E_PROT_MASK(MLX5E_100GBASE_LR4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_EDR;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
				struct ib_port_attr *props)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	struct net_device *ndev;
	enum ib_mtu ndev_ib_mtu;
	u16 qkey_viol_cntr;
	u32 eth_prot_oper;
	int err;

	memset(props, 0, sizeof(*props));

	/* Possible bad flows are checked before filling out props so in case
	 * of an error it will still be zeroed out.
	 */
	err = mlx5_query_port_eth_proto_oper(dev->mdev, &eth_prot_oper, port_num);
	if (err)
		return err;

	translate_eth_proto_oper(eth_prot_oper, &props->active_speed,
				 &props->active_width);

	props->port_cap_flags |= IB_PORT_CM_SUP;
	props->port_cap_flags |= IB_PORT_IP_BASED_GIDS;

	props->gid_tbl_len = MLX5_CAP_ROCE(dev->mdev,
					   roce_address_table_size);
	props->max_mtu = IB_MTU_4096;
	props->max_msg_sz = 1 << MLX5_CAP_GEN(dev->mdev, log_max_msg);
	props->pkey_tbl_len = 1;
	props->state = IB_PORT_DOWN;
	props->phys_state = 3;

	mlx5_query_nic_vport_qkey_viol_cntr(dev->mdev, &qkey_viol_cntr);
	props->qkey_viol_cntr = qkey_viol_cntr;

	ndev = mlx5_ib_get_netdev(device, port_num);
	if (!ndev)
		return 0;

	if (netif_running(ndev) && netif_carrier_ok(ndev)) {
		props->state = IB_PORT_ACTIVE;
		props->phys_state = 5;
	}

	ndev_ib_mtu = iboe_get_mtu(ndev->if_mtu);

	dev_put(ndev);

	props->active_mtu = min(props->max_mtu, ndev_ib_mtu);
	return 0;
}

static void ib_gid_to_mlx5_roce_addr(const union ib_gid *gid,
				     const struct ib_gid_attr *attr,
				     void *mlx5_addr)
{
#define MLX5_SET_RA(p, f, v) MLX5_SET(roce_addr_layout, p, f, v)
	char *mlx5_addr_l3_addr = MLX5_ADDR_OF(roce_addr_layout, mlx5_addr,
					       source_l3_address);
	void *mlx5_addr_mac = MLX5_ADDR_OF(roce_addr_layout, mlx5_addr,
					   source_mac_47_32);
	u16 vlan_id;

	if (!gid)
		return;
	ether_addr_copy(mlx5_addr_mac, IF_LLADDR(attr->ndev));

	vlan_id = rdma_vlan_dev_vlan_id(attr->ndev);
	if (vlan_id != 0xffff) {
		MLX5_SET_RA(mlx5_addr, vlan_valid, 1);
		MLX5_SET_RA(mlx5_addr, vlan_id, vlan_id);
	}

	switch (attr->gid_type) {
	case IB_GID_TYPE_IB:
		MLX5_SET_RA(mlx5_addr, roce_version, MLX5_ROCE_VERSION_1);
		break;
	case IB_GID_TYPE_ROCE_UDP_ENCAP:
		MLX5_SET_RA(mlx5_addr, roce_version, MLX5_ROCE_VERSION_2);
		break;

	default:
		WARN_ON(true);
	}

	if (attr->gid_type != IB_GID_TYPE_IB) {
		if (ipv6_addr_v4mapped((void *)gid))
			MLX5_SET_RA(mlx5_addr, roce_l3_type,
				    MLX5_ROCE_L3_TYPE_IPV4);
		else
			MLX5_SET_RA(mlx5_addr, roce_l3_type,
				    MLX5_ROCE_L3_TYPE_IPV6);
	}

	if ((attr->gid_type == IB_GID_TYPE_IB) ||
	    !ipv6_addr_v4mapped((void *)gid))
		memcpy(mlx5_addr_l3_addr, gid, sizeof(*gid));
	else
		memcpy(&mlx5_addr_l3_addr[12], &gid->raw[12], 4);
}

static int set_roce_addr(struct ib_device *device, u8 port_num,
			 unsigned int index,
			 const union ib_gid *gid,
			 const struct ib_gid_attr *attr)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	u32 in[MLX5_ST_SZ_DW(set_roce_address_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(set_roce_address_out)] = {0};
	void *in_addr = MLX5_ADDR_OF(set_roce_address_in, in, roce_address);
	enum rdma_link_layer ll = mlx5_ib_port_link_layer(device, port_num);

	if (ll != IB_LINK_LAYER_ETHERNET)
		return -EINVAL;

	ib_gid_to_mlx5_roce_addr(gid, attr, in_addr);

	MLX5_SET(set_roce_address_in, in, roce_address_index, index);
	MLX5_SET(set_roce_address_in, in, opcode, MLX5_CMD_OP_SET_ROCE_ADDRESS);
	return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
}

static int mlx5_ib_add_gid(struct ib_device *device, u8 port_num,
			   unsigned int index, const union ib_gid *gid,
			   const struct ib_gid_attr *attr,
			   __always_unused void **context)
{
	return set_roce_addr(device, port_num, index, gid, attr);
}

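/*
 * Note on clearing a GID entry: when a NULL gid/attr is passed down,
 * ib_gid_to_mlx5_roce_addr() returns early and the command mailbox in
 * set_roce_addr() stays zeroed, so SET_ROCE_ADDRESS programs an
 * all-zero (invalid) entry at the given table index.
 */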
357static int mlx5_ib_del_gid(struct ib_device *device, u8 port_num, 358 unsigned int index, __always_unused void **context) 359{ 360 return set_roce_addr(device, port_num, index, NULL, NULL); 361} 362 363__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num, 364 int index) 365{ 366 struct ib_gid_attr attr; 367 union ib_gid gid; 368 369 if (ib_get_cached_gid(&dev->ib_dev, port_num, index, &gid, &attr)) 370 return 0; 371 372 if (!attr.ndev) 373 return 0; 374 375 dev_put(attr.ndev); 376 377 if (attr.gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP) 378 return 0; 379 380 return cpu_to_be16(MLX5_CAP_ROCE(dev->mdev, r_roce_min_src_udp_port)); 381} 382 383int mlx5_get_roce_gid_type(struct mlx5_ib_dev *dev, u8 port_num, 384 int index, enum ib_gid_type *gid_type) 385{ 386 struct ib_gid_attr attr; 387 union ib_gid gid; 388 int ret; 389 390 ret = ib_get_cached_gid(&dev->ib_dev, port_num, index, &gid, &attr); 391 if (ret) 392 return ret; 393 394 if (!attr.ndev) 395 return -ENODEV; 396 397 dev_put(attr.ndev); 398 399 *gid_type = attr.gid_type; 400 401 return 0; 402} 403 404static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev) 405{ 406 if (MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_IB) 407 return !MLX5_CAP_GEN(dev->mdev, ib_virt); 408 return 0; 409} 410 411enum { 412 MLX5_VPORT_ACCESS_METHOD_MAD, 413 MLX5_VPORT_ACCESS_METHOD_HCA, 414 MLX5_VPORT_ACCESS_METHOD_NIC, 415}; 416 417static int mlx5_get_vport_access_method(struct ib_device *ibdev) 418{ 419 if (mlx5_use_mad_ifc(to_mdev(ibdev))) 420 return MLX5_VPORT_ACCESS_METHOD_MAD; 421 422 if (mlx5_ib_port_link_layer(ibdev, 1) == 423 IB_LINK_LAYER_ETHERNET) 424 return MLX5_VPORT_ACCESS_METHOD_NIC; 425 426 return MLX5_VPORT_ACCESS_METHOD_HCA; 427} 428 429static void get_atomic_caps(struct mlx5_ib_dev *dev, 430 struct ib_device_attr *props) 431{ 432 u8 tmp; 433 u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations); 434 u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp); 435 u8 atomic_req_8B_endianness_mode = 436 MLX5_CAP_ATOMIC(dev->mdev, atomic_req_8B_endianess_mode); 437 438 /* Check if HW supports 8 bytes standard atomic operations and capable 439 * of host endianness respond 440 */ 441 tmp = MLX5_ATOMIC_OPS_CMP_SWAP | MLX5_ATOMIC_OPS_FETCH_ADD; 442 if (((atomic_operations & tmp) == tmp) && 443 (atomic_size_qp & MLX5_ATOMIC_SIZE_QP_8BYTES) && 444 (atomic_req_8B_endianness_mode)) { 445 props->atomic_cap = IB_ATOMIC_HCA; 446 } else { 447 props->atomic_cap = IB_ATOMIC_NONE; 448 } 449} 450 451static int mlx5_query_system_image_guid(struct ib_device *ibdev, 452 __be64 *sys_image_guid) 453{ 454 struct mlx5_ib_dev *dev = to_mdev(ibdev); 455 struct mlx5_core_dev *mdev = dev->mdev; 456 u64 tmp; 457 int err; 458 459 switch (mlx5_get_vport_access_method(ibdev)) { 460 case MLX5_VPORT_ACCESS_METHOD_MAD: 461 return mlx5_query_mad_ifc_system_image_guid(ibdev, 462 sys_image_guid); 463 464 case MLX5_VPORT_ACCESS_METHOD_HCA: 465 err = mlx5_query_hca_vport_system_image_guid(mdev, &tmp); 466 break; 467 468 case MLX5_VPORT_ACCESS_METHOD_NIC: 469 err = mlx5_query_nic_vport_system_image_guid(mdev, &tmp); 470 break; 471 472 default: 473 return -EINVAL; 474 } 475 476 if (!err) 477 *sys_image_guid = cpu_to_be64(tmp); 478 479 return err; 480 481} 482 483static int mlx5_query_max_pkeys(struct ib_device *ibdev, 484 u16 *max_pkeys) 485{ 486 struct mlx5_ib_dev *dev = to_mdev(ibdev); 487 struct mlx5_core_dev *mdev = dev->mdev; 488 489 switch (mlx5_get_vport_access_method(ibdev)) { 490 case MLX5_VPORT_ACCESS_METHOD_MAD: 491 return 
mlx5_query_mad_ifc_max_pkeys(ibdev, max_pkeys); 492 493 case MLX5_VPORT_ACCESS_METHOD_HCA: 494 case MLX5_VPORT_ACCESS_METHOD_NIC: 495 *max_pkeys = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev, 496 pkey_table_size)); 497 return 0; 498 499 default: 500 return -EINVAL; 501 } 502} 503 504static int mlx5_query_vendor_id(struct ib_device *ibdev, 505 u32 *vendor_id) 506{ 507 struct mlx5_ib_dev *dev = to_mdev(ibdev); 508 509 switch (mlx5_get_vport_access_method(ibdev)) { 510 case MLX5_VPORT_ACCESS_METHOD_MAD: 511 return mlx5_query_mad_ifc_vendor_id(ibdev, vendor_id); 512 513 case MLX5_VPORT_ACCESS_METHOD_HCA: 514 case MLX5_VPORT_ACCESS_METHOD_NIC: 515 return mlx5_core_query_vendor_id(dev->mdev, vendor_id); 516 517 default: 518 return -EINVAL; 519 } 520} 521 522static int mlx5_query_node_guid(struct mlx5_ib_dev *dev, 523 __be64 *node_guid) 524{ 525 u64 tmp; 526 int err; 527 528 switch (mlx5_get_vport_access_method(&dev->ib_dev)) { 529 case MLX5_VPORT_ACCESS_METHOD_MAD: 530 return mlx5_query_mad_ifc_node_guid(dev, node_guid); 531 532 case MLX5_VPORT_ACCESS_METHOD_HCA: 533 err = mlx5_query_hca_vport_node_guid(dev->mdev, &tmp); 534 break; 535 536 case MLX5_VPORT_ACCESS_METHOD_NIC: 537 err = mlx5_query_nic_vport_node_guid(dev->mdev, &tmp); 538 break; 539 540 default: 541 return -EINVAL; 542 } 543 544 if (!err) 545 *node_guid = cpu_to_be64(tmp); 546 547 return err; 548} 549 550struct mlx5_reg_node_desc { 551 u8 desc[IB_DEVICE_NODE_DESC_MAX]; 552}; 553 554static int mlx5_query_node_desc(struct mlx5_ib_dev *dev, char *node_desc) 555{ 556 struct mlx5_reg_node_desc in; 557 558 if (mlx5_use_mad_ifc(dev)) 559 return mlx5_query_mad_ifc_node_desc(dev, node_desc); 560 561 memset(&in, 0, sizeof(in)); 562 563 return mlx5_core_access_reg(dev->mdev, &in, sizeof(in), node_desc, 564 sizeof(struct mlx5_reg_node_desc), 565 MLX5_REG_NODE_DESC, 0, 0); 566} 567 568static int mlx5_ib_query_device(struct ib_device *ibdev, 569 struct ib_device_attr *props, 570 struct ib_udata *uhw) 571{ 572 struct mlx5_ib_dev *dev = to_mdev(ibdev); 573 struct mlx5_core_dev *mdev = dev->mdev; 574 int err = -ENOMEM; 575 int max_rq_sg; 576 int max_sq_sg; 577 u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz); 578 struct mlx5_ib_query_device_resp resp = {}; 579 size_t resp_len; 580 u64 max_tso; 581 582 resp_len = sizeof(resp.comp_mask) + sizeof(resp.response_length); 583 if (uhw->outlen && uhw->outlen < resp_len) 584 return -EINVAL; 585 else 586 resp.response_length = resp_len; 587 588 if (uhw->inlen && !ib_is_udata_cleared(uhw, 0, uhw->inlen)) 589 return -EINVAL; 590 591 memset(props, 0, sizeof(*props)); 592 err = mlx5_query_system_image_guid(ibdev, 593 &props->sys_image_guid); 594 if (err) 595 return err; 596 597 err = mlx5_query_max_pkeys(ibdev, &props->max_pkeys); 598 if (err) 599 return err; 600 601 err = mlx5_query_vendor_id(ibdev, &props->vendor_id); 602 if (err) 603 return err; 604 605 props->fw_ver = ((u64)fw_rev_maj(dev->mdev) << 32) | 606 (fw_rev_min(dev->mdev) << 16) | 607 fw_rev_sub(dev->mdev); 608 props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT | 609 IB_DEVICE_PORT_ACTIVE_EVENT | 610 IB_DEVICE_SYS_IMAGE_GUID | 611 IB_DEVICE_RC_RNR_NAK_GEN; 612 613 if (MLX5_CAP_GEN(mdev, pkv)) 614 props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR; 615 if (MLX5_CAP_GEN(mdev, qkv)) 616 props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR; 617 if (MLX5_CAP_GEN(mdev, apm)) 618 props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG; 619 if (MLX5_CAP_GEN(mdev, xrc)) 620 props->device_cap_flags |= IB_DEVICE_XRC; 621 if (MLX5_CAP_GEN(mdev, imaicl)) { 622 
props->device_cap_flags |= IB_DEVICE_MEM_WINDOW | 623 IB_DEVICE_MEM_WINDOW_TYPE_2B; 624 props->max_mw = 1 << MLX5_CAP_GEN(mdev, log_max_mkey); 625 /* We support 'Gappy' memory registration too */ 626 props->device_cap_flags |= IB_DEVICE_SG_GAPS_REG; 627 } 628 props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS; 629 if (MLX5_CAP_GEN(mdev, sho)) { 630 props->device_cap_flags |= IB_DEVICE_SIGNATURE_HANDOVER; 631 /* At this stage no support for signature handover */ 632 props->sig_prot_cap = IB_PROT_T10DIF_TYPE_1 | 633 IB_PROT_T10DIF_TYPE_2 | 634 IB_PROT_T10DIF_TYPE_3; 635 props->sig_guard_cap = IB_GUARD_T10DIF_CRC | 636 IB_GUARD_T10DIF_CSUM; 637 } 638 if (MLX5_CAP_GEN(mdev, block_lb_mc)) 639 props->device_cap_flags |= IB_DEVICE_BLOCK_MULTICAST_LOOPBACK; 640 641 if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads)) { 642 if (MLX5_CAP_ETH(mdev, csum_cap)) 643 props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM; 644 645 if (field_avail(typeof(resp), tso_caps, uhw->outlen)) { 646 max_tso = MLX5_CAP_ETH(mdev, max_lso_cap); 647 if (max_tso) { 648 resp.tso_caps.max_tso = 1 << max_tso; 649 resp.tso_caps.supported_qpts |= 650 1 << IB_QPT_RAW_PACKET; 651 resp.response_length += sizeof(resp.tso_caps); 652 } 653 } 654 655 if (field_avail(typeof(resp), rss_caps, uhw->outlen)) { 656 resp.rss_caps.rx_hash_function = 657 MLX5_RX_HASH_FUNC_TOEPLITZ; 658 resp.rss_caps.rx_hash_fields_mask = 659 MLX5_RX_HASH_SRC_IPV4 | 660 MLX5_RX_HASH_DST_IPV4 | 661 MLX5_RX_HASH_SRC_IPV6 | 662 MLX5_RX_HASH_DST_IPV6 | 663 MLX5_RX_HASH_SRC_PORT_TCP | 664 MLX5_RX_HASH_DST_PORT_TCP | 665 MLX5_RX_HASH_SRC_PORT_UDP | 666 MLX5_RX_HASH_DST_PORT_UDP; 667 resp.response_length += sizeof(resp.rss_caps); 668 } 669 } else { 670 if (field_avail(typeof(resp), tso_caps, uhw->outlen)) 671 resp.response_length += sizeof(resp.tso_caps); 672 if (field_avail(typeof(resp), rss_caps, uhw->outlen)) 673 resp.response_length += sizeof(resp.rss_caps); 674 } 675 676 if (MLX5_CAP_GEN(mdev, ipoib_ipoib_offloads)) { 677 props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM; 678 props->device_cap_flags |= IB_DEVICE_UD_TSO; 679 } 680 681 if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) && 682 MLX5_CAP_ETH(dev->mdev, scatter_fcs)) 683 props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS; 684 685 if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS)) 686 props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING; 687 688 props->vendor_part_id = mdev->pdev->device; 689 props->hw_ver = mdev->pdev->revision; 690 691 props->max_mr_size = ~0ull; 692 props->page_size_cap = ~(min_page_size - 1); 693 props->max_qp = 1 << MLX5_CAP_GEN(mdev, log_max_qp); 694 props->max_qp_wr = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz); 695 max_rq_sg = MLX5_CAP_GEN(mdev, max_wqe_sz_rq) / 696 sizeof(struct mlx5_wqe_data_seg); 697 max_sq_sg = (MLX5_CAP_GEN(mdev, max_wqe_sz_sq) - 698 sizeof(struct mlx5_wqe_ctrl_seg)) / 699 sizeof(struct mlx5_wqe_data_seg); 700 props->max_sge = min(max_rq_sg, max_sq_sg); 701 props->max_sge_rd = MLX5_MAX_SGE_RD; 702 props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq); 703 props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1; 704 props->max_mr = 1 << MLX5_CAP_GEN(mdev, log_max_mkey); 705 props->max_pd = 1 << MLX5_CAP_GEN(mdev, log_max_pd); 706 props->max_qp_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_req_qp); 707 props->max_qp_init_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_res_qp); 708 props->max_srq = 1 << MLX5_CAP_GEN(mdev, log_max_srq); 709 props->max_srq_wr = (1 << MLX5_CAP_GEN(mdev, log_max_srq_sz)) - 1; 710 props->local_ca_ack_delay = 
MLX5_CAP_GEN(mdev, local_ca_ack_delay); 711 props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp; 712 props->max_srq_sge = max_rq_sg - 1; 713 props->max_fast_reg_page_list_len = 714 1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size); 715 get_atomic_caps(dev, props); 716 props->masked_atomic_cap = IB_ATOMIC_NONE; 717 props->max_mcast_grp = 1 << MLX5_CAP_GEN(mdev, log_max_mcg); 718 props->max_mcast_qp_attach = MLX5_CAP_GEN(mdev, max_qp_mcg); 719 props->max_total_mcast_qp_attach = props->max_mcast_qp_attach * 720 props->max_mcast_grp; 721 props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */ 722 props->hca_core_clock = MLX5_CAP_GEN(mdev, device_frequency_khz); 723 props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL; 724 725#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING 726 if (MLX5_CAP_GEN(mdev, pg)) 727 props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING; 728 props->odp_caps = dev->odp_caps; 729#endif 730 731 if (MLX5_CAP_GEN(mdev, cd)) 732 props->device_cap_flags |= IB_DEVICE_CROSS_CHANNEL; 733 734 if (!mlx5_core_is_pf(mdev)) 735 props->device_cap_flags |= IB_DEVICE_VIRTUAL_FUNCTION; 736 737 if (mlx5_ib_port_link_layer(ibdev, 1) == 738 IB_LINK_LAYER_ETHERNET) { 739 props->rss_caps.max_rwq_indirection_tables = 740 1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt); 741 props->rss_caps.max_rwq_indirection_table_size = 742 1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt_size); 743 props->rss_caps.supported_qpts = 1 << IB_QPT_RAW_PACKET; 744 props->max_wq_type_rq = 745 1 << MLX5_CAP_GEN(dev->mdev, log_max_rq); 746 } 747 748 if (uhw->outlen) { 749 err = ib_copy_to_udata(uhw, &resp, resp.response_length); 750 751 if (err) 752 return err; 753 } 754 755 return 0; 756} 757 758enum mlx5_ib_width { 759 MLX5_IB_WIDTH_1X = 1 << 0, 760 MLX5_IB_WIDTH_2X = 1 << 1, 761 MLX5_IB_WIDTH_4X = 1 << 2, 762 MLX5_IB_WIDTH_8X = 1 << 3, 763 MLX5_IB_WIDTH_12X = 1 << 4 764}; 765 766static int translate_active_width(struct ib_device *ibdev, u8 active_width, 767 u8 *ib_width) 768{ 769 struct mlx5_ib_dev *dev = to_mdev(ibdev); 770 int err = 0; 771 772 if (active_width & MLX5_IB_WIDTH_1X) { 773 *ib_width = IB_WIDTH_1X; 774 } else if (active_width & MLX5_IB_WIDTH_2X) { 775 mlx5_ib_dbg(dev, "active_width %d is not supported by IB spec\n", 776 (int)active_width); 777 err = -EINVAL; 778 } else if (active_width & MLX5_IB_WIDTH_4X) { 779 *ib_width = IB_WIDTH_4X; 780 } else if (active_width & MLX5_IB_WIDTH_8X) { 781 *ib_width = IB_WIDTH_8X; 782 } else if (active_width & MLX5_IB_WIDTH_12X) { 783 *ib_width = IB_WIDTH_12X; 784 } else { 785 mlx5_ib_dbg(dev, "Invalid active_width %d\n", 786 (int)active_width); 787 err = -EINVAL; 788 } 789 790 return err; 791} 792 793enum ib_max_vl_num { 794 __IB_MAX_VL_0 = 1, 795 __IB_MAX_VL_0_1 = 2, 796 __IB_MAX_VL_0_3 = 3, 797 __IB_MAX_VL_0_7 = 4, 798 __IB_MAX_VL_0_14 = 5, 799}; 800 801enum mlx5_vl_hw_cap { 802 MLX5_VL_HW_0 = 1, 803 MLX5_VL_HW_0_1 = 2, 804 MLX5_VL_HW_0_2 = 3, 805 MLX5_VL_HW_0_3 = 4, 806 MLX5_VL_HW_0_4 = 5, 807 MLX5_VL_HW_0_5 = 6, 808 MLX5_VL_HW_0_6 = 7, 809 MLX5_VL_HW_0_7 = 8, 810 MLX5_VL_HW_0_14 = 15 811}; 812 813static int translate_max_vl_num(struct ib_device *ibdev, u8 vl_hw_cap, 814 u8 *max_vl_num) 815{ 816 switch (vl_hw_cap) { 817 case MLX5_VL_HW_0: 818 *max_vl_num = __IB_MAX_VL_0; 819 break; 820 case MLX5_VL_HW_0_1: 821 *max_vl_num = __IB_MAX_VL_0_1; 822 break; 823 case MLX5_VL_HW_0_3: 824 *max_vl_num = __IB_MAX_VL_0_3; 825 break; 826 case MLX5_VL_HW_0_7: 827 *max_vl_num = __IB_MAX_VL_0_7; 828 break; 829 case MLX5_VL_HW_0_14: 830 *max_vl_num = __IB_MAX_VL_0_14; 831 break; 832 833 
default: 834 return -EINVAL; 835 } 836 837 return 0; 838} 839 840static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port, 841 struct ib_port_attr *props) 842{ 843 struct mlx5_ib_dev *dev = to_mdev(ibdev); 844 struct mlx5_core_dev *mdev = dev->mdev; 845 u32 *rep; 846 int replen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out); 847 struct mlx5_ptys_reg *ptys; 848 struct mlx5_pmtu_reg *pmtu; 849 struct mlx5_pvlc_reg pvlc; 850 void *ctx; 851 int err; 852 853 rep = mlx5_vzalloc(replen); 854 ptys = kzalloc(sizeof(*ptys), GFP_KERNEL); 855 pmtu = kzalloc(sizeof(*pmtu), GFP_KERNEL); 856 if (!rep || !ptys || !pmtu) { 857 err = -ENOMEM; 858 goto out; 859 } 860 861 memset(props, 0, sizeof(*props)); 862 863 err = mlx5_query_hca_vport_context(mdev, port, 0, rep, replen); 864 if (err) 865 goto out; 866 867 ctx = MLX5_ADDR_OF(query_hca_vport_context_out, rep, hca_vport_context); 868 869 props->lid = MLX5_GET(hca_vport_context, ctx, lid); 870 props->lmc = MLX5_GET(hca_vport_context, ctx, lmc); 871 props->sm_lid = MLX5_GET(hca_vport_context, ctx, sm_lid); 872 props->sm_sl = MLX5_GET(hca_vport_context, ctx, sm_sl); 873 props->state = MLX5_GET(hca_vport_context, ctx, vport_state); 874 props->phys_state = MLX5_GET(hca_vport_context, ctx, 875 port_physical_state); 876 props->port_cap_flags = MLX5_GET(hca_vport_context, ctx, cap_mask1); 877 props->gid_tbl_len = mlx5_get_gid_table_len(MLX5_CAP_GEN(mdev, gid_table_size)); 878 props->max_msg_sz = 1 << MLX5_CAP_GEN(mdev, log_max_msg); 879 props->pkey_tbl_len = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev, pkey_table_size)); 880 props->bad_pkey_cntr = MLX5_GET(hca_vport_context, ctx, 881 pkey_violation_counter); 882 props->qkey_viol_cntr = MLX5_GET(hca_vport_context, ctx, 883 qkey_violation_counter); 884 props->subnet_timeout = MLX5_GET(hca_vport_context, ctx, 885 subnet_timeout); 886 props->init_type_reply = MLX5_GET(hca_vport_context, ctx, 887 init_type_reply); 888 props->grh_required = MLX5_GET(hca_vport_context, ctx, grh_required); 889 890 ptys->proto_mask |= MLX5_PTYS_IB; 891 ptys->local_port = port; 892 err = mlx5_core_access_ptys(mdev, ptys, 0); 893 if (err) 894 goto out; 895 896 err = translate_active_width(ibdev, ptys->ib_link_width_oper, 897 &props->active_width); 898 if (err) 899 goto out; 900 901 props->active_speed = (u8)ptys->ib_proto_oper; 902 903 pmtu->local_port = port; 904 err = mlx5_core_access_pmtu(mdev, pmtu, 0); 905 if (err) 906 goto out; 907 908 props->max_mtu = pmtu->max_mtu; 909 props->active_mtu = pmtu->oper_mtu; 910 911 memset(&pvlc, 0, sizeof(pvlc)); 912 pvlc.local_port = port; 913 err = mlx5_core_access_pvlc(mdev, &pvlc, 0); 914 if (err) 915 goto out; 916 917 err = translate_max_vl_num(ibdev, pvlc.vl_hw_cap, 918 &props->max_vl_num); 919out: 920 kvfree(rep); 921 kfree(ptys); 922 kfree(pmtu); 923 return err; 924} 925 926int mlx5_ib_query_port(struct ib_device *ibdev, u8 port, 927 struct ib_port_attr *props) 928{ 929 switch (mlx5_get_vport_access_method(ibdev)) { 930 case MLX5_VPORT_ACCESS_METHOD_MAD: 931 return mlx5_query_mad_ifc_port(ibdev, port, props); 932 933 case MLX5_VPORT_ACCESS_METHOD_HCA: 934 return mlx5_query_hca_port(ibdev, port, props); 935 936 case MLX5_VPORT_ACCESS_METHOD_NIC: 937 return mlx5_query_port_roce(ibdev, port, props); 938 939 default: 940 return -EINVAL; 941 } 942} 943 944static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index, 945 union ib_gid *gid) 946{ 947 struct mlx5_ib_dev *dev = to_mdev(ibdev); 948 struct mlx5_core_dev *mdev = dev->mdev; 949 950 switch (mlx5_get_vport_access_method(ibdev)) { 
951 case MLX5_VPORT_ACCESS_METHOD_MAD: 952 return mlx5_query_mad_ifc_gids(ibdev, port, index, gid); 953 954 case MLX5_VPORT_ACCESS_METHOD_HCA: 955 return mlx5_query_hca_vport_gid(mdev, port, 0, index, gid); 956 957 default: 958 return -EINVAL; 959 } 960 961} 962 963static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, 964 u16 *pkey) 965{ 966 struct mlx5_ib_dev *dev = to_mdev(ibdev); 967 struct mlx5_core_dev *mdev = dev->mdev; 968 969 switch (mlx5_get_vport_access_method(ibdev)) { 970 case MLX5_VPORT_ACCESS_METHOD_MAD: 971 return mlx5_query_mad_ifc_pkey(ibdev, port, index, pkey); 972 973 case MLX5_VPORT_ACCESS_METHOD_HCA: 974 case MLX5_VPORT_ACCESS_METHOD_NIC: 975 return mlx5_query_hca_vport_pkey(mdev, 0, port, 0, index, 976 pkey); 977 default: 978 return -EINVAL; 979 } 980} 981 982static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask, 983 struct ib_device_modify *props) 984{ 985 struct mlx5_ib_dev *dev = to_mdev(ibdev); 986 struct mlx5_reg_node_desc in; 987 struct mlx5_reg_node_desc out; 988 int err; 989 990 if (mask & ~IB_DEVICE_MODIFY_NODE_DESC) 991 return -EOPNOTSUPP; 992 993 if (!(mask & IB_DEVICE_MODIFY_NODE_DESC)) 994 return 0; 995 996 /* 997 * If possible, pass node desc to FW, so it can generate 998 * a 144 trap. If cmd fails, just ignore. 999 */ 1000 memcpy(&in, props->node_desc, IB_DEVICE_NODE_DESC_MAX); 1001 err = mlx5_core_access_reg(dev->mdev, &in, sizeof(in), &out, 1002 sizeof(out), MLX5_REG_NODE_DESC, 0, 1); 1003 if (err) 1004 return err; 1005 1006 memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX); 1007 1008 return err; 1009} 1010 1011static int mlx5_ib_modify_port(struct ib_device *ibdev, u8 port, int mask, 1012 struct ib_port_modify *props) 1013{ 1014 struct mlx5_ib_dev *dev = to_mdev(ibdev); 1015 struct ib_port_attr attr; 1016 u32 tmp; 1017 int err; 1018 1019 mutex_lock(&dev->cap_mask_mutex); 1020 1021 err = mlx5_ib_query_port(ibdev, port, &attr); 1022 if (err) 1023 goto out; 1024 1025 tmp = (attr.port_cap_flags | props->set_port_cap_mask) & 1026 ~props->clr_port_cap_mask; 1027 1028 err = mlx5_set_port_caps(dev->mdev, port, tmp); 1029 1030out: 1031 mutex_unlock(&dev->cap_mask_mutex); 1032 return err; 1033} 1034 1035static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, 1036 struct ib_udata *udata) 1037{ 1038 struct mlx5_ib_dev *dev = to_mdev(ibdev); 1039 struct mlx5_ib_alloc_ucontext_req_v2 req = {}; 1040 struct mlx5_ib_alloc_ucontext_resp resp = {}; 1041 struct mlx5_ib_ucontext *context; 1042 struct mlx5_uuar_info *uuari; 1043 struct mlx5_uar *uars; 1044 int gross_uuars; 1045 int num_uars; 1046 int ver; 1047 int uuarn; 1048 int err; 1049 int i; 1050 size_t reqlen; 1051 size_t min_req_v2 = offsetof(struct mlx5_ib_alloc_ucontext_req_v2, 1052 max_cqe_version); 1053 1054 if (!dev->ib_active) 1055 return ERR_PTR(-EAGAIN); 1056 1057 if (udata->inlen < sizeof(struct ib_uverbs_cmd_hdr)) 1058 return ERR_PTR(-EINVAL); 1059 1060 reqlen = udata->inlen - sizeof(struct ib_uverbs_cmd_hdr); 1061 if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req)) 1062 ver = 0; 1063 else if (reqlen >= min_req_v2) 1064 ver = 2; 1065 else 1066 return ERR_PTR(-EINVAL); 1067 1068 err = ib_copy_from_udata(&req, udata, min(reqlen, sizeof(req))); 1069 if (err) 1070 return ERR_PTR(err); 1071 1072 if (req.flags) 1073 return ERR_PTR(-EINVAL); 1074 1075 if (req.total_num_uuars > MLX5_MAX_UUARS) 1076 return ERR_PTR(-ENOMEM); 1077 1078 if (req.total_num_uuars == 0) 1079 return ERR_PTR(-EINVAL); 1080 1081 if (req.comp_mask || req.reserved0 || 
req.reserved1 || req.reserved2) 1082 return ERR_PTR(-EOPNOTSUPP); 1083 1084 if (reqlen > sizeof(req) && 1085 !ib_is_udata_cleared(udata, sizeof(req), 1086 reqlen - sizeof(req))) 1087 return ERR_PTR(-EOPNOTSUPP); 1088 1089 req.total_num_uuars = ALIGN(req.total_num_uuars, 1090 MLX5_NON_FP_BF_REGS_PER_PAGE); 1091 if (req.num_low_latency_uuars > req.total_num_uuars - 1) 1092 return ERR_PTR(-EINVAL); 1093 1094 num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE; 1095 gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE; 1096 resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp); 1097 if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf)) 1098 resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size); 1099 resp.cache_line_size = cache_line_size(); 1100 resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq); 1101 resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq); 1102 resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz); 1103 resp.max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz); 1104 resp.max_srq_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz); 1105 resp.cqe_version = min_t(__u8, 1106 (__u8)MLX5_CAP_GEN(dev->mdev, cqe_version), 1107 req.max_cqe_version); 1108 resp.response_length = min(offsetof(typeof(resp), response_length) + 1109 sizeof(resp.response_length), udata->outlen); 1110 1111 context = kzalloc(sizeof(*context), GFP_KERNEL); 1112 if (!context) 1113 return ERR_PTR(-ENOMEM); 1114 1115 uuari = &context->uuari; 1116 mutex_init(&uuari->lock); 1117 uars = kcalloc(num_uars, sizeof(*uars), GFP_KERNEL); 1118 if (!uars) { 1119 err = -ENOMEM; 1120 goto out_ctx; 1121 } 1122 1123 uuari->bitmap = kcalloc(BITS_TO_LONGS(gross_uuars), 1124 sizeof(*uuari->bitmap), 1125 GFP_KERNEL); 1126 if (!uuari->bitmap) { 1127 err = -ENOMEM; 1128 goto out_uar_ctx; 1129 } 1130 /* 1131 * clear all fast path uuars 1132 */ 1133 for (i = 0; i < gross_uuars; i++) { 1134 uuarn = i & 3; 1135 if (uuarn == 2 || uuarn == 3) 1136 set_bit(i, uuari->bitmap); 1137 } 1138 1139 uuari->count = kcalloc(gross_uuars, sizeof(*uuari->count), GFP_KERNEL); 1140 if (!uuari->count) { 1141 err = -ENOMEM; 1142 goto out_bitmap; 1143 } 1144 1145 for (i = 0; i < num_uars; i++) { 1146 err = mlx5_cmd_alloc_uar(dev->mdev, &uars[i].index); 1147 if (err) 1148 goto out_count; 1149 } 1150 1151#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING 1152 context->ibucontext.invalidate_range = &mlx5_ib_invalidate_range; 1153#endif 1154 1155 if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain)) { 1156 err = mlx5_alloc_transport_domain(dev->mdev, 1157 &context->tdn); 1158 if (err) 1159 goto out_uars; 1160 } 1161 1162 INIT_LIST_HEAD(&context->vma_private_list); 1163 INIT_LIST_HEAD(&context->db_page_list); 1164 mutex_init(&context->db_page_mutex); 1165 1166 resp.tot_uuars = req.total_num_uuars; 1167 resp.num_ports = MLX5_CAP_GEN(dev->mdev, num_ports); 1168 1169 if (field_avail(typeof(resp), cqe_version, udata->outlen)) 1170 resp.response_length += sizeof(resp.cqe_version); 1171 1172 if (field_avail(typeof(resp), cmds_supp_uhw, udata->outlen)) { 1173 resp.cmds_supp_uhw |= MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE | 1174 MLX5_USER_CMDS_SUPP_UHW_CREATE_AH; 1175 resp.response_length += sizeof(resp.cmds_supp_uhw); 1176 } 1177 1178 /* 1179 * We don't want to expose information from the PCI bar that is located 1180 * after 4096 bytes, so if the arch only supports larger pages, let's 1181 * pretend we don't support reading the HCA's core clock. This is also 1182 * forced by mmap function. 
1183 */ 1184 if (PAGE_SIZE <= 4096 && 1185 field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) { 1186 resp.comp_mask |= 1187 MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET; 1188 resp.hca_core_clock_offset = 1189 offsetof(struct mlx5_init_seg, internal_timer_h) % 1190 PAGE_SIZE; 1191 resp.response_length += sizeof(resp.hca_core_clock_offset) + 1192 sizeof(resp.reserved2); 1193 } 1194 1195 err = ib_copy_to_udata(udata, &resp, resp.response_length); 1196 if (err) 1197 goto out_td; 1198 1199 uuari->ver = ver; 1200 uuari->num_low_latency_uuars = req.num_low_latency_uuars; 1201 uuari->uars = uars; 1202 uuari->num_uars = num_uars; 1203 context->cqe_version = resp.cqe_version; 1204 1205 return &context->ibucontext; 1206 1207out_td: 1208 if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain)) 1209 mlx5_dealloc_transport_domain(dev->mdev, context->tdn); 1210 1211out_uars: 1212 for (i--; i >= 0; i--) 1213 mlx5_cmd_free_uar(dev->mdev, uars[i].index); 1214out_count: 1215 kfree(uuari->count); 1216 1217out_bitmap: 1218 kfree(uuari->bitmap); 1219 1220out_uar_ctx: 1221 kfree(uars); 1222 1223out_ctx: 1224 kfree(context); 1225 return ERR_PTR(err); 1226} 1227 1228static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext) 1229{ 1230 struct mlx5_ib_ucontext *context = to_mucontext(ibcontext); 1231 struct mlx5_ib_dev *dev = to_mdev(ibcontext->device); 1232 struct mlx5_uuar_info *uuari = &context->uuari; 1233 int i; 1234 1235 if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain)) 1236 mlx5_dealloc_transport_domain(dev->mdev, context->tdn); 1237 1238 for (i = 0; i < uuari->num_uars; i++) { 1239 if (mlx5_cmd_free_uar(dev->mdev, uuari->uars[i].index)) 1240 mlx5_ib_warn(dev, "failed to free UAR 0x%x\n", uuari->uars[i].index); 1241 } 1242 1243 kfree(uuari->count); 1244 kfree(uuari->bitmap); 1245 kfree(uuari->uars); 1246 kfree(context); 1247 1248 return 0; 1249} 1250 1251static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev, int index) 1252{ 1253 return (pci_resource_start(dev->mdev->pdev, 0) >> PAGE_SHIFT) + index; 1254} 1255 1256static int get_command(unsigned long offset) 1257{ 1258 return (offset >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK; 1259} 1260 1261static int get_arg(unsigned long offset) 1262{ 1263 return offset & ((1 << MLX5_IB_MMAP_CMD_SHIFT) - 1); 1264} 1265 1266static int get_index(unsigned long offset) 1267{ 1268 return get_arg(offset); 1269} 1270 1271static void mlx5_ib_vma_open(struct vm_area_struct *area) 1272{ 1273 /* vma_open is called when a new VMA is created on top of our VMA. This 1274 * is done through either mremap flow or split_vma (usually due to 1275 * mlock, madvise, munmap, etc.) We do not support a clone of the VMA, 1276 * as this VMA is strongly hardware related. Therefore we set the 1277 * vm_ops of the newly created/cloned VMA to NULL, to prevent it from 1278 * calling us again and trying to do incorrect actions. We assume that 1279 * the original VMA size is exactly a single page, and therefore all 1280 * "splitting" operation will not happen to it. 1281 */ 1282 area->vm_ops = NULL; 1283} 1284 1285static void mlx5_ib_vma_close(struct vm_area_struct *area) 1286{ 1287 struct mlx5_ib_vma_private_data *mlx5_ib_vma_priv_data; 1288 1289 /* It's guaranteed that all VMAs opened on a FD are closed before the 1290 * file itself is closed, therefore no sync is needed with the regular 1291 * closing flow. (e.g. mlx5 ib_dealloc_ucontext) 1292 * However need a sync with accessing the vma as part of 1293 * mlx5_ib_disassociate_ucontext. 
1294 * The close operation is usually called under mm->mmap_sem except when 1295 * process is exiting. 1296 * The exiting case is handled explicitly as part of 1297 * mlx5_ib_disassociate_ucontext. 1298 */ 1299 mlx5_ib_vma_priv_data = (struct mlx5_ib_vma_private_data *)area->vm_private_data; 1300 1301 /* setting the vma context pointer to null in the mlx5_ib driver's 1302 * private data, to protect a race condition in 1303 * mlx5_ib_disassociate_ucontext(). 1304 */ 1305 mlx5_ib_vma_priv_data->vma = NULL; 1306 list_del(&mlx5_ib_vma_priv_data->list); 1307 kfree(mlx5_ib_vma_priv_data); 1308} 1309 1310static const struct vm_operations_struct mlx5_ib_vm_ops = { 1311 .open = mlx5_ib_vma_open, 1312 .close = mlx5_ib_vma_close 1313}; 1314 1315static int mlx5_ib_set_vma_data(struct vm_area_struct *vma, 1316 struct mlx5_ib_ucontext *ctx) 1317{ 1318 struct mlx5_ib_vma_private_data *vma_prv; 1319 struct list_head *vma_head = &ctx->vma_private_list; 1320 1321 vma_prv = kzalloc(sizeof(*vma_prv), GFP_KERNEL); 1322 if (!vma_prv) 1323 return -ENOMEM; 1324 1325 vma_prv->vma = vma; 1326 vma->vm_private_data = vma_prv; 1327 vma->vm_ops = &mlx5_ib_vm_ops; 1328 1329 list_add(&vma_prv->list, vma_head); 1330 1331 return 0; 1332} 1333 1334static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd) 1335{ 1336 switch (cmd) { 1337 case MLX5_IB_MMAP_WC_PAGE: 1338 return "WC"; 1339 case MLX5_IB_MMAP_REGULAR_PAGE: 1340 return "best effort WC"; 1341 case MLX5_IB_MMAP_NC_PAGE: 1342 return "NC"; 1343 default: 1344 return NULL; 1345 } 1346} 1347 1348static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd, 1349 struct vm_area_struct *vma, 1350 struct mlx5_ib_ucontext *context) 1351{ 1352 struct mlx5_uuar_info *uuari = &context->uuari; 1353 int err; 1354 unsigned long idx; 1355 phys_addr_t pfn, pa; 1356 pgprot_t prot; 1357 1358 switch (cmd) { 1359 case MLX5_IB_MMAP_WC_PAGE: 1360/* Some architectures don't support WC memory */ 1361#if defined(CONFIG_X86) 1362 if (!pat_enabled()) 1363 return -EPERM; 1364#elif !(defined(CONFIG_PPC) || (defined(CONFIG_ARM) && defined(CONFIG_MMU))) 1365 return -EPERM; 1366#endif 1367 /* fall through */ 1368 case MLX5_IB_MMAP_REGULAR_PAGE: 1369 /* For MLX5_IB_MMAP_REGULAR_PAGE do the best effort to get WC */ 1370 prot = pgprot_writecombine(vma->vm_page_prot); 1371 break; 1372 case MLX5_IB_MMAP_NC_PAGE: 1373 prot = pgprot_noncached(vma->vm_page_prot); 1374 break; 1375 default: 1376 return -EINVAL; 1377 } 1378 1379 if (vma->vm_end - vma->vm_start != PAGE_SIZE) 1380 return -EINVAL; 1381 1382 idx = get_index(vma->vm_pgoff); 1383 if (idx >= uuari->num_uars) 1384 return -EINVAL; 1385 1386 pfn = uar_index2pfn(dev, uuari->uars[idx].index); 1387 mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn); 1388 1389 vma->vm_page_prot = prot; 1390 err = io_remap_pfn_range(vma, vma->vm_start, pfn, 1391 PAGE_SIZE, vma->vm_page_prot); 1392 if (err) { 1393 mlx5_ib_err(dev, "io_remap_pfn_range failed with error=%d, vm_start=0x%llx, pfn=%pa, mmap_cmd=%s\n", 1394 err, (unsigned long long)vma->vm_start, &pfn, mmap_cmd2str(cmd)); 1395 return -EAGAIN; 1396 } 1397 1398 pa = pfn << PAGE_SHIFT; 1399 mlx5_ib_dbg(dev, "mapped %s at 0x%llx, PA %pa\n", mmap_cmd2str(cmd), 1400 (unsigned long long)vma->vm_start, &pa); 1401 1402 return mlx5_ib_set_vma_data(vma, context); 1403} 1404 1405static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma) 1406{ 1407 struct mlx5_ib_ucontext *context = to_mucontext(ibcontext); 1408 struct mlx5_ib_dev *dev = to_mdev(ibcontext->device); 1409 unsigned long 
command; 1410 phys_addr_t pfn; 1411 1412 command = get_command(vma->vm_pgoff); 1413 switch (command) { 1414 case MLX5_IB_MMAP_WC_PAGE: 1415 case MLX5_IB_MMAP_NC_PAGE: 1416 case MLX5_IB_MMAP_REGULAR_PAGE: 1417 return uar_mmap(dev, command, vma, context); 1418 1419 case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES: 1420 return -ENOSYS; 1421 1422 case MLX5_IB_MMAP_CORE_CLOCK: 1423 if (vma->vm_end - vma->vm_start != PAGE_SIZE) 1424 return -EINVAL; 1425 1426 if (vma->vm_flags & VM_WRITE) 1427 return -EPERM; 1428 1429 /* Don't expose to user-space information it shouldn't have */ 1430 if (PAGE_SIZE > 4096) 1431 return -EOPNOTSUPP; 1432 1433 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 1434 pfn = (dev->mdev->iseg_base + 1435 offsetof(struct mlx5_init_seg, internal_timer_h)) >> 1436 PAGE_SHIFT; 1437 if (io_remap_pfn_range(vma, vma->vm_start, pfn, 1438 PAGE_SIZE, vma->vm_page_prot)) 1439 return -EAGAIN; 1440 1441 mlx5_ib_dbg(dev, "mapped internal timer at 0x%llx, PA 0x%llx\n", 1442 (unsigned long long)vma->vm_start, 1443 (unsigned long long)pfn << PAGE_SHIFT); 1444 break; 1445 1446 default: 1447 return -EINVAL; 1448 } 1449 1450 return 0; 1451} 1452 1453static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev, 1454 struct ib_ucontext *context, 1455 struct ib_udata *udata) 1456{ 1457 struct mlx5_ib_alloc_pd_resp resp; 1458 struct mlx5_ib_pd *pd; 1459 int err; 1460 1461 pd = kmalloc(sizeof(*pd), GFP_KERNEL); 1462 if (!pd) 1463 return ERR_PTR(-ENOMEM); 1464 1465 err = mlx5_core_alloc_pd(to_mdev(ibdev)->mdev, &pd->pdn); 1466 if (err) { 1467 kfree(pd); 1468 return ERR_PTR(err); 1469 } 1470 1471 if (context) { 1472 resp.pdn = pd->pdn; 1473 if (ib_copy_to_udata(udata, &resp, sizeof(resp))) { 1474 mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn); 1475 kfree(pd); 1476 return ERR_PTR(-EFAULT); 1477 } 1478 } 1479 1480 return &pd->ibpd; 1481} 1482 1483static int mlx5_ib_dealloc_pd(struct ib_pd *pd) 1484{ 1485 struct mlx5_ib_dev *mdev = to_mdev(pd->device); 1486 struct mlx5_ib_pd *mpd = to_mpd(pd); 1487 1488 mlx5_core_dealloc_pd(mdev->mdev, mpd->pdn); 1489 kfree(mpd); 1490 1491 return 0; 1492} 1493 1494enum { 1495 MATCH_CRITERIA_ENABLE_OUTER_BIT, 1496 MATCH_CRITERIA_ENABLE_MISC_BIT, 1497 MATCH_CRITERIA_ENABLE_INNER_BIT 1498}; 1499 1500#define HEADER_IS_ZERO(match_criteria, headers) \ 1501 !(memchr_inv(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \ 1502 0, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \ 1503 1504static u8 get_match_criteria_enable(u32 *match_criteria) 1505{ 1506 u8 match_criteria_enable; 1507 1508 match_criteria_enable = 1509 (!HEADER_IS_ZERO(match_criteria, outer_headers)) << 1510 MATCH_CRITERIA_ENABLE_OUTER_BIT; 1511 match_criteria_enable |= 1512 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) << 1513 MATCH_CRITERIA_ENABLE_MISC_BIT; 1514 match_criteria_enable |= 1515 (!HEADER_IS_ZERO(match_criteria, inner_headers)) << 1516 MATCH_CRITERIA_ENABLE_INNER_BIT; 1517 1518 return match_criteria_enable; 1519} 1520 1521static void set_proto(void *outer_c, void *outer_v, u8 mask, u8 val) 1522{ 1523 MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_protocol, mask); 1524 MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_protocol, val); 1525} 1526 1527static void set_tos(void *outer_c, void *outer_v, u8 mask, u8 val) 1528{ 1529 MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_ecn, mask); 1530 MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_ecn, val); 1531 MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_dscp, mask >> 2); 1532 MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_dscp, val >> 2); 1533} 1534 1535#define 
LAST_ETH_FIELD vlan_tag 1536#define LAST_IB_FIELD sl 1537#define LAST_IPV4_FIELD tos 1538#define LAST_IPV6_FIELD traffic_class 1539#define LAST_TCP_UDP_FIELD src_port 1540 1541/* Field is the last supported field */ 1542#define FIELDS_NOT_SUPPORTED(filter, field)\ 1543 memchr_inv((void *)&filter.field +\ 1544 sizeof(filter.field), 0,\ 1545 sizeof(filter) -\ 1546 offsetof(typeof(filter), field) -\ 1547 sizeof(filter.field)) 1548 1549static int parse_flow_attr(u32 *match_c, u32 *match_v, 1550 const union ib_flow_spec *ib_spec) 1551{ 1552 void *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_c, 1553 outer_headers); 1554 void *outer_headers_v = MLX5_ADDR_OF(fte_match_param, match_v, 1555 outer_headers); 1556 void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c, 1557 misc_parameters); 1558 void *misc_params_v = MLX5_ADDR_OF(fte_match_param, match_v, 1559 misc_parameters); 1560 1561 switch (ib_spec->type) { 1562 case IB_FLOW_SPEC_ETH: 1563 if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD)) 1564 return -ENOTSUPP; 1565 1566 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c, 1567 dmac_47_16), 1568 ib_spec->eth.mask.dst_mac); 1569 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v, 1570 dmac_47_16), 1571 ib_spec->eth.val.dst_mac); 1572 1573 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c, 1574 smac_47_16), 1575 ib_spec->eth.mask.src_mac); 1576 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v, 1577 smac_47_16), 1578 ib_spec->eth.val.src_mac); 1579 1580 if (ib_spec->eth.mask.vlan_tag) { 1581 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, 1582 cvlan_tag, 1); 1583 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, 1584 cvlan_tag, 1); 1585 1586 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, 1587 first_vid, ntohs(ib_spec->eth.mask.vlan_tag)); 1588 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, 1589 first_vid, ntohs(ib_spec->eth.val.vlan_tag)); 1590 1591 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, 1592 first_cfi, 1593 ntohs(ib_spec->eth.mask.vlan_tag) >> 12); 1594 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, 1595 first_cfi, 1596 ntohs(ib_spec->eth.val.vlan_tag) >> 12); 1597 1598 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, 1599 first_prio, 1600 ntohs(ib_spec->eth.mask.vlan_tag) >> 13); 1601 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, 1602 first_prio, 1603 ntohs(ib_spec->eth.val.vlan_tag) >> 13); 1604 } 1605 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, 1606 ethertype, ntohs(ib_spec->eth.mask.ether_type)); 1607 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, 1608 ethertype, ntohs(ib_spec->eth.val.ether_type)); 1609 break; 1610 case IB_FLOW_SPEC_IPV4: 1611 if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD)) 1612 return -ENOTSUPP; 1613 1614 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, 1615 ethertype, 0xffff); 1616 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, 1617 ethertype, ETH_P_IP); 1618 1619 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c, 1620 src_ipv4_src_ipv6.ipv4_layout.ipv4), 1621 &ib_spec->ipv4.mask.src_ip, 1622 sizeof(ib_spec->ipv4.mask.src_ip)); 1623 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v, 1624 src_ipv4_src_ipv6.ipv4_layout.ipv4), 1625 &ib_spec->ipv4.val.src_ip, 1626 sizeof(ib_spec->ipv4.val.src_ip)); 1627 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c, 1628 dst_ipv4_dst_ipv6.ipv4_layout.ipv4), 1629 &ib_spec->ipv4.mask.dst_ip, 1630 sizeof(ib_spec->ipv4.mask.dst_ip)); 1631 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, 
outer_headers_v, 1632 dst_ipv4_dst_ipv6.ipv4_layout.ipv4), 1633 &ib_spec->ipv4.val.dst_ip, 1634 sizeof(ib_spec->ipv4.val.dst_ip)); 1635 1636 set_tos(outer_headers_c, outer_headers_v, 1637 ib_spec->ipv4.mask.tos, ib_spec->ipv4.val.tos); 1638 1639 set_proto(outer_headers_c, outer_headers_v, 1640 ib_spec->ipv4.mask.proto, ib_spec->ipv4.val.proto); 1641 break; 1642 case IB_FLOW_SPEC_IPV6: 1643 if (FIELDS_NOT_SUPPORTED(ib_spec->ipv6.mask, LAST_IPV6_FIELD)) 1644 return -ENOTSUPP; 1645 1646 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, 1647 ethertype, 0xffff); 1648 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, 1649 ethertype, IPPROTO_IPV6); 1650 1651 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c, 1652 src_ipv4_src_ipv6.ipv6_layout.ipv6), 1653 &ib_spec->ipv6.mask.src_ip, 1654 sizeof(ib_spec->ipv6.mask.src_ip)); 1655 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v, 1656 src_ipv4_src_ipv6.ipv6_layout.ipv6), 1657 &ib_spec->ipv6.val.src_ip, 1658 sizeof(ib_spec->ipv6.val.src_ip)); 1659 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c, 1660 dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 1661 &ib_spec->ipv6.mask.dst_ip, 1662 sizeof(ib_spec->ipv6.mask.dst_ip)); 1663 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v, 1664 dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 1665 &ib_spec->ipv6.val.dst_ip, 1666 sizeof(ib_spec->ipv6.val.dst_ip)); 1667 1668 set_tos(outer_headers_c, outer_headers_v, 1669 ib_spec->ipv6.mask.traffic_class, 1670 ib_spec->ipv6.val.traffic_class); 1671 1672 set_proto(outer_headers_c, outer_headers_v, 1673 ib_spec->ipv6.mask.next_hdr, 1674 ib_spec->ipv6.val.next_hdr); 1675 1676 MLX5_SET(fte_match_set_misc, misc_params_c, 1677 outer_ipv6_flow_label, 1678 ntohl(ib_spec->ipv6.mask.flow_label)); 1679 MLX5_SET(fte_match_set_misc, misc_params_v, 1680 outer_ipv6_flow_label, 1681 ntohl(ib_spec->ipv6.val.flow_label)); 1682 break; 1683 case IB_FLOW_SPEC_TCP: 1684 if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask, 1685 LAST_TCP_UDP_FIELD)) 1686 return -ENOTSUPP; 1687 1688 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol, 1689 0xff); 1690 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol, 1691 IPPROTO_TCP); 1692 1693 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_sport, 1694 ntohs(ib_spec->tcp_udp.mask.src_port)); 1695 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_sport, 1696 ntohs(ib_spec->tcp_udp.val.src_port)); 1697 1698 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_dport, 1699 ntohs(ib_spec->tcp_udp.mask.dst_port)); 1700 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_dport, 1701 ntohs(ib_spec->tcp_udp.val.dst_port)); 1702 break; 1703 case IB_FLOW_SPEC_UDP: 1704 if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask, 1705 LAST_TCP_UDP_FIELD)) 1706 return -ENOTSUPP; 1707 1708 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol, 1709 0xff); 1710 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol, 1711 IPPROTO_UDP); 1712 1713 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_sport, 1714 ntohs(ib_spec->tcp_udp.mask.src_port)); 1715 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_sport, 1716 ntohs(ib_spec->tcp_udp.val.src_port)); 1717 1718 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_dport, 1719 ntohs(ib_spec->tcp_udp.mask.dst_port)); 1720 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_dport, 1721 ntohs(ib_spec->tcp_udp.val.dst_port)); 1722 break; 1723 default: 1724 return -EINVAL; 1725 } 1726 1727 return 0; 1728} 1729 1730/* If a flow could catch both multicast and unicast packets, 1731 * it 
won't fall into the multicast flow steering table and this rule 1732 * could steal other multicast packets. 1733 */ 1734static bool flow_is_multicast_only(struct ib_flow_attr *ib_attr) 1735{ 1736 struct ib_flow_spec_eth *eth_spec; 1737 1738 if (ib_attr->type != IB_FLOW_ATTR_NORMAL || 1739 ib_attr->size < sizeof(struct ib_flow_attr) + 1740 sizeof(struct ib_flow_spec_eth) || 1741 ib_attr->num_of_specs < 1) 1742 return false; 1743 1744 eth_spec = (struct ib_flow_spec_eth *)(ib_attr + 1); 1745 if (eth_spec->type != IB_FLOW_SPEC_ETH || 1746 eth_spec->size != sizeof(*eth_spec)) 1747 return false; 1748 1749 return is_multicast_ether_addr(eth_spec->mask.dst_mac) && 1750 is_multicast_ether_addr(eth_spec->val.dst_mac); 1751} 1752 1753static bool is_valid_attr(const struct ib_flow_attr *flow_attr) 1754{ 1755 union ib_flow_spec *ib_spec = (union ib_flow_spec *)(flow_attr + 1); 1756 bool has_ipv4_spec = false; 1757 bool eth_type_ipv4 = true; 1758 unsigned int spec_index; 1759 1760 /* Validate that ethertype is correct */ 1761 for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) { 1762 if (ib_spec->type == IB_FLOW_SPEC_ETH && 1763 ib_spec->eth.mask.ether_type) { 1764 if (!((ib_spec->eth.mask.ether_type == htons(0xffff)) && 1765 ib_spec->eth.val.ether_type == htons(ETH_P_IP))) 1766 eth_type_ipv4 = false; 1767 } else if (ib_spec->type == IB_FLOW_SPEC_IPV4) { 1768 has_ipv4_spec = true; 1769 } 1770 ib_spec = (void *)ib_spec + ib_spec->size; 1771 } 1772 return !has_ipv4_spec || eth_type_ipv4; 1773} 1774 1775static void put_flow_table(struct mlx5_ib_dev *dev, 1776 struct mlx5_ib_flow_prio *prio, bool ft_added) 1777{ 1778 prio->refcount -= !!ft_added; 1779 if (!prio->refcount) { 1780 mlx5_destroy_flow_table(prio->flow_table); 1781 prio->flow_table = NULL; 1782 } 1783} 1784 1785static int mlx5_ib_destroy_flow(struct ib_flow *flow_id) 1786{ 1787 struct mlx5_ib_dev *dev = to_mdev(flow_id->qp->device); 1788 struct mlx5_ib_flow_handler *handler = container_of(flow_id, 1789 struct mlx5_ib_flow_handler, 1790 ibflow); 1791 struct mlx5_ib_flow_handler *iter, *tmp; 1792 1793 mutex_lock(&dev->flow_db.lock); 1794 1795 list_for_each_entry_safe(iter, tmp, &handler->list, list) { 1796 mlx5_del_flow_rule(iter->rule); 1797 put_flow_table(dev, iter->prio, true); 1798 list_del(&iter->list); 1799 kfree(iter); 1800 } 1801 1802 mlx5_del_flow_rule(handler->rule); 1803 put_flow_table(dev, handler->prio, true); 1804 mutex_unlock(&dev->flow_db.lock); 1805 1806 kfree(handler); 1807 1808 return 0; 1809} 1810 1811static int ib_prio_to_core_prio(unsigned int priority, bool dont_trap) 1812{ 1813 priority *= 2; 1814 if (!dont_trap) 1815 priority++; 1816 return priority; 1817} 1818 1819enum flow_table_type { 1820 MLX5_IB_FT_RX, 1821 MLX5_IB_FT_TX 1822}; 1823 1824#define MLX5_FS_MAX_TYPES 10 1825#define MLX5_FS_MAX_ENTRIES 32000UL 1826static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev, 1827 struct ib_flow_attr *flow_attr, 1828 enum flow_table_type ft_type) 1829{ 1830 bool dont_trap = flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP; 1831 struct mlx5_flow_namespace *ns = NULL; 1832 struct mlx5_ib_flow_prio *prio; 1833 struct mlx5_flow_table *ft; 1834 int num_entries; 1835 int num_groups; 1836 int priority; 1837 int err = 0; 1838 1839 if (flow_attr->type == IB_FLOW_ATTR_NORMAL) { 1840 if (flow_is_multicast_only(flow_attr) && 1841 !dont_trap) 1842 priority = MLX5_IB_FLOW_MCAST_PRIO; 1843 else 1844 priority = ib_prio_to_core_prio(flow_attr->priority, 1845 dont_trap); 1846 ns = 
mlx5_get_flow_namespace(dev->mdev, 1847 MLX5_FLOW_NAMESPACE_BYPASS); 1848 num_entries = MLX5_FS_MAX_ENTRIES; 1849 num_groups = MLX5_FS_MAX_TYPES; 1850 prio = &dev->flow_db.prios[priority]; 1851 } else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT || 1852 flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) { 1853 ns = mlx5_get_flow_namespace(dev->mdev, 1854 MLX5_FLOW_NAMESPACE_LEFTOVERS); 1855 build_leftovers_ft_param("bypass", &priority, 1856 &num_entries, 1857 &num_groups); 1858 prio = &dev->flow_db.prios[MLX5_IB_FLOW_LEFTOVERS_PRIO]; 1859 } else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) { 1860 if (!MLX5_CAP_FLOWTABLE(dev->mdev, 1861 allow_sniffer_and_nic_rx_shared_tir)) 1862 return ERR_PTR(-ENOTSUPP); 1863 1864 ns = mlx5_get_flow_namespace(dev->mdev, ft_type == MLX5_IB_FT_RX ? 1865 MLX5_FLOW_NAMESPACE_SNIFFER_RX : 1866 MLX5_FLOW_NAMESPACE_SNIFFER_TX); 1867 1868 prio = &dev->flow_db.sniffer[ft_type]; 1869 priority = 0; 1870 num_entries = 1; 1871 num_groups = 1; 1872 } 1873 1874 if (!ns) 1875 return ERR_PTR(-ENOTSUPP); 1876 1877 ft = prio->flow_table; 1878 if (!ft) { 1879 ft = mlx5_create_auto_grouped_flow_table(ns, priority, "bypass", 1880 num_entries, 1881 num_groups); 1882 1883 if (!IS_ERR(ft)) { 1884 prio->refcount = 0; 1885 prio->flow_table = ft; 1886 } else { 1887 err = PTR_ERR(ft); 1888 } 1889 } 1890 1891 return err ? ERR_PTR(err) : prio; 1892} 1893 1894static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev, 1895 struct mlx5_ib_flow_prio *ft_prio, 1896 const struct ib_flow_attr *flow_attr, 1897 struct mlx5_flow_destination *dst) 1898{ 1899 struct mlx5_flow_table *ft = ft_prio->flow_table; 1900 struct mlx5_ib_flow_handler *handler; 1901 struct mlx5_flow_spec *spec; 1902 const void *ib_flow = (const void *)flow_attr + sizeof(*flow_attr); 1903 unsigned int spec_index; 1904 u32 action; 1905 int err = 0; 1906 1907 if (!is_valid_attr(flow_attr)) 1908 return ERR_PTR(-EINVAL); 1909 1910 spec = mlx5_vzalloc(sizeof(*spec)); 1911 handler = kzalloc(sizeof(*handler), GFP_KERNEL); 1912 if (!handler || !spec) { 1913 err = -ENOMEM; 1914 goto free; 1915 } 1916 1917 INIT_LIST_HEAD(&handler->list); 1918 1919 for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) { 1920 err = parse_flow_attr(spec->match_criteria, 1921 spec->match_value, ib_flow); 1922 if (err < 0) 1923 goto free; 1924 1925 ib_flow += ((union ib_flow_spec *)ib_flow)->size; 1926 } 1927 1928 spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria); 1929 action = dst ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST : 1930 MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO; 1931 handler->rule = mlx5_add_flow_rule(ft, spec->match_criteria_enable, 1932 spec->match_criteria, 1933 spec->match_value, 1934 action, 1935 MLX5_FS_DEFAULT_FLOW_TAG, 1936 dst); 1937 1938 if (IS_ERR(handler->rule)) { 1939 err = PTR_ERR(handler->rule); 1940 goto free; 1941 } 1942 1943 ft_prio->refcount++; 1944 handler->prio = ft_prio; 1945 1946 ft_prio->flow_table = ft; 1947free: 1948 if (err) 1949 kfree(handler); 1950 kvfree(spec); 1951 return err ? 
ERR_PTR(err) : handler; 1952} 1953 1954static struct mlx5_ib_flow_handler *create_dont_trap_rule(struct mlx5_ib_dev *dev, 1955 struct mlx5_ib_flow_prio *ft_prio, 1956 struct ib_flow_attr *flow_attr, 1957 struct mlx5_flow_destination *dst) 1958{ 1959 struct mlx5_ib_flow_handler *handler_dst = NULL; 1960 struct mlx5_ib_flow_handler *handler = NULL; 1961 1962 handler = create_flow_rule(dev, ft_prio, flow_attr, NULL); 1963 if (!IS_ERR(handler)) { 1964 handler_dst = create_flow_rule(dev, ft_prio, 1965 flow_attr, dst); 1966 if (IS_ERR(handler_dst)) { 1967 mlx5_del_flow_rule(handler->rule); 1968 ft_prio->refcount--; 1969 kfree(handler); 1970 handler = handler_dst; 1971 } else { 1972 list_add(&handler_dst->list, &handler->list); 1973 } 1974 } 1975 1976 return handler; 1977} 1978enum { 1979 LEFTOVERS_MC, 1980 LEFTOVERS_UC, 1981}; 1982 1983static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *dev, 1984 struct mlx5_ib_flow_prio *ft_prio, 1985 struct ib_flow_attr *flow_attr, 1986 struct mlx5_flow_destination *dst) 1987{ 1988 struct mlx5_ib_flow_handler *handler_ucast = NULL; 1989 struct mlx5_ib_flow_handler *handler = NULL; 1990 1991 static struct { 1992 struct ib_flow_attr flow_attr; 1993 struct ib_flow_spec_eth eth_flow; 1994 } leftovers_specs[] = { 1995 [LEFTOVERS_MC] = { 1996 .flow_attr = { 1997 .num_of_specs = 1, 1998 .size = sizeof(leftovers_specs[0]) 1999 }, 2000 .eth_flow = { 2001 .type = IB_FLOW_SPEC_ETH, 2002 .size = sizeof(struct ib_flow_spec_eth), 2003 .mask = {.dst_mac = {0x1} }, 2004 .val = {.dst_mac = {0x1} } 2005 } 2006 }, 2007 [LEFTOVERS_UC] = { 2008 .flow_attr = { 2009 .num_of_specs = 1, 2010 .size = sizeof(leftovers_specs[0]) 2011 }, 2012 .eth_flow = { 2013 .type = IB_FLOW_SPEC_ETH, 2014 .size = sizeof(struct ib_flow_spec_eth), 2015 .mask = {.dst_mac = {0x1} }, 2016 .val = {.dst_mac = {} } 2017 } 2018 } 2019 }; 2020 2021 handler = create_flow_rule(dev, ft_prio, 2022 &leftovers_specs[LEFTOVERS_MC].flow_attr, 2023 dst); 2024 if (!IS_ERR(handler) && 2025 flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT) { 2026 handler_ucast = create_flow_rule(dev, ft_prio, 2027 &leftovers_specs[LEFTOVERS_UC].flow_attr, 2028 dst); 2029 if (IS_ERR(handler_ucast)) { 2030 mlx5_del_flow_rule(handler->rule); 2031 ft_prio->refcount--; 2032 kfree(handler); 2033 handler = handler_ucast; 2034 } else { 2035 list_add(&handler_ucast->list, &handler->list); 2036 } 2037 } 2038 2039 return handler; 2040} 2041 2042static struct mlx5_ib_flow_handler *create_sniffer_rule(struct mlx5_ib_dev *dev, 2043 struct mlx5_ib_flow_prio *ft_rx, 2044 struct mlx5_ib_flow_prio *ft_tx, 2045 struct mlx5_flow_destination *dst) 2046{ 2047 struct mlx5_ib_flow_handler *handler_rx; 2048 struct mlx5_ib_flow_handler *handler_tx; 2049 int err; 2050 static const struct ib_flow_attr flow_attr = { 2051 .num_of_specs = 0, 2052 .size = sizeof(flow_attr) 2053 }; 2054 2055 handler_rx = create_flow_rule(dev, ft_rx, &flow_attr, dst); 2056 if (IS_ERR(handler_rx)) { 2057 err = PTR_ERR(handler_rx); 2058 goto err; 2059 } 2060 2061 handler_tx = create_flow_rule(dev, ft_tx, &flow_attr, dst); 2062 if (IS_ERR(handler_tx)) { 2063 err = PTR_ERR(handler_tx); 2064 goto err_tx; 2065 } 2066 2067 list_add(&handler_tx->list, &handler_rx->list); 2068 2069 return handler_rx; 2070 2071err_tx: 2072 mlx5_del_flow_rule(handler_rx->rule); 2073 ft_rx->refcount--; 2074 kfree(handler_rx); 2075err: 2076 return ERR_PTR(err); 2077} 2078 2079static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp, 2080 struct ib_flow_attr *flow_attr, 2081 int domain) 2082{ 2083 
struct mlx5_ib_dev *dev = to_mdev(qp->device); 2084 struct mlx5_ib_qp *mqp = to_mqp(qp); 2085 struct mlx5_ib_flow_handler *handler = NULL; 2086 struct mlx5_flow_destination *dst = NULL; 2087 struct mlx5_ib_flow_prio *ft_prio_tx = NULL; 2088 struct mlx5_ib_flow_prio *ft_prio; 2089 int err; 2090 2091 if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO) 2092 return ERR_PTR(-ENOSPC); 2093 2094 if (domain != IB_FLOW_DOMAIN_USER || 2095 flow_attr->port > MLX5_CAP_GEN(dev->mdev, num_ports) || 2096 (flow_attr->flags & ~IB_FLOW_ATTR_FLAGS_DONT_TRAP)) 2097 return ERR_PTR(-EINVAL); 2098 2099 dst = kzalloc(sizeof(*dst), GFP_KERNEL); 2100 if (!dst) 2101 return ERR_PTR(-ENOMEM); 2102 2103 mutex_lock(&dev->flow_db.lock); 2104 2105 ft_prio = get_flow_table(dev, flow_attr, MLX5_IB_FT_RX); 2106 if (IS_ERR(ft_prio)) { 2107 err = PTR_ERR(ft_prio); 2108 goto unlock; 2109 } 2110 if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) { 2111 ft_prio_tx = get_flow_table(dev, flow_attr, MLX5_IB_FT_TX); 2112 if (IS_ERR(ft_prio_tx)) { 2113 err = PTR_ERR(ft_prio_tx); 2114 ft_prio_tx = NULL; 2115 goto destroy_ft; 2116 } 2117 } 2118 2119 dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR; 2120 if (mqp->flags & MLX5_IB_QP_RSS) 2121 dst->tir_num = mqp->rss_qp.tirn; 2122 else 2123 dst->tir_num = mqp->raw_packet_qp.rq.tirn; 2124 2125 if (flow_attr->type == IB_FLOW_ATTR_NORMAL) { 2126 if (flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) { 2127 handler = create_dont_trap_rule(dev, ft_prio, 2128 flow_attr, dst); 2129 } else { 2130 handler = create_flow_rule(dev, ft_prio, flow_attr, 2131 dst); 2132 } 2133 } else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT || 2134 flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) { 2135 handler = create_leftovers_rule(dev, ft_prio, flow_attr, 2136 dst); 2137 } else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) { 2138 handler = create_sniffer_rule(dev, ft_prio, ft_prio_tx, dst); 2139 } else { 2140 err = -EINVAL; 2141 goto destroy_ft; 2142 } 2143 2144 if (IS_ERR(handler)) { 2145 err = PTR_ERR(handler); 2146 handler = NULL; 2147 goto destroy_ft; 2148 } 2149 2150 mutex_unlock(&dev->flow_db.lock); 2151 kfree(dst); 2152 2153 return &handler->ibflow; 2154 2155destroy_ft: 2156 put_flow_table(dev, ft_prio, false); 2157 if (ft_prio_tx) 2158 put_flow_table(dev, ft_prio_tx, false); 2159unlock: 2160 mutex_unlock(&dev->flow_db.lock); 2161 kfree(dst); 2162 kfree(handler); 2163 return ERR_PTR(err); 2164} 2165 2166static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) 2167{ 2168 struct mlx5_ib_dev *dev = to_mdev(ibqp->device); 2169 int err; 2170 2171 err = mlx5_core_attach_mcg(dev->mdev, gid, ibqp->qp_num); 2172 if (err) 2173 mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n", 2174 ibqp->qp_num, gid->raw); 2175 2176 return err; 2177} 2178 2179static int mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) 2180{ 2181 struct mlx5_ib_dev *dev = to_mdev(ibqp->device); 2182 int err; 2183 2184 err = mlx5_core_detach_mcg(dev->mdev, gid, ibqp->qp_num); 2185 if (err) 2186 mlx5_ib_warn(dev, "failed detaching QPN 0x%x, MGID %pI6\n", 2187 ibqp->qp_num, gid->raw); 2188 2189 return err; 2190} 2191 2192static int init_node_data(struct mlx5_ib_dev *dev) 2193{ 2194 int err; 2195 2196 err = mlx5_query_node_desc(dev, dev->ib_dev.node_desc); 2197 if (err) 2198 return err; 2199 2200 return mlx5_query_node_guid(dev, &dev->ib_dev.node_guid); 2201} 2202 2203static ssize_t show_fw_pages(struct device *device, struct device_attribute *attr, 2204 char *buf) 2205{ 2206 struct mlx5_ib_dev *dev = 2207 
container_of(device, struct mlx5_ib_dev, ib_dev.dev); 2208 2209 return sprintf(buf, "%lld\n", (long long)dev->mdev->priv.fw_pages); 2210} 2211 2212static ssize_t show_reg_pages(struct device *device, 2213 struct device_attribute *attr, char *buf) 2214{ 2215 struct mlx5_ib_dev *dev = 2216 container_of(device, struct mlx5_ib_dev, ib_dev.dev); 2217 2218 return sprintf(buf, "%d\n", atomic_read(&dev->mdev->priv.reg_pages)); 2219} 2220 2221static ssize_t show_hca(struct device *device, struct device_attribute *attr, 2222 char *buf) 2223{ 2224 struct mlx5_ib_dev *dev = 2225 container_of(device, struct mlx5_ib_dev, ib_dev.dev); 2226 return sprintf(buf, "MT%d\n", dev->mdev->pdev->device); 2227} 2228 2229static ssize_t show_rev(struct device *device, struct device_attribute *attr, 2230 char *buf) 2231{ 2232 struct mlx5_ib_dev *dev = 2233 container_of(device, struct mlx5_ib_dev, ib_dev.dev); 2234 return sprintf(buf, "%x\n", dev->mdev->pdev->revision); 2235} 2236 2237static ssize_t show_board(struct device *device, struct device_attribute *attr, 2238 char *buf) 2239{ 2240 struct mlx5_ib_dev *dev = 2241 container_of(device, struct mlx5_ib_dev, ib_dev.dev); 2242 return sprintf(buf, "%.*s\n", MLX5_BOARD_ID_LEN, 2243 dev->mdev->board_id); 2244} 2245 2246static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL); 2247static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL); 2248static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL); 2249static DEVICE_ATTR(fw_pages, S_IRUGO, show_fw_pages, NULL); 2250static DEVICE_ATTR(reg_pages, S_IRUGO, show_reg_pages, NULL); 2251 2252static struct device_attribute *mlx5_class_attributes[] = { 2253 &dev_attr_hw_rev, 2254 &dev_attr_hca_type, 2255 &dev_attr_board_id, 2256 &dev_attr_fw_pages, 2257 &dev_attr_reg_pages, 2258}; 2259 2260static void pkey_change_handler(struct work_struct *work) 2261{ 2262 struct mlx5_ib_port_resources *ports = 2263 container_of(work, struct mlx5_ib_port_resources, 2264 pkey_change_work); 2265 2266 mutex_lock(&ports->devr->mutex); 2267 mlx5_ib_gsi_pkey_change(ports->gsi); 2268 mutex_unlock(&ports->devr->mutex); 2269} 2270 2271static void mlx5_ib_handle_internal_error(struct mlx5_ib_dev *ibdev) 2272{ 2273 struct mlx5_ib_qp *mqp; 2274 struct mlx5_ib_cq *send_mcq, *recv_mcq; 2275 struct mlx5_core_cq *mcq; 2276 struct list_head cq_armed_list; 2277 unsigned long flags_qp; 2278 unsigned long flags_cq; 2279 unsigned long flags; 2280 2281 INIT_LIST_HEAD(&cq_armed_list); 2282 2283 /* Go over qp list reside on that ibdev, sync with create/destroy qp.*/ 2284 spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags); 2285 list_for_each_entry(mqp, &ibdev->qp_list, qps_list) { 2286 spin_lock_irqsave(&mqp->sq.lock, flags_qp); 2287 if (mqp->sq.tail != mqp->sq.head) { 2288 send_mcq = to_mcq(mqp->ibqp.send_cq); 2289 spin_lock_irqsave(&send_mcq->lock, flags_cq); 2290 if (send_mcq->mcq.comp && 2291 mqp->ibqp.send_cq->comp_handler) { 2292 if (!send_mcq->mcq.reset_notify_added) { 2293 send_mcq->mcq.reset_notify_added = 1; 2294 list_add_tail(&send_mcq->mcq.reset_notify, 2295 &cq_armed_list); 2296 } 2297 } 2298 spin_unlock_irqrestore(&send_mcq->lock, flags_cq); 2299 } 2300 spin_unlock_irqrestore(&mqp->sq.lock, flags_qp); 2301 spin_lock_irqsave(&mqp->rq.lock, flags_qp); 2302 /* no handling is needed for SRQ */ 2303 if (!mqp->ibqp.srq) { 2304 if (mqp->rq.tail != mqp->rq.head) { 2305 recv_mcq = to_mcq(mqp->ibqp.recv_cq); 2306 spin_lock_irqsave(&recv_mcq->lock, flags_cq); 2307 if (recv_mcq->mcq.comp && 2308 mqp->ibqp.recv_cq->comp_handler) { 2309 if 
(!recv_mcq->mcq.reset_notify_added) { 2310 recv_mcq->mcq.reset_notify_added = 1; 2311 list_add_tail(&recv_mcq->mcq.reset_notify, 2312 &cq_armed_list); 2313 } 2314 } 2315 spin_unlock_irqrestore(&recv_mcq->lock, 2316 flags_cq); 2317 } 2318 } 2319 spin_unlock_irqrestore(&mqp->rq.lock, flags_qp); 2320 } 2321 /*At that point all inflight post send were put to be executed as of we 2322 * lock/unlock above locks Now need to arm all involved CQs. 2323 */ 2324 list_for_each_entry(mcq, &cq_armed_list, reset_notify) { 2325 mcq->comp(mcq); 2326 } 2327 spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags); 2328} 2329 2330static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context, 2331 enum mlx5_dev_event event, unsigned long param) 2332{ 2333 struct mlx5_ib_dev *ibdev = (struct mlx5_ib_dev *)context; 2334 struct ib_event ibev; 2335 bool fatal = false; 2336 u8 port = 0; 2337 2338 switch (event) { 2339 case MLX5_DEV_EVENT_SYS_ERROR: 2340 ibev.event = IB_EVENT_DEVICE_FATAL; 2341 mlx5_ib_handle_internal_error(ibdev); 2342 fatal = true; 2343 break; 2344 2345 case MLX5_DEV_EVENT_PORT_UP: 2346 case MLX5_DEV_EVENT_PORT_DOWN: 2347 case MLX5_DEV_EVENT_PORT_INITIALIZED: 2348 port = (u8)param; 2349 2350 /* In RoCE, port up/down events are handled in 2351 * mlx5_netdev_event(). 2352 */ 2353 if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) == 2354 IB_LINK_LAYER_ETHERNET) 2355 return; 2356 2357 ibev.event = (event == MLX5_DEV_EVENT_PORT_UP) ? 2358 IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR; 2359 break; 2360 2361 case MLX5_DEV_EVENT_LID_CHANGE: 2362 ibev.event = IB_EVENT_LID_CHANGE; 2363 port = (u8)param; 2364 break; 2365 2366 case MLX5_DEV_EVENT_PKEY_CHANGE: 2367 ibev.event = IB_EVENT_PKEY_CHANGE; 2368 port = (u8)param; 2369 2370 schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work); 2371 break; 2372 2373 case MLX5_DEV_EVENT_GUID_CHANGE: 2374 ibev.event = IB_EVENT_GID_CHANGE; 2375 port = (u8)param; 2376 break; 2377 2378 case MLX5_DEV_EVENT_CLIENT_REREG: 2379 ibev.event = IB_EVENT_CLIENT_REREGISTER; 2380 port = (u8)param; 2381 break; 2382 2383 default: 2384 break; 2385 } 2386 2387 ibev.device = &ibdev->ib_dev; 2388 ibev.element.port_num = port; 2389 2390 if (port < 1 || port > ibdev->num_ports) { 2391 mlx5_ib_warn(ibdev, "warning: event on port %d\n", port); 2392 return; 2393 } 2394 2395 if (ibdev->ib_active) 2396 ib_dispatch_event(&ibev); 2397 2398 if (fatal) 2399 ibdev->ib_active = false; 2400} 2401 2402static void get_ext_port_caps(struct mlx5_ib_dev *dev) 2403{ 2404 int port; 2405 2406 for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++) 2407 mlx5_query_ext_port_caps(dev, port); 2408} 2409 2410static int get_port_caps(struct mlx5_ib_dev *dev) 2411{ 2412 struct ib_device_attr *dprops = NULL; 2413 struct ib_port_attr *pprops = NULL; 2414 int err = -ENOMEM; 2415 int port; 2416 struct ib_udata uhw = {.inlen = 0, .outlen = 0}; 2417 2418 pprops = kmalloc(sizeof(*pprops), GFP_KERNEL); 2419 if (!pprops) 2420 goto out; 2421 2422 dprops = kmalloc(sizeof(*dprops), GFP_KERNEL); 2423 if (!dprops) 2424 goto out; 2425 2426 err = mlx5_ib_query_device(&dev->ib_dev, dprops, &uhw); 2427 if (err) { 2428 mlx5_ib_warn(dev, "query_device failed %d\n", err); 2429 goto out; 2430 } 2431 2432 for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++) { 2433 err = mlx5_ib_query_port(&dev->ib_dev, port, pprops); 2434 if (err) { 2435 mlx5_ib_warn(dev, "query_port %d failed %d\n", 2436 port, err); 2437 break; 2438 } 2439 dev->mdev->port_caps[port - 1].pkey_table_len = 2440 dprops->max_pkeys; 2441 
dev->mdev->port_caps[port - 1].gid_table_len = 2442 pprops->gid_tbl_len; 2443 mlx5_ib_dbg(dev, "pkey_table_len %d, gid_table_len %d\n", 2444 dprops->max_pkeys, pprops->gid_tbl_len); 2445 } 2446 2447out: 2448 kfree(pprops); 2449 kfree(dprops); 2450 2451 return err; 2452} 2453 2454static void destroy_umrc_res(struct mlx5_ib_dev *dev) 2455{ 2456 int err; 2457 2458 err = mlx5_mr_cache_cleanup(dev); 2459 if (err) 2460 mlx5_ib_warn(dev, "mr cache cleanup failed\n"); 2461 2462 mlx5_ib_destroy_qp(dev->umrc.qp); 2463 ib_free_cq(dev->umrc.cq); 2464 ib_dealloc_pd(dev->umrc.pd); 2465} 2466 2467enum { 2468 MAX_UMR_WR = 128, 2469}; 2470 2471static int create_umr_res(struct mlx5_ib_dev *dev) 2472{ 2473 struct ib_qp_init_attr *init_attr = NULL; 2474 struct ib_qp_attr *attr = NULL; 2475 struct ib_pd *pd; 2476 struct ib_cq *cq; 2477 struct ib_qp *qp; 2478 int ret; 2479 2480 attr = kzalloc(sizeof(*attr), GFP_KERNEL); 2481 init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL); 2482 if (!attr || !init_attr) { 2483 ret = -ENOMEM; 2484 goto error_0; 2485 } 2486 2487 pd = ib_alloc_pd(&dev->ib_dev, 0); 2488 if (IS_ERR(pd)) { 2489 mlx5_ib_dbg(dev, "Couldn't create PD for sync UMR QP\n"); 2490 ret = PTR_ERR(pd); 2491 goto error_0; 2492 } 2493 2494 cq = ib_alloc_cq(&dev->ib_dev, NULL, 128, 0, IB_POLL_SOFTIRQ); 2495 if (IS_ERR(cq)) { 2496 mlx5_ib_dbg(dev, "Couldn't create CQ for sync UMR QP\n"); 2497 ret = PTR_ERR(cq); 2498 goto error_2; 2499 } 2500 2501 init_attr->send_cq = cq; 2502 init_attr->recv_cq = cq; 2503 init_attr->sq_sig_type = IB_SIGNAL_ALL_WR; 2504 init_attr->cap.max_send_wr = MAX_UMR_WR; 2505 init_attr->cap.max_send_sge = 1; 2506 init_attr->qp_type = MLX5_IB_QPT_REG_UMR; 2507 init_attr->port_num = 1; 2508 qp = mlx5_ib_create_qp(pd, init_attr, NULL); 2509 if (IS_ERR(qp)) { 2510 mlx5_ib_dbg(dev, "Couldn't create sync UMR QP\n"); 2511 ret = PTR_ERR(qp); 2512 goto error_3; 2513 } 2514 qp->device = &dev->ib_dev; 2515 qp->real_qp = qp; 2516 qp->uobject = NULL; 2517 qp->qp_type = MLX5_IB_QPT_REG_UMR; 2518 2519 attr->qp_state = IB_QPS_INIT; 2520 attr->port_num = 1; 2521 ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_PKEY_INDEX | 2522 IB_QP_PORT, NULL); 2523 if (ret) { 2524 mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n"); 2525 goto error_4; 2526 } 2527 2528 memset(attr, 0, sizeof(*attr)); 2529 attr->qp_state = IB_QPS_RTR; 2530 attr->path_mtu = IB_MTU_256; 2531 2532 ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL); 2533 if (ret) { 2534 mlx5_ib_dbg(dev, "Couldn't modify umr QP to rtr\n"); 2535 goto error_4; 2536 } 2537 2538 memset(attr, 0, sizeof(*attr)); 2539 attr->qp_state = IB_QPS_RTS; 2540 ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL); 2541 if (ret) { 2542 mlx5_ib_dbg(dev, "Couldn't modify umr QP to rts\n"); 2543 goto error_4; 2544 } 2545 2546 dev->umrc.qp = qp; 2547 dev->umrc.cq = cq; 2548 dev->umrc.pd = pd; 2549 2550 sema_init(&dev->umrc.sem, MAX_UMR_WR); 2551 ret = mlx5_mr_cache_init(dev); 2552 if (ret) { 2553 mlx5_ib_warn(dev, "mr cache init failed %d\n", ret); 2554 goto error_4; 2555 } 2556 2557 kfree(attr); 2558 kfree(init_attr); 2559 2560 return 0; 2561 2562error_4: 2563 mlx5_ib_destroy_qp(qp); 2564 2565error_3: 2566 ib_free_cq(cq); 2567 2568error_2: 2569 ib_dealloc_pd(pd); 2570 2571error_0: 2572 kfree(attr); 2573 kfree(init_attr); 2574 return ret; 2575} 2576 2577static int create_dev_resources(struct mlx5_ib_resources *devr) 2578{ 2579 struct ib_srq_init_attr attr; 2580 struct mlx5_ib_dev *dev; 2581 struct ib_cq_init_attr cq_attr = {.cqe = 1}; 2582 int port; 2583 int ret = 0; 2584 2585 dev = 
container_of(devr, struct mlx5_ib_dev, devr); 2586 2587 mutex_init(&devr->mutex); 2588 2589 devr->p0 = mlx5_ib_alloc_pd(&dev->ib_dev, NULL, NULL); 2590 if (IS_ERR(devr->p0)) { 2591 ret = PTR_ERR(devr->p0); 2592 goto error0; 2593 } 2594 devr->p0->device = &dev->ib_dev; 2595 devr->p0->uobject = NULL; 2596 atomic_set(&devr->p0->usecnt, 0); 2597 2598 devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, &cq_attr, NULL, NULL); 2599 if (IS_ERR(devr->c0)) { 2600 ret = PTR_ERR(devr->c0); 2601 goto error1; 2602 } 2603 devr->c0->device = &dev->ib_dev; 2604 devr->c0->uobject = NULL; 2605 devr->c0->comp_handler = NULL; 2606 devr->c0->event_handler = NULL; 2607 devr->c0->cq_context = NULL; 2608 atomic_set(&devr->c0->usecnt, 0); 2609 2610 devr->x0 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL); 2611 if (IS_ERR(devr->x0)) { 2612 ret = PTR_ERR(devr->x0); 2613 goto error2; 2614 } 2615 devr->x0->device = &dev->ib_dev; 2616 devr->x0->inode = NULL; 2617 atomic_set(&devr->x0->usecnt, 0); 2618 mutex_init(&devr->x0->tgt_qp_mutex); 2619 INIT_LIST_HEAD(&devr->x0->tgt_qp_list); 2620 2621 devr->x1 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL); 2622 if (IS_ERR(devr->x1)) { 2623 ret = PTR_ERR(devr->x1); 2624 goto error3; 2625 } 2626 devr->x1->device = &dev->ib_dev; 2627 devr->x1->inode = NULL; 2628 atomic_set(&devr->x1->usecnt, 0); 2629 mutex_init(&devr->x1->tgt_qp_mutex); 2630 INIT_LIST_HEAD(&devr->x1->tgt_qp_list); 2631 2632 memset(&attr, 0, sizeof(attr)); 2633 attr.attr.max_sge = 1; 2634 attr.attr.max_wr = 1; 2635 attr.srq_type = IB_SRQT_XRC; 2636 attr.ext.xrc.cq = devr->c0; 2637 attr.ext.xrc.xrcd = devr->x0; 2638 2639 devr->s0 = mlx5_ib_create_srq(devr->p0, &attr, NULL); 2640 if (IS_ERR(devr->s0)) { 2641 ret = PTR_ERR(devr->s0); 2642 goto error4; 2643 } 2644 devr->s0->device = &dev->ib_dev; 2645 devr->s0->pd = devr->p0; 2646 devr->s0->uobject = NULL; 2647 devr->s0->event_handler = NULL; 2648 devr->s0->srq_context = NULL; 2649 devr->s0->srq_type = IB_SRQT_XRC; 2650 devr->s0->ext.xrc.xrcd = devr->x0; 2651 devr->s0->ext.xrc.cq = devr->c0; 2652 atomic_inc(&devr->s0->ext.xrc.xrcd->usecnt); 2653 atomic_inc(&devr->s0->ext.xrc.cq->usecnt); 2654 atomic_inc(&devr->p0->usecnt); 2655 atomic_set(&devr->s0->usecnt, 0); 2656 2657 memset(&attr, 0, sizeof(attr)); 2658 attr.attr.max_sge = 1; 2659 attr.attr.max_wr = 1; 2660 attr.srq_type = IB_SRQT_BASIC; 2661 devr->s1 = mlx5_ib_create_srq(devr->p0, &attr, NULL); 2662 if (IS_ERR(devr->s1)) { 2663 ret = PTR_ERR(devr->s1); 2664 goto error5; 2665 } 2666 devr->s1->device = &dev->ib_dev; 2667 devr->s1->pd = devr->p0; 2668 devr->s1->uobject = NULL; 2669 devr->s1->event_handler = NULL; 2670 devr->s1->srq_context = NULL; 2671 devr->s1->srq_type = IB_SRQT_BASIC; 2672 devr->s1->ext.xrc.cq = devr->c0; 2673 atomic_inc(&devr->p0->usecnt); 2674 atomic_set(&devr->s0->usecnt, 0); 2675 2676 for (port = 0; port < ARRAY_SIZE(devr->ports); ++port) { 2677 INIT_WORK(&devr->ports[port].pkey_change_work, 2678 pkey_change_handler); 2679 devr->ports[port].devr = devr; 2680 } 2681 2682 return 0; 2683 2684error5: 2685 mlx5_ib_destroy_srq(devr->s0); 2686error4: 2687 mlx5_ib_dealloc_xrcd(devr->x1); 2688error3: 2689 mlx5_ib_dealloc_xrcd(devr->x0); 2690error2: 2691 mlx5_ib_destroy_cq(devr->c0); 2692error1: 2693 mlx5_ib_dealloc_pd(devr->p0); 2694error0: 2695 return ret; 2696} 2697 2698static void destroy_dev_resources(struct mlx5_ib_resources *devr) 2699{ 2700 struct mlx5_ib_dev *dev = 2701 container_of(devr, struct mlx5_ib_dev, devr); 2702 int port; 2703 2704 mlx5_ib_destroy_srq(devr->s1); 2705 mlx5_ib_destroy_srq(devr->s0); 
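	/*
	 * Both SRQs hold a reference on p0, and s0 also references x0 and c0
	 * (taken in create_dev_resources above), so the SRQs are destroyed
	 * before the XRCDs, CQ and PD are released below.
	 */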
2706 mlx5_ib_dealloc_xrcd(devr->x0); 2707 mlx5_ib_dealloc_xrcd(devr->x1); 2708 mlx5_ib_destroy_cq(devr->c0); 2709 mlx5_ib_dealloc_pd(devr->p0); 2710 2711 /* Make sure no change P_Key work items are still executing */ 2712 for (port = 0; port < dev->num_ports; ++port) 2713 cancel_work_sync(&devr->ports[port].pkey_change_work); 2714} 2715 2716static u32 get_core_cap_flags(struct ib_device *ibdev) 2717{ 2718 struct mlx5_ib_dev *dev = to_mdev(ibdev); 2719 enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, 1); 2720 u8 l3_type_cap = MLX5_CAP_ROCE(dev->mdev, l3_type); 2721 u8 roce_version_cap = MLX5_CAP_ROCE(dev->mdev, roce_version); 2722 u32 ret = 0; 2723 2724 if (ll == IB_LINK_LAYER_INFINIBAND) 2725 return RDMA_CORE_PORT_IBA_IB; 2726 2727 if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV4_CAP)) 2728 return 0; 2729 2730 if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV6_CAP)) 2731 return 0; 2732 2733 if (roce_version_cap & MLX5_ROCE_VERSION_1_CAP) 2734 ret |= RDMA_CORE_PORT_IBA_ROCE; 2735 2736 if (roce_version_cap & MLX5_ROCE_VERSION_2_CAP) 2737 ret |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP; 2738 2739 return ret; 2740} 2741 2742static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num, 2743 struct ib_port_immutable *immutable) 2744{ 2745 struct ib_port_attr attr; 2746 int err; 2747 2748 err = mlx5_ib_query_port(ibdev, port_num, &attr); 2749 if (err) 2750 return err; 2751 2752 immutable->pkey_tbl_len = attr.pkey_tbl_len; 2753 immutable->gid_tbl_len = attr.gid_tbl_len; 2754 immutable->core_cap_flags = get_core_cap_flags(ibdev); 2755 immutable->max_mad_size = IB_MGMT_MAD_SIZE; 2756 2757 return 0; 2758} 2759 2760static void get_dev_fw_str(struct ib_device *ibdev, char *str, 2761 size_t str_len) 2762{ 2763 struct mlx5_ib_dev *dev = 2764 container_of(ibdev, struct mlx5_ib_dev, ib_dev); 2765 snprintf(str, str_len, "%d.%d.%04d", fw_rev_maj(dev->mdev), 2766 fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev)); 2767} 2768 2769static int mlx5_roce_lag_init(struct mlx5_ib_dev *dev) 2770{ 2771 return 0; 2772} 2773 2774static void mlx5_roce_lag_cleanup(struct mlx5_ib_dev *dev) 2775{ 2776} 2777 2778static void mlx5_remove_roce_notifier(struct mlx5_ib_dev *dev) 2779{ 2780 if (dev->roce.nb.notifier_call) { 2781 unregister_netdevice_notifier(&dev->roce.nb); 2782 dev->roce.nb.notifier_call = NULL; 2783 } 2784} 2785 2786static int mlx5_enable_roce(struct mlx5_ib_dev *dev) 2787{ 2788 VNET_ITERATOR_DECL(vnet_iter); 2789 struct net_device *idev; 2790 int err; 2791 2792 /* Check if mlx5en net device already exists */ 2793 VNET_LIST_RLOCK(); 2794 VNET_FOREACH(vnet_iter) { 2795 IFNET_RLOCK(); 2796 CURVNET_SET_QUIET(vnet_iter); 2797 TAILQ_FOREACH(idev, &V_ifnet, if_link) { 2798 /* check if network interface belongs to mlx5en */ 2799 if (!mlx5_netdev_match(idev, dev->mdev, "mce")) 2800 continue; 2801 write_lock(&dev->roce.netdev_lock); 2802 dev->roce.netdev = idev; 2803 write_unlock(&dev->roce.netdev_lock); 2804 } 2805 CURVNET_RESTORE(); 2806 IFNET_RUNLOCK(); 2807 } 2808 VNET_LIST_RUNLOCK(); 2809 2810 dev->roce.nb.notifier_call = mlx5_netdev_event; 2811 err = register_netdevice_notifier(&dev->roce.nb); 2812 if (err) { 2813 dev->roce.nb.notifier_call = NULL; 2814 return err; 2815 } 2816 2817 err = mlx5_nic_vport_enable_roce(dev->mdev); 2818 if (err) 2819 goto err_unregister_netdevice_notifier; 2820 2821 err = mlx5_roce_lag_init(dev); 2822 if (err) 2823 goto err_disable_roce; 2824 2825 return 0; 2826 2827err_disable_roce: 2828 mlx5_nic_vport_disable_roce(dev->mdev); 2829 2830err_unregister_netdevice_notifier: 2831 
mlx5_remove_roce_notifier(dev); 2832 return err; 2833} 2834 2835static void mlx5_disable_roce(struct mlx5_ib_dev *dev) 2836{ 2837 mlx5_roce_lag_cleanup(dev); 2838 mlx5_nic_vport_disable_roce(dev->mdev); 2839} 2840 2841static void mlx5_ib_dealloc_q_port_counter(struct mlx5_ib_dev *dev, u8 port_num) 2842{ 2843 mlx5_vport_dealloc_q_counter(dev->mdev, 2844 MLX5_INTERFACE_PROTOCOL_IB, 2845 dev->port[port_num].q_cnt_id); 2846 dev->port[port_num].q_cnt_id = 0; 2847} 2848 2849static void mlx5_ib_dealloc_q_counters(struct mlx5_ib_dev *dev) 2850{ 2851 unsigned int i; 2852 2853 for (i = 0; i < dev->num_ports; i++) 2854 mlx5_ib_dealloc_q_port_counter(dev, i); 2855} 2856 2857static int mlx5_ib_alloc_q_counters(struct mlx5_ib_dev *dev) 2858{ 2859 int i; 2860 int ret; 2861 2862 for (i = 0; i < dev->num_ports; i++) { 2863 ret = mlx5_vport_alloc_q_counter(dev->mdev, 2864 MLX5_INTERFACE_PROTOCOL_IB, 2865 &dev->port[i].q_cnt_id); 2866 if (ret) { 2867 mlx5_ib_warn(dev, 2868 "couldn't allocate queue counter for port %d, err %d\n", 2869 i + 1, ret); 2870 goto dealloc_counters; 2871 } 2872 } 2873 2874 return 0; 2875 2876dealloc_counters: 2877 while (--i >= 0) 2878 mlx5_ib_dealloc_q_port_counter(dev, i); 2879 2880 return ret; 2881} 2882 2883static const char * const names[] = { 2884 "rx_write_requests", 2885 "rx_read_requests", 2886 "rx_atomic_requests", 2887 "out_of_buffer", 2888 "out_of_sequence", 2889 "duplicate_request", 2890 "rnr_nak_retry_err", 2891 "packet_seq_err", 2892 "implied_nak_seq_err", 2893 "local_ack_timeout_err", 2894}; 2895 2896static const size_t stats_offsets[] = { 2897 MLX5_BYTE_OFF(query_q_counter_out, rx_write_requests), 2898 MLX5_BYTE_OFF(query_q_counter_out, rx_read_requests), 2899 MLX5_BYTE_OFF(query_q_counter_out, rx_atomic_requests), 2900 MLX5_BYTE_OFF(query_q_counter_out, out_of_buffer), 2901 MLX5_BYTE_OFF(query_q_counter_out, out_of_sequence), 2902 MLX5_BYTE_OFF(query_q_counter_out, duplicate_request), 2903 MLX5_BYTE_OFF(query_q_counter_out, rnr_nak_retry_err), 2904 MLX5_BYTE_OFF(query_q_counter_out, packet_seq_err), 2905 MLX5_BYTE_OFF(query_q_counter_out, implied_nak_seq_err), 2906 MLX5_BYTE_OFF(query_q_counter_out, local_ack_timeout_err), 2907}; 2908 2909static struct rdma_hw_stats *mlx5_ib_alloc_hw_stats(struct ib_device *ibdev, 2910 u8 port_num) 2911{ 2912 BUILD_BUG_ON(ARRAY_SIZE(names) != ARRAY_SIZE(stats_offsets)); 2913 2914 /* We support only per port stats */ 2915 if (port_num == 0) 2916 return NULL; 2917 2918 return rdma_alloc_hw_stats_struct(names, ARRAY_SIZE(names), 2919 RDMA_HW_STATS_DEFAULT_LIFESPAN); 2920} 2921 2922static int mlx5_ib_get_hw_stats(struct ib_device *ibdev, 2923 struct rdma_hw_stats *stats, 2924 u8 port, int index) 2925{ 2926 struct mlx5_ib_dev *dev = to_mdev(ibdev); 2927 int outlen = MLX5_ST_SZ_BYTES(query_q_counter_out); 2928 void *out; 2929 __be32 val; 2930 int ret; 2931 int i; 2932 2933 if (!port || !stats) 2934 return -ENOSYS; 2935 2936 out = mlx5_vzalloc(outlen); 2937 if (!out) 2938 return -ENOMEM; 2939 2940 ret = mlx5_vport_query_q_counter(dev->mdev, 2941 dev->port[port - 1].q_cnt_id, 0, 2942 out, outlen); 2943 if (ret) 2944 goto free; 2945 2946 for (i = 0; i < ARRAY_SIZE(names); i++) { 2947 val = *(__be32 *)(out + stats_offsets[i]); 2948 stats->value[i] = (u64)be32_to_cpu(val); 2949 } 2950free: 2951 kvfree(out); 2952 return ARRAY_SIZE(names); 2953} 2954 2955static void *mlx5_ib_add(struct mlx5_core_dev *mdev) 2956{ 2957 struct mlx5_ib_dev *dev; 2958 enum rdma_link_layer ll; 2959 int port_type_cap; 2960 int err; 2961 int i; 2962 2963 port_type_cap = 
MLX5_CAP_GEN(mdev, port_type); 2964 ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap); 2965 2966 if ((ll == IB_LINK_LAYER_ETHERNET) && !MLX5_CAP_GEN(mdev, roce)) 2967 return NULL; 2968 2969 printk_once(KERN_INFO "%s", mlx5_version); 2970 2971 dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev)); 2972 if (!dev) 2973 return NULL; 2974 2975 dev->mdev = mdev; 2976 2977 dev->port = kcalloc(MLX5_CAP_GEN(mdev, num_ports), sizeof(*dev->port), 2978 GFP_KERNEL); 2979 if (!dev->port) 2980 goto err_dealloc; 2981 2982 rwlock_init(&dev->roce.netdev_lock); 2983 err = get_port_caps(dev); 2984 if (err) 2985 goto err_free_port; 2986 2987 if (mlx5_use_mad_ifc(dev)) 2988 get_ext_port_caps(dev); 2989 2990 MLX5_INIT_DOORBELL_LOCK(&dev->uar_lock); 2991 2992 snprintf(dev->ib_dev.name, IB_DEVICE_NAME_MAX, "mlx5_%d", device_get_unit(mdev->pdev->dev.bsddev)); 2993 dev->ib_dev.owner = THIS_MODULE; 2994 dev->ib_dev.node_type = RDMA_NODE_IB_CA; 2995 dev->ib_dev.local_dma_lkey = 0 /* not supported for now */; 2996 dev->num_ports = MLX5_CAP_GEN(mdev, num_ports); 2997 dev->ib_dev.phys_port_cnt = dev->num_ports; 2998 dev->ib_dev.num_comp_vectors = 2999 dev->mdev->priv.eq_table.num_comp_vectors; 3000 dev->ib_dev.dma_device = &mdev->pdev->dev; 3001 3002 dev->ib_dev.uverbs_abi_ver = MLX5_IB_UVERBS_ABI_VERSION; 3003 dev->ib_dev.uverbs_cmd_mask = 3004 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) | 3005 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) | 3006 (1ull << IB_USER_VERBS_CMD_QUERY_PORT) | 3007 (1ull << IB_USER_VERBS_CMD_ALLOC_PD) | 3008 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) | 3009 (1ull << IB_USER_VERBS_CMD_CREATE_AH) | 3010 (1ull << IB_USER_VERBS_CMD_DESTROY_AH) | 3011 (1ull << IB_USER_VERBS_CMD_REG_MR) | 3012 (1ull << IB_USER_VERBS_CMD_REREG_MR) | 3013 (1ull << IB_USER_VERBS_CMD_DEREG_MR) | 3014 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) | 3015 (1ull << IB_USER_VERBS_CMD_CREATE_CQ) | 3016 (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) | 3017 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) | 3018 (1ull << IB_USER_VERBS_CMD_CREATE_QP) | 3019 (1ull << IB_USER_VERBS_CMD_MODIFY_QP) | 3020 (1ull << IB_USER_VERBS_CMD_QUERY_QP) | 3021 (1ull << IB_USER_VERBS_CMD_DESTROY_QP) | 3022 (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) | 3023 (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) | 3024 (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) | 3025 (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) | 3026 (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) | 3027 (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) | 3028 (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) | 3029 (1ull << IB_USER_VERBS_CMD_OPEN_QP); 3030 dev->ib_dev.uverbs_ex_cmd_mask = 3031 (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) | 3032 (1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ) | 3033 (1ull << IB_USER_VERBS_EX_CMD_CREATE_QP); 3034 3035 dev->ib_dev.query_device = mlx5_ib_query_device; 3036 dev->ib_dev.query_port = mlx5_ib_query_port; 3037 dev->ib_dev.get_link_layer = mlx5_ib_port_link_layer; 3038 if (ll == IB_LINK_LAYER_ETHERNET) 3039 dev->ib_dev.get_netdev = mlx5_ib_get_netdev; 3040 dev->ib_dev.query_gid = mlx5_ib_query_gid; 3041 dev->ib_dev.add_gid = mlx5_ib_add_gid; 3042 dev->ib_dev.del_gid = mlx5_ib_del_gid; 3043 dev->ib_dev.query_pkey = mlx5_ib_query_pkey; 3044 dev->ib_dev.modify_device = mlx5_ib_modify_device; 3045 dev->ib_dev.modify_port = mlx5_ib_modify_port; 3046 dev->ib_dev.alloc_ucontext = mlx5_ib_alloc_ucontext; 3047 dev->ib_dev.dealloc_ucontext = mlx5_ib_dealloc_ucontext; 3048 dev->ib_dev.mmap = mlx5_ib_mmap; 3049 dev->ib_dev.alloc_pd = mlx5_ib_alloc_pd; 3050 dev->ib_dev.dealloc_pd = mlx5_ib_dealloc_pd; 3051 dev->ib_dev.create_ah = 
mlx5_ib_create_ah; 3052 dev->ib_dev.query_ah = mlx5_ib_query_ah; 3053 dev->ib_dev.destroy_ah = mlx5_ib_destroy_ah; 3054 dev->ib_dev.create_srq = mlx5_ib_create_srq; 3055 dev->ib_dev.modify_srq = mlx5_ib_modify_srq; 3056 dev->ib_dev.query_srq = mlx5_ib_query_srq; 3057 dev->ib_dev.destroy_srq = mlx5_ib_destroy_srq; 3058 dev->ib_dev.post_srq_recv = mlx5_ib_post_srq_recv; 3059 dev->ib_dev.create_qp = mlx5_ib_create_qp; 3060 dev->ib_dev.modify_qp = mlx5_ib_modify_qp; 3061 dev->ib_dev.query_qp = mlx5_ib_query_qp; 3062 dev->ib_dev.destroy_qp = mlx5_ib_destroy_qp; 3063 dev->ib_dev.post_send = mlx5_ib_post_send; 3064 dev->ib_dev.post_recv = mlx5_ib_post_recv; 3065 dev->ib_dev.create_cq = mlx5_ib_create_cq; 3066 dev->ib_dev.modify_cq = mlx5_ib_modify_cq; 3067 dev->ib_dev.resize_cq = mlx5_ib_resize_cq; 3068 dev->ib_dev.destroy_cq = mlx5_ib_destroy_cq; 3069 dev->ib_dev.poll_cq = mlx5_ib_poll_cq; 3070 dev->ib_dev.req_notify_cq = mlx5_ib_arm_cq; 3071 dev->ib_dev.get_dma_mr = mlx5_ib_get_dma_mr; 3072 dev->ib_dev.reg_user_mr = mlx5_ib_reg_user_mr; 3073 dev->ib_dev.rereg_user_mr = mlx5_ib_rereg_user_mr; 3074 dev->ib_dev.reg_phys_mr = mlx5_ib_reg_phys_mr; 3075 dev->ib_dev.dereg_mr = mlx5_ib_dereg_mr; 3076 dev->ib_dev.attach_mcast = mlx5_ib_mcg_attach; 3077 dev->ib_dev.detach_mcast = mlx5_ib_mcg_detach; 3078 dev->ib_dev.process_mad = mlx5_ib_process_mad; 3079 dev->ib_dev.alloc_mr = mlx5_ib_alloc_mr; 3080 dev->ib_dev.map_mr_sg = mlx5_ib_map_mr_sg; 3081 dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status; 3082 dev->ib_dev.get_port_immutable = mlx5_port_immutable; 3083 dev->ib_dev.get_dev_fw_str = get_dev_fw_str; 3084 if (mlx5_core_is_pf(mdev)) { 3085 dev->ib_dev.get_vf_config = mlx5_ib_get_vf_config; 3086 dev->ib_dev.set_vf_link_state = mlx5_ib_set_vf_link_state; 3087 dev->ib_dev.get_vf_stats = mlx5_ib_get_vf_stats; 3088 dev->ib_dev.set_vf_guid = mlx5_ib_set_vf_guid; 3089 } 3090 3091 mlx5_ib_internal_fill_odp_caps(dev); 3092 3093 if (MLX5_CAP_GEN(mdev, imaicl)) { 3094 dev->ib_dev.alloc_mw = mlx5_ib_alloc_mw; 3095 dev->ib_dev.dealloc_mw = mlx5_ib_dealloc_mw; 3096 dev->ib_dev.uverbs_cmd_mask |= 3097 (1ull << IB_USER_VERBS_CMD_ALLOC_MW) | 3098 (1ull << IB_USER_VERBS_CMD_DEALLOC_MW); 3099 } 3100 3101 if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt) && 3102 MLX5_CAP_GEN(dev->mdev, retransmission_q_counters)) { 3103 dev->ib_dev.get_hw_stats = mlx5_ib_get_hw_stats; 3104 dev->ib_dev.alloc_hw_stats = mlx5_ib_alloc_hw_stats; 3105 } 3106 3107 if (MLX5_CAP_GEN(mdev, xrc)) { 3108 dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd; 3109 dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd; 3110 dev->ib_dev.uverbs_cmd_mask |= 3111 (1ull << IB_USER_VERBS_CMD_OPEN_XRCD) | 3112 (1ull << IB_USER_VERBS_CMD_CLOSE_XRCD); 3113 } 3114 3115 if (mlx5_ib_port_link_layer(&dev->ib_dev, 1) == 3116 IB_LINK_LAYER_ETHERNET) { 3117 dev->ib_dev.create_flow = mlx5_ib_create_flow; 3118 dev->ib_dev.destroy_flow = mlx5_ib_destroy_flow; 3119 dev->ib_dev.create_wq = mlx5_ib_create_wq; 3120 dev->ib_dev.modify_wq = mlx5_ib_modify_wq; 3121 dev->ib_dev.destroy_wq = mlx5_ib_destroy_wq; 3122 dev->ib_dev.create_rwq_ind_table = mlx5_ib_create_rwq_ind_table; 3123 dev->ib_dev.destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table; 3124 dev->ib_dev.uverbs_ex_cmd_mask |= 3125 (1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) | 3126 (1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW) | 3127 (1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) | 3128 (1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) | 3129 (1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) | 3130 (1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) | 3131 
(1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL); 3132 } 3133 err = init_node_data(dev); 3134 if (err) 3135 goto err_free_port; 3136 3137 mutex_init(&dev->flow_db.lock); 3138 mutex_init(&dev->cap_mask_mutex); 3139 INIT_LIST_HEAD(&dev->qp_list); 3140 spin_lock_init(&dev->reset_flow_resource_lock); 3141 3142 if (ll == IB_LINK_LAYER_ETHERNET) { 3143 err = mlx5_enable_roce(dev); 3144 if (err) 3145 goto err_free_port; 3146 } 3147 3148 err = create_dev_resources(&dev->devr); 3149 if (err) 3150 goto err_disable_roce; 3151 3152 err = mlx5_ib_odp_init_one(dev); 3153 if (err) 3154 goto err_rsrc; 3155 3156 err = mlx5_ib_alloc_q_counters(dev); 3157 if (err) 3158 goto err_odp; 3159 3160 err = ib_register_device(&dev->ib_dev, NULL); 3161 if (err) 3162 goto err_q_cnt; 3163 3164 err = create_umr_res(dev); 3165 if (err) 3166 goto err_dev; 3167 3168 for (i = 0; i < ARRAY_SIZE(mlx5_class_attributes); i++) { 3169 err = device_create_file(&dev->ib_dev.dev, 3170 mlx5_class_attributes[i]); 3171 if (err) 3172 goto err_umrc; 3173 } 3174 3175 err = mlx5_ib_init_congestion(dev); 3176 if (err) 3177 goto err_umrc; 3178 3179 dev->ib_active = true; 3180 3181 return dev; 3182 3183err_umrc: 3184 destroy_umrc_res(dev); 3185 3186err_dev: 3187 ib_unregister_device(&dev->ib_dev); 3188 3189err_q_cnt: 3190 mlx5_ib_dealloc_q_counters(dev); 3191 3192err_odp: 3193 mlx5_ib_odp_remove_one(dev); 3194 3195err_rsrc: 3196 destroy_dev_resources(&dev->devr); 3197 3198err_disable_roce: 3199 if (ll == IB_LINK_LAYER_ETHERNET) { 3200 mlx5_disable_roce(dev); 3201 mlx5_remove_roce_notifier(dev); 3202 } 3203 3204err_free_port: 3205 kfree(dev->port); 3206 3207err_dealloc: 3208 ib_dealloc_device((struct ib_device *)dev); 3209 3210 return NULL; 3211} 3212 3213static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context) 3214{ 3215 struct mlx5_ib_dev *dev = context; 3216 enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev, 1); 3217 3218 mlx5_ib_cleanup_congestion(dev); 3219 mlx5_remove_roce_notifier(dev); 3220 ib_unregister_device(&dev->ib_dev); 3221 mlx5_ib_dealloc_q_counters(dev); 3222 destroy_umrc_res(dev); 3223 mlx5_ib_odp_remove_one(dev); 3224 destroy_dev_resources(&dev->devr); 3225 if (ll == IB_LINK_LAYER_ETHERNET) 3226 mlx5_disable_roce(dev); 3227 kfree(dev->port); 3228 ib_dealloc_device(&dev->ib_dev); 3229} 3230 3231static struct mlx5_interface mlx5_ib_interface = { 3232 .add = mlx5_ib_add, 3233 .remove = mlx5_ib_remove, 3234 .event = mlx5_ib_event, 3235 .protocol = MLX5_INTERFACE_PROTOCOL_IB, 3236}; 3237 3238static int __init mlx5_ib_init(void) 3239{ 3240 int err; 3241 3242 if (deprecated_prof_sel != 2) 3243 pr_warn("prof_sel is deprecated for mlx5_ib, set it for mlx5_core\n"); 3244 3245 err = mlx5_ib_odp_init(); 3246 if (err) 3247 return err; 3248 3249 err = mlx5_register_interface(&mlx5_ib_interface); 3250 if (err) 3251 goto clean_odp; 3252 3253 return err; 3254 3255clean_odp: 3256 mlx5_ib_odp_cleanup(); 3257 return err; 3258} 3259 3260static void __exit mlx5_ib_cleanup(void) 3261{ 3262 mlx5_unregister_interface(&mlx5_ib_interface); 3263 mlx5_ib_odp_cleanup(); 3264} 3265 3266module_init_order(mlx5_ib_init, SI_ORDER_THIRD); 3267module_exit_order(mlx5_ib_cleanup, SI_ORDER_THIRD); 3268
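/*
 * Userspace view of the flow steering path implemented by
 * mlx5_ib_create_flow() and parse_flow_attr() in this file: the kernel
 * receives an ib_flow_attr header followed in memory by num_of_specs flow
 * specs.  A minimal libibverbs sketch of such a request -- a single
 * IBV_FLOW_SPEC_ETH spec matching a destination MAC -- is shown below.
 * The QP "qp", port 1 and the MAC value are illustrative assumptions only;
 * error handling is omitted.
 *
 *	#include <infiniband/verbs.h>
 *	#include <string.h>
 *
 *	struct raw_eth_flow_attr {
 *		struct ibv_flow_attr		attr;
 *		struct ibv_flow_spec_eth	spec_eth;
 *	} __attribute__((packed));
 *
 *	static struct ibv_flow *steer_dst_mac(struct ibv_qp *qp,
 *	    const uint8_t mac[6])
 *	{
 *		struct raw_eth_flow_attr fa = {
 *			.attr = {
 *				.type		= IBV_FLOW_ATTR_NORMAL,
 *				.size		= sizeof(fa),
 *				.num_of_specs	= 1,
 *				.port		= 1,
 *			},
 *			.spec_eth = {
 *				.type	= IBV_FLOW_SPEC_ETH,
 *				.size	= sizeof(struct ibv_flow_spec_eth),
 *			},
 *		};
 *
 *		// Match only on the destination MAC; all other fields wildcarded.
 *		memcpy(fa.spec_eth.val.dst_mac, mac, 6);
 *		memset(fa.spec_eth.mask.dst_mac, 0xff, 6);
 *
 *		// Serviced in the kernel by mlx5_ib_create_flow(), domain
 *		// IB_FLOW_DOMAIN_USER.
 *		return ibv_create_flow(qp, &fa.attr);
 *	}
 */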