mlx5_ib_main.c revision 331784
/*-
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/dev/mlx5/mlx5_ib/mlx5_ib_main.c 331784 2018-03-30 18:36:44Z hselasky $
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#if defined(CONFIG_X86)
#include <asm/pat.h>
#endif
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/fs.h>
#undef inode
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <dev/mlx5/port.h>
#include <dev/mlx5/vport.h>
#include <linux/list.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <linux/in.h>
#include <linux/etherdevice.h>
#include <dev/mlx5/fs.h>
#include "mlx5_ib.h"

#define	DRIVER_NAME "mlx5_ib"
#define	DRIVER_VERSION "3.4.1-BETA"
#define	DRIVER_RELDATE "October 2017"

MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DEPEND(mlx5ib, linuxkpi, 1, 1, 1);
MODULE_DEPEND(mlx5ib, mlx5, 1, 1, 1);
MODULE_DEPEND(mlx5ib, ibcore, 1, 1, 1);
MODULE_VERSION(mlx5ib, 1);

static int deprecated_prof_sel = 2;
module_param_named(prof_sel, deprecated_prof_sel, int, 0444);
MODULE_PARM_DESC(prof_sel, "profile selector. Deprecated here. Moved to module mlx5_core");

static char mlx5_version[] =
	DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
	DRIVER_VERSION " (" DRIVER_RELDATE ")\n";

enum {
	MLX5_ATOMIC_SIZE_QP_8BYTES = 1 << 3,
};

static enum rdma_link_layer
mlx5_port_type_cap_to_rdma_ll(int port_type_cap)
{
	switch (port_type_cap) {
	case MLX5_CAP_PORT_TYPE_IB:
		return IB_LINK_LAYER_INFINIBAND;
	case MLX5_CAP_PORT_TYPE_ETH:
		return IB_LINK_LAYER_ETHERNET;
	default:
		return IB_LINK_LAYER_UNSPECIFIED;
	}
}

static enum rdma_link_layer
mlx5_ib_port_link_layer(struct ib_device *device, u8 port_num)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	int port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type);

	return mlx5_port_type_cap_to_rdma_ll(port_type_cap);
}

static bool mlx5_netdev_match(struct net_device *ndev,
			      struct mlx5_core_dev *mdev,
			      const char *dname)
{
	return ndev->if_type == IFT_ETHER &&
	    ndev->if_dname != NULL &&
	    strcmp(ndev->if_dname, dname) == 0 &&
	    ndev->if_softc != NULL &&
	    *(struct mlx5_core_dev **)ndev->if_softc == mdev;
}
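/*
 * The notifier below keeps roce.netdev pointing at the mlx5en interface
 * that backs this IB device (NETDEV_REGISTER/UNREGISTER) and translates
 * link transitions (NETDEV_UP/DOWN) into IB_EVENT_PORT_ACTIVE or
 * IB_EVENT_PORT_ERR on port 1 while the device is active.
 */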
static int mlx5_netdev_event(struct notifier_block *this,
			     unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct mlx5_ib_dev *ibdev = container_of(this, struct mlx5_ib_dev,
						 roce.nb);

	switch (event) {
	case NETDEV_REGISTER:
	case NETDEV_UNREGISTER:
		write_lock(&ibdev->roce.netdev_lock);
		/* check if network interface belongs to mlx5en */
		if (mlx5_netdev_match(ndev, ibdev->mdev, "mce"))
			ibdev->roce.netdev = (event == NETDEV_UNREGISTER) ?
			    NULL : ndev;
		write_unlock(&ibdev->roce.netdev_lock);
		break;

	case NETDEV_UP:
	case NETDEV_DOWN: {
		struct net_device *upper = NULL;

		if ((upper == ndev || (!upper && ndev == ibdev->roce.netdev))
		    && ibdev->ib_active) {
			struct ib_event ibev = {0};

			ibev.device = &ibdev->ib_dev;
			ibev.event = (event == NETDEV_UP) ?
			    IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
			ibev.element.port_num = 1;
			ib_dispatch_event(&ibev);
		}
		break;
	}

	default:
		break;
	}

	return NOTIFY_DONE;
}

static struct net_device *mlx5_ib_get_netdev(struct ib_device *device,
					     u8 port_num)
{
	struct mlx5_ib_dev *ibdev = to_mdev(device);
	struct net_device *ndev;

	/* Ensure ndev does not disappear before we invoke dev_hold() */
	read_lock(&ibdev->roce.netdev_lock);
	ndev = ibdev->roce.netdev;
	if (ndev)
		dev_hold(ndev);
	read_unlock(&ibdev->roce.netdev_lock);

	return ndev;
}

static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
				struct ib_port_attr *props)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	struct net_device *ndev;
	enum ib_mtu ndev_ib_mtu;
	u16 qkey_viol_cntr;

	memset(props, 0, sizeof(*props));

	props->port_cap_flags |= IB_PORT_CM_SUP;
	props->port_cap_flags |= IB_PORT_IP_BASED_GIDS;

	props->gid_tbl_len = MLX5_CAP_ROCE(dev->mdev,
					   roce_address_table_size);
	props->max_mtu = IB_MTU_4096;
	props->max_msg_sz = 1 << MLX5_CAP_GEN(dev->mdev, log_max_msg);
	props->pkey_tbl_len = 1;
	props->state = IB_PORT_DOWN;
	props->phys_state = 3;

	mlx5_query_nic_vport_qkey_viol_cntr(dev->mdev, &qkey_viol_cntr);
	props->qkey_viol_cntr = qkey_viol_cntr;

	ndev = mlx5_ib_get_netdev(device, port_num);
	if (!ndev)
		return 0;

	if (netif_running(ndev) && netif_carrier_ok(ndev)) {
		props->state = IB_PORT_ACTIVE;
		props->phys_state = 5;
	}

	ndev_ib_mtu = iboe_get_mtu(ndev->if_mtu);

	dev_put(ndev);

	props->active_mtu = min(props->max_mtu, ndev_ib_mtu);

	props->active_width = IB_WIDTH_4X; /* TODO */
	props->active_speed = IB_SPEED_QDR; /* TODO */

	return 0;
}

static void ib_gid_to_mlx5_roce_addr(const union ib_gid *gid,
				     const struct ib_gid_attr *attr,
				     void *mlx5_addr)
{
#define	MLX5_SET_RA(p, f, v) MLX5_SET(roce_addr_layout, p, f, v)
	char *mlx5_addr_l3_addr = MLX5_ADDR_OF(roce_addr_layout, mlx5_addr,
					       source_l3_address);
	void *mlx5_addr_mac = MLX5_ADDR_OF(roce_addr_layout, mlx5_addr,
					   source_mac_47_32);

	if (!gid)
		return;
	ether_addr_copy(mlx5_addr_mac, IF_LLADDR(attr->ndev));

	if (is_vlan_dev(attr->ndev)) {
		MLX5_SET_RA(mlx5_addr, vlan_valid, 1);
		MLX5_SET_RA(mlx5_addr, vlan_id, vlan_dev_vlan_id(attr->ndev));
	}

	switch (attr->gid_type) {
	case IB_GID_TYPE_IB:
		MLX5_SET_RA(mlx5_addr, roce_version, MLX5_ROCE_VERSION_1);
		break;
	case IB_GID_TYPE_ROCE_UDP_ENCAP:
		MLX5_SET_RA(mlx5_addr, roce_version, MLX5_ROCE_VERSION_2);
		break;

	default:
		WARN_ON(true);
	}

	if (attr->gid_type != IB_GID_TYPE_IB) {
		if (ipv6_addr_v4mapped((void *)gid))
			MLX5_SET_RA(mlx5_addr, roce_l3_type,
				    MLX5_ROCE_L3_TYPE_IPV4);
		else
			MLX5_SET_RA(mlx5_addr, roce_l3_type,
				    MLX5_ROCE_L3_TYPE_IPV6);
	}

	if ((attr->gid_type == IB_GID_TYPE_IB) ||
	    !ipv6_addr_v4mapped((void *)gid))
		memcpy(mlx5_addr_l3_addr, gid, sizeof(*gid));
	else
		memcpy(&mlx5_addr_l3_addr[12], &gid->raw[12], 4);
}

static int set_roce_addr(struct ib_device *device, u8 port_num,
			 unsigned int index,
			 const union ib_gid *gid,
			 const struct ib_gid_attr *attr)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	u32 in[MLX5_ST_SZ_DW(set_roce_address_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(set_roce_address_out)] = {0};
	void *in_addr = MLX5_ADDR_OF(set_roce_address_in, in, roce_address);
	enum rdma_link_layer ll = mlx5_ib_port_link_layer(device, port_num);

	if (ll != IB_LINK_LAYER_ETHERNET)
		return -EINVAL;

	ib_gid_to_mlx5_roce_addr(gid, attr, in_addr);

	MLX5_SET(set_roce_address_in, in, roce_address_index, index);
	MLX5_SET(set_roce_address_in, in, opcode, MLX5_CMD_OP_SET_ROCE_ADDRESS);
	return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
}

static int mlx5_ib_add_gid(struct ib_device *device, u8 port_num,
			   unsigned int index, const union ib_gid *gid,
			   const struct ib_gid_attr *attr,
			   __always_unused void **context)
{
	return set_roce_addr(device, port_num, index, gid, attr);
}

static int mlx5_ib_del_gid(struct ib_device *device, u8 port_num,
			   unsigned int index, __always_unused void **context)
{
	return set_roce_addr(device, port_num, index, NULL, NULL);
}

__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
			       int index)
{
	struct ib_gid_attr attr;
	union ib_gid gid;

	if (ib_get_cached_gid(&dev->ib_dev, port_num, index, &gid, &attr))
		return 0;

	if (!attr.ndev)
		return 0;

	dev_put(attr.ndev);

	if (attr.gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
		return 0;

	return cpu_to_be16(MLX5_CAP_ROCE(dev->mdev, r_roce_min_src_udp_port));
}

static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
{
	if (MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_IB)
		return !MLX5_CAP_GEN(dev->mdev, ib_virt);
	return 0;
}

enum {
	MLX5_VPORT_ACCESS_METHOD_MAD,
	MLX5_VPORT_ACCESS_METHOD_HCA,
	MLX5_VPORT_ACCESS_METHOD_NIC,
};

static int mlx5_get_vport_access_method(struct ib_device *ibdev)
{
	if (mlx5_use_mad_ifc(to_mdev(ibdev)))
		return MLX5_VPORT_ACCESS_METHOD_MAD;

	if (mlx5_ib_port_link_layer(ibdev, 1) ==
	    IB_LINK_LAYER_ETHERNET)
		return MLX5_VPORT_ACCESS_METHOD_NIC;

	return MLX5_VPORT_ACCESS_METHOD_HCA;
}

static void get_atomic_caps(struct mlx5_ib_dev *dev,
			    struct ib_device_attr *props)
{
	u8 tmp;
	u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations);
	u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);
	u8 atomic_req_8B_endianness_mode =
		MLX5_CAP_ATOMIC(dev->mdev, atomic_req_8B_endianess_mode);

	/* Check if HW supports 8 byte standard atomic operations and is
	 * capable of responding in host endianness.
	 */
	tmp = MLX5_ATOMIC_OPS_CMP_SWAP | MLX5_ATOMIC_OPS_FETCH_ADD;
	if (((atomic_operations & tmp) == tmp) &&
	    (atomic_size_qp & MLX5_ATOMIC_SIZE_QP_8BYTES) &&
	    (atomic_req_8B_endianness_mode)) {
		props->atomic_cap = IB_ATOMIC_HCA;
	} else {
		props->atomic_cap = IB_ATOMIC_NONE;
	}
}
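/*
 * The query helpers that follow dispatch on the vport access method:
 * MAD ifc for IB ports without ib_virt, HCA vport commands for native
 * IB, and NIC vport commands when the link layer is Ethernet.
 */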
static int mlx5_query_system_image_guid(struct ib_device *ibdev,
					__be64 *sys_image_guid)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	u64 tmp;
	int err;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_system_image_guid(ibdev,
							    sys_image_guid);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
		err = mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
		break;

	case MLX5_VPORT_ACCESS_METHOD_NIC:
		err = mlx5_query_nic_vport_system_image_guid(mdev, &tmp);
		break;

	default:
		return -EINVAL;
	}

	if (!err)
		*sys_image_guid = cpu_to_be64(tmp);

	return err;
}

static int mlx5_query_max_pkeys(struct ib_device *ibdev,
				u16 *max_pkeys)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_max_pkeys(ibdev, max_pkeys);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
	case MLX5_VPORT_ACCESS_METHOD_NIC:
		*max_pkeys = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev,
						pkey_table_size));
		return 0;

	default:
		return -EINVAL;
	}
}

static int mlx5_query_vendor_id(struct ib_device *ibdev,
				u32 *vendor_id)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_vendor_id(ibdev, vendor_id);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
	case MLX5_VPORT_ACCESS_METHOD_NIC:
		return mlx5_core_query_vendor_id(dev->mdev, vendor_id);

	default:
		return -EINVAL;
	}
}

static int mlx5_query_node_guid(struct mlx5_ib_dev *dev,
				__be64 *node_guid)
{
	u64 tmp;
	int err;

	switch (mlx5_get_vport_access_method(&dev->ib_dev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_node_guid(dev, node_guid);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
		err = mlx5_query_hca_vport_node_guid(dev->mdev, &tmp);
		break;

	case MLX5_VPORT_ACCESS_METHOD_NIC:
		err = mlx5_query_nic_vport_node_guid(dev->mdev, &tmp);
		break;

	default:
		return -EINVAL;
	}

	if (!err)
		*node_guid = cpu_to_be64(tmp);

	return err;
}

struct mlx5_reg_node_desc {
	u8 desc[IB_DEVICE_NODE_DESC_MAX];
};

static int mlx5_query_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
{
	struct mlx5_reg_node_desc in;

	if (mlx5_use_mad_ifc(dev))
		return mlx5_query_mad_ifc_node_desc(dev, node_desc);

	memset(&in, 0, sizeof(in));

	return mlx5_core_access_reg(dev->mdev, &in, sizeof(in), node_desc,
				    sizeof(struct mlx5_reg_node_desc),
				    MLX5_REG_NODE_DESC, 0, 0);
}
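/*
 * mlx5_ib_query_device() negotiates the user response size: each
 * optional capability block is appended to resp only when the caller's
 * output buffer can hold it, following the pattern
 *
 *	if (field_avail(typeof(resp), tso_caps, uhw->outlen))
 *		resp.response_length += sizeof(resp.tso_caps);
 *
 * so older user libraries simply receive a shorter response.
 */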
static int mlx5_ib_query_device(struct ib_device *ibdev,
				struct ib_device_attr *props,
				struct ib_udata *uhw)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	int err = -ENOMEM;
	int max_rq_sg;
	int max_sq_sg;
	u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);
	struct mlx5_ib_query_device_resp resp = {};
	size_t resp_len;
	u64 max_tso;

	resp_len = sizeof(resp.comp_mask) + sizeof(resp.response_length);
	if (uhw->outlen && uhw->outlen < resp_len)
		return -EINVAL;
	else
		resp.response_length = resp_len;

	if (uhw->inlen && !ib_is_udata_cleared(uhw, 0, uhw->inlen))
		return -EINVAL;

	memset(props, 0, sizeof(*props));
	err = mlx5_query_system_image_guid(ibdev,
					   &props->sys_image_guid);
	if (err)
		return err;

	err = mlx5_query_max_pkeys(ibdev, &props->max_pkeys);
	if (err)
		return err;

	err = mlx5_query_vendor_id(ibdev, &props->vendor_id);
	if (err)
		return err;

	props->fw_ver = ((u64)fw_rev_maj(dev->mdev) << 32) |
		(fw_rev_min(dev->mdev) << 16) |
		fw_rev_sub(dev->mdev);
	props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
		IB_DEVICE_PORT_ACTIVE_EVENT |
		IB_DEVICE_SYS_IMAGE_GUID |
		IB_DEVICE_RC_RNR_NAK_GEN;

	if (MLX5_CAP_GEN(mdev, pkv))
		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
	if (MLX5_CAP_GEN(mdev, qkv))
		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
	if (MLX5_CAP_GEN(mdev, apm))
		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
	if (MLX5_CAP_GEN(mdev, xrc))
		props->device_cap_flags |= IB_DEVICE_XRC;
	if (MLX5_CAP_GEN(mdev, imaicl)) {
		props->device_cap_flags |= IB_DEVICE_MEM_WINDOW |
					   IB_DEVICE_MEM_WINDOW_TYPE_2B;
		props->max_mw = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
		/* We support 'Gappy' memory registration too */
		props->device_cap_flags |= IB_DEVICE_SG_GAPS_REG;
	}
	props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
	if (MLX5_CAP_GEN(mdev, sho)) {
		props->device_cap_flags |= IB_DEVICE_SIGNATURE_HANDOVER;
		/* At this stage no support for signature handover */
		props->sig_prot_cap = IB_PROT_T10DIF_TYPE_1 |
				      IB_PROT_T10DIF_TYPE_2 |
				      IB_PROT_T10DIF_TYPE_3;
		props->sig_guard_cap = IB_GUARD_T10DIF_CRC |
				       IB_GUARD_T10DIF_CSUM;
	}
	if (MLX5_CAP_GEN(mdev, block_lb_mc))
		props->device_cap_flags |= IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;

	if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads)) {
		if (MLX5_CAP_ETH(mdev, csum_cap))
			props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;

		if (field_avail(typeof(resp), tso_caps, uhw->outlen)) {
			max_tso = MLX5_CAP_ETH(mdev, max_lso_cap);
			if (max_tso) {
				resp.tso_caps.max_tso = 1 << max_tso;
				resp.tso_caps.supported_qpts |=
					1 << IB_QPT_RAW_PACKET;
				resp.response_length += sizeof(resp.tso_caps);
			}
		}

		if (field_avail(typeof(resp), rss_caps, uhw->outlen)) {
			resp.rss_caps.rx_hash_function =
				MLX5_RX_HASH_FUNC_TOEPLITZ;
			resp.rss_caps.rx_hash_fields_mask =
				MLX5_RX_HASH_SRC_IPV4 |
				MLX5_RX_HASH_DST_IPV4 |
				MLX5_RX_HASH_SRC_IPV6 |
				MLX5_RX_HASH_DST_IPV6 |
				MLX5_RX_HASH_SRC_PORT_TCP |
				MLX5_RX_HASH_DST_PORT_TCP |
				MLX5_RX_HASH_SRC_PORT_UDP |
				MLX5_RX_HASH_DST_PORT_UDP;
			resp.response_length += sizeof(resp.rss_caps);
		}
	} else {
		if (field_avail(typeof(resp), tso_caps, uhw->outlen))
			resp.response_length += sizeof(resp.tso_caps);
		if (field_avail(typeof(resp), rss_caps, uhw->outlen))
			resp.response_length += sizeof(resp.rss_caps);
	}

	if (MLX5_CAP_GEN(mdev, ipoib_ipoib_offloads)) {
		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
		props->device_cap_flags |= IB_DEVICE_UD_TSO;
	}

	if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
	    MLX5_CAP_ETH(dev->mdev, scatter_fcs))
		props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS;

	if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS))
		props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;

	props->vendor_part_id = mdev->pdev->device;
	props->hw_ver = mdev->pdev->revision;

	props->max_mr_size = ~0ull;
	props->page_size_cap = ~(min_page_size - 1);
	props->max_qp = 1 << MLX5_CAP_GEN(mdev, log_max_qp);
	props->max_qp_wr = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
	max_rq_sg = MLX5_CAP_GEN(mdev, max_wqe_sz_rq) /
		sizeof(struct mlx5_wqe_data_seg);
	max_sq_sg = (MLX5_CAP_GEN(mdev, max_wqe_sz_sq) -
		sizeof(struct mlx5_wqe_ctrl_seg)) /
		sizeof(struct mlx5_wqe_data_seg);
	props->max_sge = min(max_rq_sg, max_sq_sg);
	props->max_sge_rd = MLX5_MAX_SGE_RD;
	props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
	props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
	props->max_mr = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
	props->max_pd = 1 << MLX5_CAP_GEN(mdev, log_max_pd);
	props->max_qp_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_req_qp);
	props->max_qp_init_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_res_qp);
	props->max_srq = 1 << MLX5_CAP_GEN(mdev, log_max_srq);
	props->max_srq_wr = (1 << MLX5_CAP_GEN(mdev, log_max_srq_sz)) - 1;
	props->local_ca_ack_delay = MLX5_CAP_GEN(mdev, local_ca_ack_delay);
	props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
	props->max_srq_sge = max_rq_sg - 1;
	props->max_fast_reg_page_list_len =
		1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size);
	get_atomic_caps(dev, props);
	props->masked_atomic_cap = IB_ATOMIC_NONE;
	props->max_mcast_grp = 1 << MLX5_CAP_GEN(mdev, log_max_mcg);
	props->max_mcast_qp_attach = MLX5_CAP_GEN(mdev, max_qp_mcg);
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
					   props->max_mcast_grp;
	props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */
	props->hca_core_clock = MLX5_CAP_GEN(mdev, device_frequency_khz);
	props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (MLX5_CAP_GEN(mdev, pg))
		props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING;
	props->odp_caps = dev->odp_caps;
#endif

	if (MLX5_CAP_GEN(mdev, cd))
		props->device_cap_flags |= IB_DEVICE_CROSS_CHANNEL;

	if (!mlx5_core_is_pf(mdev))
		props->device_cap_flags |= IB_DEVICE_VIRTUAL_FUNCTION;

	if (mlx5_ib_port_link_layer(ibdev, 1) ==
	    IB_LINK_LAYER_ETHERNET) {
		props->rss_caps.max_rwq_indirection_tables =
			1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt);
		props->rss_caps.max_rwq_indirection_table_size =
			1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt_size);
		props->rss_caps.supported_qpts = 1 << IB_QPT_RAW_PACKET;
		props->max_wq_type_rq =
			1 << MLX5_CAP_GEN(dev->mdev, log_max_rq);
	}

	if (uhw->outlen) {
		err = ib_copy_to_udata(uhw, &resp, resp.response_length);

		if (err)
			return err;
	}

	return 0;
}

enum mlx5_ib_width {
	MLX5_IB_WIDTH_1X	= 1 << 0,
	MLX5_IB_WIDTH_2X	= 1 << 1,
	MLX5_IB_WIDTH_4X	= 1 << 2,
	MLX5_IB_WIDTH_8X	= 1 << 3,
	MLX5_IB_WIDTH_12X	= 1 << 4
};

static int translate_active_width(struct ib_device *ibdev, u8 active_width,
				  u8 *ib_width)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	int err = 0;

	if (active_width & MLX5_IB_WIDTH_1X) {
		*ib_width = IB_WIDTH_1X;
	} else if (active_width & MLX5_IB_WIDTH_2X) {
		mlx5_ib_dbg(dev, "active_width %d is not supported by IB spec\n",
			    (int)active_width);
		err = -EINVAL;
	} else if (active_width & MLX5_IB_WIDTH_4X) {
		*ib_width = IB_WIDTH_4X;
	} else if (active_width & MLX5_IB_WIDTH_8X) {
		*ib_width = IB_WIDTH_8X;
	} else if (active_width & MLX5_IB_WIDTH_12X) {
		*ib_width = IB_WIDTH_12X;
	} else {
		mlx5_ib_dbg(dev, "Invalid active_width %d\n",
			    (int)active_width);
		err = -EINVAL;
	}

	return err;
}

enum ib_max_vl_num {
	__IB_MAX_VL_0		= 1,
	__IB_MAX_VL_0_1		= 2,
	__IB_MAX_VL_0_3		= 3,
	__IB_MAX_VL_0_7		= 4,
	__IB_MAX_VL_0_14	= 5,
};

enum mlx5_vl_hw_cap {
	MLX5_VL_HW_0	= 1,
	MLX5_VL_HW_0_1	= 2,
	MLX5_VL_HW_0_2	= 3,
	MLX5_VL_HW_0_3	= 4,
	MLX5_VL_HW_0_4	= 5,
	MLX5_VL_HW_0_5	= 6,
	MLX5_VL_HW_0_6	= 7,
	MLX5_VL_HW_0_7	= 8,
	MLX5_VL_HW_0_14	= 15
};

static int translate_max_vl_num(struct ib_device *ibdev, u8 vl_hw_cap,
				u8 *max_vl_num)
{
	switch (vl_hw_cap) {
	case MLX5_VL_HW_0:
		*max_vl_num = __IB_MAX_VL_0;
		break;
	case MLX5_VL_HW_0_1:
		*max_vl_num = __IB_MAX_VL_0_1;
		break;
	case MLX5_VL_HW_0_3:
		*max_vl_num = __IB_MAX_VL_0_3;
		break;
	case MLX5_VL_HW_0_7:
		*max_vl_num = __IB_MAX_VL_0_7;
		break;
	case MLX5_VL_HW_0_14:
		*max_vl_num = __IB_MAX_VL_0_14;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
			       struct ib_port_attr *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	u32 *rep;
	int replen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
	struct mlx5_ptys_reg *ptys;
	struct mlx5_pmtu_reg *pmtu;
	struct mlx5_pvlc_reg pvlc;
	void *ctx;
	int err;

	rep = mlx5_vzalloc(replen);
	ptys = kzalloc(sizeof(*ptys), GFP_KERNEL);
	pmtu = kzalloc(sizeof(*pmtu), GFP_KERNEL);
	if (!rep || !ptys || !pmtu) {
		err = -ENOMEM;
		goto out;
	}

	memset(props, 0, sizeof(*props));

	err = mlx5_query_hca_vport_context(mdev, port, 0, rep, replen);
	if (err)
		goto out;

	ctx = MLX5_ADDR_OF(query_hca_vport_context_out, rep, hca_vport_context);

	props->lid = MLX5_GET(hca_vport_context, ctx, lid);
	props->lmc = MLX5_GET(hca_vport_context, ctx, lmc);
	props->sm_lid = MLX5_GET(hca_vport_context, ctx, sm_lid);
	props->sm_sl = MLX5_GET(hca_vport_context, ctx, sm_sl);
	props->state = MLX5_GET(hca_vport_context, ctx, vport_state);
	props->phys_state = MLX5_GET(hca_vport_context, ctx,
				     port_physical_state);
	props->port_cap_flags = MLX5_GET(hca_vport_context, ctx, cap_mask1);
	props->gid_tbl_len = mlx5_get_gid_table_len(MLX5_CAP_GEN(mdev, gid_table_size));
	props->max_msg_sz = 1 << MLX5_CAP_GEN(mdev, log_max_msg);
	props->pkey_tbl_len = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev, pkey_table_size));
	props->bad_pkey_cntr = MLX5_GET(hca_vport_context, ctx,
					pkey_violation_counter);
	props->qkey_viol_cntr = MLX5_GET(hca_vport_context, ctx,
					 qkey_violation_counter);
	props->subnet_timeout = MLX5_GET(hca_vport_context, ctx,
					 subnet_timeout);
	props->init_type_reply = MLX5_GET(hca_vport_context, ctx,
					  init_type_reply);
	props->grh_required = MLX5_GET(hca_vport_context, ctx, grh_required);

	ptys->proto_mask |= MLX5_PTYS_IB;
	ptys->local_port = port;
	err = mlx5_core_access_ptys(mdev, ptys, 0);
	if (err)
		goto out;

	err = translate_active_width(ibdev, ptys->ib_link_width_oper,
				     &props->active_width);
	if (err)
		goto out;

	props->active_speed = (u8)ptys->ib_proto_oper;

	pmtu->local_port = port;
	err = mlx5_core_access_pmtu(mdev, pmtu, 0);
	if (err)
		goto out;

	props->max_mtu = pmtu->max_mtu;
	props->active_mtu = pmtu->oper_mtu;

	memset(&pvlc, 0, sizeof(pvlc));
	pvlc.local_port = port;
	err = mlx5_core_access_pvlc(mdev, &pvlc, 0);
	if (err)
		goto out;

	err = translate_max_vl_num(ibdev, pvlc.vl_hw_cap,
				   &props->max_vl_num);
out:
	kvfree(rep);
	kfree(ptys);
	kfree(pmtu);
	return err;
}
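/*
 * Port attributes use the same three-way dispatch; the HCA path above
 * combines the vport context with the PTYS, PMTU and PVLC registers,
 * while the RoCE path derives state from the attached net device.
 */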
int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
		       struct ib_port_attr *props)
{
	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_port(ibdev, port, props);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
		return mlx5_query_hca_port(ibdev, port, props);

	case MLX5_VPORT_ACCESS_METHOD_NIC:
		return mlx5_query_port_roce(ibdev, port, props);

	default:
		return -EINVAL;
	}
}

static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			     union ib_gid *gid)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_gids(ibdev, port, index, gid);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
		return mlx5_query_hca_vport_gid(mdev, port, 0, index, gid);

	default:
		return -EINVAL;
	}
}

static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			      u16 *pkey)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_pkey(ibdev, port, index, pkey);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
	case MLX5_VPORT_ACCESS_METHOD_NIC:
		return mlx5_query_hca_vport_pkey(mdev, 0, port, 0, index,
						 pkey);
	default:
		return -EINVAL;
	}
}

static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask,
				 struct ib_device_modify *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_reg_node_desc in;
	struct mlx5_reg_node_desc out;
	int err;

	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
		return -EOPNOTSUPP;

	if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
		return 0;

	/*
	 * If possible, pass node desc to FW, so it can generate
	 * a 144 trap. If cmd fails, just ignore.
	 */
	memcpy(&in, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
	err = mlx5_core_access_reg(dev->mdev, &in, sizeof(in), &out,
				   sizeof(out), MLX5_REG_NODE_DESC, 0, 1);
	if (err)
		return err;

	memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX);

	return err;
}

static int mlx5_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
			       struct ib_port_modify *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct ib_port_attr attr;
	u32 tmp;
	int err;

	mutex_lock(&dev->cap_mask_mutex);

	err = mlx5_ib_query_port(ibdev, port, &attr);
	if (err)
		goto out;

	tmp = (attr.port_cap_flags | props->set_port_cap_mask) &
		~props->clr_port_cap_mask;

	err = mlx5_set_port_caps(dev->mdev, port, tmp);

out:
	mutex_unlock(&dev->cap_mask_mutex);
	return err;
}
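/*
 * User context setup: validate the v0/v2 request, carve the requested
 * UUARs into UAR pages (reserving the two fast-path slots in every
 * group of four), allocate a transport domain when available, and
 * report capabilities back through a length-negotiated response.
 */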
static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
						  struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_ib_alloc_ucontext_req_v2 req = {};
	struct mlx5_ib_alloc_ucontext_resp resp = {};
	struct mlx5_ib_ucontext *context;
	struct mlx5_uuar_info *uuari;
	struct mlx5_uar *uars;
	int gross_uuars;
	int num_uars;
	int ver;
	int uuarn;
	int err;
	int i;
	size_t reqlen;
	size_t min_req_v2 = offsetof(struct mlx5_ib_alloc_ucontext_req_v2,
				     max_cqe_version);

	if (!dev->ib_active)
		return ERR_PTR(-EAGAIN);

	if (udata->inlen < sizeof(struct ib_uverbs_cmd_hdr))
		return ERR_PTR(-EINVAL);

	reqlen = udata->inlen - sizeof(struct ib_uverbs_cmd_hdr);
	if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
		ver = 0;
	else if (reqlen >= min_req_v2)
		ver = 2;
	else
		return ERR_PTR(-EINVAL);

	err = ib_copy_from_udata(&req, udata, min(reqlen, sizeof(req)));
	if (err)
		return ERR_PTR(err);

	if (req.flags)
		return ERR_PTR(-EINVAL);

	if (req.total_num_uuars > MLX5_MAX_UUARS)
		return ERR_PTR(-ENOMEM);

	if (req.total_num_uuars == 0)
		return ERR_PTR(-EINVAL);

	if (req.comp_mask || req.reserved0 || req.reserved1 || req.reserved2)
		return ERR_PTR(-EOPNOTSUPP);

	if (reqlen > sizeof(req) &&
	    !ib_is_udata_cleared(udata, sizeof(req),
				 reqlen - sizeof(req)))
		return ERR_PTR(-EOPNOTSUPP);

	req.total_num_uuars = ALIGN(req.total_num_uuars,
				    MLX5_NON_FP_BF_REGS_PER_PAGE);
	if (req.num_low_latency_uuars > req.total_num_uuars - 1)
		return ERR_PTR(-EINVAL);

	num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE;
	gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE;
	resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
	if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf))
		resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
	resp.cache_line_size = cache_line_size();
	resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
	resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
	resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
	resp.max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
	resp.max_srq_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
	resp.cqe_version = min_t(__u8,
				 (__u8)MLX5_CAP_GEN(dev->mdev, cqe_version),
				 req.max_cqe_version);
	resp.response_length = min(offsetof(typeof(resp), response_length) +
				   sizeof(resp.response_length), udata->outlen);

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);

	uuari = &context->uuari;
	mutex_init(&uuari->lock);
	uars = kcalloc(num_uars, sizeof(*uars), GFP_KERNEL);
	if (!uars) {
		err = -ENOMEM;
		goto out_ctx;
	}

	uuari->bitmap = kcalloc(BITS_TO_LONGS(gross_uuars),
				sizeof(*uuari->bitmap),
				GFP_KERNEL);
	if (!uuari->bitmap) {
		err = -ENOMEM;
		goto out_uar_ctx;
	}
	/*
	 * clear all fast path uuars
	 */
	for (i = 0; i < gross_uuars; i++) {
		uuarn = i & 3;
		if (uuarn == 2 || uuarn == 3)
			set_bit(i, uuari->bitmap);
	}

	uuari->count = kcalloc(gross_uuars, sizeof(*uuari->count), GFP_KERNEL);
	if (!uuari->count) {
		err = -ENOMEM;
		goto out_bitmap;
	}

	for (i = 0; i < num_uars; i++) {
		err = mlx5_cmd_alloc_uar(dev->mdev, &uars[i].index);
		if (err)
			goto out_count;
	}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	context->ibucontext.invalidate_range = &mlx5_ib_invalidate_range;
#endif

	if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain)) {
		err = mlx5_alloc_transport_domain(dev->mdev,
						  &context->tdn);
		if (err)
			goto out_uars;
	}

	INIT_LIST_HEAD(&context->vma_private_list);
	INIT_LIST_HEAD(&context->db_page_list);
	mutex_init(&context->db_page_mutex);

	resp.tot_uuars = req.total_num_uuars;
	resp.num_ports = MLX5_CAP_GEN(dev->mdev, num_ports);

	if (field_avail(typeof(resp), cqe_version, udata->outlen))
		resp.response_length += sizeof(resp.cqe_version);

	if (field_avail(typeof(resp), cmds_supp_uhw, udata->outlen)) {
		resp.cmds_supp_uhw |= MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE |
				      MLX5_USER_CMDS_SUPP_UHW_CREATE_AH;
		resp.response_length += sizeof(resp.cmds_supp_uhw);
	}

	/*
	 * We don't want to expose information from the PCI bar that is located
	 * after 4096 bytes, so if the arch only supports larger pages, let's
	 * pretend we don't support reading the HCA's core clock. This is also
	 * forced by mmap function.
	 */
	if (PAGE_SIZE <= 4096 &&
	    field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) {
		resp.comp_mask |=
			MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET;
		resp.hca_core_clock_offset =
			offsetof(struct mlx5_init_seg, internal_timer_h) %
			PAGE_SIZE;
		resp.response_length += sizeof(resp.hca_core_clock_offset) +
					sizeof(resp.reserved2);
	}

	err = ib_copy_to_udata(udata, &resp, resp.response_length);
	if (err)
		goto out_td;

	uuari->ver = ver;
	uuari->num_low_latency_uuars = req.num_low_latency_uuars;
	uuari->uars = uars;
	uuari->num_uars = num_uars;
	context->cqe_version = resp.cqe_version;

	return &context->ibucontext;

out_td:
	if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
		mlx5_dealloc_transport_domain(dev->mdev, context->tdn);

out_uars:
	for (i--; i >= 0; i--)
		mlx5_cmd_free_uar(dev->mdev, uars[i].index);
out_count:
	kfree(uuari->count);

out_bitmap:
	kfree(uuari->bitmap);

out_uar_ctx:
	kfree(uars);

out_ctx:
	kfree(context);
	return ERR_PTR(err);
}

static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
	struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
	struct mlx5_uuar_info *uuari = &context->uuari;
	int i;

	if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
		mlx5_dealloc_transport_domain(dev->mdev, context->tdn);

	for (i = 0; i < uuari->num_uars; i++) {
		if (mlx5_cmd_free_uar(dev->mdev, uuari->uars[i].index))
			mlx5_ib_warn(dev, "failed to free UAR 0x%x\n", uuari->uars[i].index);
	}

	kfree(uuari->count);
	kfree(uuari->bitmap);
	kfree(uuari->uars);
	kfree(context);

	return 0;
}

static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev, int index)
{
	return (pci_resource_start(dev->mdev->pdev, 0) >> PAGE_SHIFT) + index;
}

static int get_command(unsigned long offset)
{
	return (offset >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK;
}

static int get_arg(unsigned long offset)
{
	return offset & ((1 << MLX5_IB_MMAP_CMD_SHIFT) - 1);
}

static int get_index(unsigned long offset)
{
	return get_arg(offset);
}

static void mlx5_ib_vma_open(struct vm_area_struct *area)
{
	/* vma_open is called when a new VMA is created on top of our VMA. This
	 * is done through either mremap flow or split_vma (usually due to
	 * mlock, madvise, munmap, etc.) We do not support a clone of the VMA,
	 * as this VMA is strongly hardware related. Therefore we set the
	 * vm_ops of the newly created/cloned VMA to NULL, to prevent it from
	 * calling us again and trying to do incorrect actions. We assume that
	 * the original VMA size is exactly a single page, and therefore all
	 * "splitting" operation will not happen to it.
	 */
	area->vm_ops = NULL;
}

static void mlx5_ib_vma_close(struct vm_area_struct *area)
{
	struct mlx5_ib_vma_private_data *mlx5_ib_vma_priv_data;

	/* It's guaranteed that all VMAs opened on a FD are closed before the
	 * file itself is closed, therefore no sync is needed with the regular
	 * closing flow. (e.g. mlx5 ib_dealloc_ucontext)
	 * However need a sync with accessing the vma as part of
	 * mlx5_ib_disassociate_ucontext.
	 * The close operation is usually called under mm->mmap_sem except when
	 * process is exiting.
	 * The exiting case is handled explicitly as part of
	 * mlx5_ib_disassociate_ucontext.
	 */
	mlx5_ib_vma_priv_data = (struct mlx5_ib_vma_private_data *)area->vm_private_data;

	/* setting the vma context pointer to null in the mlx5_ib driver's
	 * private data, to protect a race condition in
	 * mlx5_ib_disassociate_ucontext().
	 */
	mlx5_ib_vma_priv_data->vma = NULL;
	list_del(&mlx5_ib_vma_priv_data->list);
	kfree(mlx5_ib_vma_priv_data);
}

static const struct vm_operations_struct mlx5_ib_vm_ops = {
	.open = mlx5_ib_vma_open,
	.close = mlx5_ib_vma_close
};

static int mlx5_ib_set_vma_data(struct vm_area_struct *vma,
				struct mlx5_ib_ucontext *ctx)
{
	struct mlx5_ib_vma_private_data *vma_prv;
	struct list_head *vma_head = &ctx->vma_private_list;

	vma_prv = kzalloc(sizeof(*vma_prv), GFP_KERNEL);
	if (!vma_prv)
		return -ENOMEM;

	vma_prv->vma = vma;
	vma->vm_private_data = vma_prv;
	vma->vm_ops = &mlx5_ib_vm_ops;

	list_add(&vma_prv->list, vma_head);

	return 0;
}
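/*
 * Disassociate flow: when the owning process dies or detaches, zap the
 * PTEs of every VMA still mapped on this context so the process cannot
 * touch hardware pages, and clear vm_ops so the close callback does not
 * run against freed state.
 */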
static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
	int ret;
	struct vm_area_struct *vma;
	struct mlx5_ib_vma_private_data *vma_private, *n;
	struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
	struct task_struct *owning_process = NULL;
	struct mm_struct *owning_mm = NULL;

	owning_process = get_pid_task(ibcontext->tgid, PIDTYPE_PID);
	if (!owning_process)
		return;

	owning_mm = get_task_mm(owning_process);
	if (!owning_mm) {
		pr_info("no mm, disassociate ucontext is pending task termination\n");
		while (1) {
			put_task_struct(owning_process);
			usleep_range(1000, 2000);
			owning_process = get_pid_task(ibcontext->tgid,
						      PIDTYPE_PID);
			if (!owning_process /* ||
			    owning_process->state == TASK_DEAD */) {
				pr_info("disassociate ucontext done, task was terminated\n");
				/* in case task was dead need to release the
				 * task struct.
				 */
				if (owning_process)
					put_task_struct(owning_process);
				return;
			}
		}
	}

	/* need to protect from a race on closing the vma as part of
	 * mlx5_ib_vma_close.
	 */
	down_read(&owning_mm->mmap_sem);
	list_for_each_entry_safe(vma_private, n, &context->vma_private_list,
				 list) {
		vma = vma_private->vma;
		ret = zap_vma_ptes(vma, vma->vm_start,
				   PAGE_SIZE);
		WARN_ONCE(ret, "%s: zap_vma_ptes failed", __func__);
		/* context going to be destroyed, should
		 * not access ops any more.
		 */
		vma->vm_ops = NULL;
		list_del(&vma_private->list);
		kfree(vma_private);
	}
	up_read(&owning_mm->mmap_sem);
	mmput(owning_mm);
	put_task_struct(owning_process);
}

static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
{
	switch (cmd) {
	case MLX5_IB_MMAP_WC_PAGE:
		return "WC";
	case MLX5_IB_MMAP_REGULAR_PAGE:
		return "best effort WC";
	case MLX5_IB_MMAP_NC_PAGE:
		return "NC";
	default:
		return NULL;
	}
}

static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
		    struct vm_area_struct *vma,
		    struct mlx5_ib_ucontext *context)
{
	struct mlx5_uuar_info *uuari = &context->uuari;
	int err;
	unsigned long idx;
	phys_addr_t pfn, pa;
	pgprot_t prot;

	switch (cmd) {
	case MLX5_IB_MMAP_WC_PAGE:
/* Some architectures don't support WC memory */
#if defined(CONFIG_X86)
		if (!pat_enabled())
			return -EPERM;
#elif !(defined(CONFIG_PPC) || (defined(CONFIG_ARM) && defined(CONFIG_MMU)))
		return -EPERM;
#endif
		/* fall through */
	case MLX5_IB_MMAP_REGULAR_PAGE:
		/* For MLX5_IB_MMAP_REGULAR_PAGE do the best effort to get WC */
		prot = pgprot_writecombine(vma->vm_page_prot);
		break;
	case MLX5_IB_MMAP_NC_PAGE:
		prot = pgprot_noncached(vma->vm_page_prot);
		break;
	default:
		return -EINVAL;
	}

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;

	idx = get_index(vma->vm_pgoff);
	if (idx >= uuari->num_uars)
		return -EINVAL;

	pfn = uar_index2pfn(dev, uuari->uars[idx].index);
	mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn);

	vma->vm_page_prot = prot;
	err = io_remap_pfn_range(vma, vma->vm_start, pfn,
				 PAGE_SIZE, vma->vm_page_prot);
	if (err) {
		mlx5_ib_err(dev, "io_remap_pfn_range failed with error=%d, vm_start=0x%llx, pfn=%pa, mmap_cmd=%s\n",
			    err, (unsigned long long)vma->vm_start, &pfn, mmap_cmd2str(cmd));
		return -EAGAIN;
	}

	pa = pfn << PAGE_SHIFT;
	mlx5_ib_dbg(dev, "mapped %s at 0x%llx, PA %pa\n", mmap_cmd2str(cmd),
		    (unsigned long long)vma->vm_start, &pa);

	return mlx5_ib_set_vma_data(vma, context);
}

static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
{
	struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
	struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
	unsigned long command;
	phys_addr_t pfn;

	command = get_command(vma->vm_pgoff);
	switch (command) {
	case MLX5_IB_MMAP_WC_PAGE:
	case MLX5_IB_MMAP_NC_PAGE:
	case MLX5_IB_MMAP_REGULAR_PAGE:
		return uar_mmap(dev, command, vma, context);

	case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES:
		return -ENOSYS;

	case MLX5_IB_MMAP_CORE_CLOCK:
		if (vma->vm_end - vma->vm_start != PAGE_SIZE)
			return -EINVAL;

		if (vma->vm_flags & VM_WRITE)
			return -EPERM;

		/* Don't expose to user-space information it shouldn't have */
		if (PAGE_SIZE > 4096)
			return -EOPNOTSUPP;

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		pfn = (dev->mdev->iseg_base +
		       offsetof(struct mlx5_init_seg, internal_timer_h)) >>
			PAGE_SHIFT;
		if (io_remap_pfn_range(vma, vma->vm_start, pfn,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;

		mlx5_ib_dbg(dev, "mapped internal timer at 0x%llx, PA 0x%llx\n",
			    (unsigned long long)vma->vm_start,
			    (unsigned long long)pfn << PAGE_SHIFT);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev,
				      struct ib_ucontext *context,
				      struct ib_udata *udata)
{
	struct mlx5_ib_alloc_pd_resp resp;
	struct mlx5_ib_pd *pd;
	int err;

	pd = kmalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	err = mlx5_core_alloc_pd(to_mdev(ibdev)->mdev, &pd->pdn);
	if (err) {
		kfree(pd);
		return ERR_PTR(err);
	}

	if (context) {
		resp.pdn = pd->pdn;
		if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
			mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn);
			kfree(pd);
			return ERR_PTR(-EFAULT);
		}
	}

	return &pd->ibpd;
}

static int mlx5_ib_dealloc_pd(struct ib_pd *pd)
{
	struct mlx5_ib_dev *mdev = to_mdev(pd->device);
	struct mlx5_ib_pd *mpd = to_mpd(pd);

	mlx5_core_dealloc_pd(mdev->mdev, mpd->pdn);
	kfree(mpd);

	return 0;
}

enum {
	MATCH_CRITERIA_ENABLE_OUTER_BIT,
	MATCH_CRITERIA_ENABLE_MISC_BIT,
	MATCH_CRITERIA_ENABLE_INNER_BIT
};

#define	HEADER_IS_ZERO(match_criteria, headers)				      \
	!(memchr_inv(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \
		     0, MLX5_FLD_SZ_BYTES(fte_match_param, headers)))

static u8 get_match_criteria_enable(u32 *match_criteria)
{
	u8 match_criteria_enable;

	match_criteria_enable =
		(!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
		MATCH_CRITERIA_ENABLE_OUTER_BIT;
	match_criteria_enable |=
		(!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
		MATCH_CRITERIA_ENABLE_MISC_BIT;
	match_criteria_enable |=
		(!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
		MATCH_CRITERIA_ENABLE_INNER_BIT;

	return match_criteria_enable;
}

static void set_proto(void *outer_c, void *outer_v, u8 mask, u8 val)
{
	MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_protocol, mask);
	MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_protocol, val);
}

static void set_tos(void *outer_c, void *outer_v, u8 mask, u8 val)
{
	MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_ecn, mask);
	MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_ecn, val);
	MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_dscp, mask >> 2);
	MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_dscp, val >> 2);
}

#define	LAST_ETH_FIELD vlan_tag
#define	LAST_IB_FIELD sl
#define	LAST_IPV4_FIELD tos
#define	LAST_IPV6_FIELD traffic_class
#define	LAST_TCP_UDP_FIELD src_port

/* Field is the last supported field */
#define	FIELDS_NOT_SUPPORTED(filter, field)\
	memchr_inv((void *)&filter.field +\
		   sizeof(filter.field), 0,\
		   sizeof(filter) -\
		   offsetof(typeof(filter), field) -\
		   sizeof(filter.field))
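/*
 * FIELDS_NOT_SUPPORTED() is non-zero when any mask byte beyond the last
 * supported filter field is set; parse_flow_attr() uses it to reject,
 * e.g., an IPv4 spec that masks fields past "tos" with -ENOTSUPP
 * instead of silently ignoring them.
 */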
static int parse_flow_attr(u32 *match_c, u32 *match_v,
			   const union ib_flow_spec *ib_spec)
{
	void *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
					     outer_headers);
	void *outer_headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
					     outer_headers);
	void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c,
					   misc_parameters);
	void *misc_params_v = MLX5_ADDR_OF(fte_match_param, match_v,
					   misc_parameters);

	switch (ib_spec->type) {
	case IB_FLOW_SPEC_ETH:
		if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD))
			return -ENOTSUPP;

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
					     dmac_47_16),
				ib_spec->eth.mask.dst_mac);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
					     dmac_47_16),
				ib_spec->eth.val.dst_mac);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
					     smac_47_16),
				ib_spec->eth.mask.src_mac);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
					     smac_47_16),
				ib_spec->eth.val.src_mac);

		if (ib_spec->eth.mask.vlan_tag) {
			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
				 cvlan_tag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
				 cvlan_tag, 1);

			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
				 first_vid, ntohs(ib_spec->eth.mask.vlan_tag));
			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
				 first_vid, ntohs(ib_spec->eth.val.vlan_tag));

			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
				 first_cfi,
				 ntohs(ib_spec->eth.mask.vlan_tag) >> 12);
			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
				 first_cfi,
				 ntohs(ib_spec->eth.val.vlan_tag) >> 12);

			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
				 first_prio,
				 ntohs(ib_spec->eth.mask.vlan_tag) >> 13);
			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
				 first_prio,
				 ntohs(ib_spec->eth.val.vlan_tag) >> 13);
		}
		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
			 ethertype, ntohs(ib_spec->eth.mask.ether_type));
		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
			 ethertype, ntohs(ib_spec->eth.val.ether_type));
		break;
	case IB_FLOW_SPEC_IPV4:
		if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD))
			return -ENOTSUPP;

		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
			 ethertype, 0xffff);
		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
			 ethertype, ETH_P_IP);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &ib_spec->ipv4.mask.src_ip,
		       sizeof(ib_spec->ipv4.mask.src_ip));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &ib_spec->ipv4.val.src_ip,
		       sizeof(ib_spec->ipv4.val.src_ip));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &ib_spec->ipv4.mask.dst_ip,
		       sizeof(ib_spec->ipv4.mask.dst_ip));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &ib_spec->ipv4.val.dst_ip,
		       sizeof(ib_spec->ipv4.val.dst_ip));

		set_tos(outer_headers_c, outer_headers_v,
			ib_spec->ipv4.mask.tos, ib_spec->ipv4.val.tos);

		set_proto(outer_headers_c, outer_headers_v,
			  ib_spec->ipv4.mask.proto, ib_spec->ipv4.val.proto);
		break;
	case IB_FLOW_SPEC_IPV6:
		if (FIELDS_NOT_SUPPORTED(ib_spec->ipv6.mask, LAST_IPV6_FIELD))
			return -ENOTSUPP;

		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
			 ethertype, 0xffff);
		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
			 ethertype, IPPROTO_IPV6);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &ib_spec->ipv6.mask.src_ip,
		       sizeof(ib_spec->ipv6.mask.src_ip));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &ib_spec->ipv6.val.src_ip,
		       sizeof(ib_spec->ipv6.val.src_ip));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &ib_spec->ipv6.mask.dst_ip,
		       sizeof(ib_spec->ipv6.mask.dst_ip));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &ib_spec->ipv6.val.dst_ip,
		       sizeof(ib_spec->ipv6.val.dst_ip));

		set_tos(outer_headers_c, outer_headers_v,
			ib_spec->ipv6.mask.traffic_class,
			ib_spec->ipv6.val.traffic_class);

		set_proto(outer_headers_c, outer_headers_v,
			  ib_spec->ipv6.mask.next_hdr,
			  ib_spec->ipv6.val.next_hdr);

		MLX5_SET(fte_match_set_misc, misc_params_c,
			 outer_ipv6_flow_label,
			 ntohl(ib_spec->ipv6.mask.flow_label));
		MLX5_SET(fte_match_set_misc, misc_params_v,
			 outer_ipv6_flow_label,
			 ntohl(ib_spec->ipv6.val.flow_label));
		break;
	case IB_FLOW_SPEC_TCP:
		if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
					 LAST_TCP_UDP_FIELD))
			return -ENOTSUPP;

		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol,
			 0xff);
		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol,
			 IPPROTO_TCP);

		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_sport,
			 ntohs(ib_spec->tcp_udp.mask.src_port));
		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_sport,
			 ntohs(ib_spec->tcp_udp.val.src_port));

		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_dport,
			 ntohs(ib_spec->tcp_udp.mask.dst_port));
		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_dport,
			 ntohs(ib_spec->tcp_udp.val.dst_port));
		break;
	case IB_FLOW_SPEC_UDP:
		if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask,
					 LAST_TCP_UDP_FIELD))
			return -ENOTSUPP;

		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol,
			 0xff);
		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol,
			 IPPROTO_UDP);

		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_sport,
			 ntohs(ib_spec->tcp_udp.mask.src_port));
		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_sport,
			 ntohs(ib_spec->tcp_udp.val.src_port));

		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_dport,
			 ntohs(ib_spec->tcp_udp.mask.dst_port));
		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_dport,
			 ntohs(ib_spec->tcp_udp.val.dst_port));
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
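/*
 * Flow specs are packed back to back after the ib_flow_attr header;
 * consumers advance with ib_spec->size, as create_flow_rule() does
 * below when it folds every spec into one match_criteria/match_value
 * pair.
 */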
/* If a flow could catch both multicast and unicast packets,
 * it won't fall into the multicast flow steering table and this rule
 * could steal other multicast packets.
 */
static bool flow_is_multicast_only(struct ib_flow_attr *ib_attr)
{
	struct ib_flow_spec_eth *eth_spec;

	if (ib_attr->type != IB_FLOW_ATTR_NORMAL ||
	    ib_attr->size < sizeof(struct ib_flow_attr) +
	    sizeof(struct ib_flow_spec_eth) ||
	    ib_attr->num_of_specs < 1)
		return false;

	eth_spec = (struct ib_flow_spec_eth *)(ib_attr + 1);
	if (eth_spec->type != IB_FLOW_SPEC_ETH ||
	    eth_spec->size != sizeof(*eth_spec))
		return false;

	return is_multicast_ether_addr(eth_spec->mask.dst_mac) &&
	       is_multicast_ether_addr(eth_spec->val.dst_mac);
}

static bool is_valid_attr(const struct ib_flow_attr *flow_attr)
{
	union ib_flow_spec *ib_spec = (union ib_flow_spec *)(flow_attr + 1);
	bool has_ipv4_spec = false;
	bool eth_type_ipv4 = true;
	unsigned int spec_index;

	/* Validate that ethertype is correct */
	for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
		if (ib_spec->type == IB_FLOW_SPEC_ETH &&
		    ib_spec->eth.mask.ether_type) {
			if (!((ib_spec->eth.mask.ether_type == htons(0xffff)) &&
			      ib_spec->eth.val.ether_type == htons(ETH_P_IP)))
				eth_type_ipv4 = false;
		} else if (ib_spec->type == IB_FLOW_SPEC_IPV4) {
			has_ipv4_spec = true;
		}
		ib_spec = (void *)ib_spec + ib_spec->size;
	}
	return !has_ipv4_spec || eth_type_ipv4;
}

static void put_flow_table(struct mlx5_ib_dev *dev,
			   struct mlx5_ib_flow_prio *prio, bool ft_added)
{
	prio->refcount -= !!ft_added;
	if (!prio->refcount) {
		mlx5_destroy_flow_table(prio->flow_table);
		prio->flow_table = NULL;
	}
}

static int mlx5_ib_destroy_flow(struct ib_flow *flow_id)
{
	struct mlx5_ib_dev *dev = to_mdev(flow_id->qp->device);
	struct mlx5_ib_flow_handler *handler = container_of(flow_id,
							    struct mlx5_ib_flow_handler,
							    ibflow);
	struct mlx5_ib_flow_handler *iter, *tmp;

	mutex_lock(&dev->flow_db.lock);

	list_for_each_entry_safe(iter, tmp, &handler->list, list) {
		mlx5_del_flow_rule(iter->rule);
		put_flow_table(dev, iter->prio, true);
		list_del(&iter->list);
		kfree(iter);
	}

	mlx5_del_flow_rule(handler->rule);
	put_flow_table(dev, handler->prio, true);
	mutex_unlock(&dev->flow_db.lock);

	kfree(handler);

	return 0;
}

static int ib_prio_to_core_prio(unsigned int priority, bool dont_trap)
{
	priority *= 2;
	if (!dont_trap)
		priority++;
	return priority;
}

enum flow_table_type {
	MLX5_IB_FT_RX,
	MLX5_IB_FT_TX
};

#define	MLX5_FS_MAX_TYPES 10
#define	MLX5_FS_MAX_ENTRIES 32000UL
static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
						struct ib_flow_attr *flow_attr,
						enum flow_table_type ft_type)
{
	bool dont_trap = flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP;
	struct mlx5_flow_namespace *ns = NULL;
	struct mlx5_ib_flow_prio *prio;
	struct mlx5_flow_table *ft;
	int num_entries;
	int num_groups;
	int priority;
	int err = 0;

	if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
		if (flow_is_multicast_only(flow_attr) &&
		    !dont_trap)
			priority = MLX5_IB_FLOW_MCAST_PRIO;
		else
			priority = ib_prio_to_core_prio(flow_attr->priority,
							dont_trap);
		ns = mlx5_get_flow_namespace(dev->mdev,
					     MLX5_FLOW_NAMESPACE_BYPASS);
		num_entries = MLX5_FS_MAX_ENTRIES;
		num_groups = MLX5_FS_MAX_TYPES;
		prio = &dev->flow_db.prios[priority];
	} else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
		   flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
		ns = mlx5_get_flow_namespace(dev->mdev,
					     MLX5_FLOW_NAMESPACE_LEFTOVERS);
		build_leftovers_ft_param("bypass", &priority,
					 &num_entries,
					 &num_groups);
		prio = &dev->flow_db.prios[MLX5_IB_FLOW_LEFTOVERS_PRIO];
	} else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
		if (!MLX5_CAP_FLOWTABLE(dev->mdev,
					allow_sniffer_and_nic_rx_shared_tir))
			return ERR_PTR(-ENOTSUPP);

		ns = mlx5_get_flow_namespace(dev->mdev, ft_type == MLX5_IB_FT_RX ?
					     MLX5_FLOW_NAMESPACE_SNIFFER_RX :
					     MLX5_FLOW_NAMESPACE_SNIFFER_TX);

		prio = &dev->flow_db.sniffer[ft_type];
		priority = 0;
		num_entries = 1;
		num_groups = 1;
	}

	if (!ns)
		return ERR_PTR(-ENOTSUPP);

	ft = prio->flow_table;
	if (!ft) {
		ft = mlx5_create_auto_grouped_flow_table(ns, priority, "bypass",
							 num_entries,
							 num_groups);

		if (!IS_ERR(ft)) {
			prio->refcount = 0;
			prio->flow_table = ft;
		} else {
			err = PTR_ERR(ft);
		}
	}

	return err ? ERR_PTR(err) : prio;
}

static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
						     struct mlx5_ib_flow_prio *ft_prio,
						     const struct ib_flow_attr *flow_attr,
						     struct mlx5_flow_destination *dst)
{
	struct mlx5_flow_table *ft = ft_prio->flow_table;
	struct mlx5_ib_flow_handler *handler;
	struct mlx5_flow_spec *spec;
	const void *ib_flow = (const void *)flow_attr + sizeof(*flow_attr);
	unsigned int spec_index;
	u32 action;
	int err = 0;

	if (!is_valid_attr(flow_attr))
		return ERR_PTR(-EINVAL);

	spec = mlx5_vzalloc(sizeof(*spec));
	handler = kzalloc(sizeof(*handler), GFP_KERNEL);
	if (!handler || !spec) {
		err = -ENOMEM;
		goto free;
	}

	INIT_LIST_HEAD(&handler->list);

	for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
		err = parse_flow_attr(spec->match_criteria,
				      spec->match_value, ib_flow);
		if (err < 0)
			goto free;

		ib_flow += ((union ib_flow_spec *)ib_flow)->size;
	}

	spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria);
	action = dst ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST :
		MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
	handler->rule = mlx5_add_flow_rule(ft, spec->match_criteria_enable,
					   spec->match_criteria,
					   spec->match_value,
					   action,
					   MLX5_FS_DEFAULT_FLOW_TAG,
					   dst);

	if (IS_ERR(handler->rule)) {
		err = PTR_ERR(handler->rule);
		goto free;
	}

	ft_prio->refcount++;
	handler->prio = ft_prio;

	ft_prio->flow_table = ft;
free:
	if (err)
		kfree(handler);
	kvfree(spec);
	return err ? ERR_PTR(err) : handler;
}
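/*
 * A DONT_TRAP attachment is built from two linked rules over the same
 * match: one without a destination (forward to the next flow priority)
 * and one forwarding to the QP; freeing the first handler also releases
 * the second through handler->list.
 */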
ERR_PTR(err) : handler; 1919} 1920 1921static struct mlx5_ib_flow_handler *create_dont_trap_rule(struct mlx5_ib_dev *dev, 1922 struct mlx5_ib_flow_prio *ft_prio, 1923 struct ib_flow_attr *flow_attr, 1924 struct mlx5_flow_destination *dst) 1925{ 1926 struct mlx5_ib_flow_handler *handler_dst = NULL; 1927 struct mlx5_ib_flow_handler *handler = NULL; 1928 1929 handler = create_flow_rule(dev, ft_prio, flow_attr, NULL); 1930 if (!IS_ERR(handler)) { 1931 handler_dst = create_flow_rule(dev, ft_prio, 1932 flow_attr, dst); 1933 if (IS_ERR(handler_dst)) { 1934 mlx5_del_flow_rule(handler->rule); 1935 ft_prio->refcount--; 1936 kfree(handler); 1937 handler = handler_dst; 1938 } else { 1939 list_add(&handler_dst->list, &handler->list); 1940 } 1941 } 1942 1943 return handler; 1944} 1945enum { 1946 LEFTOVERS_MC, 1947 LEFTOVERS_UC, 1948}; 1949 1950static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *dev, 1951 struct mlx5_ib_flow_prio *ft_prio, 1952 struct ib_flow_attr *flow_attr, 1953 struct mlx5_flow_destination *dst) 1954{ 1955 struct mlx5_ib_flow_handler *handler_ucast = NULL; 1956 struct mlx5_ib_flow_handler *handler = NULL; 1957 1958 static struct { 1959 struct ib_flow_attr flow_attr; 1960 struct ib_flow_spec_eth eth_flow; 1961 } leftovers_specs[] = { 1962 [LEFTOVERS_MC] = { 1963 .flow_attr = { 1964 .num_of_specs = 1, 1965 .size = sizeof(leftovers_specs[0]) 1966 }, 1967 .eth_flow = { 1968 .type = IB_FLOW_SPEC_ETH, 1969 .size = sizeof(struct ib_flow_spec_eth), 1970 .mask = {.dst_mac = {0x1} }, 1971 .val = {.dst_mac = {0x1} } 1972 } 1973 }, 1974 [LEFTOVERS_UC] = { 1975 .flow_attr = { 1976 .num_of_specs = 1, 1977 .size = sizeof(leftovers_specs[0]) 1978 }, 1979 .eth_flow = { 1980 .type = IB_FLOW_SPEC_ETH, 1981 .size = sizeof(struct ib_flow_spec_eth), 1982 .mask = {.dst_mac = {0x1} }, 1983 .val = {.dst_mac = {} } 1984 } 1985 } 1986 }; 1987 1988 handler = create_flow_rule(dev, ft_prio, 1989 &leftovers_specs[LEFTOVERS_MC].flow_attr, 1990 dst); 1991 if (!IS_ERR(handler) && 1992 flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT) { 1993 handler_ucast = create_flow_rule(dev, ft_prio, 1994 &leftovers_specs[LEFTOVERS_UC].flow_attr, 1995 dst); 1996 if (IS_ERR(handler_ucast)) { 1997 mlx5_del_flow_rule(handler->rule); 1998 ft_prio->refcount--; 1999 kfree(handler); 2000 handler = handler_ucast; 2001 } else { 2002 list_add(&handler_ucast->list, &handler->list); 2003 } 2004 } 2005 2006 return handler; 2007} 2008 2009static struct mlx5_ib_flow_handler *create_sniffer_rule(struct mlx5_ib_dev *dev, 2010 struct mlx5_ib_flow_prio *ft_rx, 2011 struct mlx5_ib_flow_prio *ft_tx, 2012 struct mlx5_flow_destination *dst) 2013{ 2014 struct mlx5_ib_flow_handler *handler_rx; 2015 struct mlx5_ib_flow_handler *handler_tx; 2016 int err; 2017 static const struct ib_flow_attr flow_attr = { 2018 .num_of_specs = 0, 2019 .size = sizeof(flow_attr) 2020 }; 2021 2022 handler_rx = create_flow_rule(dev, ft_rx, &flow_attr, dst); 2023 if (IS_ERR(handler_rx)) { 2024 err = PTR_ERR(handler_rx); 2025 goto err; 2026 } 2027 2028 handler_tx = create_flow_rule(dev, ft_tx, &flow_attr, dst); 2029 if (IS_ERR(handler_tx)) { 2030 err = PTR_ERR(handler_tx); 2031 goto err_tx; 2032 } 2033 2034 list_add(&handler_tx->list, &handler_rx->list); 2035 2036 return handler_rx; 2037 2038err_tx: 2039 mlx5_del_flow_rule(handler_rx->rule); 2040 ft_rx->refcount--; 2041 kfree(handler_rx); 2042err: 2043 return ERR_PTR(err); 2044} 2045 2046static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp, 2047 struct ib_flow_attr *flow_attr, 2048 int domain) 2049{ 2050 
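/*
 * Dispatcher for the ib_device create_flow verb: validate the request,
 * resolve the RX (and, for sniffer flows, TX) flow table, aim the
 * destination at the QP's TIR, then hand off to one of the
 * create_*_rule() helpers above based on flow_attr->type.  A minimal
 * user-space sketch of what typically lands here via libibverbs
 * (hypothetical values, not taken from this file):
 *
 *	struct { struct ibv_flow_attr attr; struct ibv_flow_spec_eth eth; }
 *	f = {
 *		.attr = { .type = IBV_FLOW_ATTR_NORMAL, .size = sizeof(f),
 *			  .num_of_specs = 1, .port = 1 },
 *		.eth  = { .type = IBV_FLOW_SPEC_ETH, .size = sizeof(f.eth) },
 *	};
 *	struct ibv_flow *flow = ibv_create_flow(qp, &f.attr);
 */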
struct mlx5_ib_dev *dev = to_mdev(qp->device); 2051 struct mlx5_ib_qp *mqp = to_mqp(qp); 2052 struct mlx5_ib_flow_handler *handler = NULL; 2053 struct mlx5_flow_destination *dst = NULL; 2054 struct mlx5_ib_flow_prio *ft_prio_tx = NULL; 2055 struct mlx5_ib_flow_prio *ft_prio; 2056 int err; 2057 2058 if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO) 2059 return ERR_PTR(-ENOSPC); 2060 2061 if (domain != IB_FLOW_DOMAIN_USER || 2062 flow_attr->port > MLX5_CAP_GEN(dev->mdev, num_ports) || 2063 (flow_attr->flags & ~IB_FLOW_ATTR_FLAGS_DONT_TRAP)) 2064 return ERR_PTR(-EINVAL); 2065 2066 dst = kzalloc(sizeof(*dst), GFP_KERNEL); 2067 if (!dst) 2068 return ERR_PTR(-ENOMEM); 2069 2070 mutex_lock(&dev->flow_db.lock); 2071 2072 ft_prio = get_flow_table(dev, flow_attr, MLX5_IB_FT_RX); 2073 if (IS_ERR(ft_prio)) { 2074 err = PTR_ERR(ft_prio); 2075 goto unlock; 2076 } 2077 if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) { 2078 ft_prio_tx = get_flow_table(dev, flow_attr, MLX5_IB_FT_TX); 2079 if (IS_ERR(ft_prio_tx)) { 2080 err = PTR_ERR(ft_prio_tx); 2081 ft_prio_tx = NULL; 2082 goto destroy_ft; 2083 } 2084 } 2085 2086 dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR; 2087 if (mqp->flags & MLX5_IB_QP_RSS) 2088 dst->tir_num = mqp->rss_qp.tirn; 2089 else 2090 dst->tir_num = mqp->raw_packet_qp.rq.tirn; 2091 2092 if (flow_attr->type == IB_FLOW_ATTR_NORMAL) { 2093 if (flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) { 2094 handler = create_dont_trap_rule(dev, ft_prio, 2095 flow_attr, dst); 2096 } else { 2097 handler = create_flow_rule(dev, ft_prio, flow_attr, 2098 dst); 2099 } 2100 } else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT || 2101 flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) { 2102 handler = create_leftovers_rule(dev, ft_prio, flow_attr, 2103 dst); 2104 } else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) { 2105 handler = create_sniffer_rule(dev, ft_prio, ft_prio_tx, dst); 2106 } else { 2107 err = -EINVAL; 2108 goto destroy_ft; 2109 } 2110 2111 if (IS_ERR(handler)) { 2112 err = PTR_ERR(handler); 2113 handler = NULL; 2114 goto destroy_ft; 2115 } 2116 2117 mutex_unlock(&dev->flow_db.lock); 2118 kfree(dst); 2119 2120 return &handler->ibflow; 2121 2122destroy_ft: 2123 put_flow_table(dev, ft_prio, false); 2124 if (ft_prio_tx) 2125 put_flow_table(dev, ft_prio_tx, false); 2126unlock: 2127 mutex_unlock(&dev->flow_db.lock); 2128 kfree(dst); 2129 kfree(handler); 2130 return ERR_PTR(err); 2131} 2132 2133static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) 2134{ 2135 struct mlx5_ib_dev *dev = to_mdev(ibqp->device); 2136 int err; 2137 2138 err = mlx5_core_attach_mcg(dev->mdev, gid, ibqp->qp_num); 2139 if (err) 2140 mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n", 2141 ibqp->qp_num, gid->raw); 2142 2143 return err; 2144} 2145 2146static int mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) 2147{ 2148 struct mlx5_ib_dev *dev = to_mdev(ibqp->device); 2149 int err; 2150 2151 err = mlx5_core_detach_mcg(dev->mdev, gid, ibqp->qp_num); 2152 if (err) 2153 mlx5_ib_warn(dev, "failed detaching QPN 0x%x, MGID %pI6\n", 2154 ibqp->qp_num, gid->raw); 2155 2156 return err; 2157} 2158 2159static int init_node_data(struct mlx5_ib_dev *dev) 2160{ 2161 int err; 2162 2163 err = mlx5_query_node_desc(dev, dev->ib_dev.node_desc); 2164 if (err) 2165 return err; 2166 2167 return mlx5_query_node_guid(dev, &dev->ib_dev.node_guid); 2168} 2169 2170static ssize_t show_fw_pages(struct device *device, struct device_attribute *attr, 2171 char *buf) 2172{ 2173 struct mlx5_ib_dev *dev = 2174 
container_of(device, struct mlx5_ib_dev, ib_dev.dev); 2175 2176 return sprintf(buf, "%lld\n", (long long)dev->mdev->priv.fw_pages); 2177} 2178 2179static ssize_t show_reg_pages(struct device *device, 2180 struct device_attribute *attr, char *buf) 2181{ 2182 struct mlx5_ib_dev *dev = 2183 container_of(device, struct mlx5_ib_dev, ib_dev.dev); 2184 2185 return sprintf(buf, "%d\n", atomic_read(&dev->mdev->priv.reg_pages)); 2186} 2187 2188static ssize_t show_hca(struct device *device, struct device_attribute *attr, 2189 char *buf) 2190{ 2191 struct mlx5_ib_dev *dev = 2192 container_of(device, struct mlx5_ib_dev, ib_dev.dev); 2193 return sprintf(buf, "MT%d\n", dev->mdev->pdev->device); 2194} 2195 2196static ssize_t show_rev(struct device *device, struct device_attribute *attr, 2197 char *buf) 2198{ 2199 struct mlx5_ib_dev *dev = 2200 container_of(device, struct mlx5_ib_dev, ib_dev.dev); 2201 return sprintf(buf, "%x\n", dev->mdev->pdev->revision); 2202} 2203 2204static ssize_t show_board(struct device *device, struct device_attribute *attr, 2205 char *buf) 2206{ 2207 struct mlx5_ib_dev *dev = 2208 container_of(device, struct mlx5_ib_dev, ib_dev.dev); 2209 return sprintf(buf, "%.*s\n", MLX5_BOARD_ID_LEN, 2210 dev->mdev->board_id); 2211} 2212 2213static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL); 2214static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL); 2215static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL); 2216static DEVICE_ATTR(fw_pages, S_IRUGO, show_fw_pages, NULL); 2217static DEVICE_ATTR(reg_pages, S_IRUGO, show_reg_pages, NULL); 2218 2219static struct device_attribute *mlx5_class_attributes[] = { 2220 &dev_attr_hw_rev, 2221 &dev_attr_hca_type, 2222 &dev_attr_board_id, 2223 &dev_attr_fw_pages, 2224 &dev_attr_reg_pages, 2225}; 2226 2227static void pkey_change_handler(struct work_struct *work) 2228{ 2229 struct mlx5_ib_port_resources *ports = 2230 container_of(work, struct mlx5_ib_port_resources, 2231 pkey_change_work); 2232 2233 mutex_lock(&ports->devr->mutex); 2234 mlx5_ib_gsi_pkey_change(ports->gsi); 2235 mutex_unlock(&ports->devr->mutex); 2236} 2237 2238static void mlx5_ib_handle_internal_error(struct mlx5_ib_dev *ibdev) 2239{ 2240 struct mlx5_ib_qp *mqp; 2241 struct mlx5_ib_cq *send_mcq, *recv_mcq; 2242 struct mlx5_core_cq *mcq; 2243 struct list_head cq_armed_list; 2244 unsigned long flags_qp; 2245 unsigned long flags_cq; 2246 unsigned long flags; 2247 2248 INIT_LIST_HEAD(&cq_armed_list); 2249 2250 /* Go over the QP list residing on this ibdev; the lock taken below synchronizes against QP create/destroy. */ 2251 spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags); 2252 list_for_each_entry(mqp, &ibdev->qp_list, qps_list) { 2253 spin_lock_irqsave(&mqp->sq.lock, flags_qp); 2254 if (mqp->sq.tail != mqp->sq.head) { 2255 send_mcq = to_mcq(mqp->ibqp.send_cq); 2256 spin_lock_irqsave(&send_mcq->lock, flags_cq); 2257 if (send_mcq->mcq.comp && 2258 mqp->ibqp.send_cq->comp_handler) { 2259 if (!send_mcq->mcq.reset_notify_added) { 2260 send_mcq->mcq.reset_notify_added = 1; 2261 list_add_tail(&send_mcq->mcq.reset_notify, 2262 &cq_armed_list); 2263 } 2264 } 2265 spin_unlock_irqrestore(&send_mcq->lock, flags_cq); 2266 } 2267 spin_unlock_irqrestore(&mqp->sq.lock, flags_qp); 2268 spin_lock_irqsave(&mqp->rq.lock, flags_qp); 2269 /* no handling is needed for SRQ */ 2270 if (!mqp->ibqp.srq) { 2271 if (mqp->rq.tail != mqp->rq.head) { 2272 recv_mcq = to_mcq(mqp->ibqp.recv_cq); 2273 spin_lock_irqsave(&recv_mcq->lock, flags_cq); 2274 if (recv_mcq->mcq.comp && 2275 mqp->ibqp.recv_cq->comp_handler) { 2276 if
(!recv_mcq->mcq.reset_notify_added) { 2277 recv_mcq->mcq.reset_notify_added = 1; 2278 list_add_tail(&recv_mcq->mcq.reset_notify, 2279 &cq_armed_list); 2280 } 2281 } 2282 spin_unlock_irqrestore(&recv_mcq->lock, 2283 flags_cq); 2284 } 2285 } 2286 spin_unlock_irqrestore(&mqp->rq.lock, flags_qp); 2287 } 2288 /* At this point all in-flight post-send work has been flushed by the 2289 * lock/unlock sequence above. Now arm all involved CQs. 2290 */ 2291 list_for_each_entry(mcq, &cq_armed_list, reset_notify) { 2292 mcq->comp(mcq); 2293 } 2294 spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags); 2295} 2296 2297static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context, 2298 enum mlx5_dev_event event, unsigned long param) 2299{ 2300 struct mlx5_ib_dev *ibdev = (struct mlx5_ib_dev *)context; 2301 struct ib_event ibev; 2302 bool fatal = false; 2303 u8 port = 0; 2304 2305 switch (event) { 2306 case MLX5_DEV_EVENT_SYS_ERROR: 2307 ibev.event = IB_EVENT_DEVICE_FATAL; 2308 mlx5_ib_handle_internal_error(ibdev); 2309 fatal = true; 2310 break; 2311 2312 case MLX5_DEV_EVENT_PORT_UP: 2313 case MLX5_DEV_EVENT_PORT_DOWN: 2314 case MLX5_DEV_EVENT_PORT_INITIALIZED: 2315 port = (u8)param; 2316 2317 /* In RoCE, port up/down events are handled in 2318 * mlx5_netdev_event(). 2319 */ 2320 if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) == 2321 IB_LINK_LAYER_ETHERNET) 2322 return; 2323 2324 ibev.event = (event == MLX5_DEV_EVENT_PORT_UP) ? 2325 IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR; 2326 break; 2327 2328 case MLX5_DEV_EVENT_LID_CHANGE: 2329 ibev.event = IB_EVENT_LID_CHANGE; 2330 port = (u8)param; 2331 break; 2332 2333 case MLX5_DEV_EVENT_PKEY_CHANGE: 2334 ibev.event = IB_EVENT_PKEY_CHANGE; 2335 port = (u8)param; 2336 2337 schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work); 2338 break; 2339 2340 case MLX5_DEV_EVENT_GUID_CHANGE: 2341 ibev.event = IB_EVENT_GID_CHANGE; 2342 port = (u8)param; 2343 break; 2344 2345 case MLX5_DEV_EVENT_CLIENT_REREG: 2346 ibev.event = IB_EVENT_CLIENT_REREGISTER; 2347 port = (u8)param; 2348 break; 2349 2350 default: 2351 break; 2352 } 2353 2354 ibev.device = &ibdev->ib_dev; 2355 ibev.element.port_num = port; 2356 2357 if (port < 1 || port > ibdev->num_ports) { 2358 mlx5_ib_warn(ibdev, "warning: event on port %d\n", port); 2359 return; 2360 } 2361 2362 if (ibdev->ib_active) 2363 ib_dispatch_event(&ibev); 2364 2365 if (fatal) 2366 ibdev->ib_active = false; 2367} 2368 2369static void get_ext_port_caps(struct mlx5_ib_dev *dev) 2370{ 2371 int port; 2372 2373 for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++) 2374 mlx5_query_ext_port_caps(dev, port); 2375} 2376 2377static int get_port_caps(struct mlx5_ib_dev *dev) 2378{ 2379 struct ib_device_attr *dprops = NULL; 2380 struct ib_port_attr *pprops = NULL; 2381 int err = -ENOMEM; 2382 int port; 2383 struct ib_udata uhw = {.inlen = 0, .outlen = 0}; 2384 2385 pprops = kmalloc(sizeof(*pprops), GFP_KERNEL); 2386 if (!pprops) 2387 goto out; 2388 2389 dprops = kmalloc(sizeof(*dprops), GFP_KERNEL); 2390 if (!dprops) 2391 goto out; 2392 2393 err = mlx5_ib_query_device(&dev->ib_dev, dprops, &uhw); 2394 if (err) { 2395 mlx5_ib_warn(dev, "query_device failed %d\n", err); 2396 goto out; 2397 } 2398 2399 for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++) { 2400 err = mlx5_ib_query_port(&dev->ib_dev, port, pprops); 2401 if (err) { 2402 mlx5_ib_warn(dev, "query_port %d failed %d\n", 2403 port, err); 2404 break; 2405 } 2406 dev->mdev->port_caps[port - 1].pkey_table_len = 2407 dprops->max_pkeys; 2408
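/* Cache the GID table length as well; note that dprops->max_pkeys above is a device-wide attribute, while the GID table length is reported per port. */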
dev->mdev->port_caps[port - 1].gid_table_len = 2409 pprops->gid_tbl_len; 2410 mlx5_ib_dbg(dev, "pkey_table_len %d, gid_table_len %d\n", 2411 dprops->max_pkeys, pprops->gid_tbl_len); 2412 } 2413 2414out: 2415 kfree(pprops); 2416 kfree(dprops); 2417 2418 return err; 2419} 2420 2421static void destroy_umrc_res(struct mlx5_ib_dev *dev) 2422{ 2423 int err; 2424 2425 err = mlx5_mr_cache_cleanup(dev); 2426 if (err) 2427 mlx5_ib_warn(dev, "mr cache cleanup failed\n"); 2428 2429 mlx5_ib_destroy_qp(dev->umrc.qp); 2430 ib_free_cq(dev->umrc.cq); 2431 ib_dealloc_pd(dev->umrc.pd); 2432} 2433 2434enum { 2435 MAX_UMR_WR = 128, 2436}; 2437 2438static int create_umr_res(struct mlx5_ib_dev *dev) 2439{ 2440 struct ib_qp_init_attr *init_attr = NULL; 2441 struct ib_qp_attr *attr = NULL; 2442 struct ib_pd *pd; 2443 struct ib_cq *cq; 2444 struct ib_qp *qp; 2445 int ret; 2446 2447 attr = kzalloc(sizeof(*attr), GFP_KERNEL); 2448 init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL); 2449 if (!attr || !init_attr) { 2450 ret = -ENOMEM; 2451 goto error_0; 2452 } 2453 2454 pd = ib_alloc_pd(&dev->ib_dev, 0); 2455 if (IS_ERR(pd)) { 2456 mlx5_ib_dbg(dev, "Couldn't create PD for sync UMR QP\n"); 2457 ret = PTR_ERR(pd); 2458 goto error_0; 2459 } 2460 2461 cq = ib_alloc_cq(&dev->ib_dev, NULL, 128, 0, IB_POLL_SOFTIRQ); 2462 if (IS_ERR(cq)) { 2463 mlx5_ib_dbg(dev, "Couldn't create CQ for sync UMR QP\n"); 2464 ret = PTR_ERR(cq); 2465 goto error_2; 2466 } 2467 2468 init_attr->send_cq = cq; 2469 init_attr->recv_cq = cq; 2470 init_attr->sq_sig_type = IB_SIGNAL_ALL_WR; 2471 init_attr->cap.max_send_wr = MAX_UMR_WR; 2472 init_attr->cap.max_send_sge = 1; 2473 init_attr->qp_type = MLX5_IB_QPT_REG_UMR; 2474 init_attr->port_num = 1; 2475 qp = mlx5_ib_create_qp(pd, init_attr, NULL); 2476 if (IS_ERR(qp)) { 2477 mlx5_ib_dbg(dev, "Couldn't create sync UMR QP\n"); 2478 ret = PTR_ERR(qp); 2479 goto error_3; 2480 } 2481 qp->device = &dev->ib_dev; 2482 qp->real_qp = qp; 2483 qp->uobject = NULL; 2484 qp->qp_type = MLX5_IB_QPT_REG_UMR; 2485 2486 attr->qp_state = IB_QPS_INIT; 2487 attr->port_num = 1; 2488 ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_PKEY_INDEX | 2489 IB_QP_PORT, NULL); 2490 if (ret) { 2491 mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n"); 2492 goto error_4; 2493 } 2494 2495 memset(attr, 0, sizeof(*attr)); 2496 attr->qp_state = IB_QPS_RTR; 2497 attr->path_mtu = IB_MTU_256; 2498 2499 ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL); 2500 if (ret) { 2501 mlx5_ib_dbg(dev, "Couldn't modify umr QP to rtr\n"); 2502 goto error_4; 2503 } 2504 2505 memset(attr, 0, sizeof(*attr)); 2506 attr->qp_state = IB_QPS_RTS; 2507 ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL); 2508 if (ret) { 2509 mlx5_ib_dbg(dev, "Couldn't modify umr QP to rts\n"); 2510 goto error_4; 2511 } 2512 2513 dev->umrc.qp = qp; 2514 dev->umrc.cq = cq; 2515 dev->umrc.pd = pd; 2516 2517 sema_init(&dev->umrc.sem, MAX_UMR_WR); 2518 ret = mlx5_mr_cache_init(dev); 2519 if (ret) { 2520 mlx5_ib_warn(dev, "mr cache init failed %d\n", ret); 2521 goto error_4; 2522 } 2523 2524 kfree(attr); 2525 kfree(init_attr); 2526 2527 return 0; 2528 2529error_4: 2530 mlx5_ib_destroy_qp(qp); 2531 2532error_3: 2533 ib_free_cq(cq); 2534 2535error_2: 2536 ib_dealloc_pd(pd); 2537 2538error_0: 2539 kfree(attr); 2540 kfree(init_attr); 2541 return ret; 2542} 2543 2544static int create_dev_resources(struct mlx5_ib_resources *devr) 2545{ 2546 struct ib_srq_init_attr attr; 2547 struct mlx5_ib_dev *dev; 2548 struct ib_cq_init_attr cq_attr = {.cqe = 1}; 2549 int port; 2550 int ret = 0; 2551 2552 dev = 
container_of(devr, struct mlx5_ib_dev, devr); 2553 2554 mutex_init(&devr->mutex); 2555 2556 devr->p0 = mlx5_ib_alloc_pd(&dev->ib_dev, NULL, NULL); 2557 if (IS_ERR(devr->p0)) { 2558 ret = PTR_ERR(devr->p0); 2559 goto error0; 2560 } 2561 devr->p0->device = &dev->ib_dev; 2562 devr->p0->uobject = NULL; 2563 atomic_set(&devr->p0->usecnt, 0); 2564 2565 devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, &cq_attr, NULL, NULL); 2566 if (IS_ERR(devr->c0)) { 2567 ret = PTR_ERR(devr->c0); 2568 goto error1; 2569 } 2570 devr->c0->device = &dev->ib_dev; 2571 devr->c0->uobject = NULL; 2572 devr->c0->comp_handler = NULL; 2573 devr->c0->event_handler = NULL; 2574 devr->c0->cq_context = NULL; 2575 atomic_set(&devr->c0->usecnt, 0); 2576 2577 devr->x0 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL); 2578 if (IS_ERR(devr->x0)) { 2579 ret = PTR_ERR(devr->x0); 2580 goto error2; 2581 } 2582 devr->x0->device = &dev->ib_dev; 2583 devr->x0->inode = NULL; 2584 atomic_set(&devr->x0->usecnt, 0); 2585 mutex_init(&devr->x0->tgt_qp_mutex); 2586 INIT_LIST_HEAD(&devr->x0->tgt_qp_list); 2587 2588 devr->x1 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL); 2589 if (IS_ERR(devr->x1)) { 2590 ret = PTR_ERR(devr->x1); 2591 goto error3; 2592 } 2593 devr->x1->device = &dev->ib_dev; 2594 devr->x1->inode = NULL; 2595 atomic_set(&devr->x1->usecnt, 0); 2596 mutex_init(&devr->x1->tgt_qp_mutex); 2597 INIT_LIST_HEAD(&devr->x1->tgt_qp_list); 2598 2599 memset(&attr, 0, sizeof(attr)); 2600 attr.attr.max_sge = 1; 2601 attr.attr.max_wr = 1; 2602 attr.srq_type = IB_SRQT_XRC; 2603 attr.ext.xrc.cq = devr->c0; 2604 attr.ext.xrc.xrcd = devr->x0; 2605 2606 devr->s0 = mlx5_ib_create_srq(devr->p0, &attr, NULL); 2607 if (IS_ERR(devr->s0)) { 2608 ret = PTR_ERR(devr->s0); 2609 goto error4; 2610 } 2611 devr->s0->device = &dev->ib_dev; 2612 devr->s0->pd = devr->p0; 2613 devr->s0->uobject = NULL; 2614 devr->s0->event_handler = NULL; 2615 devr->s0->srq_context = NULL; 2616 devr->s0->srq_type = IB_SRQT_XRC; 2617 devr->s0->ext.xrc.xrcd = devr->x0; 2618 devr->s0->ext.xrc.cq = devr->c0; 2619 atomic_inc(&devr->s0->ext.xrc.xrcd->usecnt); 2620 atomic_inc(&devr->s0->ext.xrc.cq->usecnt); 2621 atomic_inc(&devr->p0->usecnt); 2622 atomic_set(&devr->s0->usecnt, 0); 2623 2624 memset(&attr, 0, sizeof(attr)); 2625 attr.attr.max_sge = 1; 2626 attr.attr.max_wr = 1; 2627 attr.srq_type = IB_SRQT_BASIC; 2628 devr->s1 = mlx5_ib_create_srq(devr->p0, &attr, NULL); 2629 if (IS_ERR(devr->s1)) { 2630 ret = PTR_ERR(devr->s1); 2631 goto error5; 2632 } 2633 devr->s1->device = &dev->ib_dev; 2634 devr->s1->pd = devr->p0; 2635 devr->s1->uobject = NULL; 2636 devr->s1->event_handler = NULL; 2637 devr->s1->srq_context = NULL; 2638 devr->s1->srq_type = IB_SRQT_BASIC; 2639 devr->s1->ext.xrc.cq = devr->c0; 2640 atomic_inc(&devr->p0->usecnt); 2641 atomic_set(&devr->s1->usecnt, 0); 2642 2643 for (port = 0; port < ARRAY_SIZE(devr->ports); ++port) { 2644 INIT_WORK(&devr->ports[port].pkey_change_work, 2645 pkey_change_handler); 2646 devr->ports[port].devr = devr; 2647 } 2648 2649 return 0; 2650 2651error5: 2652 mlx5_ib_destroy_srq(devr->s0); 2653error4: 2654 mlx5_ib_dealloc_xrcd(devr->x1); 2655error3: 2656 mlx5_ib_dealloc_xrcd(devr->x0); 2657error2: 2658 mlx5_ib_destroy_cq(devr->c0); 2659error1: 2660 mlx5_ib_dealloc_pd(devr->p0); 2661error0: 2662 return ret; 2663} 2664 2665static void destroy_dev_resources(struct mlx5_ib_resources *devr) 2666{ 2667 struct mlx5_ib_dev *dev = 2668 container_of(devr, struct mlx5_ib_dev, devr); 2669 int port; 2670 2671 mlx5_ib_destroy_srq(devr->s1); 2672 mlx5_ib_destroy_srq(devr->s0);
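/* Teardown runs in reverse order of create_dev_resources(): the SRQs destroyed above held references on the XRCDs, CQ and PD released below. */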
2673 mlx5_ib_dealloc_xrcd(devr->x0); 2674 mlx5_ib_dealloc_xrcd(devr->x1); 2675 mlx5_ib_destroy_cq(devr->c0); 2676 mlx5_ib_dealloc_pd(devr->p0); 2677 2678 /* Make sure no change P_Key work items are still executing */ 2679 for (port = 0; port < dev->num_ports; ++port) 2680 cancel_work_sync(&devr->ports[port].pkey_change_work); 2681} 2682 2683static u32 get_core_cap_flags(struct ib_device *ibdev) 2684{ 2685 struct mlx5_ib_dev *dev = to_mdev(ibdev); 2686 enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, 1); 2687 u8 l3_type_cap = MLX5_CAP_ROCE(dev->mdev, l3_type); 2688 u8 roce_version_cap = MLX5_CAP_ROCE(dev->mdev, roce_version); 2689 u32 ret = 0; 2690 2691 if (ll == IB_LINK_LAYER_INFINIBAND) 2692 return RDMA_CORE_PORT_IBA_IB; 2693 2694 if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV4_CAP)) 2695 return 0; 2696 2697 if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV6_CAP)) 2698 return 0; 2699 2700 if (roce_version_cap & MLX5_ROCE_VERSION_1_CAP) 2701 ret |= RDMA_CORE_PORT_IBA_ROCE; 2702 2703 if (roce_version_cap & MLX5_ROCE_VERSION_2_CAP) 2704 ret |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP; 2705 2706 return ret; 2707} 2708 2709static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num, 2710 struct ib_port_immutable *immutable) 2711{ 2712 struct ib_port_attr attr; 2713 int err; 2714 2715 err = mlx5_ib_query_port(ibdev, port_num, &attr); 2716 if (err) 2717 return err; 2718 2719 immutable->pkey_tbl_len = attr.pkey_tbl_len; 2720 immutable->gid_tbl_len = attr.gid_tbl_len; 2721 immutable->core_cap_flags = get_core_cap_flags(ibdev); 2722 immutable->max_mad_size = IB_MGMT_MAD_SIZE; 2723 2724 return 0; 2725} 2726 2727static void get_dev_fw_str(struct ib_device *ibdev, char *str, 2728 size_t str_len) 2729{ 2730 struct mlx5_ib_dev *dev = 2731 container_of(ibdev, struct mlx5_ib_dev, ib_dev); 2732 snprintf(str, str_len, "%d.%d.%04d", fw_rev_maj(dev->mdev), 2733 fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev)); 2734} 2735 2736static int mlx5_roce_lag_init(struct mlx5_ib_dev *dev) 2737{ 2738 return 0; 2739} 2740 2741static void mlx5_roce_lag_cleanup(struct mlx5_ib_dev *dev) 2742{ 2743} 2744 2745static void mlx5_remove_roce_notifier(struct mlx5_ib_dev *dev) 2746{ 2747 if (dev->roce.nb.notifier_call) { 2748 unregister_netdevice_notifier(&dev->roce.nb); 2749 dev->roce.nb.notifier_call = NULL; 2750 } 2751} 2752 2753static int mlx5_enable_roce(struct mlx5_ib_dev *dev) 2754{ 2755 VNET_ITERATOR_DECL(vnet_iter); 2756 struct net_device *idev; 2757 int err; 2758 2759 /* Check if mlx5en net device already exists */ 2760 VNET_LIST_RLOCK(); 2761 VNET_FOREACH(vnet_iter) { 2762 IFNET_RLOCK(); 2763 CURVNET_SET_QUIET(vnet_iter); 2764 TAILQ_FOREACH(idev, &V_ifnet, if_link) { 2765 /* check if network interface belongs to mlx5en */ 2766 if (!mlx5_netdev_match(idev, dev->mdev, "mce")) 2767 continue; 2768 write_lock(&dev->roce.netdev_lock); 2769 dev->roce.netdev = idev; 2770 write_unlock(&dev->roce.netdev_lock); 2771 } 2772 CURVNET_RESTORE(); 2773 IFNET_RUNLOCK(); 2774 } 2775 VNET_LIST_RUNLOCK(); 2776 2777 dev->roce.nb.notifier_call = mlx5_netdev_event; 2778 err = register_netdevice_notifier(&dev->roce.nb); 2779 if (err) { 2780 dev->roce.nb.notifier_call = NULL; 2781 return err; 2782 } 2783 2784 err = mlx5_nic_vport_enable_roce(dev->mdev); 2785 if (err) 2786 goto err_unregister_netdevice_notifier; 2787 2788 err = mlx5_roce_lag_init(dev); 2789 if (err) 2790 goto err_disable_roce; 2791 2792 return 0; 2793 2794err_disable_roce: 2795 mlx5_nic_vport_disable_roce(dev->mdev); 2796 2797err_unregister_netdevice_notifier: 2798 
mlx5_remove_roce_notifier(dev); 2799 return err; 2800} 2801 2802static void mlx5_disable_roce(struct mlx5_ib_dev *dev) 2803{ 2804 mlx5_roce_lag_cleanup(dev); 2805 mlx5_nic_vport_disable_roce(dev->mdev); 2806} 2807 2808static void mlx5_ib_dealloc_q_port_counter(struct mlx5_ib_dev *dev, u8 port_num) 2809{ 2810 mlx5_vport_dealloc_q_counter(dev->mdev, 2811 MLX5_INTERFACE_PROTOCOL_IB, 2812 dev->port[port_num].q_cnt_id); 2813 dev->port[port_num].q_cnt_id = 0; 2814} 2815 2816static void mlx5_ib_dealloc_q_counters(struct mlx5_ib_dev *dev) 2817{ 2818 unsigned int i; 2819 2820 for (i = 0; i < dev->num_ports; i++) 2821 mlx5_ib_dealloc_q_port_counter(dev, i); 2822} 2823 2824static int mlx5_ib_alloc_q_counters(struct mlx5_ib_dev *dev) 2825{ 2826 int i; 2827 int ret; 2828 2829 for (i = 0; i < dev->num_ports; i++) { 2830 ret = mlx5_vport_alloc_q_counter(dev->mdev, 2831 MLX5_INTERFACE_PROTOCOL_IB, 2832 &dev->port[i].q_cnt_id); 2833 if (ret) { 2834 mlx5_ib_warn(dev, 2835 "couldn't allocate queue counter for port %d, err %d\n", 2836 i + 1, ret); 2837 goto dealloc_counters; 2838 } 2839 } 2840 2841 return 0; 2842 2843dealloc_counters: 2844 while (--i >= 0) 2845 mlx5_ib_dealloc_q_port_counter(dev, i); 2846 2847 return ret; 2848} 2849 2850static const char * const names[] = { 2851 "rx_write_requests", 2852 "rx_read_requests", 2853 "rx_atomic_requests", 2854 "out_of_buffer", 2855 "out_of_sequence", 2856 "duplicate_request", 2857 "rnr_nak_retry_err", 2858 "packet_seq_err", 2859 "implied_nak_seq_err", 2860 "local_ack_timeout_err", 2861}; 2862 2863static const size_t stats_offsets[] = { 2864 MLX5_BYTE_OFF(query_q_counter_out, rx_write_requests), 2865 MLX5_BYTE_OFF(query_q_counter_out, rx_read_requests), 2866 MLX5_BYTE_OFF(query_q_counter_out, rx_atomic_requests), 2867 MLX5_BYTE_OFF(query_q_counter_out, out_of_buffer), 2868 MLX5_BYTE_OFF(query_q_counter_out, out_of_sequence), 2869 MLX5_BYTE_OFF(query_q_counter_out, duplicate_request), 2870 MLX5_BYTE_OFF(query_q_counter_out, rnr_nak_retry_err), 2871 MLX5_BYTE_OFF(query_q_counter_out, packet_seq_err), 2872 MLX5_BYTE_OFF(query_q_counter_out, implied_nak_seq_err), 2873 MLX5_BYTE_OFF(query_q_counter_out, local_ack_timeout_err), 2874}; 2875 2876static struct rdma_hw_stats *mlx5_ib_alloc_hw_stats(struct ib_device *ibdev, 2877 u8 port_num) 2878{ 2879 BUILD_BUG_ON(ARRAY_SIZE(names) != ARRAY_SIZE(stats_offsets)); 2880 2881 /* We support only per port stats */ 2882 if (port_num == 0) 2883 return NULL; 2884 2885 return rdma_alloc_hw_stats_struct(names, ARRAY_SIZE(names), 2886 RDMA_HW_STATS_DEFAULT_LIFESPAN); 2887} 2888 2889static int mlx5_ib_get_hw_stats(struct ib_device *ibdev, 2890 struct rdma_hw_stats *stats, 2891 u8 port, int index) 2892{ 2893 struct mlx5_ib_dev *dev = to_mdev(ibdev); 2894 int outlen = MLX5_ST_SZ_BYTES(query_q_counter_out); 2895 void *out; 2896 __be32 val; 2897 int ret; 2898 int i; 2899 2900 if (!port || !stats) 2901 return -ENOSYS; 2902 2903 out = mlx5_vzalloc(outlen); 2904 if (!out) 2905 return -ENOMEM; 2906 2907 ret = mlx5_vport_query_q_counter(dev->mdev, 2908 dev->port[port - 1].q_cnt_id, 0, 2909 out, outlen); 2910 if (ret) 2911 goto free; 2912 2913 for (i = 0; i < ARRAY_SIZE(names); i++) { 2914 val = *(__be32 *)(out + stats_offsets[i]); 2915 stats->value[i] = (u64)be32_to_cpu(val); 2916 } 2917free: 2918 kvfree(out); 2919 return ARRAY_SIZE(names); 2920} 2921 2922static void *mlx5_ib_add(struct mlx5_core_dev *mdev) 2923{ 2924 struct mlx5_ib_dev *dev; 2925 enum rdma_link_layer ll; 2926 int port_type_cap; 2927 const char *name; 2928 int err; 2929 int i; 
2930 2931 port_type_cap = MLX5_CAP_GEN(mdev, port_type); 2932 ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap); 2933 2934 if ((ll == IB_LINK_LAYER_ETHERNET) && !MLX5_CAP_GEN(mdev, roce)) 2935 return NULL; 2936 2937 printk_once(KERN_INFO "%s", mlx5_version); 2938 2939 dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev)); 2940 if (!dev) 2941 return NULL; 2942 2943 dev->mdev = mdev; 2944 2945 dev->port = kcalloc(MLX5_CAP_GEN(mdev, num_ports), sizeof(*dev->port), 2946 GFP_KERNEL); 2947 if (!dev->port) 2948 goto err_dealloc; 2949 2950 rwlock_init(&dev->roce.netdev_lock); 2951 err = get_port_caps(dev); 2952 if (err) 2953 goto err_free_port; 2954 2955 if (mlx5_use_mad_ifc(dev)) 2956 get_ext_port_caps(dev); 2957 2958 MLX5_INIT_DOORBELL_LOCK(&dev->uar_lock); 2959 2960 name = "mlx5_%d"; 2961 2962 strlcpy(dev->ib_dev.name, name, IB_DEVICE_NAME_MAX); 2963 dev->ib_dev.owner = THIS_MODULE; 2964 dev->ib_dev.node_type = RDMA_NODE_IB_CA; 2965 dev->ib_dev.local_dma_lkey = 0 /* not supported for now */; 2966 dev->num_ports = MLX5_CAP_GEN(mdev, num_ports); 2967 dev->ib_dev.phys_port_cnt = dev->num_ports; 2968 dev->ib_dev.num_comp_vectors = 2969 dev->mdev->priv.eq_table.num_comp_vectors; 2970 dev->ib_dev.dma_device = &mdev->pdev->dev; 2971 2972 dev->ib_dev.uverbs_abi_ver = MLX5_IB_UVERBS_ABI_VERSION; 2973 dev->ib_dev.uverbs_cmd_mask = 2974 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) | 2975 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) | 2976 (1ull << IB_USER_VERBS_CMD_QUERY_PORT) | 2977 (1ull << IB_USER_VERBS_CMD_ALLOC_PD) | 2978 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) | 2979 (1ull << IB_USER_VERBS_CMD_CREATE_AH) | 2980 (1ull << IB_USER_VERBS_CMD_DESTROY_AH) | 2981 (1ull << IB_USER_VERBS_CMD_REG_MR) | 2982 (1ull << IB_USER_VERBS_CMD_REREG_MR) | 2983 (1ull << IB_USER_VERBS_CMD_DEREG_MR) | 2984 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) | 2985 (1ull << IB_USER_VERBS_CMD_CREATE_CQ) | 2986 (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) | 2987 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) | 2988 (1ull << IB_USER_VERBS_CMD_CREATE_QP) | 2989 (1ull << IB_USER_VERBS_CMD_MODIFY_QP) | 2990 (1ull << IB_USER_VERBS_CMD_QUERY_QP) | 2991 (1ull << IB_USER_VERBS_CMD_DESTROY_QP) | 2992 (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) | 2993 (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) | 2994 (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) | 2995 (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) | 2996 (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) | 2997 (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) | 2998 (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) | 2999 (1ull << IB_USER_VERBS_CMD_OPEN_QP); 3000 dev->ib_dev.uverbs_ex_cmd_mask = 3001 (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) | 3002 (1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ) | 3003 (1ull << IB_USER_VERBS_EX_CMD_CREATE_QP); 3004 3005 dev->ib_dev.query_device = mlx5_ib_query_device; 3006 dev->ib_dev.query_port = mlx5_ib_query_port; 3007 dev->ib_dev.get_link_layer = mlx5_ib_port_link_layer; 3008 if (ll == IB_LINK_LAYER_ETHERNET) 3009 dev->ib_dev.get_netdev = mlx5_ib_get_netdev; 3010 dev->ib_dev.query_gid = mlx5_ib_query_gid; 3011 dev->ib_dev.add_gid = mlx5_ib_add_gid; 3012 dev->ib_dev.del_gid = mlx5_ib_del_gid; 3013 dev->ib_dev.query_pkey = mlx5_ib_query_pkey; 3014 dev->ib_dev.modify_device = mlx5_ib_modify_device; 3015 dev->ib_dev.modify_port = mlx5_ib_modify_port; 3016 dev->ib_dev.alloc_ucontext = mlx5_ib_alloc_ucontext; 3017 dev->ib_dev.dealloc_ucontext = mlx5_ib_dealloc_ucontext; 3018 dev->ib_dev.mmap = mlx5_ib_mmap; 3019 dev->ib_dev.alloc_pd = mlx5_ib_alloc_pd; 3020 dev->ib_dev.dealloc_pd = mlx5_ib_dealloc_pd; 3021 dev->ib_dev.create_ah = 
mlx5_ib_create_ah; 3022 dev->ib_dev.query_ah = mlx5_ib_query_ah; 3023 dev->ib_dev.destroy_ah = mlx5_ib_destroy_ah; 3024 dev->ib_dev.create_srq = mlx5_ib_create_srq; 3025 dev->ib_dev.modify_srq = mlx5_ib_modify_srq; 3026 dev->ib_dev.query_srq = mlx5_ib_query_srq; 3027 dev->ib_dev.destroy_srq = mlx5_ib_destroy_srq; 3028 dev->ib_dev.post_srq_recv = mlx5_ib_post_srq_recv; 3029 dev->ib_dev.create_qp = mlx5_ib_create_qp; 3030 dev->ib_dev.modify_qp = mlx5_ib_modify_qp; 3031 dev->ib_dev.query_qp = mlx5_ib_query_qp; 3032 dev->ib_dev.destroy_qp = mlx5_ib_destroy_qp; 3033 dev->ib_dev.post_send = mlx5_ib_post_send; 3034 dev->ib_dev.post_recv = mlx5_ib_post_recv; 3035 dev->ib_dev.create_cq = mlx5_ib_create_cq; 3036 dev->ib_dev.modify_cq = mlx5_ib_modify_cq; 3037 dev->ib_dev.resize_cq = mlx5_ib_resize_cq; 3038 dev->ib_dev.destroy_cq = mlx5_ib_destroy_cq; 3039 dev->ib_dev.poll_cq = mlx5_ib_poll_cq; 3040 dev->ib_dev.req_notify_cq = mlx5_ib_arm_cq; 3041 dev->ib_dev.get_dma_mr = mlx5_ib_get_dma_mr; 3042 dev->ib_dev.reg_user_mr = mlx5_ib_reg_user_mr; 3043 dev->ib_dev.rereg_user_mr = mlx5_ib_rereg_user_mr; 3044 dev->ib_dev.reg_phys_mr = mlx5_ib_reg_phys_mr; 3045 dev->ib_dev.dereg_mr = mlx5_ib_dereg_mr; 3046 dev->ib_dev.attach_mcast = mlx5_ib_mcg_attach; 3047 dev->ib_dev.detach_mcast = mlx5_ib_mcg_detach; 3048 dev->ib_dev.process_mad = mlx5_ib_process_mad; 3049 dev->ib_dev.alloc_mr = mlx5_ib_alloc_mr; 3050 dev->ib_dev.map_mr_sg = mlx5_ib_map_mr_sg; 3051 dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status; 3052 dev->ib_dev.get_port_immutable = mlx5_port_immutable; 3053 dev->ib_dev.get_dev_fw_str = get_dev_fw_str; 3054 if (mlx5_core_is_pf(mdev)) { 3055 dev->ib_dev.get_vf_config = mlx5_ib_get_vf_config; 3056 dev->ib_dev.set_vf_link_state = mlx5_ib_set_vf_link_state; 3057 dev->ib_dev.get_vf_stats = mlx5_ib_get_vf_stats; 3058 dev->ib_dev.set_vf_guid = mlx5_ib_set_vf_guid; 3059 } 3060 3061 dev->ib_dev.disassociate_ucontext = mlx5_ib_disassociate_ucontext; 3062 3063 mlx5_ib_internal_fill_odp_caps(dev); 3064 3065 if (MLX5_CAP_GEN(mdev, imaicl)) { 3066 dev->ib_dev.alloc_mw = mlx5_ib_alloc_mw; 3067 dev->ib_dev.dealloc_mw = mlx5_ib_dealloc_mw; 3068 dev->ib_dev.uverbs_cmd_mask |= 3069 (1ull << IB_USER_VERBS_CMD_ALLOC_MW) | 3070 (1ull << IB_USER_VERBS_CMD_DEALLOC_MW); 3071 } 3072 3073 if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt) && 3074 MLX5_CAP_GEN(dev->mdev, retransmission_q_counters)) { 3075 dev->ib_dev.get_hw_stats = mlx5_ib_get_hw_stats; 3076 dev->ib_dev.alloc_hw_stats = mlx5_ib_alloc_hw_stats; 3077 } 3078 3079 if (MLX5_CAP_GEN(mdev, xrc)) { 3080 dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd; 3081 dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd; 3082 dev->ib_dev.uverbs_cmd_mask |= 3083 (1ull << IB_USER_VERBS_CMD_OPEN_XRCD) | 3084 (1ull << IB_USER_VERBS_CMD_CLOSE_XRCD); 3085 } 3086 3087 if (mlx5_ib_port_link_layer(&dev->ib_dev, 1) == 3088 IB_LINK_LAYER_ETHERNET) { 3089 dev->ib_dev.create_flow = mlx5_ib_create_flow; 3090 dev->ib_dev.destroy_flow = mlx5_ib_destroy_flow; 3091 dev->ib_dev.create_wq = mlx5_ib_create_wq; 3092 dev->ib_dev.modify_wq = mlx5_ib_modify_wq; 3093 dev->ib_dev.destroy_wq = mlx5_ib_destroy_wq; 3094 dev->ib_dev.create_rwq_ind_table = mlx5_ib_create_rwq_ind_table; 3095 dev->ib_dev.destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table; 3096 dev->ib_dev.uverbs_ex_cmd_mask |= 3097 (1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) | 3098 (1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW) | 3099 (1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) | 3100 (1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) | 3101 (1ull << 
IB_USER_VERBS_EX_CMD_DESTROY_WQ) | 3102 (1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) | 3103 (1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL); 3104 } 3105 err = init_node_data(dev); 3106 if (err) 3107 goto err_free_port; 3108 3109 mutex_init(&dev->flow_db.lock); 3110 mutex_init(&dev->cap_mask_mutex); 3111 INIT_LIST_HEAD(&dev->qp_list); 3112 spin_lock_init(&dev->reset_flow_resource_lock); 3113 3114 if (ll == IB_LINK_LAYER_ETHERNET) { 3115 err = mlx5_enable_roce(dev); 3116 if (err) 3117 goto err_free_port; 3118 } 3119 3120 err = create_dev_resources(&dev->devr); 3121 if (err) 3122 goto err_disable_roce; 3123 3124 err = mlx5_ib_odp_init_one(dev); 3125 if (err) 3126 goto err_rsrc; 3127 3128 err = mlx5_ib_alloc_q_counters(dev); 3129 if (err) 3130 goto err_odp; 3131 3132 err = ib_register_device(&dev->ib_dev, NULL); 3133 if (err) 3134 goto err_q_cnt; 3135 3136 err = create_umr_res(dev); 3137 if (err) 3138 goto err_dev; 3139 3140 for (i = 0; i < ARRAY_SIZE(mlx5_class_attributes); i++) { 3141 err = device_create_file(&dev->ib_dev.dev, 3142 mlx5_class_attributes[i]); 3143 if (err) 3144 goto err_umrc; 3145 } 3146 3147 dev->ib_active = true; 3148 3149 return dev; 3150 3151err_umrc: 3152 destroy_umrc_res(dev); 3153 3154err_dev: 3155 ib_unregister_device(&dev->ib_dev); 3156 3157err_q_cnt: 3158 mlx5_ib_dealloc_q_counters(dev); 3159 3160err_odp: 3161 mlx5_ib_odp_remove_one(dev); 3162 3163err_rsrc: 3164 destroy_dev_resources(&dev->devr); 3165 3166err_disable_roce: 3167 if (ll == IB_LINK_LAYER_ETHERNET) { 3168 mlx5_disable_roce(dev); 3169 mlx5_remove_roce_notifier(dev); 3170 } 3171 3172err_free_port: 3173 kfree(dev->port); 3174 3175err_dealloc: 3176 ib_dealloc_device((struct ib_device *)dev); 3177 3178 return NULL; 3179} 3180 3181static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context) 3182{ 3183 struct mlx5_ib_dev *dev = context; 3184 enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev, 1); 3185 3186 mlx5_remove_roce_notifier(dev); 3187 ib_unregister_device(&dev->ib_dev); 3188 mlx5_ib_dealloc_q_counters(dev); 3189 destroy_umrc_res(dev); 3190 mlx5_ib_odp_remove_one(dev); 3191 destroy_dev_resources(&dev->devr); 3192 if (ll == IB_LINK_LAYER_ETHERNET) 3193 mlx5_disable_roce(dev); 3194 kfree(dev->port); 3195 ib_dealloc_device(&dev->ib_dev); 3196} 3197 3198static struct mlx5_interface mlx5_ib_interface = { 3199 .add = mlx5_ib_add, 3200 .remove = mlx5_ib_remove, 3201 .event = mlx5_ib_event, 3202 .protocol = MLX5_INTERFACE_PROTOCOL_IB, 3203}; 3204 3205static int __init mlx5_ib_init(void) 3206{ 3207 int err; 3208 3209 if (deprecated_prof_sel != 2) 3210 pr_warn("prof_sel is deprecated for mlx5_ib, set it for mlx5_core\n"); 3211 3212 err = mlx5_ib_odp_init(); 3213 if (err) 3214 return err; 3215 3216 err = mlx5_register_interface(&mlx5_ib_interface); 3217 if (err) 3218 goto clean_odp; 3219 3220 return err; 3221 3222clean_odp: 3223 mlx5_ib_odp_cleanup(); 3224 return err; 3225} 3226 3227static void __exit mlx5_ib_cleanup(void) 3228{ 3229 mlx5_unregister_interface(&mlx5_ib_interface); 3230 mlx5_ib_odp_cleanup(); 3231} 3232 3233module_init_order(mlx5_ib_init, SI_ORDER_THIRD); 3234module_exit_order(mlx5_ib_cleanup, SI_ORDER_THIRD); 3235
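/*
 * Usage note (an addition, not from the original source): once mlx5_core
 * has probed an adapter, this interface attaches automatically; on FreeBSD
 * the module can also be loaded by hand, e.g. `kldload mlx5ib`, after
 * which the device is exposed to ibcore consumers under the "mlx5_%d"
 * name set in mlx5_ib_add().
 */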