mlx5_ib_main.c revision 337078
/*-
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/dev/mlx5/mlx5_ib/mlx5_ib_main.c 337078 2018-08-02 08:15:05Z hselasky $
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#if defined(CONFIG_X86)
#include <asm/pat.h>
#endif
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/fs.h>
#undef inode
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <dev/mlx5/port.h>
#include <dev/mlx5/vport.h>
#include <linux/list.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <linux/in.h>
#include <linux/etherdevice.h>
#include <dev/mlx5/fs.h>
#include "mlx5_ib.h"

#define DRIVER_NAME "mlx5_ib"
#define DRIVER_VERSION "3.4.1-BETA"
#define DRIVER_RELDATE "October 2017"

MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DEPEND(mlx5ib, linuxkpi, 1, 1, 1);
MODULE_DEPEND(mlx5ib, mlx5, 1, 1, 1);
MODULE_DEPEND(mlx5ib, ibcore, 1, 1, 1);
MODULE_VERSION(mlx5ib, 1);

static int deprecated_prof_sel = 2;
module_param_named(prof_sel, deprecated_prof_sel, int, 0444);
MODULE_PARM_DESC(prof_sel, "profile selector. Deprecated here. Moved to module mlx5_core");

static char mlx5_version[] =
	DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
	DRIVER_VERSION " (" DRIVER_RELDATE ")\n";

enum {
	MLX5_ATOMIC_SIZE_QP_8BYTES = 1 << 3,
};

static enum rdma_link_layer
mlx5_port_type_cap_to_rdma_ll(int port_type_cap)
{
	switch (port_type_cap) {
	case MLX5_CAP_PORT_TYPE_IB:
		return IB_LINK_LAYER_INFINIBAND;
	case MLX5_CAP_PORT_TYPE_ETH:
		return IB_LINK_LAYER_ETHERNET;
	default:
		return IB_LINK_LAYER_UNSPECIFIED;
	}
}

static enum rdma_link_layer
mlx5_ib_port_link_layer(struct ib_device *device, u8 port_num)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	int port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type);

	return mlx5_port_type_cap_to_rdma_ll(port_type_cap);
}

static bool mlx5_netdev_match(struct net_device *ndev,
			      struct mlx5_core_dev *mdev,
			      const char *dname)
{
	return ndev->if_type == IFT_ETHER &&
	    ndev->if_dname != NULL &&
	    strcmp(ndev->if_dname, dname) == 0 &&
	    ndev->if_softc != NULL &&
	    *(struct mlx5_core_dev **)ndev->if_softc == mdev;
}

static int mlx5_netdev_event(struct notifier_block *this,
			     unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct mlx5_ib_dev *ibdev = container_of(this, struct mlx5_ib_dev,
						 roce.nb);

	switch (event) {
	case NETDEV_REGISTER:
	case NETDEV_UNREGISTER:
		write_lock(&ibdev->roce.netdev_lock);
		/* check if network interface belongs to mlx5en */
		if (mlx5_netdev_match(ndev, ibdev->mdev, "mce"))
			ibdev->roce.netdev = (event == NETDEV_UNREGISTER) ?
					     NULL : ndev;
		write_unlock(&ibdev->roce.netdev_lock);
		break;

	case NETDEV_UP:
	case NETDEV_DOWN: {
		struct net_device *upper = NULL;

		if ((upper == ndev || (!upper && ndev == ibdev->roce.netdev))
		    && ibdev->ib_active) {
			struct ib_event ibev = {0};

			ibev.device = &ibdev->ib_dev;
			ibev.event = (event == NETDEV_UP) ?
				     IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
			ibev.element.port_num = 1;
			ib_dispatch_event(&ibev);
		}
		break;
	}

	default:
		break;
	}

	return NOTIFY_DONE;
}

static struct net_device *mlx5_ib_get_netdev(struct ib_device *device,
					     u8 port_num)
{
	struct mlx5_ib_dev *ibdev = to_mdev(device);
	struct net_device *ndev;

	/* Ensure ndev does not disappear before we invoke dev_hold()
	 */
	read_lock(&ibdev->roce.netdev_lock);
	ndev = ibdev->roce.netdev;
	if (ndev)
		dev_hold(ndev);
	read_unlock(&ibdev->roce.netdev_lock);

	return ndev;
}

static int translate_eth_proto_oper(u32 eth_proto_oper, u8 *active_speed,
				    u8 *active_width)
{
	switch (eth_proto_oper) {
	case MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII):
	case MLX5E_PROT_MASK(MLX5E_1000BASE_KX):
	case MLX5E_PROT_MASK(MLX5E_100BASE_TX):
	case MLX5E_PROT_MASK(MLX5E_1000BASE_T):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_SDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_10GBASE_T):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_CX4):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_KX4):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_KR):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_CR):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_SR):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_ER):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_QDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_25GBASE_CR):
	case MLX5E_PROT_MASK(MLX5E_25GBASE_KR):
	case MLX5E_PROT_MASK(MLX5E_25GBASE_SR):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_EDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_40GBASE_CR4):
	case MLX5E_PROT_MASK(MLX5E_40GBASE_KR4):
	case MLX5E_PROT_MASK(MLX5E_40GBASE_SR4):
	case MLX5E_PROT_MASK(MLX5E_40GBASE_LR4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_QDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_50GBASE_CR2):
	case MLX5E_PROT_MASK(MLX5E_50GBASE_KR2):
	case MLX5E_PROT_MASK(MLX5E_50GBASE_SR2):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_HDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_56GBASE_R4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_FDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_100GBASE_CR4):
	case MLX5E_PROT_MASK(MLX5E_100GBASE_SR4):
	case MLX5E_PROT_MASK(MLX5E_100GBASE_KR4):
	case MLX5E_PROT_MASK(MLX5E_100GBASE_LR4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_EDR;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
				struct ib_port_attr *props)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	struct net_device *ndev;
	enum ib_mtu ndev_ib_mtu;
	u16 qkey_viol_cntr;
	u32 eth_prot_oper;
	int err;

	memset(props, 0, sizeof(*props));

	/* Possible bad flows are checked before filling out props so in case
	 * of an error it will still be zeroed out.
	 */
	err = mlx5_query_port_eth_proto_oper(dev->mdev, &eth_prot_oper, port_num);
	if (err)
		return err;

	translate_eth_proto_oper(eth_prot_oper, &props->active_speed,
				 &props->active_width);

	props->port_cap_flags |= IB_PORT_CM_SUP;
	props->port_cap_flags |= IB_PORT_IP_BASED_GIDS;

	props->gid_tbl_len = MLX5_CAP_ROCE(dev->mdev,
					   roce_address_table_size);
	props->max_mtu = IB_MTU_4096;
	props->max_msg_sz = 1 << MLX5_CAP_GEN(dev->mdev, log_max_msg);
	props->pkey_tbl_len = 1;
	props->state = IB_PORT_DOWN;
	props->phys_state = 3;

	mlx5_query_nic_vport_qkey_viol_cntr(dev->mdev, &qkey_viol_cntr);
	props->qkey_viol_cntr = qkey_viol_cntr;

	ndev = mlx5_ib_get_netdev(device, port_num);
	if (!ndev)
		return 0;

	if (netif_running(ndev) && netif_carrier_ok(ndev)) {
		props->state = IB_PORT_ACTIVE;
		props->phys_state = 5;
	}

	ndev_ib_mtu = iboe_get_mtu(ndev->if_mtu);

	dev_put(ndev);

	props->active_mtu = min(props->max_mtu, ndev_ib_mtu);
	return 0;
}

static void ib_gid_to_mlx5_roce_addr(const union ib_gid *gid,
				     const struct ib_gid_attr *attr,
				     void *mlx5_addr)
{
#define MLX5_SET_RA(p, f, v) MLX5_SET(roce_addr_layout, p, f, v)
	char *mlx5_addr_l3_addr = MLX5_ADDR_OF(roce_addr_layout, mlx5_addr,
					       source_l3_address);
	void *mlx5_addr_mac = MLX5_ADDR_OF(roce_addr_layout, mlx5_addr,
					   source_mac_47_32);
	u16 vlan_id;

	if (!gid)
		return;
	ether_addr_copy(mlx5_addr_mac, IF_LLADDR(attr->ndev));

	vlan_id = rdma_vlan_dev_vlan_id(attr->ndev);
	if (vlan_id != 0xffff) {
		MLX5_SET_RA(mlx5_addr, vlan_valid, 1);
		MLX5_SET_RA(mlx5_addr, vlan_id, vlan_id);
	}

	switch (attr->gid_type) {
	case IB_GID_TYPE_IB:
		MLX5_SET_RA(mlx5_addr, roce_version, MLX5_ROCE_VERSION_1);
		break;
	case IB_GID_TYPE_ROCE_UDP_ENCAP:
		MLX5_SET_RA(mlx5_addr, roce_version, MLX5_ROCE_VERSION_2);
		break;

	default:
		WARN_ON(true);
	}

	if (attr->gid_type != IB_GID_TYPE_IB) {
		if (ipv6_addr_v4mapped((void *)gid))
			MLX5_SET_RA(mlx5_addr, roce_l3_type,
				    MLX5_ROCE_L3_TYPE_IPV4);
		else
			MLX5_SET_RA(mlx5_addr, roce_l3_type,
				    MLX5_ROCE_L3_TYPE_IPV6);
	}

	if ((attr->gid_type == IB_GID_TYPE_IB) ||
	    !ipv6_addr_v4mapped((void *)gid))
		memcpy(mlx5_addr_l3_addr, gid, sizeof(*gid));
	else
		memcpy(&mlx5_addr_l3_addr[12], &gid->raw[12], 4);
}

static int set_roce_addr(struct ib_device *device, u8 port_num,
			 unsigned int index,
			 const union ib_gid *gid,
			 const struct ib_gid_attr *attr)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	u32  in[MLX5_ST_SZ_DW(set_roce_address_in)]  = {0};
	u32 out[MLX5_ST_SZ_DW(set_roce_address_out)] = {0};
	void *in_addr = MLX5_ADDR_OF(set_roce_address_in, in, roce_address);
	enum rdma_link_layer ll = mlx5_ib_port_link_layer(device, port_num);

	if (ll != IB_LINK_LAYER_ETHERNET)
		return -EINVAL;

	ib_gid_to_mlx5_roce_addr(gid, attr, in_addr);

	MLX5_SET(set_roce_address_in, in, roce_address_index, index);
	MLX5_SET(set_roce_address_in, in, opcode, MLX5_CMD_OP_SET_ROCE_ADDRESS);
	return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
}

static int mlx5_ib_add_gid(struct ib_device *device, u8 port_num,
			   unsigned int index, const union ib_gid *gid,
			   const struct ib_gid_attr *attr,
			   __always_unused void **context)
{
	return set_roce_addr(device, port_num, index, gid, attr);
}

static int mlx5_ib_del_gid(struct ib_device *device, u8 port_num,
			   unsigned int index, __always_unused void **context)
{
	return set_roce_addr(device, port_num, index, NULL, NULL);
}

__be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
			       int index)
{
	struct ib_gid_attr attr;
	union ib_gid gid;

	if (ib_get_cached_gid(&dev->ib_dev, port_num, index, &gid, &attr))
		return 0;

	if (!attr.ndev)
		return 0;

	dev_put(attr.ndev);

	if (attr.gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
		return 0;

	return cpu_to_be16(MLX5_CAP_ROCE(dev->mdev, r_roce_min_src_udp_port));
}

static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
{
	if (MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_IB)
		return !MLX5_CAP_GEN(dev->mdev, ib_virt);
	return 0;
}

enum {
	MLX5_VPORT_ACCESS_METHOD_MAD,
	MLX5_VPORT_ACCESS_METHOD_HCA,
	MLX5_VPORT_ACCESS_METHOD_NIC,
};

static int mlx5_get_vport_access_method(struct ib_device *ibdev)
{
	if (mlx5_use_mad_ifc(to_mdev(ibdev)))
		return MLX5_VPORT_ACCESS_METHOD_MAD;

	if (mlx5_ib_port_link_layer(ibdev, 1) ==
	    IB_LINK_LAYER_ETHERNET)
		return MLX5_VPORT_ACCESS_METHOD_NIC;

	return MLX5_VPORT_ACCESS_METHOD_HCA;
}

static void get_atomic_caps(struct mlx5_ib_dev *dev,
			    struct ib_device_attr *props)
{
	u8 tmp;
	u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations);
	u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);
	u8 atomic_req_8B_endianness_mode =
		MLX5_CAP_ATOMIC(dev->mdev, atomic_req_8B_endianess_mode);

	/* Check if HW supports 8 bytes standard atomic operations and capable
	 * of host endianness respond
	 */
	tmp = MLX5_ATOMIC_OPS_CMP_SWAP | MLX5_ATOMIC_OPS_FETCH_ADD;
	if (((atomic_operations & tmp) == tmp) &&
	    (atomic_size_qp & MLX5_ATOMIC_SIZE_QP_8BYTES) &&
	    (atomic_req_8B_endianness_mode)) {
		props->atomic_cap = IB_ATOMIC_HCA;
	} else {
		props->atomic_cap = IB_ATOMIC_NONE;
	}
}

static int mlx5_query_system_image_guid(struct ib_device *ibdev,
					__be64 *sys_image_guid)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	u64 tmp;
	int err;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_system_image_guid(ibdev,
							    sys_image_guid);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
		err = mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
		break;

	case MLX5_VPORT_ACCESS_METHOD_NIC:
		err = mlx5_query_nic_vport_system_image_guid(mdev, &tmp);
		break;

	default:
		return -EINVAL;
	}

	if (!err)
		*sys_image_guid = cpu_to_be64(tmp);

	return err;

}

static int mlx5_query_max_pkeys(struct ib_device *ibdev,
				u16 *max_pkeys)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_max_pkeys(ibdev, max_pkeys);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
	case MLX5_VPORT_ACCESS_METHOD_NIC:
		*max_pkeys = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev,
						pkey_table_size));
		return 0;

	default:
		return -EINVAL;
	}
}

static int mlx5_query_vendor_id(struct ib_device *ibdev,
				u32 *vendor_id)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);

	switch
(mlx5_get_vport_access_method(ibdev)) { 489 case MLX5_VPORT_ACCESS_METHOD_MAD: 490 return mlx5_query_mad_ifc_vendor_id(ibdev, vendor_id); 491 492 case MLX5_VPORT_ACCESS_METHOD_HCA: 493 case MLX5_VPORT_ACCESS_METHOD_NIC: 494 return mlx5_core_query_vendor_id(dev->mdev, vendor_id); 495 496 default: 497 return -EINVAL; 498 } 499} 500 501static int mlx5_query_node_guid(struct mlx5_ib_dev *dev, 502 __be64 *node_guid) 503{ 504 u64 tmp; 505 int err; 506 507 switch (mlx5_get_vport_access_method(&dev->ib_dev)) { 508 case MLX5_VPORT_ACCESS_METHOD_MAD: 509 return mlx5_query_mad_ifc_node_guid(dev, node_guid); 510 511 case MLX5_VPORT_ACCESS_METHOD_HCA: 512 err = mlx5_query_hca_vport_node_guid(dev->mdev, &tmp); 513 break; 514 515 case MLX5_VPORT_ACCESS_METHOD_NIC: 516 err = mlx5_query_nic_vport_node_guid(dev->mdev, &tmp); 517 break; 518 519 default: 520 return -EINVAL; 521 } 522 523 if (!err) 524 *node_guid = cpu_to_be64(tmp); 525 526 return err; 527} 528 529struct mlx5_reg_node_desc { 530 u8 desc[IB_DEVICE_NODE_DESC_MAX]; 531}; 532 533static int mlx5_query_node_desc(struct mlx5_ib_dev *dev, char *node_desc) 534{ 535 struct mlx5_reg_node_desc in; 536 537 if (mlx5_use_mad_ifc(dev)) 538 return mlx5_query_mad_ifc_node_desc(dev, node_desc); 539 540 memset(&in, 0, sizeof(in)); 541 542 return mlx5_core_access_reg(dev->mdev, &in, sizeof(in), node_desc, 543 sizeof(struct mlx5_reg_node_desc), 544 MLX5_REG_NODE_DESC, 0, 0); 545} 546 547static int mlx5_ib_query_device(struct ib_device *ibdev, 548 struct ib_device_attr *props, 549 struct ib_udata *uhw) 550{ 551 struct mlx5_ib_dev *dev = to_mdev(ibdev); 552 struct mlx5_core_dev *mdev = dev->mdev; 553 int err = -ENOMEM; 554 int max_rq_sg; 555 int max_sq_sg; 556 u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz); 557 struct mlx5_ib_query_device_resp resp = {}; 558 size_t resp_len; 559 u64 max_tso; 560 561 resp_len = sizeof(resp.comp_mask) + sizeof(resp.response_length); 562 if (uhw->outlen && uhw->outlen < resp_len) 563 return -EINVAL; 564 else 565 resp.response_length = resp_len; 566 567 if (uhw->inlen && !ib_is_udata_cleared(uhw, 0, uhw->inlen)) 568 return -EINVAL; 569 570 memset(props, 0, sizeof(*props)); 571 err = mlx5_query_system_image_guid(ibdev, 572 &props->sys_image_guid); 573 if (err) 574 return err; 575 576 err = mlx5_query_max_pkeys(ibdev, &props->max_pkeys); 577 if (err) 578 return err; 579 580 err = mlx5_query_vendor_id(ibdev, &props->vendor_id); 581 if (err) 582 return err; 583 584 props->fw_ver = ((u64)fw_rev_maj(dev->mdev) << 32) | 585 (fw_rev_min(dev->mdev) << 16) | 586 fw_rev_sub(dev->mdev); 587 props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT | 588 IB_DEVICE_PORT_ACTIVE_EVENT | 589 IB_DEVICE_SYS_IMAGE_GUID | 590 IB_DEVICE_RC_RNR_NAK_GEN; 591 592 if (MLX5_CAP_GEN(mdev, pkv)) 593 props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR; 594 if (MLX5_CAP_GEN(mdev, qkv)) 595 props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR; 596 if (MLX5_CAP_GEN(mdev, apm)) 597 props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG; 598 if (MLX5_CAP_GEN(mdev, xrc)) 599 props->device_cap_flags |= IB_DEVICE_XRC; 600 if (MLX5_CAP_GEN(mdev, imaicl)) { 601 props->device_cap_flags |= IB_DEVICE_MEM_WINDOW | 602 IB_DEVICE_MEM_WINDOW_TYPE_2B; 603 props->max_mw = 1 << MLX5_CAP_GEN(mdev, log_max_mkey); 604 /* We support 'Gappy' memory registration too */ 605 props->device_cap_flags |= IB_DEVICE_SG_GAPS_REG; 606 } 607 props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS; 608 if (MLX5_CAP_GEN(mdev, sho)) { 609 props->device_cap_flags |= IB_DEVICE_SIGNATURE_HANDOVER; 610 /* At 
this stage no support for signature handover */ 611 props->sig_prot_cap = IB_PROT_T10DIF_TYPE_1 | 612 IB_PROT_T10DIF_TYPE_2 | 613 IB_PROT_T10DIF_TYPE_3; 614 props->sig_guard_cap = IB_GUARD_T10DIF_CRC | 615 IB_GUARD_T10DIF_CSUM; 616 } 617 if (MLX5_CAP_GEN(mdev, block_lb_mc)) 618 props->device_cap_flags |= IB_DEVICE_BLOCK_MULTICAST_LOOPBACK; 619 620 if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads)) { 621 if (MLX5_CAP_ETH(mdev, csum_cap)) 622 props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM; 623 624 if (field_avail(typeof(resp), tso_caps, uhw->outlen)) { 625 max_tso = MLX5_CAP_ETH(mdev, max_lso_cap); 626 if (max_tso) { 627 resp.tso_caps.max_tso = 1 << max_tso; 628 resp.tso_caps.supported_qpts |= 629 1 << IB_QPT_RAW_PACKET; 630 resp.response_length += sizeof(resp.tso_caps); 631 } 632 } 633 634 if (field_avail(typeof(resp), rss_caps, uhw->outlen)) { 635 resp.rss_caps.rx_hash_function = 636 MLX5_RX_HASH_FUNC_TOEPLITZ; 637 resp.rss_caps.rx_hash_fields_mask = 638 MLX5_RX_HASH_SRC_IPV4 | 639 MLX5_RX_HASH_DST_IPV4 | 640 MLX5_RX_HASH_SRC_IPV6 | 641 MLX5_RX_HASH_DST_IPV6 | 642 MLX5_RX_HASH_SRC_PORT_TCP | 643 MLX5_RX_HASH_DST_PORT_TCP | 644 MLX5_RX_HASH_SRC_PORT_UDP | 645 MLX5_RX_HASH_DST_PORT_UDP; 646 resp.response_length += sizeof(resp.rss_caps); 647 } 648 } else { 649 if (field_avail(typeof(resp), tso_caps, uhw->outlen)) 650 resp.response_length += sizeof(resp.tso_caps); 651 if (field_avail(typeof(resp), rss_caps, uhw->outlen)) 652 resp.response_length += sizeof(resp.rss_caps); 653 } 654 655 if (MLX5_CAP_GEN(mdev, ipoib_ipoib_offloads)) { 656 props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM; 657 props->device_cap_flags |= IB_DEVICE_UD_TSO; 658 } 659 660 if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) && 661 MLX5_CAP_ETH(dev->mdev, scatter_fcs)) 662 props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS; 663 664 if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS)) 665 props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING; 666 667 props->vendor_part_id = mdev->pdev->device; 668 props->hw_ver = mdev->pdev->revision; 669 670 props->max_mr_size = ~0ull; 671 props->page_size_cap = ~(min_page_size - 1); 672 props->max_qp = 1 << MLX5_CAP_GEN(mdev, log_max_qp); 673 props->max_qp_wr = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz); 674 max_rq_sg = MLX5_CAP_GEN(mdev, max_wqe_sz_rq) / 675 sizeof(struct mlx5_wqe_data_seg); 676 max_sq_sg = (MLX5_CAP_GEN(mdev, max_wqe_sz_sq) - 677 sizeof(struct mlx5_wqe_ctrl_seg)) / 678 sizeof(struct mlx5_wqe_data_seg); 679 props->max_sge = min(max_rq_sg, max_sq_sg); 680 props->max_sge_rd = MLX5_MAX_SGE_RD; 681 props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq); 682 props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1; 683 props->max_mr = 1 << MLX5_CAP_GEN(mdev, log_max_mkey); 684 props->max_pd = 1 << MLX5_CAP_GEN(mdev, log_max_pd); 685 props->max_qp_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_req_qp); 686 props->max_qp_init_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_res_qp); 687 props->max_srq = 1 << MLX5_CAP_GEN(mdev, log_max_srq); 688 props->max_srq_wr = (1 << MLX5_CAP_GEN(mdev, log_max_srq_sz)) - 1; 689 props->local_ca_ack_delay = MLX5_CAP_GEN(mdev, local_ca_ack_delay); 690 props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp; 691 props->max_srq_sge = max_rq_sg - 1; 692 props->max_fast_reg_page_list_len = 693 1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size); 694 get_atomic_caps(dev, props); 695 props->masked_atomic_cap = IB_ATOMIC_NONE; 696 props->max_mcast_grp = 1 << MLX5_CAP_GEN(mdev, log_max_mcg); 697 props->max_mcast_qp_attach = MLX5_CAP_GEN(mdev, 
max_qp_mcg); 698 props->max_total_mcast_qp_attach = props->max_mcast_qp_attach * 699 props->max_mcast_grp; 700 props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */ 701 props->hca_core_clock = MLX5_CAP_GEN(mdev, device_frequency_khz); 702 props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL; 703 704#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING 705 if (MLX5_CAP_GEN(mdev, pg)) 706 props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING; 707 props->odp_caps = dev->odp_caps; 708#endif 709 710 if (MLX5_CAP_GEN(mdev, cd)) 711 props->device_cap_flags |= IB_DEVICE_CROSS_CHANNEL; 712 713 if (!mlx5_core_is_pf(mdev)) 714 props->device_cap_flags |= IB_DEVICE_VIRTUAL_FUNCTION; 715 716 if (mlx5_ib_port_link_layer(ibdev, 1) == 717 IB_LINK_LAYER_ETHERNET) { 718 props->rss_caps.max_rwq_indirection_tables = 719 1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt); 720 props->rss_caps.max_rwq_indirection_table_size = 721 1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt_size); 722 props->rss_caps.supported_qpts = 1 << IB_QPT_RAW_PACKET; 723 props->max_wq_type_rq = 724 1 << MLX5_CAP_GEN(dev->mdev, log_max_rq); 725 } 726 727 if (uhw->outlen) { 728 err = ib_copy_to_udata(uhw, &resp, resp.response_length); 729 730 if (err) 731 return err; 732 } 733 734 return 0; 735} 736 737enum mlx5_ib_width { 738 MLX5_IB_WIDTH_1X = 1 << 0, 739 MLX5_IB_WIDTH_2X = 1 << 1, 740 MLX5_IB_WIDTH_4X = 1 << 2, 741 MLX5_IB_WIDTH_8X = 1 << 3, 742 MLX5_IB_WIDTH_12X = 1 << 4 743}; 744 745static int translate_active_width(struct ib_device *ibdev, u8 active_width, 746 u8 *ib_width) 747{ 748 struct mlx5_ib_dev *dev = to_mdev(ibdev); 749 int err = 0; 750 751 if (active_width & MLX5_IB_WIDTH_1X) { 752 *ib_width = IB_WIDTH_1X; 753 } else if (active_width & MLX5_IB_WIDTH_2X) { 754 mlx5_ib_dbg(dev, "active_width %d is not supported by IB spec\n", 755 (int)active_width); 756 err = -EINVAL; 757 } else if (active_width & MLX5_IB_WIDTH_4X) { 758 *ib_width = IB_WIDTH_4X; 759 } else if (active_width & MLX5_IB_WIDTH_8X) { 760 *ib_width = IB_WIDTH_8X; 761 } else if (active_width & MLX5_IB_WIDTH_12X) { 762 *ib_width = IB_WIDTH_12X; 763 } else { 764 mlx5_ib_dbg(dev, "Invalid active_width %d\n", 765 (int)active_width); 766 err = -EINVAL; 767 } 768 769 return err; 770} 771 772enum ib_max_vl_num { 773 __IB_MAX_VL_0 = 1, 774 __IB_MAX_VL_0_1 = 2, 775 __IB_MAX_VL_0_3 = 3, 776 __IB_MAX_VL_0_7 = 4, 777 __IB_MAX_VL_0_14 = 5, 778}; 779 780enum mlx5_vl_hw_cap { 781 MLX5_VL_HW_0 = 1, 782 MLX5_VL_HW_0_1 = 2, 783 MLX5_VL_HW_0_2 = 3, 784 MLX5_VL_HW_0_3 = 4, 785 MLX5_VL_HW_0_4 = 5, 786 MLX5_VL_HW_0_5 = 6, 787 MLX5_VL_HW_0_6 = 7, 788 MLX5_VL_HW_0_7 = 8, 789 MLX5_VL_HW_0_14 = 15 790}; 791 792static int translate_max_vl_num(struct ib_device *ibdev, u8 vl_hw_cap, 793 u8 *max_vl_num) 794{ 795 switch (vl_hw_cap) { 796 case MLX5_VL_HW_0: 797 *max_vl_num = __IB_MAX_VL_0; 798 break; 799 case MLX5_VL_HW_0_1: 800 *max_vl_num = __IB_MAX_VL_0_1; 801 break; 802 case MLX5_VL_HW_0_3: 803 *max_vl_num = __IB_MAX_VL_0_3; 804 break; 805 case MLX5_VL_HW_0_7: 806 *max_vl_num = __IB_MAX_VL_0_7; 807 break; 808 case MLX5_VL_HW_0_14: 809 *max_vl_num = __IB_MAX_VL_0_14; 810 break; 811 812 default: 813 return -EINVAL; 814 } 815 816 return 0; 817} 818 819static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port, 820 struct ib_port_attr *props) 821{ 822 struct mlx5_ib_dev *dev = to_mdev(ibdev); 823 struct mlx5_core_dev *mdev = dev->mdev; 824 u32 *rep; 825 int replen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out); 826 struct mlx5_ptys_reg *ptys; 827 struct mlx5_pmtu_reg *pmtu; 828 struct mlx5_pvlc_reg pvlc; 829 void *ctx; 
830 int err; 831 832 rep = mlx5_vzalloc(replen); 833 ptys = kzalloc(sizeof(*ptys), GFP_KERNEL); 834 pmtu = kzalloc(sizeof(*pmtu), GFP_KERNEL); 835 if (!rep || !ptys || !pmtu) { 836 err = -ENOMEM; 837 goto out; 838 } 839 840 memset(props, 0, sizeof(*props)); 841 842 err = mlx5_query_hca_vport_context(mdev, port, 0, rep, replen); 843 if (err) 844 goto out; 845 846 ctx = MLX5_ADDR_OF(query_hca_vport_context_out, rep, hca_vport_context); 847 848 props->lid = MLX5_GET(hca_vport_context, ctx, lid); 849 props->lmc = MLX5_GET(hca_vport_context, ctx, lmc); 850 props->sm_lid = MLX5_GET(hca_vport_context, ctx, sm_lid); 851 props->sm_sl = MLX5_GET(hca_vport_context, ctx, sm_sl); 852 props->state = MLX5_GET(hca_vport_context, ctx, vport_state); 853 props->phys_state = MLX5_GET(hca_vport_context, ctx, 854 port_physical_state); 855 props->port_cap_flags = MLX5_GET(hca_vport_context, ctx, cap_mask1); 856 props->gid_tbl_len = mlx5_get_gid_table_len(MLX5_CAP_GEN(mdev, gid_table_size)); 857 props->max_msg_sz = 1 << MLX5_CAP_GEN(mdev, log_max_msg); 858 props->pkey_tbl_len = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev, pkey_table_size)); 859 props->bad_pkey_cntr = MLX5_GET(hca_vport_context, ctx, 860 pkey_violation_counter); 861 props->qkey_viol_cntr = MLX5_GET(hca_vport_context, ctx, 862 qkey_violation_counter); 863 props->subnet_timeout = MLX5_GET(hca_vport_context, ctx, 864 subnet_timeout); 865 props->init_type_reply = MLX5_GET(hca_vport_context, ctx, 866 init_type_reply); 867 props->grh_required = MLX5_GET(hca_vport_context, ctx, grh_required); 868 869 ptys->proto_mask |= MLX5_PTYS_IB; 870 ptys->local_port = port; 871 err = mlx5_core_access_ptys(mdev, ptys, 0); 872 if (err) 873 goto out; 874 875 err = translate_active_width(ibdev, ptys->ib_link_width_oper, 876 &props->active_width); 877 if (err) 878 goto out; 879 880 props->active_speed = (u8)ptys->ib_proto_oper; 881 882 pmtu->local_port = port; 883 err = mlx5_core_access_pmtu(mdev, pmtu, 0); 884 if (err) 885 goto out; 886 887 props->max_mtu = pmtu->max_mtu; 888 props->active_mtu = pmtu->oper_mtu; 889 890 memset(&pvlc, 0, sizeof(pvlc)); 891 pvlc.local_port = port; 892 err = mlx5_core_access_pvlc(mdev, &pvlc, 0); 893 if (err) 894 goto out; 895 896 err = translate_max_vl_num(ibdev, pvlc.vl_hw_cap, 897 &props->max_vl_num); 898out: 899 kvfree(rep); 900 kfree(ptys); 901 kfree(pmtu); 902 return err; 903} 904 905int mlx5_ib_query_port(struct ib_device *ibdev, u8 port, 906 struct ib_port_attr *props) 907{ 908 switch (mlx5_get_vport_access_method(ibdev)) { 909 case MLX5_VPORT_ACCESS_METHOD_MAD: 910 return mlx5_query_mad_ifc_port(ibdev, port, props); 911 912 case MLX5_VPORT_ACCESS_METHOD_HCA: 913 return mlx5_query_hca_port(ibdev, port, props); 914 915 case MLX5_VPORT_ACCESS_METHOD_NIC: 916 return mlx5_query_port_roce(ibdev, port, props); 917 918 default: 919 return -EINVAL; 920 } 921} 922 923static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index, 924 union ib_gid *gid) 925{ 926 struct mlx5_ib_dev *dev = to_mdev(ibdev); 927 struct mlx5_core_dev *mdev = dev->mdev; 928 929 switch (mlx5_get_vport_access_method(ibdev)) { 930 case MLX5_VPORT_ACCESS_METHOD_MAD: 931 return mlx5_query_mad_ifc_gids(ibdev, port, index, gid); 932 933 case MLX5_VPORT_ACCESS_METHOD_HCA: 934 return mlx5_query_hca_vport_gid(mdev, port, 0, index, gid); 935 936 default: 937 return -EINVAL; 938 } 939 940} 941 942static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, 943 u16 *pkey) 944{ 945 struct mlx5_ib_dev *dev = to_mdev(ibdev); 946 struct mlx5_core_dev *mdev = 
dev->mdev; 947 948 switch (mlx5_get_vport_access_method(ibdev)) { 949 case MLX5_VPORT_ACCESS_METHOD_MAD: 950 return mlx5_query_mad_ifc_pkey(ibdev, port, index, pkey); 951 952 case MLX5_VPORT_ACCESS_METHOD_HCA: 953 case MLX5_VPORT_ACCESS_METHOD_NIC: 954 return mlx5_query_hca_vport_pkey(mdev, 0, port, 0, index, 955 pkey); 956 default: 957 return -EINVAL; 958 } 959} 960 961static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask, 962 struct ib_device_modify *props) 963{ 964 struct mlx5_ib_dev *dev = to_mdev(ibdev); 965 struct mlx5_reg_node_desc in; 966 struct mlx5_reg_node_desc out; 967 int err; 968 969 if (mask & ~IB_DEVICE_MODIFY_NODE_DESC) 970 return -EOPNOTSUPP; 971 972 if (!(mask & IB_DEVICE_MODIFY_NODE_DESC)) 973 return 0; 974 975 /* 976 * If possible, pass node desc to FW, so it can generate 977 * a 144 trap. If cmd fails, just ignore. 978 */ 979 memcpy(&in, props->node_desc, IB_DEVICE_NODE_DESC_MAX); 980 err = mlx5_core_access_reg(dev->mdev, &in, sizeof(in), &out, 981 sizeof(out), MLX5_REG_NODE_DESC, 0, 1); 982 if (err) 983 return err; 984 985 memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX); 986 987 return err; 988} 989 990static int mlx5_ib_modify_port(struct ib_device *ibdev, u8 port, int mask, 991 struct ib_port_modify *props) 992{ 993 struct mlx5_ib_dev *dev = to_mdev(ibdev); 994 struct ib_port_attr attr; 995 u32 tmp; 996 int err; 997 998 mutex_lock(&dev->cap_mask_mutex); 999 1000 err = mlx5_ib_query_port(ibdev, port, &attr); 1001 if (err) 1002 goto out; 1003 1004 tmp = (attr.port_cap_flags | props->set_port_cap_mask) & 1005 ~props->clr_port_cap_mask; 1006 1007 err = mlx5_set_port_caps(dev->mdev, port, tmp); 1008 1009out: 1010 mutex_unlock(&dev->cap_mask_mutex); 1011 return err; 1012} 1013 1014static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, 1015 struct ib_udata *udata) 1016{ 1017 struct mlx5_ib_dev *dev = to_mdev(ibdev); 1018 struct mlx5_ib_alloc_ucontext_req_v2 req = {}; 1019 struct mlx5_ib_alloc_ucontext_resp resp = {}; 1020 struct mlx5_ib_ucontext *context; 1021 struct mlx5_uuar_info *uuari; 1022 struct mlx5_uar *uars; 1023 int gross_uuars; 1024 int num_uars; 1025 int ver; 1026 int uuarn; 1027 int err; 1028 int i; 1029 size_t reqlen; 1030 size_t min_req_v2 = offsetof(struct mlx5_ib_alloc_ucontext_req_v2, 1031 max_cqe_version); 1032 1033 if (!dev->ib_active) 1034 return ERR_PTR(-EAGAIN); 1035 1036 if (udata->inlen < sizeof(struct ib_uverbs_cmd_hdr)) 1037 return ERR_PTR(-EINVAL); 1038 1039 reqlen = udata->inlen - sizeof(struct ib_uverbs_cmd_hdr); 1040 if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req)) 1041 ver = 0; 1042 else if (reqlen >= min_req_v2) 1043 ver = 2; 1044 else 1045 return ERR_PTR(-EINVAL); 1046 1047 err = ib_copy_from_udata(&req, udata, min(reqlen, sizeof(req))); 1048 if (err) 1049 return ERR_PTR(err); 1050 1051 if (req.flags) 1052 return ERR_PTR(-EINVAL); 1053 1054 if (req.total_num_uuars > MLX5_MAX_UUARS) 1055 return ERR_PTR(-ENOMEM); 1056 1057 if (req.total_num_uuars == 0) 1058 return ERR_PTR(-EINVAL); 1059 1060 if (req.comp_mask || req.reserved0 || req.reserved1 || req.reserved2) 1061 return ERR_PTR(-EOPNOTSUPP); 1062 1063 if (reqlen > sizeof(req) && 1064 !ib_is_udata_cleared(udata, sizeof(req), 1065 reqlen - sizeof(req))) 1066 return ERR_PTR(-EOPNOTSUPP); 1067 1068 req.total_num_uuars = ALIGN(req.total_num_uuars, 1069 MLX5_NON_FP_BF_REGS_PER_PAGE); 1070 if (req.num_low_latency_uuars > req.total_num_uuars - 1) 1071 return ERR_PTR(-EINVAL); 1072 1073 num_uars = req.total_num_uuars / 
MLX5_NON_FP_BF_REGS_PER_PAGE; 1074 gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE; 1075 resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp); 1076 if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf)) 1077 resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size); 1078 resp.cache_line_size = cache_line_size(); 1079 resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq); 1080 resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq); 1081 resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz); 1082 resp.max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz); 1083 resp.max_srq_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz); 1084 resp.cqe_version = min_t(__u8, 1085 (__u8)MLX5_CAP_GEN(dev->mdev, cqe_version), 1086 req.max_cqe_version); 1087 resp.response_length = min(offsetof(typeof(resp), response_length) + 1088 sizeof(resp.response_length), udata->outlen); 1089 1090 context = kzalloc(sizeof(*context), GFP_KERNEL); 1091 if (!context) 1092 return ERR_PTR(-ENOMEM); 1093 1094 uuari = &context->uuari; 1095 mutex_init(&uuari->lock); 1096 uars = kcalloc(num_uars, sizeof(*uars), GFP_KERNEL); 1097 if (!uars) { 1098 err = -ENOMEM; 1099 goto out_ctx; 1100 } 1101 1102 uuari->bitmap = kcalloc(BITS_TO_LONGS(gross_uuars), 1103 sizeof(*uuari->bitmap), 1104 GFP_KERNEL); 1105 if (!uuari->bitmap) { 1106 err = -ENOMEM; 1107 goto out_uar_ctx; 1108 } 1109 /* 1110 * clear all fast path uuars 1111 */ 1112 for (i = 0; i < gross_uuars; i++) { 1113 uuarn = i & 3; 1114 if (uuarn == 2 || uuarn == 3) 1115 set_bit(i, uuari->bitmap); 1116 } 1117 1118 uuari->count = kcalloc(gross_uuars, sizeof(*uuari->count), GFP_KERNEL); 1119 if (!uuari->count) { 1120 err = -ENOMEM; 1121 goto out_bitmap; 1122 } 1123 1124 for (i = 0; i < num_uars; i++) { 1125 err = mlx5_cmd_alloc_uar(dev->mdev, &uars[i].index); 1126 if (err) 1127 goto out_count; 1128 } 1129 1130#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING 1131 context->ibucontext.invalidate_range = &mlx5_ib_invalidate_range; 1132#endif 1133 1134 if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain)) { 1135 err = mlx5_alloc_transport_domain(dev->mdev, 1136 &context->tdn); 1137 if (err) 1138 goto out_uars; 1139 } 1140 1141 INIT_LIST_HEAD(&context->vma_private_list); 1142 INIT_LIST_HEAD(&context->db_page_list); 1143 mutex_init(&context->db_page_mutex); 1144 1145 resp.tot_uuars = req.total_num_uuars; 1146 resp.num_ports = MLX5_CAP_GEN(dev->mdev, num_ports); 1147 1148 if (field_avail(typeof(resp), cqe_version, udata->outlen)) 1149 resp.response_length += sizeof(resp.cqe_version); 1150 1151 if (field_avail(typeof(resp), cmds_supp_uhw, udata->outlen)) { 1152 resp.cmds_supp_uhw |= MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE | 1153 MLX5_USER_CMDS_SUPP_UHW_CREATE_AH; 1154 resp.response_length += sizeof(resp.cmds_supp_uhw); 1155 } 1156 1157 /* 1158 * We don't want to expose information from the PCI bar that is located 1159 * after 4096 bytes, so if the arch only supports larger pages, let's 1160 * pretend we don't support reading the HCA's core clock. This is also 1161 * forced by mmap function. 
1162 */ 1163 if (PAGE_SIZE <= 4096 && 1164 field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) { 1165 resp.comp_mask |= 1166 MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET; 1167 resp.hca_core_clock_offset = 1168 offsetof(struct mlx5_init_seg, internal_timer_h) % 1169 PAGE_SIZE; 1170 resp.response_length += sizeof(resp.hca_core_clock_offset) + 1171 sizeof(resp.reserved2); 1172 } 1173 1174 err = ib_copy_to_udata(udata, &resp, resp.response_length); 1175 if (err) 1176 goto out_td; 1177 1178 uuari->ver = ver; 1179 uuari->num_low_latency_uuars = req.num_low_latency_uuars; 1180 uuari->uars = uars; 1181 uuari->num_uars = num_uars; 1182 context->cqe_version = resp.cqe_version; 1183 1184 return &context->ibucontext; 1185 1186out_td: 1187 if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain)) 1188 mlx5_dealloc_transport_domain(dev->mdev, context->tdn); 1189 1190out_uars: 1191 for (i--; i >= 0; i--) 1192 mlx5_cmd_free_uar(dev->mdev, uars[i].index); 1193out_count: 1194 kfree(uuari->count); 1195 1196out_bitmap: 1197 kfree(uuari->bitmap); 1198 1199out_uar_ctx: 1200 kfree(uars); 1201 1202out_ctx: 1203 kfree(context); 1204 return ERR_PTR(err); 1205} 1206 1207static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext) 1208{ 1209 struct mlx5_ib_ucontext *context = to_mucontext(ibcontext); 1210 struct mlx5_ib_dev *dev = to_mdev(ibcontext->device); 1211 struct mlx5_uuar_info *uuari = &context->uuari; 1212 int i; 1213 1214 if (MLX5_CAP_GEN(dev->mdev, log_max_transport_domain)) 1215 mlx5_dealloc_transport_domain(dev->mdev, context->tdn); 1216 1217 for (i = 0; i < uuari->num_uars; i++) { 1218 if (mlx5_cmd_free_uar(dev->mdev, uuari->uars[i].index)) 1219 mlx5_ib_warn(dev, "failed to free UAR 0x%x\n", uuari->uars[i].index); 1220 } 1221 1222 kfree(uuari->count); 1223 kfree(uuari->bitmap); 1224 kfree(uuari->uars); 1225 kfree(context); 1226 1227 return 0; 1228} 1229 1230static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev, int index) 1231{ 1232 return (pci_resource_start(dev->mdev->pdev, 0) >> PAGE_SHIFT) + index; 1233} 1234 1235static int get_command(unsigned long offset) 1236{ 1237 return (offset >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK; 1238} 1239 1240static int get_arg(unsigned long offset) 1241{ 1242 return offset & ((1 << MLX5_IB_MMAP_CMD_SHIFT) - 1); 1243} 1244 1245static int get_index(unsigned long offset) 1246{ 1247 return get_arg(offset); 1248} 1249 1250static void mlx5_ib_vma_open(struct vm_area_struct *area) 1251{ 1252 /* vma_open is called when a new VMA is created on top of our VMA. This 1253 * is done through either mremap flow or split_vma (usually due to 1254 * mlock, madvise, munmap, etc.) We do not support a clone of the VMA, 1255 * as this VMA is strongly hardware related. Therefore we set the 1256 * vm_ops of the newly created/cloned VMA to NULL, to prevent it from 1257 * calling us again and trying to do incorrect actions. We assume that 1258 * the original VMA size is exactly a single page, and therefore all 1259 * "splitting" operation will not happen to it. 1260 */ 1261 area->vm_ops = NULL; 1262} 1263 1264static void mlx5_ib_vma_close(struct vm_area_struct *area) 1265{ 1266 struct mlx5_ib_vma_private_data *mlx5_ib_vma_priv_data; 1267 1268 /* It's guaranteed that all VMAs opened on a FD are closed before the 1269 * file itself is closed, therefore no sync is needed with the regular 1270 * closing flow. (e.g. mlx5 ib_dealloc_ucontext) 1271 * However need a sync with accessing the vma as part of 1272 * mlx5_ib_disassociate_ucontext. 
1273 * The close operation is usually called under mm->mmap_sem except when 1274 * process is exiting. 1275 * The exiting case is handled explicitly as part of 1276 * mlx5_ib_disassociate_ucontext. 1277 */ 1278 mlx5_ib_vma_priv_data = (struct mlx5_ib_vma_private_data *)area->vm_private_data; 1279 1280 /* setting the vma context pointer to null in the mlx5_ib driver's 1281 * private data, to protect a race condition in 1282 * mlx5_ib_disassociate_ucontext(). 1283 */ 1284 mlx5_ib_vma_priv_data->vma = NULL; 1285 list_del(&mlx5_ib_vma_priv_data->list); 1286 kfree(mlx5_ib_vma_priv_data); 1287} 1288 1289static const struct vm_operations_struct mlx5_ib_vm_ops = { 1290 .open = mlx5_ib_vma_open, 1291 .close = mlx5_ib_vma_close 1292}; 1293 1294static int mlx5_ib_set_vma_data(struct vm_area_struct *vma, 1295 struct mlx5_ib_ucontext *ctx) 1296{ 1297 struct mlx5_ib_vma_private_data *vma_prv; 1298 struct list_head *vma_head = &ctx->vma_private_list; 1299 1300 vma_prv = kzalloc(sizeof(*vma_prv), GFP_KERNEL); 1301 if (!vma_prv) 1302 return -ENOMEM; 1303 1304 vma_prv->vma = vma; 1305 vma->vm_private_data = vma_prv; 1306 vma->vm_ops = &mlx5_ib_vm_ops; 1307 1308 list_add(&vma_prv->list, vma_head); 1309 1310 return 0; 1311} 1312 1313static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd) 1314{ 1315 switch (cmd) { 1316 case MLX5_IB_MMAP_WC_PAGE: 1317 return "WC"; 1318 case MLX5_IB_MMAP_REGULAR_PAGE: 1319 return "best effort WC"; 1320 case MLX5_IB_MMAP_NC_PAGE: 1321 return "NC"; 1322 default: 1323 return NULL; 1324 } 1325} 1326 1327static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd, 1328 struct vm_area_struct *vma, 1329 struct mlx5_ib_ucontext *context) 1330{ 1331 struct mlx5_uuar_info *uuari = &context->uuari; 1332 int err; 1333 unsigned long idx; 1334 phys_addr_t pfn, pa; 1335 pgprot_t prot; 1336 1337 switch (cmd) { 1338 case MLX5_IB_MMAP_WC_PAGE: 1339/* Some architectures don't support WC memory */ 1340#if defined(CONFIG_X86) 1341 if (!pat_enabled()) 1342 return -EPERM; 1343#elif !(defined(CONFIG_PPC) || (defined(CONFIG_ARM) && defined(CONFIG_MMU))) 1344 return -EPERM; 1345#endif 1346 /* fall through */ 1347 case MLX5_IB_MMAP_REGULAR_PAGE: 1348 /* For MLX5_IB_MMAP_REGULAR_PAGE do the best effort to get WC */ 1349 prot = pgprot_writecombine(vma->vm_page_prot); 1350 break; 1351 case MLX5_IB_MMAP_NC_PAGE: 1352 prot = pgprot_noncached(vma->vm_page_prot); 1353 break; 1354 default: 1355 return -EINVAL; 1356 } 1357 1358 if (vma->vm_end - vma->vm_start != PAGE_SIZE) 1359 return -EINVAL; 1360 1361 idx = get_index(vma->vm_pgoff); 1362 if (idx >= uuari->num_uars) 1363 return -EINVAL; 1364 1365 pfn = uar_index2pfn(dev, uuari->uars[idx].index); 1366 mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn); 1367 1368 vma->vm_page_prot = prot; 1369 err = io_remap_pfn_range(vma, vma->vm_start, pfn, 1370 PAGE_SIZE, vma->vm_page_prot); 1371 if (err) { 1372 mlx5_ib_err(dev, "io_remap_pfn_range failed with error=%d, vm_start=0x%llx, pfn=%pa, mmap_cmd=%s\n", 1373 err, (unsigned long long)vma->vm_start, &pfn, mmap_cmd2str(cmd)); 1374 return -EAGAIN; 1375 } 1376 1377 pa = pfn << PAGE_SHIFT; 1378 mlx5_ib_dbg(dev, "mapped %s at 0x%llx, PA %pa\n", mmap_cmd2str(cmd), 1379 (unsigned long long)vma->vm_start, &pa); 1380 1381 return mlx5_ib_set_vma_data(vma, context); 1382} 1383 1384static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma) 1385{ 1386 struct mlx5_ib_ucontext *context = to_mucontext(ibcontext); 1387 struct mlx5_ib_dev *dev = to_mdev(ibcontext->device); 1388 unsigned long 
command; 1389 phys_addr_t pfn; 1390 1391 command = get_command(vma->vm_pgoff); 1392 switch (command) { 1393 case MLX5_IB_MMAP_WC_PAGE: 1394 case MLX5_IB_MMAP_NC_PAGE: 1395 case MLX5_IB_MMAP_REGULAR_PAGE: 1396 return uar_mmap(dev, command, vma, context); 1397 1398 case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES: 1399 return -ENOSYS; 1400 1401 case MLX5_IB_MMAP_CORE_CLOCK: 1402 if (vma->vm_end - vma->vm_start != PAGE_SIZE) 1403 return -EINVAL; 1404 1405 if (vma->vm_flags & VM_WRITE) 1406 return -EPERM; 1407 1408 /* Don't expose to user-space information it shouldn't have */ 1409 if (PAGE_SIZE > 4096) 1410 return -EOPNOTSUPP; 1411 1412 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 1413 pfn = (dev->mdev->iseg_base + 1414 offsetof(struct mlx5_init_seg, internal_timer_h)) >> 1415 PAGE_SHIFT; 1416 if (io_remap_pfn_range(vma, vma->vm_start, pfn, 1417 PAGE_SIZE, vma->vm_page_prot)) 1418 return -EAGAIN; 1419 1420 mlx5_ib_dbg(dev, "mapped internal timer at 0x%llx, PA 0x%llx\n", 1421 (unsigned long long)vma->vm_start, 1422 (unsigned long long)pfn << PAGE_SHIFT); 1423 break; 1424 1425 default: 1426 return -EINVAL; 1427 } 1428 1429 return 0; 1430} 1431 1432static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev, 1433 struct ib_ucontext *context, 1434 struct ib_udata *udata) 1435{ 1436 struct mlx5_ib_alloc_pd_resp resp; 1437 struct mlx5_ib_pd *pd; 1438 int err; 1439 1440 pd = kmalloc(sizeof(*pd), GFP_KERNEL); 1441 if (!pd) 1442 return ERR_PTR(-ENOMEM); 1443 1444 err = mlx5_core_alloc_pd(to_mdev(ibdev)->mdev, &pd->pdn); 1445 if (err) { 1446 kfree(pd); 1447 return ERR_PTR(err); 1448 } 1449 1450 if (context) { 1451 resp.pdn = pd->pdn; 1452 if (ib_copy_to_udata(udata, &resp, sizeof(resp))) { 1453 mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn); 1454 kfree(pd); 1455 return ERR_PTR(-EFAULT); 1456 } 1457 } 1458 1459 return &pd->ibpd; 1460} 1461 1462static int mlx5_ib_dealloc_pd(struct ib_pd *pd) 1463{ 1464 struct mlx5_ib_dev *mdev = to_mdev(pd->device); 1465 struct mlx5_ib_pd *mpd = to_mpd(pd); 1466 1467 mlx5_core_dealloc_pd(mdev->mdev, mpd->pdn); 1468 kfree(mpd); 1469 1470 return 0; 1471} 1472 1473enum { 1474 MATCH_CRITERIA_ENABLE_OUTER_BIT, 1475 MATCH_CRITERIA_ENABLE_MISC_BIT, 1476 MATCH_CRITERIA_ENABLE_INNER_BIT 1477}; 1478 1479#define HEADER_IS_ZERO(match_criteria, headers) \ 1480 !(memchr_inv(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \ 1481 0, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \ 1482 1483static u8 get_match_criteria_enable(u32 *match_criteria) 1484{ 1485 u8 match_criteria_enable; 1486 1487 match_criteria_enable = 1488 (!HEADER_IS_ZERO(match_criteria, outer_headers)) << 1489 MATCH_CRITERIA_ENABLE_OUTER_BIT; 1490 match_criteria_enable |= 1491 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) << 1492 MATCH_CRITERIA_ENABLE_MISC_BIT; 1493 match_criteria_enable |= 1494 (!HEADER_IS_ZERO(match_criteria, inner_headers)) << 1495 MATCH_CRITERIA_ENABLE_INNER_BIT; 1496 1497 return match_criteria_enable; 1498} 1499 1500static void set_proto(void *outer_c, void *outer_v, u8 mask, u8 val) 1501{ 1502 MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_protocol, mask); 1503 MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_protocol, val); 1504} 1505 1506static void set_tos(void *outer_c, void *outer_v, u8 mask, u8 val) 1507{ 1508 MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_ecn, mask); 1509 MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_ecn, val); 1510 MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_dscp, mask >> 2); 1511 MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_dscp, val >> 2); 1512} 1513 1514#define 
LAST_ETH_FIELD vlan_tag 1515#define LAST_IB_FIELD sl 1516#define LAST_IPV4_FIELD tos 1517#define LAST_IPV6_FIELD traffic_class 1518#define LAST_TCP_UDP_FIELD src_port 1519 1520/* Field is the last supported field */ 1521#define FIELDS_NOT_SUPPORTED(filter, field)\ 1522 memchr_inv((void *)&filter.field +\ 1523 sizeof(filter.field), 0,\ 1524 sizeof(filter) -\ 1525 offsetof(typeof(filter), field) -\ 1526 sizeof(filter.field)) 1527 1528static int parse_flow_attr(u32 *match_c, u32 *match_v, 1529 const union ib_flow_spec *ib_spec) 1530{ 1531 void *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_c, 1532 outer_headers); 1533 void *outer_headers_v = MLX5_ADDR_OF(fte_match_param, match_v, 1534 outer_headers); 1535 void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c, 1536 misc_parameters); 1537 void *misc_params_v = MLX5_ADDR_OF(fte_match_param, match_v, 1538 misc_parameters); 1539 1540 switch (ib_spec->type) { 1541 case IB_FLOW_SPEC_ETH: 1542 if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD)) 1543 return -ENOTSUPP; 1544 1545 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c, 1546 dmac_47_16), 1547 ib_spec->eth.mask.dst_mac); 1548 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v, 1549 dmac_47_16), 1550 ib_spec->eth.val.dst_mac); 1551 1552 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c, 1553 smac_47_16), 1554 ib_spec->eth.mask.src_mac); 1555 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v, 1556 smac_47_16), 1557 ib_spec->eth.val.src_mac); 1558 1559 if (ib_spec->eth.mask.vlan_tag) { 1560 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, 1561 cvlan_tag, 1); 1562 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, 1563 cvlan_tag, 1); 1564 1565 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, 1566 first_vid, ntohs(ib_spec->eth.mask.vlan_tag)); 1567 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, 1568 first_vid, ntohs(ib_spec->eth.val.vlan_tag)); 1569 1570 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, 1571 first_cfi, 1572 ntohs(ib_spec->eth.mask.vlan_tag) >> 12); 1573 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, 1574 first_cfi, 1575 ntohs(ib_spec->eth.val.vlan_tag) >> 12); 1576 1577 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, 1578 first_prio, 1579 ntohs(ib_spec->eth.mask.vlan_tag) >> 13); 1580 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, 1581 first_prio, 1582 ntohs(ib_spec->eth.val.vlan_tag) >> 13); 1583 } 1584 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, 1585 ethertype, ntohs(ib_spec->eth.mask.ether_type)); 1586 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, 1587 ethertype, ntohs(ib_spec->eth.val.ether_type)); 1588 break; 1589 case IB_FLOW_SPEC_IPV4: 1590 if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD)) 1591 return -ENOTSUPP; 1592 1593 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, 1594 ethertype, 0xffff); 1595 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, 1596 ethertype, ETH_P_IP); 1597 1598 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c, 1599 src_ipv4_src_ipv6.ipv4_layout.ipv4), 1600 &ib_spec->ipv4.mask.src_ip, 1601 sizeof(ib_spec->ipv4.mask.src_ip)); 1602 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v, 1603 src_ipv4_src_ipv6.ipv4_layout.ipv4), 1604 &ib_spec->ipv4.val.src_ip, 1605 sizeof(ib_spec->ipv4.val.src_ip)); 1606 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c, 1607 dst_ipv4_dst_ipv6.ipv4_layout.ipv4), 1608 &ib_spec->ipv4.mask.dst_ip, 1609 sizeof(ib_spec->ipv4.mask.dst_ip)); 1610 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, 
outer_headers_v, 1611 dst_ipv4_dst_ipv6.ipv4_layout.ipv4), 1612 &ib_spec->ipv4.val.dst_ip, 1613 sizeof(ib_spec->ipv4.val.dst_ip)); 1614 1615 set_tos(outer_headers_c, outer_headers_v, 1616 ib_spec->ipv4.mask.tos, ib_spec->ipv4.val.tos); 1617 1618 set_proto(outer_headers_c, outer_headers_v, 1619 ib_spec->ipv4.mask.proto, ib_spec->ipv4.val.proto); 1620 break; 1621 case IB_FLOW_SPEC_IPV6: 1622 if (FIELDS_NOT_SUPPORTED(ib_spec->ipv6.mask, LAST_IPV6_FIELD)) 1623 return -ENOTSUPP; 1624 1625 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, 1626 ethertype, 0xffff); 1627 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, 1628 ethertype, IPPROTO_IPV6); 1629 1630 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c, 1631 src_ipv4_src_ipv6.ipv6_layout.ipv6), 1632 &ib_spec->ipv6.mask.src_ip, 1633 sizeof(ib_spec->ipv6.mask.src_ip)); 1634 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v, 1635 src_ipv4_src_ipv6.ipv6_layout.ipv6), 1636 &ib_spec->ipv6.val.src_ip, 1637 sizeof(ib_spec->ipv6.val.src_ip)); 1638 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c, 1639 dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 1640 &ib_spec->ipv6.mask.dst_ip, 1641 sizeof(ib_spec->ipv6.mask.dst_ip)); 1642 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v, 1643 dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 1644 &ib_spec->ipv6.val.dst_ip, 1645 sizeof(ib_spec->ipv6.val.dst_ip)); 1646 1647 set_tos(outer_headers_c, outer_headers_v, 1648 ib_spec->ipv6.mask.traffic_class, 1649 ib_spec->ipv6.val.traffic_class); 1650 1651 set_proto(outer_headers_c, outer_headers_v, 1652 ib_spec->ipv6.mask.next_hdr, 1653 ib_spec->ipv6.val.next_hdr); 1654 1655 MLX5_SET(fte_match_set_misc, misc_params_c, 1656 outer_ipv6_flow_label, 1657 ntohl(ib_spec->ipv6.mask.flow_label)); 1658 MLX5_SET(fte_match_set_misc, misc_params_v, 1659 outer_ipv6_flow_label, 1660 ntohl(ib_spec->ipv6.val.flow_label)); 1661 break; 1662 case IB_FLOW_SPEC_TCP: 1663 if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask, 1664 LAST_TCP_UDP_FIELD)) 1665 return -ENOTSUPP; 1666 1667 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol, 1668 0xff); 1669 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol, 1670 IPPROTO_TCP); 1671 1672 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_sport, 1673 ntohs(ib_spec->tcp_udp.mask.src_port)); 1674 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_sport, 1675 ntohs(ib_spec->tcp_udp.val.src_port)); 1676 1677 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_dport, 1678 ntohs(ib_spec->tcp_udp.mask.dst_port)); 1679 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_dport, 1680 ntohs(ib_spec->tcp_udp.val.dst_port)); 1681 break; 1682 case IB_FLOW_SPEC_UDP: 1683 if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask, 1684 LAST_TCP_UDP_FIELD)) 1685 return -ENOTSUPP; 1686 1687 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol, 1688 0xff); 1689 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol, 1690 IPPROTO_UDP); 1691 1692 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_sport, 1693 ntohs(ib_spec->tcp_udp.mask.src_port)); 1694 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_sport, 1695 ntohs(ib_spec->tcp_udp.val.src_port)); 1696 1697 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_dport, 1698 ntohs(ib_spec->tcp_udp.mask.dst_port)); 1699 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_dport, 1700 ntohs(ib_spec->tcp_udp.val.dst_port)); 1701 break; 1702 default: 1703 return -EINVAL; 1704 } 1705 1706 return 0; 1707} 1708 1709/* If a flow could catch both multicast and unicast packets, 1710 * it 
/* If a flow could catch both multicast and unicast packets,
 * it won't fall into the multicast flow steering table and this rule
 * could steal other multicast packets.
 */
static bool flow_is_multicast_only(struct ib_flow_attr *ib_attr)
{
    struct ib_flow_spec_eth *eth_spec;

    if (ib_attr->type != IB_FLOW_ATTR_NORMAL ||
        ib_attr->size < sizeof(struct ib_flow_attr) +
        sizeof(struct ib_flow_spec_eth) ||
        ib_attr->num_of_specs < 1)
        return false;

    eth_spec = (struct ib_flow_spec_eth *)(ib_attr + 1);
    if (eth_spec->type != IB_FLOW_SPEC_ETH ||
        eth_spec->size != sizeof(*eth_spec))
        return false;

    return is_multicast_ether_addr(eth_spec->mask.dst_mac) &&
           is_multicast_ether_addr(eth_spec->val.dst_mac);
}

static bool is_valid_attr(const struct ib_flow_attr *flow_attr)
{
    union ib_flow_spec *ib_spec = (union ib_flow_spec *)(flow_attr + 1);
    bool has_ipv4_spec = false;
    bool eth_type_ipv4 = true;
    unsigned int spec_index;

    /* Validate that ethertype is correct */
    for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
        if (ib_spec->type == IB_FLOW_SPEC_ETH &&
            ib_spec->eth.mask.ether_type) {
            if (!((ib_spec->eth.mask.ether_type == htons(0xffff)) &&
                  ib_spec->eth.val.ether_type == htons(ETH_P_IP)))
                eth_type_ipv4 = false;
        } else if (ib_spec->type == IB_FLOW_SPEC_IPV4) {
            has_ipv4_spec = true;
        }
        ib_spec = (void *)ib_spec + ib_spec->size;
    }
    return !has_ipv4_spec || eth_type_ipv4;
}

static void put_flow_table(struct mlx5_ib_dev *dev,
                           struct mlx5_ib_flow_prio *prio, bool ft_added)
{
    prio->refcount -= !!ft_added;
    if (!prio->refcount) {
        mlx5_destroy_flow_table(prio->flow_table);
        prio->flow_table = NULL;
    }
}

static int mlx5_ib_destroy_flow(struct ib_flow *flow_id)
{
    struct mlx5_ib_dev *dev = to_mdev(flow_id->qp->device);
    struct mlx5_ib_flow_handler *handler = container_of(flow_id,
                                                        struct mlx5_ib_flow_handler,
                                                        ibflow);
    struct mlx5_ib_flow_handler *iter, *tmp;

    mutex_lock(&dev->flow_db.lock);

    list_for_each_entry_safe(iter, tmp, &handler->list, list) {
        mlx5_del_flow_rule(iter->rule);
        put_flow_table(dev, iter->prio, true);
        list_del(&iter->list);
        kfree(iter);
    }

    mlx5_del_flow_rule(handler->rule);
    put_flow_table(dev, handler->prio, true);
    mutex_unlock(&dev->flow_db.lock);

    kfree(handler);

    return 0;
}

static int ib_prio_to_core_prio(unsigned int priority, bool dont_trap)
{
    priority *= 2;
    if (!dont_trap)
        priority++;
    return priority;
}

enum flow_table_type {
    MLX5_IB_FT_RX,
    MLX5_IB_FT_TX
};

#define MLX5_FS_MAX_TYPES   10
#define MLX5_FS_MAX_ENTRIES 32000UL
static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
                                                struct ib_flow_attr *flow_attr,
                                                enum flow_table_type ft_type)
{
    bool dont_trap = flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP;
    struct mlx5_flow_namespace *ns = NULL;
    struct mlx5_ib_flow_prio *prio;
    struct mlx5_flow_table *ft;
    int num_entries;
    int num_groups;
    int priority;
    int err = 0;

    if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
        if (flow_is_multicast_only(flow_attr) &&
            !dont_trap)
            priority = MLX5_IB_FLOW_MCAST_PRIO;
        else
            priority = ib_prio_to_core_prio(flow_attr->priority,
                                            dont_trap);
        ns = mlx5_get_flow_namespace(dev->mdev,
                                     MLX5_FLOW_NAMESPACE_BYPASS);
        num_entries = MLX5_FS_MAX_ENTRIES;
        num_groups = MLX5_FS_MAX_TYPES;
        prio = &dev->flow_db.prios[priority];
    } else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
               flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
        ns = mlx5_get_flow_namespace(dev->mdev,
                                     MLX5_FLOW_NAMESPACE_LEFTOVERS);
        build_leftovers_ft_param("bypass", &priority,
                                 &num_entries,
                                 &num_groups);
        prio = &dev->flow_db.prios[MLX5_IB_FLOW_LEFTOVERS_PRIO];
    } else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
        if (!MLX5_CAP_FLOWTABLE(dev->mdev,
                                allow_sniffer_and_nic_rx_shared_tir))
            return ERR_PTR(-ENOTSUPP);

        ns = mlx5_get_flow_namespace(dev->mdev, ft_type == MLX5_IB_FT_RX ?
                                     MLX5_FLOW_NAMESPACE_SNIFFER_RX :
                                     MLX5_FLOW_NAMESPACE_SNIFFER_TX);

        prio = &dev->flow_db.sniffer[ft_type];
        priority = 0;
        num_entries = 1;
        num_groups = 1;
    }

    if (!ns)
        return ERR_PTR(-ENOTSUPP);

    ft = prio->flow_table;
    if (!ft) {
        ft = mlx5_create_auto_grouped_flow_table(ns, priority, "bypass",
                                                 num_entries,
                                                 num_groups);

        if (!IS_ERR(ft)) {
            prio->refcount = 0;
            prio->flow_table = ft;
        } else {
            err = PTR_ERR(ft);
        }
    }

    return err ? ERR_PTR(err) : prio;
}

static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
                                                     struct mlx5_ib_flow_prio *ft_prio,
                                                     const struct ib_flow_attr *flow_attr,
                                                     struct mlx5_flow_destination *dst)
{
    struct mlx5_flow_table *ft = ft_prio->flow_table;
    struct mlx5_ib_flow_handler *handler;
    struct mlx5_flow_spec *spec;
    const void *ib_flow = (const void *)flow_attr + sizeof(*flow_attr);
    unsigned int spec_index;
    u32 action;
    int err = 0;

    if (!is_valid_attr(flow_attr))
        return ERR_PTR(-EINVAL);

    spec = mlx5_vzalloc(sizeof(*spec));
    handler = kzalloc(sizeof(*handler), GFP_KERNEL);
    if (!handler || !spec) {
        err = -ENOMEM;
        goto free;
    }

    INIT_LIST_HEAD(&handler->list);

    for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
        err = parse_flow_attr(spec->match_criteria,
                              spec->match_value, ib_flow);
        if (err < 0)
            goto free;

        ib_flow += ((union ib_flow_spec *)ib_flow)->size;
    }

    spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria);
    action = dst ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST :
                   MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
    handler->rule = mlx5_add_flow_rule(ft, spec->match_criteria_enable,
                                       spec->match_criteria,
                                       spec->match_value,
                                       action,
                                       MLX5_FS_DEFAULT_FLOW_TAG,
                                       dst);

    if (IS_ERR(handler->rule)) {
        err = PTR_ERR(handler->rule);
        goto free;
    }

    ft_prio->refcount++;
    handler->prio = ft_prio;

    ft_prio->flow_table = ft;
free:
    if (err)
        kfree(handler);
    kvfree(spec);
    return err ?
ERR_PTR(err) : handler; 1931} 1932 1933static struct mlx5_ib_flow_handler *create_dont_trap_rule(struct mlx5_ib_dev *dev, 1934 struct mlx5_ib_flow_prio *ft_prio, 1935 struct ib_flow_attr *flow_attr, 1936 struct mlx5_flow_destination *dst) 1937{ 1938 struct mlx5_ib_flow_handler *handler_dst = NULL; 1939 struct mlx5_ib_flow_handler *handler = NULL; 1940 1941 handler = create_flow_rule(dev, ft_prio, flow_attr, NULL); 1942 if (!IS_ERR(handler)) { 1943 handler_dst = create_flow_rule(dev, ft_prio, 1944 flow_attr, dst); 1945 if (IS_ERR(handler_dst)) { 1946 mlx5_del_flow_rule(handler->rule); 1947 ft_prio->refcount--; 1948 kfree(handler); 1949 handler = handler_dst; 1950 } else { 1951 list_add(&handler_dst->list, &handler->list); 1952 } 1953 } 1954 1955 return handler; 1956} 1957enum { 1958 LEFTOVERS_MC, 1959 LEFTOVERS_UC, 1960}; 1961 1962static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *dev, 1963 struct mlx5_ib_flow_prio *ft_prio, 1964 struct ib_flow_attr *flow_attr, 1965 struct mlx5_flow_destination *dst) 1966{ 1967 struct mlx5_ib_flow_handler *handler_ucast = NULL; 1968 struct mlx5_ib_flow_handler *handler = NULL; 1969 1970 static struct { 1971 struct ib_flow_attr flow_attr; 1972 struct ib_flow_spec_eth eth_flow; 1973 } leftovers_specs[] = { 1974 [LEFTOVERS_MC] = { 1975 .flow_attr = { 1976 .num_of_specs = 1, 1977 .size = sizeof(leftovers_specs[0]) 1978 }, 1979 .eth_flow = { 1980 .type = IB_FLOW_SPEC_ETH, 1981 .size = sizeof(struct ib_flow_spec_eth), 1982 .mask = {.dst_mac = {0x1} }, 1983 .val = {.dst_mac = {0x1} } 1984 } 1985 }, 1986 [LEFTOVERS_UC] = { 1987 .flow_attr = { 1988 .num_of_specs = 1, 1989 .size = sizeof(leftovers_specs[0]) 1990 }, 1991 .eth_flow = { 1992 .type = IB_FLOW_SPEC_ETH, 1993 .size = sizeof(struct ib_flow_spec_eth), 1994 .mask = {.dst_mac = {0x1} }, 1995 .val = {.dst_mac = {} } 1996 } 1997 } 1998 }; 1999 2000 handler = create_flow_rule(dev, ft_prio, 2001 &leftovers_specs[LEFTOVERS_MC].flow_attr, 2002 dst); 2003 if (!IS_ERR(handler) && 2004 flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT) { 2005 handler_ucast = create_flow_rule(dev, ft_prio, 2006 &leftovers_specs[LEFTOVERS_UC].flow_attr, 2007 dst); 2008 if (IS_ERR(handler_ucast)) { 2009 mlx5_del_flow_rule(handler->rule); 2010 ft_prio->refcount--; 2011 kfree(handler); 2012 handler = handler_ucast; 2013 } else { 2014 list_add(&handler_ucast->list, &handler->list); 2015 } 2016 } 2017 2018 return handler; 2019} 2020 2021static struct mlx5_ib_flow_handler *create_sniffer_rule(struct mlx5_ib_dev *dev, 2022 struct mlx5_ib_flow_prio *ft_rx, 2023 struct mlx5_ib_flow_prio *ft_tx, 2024 struct mlx5_flow_destination *dst) 2025{ 2026 struct mlx5_ib_flow_handler *handler_rx; 2027 struct mlx5_ib_flow_handler *handler_tx; 2028 int err; 2029 static const struct ib_flow_attr flow_attr = { 2030 .num_of_specs = 0, 2031 .size = sizeof(flow_attr) 2032 }; 2033 2034 handler_rx = create_flow_rule(dev, ft_rx, &flow_attr, dst); 2035 if (IS_ERR(handler_rx)) { 2036 err = PTR_ERR(handler_rx); 2037 goto err; 2038 } 2039 2040 handler_tx = create_flow_rule(dev, ft_tx, &flow_attr, dst); 2041 if (IS_ERR(handler_tx)) { 2042 err = PTR_ERR(handler_tx); 2043 goto err_tx; 2044 } 2045 2046 list_add(&handler_tx->list, &handler_rx->list); 2047 2048 return handler_rx; 2049 2050err_tx: 2051 mlx5_del_flow_rule(handler_rx->rule); 2052 ft_rx->refcount--; 2053 kfree(handler_rx); 2054err: 2055 return ERR_PTR(err); 2056} 2057 2058static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp, 2059 struct ib_flow_attr *flow_attr, 2060 int domain) 2061{ 2062 
struct mlx5_ib_dev *dev = to_mdev(qp->device); 2063 struct mlx5_ib_qp *mqp = to_mqp(qp); 2064 struct mlx5_ib_flow_handler *handler = NULL; 2065 struct mlx5_flow_destination *dst = NULL; 2066 struct mlx5_ib_flow_prio *ft_prio_tx = NULL; 2067 struct mlx5_ib_flow_prio *ft_prio; 2068 int err; 2069 2070 if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO) 2071 return ERR_PTR(-ENOSPC); 2072 2073 if (domain != IB_FLOW_DOMAIN_USER || 2074 flow_attr->port > MLX5_CAP_GEN(dev->mdev, num_ports) || 2075 (flow_attr->flags & ~IB_FLOW_ATTR_FLAGS_DONT_TRAP)) 2076 return ERR_PTR(-EINVAL); 2077 2078 dst = kzalloc(sizeof(*dst), GFP_KERNEL); 2079 if (!dst) 2080 return ERR_PTR(-ENOMEM); 2081 2082 mutex_lock(&dev->flow_db.lock); 2083 2084 ft_prio = get_flow_table(dev, flow_attr, MLX5_IB_FT_RX); 2085 if (IS_ERR(ft_prio)) { 2086 err = PTR_ERR(ft_prio); 2087 goto unlock; 2088 } 2089 if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) { 2090 ft_prio_tx = get_flow_table(dev, flow_attr, MLX5_IB_FT_TX); 2091 if (IS_ERR(ft_prio_tx)) { 2092 err = PTR_ERR(ft_prio_tx); 2093 ft_prio_tx = NULL; 2094 goto destroy_ft; 2095 } 2096 } 2097 2098 dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR; 2099 if (mqp->flags & MLX5_IB_QP_RSS) 2100 dst->tir_num = mqp->rss_qp.tirn; 2101 else 2102 dst->tir_num = mqp->raw_packet_qp.rq.tirn; 2103 2104 if (flow_attr->type == IB_FLOW_ATTR_NORMAL) { 2105 if (flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) { 2106 handler = create_dont_trap_rule(dev, ft_prio, 2107 flow_attr, dst); 2108 } else { 2109 handler = create_flow_rule(dev, ft_prio, flow_attr, 2110 dst); 2111 } 2112 } else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT || 2113 flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) { 2114 handler = create_leftovers_rule(dev, ft_prio, flow_attr, 2115 dst); 2116 } else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) { 2117 handler = create_sniffer_rule(dev, ft_prio, ft_prio_tx, dst); 2118 } else { 2119 err = -EINVAL; 2120 goto destroy_ft; 2121 } 2122 2123 if (IS_ERR(handler)) { 2124 err = PTR_ERR(handler); 2125 handler = NULL; 2126 goto destroy_ft; 2127 } 2128 2129 mutex_unlock(&dev->flow_db.lock); 2130 kfree(dst); 2131 2132 return &handler->ibflow; 2133 2134destroy_ft: 2135 put_flow_table(dev, ft_prio, false); 2136 if (ft_prio_tx) 2137 put_flow_table(dev, ft_prio_tx, false); 2138unlock: 2139 mutex_unlock(&dev->flow_db.lock); 2140 kfree(dst); 2141 kfree(handler); 2142 return ERR_PTR(err); 2143} 2144 2145static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) 2146{ 2147 struct mlx5_ib_dev *dev = to_mdev(ibqp->device); 2148 int err; 2149 2150 err = mlx5_core_attach_mcg(dev->mdev, gid, ibqp->qp_num); 2151 if (err) 2152 mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n", 2153 ibqp->qp_num, gid->raw); 2154 2155 return err; 2156} 2157 2158static int mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) 2159{ 2160 struct mlx5_ib_dev *dev = to_mdev(ibqp->device); 2161 int err; 2162 2163 err = mlx5_core_detach_mcg(dev->mdev, gid, ibqp->qp_num); 2164 if (err) 2165 mlx5_ib_warn(dev, "failed detaching QPN 0x%x, MGID %pI6\n", 2166 ibqp->qp_num, gid->raw); 2167 2168 return err; 2169} 2170 2171static int init_node_data(struct mlx5_ib_dev *dev) 2172{ 2173 int err; 2174 2175 err = mlx5_query_node_desc(dev, dev->ib_dev.node_desc); 2176 if (err) 2177 return err; 2178 2179 return mlx5_query_node_guid(dev, &dev->ib_dev.node_guid); 2180} 2181 2182static ssize_t show_fw_pages(struct device *device, struct device_attribute *attr, 2183 char *buf) 2184{ 2185 struct mlx5_ib_dev *dev = 2186 
container_of(device, struct mlx5_ib_dev, ib_dev.dev); 2187 2188 return sprintf(buf, "%lld\n", (long long)dev->mdev->priv.fw_pages); 2189} 2190 2191static ssize_t show_reg_pages(struct device *device, 2192 struct device_attribute *attr, char *buf) 2193{ 2194 struct mlx5_ib_dev *dev = 2195 container_of(device, struct mlx5_ib_dev, ib_dev.dev); 2196 2197 return sprintf(buf, "%d\n", atomic_read(&dev->mdev->priv.reg_pages)); 2198} 2199 2200static ssize_t show_hca(struct device *device, struct device_attribute *attr, 2201 char *buf) 2202{ 2203 struct mlx5_ib_dev *dev = 2204 container_of(device, struct mlx5_ib_dev, ib_dev.dev); 2205 return sprintf(buf, "MT%d\n", dev->mdev->pdev->device); 2206} 2207 2208static ssize_t show_rev(struct device *device, struct device_attribute *attr, 2209 char *buf) 2210{ 2211 struct mlx5_ib_dev *dev = 2212 container_of(device, struct mlx5_ib_dev, ib_dev.dev); 2213 return sprintf(buf, "%x\n", dev->mdev->pdev->revision); 2214} 2215 2216static ssize_t show_board(struct device *device, struct device_attribute *attr, 2217 char *buf) 2218{ 2219 struct mlx5_ib_dev *dev = 2220 container_of(device, struct mlx5_ib_dev, ib_dev.dev); 2221 return sprintf(buf, "%.*s\n", MLX5_BOARD_ID_LEN, 2222 dev->mdev->board_id); 2223} 2224 2225static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL); 2226static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL); 2227static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL); 2228static DEVICE_ATTR(fw_pages, S_IRUGO, show_fw_pages, NULL); 2229static DEVICE_ATTR(reg_pages, S_IRUGO, show_reg_pages, NULL); 2230 2231static struct device_attribute *mlx5_class_attributes[] = { 2232 &dev_attr_hw_rev, 2233 &dev_attr_hca_type, 2234 &dev_attr_board_id, 2235 &dev_attr_fw_pages, 2236 &dev_attr_reg_pages, 2237}; 2238 2239static void pkey_change_handler(struct work_struct *work) 2240{ 2241 struct mlx5_ib_port_resources *ports = 2242 container_of(work, struct mlx5_ib_port_resources, 2243 pkey_change_work); 2244 2245 mutex_lock(&ports->devr->mutex); 2246 mlx5_ib_gsi_pkey_change(ports->gsi); 2247 mutex_unlock(&ports->devr->mutex); 2248} 2249 2250static void mlx5_ib_handle_internal_error(struct mlx5_ib_dev *ibdev) 2251{ 2252 struct mlx5_ib_qp *mqp; 2253 struct mlx5_ib_cq *send_mcq, *recv_mcq; 2254 struct mlx5_core_cq *mcq; 2255 struct list_head cq_armed_list; 2256 unsigned long flags_qp; 2257 unsigned long flags_cq; 2258 unsigned long flags; 2259 2260 INIT_LIST_HEAD(&cq_armed_list); 2261 2262 /* Go over qp list reside on that ibdev, sync with create/destroy qp.*/ 2263 spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags); 2264 list_for_each_entry(mqp, &ibdev->qp_list, qps_list) { 2265 spin_lock_irqsave(&mqp->sq.lock, flags_qp); 2266 if (mqp->sq.tail != mqp->sq.head) { 2267 send_mcq = to_mcq(mqp->ibqp.send_cq); 2268 spin_lock_irqsave(&send_mcq->lock, flags_cq); 2269 if (send_mcq->mcq.comp && 2270 mqp->ibqp.send_cq->comp_handler) { 2271 if (!send_mcq->mcq.reset_notify_added) { 2272 send_mcq->mcq.reset_notify_added = 1; 2273 list_add_tail(&send_mcq->mcq.reset_notify, 2274 &cq_armed_list); 2275 } 2276 } 2277 spin_unlock_irqrestore(&send_mcq->lock, flags_cq); 2278 } 2279 spin_unlock_irqrestore(&mqp->sq.lock, flags_qp); 2280 spin_lock_irqsave(&mqp->rq.lock, flags_qp); 2281 /* no handling is needed for SRQ */ 2282 if (!mqp->ibqp.srq) { 2283 if (mqp->rq.tail != mqp->rq.head) { 2284 recv_mcq = to_mcq(mqp->ibqp.recv_cq); 2285 spin_lock_irqsave(&recv_mcq->lock, flags_cq); 2286 if (recv_mcq->mcq.comp && 2287 mqp->ibqp.recv_cq->comp_handler) { 2288 if 
(!recv_mcq->mcq.reset_notify_added) {
                        recv_mcq->mcq.reset_notify_added = 1;
                        list_add_tail(&recv_mcq->mcq.reset_notify,
                                      &cq_armed_list);
                    }
                }
                spin_unlock_irqrestore(&recv_mcq->lock,
                                       flags_cq);
            }
        }
        spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
    }
    /* At this point, locking and unlocking the QP locks above has
     * flushed all in-flight post-send requests. Now arm all of the
     * CQs collected on the list.
     */
    list_for_each_entry(mcq, &cq_armed_list, reset_notify) {
        mcq->comp(mcq);
    }
    spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
}

static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
                          enum mlx5_dev_event event, unsigned long param)
{
    struct mlx5_ib_dev *ibdev = (struct mlx5_ib_dev *)context;
    struct ib_event ibev;
    bool fatal = false;
    u8 port = 0;

    switch (event) {
    case MLX5_DEV_EVENT_SYS_ERROR:
        ibev.event = IB_EVENT_DEVICE_FATAL;
        mlx5_ib_handle_internal_error(ibdev);
        fatal = true;
        break;

    case MLX5_DEV_EVENT_PORT_UP:
    case MLX5_DEV_EVENT_PORT_DOWN:
    case MLX5_DEV_EVENT_PORT_INITIALIZED:
        port = (u8)param;

        /* In RoCE, port up/down events are handled in
         * mlx5_netdev_event().
         */
        if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
            IB_LINK_LAYER_ETHERNET)
            return;

        ibev.event = (event == MLX5_DEV_EVENT_PORT_UP) ?
                     IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;
        break;

    case MLX5_DEV_EVENT_LID_CHANGE:
        ibev.event = IB_EVENT_LID_CHANGE;
        port = (u8)param;
        break;

    case MLX5_DEV_EVENT_PKEY_CHANGE:
        ibev.event = IB_EVENT_PKEY_CHANGE;
        port = (u8)param;

        schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work);
        break;

    case MLX5_DEV_EVENT_GUID_CHANGE:
        ibev.event = IB_EVENT_GID_CHANGE;
        port = (u8)param;
        break;

    case MLX5_DEV_EVENT_CLIENT_REREG:
        ibev.event = IB_EVENT_CLIENT_REREGISTER;
        port = (u8)param;
        break;

    default:
        break;
    }

    ibev.device = &ibdev->ib_dev;
    ibev.element.port_num = port;

    if (port < 1 || port > ibdev->num_ports) {
        mlx5_ib_warn(ibdev, "warning: event on port %d\n", port);
        return;
    }

    if (ibdev->ib_active)
        ib_dispatch_event(&ibev);

    if (fatal)
        ibdev->ib_active = false;
}

static void get_ext_port_caps(struct mlx5_ib_dev *dev)
{
    int port;

    for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++)
        mlx5_query_ext_port_caps(dev, port);
}

static int get_port_caps(struct mlx5_ib_dev *dev)
{
    struct ib_device_attr *dprops = NULL;
    struct ib_port_attr *pprops = NULL;
    int err = -ENOMEM;
    int port;
    struct ib_udata uhw = {.inlen = 0, .outlen = 0};

    pprops = kmalloc(sizeof(*pprops), GFP_KERNEL);
    if (!pprops)
        goto out;

    dprops = kmalloc(sizeof(*dprops), GFP_KERNEL);
    if (!dprops)
        goto out;

    err = mlx5_ib_query_device(&dev->ib_dev, dprops, &uhw);
    if (err) {
        mlx5_ib_warn(dev, "query_device failed %d\n", err);
        goto out;
    }

    for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++) {
        err = mlx5_ib_query_port(&dev->ib_dev, port, pprops);
        if (err) {
            mlx5_ib_warn(dev, "query_port %d failed %d\n",
                         port, err);
            break;
        }
        dev->mdev->port_caps[port - 1].pkey_table_len =
            dprops->max_pkeys;
dev->mdev->port_caps[port - 1].gid_table_len = 2421 pprops->gid_tbl_len; 2422 mlx5_ib_dbg(dev, "pkey_table_len %d, gid_table_len %d\n", 2423 dprops->max_pkeys, pprops->gid_tbl_len); 2424 } 2425 2426out: 2427 kfree(pprops); 2428 kfree(dprops); 2429 2430 return err; 2431} 2432 2433static void destroy_umrc_res(struct mlx5_ib_dev *dev) 2434{ 2435 int err; 2436 2437 err = mlx5_mr_cache_cleanup(dev); 2438 if (err) 2439 mlx5_ib_warn(dev, "mr cache cleanup failed\n"); 2440 2441 mlx5_ib_destroy_qp(dev->umrc.qp); 2442 ib_free_cq(dev->umrc.cq); 2443 ib_dealloc_pd(dev->umrc.pd); 2444} 2445 2446enum { 2447 MAX_UMR_WR = 128, 2448}; 2449 2450static int create_umr_res(struct mlx5_ib_dev *dev) 2451{ 2452 struct ib_qp_init_attr *init_attr = NULL; 2453 struct ib_qp_attr *attr = NULL; 2454 struct ib_pd *pd; 2455 struct ib_cq *cq; 2456 struct ib_qp *qp; 2457 int ret; 2458 2459 attr = kzalloc(sizeof(*attr), GFP_KERNEL); 2460 init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL); 2461 if (!attr || !init_attr) { 2462 ret = -ENOMEM; 2463 goto error_0; 2464 } 2465 2466 pd = ib_alloc_pd(&dev->ib_dev, 0); 2467 if (IS_ERR(pd)) { 2468 mlx5_ib_dbg(dev, "Couldn't create PD for sync UMR QP\n"); 2469 ret = PTR_ERR(pd); 2470 goto error_0; 2471 } 2472 2473 cq = ib_alloc_cq(&dev->ib_dev, NULL, 128, 0, IB_POLL_SOFTIRQ); 2474 if (IS_ERR(cq)) { 2475 mlx5_ib_dbg(dev, "Couldn't create CQ for sync UMR QP\n"); 2476 ret = PTR_ERR(cq); 2477 goto error_2; 2478 } 2479 2480 init_attr->send_cq = cq; 2481 init_attr->recv_cq = cq; 2482 init_attr->sq_sig_type = IB_SIGNAL_ALL_WR; 2483 init_attr->cap.max_send_wr = MAX_UMR_WR; 2484 init_attr->cap.max_send_sge = 1; 2485 init_attr->qp_type = MLX5_IB_QPT_REG_UMR; 2486 init_attr->port_num = 1; 2487 qp = mlx5_ib_create_qp(pd, init_attr, NULL); 2488 if (IS_ERR(qp)) { 2489 mlx5_ib_dbg(dev, "Couldn't create sync UMR QP\n"); 2490 ret = PTR_ERR(qp); 2491 goto error_3; 2492 } 2493 qp->device = &dev->ib_dev; 2494 qp->real_qp = qp; 2495 qp->uobject = NULL; 2496 qp->qp_type = MLX5_IB_QPT_REG_UMR; 2497 2498 attr->qp_state = IB_QPS_INIT; 2499 attr->port_num = 1; 2500 ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_PKEY_INDEX | 2501 IB_QP_PORT, NULL); 2502 if (ret) { 2503 mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n"); 2504 goto error_4; 2505 } 2506 2507 memset(attr, 0, sizeof(*attr)); 2508 attr->qp_state = IB_QPS_RTR; 2509 attr->path_mtu = IB_MTU_256; 2510 2511 ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL); 2512 if (ret) { 2513 mlx5_ib_dbg(dev, "Couldn't modify umr QP to rtr\n"); 2514 goto error_4; 2515 } 2516 2517 memset(attr, 0, sizeof(*attr)); 2518 attr->qp_state = IB_QPS_RTS; 2519 ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL); 2520 if (ret) { 2521 mlx5_ib_dbg(dev, "Couldn't modify umr QP to rts\n"); 2522 goto error_4; 2523 } 2524 2525 dev->umrc.qp = qp; 2526 dev->umrc.cq = cq; 2527 dev->umrc.pd = pd; 2528 2529 sema_init(&dev->umrc.sem, MAX_UMR_WR); 2530 ret = mlx5_mr_cache_init(dev); 2531 if (ret) { 2532 mlx5_ib_warn(dev, "mr cache init failed %d\n", ret); 2533 goto error_4; 2534 } 2535 2536 kfree(attr); 2537 kfree(init_attr); 2538 2539 return 0; 2540 2541error_4: 2542 mlx5_ib_destroy_qp(qp); 2543 2544error_3: 2545 ib_free_cq(cq); 2546 2547error_2: 2548 ib_dealloc_pd(pd); 2549 2550error_0: 2551 kfree(attr); 2552 kfree(init_attr); 2553 return ret; 2554} 2555 2556static int create_dev_resources(struct mlx5_ib_resources *devr) 2557{ 2558 struct ib_srq_init_attr attr; 2559 struct mlx5_ib_dev *dev; 2560 struct ib_cq_init_attr cq_attr = {.cqe = 1}; 2561 int port; 2562 int ret = 0; 2563 2564 dev = 
container_of(devr, struct mlx5_ib_dev, devr);

    mutex_init(&devr->mutex);

    devr->p0 = mlx5_ib_alloc_pd(&dev->ib_dev, NULL, NULL);
    if (IS_ERR(devr->p0)) {
        ret = PTR_ERR(devr->p0);
        goto error0;
    }
    devr->p0->device = &dev->ib_dev;
    devr->p0->uobject = NULL;
    atomic_set(&devr->p0->usecnt, 0);

    devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, &cq_attr, NULL, NULL);
    if (IS_ERR(devr->c0)) {
        ret = PTR_ERR(devr->c0);
        goto error1;
    }
    devr->c0->device = &dev->ib_dev;
    devr->c0->uobject = NULL;
    devr->c0->comp_handler = NULL;
    devr->c0->event_handler = NULL;
    devr->c0->cq_context = NULL;
    atomic_set(&devr->c0->usecnt, 0);

    devr->x0 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
    if (IS_ERR(devr->x0)) {
        ret = PTR_ERR(devr->x0);
        goto error2;
    }
    devr->x0->device = &dev->ib_dev;
    devr->x0->inode = NULL;
    atomic_set(&devr->x0->usecnt, 0);
    mutex_init(&devr->x0->tgt_qp_mutex);
    INIT_LIST_HEAD(&devr->x0->tgt_qp_list);

    devr->x1 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
    if (IS_ERR(devr->x1)) {
        ret = PTR_ERR(devr->x1);
        goto error3;
    }
    devr->x1->device = &dev->ib_dev;
    devr->x1->inode = NULL;
    atomic_set(&devr->x1->usecnt, 0);
    mutex_init(&devr->x1->tgt_qp_mutex);
    INIT_LIST_HEAD(&devr->x1->tgt_qp_list);

    memset(&attr, 0, sizeof(attr));
    attr.attr.max_sge = 1;
    attr.attr.max_wr = 1;
    attr.srq_type = IB_SRQT_XRC;
    attr.ext.xrc.cq = devr->c0;
    attr.ext.xrc.xrcd = devr->x0;

    devr->s0 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
    if (IS_ERR(devr->s0)) {
        ret = PTR_ERR(devr->s0);
        goto error4;
    }
    devr->s0->device = &dev->ib_dev;
    devr->s0->pd = devr->p0;
    devr->s0->uobject = NULL;
    devr->s0->event_handler = NULL;
    devr->s0->srq_context = NULL;
    devr->s0->srq_type = IB_SRQT_XRC;
    devr->s0->ext.xrc.xrcd = devr->x0;
    devr->s0->ext.xrc.cq = devr->c0;
    atomic_inc(&devr->s0->ext.xrc.xrcd->usecnt);
    atomic_inc(&devr->s0->ext.xrc.cq->usecnt);
    atomic_inc(&devr->p0->usecnt);
    atomic_set(&devr->s0->usecnt, 0);

    memset(&attr, 0, sizeof(attr));
    attr.attr.max_sge = 1;
    attr.attr.max_wr = 1;
    attr.srq_type = IB_SRQT_BASIC;
    devr->s1 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
    if (IS_ERR(devr->s1)) {
        ret = PTR_ERR(devr->s1);
        goto error5;
    }
    devr->s1->device = &dev->ib_dev;
    devr->s1->pd = devr->p0;
    devr->s1->uobject = NULL;
    devr->s1->event_handler = NULL;
    devr->s1->srq_context = NULL;
    devr->s1->srq_type = IB_SRQT_BASIC;
    devr->s1->ext.xrc.cq = devr->c0;
    atomic_inc(&devr->p0->usecnt);
    atomic_set(&devr->s1->usecnt, 0);

    for (port = 0; port < ARRAY_SIZE(devr->ports); ++port) {
        INIT_WORK(&devr->ports[port].pkey_change_work,
                  pkey_change_handler);
        devr->ports[port].devr = devr;
    }

    return 0;

error5:
    mlx5_ib_destroy_srq(devr->s0);
error4:
    mlx5_ib_dealloc_xrcd(devr->x1);
error3:
    mlx5_ib_dealloc_xrcd(devr->x0);
error2:
    mlx5_ib_destroy_cq(devr->c0);
error1:
    mlx5_ib_dealloc_pd(devr->p0);
error0:
    return ret;
}

static void destroy_dev_resources(struct mlx5_ib_resources *devr)
{
    struct mlx5_ib_dev *dev =
        container_of(devr, struct mlx5_ib_dev, devr);
    int port;

    mlx5_ib_destroy_srq(devr->s1);
    mlx5_ib_destroy_srq(devr->s0);
2685 mlx5_ib_dealloc_xrcd(devr->x0); 2686 mlx5_ib_dealloc_xrcd(devr->x1); 2687 mlx5_ib_destroy_cq(devr->c0); 2688 mlx5_ib_dealloc_pd(devr->p0); 2689 2690 /* Make sure no change P_Key work items are still executing */ 2691 for (port = 0; port < dev->num_ports; ++port) 2692 cancel_work_sync(&devr->ports[port].pkey_change_work); 2693} 2694 2695static u32 get_core_cap_flags(struct ib_device *ibdev) 2696{ 2697 struct mlx5_ib_dev *dev = to_mdev(ibdev); 2698 enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, 1); 2699 u8 l3_type_cap = MLX5_CAP_ROCE(dev->mdev, l3_type); 2700 u8 roce_version_cap = MLX5_CAP_ROCE(dev->mdev, roce_version); 2701 u32 ret = 0; 2702 2703 if (ll == IB_LINK_LAYER_INFINIBAND) 2704 return RDMA_CORE_PORT_IBA_IB; 2705 2706 if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV4_CAP)) 2707 return 0; 2708 2709 if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV6_CAP)) 2710 return 0; 2711 2712 if (roce_version_cap & MLX5_ROCE_VERSION_1_CAP) 2713 ret |= RDMA_CORE_PORT_IBA_ROCE; 2714 2715 if (roce_version_cap & MLX5_ROCE_VERSION_2_CAP) 2716 ret |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP; 2717 2718 return ret; 2719} 2720 2721static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num, 2722 struct ib_port_immutable *immutable) 2723{ 2724 struct ib_port_attr attr; 2725 int err; 2726 2727 err = mlx5_ib_query_port(ibdev, port_num, &attr); 2728 if (err) 2729 return err; 2730 2731 immutable->pkey_tbl_len = attr.pkey_tbl_len; 2732 immutable->gid_tbl_len = attr.gid_tbl_len; 2733 immutable->core_cap_flags = get_core_cap_flags(ibdev); 2734 immutable->max_mad_size = IB_MGMT_MAD_SIZE; 2735 2736 return 0; 2737} 2738 2739static void get_dev_fw_str(struct ib_device *ibdev, char *str, 2740 size_t str_len) 2741{ 2742 struct mlx5_ib_dev *dev = 2743 container_of(ibdev, struct mlx5_ib_dev, ib_dev); 2744 snprintf(str, str_len, "%d.%d.%04d", fw_rev_maj(dev->mdev), 2745 fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev)); 2746} 2747 2748static int mlx5_roce_lag_init(struct mlx5_ib_dev *dev) 2749{ 2750 return 0; 2751} 2752 2753static void mlx5_roce_lag_cleanup(struct mlx5_ib_dev *dev) 2754{ 2755} 2756 2757static void mlx5_remove_roce_notifier(struct mlx5_ib_dev *dev) 2758{ 2759 if (dev->roce.nb.notifier_call) { 2760 unregister_netdevice_notifier(&dev->roce.nb); 2761 dev->roce.nb.notifier_call = NULL; 2762 } 2763} 2764 2765static int mlx5_enable_roce(struct mlx5_ib_dev *dev) 2766{ 2767 VNET_ITERATOR_DECL(vnet_iter); 2768 struct net_device *idev; 2769 int err; 2770 2771 /* Check if mlx5en net device already exists */ 2772 VNET_LIST_RLOCK(); 2773 VNET_FOREACH(vnet_iter) { 2774 IFNET_RLOCK(); 2775 CURVNET_SET_QUIET(vnet_iter); 2776 TAILQ_FOREACH(idev, &V_ifnet, if_link) { 2777 /* check if network interface belongs to mlx5en */ 2778 if (!mlx5_netdev_match(idev, dev->mdev, "mce")) 2779 continue; 2780 write_lock(&dev->roce.netdev_lock); 2781 dev->roce.netdev = idev; 2782 write_unlock(&dev->roce.netdev_lock); 2783 } 2784 CURVNET_RESTORE(); 2785 IFNET_RUNLOCK(); 2786 } 2787 VNET_LIST_RUNLOCK(); 2788 2789 dev->roce.nb.notifier_call = mlx5_netdev_event; 2790 err = register_netdevice_notifier(&dev->roce.nb); 2791 if (err) { 2792 dev->roce.nb.notifier_call = NULL; 2793 return err; 2794 } 2795 2796 err = mlx5_nic_vport_enable_roce(dev->mdev); 2797 if (err) 2798 goto err_unregister_netdevice_notifier; 2799 2800 err = mlx5_roce_lag_init(dev); 2801 if (err) 2802 goto err_disable_roce; 2803 2804 return 0; 2805 2806err_disable_roce: 2807 mlx5_nic_vport_disable_roce(dev->mdev); 2808 2809err_unregister_netdevice_notifier: 2810 
mlx5_remove_roce_notifier(dev); 2811 return err; 2812} 2813 2814static void mlx5_disable_roce(struct mlx5_ib_dev *dev) 2815{ 2816 mlx5_roce_lag_cleanup(dev); 2817 mlx5_nic_vport_disable_roce(dev->mdev); 2818} 2819 2820static void mlx5_ib_dealloc_q_port_counter(struct mlx5_ib_dev *dev, u8 port_num) 2821{ 2822 mlx5_vport_dealloc_q_counter(dev->mdev, 2823 MLX5_INTERFACE_PROTOCOL_IB, 2824 dev->port[port_num].q_cnt_id); 2825 dev->port[port_num].q_cnt_id = 0; 2826} 2827 2828static void mlx5_ib_dealloc_q_counters(struct mlx5_ib_dev *dev) 2829{ 2830 unsigned int i; 2831 2832 for (i = 0; i < dev->num_ports; i++) 2833 mlx5_ib_dealloc_q_port_counter(dev, i); 2834} 2835 2836static int mlx5_ib_alloc_q_counters(struct mlx5_ib_dev *dev) 2837{ 2838 int i; 2839 int ret; 2840 2841 for (i = 0; i < dev->num_ports; i++) { 2842 ret = mlx5_vport_alloc_q_counter(dev->mdev, 2843 MLX5_INTERFACE_PROTOCOL_IB, 2844 &dev->port[i].q_cnt_id); 2845 if (ret) { 2846 mlx5_ib_warn(dev, 2847 "couldn't allocate queue counter for port %d, err %d\n", 2848 i + 1, ret); 2849 goto dealloc_counters; 2850 } 2851 } 2852 2853 return 0; 2854 2855dealloc_counters: 2856 while (--i >= 0) 2857 mlx5_ib_dealloc_q_port_counter(dev, i); 2858 2859 return ret; 2860} 2861 2862static const char * const names[] = { 2863 "rx_write_requests", 2864 "rx_read_requests", 2865 "rx_atomic_requests", 2866 "out_of_buffer", 2867 "out_of_sequence", 2868 "duplicate_request", 2869 "rnr_nak_retry_err", 2870 "packet_seq_err", 2871 "implied_nak_seq_err", 2872 "local_ack_timeout_err", 2873}; 2874 2875static const size_t stats_offsets[] = { 2876 MLX5_BYTE_OFF(query_q_counter_out, rx_write_requests), 2877 MLX5_BYTE_OFF(query_q_counter_out, rx_read_requests), 2878 MLX5_BYTE_OFF(query_q_counter_out, rx_atomic_requests), 2879 MLX5_BYTE_OFF(query_q_counter_out, out_of_buffer), 2880 MLX5_BYTE_OFF(query_q_counter_out, out_of_sequence), 2881 MLX5_BYTE_OFF(query_q_counter_out, duplicate_request), 2882 MLX5_BYTE_OFF(query_q_counter_out, rnr_nak_retry_err), 2883 MLX5_BYTE_OFF(query_q_counter_out, packet_seq_err), 2884 MLX5_BYTE_OFF(query_q_counter_out, implied_nak_seq_err), 2885 MLX5_BYTE_OFF(query_q_counter_out, local_ack_timeout_err), 2886}; 2887 2888static struct rdma_hw_stats *mlx5_ib_alloc_hw_stats(struct ib_device *ibdev, 2889 u8 port_num) 2890{ 2891 BUILD_BUG_ON(ARRAY_SIZE(names) != ARRAY_SIZE(stats_offsets)); 2892 2893 /* We support only per port stats */ 2894 if (port_num == 0) 2895 return NULL; 2896 2897 return rdma_alloc_hw_stats_struct(names, ARRAY_SIZE(names), 2898 RDMA_HW_STATS_DEFAULT_LIFESPAN); 2899} 2900 2901static int mlx5_ib_get_hw_stats(struct ib_device *ibdev, 2902 struct rdma_hw_stats *stats, 2903 u8 port, int index) 2904{ 2905 struct mlx5_ib_dev *dev = to_mdev(ibdev); 2906 int outlen = MLX5_ST_SZ_BYTES(query_q_counter_out); 2907 void *out; 2908 __be32 val; 2909 int ret; 2910 int i; 2911 2912 if (!port || !stats) 2913 return -ENOSYS; 2914 2915 out = mlx5_vzalloc(outlen); 2916 if (!out) 2917 return -ENOMEM; 2918 2919 ret = mlx5_vport_query_q_counter(dev->mdev, 2920 dev->port[port - 1].q_cnt_id, 0, 2921 out, outlen); 2922 if (ret) 2923 goto free; 2924 2925 for (i = 0; i < ARRAY_SIZE(names); i++) { 2926 val = *(__be32 *)(out + stats_offsets[i]); 2927 stats->value[i] = (u64)be32_to_cpu(val); 2928 } 2929free: 2930 kvfree(out); 2931 return ARRAY_SIZE(names); 2932} 2933 2934static void *mlx5_ib_add(struct mlx5_core_dev *mdev) 2935{ 2936 struct mlx5_ib_dev *dev; 2937 enum rdma_link_layer ll; 2938 int port_type_cap; 2939 const char *name; 2940 int err; 2941 int i; 
2942 2943 port_type_cap = MLX5_CAP_GEN(mdev, port_type); 2944 ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap); 2945 2946 if ((ll == IB_LINK_LAYER_ETHERNET) && !MLX5_CAP_GEN(mdev, roce)) 2947 return NULL; 2948 2949 printk_once(KERN_INFO "%s", mlx5_version); 2950 2951 dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev)); 2952 if (!dev) 2953 return NULL; 2954 2955 dev->mdev = mdev; 2956 2957 dev->port = kcalloc(MLX5_CAP_GEN(mdev, num_ports), sizeof(*dev->port), 2958 GFP_KERNEL); 2959 if (!dev->port) 2960 goto err_dealloc; 2961 2962 rwlock_init(&dev->roce.netdev_lock); 2963 err = get_port_caps(dev); 2964 if (err) 2965 goto err_free_port; 2966 2967 if (mlx5_use_mad_ifc(dev)) 2968 get_ext_port_caps(dev); 2969 2970 MLX5_INIT_DOORBELL_LOCK(&dev->uar_lock); 2971 2972 name = "mlx5_%d"; 2973 2974 strlcpy(dev->ib_dev.name, name, IB_DEVICE_NAME_MAX); 2975 dev->ib_dev.owner = THIS_MODULE; 2976 dev->ib_dev.node_type = RDMA_NODE_IB_CA; 2977 dev->ib_dev.local_dma_lkey = 0 /* not supported for now */; 2978 dev->num_ports = MLX5_CAP_GEN(mdev, num_ports); 2979 dev->ib_dev.phys_port_cnt = dev->num_ports; 2980 dev->ib_dev.num_comp_vectors = 2981 dev->mdev->priv.eq_table.num_comp_vectors; 2982 dev->ib_dev.dma_device = &mdev->pdev->dev; 2983 2984 dev->ib_dev.uverbs_abi_ver = MLX5_IB_UVERBS_ABI_VERSION; 2985 dev->ib_dev.uverbs_cmd_mask = 2986 (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) | 2987 (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) | 2988 (1ull << IB_USER_VERBS_CMD_QUERY_PORT) | 2989 (1ull << IB_USER_VERBS_CMD_ALLOC_PD) | 2990 (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) | 2991 (1ull << IB_USER_VERBS_CMD_CREATE_AH) | 2992 (1ull << IB_USER_VERBS_CMD_DESTROY_AH) | 2993 (1ull << IB_USER_VERBS_CMD_REG_MR) | 2994 (1ull << IB_USER_VERBS_CMD_REREG_MR) | 2995 (1ull << IB_USER_VERBS_CMD_DEREG_MR) | 2996 (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) | 2997 (1ull << IB_USER_VERBS_CMD_CREATE_CQ) | 2998 (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) | 2999 (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) | 3000 (1ull << IB_USER_VERBS_CMD_CREATE_QP) | 3001 (1ull << IB_USER_VERBS_CMD_MODIFY_QP) | 3002 (1ull << IB_USER_VERBS_CMD_QUERY_QP) | 3003 (1ull << IB_USER_VERBS_CMD_DESTROY_QP) | 3004 (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) | 3005 (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) | 3006 (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) | 3007 (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) | 3008 (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) | 3009 (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) | 3010 (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) | 3011 (1ull << IB_USER_VERBS_CMD_OPEN_QP); 3012 dev->ib_dev.uverbs_ex_cmd_mask = 3013 (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE) | 3014 (1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ) | 3015 (1ull << IB_USER_VERBS_EX_CMD_CREATE_QP); 3016 3017 dev->ib_dev.query_device = mlx5_ib_query_device; 3018 dev->ib_dev.query_port = mlx5_ib_query_port; 3019 dev->ib_dev.get_link_layer = mlx5_ib_port_link_layer; 3020 if (ll == IB_LINK_LAYER_ETHERNET) 3021 dev->ib_dev.get_netdev = mlx5_ib_get_netdev; 3022 dev->ib_dev.query_gid = mlx5_ib_query_gid; 3023 dev->ib_dev.add_gid = mlx5_ib_add_gid; 3024 dev->ib_dev.del_gid = mlx5_ib_del_gid; 3025 dev->ib_dev.query_pkey = mlx5_ib_query_pkey; 3026 dev->ib_dev.modify_device = mlx5_ib_modify_device; 3027 dev->ib_dev.modify_port = mlx5_ib_modify_port; 3028 dev->ib_dev.alloc_ucontext = mlx5_ib_alloc_ucontext; 3029 dev->ib_dev.dealloc_ucontext = mlx5_ib_dealloc_ucontext; 3030 dev->ib_dev.mmap = mlx5_ib_mmap; 3031 dev->ib_dev.alloc_pd = mlx5_ib_alloc_pd; 3032 dev->ib_dev.dealloc_pd = mlx5_ib_dealloc_pd; 3033 dev->ib_dev.create_ah = 
mlx5_ib_create_ah; 3034 dev->ib_dev.query_ah = mlx5_ib_query_ah; 3035 dev->ib_dev.destroy_ah = mlx5_ib_destroy_ah; 3036 dev->ib_dev.create_srq = mlx5_ib_create_srq; 3037 dev->ib_dev.modify_srq = mlx5_ib_modify_srq; 3038 dev->ib_dev.query_srq = mlx5_ib_query_srq; 3039 dev->ib_dev.destroy_srq = mlx5_ib_destroy_srq; 3040 dev->ib_dev.post_srq_recv = mlx5_ib_post_srq_recv; 3041 dev->ib_dev.create_qp = mlx5_ib_create_qp; 3042 dev->ib_dev.modify_qp = mlx5_ib_modify_qp; 3043 dev->ib_dev.query_qp = mlx5_ib_query_qp; 3044 dev->ib_dev.destroy_qp = mlx5_ib_destroy_qp; 3045 dev->ib_dev.post_send = mlx5_ib_post_send; 3046 dev->ib_dev.post_recv = mlx5_ib_post_recv; 3047 dev->ib_dev.create_cq = mlx5_ib_create_cq; 3048 dev->ib_dev.modify_cq = mlx5_ib_modify_cq; 3049 dev->ib_dev.resize_cq = mlx5_ib_resize_cq; 3050 dev->ib_dev.destroy_cq = mlx5_ib_destroy_cq; 3051 dev->ib_dev.poll_cq = mlx5_ib_poll_cq; 3052 dev->ib_dev.req_notify_cq = mlx5_ib_arm_cq; 3053 dev->ib_dev.get_dma_mr = mlx5_ib_get_dma_mr; 3054 dev->ib_dev.reg_user_mr = mlx5_ib_reg_user_mr; 3055 dev->ib_dev.rereg_user_mr = mlx5_ib_rereg_user_mr; 3056 dev->ib_dev.reg_phys_mr = mlx5_ib_reg_phys_mr; 3057 dev->ib_dev.dereg_mr = mlx5_ib_dereg_mr; 3058 dev->ib_dev.attach_mcast = mlx5_ib_mcg_attach; 3059 dev->ib_dev.detach_mcast = mlx5_ib_mcg_detach; 3060 dev->ib_dev.process_mad = mlx5_ib_process_mad; 3061 dev->ib_dev.alloc_mr = mlx5_ib_alloc_mr; 3062 dev->ib_dev.map_mr_sg = mlx5_ib_map_mr_sg; 3063 dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status; 3064 dev->ib_dev.get_port_immutable = mlx5_port_immutable; 3065 dev->ib_dev.get_dev_fw_str = get_dev_fw_str; 3066 if (mlx5_core_is_pf(mdev)) { 3067 dev->ib_dev.get_vf_config = mlx5_ib_get_vf_config; 3068 dev->ib_dev.set_vf_link_state = mlx5_ib_set_vf_link_state; 3069 dev->ib_dev.get_vf_stats = mlx5_ib_get_vf_stats; 3070 dev->ib_dev.set_vf_guid = mlx5_ib_set_vf_guid; 3071 } 3072 3073 mlx5_ib_internal_fill_odp_caps(dev); 3074 3075 if (MLX5_CAP_GEN(mdev, imaicl)) { 3076 dev->ib_dev.alloc_mw = mlx5_ib_alloc_mw; 3077 dev->ib_dev.dealloc_mw = mlx5_ib_dealloc_mw; 3078 dev->ib_dev.uverbs_cmd_mask |= 3079 (1ull << IB_USER_VERBS_CMD_ALLOC_MW) | 3080 (1ull << IB_USER_VERBS_CMD_DEALLOC_MW); 3081 } 3082 3083 if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt) && 3084 MLX5_CAP_GEN(dev->mdev, retransmission_q_counters)) { 3085 dev->ib_dev.get_hw_stats = mlx5_ib_get_hw_stats; 3086 dev->ib_dev.alloc_hw_stats = mlx5_ib_alloc_hw_stats; 3087 } 3088 3089 if (MLX5_CAP_GEN(mdev, xrc)) { 3090 dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd; 3091 dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd; 3092 dev->ib_dev.uverbs_cmd_mask |= 3093 (1ull << IB_USER_VERBS_CMD_OPEN_XRCD) | 3094 (1ull << IB_USER_VERBS_CMD_CLOSE_XRCD); 3095 } 3096 3097 if (mlx5_ib_port_link_layer(&dev->ib_dev, 1) == 3098 IB_LINK_LAYER_ETHERNET) { 3099 dev->ib_dev.create_flow = mlx5_ib_create_flow; 3100 dev->ib_dev.destroy_flow = mlx5_ib_destroy_flow; 3101 dev->ib_dev.create_wq = mlx5_ib_create_wq; 3102 dev->ib_dev.modify_wq = mlx5_ib_modify_wq; 3103 dev->ib_dev.destroy_wq = mlx5_ib_destroy_wq; 3104 dev->ib_dev.create_rwq_ind_table = mlx5_ib_create_rwq_ind_table; 3105 dev->ib_dev.destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table; 3106 dev->ib_dev.uverbs_ex_cmd_mask |= 3107 (1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) | 3108 (1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW) | 3109 (1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) | 3110 (1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) | 3111 (1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) | 3112 (1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) | 3113 
(1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL); 3114 } 3115 err = init_node_data(dev); 3116 if (err) 3117 goto err_free_port; 3118 3119 mutex_init(&dev->flow_db.lock); 3120 mutex_init(&dev->cap_mask_mutex); 3121 INIT_LIST_HEAD(&dev->qp_list); 3122 spin_lock_init(&dev->reset_flow_resource_lock); 3123 3124 if (ll == IB_LINK_LAYER_ETHERNET) { 3125 err = mlx5_enable_roce(dev); 3126 if (err) 3127 goto err_free_port; 3128 } 3129 3130 err = create_dev_resources(&dev->devr); 3131 if (err) 3132 goto err_disable_roce; 3133 3134 err = mlx5_ib_odp_init_one(dev); 3135 if (err) 3136 goto err_rsrc; 3137 3138 err = mlx5_ib_alloc_q_counters(dev); 3139 if (err) 3140 goto err_odp; 3141 3142 err = ib_register_device(&dev->ib_dev, NULL); 3143 if (err) 3144 goto err_q_cnt; 3145 3146 err = create_umr_res(dev); 3147 if (err) 3148 goto err_dev; 3149 3150 for (i = 0; i < ARRAY_SIZE(mlx5_class_attributes); i++) { 3151 err = device_create_file(&dev->ib_dev.dev, 3152 mlx5_class_attributes[i]); 3153 if (err) 3154 goto err_umrc; 3155 } 3156 3157 err = mlx5_ib_init_congestion(dev); 3158 if (err) 3159 goto err_umrc; 3160 3161 dev->ib_active = true; 3162 3163 return dev; 3164 3165err_umrc: 3166 destroy_umrc_res(dev); 3167 3168err_dev: 3169 ib_unregister_device(&dev->ib_dev); 3170 3171err_q_cnt: 3172 mlx5_ib_dealloc_q_counters(dev); 3173 3174err_odp: 3175 mlx5_ib_odp_remove_one(dev); 3176 3177err_rsrc: 3178 destroy_dev_resources(&dev->devr); 3179 3180err_disable_roce: 3181 if (ll == IB_LINK_LAYER_ETHERNET) { 3182 mlx5_disable_roce(dev); 3183 mlx5_remove_roce_notifier(dev); 3184 } 3185 3186err_free_port: 3187 kfree(dev->port); 3188 3189err_dealloc: 3190 ib_dealloc_device((struct ib_device *)dev); 3191 3192 return NULL; 3193} 3194 3195static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context) 3196{ 3197 struct mlx5_ib_dev *dev = context; 3198 enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev, 1); 3199 3200 mlx5_ib_cleanup_congestion(dev); 3201 mlx5_remove_roce_notifier(dev); 3202 ib_unregister_device(&dev->ib_dev); 3203 mlx5_ib_dealloc_q_counters(dev); 3204 destroy_umrc_res(dev); 3205 mlx5_ib_odp_remove_one(dev); 3206 destroy_dev_resources(&dev->devr); 3207 if (ll == IB_LINK_LAYER_ETHERNET) 3208 mlx5_disable_roce(dev); 3209 kfree(dev->port); 3210 ib_dealloc_device(&dev->ib_dev); 3211} 3212 3213static struct mlx5_interface mlx5_ib_interface = { 3214 .add = mlx5_ib_add, 3215 .remove = mlx5_ib_remove, 3216 .event = mlx5_ib_event, 3217 .protocol = MLX5_INTERFACE_PROTOCOL_IB, 3218}; 3219 3220static int __init mlx5_ib_init(void) 3221{ 3222 int err; 3223 3224 if (deprecated_prof_sel != 2) 3225 pr_warn("prof_sel is deprecated for mlx5_ib, set it for mlx5_core\n"); 3226 3227 err = mlx5_ib_odp_init(); 3228 if (err) 3229 return err; 3230 3231 err = mlx5_register_interface(&mlx5_ib_interface); 3232 if (err) 3233 goto clean_odp; 3234 3235 return err; 3236 3237clean_odp: 3238 mlx5_ib_odp_cleanup(); 3239 return err; 3240} 3241 3242static void __exit mlx5_ib_cleanup(void) 3243{ 3244 mlx5_unregister_interface(&mlx5_ib_interface); 3245 mlx5_ib_odp_cleanup(); 3246} 3247 3248module_init_order(mlx5_ib_init, SI_ORDER_THIRD); 3249module_exit_order(mlx5_ib_cleanup, SI_ORDER_THIRD); 3250