mlx5_eq.c revision 331918
/*-
 * Copyright (c) 2013-2017, Mellanox Technologies, Ltd. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/dev/mlx5/mlx5_core/mlx5_eq.c 331918 2018-04-03 09:31:30Z hselasky $
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include <dev/mlx5/port.h>
#include <dev/mlx5/mlx5_ifc.h>
#include "mlx5_core.h"

#include "opt_rss.h"

#ifdef RSS
#include <net/rss_config.h>
#include <netinet/in_rss.h>
#endif

enum {
        MLX5_EQE_SIZE           = sizeof(struct mlx5_eqe),
        MLX5_EQE_OWNER_INIT_VAL = 0x1,
};

enum {
        MLX5_NUM_SPARE_EQE      = 0x80,
        MLX5_NUM_ASYNC_EQE      = 0x100,
        MLX5_NUM_CMD_EQE        = 32,
};

enum {
        MLX5_EQ_DOORBEL_OFFSET  = 0x40,
};

#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG)           | \
                               (1ull << MLX5_EVENT_TYPE_COMM_EST)           | \
                               (1ull << MLX5_EVENT_TYPE_SQ_DRAINED)         | \
                               (1ull << MLX5_EVENT_TYPE_CQ_ERROR)           | \
                               (1ull << MLX5_EVENT_TYPE_WQ_CATAS_ERROR)     | \
                               (1ull << MLX5_EVENT_TYPE_PATH_MIG_FAILED)    | \
                               (1ull << MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
                               (1ull << MLX5_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
                               (1ull << MLX5_EVENT_TYPE_PORT_CHANGE)        | \
                               (1ull << MLX5_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
                               (1ull << MLX5_EVENT_TYPE_SRQ_LAST_WQE)       | \
                               (1ull << MLX5_EVENT_TYPE_SRQ_RQ_LIMIT))

struct map_eq_in {
        u64     mask;
        u32     reserved;
        u32     unmap_eqn;
};

struct cre_des_eq {
        u8      reserved[15];
        u8      eqn;
};

/*Function prototype*/
static void mlx5_port_module_event(struct mlx5_core_dev *dev,
                                   struct mlx5_eqe *eqe);
static void mlx5_port_general_notification_event(struct mlx5_core_dev *dev,
                                                  struct mlx5_eqe *eqe);

static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
{
        u32 in[MLX5_ST_SZ_DW(destroy_eq_in)] = {0};
        u32 out[MLX5_ST_SZ_DW(destroy_eq_out)] = {0};

        MLX5_SET(destroy_eq_in, in, opcode, MLX5_CMD_OP_DESTROY_EQ);
        MLX5_SET(destroy_eq_in, in, eq_number, eqn);

        return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

static struct mlx5_eqe *get_eqe(struct mlx5_eq *eq, u32 entry)
{
        return mlx5_buf_offset(&eq->buf, entry * MLX5_EQE_SIZE);
}
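
/*
 * EQE ownership: the EQ is a power-of-two ring whose entries carry an
 * owner bit that the hardware is expected to flip on each pass over the
 * ring.  next_eqe_sw() compares the owner bit of the entry at the current
 * consumer index against the polarity implied by the wrap count
 * (cons_index & nent) and returns the entry only when the two match,
 * i.e. when the hardware has finished writing it; otherwise it returns
 * NULL and polling stops.
 */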
static struct mlx5_eqe *next_eqe_sw(struct mlx5_eq *eq)
{
        struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & (eq->nent - 1));

        return ((eqe->owner & 1) ^ !!(eq->cons_index & eq->nent)) ? NULL : eqe;
}

static const char *eqe_type_str(u8 type)
{
        switch (type) {
        case MLX5_EVENT_TYPE_COMP:
                return "MLX5_EVENT_TYPE_COMP";
        case MLX5_EVENT_TYPE_PATH_MIG:
                return "MLX5_EVENT_TYPE_PATH_MIG";
        case MLX5_EVENT_TYPE_COMM_EST:
                return "MLX5_EVENT_TYPE_COMM_EST";
        case MLX5_EVENT_TYPE_SQ_DRAINED:
                return "MLX5_EVENT_TYPE_SQ_DRAINED";
        case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
                return "MLX5_EVENT_TYPE_SRQ_LAST_WQE";
        case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
                return "MLX5_EVENT_TYPE_SRQ_RQ_LIMIT";
        case MLX5_EVENT_TYPE_CQ_ERROR:
                return "MLX5_EVENT_TYPE_CQ_ERROR";
        case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
                return "MLX5_EVENT_TYPE_WQ_CATAS_ERROR";
        case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
                return "MLX5_EVENT_TYPE_PATH_MIG_FAILED";
        case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
                return "MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR";
        case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
                return "MLX5_EVENT_TYPE_WQ_ACCESS_ERROR";
        case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
                return "MLX5_EVENT_TYPE_SRQ_CATAS_ERROR";
        case MLX5_EVENT_TYPE_INTERNAL_ERROR:
                return "MLX5_EVENT_TYPE_INTERNAL_ERROR";
        case MLX5_EVENT_TYPE_PORT_CHANGE:
                return "MLX5_EVENT_TYPE_PORT_CHANGE";
        case MLX5_EVENT_TYPE_GPIO_EVENT:
                return "MLX5_EVENT_TYPE_GPIO_EVENT";
        case MLX5_EVENT_TYPE_CODING_PORT_MODULE_EVENT:
                return "MLX5_EVENT_TYPE_PORT_MODULE_EVENT";
        case MLX5_EVENT_TYPE_REMOTE_CONFIG:
                return "MLX5_EVENT_TYPE_REMOTE_CONFIG";
        case MLX5_EVENT_TYPE_DB_BF_CONGESTION:
                return "MLX5_EVENT_TYPE_DB_BF_CONGESTION";
        case MLX5_EVENT_TYPE_STALL_EVENT:
                return "MLX5_EVENT_TYPE_STALL_EVENT";
        case MLX5_EVENT_TYPE_CMD:
                return "MLX5_EVENT_TYPE_CMD";
        case MLX5_EVENT_TYPE_PAGE_REQUEST:
                return "MLX5_EVENT_TYPE_PAGE_REQUEST";
        case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE:
                return "MLX5_EVENT_TYPE_NIC_VPORT_CHANGE";
        case MLX5_EVENT_TYPE_CODING_DCBX_CHANGE_EVENT:
                return "MLX5_EVENT_TYPE_CODING_DCBX_CHANGE_EVENT";
        case MLX5_EVENT_TYPE_CODING_GENERAL_NOTIFICATION_EVENT:
                return "MLX5_EVENT_TYPE_CODING_GENERAL_NOTIFICATION_EVENT";
        default:
                return "Unrecognized event";
        }
}

static enum mlx5_dev_event port_subtype_event(u8 subtype)
{
        switch (subtype) {
        case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
                return MLX5_DEV_EVENT_PORT_DOWN;
        case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
                return MLX5_DEV_EVENT_PORT_UP;
        case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
                return MLX5_DEV_EVENT_PORT_INITIALIZED;
        case MLX5_PORT_CHANGE_SUBTYPE_LID:
                return MLX5_DEV_EVENT_LID_CHANGE;
        case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
                return MLX5_DEV_EVENT_PKEY_CHANGE;
        case MLX5_PORT_CHANGE_SUBTYPE_GUID:
                return MLX5_DEV_EVENT_GUID_CHANGE;
        case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
                return MLX5_DEV_EVENT_CLIENT_REREG;
        }
        return -1;
}

static enum mlx5_dev_event dcbx_subevent(u8 subtype)
{
        switch (subtype) {
        case MLX5_DCBX_EVENT_SUBTYPE_ERROR_STATE_DCBX:
                return MLX5_DEV_EVENT_ERROR_STATE_DCBX;
        case MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_CHANGE:
                return MLX5_DEV_EVENT_REMOTE_CONFIG_CHANGE;
        case MLX5_DCBX_EVENT_SUBTYPE_LOCAL_OPER_CHANGE:
                return MLX5_DEV_EVENT_LOCAL_OPER_CHANGE;
        case MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_APP_PRIORITY_CHANGE:
                return MLX5_DEV_EVENT_REMOTE_CONFIG_APPLICATION_PRIORITY_CHANGE;
        }
        return -1;
}
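
/*
 * Consumer index / arm doorbell: the value written is the EQ number in
 * the top byte plus the low 24 bits of the consumer index.  A write at
 * eq->doorbell (arm != 0) re-arms the EQ so the next event raises an
 * interrupt; a write two dwords further (arm == 0) only publishes the
 * updated consumer index, which is what the periodic spare-entry update
 * in mlx5_eq_int() uses.
 */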
static void eq_update_ci(struct mlx5_eq *eq, int arm)
{
        __be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
        u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);
        __raw_writel((__force u32) cpu_to_be32(val), addr);
        /* We still want ordering, just not swabbing, so add a barrier */
        mb();
}

static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
        struct mlx5_eqe *eqe;
        int eqes_found = 0;
        int set_ci = 0;
        u32 cqn;
        u32 rsn;
        u8 port;

        while ((eqe = next_eqe_sw(eq))) {
                /*
                 * Make sure we read EQ entry contents after we've
                 * checked the ownership bit.
                 */
                rmb();

                mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n",
                              eq->eqn, eqe_type_str(eqe->type));
                switch (eqe->type) {
                case MLX5_EVENT_TYPE_COMP:
                        cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;
                        mlx5_cq_completion(dev, cqn);
                        break;

                case MLX5_EVENT_TYPE_PATH_MIG:
                case MLX5_EVENT_TYPE_COMM_EST:
                case MLX5_EVENT_TYPE_SQ_DRAINED:
                case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
                case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
                case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
                case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
                case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
                        rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
                        mlx5_core_dbg(dev, "event %s(%d) arrived on resource 0x%x\n",
                                      eqe_type_str(eqe->type), eqe->type, rsn);
                        mlx5_rsc_event(dev, rsn, eqe->type);
                        break;

                case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
                case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
                        rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
                        mlx5_core_dbg(dev, "SRQ event %s(%d): srqn 0x%x\n",
                                      eqe_type_str(eqe->type), eqe->type, rsn);
                        mlx5_srq_event(dev, rsn, eqe->type);
                        break;

                case MLX5_EVENT_TYPE_CMD:
                        if (dev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR)
                                mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector));
                        break;

                case MLX5_EVENT_TYPE_PORT_CHANGE:
                        port = (eqe->data.port.port >> 4) & 0xf;
                        switch (eqe->sub_type) {
                        case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
                        case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
                        case MLX5_PORT_CHANGE_SUBTYPE_LID:
                        case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
                        case MLX5_PORT_CHANGE_SUBTYPE_GUID:
                        case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
                        case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
                                if (dev->event)
                                        dev->event(dev, port_subtype_event(eqe->sub_type),
                                                   (unsigned long)port);
                                break;
                        default:
                                mlx5_core_warn(dev, "Port event with unrecognized subtype: port %d, sub_type %d\n",
                                               port, eqe->sub_type);
                        }
                        break;

                case MLX5_EVENT_TYPE_CODING_DCBX_CHANGE_EVENT:
                        port = (eqe->data.port.port >> 4) & 0xf;
                        switch (eqe->sub_type) {
                        case MLX5_DCBX_EVENT_SUBTYPE_ERROR_STATE_DCBX:
                        case MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_CHANGE:
                        case MLX5_DCBX_EVENT_SUBTYPE_LOCAL_OPER_CHANGE:
                        case MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_APP_PRIORITY_CHANGE:
                                if (dev->event)
                                        dev->event(dev,
                                                   dcbx_subevent(eqe->sub_type),
                                                   0);
                                break;
                        default:
                                mlx5_core_warn(dev,
                                               "dcbx event with unrecognized subtype: port %d, sub_type %d\n",
                                               port, eqe->sub_type);
                        }
                        break;

                case MLX5_EVENT_TYPE_CODING_GENERAL_NOTIFICATION_EVENT:
                        mlx5_port_general_notification_event(dev, eqe);
                        break;

                case MLX5_EVENT_TYPE_CQ_ERROR:
                        cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
                        mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",
                                       cqn, eqe->data.cq_err.syndrome);
                        mlx5_cq_event(dev, cqn, eqe->type);
                        break;
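
                /*
                 * Firmware page housekeeping: num_pages is signed; a
                 * positive count asks the driver to supply that many
                 * device pages for the given function, while a negative
                 * count indicates pages the firmware wants returned.
                 * Either way the request is handed off to
                 * mlx5_core_req_pages_handler().
                 */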
                case MLX5_EVENT_TYPE_PAGE_REQUEST:
                        {
                                u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
                                s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages);

                                mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n",
                                              func_id, npages);
                                mlx5_core_req_pages_handler(dev, func_id, npages);
                        }
                        break;

                case MLX5_EVENT_TYPE_CODING_PORT_MODULE_EVENT:
                        mlx5_port_module_event(dev, eqe);
                        break;

                case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE:
                        {
                                struct mlx5_eqe_vport_change *vc_eqe =
                                        &eqe->data.vport_change;
                                u16 vport_num = be16_to_cpu(vc_eqe->vport_num);

                                if (dev->event)
                                        dev->event(dev,
                                                   MLX5_DEV_EVENT_VPORT_CHANGE,
                                                   (unsigned long)vport_num);
                        }
                        break;

                default:
                        mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n",
                                       eqe->type, eq->eqn);
                        break;
                }

                ++eq->cons_index;
                eqes_found = 1;
                ++set_ci;

                /* The HCA will think the queue has overflowed if we
                 * don't tell it we've been processing events. We
                 * create our EQs with MLX5_NUM_SPARE_EQE extra
                 * entries, so we must update our consumer index at
                 * least that often.
                 */
                if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) {
                        eq_update_ci(eq, 0);
                        set_ci = 0;
                }
        }

        eq_update_ci(eq, 1);

        return eqes_found;
}

static irqreturn_t mlx5_msix_handler(int irq, void *eq_ptr)
{
        struct mlx5_eq *eq = eq_ptr;
        struct mlx5_core_dev *dev = eq->dev;

        mlx5_eq_int(dev, eq);

        /* MSI-X vectors always belong to us */
        return IRQ_HANDLED;
}

static void init_eq_buf(struct mlx5_eq *eq)
{
        struct mlx5_eqe *eqe;
        int i;

        for (i = 0; i < eq->nent; i++) {
                eqe = get_eqe(eq, i);
                eqe->owner = MLX5_EQE_OWNER_INIT_VAL;
        }
}

int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
                       int nent, u64 mask, const char *name, struct mlx5_uar *uar)
{
        u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0};
        struct mlx5_priv *priv = &dev->priv;
        __be64 *pas;
        void *eqc;
        int inlen;
        u32 *in;
        int err;

        eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE);
        eq->cons_index = 0;
        err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, 2 * PAGE_SIZE,
                             &eq->buf);
        if (err)
                return err;

        init_eq_buf(eq);

        inlen = MLX5_ST_SZ_BYTES(create_eq_in) +
                MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->buf.npages;
        in = mlx5_vzalloc(inlen);
        if (!in) {
                err = -ENOMEM;
                goto err_buf;
        }

        pas = (__be64 *)MLX5_ADDR_OF(create_eq_in, in, pas);
        mlx5_fill_page_array(&eq->buf, pas);

        MLX5_SET(create_eq_in, in, opcode, MLX5_CMD_OP_CREATE_EQ);
        MLX5_SET64(create_eq_in, in, event_bitmask, mask);

        eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry);
        MLX5_SET(eqc, eqc, log_eq_size, ilog2(eq->nent));
        MLX5_SET(eqc, eqc, uar_page, uar->index);
        MLX5_SET(eqc, eqc, intr, vecidx);
        MLX5_SET(eqc, eqc, log_page_size,
                 eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);

        err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
        if (err)
                goto err_in;

        eq->eqn = MLX5_GET(create_eq_out, out, eq_number);
        eq->irqn = vecidx;
        eq->dev = dev;
        eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET;
        snprintf(priv->irq_info[vecidx].name, MLX5_MAX_IRQ_NAME, "%s@pci:%s",
                 name, pci_name(dev->pdev));
        err = request_irq(priv->msix_arr[vecidx].vector, mlx5_msix_handler, 0,
                          priv->irq_info[vecidx].name, eq);
        if (err)
                goto err_eq;
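
        /*
         * When the kernel is built with RSS, pin each completion vector's
         * interrupt to the CPU backing the corresponding RSS bucket, so
         * that completion processing runs on the CPU that owns the flows
         * hashed to that bucket.
         */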
#ifdef RSS
        if (vecidx >= MLX5_EQ_VEC_COMP_BASE) {
                u8 bucket = vecidx - MLX5_EQ_VEC_COMP_BASE;
                err = bind_irq_to_cpu(priv->msix_arr[vecidx].vector,
                                      rss_getcpu(bucket % rss_getnumbuckets()));
                if (err)
                        goto err_irq;
        }
#else
        if (0)
                goto err_irq;
#endif

        /* EQs are created in ARMED state
         */
        eq_update_ci(eq, 1);

        kvfree(in);
        return 0;

err_irq:
        free_irq(priv->msix_arr[vecidx].vector, eq);

err_eq:
        mlx5_cmd_destroy_eq(dev, eq->eqn);

err_in:
        kvfree(in);

err_buf:
        mlx5_buf_free(dev, &eq->buf);
        return err;
}
EXPORT_SYMBOL_GPL(mlx5_create_map_eq);

int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
        int err;

        free_irq(dev->priv.msix_arr[eq->irqn].vector, eq);
        err = mlx5_cmd_destroy_eq(dev, eq->eqn);
        if (err)
                mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
                               eq->eqn);
        mlx5_buf_free(dev, &eq->buf);

        return err;
}
EXPORT_SYMBOL_GPL(mlx5_destroy_unmap_eq);

int mlx5_eq_init(struct mlx5_core_dev *dev)
{
        int err;

        spin_lock_init(&dev->priv.eq_table.lock);

        err = 0;

        return err;
}

void mlx5_eq_cleanup(struct mlx5_core_dev *dev)
{
}

int mlx5_start_eqs(struct mlx5_core_dev *dev)
{
        struct mlx5_eq_table *table = &dev->priv.eq_table;
        u64 async_event_mask = MLX5_ASYNC_EVENT_MASK;
        int err;

        if (MLX5_CAP_GEN(dev, port_module_event))
                async_event_mask |= (1ull <<
                                     MLX5_EVENT_TYPE_CODING_PORT_MODULE_EVENT);

        if (MLX5_CAP_GEN(dev, nic_vport_change_event))
                async_event_mask |= (1ull <<
                                     MLX5_EVENT_TYPE_NIC_VPORT_CHANGE);

        if (MLX5_CAP_GEN(dev, dcbx))
                async_event_mask |= (1ull <<
                                     MLX5_EVENT_TYPE_CODING_DCBX_CHANGE_EVENT);

        err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
                                 MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD,
                                 "mlx5_cmd_eq", &dev->priv.uuari.uars[0]);
        if (err) {
                mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err);
                return err;
        }

        mlx5_cmd_use_events(dev);

        err = mlx5_create_map_eq(dev, &table->async_eq, MLX5_EQ_VEC_ASYNC,
                                 MLX5_NUM_ASYNC_EQE, async_event_mask,
                                 "mlx5_async_eq", &dev->priv.uuari.uars[0]);
        if (err) {
                mlx5_core_warn(dev, "failed to create async EQ %d\n", err);
                goto err1;
        }

        err = mlx5_create_map_eq(dev, &table->pages_eq,
                                 MLX5_EQ_VEC_PAGES,
                                 /* TODO: sriov max_vf + */ 1,
                                 1 << MLX5_EVENT_TYPE_PAGE_REQUEST, "mlx5_pages_eq",
                                 &dev->priv.uuari.uars[0]);
        if (err) {
                mlx5_core_warn(dev, "failed to create pages EQ %d\n", err);
                goto err2;
        }

        return err;

err2:
        mlx5_destroy_unmap_eq(dev, &table->async_eq);

err1:
        mlx5_cmd_use_polling(dev);
        mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
        return err;
}

int mlx5_stop_eqs(struct mlx5_core_dev *dev)
{
        struct mlx5_eq_table *table = &dev->priv.eq_table;
        int err;

        err = mlx5_destroy_unmap_eq(dev, &table->pages_eq);
        if (err)
                return err;

        mlx5_destroy_unmap_eq(dev, &table->async_eq);
        mlx5_cmd_use_polling(dev);

        err = mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
        if (err)
                mlx5_cmd_use_events(dev);

        return err;
}
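
/*
 * Read back the firmware's view of an EQ context (QUERY_EQ).  The caller
 * owns the output buffer; outlen should be large enough for a
 * query_eq_out structure.
 */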
int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
                       u32 *out, int outlen)
{
        u32 in[MLX5_ST_SZ_DW(query_eq_in)] = {0};

        memset(out, 0, outlen);
        MLX5_SET(query_eq_in, in, opcode, MLX5_CMD_OP_QUERY_EQ);
        MLX5_SET(query_eq_in, in, eq_number, eq->eqn);

        return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
EXPORT_SYMBOL_GPL(mlx5_core_eq_query);

static const char *mlx5_port_module_event_error_type_to_string(u8 error_type)
{
        switch (error_type) {
        case MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED:
                return "Power Budget Exceeded";
        case MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX_CABLE_MODULE:
                return "Long Range for non MLNX cable/module";
        case MLX5_MODULE_EVENT_ERROR_BUS_STUCK:
                return "Bus stuck (I2C or data shorted)";
        case MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT:
                return "No EEPROM/retry timeout";
        case MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST:
                return "Enforce part number list";
        case MLX5_MODULE_EVENT_ERROR_UNSUPPORTED_CABLE:
                return "Unsupported Cable";
        case MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE:
                return "High Temperature";
        case MLX5_MODULE_EVENT_ERROR_CABLE_IS_SHORTED:
                return "Cable is shorted";

        default:
                return "Unknown error type";
        }
}

unsigned int mlx5_query_module_status(struct mlx5_core_dev *dev, int module_num)
{
        if (module_num < 0 || module_num >= MLX5_MAX_PORTS)
                return 0;	/* undefined */
        return dev->module_status[module_num];
}

static void mlx5_port_module_event(struct mlx5_core_dev *dev,
                                   struct mlx5_eqe *eqe)
{
        unsigned int module_num;
        unsigned int module_status;
        unsigned int error_type;
        struct mlx5_eqe_port_module_event *module_event_eqe;
        struct pci_dev *pdev = dev->pdev;

        module_event_eqe = &eqe->data.port_module_event;

        module_num = (unsigned int)module_event_eqe->module;
        module_status = (unsigned int)module_event_eqe->module_status &
                        PORT_MODULE_EVENT_MODULE_STATUS_MASK;
        error_type = (unsigned int)module_event_eqe->error_type &
                     PORT_MODULE_EVENT_ERROR_TYPE_MASK;

        switch (module_status) {
        case MLX5_MODULE_STATUS_PLUGGED_ENABLED:
                device_printf((&pdev->dev)->bsddev, "INFO: "
                    "Module %u, status: plugged and enabled\n", module_num);
                break;

        case MLX5_MODULE_STATUS_UNPLUGGED:
                device_printf((&pdev->dev)->bsddev, "INFO: "
                    "Module %u, status: unplugged\n", module_num);
                break;

        case MLX5_MODULE_STATUS_ERROR:
                device_printf((&pdev->dev)->bsddev, "INFO: "
                    "Module %u, status: error, %s\n", module_num,
                    mlx5_port_module_event_error_type_to_string(error_type));
                break;

        case MLX5_MODULE_STATUS_PLUGGED_DISABLED:
                device_printf((&pdev->dev)->bsddev, "INFO: "
                    "Module %u, status: plugged but disabled\n", module_num);
                break;

        default:
                device_printf((&pdev->dev)->bsddev, "INFO: "
                    "Module %u, unknown status\n", module_num);
        }
        /* store module status */
        if (module_num < MLX5_MAX_PORTS)
                dev->module_status[module_num] = module_status;
}

static void mlx5_port_general_notification_event(struct mlx5_core_dev *dev,
                                                 struct mlx5_eqe *eqe)
{
        u8 port = (eqe->data.port.port >> 4) & 0xf;
        u32 rqn = 0;
        struct mlx5_eqe_general_notification_event *general_event = NULL;

        switch (eqe->sub_type) {
        case MLX5_GEN_EVENT_SUBTYPE_DELAY_DROP_TIMEOUT:
                general_event = &eqe->data.general_notifications;
                rqn = be32_to_cpu(general_event->rq_user_index_delay_drop) &
                      0xffffff;
                break;
        default:
                mlx5_core_warn(dev,
                               "general event with unrecognized subtype: port %d, sub_type %d\n",
                               port, eqe->sub_type);
break; 691 } 692} 693 694