mlx5_eq.c revision 331575
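EQ (event queue) handling for the mlx5 core driver: EQE polling and dispatch, EQ creation and teardown, and port module event reporting.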
/*-
 * Copyright (c) 2013-2017, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/dev/mlx5/mlx5_core/mlx5_eq.c 331575 2018-03-26 20:10:49Z hselasky $
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include <dev/mlx5/driver.h>
#include <dev/mlx5/mlx5_ifc.h>
#include "mlx5_core.h"

#include "opt_rss.h"

#ifdef RSS
#include <net/rss_config.h>
#include <netinet/in_rss.h>
#endif

enum {
	MLX5_EQE_SIZE		= sizeof(struct mlx5_eqe),
	MLX5_EQE_OWNER_INIT_VAL	= 0x1,
};

enum {
	MLX5_NUM_SPARE_EQE	= 0x80,
	MLX5_NUM_ASYNC_EQE	= 0x100,
	MLX5_NUM_CMD_EQE	= 32,
};

enum {
	MLX5_EQ_DOORBEL_OFFSET	= 0x40,
};

#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG)	    | \
			       (1ull << MLX5_EVENT_TYPE_COMM_EST)	    | \
			       (1ull << MLX5_EVENT_TYPE_SQ_DRAINED)	    | \
			       (1ull << MLX5_EVENT_TYPE_CQ_ERROR)	    | \
			       (1ull << MLX5_EVENT_TYPE_WQ_CATAS_ERROR)	    | \
			       (1ull << MLX5_EVENT_TYPE_PATH_MIG_FAILED)    | \
			       (1ull << MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
			       (1ull << MLX5_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
			       (1ull << MLX5_EVENT_TYPE_PORT_CHANGE)	    | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_LAST_WQE)	    | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_RQ_LIMIT))

struct map_eq_in {
	u64	mask;
	u32	reserved;
	u32	unmap_eqn;
};

struct cre_des_eq {
	u8	reserved[15];
	u8	eqn;
};

/* Function prototypes */
static void mlx5_port_module_event(struct mlx5_core_dev *dev,
				   struct mlx5_eqe *eqe);
static void mlx5_port_general_notification_event(struct mlx5_core_dev *dev,
						 struct mlx5_eqe *eqe);

static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
{
	u32 in[MLX5_ST_SZ_DW(destroy_eq_in)];
	u32 out[MLX5_ST_SZ_DW(destroy_eq_out)];

	memset(in, 0, sizeof(in));

	MLX5_SET(destroy_eq_in, in, opcode, MLX5_CMD_OP_DESTROY_EQ);
	MLX5_SET(destroy_eq_in, in, eq_number, eqn);

	memset(out, 0, sizeof(out));
	return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
					  out, sizeof(out));
}

static struct mlx5_eqe *get_eqe(struct mlx5_eq *eq, u32 entry)
{
	return mlx5_buf_offset(&eq->buf, entry * MLX5_EQE_SIZE);
}
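/*
 * Return the next software-owned EQE, or NULL if the queue is empty.
 * The owner bit of each entry toggles on every pass through the ring:
 * an entry belongs to software when its owner bit equals the parity of
 * the wrap count, i.e. bit 'nent' of the consumer index, which is what
 * the XOR below tests.
 */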
static struct mlx5_eqe *next_eqe_sw(struct mlx5_eq *eq)
{
	struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & (eq->nent - 1));

	return ((eqe->owner & 1) ^ !!(eq->cons_index & eq->nent)) ? NULL : eqe;
}

static const char *eqe_type_str(u8 type)
{
	switch (type) {
	case MLX5_EVENT_TYPE_COMP:
		return "MLX5_EVENT_TYPE_COMP";
	case MLX5_EVENT_TYPE_PATH_MIG:
		return "MLX5_EVENT_TYPE_PATH_MIG";
	case MLX5_EVENT_TYPE_COMM_EST:
		return "MLX5_EVENT_TYPE_COMM_EST";
	case MLX5_EVENT_TYPE_SQ_DRAINED:
		return "MLX5_EVENT_TYPE_SQ_DRAINED";
	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
		return "MLX5_EVENT_TYPE_SRQ_LAST_WQE";
	case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
		return "MLX5_EVENT_TYPE_SRQ_RQ_LIMIT";
	case MLX5_EVENT_TYPE_CQ_ERROR:
		return "MLX5_EVENT_TYPE_CQ_ERROR";
	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
		return "MLX5_EVENT_TYPE_WQ_CATAS_ERROR";
	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
		return "MLX5_EVENT_TYPE_PATH_MIG_FAILED";
	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
		return "MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR";
	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
		return "MLX5_EVENT_TYPE_WQ_ACCESS_ERROR";
	case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
		return "MLX5_EVENT_TYPE_SRQ_CATAS_ERROR";
	case MLX5_EVENT_TYPE_INTERNAL_ERROR:
		return "MLX5_EVENT_TYPE_INTERNAL_ERROR";
	case MLX5_EVENT_TYPE_PORT_CHANGE:
		return "MLX5_EVENT_TYPE_PORT_CHANGE";
	case MLX5_EVENT_TYPE_GPIO_EVENT:
		return "MLX5_EVENT_TYPE_GPIO_EVENT";
	case MLX5_EVENT_TYPE_CODING_PORT_MODULE_EVENT:
		return "MLX5_EVENT_TYPE_PORT_MODULE_EVENT";
	case MLX5_EVENT_TYPE_REMOTE_CONFIG:
		return "MLX5_EVENT_TYPE_REMOTE_CONFIG";
	case MLX5_EVENT_TYPE_DB_BF_CONGESTION:
		return "MLX5_EVENT_TYPE_DB_BF_CONGESTION";
	case MLX5_EVENT_TYPE_STALL_EVENT:
		return "MLX5_EVENT_TYPE_STALL_EVENT";
	case MLX5_EVENT_TYPE_CMD:
		return "MLX5_EVENT_TYPE_CMD";
	case MLX5_EVENT_TYPE_PAGE_REQUEST:
		return "MLX5_EVENT_TYPE_PAGE_REQUEST";
	case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE:
		return "MLX5_EVENT_TYPE_NIC_VPORT_CHANGE";
	case MLX5_EVENT_TYPE_CODING_DCBX_CHANGE_EVENT:
		return "MLX5_EVENT_TYPE_CODING_DCBX_CHANGE_EVENT";
	case MLX5_EVENT_TYPE_CODING_GENERAL_NOTIFICATION_EVENT:
		return "MLX5_EVENT_TYPE_CODING_GENERAL_NOTIFICATION_EVENT";
	default:
		return "Unrecognized event";
	}
}

static enum mlx5_dev_event port_subtype_event(u8 subtype)
{
	switch (subtype) {
	case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
		return MLX5_DEV_EVENT_PORT_DOWN;
	case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
		return MLX5_DEV_EVENT_PORT_UP;
	case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
		return MLX5_DEV_EVENT_PORT_INITIALIZED;
	case MLX5_PORT_CHANGE_SUBTYPE_LID:
		return MLX5_DEV_EVENT_LID_CHANGE;
	case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
		return MLX5_DEV_EVENT_PKEY_CHANGE;
	case MLX5_PORT_CHANGE_SUBTYPE_GUID:
		return MLX5_DEV_EVENT_GUID_CHANGE;
	case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
		return MLX5_DEV_EVENT_CLIENT_REREG;
	}
	return -1;
}

static enum mlx5_dev_event dcbx_subevent(u8 subtype)
{
	switch (subtype) {
	case MLX5_DCBX_EVENT_SUBTYPE_ERROR_STATE_DCBX:
		return MLX5_DEV_EVENT_ERROR_STATE_DCBX;
	case MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_CHANGE:
		return MLX5_DEV_EVENT_REMOTE_CONFIG_CHANGE;
	case MLX5_DCBX_EVENT_SUBTYPE_LOCAL_OPER_CHANGE:
		return MLX5_DEV_EVENT_LOCAL_OPER_CHANGE;
	case MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_APP_PRIORITY_CHANGE:
		return MLX5_DEV_EVENT_REMOTE_CONFIG_APPLICATION_PRIORITY_CHANGE;
	}
	return -1;
}
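/*
 * Write the updated consumer index to the EQ doorbell.  The doorbell
 * exposes two 32-bit registers per EQ: writing the first (offset 0)
 * updates the CI and re-arms the EQ for further interrupts, while
 * writing two dwords later (byte offset 8, the "+ 2" below) updates
 * the CI without re-arming.  The value carries the low 24 bits of the
 * consumer index with the EQ number in the top byte.
 */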
static void eq_update_ci(struct mlx5_eq *eq, int arm)
{
	__be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
	u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);

	__raw_writel((__force u32) cpu_to_be32(val), addr);
	/* We still want ordering, just not swabbing, so add a barrier */
	mb();
}
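/*
 * Poll and dispatch all software-owned EQEs on this EQ.  Completion
 * events are forwarded to the CQ layer; asynchronous events are routed
 * to the matching consumer (QP/SRQ/CQ, command interface, port, page
 * request machinery, ...).  Returns non-zero if at least one EQE was
 * consumed.
 */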
static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	struct mlx5_eqe *eqe;
	int eqes_found = 0;
	int set_ci = 0;
	u32 cqn;
	u32 rsn;
	u8 port;

	while ((eqe = next_eqe_sw(eq))) {
		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		rmb();

		mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n",
			      eq->eqn, eqe_type_str(eqe->type));
		switch (eqe->type) {
		case MLX5_EVENT_TYPE_COMP:
			cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;
			mlx5_cq_completion(dev, cqn);
			break;

		case MLX5_EVENT_TYPE_PATH_MIG:
		case MLX5_EVENT_TYPE_COMM_EST:
		case MLX5_EVENT_TYPE_SQ_DRAINED:
		case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
		case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
		case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
		case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
		case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
			rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
			mlx5_core_dbg(dev, "event %s(%d) arrived on resource 0x%x\n",
				      eqe_type_str(eqe->type), eqe->type, rsn);
			mlx5_rsc_event(dev, rsn, eqe->type);
			break;

		case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
		case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
			rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
			mlx5_core_dbg(dev, "SRQ event %s(%d): srqn 0x%x\n",
				      eqe_type_str(eqe->type), eqe->type, rsn);
			mlx5_srq_event(dev, rsn, eqe->type);
			break;

		case MLX5_EVENT_TYPE_CMD:
			mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector));
			break;

		case MLX5_EVENT_TYPE_PORT_CHANGE:
			port = (eqe->data.port.port >> 4) & 0xf;
			switch (eqe->sub_type) {
			case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
			case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
			case MLX5_PORT_CHANGE_SUBTYPE_LID:
			case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
			case MLX5_PORT_CHANGE_SUBTYPE_GUID:
			case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
			case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
				if (dev->event)
					dev->event(dev, port_subtype_event(eqe->sub_type),
						   (unsigned long)port);
				break;
			default:
				mlx5_core_warn(dev, "Port event with unrecognized subtype: port %d, sub_type %d\n",
					       port, eqe->sub_type);
			}
			break;

		case MLX5_EVENT_TYPE_CODING_DCBX_CHANGE_EVENT:
			port = (eqe->data.port.port >> 4) & 0xf;
			switch (eqe->sub_type) {
			case MLX5_DCBX_EVENT_SUBTYPE_ERROR_STATE_DCBX:
			case MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_CHANGE:
			case MLX5_DCBX_EVENT_SUBTYPE_LOCAL_OPER_CHANGE:
			case MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_APP_PRIORITY_CHANGE:
				if (dev->event)
					dev->event(dev,
						   dcbx_subevent(eqe->sub_type),
						   0);
				break;
			default:
				mlx5_core_warn(dev,
					       "dcbx event with unrecognized subtype: port %d, sub_type %d\n",
					       port, eqe->sub_type);
			}
			break;

		case MLX5_EVENT_TYPE_CODING_GENERAL_NOTIFICATION_EVENT:
			mlx5_port_general_notification_event(dev, eqe);
			break;

		case MLX5_EVENT_TYPE_CQ_ERROR:
			cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
			mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",
				       cqn, eqe->data.cq_err.syndrome);
			mlx5_cq_event(dev, cqn, eqe->type);
			break;

		case MLX5_EVENT_TYPE_PAGE_REQUEST:
			{
				u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
				s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages);

				mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n",
					      func_id, npages);
				mlx5_core_req_pages_handler(dev, func_id, npages);
			}
			break;

		case MLX5_EVENT_TYPE_CODING_PORT_MODULE_EVENT:
			mlx5_port_module_event(dev, eqe);
			break;

		case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE:
			{
				struct mlx5_eqe_vport_change *vc_eqe =
					&eqe->data.vport_change;
				u16 vport_num = be16_to_cpu(vc_eqe->vport_num);

				if (dev->event)
					dev->event(dev,
						   MLX5_DEV_EVENT_VPORT_CHANGE,
						   (unsigned long)vport_num);
			}
			break;

		default:
			mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n",
				       eqe->type, eq->eqn);
			break;
		}

		++eq->cons_index;
		eqes_found = 1;
		++set_ci;

		/*
		 * The HCA will think the queue has overflowed if we
		 * don't tell it we've been processing events.  We
		 * create our EQs with MLX5_NUM_SPARE_EQE extra
		 * entries, so we must update our consumer index at
		 * least that often.
		 */
		if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) {
			eq_update_ci(eq, 0);
			set_ci = 0;
		}
	}

	eq_update_ci(eq, 1);

	return eqes_found;
}

static irqreturn_t mlx5_msix_handler(int irq, void *eq_ptr)
{
	struct mlx5_eq *eq = eq_ptr;
	struct mlx5_core_dev *dev = eq->dev;

	mlx5_eq_int(dev, eq);

	/* MSI-X vectors always belong to us */
	return IRQ_HANDLED;
}

/* Set the initial owner bit on every entry so hardware owns the whole ring */
static void init_eq_buf(struct mlx5_eq *eq)
{
	struct mlx5_eqe *eqe;
	int i;

	for (i = 0; i < eq->nent; i++) {
		eqe = get_eqe(eq, i);
		eqe->owner = MLX5_EQE_OWNER_INIT_VAL;
	}
}
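/*
 * Allocate and initialize an EQ buffer, create the EQ in firmware with
 * the given event mask, and attach MSI-X vector 'vecidx' to it.  The
 * requested entry count is padded with MLX5_NUM_SPARE_EQE entries and
 * rounded up to a power of two, so the consumer index can lag behind
 * the hardware producer without the queue overflowing.
 */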
int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
		       int nent, u64 mask, const char *name, struct mlx5_uar *uar)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_create_eq_mbox_in *in;
	struct mlx5_create_eq_mbox_out out;
	int err;
	int inlen;

	eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE);
	err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, 2 * PAGE_SIZE,
			     &eq->buf);
	if (err)
		return err;

	init_eq_buf(eq);

	inlen = sizeof(*in) + sizeof(in->pas[0]) * eq->buf.npages;
	in = mlx5_vzalloc(inlen);
	if (!in) {
		err = -ENOMEM;
		goto err_buf;
	}
	memset(&out, 0, sizeof(out));

	mlx5_fill_page_array(&eq->buf, in->pas);

	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_EQ);
	in->ctx.log_sz_usr_page = cpu_to_be32(ilog2(eq->nent) << 24 | uar->index);
	in->ctx.intr = vecidx;
	in->ctx.log_page_size = eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT;
	in->events_mask = cpu_to_be64(mask);

	err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
	if (err)
		goto err_in;

	if (out.hdr.status) {
		err = mlx5_cmd_status_to_err(&out.hdr);
		goto err_in;
	}

	eq->eqn = out.eq_number;
	eq->irqn = vecidx;
	eq->dev = dev;
	eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET;
	snprintf(priv->irq_info[vecidx].name, MLX5_MAX_IRQ_NAME, "%s@pci:%s",
		 name, pci_name(dev->pdev));
	err = request_irq(priv->msix_arr[vecidx].vector, mlx5_msix_handler, 0,
			  priv->irq_info[vecidx].name, eq);
	if (err)
		goto err_eq;
#ifdef RSS
	if (vecidx >= MLX5_EQ_VEC_COMP_BASE) {
		u8 bucket = vecidx - MLX5_EQ_VEC_COMP_BASE;

		err = bind_irq_to_cpu(priv->msix_arr[vecidx].vector,
				      rss_getcpu(bucket % rss_getnumbuckets()));
		if (err)
			goto err_irq;
	}
#else
	/* Keep the err_irq label referenced when RSS is compiled out */
	if (0)
		goto err_irq;
#endif

	/* EQs are created in ARMED state */
	eq_update_ci(eq, 1);

	kvfree(in);
	return 0;

err_irq:
	free_irq(priv->msix_arr[vecidx].vector, eq);

err_eq:
	mlx5_cmd_destroy_eq(dev, eq->eqn);

err_in:
	kvfree(in);

err_buf:
	mlx5_buf_free(dev, &eq->buf);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_create_map_eq);

int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	int err;

	free_irq(dev->priv.msix_arr[eq->irqn].vector, eq);
	err = mlx5_cmd_destroy_eq(dev, eq->eqn);
	if (err)
		mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
			       eq->eqn);
	mlx5_buf_free(dev, &eq->buf);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_destroy_unmap_eq);

int mlx5_eq_init(struct mlx5_core_dev *dev)
{
	spin_lock_init(&dev->priv.eq_table.lock);

	return 0;
}

void mlx5_eq_cleanup(struct mlx5_core_dev *dev)
{
}

int mlx5_start_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	u64 async_event_mask = MLX5_ASYNC_EVENT_MASK;
	int err;

	if (MLX5_CAP_GEN(dev, port_module_event))
		async_event_mask |= (1ull <<
				     MLX5_EVENT_TYPE_CODING_PORT_MODULE_EVENT);

	if (MLX5_CAP_GEN(dev, nic_vport_change_event))
		async_event_mask |= (1ull <<
				     MLX5_EVENT_TYPE_NIC_VPORT_CHANGE);

	if (MLX5_CAP_GEN(dev, dcbx))
		async_event_mask |= (1ull <<
				     MLX5_EVENT_TYPE_CODING_DCBX_CHANGE_EVENT);

	err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
				 MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD,
				 "mlx5_cmd_eq", &dev->priv.uuari.uars[0]);
	if (err) {
		mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err);
		return err;
	}

	mlx5_cmd_use_events(dev);

	err = mlx5_create_map_eq(dev, &table->async_eq, MLX5_EQ_VEC_ASYNC,
				 MLX5_NUM_ASYNC_EQE, async_event_mask,
				 "mlx5_async_eq", &dev->priv.uuari.uars[0]);
	if (err) {
		mlx5_core_warn(dev, "failed to create async EQ %d\n", err);
		goto err1;
	}

	err = mlx5_create_map_eq(dev, &table->pages_eq,
				 MLX5_EQ_VEC_PAGES,
				 /* TODO: sriov max_vf + */ 1,
				 1 << MLX5_EVENT_TYPE_PAGE_REQUEST, "mlx5_pages_eq",
				 &dev->priv.uuari.uars[0]);
	if (err) {
		mlx5_core_warn(dev, "failed to create pages EQ %d\n", err);
		goto err2;
	}

	return err;

err2:
	mlx5_destroy_unmap_eq(dev, &table->async_eq);

err1:
	mlx5_cmd_use_polling(dev);
	mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
	return err;
}

int mlx5_stop_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	int err;

	err = mlx5_destroy_unmap_eq(dev, &table->pages_eq);
	if (err)
		return err;

	mlx5_destroy_unmap_eq(dev, &table->async_eq);
	mlx5_cmd_use_polling(dev);

	err = mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
	if (err)
		mlx5_cmd_use_events(dev);

	return err;
}
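/*
 * Query an EQ's context from firmware via the QUERY_EQ command and
 * copy the result into 'out' (of size 'outlen').
 */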
int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
		       struct mlx5_query_eq_mbox_out *out, int outlen)
{
	struct mlx5_query_eq_mbox_in in;
	int err;

	memset(&in, 0, sizeof(in));
	memset(out, 0, outlen);
	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_EQ);
	in.eqn = eq->eqn;
	err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
	if (err)
		return err;

	if (out->hdr.status)
		err = mlx5_cmd_status_to_err(&out->hdr);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_eq_query);

static const char *mlx5_port_module_event_error_type_to_string(u8 error_type)
{
	switch (error_type) {
	case MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED:
		return "Power Budget Exceeded";
	case MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX_CABLE_MODULE:
		return "Long Range for non MLNX cable/module";
	case MLX5_MODULE_EVENT_ERROR_BUS_STUCK:
		return "Bus stuck (I2C or data shorted)";
	case MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT:
		return "No EEPROM/retry timeout";
	case MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST:
		return "Enforce part number list";
	case MLX5_MODULE_EVENT_ERROR_UNSUPPORTED_CABLE:
		return "Unsupported Cable";
	case MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE:
		return "High Temperature";
	case MLX5_MODULE_EVENT_ERROR_CABLE_IS_SHORTED:
		return "Cable is shorted";
	default:
		return "Unknown error type";
	}
}

unsigned int mlx5_query_module_status(struct mlx5_core_dev *dev, int module_num)
{
	if (module_num < 0 || module_num >= MLX5_MAX_PORTS)
		return 0;		/* undefined */
	return dev->module_status[module_num];
}

static void mlx5_port_module_event(struct mlx5_core_dev *dev,
				   struct mlx5_eqe *eqe)
{
	unsigned int module_num;
	unsigned int module_status;
	unsigned int error_type;
	struct mlx5_eqe_port_module_event *module_event_eqe;
	struct pci_dev *pdev = dev->pdev;

	module_event_eqe = &eqe->data.port_module_event;

	module_num = (unsigned int)module_event_eqe->module;
	module_status = (unsigned int)module_event_eqe->module_status &
	    PORT_MODULE_EVENT_MODULE_STATUS_MASK;
	error_type = (unsigned int)module_event_eqe->error_type &
	    PORT_MODULE_EVENT_ERROR_TYPE_MASK;

	switch (module_status) {
	case MLX5_MODULE_STATUS_PLUGGED_ENABLED:
		device_printf(pdev->dev.bsddev,
		    "INFO: Module %u, status: plugged and enabled\n",
		    module_num);
		break;

	case MLX5_MODULE_STATUS_UNPLUGGED:
		device_printf(pdev->dev.bsddev,
		    "INFO: Module %u, status: unplugged\n", module_num);
		break;

	case MLX5_MODULE_STATUS_ERROR:
		device_printf(pdev->dev.bsddev,
		    "INFO: Module %u, status: error, %s\n", module_num,
		    mlx5_port_module_event_error_type_to_string(error_type));
		break;

	case MLX5_MODULE_STATUS_PLUGGED_DISABLED:
		device_printf(pdev->dev.bsddev,
		    "INFO: Module %u, status: plugged but disabled\n",
		    module_num);
		break;

	default:
		device_printf(pdev->dev.bsddev,
		    "INFO: Module %u, unknown status\n", module_num);
	}
	/* store module status */
	if (module_num < MLX5_MAX_PORTS)
		dev->module_status[module_num] = module_status;
}
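/*
 * Handle MLX5_EVENT_TYPE_CODING_GENERAL_NOTIFICATION_EVENT.  The only
 * recognized subtype is the delay-drop timeout, from which the RQ user
 * index is decoded; other subtypes are logged and ignored.
 */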
static void mlx5_port_general_notification_event(struct mlx5_core_dev *dev,
						 struct mlx5_eqe *eqe)
{
	u8 port = (eqe->data.port.port >> 4) & 0xf;
	u32 rqn = 0;
	struct mlx5_eqe_general_notification_event *general_event = NULL;

	switch (eqe->sub_type) {
	case MLX5_GEN_EVENT_SUBTYPE_DELAY_DROP_TIMEOUT:
		general_event = &eqe->data.general_notifications;
		rqn = be32_to_cpu(general_event->rq_user_index_delay_drop) &
		    0xffffff;
		break;
	default:
		mlx5_core_warn(dev,
			       "general event with unrecognized subtype: port %d, sub_type %d\n",
			       port, eqe->sub_type);
		break;
	}
}