1290650Shselasky/*- 2290650Shselasky * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved. 3290650Shselasky * 4290650Shselasky * Redistribution and use in source and binary forms, with or without 5290650Shselasky * modification, are permitted provided that the following conditions 6290650Shselasky * are met: 7290650Shselasky * 1. Redistributions of source code must retain the above copyright 8290650Shselasky * notice, this list of conditions and the following disclaimer. 9290650Shselasky * 2. Redistributions in binary form must reproduce the above copyright 10290650Shselasky * notice, this list of conditions and the following disclaimer in the 11290650Shselasky * documentation and/or other materials provided with the distribution. 12290650Shselasky * 13290650Shselasky * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND 14290650Shselasky * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15290650Shselasky * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 16290650Shselasky * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE 17290650Shselasky * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 18290650Shselasky * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 19290650Shselasky * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 20290650Shselasky * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 21290650Shselasky * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 22290650Shselasky * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 23290650Shselasky * SUCH DAMAGE. 
 *
 * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_core/mlx5_eq.c 339713 2018-10-25 14:12:48Z slavash $
 */

/*
 * Event Queue (EQ) support for the mlx5 core driver: EQ buffer setup,
 * CREATE_EQ/DESTROY_EQ firmware commands, the MSI-X interrupt handler
 * that walks hardware event queue entries (EQEs), and dispatch of each
 * event type to the matching completion / async / page-request handler.
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include <dev/mlx5/driver.h>
#include <dev/mlx5/mlx5_ifc.h>
#include "mlx5_core.h"

#if (__FreeBSD_version >= 1100000)
#include "opt_rss.h"
#endif

#ifdef RSS
#include <net/rss_config.h>
#include <netinet/in_rss.h>
#endif

enum {
	MLX5_EQE_SIZE = sizeof(struct mlx5_eqe),	/* size of one EQ entry */
	MLX5_EQE_OWNER_INIT_VAL = 0x1,	/* owner bit value for freshly initialized EQEs */
};

enum {
	MLX5_NUM_SPARE_EQE = 0x80,	/* extra EQEs so the HCA never sees an overflow */
	MLX5_NUM_ASYNC_EQE = 0x100,	/* depth of the async event EQ */
	MLX5_NUM_CMD_EQE = 32,		/* depth of the command-completion EQ */
};

enum {
	MLX5_EQ_DOORBEL_OFFSET = 0x40,	/* EQ doorbell offset within the UAR page */
};

/* Default set of async events the async EQ subscribes to. */
#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG) | \
			       (1ull << MLX5_EVENT_TYPE_COMM_EST) | \
			       (1ull << MLX5_EVENT_TYPE_SQ_DRAINED) | \
			       (1ull << MLX5_EVENT_TYPE_CQ_ERROR) | \
			       (1ull << MLX5_EVENT_TYPE_WQ_CATAS_ERROR) | \
			       (1ull << MLX5_EVENT_TYPE_PATH_MIG_FAILED) | \
			       (1ull << MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
			       (1ull << MLX5_EVENT_TYPE_WQ_ACCESS_ERROR) | \
			       (1ull << MLX5_EVENT_TYPE_PORT_CHANGE) | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_CATAS_ERROR) | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_LAST_WQE) | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_RQ_LIMIT))

/*
 * NOTE(review): these two structs look like firmware command layouts for
 * mapping/destroying an EQ; they are unused in this file — confirm against
 * the adapter PRM before removing.
 */
struct map_eq_in {
	u64	mask;
	u32	reserved;
	u32	unmap_eqn;
};

struct cre_des_eq {
	u8	reserved[15];
	u8	eqn;
};

/* Forward declaration: defined near the bottom of this file. */
static void mlx5_port_module_event(struct mlx5_core_dev *dev,
				   struct mlx5_eqe *eqe);

/*
 * Issue the DESTROY_EQ firmware command for EQ number 'eqn'.
 * Returns 0 on success or an error from the command interface.
 */
static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
{
	u32 in[MLX5_ST_SZ_DW(destroy_eq_in)];
	u32 out[MLX5_ST_SZ_DW(destroy_eq_out)];

	memset(in, 0, sizeof(in));

	MLX5_SET(destroy_eq_in, in, opcode, MLX5_CMD_OP_DESTROY_EQ);
	MLX5_SET(destroy_eq_in, in, eq_number, eqn);

	memset(out, 0, sizeof(out));
	return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
					  out, sizeof(out));
}

/* Return a pointer to EQE number 'entry' within the EQ buffer. */
static struct mlx5_eqe *get_eqe(struct mlx5_eq *eq, u32 entry)
{
	return mlx5_buf_offset(&eq->buf, entry * MLX5_EQE_SIZE);
}

/*
 * Return the EQE at the current consumer index if software owns it,
 * or NULL if hardware still owns it.  The expected owner-bit value
 * flips every time cons_index wraps the (power-of-two) queue, which
 * is what the XOR with the 'nent' bit of cons_index tests.
 */
static struct mlx5_eqe *next_eqe_sw(struct mlx5_eq *eq)
{
	struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & (eq->nent - 1));

	return ((eqe->owner & 1) ^ !!(eq->cons_index & eq->nent)) ? NULL : eqe;
}

/* Map an EQE type code to a printable name for debug/warn messages. */
static const char *eqe_type_str(u8 type)
{
	switch (type) {
	case MLX5_EVENT_TYPE_COMP:
		return "MLX5_EVENT_TYPE_COMP";
	case MLX5_EVENT_TYPE_PATH_MIG:
		return "MLX5_EVENT_TYPE_PATH_MIG";
	case MLX5_EVENT_TYPE_COMM_EST:
		return "MLX5_EVENT_TYPE_COMM_EST";
	case MLX5_EVENT_TYPE_SQ_DRAINED:
		return "MLX5_EVENT_TYPE_SQ_DRAINED";
	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
		return "MLX5_EVENT_TYPE_SRQ_LAST_WQE";
	case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
		return "MLX5_EVENT_TYPE_SRQ_RQ_LIMIT";
	case MLX5_EVENT_TYPE_CQ_ERROR:
		return "MLX5_EVENT_TYPE_CQ_ERROR";
	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
		return "MLX5_EVENT_TYPE_WQ_CATAS_ERROR";
	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
		return "MLX5_EVENT_TYPE_PATH_MIG_FAILED";
	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
		return "MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR";
	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
		return "MLX5_EVENT_TYPE_WQ_ACCESS_ERROR";
	case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
		return "MLX5_EVENT_TYPE_SRQ_CATAS_ERROR";
	case MLX5_EVENT_TYPE_INTERNAL_ERROR:
		return "MLX5_EVENT_TYPE_INTERNAL_ERROR";
	case MLX5_EVENT_TYPE_PORT_CHANGE:
		return "MLX5_EVENT_TYPE_PORT_CHANGE";
	case MLX5_EVENT_TYPE_GPIO_EVENT:
		return "MLX5_EVENT_TYPE_GPIO_EVENT";
	case MLX5_EVENT_TYPE_CODING_PORT_MODULE_EVENT:
		return "MLX5_EVENT_TYPE_PORT_MODULE_EVENT";
	case MLX5_EVENT_TYPE_REMOTE_CONFIG:
		return "MLX5_EVENT_TYPE_REMOTE_CONFIG";
	case MLX5_EVENT_TYPE_DB_BF_CONGESTION:
		return "MLX5_EVENT_TYPE_DB_BF_CONGESTION";
	case MLX5_EVENT_TYPE_STALL_EVENT:
		return "MLX5_EVENT_TYPE_STALL_EVENT";
	case MLX5_EVENT_TYPE_CMD:
		return "MLX5_EVENT_TYPE_CMD";
	case MLX5_EVENT_TYPE_PAGE_REQUEST:
		return "MLX5_EVENT_TYPE_PAGE_REQUEST";
	case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE:
		return "MLX5_EVENT_TYPE_NIC_VPORT_CHANGE";
	case MLX5_EVENT_TYPE_CODING_DCBX_CHANGE_EVENT:
		return "MLX5_EVENT_TYPE_CODING_DCBX_CHANGE_EVENT";
	default:
		return "Unrecognized event";
	}
}

/*
 * Translate a PORT_CHANGE EQE subtype into the driver-level event code
 * delivered through dev->event().  Unknown subtypes yield -1 (callers
 * only pass subtypes already validated in mlx5_eq_int()).
 */
static enum mlx5_dev_event port_subtype_event(u8 subtype)
{
	switch (subtype) {
	case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
		return MLX5_DEV_EVENT_PORT_DOWN;
	case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
		return MLX5_DEV_EVENT_PORT_UP;
	case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
		return MLX5_DEV_EVENT_PORT_INITIALIZED;
	case MLX5_PORT_CHANGE_SUBTYPE_LID:
		return MLX5_DEV_EVENT_LID_CHANGE;
	case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
		return MLX5_DEV_EVENT_PKEY_CHANGE;
	case MLX5_PORT_CHANGE_SUBTYPE_GUID:
		return MLX5_DEV_EVENT_GUID_CHANGE;
	case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
		return MLX5_DEV_EVENT_CLIENT_REREG;
	}
	return -1;
}

/*
 * Translate a DCBX change EQE subtype into the driver-level event code.
 * Unknown subtypes yield -1 (callers pre-validate in mlx5_eq_int()).
 */
static enum mlx5_dev_event dcbx_subevent(u8 subtype)
{
	switch (subtype) {
	case MLX5_DCBX_EVENT_SUBTYPE_ERROR_STATE_DCBX:
		return MLX5_DEV_EVENT_ERROR_STATE_DCBX;
	case MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_CHANGE:
		return MLX5_DEV_EVENT_REMOTE_CONFIG_CHANGE;
	case MLX5_DCBX_EVENT_SUBTYPE_LOCAL_OPER_CHANGE:
		return MLX5_DEV_EVENT_LOCAL_OPER_CHANGE;
	case MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_APP_PRIORITY_CHANGE:
		return MLX5_DEV_EVENT_REMOTE_CONFIG_APPLICATION_PRIORITY_CHANGE;
	}
	return -1;
}

/*
 * Ring the EQ doorbell with the current consumer index.  'arm' selects
 * the arm register (offset 0, re-enables event reporting) versus the
 * plain consumer-index update register (offset 2).
 */
static void eq_update_ci(struct mlx5_eq *eq, int arm)
{
	__be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
	u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);
	__raw_writel((__force u32) cpu_to_be32(val), addr);
	/* We still want ordering, just not swabbing, so add a barrier */
	mb();
}

/*
 * Drain all software-owned EQEs from 'eq' and dispatch each one by type.
 * Updates the consumer index every MLX5_NUM_SPARE_EQE entries so the HCA
 * never concludes the queue overflowed, and re-arms the EQ on exit.
 * Returns 1 if at least one EQE was processed, 0 otherwise.
 */
static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	struct mlx5_eqe *eqe;
	int eqes_found = 0;
	int set_ci = 0;
	u32 cqn;
	u32 rsn;
	u8 port;

	while ((eqe = next_eqe_sw(eq))) {
		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		rmb();

		mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n",
			      eq->eqn, eqe_type_str(eqe->type));
		switch (eqe->type) {
		case MLX5_EVENT_TYPE_COMP:
			/* Completion event: hand the CQ number to the CQ layer. */
			cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;
			mlx5_cq_completion(dev, cqn);
			break;

		case MLX5_EVENT_TYPE_PATH_MIG:
		case MLX5_EVENT_TYPE_COMM_EST:
		case MLX5_EVENT_TYPE_SQ_DRAINED:
		case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
		case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
		case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
		case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
		case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
			/* QP/SRQ resource events, keyed by resource number. */
			rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
			mlx5_core_dbg(dev, "event %s(%d) arrived on resource 0x%x\n",
				      eqe_type_str(eqe->type), eqe->type, rsn);
			mlx5_rsc_event(dev, rsn, eqe->type);
			break;

		case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
		case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
			rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
			mlx5_core_dbg(dev, "SRQ event %s(%d): srqn 0x%x\n",
				      eqe_type_str(eqe->type), eqe->type, rsn);
			mlx5_srq_event(dev, rsn, eqe->type);
			break;

		case MLX5_EVENT_TYPE_CMD:
			/* Command completion: the vector is a bitmask of done commands. */
			mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector));
			break;

		case MLX5_EVENT_TYPE_PORT_CHANGE:
			port = (eqe->data.port.port >> 4) & 0xf;
			switch (eqe->sub_type) {
			case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
			case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
			case MLX5_PORT_CHANGE_SUBTYPE_LID:
			case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
			case MLX5_PORT_CHANGE_SUBTYPE_GUID:
			case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
			case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
				if (dev->event)
					dev->event(dev, port_subtype_event(eqe->sub_type),
						   (unsigned long)port);
				break;
			default:
				mlx5_core_warn(dev, "Port event with unrecognized subtype: port %d, sub_type %d\n",
					       port, eqe->sub_type);
			}
			break;

		case MLX5_EVENT_TYPE_CODING_DCBX_CHANGE_EVENT:
			port = (eqe->data.port.port >> 4) & 0xf;
			switch (eqe->sub_type) {
			case MLX5_DCBX_EVENT_SUBTYPE_ERROR_STATE_DCBX:
			case MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_CHANGE:
			case MLX5_DCBX_EVENT_SUBTYPE_LOCAL_OPER_CHANGE:
			case MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_APP_PRIORITY_CHANGE:
				if (dev->event)
					dev->event(dev,
						   dcbx_subevent(eqe->sub_type),
						   0);
				break;
			default:
				mlx5_core_warn(dev,
					       "dcbx event with unrecognized subtype: port %d, sub_type %d\n",
					       port, eqe->sub_type);
			}
			break;

		case MLX5_EVENT_TYPE_CQ_ERROR:
			cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
			mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrom 0x%x\n",
				       cqn, eqe->data.cq_err.syndrome);
			mlx5_cq_event(dev, cqn, eqe->type);
			break;

		case MLX5_EVENT_TYPE_PAGE_REQUEST:
			{
				/* Firmware asks for (npages > 0) or returns (npages < 0) pages. */
				u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
				s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages);

				mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n",
					      func_id, npages);
				mlx5_core_req_pages_handler(dev, func_id, npages);
			}
			break;

		case MLX5_EVENT_TYPE_CODING_PORT_MODULE_EVENT:
			mlx5_port_module_event(dev, eqe);
			break;

		case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE:
			{
				struct mlx5_eqe_vport_change *vc_eqe =
						&eqe->data.vport_change;
				u16 vport_num = be16_to_cpu(vc_eqe->vport_num);

				if (dev->event)
					dev->event(dev,
						   MLX5_DEV_EVENT_VPORT_CHANGE,
						   (unsigned long)vport_num);
			}
			break;

		default:
			mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n",
				       eqe->type, eq->eqn);
			break;
		}

		++eq->cons_index;
		eqes_found = 1;
		++set_ci;

		/* The HCA will think the queue has overflowed if we
		 * don't tell it we've been processing events.  We
		 * create our EQs with MLX5_NUM_SPARE_EQE extra
		 * entries, so we must update our consumer index at
		 * least that often.
		 */
		if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) {
			eq_update_ci(eq, 0);
			set_ci = 0;
		}
	}

	/* Publish the final consumer index and re-arm the EQ. */
	eq_update_ci(eq, 1);

	return eqes_found;
}

/*
 * MSI-X interrupt handler: one EQ per vector, registered in
 * mlx5_create_map_eq() with the EQ as the handler argument.
 */
static irqreturn_t mlx5_msix_handler(int irq, void *eq_ptr)
{
	struct mlx5_eq *eq = eq_ptr;
	struct mlx5_core_dev *dev = eq->dev;

	mlx5_eq_int(dev, eq);

	/* MSI-X vectors always belong to us */
	return IRQ_HANDLED;
}

/*
 * Set the owner bit of every EQE so that all entries initially appear
 * hardware-owned to next_eqe_sw().
 */
static void init_eq_buf(struct mlx5_eq *eq)
{
	struct mlx5_eqe *eqe;
	int i;

	for (i = 0; i < eq->nent; i++) {
		eqe = get_eqe(eq, i);
		eqe->owner = MLX5_EQE_OWNER_INIT_VAL;
	}
}

/*
 * Allocate an EQ buffer, create the EQ in firmware, and hook it up to
 * MSI-X vector 'vecidx'.
 *
 * @dev:    core device
 * @eq:     caller-provided EQ state to fill in
 * @vecidx: MSI-X vector index (also used as the firmware interrupt index)
 * @nent:   requested number of entries; rounded up to a power of two
 *          after adding MLX5_NUM_SPARE_EQE
 * @mask:   bitmask of event types this EQ subscribes to
 * @name:   name used for the IRQ ("name@pci:<dev>")
 * @uar:    UAR page supplying the doorbell mapping
 *
 * On success the EQ is left armed.  Returns 0 or a negative errno;
 * all intermediate resources are released on failure via the goto
 * cleanup chain.  With RSS, completion-vector IRQs are additionally
 * bound to the CPU of the corresponding RSS bucket.
 */
int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
		       int nent, u64 mask, const char *name, struct mlx5_uar *uar)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_create_eq_mbox_in *in;
	struct mlx5_create_eq_mbox_out out;
	int err;
	int inlen;

	eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE);
	err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, 2 * PAGE_SIZE,
			     &eq->buf);
	if (err)
		return err;

	init_eq_buf(eq);

	/* Mailbox is variable-length: one PAS entry per buffer page. */
	inlen = sizeof(*in) + sizeof(in->pas[0]) * eq->buf.npages;
	in = mlx5_vzalloc(inlen);
	if (!in) {
		err = -ENOMEM;
		goto err_buf;
	}
	memset(&out, 0, sizeof(out));

	mlx5_fill_page_array(&eq->buf, in->pas);

	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_EQ);
	in->ctx.log_sz_usr_page = cpu_to_be32(ilog2(eq->nent) << 24 | uar->index);
	in->ctx.intr = vecidx;
	in->ctx.log_page_size = eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT;
	in->events_mask = cpu_to_be64(mask);

	err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
	if (err)
		goto err_in;

	if (out.hdr.status) {
		err = mlx5_cmd_status_to_err(&out.hdr);
		goto err_in;
	}

	eq->eqn = out.eq_number;
	eq->irqn = vecidx;
	eq->dev = dev;
	eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET;
	snprintf(priv->irq_info[vecidx].name, MLX5_MAX_IRQ_NAME, "%s@pci:%s",
		 name, pci_name(dev->pdev));
	err = request_irq(priv->msix_arr[vecidx].vector, mlx5_msix_handler, 0,
			  priv->irq_info[vecidx].name, eq);
	if (err)
		goto err_eq;
#ifdef RSS
	if (vecidx >= MLX5_EQ_VEC_COMP_BASE) {
		u8 bucket = vecidx - MLX5_EQ_VEC_COMP_BASE;
		err = bind_irq_to_cpu(priv->msix_arr[vecidx].vector,
				      rss_getcpu(bucket % rss_getnumbuckets()));
		if (err)
			goto err_irq;
	}
#else
	/* Keep the err_irq label referenced when RSS is compiled out. */
	if (0)
		goto err_irq;
#endif


	/* EQs are created in ARMED state
	 */
	eq_update_ci(eq, 1);

	kvfree(in);
	return 0;

err_irq:
	free_irq(priv->msix_arr[vecidx].vector, eq);

err_eq:
	mlx5_cmd_destroy_eq(dev, eq->eqn);

err_in:
	kvfree(in);

err_buf:
	mlx5_buf_free(dev, &eq->buf);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_create_map_eq);

/*
 * Tear down an EQ created by mlx5_create_map_eq(): release the IRQ,
 * destroy the EQ in firmware, and free the EQ buffer.  Returns the
 * DESTROY_EQ command status (the buffer is freed regardless).
 */
int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	int err;

	free_irq(dev->priv.msix_arr[eq->irqn].vector, eq);
	err = mlx5_cmd_destroy_eq(dev, eq->eqn);
	if (err)
		mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
			       eq->eqn);
	mlx5_buf_free(dev, &eq->buf);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_destroy_unmap_eq);

/* Initialize EQ-table state; currently only the lock.  Always returns 0. */
int mlx5_eq_init(struct mlx5_core_dev *dev)
{
	int err;

	spin_lock_init(&dev->priv.eq_table.lock);

	err = 0;

	return err;
}


/* Counterpart of mlx5_eq_init(); nothing to release at present. */
void mlx5_eq_cleanup(struct mlx5_core_dev *dev)
{
}

/*
 * Create the three core EQs in order: command, async, and page-request.
 * The async event mask is extended with port-module, NIC-vport-change
 * and DCBX events when the device capabilities advertise them.  Command
 * completions switch from polling to event mode once the command EQ
 * exists; the goto chain unwinds in reverse order on failure.
 */
int mlx5_start_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	u32 async_event_mask = MLX5_ASYNC_EVENT_MASK;
	int err;

	if (MLX5_CAP_GEN(dev, port_module_event))
		async_event_mask |= (1ull <<
				     MLX5_EVENT_TYPE_CODING_PORT_MODULE_EVENT);

	if (MLX5_CAP_GEN(dev, nic_vport_change_event))
		async_event_mask |= (1ull <<
				     MLX5_EVENT_TYPE_NIC_VPORT_CHANGE);

	if (MLX5_CAP_GEN(dev, dcbx))
		async_event_mask |= (1ull <<
				     MLX5_EVENT_TYPE_CODING_DCBX_CHANGE_EVENT);

	err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
				 MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD,
				 "mlx5_cmd_eq", &dev->priv.uuari.uars[0]);
	if (err) {
		mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err);
		return err;
	}

	mlx5_cmd_use_events(dev);

	err = mlx5_create_map_eq(dev, &table->async_eq, MLX5_EQ_VEC_ASYNC,
				 MLX5_NUM_ASYNC_EQE, async_event_mask,
				 "mlx5_async_eq", &dev->priv.uuari.uars[0]);
	if (err) {
		mlx5_core_warn(dev, "failed to create async EQ %d\n", err);
		goto err1;
	}

	err = mlx5_create_map_eq(dev, &table->pages_eq,
				 MLX5_EQ_VEC_PAGES,
				 /* TODO: sriov max_vf + */ 1,
				 1 << MLX5_EVENT_TYPE_PAGE_REQUEST, "mlx5_pages_eq",
				 &dev->priv.uuari.uars[0]);
	if (err) {
		mlx5_core_warn(dev, "failed to create pages EQ %d\n", err);
		goto err2;
	}

	return err;

err2:
	mlx5_destroy_unmap_eq(dev, &table->async_eq);

err1:
	mlx5_cmd_use_polling(dev);
	mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
	return err;
}

/*
 * Destroy the core EQs in reverse creation order.  Command completions
 * fall back to polling before the command EQ goes away; if destroying
 * the command EQ fails, event mode is restored so commands keep working.
 */
int mlx5_stop_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	int err;

	err = mlx5_destroy_unmap_eq(dev, &table->pages_eq);
	if (err)
		return err;

	mlx5_destroy_unmap_eq(dev, &table->async_eq);
	mlx5_cmd_use_polling(dev);

	err = mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
	if (err)
		mlx5_cmd_use_events(dev);

	return err;
}

/*
 * Execute the QUERY_EQ firmware command for 'eq', filling the caller's
 * 'out' mailbox (of 'outlen' bytes).  Returns 0 on success or an error
 * from the command interface / command status.
 */
int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
		       struct mlx5_query_eq_mbox_out *out, int outlen)
{
	struct mlx5_query_eq_mbox_in in;
	int err;

	memset(&in, 0, sizeof(in));
	memset(out, 0, outlen);
	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_EQ);
	in.eqn = eq->eqn;
	err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
	if (err)
		return err;

	if (out->hdr.status)
		err = mlx5_cmd_status_to_err(&out->hdr);

	return err;
}

EXPORT_SYMBOL_GPL(mlx5_core_eq_query);

/* Map a port-module error code to a human-readable description. */
static const char *mlx5_port_module_event_error_type_to_string(u8 error_type)
{
	switch (error_type) {
	case MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED:
		return "Power Budget Exceeded";
	case MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX_CABLE_MODULE:
		return "Long Range for non MLNX cable/module";
	case MLX5_MODULE_EVENT_ERROR_BUS_STUCK:
		return "Bus stuck(I2C or data shorted)";
	case MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT:
		return "No EEPROM/retry timeout";
	case MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST:
		return "Enforce part number list";
	case MLX5_MODULE_EVENT_ERROR_UNKNOWN_IDENTIFIER:
		return "Unknown identifier";
	case MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE:
		return "High Temperature";
	case MLX5_MODULE_EVENT_ERROR_CABLE_IS_SHORTED:
		return "Cable is shorted";
	case MLX5_MODULE_EVENT_ERROR_PCIE_SYSTEM_POWER_SLOT_EXCEEDED:
		return "One or more network ports have been powered "
		    "down due to insufficient/unadvertised power on "
		    "the PCIe slot. Please refer to the card's user "
		    "manual for power specifications or contact "
		    "Mellanox support.";

	default:
		return "Unknown error type";
	}
}

/*
 * Return the last module status recorded by mlx5_port_module_event()
 * for 'module_num', or 0 (undefined) for an out-of-range module index.
 */
unsigned int mlx5_query_module_status(struct mlx5_core_dev *dev, int module_num)
{
	if (module_num < 0 || module_num >= MLX5_MAX_PORTS)
		return 0;		/* undefined */
	return dev->module_status[module_num];
}

/*
 * Handle a PORT_MODULE EQE: log the module plug/unplug/error status
 * (with a decoded error string for errors) and cache the status so
 * mlx5_query_module_status() can report it later.
 */
static void mlx5_port_module_event(struct mlx5_core_dev *dev,
				   struct mlx5_eqe *eqe)
{
	unsigned int module_num;
	unsigned int module_status;
	unsigned int error_type;
	struct mlx5_eqe_port_module_event *module_event_eqe;
	struct pci_dev *pdev = dev->pdev;

	module_event_eqe = &eqe->data.port_module_event;

	module_num = (unsigned int)module_event_eqe->module;
	module_status = (unsigned int)module_event_eqe->module_status &
	    PORT_MODULE_EVENT_MODULE_STATUS_MASK;
	error_type = (unsigned int)module_event_eqe->error_type &
	    PORT_MODULE_EVENT_ERROR_TYPE_MASK;

	switch (module_status) {
	case MLX5_MODULE_STATUS_PLUGGED:
		device_printf((&pdev->dev)->bsddev, "INFO: ""Module %u, status: plugged\n", module_num);
		break;

	case MLX5_MODULE_STATUS_UNPLUGGED:
		device_printf((&pdev->dev)->bsddev, "INFO: ""Module %u, status: unplugged\n", module_num);
		break;

	case MLX5_MODULE_STATUS_ERROR:
		device_printf((&pdev->dev)->bsddev, "INFO: ""Module %u, status: error, %s\n", module_num, mlx5_port_module_event_error_type_to_string(error_type));
		break;

	default:
		device_printf((&pdev->dev)->bsddev, "INFO: ""Module %u, unknown status\n", module_num);
	}
	/* store module status */
	if (module_num < MLX5_MAX_PORTS)
		dev->module_status[module_num] = module_status;
}