mlx5_eq.c revision 361414
/*-
 * Copyright (c) 2013-2017, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/dev/mlx5/mlx5_core/mlx5_eq.c 361414 2020-05-23 12:00:46Z kib $
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include <dev/mlx5/port.h>
#include <dev/mlx5/mlx5_ifc.h>
#include <dev/mlx5/mlx5_fpga/core.h>
#include "mlx5_core.h"
#include "eswitch.h"

#include "opt_rss.h"

#ifdef  RSS
#include <net/rss_config.h>
#include <netinet/in_rss.h>
#endif

enum {
	MLX5_EQE_SIZE		= sizeof(struct mlx5_eqe),
	MLX5_EQE_OWNER_INIT_VAL	= 0x1,
};

enum {
	MLX5_NUM_SPARE_EQE	= 0x80,
	MLX5_NUM_ASYNC_EQE	= 0x100,
	MLX5_NUM_CMD_EQE	= 32,
};

enum {
	MLX5_EQ_DOORBEL_OFFSET	= 0x40,
};

#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG)	    | \
			       (1ull << MLX5_EVENT_TYPE_COMM_EST)	    | \
			       (1ull << MLX5_EVENT_TYPE_SQ_DRAINED)	    | \
			       (1ull << MLX5_EVENT_TYPE_CQ_ERROR)	    | \
			       (1ull << MLX5_EVENT_TYPE_WQ_CATAS_ERROR)	    | \
			       (1ull << MLX5_EVENT_TYPE_PATH_MIG_FAILED)    | \
			       (1ull << MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
			       (1ull << MLX5_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
			       (1ull << MLX5_EVENT_TYPE_PORT_CHANGE)	    | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_LAST_WQE)	    | \
			       (1ull << MLX5_EVENT_TYPE_SRQ_RQ_LIMIT)	    | \
			       (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE))

struct map_eq_in {
	u64	mask;
	u32	reserved;
	u32	unmap_eqn;
};

struct cre_des_eq {
	u8	reserved[15];
	u8	eqn;
};

/* Function prototypes */
static void mlx5_port_module_event(struct mlx5_core_dev *dev,
				   struct mlx5_eqe *eqe);
static void mlx5_port_general_notification_event(struct mlx5_core_dev *dev,
						 struct mlx5_eqe *eqe);

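/* Destroy an EQ through the DESTROY_EQ firmware command. */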
static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
{
	u32 in[MLX5_ST_SZ_DW(destroy_eq_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(destroy_eq_out)] = {0};

	MLX5_SET(destroy_eq_in, in, opcode, MLX5_CMD_OP_DESTROY_EQ);
	MLX5_SET(destroy_eq_in, in, eq_number, eqn);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

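/* Return a pointer to the EQE at the given index within the EQ buffer. */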
static struct mlx5_eqe *get_eqe(struct mlx5_eq *eq, u32 entry)
{
	return mlx5_buf_offset(&eq->buf, entry * MLX5_EQE_SIZE);
}

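/*
 * Return the next EQE owned by software, or NULL if the queue is empty.
 * The expected value of the ownership bit flips on every wrap of the
 * consumer index, which is how hardware and software stay in step.
 */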
static struct mlx5_eqe *next_eqe_sw(struct mlx5_eq *eq)
{
	struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & (eq->nent - 1));

	return ((eqe->owner & 1) ^ !!(eq->cons_index & eq->nent)) ? NULL : eqe;
}

static const char *eqe_type_str(u8 type)
{
	switch (type) {
	case MLX5_EVENT_TYPE_COMP:
		return "MLX5_EVENT_TYPE_COMP";
	case MLX5_EVENT_TYPE_PATH_MIG:
		return "MLX5_EVENT_TYPE_PATH_MIG";
	case MLX5_EVENT_TYPE_COMM_EST:
		return "MLX5_EVENT_TYPE_COMM_EST";
	case MLX5_EVENT_TYPE_SQ_DRAINED:
		return "MLX5_EVENT_TYPE_SQ_DRAINED";
	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
		return "MLX5_EVENT_TYPE_SRQ_LAST_WQE";
	case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
		return "MLX5_EVENT_TYPE_SRQ_RQ_LIMIT";
	case MLX5_EVENT_TYPE_CQ_ERROR:
		return "MLX5_EVENT_TYPE_CQ_ERROR";
	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
		return "MLX5_EVENT_TYPE_WQ_CATAS_ERROR";
	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
		return "MLX5_EVENT_TYPE_PATH_MIG_FAILED";
	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
		return "MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR";
	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
		return "MLX5_EVENT_TYPE_WQ_ACCESS_ERROR";
	case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
		return "MLX5_EVENT_TYPE_SRQ_CATAS_ERROR";
	case MLX5_EVENT_TYPE_INTERNAL_ERROR:
		return "MLX5_EVENT_TYPE_INTERNAL_ERROR";
	case MLX5_EVENT_TYPE_PORT_CHANGE:
		return "MLX5_EVENT_TYPE_PORT_CHANGE";
	case MLX5_EVENT_TYPE_GPIO_EVENT:
		return "MLX5_EVENT_TYPE_GPIO_EVENT";
	case MLX5_EVENT_TYPE_CODING_PORT_MODULE_EVENT:
		return "MLX5_EVENT_TYPE_PORT_MODULE_EVENT";
	case MLX5_EVENT_TYPE_TEMP_WARN_EVENT:
		return "MLX5_EVENT_TYPE_TEMP_WARN_EVENT";
	case MLX5_EVENT_TYPE_REMOTE_CONFIG:
		return "MLX5_EVENT_TYPE_REMOTE_CONFIG";
	case MLX5_EVENT_TYPE_DB_BF_CONGESTION:
		return "MLX5_EVENT_TYPE_DB_BF_CONGESTION";
	case MLX5_EVENT_TYPE_STALL_EVENT:
		return "MLX5_EVENT_TYPE_STALL_EVENT";
	case MLX5_EVENT_TYPE_CMD:
		return "MLX5_EVENT_TYPE_CMD";
	case MLX5_EVENT_TYPE_PAGE_REQUEST:
		return "MLX5_EVENT_TYPE_PAGE_REQUEST";
	case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE:
		return "MLX5_EVENT_TYPE_NIC_VPORT_CHANGE";
	case MLX5_EVENT_TYPE_FPGA_ERROR:
		return "MLX5_EVENT_TYPE_FPGA_ERROR";
	case MLX5_EVENT_TYPE_FPGA_QP_ERROR:
		return "MLX5_EVENT_TYPE_FPGA_QP_ERROR";
	case MLX5_EVENT_TYPE_CODING_DCBX_CHANGE_EVENT:
		return "MLX5_EVENT_TYPE_CODING_DCBX_CHANGE_EVENT";
	case MLX5_EVENT_TYPE_CODING_GENERAL_NOTIFICATION_EVENT:
		return "MLX5_EVENT_TYPE_CODING_GENERAL_NOTIFICATION_EVENT";
	default:
		return "Unrecognized event";
	}
}

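/* Map a PORT_CHANGE EQE subtype to the corresponding core device event. */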
static enum mlx5_dev_event port_subtype_event(u8 subtype)
{
	switch (subtype) {
	case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
		return MLX5_DEV_EVENT_PORT_DOWN;
	case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
		return MLX5_DEV_EVENT_PORT_UP;
	case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
		return MLX5_DEV_EVENT_PORT_INITIALIZED;
	case MLX5_PORT_CHANGE_SUBTYPE_LID:
		return MLX5_DEV_EVENT_LID_CHANGE;
	case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
		return MLX5_DEV_EVENT_PKEY_CHANGE;
	case MLX5_PORT_CHANGE_SUBTYPE_GUID:
		return MLX5_DEV_EVENT_GUID_CHANGE;
	case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
		return MLX5_DEV_EVENT_CLIENT_REREG;
	}
	return -1;
}

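/* Map a DCBX change EQE subtype to the corresponding core device event. */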
static enum mlx5_dev_event dcbx_subevent(u8 subtype)
{
	switch (subtype) {
	case MLX5_DCBX_EVENT_SUBTYPE_ERROR_STATE_DCBX:
		return MLX5_DEV_EVENT_ERROR_STATE_DCBX;
	case MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_CHANGE:
		return MLX5_DEV_EVENT_REMOTE_CONFIG_CHANGE;
	case MLX5_DCBX_EVENT_SUBTYPE_LOCAL_OPER_CHANGE:
		return MLX5_DEV_EVENT_LOCAL_OPER_CHANGE;
	case MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_APP_PRIORITY_CHANGE:
		return MLX5_DEV_EVENT_REMOTE_CONFIG_APPLICATION_PRIORITY_CHANGE;
	}
	return -1;
}

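/*
 * Write the EQ consumer index to the doorbell; "arm" selects the arming
 * doorbell so the EQ generates another interrupt when new EQEs arrive.
 */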
static void eq_update_ci(struct mlx5_eq *eq, int arm)
{
	__be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
	u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);
	__raw_writel((__force u32) cpu_to_be32(val), addr);
	/* We still want ordering, just not swabbing, so add a barrier */
	mb();
}

static void
mlx5_temp_warning_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe)
{

	mlx5_core_warn(dev,
	    "High temperature on sensors with bit set %#jx %#jx\n",
	    (uintmax_t)be64_to_cpu(eqe->data.temp_warning.sensor_warning_msb),
	    (uintmax_t)be64_to_cpu(eqe->data.temp_warning.sensor_warning_lsb));
}

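/*
 * Poll an EQ and dispatch every software-owned EQE to its handler.
 * Returns nonzero if at least one EQE was consumed.
 */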
static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	struct mlx5_eqe *eqe;
	int eqes_found = 0;
	int set_ci = 0;
	u32 cqn;
	u32 rsn;
	u8 port;

	while ((eqe = next_eqe_sw(eq))) {
		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		rmb();

		mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n",
			      eq->eqn, eqe_type_str(eqe->type));
		switch (eqe->type) {
		case MLX5_EVENT_TYPE_COMP:
			cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;
			mlx5_cq_completion(dev, cqn);
			break;

		case MLX5_EVENT_TYPE_PATH_MIG:
		case MLX5_EVENT_TYPE_COMM_EST:
		case MLX5_EVENT_TYPE_SQ_DRAINED:
		case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
		case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
		case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
		case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
		case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
			rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
			mlx5_core_dbg(dev, "event %s(%d) arrived on resource 0x%x\n",
				      eqe_type_str(eqe->type), eqe->type, rsn);
			mlx5_rsc_event(dev, rsn, eqe->type);
			break;

		case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
		case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
			rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
			mlx5_core_dbg(dev, "SRQ event %s(%d): srqn 0x%x\n",
				      eqe_type_str(eqe->type), eqe->type, rsn);
			mlx5_srq_event(dev, rsn, eqe->type);
			break;

		case MLX5_EVENT_TYPE_CMD:
			if (dev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR) {
				mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector),
				    MLX5_CMD_MODE_EVENTS);
			}
			break;

		case MLX5_EVENT_TYPE_PORT_CHANGE:
			port = (eqe->data.port.port >> 4) & 0xf;
			switch (eqe->sub_type) {
			case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
			case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
			case MLX5_PORT_CHANGE_SUBTYPE_LID:
			case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
			case MLX5_PORT_CHANGE_SUBTYPE_GUID:
			case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
			case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
				if (dev->event)
					dev->event(dev, port_subtype_event(eqe->sub_type),
						   (unsigned long)port);
				break;
			default:
				mlx5_core_warn(dev, "Port event with unrecognized subtype: port %d, sub_type %d\n",
					       port, eqe->sub_type);
			}
			break;

		case MLX5_EVENT_TYPE_CODING_DCBX_CHANGE_EVENT:
			port = (eqe->data.port.port >> 4) & 0xf;
			switch (eqe->sub_type) {
			case MLX5_DCBX_EVENT_SUBTYPE_ERROR_STATE_DCBX:
			case MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_CHANGE:
			case MLX5_DCBX_EVENT_SUBTYPE_LOCAL_OPER_CHANGE:
			case MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_APP_PRIORITY_CHANGE:
				if (dev->event)
					dev->event(dev,
						   dcbx_subevent(eqe->sub_type),
						   0);
				break;
			default:
				mlx5_core_warn(dev,
					       "dcbx event with unrecognized subtype: port %d, sub_type %d\n",
					       port, eqe->sub_type);
			}
			break;

		case MLX5_EVENT_TYPE_CODING_GENERAL_NOTIFICATION_EVENT:
			mlx5_port_general_notification_event(dev, eqe);
			break;

		case MLX5_EVENT_TYPE_CQ_ERROR:
			cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
			mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",
				       cqn, eqe->data.cq_err.syndrome);
			mlx5_cq_event(dev, cqn, eqe->type);
			break;

		case MLX5_EVENT_TYPE_PAGE_REQUEST:
			{
				u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
				s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages);

				mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n",
					      func_id, npages);
				mlx5_core_req_pages_handler(dev, func_id, npages);
			}
			break;

		case MLX5_EVENT_TYPE_CODING_PORT_MODULE_EVENT:
			mlx5_port_module_event(dev, eqe);
			break;

		case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE:
			{
				struct mlx5_eqe_vport_change *vc_eqe =
						&eqe->data.vport_change;
				u16 vport_num = be16_to_cpu(vc_eqe->vport_num);

				if (dev->event)
					dev->event(dev,
					     MLX5_DEV_EVENT_VPORT_CHANGE,
					     (unsigned long)vport_num);
			}
			if (dev->priv.eswitch != NULL)
				mlx5_eswitch_vport_event(dev->priv.eswitch,
				    eqe);
			break;

		case MLX5_EVENT_TYPE_FPGA_ERROR:
		case MLX5_EVENT_TYPE_FPGA_QP_ERROR:
			mlx5_fpga_event(dev, eqe->type, &eqe->data.raw);
			break;
		case MLX5_EVENT_TYPE_TEMP_WARN_EVENT:
			mlx5_temp_warning_event(dev, eqe);
			break;

		default:
			mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n",
				       eqe->type, eq->eqn);
			break;
		}

		++eq->cons_index;
		eqes_found = 1;
		++set_ci;

		/* The HCA will think the queue has overflowed if we
		 * don't tell it we've been processing events.  We
		 * create our EQs with MLX5_NUM_SPARE_EQE extra
		 * entries, so we must update our consumer index at
		 * least that often.
		 */
		if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) {
			eq_update_ci(eq, 0);
			set_ci = 0;
		}
	}

	eq_update_ci(eq, 1);

	return eqes_found;
}

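/*
 * MSI-X interrupt handler; each EQ has its own vector, so the EQ to
 * service arrives as the handler argument.
 */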
static irqreturn_t mlx5_msix_handler(int irq, void *eq_ptr)
{
	struct mlx5_eq *eq = eq_ptr;
	struct mlx5_core_dev *dev = eq->dev;

	/* Only process the EQ when interrupts are not administratively disabled. */
	if (likely(dev->priv.disable_irqs == 0))
		mlx5_eq_int(dev, eq);

	/* MSI-X vectors always belong to us */
	return IRQ_HANDLED;
}

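/* Set the initial ownership bit on every EQE so the fresh queue is hardware-owned. */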
static void init_eq_buf(struct mlx5_eq *eq)
{
	struct mlx5_eqe *eqe;
	int i;

	for (i = 0; i < eq->nent; i++) {
		eqe = get_eqe(eq, i);
		eqe->owner = MLX5_EQE_OWNER_INIT_VAL;
	}
}

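/*
 * Create an EQ: allocate and initialize the queue buffer, issue the
 * CREATE_EQ firmware command, and hook up the MSI-X interrupt handler.
 */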
int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
		       int nent, u64 mask, struct mlx5_uar *uar)
{
	u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0};
	struct mlx5_priv *priv = &dev->priv;
	__be64 *pas;
	void *eqc;
	int inlen;
	u32 *in;
	int err;

	eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE);
	eq->cons_index = 0;
	err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, 2 * PAGE_SIZE,
			     &eq->buf);
	if (err)
		return err;

	init_eq_buf(eq);

	inlen = MLX5_ST_SZ_BYTES(create_eq_in) +
		MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->buf.npages;
	in = mlx5_vzalloc(inlen);
	if (!in) {
		err = -ENOMEM;
		goto err_buf;
	}

	pas = (__be64 *)MLX5_ADDR_OF(create_eq_in, in, pas);
	mlx5_fill_page_array(&eq->buf, pas);

	MLX5_SET(create_eq_in, in, opcode, MLX5_CMD_OP_CREATE_EQ);
	MLX5_SET64(create_eq_in, in, event_bitmask, mask);

	eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry);
	MLX5_SET(eqc, eqc, log_eq_size, ilog2(eq->nent));
	MLX5_SET(eqc, eqc, uar_page, uar->index);
	MLX5_SET(eqc, eqc, intr, vecidx);
	MLX5_SET(eqc, eqc, log_page_size,
		 eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
	if (err)
		goto err_in;

	eq->eqn = MLX5_GET(create_eq_out, out, eq_number);
	eq->irqn = vecidx;
	eq->dev = dev;
	eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET;
	err = request_irq(priv->msix_arr[vecidx].vector, mlx5_msix_handler, 0,
			  "mlx5_core", eq);
	if (err)
		goto err_eq;
#ifdef RSS
	if (vecidx >= MLX5_EQ_VEC_COMP_BASE) {
		u8 bucket = vecidx - MLX5_EQ_VEC_COMP_BASE;
		err = bind_irq_to_cpu(priv->msix_arr[vecidx].vector,
				      rss_getcpu(bucket % rss_getnumbuckets()));
		if (err)
			goto err_irq;
	}
#else
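	/* Keep the err_irq label referenced so it is not flagged as unused when RSS is disabled. */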
	if (0)
		goto err_irq;
#endif

	/* EQs are created in ARMED state */
	eq_update_ci(eq, 1);

	kvfree(in);
	return 0;

err_irq:
	free_irq(priv->msix_arr[vecidx].vector, eq);

err_eq:
	mlx5_cmd_destroy_eq(dev, eq->eqn);

err_in:
	kvfree(in);

err_buf:
	mlx5_buf_free(dev, &eq->buf);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_create_map_eq);
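
/*
 * Usage sketch (hypothetical caller, mirroring how the core sets up its
 * completion EQs): a completion EQ on vector i could be created with
 *
 *	err = mlx5_create_map_eq(dev, eq, i + MLX5_EQ_VEC_COMP_BASE,
 *	    nent, 0, &dev->priv.uuari.uars[0]);
 *
 * mlx5_destroy_unmap_eq() below is the matching teardown path.
 */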
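/* Tear down an EQ: release its interrupt, destroy it in firmware, and free its buffer. */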
int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	int err;

	free_irq(dev->priv.msix_arr[eq->irqn].vector, eq);
	err = mlx5_cmd_destroy_eq(dev, eq->eqn);
	if (err)
		mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
			       eq->eqn);
	mlx5_buf_free(dev, &eq->buf);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_destroy_unmap_eq);

int mlx5_eq_init(struct mlx5_core_dev *dev)
{
	spin_lock_init(&dev->priv.eq_table.lock);

	return 0;
}

void mlx5_eq_cleanup(struct mlx5_core_dev *dev)
{
}

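/*
 * Create the command, async, and pages EQs used by the core driver and
 * switch command execution from polling to event mode.
 */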
int mlx5_start_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	u64 async_event_mask = MLX5_ASYNC_EVENT_MASK;
	int err;

	if (MLX5_CAP_GEN(dev, port_module_event))
		async_event_mask |= (1ull <<
				     MLX5_EVENT_TYPE_CODING_PORT_MODULE_EVENT);

	if (MLX5_CAP_GEN(dev, nic_vport_change_event))
		async_event_mask |= (1ull <<
				     MLX5_EVENT_TYPE_NIC_VPORT_CHANGE);

	if (MLX5_CAP_GEN(dev, dcbx))
		async_event_mask |= (1ull <<
				     MLX5_EVENT_TYPE_CODING_DCBX_CHANGE_EVENT);

	if (MLX5_CAP_GEN(dev, fpga))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_FPGA_ERROR) |
				    (1ull << MLX5_EVENT_TYPE_FPGA_QP_ERROR);

	if (MLX5_CAP_GEN(dev, temp_warn_event))
		async_event_mask |= (1ull << MLX5_EVENT_TYPE_TEMP_WARN_EVENT);

	if (MLX5_CAP_GEN(dev, general_notification_event)) {
		async_event_mask |= (1ull <<
		    MLX5_EVENT_TYPE_CODING_GENERAL_NOTIFICATION_EVENT);
	}

	err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
				 MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD,
				 &dev->priv.uuari.uars[0]);
	if (err) {
		mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err);
		return err;
	}

	mlx5_cmd_use_events(dev);

	err = mlx5_create_map_eq(dev, &table->async_eq, MLX5_EQ_VEC_ASYNC,
				 MLX5_NUM_ASYNC_EQE, async_event_mask,
				 &dev->priv.uuari.uars[0]);
	if (err) {
		mlx5_core_warn(dev, "failed to create async EQ %d\n", err);
		goto err1;
	}

	err = mlx5_create_map_eq(dev, &table->pages_eq,
				 MLX5_EQ_VEC_PAGES,
				 /* TODO: sriov max_vf + */ 1,
				 1 << MLX5_EVENT_TYPE_PAGE_REQUEST,
				 &dev->priv.uuari.uars[0]);
	if (err) {
		mlx5_core_warn(dev, "failed to create pages EQ %d\n", err);
		goto err2;
	}

	return err;

err2:
	mlx5_destroy_unmap_eq(dev, &table->async_eq);

err1:
	mlx5_cmd_use_polling(dev);
	mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
	return err;
}

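/*
 * Destroy the pages, async, and command EQs, reverting command execution
 * to polling mode.
 */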
int mlx5_stop_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	int err;

	err = mlx5_destroy_unmap_eq(dev, &table->pages_eq);
	if (err)
		return err;

	mlx5_destroy_unmap_eq(dev, &table->async_eq);
	mlx5_cmd_use_polling(dev);

	err = mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
	if (err)
		mlx5_cmd_use_events(dev);

	return err;
}

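/* Query EQ context from firmware via the QUERY_EQ command. */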
int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
		       u32 *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_eq_in)] = {0};

	memset(out, 0, outlen);
	MLX5_SET(query_eq_in, in, opcode, MLX5_CMD_OP_QUERY_EQ);
	MLX5_SET(query_eq_in, in, eq_number, eq->eqn);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
EXPORT_SYMBOL_GPL(mlx5_core_eq_query);

static const char *mlx5_port_module_event_error_type_to_string(u8 error_type)
{
	switch (error_type) {
	case MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED:
		return "Power budget exceeded";
	case MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX_CABLE_MODULE:
		return "Long Range for non MLNX cable";
	case MLX5_MODULE_EVENT_ERROR_BUS_STUCK:
		return "Bus stuck (I2C or data shorted)";
	case MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT:
		return "No EEPROM/retry timeout";
	case MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST:
		return "Enforce part number list";
	case MLX5_MODULE_EVENT_ERROR_UNSUPPORTED_CABLE:
		return "Unknown identifier";
	case MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE:
		return "High Temperature";
	case MLX5_MODULE_EVENT_ERROR_CABLE_IS_SHORTED:
		return "Bad or shorted cable/module";
	case MLX5_MODULE_EVENT_ERROR_PMD_TYPE_NOT_ENABLED:
		return "PMD type is not enabled";
	case MLX5_MODULE_EVENT_ERROR_LASTER_TEC_FAILURE:
		return "Laser TEC failure";
	case MLX5_MODULE_EVENT_ERROR_HIGH_CURRENT:
		return "High_current";
	case MLX5_MODULE_EVENT_ERROR_HIGH_VOLTAGE:
		return "High_voltage";
	case MLX5_MODULE_EVENT_ERROR_PCIE_SYS_POWER_SLOT_EXCEEDED:
		return "pcie_system_power_slot_Exceeded";
	case MLX5_MODULE_EVENT_ERROR_HIGH_POWER:
		return "High_power";
	case MLX5_MODULE_EVENT_ERROR_MODULE_STATE_MACHINE_FAULT:
		return "Module_state_machine_fault";
	default:
		return "Unknown error type";
	}
}

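/* Return the last recorded status for a module, or 0 (undefined) if out of range. */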
unsigned int mlx5_query_module_status(struct mlx5_core_dev *dev, int module_num)
{
	if (module_num < 0 || module_num >= MLX5_MAX_PORTS)
		return 0;		/* undefined */
	return dev->module_status[module_num];
}

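/*
 * Handle a PORT_MODULE_EVENT EQE: log the new module status, update the
 * PME statistics, and record the status for later queries.
 */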
static void mlx5_port_module_event(struct mlx5_core_dev *dev,
				   struct mlx5_eqe *eqe)
{
	unsigned int module_num;
	unsigned int module_status;
	unsigned int error_type;
	struct mlx5_eqe_port_module_event *module_event_eqe;

	module_event_eqe = &eqe->data.port_module_event;

	module_num = (unsigned int)module_event_eqe->module;
	module_status = (unsigned int)module_event_eqe->module_status &
	    PORT_MODULE_EVENT_MODULE_STATUS_MASK;
	error_type = (unsigned int)module_event_eqe->error_type &
	    PORT_MODULE_EVENT_ERROR_TYPE_MASK;

	if (module_status < MLX5_MODULE_STATUS_NUM)
		dev->priv.pme_stats.status_counters[module_status]++;
	switch (module_status) {
	case MLX5_MODULE_STATUS_PLUGGED_ENABLED:
		mlx5_core_info(dev,
		    "Module %u, status: plugged and enabled\n",
		    module_num);
		break;

	case MLX5_MODULE_STATUS_UNPLUGGED:
		mlx5_core_info(dev,
		    "Module %u, status: unplugged\n", module_num);
		break;

	case MLX5_MODULE_STATUS_ERROR:
		mlx5_core_err(dev,
		    "Module %u, status: error, %s (%d)\n",
		    module_num,
		    mlx5_port_module_event_error_type_to_string(error_type),
		    error_type);
		if (error_type < MLX5_MODULE_EVENT_ERROR_NUM)
			dev->priv.pme_stats.error_counters[error_type]++;
		break;

	default:
		mlx5_core_info(dev,
		    "Module %u, unknown status %d\n", module_num, module_status);
	}
	/* store module status */
	if (module_num < MLX5_MAX_PORTS)
		dev->module_status[module_num] = module_status;
}

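/* Handle a GENERAL_NOTIFICATION EQE, dispatching on its subtype. */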
static void mlx5_port_general_notification_event(struct mlx5_core_dev *dev,
						 struct mlx5_eqe *eqe)
{
	u8 port = (eqe->data.port.port >> 4) & 0xf;
	u32 rqn;
	struct mlx5_eqe_general_notification_event *general_event;

	switch (eqe->sub_type) {
	case MLX5_GEN_EVENT_SUBTYPE_DELAY_DROP_TIMEOUT:
		general_event = &eqe->data.general_notifications;
		rqn = be32_to_cpu(general_event->rq_user_index_delay_drop) &
			  0xffffff;
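		/* Note: the extracted RQ number is not acted on further here. */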
		break;
	case MLX5_GEN_EVENT_SUBTYPE_PCI_POWER_CHANGE_EVENT:
		mlx5_trigger_health_watchdog(dev);
		break;
	default:
		mlx5_core_warn(dev,
			       "general event with unrecognized subtype: port %d, sub_type %d\n",
			       port, eqe->sub_type);
		break;
	}
}

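/* Mask every MSI-X vector owned by the device. */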
void
mlx5_disable_interrupts(struct mlx5_core_dev *dev)
{
	int nvec = dev->priv.eq_table.num_comp_vectors + MLX5_EQ_VEC_COMP_BASE;
	int x;

	for (x = 0; x != nvec; x++)
		disable_irq(dev->priv.msix_arr[x].vector);
}

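/*
 * Poll every EQ by hand; this is a no-op while interrupt delivery is
 * administratively disabled.
 */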
void
mlx5_poll_interrupts(struct mlx5_core_dev *dev)
{
	struct mlx5_eq *eq;

	if (unlikely(dev->priv.disable_irqs != 0))
		return;

	mlx5_eq_int(dev, &dev->priv.eq_table.cmd_eq);
	mlx5_eq_int(dev, &dev->priv.eq_table.async_eq);
	mlx5_eq_int(dev, &dev->priv.eq_table.pages_eq);

	list_for_each_entry(eq, &dev->priv.eq_table.comp_eqs_list, list)
		mlx5_eq_int(dev, eq);
}