/*-
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_rss.h"
#include "opt_ratelimit.h"

#include <linux/printk.h>

#include <dev/mlx5/mlx5_en/en.h>

/*
 * Return the CQE at the current consumer index of the given completion
 * queue, or NULL if that entry is still owned by the hardware.
 */
struct mlx5_cqe64 *
mlx5e_get_cqe(struct mlx5e_cq *cq)
{
	struct mlx5_cqe64 *cqe;

	cqe = mlx5_cqwq_get_wqe(&cq->wq, mlx5_cqwq_get_ci(&cq->wq));

	/* check the CQE ownership bit against the current wrap counter */
	if ((cqe->op_own ^ mlx5_cqwq_get_wrap_cnt(&cq->wq)) & MLX5_CQE_OWNER_MASK)
		return (NULL);

	/* ensure cqe content is read after cqe ownership bit */
	atomic_thread_fence_acq();

	return (cqe);
}
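
/*
 * Example (illustrative sketch only, not part of the driver): callers
 * typically poll mlx5e_get_cqe() in a budgeted loop and pop each CQE
 * once it has been handled, then publish the new consumer index via the
 * CQ doorbell record.  handle_one_cqe() is a hypothetical helper, and
 * the mlx5_cqwq_pop()/mlx5_cqwq_update_db_record() calls assume the
 * usual work-queue helpers; the real per-CQE processing lives in the
 * RX and TX completion routines.
 *
 *	int budget = 64;
 *
 *	while (budget-- > 0) {
 *		struct mlx5_cqe64 *cqe = mlx5e_get_cqe(cq);
 *
 *		if (cqe == NULL)
 *			break;			// hardware owns the next entry
 *		handle_one_cqe(cq, cqe);	// hypothetical handler
 *		mlx5_cqwq_pop(&cq->wq);		// advance the consumer index
 *	}
 *	mlx5_cqwq_update_db_record(&cq->wq);	// publish consumer index to HW
 */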

/*
 * Log an error event reported for this completion queue.
 */
void
mlx5e_cq_error_event(struct mlx5_core_cq *mcq, int event)
{
	struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);

	mlx5_en_err(cq->priv->ifp, "cqn=0x%.6x event=0x%.2x\n",
	    mcq->cqn, event);
}
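
/*
 * Sketch of how this handler is typically installed (an assumption
 * based on the mlx5_core_cq callback layout; the actual assignment is
 * made where the CQ is created, e.g. in mlx5e_create_cq()):
 *
 *	mcq->comp = comp;			// completion callback
 *	mcq->event = mlx5e_cq_error_event;	// error/event callback
 */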

/*
 * Print the contents of an error CQE.  "Flushed in error" completions
 * and completions generated after software moved the queue to the
 * error state are not reported.
 */
void
mlx5e_dump_err_cqe(struct mlx5e_cq *cq, u32 qn, const struct mlx5_err_cqe *err_cqe)
{
	u32 ci;

	/* Don't print "flushed in error" syndromes. */
	if (err_cqe->vendor_err_synd == 0xf9 && err_cqe->syndrome == 0x05)
		return;
	/* Don't print when the queue was set to the error state by software. */
	if (err_cqe->vendor_err_synd == 0xf5 && err_cqe->syndrome == 0x05)
		return;

	/* index of the most recently consumed CQE */
	ci = (cq->wq.cc - 1) & cq->wq.sz_m1;

	mlx5_en_err(cq->priv->ifp,
	    "Error CQE on CQN 0x%x, CI 0x%x, QN 0x%x, OPCODE 0x%x, SYNDROME 0x%x, VENDOR SYNDROME 0x%x\n",
	    cq->mcq.cqn, ci, qn, err_cqe->op_own >> 4,
	    err_cqe->syndrome, err_cqe->vendor_err_synd);

	print_hex_dump(NULL, NULL, DUMP_PREFIX_OFFSET,
	    16, 1, err_cqe, sizeof(*err_cqe), false);
}
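
/*
 * Example (illustrative sketch only): a completion routine that sees an
 * error opcode can report it through mlx5e_dump_err_cqe().  The "sq"
 * pointer stands in for whichever queue owns the CQ, and the opcode
 * constants are the usual mlx5 CQE opcodes.
 *
 *	u8 opcode = cqe->op_own >> 4;
 *
 *	if (unlikely(opcode == MLX5_CQE_REQ_ERR || opcode == MLX5_CQE_RESP_ERR))
 *		mlx5e_dump_err_cqe(&sq->cq, sq->sqn,
 *		    (const struct mlx5_err_cqe *)cqe);
 */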