/*-
 * Copyright (c) 2013-2017, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef MLX5_CORE_CQ_H
#define MLX5_CORE_CQ_H

#include <rdma/ib_verbs.h>
#include <dev/mlx5/driver.h>
#include <dev/mlx5/mlx5_ifc.h>

struct mlx5_eqe;
struct mlx5_core_cq {
	u32			cqn;		/* CQ number returned at creation */
	int			cqe_sz;		/* CQE size in bytes (64 or 128) */
	__be32		       *set_ci_db;	/* consumer index doorbell record */
	__be32		       *arm_db;		/* arm doorbell record */
	unsigned		vector;		/* completion vector */
	int			irqn;
	void (*comp)		(struct mlx5_core_cq *, struct mlx5_eqe *);
	void (*event)		(struct mlx5_core_cq *, int);
	struct mlx5_uars_page  *uar;
	u32			cons_index;	/* software consumer index */
	unsigned		arm_sn;		/* arm sequence number */
	struct mlx5_rsc_debug	*dbg;
	int			pid;		/* pid of the creating process */
	int			reset_notify_added;
	struct list_head	reset_notify;
};
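
/*
 * Usage sketch (illustrative, not part of this header's API): a consumer
 * normally zeroes the structure, installs its completion and async-event
 * callbacks, and then passes the CQ to mlx5_core_create_cq() together with
 * a command buffer built from the create_cq_in layout in mlx5_ifc.h. The
 * callback and variable names below are assumptions, not existing symbols:
 *
 *	static void my_cq_comp(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe)
 *	{
 *		// schedule CQE polling for this CQ
 *	}
 *
 *	static void my_cq_event(struct mlx5_core_cq *cq, int event_type)
 *	{
 *		// handle CQ error/overrun events
 *	}
 *
 *	cq->comp  = my_cq_comp;
 *	cq->event = my_cq_event;
 *	err = mlx5_core_create_cq(dev, cq, in, inlen, out, outlen);
 */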

enum {
	MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR		= 0x01,
	MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR		= 0x02,
	MLX5_CQE_SYNDROME_LOCAL_PROT_ERR		= 0x04,
	MLX5_CQE_SYNDROME_WR_FLUSH_ERR			= 0x05,
	MLX5_CQE_SYNDROME_MW_BIND_ERR			= 0x06,
	MLX5_CQE_SYNDROME_BAD_RESP_ERR			= 0x10,
	MLX5_CQE_SYNDROME_LOCAL_ACCESS_ERR		= 0x11,
	MLX5_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR		= 0x12,
	MLX5_CQE_SYNDROME_REMOTE_ACCESS_ERR		= 0x13,
	MLX5_CQE_SYNDROME_REMOTE_OP_ERR			= 0x14,
	MLX5_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR	= 0x15,
	MLX5_CQE_SYNDROME_RNR_RETRY_EXC_ERR		= 0x16,
	MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR		= 0x22,
};

enum {
	MLX5_CQE_OWNER_MASK	= 1,
	MLX5_CQE_REQ		= 0,
	MLX5_CQE_RESP_WR_IMM	= 1,
	MLX5_CQE_RESP_SEND	= 2,
	MLX5_CQE_RESP_SEND_IMM	= 3,
	MLX5_CQE_RESP_SEND_INV	= 4,
	MLX5_CQE_RESIZE_CQ	= 5,
	MLX5_CQE_SIG_ERR	= 12,
	MLX5_CQE_REQ_ERR	= 13,
	MLX5_CQE_RESP_ERR	= 14,
	MLX5_CQE_INVALID	= 15,
};
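
/*
 * A minimal decode sketch, assuming the conventional mlx5 CQE layout in
 * which the opcode occupies the high four bits of the CQE's op_own byte and
 * the ownership (toggle) bit is bit 0 (covered by MLX5_CQE_OWNER_MASK); the
 * cqe pointer, its op_own field, and handle_error_cqe() are assumptions,
 * since the CQE structure itself is not defined in this header:
 *
 *	u8 opcode = cqe->op_own >> 4;
 *	u8 owner  = cqe->op_own & MLX5_CQE_OWNER_MASK;
 *
 *	if (opcode == MLX5_CQE_INVALID)
 *		return NULL;			// no CQE to consume
 *	if (opcode == MLX5_CQE_REQ_ERR || opcode == MLX5_CQE_RESP_ERR)
 *		handle_error_cqe(cqe);		// hypothetical handler; syndromes listed above
 */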

enum {
	MLX5_CQ_MODIFY_PERIOD	= 1 << 0,
	MLX5_CQ_MODIFY_COUNT	= 1 << 1,
	MLX5_CQ_MODIFY_OVERRUN	= 1 << 2,
	MLX5_CQ_MODIFY_EQN	= 1 << 3,
	MLX5_CQ_MODIFY_PERIOD_MODE = 1 << 4,
};

enum {
	MLX5_CQ_OPMOD_RESIZE		= 1,
	MLX5_MODIFY_CQ_MASK_LOG_SIZE	= 1 << 0,
	MLX5_MODIFY_CQ_MASK_PG_OFFSET	= 1 << 1,
	MLX5_MODIFY_CQ_MASK_PG_SIZE	= 1 << 2,
};

struct mlx5_cq_modify_params {
	int	type;
	union {
		struct {
			u32	page_offset;
			u8	log_cq_size;
		} resize;

		struct {
		} moder;

		struct {
		} mapping;
	} params;
};

static inline int cqe_sz_to_mlx_sz(u8 size)
{
	return size == 64 ? CQE_SIZE_64 : CQE_SIZE_128;
}
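
/*
 * A hedged sketch of where cqe_sz_to_mlx_sz() typically feeds into the CQ
 * context when building the create command, assuming the MLX5_ADDR_OF()/
 * MLX5_SET() accessors and the cqc layout from mlx5_ifc.h; cqe_size is an
 * illustrative variable holding 64 or 128:
 *
 *	void *cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
 *
 *	MLX5_SET(cqc, cqc, cqe_sz, cqe_sz_to_mlx_sz(cqe_size));
 */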

static inline void mlx5_cq_set_ci(struct mlx5_core_cq *cq)
{
	*cq->set_ci_db = cpu_to_be32(cq->cons_index & 0xffffff);
}

enum {
	MLX5_CQ_DB_REQ_NOT_SOL		= 1 << 24,
	MLX5_CQ_DB_REQ_NOT		= 0 << 24
};

static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd,
			       void __iomem *uar_page,
			       spinlock_t *doorbell_lock,
			       u32 cons_index)
{
	__be32 doorbell[2];
	u32 sn;
	u32 ci;

	sn = cq->arm_sn & 3;
	ci = cons_index & 0xffffff;

	*cq->arm_db = cpu_to_be32(sn << 28 | cmd | ci);

	/* Make sure that the doorbell record in host memory is
	 * written before ringing the doorbell via PCI MMIO.
	 */
	wmb();

	doorbell[0] = cpu_to_be32(sn << 28 | cmd | ci);
	doorbell[1] = cpu_to_be32(cq->cqn);

	mlx5_write64(doorbell, uar_page + MLX5_CQ_DOORBELL, doorbell_lock);
}
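
/*
 * Typical poll-and-rearm flow, sketched under assumptions rather than taken
 * from a real consumer: after draining the CQEs that software owns, the
 * driver publishes the new consumer index and re-arms the CQ so the next
 * completion raises an event. MLX5_CQ_DB_REQ_NOT requests an event for any
 * new CQE, MLX5_CQ_DB_REQ_NOT_SOL only for a solicited one; next_sw_cqe()
 * and process_cqe() are hypothetical helpers:
 *
 *	while ((cqe = next_sw_cqe(cq)) != NULL) {
 *		process_cqe(cqe);
 *		cq->cons_index++;
 *	}
 *	mlx5_cq_set_ci(cq);
 *	mlx5_cq_arm(cq, MLX5_CQ_DB_REQ_NOT, uar_page, doorbell_lock, cq->cons_index);
 */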

int mlx5_init_cq_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev);
int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
			u32 *in, int inlen, u32 *out, int outlen);
int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
		       u32 *out, int outlen);
int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
			u32 *in, int inlen);
int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
				   struct mlx5_core_cq *cq, u16 cq_period,
				   u16 cq_max_count);
int mlx5_core_modify_cq_moderation_mode(struct mlx5_core_dev *dev,
					struct mlx5_core_cq *cq,
					u16 cq_period,
					u16 cq_max_count,
					u8 cq_mode);
int mlx5_core_modify_cq_by_mask(struct mlx5_core_dev *dev,
				struct mlx5_core_cq *cq, u32 mask,
				u16 cq_period, u16 cq_max_count,
				u8 cq_mode, u8 cq_eqn);
int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
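
/*
 * CQ lifecycle sketch, assuming command buffers sized with the MLX5_ST_SZ_DW()
 * helper from mlx5_ifc.h; error handling is elided and the moderation values
 * below are illustrative:
 *
 *	u32 out[MLX5_ST_SZ_DW(create_cq_out)] = {0};
 *
 *	err = mlx5_core_create_cq(dev, cq, in, inlen, out, sizeof(out));
 *	err = mlx5_core_modify_cq_moderation(dev, cq, 16, 64);	// period, max count
 *	...
 *	err = mlx5_core_destroy_cq(dev, cq);
 */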

#endif /* MLX5_CORE_CQ_H */