/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
32
33#ifndef MLX5_CORE_CQ_H
34#define MLX5_CORE_CQ_H
35
36#include <linux/mlx5/driver.h>
37#include <linux/refcount.h>
38
39struct mlx5_core_cq {
40	u32			cqn;
41	int			cqe_sz;
42	__be32		       *set_ci_db;
43	__be32		       *arm_db;
44	struct mlx5_uars_page  *uar;
45	refcount_t		refcount;
46	struct completion	free;
47	unsigned		vector;
48	unsigned int		irqn;
49	void (*comp)(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe);
50	void (*event)		(struct mlx5_core_cq *, enum mlx5_event);
51	u32			cons_index;
52	unsigned		arm_sn;
53	struct mlx5_rsc_debug	*dbg;
54	int			pid;
55	struct {
56		struct list_head list;
57		void (*comp)(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe);
58		void		*priv;
59	} tasklet_ctx;
60	int			reset_notify_added;
61	struct list_head	reset_notify;
62	struct mlx5_eq_comp	*eq;
63	u16 uid;
64};
65
66
67enum {
68	MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR		= 0x01,
69	MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR		= 0x02,
70	MLX5_CQE_SYNDROME_LOCAL_PROT_ERR		= 0x04,
71	MLX5_CQE_SYNDROME_WR_FLUSH_ERR			= 0x05,
72	MLX5_CQE_SYNDROME_MW_BIND_ERR			= 0x06,
73	MLX5_CQE_SYNDROME_BAD_RESP_ERR			= 0x10,
74	MLX5_CQE_SYNDROME_LOCAL_ACCESS_ERR		= 0x11,
75	MLX5_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR		= 0x12,
76	MLX5_CQE_SYNDROME_REMOTE_ACCESS_ERR		= 0x13,
77	MLX5_CQE_SYNDROME_REMOTE_OP_ERR			= 0x14,
78	MLX5_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR	= 0x15,
79	MLX5_CQE_SYNDROME_RNR_RETRY_EXC_ERR		= 0x16,
80	MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR		= 0x22,
81};
82
83enum {
84	MLX5_CQE_OWNER_MASK	= 1,
85	MLX5_CQE_REQ		= 0,
86	MLX5_CQE_RESP_WR_IMM	= 1,
87	MLX5_CQE_RESP_SEND	= 2,
88	MLX5_CQE_RESP_SEND_IMM	= 3,
89	MLX5_CQE_RESP_SEND_INV	= 4,
90	MLX5_CQE_RESIZE_CQ	= 5,
91	MLX5_CQE_SIG_ERR	= 12,
92	MLX5_CQE_REQ_ERR	= 13,
93	MLX5_CQE_RESP_ERR	= 14,
94	MLX5_CQE_INVALID	= 15,
95};
96
97enum {
98	MLX5_CQ_MODIFY_PERIOD	= 1 << 0,
99	MLX5_CQ_MODIFY_COUNT	= 1 << 1,
100	MLX5_CQ_MODIFY_OVERRUN	= 1 << 2,
101};
102
103enum {
104	MLX5_CQ_OPMOD_RESIZE		= 1,
105	MLX5_MODIFY_CQ_MASK_LOG_SIZE	= 1 << 0,
106	MLX5_MODIFY_CQ_MASK_PG_OFFSET	= 1 << 1,
107	MLX5_MODIFY_CQ_MASK_PG_SIZE	= 1 << 2,
108};
109
110struct mlx5_cq_modify_params {
111	int	type;
112	union {
113		struct {
114			u32	page_offset;
115			u8	log_cq_size;
116		} resize;
117
118		struct {
119		} moder;
120
121		struct {
122		} mapping;
123	} params;
124};
125
126enum {
127	CQE_STRIDE_64 = 0,
128	CQE_STRIDE_128 = 1,
129	CQE_STRIDE_128_PAD = 2,
130};
131
132#define MLX5_MAX_CQ_PERIOD (BIT(__mlx5_bit_sz(cqc, cq_period)) - 1)
133#define MLX5_MAX_CQ_COUNT (BIT(__mlx5_bit_sz(cqc, cq_max_count)) - 1)
134
135static inline int cqe_sz_to_mlx_sz(u8 size, int padding_128_en)
136{
137	return padding_128_en ? CQE_STRIDE_128_PAD :
138				size == 64 ? CQE_STRIDE_64 : CQE_STRIDE_128;
139}
140
141static inline void mlx5_cq_set_ci(struct mlx5_core_cq *cq)
142{
143	*cq->set_ci_db = cpu_to_be32(cq->cons_index & 0xffffff);
144}
145
146enum {
147	MLX5_CQ_DB_REQ_NOT_SOL		= 1 << 24,
148	MLX5_CQ_DB_REQ_NOT		= 0 << 24
149};
150
151static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd,
152			       void __iomem *uar_page,
153			       u32 cons_index)
154{
155	__be32 doorbell[2];
156	u32 sn;
157	u32 ci;
158
159	sn = cq->arm_sn & 3;
160	ci = cons_index & 0xffffff;
161
162	*cq->arm_db = cpu_to_be32(sn << 28 | cmd | ci);
163
164	/* Make sure that the doorbell record in host memory is
165	 * written before ringing the doorbell via PCI MMIO.
166	 */
167	wmb();
168
169	doorbell[0] = cpu_to_be32(sn << 28 | cmd | ci);
170	doorbell[1] = cpu_to_be32(cq->cqn);
171
172	mlx5_write64(doorbell, uar_page + MLX5_CQ_DOORBELL);
173}
174
175static inline void mlx5_cq_hold(struct mlx5_core_cq *cq)
176{
177	refcount_inc(&cq->refcount);
178}
179
180static inline void mlx5_cq_put(struct mlx5_core_cq *cq)
181{
182	if (refcount_dec_and_test(&cq->refcount))
183		complete(&cq->free);
184}
185
186int mlx5_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
187		   u32 *in, int inlen, u32 *out, int outlen);
188int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
189			u32 *in, int inlen, u32 *out, int outlen);
190int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
191int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
192		       u32 *out);
193int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
194			u32 *in, int inlen);
195int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
196				   struct mlx5_core_cq *cq, u16 cq_period,
197				   u16 cq_max_count);
198static inline void mlx5_dump_err_cqe(struct mlx5_core_dev *dev,
199				     struct mlx5_err_cqe *err_cqe)
200{
201	print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, err_cqe,
202		       sizeof(*err_cqe), false);
203}
204int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
205void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
206
207#endif /* MLX5_CORE_CQ_H */
208