/*-
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: releng/11.0/sys/dev/mlx5/mlx5_core/mlx5_cq.c 290650 2015-11-10 12:20:22Z hselasky $
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/hardirq.h>
#include <dev/mlx5/driver.h>
#include <rdma/ib_verbs.h>
#include <dev/mlx5/cq.h>
#include "mlx5_core.h"

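/*
 * Dispatch a completion event to the CQ identified by "cqn".
 *
 * Low-numbered CQs are looked up through the lock-per-entry linear
 * array (the fast path); all others go through the radix tree under
 * the table lock, with a reference held across the callback.  In both
 * cases the arm sequence number is advanced before the completion
 * handler runs.  Events for unknown CQNs are logged and dropped.
 */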
void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn)
{
	struct mlx5_core_cq *cq;
	struct mlx5_cq_table *table = &dev->priv.cq_table;

	if (cqn < MLX5_CQ_LINEAR_ARRAY_SIZE) {
		struct mlx5_cq_linear_array_entry *entry;

		entry = &table->linear_array[cqn];
		spin_lock(&entry->lock);
		cq = entry->cq;
		if (cq == NULL) {
			mlx5_core_warn(dev,
			    "Completion event for bogus CQ 0x%x\n", cqn);
		} else {
			++cq->arm_sn;
			cq->comp(cq);
		}
		spin_unlock(&entry->lock);
		return;
	}

	spin_lock(&table->lock);
	cq = radix_tree_lookup(&table->tree, cqn);
	if (likely(cq))
		atomic_inc(&cq->refcount);
	spin_unlock(&table->lock);

	if (!cq) {
		mlx5_core_warn(dev, "Completion event for bogus CQ 0x%x\n", cqn);
		return;
	}

	++cq->arm_sn;

	cq->comp(cq);

	if (atomic_dec_and_test(&cq->refcount))
		complete(&cq->free);
}

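/*
 * Dispatch an asynchronous event to the CQ identified by "cqn",
 * holding a reference on the CQ for the duration of the callback.
 * Dropping the last reference completes cq->free.
 */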
void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type)
{
	struct mlx5_cq_table *table = &dev->priv.cq_table;
	struct mlx5_core_cq *cq;

	spin_lock(&table->lock);

	cq = radix_tree_lookup(&table->tree, cqn);
	if (cq)
		atomic_inc(&cq->refcount);

	spin_unlock(&table->lock);

	if (!cq) {
		mlx5_core_warn(dev, "Async event for bogus CQ 0x%x\n", cqn);
		return;
	}

	cq->event(cq, event_type);

	if (atomic_dec_and_test(&cq->refcount))
		complete(&cq->free);
}

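/*
 * Create a CQ in firmware and register it in the driver's CQ table.
 *
 * The caller provides a prebuilt CREATE_CQ mailbox; this routine fills
 * in the opcode, executes the command, initializes the software state
 * (one reference plus a completion used for teardown) and inserts the
 * CQ into the radix tree and, for low CQNs, the linear array.  If the
 * radix tree insertion fails, the CQ is destroyed in firmware again
 * before the error is returned.
 */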
int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
			struct mlx5_create_cq_mbox_in *in, int inlen)
{
	int err;
	struct mlx5_cq_table *table = &dev->priv.cq_table;
	struct mlx5_create_cq_mbox_out out;
	struct mlx5_destroy_cq_mbox_in din;
	struct mlx5_destroy_cq_mbox_out dout;

	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_CQ);
	memset(&out, 0, sizeof(out));
	err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
	if (err)
		return err;

	if (out.hdr.status)
		return mlx5_cmd_status_to_err(&out.hdr);

	cq->cqn = be32_to_cpu(out.cqn) & 0xffffff;
	cq->cons_index = 0;
	cq->arm_sn     = 0;
	atomic_set(&cq->refcount, 1);
	init_completion(&cq->free);

	spin_lock_irq(&table->lock);
	err = radix_tree_insert(&table->tree, cq->cqn, cq);
	spin_unlock_irq(&table->lock);
	if (err)
		goto err_cmd;

	if (cq->cqn < MLX5_CQ_LINEAR_ARRAY_SIZE) {
		struct mlx5_cq_linear_array_entry *entry;

		entry = &table->linear_array[cq->cqn];
		spin_lock_irq(&entry->lock);
		entry->cq = cq;
		spin_unlock_irq(&entry->lock);
	}

	cq->pid = curthread->td_proc->p_pid;

	return 0;

err_cmd:
	memset(&din, 0, sizeof(din));
	memset(&dout, 0, sizeof(dout));
	din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_CQ);
	din.cqn = cpu_to_be32(cq->cqn);
	mlx5_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout));
	return err;
}
EXPORT_SYMBOL(mlx5_core_create_cq);

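/*
 * Tear down a CQ: unhook it from the linear array and the radix tree,
 * destroy it in firmware, synchronize with its interrupt and wait for
 * all outstanding references to be dropped before returning.
 */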
int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
{
	struct mlx5_cq_table *table = &dev->priv.cq_table;
	struct mlx5_destroy_cq_mbox_in in;
	struct mlx5_destroy_cq_mbox_out out;
	struct mlx5_core_cq *tmp;
	int err;

	if (cq->cqn < MLX5_CQ_LINEAR_ARRAY_SIZE) {
		struct mlx5_cq_linear_array_entry *entry;

		entry = &table->linear_array[cq->cqn];
		spin_lock_irq(&entry->lock);
		entry->cq = NULL;
		spin_unlock_irq(&entry->lock);
	}

	spin_lock_irq(&table->lock);
	tmp = radix_tree_delete(&table->tree, cq->cqn);
	spin_unlock_irq(&table->lock);
	if (!tmp) {
		mlx5_core_warn(dev, "cq 0x%x not found in tree\n", cq->cqn);
		return -EINVAL;
	}
	if (tmp != cq) {
		mlx5_core_warn(dev, "corruption on cqn 0x%x\n", cq->cqn);
		return -EINVAL;
	}

	memset(&in, 0, sizeof(in));
	memset(&out, 0, sizeof(out));
	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_CQ);
	in.cqn = cpu_to_be32(cq->cqn);
	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
	if (err)
		return err;

	if (out.hdr.status)
		return mlx5_cmd_status_to_err(&out.hdr);

	synchronize_irq(cq->irqn);

	if (atomic_dec_and_test(&cq->refcount))
		complete(&cq->free);
	wait_for_completion(&cq->free);

	return 0;
}
EXPORT_SYMBOL(mlx5_core_destroy_cq);

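/*
 * Query a CQ in firmware; on success the QUERY_CQ mailbox output is
 * returned in "out".
 */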
int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
		       struct mlx5_query_cq_mbox_out *out)
{
	struct mlx5_query_cq_mbox_in in;
	int err;

	memset(&in, 0, sizeof(in));
	memset(out, 0, sizeof(*out));

	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_CQ);
	in.cqn = cpu_to_be32(cq->cqn);
	err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out));
	if (err)
		return err;

	if (out->hdr.status)
		return mlx5_cmd_status_to_err(&out->hdr);

	return 0;
}
EXPORT_SYMBOL(mlx5_core_query_cq);

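/*
 * Execute a caller-built MODIFY_CQ mailbox against the firmware.
 */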
int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
			struct mlx5_modify_cq_mbox_in *in, int in_sz)
{
	struct mlx5_modify_cq_mbox_out out;
	int err;

	memset(&out, 0, sizeof(out));
	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MODIFY_CQ);
	err = mlx5_cmd_exec(dev, in, in_sz, &out, sizeof(out));
	if (err)
		return err;

	if (out.hdr.status)
		return mlx5_cmd_status_to_err(&out.hdr);

	return 0;
}
EXPORT_SYMBOL(mlx5_core_modify_cq);

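/*
 * Convenience wrapper around mlx5_core_modify_cq() that updates only
 * the interrupt moderation parameters of a CQ: "cq_period" (the
 * moderation timer) and "cq_max_count" (completions per event).
 *
 * Illustrative call only, with made-up values, assuming the period is
 * expressed in microseconds on this hardware; coalesce to at most one
 * event per 10 usec or 64 completions, whichever comes first:
 *
 *	error = mlx5_core_modify_cq_moderation(dev, &cq, 10, 64);
 */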
int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
				   struct mlx5_core_cq *cq,
				   u16 cq_period,
				   u16 cq_max_count)
{
	struct mlx5_modify_cq_mbox_in in;

	memset(&in, 0, sizeof(in));

	in.cqn              = cpu_to_be32(cq->cqn);
	in.ctx.cq_period    = cpu_to_be16(cq_period);
	in.ctx.cq_max_count = cpu_to_be16(cq_max_count);
	in.field_select     = cpu_to_be32(MLX5_CQ_MODIFY_PERIOD |
					  MLX5_CQ_MODIFY_COUNT);

	return mlx5_core_modify_cq(dev, cq, &in, sizeof(in));
}

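/*
 * Initialize the CQ table at device start: the table lock, one
 * spinlock per linear array entry and the radix tree used for lookups.
 */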
int mlx5_init_cq_table(struct mlx5_core_dev *dev)
{
	struct mlx5_cq_table *table = &dev->priv.cq_table;
	int x;

	spin_lock_init(&table->lock);
	for (x = 0; x != MLX5_CQ_LINEAR_ARRAY_SIZE; x++)
		spin_lock_init(&table->linear_array[x].lock);
	INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);

	return 0;
}

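/*
 * Counterpart of mlx5_init_cq_table(); currently a no-op because the
 * table owns no resources that need explicit teardown.
 */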
void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev)
{
}