/*-
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/dev/mlx5/mlx5_core/mlx5_cq.c 331580 2018-03-26 20:33:31Z hselasky $
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/hardirq.h>
#include <dev/mlx5/driver.h>
#include <rdma/ib_verbs.h>
#include <dev/mlx5/cq.h>
#include "mlx5_core.h"

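/*
 * Dispatch a completion event to the CQ identified by "cqn". CQNs below
 * MLX5_CQ_LINEAR_ARRAY_SIZE are resolved through a per-entry-locked
 * linear array (the common fast path); larger CQNs fall back to the
 * radix tree, where a reference count pins the CQ while its completion
 * handler runs.
 */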
void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn)
{
	struct mlx5_core_cq *cq;
	struct mlx5_cq_table *table = &dev->priv.cq_table;

	if (cqn < MLX5_CQ_LINEAR_ARRAY_SIZE) {
		struct mlx5_cq_linear_array_entry *entry;

		entry = &table->linear_array[cqn];
		spin_lock(&entry->lock);
		cq = entry->cq;
		if (cq == NULL) {
			mlx5_core_warn(dev,
			    "Completion event for bogus CQ 0x%x\n", cqn);
		} else {
			++cq->arm_sn;
			cq->comp(cq);
		}
		spin_unlock(&entry->lock);
		return;
	}

	spin_lock(&table->lock);
	cq = radix_tree_lookup(&table->tree, cqn);
	if (likely(cq))
		atomic_inc(&cq->refcount);
	spin_unlock(&table->lock);

	if (!cq) {
		mlx5_core_warn(dev, "Completion event for bogus CQ 0x%x\n", cqn);
		return;
	}

	++cq->arm_sn;

	cq->comp(cq);

	if (atomic_dec_and_test(&cq->refcount))
		complete(&cq->free);
}

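/*
 * Dispatch an asynchronous (error) event to the CQ identified by "cqn".
 * Unlike the completion path, this always goes through the radix tree
 * and relies on the refcount/completion pair to keep the CQ valid
 * across the callback.
 */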
void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type)
{
	struct mlx5_cq_table *table = &dev->priv.cq_table;
	struct mlx5_core_cq *cq;

	spin_lock(&table->lock);

	cq = radix_tree_lookup(&table->tree, cqn);
	if (cq)
		atomic_inc(&cq->refcount);

	spin_unlock(&table->lock);

	if (!cq) {
		mlx5_core_warn(dev, "Async event for bogus CQ 0x%x\n", cqn);
		return;
	}

	cq->event(cq, event_type);

	if (atomic_dec_and_test(&cq->refcount))
		complete(&cq->free);
}

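/*
 * Create a CQ in firmware and register it for event dispatch. The
 * caller provides a fully built CREATE_CQ mailbox except for the
 * opcode, which is filled in here. If registration fails after the
 * firmware object was created, the error path issues DESTROY_CQ so no
 * firmware resource leaks.
 */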
int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
			struct mlx5_create_cq_mbox_in *in, int inlen)
{
	int err;
	struct mlx5_cq_table *table = &dev->priv.cq_table;
	struct mlx5_create_cq_mbox_out out;
	struct mlx5_destroy_cq_mbox_in din;
	struct mlx5_destroy_cq_mbox_out dout;

	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_CQ);
	memset(&out, 0, sizeof(out));
	err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
	if (err)
		return err;

	if (out.hdr.status)
		return mlx5_cmd_status_to_err(&out.hdr);

	cq->cqn = be32_to_cpu(out.cqn) & 0xffffff;
	cq->cons_index = 0;
	cq->arm_sn     = 0;
	atomic_set(&cq->refcount, 1);
	init_completion(&cq->free);

	spin_lock_irq(&table->lock);
	err = radix_tree_insert(&table->tree, cq->cqn, cq);
	spin_unlock_irq(&table->lock);
	if (err)
		goto err_cmd;

	if (cq->cqn < MLX5_CQ_LINEAR_ARRAY_SIZE) {
		struct mlx5_cq_linear_array_entry *entry;

		entry = &table->linear_array[cq->cqn];
		spin_lock_irq(&entry->lock);
		entry->cq = cq;
		spin_unlock_irq(&entry->lock);
	}

	cq->pid = curthread->td_proc->p_pid;

	return 0;

err_cmd:
	memset(&din, 0, sizeof(din));
	memset(&dout, 0, sizeof(dout));
	din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_CQ);
	din.cqn = cpu_to_be32(cq->cqn);
	mlx5_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout));
	return err;
}
EXPORT_SYMBOL(mlx5_core_create_cq);
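
/*
 * Usage sketch (hypothetical caller, for illustration only): the caller
 * sizes and fills the mailbox, including the CQ context and page list,
 * before handing it off; only the opcode is set by this function:
 *
 *	err = mlx5_core_create_cq(dev, &cq, in, inlen);
 *	if (err)
 *		goto err_free_mailbox;
 */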
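/*
 * Tear down a CQ: unpublish it from the linear array and radix tree,
 * destroy the firmware object, then wait for any handler still holding
 * a reference to drop it before returning.
 */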
int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
{
	struct mlx5_cq_table *table = &dev->priv.cq_table;
	struct mlx5_destroy_cq_mbox_in in;
	struct mlx5_destroy_cq_mbox_out out;
	struct mlx5_core_cq *tmp;
	int err;

	if (cq->cqn < MLX5_CQ_LINEAR_ARRAY_SIZE) {
		struct mlx5_cq_linear_array_entry *entry;

		entry = &table->linear_array[cq->cqn];
		spin_lock_irq(&entry->lock);
		entry->cq = NULL;
		spin_unlock_irq(&entry->lock);
	}

	spin_lock_irq(&table->lock);
	tmp = radix_tree_delete(&table->tree, cq->cqn);
	spin_unlock_irq(&table->lock);
	if (!tmp) {
		mlx5_core_warn(dev, "cq 0x%x not found in tree\n", cq->cqn);
		return -EINVAL;
	}
	if (tmp != cq) {
		mlx5_core_warn(dev, "corruption on cqn 0x%x\n", cq->cqn);
		return -EINVAL;
	}

	memset(&in, 0, sizeof(in));
	memset(&out, 0, sizeof(out));
	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_CQ);
	in.cqn = cpu_to_be32(cq->cqn);
	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
	if (err)
		goto out;

	if (out.hdr.status) {
		err = mlx5_cmd_status_to_err(&out.hdr);
		goto out;
	}

	synchronize_irq(cq->irqn);

	if (atomic_dec_and_test(&cq->refcount))
		complete(&cq->free);
	wait_for_completion(&cq->free);

out:
	return err;
}
EXPORT_SYMBOL(mlx5_core_destroy_cq);

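/*
 * Query the firmware CQ context into the caller-supplied mailbox.
 */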
int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
		       struct mlx5_query_cq_mbox_out *out)
{
	struct mlx5_query_cq_mbox_in in;
	int err;

	memset(&in, 0, sizeof(in));
	memset(out, 0, sizeof(*out));

	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_CQ);
	in.cqn = cpu_to_be32(cq->cqn);
	err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out));
	if (err)
		return err;

	if (out->hdr.status)
		return mlx5_cmd_status_to_err(&out->hdr);

	return 0;
}
EXPORT_SYMBOL(mlx5_core_query_cq);

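/*
 * Generic MODIFY_CQ wrapper: the caller builds the mailbox and
 * field_select mask; only the opcode is filled in here.
 */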
int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
			struct mlx5_modify_cq_mbox_in *in, int in_sz)
{
	struct mlx5_modify_cq_mbox_out out;
	int err;

	memset(&out, 0, sizeof(out));
	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MODIFY_CQ);
	err = mlx5_cmd_exec(dev, in, in_sz, &out, sizeof(out));
	if (err)
		return err;

	if (out.hdr.status)
		return mlx5_cmd_status_to_err(&out.hdr);

	return 0;
}
EXPORT_SYMBOL(mlx5_core_modify_cq);

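/*
 * Set event moderation (interrupt coalescing) for a CQ. For example, a
 * hypothetical caller could cap interrupts at one per 64 completions,
 * or per cq_period time units as consumed by the firmware, whichever
 * expires first:
 *
 *	err = mlx5_core_modify_cq_moderation(dev, &cq, 10, 64);
 */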
int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
				   struct mlx5_core_cq *cq,
				   u16 cq_period,
				   u16 cq_max_count)
{
	struct mlx5_modify_cq_mbox_in in;

	memset(&in, 0, sizeof(in));

	in.cqn              = cpu_to_be32(cq->cqn);
	in.ctx.cq_period    = cpu_to_be16(cq_period);
	in.ctx.cq_max_count = cpu_to_be16(cq_max_count);
	in.field_select     = cpu_to_be32(MLX5_CQ_MODIFY_PERIOD |
					  MLX5_CQ_MODIFY_COUNT);

	return mlx5_core_modify_cq(dev, cq, &in, sizeof(in));
}

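/*
 * Like mlx5_core_modify_cq_moderation(), but additionally selects the
 * moderation period mode, encoded by splitting the low two bits of
 * cq_mode across the cqe_sz_flags and st context fields below.
 */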
int mlx5_core_modify_cq_moderation_mode(struct mlx5_core_dev *dev,
					struct mlx5_core_cq *cq,
					u16 cq_period,
					u16 cq_max_count,
					u8 cq_mode)
{
	struct mlx5_modify_cq_mbox_in in;

	memset(&in, 0, sizeof(in));

	in.cqn              = cpu_to_be32(cq->cqn);
	in.ctx.cq_period    = cpu_to_be16(cq_period);
	in.ctx.cq_max_count = cpu_to_be16(cq_max_count);
	/* split cq_mode across the two context fields that carry it */
	in.ctx.cqe_sz_flags = (cq_mode & 2) >> 1;
	in.ctx.st	    = (cq_mode & 1) << 7;
	in.field_select     = cpu_to_be32(MLX5_CQ_MODIFY_PERIOD |
					  MLX5_CQ_MODIFY_COUNT |
					  MLX5_CQ_MODIFY_PERIOD_MODE);

	return mlx5_core_modify_cq(dev, cq, &in, sizeof(in));
}

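/*
 * Initialize the per-device CQ table: the table lock, one spinlock per
 * linear-array slot, and the radix tree used for CQNs that do not fit
 * in the array.
 */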
int mlx5_init_cq_table(struct mlx5_core_dev *dev)
{
	struct mlx5_cq_table *table = &dev->priv.cq_table;
	int x;

	memset(table, 0, sizeof(*table));
	spin_lock_init(&table->lock);
	for (x = 0; x != MLX5_CQ_LINEAR_ARRAY_SIZE; x++)
		spin_lock_init(&table->linear_array[x].lock);
	INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);

	return 0;
}

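/*
 * Intentionally empty: the CQ table is embedded in dev->priv and all
 * CQs are expected to have been destroyed by the time this is called.
 */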
void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev)
{
}