/* mlx5_srq.c revision 290650 */
1/*-
2 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 *    notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 *    notice, this list of conditions and the following disclaimer in the
11 *    documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23 * SUCH DAMAGE.
24 *
25 * $FreeBSD: head/sys/dev/mlx5/mlx5_core/mlx5_srq.c 290650 2015-11-10 12:20:22Z hselasky $
26 */
27
28#include <linux/kernel.h>
29#include <linux/module.h>
30#include <dev/mlx5/driver.h>
31#include <dev/mlx5/srq.h>
32#include <rdma/ib_verbs.h>
33#include "mlx5_core.h"
34#include "transobj.h"
35
36void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type)
37{
38	struct mlx5_srq_table *table = &dev->priv.srq_table;
39	struct mlx5_core_srq *srq;
40
41	spin_lock(&table->lock);
42
43	srq = radix_tree_lookup(&table->tree, srqn);
44	if (srq)
45		atomic_inc(&srq->refcount);
46
47	spin_unlock(&table->lock);
48
49	if (!srq) {
50		mlx5_core_warn(dev, "Async event for bogus SRQ 0x%08x\n", srqn);
51		return;
52	}
53
54	srq->event(srq, event_type);
55
56	if (atomic_dec_and_test(&srq->refcount))
57		complete(&srq->free);
58}
59
/*
 * Translate between the SRQ context (srqc) and the RMP context (rmpc)
 * hardware layouts.  On ISSI firmware a plain SRQ is backed by a receive
 * memory pool (RMP), so the driver must convert in both directions:
 * srqc_to_rmpc == true turns a CREATE_SRQ context into CREATE_RMP form,
 * false turns a queried RMP context back into SRQ form.
 */
static void rmpc_srqc_reformat(void *srqc, void *rmpc, bool srqc_to_rmpc)
{
	void *wq = MLX5_ADDR_OF(rmpc, rmpc, wq);

	if (srqc_to_rmpc) {
		/* Map the SRQ state onto the equivalent RMP state. */
		switch (MLX5_GET(srqc, srqc, state)) {
		case MLX5_SRQC_STATE_GOOD:
			MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
			break;
		case MLX5_SRQC_STATE_ERROR:
			MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_ERR);
			break;
		default:
			printf("mlx5_core: WARN: ""%s: %d: Unknown srq state = 0x%x\n", __func__, __LINE__, MLX5_GET(srqc, srqc, state));
		}

		/* log_wq_stride is in bytes; srqc counts 16-byte units, hence +4. */
		MLX5_SET(wq,   wq, wq_signature,  MLX5_GET(srqc, srqc, wq_signature));
		MLX5_SET(wq,   wq, log_wq_pg_sz,  MLX5_GET(srqc, srqc, log_page_size));
		MLX5_SET(wq,   wq, log_wq_stride, MLX5_GET(srqc, srqc, log_rq_stride) + 4);
		MLX5_SET(wq,   wq, log_wq_sz,     MLX5_GET(srqc, srqc, log_srq_size));
		MLX5_SET(wq,   wq, page_offset,   MLX5_GET(srqc, srqc, page_offset));
		MLX5_SET(wq,   wq, lwm,           MLX5_GET(srqc, srqc, lwm));
		MLX5_SET(wq,   wq, pd,            MLX5_GET(srqc, srqc, pd));
		/* Reassemble the 64-bit doorbell address from its split srqc fields;
		 * the low part is stored shifted right by 2 in srqc. */
		MLX5_SET64(wq, wq, dbr_addr,
			   ((u64)MLX5_GET(srqc, srqc, db_record_addr_h)) << 32 |
			   ((u64)MLX5_GET(srqc, srqc, db_record_addr_l)) << 2);
	} else {
		/* Map the RMP state back onto the equivalent SRQ state. */
		switch (MLX5_GET(rmpc, rmpc, state)) {
		case MLX5_RMPC_STATE_RDY:
			MLX5_SET(srqc, srqc, state, MLX5_SRQC_STATE_GOOD);
			break;
		case MLX5_RMPC_STATE_ERR:
			MLX5_SET(srqc, srqc, state, MLX5_SRQC_STATE_ERROR);
			break;
		default:
			printf("mlx5_core: WARN: ""%s: %d: Unknown rmp state = 0x%x\n", __func__, __LINE__, MLX5_GET(rmpc, rmpc, state));
		}

		/* Inverse of the conversion above, including the -4 stride shift
		 * and the doorbell-address split (high 32 bits / bits 31..2). */
		MLX5_SET(srqc, srqc, wq_signature,     MLX5_GET(wq,   wq, wq_signature));
		MLX5_SET(srqc, srqc, log_page_size,    MLX5_GET(wq,   wq, log_wq_pg_sz));
		MLX5_SET(srqc, srqc, log_rq_stride,    MLX5_GET(wq,   wq, log_wq_stride) - 4);
		MLX5_SET(srqc, srqc, log_srq_size,     MLX5_GET(wq,   wq, log_wq_sz));
		MLX5_SET(srqc, srqc, page_offset,      MLX5_GET(wq,   wq, page_offset));
		MLX5_SET(srqc, srqc, lwm,	       MLX5_GET(wq,   wq, lwm));
		MLX5_SET(srqc, srqc, pd,	       MLX5_GET(wq,   wq, pd));
		MLX5_SET(srqc, srqc, db_record_addr_h, MLX5_GET64(wq, wq, dbr_addr) >> 32);
		MLX5_SET(srqc, srqc, db_record_addr_l, (MLX5_GET64(wq, wq, dbr_addr) >> 2) & 0x3fffffff);
	}
}
109
110struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn)
111{
112	struct mlx5_srq_table *table = &dev->priv.srq_table;
113	struct mlx5_core_srq *srq;
114
115	spin_lock(&table->lock);
116
117	srq = radix_tree_lookup(&table->tree, srqn);
118	if (srq)
119		atomic_inc(&srq->refcount);
120
121	spin_unlock(&table->lock);
122
123	return srq;
124}
125EXPORT_SYMBOL(mlx5_core_get_srq);
126
127static int get_pas_size(void *srqc)
128{
129	u32 log_page_size = MLX5_GET(srqc, srqc, log_page_size) + 12;
130	u32 log_srq_size  = MLX5_GET(srqc, srqc, log_srq_size);
131	u32 log_rq_stride = MLX5_GET(srqc, srqc, log_rq_stride);
132	u32 page_offset   = MLX5_GET(srqc, srqc, page_offset);
133	u32 po_quanta	  = 1 << (log_page_size - 6);
134	u32 rq_sz	  = 1 << (log_srq_size + 4 + log_rq_stride);
135	u32 page_size	  = 1 << log_page_size;
136	u32 rq_sz_po      = rq_sz + (page_offset * po_quanta);
137	u32 rq_num_pas	  = (rq_sz_po + page_size - 1) / page_size;
138
139	return rq_num_pas * sizeof(u64);
140
141}
142
143static int create_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
144			  struct mlx5_create_srq_mbox_in *in, int srq_inlen)
145{
146	void *create_in;
147	void *rmpc;
148	void *srqc;
149	int pas_size;
150	int inlen;
151	int err;
152
153	srqc = MLX5_ADDR_OF(create_srq_in, in, srq_context_entry);
154	pas_size = get_pas_size(srqc);
155	inlen = MLX5_ST_SZ_BYTES(create_rmp_in) + pas_size;
156	create_in = mlx5_vzalloc(inlen);
157	if (!create_in)
158		return -ENOMEM;
159
160	rmpc = MLX5_ADDR_OF(create_rmp_in, create_in, ctx);
161
162	memcpy(MLX5_ADDR_OF(rmpc, rmpc, wq.pas), in->pas, pas_size);
163	rmpc_srqc_reformat(srqc, rmpc, true);
164
165	err = mlx5_core_create_rmp(dev, create_in, inlen, &srq->srqn);
166
167	kvfree(create_in);
168	return err;
169}
170
171static int destroy_rmp_cmd(struct mlx5_core_dev *dev,
172			    struct mlx5_core_srq *srq)
173{
174	return mlx5_core_destroy_rmp(dev, srq->srqn);
175}
176
177static int query_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
178			 struct mlx5_query_srq_mbox_out *out)
179{
180	u32 *rmp_out;
181	void *rmpc;
182	void *srqc;
183	int err;
184
185	rmp_out =  mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_rmp_out));
186	if (!rmp_out)
187		return -ENOMEM;
188
189	err = mlx5_core_query_rmp(dev, srq->srqn, rmp_out);
190	if (err)
191		goto out;
192
193	srqc = MLX5_ADDR_OF(query_srq_out, out,	    srq_context_entry);
194	rmpc = MLX5_ADDR_OF(query_rmp_out, rmp_out, rmp_context);
195	rmpc_srqc_reformat(srqc, rmpc, false);
196
197out:
198	kvfree(rmp_out);
199	return 0;
200}
201
202static int arm_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, u16 lwm)
203{
204	return mlx5_core_arm_rmp(dev, srq->srqn, lwm);
205}
206
207static int create_xrc_srq_cmd(struct mlx5_core_dev *dev,
208			      struct mlx5_core_srq *srq,
209			      struct mlx5_create_srq_mbox_in *in,
210			      int srq_inlen)
211{
212	void *create_in;
213	void *srqc;
214	void *xrc_srqc;
215	void *pas;
216	int pas_size;
217	int inlen;
218	int err;
219
220	srqc	  = MLX5_ADDR_OF(create_srq_in, in, srq_context_entry);
221	pas_size  = get_pas_size(srqc);
222	inlen	  = MLX5_ST_SZ_BYTES(create_xrc_srq_in) + pas_size;
223	create_in = mlx5_vzalloc(inlen);
224	if (!create_in)
225		return -ENOMEM;
226
227	xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, create_in, xrc_srq_context_entry);
228	pas	 = MLX5_ADDR_OF(create_xrc_srq_in, create_in, pas);
229
230	memcpy(xrc_srqc, srqc, MLX5_ST_SZ_BYTES(srqc));
231	memcpy(pas, in->pas, pas_size);
232	/* 0xffffff means we ask to work with cqe version 0 */
233	MLX5_SET(xrc_srqc, xrc_srqc,  user_index, 0xffffff);
234
235	err = mlx5_core_create_xsrq(dev, create_in, inlen, &srq->srqn);
236	if (err)
237		goto out;
238
239out:
240	kvfree(create_in);
241	return err;
242}
243
244static int destroy_xrc_srq_cmd(struct mlx5_core_dev *dev,
245			       struct mlx5_core_srq *srq)
246{
247	return mlx5_core_destroy_xsrq(dev, srq->srqn);
248}
249
250static int query_xrc_srq_cmd(struct mlx5_core_dev *dev,
251			     struct mlx5_core_srq *srq,
252			     struct mlx5_query_srq_mbox_out *out)
253{
254	u32 *xrcsrq_out;
255	int err;
256
257	xrcsrq_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_xrc_srq_out));
258	if (!xrcsrq_out)
259		return -ENOMEM;
260
261	err = mlx5_core_query_xsrq(dev, srq->srqn, xrcsrq_out);
262	if (err)
263		goto out;
264
265out:
266	kvfree(xrcsrq_out);
267	return err;
268}
269
270static int arm_xrc_srq_cmd(struct mlx5_core_dev *dev,
271			   struct mlx5_core_srq *srq, u16 lwm)
272{
273	return mlx5_core_arm_xsrq(dev, srq->srqn, lwm);
274}
275
276static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
277			  struct mlx5_create_srq_mbox_in *in, int inlen)
278{
279	struct mlx5_create_srq_mbox_out out;
280	int err;
281
282	memset(&out, 0, sizeof(out));
283
284	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_SRQ);
285
286	err = mlx5_cmd_exec_check_status(dev, (u32 *)in, inlen, (u32 *)(&out), sizeof(out));
287
288	srq->srqn = be32_to_cpu(out.srqn) & 0xffffff;
289
290	return err;
291}
292
293static int destroy_srq_cmd(struct mlx5_core_dev *dev,
294			   struct mlx5_core_srq *srq)
295{
296	struct mlx5_destroy_srq_mbox_in in;
297	struct mlx5_destroy_srq_mbox_out out;
298
299	memset(&in, 0, sizeof(in));
300	memset(&out, 0, sizeof(out));
301	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_SRQ);
302	in.srqn = cpu_to_be32(srq->srqn);
303
304	return mlx5_cmd_exec_check_status(dev, (u32 *)(&in), sizeof(in), (u32 *)(&out), sizeof(out));
305}
306
307static int query_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
308			 struct mlx5_query_srq_mbox_out *out)
309{
310	struct mlx5_query_srq_mbox_in in;
311
312	memset(&in, 0, sizeof(in));
313
314	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SRQ);
315	in.srqn = cpu_to_be32(srq->srqn);
316
317	return mlx5_cmd_exec_check_status(dev, (u32 *)(&in), sizeof(in), (u32 *)out, sizeof(*out));
318}
319
320static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
321		       u16 lwm, int is_srq)
322{
323	struct mlx5_arm_srq_mbox_in	in;
324	struct mlx5_arm_srq_mbox_out	out;
325
326	memset(&in, 0, sizeof(in));
327	memset(&out, 0, sizeof(out));
328
329	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ARM_RQ);
330	in.hdr.opmod = cpu_to_be16(!!is_srq);
331	in.srqn = cpu_to_be32(srq->srqn);
332	in.lwm = cpu_to_be16(lwm);
333
334	return mlx5_cmd_exec_check_status(dev, (u32 *)(&in), sizeof(in), (u32 *)(&out), sizeof(out));
335}
336
/*
 * Create the SRQ with whichever firmware object model applies:
 * pre-ISSI firmware has a native SRQ command, while on ISSI firmware an
 * XRC SRQ is backed by an XSRQ and a plain SRQ by an RMP.
 *
 * NOTE(review): is_xrc is unused here; the dispatch is driven by
 * srq->common.res, which mlx5_core_create_srq() sets from is_xrc before
 * calling in.  The parameter is kept for interface compatibility.
 */
static int create_srq_split(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			    struct mlx5_create_srq_mbox_in *in, int inlen,
			    int is_xrc)
{
	if (!dev->issi)
		return create_srq_cmd(dev, srq, in, inlen);
	else if (srq->common.res == MLX5_RES_XSRQ)
		return create_xrc_srq_cmd(dev, srq, in, inlen);
	else
		return create_rmp_cmd(dev, srq, in, inlen);
}
348
349static int destroy_srq_split(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
350{
351	if (!dev->issi)
352		return destroy_srq_cmd(dev, srq);
353	else if (srq->common.res == MLX5_RES_XSRQ)
354		return destroy_xrc_srq_cmd(dev, srq);
355	else
356		return destroy_rmp_cmd(dev, srq);
357}
358
/*
 * Create an SRQ (or XRC SRQ when is_xrc is set) and register it in the
 * per-device SRQ table so async events and lookups can find it.
 *
 * On success the SRQ starts with one reference (dropped by
 * mlx5_core_destroy_srq()) and srq->srqn holds the firmware-assigned
 * number.  Returns 0 or a negative errno; on radix-tree insertion
 * failure the firmware object is destroyed again before returning.
 */
int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			 struct mlx5_create_srq_mbox_in *in, int inlen,
			 int is_xrc)
{
	int err;
	struct mlx5_srq_table *table = &dev->priv.srq_table;

	/* Record which firmware object model backs this SRQ; the
	 * *_split() dispatchers key off this field. */
	srq->common.res = is_xrc ? MLX5_RES_XSRQ : MLX5_RES_SRQ;

	err = create_srq_split(dev, srq, in, inlen, is_xrc);
	if (err)
		return err;

	/* Initial reference; "free" completes when the last ref drops. */
	atomic_set(&srq->refcount, 1);
	init_completion(&srq->free);

	spin_lock_irq(&table->lock);
	err = radix_tree_insert(&table->tree, srq->srqn, srq);
	spin_unlock_irq(&table->lock);
	if (err) {
		mlx5_core_warn(dev, "err %d, srqn 0x%x\n", err, srq->srqn);
		goto err_destroy_srq_split;
	}

	return 0;

err_destroy_srq_split:
	destroy_srq_split(dev, srq);

	return err;
}
EXPORT_SYMBOL(mlx5_core_create_srq);
391
/*
 * Destroy an SRQ: unlink it from the table, tear down the firmware
 * object, then wait for all outstanding references (taken by
 * mlx5_srq_event()/mlx5_core_get_srq()) to drain before returning.
 *
 * Returns 0 on success, -EINVAL if the SRQ is not in the table (or the
 * table holds a different object under that number), or the error from
 * the firmware destroy command.
 */
int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
{
	struct mlx5_srq_table *table = &dev->priv.srq_table;
	struct mlx5_core_srq *tmp;
	int err;

	/* Remove from the table first so no new references can be taken. */
	spin_lock_irq(&table->lock);
	tmp = radix_tree_delete(&table->tree, srq->srqn);
	spin_unlock_irq(&table->lock);
	if (!tmp) {
		mlx5_core_warn(dev, "srq 0x%x not found in tree\n", srq->srqn);
		return -EINVAL;
	}
	if (tmp != srq) {
		mlx5_core_warn(dev, "corruption on srqn 0x%x\n", srq->srqn);
		return -EINVAL;
	}

	err = destroy_srq_split(dev, srq);
	if (err)
		return err;

	/* Drop the initial reference from create; then block until every
	 * transient reference holder has dropped theirs and completed. */
	if (atomic_dec_and_test(&srq->refcount))
		complete(&srq->free);
	wait_for_completion(&srq->free);

	return 0;
}
EXPORT_SYMBOL(mlx5_core_destroy_srq);
421
422int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
423			struct mlx5_query_srq_mbox_out *out)
424{
425	if (!dev->issi)
426		return query_srq_cmd(dev, srq, out);
427	else if (srq->common.res == MLX5_RES_XSRQ)
428		return query_xrc_srq_cmd(dev, srq, out);
429	else
430		return query_rmp_cmd(dev, srq, out);
431}
432EXPORT_SYMBOL(mlx5_core_query_srq);
433
434int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
435		      u16 lwm, int is_srq)
436{
437	if (!dev->issi)
438		return arm_srq_cmd(dev, srq, lwm, is_srq);
439	else if (srq->common.res == MLX5_RES_XSRQ)
440		return arm_xrc_srq_cmd(dev, srq, lwm);
441	else
442		return arm_rmp_cmd(dev, srq, lwm);
443}
444EXPORT_SYMBOL(mlx5_core_arm_srq);
445
446void mlx5_init_srq_table(struct mlx5_core_dev *dev)
447{
448	struct mlx5_srq_table *table = &dev->priv.srq_table;
449
450	spin_lock_init(&table->lock);
451	INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
452}
453
/*
 * Counterpart of mlx5_init_srq_table().  Intentionally empty: the table
 * owns no resources of its own — presumably all SRQs have already been
 * destroyed by their owners at this point (TODO confirm with callers).
 */
void mlx5_cleanup_srq_table(struct mlx5_core_dev *dev)
{
	/* nothing */
}
458