mlx5_srq.c revision 306233
1/*-
2 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 *    notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 *    notice, this list of conditions and the following disclaimer in the
11 *    documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23 * SUCH DAMAGE.
24 *
25 * $FreeBSD: stable/11/sys/dev/mlx5/mlx5_core/mlx5_srq.c 306233 2016-09-23 08:17:51Z hselasky $
26 */
27
28#include <linux/kernel.h>
29#include <linux/module.h>
30#include <dev/mlx5/driver.h>
31#include <dev/mlx5/srq.h>
32#include <rdma/ib_verbs.h>
33#include "mlx5_core.h"
34#include "transobj.h"
35
/*
 * Dispatch an asynchronous firmware event to the SRQ it targets.
 *
 * Looks up the SRQ by number under the table lock and takes a reference
 * so the SRQ cannot be freed while its event callback runs.  Events for
 * unknown SRQ numbers are logged and dropped.
 */
void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type)
{
	struct mlx5_srq_table *table = &dev->priv.srq_table;
	struct mlx5_core_srq *srq;

	spin_lock(&table->lock);

	srq = radix_tree_lookup(&table->tree, srqn);
	if (srq)
		atomic_inc(&srq->refcount);

	spin_unlock(&table->lock);

	if (!srq) {
		mlx5_core_warn(dev, "Async event for bogus SRQ 0x%08x\n", srqn);
		return;
	}

	srq->event(srq, event_type);

	/*
	 * Drop our reference; if it was the last one, wake the thread
	 * blocked in mlx5_core_destroy_srq() waiting on srq->free.
	 */
	if (atomic_dec_and_test(&srq->refcount))
		complete(&srq->free);
}
59
/*
 * Translate between the SRQ context (srqc) and RMP context (rmpc)
 * firmware layouts.  When srqc_to_rmpc is true, srqc fields are copied
 * into rmpc (used when emulating an SRQ with an RMP on ISSI > 0
 * firmware); otherwise the translation runs in reverse for query.
 * Note the stride conversion: wq.log_wq_stride = srqc.log_rq_stride + 4.
 */
static void rmpc_srqc_reformat(void *srqc, void *rmpc, bool srqc_to_rmpc)
{
	void *wq = MLX5_ADDR_OF(rmpc, rmpc, wq);

	if (srqc_to_rmpc) {
		switch (MLX5_GET(srqc, srqc, state)) {
		case MLX5_SRQC_STATE_GOOD:
			MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
			break;
		case MLX5_SRQC_STATE_ERROR:
			MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_ERR);
			break;
		default:
			printf("mlx5_core: WARN: ""%s: %d: Unknown srq state = 0x%x\n", __func__, __LINE__, MLX5_GET(srqc, srqc, state));
		}

		MLX5_SET(wq,   wq, wq_signature,  MLX5_GET(srqc, srqc, wq_signature));
		MLX5_SET(wq,   wq, log_wq_pg_sz,  MLX5_GET(srqc, srqc, log_page_size));
		MLX5_SET(wq,   wq, log_wq_stride, MLX5_GET(srqc, srqc, log_rq_stride) + 4);
		MLX5_SET(wq,   wq, log_wq_sz,     MLX5_GET(srqc, srqc, log_srq_size));
		MLX5_SET(wq,   wq, page_offset,   MLX5_GET(srqc, srqc, page_offset));
		MLX5_SET(wq,   wq, lwm,           MLX5_GET(srqc, srqc, lwm));
		MLX5_SET(wq,   wq, pd,            MLX5_GET(srqc, srqc, pd));
		/* srqc splits the doorbell record address into high/low parts;
		 * the low part is stored shifted right by 2 bits. */
		MLX5_SET64(wq, wq, dbr_addr,
			   ((u64)MLX5_GET(srqc, srqc, db_record_addr_h)) << 32 |
			   ((u64)MLX5_GET(srqc, srqc, db_record_addr_l)) << 2);
	} else {
		switch (MLX5_GET(rmpc, rmpc, state)) {
		case MLX5_RMPC_STATE_RDY:
			MLX5_SET(srqc, srqc, state, MLX5_SRQC_STATE_GOOD);
			break;
		case MLX5_RMPC_STATE_ERR:
			MLX5_SET(srqc, srqc, state, MLX5_SRQC_STATE_ERROR);
			break;
		default:
			printf("mlx5_core: WARN: ""%s: %d: Unknown rmp state = 0x%x\n", __func__, __LINE__, MLX5_GET(rmpc, rmpc, state));
		}

		MLX5_SET(srqc, srqc, wq_signature,     MLX5_GET(wq,   wq, wq_signature));
		MLX5_SET(srqc, srqc, log_page_size,    MLX5_GET(wq,   wq, log_wq_pg_sz));
		MLX5_SET(srqc, srqc, log_rq_stride,    MLX5_GET(wq,   wq, log_wq_stride) - 4);
		MLX5_SET(srqc, srqc, log_srq_size,     MLX5_GET(wq,   wq, log_wq_sz));
		MLX5_SET(srqc, srqc, page_offset,      MLX5_GET(wq,   wq, page_offset));
		MLX5_SET(srqc, srqc, lwm,	       MLX5_GET(wq,   wq, lwm));
		MLX5_SET(srqc, srqc, pd,	       MLX5_GET(wq,   wq, pd));
		MLX5_SET(srqc, srqc, db_record_addr_h, MLX5_GET64(wq, wq, dbr_addr) >> 32);
		MLX5_SET(srqc, srqc, db_record_addr_l, (MLX5_GET64(wq, wq, dbr_addr) >> 2) & 0x3fffffff);
	}
}
109
110struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn)
111{
112	struct mlx5_srq_table *table = &dev->priv.srq_table;
113	struct mlx5_core_srq *srq;
114
115	spin_lock(&table->lock);
116
117	srq = radix_tree_lookup(&table->tree, srqn);
118	if (srq)
119		atomic_inc(&srq->refcount);
120
121	spin_unlock(&table->lock);
122
123	return srq;
124}
125EXPORT_SYMBOL(mlx5_core_get_srq);
126
127static int get_pas_size(void *srqc)
128{
129	u32 log_page_size = MLX5_GET(srqc, srqc, log_page_size) + 12;
130	u32 log_srq_size  = MLX5_GET(srqc, srqc, log_srq_size);
131	u32 log_rq_stride = MLX5_GET(srqc, srqc, log_rq_stride);
132	u32 page_offset   = MLX5_GET(srqc, srqc, page_offset);
133	u32 po_quanta	  = 1 << (log_page_size - 6);
134	u32 rq_sz	  = 1 << (log_srq_size + 4 + log_rq_stride);
135	u32 page_size	  = 1 << log_page_size;
136	u32 rq_sz_po      = rq_sz + (page_offset * po_quanta);
137	u32 rq_num_pas	  = (rq_sz_po + page_size - 1) / page_size;
138
139	return rq_num_pas * sizeof(u64);
140
141}
142
/*
 * Create an RMP (receive memory pool) to back an SRQ on ISSI > 0
 * firmware.  Translates the caller's SRQ-format create mailbox into
 * the RMP create command layout and issues it; the resulting RMP
 * number is stored in srq->srqn.
 */
static int create_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			  struct mlx5_create_srq_mbox_in *in, int srq_inlen)
{
	void *create_in;
	void *rmpc;
	void *srqc;
	int pas_size;
	int inlen;
	int err;

	srqc = MLX5_ADDR_OF(create_srq_in, in, srq_context_entry);
	pas_size = get_pas_size(srqc);
	inlen = MLX5_ST_SZ_BYTES(create_rmp_in) + pas_size;
	create_in = mlx5_vzalloc(inlen);
	if (!create_in)
		return -ENOMEM;

	rmpc = MLX5_ADDR_OF(create_rmp_in, create_in, ctx);

	/* Copy the page list verbatim, then translate srqc -> rmpc. */
	memcpy(MLX5_ADDR_OF(rmpc, rmpc, wq.pas), in->pas, pas_size);
	rmpc_srqc_reformat(srqc, rmpc, true);

	err = mlx5_core_create_rmp(dev, create_in, inlen, &srq->srqn);

	kvfree(create_in);
	return err;
}
170
/* Destroy the RMP backing this SRQ (ISSI > 0 path). */
static int destroy_rmp_cmd(struct mlx5_core_dev *dev,
			    struct mlx5_core_srq *srq)
{
	return mlx5_core_destroy_rmp(dev, srq->srqn);
}
176
177static int query_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
178			 struct mlx5_query_srq_mbox_out *out)
179{
180	u32 *rmp_out;
181	void *rmpc;
182	void *srqc;
183	int err;
184
185	rmp_out =  mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_rmp_out));
186	if (!rmp_out)
187		return -ENOMEM;
188
189	err = mlx5_core_query_rmp(dev, srq->srqn, rmp_out);
190	if (err)
191		goto out;
192
193	srqc = MLX5_ADDR_OF(query_srq_out, out,	    srq_context_entry);
194	rmpc = MLX5_ADDR_OF(query_rmp_out, rmp_out, rmp_context);
195	rmpc_srqc_reformat(srqc, rmpc, false);
196
197out:
198	kvfree(rmp_out);
199	return 0;
200}
201
/* Arm the RMP backing this SRQ with a new limit watermark (ISSI > 0 path). */
static int arm_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, u16 lwm)
{
	return mlx5_core_arm_rmp(dev, srq->srqn, lwm);
}
206
207static int create_xrc_srq_cmd(struct mlx5_core_dev *dev,
208			      struct mlx5_core_srq *srq,
209			      struct mlx5_create_srq_mbox_in *in,
210			      int srq_inlen)
211{
212	void *create_in;
213	void *srqc;
214	void *xrc_srqc;
215	void *pas;
216	int pas_size;
217	int inlen;
218	int err;
219
220	srqc	  = MLX5_ADDR_OF(create_srq_in, in, srq_context_entry);
221	pas_size  = get_pas_size(srqc);
222	inlen	  = MLX5_ST_SZ_BYTES(create_xrc_srq_in) + pas_size;
223	create_in = mlx5_vzalloc(inlen);
224	if (!create_in)
225		return -ENOMEM;
226
227	xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, create_in, xrc_srq_context_entry);
228	pas	 = MLX5_ADDR_OF(create_xrc_srq_in, create_in, pas);
229
230	memcpy(xrc_srqc, srqc, MLX5_ST_SZ_BYTES(srqc));
231	memcpy(pas, in->pas, pas_size);
232
233	err = mlx5_core_create_xsrq(dev, create_in, inlen, &srq->srqn);
234	if (err)
235		goto out;
236
237out:
238	kvfree(create_in);
239	return err;
240}
241
/* Destroy the XRC SRQ (ISSI > 0 path). */
static int destroy_xrc_srq_cmd(struct mlx5_core_dev *dev,
			       struct mlx5_core_srq *srq)
{
	return mlx5_core_destroy_xsrq(dev, srq->srqn);
}
247
248static int query_xrc_srq_cmd(struct mlx5_core_dev *dev,
249			     struct mlx5_core_srq *srq,
250			     struct mlx5_query_srq_mbox_out *out)
251{
252	u32 *xrcsrq_out;
253	int err;
254
255	xrcsrq_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_xrc_srq_out));
256	if (!xrcsrq_out)
257		return -ENOMEM;
258
259	err = mlx5_core_query_xsrq(dev, srq->srqn, xrcsrq_out);
260	if (err)
261		goto out;
262
263out:
264	kvfree(xrcsrq_out);
265	return err;
266}
267
/* Arm the XRC SRQ with a new limit watermark (ISSI > 0 path). */
static int arm_xrc_srq_cmd(struct mlx5_core_dev *dev,
			   struct mlx5_core_srq *srq, u16 lwm)
{
	return mlx5_core_arm_xsrq(dev, srq->srqn, lwm);
}
273
274static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
275			  struct mlx5_create_srq_mbox_in *in, int inlen)
276{
277	struct mlx5_create_srq_mbox_out out;
278	int err;
279
280	memset(&out, 0, sizeof(out));
281
282	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_SRQ);
283
284	err = mlx5_cmd_exec_check_status(dev, (u32 *)in, inlen, (u32 *)(&out), sizeof(out));
285
286	srq->srqn = be32_to_cpu(out.srqn) & 0xffffff;
287
288	return err;
289}
290
291static int destroy_srq_cmd(struct mlx5_core_dev *dev,
292			   struct mlx5_core_srq *srq)
293{
294	struct mlx5_destroy_srq_mbox_in in;
295	struct mlx5_destroy_srq_mbox_out out;
296
297	memset(&in, 0, sizeof(in));
298	memset(&out, 0, sizeof(out));
299	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_SRQ);
300	in.srqn = cpu_to_be32(srq->srqn);
301
302	return mlx5_cmd_exec_check_status(dev, (u32 *)(&in), sizeof(in), (u32 *)(&out), sizeof(out));
303}
304
305static int query_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
306			 struct mlx5_query_srq_mbox_out *out)
307{
308	struct mlx5_query_srq_mbox_in in;
309
310	memset(&in, 0, sizeof(in));
311
312	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SRQ);
313	in.srqn = cpu_to_be32(srq->srqn);
314
315	return mlx5_cmd_exec_check_status(dev, (u32 *)(&in), sizeof(in), (u32 *)out, sizeof(*out));
316}
317
318static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
319		       u16 lwm, int is_srq)
320{
321	struct mlx5_arm_srq_mbox_in	in;
322	struct mlx5_arm_srq_mbox_out	out;
323
324	memset(&in, 0, sizeof(in));
325	memset(&out, 0, sizeof(out));
326
327	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ARM_RQ);
328	in.hdr.opmod = cpu_to_be16(!!is_srq);
329	in.srqn = cpu_to_be32(srq->srqn);
330	in.lwm = cpu_to_be16(lwm);
331
332	return mlx5_cmd_exec_check_status(dev, (u32 *)(&in), sizeof(in), (u32 *)(&out), sizeof(out));
333}
334
/*
 * Select the create path: legacy SRQ command on ISSI == 0 firmware,
 * otherwise XRC SRQ or RMP depending on the resource type the caller
 * already stored in srq->common.res.
 * Note: the is_xrc parameter is currently unused here; the decision is
 * driven by srq->common.res (set in mlx5_core_create_srq()).
 */
static int create_srq_split(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			    struct mlx5_create_srq_mbox_in *in, int inlen,
			    int is_xrc)
{
	if (!dev->issi)
		return create_srq_cmd(dev, srq, in, inlen);
	else if (srq->common.res == MLX5_RES_XSRQ)
		return create_xrc_srq_cmd(dev, srq, in, inlen);
	else
		return create_rmp_cmd(dev, srq, in, inlen);
}
346
/*
 * Select the destroy path matching create_srq_split(): legacy SRQ
 * command on ISSI == 0 firmware, otherwise XRC SRQ or RMP per
 * srq->common.res.
 */
static int destroy_srq_split(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
{
	if (!dev->issi)
		return destroy_srq_cmd(dev, srq);
	else if (srq->common.res == MLX5_RES_XSRQ)
		return destroy_xrc_srq_cmd(dev, srq);
	else
		return destroy_rmp_cmd(dev, srq);
}
356
/*
 * Create an SRQ (or XRC SRQ when is_xrc is set) and register it in the
 * per-device SRQ table so events and lookups can find it.
 *
 * On success the SRQ holds one reference, released by
 * mlx5_core_destroy_srq().  Returns 0 or a negative errno; on radix
 * tree insertion failure the freshly created hardware object is torn
 * down again before returning.
 */
int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			 struct mlx5_create_srq_mbox_in *in, int inlen,
			 int is_xrc)
{
	int err;
	struct mlx5_srq_table *table = &dev->priv.srq_table;

	/* Resource type drives the command selection in *_srq_split(). */
	srq->common.res = is_xrc ? MLX5_RES_XSRQ : MLX5_RES_SRQ;

	err = create_srq_split(dev, srq, in, inlen, is_xrc);
	if (err)
		return err;

	atomic_set(&srq->refcount, 1);
	init_completion(&srq->free);

	spin_lock_irq(&table->lock);
	err = radix_tree_insert(&table->tree, srq->srqn, srq);
	spin_unlock_irq(&table->lock);
	if (err) {
		mlx5_core_warn(dev, "err %d, srqn 0x%x\n", err, srq->srqn);
		goto err_destroy_srq_split;
	}

	return 0;

err_destroy_srq_split:
	destroy_srq_split(dev, srq);

	return err;
}
389
/*
 * Unregister and destroy an SRQ.
 *
 * The SRQ is first removed from the radix tree so no new references can
 * be taken, then the hardware object is destroyed, and finally we drop
 * the creation reference and wait for any in-flight event handlers
 * (which hold their own references, see mlx5_srq_event()) to finish.
 * Returns 0, or -EINVAL if the SRQ is not registered / the table entry
 * does not match.
 */
int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
{
	struct mlx5_srq_table *table = &dev->priv.srq_table;
	struct mlx5_core_srq *tmp;
	int err;

	spin_lock_irq(&table->lock);
	tmp = radix_tree_delete(&table->tree, srq->srqn);
	spin_unlock_irq(&table->lock);
	if (!tmp) {
		mlx5_core_warn(dev, "srq 0x%x not found in tree\n", srq->srqn);
		return -EINVAL;
	}
	if (tmp != srq) {
		mlx5_core_warn(dev, "corruption on srqn 0x%x\n", srq->srqn);
		return -EINVAL;
	}

	err = destroy_srq_split(dev, srq);
	if (err)
		return err;

	/* Drop the creation reference and wait for all other holders. */
	if (atomic_dec_and_test(&srq->refcount))
		complete(&srq->free);
	wait_for_completion(&srq->free);

	return 0;
}
EXPORT_SYMBOL(mlx5_core_destroy_srq);
419
/*
 * Query an SRQ's current context into *out, dispatching to the legacy,
 * XRC SRQ, or RMP query path per firmware ISSI and resource type.
 */
int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
			struct mlx5_query_srq_mbox_out *out)
{
	if (!dev->issi)
		return query_srq_cmd(dev, srq, out);
	else if (srq->common.res == MLX5_RES_XSRQ)
		return query_xrc_srq_cmd(dev, srq, out);
	else
		return query_rmp_cmd(dev, srq, out);
}
EXPORT_SYMBOL(mlx5_core_query_srq);
431
/*
 * Arm an SRQ with limit watermark @lwm, dispatching to the legacy,
 * XRC SRQ, or RMP arm path per firmware ISSI and resource type.
 */
int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
		      u16 lwm, int is_srq)
{
	if (!dev->issi)
		return arm_srq_cmd(dev, srq, lwm, is_srq);
	else if (srq->common.res == MLX5_RES_XSRQ)
		return arm_xrc_srq_cmd(dev, srq, lwm);
	else
		return arm_rmp_cmd(dev, srq, lwm);
}
EXPORT_SYMBOL(mlx5_core_arm_srq);
443
/* Initialize the per-device SRQ table (lock + srqn -> srq radix tree). */
void mlx5_init_srq_table(struct mlx5_core_dev *dev)
{
	struct mlx5_srq_table *table = &dev->priv.srq_table;

	spin_lock_init(&table->lock);
	INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
}
451
/*
 * Tear down the per-device SRQ table.  Nothing to release: entries are
 * removed individually by mlx5_core_destroy_srq() and the radix tree
 * itself needs no explicit destruction.
 */
void mlx5_cleanup_srq_table(struct mlx5_core_dev *dev)
{
	/* nothing */
}
456