1/*-
2 * Copyright (c) 2013-2017, Mellanox Technologies, Ltd.  All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 *    notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 *    notice, this list of conditions and the following disclaimer in the
11 *    documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23 * SUCH DAMAGE.
24 *
25 * $FreeBSD: stable/11/sys/dev/mlx5/mlx5_core/mlx5_qp.c 368220 2020-12-01 12:45:47Z hselasky $
26 */
27
28
29#include <linux/gfp.h>
30#include <dev/mlx5/qp.h>
31#include <dev/mlx5/driver.h>
32
33#include "mlx5_core.h"
34
35#include "transobj.h"
36
37static struct mlx5_core_rsc_common *mlx5_get_rsc(struct mlx5_core_dev *dev,
38						 u32 rsn)
39{
40	struct mlx5_qp_table *table = &dev->priv.qp_table;
41	struct mlx5_core_rsc_common *common;
42
43	spin_lock(&table->lock);
44
45	common = radix_tree_lookup(&table->tree, rsn);
46	if (common)
47		atomic_inc(&common->refcount);
48
49	spin_unlock(&table->lock);
50
51	if (!common) {
52		mlx5_core_warn(dev, "Async event for bogus resource 0x%x\n",
53			       rsn);
54		return NULL;
55	}
56	return common;
57}
58
59void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common)
60{
61	if (atomic_dec_and_test(&common->refcount))
62		complete(&common->free);
63}
64
65void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type)
66{
67	struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, rsn);
68	struct mlx5_core_qp *qp;
69
70	if (!common)
71		return;
72
73	switch (common->res) {
74	case MLX5_RES_QP:
75		qp = (struct mlx5_core_qp *)common;
76		qp->event(qp, event_type);
77		break;
78
79	default:
80		mlx5_core_warn(dev, "invalid resource type for 0x%x\n", rsn);
81	}
82
83	mlx5_core_put_rsc(common);
84}
85
86static int create_qprqsq_common(struct mlx5_core_dev *dev,
87				struct mlx5_core_qp *qp, int rsc_type)
88{
89	struct mlx5_qp_table *table = &dev->priv.qp_table;
90	int err;
91
92	qp->common.res = rsc_type;
93
94	spin_lock_irq(&table->lock);
95	err = radix_tree_insert(&table->tree, qp->qpn | (rsc_type << 24), qp);
96	spin_unlock_irq(&table->lock);
97	if (err)
98		return err;
99
100	atomic_set(&qp->common.refcount, 1);
101	init_completion(&qp->common.free);
102	qp->pid = curthread->td_proc->p_pid;
103
104	return 0;
105}
106
107static void destroy_qprqsq_common(struct mlx5_core_dev *dev,
108				  struct mlx5_core_qp *qp, int rsc_type)
109{
110	struct mlx5_qp_table *table = &dev->priv.qp_table;
111	unsigned long flags;
112
113	spin_lock_irqsave(&table->lock, flags);
114	radix_tree_delete(&table->tree, qp->qpn | (rsc_type << 24));
115	spin_unlock_irqrestore(&table->lock, flags);
116
117	mlx5_core_put_rsc((struct mlx5_core_rsc_common *)qp);
118	wait_for_completion(&qp->common.free);
119}
120
121int mlx5_core_create_qp(struct mlx5_core_dev *dev,
122			struct mlx5_core_qp *qp,
123			u32 *in, int inlen)
124{
125	u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {0};
126	u32 dout[MLX5_ST_SZ_DW(destroy_qp_out)] = {0};
127	u32 din[MLX5_ST_SZ_DW(destroy_qp_in)] = {0};
128	int err;
129
130	MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
131
132	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
133	if (err)
134		return err;
135
136	qp->qpn = MLX5_GET(create_qp_out, out, qpn);
137	mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn);
138
139	err = create_qprqsq_common(dev, qp, MLX5_RES_QP);
140	if (err)
141		goto err_cmd;
142
143	atomic_inc(&dev->num_qps);
144
145	return 0;
146
147err_cmd:
148	MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
149	MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
150	mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
151	return err;
152}
153EXPORT_SYMBOL_GPL(mlx5_core_create_qp);
154
155int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
156			 struct mlx5_core_qp *qp)
157{
158	u32 out[MLX5_ST_SZ_DW(destroy_qp_out)] = {0};
159	u32 in[MLX5_ST_SZ_DW(destroy_qp_in)]   = {0};
160	int err;
161
162
163	destroy_qprqsq_common(dev, qp, MLX5_RES_QP);
164
165	MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
166	MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
167	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
168	if (err)
169		return err;
170
171	atomic_dec(&dev->num_qps);
172	return 0;
173}
174EXPORT_SYMBOL_GPL(mlx5_core_destroy_qp);
175
/* Input/output mailbox pair for a single firmware command, together
 * with the buffer lengths in bytes.  Allocated by mbox_alloc() and
 * released by mbox_free().
 */
struct mbox_info {
	u32 *in;
	u32 *out;
	int inlen;
	int outlen;
};
182
183static int mbox_alloc(struct mbox_info *mbox, int inlen, int outlen)
184{
185	mbox->inlen  = inlen;
186	mbox->outlen = outlen;
187	mbox->in = kzalloc(mbox->inlen, GFP_KERNEL);
188	mbox->out = kzalloc(mbox->outlen, GFP_KERNEL);
189	if (!mbox->in || !mbox->out) {
190		kfree(mbox->in);
191		kfree(mbox->out);
192		return -ENOMEM;
193	}
194
195	return 0;
196}
197
198static void mbox_free(struct mbox_info *mbox)
199{
200	kfree(mbox->in);
201	kfree(mbox->out);
202}
203
/* Allocate and populate the command mailboxes for a QP state
 * transition.  Each transition opcode has its own input/output layout;
 * the helper macros below size the mailboxes and fill in the common
 * fields (opcode, qpn, and — for transitions that carry a QP context —
 * opt_param_mask and the qpc blob).  Returns 0 on success, -ENOMEM on
 * allocation failure, or -EINVAL for an unknown opcode.  On success the
 * caller owns the mailboxes and must release them with mbox_free().
 */
static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
				u32 opt_param_mask, void *qpc,
				struct mbox_info *mbox)
{
	/* Pre-clear so a failed/unknown opcode leaves the mbox safe to
	 * pass to mbox_free().
	 */
	mbox->out = NULL;
	mbox->in = NULL;

	#define MBOX_ALLOC(mbox, typ)  \
		mbox_alloc(mbox, MLX5_ST_SZ_BYTES(typ##_in), MLX5_ST_SZ_BYTES(typ##_out))

	#define MOD_QP_IN_SET(typ, in, _opcode, _qpn) \
		MLX5_SET(typ##_in, in, opcode, _opcode); \
		MLX5_SET(typ##_in, in, qpn, _qpn)
	#define MOD_QP_IN_SET_QPC(typ, in, _opcode, _qpn, _opt_p, _qpc) \
		MOD_QP_IN_SET(typ, in, _opcode, _qpn); \
		MLX5_SET(typ##_in, in, opt_param_mask, _opt_p); \
		memcpy(MLX5_ADDR_OF(typ##_in, in, qpc), _qpc, MLX5_ST_SZ_BYTES(qpc))

	switch (opcode) {
	/* 2RST & 2ERR */
	case MLX5_CMD_OP_2RST_QP:
		if (MBOX_ALLOC(mbox, qp_2rst))
			return -ENOMEM;
		MOD_QP_IN_SET(qp_2rst, mbox->in, opcode, qpn);
		break;
	case MLX5_CMD_OP_2ERR_QP:
		if (MBOX_ALLOC(mbox, qp_2err))
			return -ENOMEM;
		MOD_QP_IN_SET(qp_2err, mbox->in, opcode, qpn);
		break;

	/* MODIFY with QPC */
	case MLX5_CMD_OP_RST2INIT_QP:
		if (MBOX_ALLOC(mbox, rst2init_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(rst2init_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc);
		break;
	case MLX5_CMD_OP_INIT2RTR_QP:
		if (MBOX_ALLOC(mbox, init2rtr_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(init2rtr_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc);
		break;
	case MLX5_CMD_OP_RTR2RTS_QP:
		if (MBOX_ALLOC(mbox, rtr2rts_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(rtr2rts_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc);
		break;
	case MLX5_CMD_OP_RTS2RTS_QP:
		if (MBOX_ALLOC(mbox, rts2rts_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(rts2rts_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc);
		break;
	case MLX5_CMD_OP_SQERR2RTS_QP:
		if (MBOX_ALLOC(mbox, sqerr2rts_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(sqerr2rts_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc);
		break;
	case MLX5_CMD_OP_INIT2INIT_QP:
		if (MBOX_ALLOC(mbox, init2init_qp))
			return -ENOMEM;
		MOD_QP_IN_SET_QPC(init2init_qp, mbox->in, opcode, qpn,
				  opt_param_mask, qpc);
		break;
	default:
		mlx5_core_err(dev, "Unknown transition for modify QP: OP(0x%x) QPN(0x%x)\n",
			opcode, qpn);
		return -EINVAL;
	}

	return 0;
}
280
281
282int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode,
283			u32 opt_param_mask, void *qpc,
284			struct mlx5_core_qp *qp)
285{
286	struct mbox_info mbox;
287	int err;
288
289	err = modify_qp_mbox_alloc(dev, opcode, qp->qpn,
290				   opt_param_mask, qpc, &mbox);
291	if (err)
292		return err;
293
294	err = mlx5_cmd_exec(dev, mbox.in, mbox.inlen, mbox.out, mbox.outlen);
295	mbox_free(&mbox);
296	return err;
297}
298EXPORT_SYMBOL_GPL(mlx5_core_qp_modify);
299
300void mlx5_init_qp_table(struct mlx5_core_dev *dev)
301{
302	struct mlx5_qp_table *table = &dev->priv.qp_table;
303
304	memset(table, 0, sizeof(*table));
305	spin_lock_init(&table->lock);
306	INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
307}
308
/* Teardown counterpart of mlx5_init_qp_table().  Intentionally empty:
 * individual resources are removed from the table by their own destroy
 * paths, leaving nothing to release here.
 */
void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev)
{
}
312
313int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
314		       u32 *out, int outlen)
315{
316	u32 in[MLX5_ST_SZ_DW(query_qp_in)] = {0};
317
318	MLX5_SET(query_qp_in, in, opcode, MLX5_CMD_OP_QUERY_QP);
319	MLX5_SET(query_qp_in, in, qpn, qp->qpn);
320
321	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
322}
323EXPORT_SYMBOL_GPL(mlx5_core_qp_query);
324
325int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn)
326{
327	u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)] = {0};
328	u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {0};
329	int err;
330
331	MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD);
332	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
333	if (!err)
334		*xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd);
335	return err;
336}
337EXPORT_SYMBOL_GPL(mlx5_core_xrcd_alloc);
338
339int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn)
340{
341	u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)] = {0};
342	u32 out[MLX5_ST_SZ_DW(dealloc_xrcd_out)] = {0};
343
344	MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
345	MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn);
346	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
347}
348EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc);
349
/* Create a DCT (Dynamically Connected Transport) object in firmware
 * from the caller-built CREATE_DCT mailbox and register it in the
 * device resource table.  On success dct->dctn holds the firmware-
 * assigned number and the common bookkeeping (refcount, completions,
 * owning pid) is initialized.  If table registration fails, the
 * firmware object is destroyed again before returning the error.
 */
int mlx5_core_create_dct(struct mlx5_core_dev *dev,
			 struct mlx5_core_dct *dct,
			 u32 *in, int inlen,
			 u32 *out, int outlen)
{
	struct mlx5_qp_table *table = &dev->priv.qp_table;
	u32 dout[MLX5_ST_SZ_DW(destroy_dct_out)] = {0};
	u32 din[MLX5_ST_SZ_DW(destroy_dct_in)]	 = {0};
	int err;

	/* "drained" is completed asynchronously after a DRAIN_DCT command;
	 * initialize it up front so the destroy path can always wait on it.
	 */
	init_completion(&dct->drained);
	MLX5_SET(create_dct_in, in, opcode, MLX5_CMD_OP_CREATE_DCT);

	err = mlx5_cmd_exec(dev, in, inlen, out, outlen);
	if (err) {
		mlx5_core_warn(dev, "create DCT failed, ret %d", err);
		return err;
	}

	dct->dctn = MLX5_GET(create_dct_out, out, dctn);

	/* DCTs are keyed directly by dctn (no type bits folded in). */
	dct->common.res = MLX5_RES_DCT;
	spin_lock_irq(&table->lock);
	err = radix_tree_insert(&table->tree, dct->dctn, dct);
	spin_unlock_irq(&table->lock);
	if (err) {
		mlx5_core_warn(dev, "err %d", err);
		goto err_cmd;
	}

	dct->pid = curthread->td_proc->p_pid;
	atomic_set(&dct->common.refcount, 1);
	init_completion(&dct->common.free);

	return 0;

err_cmd:
	/* Undo the firmware creation; best effort, original error wins. */
	MLX5_SET(destroy_dct_in, din, opcode, MLX5_CMD_OP_DESTROY_DCT);
	MLX5_SET(destroy_dct_in, din, dctn, dct->dctn);
	mlx5_cmd_exec(dev, &din, sizeof(din), dout, sizeof(dout));

	return err;
}
394
395static int mlx5_core_drain_dct(struct mlx5_core_dev *dev,
396			       struct mlx5_core_dct *dct)
397{
398	u32 out[MLX5_ST_SZ_DW(drain_dct_out)] = {0};
399	u32 in[MLX5_ST_SZ_DW(drain_dct_in)]   = {0};
400
401	MLX5_SET(drain_dct_in, in, opcode, MLX5_CMD_OP_DRAIN_DCT);
402	MLX5_SET(drain_dct_in, in, dctn, dct->dctn);
403	return mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
404			     (void *)&out, sizeof(out));
405}
406
/* Destroy a DCT: drain it (waiting for the asynchronous drain
 * completion), unregister it from the resource table, wait for all
 * references to drop, then issue DESTROY_DCT to firmware.  When the
 * device is in internal-error state the drain step is skipped — the
 * firmware is not expected to respond — and teardown proceeds directly.
 */
int mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
			  struct mlx5_core_dct *dct)
{
	struct mlx5_qp_table *table = &dev->priv.qp_table;
	u32 out[MLX5_ST_SZ_DW(destroy_dct_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(destroy_dct_in)]	= {0};
	unsigned long flags;
	int err;

	err = mlx5_core_drain_dct(dev, dct);
	if (err) {
		if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
			/* Device is dead; skip the drain wait and free. */
			goto free_dct;
		} else {
			mlx5_core_warn(dev, "failed drain DCT 0x%x\n", dct->dctn);
			return err;
		}
	}

	/* Signalled asynchronously once firmware finishes the drain. */
	wait_for_completion(&dct->drained);

free_dct:
	spin_lock_irqsave(&table->lock, flags);
	if (radix_tree_delete(&table->tree, dct->dctn) != dct)
		mlx5_core_warn(dev, "dct delete differs\n");
	spin_unlock_irqrestore(&table->lock, flags);

	/* Drop the creation reference and wait for any remaining users. */
	if (atomic_dec_and_test(&dct->common.refcount))
		complete(&dct->common.free);
	wait_for_completion(&dct->common.free);

	MLX5_SET(destroy_dct_in, in, opcode, MLX5_CMD_OP_DESTROY_DCT);
	MLX5_SET(destroy_dct_in, in, dctn, dct->dctn);

	return mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
			     (void *)&out, sizeof(out));
}
445
446int mlx5_core_dct_query(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct,
447			u32 *out, int outlen)
448{
449	u32 in[MLX5_ST_SZ_DW(query_dct_in)] = {0};
450
451	MLX5_SET(query_dct_in, in, opcode, MLX5_CMD_OP_QUERY_DCT);
452	MLX5_SET(query_dct_in, in, dctn, dct->dctn);
453
454	return mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
455			     (void *)out, outlen);
456}
457EXPORT_SYMBOL_GPL(mlx5_core_dct_query);
458
459int mlx5_core_arm_dct(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct)
460{
461	u32 out[MLX5_ST_SZ_DW(arm_dct_out)] = {0};
462	u32 in[MLX5_ST_SZ_DW(arm_dct_in)]   = {0};
463
464	MLX5_SET(arm_dct_in, in, opcode, MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION);
465	MLX5_SET(arm_dct_in, in, dctn, dct->dctn);
466
467	return mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
468			     (void *)&out, sizeof(out));
469}
470EXPORT_SYMBOL_GPL(mlx5_core_arm_dct);
471
472int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
473				struct mlx5_core_qp *rq)
474{
475	int err;
476
477	err = mlx5_core_create_rq(dev, in, inlen, &rq->qpn);
478	if (err)
479		return err;
480
481	err = create_qprqsq_common(dev, rq, MLX5_RES_RQ);
482	if (err)
483		mlx5_core_destroy_rq(dev, rq->qpn);
484
485	return err;
486}
487EXPORT_SYMBOL(mlx5_core_create_rq_tracked);
488
489void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev,
490				  struct mlx5_core_qp *rq)
491{
492	destroy_qprqsq_common(dev, rq, MLX5_RES_RQ);
493	mlx5_core_destroy_rq(dev, rq->qpn);
494}
495EXPORT_SYMBOL(mlx5_core_destroy_rq_tracked);
496
497int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
498				struct mlx5_core_qp *sq)
499{
500	int err;
501
502	err = mlx5_core_create_sq(dev, in, inlen, &sq->qpn);
503	if (err)
504		return err;
505
506	err = create_qprqsq_common(dev, sq, MLX5_RES_SQ);
507	if (err)
508		mlx5_core_destroy_sq(dev, sq->qpn);
509
510	return err;
511}
512EXPORT_SYMBOL(mlx5_core_create_sq_tracked);
513
514void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
515				  struct mlx5_core_qp *sq)
516{
517	destroy_qprqsq_common(dev, sq, MLX5_RES_SQ);
518	mlx5_core_destroy_sq(dev, sq->qpn);
519}
520EXPORT_SYMBOL(mlx5_core_destroy_sq_tracked);
521