/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008, 2014 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/module.h>

#include <dev/mlx4/cmd.h>
#include <dev/mlx4/qp.h>

#include "mlx4.h"
#include "icm.h"

/* A QP that is to support BlueFlame (BF) must have bits 6 and 7 of its number clear */
#define MLX4_BF_QP_SKIP_MASK	0xc0
#define MLX4_MAX_BF_QP_RANGE	0x40
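
/*
 * Illustrative arithmetic (not compiled): with MLX4_BF_QP_SKIP_MASK = 0xc0,
 * a QP number is BF-capable iff (qpn & 0xc0) == 0, i.e. bits 6 and 7 are
 * clear.  For example, QPNs 0x100..0x13f all qualify, while 0x140 (bit 6
 * set) and 0x1c0 (bits 6 and 7 set) do not.  MLX4_MAX_BF_QP_RANGE = 0x40
 * is exactly the length of one such run, which is why a larger BF range
 * request is refused in __mlx4_qp_reserve_range() below.
 */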

void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
	struct mlx4_qp *qp;

	spin_lock(&qp_table->lock);

	qp = __mlx4_qp_lookup(dev, qpn);
	if (qp)
		atomic_inc(&qp->refcount);

	spin_unlock(&qp_table->lock);

	if (!qp) {
		mlx4_dbg(dev, "Async event for non-existent QP %08x\n", qpn);
		return;
	}

	qp->event(qp, event_type);

	if (atomic_dec_and_test(&qp->refcount))
		complete(&qp->free);
}

/* used for INIT/CLOSE port logic */
static int is_master_qp0(struct mlx4_dev *dev, struct mlx4_qp *qp, int *real_qp0, int *proxy_qp0)
{
	/* this procedure is called after we already know we are on the master */
	/* qp0 is either the proxy qp0, or the real qp0 */
	u32 pf_proxy_offset = dev->phys_caps.base_proxy_sqpn + 8 * mlx4_master_func_num(dev);
	*proxy_qp0 = qp->qpn >= pf_proxy_offset && qp->qpn <= pf_proxy_offset + 1;

	*real_qp0 = qp->qpn >= dev->phys_caps.base_sqpn &&
		qp->qpn <= dev->phys_caps.base_sqpn + 1;

	return *real_qp0 || *proxy_qp0;
}
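
/*
 * A sketch of how these ranges line up, with purely illustrative values:
 * if dev->phys_caps.base_sqpn were 0x40, the real QP0s would be 0x40
 * (port 1) and 0x41 (port 2); this even/odd convention is why the callers
 * below derive the port as (qp->qpn & 1) + 1.  The PF's proxy QP0 pair
 * starts at base_proxy_sqpn + 8 * mlx4_master_func_num(dev) and follows
 * the same convention.
 */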

static int __mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		     enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
		     struct mlx4_qp_context *context,
		     enum mlx4_qp_optpar optpar,
		     int sqd_event, struct mlx4_qp *qp, int native)
{
	static const u16 op[MLX4_QP_NUM_STATE][MLX4_QP_NUM_STATE] = {
		[MLX4_QP_STATE_RST] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_INIT]	= MLX4_CMD_RST2INIT_QP,
		},
		[MLX4_QP_STATE_INIT]  = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_INIT]	= MLX4_CMD_INIT2INIT_QP,
			[MLX4_QP_STATE_RTR]	= MLX4_CMD_INIT2RTR_QP,
		},
		[MLX4_QP_STATE_RTR]   = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_RTS]	= MLX4_CMD_RTR2RTS_QP,
		},
		[MLX4_QP_STATE_RTS]   = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_RTS]	= MLX4_CMD_RTS2RTS_QP,
			[MLX4_QP_STATE_SQD]	= MLX4_CMD_RTS2SQD_QP,
		},
		[MLX4_QP_STATE_SQD] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_RTS]	= MLX4_CMD_SQD2RTS_QP,
			[MLX4_QP_STATE_SQD]	= MLX4_CMD_SQD2SQD_QP,
		},
		[MLX4_QP_STATE_SQER] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_RTS]	= MLX4_CMD_SQERR2RTS_QP,
		},
		[MLX4_QP_STATE_ERR] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
		}
	};

	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	int ret = 0;
	int real_qp0 = 0;
	int proxy_qp0 = 0;
	u8 port;

	if (cur_state >= MLX4_QP_NUM_STATE || new_state >= MLX4_QP_NUM_STATE ||
	    !op[cur_state][new_state])
		return -EINVAL;

	if (op[cur_state][new_state] == MLX4_CMD_2RST_QP) {
		ret = mlx4_cmd(dev, 0, qp->qpn, 2,
			MLX4_CMD_2RST_QP, MLX4_CMD_TIME_CLASS_A, native);
		if (mlx4_is_master(dev) && cur_state != MLX4_QP_STATE_ERR &&
		    cur_state != MLX4_QP_STATE_RST &&
		    is_master_qp0(dev, qp, &real_qp0, &proxy_qp0)) {
			port = (qp->qpn & 1) + 1;
			if (proxy_qp0)
				priv->mfunc.master.qp0_state[port].proxy_qp0_active = 0;
			else
				priv->mfunc.master.qp0_state[port].qp0_active = 0;
		}
		return ret;
	}

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	if (cur_state == MLX4_QP_STATE_RST && new_state == MLX4_QP_STATE_INIT) {
		u64 mtt_addr = mlx4_mtt_addr(dev, mtt);
		context->mtt_base_addr_h = mtt_addr >> 32;
		context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
		context->log_page_size   = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
	}

	if ((cur_state == MLX4_QP_STATE_RTR) &&
	    (new_state == MLX4_QP_STATE_RTS) &&
	    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
		context->roce_entropy =
			cpu_to_be16(mlx4_qp_roce_entropy(dev, qp->qpn));

	*(__be32 *) mailbox->buf = cpu_to_be32(optpar);
	memcpy(mailbox->buf + 8, context, sizeof *context);

	((struct mlx4_qp_context *) (mailbox->buf + 8))->local_qpn =
		cpu_to_be32(qp->qpn);

	ret = mlx4_cmd(dev, mailbox->dma,
		       qp->qpn | (!!sqd_event << 31),
		       new_state == MLX4_QP_STATE_RST ? 2 : 0,
		       op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C, native);

	if (mlx4_is_master(dev) && is_master_qp0(dev, qp, &real_qp0, &proxy_qp0)) {
		port = (qp->qpn & 1) + 1;
		if (cur_state != MLX4_QP_STATE_ERR &&
		    cur_state != MLX4_QP_STATE_RST &&
		    new_state == MLX4_QP_STATE_ERR) {
			if (proxy_qp0)
				priv->mfunc.master.qp0_state[port].proxy_qp0_active = 0;
			else
				priv->mfunc.master.qp0_state[port].qp0_active = 0;
		} else if (new_state == MLX4_QP_STATE_RTR) {
			if (proxy_qp0)
				priv->mfunc.master.qp0_state[port].proxy_qp0_active = 1;
			else
				priv->mfunc.master.qp0_state[port].qp0_active = 1;
		}
	}

	mlx4_free_cmd_mailbox(dev, mailbox);
	return ret;
}

int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		   enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
		   struct mlx4_qp_context *context,
		   enum mlx4_qp_optpar optpar,
		   int sqd_event, struct mlx4_qp *qp)
{
	return __mlx4_qp_modify(dev, mtt, cur_state, new_state, context,
				optpar, sqd_event, qp, 0);
}
EXPORT_SYMBOL_GPL(mlx4_qp_modify);
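
/*
 * Usage sketch (hypothetical caller, not part of this file): a consumer
 * that owns a QP and its MTT drives one transition at a time with a
 * filled-in context; the op[][] table above rejects any transition that
 * has no firmware command:
 *
 *	struct mlx4_qp_context ctx = { };
 *	int err;
 *
 *	... populate ctx (MTU, RQ/SQ sizes, address path, etc.) ...
 *	err = mlx4_qp_modify(dev, &mtt, MLX4_QP_STATE_INIT,
 *			     MLX4_QP_STATE_RTR, &ctx, 0, 0, &qp);
 *	if (err)
 *		return err;
 */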

int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
			    int *base, u8 flags)
{
	u32 uid;
	int bf_qp = !!(flags & (u8)MLX4_RESERVE_ETH_BF_QP);

	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_qp_table *qp_table = &priv->qp_table;

	if (cnt > MLX4_MAX_BF_QP_RANGE && bf_qp)
		return -ENOMEM;

	uid = MLX4_QP_TABLE_ZONE_GENERAL;
	if (flags & (u8)MLX4_RESERVE_A0_QP) {
		if (bf_qp)
			uid = MLX4_QP_TABLE_ZONE_RAW_ETH;
		else
			uid = MLX4_QP_TABLE_ZONE_RSS;
	}

	*base = mlx4_zone_alloc_entries(qp_table->zones, uid, cnt, align,
					bf_qp ? MLX4_BF_QP_SKIP_MASK : 0, NULL);
	if (*base == -1)
		return -ENOMEM;

	return 0;
}

int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
			  int *base, u8 flags)
{
	u64 in_param = 0;
	u64 out_param;
	int err;

	/* Turn off all unsupported QP allocation flags */
	flags &= dev->caps.alloc_res_qp_mask;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, (((u32)flags) << 24) | (u32)cnt);
		set_param_h(&in_param, align);
		err = mlx4_cmd_imm(dev, in_param, &out_param,
				   RES_QP, RES_OP_RESERVE,
				   MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (err)
			return err;

		*base = get_param_l(&out_param);
		return 0;
	}
	return __mlx4_qp_reserve_range(dev, cnt, align, base, flags);
}
EXPORT_SYMBOL_GPL(mlx4_qp_reserve_range);

void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_qp_table *qp_table = &priv->qp_table;

	if (mlx4_is_qp_reserved(dev, (u32) base_qpn))
		return;
	mlx4_zone_free_entries_unique(qp_table->zones, base_qpn, cnt);
}

void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
{
	u64 in_param = 0;
	int err;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, base_qpn);
		set_param_h(&in_param, cnt);
		err = mlx4_cmd(dev, in_param, RES_QP, RES_OP_RESERVE,
			       MLX4_CMD_FREE_RES,
			       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (err) {
			mlx4_warn(dev, "Failed to release qp range base:%d cnt:%d\n",
				  base_qpn, cnt);
		}
	} else
		 __mlx4_qp_release_range(dev, base_qpn, cnt);
}
EXPORT_SYMBOL_GPL(mlx4_qp_release_range);
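
/*
 * Usage sketch (hypothetical caller, no special allocation flags): QPN
 * ranges are reserved and released symmetrically.  A native device calls
 * the zone allocator directly; a multi-function device wraps the request
 * in an ALLOC_RES/FREE_RES command:
 *
 *	int base, err;
 *
 *	err = mlx4_qp_reserve_range(dev, 8, 8, &base, 0);
 *	if (err)
 *		return err;
 *	... use QPNs base..base + 7 ...
 *	mlx4_qp_release_range(dev, base, 8);
 */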

int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_qp_table *qp_table = &priv->qp_table;
	int err;

	err = mlx4_table_get(dev, &qp_table->qp_table, qpn, gfp);
	if (err)
		goto err_out;

	err = mlx4_table_get(dev, &qp_table->auxc_table, qpn, gfp);
	if (err)
		goto err_put_qp;

	err = mlx4_table_get(dev, &qp_table->altc_table, qpn, gfp);
	if (err)
		goto err_put_auxc;

	err = mlx4_table_get(dev, &qp_table->rdmarc_table, qpn, gfp);
	if (err)
		goto err_put_altc;

	err = mlx4_table_get(dev, &qp_table->cmpt_table, qpn, gfp);
	if (err)
		goto err_put_rdmarc;

	return 0;

err_put_rdmarc:
	mlx4_table_put(dev, &qp_table->rdmarc_table, qpn);

err_put_altc:
	mlx4_table_put(dev, &qp_table->altc_table, qpn);

err_put_auxc:
	mlx4_table_put(dev, &qp_table->auxc_table, qpn);

err_put_qp:
	mlx4_table_put(dev, &qp_table->qp_table, qpn);

err_out:
	return err;
}

static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp)
{
	u64 param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&param, qpn);
		return mlx4_cmd_imm(dev, param, &param, RES_QP, RES_OP_MAP_ICM,
				    MLX4_CMD_ALLOC_RES, MLX4_CMD_TIME_CLASS_A,
				    MLX4_CMD_WRAPPED);
	}
	return __mlx4_qp_alloc_icm(dev, qpn, gfp);
}

void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_qp_table *qp_table = &priv->qp_table;

	mlx4_table_put(dev, &qp_table->cmpt_table, qpn);
	mlx4_table_put(dev, &qp_table->rdmarc_table, qpn);
	mlx4_table_put(dev, &qp_table->altc_table, qpn);
	mlx4_table_put(dev, &qp_table->auxc_table, qpn);
	mlx4_table_put(dev, &qp_table->qp_table, qpn);
}

static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
{
	u64 in_param = 0;

	if (mlx4_is_mfunc(dev)) {
		set_param_l(&in_param, qpn);
		if (mlx4_cmd(dev, in_param, RES_QP, RES_OP_MAP_ICM,
			     MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
			     MLX4_CMD_WRAPPED))
			mlx4_warn(dev, "Failed to free icm of qp:%d\n", qpn);
	} else
		__mlx4_qp_free_icm(dev, qpn);
}

int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, gfp_t gfp)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_qp_table *qp_table = &priv->qp_table;
	int err;

	if (!qpn)
		return -EINVAL;

	qp->qpn = qpn;

	err = mlx4_qp_alloc_icm(dev, qpn, gfp);
	if (err)
		return err;

	spin_lock_irq(&qp_table->lock);
	err = radix_tree_insert(&dev->qp_table_tree, qp->qpn &
				(dev->caps.num_qps - 1), qp);
	spin_unlock_irq(&qp_table->lock);
	if (err)
		goto err_icm;

	atomic_set(&qp->refcount, 1);
	init_completion(&qp->free);

	return 0;

err_icm:
	mlx4_qp_free_icm(dev, qpn);
	return err;
}

EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
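
/*
 * Usage sketch (hypothetical caller): a QP object is typically carved out
 * of a previously reserved range and torn down in reverse order -- remove
 * it from the radix tree first, then drop the last reference:
 *
 *	struct mlx4_qp qp;
 *	int err;
 *
 *	err = mlx4_qp_alloc(dev, base + i, &qp, GFP_KERNEL);
 *	if (err)
 *		return err;
 *	... post work requests, transition states, etc. ...
 *	mlx4_qp_remove(dev, &qp);
 *	mlx4_qp_free(dev, &qp);
 */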

int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
		   enum mlx4_update_qp_attr attr,
		   struct mlx4_update_qp_params *params)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_update_qp_context *cmd;
	u64 pri_addr_path_mask = 0;
	u64 qp_mask = 0;
	int err = 0;

	if (!attr || (attr & ~MLX4_UPDATE_QP_SUPPORTED_ATTRS))
		return -EINVAL;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	cmd = (struct mlx4_update_qp_context *)mailbox->buf;

	if (attr & MLX4_UPDATE_QP_SMAC) {
		pri_addr_path_mask |= 1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX;
		cmd->qp_context.pri_path.grh_mylmc = params->smac_index;
	}

	if (attr & MLX4_UPDATE_QP_ETH_SRC_CHECK_MC_LB) {
		if (!(dev->caps.flags2
		      & MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB)) {
			mlx4_warn(dev,
				  "Trying to set src check LB, but it isn't supported\n");
			err = -ENOTSUPP;
			goto out;
		}
		pri_addr_path_mask |=
			1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB;
		if (params->flags &
		    MLX4_UPDATE_QP_PARAMS_FLAGS_ETH_CHECK_MC_LB) {
			cmd->qp_context.pri_path.fl |=
				MLX4_FL_ETH_SRC_CHECK_MC_LB;
		}
	}

	if (attr & MLX4_UPDATE_QP_VSD) {
		qp_mask |= 1ULL << MLX4_UPD_QP_MASK_VSD;
		if (params->flags & MLX4_UPDATE_QP_PARAMS_FLAGS_VSD_ENABLE)
			cmd->qp_context.param3 |= cpu_to_be32(MLX4_STRIP_VLAN);
	}

	if (attr & MLX4_UPDATE_QP_RATE_LIMIT) {
		qp_mask |= 1ULL << MLX4_UPD_QP_MASK_RATE_LIMIT;
		cmd->qp_context.rate_limit_params = cpu_to_be16((params->rate_unit << 14) | params->rate_val);
	}

	if (attr & MLX4_UPDATE_QP_QOS_VPORT) {
		qp_mask |= 1ULL << MLX4_UPD_QP_MASK_QOS_VPP;
		cmd->qp_context.qos_vport = params->qos_vport;
	}

	cmd->primary_addr_path_mask = cpu_to_be64(pri_addr_path_mask);
	cmd->qp_mask = cpu_to_be64(qp_mask);

	err = mlx4_cmd(dev, mailbox->dma, qpn & 0xffffff, 0,
		       MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_NATIVE);
out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_update_qp);
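
/*
 * Usage sketch (hypothetical values): to retune one attribute of a live QP
 * without a full state transition, fill only the matching params fields
 * and pass the corresponding attribute bit; the rate_unit/rate_val
 * encodings follow the device's rate-limit format and are assumptions
 * here:
 *
 *	struct mlx4_update_qp_params params = { };
 *	int err;
 *
 *	params.rate_unit = rate_unit;
 *	params.rate_val = rate_val;
 *	err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_RATE_LIMIT, &params);
 */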

void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
	unsigned long flags;

	spin_lock_irqsave(&qp_table->lock, flags);
	radix_tree_delete(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1));
	spin_unlock_irqrestore(&qp_table->lock, flags);
}
EXPORT_SYMBOL_GPL(mlx4_qp_remove);

void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp)
{
	if (atomic_dec_and_test(&qp->refcount))
		complete(&qp->free);
	wait_for_completion(&qp->free);

	mlx4_qp_free_icm(dev, qp->qpn);
}
EXPORT_SYMBOL_GPL(mlx4_qp_free);

static int mlx4_CONF_SPECIAL_QP(struct mlx4_dev *dev, u32 base_qpn)
{
	return mlx4_cmd(dev, 0, base_qpn, 0, MLX4_CMD_CONF_SPECIAL_QP,
			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}

#define MLX4_QP_TABLE_RSS_ETH_PRIORITY 2
#define MLX4_QP_TABLE_RAW_ETH_PRIORITY 1
#define MLX4_QP_TABLE_RAW_ETH_SIZE     256

static int mlx4_create_zones(struct mlx4_dev *dev,
			     u32 reserved_bottom_general,
			     u32 reserved_top_general,
			     u32 reserved_bottom_rss,
			     u32 start_offset_rss,
			     u32 max_table_offset)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
	struct mlx4_bitmap (*bitmap)[MLX4_QP_TABLE_ZONE_NUM] = NULL;
	int bitmap_initialized = 0;
	u32 last_offset;
	int k;
	int err;

	qp_table->zones = mlx4_zone_allocator_create(MLX4_ZONE_ALLOC_FLAGS_NO_OVERLAP);

	if (NULL == qp_table->zones)
		return -ENOMEM;

	bitmap = kmalloc(sizeof(*bitmap), GFP_KERNEL);

	if (NULL == bitmap) {
		err = -ENOMEM;
		goto free_zone;
	}

	err = mlx4_bitmap_init(*bitmap + MLX4_QP_TABLE_ZONE_GENERAL, dev->caps.num_qps,
			       (1 << 23) - 1, reserved_bottom_general,
			       reserved_top_general);

	if (err)
		goto free_bitmap;

	++bitmap_initialized;

	err = mlx4_zone_add_one(qp_table->zones, *bitmap + MLX4_QP_TABLE_ZONE_GENERAL,
				MLX4_ZONE_FALLBACK_TO_HIGHER_PRIO |
				MLX4_ZONE_USE_RR, 0,
				0, qp_table->zones_uids + MLX4_QP_TABLE_ZONE_GENERAL);

	if (err)
		goto free_bitmap;

	err = mlx4_bitmap_init(*bitmap + MLX4_QP_TABLE_ZONE_RSS,
			       reserved_bottom_rss,
			       reserved_bottom_rss - 1,
			       dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
			       reserved_bottom_rss - start_offset_rss);

	if (err)
		goto free_bitmap;

	++bitmap_initialized;

	err = mlx4_zone_add_one(qp_table->zones, *bitmap + MLX4_QP_TABLE_ZONE_RSS,
				MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO |
				MLX4_ZONE_ALLOW_ALLOC_FROM_EQ_PRIO |
				MLX4_ZONE_USE_RR, MLX4_QP_TABLE_RSS_ETH_PRIORITY,
				0, qp_table->zones_uids + MLX4_QP_TABLE_ZONE_RSS);

	if (err)
		goto free_bitmap;

	last_offset = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
	/*  We have a single zone for the A0 steering QPs area of the FW. This area
	 *  needs to be split into subareas. One set of subareas is for RSS QPs
	 *  (in which qp number bits 6 and/or 7 are set); the other set of subareas
	 *  is for RAW_ETH QPs, which require that both bits 6 and 7 are zero.
	 *  Currently, the values returned by the FW (A0 steering area starting qp number
	 *  and A0 steering area size) are such that there are only two subareas -- one
	 *  for RSS and one for RAW_ETH.
	 */
	for (k = MLX4_QP_TABLE_ZONE_RSS + 1; k < sizeof(*bitmap)/sizeof((*bitmap)[0]);
	     k++) {
		int size;
		u32 offset = start_offset_rss;
		u32 bf_mask;
		u32 requested_size;

		/* Assuming MLX4_BF_QP_SKIP_MASK is consecutive ones, this calculates
		 * a mask of all LSB bits set until (and not including) the first
		 * set bit of MLX4_BF_QP_SKIP_MASK. For example, if MLX4_BF_QP_SKIP_MASK
		 * is 0xc0, bf_mask will be 0x3f.
		 */
		bf_mask = (MLX4_BF_QP_SKIP_MASK & ~(MLX4_BF_QP_SKIP_MASK - 1)) - 1;
		requested_size = min((u32)MLX4_QP_TABLE_RAW_ETH_SIZE, bf_mask + 1);

		if (((last_offset & MLX4_BF_QP_SKIP_MASK) &&
		     ((int)(max_table_offset - last_offset)) >=
		     roundup_pow_of_two(MLX4_BF_QP_SKIP_MASK)) ||
		    (!(last_offset & MLX4_BF_QP_SKIP_MASK) &&
		     !((last_offset + requested_size - 1) &
		       MLX4_BF_QP_SKIP_MASK)))
			size = requested_size;
		else {
			u32 candidate_offset =
				(last_offset | MLX4_BF_QP_SKIP_MASK | bf_mask) + 1;

			if (last_offset & MLX4_BF_QP_SKIP_MASK)
				last_offset = candidate_offset;

			/* From this point, the BF bits are 0 */

			if (last_offset > max_table_offset) {
				/* need to skip */
				size = -1;
			} else {
				size = min3(max_table_offset - last_offset,
					    bf_mask - (last_offset & bf_mask),
					    requested_size);
				if (size < requested_size) {
					int candidate_size;

					candidate_size = min3(
						max_table_offset - candidate_offset,
						bf_mask - (last_offset & bf_mask),
						requested_size);

					/*  We will not take this path if last_offset was
					 *  already set above to candidate_offset
					 */
					if (candidate_size > size) {
						last_offset = candidate_offset;
						size = candidate_size;
					}
				}
			}
		}

		if (size > 0) {
			/* mlx4_bitmap_alloc_range will find a contiguous range of "size"
			 * QPs in which both bits 6 and 7 are zero, because we pass it
			 * MLX4_BF_QP_SKIP_MASK.
			 */
			offset = mlx4_bitmap_alloc_range(
					*bitmap + MLX4_QP_TABLE_ZONE_RSS,
					size, 1,
					MLX4_BF_QP_SKIP_MASK);

			if (offset == (u32)-1) {
				err = -ENOMEM;
				break;
			}

			last_offset = offset + size;

			err = mlx4_bitmap_init(*bitmap + k, roundup_pow_of_two(size),
					       roundup_pow_of_two(size) - 1, 0,
					       roundup_pow_of_two(size) - size);
		} else {
			/* Add an empty bitmap, we'll allocate from different zones (since
			 * at least one is reserved)
			 */
			err = mlx4_bitmap_init(*bitmap + k, 1,
					       MLX4_QP_TABLE_RAW_ETH_SIZE - 1, 0,
					       0);
			mlx4_bitmap_alloc_range(*bitmap + k, 1, 1, 0);
		}

		if (err)
			break;

		++bitmap_initialized;

		err = mlx4_zone_add_one(qp_table->zones, *bitmap + k,
					MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO |
					MLX4_ZONE_ALLOW_ALLOC_FROM_EQ_PRIO |
					MLX4_ZONE_USE_RR, MLX4_QP_TABLE_RAW_ETH_PRIORITY,
					offset, qp_table->zones_uids + k);

		if (err)
			break;
	}

	if (err)
		goto free_bitmap;

	qp_table->bitmap_gen = *bitmap;

	return err;

free_bitmap:
	for (k = 0; k < bitmap_initialized; k++)
		mlx4_bitmap_cleanup(*bitmap + k);
	kfree(bitmap);
free_zone:
	mlx4_zone_allocator_destroy(qp_table->zones);
	return err;
}

static void mlx4_cleanup_qp_zones(struct mlx4_dev *dev)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;

	if (qp_table->zones) {
		int i;

		for (i = 0;
		     i < sizeof(qp_table->zones_uids)/sizeof(qp_table->zones_uids[0]);
		     i++) {
			struct mlx4_bitmap *bitmap =
				mlx4_zone_get_bitmap(qp_table->zones,
						     qp_table->zones_uids[i]);

			mlx4_zone_remove_one(qp_table->zones, qp_table->zones_uids[i]);
			if (NULL == bitmap)
				continue;

			mlx4_bitmap_cleanup(bitmap);
		}
		mlx4_zone_allocator_destroy(qp_table->zones);
		kfree(qp_table->bitmap_gen);
		qp_table->bitmap_gen = NULL;
		qp_table->zones = NULL;
	}
}

int mlx4_init_qp_table(struct mlx4_dev *dev)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
	int err;
	int reserved_from_top = 0;
	int reserved_from_bot;
	int k;
	int fixed_reserved_from_bot_rv = 0;
	int bottom_reserved_for_rss_bitmap;
	u32 max_table_offset = dev->caps.dmfs_high_rate_qpn_base +
			dev->caps.dmfs_high_rate_qpn_range;

	spin_lock_init(&qp_table->lock);
	INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC);
	if (mlx4_is_slave(dev))
		return 0;

	/* We reserve 2 extra QPs per port for the special QPs.  The
	 * block of special QPs must be aligned to a multiple of 8, so
	 * round up.
	 *
	 * We also reserve the MSB of the 24-bit QP number to indicate
	 * that a QP is an XRC QP.
	 */
	for (k = 0; k <= MLX4_QP_REGION_BOTTOM; k++)
		fixed_reserved_from_bot_rv += dev->caps.reserved_qps_cnt[k];

	if (fixed_reserved_from_bot_rv < max_table_offset)
		fixed_reserved_from_bot_rv = max_table_offset;

	/* We reserve at least 1 extra for bitmaps that we don't have enough space for */
	bottom_reserved_for_rss_bitmap =
		roundup_pow_of_two(fixed_reserved_from_bot_rv + 1);
	dev->phys_caps.base_sqpn = ALIGN(bottom_reserved_for_rss_bitmap, 8);

	{
		int sort[MLX4_NUM_QP_REGION];
		int i, j;
		int last_base = dev->caps.num_qps;

		for (i = 1; i < MLX4_NUM_QP_REGION; ++i)
			sort[i] = i;

		for (i = MLX4_NUM_QP_REGION; i > MLX4_QP_REGION_BOTTOM; --i) {
			for (j = MLX4_QP_REGION_BOTTOM + 2; j < i; ++j) {
				if (dev->caps.reserved_qps_cnt[sort[j]] >
				    dev->caps.reserved_qps_cnt[sort[j - 1]])
					swap(sort[j], sort[j - 1]);
			}
		}

		for (i = MLX4_QP_REGION_BOTTOM + 1; i < MLX4_NUM_QP_REGION; ++i) {
			last_base -= dev->caps.reserved_qps_cnt[sort[i]];
			dev->caps.reserved_qps_base[sort[i]] = last_base;
			reserved_from_top +=
				dev->caps.reserved_qps_cnt[sort[i]];
		}
	}

	/* Reserve 8 real SQPs in both native and SRIOV modes.
	 * In addition, in SRIOV mode, reserve 8 proxy SQPs per function
	 * (for all PFs and VFs), and 8 corresponding tunnel QPs.
	 * Each proxy SQP works opposite its own tunnel QP.
	 *
	 * The QPs are arranged as follows:
	 * a. 8 real SQPs
	 * b. All the proxy SQPs (8 per function)
	 * c. All the tunnel QPs (8 per function)
	 */
	reserved_from_bot = mlx4_num_reserved_sqps(dev);
	if (reserved_from_bot + reserved_from_top > dev->caps.num_qps) {
		mlx4_err(dev, "Number of reserved QPs is higher than number of QPs\n");
		return -EINVAL;
	}

	err = mlx4_create_zones(dev, reserved_from_bot, reserved_from_bot,
				bottom_reserved_for_rss_bitmap,
				fixed_reserved_from_bot_rv,
				max_table_offset);

	if (err)
		return err;

	if (mlx4_is_mfunc(dev)) {
		/* for PPF use */
		dev->phys_caps.base_proxy_sqpn = dev->phys_caps.base_sqpn + 8;
		dev->phys_caps.base_tunnel_sqpn = dev->phys_caps.base_sqpn + 8 + 8 * MLX4_MFUNC_MAX;

		/* In mfunc, calculate proxy and tunnel qp offsets for the PF here,
		 * since the PF does not call mlx4_slave_caps */
		dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
		dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
		dev->caps.qp1_tunnel = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);
		dev->caps.qp1_proxy = kcalloc(dev->caps.num_ports, sizeof (u32), GFP_KERNEL);

		if (!dev->caps.qp0_tunnel || !dev->caps.qp0_proxy ||
		    !dev->caps.qp1_tunnel || !dev->caps.qp1_proxy) {
			err = -ENOMEM;
			goto err_mem;
		}

		for (k = 0; k < dev->caps.num_ports; k++) {
			dev->caps.qp0_proxy[k] = dev->phys_caps.base_proxy_sqpn +
				8 * mlx4_master_func_num(dev) + k;
			dev->caps.qp0_tunnel[k] = dev->caps.qp0_proxy[k] + 8 * MLX4_MFUNC_MAX;
			dev->caps.qp1_proxy[k] = dev->phys_caps.base_proxy_sqpn +
				8 * mlx4_master_func_num(dev) + MLX4_MAX_PORTS + k;
			dev->caps.qp1_tunnel[k] = dev->caps.qp1_proxy[k] + 8 * MLX4_MFUNC_MAX;
		}
	}

	err = mlx4_CONF_SPECIAL_QP(dev, dev->phys_caps.base_sqpn);
	if (err)
		goto err_mem;

	return err;

err_mem:
	kfree(dev->caps.qp0_tunnel);
	kfree(dev->caps.qp0_proxy);
	kfree(dev->caps.qp1_tunnel);
	kfree(dev->caps.qp1_proxy);
	dev->caps.qp0_tunnel = dev->caps.qp0_proxy =
		dev->caps.qp1_tunnel = dev->caps.qp1_proxy = NULL;
	mlx4_cleanup_qp_zones(dev);
	return err;
}

void mlx4_cleanup_qp_table(struct mlx4_dev *dev)
{
	if (mlx4_is_slave(dev))
		return;

	mlx4_CONF_SPECIAL_QP(dev, 0);

	mlx4_cleanup_qp_zones(dev);
}

int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp,
		  struct mlx4_qp_context *context)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(dev, 0, mailbox->dma, qp->qpn, 0,
			   MLX4_CMD_QUERY_QP, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_WRAPPED);
	if (!err)
		memcpy(context, mailbox->buf + 8, sizeof *context);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_qp_query);

int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		     struct mlx4_qp_context *context,
		     struct mlx4_qp *qp, enum mlx4_qp_state *qp_state)
{
	int err;
	int i;
	enum mlx4_qp_state states[] = {
		MLX4_QP_STATE_RST,
		MLX4_QP_STATE_INIT,
		MLX4_QP_STATE_RTR,
		MLX4_QP_STATE_RTS
	};

	for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
		context->flags &= cpu_to_be32(~(0xf << 28));
		context->flags |= cpu_to_be32(states[i + 1] << 28);
		if (states[i + 1] != MLX4_QP_STATE_RTR)
			context->params2 &= ~cpu_to_be32(MLX4_QP_BIT_FPP);
		err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1],
				     context, 0, 0, qp);
		if (err) {
			mlx4_err(dev, "Failed to bring QP to state: %d with error: %d\n",
				 states[i + 1], err);
			return err;
		}

		*qp_state = states[i + 1];
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_qp_to_ready);
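
/*
 * Usage sketch (hypothetical caller): the helper above walks the
 * RST->INIT->RTR->RTS ladder with a single context and reports how far it
 * got through *qp_state, so a caller can unwind from the right state on
 * failure:
 *
 *	enum mlx4_qp_state state;
 *	int err;
 *
 *	err = mlx4_qp_to_ready(dev, &mtt, &ctx, &qp, &state);
 *	if (err)
 *		... unwind from 'state', which is not necessarily RST ...
 */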

u16 mlx4_qp_roce_entropy(struct mlx4_dev *dev, u32 qpn)
{
	struct mlx4_qp_context context;
	struct mlx4_qp qp;
	int err;

	qp.qpn = qpn;
	err = mlx4_qp_query(dev, &qp, &context);
	if (!err) {
		u32 dest_qpn = be32_to_cpu(context.remote_qpn) & 0xffffff;
		u16 folded_dst = folded_qp(dest_qpn);
		u16 folded_src = folded_qp(qpn);

		return (dest_qpn != qpn) ?
			((folded_dst ^ folded_src) | 0xC000) :
			folded_src | 0xC000;
	}
	return 0xdead;
}
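
/*
 * Worked example (illustrative numbers, assuming folded_qp() in mlx4.h
 * XOR-folds the 24-bit QPN down to 16 bits): for qpn = 0x000123 connected
 * to dest_qpn = 0x000456, the entropy is
 * (folded_qp(0x456) ^ folded_qp(0x123)) | 0xC000; ORing in 0xC000 keeps
 * the value in the range used for the RoCE v2 UDP source port.  0xdead is
 * returned only when the QUERY_QP command itself fails.
 */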