1/*
2 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
3 * Copyright (c) 2005, 2006, 2007, 2008, 2014 Mellanox Technologies.
4 * All rights reserved.
5 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses.  You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
12 *
13 *     Redistribution and use in source and binary forms, with or
14 *     without modification, are permitted provided that the following
15 *     conditions are met:
16 *
17 *      - Redistributions of source code must retain the above
18 *        copyright notice, this list of conditions and the following
19 *        disclaimer.
20 *
21 *      - Redistributions in binary form must reproduce the above
22 *        copyright notice, this list of conditions and the following
23 *        disclaimer in the documentation and/or other materials
24 *        provided with the distribution.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
34 */
35
36#include <linux/sched.h>
37#include <linux/pci.h>
38#include <linux/errno.h>
39#include <linux/kernel.h>
40#include <linux/io.h>
41#include <linux/slab.h>
42#include <linux/mlx4/cmd.h>
43#include <linux/mlx4/qp.h>
44#include <linux/if_ether.h>
45#include <linux/etherdevice.h>
46
47#include "mlx4.h"
48#include "fw.h"
49
50#define MLX4_MAC_VALID		(1ull << 63)
51
52struct mac_res {
53	struct list_head list;
54	u64 mac;
55	int ref_count;
56	u8 smac_index;
57	u8 port;
58};
59
60struct vlan_res {
61	struct list_head list;
62	u16 vlan;
63	int ref_count;
64	int vlan_index;
65	u8 port;
66};
67
68struct res_common {
69	struct list_head	list;
70	struct rb_node		node;
71	u64		        res_id;
72	int			owner;
73	int			state;
74	int			from_state;
75	int			to_state;
76	int			removing;
77};
78
79enum {
80	RES_ANY_BUSY = 1
81};
82
83struct res_gid {
84	struct list_head	list;
85	u8			gid[16];
86	enum mlx4_protocol	prot;
87	enum mlx4_steer_type	steer;
88	u64			reg_id;
89};
90
91enum res_qp_states {
92	RES_QP_BUSY = RES_ANY_BUSY,
93
94	/* QP number was allocated */
95	RES_QP_RESERVED,
96
97	/* ICM memory for QP context was mapped */
98	RES_QP_MAPPED,
99
100	/* QP is in hw ownership */
101	RES_QP_HW
102};
103
104struct res_qp {
105	struct res_common	com;
106	struct res_mtt	       *mtt;
107	struct res_cq	       *rcq;
108	struct res_cq	       *scq;
109	struct res_srq	       *srq;
110	struct list_head	mcg_list;
111	spinlock_t		mcg_spl;
112	int			local_qpn;
113	atomic_t		ref_count;
114	u32			qpc_flags;
115	/* saved qp params before VST enforcement in order to restore on VGT */
116	u8			sched_queue;
117	__be32			param3;
118	u8			vlan_control;
119	u8			fvl_rx;
120	u8			pri_path_fl;
121	u8			vlan_index;
122	u8			feup;
123};
124
125enum res_mtt_states {
126	RES_MTT_BUSY = RES_ANY_BUSY,
127	RES_MTT_ALLOCATED,
128};
129
130static inline const char *mtt_states_str(enum res_mtt_states state)
131{
132	switch (state) {
133	case RES_MTT_BUSY: return "RES_MTT_BUSY";
134	case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
135	default: return "Unknown";
136	}
137}
138
139struct res_mtt {
140	struct res_common	com;
141	int			order;
142	atomic_t		ref_count;
143};
144
145enum res_mpt_states {
146	RES_MPT_BUSY = RES_ANY_BUSY,
147	RES_MPT_RESERVED,
148	RES_MPT_MAPPED,
149	RES_MPT_HW,
150};
151
152struct res_mpt {
153	struct res_common	com;
154	struct res_mtt	       *mtt;
155	int			key;
156};
157
158enum res_eq_states {
159	RES_EQ_BUSY = RES_ANY_BUSY,
160	RES_EQ_RESERVED,
161	RES_EQ_HW,
162};
163
164struct res_eq {
165	struct res_common	com;
166	struct res_mtt	       *mtt;
167};
168
169enum res_cq_states {
170	RES_CQ_BUSY = RES_ANY_BUSY,
171	RES_CQ_ALLOCATED,
172	RES_CQ_HW,
173};
174
175struct res_cq {
176	struct res_common	com;
177	struct res_mtt	       *mtt;
178	atomic_t		ref_count;
179};
180
181enum res_srq_states {
182	RES_SRQ_BUSY = RES_ANY_BUSY,
183	RES_SRQ_ALLOCATED,
184	RES_SRQ_HW,
185};
186
187struct res_srq {
188	struct res_common	com;
189	struct res_mtt	       *mtt;
190	struct res_cq	       *cq;
191	atomic_t		ref_count;
192};
193
194enum res_counter_states {
195	RES_COUNTER_BUSY = RES_ANY_BUSY,
196	RES_COUNTER_ALLOCATED,
197};
198
199struct res_counter {
200	struct res_common	com;
201	int			port;
202};
203
204enum res_xrcdn_states {
205	RES_XRCD_BUSY = RES_ANY_BUSY,
206	RES_XRCD_ALLOCATED,
207};
208
209struct res_xrcdn {
210	struct res_common	com;
211	int			port;
212};
213
214enum res_fs_rule_states {
215	RES_FS_RULE_BUSY = RES_ANY_BUSY,
216	RES_FS_RULE_ALLOCATED,
217};
218
219struct res_fs_rule {
220	struct res_common	com;
221	int			qpn;
222};
223
224static int mlx4_is_eth(struct mlx4_dev *dev, int port)
225{
226	return dev->caps.port_mask[port] == MLX4_PORT_TYPE_IB ? 0 : 1;
227}
228
229static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
230{
231	struct rb_node *node = root->rb_node;
232
233	while (node) {
234		struct res_common *res = container_of(node, struct res_common,
235						      node);
236
237		if (res_id < res->res_id)
238			node = node->rb_left;
239		else if (res_id > res->res_id)
240			node = node->rb_right;
241		else
242			return res;
243	}
244	return NULL;
245}
246
247static int res_tracker_insert(struct rb_root *root, struct res_common *res)
248{
249	struct rb_node **new = &(root->rb_node), *parent = NULL;
250
251	/* Figure out where to put new node */
252	while (*new) {
253		struct res_common *this = container_of(*new, struct res_common,
254						       node);
255
256		parent = *new;
257		if (res->res_id < this->res_id)
258			new = &((*new)->rb_left);
259		else if (res->res_id > this->res_id)
260			new = &((*new)->rb_right);
261		else
262			return -EEXIST;
263	}
264
265	/* Add new node and rebalance tree. */
266	rb_link_node(&res->node, parent, new);
267	rb_insert_color(&res->node, root);
268
269	return 0;
270}
271
272enum qp_transition {
273	QP_TRANS_INIT2RTR,
274	QP_TRANS_RTR2RTS,
275	QP_TRANS_RTS2RTS,
276	QP_TRANS_SQERR2RTS,
277	QP_TRANS_SQD2SQD,
278	QP_TRANS_SQD2RTS
279};
280
281/* For Debug uses */
282static const char *ResourceType(enum mlx4_resource rt)
283{
284	switch (rt) {
285	case RES_QP: return "RES_QP";
286	case RES_CQ: return "RES_CQ";
287	case RES_SRQ: return "RES_SRQ";
288	case RES_MPT: return "RES_MPT";
289	case RES_MTT: return "RES_MTT";
290	case RES_MAC: return  "RES_MAC";
291	case RES_VLAN: return  "RES_VLAN";
292	case RES_EQ: return "RES_EQ";
293	case RES_COUNTER: return "RES_COUNTER";
294	case RES_FS_RULE: return "RES_FS_RULE";
295	case RES_XRCD: return "RES_XRCD";
296	default: return "Unknown resource type !!!";
297	};
298}
299
300static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
301static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
302				      enum mlx4_resource res_type, int count,
303				      int port)
304{
305	struct mlx4_priv *priv = mlx4_priv(dev);
306	struct resource_allocator *res_alloc =
307		&priv->mfunc.master.res_tracker.res_alloc[res_type];
308	int err = -EINVAL;
309	int allocated, free, reserved, guaranteed, from_free;
310
311	spin_lock(&res_alloc->alloc_lock);
312	allocated = (port > 0) ?
313		res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] :
314		res_alloc->allocated[slave];
315	free = (port > 0) ? res_alloc->res_port_free[port - 1] :
316		res_alloc->res_free;
317	reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
318		res_alloc->res_reserved;
319	guaranteed = res_alloc->guaranteed[slave];
320
321	if (allocated + count > res_alloc->quota[slave])
322		goto out;
323
324	if (allocated + count <= guaranteed) {
325		err = 0;
326	} else {
327		/* portion may need to be obtained from free area */
328		if (guaranteed - allocated > 0)
329			from_free = count - (guaranteed - allocated);
330		else
331			from_free = count;
332
333		if (free - from_free > reserved)
334			err = 0;
335	}
336
337	if (!err) {
338		/* grant the request */
339		if (port > 0) {
340			res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] += count;
341			res_alloc->res_port_free[port - 1] -= count;
342		} else {
343			res_alloc->allocated[slave] += count;
344			res_alloc->res_free -= count;
345		}
346	}
347
348out:
349	spin_unlock(&res_alloc->alloc_lock);
350	return err;
351
352}
353
354static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
355				    enum mlx4_resource res_type, int count,
356				    int port)
357{
358	struct mlx4_priv *priv = mlx4_priv(dev);
359	struct resource_allocator *res_alloc =
360		&priv->mfunc.master.res_tracker.res_alloc[res_type];
361
362	spin_lock(&res_alloc->alloc_lock);
363	if (port > 0) {
364		res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] -= count;
365		res_alloc->res_port_free[port - 1] += count;
366	} else {
367		res_alloc->allocated[slave] -= count;
368		res_alloc->res_free += count;
369	}
370
371	spin_unlock(&res_alloc->alloc_lock);
372	return;
373}
374
375static inline void initialize_res_quotas(struct mlx4_dev *dev,
376					 struct resource_allocator *res_alloc,
377					 enum mlx4_resource res_type,
378					 int vf, int num_instances)
379{
380	res_alloc->guaranteed[vf] = num_instances / (2 * (dev->num_vfs + 1));
381	res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
382	if (vf == mlx4_master_func_num(dev)) {
383		res_alloc->res_free = num_instances;
384		if (res_type == RES_MTT) {
385			/* reserved mtts will be taken out of the PF allocation */
386			res_alloc->res_free += dev->caps.reserved_mtts;
387			res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
388			res_alloc->quota[vf] += dev->caps.reserved_mtts;
389		}
390	}
391}
392
393void mlx4_init_quotas(struct mlx4_dev *dev)
394{
395	struct mlx4_priv *priv = mlx4_priv(dev);
396	int pf;
397
398	/* quotas for VFs are initialized in mlx4_slave_cap */
399	if (mlx4_is_slave(dev))
400		return;
401
402	if (!mlx4_is_mfunc(dev)) {
403		dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
404			mlx4_num_reserved_sqps(dev);
405		dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
406		dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
407		dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
408		dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
409		return;
410	}
411
412	pf = mlx4_master_func_num(dev);
413	dev->quotas.qp =
414		priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
415	dev->quotas.cq =
416		priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
417	dev->quotas.srq =
418		priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
419	dev->quotas.mtt =
420		priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
421	dev->quotas.mpt =
422		priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
423}
424int mlx4_init_resource_tracker(struct mlx4_dev *dev)
425{
426	struct mlx4_priv *priv = mlx4_priv(dev);
427	int i, j;
428	int t;
429
430	priv->mfunc.master.res_tracker.slave_list =
431		kzalloc(dev->num_slaves * sizeof(struct slave_list),
432			GFP_KERNEL);
433	if (!priv->mfunc.master.res_tracker.slave_list)
434		return -ENOMEM;
435
436	for (i = 0 ; i < dev->num_slaves; i++) {
437		for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
438			INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
439				       slave_list[i].res_list[t]);
440		mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
441	}
442
443	mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
444		 dev->num_slaves);
445	for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
446		priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;
447
448	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
449		struct resource_allocator *res_alloc =
450			&priv->mfunc.master.res_tracker.res_alloc[i];
451		res_alloc->quota = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
452		res_alloc->guaranteed = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
453		if (i == RES_MAC || i == RES_VLAN)
454			res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
455						       (dev->num_vfs + 1) * sizeof(int),
456							GFP_KERNEL);
457		else
458			res_alloc->allocated = kzalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
459
460		if (!res_alloc->quota || !res_alloc->guaranteed ||
461		    !res_alloc->allocated)
462			goto no_mem_err;
463
464		spin_lock_init(&res_alloc->alloc_lock);
465		for (t = 0; t < dev->num_vfs + 1; t++) {
466			switch (i) {
467			case RES_QP:
468				initialize_res_quotas(dev, res_alloc, RES_QP,
469						      t, dev->caps.num_qps -
470						      dev->caps.reserved_qps -
471						      mlx4_num_reserved_sqps(dev));
472				break;
473			case RES_CQ:
474				initialize_res_quotas(dev, res_alloc, RES_CQ,
475						      t, dev->caps.num_cqs -
476						      dev->caps.reserved_cqs);
477				break;
478			case RES_SRQ:
479				initialize_res_quotas(dev, res_alloc, RES_SRQ,
480						      t, dev->caps.num_srqs -
481						      dev->caps.reserved_srqs);
482				break;
483			case RES_MPT:
484				initialize_res_quotas(dev, res_alloc, RES_MPT,
485						      t, dev->caps.num_mpts -
486						      dev->caps.reserved_mrws);
487				break;
488			case RES_MTT:
489				initialize_res_quotas(dev, res_alloc, RES_MTT,
490						      t, dev->caps.num_mtts -
491						      dev->caps.reserved_mtts);
492				break;
493			case RES_MAC:
494				if (t == mlx4_master_func_num(dev)) {
495					res_alloc->quota[t] =
496						MLX4_MAX_MAC_NUM - 2 * dev->num_vfs;
497					res_alloc->guaranteed[t] = res_alloc->quota[t];
498					for (j = 0; j < MLX4_MAX_PORTS; j++)
499						res_alloc->res_port_free[j] = MLX4_MAX_MAC_NUM;
500				} else {
501					res_alloc->quota[t] = 2;
502					res_alloc->guaranteed[t] = 2;
503				}
504				break;
505			case RES_VLAN:
506				if (t == mlx4_master_func_num(dev)) {
507					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
508					res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
509					for (j = 0; j < MLX4_MAX_PORTS; j++)
510						res_alloc->res_port_free[j] =
511							res_alloc->quota[t];
512				} else {
513					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
514					res_alloc->guaranteed[t] = 0;
515				}
516				break;
517			case RES_COUNTER:
518				res_alloc->quota[t] = dev->caps.max_counters;
519				res_alloc->guaranteed[t] = 0;
520				if (t == mlx4_master_func_num(dev))
521					res_alloc->res_free = res_alloc->quota[t];
522				break;
523			default:
524				break;
525			}
526			if (i == RES_MAC || i == RES_VLAN) {
527				for (j = 0; j < MLX4_MAX_PORTS; j++)
528					res_alloc->res_port_rsvd[j] +=
529						res_alloc->guaranteed[t];
530			} else {
531				res_alloc->res_reserved += res_alloc->guaranteed[t];
532			}
533		}
534	}
535	spin_lock_init(&priv->mfunc.master.res_tracker.lock);
536	return 0;
537
538no_mem_err:
539	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
540		kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
541		priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
542		kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
543		priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
544		kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
545		priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
546	}
547	return -ENOMEM;
548}
549
550void mlx4_free_resource_tracker(struct mlx4_dev *dev,
551				enum mlx4_res_tracker_free_type type)
552{
553	struct mlx4_priv *priv = mlx4_priv(dev);
554	int i;
555
556	if (priv->mfunc.master.res_tracker.slave_list) {
557		if (type != RES_TR_FREE_STRUCTS_ONLY) {
558			for (i = 0; i < dev->num_slaves; i++) {
559				if (type == RES_TR_FREE_ALL ||
560				    dev->caps.function != i)
561					mlx4_delete_all_resources_for_slave(dev, i);
562			}
563			/* free master's vlans */
564			i = dev->caps.function;
565			mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
566			rem_slave_vlans(dev, i);
567			mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
568		}
569
570		if (type != RES_TR_FREE_SLAVES_ONLY) {
571			for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
572				kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
573				priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
574				kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
575				priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
576				kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
577				priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
578			}
579			kfree(priv->mfunc.master.res_tracker.slave_list);
580			priv->mfunc.master.res_tracker.slave_list = NULL;
581		}
582	}
583}
584
585static void update_pkey_index(struct mlx4_dev *dev, int slave,
586			      struct mlx4_cmd_mailbox *inbox)
587{
588	u8 sched = *(u8 *)(inbox->buf + 64);
589	u8 orig_index = *(u8 *)(inbox->buf + 35);
590	u8 new_index;
591	struct mlx4_priv *priv = mlx4_priv(dev);
592	int port;
593
594	port = (sched >> 6 & 1) + 1;
595
596	new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
597	*(u8 *)(inbox->buf + 35) = new_index;
598}
599
600static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
601		       u8 slave)
602{
603	struct mlx4_qp_context	*qp_ctx = inbox->buf + 8;
604	enum mlx4_qp_optpar	optpar = be32_to_cpu(*(__be32 *) inbox->buf);
605	u32			ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
606	int port;
607
608	if (MLX4_QP_ST_UD == ts) {
609		port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
610		if (mlx4_is_eth(dev, port))
611			qp_ctx->pri_path.mgid_index = mlx4_get_base_gid_ix(dev, slave) | 0x80;
612		else
613			qp_ctx->pri_path.mgid_index = 0x80 | slave;
614
615	} else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_UC == ts) {
616		if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
617			port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
618			if (mlx4_is_eth(dev, port)) {
619				qp_ctx->pri_path.mgid_index += mlx4_get_base_gid_ix(dev, slave);
620				qp_ctx->pri_path.mgid_index &= 0x7f;
621			} else {
622				qp_ctx->pri_path.mgid_index = slave & 0x7F;
623			}
624		}
625		if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
626			port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
627			if (mlx4_is_eth(dev, port)) {
628				qp_ctx->alt_path.mgid_index += mlx4_get_base_gid_ix(dev, slave);
629				qp_ctx->alt_path.mgid_index &= 0x7f;
630			} else {
631				qp_ctx->alt_path.mgid_index = slave & 0x7F;
632			}
633		}
634	}
635}
636
637static int check_counter_index_validity(struct mlx4_dev *dev, int slave, int port, int idx)
638{
639	struct mlx4_priv *priv = mlx4_priv(dev);
640	struct counter_index *counter, *tmp_counter;
641
642	if (slave == 0) {
643		list_for_each_entry_safe(counter, tmp_counter,
644					 &priv->counters_table.global_port_list[port - 1],
645					 list) {
646			if (counter->index == idx)
647				return 0;
648		}
649		return -EINVAL;
650	} else {
651		list_for_each_entry_safe(counter, tmp_counter,
652					 &priv->counters_table.vf_list[slave - 1][port - 1],
653					 list) {
654			if (counter->index == idx)
655				return 0;
656		}
657		return -EINVAL;
658	}
659}
660
661static int update_vport_qp_param(struct mlx4_dev *dev,
662				 struct mlx4_cmd_mailbox *inbox,
663				 u8 slave, u32 qpn)
664{
665	struct mlx4_qp_context	*qpc = inbox->buf + 8;
666	struct mlx4_vport_oper_state *vp_oper;
667	struct mlx4_priv *priv;
668	u32 qp_type;
669	int port;
670
671	port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
672	priv = mlx4_priv(dev);
673	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
674	qp_type	= (be32_to_cpu(qpc->flags) >> 16) & 0xff;
675
676	if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH &&
677	    qpc->pri_path.counter_index != MLX4_SINK_COUNTER_INDEX) {
678		if (check_counter_index_validity(dev, slave, port,
679						 qpc->pri_path.counter_index))
680			return -EINVAL;
681	}
682
683	mlx4_dbg(dev, "%s: QP counter_index %d for slave %d port %d\n",
684		 __func__, qpc->pri_path.counter_index, slave, port);
685
686	if ((dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_LB_SRC_CHK) &&
687	    dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH &&
688	    !mlx4_is_qp_reserved(dev, qpn) &&
689	    qp_type == MLX4_QP_ST_MLX &&
690	    qpc->pri_path.counter_index != 0xFF) {
691		/* disable multicast loopback to qp with same counter */
692		qpc->pri_path.fl |= MLX4_FL_ETH_SRC_CHECK_MC_LB;
693		qpc->pri_path.vlan_control |=
694			MLX4_VLAN_CTRL_ETH_SRC_CHECK_IF_COUNTER;
695	}
696
697	if (MLX4_VGT != vp_oper->state.default_vlan) {
698		/* the reserved QPs (special, proxy, tunnel)
699		 * do not operate over vlans
700		 */
701		if (mlx4_is_qp_reserved(dev, qpn))
702			return 0;
703
704		/* force strip vlan by clear vsd */
705		qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
706		/* preserve IF_COUNTER flag */
707		qpc->pri_path.vlan_control &=
708			MLX4_VLAN_CTRL_ETH_SRC_CHECK_IF_COUNTER;
709		if (MLX4_QP_ST_RC != qp_type) {
710			if (0 != vp_oper->state.default_vlan) {
711				qpc->pri_path.vlan_control |=
712					MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
713					MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
714					MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
715			} else { /* priority tagged */
716				qpc->pri_path.vlan_control |=
717					MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
718					MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
719			}
720		}
721		qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
722		qpc->pri_path.vlan_index = vp_oper->vlan_idx;
723		qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
724		qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
725		qpc->pri_path.sched_queue &= 0xC7;
726		qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
727	}
728	if (vp_oper->state.spoofchk) {
729		qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
730		qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
731	}
732	return 0;
733}
734
735static int mpt_mask(struct mlx4_dev *dev)
736{
737	return dev->caps.num_mpts - 1;
738}
739
740static void *find_res(struct mlx4_dev *dev, u64 res_id,
741		      enum mlx4_resource type)
742{
743	struct mlx4_priv *priv = mlx4_priv(dev);
744
745	return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
746				  res_id);
747}
748
749static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
750		   enum mlx4_resource type,
751		   void *res)
752{
753	struct res_common *r;
754	int err = 0;
755
756	spin_lock_irq(mlx4_tlock(dev));
757	r = find_res(dev, res_id, type);
758	if (!r) {
759		err = -ENONET;
760		goto exit;
761	}
762
763	if (r->state == RES_ANY_BUSY) {
764		err = -EBUSY;
765		goto exit;
766	}
767
768	if (r->owner != slave) {
769		err = -EPERM;
770		goto exit;
771	}
772
773	r->from_state = r->state;
774	r->state = RES_ANY_BUSY;
775
776	if (res)
777		*((struct res_common **)res) = r;
778
779exit:
780	spin_unlock_irq(mlx4_tlock(dev));
781	return err;
782}
783
784int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
785				    enum mlx4_resource type,
786				    u64 res_id, int *slave)
787{
788
789	struct res_common *r;
790	int err = -ENOENT;
791	int id = res_id;
792
793	if (type == RES_QP)
794		id &= 0x7fffff;
795	spin_lock(mlx4_tlock(dev));
796
797	r = find_res(dev, id, type);
798	if (r) {
799		*slave = r->owner;
800		err = 0;
801	}
802	spin_unlock(mlx4_tlock(dev));
803
804	return err;
805}
806
807static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
808		    enum mlx4_resource type)
809{
810	struct res_common *r;
811
812	spin_lock_irq(mlx4_tlock(dev));
813	r = find_res(dev, res_id, type);
814	if (r)
815		r->state = r->from_state;
816	spin_unlock_irq(mlx4_tlock(dev));
817}
818
819static struct res_common *alloc_qp_tr(int id)
820{
821	struct res_qp *ret;
822
823	ret = kzalloc(sizeof *ret, GFP_KERNEL);
824	if (!ret)
825		return NULL;
826
827	ret->com.res_id = id;
828	ret->com.state = RES_QP_RESERVED;
829	ret->local_qpn = id;
830	INIT_LIST_HEAD(&ret->mcg_list);
831	spin_lock_init(&ret->mcg_spl);
832	atomic_set(&ret->ref_count, 0);
833
834	return &ret->com;
835}
836
837static struct res_common *alloc_mtt_tr(int id, int order)
838{
839	struct res_mtt *ret;
840
841	ret = kzalloc(sizeof *ret, GFP_KERNEL);
842	if (!ret)
843		return NULL;
844
845	ret->com.res_id = id;
846	ret->order = order;
847	ret->com.state = RES_MTT_ALLOCATED;
848	atomic_set(&ret->ref_count, 0);
849
850	return &ret->com;
851}
852
853static struct res_common *alloc_mpt_tr(int id, int key)
854{
855	struct res_mpt *ret;
856
857	ret = kzalloc(sizeof *ret, GFP_KERNEL);
858	if (!ret)
859		return NULL;
860
861	ret->com.res_id = id;
862	ret->com.state = RES_MPT_RESERVED;
863	ret->key = key;
864
865	return &ret->com;
866}
867
868static struct res_common *alloc_eq_tr(int id)
869{
870	struct res_eq *ret;
871
872	ret = kzalloc(sizeof *ret, GFP_KERNEL);
873	if (!ret)
874		return NULL;
875
876	ret->com.res_id = id;
877	ret->com.state = RES_EQ_RESERVED;
878
879	return &ret->com;
880}
881
882static struct res_common *alloc_cq_tr(int id)
883{
884	struct res_cq *ret;
885
886	ret = kzalloc(sizeof *ret, GFP_KERNEL);
887	if (!ret)
888		return NULL;
889
890	ret->com.res_id = id;
891	ret->com.state = RES_CQ_ALLOCATED;
892	atomic_set(&ret->ref_count, 0);
893
894	return &ret->com;
895}
896
897static struct res_common *alloc_srq_tr(int id)
898{
899	struct res_srq *ret;
900
901	ret = kzalloc(sizeof *ret, GFP_KERNEL);
902	if (!ret)
903		return NULL;
904
905	ret->com.res_id = id;
906	ret->com.state = RES_SRQ_ALLOCATED;
907	atomic_set(&ret->ref_count, 0);
908
909	return &ret->com;
910}
911
912static struct res_common *alloc_counter_tr(int id)
913{
914	struct res_counter *ret;
915
916	ret = kzalloc(sizeof *ret, GFP_KERNEL);
917	if (!ret)
918		return NULL;
919
920	ret->com.res_id = id;
921	ret->com.state = RES_COUNTER_ALLOCATED;
922
923	return &ret->com;
924}
925
926static struct res_common *alloc_xrcdn_tr(int id)
927{
928	struct res_xrcdn *ret;
929
930	ret = kzalloc(sizeof *ret, GFP_KERNEL);
931	if (!ret)
932		return NULL;
933
934	ret->com.res_id = id;
935	ret->com.state = RES_XRCD_ALLOCATED;
936
937	return &ret->com;
938}
939
940static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
941{
942	struct res_fs_rule *ret;
943
944	ret = kzalloc(sizeof *ret, GFP_KERNEL);
945	if (!ret)
946		return NULL;
947
948	ret->com.res_id = id;
949	ret->com.state = RES_FS_RULE_ALLOCATED;
950	ret->qpn = qpn;
951	return &ret->com;
952}
953
954static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
955				   int extra)
956{
957	struct res_common *ret;
958
959	switch (type) {
960	case RES_QP:
961		ret = alloc_qp_tr(id);
962		break;
963	case RES_MPT:
964		ret = alloc_mpt_tr(id, extra);
965		break;
966	case RES_MTT:
967		ret = alloc_mtt_tr(id, extra);
968		break;
969	case RES_EQ:
970		ret = alloc_eq_tr(id);
971		break;
972	case RES_CQ:
973		ret = alloc_cq_tr(id);
974		break;
975	case RES_SRQ:
976		ret = alloc_srq_tr(id);
977		break;
978	case RES_MAC:
979		printk(KERN_ERR "implementation missing\n");
980		return NULL;
981	case RES_COUNTER:
982		ret = alloc_counter_tr(id);
983		break;
984	case RES_XRCD:
985		ret = alloc_xrcdn_tr(id);
986		break;
987	case RES_FS_RULE:
988		ret = alloc_fs_rule_tr(id, extra);
989		break;
990	default:
991		return NULL;
992	}
993	if (ret)
994		ret->owner = slave;
995
996	return ret;
997}
998
999static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
1000			 enum mlx4_resource type, int extra)
1001{
1002	int i;
1003	int err;
1004	struct mlx4_priv *priv = mlx4_priv(dev);
1005	struct res_common **res_arr;
1006	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1007	struct rb_root *root = &tracker->res_tree[type];
1008
1009	res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
1010	if (!res_arr)
1011		return -ENOMEM;
1012
1013	for (i = 0; i < count; ++i) {
1014		res_arr[i] = alloc_tr(base + i, type, slave, extra);
1015		if (!res_arr[i]) {
1016			for (--i; i >= 0; --i)
1017				kfree(res_arr[i]);
1018
1019			kfree(res_arr);
1020			return -ENOMEM;
1021		}
1022	}
1023
1024	spin_lock_irq(mlx4_tlock(dev));
1025	for (i = 0; i < count; ++i) {
1026		if (find_res(dev, base + i, type)) {
1027			err = -EEXIST;
1028			goto undo;
1029		}
1030		err = res_tracker_insert(root, res_arr[i]);
1031		if (err)
1032			goto undo;
1033		list_add_tail(&res_arr[i]->list,
1034			      &tracker->slave_list[slave].res_list[type]);
1035	}
1036	spin_unlock_irq(mlx4_tlock(dev));
1037	kfree(res_arr);
1038
1039	return 0;
1040
1041undo:
1042	for (--i; i >= 0; --i) {
1043		rb_erase(&res_arr[i]->node, root);
1044		list_del_init(&res_arr[i]->list);
1045	}
1046
1047	spin_unlock_irq(mlx4_tlock(dev));
1048
1049	for (i = 0; i < count; ++i)
1050		kfree(res_arr[i]);
1051
1052	kfree(res_arr);
1053
1054	return err;
1055}
1056
1057static int remove_qp_ok(struct res_qp *res)
1058{
1059	if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
1060	    !list_empty(&res->mcg_list)) {
1061		pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
1062		       res->com.state, atomic_read(&res->ref_count));
1063		return -EBUSY;
1064	} else if (res->com.state != RES_QP_RESERVED) {
1065		return -EPERM;
1066	}
1067
1068	return 0;
1069}
1070
1071static int remove_mtt_ok(struct res_mtt *res, int order)
1072{
1073	if (res->com.state == RES_MTT_BUSY ||
1074	    atomic_read(&res->ref_count)) {
1075		printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
1076		       __func__, __LINE__,
1077		       mtt_states_str(res->com.state),
1078		       atomic_read(&res->ref_count));
1079		return -EBUSY;
1080	} else if (res->com.state != RES_MTT_ALLOCATED)
1081		return -EPERM;
1082	else if (res->order != order)
1083		return -EINVAL;
1084
1085	return 0;
1086}
1087
1088static int remove_mpt_ok(struct res_mpt *res)
1089{
1090	if (res->com.state == RES_MPT_BUSY)
1091		return -EBUSY;
1092	else if (res->com.state != RES_MPT_RESERVED)
1093		return -EPERM;
1094
1095	return 0;
1096}
1097
1098static int remove_eq_ok(struct res_eq *res)
1099{
1100	if (res->com.state == RES_MPT_BUSY)
1101		return -EBUSY;
1102	else if (res->com.state != RES_MPT_RESERVED)
1103		return -EPERM;
1104
1105	return 0;
1106}
1107
1108static int remove_counter_ok(struct res_counter *res)
1109{
1110	if (res->com.state == RES_COUNTER_BUSY)
1111		return -EBUSY;
1112	else if (res->com.state != RES_COUNTER_ALLOCATED)
1113		return -EPERM;
1114
1115	return 0;
1116}
1117
1118static int remove_xrcdn_ok(struct res_xrcdn *res)
1119{
1120	if (res->com.state == RES_XRCD_BUSY)
1121		return -EBUSY;
1122	else if (res->com.state != RES_XRCD_ALLOCATED)
1123		return -EPERM;
1124
1125	return 0;
1126}
1127
1128static int remove_fs_rule_ok(struct res_fs_rule *res)
1129{
1130	if (res->com.state == RES_FS_RULE_BUSY)
1131		return -EBUSY;
1132	else if (res->com.state != RES_FS_RULE_ALLOCATED)
1133		return -EPERM;
1134
1135	return 0;
1136}
1137
1138static int remove_cq_ok(struct res_cq *res)
1139{
1140	if (res->com.state == RES_CQ_BUSY)
1141		return -EBUSY;
1142	else if (res->com.state != RES_CQ_ALLOCATED)
1143		return -EPERM;
1144
1145	return 0;
1146}
1147
1148static int remove_srq_ok(struct res_srq *res)
1149{
1150	if (res->com.state == RES_SRQ_BUSY)
1151		return -EBUSY;
1152	else if (res->com.state != RES_SRQ_ALLOCATED)
1153		return -EPERM;
1154
1155	return 0;
1156}
1157
1158static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
1159{
1160	switch (type) {
1161	case RES_QP:
1162		return remove_qp_ok((struct res_qp *)res);
1163	case RES_CQ:
1164		return remove_cq_ok((struct res_cq *)res);
1165	case RES_SRQ:
1166		return remove_srq_ok((struct res_srq *)res);
1167	case RES_MPT:
1168		return remove_mpt_ok((struct res_mpt *)res);
1169	case RES_MTT:
1170		return remove_mtt_ok((struct res_mtt *)res, extra);
1171	case RES_MAC:
1172		return -ENOSYS;
1173	case RES_EQ:
1174		return remove_eq_ok((struct res_eq *)res);
1175	case RES_COUNTER:
1176		return remove_counter_ok((struct res_counter *)res);
1177	case RES_XRCD:
1178		return remove_xrcdn_ok((struct res_xrcdn *)res);
1179	case RES_FS_RULE:
1180		return remove_fs_rule_ok((struct res_fs_rule *)res);
1181	default:
1182		return -EINVAL;
1183	}
1184}
1185
1186static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
1187			 enum mlx4_resource type, int extra)
1188{
1189	u64 i;
1190	int err;
1191	struct mlx4_priv *priv = mlx4_priv(dev);
1192	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1193	struct res_common *r;
1194
1195	spin_lock_irq(mlx4_tlock(dev));
1196	for (i = base; i < base + count; ++i) {
1197		r = res_tracker_lookup(&tracker->res_tree[type], i);
1198		if (!r) {
1199			err = -ENOENT;
1200			goto out;
1201		}
1202		if (r->owner != slave) {
1203			err = -EPERM;
1204			goto out;
1205		}
1206		err = remove_ok(r, type, extra);
1207		if (err)
1208			goto out;
1209	}
1210
1211	for (i = base; i < base + count; ++i) {
1212		r = res_tracker_lookup(&tracker->res_tree[type], i);
1213		rb_erase(&r->node, &tracker->res_tree[type]);
1214		list_del(&r->list);
1215		kfree(r);
1216	}
1217	err = 0;
1218
1219out:
1220	spin_unlock_irq(mlx4_tlock(dev));
1221
1222	return err;
1223}
1224
1225static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
1226				enum res_qp_states state, struct res_qp **qp,
1227				int alloc)
1228{
1229	struct mlx4_priv *priv = mlx4_priv(dev);
1230	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1231	struct res_qp *r;
1232	int err = 0;
1233
1234	spin_lock_irq(mlx4_tlock(dev));
1235	r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
1236	if (!r)
1237		err = -ENOENT;
1238	else if (r->com.owner != slave)
1239		err = -EPERM;
1240	else {
1241		switch (state) {
1242		case RES_QP_BUSY:
1243			mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
1244				 __func__, (unsigned long long)r->com.res_id);
1245			err = -EBUSY;
1246			break;
1247
1248		case RES_QP_RESERVED:
1249			if (r->com.state == RES_QP_MAPPED && !alloc)
1250				break;
1251
1252			mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", (unsigned long long)r->com.res_id);
1253			err = -EINVAL;
1254			break;
1255
1256		case RES_QP_MAPPED:
1257			if ((r->com.state == RES_QP_RESERVED && alloc) ||
1258			    r->com.state == RES_QP_HW)
1259				break;
1260			else {
1261				mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
1262					  (unsigned long long)r->com.res_id);
1263				err = -EINVAL;
1264			}
1265
1266			break;
1267
1268		case RES_QP_HW:
1269			if (r->com.state != RES_QP_MAPPED)
1270				err = -EINVAL;
1271			break;
1272		default:
1273			err = -EINVAL;
1274		}
1275
1276		if (!err) {
1277			r->com.from_state = r->com.state;
1278			r->com.to_state = state;
1279			r->com.state = RES_QP_BUSY;
1280			if (qp)
1281				*qp = r;
1282		}
1283	}
1284
1285	spin_unlock_irq(mlx4_tlock(dev));
1286
1287	return err;
1288}
1289
1290static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1291				enum res_mpt_states state, struct res_mpt **mpt)
1292{
1293	struct mlx4_priv *priv = mlx4_priv(dev);
1294	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1295	struct res_mpt *r;
1296	int err = 0;
1297
1298	spin_lock_irq(mlx4_tlock(dev));
1299	r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
1300	if (!r)
1301		err = -ENOENT;
1302	else if (r->com.owner != slave)
1303		err = -EPERM;
1304	else {
1305		switch (state) {
1306		case RES_MPT_BUSY:
1307			err = -EINVAL;
1308			break;
1309
1310		case RES_MPT_RESERVED:
1311			if (r->com.state != RES_MPT_MAPPED)
1312				err = -EINVAL;
1313			break;
1314
1315		case RES_MPT_MAPPED:
1316			if (r->com.state != RES_MPT_RESERVED &&
1317			    r->com.state != RES_MPT_HW)
1318				err = -EINVAL;
1319			break;
1320
1321		case RES_MPT_HW:
1322			if (r->com.state != RES_MPT_MAPPED)
1323				err = -EINVAL;
1324			break;
1325		default:
1326			err = -EINVAL;
1327		}
1328
1329		if (!err) {
1330			r->com.from_state = r->com.state;
1331			r->com.to_state = state;
1332			r->com.state = RES_MPT_BUSY;
1333			if (mpt)
1334				*mpt = r;
1335		}
1336	}
1337
1338	spin_unlock_irq(mlx4_tlock(dev));
1339
1340	return err;
1341}
1342
1343static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1344				enum res_eq_states state, struct res_eq **eq)
1345{
1346	struct mlx4_priv *priv = mlx4_priv(dev);
1347	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1348	struct res_eq *r;
1349	int err = 0;
1350
1351	spin_lock_irq(mlx4_tlock(dev));
1352	r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
1353	if (!r)
1354		err = -ENOENT;
1355	else if (r->com.owner != slave)
1356		err = -EPERM;
1357	else {
1358		switch (state) {
1359		case RES_EQ_BUSY:
1360			err = -EINVAL;
1361			break;
1362
1363		case RES_EQ_RESERVED:
1364			if (r->com.state != RES_EQ_HW)
1365				err = -EINVAL;
1366			break;
1367
1368		case RES_EQ_HW:
1369			if (r->com.state != RES_EQ_RESERVED)
1370				err = -EINVAL;
1371			break;
1372
1373		default:
1374			err = -EINVAL;
1375		}
1376
1377		if (!err) {
1378			r->com.from_state = r->com.state;
1379			r->com.to_state = state;
1380			r->com.state = RES_EQ_BUSY;
1381			if (eq)
1382				*eq = r;
1383		}
1384	}
1385
1386	spin_unlock_irq(mlx4_tlock(dev));
1387
1388	return err;
1389}
1390
1391static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
1392				enum res_cq_states state, struct res_cq **cq)
1393{
1394	struct mlx4_priv *priv = mlx4_priv(dev);
1395	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1396	struct res_cq *r;
1397	int err;
1398
1399	spin_lock_irq(mlx4_tlock(dev));
1400	r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
1401	if (!r)
1402		err = -ENOENT;
1403	else if (r->com.owner != slave)
1404		err = -EPERM;
1405	else {
1406		switch (state) {
1407		case RES_CQ_BUSY:
1408			err = -EBUSY;
1409			break;
1410
1411		case RES_CQ_ALLOCATED:
1412			if (r->com.state != RES_CQ_HW)
1413				err = -EINVAL;
1414			else if (atomic_read(&r->ref_count))
1415				err = -EBUSY;
1416			else
1417				err = 0;
1418			break;
1419
1420		case RES_CQ_HW:
1421			if (r->com.state != RES_CQ_ALLOCATED)
1422				err = -EINVAL;
1423			else
1424				err = 0;
1425			break;
1426
1427		default:
1428			err = -EINVAL;
1429		}
1430
1431		if (!err) {
1432			r->com.from_state = r->com.state;
1433			r->com.to_state = state;
1434			r->com.state = RES_CQ_BUSY;
1435			if (cq)
1436				*cq = r;
1437		}
1438	}
1439
1440	spin_unlock_irq(mlx4_tlock(dev));
1441
1442	return err;
1443}
1444
1445static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1446				 enum res_srq_states state, struct res_srq **srq)
1447{
1448	struct mlx4_priv *priv = mlx4_priv(dev);
1449	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1450	struct res_srq *r;
1451	int err = 0;
1452
1453	spin_lock_irq(mlx4_tlock(dev));
1454	r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
1455	if (!r)
1456		err = -ENOENT;
1457	else if (r->com.owner != slave)
1458		err = -EPERM;
1459	else {
1460		switch (state) {
1461		case RES_SRQ_BUSY:
1462			err = -EINVAL;
1463			break;
1464
1465		case RES_SRQ_ALLOCATED:
1466			if (r->com.state != RES_SRQ_HW)
1467				err = -EINVAL;
1468			else if (atomic_read(&r->ref_count))
1469				err = -EBUSY;
1470			break;
1471
1472		case RES_SRQ_HW:
1473			if (r->com.state != RES_SRQ_ALLOCATED)
1474				err = -EINVAL;
1475			break;
1476
1477		default:
1478			err = -EINVAL;
1479		}
1480
1481		if (!err) {
1482			r->com.from_state = r->com.state;
1483			r->com.to_state = state;
1484			r->com.state = RES_SRQ_BUSY;
1485			if (srq)
1486				*srq = r;
1487		}
1488	}
1489
1490	spin_unlock_irq(mlx4_tlock(dev));
1491
1492	return err;
1493}
1494
1495static void res_abort_move(struct mlx4_dev *dev, int slave,
1496			   enum mlx4_resource type, int id)
1497{
1498	struct mlx4_priv *priv = mlx4_priv(dev);
1499	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1500	struct res_common *r;
1501
1502	spin_lock_irq(mlx4_tlock(dev));
1503	r = res_tracker_lookup(&tracker->res_tree[type], id);
1504	if (r && (r->owner == slave))
1505		r->state = r->from_state;
1506	spin_unlock_irq(mlx4_tlock(dev));
1507}
1508
1509static void res_end_move(struct mlx4_dev *dev, int slave,
1510			 enum mlx4_resource type, int id)
1511{
1512	struct mlx4_priv *priv = mlx4_priv(dev);
1513	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1514	struct res_common *r;
1515
1516	spin_lock_irq(mlx4_tlock(dev));
1517	r = res_tracker_lookup(&tracker->res_tree[type], id);
1518	if (r && (r->owner == slave))
1519		r->state = r->to_state;
1520	spin_unlock_irq(mlx4_tlock(dev));
1521}
1522
1523static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
1524{
1525	return mlx4_is_qp_reserved(dev, qpn) &&
1526		(mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
1527}
1528
1529static int fw_reserved(struct mlx4_dev *dev, int qpn)
1530{
1531	return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
1532}
1533
1534static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1535			u64 in_param, u64 *out_param)
1536{
1537	int err;
1538	int count;
1539	int align;
1540	int base;
1541	int qpn;
1542	u8 flags;
1543
1544	switch (op) {
1545	case RES_OP_RESERVE:
1546		count = get_param_l(&in_param) & 0xffffff;
1547		/* Turn off all unsupported QP allocation flags that the
1548		 * slave tries to set.
1549		 */
1550		flags = (get_param_l(&in_param) >> 24) & dev->caps.alloc_res_qp_mask;
1551		align = get_param_h(&in_param);
1552		err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
1553		if (err)
1554			return err;
1555
1556		err = __mlx4_qp_reserve_range(dev, count, align, &base, flags);
1557		if (err) {
1558			mlx4_release_resource(dev, slave, RES_QP, count, 0);
1559			return err;
1560		}
1561
1562		err = add_res_range(dev, slave, base, count, RES_QP, 0);
1563		if (err) {
1564			mlx4_release_resource(dev, slave, RES_QP, count, 0);
1565			__mlx4_qp_release_range(dev, base, count);
1566			return err;
1567		}
1568		set_param_l(out_param, base);
1569		break;
1570	case RES_OP_MAP_ICM:
1571		qpn = get_param_l(&in_param) & 0x7fffff;
1572		if (valid_reserved(dev, slave, qpn)) {
1573			err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
1574			if (err)
1575				return err;
1576		}
1577
1578		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
1579					   NULL, 1);
1580		if (err)
1581			return err;
1582
1583		if (!fw_reserved(dev, qpn)) {
1584			err = __mlx4_qp_alloc_icm(dev, qpn);
1585			if (err) {
1586				res_abort_move(dev, slave, RES_QP, qpn);
1587				return err;
1588			}
1589		}
1590
1591		res_end_move(dev, slave, RES_QP, qpn);
1592		break;
1593
1594	default:
1595		err = -EINVAL;
1596		break;
1597	}
1598	return err;
1599}
1600
1601static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1602			 u64 in_param, u64 *out_param)
1603{
1604	int err = -EINVAL;
1605	int base;
1606	int order;
1607
1608	if (op != RES_OP_RESERVE_AND_MAP)
1609		return err;
1610
1611	order = get_param_l(&in_param);
1612
1613	err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
1614	if (err)
1615		return err;
1616
1617	base = __mlx4_alloc_mtt_range(dev, order);
1618	if (base == -1) {
1619		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1620		return -ENOMEM;
1621	}
1622
1623	err = add_res_range(dev, slave, base, 1, RES_MTT, order);
1624	if (err) {
1625		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1626		__mlx4_free_mtt_range(dev, base, order);
1627	} else
1628		set_param_l(out_param, base);
1629
1630	return err;
1631}
1632
1633static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1634			 u64 in_param, u64 *out_param)
1635{
1636	int err = -EINVAL;
1637	int index;
1638	int id;
1639	struct res_mpt *mpt;
1640
1641	switch (op) {
1642	case RES_OP_RESERVE:
1643		err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
1644		if (err)
1645			break;
1646
1647		index = __mlx4_mpt_reserve(dev);
1648		if (index == -1) {
1649			mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1650			break;
1651		}
1652		id = index & mpt_mask(dev);
1653
1654		err = add_res_range(dev, slave, id, 1, RES_MPT, index);
1655		if (err) {
1656			mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1657			__mlx4_mpt_release(dev, index);
1658			break;
1659		}
1660		set_param_l(out_param, index);
1661		break;
1662	case RES_OP_MAP_ICM:
1663		index = get_param_l(&in_param);
1664		id = index & mpt_mask(dev);
1665		err = mr_res_start_move_to(dev, slave, id,
1666					   RES_MPT_MAPPED, &mpt);
1667		if (err)
1668			return err;
1669
1670		err = __mlx4_mpt_alloc_icm(dev, mpt->key);
1671		if (err) {
1672			res_abort_move(dev, slave, RES_MPT, id);
1673			return err;
1674		}
1675
1676		res_end_move(dev, slave, RES_MPT, id);
1677		break;
1678	}
1679	return err;
1680}
1681
1682static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1683			u64 in_param, u64 *out_param)
1684{
1685	int cqn;
1686	int err;
1687
1688	switch (op) {
1689	case RES_OP_RESERVE_AND_MAP:
1690		err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
1691		if (err)
1692			break;
1693
1694		err = __mlx4_cq_alloc_icm(dev, &cqn);
1695		if (err) {
1696			mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1697			break;
1698		}
1699
1700		err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1701		if (err) {
1702			mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1703			__mlx4_cq_free_icm(dev, cqn);
1704			break;
1705		}
1706
1707		set_param_l(out_param, cqn);
1708		break;
1709
1710	default:
1711		err = -EINVAL;
1712	}
1713
1714	return err;
1715}
1716
1717static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1718			 u64 in_param, u64 *out_param)
1719{
1720	int srqn;
1721	int err;
1722
1723	switch (op) {
1724	case RES_OP_RESERVE_AND_MAP:
1725		err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
1726		if (err)
1727			break;
1728
1729		err = __mlx4_srq_alloc_icm(dev, &srqn);
1730		if (err) {
1731			mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1732			break;
1733		}
1734
1735		err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1736		if (err) {
1737			mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1738			__mlx4_srq_free_icm(dev, srqn);
1739			break;
1740		}
1741
1742		set_param_l(out_param, srqn);
1743		break;
1744
1745	default:
1746		err = -EINVAL;
1747	}
1748
1749	return err;
1750}
1751
1752static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port,
1753				     u8 smac_index, u64 *mac)
1754{
1755	struct mlx4_priv *priv = mlx4_priv(dev);
1756	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1757	struct list_head *mac_list =
1758		&tracker->slave_list[slave].res_list[RES_MAC];
1759	struct mac_res *res, *tmp;
1760
1761	list_for_each_entry_safe(res, tmp, mac_list, list) {
1762		if (res->smac_index == smac_index && res->port == (u8) port) {
1763			*mac = res->mac;
1764			return 0;
1765		}
1766	}
1767	return -ENOENT;
1768}
1769
1770static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index)
1771{
1772	struct mlx4_priv *priv = mlx4_priv(dev);
1773	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1774	struct list_head *mac_list =
1775		&tracker->slave_list[slave].res_list[RES_MAC];
1776	struct mac_res *res, *tmp;
1777
1778	list_for_each_entry_safe(res, tmp, mac_list, list) {
1779		if (res->mac == mac && res->port == (u8) port) {
1780			/* mac found. update ref count */
1781			++res->ref_count;
1782			return 0;
1783		}
1784	}
1785
1786	if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
1787		return -EINVAL;
1788	res = kzalloc(sizeof *res, GFP_KERNEL);
1789	if (!res) {
1790		mlx4_release_resource(dev, slave, RES_MAC, 1, port);
1791		return -ENOMEM;
1792	}
1793	res->mac = mac;
1794	res->port = (u8) port;
1795	res->smac_index = smac_index;
1796	res->ref_count = 1;
1797	list_add_tail(&res->list,
1798		      &tracker->slave_list[slave].res_list[RES_MAC]);
1799	return 0;
1800}
1801
1802
1803static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
1804			       int port)
1805{
1806	struct mlx4_priv *priv = mlx4_priv(dev);
1807	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1808	struct list_head *mac_list =
1809		&tracker->slave_list[slave].res_list[RES_MAC];
1810	struct mac_res *res, *tmp;
1811
1812	list_for_each_entry_safe(res, tmp, mac_list, list) {
1813		if (res->mac == mac && res->port == (u8) port) {
1814			if (!--res->ref_count) {
1815				list_del(&res->list);
1816				mlx4_release_resource(dev, slave, RES_MAC, 1, port);
1817				kfree(res);
1818			}
1819			break;
1820		}
1821	}
1822}
1823
1824static void rem_slave_macs(struct mlx4_dev *dev, int slave)
1825{
1826	struct mlx4_priv *priv = mlx4_priv(dev);
1827	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1828	struct list_head *mac_list =
1829		&tracker->slave_list[slave].res_list[RES_MAC];
1830	struct mac_res *res, *tmp;
1831	int i;
1832
1833	list_for_each_entry_safe(res, tmp, mac_list, list) {
1834		list_del(&res->list);
1835		/* dereference the mac the num times the slave referenced it */
1836		for (i = 0; i < res->ref_count; i++)
1837			__mlx4_unregister_mac(dev, res->port, res->mac);
1838		mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
1839		kfree(res);
1840	}
1841}
1842
1843static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1844			 u64 in_param, u64 *out_param, int in_port)
1845{
1846	int err = -EINVAL;
1847	int port;
1848	u64 mac;
1849	u8 smac_index = 0;
1850
1851	if (op != RES_OP_RESERVE_AND_MAP)
1852		return err;
1853
1854	port = !in_port ? get_param_l(out_param) : in_port;
1855	mac = in_param;
1856
1857	err = __mlx4_register_mac(dev, port, mac);
1858	if (err >= 0) {
1859		smac_index = err;
1860		set_param_l(out_param, err);
1861		err = 0;
1862	}
1863
1864	if (!err) {
1865		err = mac_add_to_slave(dev, slave, mac, port, smac_index);
1866		if (err)
1867			__mlx4_unregister_mac(dev, port, mac);
1868	}
1869	return err;
1870}
1871
1872static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
1873			     int port, int vlan_index)
1874{
1875	struct mlx4_priv *priv = mlx4_priv(dev);
1876	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1877	struct list_head *vlan_list =
1878		&tracker->slave_list[slave].res_list[RES_VLAN];
1879	struct vlan_res *res, *tmp;
1880
1881	list_for_each_entry_safe(res, tmp, vlan_list, list) {
1882		if (res->vlan == vlan && res->port == (u8) port) {
1883			/* vlan found. update ref count */
1884			++res->ref_count;
1885			return 0;
1886		}
1887	}
1888
1889	if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
1890		return -EINVAL;
1891	res = kzalloc(sizeof(*res), GFP_KERNEL);
1892	if (!res) {
1893		mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
1894		return -ENOMEM;
1895	}
1896	res->vlan = vlan;
1897	res->port = (u8) port;
1898	res->vlan_index = vlan_index;
1899	res->ref_count = 1;
1900	list_add_tail(&res->list,
1901		      &tracker->slave_list[slave].res_list[RES_VLAN]);
1902	return 0;
1903}
1904
1905
1906static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
1907				int port)
1908{
1909	struct mlx4_priv *priv = mlx4_priv(dev);
1910	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1911	struct list_head *vlan_list =
1912		&tracker->slave_list[slave].res_list[RES_VLAN];
1913	struct vlan_res *res, *tmp;
1914
1915	list_for_each_entry_safe(res, tmp, vlan_list, list) {
1916		if (res->vlan == vlan && res->port == (u8) port) {
1917			if (!--res->ref_count) {
1918				list_del(&res->list);
1919				mlx4_release_resource(dev, slave, RES_VLAN,
1920						      1, port);
1921				kfree(res);
1922			}
1923			break;
1924		}
1925	}
1926}
1927
1928static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
1929{
1930	struct mlx4_priv *priv = mlx4_priv(dev);
1931	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1932	struct list_head *vlan_list =
1933		&tracker->slave_list[slave].res_list[RES_VLAN];
1934	struct vlan_res *res, *tmp;
1935	int i;
1936
1937	list_for_each_entry_safe(res, tmp, vlan_list, list) {
1938		list_del(&res->list);
1939		/* dereference the vlan the num times the slave referenced it */
1940		for (i = 0; i < res->ref_count; i++)
1941			__mlx4_unregister_vlan(dev, res->port, res->vlan);
1942		mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
1943		kfree(res);
1944	}
1945}
1946
1947static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1948			  u64 in_param, u64 *out_param, int in_port)
1949{
1950	struct mlx4_priv *priv = mlx4_priv(dev);
1951	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
1952	int err = -EINVAL;
1953	u16 vlan;
1954	int vlan_index;
1955	int port;
1956
1957	port = !in_port ? get_param_l(out_param) : in_port;
1958
1959	if (!port)
1960		return err;
1961
1962	if (op != RES_OP_RESERVE_AND_MAP)
1963		return err;
1964
1965	/* upstream kernels had NOP for reg/unreg vlan. Continue this. */
1966	if (!in_port && port > 0 && port <= dev->caps.num_ports) {
1967		slave_state[slave].old_vlan_api = true;
1968		return 0;
1969	}
1970
1971	vlan = (u16) in_param;
1972
1973	err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
1974	if (!err) {
1975		set_param_l(out_param, (u32) vlan_index);
1976		err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
1977		if (err)
1978			__mlx4_unregister_vlan(dev, port, vlan);
1979	}
1980	return err;
1981}
1982
1983static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1984			     u64 in_param, u64 *out_param, int port)
1985{
1986	u32 index;
1987	int err;
1988
1989	if (op != RES_OP_RESERVE)
1990		return -EINVAL;
1991
1992	err = __mlx4_counter_alloc(dev, slave, port, &index);
1993	if (!err)
1994		set_param_l(out_param, index);
1995
1996	return err;
1997}
1998
1999static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2000			   u64 in_param, u64 *out_param)
2001{
2002	u32 xrcdn;
2003	int err;
2004
2005	if (op != RES_OP_RESERVE)
2006		return -EINVAL;
2007
2008	err = __mlx4_xrcd_alloc(dev, &xrcdn);
2009	if (err)
2010		return err;
2011
2012	err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2013	if (err)
2014		__mlx4_xrcd_free(dev, xrcdn);
2015	else
2016		set_param_l(out_param, xrcdn);
2017
2018	return err;
2019}
2020
2021int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
2022			   struct mlx4_vhcr *vhcr,
2023			   struct mlx4_cmd_mailbox *inbox,
2024			   struct mlx4_cmd_mailbox *outbox,
2025			   struct mlx4_cmd_info *cmd)
2026{
2027	int err;
2028	int alop = vhcr->op_modifier;
2029
2030	switch (vhcr->in_modifier & 0xFF) {
2031	case RES_QP:
2032		err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
2033				   vhcr->in_param, &vhcr->out_param);
2034		break;
2035
2036	case RES_MTT:
2037		err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2038				    vhcr->in_param, &vhcr->out_param);
2039		break;
2040
2041	case RES_MPT:
2042		err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2043				    vhcr->in_param, &vhcr->out_param);
2044		break;
2045
2046	case RES_CQ:
2047		err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2048				   vhcr->in_param, &vhcr->out_param);
2049		break;
2050
2051	case RES_SRQ:
2052		err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2053				    vhcr->in_param, &vhcr->out_param);
2054		break;
2055
2056	case RES_MAC:
2057		err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
2058				    vhcr->in_param, &vhcr->out_param,
2059				    (vhcr->in_modifier >> 8) & 0xFF);
2060		break;
2061
2062	case RES_VLAN:
2063		err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
2064				     vhcr->in_param, &vhcr->out_param,
2065				     (vhcr->in_modifier >> 8) & 0xFF);
2066		break;
2067
2068	case RES_COUNTER:
2069		err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
2070					vhcr->in_param, &vhcr->out_param,
2071					(vhcr->in_modifier >> 8) & 0xFF);
2072		break;
2073
2074	case RES_XRCD:
2075		err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
2076				      vhcr->in_param, &vhcr->out_param);
2077		break;
2078
2079	default:
2080		err = -EINVAL;
2081		break;
2082	}
2083
2084	return err;
2085}
2086
2087static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2088		       u64 in_param)
2089{
2090	int err;
2091	int count;
2092	int base;
2093	int qpn;
2094
2095	switch (op) {
2096	case RES_OP_RESERVE:
2097		base = get_param_l(&in_param) & 0x7fffff;
2098		count = get_param_h(&in_param);
2099		err = rem_res_range(dev, slave, base, count, RES_QP, 0);
2100		if (err)
2101			break;
2102		mlx4_release_resource(dev, slave, RES_QP, count, 0);
2103		__mlx4_qp_release_range(dev, base, count);
2104		break;
2105	case RES_OP_MAP_ICM:
2106		qpn = get_param_l(&in_param) & 0x7fffff;
2107		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
2108					   NULL, 0);
2109		if (err)
2110			return err;
2111
2112		if (!fw_reserved(dev, qpn))
2113			__mlx4_qp_free_icm(dev, qpn);
2114
2115		res_end_move(dev, slave, RES_QP, qpn);
2116
2117		if (valid_reserved(dev, slave, qpn))
2118			err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
2119		break;
2120	default:
2121		err = -EINVAL;
2122		break;
2123	}
2124	return err;
2125}
2126
2127static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2128			u64 in_param, u64 *out_param)
2129{
2130	int err = -EINVAL;
2131	int base;
2132	int order;
2133
2134	if (op != RES_OP_RESERVE_AND_MAP)
2135		return err;
2136
2137	base = get_param_l(&in_param);
2138	order = get_param_h(&in_param);
2139	err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
2140	if (!err) {
2141		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
2142		__mlx4_free_mtt_range(dev, base, order);
2143	}
2144	return err;
2145}
2146
2147static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2148			u64 in_param)
2149{
2150	int err = -EINVAL;
2151	int index;
2152	int id;
2153	struct res_mpt *mpt;
2154
2155	switch (op) {
2156	case RES_OP_RESERVE:
2157		index = get_param_l(&in_param);
2158		id = index & mpt_mask(dev);
2159		err = get_res(dev, slave, id, RES_MPT, &mpt);
2160		if (err)
2161			break;
2162		index = mpt->key;
2163		put_res(dev, slave, id, RES_MPT);
2164
2165		err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
2166		if (err)
2167			break;
2168		mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
2169		__mlx4_mpt_release(dev, index);
2170		break;
2171	case RES_OP_MAP_ICM:
2172		index = get_param_l(&in_param);
2173		id = index & mpt_mask(dev);
2174		err = mr_res_start_move_to(dev, slave, id,
2175					   RES_MPT_RESERVED, &mpt);
2176		if (err)
2177			return err;
2178
2179		__mlx4_mpt_free_icm(dev, mpt->key);
2180		res_end_move(dev, slave, RES_MPT, id);
2181		return err;
2183	default:
2184		err = -EINVAL;
2185		break;
2186	}
2187	return err;
2188}
2189
2190static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2191		       u64 in_param, u64 *out_param)
2192{
2193	int cqn;
2194	int err;
2195
2196	switch (op) {
2197	case RES_OP_RESERVE_AND_MAP:
2198		cqn = get_param_l(&in_param);
2199		err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
2200		if (err)
2201			break;
2202
2203		mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
2204		__mlx4_cq_free_icm(dev, cqn);
2205		break;
2206
2207	default:
2208		err = -EINVAL;
2209		break;
2210	}
2211
2212	return err;
2213}
2214
2215static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2216			u64 in_param, u64 *out_param)
2217{
2218	int srqn;
2219	int err;
2220
2221	switch (op) {
2222	case RES_OP_RESERVE_AND_MAP:
2223		srqn = get_param_l(&in_param);
2224		err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
2225		if (err)
2226			break;
2227
2228		mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
2229		__mlx4_srq_free_icm(dev, srqn);
2230		break;
2231
2232	default:
2233		err = -EINVAL;
2234		break;
2235	}
2236
2237	return err;
2238}
2239
2240static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2241			    u64 in_param, u64 *out_param, int in_port)
2242{
2243	int port;
2244	int err = 0;
2245
2246	switch (op) {
2247	case RES_OP_RESERVE_AND_MAP:
2248		port = !in_port ? get_param_l(out_param) : in_port;
2249		mac_del_from_slave(dev, slave, in_param, port);
2250		__mlx4_unregister_mac(dev, port, in_param);
2251		break;
2252	default:
2253		err = -EINVAL;
2254		break;
2255	}
2256
2257	return err;
2259}
2260
2261static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2262			    u64 in_param, u64 *out_param, int port)
2263{
2264	struct mlx4_priv *priv = mlx4_priv(dev);
2265	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2266	int err = 0;
2267
2268	switch (op) {
2269	case RES_OP_RESERVE_AND_MAP:
2270		if (slave_state[slave].old_vlan_api)
2271			return 0;
2272		if (!port)
2273			return -EINVAL;
2274		vlan_del_from_slave(dev, slave, in_param, port);
2275		__mlx4_unregister_vlan(dev, port, in_param);
2276		break;
2277	default:
2278		err = -EINVAL;
2279		break;
2280	}
2281
2282	return err;
2283}
2284
2285static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2286			    u64 in_param, u64 *out_param, int port)
2287{
2288	int index;
2289
2290	if (op != RES_OP_RESERVE)
2291		return -EINVAL;
2292
2293	index = get_param_l(&in_param);
2294
2295	__mlx4_counter_free(dev, slave, port, index);
2296
2297	return 0;
2298}
2299
2300static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2301			  u64 in_param, u64 *out_param)
2302{
2303	int xrcdn;
2304	int err;
2305
2306	if (op != RES_OP_RESERVE)
2307		return -EINVAL;
2308
2309	xrcdn = get_param_l(&in_param);
2310	err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2311	if (err)
2312		return err;
2313
2314	__mlx4_xrcd_free(dev, xrcdn);
2315
2316	return err;
2317}
2318
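/* Dispatch a slave's FREE_RES command to the per-resource handler,
 * mirroring mlx4_ALLOC_RES_wrapper above.
 */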
2319int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
2320			  struct mlx4_vhcr *vhcr,
2321			  struct mlx4_cmd_mailbox *inbox,
2322			  struct mlx4_cmd_mailbox *outbox,
2323			  struct mlx4_cmd_info *cmd)
2324{
2325	int err = -EINVAL;
2326	int alop = vhcr->op_modifier;
2327
2328	switch (vhcr->in_modifier & 0xFF) {
2329	case RES_QP:
2330		err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
2331				  vhcr->in_param);
2332		break;
2333
2334	case RES_MTT:
2335		err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
2336				   vhcr->in_param, &vhcr->out_param);
2337		break;
2338
2339	case RES_MPT:
2340		err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
2341				   vhcr->in_param);
2342		break;
2343
2344	case RES_CQ:
2345		err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
2346				  vhcr->in_param, &vhcr->out_param);
2347		break;
2348
2349	case RES_SRQ:
2350		err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
2351				   vhcr->in_param, &vhcr->out_param);
2352		break;
2353
2354	case RES_MAC:
2355		err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
2356				   vhcr->in_param, &vhcr->out_param,
2357				   (vhcr->in_modifier >> 8) & 0xFF);
2358		break;
2359
2360	case RES_VLAN:
2361		err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
2362				    vhcr->in_param, &vhcr->out_param,
2363				    (vhcr->in_modifier >> 8) & 0xFF);
2364		break;
2365
2366	case RES_COUNTER:
2367		err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
2368				       vhcr->in_param, &vhcr->out_param,
2369				       (vhcr->in_modifier >> 8) & 0xFF);
2370		break;
2371
2372	case RES_XRCD:
2373		err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
2374				     vhcr->in_param, &vhcr->out_param);
2375		break;

2376	default:
2377		break;
2378	}
2379	return err;
2380}
2381
2382/* ugly but other choices are uglier */
2383static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
2384{
2385	return (be32_to_cpu(mpt->flags) >> 9) & 1;
2386}
2387
2388static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
2389{
2390	return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
2391}
2392
2393static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
2394{
2395	return be32_to_cpu(mpt->mtt_sz);
2396}
2397
2398static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
2399{
2400	return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
2401}
2402
2403static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
2404{
2405	return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
2406}
2407
2408static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
2409{
2410	return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
2411}
2412
2413static int mr_is_region(struct mlx4_mpt_entry *mpt)
2414{
2415	return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
2416}
2417
2418static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
2419{
2420	return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
2421}
2422
2423static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
2424{
2425	return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
2426}
2427
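/* Compute the number of MTT entries (pages) a QP context requires.
 * The SQ size comes from the log size/stride fields; the RQ contributes
 * nothing when the QP uses an SRQ, RSS or XRC.  The total, adjusted by
 * the page offset encoded in params2, is rounded up to a power-of-two
 * page count.
 */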
2428static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
2429{
2430	int page_shift = (qpc->log_page_size & 0x3f) + 12;
2431	int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
2432	int log_sq_stride = qpc->sq_size_stride & 7;
2433	int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
2434	int log_rq_stride = qpc->rq_size_stride & 7;
2435	int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
2436	int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
2437	u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
2438	int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
2439	int sq_size;
2440	int rq_size;
2441	int total_pages;
2442	int total_mem;
2443	int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
2444
2445	sq_size = 1 << (log_sq_size + log_sq_stride + 4);
2446	rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
2447	total_mem = sq_size + rq_size;
2448	total_pages =
2449		roundup_pow_of_two((total_mem + (page_offset << 6)) >>
2450				   page_shift);
2451
2452	return total_pages;
2453}
2454
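/* Check that the MTT range [start, start + size) is fully contained in
 * the MTT segment described by the tracked resource.
 */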
2455static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
2456			   int size, struct res_mtt *mtt)
2457{
2458	int res_start = mtt->com.res_id;
2459	int res_size = (1 << mtt->order);
2460
2461	if (start < res_start || start + size > res_start + res_size)
2462		return -EPERM;
2463	return 0;
2464}
2465
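/* SW2HW_MPT on behalf of a slave: move the MPT to hardware ownership
 * after validating that the entry is a memory region (no memory
 * windows), that the PD's slave bits match the caller, that FMRs do not
 * enable binding, and that any referenced MTT range belongs to the
 * slave.
 */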
2466int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2467			   struct mlx4_vhcr *vhcr,
2468			   struct mlx4_cmd_mailbox *inbox,
2469			   struct mlx4_cmd_mailbox *outbox,
2470			   struct mlx4_cmd_info *cmd)
2471{
2472	int err;
2473	int index = vhcr->in_modifier;
2474	struct res_mtt *mtt;
2475	struct res_mpt *mpt;
2476	int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
2477	int phys;
2478	int id;
2479	u32 pd;
2480	int pd_slave;
2481
2482	id = index & mpt_mask(dev);
2483	err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
2484	if (err)
2485		return err;
2486
2487	/* Currently disable memory windows since this feature isn't tested yet
2488	 * under virtualization.
2489	 */
2490	if (!mr_is_region(inbox->buf)) {
2491		err = -ENOSYS;
2492		goto ex_abort;
2493	}
2494
2495	/* Make sure that the PD bits related to the slave id are zeros. */
2496	pd = mr_get_pd(inbox->buf);
2497	pd_slave = (pd >> 17) & 0x7f;
2498	if (pd_slave != 0 && pd_slave != slave) {
2499		err = -EPERM;
2500		goto ex_abort;
2501	}
2502
2503	if (mr_is_fmr(inbox->buf)) {
2504		/* FMR and Bind Enable are forbidden in slave devices. */
2505		if (mr_is_bind_enabled(inbox->buf)) {
2506			err = -EPERM;
2507			goto ex_abort;
2508		}
2509		/* FMR and Memory Windows are also forbidden. */
2510		if (!mr_is_region(inbox->buf)) {
2511			err = -EPERM;
2512			goto ex_abort;
2513		}
2514	}
2515
2516	phys = mr_phys_mpt(inbox->buf);
2517	if (!phys) {
2518		err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2519		if (err)
2520			goto ex_abort;
2521
2522		err = check_mtt_range(dev, slave, mtt_base,
2523				      mr_get_mtt_size(inbox->buf), mtt);
2524		if (err)
2525			goto ex_put;
2526
2527		mpt->mtt = mtt;
2528	}
2529
2530	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2531	if (err)
2532		goto ex_put;
2533
2534	if (!phys) {
2535		atomic_inc(&mtt->ref_count);
2536		put_res(dev, slave, mtt->com.res_id, RES_MTT);
2537	}
2538
2539	res_end_move(dev, slave, RES_MPT, id);
2540	return 0;
2541
2542ex_put:
2543	if (!phys)
2544		put_res(dev, slave, mtt->com.res_id, RES_MTT);
2545ex_abort:
2546	res_abort_move(dev, slave, RES_MPT, id);
2547
2548	return err;
2549}
2550
2551int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2552			   struct mlx4_vhcr *vhcr,
2553			   struct mlx4_cmd_mailbox *inbox,
2554			   struct mlx4_cmd_mailbox *outbox,
2555			   struct mlx4_cmd_info *cmd)
2556{
2557	int err;
2558	int index = vhcr->in_modifier;
2559	struct res_mpt *mpt;
2560	int id;
2561
2562	id = index & mpt_mask(dev);
2563	err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
2564	if (err)
2565		return err;
2566
2567	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2568	if (err)
2569		goto ex_abort;
2570
2571	if (mpt->mtt)
2572		atomic_dec(&mpt->mtt->ref_count);
2573
2574	res_end_move(dev, slave, RES_MPT, id);
2575	return 0;
2576
2577ex_abort:
2578	res_abort_move(dev, slave, RES_MPT, id);
2579
2580	return err;
2581}
2582
2583int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
2584			   struct mlx4_vhcr *vhcr,
2585			   struct mlx4_cmd_mailbox *inbox,
2586			   struct mlx4_cmd_mailbox *outbox,
2587			   struct mlx4_cmd_info *cmd)
2588{
2589	int err;
2590	int index = vhcr->in_modifier;
2591	struct res_mpt *mpt;
2592	int id;
2593
2594	id = index & mpt_mask(dev);
2595	err = get_res(dev, slave, id, RES_MPT, &mpt);
2596	if (err)
2597		return err;
2598
2599	if (mpt->com.from_state != RES_MPT_HW) {
2600		err = -EBUSY;
2601		goto out;
2602	}
2603
2604	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2605
2606out:
2607	put_res(dev, slave, id, RES_MPT);
2608	return err;
2609}
2610
2611static int qp_get_rcqn(struct mlx4_qp_context *qpc)
2612{
2613	return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
2614}
2615
2616static int qp_get_scqn(struct mlx4_qp_context *qpc)
2617{
2618	return be32_to_cpu(qpc->cqn_send) & 0xffffff;
2619}
2620
2621static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
2622{
2623	return be32_to_cpu(qpc->srqn) & 0x1ffffff;
2624}
2625
2626static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
2627				  struct mlx4_qp_context *context)
2628{
2629	u32 qpn = vhcr->in_modifier & 0xffffff;
2630	u32 qkey = 0;
2631
2632	if (mlx4_get_parav_qkey(dev, qpn, &qkey))
2633		return;
2634
2635	/* adjust qkey in qp context */
2636	context->qkey = cpu_to_be32(qkey);
2637}
2638
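/* RST2INIT on behalf of a slave: move the QP to hardware ownership and
 * take references on the MTT, the receive/send CQs and the SRQ (if any)
 * so they cannot be destroyed while the QP uses them.
 */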
2639int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2640			     struct mlx4_vhcr *vhcr,
2641			     struct mlx4_cmd_mailbox *inbox,
2642			     struct mlx4_cmd_mailbox *outbox,
2643			     struct mlx4_cmd_info *cmd)
2644{
2645	int err;
2646	int qpn = vhcr->in_modifier & 0x7fffff;
2647	struct res_mtt *mtt;
2648	struct res_qp *qp;
2649	struct mlx4_qp_context *qpc = inbox->buf + 8;
2650	int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
2651	int mtt_size = qp_get_mtt_size(qpc);
2652	struct res_cq *rcq;
2653	struct res_cq *scq;
2654	int rcqn = qp_get_rcqn(qpc);
2655	int scqn = qp_get_scqn(qpc);
2656	u32 srqn = qp_get_srqn(qpc) & 0xffffff;
2657	int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
2658	struct res_srq *srq;
2659	int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
2660
2661	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
2662	if (err)
2663		return err;
2664	qp->local_qpn = local_qpn;
2665	qp->sched_queue = 0;
2666	qp->param3 = 0;
2667	qp->vlan_control = 0;
2668	qp->fvl_rx = 0;
2669	qp->pri_path_fl = 0;
2670	qp->vlan_index = 0;
2671	qp->feup = 0;
2672	qp->qpc_flags = be32_to_cpu(qpc->flags);
2673
2674	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2675	if (err)
2676		goto ex_abort;
2677
2678	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2679	if (err)
2680		goto ex_put_mtt;
2681
2682	err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
2683	if (err)
2684		goto ex_put_mtt;
2685
2686	if (scqn != rcqn) {
2687		err = get_res(dev, slave, scqn, RES_CQ, &scq);
2688		if (err)
2689			goto ex_put_rcq;
2690	} else
2691		scq = rcq;
2692
2693	if (use_srq) {
2694		err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2695		if (err)
2696			goto ex_put_scq;
2697	}
2698
2699	adjust_proxy_tun_qkey(dev, vhcr, qpc);
2700	update_pkey_index(dev, slave, inbox);
2701	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2702	if (err)
2703		goto ex_put_srq;
2704	atomic_inc(&mtt->ref_count);
2705	qp->mtt = mtt;
2706	atomic_inc(&rcq->ref_count);
2707	qp->rcq = rcq;
2708	atomic_inc(&scq->ref_count);
2709	qp->scq = scq;
2710
2711	if (scqn != rcqn)
2712		put_res(dev, slave, scqn, RES_CQ);
2713
2714	if (use_srq) {
2715		atomic_inc(&srq->ref_count);
2716		put_res(dev, slave, srqn, RES_SRQ);
2717		qp->srq = srq;
2718	}
2719	put_res(dev, slave, rcqn, RES_CQ);
2720	put_res(dev, slave, mtt_base, RES_MTT);
2721	res_end_move(dev, slave, RES_QP, qpn);
2722
2723	return 0;
2724
2725ex_put_srq:
2726	if (use_srq)
2727		put_res(dev, slave, srqn, RES_SRQ);
2728ex_put_scq:
2729	if (scqn != rcqn)
2730		put_res(dev, slave, scqn, RES_CQ);
2731ex_put_rcq:
2732	put_res(dev, slave, rcqn, RES_CQ);
2733ex_put_mtt:
2734	put_res(dev, slave, mtt_base, RES_MTT);
2735ex_abort:
2736	res_abort_move(dev, slave, RES_QP, qpn);
2737
2738	return err;
2739}
2740
2741static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
2742{
2743	return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
2744}
2745
2746static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
2747{
2748	int log_eq_size = eqc->log_eq_size & 0x1f;
2749	int page_shift = (eqc->log_page_size & 0x3f) + 12;
2750
2751	if (log_eq_size + 5 < page_shift)
2752		return 1;
2753
2754	return 1 << (log_eq_size + 5 - page_shift);
2755}
2756
2757static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
2758{
2759	return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
2760}
2761
2762static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
2763{
2764	int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
2765	int page_shift = (cqc->log_page_size & 0x3f) + 12;
2766
2767	if (log_cq_size + 5 < page_shift)
2768		return 1;
2769
2770	return 1 << (log_cq_size + 5 - page_shift);
2771}
2772
2773int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2774			  struct mlx4_vhcr *vhcr,
2775			  struct mlx4_cmd_mailbox *inbox,
2776			  struct mlx4_cmd_mailbox *outbox,
2777			  struct mlx4_cmd_info *cmd)
2778{
2779	int err;
2780	int eqn = vhcr->in_modifier;
2781	int res_id = (slave << 8) | eqn;
2782	struct mlx4_eq_context *eqc = inbox->buf;
2783	int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
2784	int mtt_size = eq_get_mtt_size(eqc);
2785	struct res_eq *eq;
2786	struct res_mtt *mtt;
2787
2788	err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2789	if (err)
2790		return err;
2791	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
2792	if (err)
2793		goto out_add;
2794
2795	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2796	if (err)
2797		goto out_move;
2798
2799	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2800	if (err)
2801		goto out_put;
2802
2803	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2804	if (err)
2805		goto out_put;
2806
2807	atomic_inc(&mtt->ref_count);
2808	eq->mtt = mtt;
2809	put_res(dev, slave, mtt->com.res_id, RES_MTT);
2810	res_end_move(dev, slave, RES_EQ, res_id);
2811	return 0;
2812
2813out_put:
2814	put_res(dev, slave, mtt->com.res_id, RES_MTT);
2815out_move:
2816	res_abort_move(dev, slave, RES_EQ, res_id);
2817out_add:
2818	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2819	return err;
2820}
2821
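/* Find the slave's MTT resource that contains [start, start + len) and
 * mark it busy; the caller releases it with put_res().
 */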
2822static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
2823			      int len, struct res_mtt **res)
2824{
2825	struct mlx4_priv *priv = mlx4_priv(dev);
2826	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2827	struct res_mtt *mtt;
2828	int err = -EINVAL;
2829
2830	spin_lock_irq(mlx4_tlock(dev));
2831	list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
2832			    com.list) {
2833		if (!check_mtt_range(dev, slave, start, len, mtt)) {
2834			*res = mtt;
2835			mtt->com.from_state = mtt->com.state;
2836			mtt->com.state = RES_MTT_BUSY;
2837			err = 0;
2838			break;
2839		}
2840	}
2841	spin_unlock_irq(mlx4_tlock(dev));
2842
2843	return err;
2844}
2845
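/* Validate the QP context a slave supplies for a state transition.
 * For RC/UC transitions that modify an address path, the MGID index
 * must fall within the number of GIDs assigned to the slave.
 */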
2846static int verify_qp_parameters(struct mlx4_dev *dev,
2847				struct mlx4_cmd_mailbox *inbox,
2848				enum qp_transition transition, u8 slave)
2849{
2850	u32			qp_type;
2851	struct mlx4_qp_context	*qp_ctx;
2852	enum mlx4_qp_optpar	optpar;
2853	int port;
2854	int num_gids;
2855
2856	qp_ctx  = inbox->buf + 8;
2857	qp_type	= (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
2858	optpar	= be32_to_cpu(*(__be32 *) inbox->buf);
2859
2860	switch (qp_type) {
2861	case MLX4_QP_ST_RC:
2862	case MLX4_QP_ST_UC:
2863		switch (transition) {
2864		case QP_TRANS_INIT2RTR:
2865		case QP_TRANS_RTR2RTS:
2866		case QP_TRANS_RTS2RTS:
2867		case QP_TRANS_SQD2SQD:
2868		case QP_TRANS_SQD2RTS:
2869			if (slave != mlx4_master_func_num(dev)) {
2870				if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
2871					port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
2872					if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
2873						num_gids = mlx4_get_slave_num_gids(dev, slave);
2874					else
2875						num_gids = 1;
2876					if (qp_ctx->pri_path.mgid_index >= num_gids)
2877						return -EINVAL;
2878				}
2879				if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
2880					port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
2881					if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
2882						num_gids = mlx4_get_slave_num_gids(dev, slave);
2883					else
2884						num_gids = 1;
2885					if (qp_ctx->alt_path.mgid_index >= num_gids)
2886						return -EINVAL;
2887				}
2888			}
			break;
2889		default:
2890			break;
2891		}
2892
2893		break;
2894	default:
2895		break;
2896	}
2897
2898	return 0;
2899}
2900
2901int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
2902			   struct mlx4_vhcr *vhcr,
2903			   struct mlx4_cmd_mailbox *inbox,
2904			   struct mlx4_cmd_mailbox *outbox,
2905			   struct mlx4_cmd_info *cmd)
2906{
2907	struct mlx4_mtt mtt;
2908	__be64 *page_list = inbox->buf;
2909	u64 *pg_list = (u64 *)page_list;
2910	int i;
2911	struct res_mtt *rmtt = NULL;
2912	int start = be64_to_cpu(page_list[0]);
2913	int npages = vhcr->in_modifier;
2914	int err;
2915
2916	err = get_containing_mtt(dev, slave, start, npages, &rmtt);
2917	if (err)
2918		return err;
2919
2920	/* Call the SW implementation of write_mtt:
2921	 * - Prepare a dummy mtt struct
2922	 * - Translate inbox contents to simple addresses in host endianness */
2923	mtt.offset = 0;  /* TBD this is broken but I don't handle it since
2924			    we don't really use it */
2925	mtt.order = 0;
2926	mtt.page_shift = 0;
2927	for (i = 0; i < npages; ++i)
2928		pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
2929
2930	err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
2931			       ((u64 *)page_list + 2));
2932
2933	if (rmtt)
2934		put_res(dev, slave, rmtt->com.res_id, RES_MTT);
2935
2936	return err;
2937}
2938
2939int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2940			  struct mlx4_vhcr *vhcr,
2941			  struct mlx4_cmd_mailbox *inbox,
2942			  struct mlx4_cmd_mailbox *outbox,
2943			  struct mlx4_cmd_info *cmd)
2944{
2945	int eqn = vhcr->in_modifier;
2946	int res_id = eqn | (slave << 8);
2947	struct res_eq *eq;
2948	int err;
2949
2950	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
2951	if (err)
2952		return err;
2953
2954	err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
2955	if (err)
2956		goto ex_abort;
2957
2958	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2959	if (err)
2960		goto ex_put;
2961
2962	atomic_dec(&eq->mtt->ref_count);
2963	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2964	res_end_move(dev, slave, RES_EQ, res_id);
2965	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2966
2967	return 0;
2968
2969ex_put:
2970	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2971ex_abort:
2972	res_abort_move(dev, slave, RES_EQ, res_id);
2973
2974	return err;
2975}
2976
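/* Generate an event on a slave's event queue.  The EQE is forwarded to
 * firmware with the GEN_EQE command, but only if the slave is valid,
 * active and has registered an EQ for this event type.
 */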
2977int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
2978{
2979	struct mlx4_priv *priv = mlx4_priv(dev);
2980	struct mlx4_slave_event_eq_info *event_eq;
2981	struct mlx4_cmd_mailbox *mailbox;
2982	u32 in_modifier = 0;
2983	int err;
2984	int res_id;
2985	struct res_eq *req;
2986
2987	if (!priv->mfunc.master.slave_state)
2988		return -EINVAL;
2989
2990	/* check for slave valid, slave not PF, and slave active */
2991	if (slave < 0 || slave >= dev->num_slaves ||
2992	    slave == dev->caps.function ||
2993	    !priv->mfunc.master.slave_state[slave].active)
2994		return 0;
2995
2996	event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
2997
2998	/* Create the event only if the slave is registered */
2999	if (event_eq->eqn < 0)
3000		return 0;
3001
3002	mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3003	res_id = (slave << 8) | event_eq->eqn;
3004	err = get_res(dev, slave, res_id, RES_EQ, &req);
3005	if (err)
3006		goto unlock;
3007
3008	if (req->com.from_state != RES_EQ_HW) {
3009		err = -EINVAL;
3010		goto put;
3011	}
3012
3013	mailbox = mlx4_alloc_cmd_mailbox(dev);
3014	if (IS_ERR(mailbox)) {
3015		err = PTR_ERR(mailbox);
3016		goto put;
3017	}
3018
3019	if (eqe->type == MLX4_EVENT_TYPE_CMD) {
3020		++event_eq->token;
3021		eqe->event.cmd.token = cpu_to_be16(event_eq->token);
3022	}
3023
3024	memcpy(mailbox->buf, (u8 *) eqe, 28);
3025
3026	in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);
3027
3028	err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
3029		       MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
3030		       MLX4_CMD_NATIVE);
3031
3032	put_res(dev, slave, res_id, RES_EQ);
3033	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3034	mlx4_free_cmd_mailbox(dev, mailbox);
3035	return err;
3036
3037put:
3038	put_res(dev, slave, res_id, RES_EQ);
3039
3040unlock:
3041	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3042	return err;
3043}
3044
3045int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
3046			  struct mlx4_vhcr *vhcr,
3047			  struct mlx4_cmd_mailbox *inbox,
3048			  struct mlx4_cmd_mailbox *outbox,
3049			  struct mlx4_cmd_info *cmd)
3050{
3051	int eqn = vhcr->in_modifier;
3052	int res_id = eqn | (slave << 8);
3053	struct res_eq *eq;
3054	int err;
3055
3056	err = get_res(dev, slave, res_id, RES_EQ, &eq);
3057	if (err)
3058		return err;
3059
3060	if (eq->com.from_state != RES_EQ_HW) {
3061		err = -EINVAL;
3062		goto ex_put;
3063	}
3064
3065	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3066
3067ex_put:
3068	put_res(dev, slave, res_id, RES_EQ);
3069	return err;
3070}
3071
3072int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3073			  struct mlx4_vhcr *vhcr,
3074			  struct mlx4_cmd_mailbox *inbox,
3075			  struct mlx4_cmd_mailbox *outbox,
3076			  struct mlx4_cmd_info *cmd)
3077{
3078	int err;
3079	int cqn = vhcr->in_modifier;
3080	struct mlx4_cq_context *cqc = inbox->buf;
3081	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3082	struct res_cq *cq;
3083	struct res_mtt *mtt;
3084
3085	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
3086	if (err)
3087		return err;
3088	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3089	if (err)
3090		goto out_move;
3091	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3092	if (err)
3093		goto out_put;
3094	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3095	if (err)
3096		goto out_put;
3097	atomic_inc(&mtt->ref_count);
3098	cq->mtt = mtt;
3099	put_res(dev, slave, mtt->com.res_id, RES_MTT);
3100	res_end_move(dev, slave, RES_CQ, cqn);
3101	return 0;
3102
3103out_put:
3104	put_res(dev, slave, mtt->com.res_id, RES_MTT);
3105out_move:
3106	res_abort_move(dev, slave, RES_CQ, cqn);
3107	return err;
3108}
3109
3110int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3111			  struct mlx4_vhcr *vhcr,
3112			  struct mlx4_cmd_mailbox *inbox,
3113			  struct mlx4_cmd_mailbox *outbox,
3114			  struct mlx4_cmd_info *cmd)
3115{
3116	int err;
3117	int cqn = vhcr->in_modifier;
3118	struct res_cq *cq;
3119
3120	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
3121	if (err)
3122		return err;
3123	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3124	if (err)
3125		goto out_move;
3126	atomic_dec(&cq->mtt->ref_count);
3127	res_end_move(dev, slave, RES_CQ, cqn);
3128	return 0;
3129
3130out_move:
3131	res_abort_move(dev, slave, RES_CQ, cqn);
3132	return err;
3133}
3134
3135int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3136			  struct mlx4_vhcr *vhcr,
3137			  struct mlx4_cmd_mailbox *inbox,
3138			  struct mlx4_cmd_mailbox *outbox,
3139			  struct mlx4_cmd_info *cmd)
3140{
3141	int cqn = vhcr->in_modifier;
3142	struct res_cq *cq;
3143	int err;
3144
3145	err = get_res(dev, slave, cqn, RES_CQ, &cq);
3146	if (err)
3147		return err;
3148
3149	if (cq->com.from_state != RES_CQ_HW)
3150		goto ex_put;
3151
3152	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3153ex_put:
3154	put_res(dev, slave, cqn, RES_CQ);
3155
3156	return err;
3157}
3158
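/* MODIFY_CQ resize path: validate the new MTT range, execute the
 * command and move the CQ's MTT reference from the old range to the
 * new one.
 */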
3159static int handle_resize(struct mlx4_dev *dev, int slave,
3160			 struct mlx4_vhcr *vhcr,
3161			 struct mlx4_cmd_mailbox *inbox,
3162			 struct mlx4_cmd_mailbox *outbox,
3163			 struct mlx4_cmd_info *cmd,
3164			 struct res_cq *cq)
3165{
3166	int err;
3167	struct res_mtt *orig_mtt;
3168	struct res_mtt *mtt;
3169	struct mlx4_cq_context *cqc = inbox->buf;
3170	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3171
3172	err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
3173	if (err)
3174		return err;
3175
3176	if (orig_mtt != cq->mtt) {
3177		err = -EINVAL;
3178		goto ex_put;
3179	}
3180
3181	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3182	if (err)
3183		goto ex_put;
3184
3185	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3186	if (err)
3187		goto ex_put1;
3188	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3189	if (err)
3190		goto ex_put1;
3191	atomic_dec(&orig_mtt->ref_count);
3192	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3193	atomic_inc(&mtt->ref_count);
3194	cq->mtt = mtt;
3195	put_res(dev, slave, mtt->com.res_id, RES_MTT);
3196	return 0;
3197
3198ex_put1:
3199	put_res(dev, slave, mtt->com.res_id, RES_MTT);
3200ex_put:
3201	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3202
3203	return err;
3205}
3206
3207int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3208			   struct mlx4_vhcr *vhcr,
3209			   struct mlx4_cmd_mailbox *inbox,
3210			   struct mlx4_cmd_mailbox *outbox,
3211			   struct mlx4_cmd_info *cmd)
3212{
3213	int cqn = vhcr->in_modifier;
3214	struct res_cq *cq;
3215	int err;
3216
3217	err = get_res(dev, slave, cqn, RES_CQ, &cq);
3218	if (err)
3219		return err;
3220
3221	if (cq->com.from_state != RES_CQ_HW)
3222		goto ex_put;
3223
3224	if (vhcr->op_modifier == 0) {
3225		err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
3226		goto ex_put;
3227	}
3228
3229	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3230ex_put:
3231	put_res(dev, slave, cqn, RES_CQ);
3232
3233	return err;
3234}
3235
3236static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
3237{
3238	int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
3239	int log_rq_stride = srqc->logstride & 7;
3240	int page_shift = (srqc->log_page_size & 0x3f) + 12;
3241
3242	if (log_srq_size + log_rq_stride + 4 < page_shift)
3243		return 1;
3244
3245	return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
3246}
3247
3248int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3249			   struct mlx4_vhcr *vhcr,
3250			   struct mlx4_cmd_mailbox *inbox,
3251			   struct mlx4_cmd_mailbox *outbox,
3252			   struct mlx4_cmd_info *cmd)
3253{
3254	int err;
3255	int srqn = vhcr->in_modifier;
3256	struct res_mtt *mtt;
3257	struct res_srq *srq;
3258	struct mlx4_srq_context *srqc = inbox->buf;
3259	int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
3260
3261	if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
3262		return -EINVAL;
3263
3264	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
3265	if (err)
3266		return err;
3267	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3268	if (err)
3269		goto ex_abort;
3270	err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
3271			      mtt);
3272	if (err)
3273		goto ex_put_mtt;
3274
3275	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3276	if (err)
3277		goto ex_put_mtt;
3278
3279	atomic_inc(&mtt->ref_count);
3280	srq->mtt = mtt;
3281	put_res(dev, slave, mtt->com.res_id, RES_MTT);
3282	res_end_move(dev, slave, RES_SRQ, srqn);
3283	return 0;
3284
3285ex_put_mtt:
3286	put_res(dev, slave, mtt->com.res_id, RES_MTT);
3287ex_abort:
3288	res_abort_move(dev, slave, RES_SRQ, srqn);
3289
3290	return err;
3291}
3292
3293int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3294			   struct mlx4_vhcr *vhcr,
3295			   struct mlx4_cmd_mailbox *inbox,
3296			   struct mlx4_cmd_mailbox *outbox,
3297			   struct mlx4_cmd_info *cmd)
3298{
3299	int err;
3300	int srqn = vhcr->in_modifier;
3301	struct res_srq *srq;
3302
3303	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
3304	if (err)
3305		return err;
3306	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3307	if (err)
3308		goto ex_abort;
3309	atomic_dec(&srq->mtt->ref_count);
3310	if (srq->cq)
3311		atomic_dec(&srq->cq->ref_count);
3312	res_end_move(dev, slave, RES_SRQ, srqn);
3313
3314	return 0;
3315
3316ex_abort:
3317	res_abort_move(dev, slave, RES_SRQ, srqn);
3318
3319	return err;
3320}
3321
3322int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3323			   struct mlx4_vhcr *vhcr,
3324			   struct mlx4_cmd_mailbox *inbox,
3325			   struct mlx4_cmd_mailbox *outbox,
3326			   struct mlx4_cmd_info *cmd)
3327{
3328	int err;
3329	int srqn = vhcr->in_modifier;
3330	struct res_srq *srq;
3331
3332	err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3333	if (err)
3334		return err;
3335	if (srq->com.from_state != RES_SRQ_HW) {
3336		err = -EBUSY;
3337		goto out;
3338	}
3339	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3340out:
3341	put_res(dev, slave, srqn, RES_SRQ);
3342	return err;
3343}
3344
3345int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3346			 struct mlx4_vhcr *vhcr,
3347			 struct mlx4_cmd_mailbox *inbox,
3348			 struct mlx4_cmd_mailbox *outbox,
3349			 struct mlx4_cmd_info *cmd)
3350{
3351	int err;
3352	int srqn = vhcr->in_modifier;
3353	struct res_srq *srq;
3354
3355	err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3356	if (err)
3357		return err;
3358
3359	if (srq->com.from_state != RES_SRQ_HW) {
3360		err = -EBUSY;
3361		goto out;
3362	}
3363
3364	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3365out:
3366	put_res(dev, slave, srqn, RES_SRQ);
3367	return err;
3368}
3369
3370int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
3371			struct mlx4_vhcr *vhcr,
3372			struct mlx4_cmd_mailbox *inbox,
3373			struct mlx4_cmd_mailbox *outbox,
3374			struct mlx4_cmd_info *cmd)
3375{
3376	int err;
3377	int qpn = vhcr->in_modifier & 0x7fffff;
3378	struct res_qp *qp;
3379
3380	err = get_res(dev, slave, qpn, RES_QP, &qp);
3381	if (err)
3382		return err;
3383	if (qp->com.from_state != RES_QP_HW) {
3384		err = -EBUSY;
3385		goto out;
3386	}
3387
3388	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3389out:
3390	put_res(dev, slave, qpn, RES_QP);
3391	return err;
3392}
3393
3394int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
3395			      struct mlx4_vhcr *vhcr,
3396			      struct mlx4_cmd_mailbox *inbox,
3397			      struct mlx4_cmd_mailbox *outbox,
3398			      struct mlx4_cmd_info *cmd)
3399{
3400	struct mlx4_qp_context *context = inbox->buf + 8;
3401	adjust_proxy_tun_qkey(dev, vhcr, context);
3402	update_pkey_index(dev, slave, inbox);
3403	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3404}
3405
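/* For Ethernet (RoCE) ports, make sure the source-MAC index in the QP
 * context resolves to a MAC address that is registered to this slave.
 */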
3406static int roce_verify_mac(struct mlx4_dev *dev, int slave,
3407				struct mlx4_qp_context *qpc,
3408				struct mlx4_cmd_mailbox *inbox)
3409{
3410	u64 mac;
3411	int port;
3412	u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
3413	u8 sched = *(u8 *)(inbox->buf + 64);
3414	u8 smac_ix;
3415
3416	port = (sched >> 6 & 1) + 1;
3417	if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) {
3418		smac_ix = qpc->pri_path.grh_mylmc & 0x7f;
3419		if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac))
3420			return -ENOENT;
3421	}
3422	return 0;
3423}
3424
3425int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
3426			     struct mlx4_vhcr *vhcr,
3427			     struct mlx4_cmd_mailbox *inbox,
3428			     struct mlx4_cmd_mailbox *outbox,
3429			     struct mlx4_cmd_info *cmd)
3430{
3431	int err;
3432	struct mlx4_qp_context *qpc = inbox->buf + 8;
3433	int qpn = vhcr->in_modifier & 0x7fffff;
3434	struct res_qp *qp;
3435	u8 orig_sched_queue;
3436	__be32	orig_param3 = qpc->param3;
3437	u8 orig_vlan_control = qpc->pri_path.vlan_control;
3438	u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
3439	u8 orig_pri_path_fl = qpc->pri_path.fl;
3440	u8 orig_vlan_index = qpc->pri_path.vlan_index;
3441	u8 orig_feup = qpc->pri_path.feup;
3442
3443	err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave);
3444	if (err)
3445		return err;
3446
3447	if (roce_verify_mac(dev, slave, qpc, inbox))
3448		return -EINVAL;
3449
3450	update_pkey_index(dev, slave, inbox);
3451	update_gid(dev, inbox, (u8)slave);
3452	adjust_proxy_tun_qkey(dev, vhcr, qpc);
3453	orig_sched_queue = qpc->pri_path.sched_queue;
3454
3455	err = get_res(dev, slave, qpn, RES_QP, &qp);
3456	if (err)
3457		return err;
3458	if (qp->com.from_state != RES_QP_HW) {
3459		err = -EBUSY;
3460		goto out;
3461	}
3462
3463	/* do not modify vport QP params for RSS QPs */
3464	if (!(qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET))) {
3465		err = update_vport_qp_param(dev, inbox, slave, qpn);
3466		if (err)
3467			goto out;
3468	}
3469
3470	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3471out:
3472	/* if no error, save sched queue value passed in by VF. This is
3473	 * essentially the QOS value provided by the VF. This will be useful
3474	 * if we allow dynamic changes from VST back to VGT
3475	 */
3476	if (!err) {
3477		qp->sched_queue = orig_sched_queue;
3478		qp->param3	= orig_param3;
3479		qp->vlan_control = orig_vlan_control;
3480		qp->fvl_rx	=  orig_fvl_rx;
3481		qp->pri_path_fl = orig_pri_path_fl;
3482		qp->vlan_index  = orig_vlan_index;
3483		qp->feup	= orig_feup;
3484	}
3485	put_res(dev, slave, qpn, RES_QP);
3486	return err;
3487}
3488
3489int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3490			    struct mlx4_vhcr *vhcr,
3491			    struct mlx4_cmd_mailbox *inbox,
3492			    struct mlx4_cmd_mailbox *outbox,
3493			    struct mlx4_cmd_info *cmd)
3494{
3495	int err;
3496	struct mlx4_qp_context *context = inbox->buf + 8;
3497
3498	err = verify_qp_parameters(dev, inbox, QP_TRANS_RTR2RTS, slave);
3499	if (err)
3500		return err;
3501
3502	update_pkey_index(dev, slave, inbox);
3503	update_gid(dev, inbox, (u8)slave);
3504	adjust_proxy_tun_qkey(dev, vhcr, context);
3505	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3506}
3507
3508int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3509			    struct mlx4_vhcr *vhcr,
3510			    struct mlx4_cmd_mailbox *inbox,
3511			    struct mlx4_cmd_mailbox *outbox,
3512			    struct mlx4_cmd_info *cmd)
3513{
3514	int err;
3515	struct mlx4_qp_context *context = inbox->buf + 8;
3516
3517	err = verify_qp_parameters(dev, inbox, QP_TRANS_RTS2RTS, slave);
3518	if (err)
3519		return err;
3520
3521	update_pkey_index(dev, slave, inbox);
3522	update_gid(dev, inbox, (u8)slave);
3523	adjust_proxy_tun_qkey(dev, vhcr, context);
3524	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3525}
3526
3528int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3529			      struct mlx4_vhcr *vhcr,
3530			      struct mlx4_cmd_mailbox *inbox,
3531			      struct mlx4_cmd_mailbox *outbox,
3532			      struct mlx4_cmd_info *cmd)
3533{
3534	struct mlx4_qp_context *context = inbox->buf + 8;
3535	adjust_proxy_tun_qkey(dev, vhcr, context);
3536	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3537}
3538
3539int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
3540			    struct mlx4_vhcr *vhcr,
3541			    struct mlx4_cmd_mailbox *inbox,
3542			    struct mlx4_cmd_mailbox *outbox,
3543			    struct mlx4_cmd_info *cmd)
3544{
3545	int err;
3546	struct mlx4_qp_context *context = inbox->buf + 8;
3547
3548	err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2SQD, slave);
3549	if (err)
3550		return err;
3551
3552	adjust_proxy_tun_qkey(dev, vhcr, context);
3553	update_gid(dev, inbox, (u8)slave);
3554	update_pkey_index(dev, slave, inbox);
3555	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3556}
3557
3558int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3559			    struct mlx4_vhcr *vhcr,
3560			    struct mlx4_cmd_mailbox *inbox,
3561			    struct mlx4_cmd_mailbox *outbox,
3562			    struct mlx4_cmd_info *cmd)
3563{
3564	int err;
3565	struct mlx4_qp_context *context = inbox->buf + 8;
3566
3567	err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2RTS, slave);
3568	if (err)
3569		return err;
3570
3571	adjust_proxy_tun_qkey(dev, vhcr, context);
3572	update_gid(dev, inbox, (u8)slave);
3573	update_pkey_index(dev, slave, inbox);
3574	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3575}
3576
3577int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
3578			 struct mlx4_vhcr *vhcr,
3579			 struct mlx4_cmd_mailbox *inbox,
3580			 struct mlx4_cmd_mailbox *outbox,
3581			 struct mlx4_cmd_info *cmd)
3582{
3583	int err;
3584	int qpn = vhcr->in_modifier & 0x7fffff;
3585	struct res_qp *qp;
3586
3587	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
3588	if (err)
3589		return err;
3590	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3591	if (err)
3592		goto ex_abort;
3593
3594	atomic_dec(&qp->mtt->ref_count);
3595	atomic_dec(&qp->rcq->ref_count);
3596	atomic_dec(&qp->scq->ref_count);
3597	if (qp->srq)
3598		atomic_dec(&qp->srq->ref_count);
3599	res_end_move(dev, slave, RES_QP, qpn);
3600	return 0;
3601
3602ex_abort:
3603	res_abort_move(dev, slave, RES_QP, qpn);
3604
3605	return err;
3606}
3607
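/* Multicast attachments are tracked per QP on rqp->mcg_list so that
 * they can be detached when the QP is destroyed or the slave is cleaned
 * up (see detach_qp() below).
 */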
3608static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
3609				struct res_qp *rqp, u8 *gid)
3610{
3611	struct res_gid *res;
3612
3613	list_for_each_entry(res, &rqp->mcg_list, list) {
3614		if (!memcmp(res->gid, gid, 16))
3615			return res;
3616	}
3617	return NULL;
3618}
3619
3620static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3621		       u8 *gid, enum mlx4_protocol prot,
3622		       enum mlx4_steer_type steer, u64 reg_id)
3623{
3624	struct res_gid *res;
3625	int err;
3626
3627	res = kzalloc(sizeof *res, GFP_KERNEL);
3628	if (!res)
3629		return -ENOMEM;
3630
3631	spin_lock_irq(&rqp->mcg_spl);
3632	if (find_gid(dev, slave, rqp, gid)) {
3633		kfree(res);
3634		err = -EEXIST;
3635	} else {
3636		memcpy(res->gid, gid, 16);
3637		res->prot = prot;
3638		res->steer = steer;
3639		res->reg_id = reg_id;
3640		list_add_tail(&res->list, &rqp->mcg_list);
3641		err = 0;
3642	}
3643	spin_unlock_irq(&rqp->mcg_spl);
3644
3645	return err;
3646}
3647
3648static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3649		       u8 *gid, enum mlx4_protocol prot,
3650		       enum mlx4_steer_type steer, u64 *reg_id)
3651{
3652	struct res_gid *res;
3653	int err;
3654
3655	spin_lock_irq(&rqp->mcg_spl);
3656	res = find_gid(dev, slave, rqp, gid);
3657	if (!res || res->prot != prot || res->steer != steer)
3658		err = -EINVAL;
3659	else {
3660		*reg_id = res->reg_id;
3661		list_del(&res->list);
3662		kfree(res);
3663		err = 0;
3664	}
3665	spin_unlock_irq(&rqp->mcg_spl);
3666
3667	return err;
3668}
3669
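/* Attach/detach a QP to/from a multicast group, dispatching on the
 * device steering mode (device-managed flow steering vs. B0 steering).
 */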
3670static int qp_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
3671		     int block_loopback, enum mlx4_protocol prot,
3672		     enum mlx4_steer_type type, u64 *reg_id)
3673{
3674	switch (dev->caps.steering_mode) {
3675	case MLX4_STEERING_MODE_DEVICE_MANAGED:
3676		return mlx4_trans_to_dmfs_attach(dev, qp, gid, gid[5],
3677						block_loopback, prot,
3678						reg_id);
3679	case MLX4_STEERING_MODE_B0:
3680		return mlx4_qp_attach_common(dev, qp, gid,
3681					    block_loopback, prot, type);
3682	default:
3683		return -EINVAL;
3684	}
3685}
3686
3687static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
3688		     enum mlx4_protocol prot, enum mlx4_steer_type type,
3689		     u64 reg_id)
3690{
3691	switch (dev->caps.steering_mode) {
3692	case MLX4_STEERING_MODE_DEVICE_MANAGED:
3693		return mlx4_flow_detach(dev, reg_id);
3694	case MLX4_STEERING_MODE_B0:
3695		return mlx4_qp_detach_common(dev, qp, gid, prot, type);
3696	default:
3697		return -EINVAL;
3698	}
3699}
3700
3701int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3702			       struct mlx4_vhcr *vhcr,
3703			       struct mlx4_cmd_mailbox *inbox,
3704			       struct mlx4_cmd_mailbox *outbox,
3705			       struct mlx4_cmd_info *cmd)
3706{
3707	struct mlx4_qp qp; /* dummy for calling attach/detach */
3708	u8 *gid = inbox->buf;
3709	enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
3710	int err;
3711	int qpn;
3712	struct res_qp *rqp;
3713	u64 reg_id = 0;
3714	int attach = vhcr->op_modifier;
3715	int block_loopback = vhcr->in_modifier >> 31;
3716	u8 steer_type_mask = 2;
3717	enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
3718
3719	qpn = vhcr->in_modifier & 0xffffff;
3720	err = get_res(dev, slave, qpn, RES_QP, &rqp);
3721	if (err)
3722		return err;
3723
3724	qp.qpn = qpn;
3725	if (attach) {
3726		err = qp_attach(dev, &qp, gid, block_loopback, prot,
3727				type, &reg_id);
3728		if (err) {
3729			pr_err("Fail to attach rule to qp 0x%x\n", qpn);
3730			goto ex_put;
3731		}
3732		err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
3733		if (err)
3734			goto ex_detach;
3735	} else {
3736		err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
3737		if (err)
3738			goto ex_put;
3739
3740		err = qp_detach(dev, &qp, gid, prot, type, reg_id);
3741		if (err)
3742			pr_err("Fail to detach rule from qp 0x%x reg_id = 0x%llx\n",
3743			       qpn, (unsigned long long)reg_id);
3744	}
3745	put_res(dev, slave, qpn, RES_QP);
3746	return err;
3747
3748ex_detach:
3749	qp_detach(dev, &qp, gid, prot, type, reg_id);
3750ex_put:
3751	put_res(dev, slave, qpn, RES_QP);
3752	return err;
3753}
3754
3755/*
3756 * MAC validation for Flow Steering rules.
3757 * VF can attach rules only with a mac address which is assigned to it.
3758 */
3759static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
3760				   struct list_head *rlist)
3761{
3762	struct mac_res *res, *tmp;
3763	__be64 be_mac;
3764
3765	/* make sure it isn't a multicast or broadcast mac */
3766	if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
3767	    !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
3768		list_for_each_entry_safe(res, tmp, rlist, list) {
3769			be_mac = cpu_to_be64(res->mac << 16);
3770			if (!memcmp(&be_mac, eth_header->eth.dst_mac, ETH_ALEN))
3771				return 0;
3772		}
3773		pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
3774		       eth_header->eth.dst_mac, slave);
3775		return -EINVAL;
3776	}
3777	return 0;
3778}
3779
3780/*
3781 * In case of missing eth header, append eth header with a MAC address
3782 * assigned to the VF.
3783 */
3784static int add_eth_header(struct mlx4_dev *dev, int slave,
3785			  struct mlx4_cmd_mailbox *inbox,
3786			  struct list_head *rlist, int header_id)
3787{
3788	struct mac_res *res, *tmp;
3789	u8 port;
3790	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3791	struct mlx4_net_trans_rule_hw_eth *eth_header;
3792	struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
3793	struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
3794	__be64 be_mac = 0;
3795	__be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
3796
3797	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3798	port = ctrl->port;
3799	eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
3800
3801	/* Clear a space in the inbox for eth header */
3802	switch (header_id) {
3803	case MLX4_NET_TRANS_RULE_ID_IPV4:
3804		ip_header =
3805			(struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
3806		memmove(ip_header, eth_header,
3807			sizeof(*ip_header) + sizeof(*l4_header));
3808		break;
3809	case MLX4_NET_TRANS_RULE_ID_TCP:
3810	case MLX4_NET_TRANS_RULE_ID_UDP:
3811		l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
3812			    (eth_header + 1);
3813		memmove(l4_header, eth_header, sizeof(*l4_header));
3814		break;
3815	default:
3816		return -EINVAL;
3817	}
3818	list_for_each_entry_safe(res, tmp, rlist, list) {
3819		if (port == res->port) {
3820			be_mac = cpu_to_be64(res->mac << 16);
3821			break;
3822		}
3823	}
3824	if (!be_mac) {
3825		pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d .\n",
3826		       port);
3827		return -EINVAL;
3828	}
3829
3830	memset(eth_header, 0, sizeof(*eth_header));
3831	eth_header->size = sizeof(*eth_header) >> 2;
3832	eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
3833	memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
3834	memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
3835
3836	return 0;
3838}
3839
3840int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3841					 struct mlx4_vhcr *vhcr,
3842					 struct mlx4_cmd_mailbox *inbox,
3843					 struct mlx4_cmd_mailbox *outbox,
3844					 struct mlx4_cmd_info *cmd)
3845{
3847	struct mlx4_priv *priv = mlx4_priv(dev);
3848	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3849	struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
3850	int err;
3851	int qpn;
3852	struct res_qp *rqp;
3853	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3854	struct _rule_hw  *rule_header;
3855	int header_id;
3856
3857	if (dev->caps.steering_mode !=
3858	    MLX4_STEERING_MODE_DEVICE_MANAGED)
3859		return -EOPNOTSUPP;
3860
3861	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3862	qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
3863	err = get_res(dev, slave, qpn, RES_QP, &rqp);
3864	if (err) {
3865		pr_err("Steering rule with qpn 0x%x rejected.\n", qpn);
3866		return err;
3867	}
3868	rule_header = (struct _rule_hw *)(ctrl + 1);
3869	header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
3870
3871	switch (header_id) {
3872	case MLX4_NET_TRANS_RULE_ID_ETH:
3873		if (validate_eth_header_mac(slave, rule_header, rlist)) {
3874			err = -EINVAL;
3875			goto err_put;
3876		}
3877		break;
3878	case MLX4_NET_TRANS_RULE_ID_IB:
3879		break;
3880	case MLX4_NET_TRANS_RULE_ID_IPV4:
3881	case MLX4_NET_TRANS_RULE_ID_TCP:
3882	case MLX4_NET_TRANS_RULE_ID_UDP:
3883		pr_warn("Can't attach FS rule without L2 headers, adding L2 header.\n");
3884		if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
3885			err = -EINVAL;
3886			goto err_put;
3887		}
3888		vhcr->in_modifier +=
3889			sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
3890		break;
3891	default:
3892		pr_err("Corrupted mailbox.\n");
3893		err = -EINVAL;
3894		goto err_put;
3895	}
3896
3897	err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
3898			   vhcr->in_modifier, 0,
3899			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
3900			   MLX4_CMD_NATIVE);
3901	if (err)
3902		goto err_put;
3903
3904	err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
3905	if (err) {
3906		mlx4_err(dev, "Fail to add flow steering resources.\n ");
3907		/* detach rule*/
3908		mlx4_cmd(dev, vhcr->out_param, 0, 0,
3909			 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
3910			 MLX4_CMD_NATIVE);
3911		goto err_put;
3912	}
3913	atomic_inc(&rqp->ref_count);
3914err_put:
3915	put_res(dev, slave, qpn, RES_QP);
3916	return err;
3917}
3918
3919int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
3920					 struct mlx4_vhcr *vhcr,
3921					 struct mlx4_cmd_mailbox *inbox,
3922					 struct mlx4_cmd_mailbox *outbox,
3923					 struct mlx4_cmd_info *cmd)
3924{
3925	int err;
3926	struct res_qp *rqp;
3927	struct res_fs_rule *rrule;
3928
3929	if (dev->caps.steering_mode !=
3930	    MLX4_STEERING_MODE_DEVICE_MANAGED)
3931		return -EOPNOTSUPP;
3932
3933	err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
3934	if (err)
3935		return err;
3936	/* Release the rule from busy state before removal */
3937	put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
3938	err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
3939	if (err)
3940		return err;
3941
3942	err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
3943		       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
3944		       MLX4_CMD_NATIVE);
3945	if (!err) {
3946		err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE,
3947				    0);
3948		atomic_dec(&rqp->ref_count);
3949
3950		if (err) {
3951			mlx4_err(dev, "Fail to remove flow steering resources.\n ");
3952			goto out;
3953		}
3954	}
3955
3956out:
3957	put_res(dev, slave, rrule->qpn, RES_QP);
3958	return err;
3959}
3960
3961enum {
3962	BUSY_MAX_RETRIES = 10
3963};
3964
3965int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
3966			       struct mlx4_vhcr *vhcr,
3967			       struct mlx4_cmd_mailbox *inbox,
3968			       struct mlx4_cmd_mailbox *outbox,
3969			       struct mlx4_cmd_info *cmd)
3970{
3971	int err;
3972
3973	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3974
3975	return err;
3976}
3977
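/* Detach all multicast/steering attachments recorded for this QP and
 * free the tracking entries.
 */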
3978static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
3979{
3980	struct res_gid *rgid;
3981	struct res_gid *tmp;
3982	struct mlx4_qp qp; /* dummy for calling attach/detach */
3983
3984	list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
3985		switch (dev->caps.steering_mode) {
3986		case MLX4_STEERING_MODE_DEVICE_MANAGED:
3987			mlx4_flow_detach(dev, rgid->reg_id);
3988			break;
3989		case MLX4_STEERING_MODE_B0:
3990			qp.qpn = rqp->local_qpn;
3991			(void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
3992						     rgid->prot, rgid->steer);
3993			break;
3994		}
3995		list_del(&rgid->list);
3996		kfree(rgid);
3997	}
3998}
3999
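/* Mark all of a slave's resources of the given type as being removed.
 * Returns the number of resources that are still busy; when @print is
 * set, the busy ones are reported via mlx4_dbg().
 */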
4000static int _move_all_busy(struct mlx4_dev *dev, int slave,
4001			  enum mlx4_resource type, int print)
4002{
4003	struct mlx4_priv *priv = mlx4_priv(dev);
4004	struct mlx4_resource_tracker *tracker =
4005		&priv->mfunc.master.res_tracker;
4006	struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
4007	struct res_common *r;
4008	struct res_common *tmp;
4009	int busy;
4010
4011	busy = 0;
4012	spin_lock_irq(mlx4_tlock(dev));
4013	list_for_each_entry_safe(r, tmp, rlist, list) {
4014		if (r->owner == slave) {
4015			if (!r->removing) {
4016				if (r->state == RES_ANY_BUSY) {
4017					if (print)
4018						mlx4_dbg(dev,
4019							 "%s id 0x%llx is busy\n",
4020							  ResourceType(type),
4021							  (unsigned long long)r->res_id);
4022					++busy;
4023				} else {
4024					r->from_state = r->state;
4025					r->state = RES_ANY_BUSY;
4026					r->removing = 1;
4027				}
4028			}
4029		}
4030	}
4031	spin_unlock_irq(mlx4_tlock(dev));
4032
4033	return busy;
4034}
4035
4036static int move_all_busy(struct mlx4_dev *dev, int slave,
4037			 enum mlx4_resource type)
4038{
4039	unsigned long begin;
4040	int busy;
4041
4042	begin = jiffies;
4043	do {
4044		busy = _move_all_busy(dev, slave, type, 0);
4045		if (time_after(jiffies, begin + 5 * HZ))
4046			break;
4047		if (busy)
4048			cond_resched();
4049	} while (busy);
4050
4051	if (busy)
4052		busy = _move_all_busy(dev, slave, type, 1);
4053
4054	return busy;
4055}
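
/* Forcibly release every QP still owned by the slave: detach steering
 * rules, move the QP back to reset in firmware if needed, free its ICM
 * and reserved range, and drop the tracker entry.
 */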
4056static void rem_slave_qps(struct mlx4_dev *dev, int slave)
4057{
4058	struct mlx4_priv *priv = mlx4_priv(dev);
4059	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4060	struct list_head *qp_list =
4061		&tracker->slave_list[slave].res_list[RES_QP];
4062	struct res_qp *qp;
4063	struct res_qp *tmp;
4064	int state;
4065	u64 in_param;
4066	int qpn;
4067	int err;
4068
4069	err = move_all_busy(dev, slave, RES_QP);
4070	if (err)
4071		mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n", slave);
4073
4074	spin_lock_irq(mlx4_tlock(dev));
4075	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4076		spin_unlock_irq(mlx4_tlock(dev));
4077		if (qp->com.owner == slave) {
4078			qpn = qp->com.res_id;
4079			detach_qp(dev, slave, qp);
4080			state = qp->com.from_state;
4081			while (state != 0) {
4082				switch (state) {
4083				case RES_QP_RESERVED:
4084					spin_lock_irq(mlx4_tlock(dev));
4085					rb_erase(&qp->com.node,
4086						 &tracker->res_tree[RES_QP]);
4087					list_del(&qp->com.list);
4088					spin_unlock_irq(mlx4_tlock(dev));
4089					if (!valid_reserved(dev, slave, qpn)) {
4090						__mlx4_qp_release_range(dev, qpn, 1);
4091						mlx4_release_resource(dev, slave,
4092								      RES_QP, 1, 0);
4093					}
4094					kfree(qp);
4095					state = 0;
4096					break;
4097				case RES_QP_MAPPED:
4098					if (!valid_reserved(dev, slave, qpn))
4099						__mlx4_qp_free_icm(dev, qpn);
4100					state = RES_QP_RESERVED;
4101					break;
4102				case RES_QP_HW:
4103					in_param = slave;
4104					err = mlx4_cmd(dev, in_param,
4105						       qp->local_qpn, 2,
4106						       MLX4_CMD_2RST_QP,
4107						       MLX4_CMD_TIME_CLASS_A,
4108						       MLX4_CMD_NATIVE);
4109					if (err)
4110						mlx4_dbg(dev, "rem_slave_qps: failed"
4111							 " to move slave %d qpn %d to"
4112							 " reset\n", slave,
4113							 qp->local_qpn);
4114					atomic_dec(&qp->rcq->ref_count);
4115					atomic_dec(&qp->scq->ref_count);
4116					atomic_dec(&qp->mtt->ref_count);
4117					if (qp->srq)
4118						atomic_dec(&qp->srq->ref_count);
4119					state = RES_QP_MAPPED;
4120					break;
4121				default:
4122					state = 0;
4123				}
4124			}
4125		}
4126		spin_lock_irq(mlx4_tlock(dev));
4127	}
4128	spin_unlock_irq(mlx4_tlock(dev));
4129}
4130
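/* Release the slave's SRQs: move each one still in hardware back to SW
 * ownership (HW2SW_SRQ), then free its ICM and drop it from the tracker.
 */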
static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *srq_list =
		&tracker->slave_list[slave].res_list[RES_SRQ];
	struct res_srq *srq;
	struct res_srq *tmp;
	int state;
	u64 in_param;
	LIST_HEAD(tlist);
	int srqn;
	int err;

	err = move_all_busy(dev, slave, RES_SRQ);
	if (err)
		mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (srq->com.owner == slave) {
			srqn = srq->com.res_id;
			state = srq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_SRQ_ALLOCATED:
					__mlx4_srq_free_icm(dev, srqn);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&srq->com.node,
						 &tracker->res_tree[RES_SRQ]);
					list_del(&srq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave,
							      RES_SRQ, 1, 0);
					kfree(srq);
					state = 0;
					break;

				case RES_SRQ_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, srqn, 1,
						       MLX4_CMD_HW2SW_SRQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
							 slave, srqn);

					atomic_dec(&srq->mtt->ref_count);
					if (srq->cq)
						atomic_dec(&srq->cq->ref_count);
					state = RES_SRQ_ALLOCATED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

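/* Release the slave's CQs.  A CQ is only reclaimed once its ref_count has
 * dropped to zero, i.e. after every QP and SRQ that pointed at it has been
 * torn down first (see mlx4_delete_all_resources_for_slave()).
 */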
static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *cq_list =
		&tracker->slave_list[slave].res_list[RES_CQ];
	struct res_cq *cq;
	struct res_cq *tmp;
	int state;
	u64 in_param;
	LIST_HEAD(tlist);
	int cqn;
	int err;

	err = move_all_busy(dev, slave, RES_CQ);
	if (err)
		mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
			cqn = cq->com.res_id;
			state = cq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_CQ_ALLOCATED:
					__mlx4_cq_free_icm(dev, cqn);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&cq->com.node,
						 &tracker->res_tree[RES_CQ]);
					list_del(&cq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave,
							      RES_CQ, 1, 0);
					kfree(cq);
					state = 0;
					break;

				case RES_CQ_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, cqn, 1,
						       MLX4_CMD_HW2SW_CQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
							 slave, cqn);
					atomic_dec(&cq->mtt->ref_count);
					state = RES_CQ_ALLOCATED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

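/* Release the slave's memory regions: HW2SW_MPT any MPT still owned by
 * hardware, free its ICM, then release the MPT index and tracker entry.
 */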
static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mpt_list =
		&tracker->slave_list[slave].res_list[RES_MPT];
	struct res_mpt *mpt;
	struct res_mpt *tmp;
	int state;
	u64 in_param;
	LIST_HEAD(tlist);
	int mptn;
	int err;

	err = move_all_busy(dev, slave, RES_MPT);
	if (err)
		mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (mpt->com.owner == slave) {
			mptn = mpt->com.res_id;
			state = mpt->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_MPT_RESERVED:
					__mlx4_mpt_release(dev, mpt->key);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&mpt->com.node,
						 &tracker->res_tree[RES_MPT]);
					list_del(&mpt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave,
							      RES_MPT, 1, 0);
					kfree(mpt);
					state = 0;
					break;

				case RES_MPT_MAPPED:
					__mlx4_mpt_free_icm(dev, mpt->key);
					state = RES_MPT_RESERVED;
					break;

				case RES_MPT_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, mptn, 0,
						       MLX4_CMD_HW2SW_MPT,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
							 slave, mptn);
					if (mpt->mtt)
						atomic_dec(&mpt->mtt->ref_count);
					state = RES_MPT_MAPPED;
					break;
				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

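/* Release the slave's MTT ranges.  These are reclaimed only after the QPs,
 * SRQs, CQs, MPTs and EQs that hold references on them have been cleaned
 * up (see the call order in mlx4_delete_all_resources_for_slave()).
 */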
static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *mtt_list =
		&tracker->slave_list[slave].res_list[RES_MTT];
	struct res_mtt *mtt;
	struct res_mtt *tmp;
	int state;
	LIST_HEAD(tlist);
	int base;
	int err;

	err = move_all_busy(dev, slave, RES_MTT);
	if (err)
		mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (mtt->com.owner == slave) {
			base = mtt->com.res_id;
			state = mtt->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_MTT_ALLOCATED:
					__mlx4_free_mtt_range(dev, base,
							      mtt->order);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&mtt->com.node,
						 &tracker->res_tree[RES_MTT]);
					list_del(&mtt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave, RES_MTT,
							      1 << mtt->order, 0);
					kfree(mtt);
					state = 0;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

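/* Detach and free any device-managed flow steering rules still registered
 * by the slave.
 */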
static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *fs_rule_list =
		&tracker->slave_list[slave].res_list[RES_FS_RULE];
	struct res_fs_rule *fs_rule;
	struct res_fs_rule *tmp;
	int state;
	u64 base;
	int err;

	err = move_all_busy(dev, slave, RES_FS_RULE);
	if (err)
		mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (fs_rule->com.owner == slave) {
			base = fs_rule->com.res_id;
			state = fs_rule->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_FS_RULE_ALLOCATED:
					/* detach rule */
					err = mlx4_cmd(dev, base, 0, 0,
						       MLX4_QP_FLOW_STEERING_DETACH,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);

					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&fs_rule->com.node,
						 &tracker->res_tree[RES_FS_RULE]);
					list_del(&fs_rule->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(fs_rule);
					state = 0;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

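/* Return the slave's EQs to SW ownership (HW2SW_EQ) and free their tracker
 * entries.  A command mailbox is allocated per EQ; if allocation fails the
 * loop reschedules and retries the same state.
 */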
static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *eq_list =
		&tracker->slave_list[slave].res_list[RES_EQ];
	struct res_eq *eq;
	struct res_eq *tmp;
	int err;
	int state;
	LIST_HEAD(tlist);
	int eqn;
	struct mlx4_cmd_mailbox *mailbox;

	err = move_all_busy(dev, slave, RES_EQ);
	if (err)
		mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (eq->com.owner == slave) {
			eqn = eq->com.res_id;
			state = eq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_EQ_RESERVED:
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&eq->com.node,
						 &tracker->res_tree[RES_EQ]);
					list_del(&eq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(eq);
					state = 0;
					break;

				case RES_EQ_HW:
					mailbox = mlx4_alloc_cmd_mailbox(dev);
					if (IS_ERR(mailbox)) {
						cond_resched();
						continue;
					}
					err = mlx4_cmd_box(dev, slave, 0,
							   eqn & 0xff, 0,
							   MLX4_CMD_HW2SW_EQ,
							   MLX4_CMD_TIME_CLASS_A,
							   MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eq %d to SW ownership\n",
							 slave, eqn);
					mlx4_free_cmd_mailbox(dev, mailbox);
					atomic_dec(&eq->mtt->ref_count);
					state = RES_EQ_RESERVED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

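/* Counters need no per-resource state unwinding; __mlx4_slave_counters_free()
 * releases everything the slave allocated in one call.
 */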
static void rem_slave_counters(struct mlx4_dev *dev, int slave)
{
	__mlx4_slave_counters_free(dev, slave);
}

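/* Free the slave's XRC domains.  These have a single allocated state and no
 * firmware command to issue, so the whole list is reclaimed under the
 * tracker lock.
 */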
static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *xrcdn_list =
		&tracker->slave_list[slave].res_list[RES_XRCD];
	struct res_xrcdn *xrcd;
	struct res_xrcdn *tmp;
	int err;
	int xrcdn;

	err = move_all_busy(dev, slave, RES_XRCD);
	if (err)
		mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
		if (xrcd->com.owner == slave) {
			xrcdn = xrcd->com.res_id;
			rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
			list_del(&xrcd->com.list);
			kfree(xrcd);
			__mlx4_xrcd_free(dev, xrcdn);
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

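/*
 * Release every resource still tracked for @slave, holding the per-slave
 * tracker mutex across the whole sweep.  The helpers run in the order
 * coded below so that consumers (QPs, SRQs, CQs, MPTs, EQs) go away before
 * the MTTs they reference, with flow rules and L2 addresses removed first
 * and counters and XRC domains last.
 */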
void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
	rem_slave_macs(dev, slave);
	rem_slave_vlans(dev, slave);
	rem_slave_fs_rule(dev, slave);
	rem_slave_qps(dev, slave);
	rem_slave_srqs(dev, slave);
	rem_slave_cqs(dev, slave);
	rem_slave_mrs(dev, slave);
	rem_slave_eqs(dev, slave);
	rem_slave_mtts(dev, slave);
	rem_slave_counters(dev, slave);
	rem_slave_xrcdns(dev, slave);
	mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
}

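/*
 * Deferred work that pushes an administrator-set VLAN (VST) or a return to
 * VGT onto a VF's active Ethernet QPs via UPDATE_QP.  QPs that are not yet
 * in HW ownership, have not gone through INIT2RTR, are reserved, are RSS
 * QPs, or belong to a different port are skipped.  When reverting to VGT
 * the per-QP values saved in struct res_qp are restored.
 */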
void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
{
	struct mlx4_vf_immed_vlan_work *work =
		container_of(_work, struct mlx4_vf_immed_vlan_work, work);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_update_qp_context *upd_context;
	struct mlx4_dev *dev = &work->priv->dev;
	struct mlx4_resource_tracker *tracker =
		&work->priv->mfunc.master.res_tracker;
	struct list_head *qp_list =
		&tracker->slave_list[work->slave].res_list[RES_QP];
	struct res_qp *qp;
	struct res_qp *tmp;
	u64 qp_path_mask_vlan_ctrl =
		       ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));

	u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));

	int err;
	int port, errors = 0;
	u8 vlan_control;

	if (mlx4_is_slave(dev)) {
		mlx4_warn(dev, "Trying to update-qp in slave %d\n",
			  work->slave);
		goto out;
	}

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		goto out;

	if (!work->vlan_id)
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
	else
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;

	upd_context = mailbox->buf;
	upd_context->qp_mask = cpu_to_be64(MLX4_UPD_QP_MASK_VSD);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (qp->com.owner == work->slave) {
			if (qp->com.from_state != RES_QP_HW ||
			    !qp->sched_queue ||  /* no INIT2RTR trans yet */
			    mlx4_is_qp_reserved(dev, qp->local_qpn) ||
			    qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
				spin_lock_irq(mlx4_tlock(dev));
				continue;
			}
			port = (qp->sched_queue >> 6 & 1) + 1;
			if (port != work->port) {
				spin_lock_irq(mlx4_tlock(dev));
				continue;
			}
			if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
				upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
			else
				upd_context->primary_addr_path_mask =
					cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
			if (work->vlan_id == MLX4_VGT) {
				upd_context->qp_context.param3 = qp->param3;
				upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
				upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
				upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
				upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
				upd_context->qp_context.pri_path.feup = qp->feup;
				upd_context->qp_context.pri_path.sched_queue =
					qp->sched_queue;
			} else {
				upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
				upd_context->qp_context.pri_path.vlan_control = vlan_control;
				upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
				upd_context->qp_context.pri_path.fvl_rx =
					qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
				upd_context->qp_context.pri_path.fl =
					qp->pri_path_fl | MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
				upd_context->qp_context.pri_path.feup =
					qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
				upd_context->qp_context.pri_path.sched_queue =
					qp->sched_queue & 0xC7;
				upd_context->qp_context.pri_path.sched_queue |=
					((work->qos & 0x7) << 3);
			}

			err = mlx4_cmd(dev, mailbox->dma,
				       qp->local_qpn & 0xffffff,
				       0, MLX4_CMD_UPDATE_QP,
				       MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
			if (err) {
				mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
					  work->slave, port, qp->local_qpn,
					  err);
				errors++;
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
	mlx4_free_cmd_mailbox(dev, mailbox);

	if (errors)
		mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
			 errors, work->slave, work->port);

	/* unregister previous vlan_id if needed and we had no errors
	 * while updating the QPs
	 */
	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
	    NO_INDX != work->orig_vlan_ix)
		__mlx4_unregister_vlan(&work->priv->dev, work->port,
				       work->orig_vlan_id);
out:
	kfree(work);
	return;
}