1/*
2 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
3 * Copyright (c) 2005, 2006, 2007, 2008, 2014 Mellanox Technologies.
4 * All rights reserved.
5 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses.  You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
12 *
13 *     Redistribution and use in source and binary forms, with or
14 *     without modification, are permitted provided that the following
15 *     conditions are met:
16 *
17 *      - Redistributions of source code must retain the above
18 *        copyright notice, this list of conditions and the following
19 *        disclaimer.
20 *
21 *      - Redistributions in binary form must reproduce the above
22 *        copyright notice, this list of conditions and the following
23 *        disclaimer in the documentation and/or other materials
24 *        provided with the distribution.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
34 */
35
36#include <linux/sched.h>
37#include <linux/pci.h>
38#include <linux/errno.h>
39#include <linux/kernel.h>
40#include <linux/io.h>
41#include <linux/slab.h>
42#include <dev/mlx4/cmd.h>
43#include <dev/mlx4/qp.h>
44#include <linux/if_ether.h>
45#include <linux/etherdevice.h>
46
47#include "mlx4.h"
48#include "fw.h"
49
50#define MLX4_MAC_VALID		(1ull << 63)
51#define MLX4_PF_COUNTERS_PER_PORT	2
52#define MLX4_VF_COUNTERS_PER_PORT	1
53
54struct mac_res {
55	struct list_head list;
56	u64 mac;
57	int ref_count;
58	u8 smac_index;
59	u8 port;
60};
61
62struct vlan_res {
63	struct list_head list;
64	u16 vlan;
65	int ref_count;
66	int vlan_index;
67	u8 port;
68};
69
70struct res_common {
71	struct list_head	list;
72	struct rb_node		node;
73	u64		        res_id;
74	int			owner;
75	int			state;
76	int			from_state;
77	int			to_state;
78	int			removing;
79};
80
81enum {
82	RES_ANY_BUSY = 1
83};
84
85struct res_gid {
86	struct list_head	list;
87	u8			gid[16];
88	enum mlx4_protocol	prot;
89	enum mlx4_steer_type	steer;
90	u64			reg_id;
91};
92
93enum res_qp_states {
94	RES_QP_BUSY = RES_ANY_BUSY,
95
96	/* QP number was allocated */
97	RES_QP_RESERVED,
98
99	/* ICM memory for QP context was mapped */
100	RES_QP_MAPPED,
101
102	/* QP is in hw ownership */
103	RES_QP_HW
104};
105
106struct res_qp {
107	struct res_common	com;
108	struct res_mtt	       *mtt;
109	struct res_cq	       *rcq;
110	struct res_cq	       *scq;
111	struct res_srq	       *srq;
112	struct list_head	mcg_list;
113	spinlock_t		mcg_spl;
114	int			local_qpn;
115	atomic_t		ref_count;
116	u32			qpc_flags;
117	/* saved qp params before VST enforcement in order to restore on VGT */
118	u8			sched_queue;
119	__be32			param3;
120	u8			vlan_control;
121	u8			fvl_rx;
122	u8			pri_path_fl;
123	u8			vlan_index;
124	u8			feup;
125};
126
127enum res_mtt_states {
128	RES_MTT_BUSY = RES_ANY_BUSY,
129	RES_MTT_ALLOCATED,
130};
131
132static inline const char *mtt_states_str(enum res_mtt_states state)
133{
134	switch (state) {
135	case RES_MTT_BUSY: return "RES_MTT_BUSY";
136	case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
137	default: return "Unknown";
138	}
139}
140
141struct res_mtt {
142	struct res_common	com;
143	int			order;
144	atomic_t		ref_count;
145};
146
147enum res_mpt_states {
148	RES_MPT_BUSY = RES_ANY_BUSY,
149	RES_MPT_RESERVED,
150	RES_MPT_MAPPED,
151	RES_MPT_HW,
152};
153
154struct res_mpt {
155	struct res_common	com;
156	struct res_mtt	       *mtt;
157	int			key;
158};
159
160enum res_eq_states {
161	RES_EQ_BUSY = RES_ANY_BUSY,
162	RES_EQ_RESERVED,
163	RES_EQ_HW,
164};
165
166struct res_eq {
167	struct res_common	com;
168	struct res_mtt	       *mtt;
169};
170
171enum res_cq_states {
172	RES_CQ_BUSY = RES_ANY_BUSY,
173	RES_CQ_ALLOCATED,
174	RES_CQ_HW,
175};
176
177struct res_cq {
178	struct res_common	com;
179	struct res_mtt	       *mtt;
180	atomic_t		ref_count;
181};
182
183enum res_srq_states {
184	RES_SRQ_BUSY = RES_ANY_BUSY,
185	RES_SRQ_ALLOCATED,
186	RES_SRQ_HW,
187};
188
189struct res_srq {
190	struct res_common	com;
191	struct res_mtt	       *mtt;
192	struct res_cq	       *cq;
193	atomic_t		ref_count;
194};
195
196enum res_counter_states {
197	RES_COUNTER_BUSY = RES_ANY_BUSY,
198	RES_COUNTER_ALLOCATED,
199};
200
201struct res_counter {
202	struct res_common	com;
203	int			port;
204};
205
206enum res_xrcdn_states {
207	RES_XRCD_BUSY = RES_ANY_BUSY,
208	RES_XRCD_ALLOCATED,
209};
210
211struct res_xrcdn {
212	struct res_common	com;
213	int			port;
214};
215
216enum res_fs_rule_states {
217	RES_FS_RULE_BUSY = RES_ANY_BUSY,
218	RES_FS_RULE_ALLOCATED,
219};
220
221struct res_fs_rule {
222	struct res_common	com;
223	int			qpn;
224	/* VF DMFS mbox with port flipped */
225	void			*mirr_mbox;
226	/* > 0 --> apply mirror when getting into HA mode      */
227	/* = 0 --> un-apply mirror when getting out of HA mode */
228	u32			mirr_mbox_size;
229	struct list_head	mirr_list;
230	u64			mirr_rule_id;
231};
232
233static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
234{
235	struct rb_node *node = root->rb_node;
236
237	while (node) {
238		struct res_common *res = container_of(node, struct res_common,
239						      node);
240
241		if (res_id < res->res_id)
242			node = node->rb_left;
243		else if (res_id > res->res_id)
244			node = node->rb_right;
245		else
246			return res;
247	}
248	return NULL;
249}
250
251static int res_tracker_insert(struct rb_root *root, struct res_common *res)
252{
253	struct rb_node **new = &(root->rb_node), *parent = NULL;
254
255	/* Figure out where to put new node */
256	while (*new) {
257		struct res_common *this = container_of(*new, struct res_common,
258						       node);
259
260		parent = *new;
261		if (res->res_id < this->res_id)
262			new = &((*new)->rb_left);
263		else if (res->res_id > this->res_id)
264			new = &((*new)->rb_right);
265		else
266			return -EEXIST;
267	}
268
269	/* Add new node and rebalance tree. */
270	rb_link_node(&res->node, parent, new);
271	rb_insert_color(&res->node, root);
272
273	return 0;
274}
275
276enum qp_transition {
277	QP_TRANS_INIT2RTR,
278	QP_TRANS_RTR2RTS,
279	QP_TRANS_RTS2RTS,
280	QP_TRANS_SQERR2RTS,
281	QP_TRANS_SQD2SQD,
282	QP_TRANS_SQD2RTS
283};
284
285/* For Debug uses */
286static const char *resource_str(enum mlx4_resource rt)
287{
288	switch (rt) {
289	case RES_QP: return "RES_QP";
290	case RES_CQ: return "RES_CQ";
291	case RES_SRQ: return "RES_SRQ";
292	case RES_MPT: return "RES_MPT";
293	case RES_MTT: return "RES_MTT";
294	case RES_MAC: return  "RES_MAC";
295	case RES_VLAN: return  "RES_VLAN";
296	case RES_EQ: return "RES_EQ";
297	case RES_COUNTER: return "RES_COUNTER";
298	case RES_FS_RULE: return "RES_FS_RULE";
299	case RES_XRCD: return "RES_XRCD";
300	default: return "Unknown resource type !!!";
301	};
302}
303
304static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
305static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
306				      enum mlx4_resource res_type, int count,
307				      int port)
308{
309	struct mlx4_priv *priv = mlx4_priv(dev);
310	struct resource_allocator *res_alloc =
311		&priv->mfunc.master.res_tracker.res_alloc[res_type];
312	int err = -EINVAL;
313	int allocated, free, reserved, guaranteed, from_free;
314	int from_rsvd;
315
316	if (slave > dev->persist->num_vfs)
317		return -EINVAL;
318
319	spin_lock(&res_alloc->alloc_lock);
320	allocated = (port > 0) ?
321		res_alloc->allocated[(port - 1) *
322		(dev->persist->num_vfs + 1) + slave] :
323		res_alloc->allocated[slave];
324	free = (port > 0) ? res_alloc->res_port_free[port - 1] :
325		res_alloc->res_free;
326	reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
327		res_alloc->res_reserved;
328	guaranteed = res_alloc->guaranteed[slave];
329
330	if (allocated + count > res_alloc->quota[slave]) {
331		mlx4_warn(dev, "VF %d port %d res %s: quota exceeded, count %d alloc %d quota %d\n",
332			  slave, port, resource_str(res_type), count,
333			  allocated, res_alloc->quota[slave]);
334		goto out;
335	}
336
337	if (allocated + count <= guaranteed) {
338		err = 0;
339		from_rsvd = count;
340	} else {
341		/* portion may need to be obtained from free area */
342		if (guaranteed - allocated > 0)
343			from_free = count - (guaranteed - allocated);
344		else
345			from_free = count;
346
347		from_rsvd = count - from_free;
348
349		if (free - from_free >= reserved)
350			err = 0;
351		else
352			mlx4_warn(dev, "VF %d port %d res %s: free pool empty, free %d from_free %d rsvd %d\n",
353				  slave, port, resource_str(res_type), free,
354				  from_free, reserved);
355	}
356
357	if (!err) {
358		/* grant the request */
359		if (port > 0) {
360			res_alloc->allocated[(port - 1) *
361			(dev->persist->num_vfs + 1) + slave] += count;
362			res_alloc->res_port_free[port - 1] -= count;
363			res_alloc->res_port_rsvd[port - 1] -= from_rsvd;
364		} else {
365			res_alloc->allocated[slave] += count;
366			res_alloc->res_free -= count;
367			res_alloc->res_reserved -= from_rsvd;
368		}
369	}
370
371out:
372	spin_unlock(&res_alloc->alloc_lock);
373	return err;
374}
375
376static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
377				    enum mlx4_resource res_type, int count,
378				    int port)
379{
380	struct mlx4_priv *priv = mlx4_priv(dev);
381	struct resource_allocator *res_alloc =
382		&priv->mfunc.master.res_tracker.res_alloc[res_type];
383	int allocated, guaranteed, from_rsvd;
384
385	if (slave > dev->persist->num_vfs)
386		return;
387
388	spin_lock(&res_alloc->alloc_lock);
389
390	allocated = (port > 0) ?
391		res_alloc->allocated[(port - 1) *
392		(dev->persist->num_vfs + 1) + slave] :
393		res_alloc->allocated[slave];
394	guaranteed = res_alloc->guaranteed[slave];
395
396	if (allocated - count >= guaranteed) {
397		from_rsvd = 0;
398	} else {
399		/* portion may need to be returned to reserved area */
400		if (allocated - guaranteed > 0)
401			from_rsvd = count - (allocated - guaranteed);
402		else
403			from_rsvd = count;
404	}
405
406	if (port > 0) {
407		res_alloc->allocated[(port - 1) *
408		(dev->persist->num_vfs + 1) + slave] -= count;
409		res_alloc->res_port_free[port - 1] += count;
410		res_alloc->res_port_rsvd[port - 1] += from_rsvd;
411	} else {
412		res_alloc->allocated[slave] -= count;
413		res_alloc->res_free += count;
414		res_alloc->res_reserved += from_rsvd;
415	}
416
417	spin_unlock(&res_alloc->alloc_lock);
418	return;
419}
420
421static inline void initialize_res_quotas(struct mlx4_dev *dev,
422					 struct resource_allocator *res_alloc,
423					 enum mlx4_resource res_type,
424					 int vf, int num_instances)
425{
426	res_alloc->guaranteed[vf] = num_instances /
427				    (2 * (dev->persist->num_vfs + 1));
428	res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
429	if (vf == mlx4_master_func_num(dev)) {
430		res_alloc->res_free = num_instances;
431		if (res_type == RES_MTT) {
432			/* reserved mtts will be taken out of the PF allocation */
433			res_alloc->res_free += dev->caps.reserved_mtts;
434			res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
435			res_alloc->quota[vf] += dev->caps.reserved_mtts;
436		}
437	}
438}
439
440void mlx4_init_quotas(struct mlx4_dev *dev)
441{
442	struct mlx4_priv *priv = mlx4_priv(dev);
443	int pf;
444
445	/* quotas for VFs are initialized in mlx4_slave_cap */
446	if (mlx4_is_slave(dev))
447		return;
448
449	if (!mlx4_is_mfunc(dev)) {
450		dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
451			mlx4_num_reserved_sqps(dev);
452		dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
453		dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
454		dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
455		dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
456		return;
457	}
458
459	pf = mlx4_master_func_num(dev);
460	dev->quotas.qp =
461		priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
462	dev->quotas.cq =
463		priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
464	dev->quotas.srq =
465		priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
466	dev->quotas.mtt =
467		priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
468	dev->quotas.mpt =
469		priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
470}
471
472static int get_max_gauranteed_vfs_counter(struct mlx4_dev *dev)
473{
474	/* reduce the sink counter */
475	return (dev->caps.max_counters - 1 -
476		(MLX4_PF_COUNTERS_PER_PORT * MLX4_MAX_PORTS))
477		/ MLX4_MAX_PORTS;
478}
479
480int mlx4_init_resource_tracker(struct mlx4_dev *dev)
481{
482	struct mlx4_priv *priv = mlx4_priv(dev);
483	int i, j;
484	int t;
485	int max_vfs_guarantee_counter = get_max_gauranteed_vfs_counter(dev);
486
487	priv->mfunc.master.res_tracker.slave_list =
488		kzalloc(dev->num_slaves * sizeof(struct slave_list),
489			GFP_KERNEL);
490	if (!priv->mfunc.master.res_tracker.slave_list)
491		return -ENOMEM;
492
493	for (i = 0 ; i < dev->num_slaves; i++) {
494		for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
495			INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
496				       slave_list[i].res_list[t]);
497		mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
498	}
499
500	mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
501		 dev->num_slaves);
502	for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
503		priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;
504
505	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
506		struct resource_allocator *res_alloc =
507			&priv->mfunc.master.res_tracker.res_alloc[i];
508		res_alloc->quota = kmalloc((dev->persist->num_vfs + 1) *
509					   sizeof(int), GFP_KERNEL);
510		res_alloc->guaranteed = kmalloc((dev->persist->num_vfs + 1) *
511						sizeof(int), GFP_KERNEL);
512		if (i == RES_MAC || i == RES_VLAN)
513			res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
514						       (dev->persist->num_vfs
515						       + 1) *
516						       sizeof(int), GFP_KERNEL);
517		else
518			res_alloc->allocated = kzalloc((dev->persist->
519							num_vfs + 1) *
520						       sizeof(int), GFP_KERNEL);
521		/* Reduce the sink counter */
522		if (i == RES_COUNTER)
523			res_alloc->res_free = dev->caps.max_counters - 1;
524
525		if (!res_alloc->quota || !res_alloc->guaranteed ||
526		    !res_alloc->allocated)
527			goto no_mem_err;
528
529		spin_lock_init(&res_alloc->alloc_lock);
530		for (t = 0; t < dev->persist->num_vfs + 1; t++) {
531			struct mlx4_active_ports actv_ports =
532				mlx4_get_active_ports(dev, t);
533			switch (i) {
534			case RES_QP:
535				initialize_res_quotas(dev, res_alloc, RES_QP,
536						      t, dev->caps.num_qps -
537						      dev->caps.reserved_qps -
538						      mlx4_num_reserved_sqps(dev));
539				break;
540			case RES_CQ:
541				initialize_res_quotas(dev, res_alloc, RES_CQ,
542						      t, dev->caps.num_cqs -
543						      dev->caps.reserved_cqs);
544				break;
545			case RES_SRQ:
546				initialize_res_quotas(dev, res_alloc, RES_SRQ,
547						      t, dev->caps.num_srqs -
548						      dev->caps.reserved_srqs);
549				break;
550			case RES_MPT:
551				initialize_res_quotas(dev, res_alloc, RES_MPT,
552						      t, dev->caps.num_mpts -
553						      dev->caps.reserved_mrws);
554				break;
555			case RES_MTT:
556				initialize_res_quotas(dev, res_alloc, RES_MTT,
557						      t, dev->caps.num_mtts -
558						      dev->caps.reserved_mtts);
559				break;
560			case RES_MAC:
561				if (t == mlx4_master_func_num(dev)) {
562					int max_vfs_pport = 0;
563					/* Calculate the max vfs per port for */
564					/* both ports.			      */
565					for (j = 0; j < dev->caps.num_ports;
566					     j++) {
567						struct mlx4_slaves_pport slaves_pport =
568							mlx4_phys_to_slaves_pport(dev, j + 1);
569						unsigned current_slaves =
570							bitmap_weight(slaves_pport.slaves,
571								      dev->caps.num_ports) - 1;
572						if (max_vfs_pport < current_slaves)
573							max_vfs_pport =
574								current_slaves;
575					}
576					res_alloc->quota[t] =
577						MLX4_MAX_MAC_NUM -
578						2 * max_vfs_pport;
579					res_alloc->guaranteed[t] = 2;
580					for (j = 0; j < MLX4_MAX_PORTS; j++)
581						res_alloc->res_port_free[j] =
582							MLX4_MAX_MAC_NUM;
583				} else {
584					res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
585					res_alloc->guaranteed[t] = 2;
586				}
587				break;
588			case RES_VLAN:
589				if (t == mlx4_master_func_num(dev)) {
590					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
591					res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
592					for (j = 0; j < MLX4_MAX_PORTS; j++)
593						res_alloc->res_port_free[j] =
594							res_alloc->quota[t];
595				} else {
596					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
597					res_alloc->guaranteed[t] = 0;
598				}
599				break;
600			case RES_COUNTER:
601				res_alloc->quota[t] = dev->caps.max_counters;
602				if (t == mlx4_master_func_num(dev))
603					res_alloc->guaranteed[t] =
604						MLX4_PF_COUNTERS_PER_PORT *
605						MLX4_MAX_PORTS;
606				else if (t <= max_vfs_guarantee_counter)
607					res_alloc->guaranteed[t] =
608						MLX4_VF_COUNTERS_PER_PORT *
609						MLX4_MAX_PORTS;
610				else
611					res_alloc->guaranteed[t] = 0;
612				res_alloc->res_free -= res_alloc->guaranteed[t];
613				break;
614			default:
615				break;
616			}
617			if (i == RES_MAC || i == RES_VLAN) {
618				for (j = 0; j < dev->caps.num_ports; j++)
619					if (test_bit(j, actv_ports.ports))
620						res_alloc->res_port_rsvd[j] +=
621							res_alloc->guaranteed[t];
622			} else {
623				res_alloc->res_reserved += res_alloc->guaranteed[t];
624			}
625		}
626	}
627	spin_lock_init(&priv->mfunc.master.res_tracker.lock);
628	return 0;
629
630no_mem_err:
631	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
632		kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
633		priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
634		kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
635		priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
636		kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
637		priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
638	}
639	return -ENOMEM;
640}
641
642void mlx4_free_resource_tracker(struct mlx4_dev *dev,
643				enum mlx4_res_tracker_free_type type)
644{
645	struct mlx4_priv *priv = mlx4_priv(dev);
646	int i;
647
648	if (priv->mfunc.master.res_tracker.slave_list) {
649		if (type != RES_TR_FREE_STRUCTS_ONLY) {
650			for (i = 0; i < dev->num_slaves; i++) {
651				if (type == RES_TR_FREE_ALL ||
652				    dev->caps.function != i)
653					mlx4_delete_all_resources_for_slave(dev, i);
654			}
655			/* free master's vlans */
656			i = dev->caps.function;
657			mlx4_reset_roce_gids(dev, i);
658			mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
659			rem_slave_vlans(dev, i);
660			mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
661		}
662
663		if (type != RES_TR_FREE_SLAVES_ONLY) {
664			for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
665				kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
666				priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
667				kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
668				priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
669				kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
670				priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
671			}
672			kfree(priv->mfunc.master.res_tracker.slave_list);
673			priv->mfunc.master.res_tracker.slave_list = NULL;
674		}
675	}
676}
677
678static void update_pkey_index(struct mlx4_dev *dev, int slave,
679			      struct mlx4_cmd_mailbox *inbox)
680{
681	u8 sched = *(u8 *)(inbox->buf + 64);
682	u8 orig_index = *(u8 *)(inbox->buf + 35);
683	u8 new_index;
684	struct mlx4_priv *priv = mlx4_priv(dev);
685	int port;
686
687	port = (sched >> 6 & 1) + 1;
688
689	new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
690	*(u8 *)(inbox->buf + 35) = new_index;
691}
692
693static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
694		       u8 slave)
695{
696	struct mlx4_qp_context	*qp_ctx = inbox->buf + 8;
697	enum mlx4_qp_optpar	optpar = be32_to_cpu(*(__be32 *) inbox->buf);
698	u32			ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
699	int port;
700
701	if (MLX4_QP_ST_UD == ts) {
702		port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
703		if (mlx4_is_eth(dev, port))
704			qp_ctx->pri_path.mgid_index =
705				mlx4_get_base_gid_ix(dev, slave, port) | 0x80;
706		else
707			qp_ctx->pri_path.mgid_index = slave | 0x80;
708
709	} else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) {
710		if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
711			port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
712			if (mlx4_is_eth(dev, port)) {
713				qp_ctx->pri_path.mgid_index +=
714					mlx4_get_base_gid_ix(dev, slave, port);
715				qp_ctx->pri_path.mgid_index &= 0x7f;
716			} else {
717				qp_ctx->pri_path.mgid_index = slave & 0x7F;
718			}
719		}
720		if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
721			port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
722			if (mlx4_is_eth(dev, port)) {
723				qp_ctx->alt_path.mgid_index +=
724					mlx4_get_base_gid_ix(dev, slave, port);
725				qp_ctx->alt_path.mgid_index &= 0x7f;
726			} else {
727				qp_ctx->alt_path.mgid_index = slave & 0x7F;
728			}
729		}
730	}
731}
732
733static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
734			  u8 slave, int port);
735
736static int update_vport_qp_param(struct mlx4_dev *dev,
737				 struct mlx4_cmd_mailbox *inbox,
738				 u8 slave, u32 qpn)
739{
740	struct mlx4_qp_context	*qpc = inbox->buf + 8;
741	struct mlx4_vport_oper_state *vp_oper;
742	struct mlx4_priv *priv;
743	u32 qp_type;
744	int port, err = 0;
745
746	port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
747	priv = mlx4_priv(dev);
748	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
749	qp_type	= (be32_to_cpu(qpc->flags) >> 16) & 0xff;
750
751	err = handle_counter(dev, qpc, slave, port);
752	if (err)
753		goto out;
754
755	if (MLX4_VGT != vp_oper->state.default_vlan) {
756		/* the reserved QPs (special, proxy, tunnel)
757		 * do not operate over vlans
758		 */
759		if (mlx4_is_qp_reserved(dev, qpn))
760			return 0;
761
762		/* force strip vlan by clear vsd, MLX QP refers to Raw Ethernet */
763		if (qp_type == MLX4_QP_ST_UD ||
764		    (qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) {
765			if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) {
766				*(__be32 *)inbox->buf =
767					cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) |
768					MLX4_QP_OPTPAR_VLAN_STRIPPING);
769				qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
770			} else {
771				struct mlx4_update_qp_params params = {.flags = 0};
772
773				err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
774				if (err)
775					goto out;
776			}
777		}
778
779		/* preserve IF_COUNTER flag */
780		qpc->pri_path.vlan_control &=
781			MLX4_CTRL_ETH_SRC_CHECK_IF_COUNTER;
782		if (1 /*vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE*/ &&
783		    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
784			qpc->pri_path.vlan_control |=
785				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
786				MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
787				MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
788				MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
789				MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
790				MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
791		} else if (0 != vp_oper->state.default_vlan) {
792			if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD)) {
793				/* vst QinQ should block untagged on TX,
794				 * but cvlan is in payload and phv is set so
795				 * hw see it as untagged. Block tagged instead.
796				 */
797				qpc->pri_path.vlan_control |=
798					MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
799					MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
800					MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
801					MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
802			} else { /* vst 802.1Q */
803				qpc->pri_path.vlan_control |=
804					MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
805					MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
806					MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
807			}
808		} else { /* priority tagged */
809			qpc->pri_path.vlan_control |=
810				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
811				MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
812		}
813
814		qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
815		qpc->pri_path.vlan_index = vp_oper->vlan_idx;
816		qpc->pri_path.fl |= MLX4_FL_ETH_HIDE_CQE_VLAN;
817		if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD))
818			qpc->pri_path.fl |= MLX4_FL_SV;
819		else
820			qpc->pri_path.fl |= MLX4_FL_CV;
821		qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
822		qpc->pri_path.sched_queue &= 0xC7;
823		qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
824		qpc->qos_vport = vp_oper->state.qos_vport;
825	}
826	if (vp_oper->state.spoofchk) {
827		qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
828		qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
829	}
830out:
831	return err;
832}
833
834static int mpt_mask(struct mlx4_dev *dev)
835{
836	return dev->caps.num_mpts - 1;
837}
838
839static void *find_res(struct mlx4_dev *dev, u64 res_id,
840		      enum mlx4_resource type)
841{
842	struct mlx4_priv *priv = mlx4_priv(dev);
843
844	return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
845				  res_id);
846}
847
848static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
849		   enum mlx4_resource type,
850		   void *res)
851{
852	struct res_common *r;
853	int err = 0;
854
855	spin_lock_irq(mlx4_tlock(dev));
856	r = find_res(dev, res_id, type);
857	if (!r) {
858		err = -ENONET;
859		goto exit;
860	}
861
862	if (r->state == RES_ANY_BUSY) {
863		err = -EBUSY;
864		goto exit;
865	}
866
867	if (r->owner != slave) {
868		err = -EPERM;
869		goto exit;
870	}
871
872	r->from_state = r->state;
873	r->state = RES_ANY_BUSY;
874
875	if (res)
876		*((struct res_common **)res) = r;
877
878exit:
879	spin_unlock_irq(mlx4_tlock(dev));
880	return err;
881}
882
883int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
884				    enum mlx4_resource type,
885				    u64 res_id, int *slave)
886{
887
888	struct res_common *r;
889	int err = -ENOENT;
890	int id = res_id;
891
892	if (type == RES_QP)
893		id &= 0x7fffff;
894	spin_lock(mlx4_tlock(dev));
895
896	r = find_res(dev, id, type);
897	if (r) {
898		*slave = r->owner;
899		err = 0;
900	}
901	spin_unlock(mlx4_tlock(dev));
902
903	return err;
904}
905
906static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
907		    enum mlx4_resource type)
908{
909	struct res_common *r;
910
911	spin_lock_irq(mlx4_tlock(dev));
912	r = find_res(dev, res_id, type);
913	if (r)
914		r->state = r->from_state;
915	spin_unlock_irq(mlx4_tlock(dev));
916}
917
918static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
919			     u64 in_param, u64 *out_param, int port);
920
921static int handle_existing_counter(struct mlx4_dev *dev, u8 slave, int port,
922				   int counter_index)
923{
924	struct res_common *r;
925	struct res_counter *counter;
926	int ret = 0;
927
928	if (counter_index == MLX4_SINK_COUNTER_INDEX(dev))
929		return ret;
930
931	spin_lock_irq(mlx4_tlock(dev));
932	r = find_res(dev, counter_index, RES_COUNTER);
933	if (!r || r->owner != slave) {
934		ret = -EINVAL;
935	} else {
936		counter = container_of(r, struct res_counter, com);
937		if (!counter->port)
938			counter->port = port;
939	}
940
941	spin_unlock_irq(mlx4_tlock(dev));
942	return ret;
943}
944
945static int handle_unexisting_counter(struct mlx4_dev *dev,
946				     struct mlx4_qp_context *qpc, u8 slave,
947				     int port)
948{
949	struct mlx4_priv *priv = mlx4_priv(dev);
950	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
951	struct res_common *tmp;
952	struct res_counter *counter;
953	u64 counter_idx = MLX4_SINK_COUNTER_INDEX(dev);
954	int err = 0;
955
956	spin_lock_irq(mlx4_tlock(dev));
957	list_for_each_entry(tmp,
958			    &tracker->slave_list[slave].res_list[RES_COUNTER],
959			    list) {
960		counter = container_of(tmp, struct res_counter, com);
961		if (port == counter->port) {
962			qpc->pri_path.counter_index  = counter->com.res_id;
963			spin_unlock_irq(mlx4_tlock(dev));
964			return 0;
965		}
966	}
967	spin_unlock_irq(mlx4_tlock(dev));
968
969	/* No existing counter, need to allocate a new counter */
970	err = counter_alloc_res(dev, slave, RES_OP_RESERVE, 0, 0, &counter_idx,
971				port);
972	if (err == -ENOENT) {
973		err = 0;
974	} else if (err && err != -ENOSPC) {
975		mlx4_err(dev, "%s: failed to create new counter for slave %d err %d\n",
976			 __func__, slave, err);
977	} else {
978		qpc->pri_path.counter_index = counter_idx;
979		mlx4_dbg(dev, "%s: alloc new counter for slave %d index %d\n",
980			 __func__, slave, qpc->pri_path.counter_index);
981		err = 0;
982	}
983
984	return err;
985}
986
987static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
988			  u8 slave, int port)
989{
990	if (qpc->pri_path.counter_index != MLX4_SINK_COUNTER_INDEX(dev))
991		return handle_existing_counter(dev, slave, port,
992					       qpc->pri_path.counter_index);
993
994	return handle_unexisting_counter(dev, qpc, slave, port);
995}
996
997static struct res_common *alloc_qp_tr(int id)
998{
999	struct res_qp *ret;
1000
1001	ret = kzalloc(sizeof *ret, GFP_KERNEL);
1002	if (!ret)
1003		return NULL;
1004
1005	ret->com.res_id = id;
1006	ret->com.state = RES_QP_RESERVED;
1007	ret->local_qpn = id;
1008	INIT_LIST_HEAD(&ret->mcg_list);
1009	spin_lock_init(&ret->mcg_spl);
1010	atomic_set(&ret->ref_count, 0);
1011
1012	return &ret->com;
1013}
1014
1015static struct res_common *alloc_mtt_tr(int id, int order)
1016{
1017	struct res_mtt *ret;
1018
1019	ret = kzalloc(sizeof *ret, GFP_KERNEL);
1020	if (!ret)
1021		return NULL;
1022
1023	ret->com.res_id = id;
1024	ret->order = order;
1025	ret->com.state = RES_MTT_ALLOCATED;
1026	atomic_set(&ret->ref_count, 0);
1027
1028	return &ret->com;
1029}
1030
1031static struct res_common *alloc_mpt_tr(int id, int key)
1032{
1033	struct res_mpt *ret;
1034
1035	ret = kzalloc(sizeof *ret, GFP_KERNEL);
1036	if (!ret)
1037		return NULL;
1038
1039	ret->com.res_id = id;
1040	ret->com.state = RES_MPT_RESERVED;
1041	ret->key = key;
1042
1043	return &ret->com;
1044}
1045
1046static struct res_common *alloc_eq_tr(int id)
1047{
1048	struct res_eq *ret;
1049
1050	ret = kzalloc(sizeof *ret, GFP_KERNEL);
1051	if (!ret)
1052		return NULL;
1053
1054	ret->com.res_id = id;
1055	ret->com.state = RES_EQ_RESERVED;
1056
1057	return &ret->com;
1058}
1059
1060static struct res_common *alloc_cq_tr(int id)
1061{
1062	struct res_cq *ret;
1063
1064	ret = kzalloc(sizeof *ret, GFP_KERNEL);
1065	if (!ret)
1066		return NULL;
1067
1068	ret->com.res_id = id;
1069	ret->com.state = RES_CQ_ALLOCATED;
1070	atomic_set(&ret->ref_count, 0);
1071
1072	return &ret->com;
1073}
1074
1075static struct res_common *alloc_srq_tr(int id)
1076{
1077	struct res_srq *ret;
1078
1079	ret = kzalloc(sizeof *ret, GFP_KERNEL);
1080	if (!ret)
1081		return NULL;
1082
1083	ret->com.res_id = id;
1084	ret->com.state = RES_SRQ_ALLOCATED;
1085	atomic_set(&ret->ref_count, 0);
1086
1087	return &ret->com;
1088}
1089
1090static struct res_common *alloc_counter_tr(int id, int port)
1091{
1092	struct res_counter *ret;
1093
1094	ret = kzalloc(sizeof *ret, GFP_KERNEL);
1095	if (!ret)
1096		return NULL;
1097
1098	ret->com.res_id = id;
1099	ret->com.state = RES_COUNTER_ALLOCATED;
1100	ret->port = port;
1101
1102	return &ret->com;
1103}
1104
1105static struct res_common *alloc_xrcdn_tr(int id)
1106{
1107	struct res_xrcdn *ret;
1108
1109	ret = kzalloc(sizeof *ret, GFP_KERNEL);
1110	if (!ret)
1111		return NULL;
1112
1113	ret->com.res_id = id;
1114	ret->com.state = RES_XRCD_ALLOCATED;
1115
1116	return &ret->com;
1117}
1118
1119static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
1120{
1121	struct res_fs_rule *ret;
1122
1123	ret = kzalloc(sizeof *ret, GFP_KERNEL);
1124	if (!ret)
1125		return NULL;
1126
1127	ret->com.res_id = id;
1128	ret->com.state = RES_FS_RULE_ALLOCATED;
1129	ret->qpn = qpn;
1130	return &ret->com;
1131}
1132
1133static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
1134				   int extra)
1135{
1136	struct res_common *ret;
1137
1138	switch (type) {
1139	case RES_QP:
1140		ret = alloc_qp_tr(id);
1141		break;
1142	case RES_MPT:
1143		ret = alloc_mpt_tr(id, extra);
1144		break;
1145	case RES_MTT:
1146		ret = alloc_mtt_tr(id, extra);
1147		break;
1148	case RES_EQ:
1149		ret = alloc_eq_tr(id);
1150		break;
1151	case RES_CQ:
1152		ret = alloc_cq_tr(id);
1153		break;
1154	case RES_SRQ:
1155		ret = alloc_srq_tr(id);
1156		break;
1157	case RES_MAC:
1158		pr_err("implementation missing\n");
1159		return NULL;
1160	case RES_COUNTER:
1161		ret = alloc_counter_tr(id, extra);
1162		break;
1163	case RES_XRCD:
1164		ret = alloc_xrcdn_tr(id);
1165		break;
1166	case RES_FS_RULE:
1167		ret = alloc_fs_rule_tr(id, extra);
1168		break;
1169	default:
1170		return NULL;
1171	}
1172	if (ret)
1173		ret->owner = slave;
1174
1175	return ret;
1176}
1177
1178int mlx4_calc_vf_counters(struct mlx4_dev *dev, int slave, int port,
1179			  struct mlx4_counter *data)
1180{
1181	struct mlx4_priv *priv = mlx4_priv(dev);
1182	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1183	struct res_common *tmp;
1184	struct res_counter *counter;
1185	int *counters_arr;
1186	int i = 0, err = 0;
1187
1188	memset(data, 0, sizeof(*data));
1189
1190	counters_arr = kmalloc_array(dev->caps.max_counters,
1191				     sizeof(*counters_arr), GFP_KERNEL);
1192	if (!counters_arr)
1193		return -ENOMEM;
1194
1195	spin_lock_irq(mlx4_tlock(dev));
1196	list_for_each_entry(tmp,
1197			    &tracker->slave_list[slave].res_list[RES_COUNTER],
1198			    list) {
1199		counter = container_of(tmp, struct res_counter, com);
1200		if (counter->port == port) {
1201			counters_arr[i] = (int)tmp->res_id;
1202			i++;
1203		}
1204	}
1205	spin_unlock_irq(mlx4_tlock(dev));
1206	counters_arr[i] = -1;
1207
1208	i = 0;
1209
1210	while (counters_arr[i] != -1) {
1211		err = mlx4_get_counter_stats(dev, counters_arr[i], data,
1212					     0);
1213		if (err) {
1214			memset(data, 0, sizeof(*data));
1215			goto table_changed;
1216		}
1217		i++;
1218	}
1219
1220table_changed:
1221	kfree(counters_arr);
1222	return 0;
1223}
1224
1225static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
1226			 enum mlx4_resource type, int extra)
1227{
1228	int i;
1229	int err;
1230	struct mlx4_priv *priv = mlx4_priv(dev);
1231	struct res_common **res_arr;
1232	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1233	struct rb_root *root = &tracker->res_tree[type];
1234
1235	res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
1236	if (!res_arr)
1237		return -ENOMEM;
1238
1239	for (i = 0; i < count; ++i) {
1240		res_arr[i] = alloc_tr(base + i, type, slave, extra);
1241		if (!res_arr[i]) {
1242			for (--i; i >= 0; --i)
1243				kfree(res_arr[i]);
1244
1245			kfree(res_arr);
1246			return -ENOMEM;
1247		}
1248	}
1249
1250	spin_lock_irq(mlx4_tlock(dev));
1251	for (i = 0; i < count; ++i) {
1252		if (find_res(dev, base + i, type)) {
1253			err = -EEXIST;
1254			goto undo;
1255		}
1256		err = res_tracker_insert(root, res_arr[i]);
1257		if (err)
1258			goto undo;
1259		list_add_tail(&res_arr[i]->list,
1260			      &tracker->slave_list[slave].res_list[type]);
1261	}
1262	spin_unlock_irq(mlx4_tlock(dev));
1263	kfree(res_arr);
1264
1265	return 0;
1266
1267undo:
1268	for (--i; i >= 0; --i) {
1269		rb_erase(&res_arr[i]->node, root);
1270		list_del_init(&res_arr[i]->list);
1271	}
1272
1273	spin_unlock_irq(mlx4_tlock(dev));
1274
1275	for (i = 0; i < count; ++i)
1276		kfree(res_arr[i]);
1277
1278	kfree(res_arr);
1279
1280	return err;
1281}
1282
1283static int remove_qp_ok(struct res_qp *res)
1284{
1285	if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
1286	    !list_empty(&res->mcg_list)) {
1287		pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
1288		       res->com.state, atomic_read(&res->ref_count));
1289		return -EBUSY;
1290	} else if (res->com.state != RES_QP_RESERVED) {
1291		return -EPERM;
1292	}
1293
1294	return 0;
1295}
1296
1297static int remove_mtt_ok(struct res_mtt *res, int order)
1298{
1299	if (res->com.state == RES_MTT_BUSY ||
1300	    atomic_read(&res->ref_count)) {
1301		pr_devel("%s-%d: state %s, ref_count %d\n",
1302			 __func__, __LINE__,
1303			 mtt_states_str(res->com.state),
1304			 atomic_read(&res->ref_count));
1305		return -EBUSY;
1306	} else if (res->com.state != RES_MTT_ALLOCATED)
1307		return -EPERM;
1308	else if (res->order != order)
1309		return -EINVAL;
1310
1311	return 0;
1312}
1313
1314static int remove_mpt_ok(struct res_mpt *res)
1315{
1316	if (res->com.state == RES_MPT_BUSY)
1317		return -EBUSY;
1318	else if (res->com.state != RES_MPT_RESERVED)
1319		return -EPERM;
1320
1321	return 0;
1322}
1323
1324static int remove_eq_ok(struct res_eq *res)
1325{
1326	if (res->com.state == RES_MPT_BUSY)
1327		return -EBUSY;
1328	else if (res->com.state != RES_MPT_RESERVED)
1329		return -EPERM;
1330
1331	return 0;
1332}
1333
1334static int remove_counter_ok(struct res_counter *res)
1335{
1336	if (res->com.state == RES_COUNTER_BUSY)
1337		return -EBUSY;
1338	else if (res->com.state != RES_COUNTER_ALLOCATED)
1339		return -EPERM;
1340
1341	return 0;
1342}
1343
1344static int remove_xrcdn_ok(struct res_xrcdn *res)
1345{
1346	if (res->com.state == RES_XRCD_BUSY)
1347		return -EBUSY;
1348	else if (res->com.state != RES_XRCD_ALLOCATED)
1349		return -EPERM;
1350
1351	return 0;
1352}
1353
1354static int remove_fs_rule_ok(struct res_fs_rule *res)
1355{
1356	if (res->com.state == RES_FS_RULE_BUSY)
1357		return -EBUSY;
1358	else if (res->com.state != RES_FS_RULE_ALLOCATED)
1359		return -EPERM;
1360
1361	return 0;
1362}
1363
1364static int remove_cq_ok(struct res_cq *res)
1365{
1366	if (res->com.state == RES_CQ_BUSY)
1367		return -EBUSY;
1368	else if (res->com.state != RES_CQ_ALLOCATED)
1369		return -EPERM;
1370
1371	return 0;
1372}
1373
1374static int remove_srq_ok(struct res_srq *res)
1375{
1376	if (res->com.state == RES_SRQ_BUSY)
1377		return -EBUSY;
1378	else if (res->com.state != RES_SRQ_ALLOCATED)
1379		return -EPERM;
1380
1381	return 0;
1382}
1383
1384static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
1385{
1386	switch (type) {
1387	case RES_QP:
1388		return remove_qp_ok((struct res_qp *)res);
1389	case RES_CQ:
1390		return remove_cq_ok((struct res_cq *)res);
1391	case RES_SRQ:
1392		return remove_srq_ok((struct res_srq *)res);
1393	case RES_MPT:
1394		return remove_mpt_ok((struct res_mpt *)res);
1395	case RES_MTT:
1396		return remove_mtt_ok((struct res_mtt *)res, extra);
1397	case RES_MAC:
1398		return -ENOSYS;
1399	case RES_EQ:
1400		return remove_eq_ok((struct res_eq *)res);
1401	case RES_COUNTER:
1402		return remove_counter_ok((struct res_counter *)res);
1403	case RES_XRCD:
1404		return remove_xrcdn_ok((struct res_xrcdn *)res);
1405	case RES_FS_RULE:
1406		return remove_fs_rule_ok((struct res_fs_rule *)res);
1407	default:
1408		return -EINVAL;
1409	}
1410}
1411
1412static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
1413			 enum mlx4_resource type, int extra)
1414{
1415	u64 i;
1416	int err;
1417	struct mlx4_priv *priv = mlx4_priv(dev);
1418	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1419	struct res_common *r;
1420
1421	spin_lock_irq(mlx4_tlock(dev));
1422	for (i = base; i < base + count; ++i) {
1423		r = res_tracker_lookup(&tracker->res_tree[type], i);
1424		if (!r) {
1425			err = -ENOENT;
1426			goto out;
1427		}
1428		if (r->owner != slave) {
1429			err = -EPERM;
1430			goto out;
1431		}
1432		err = remove_ok(r, type, extra);
1433		if (err)
1434			goto out;
1435	}
1436
1437	for (i = base; i < base + count; ++i) {
1438		r = res_tracker_lookup(&tracker->res_tree[type], i);
1439		rb_erase(&r->node, &tracker->res_tree[type]);
1440		list_del(&r->list);
1441		kfree(r);
1442	}
1443	err = 0;
1444
1445out:
1446	spin_unlock_irq(mlx4_tlock(dev));
1447
1448	return err;
1449}
1450
1451static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
1452				enum res_qp_states state, struct res_qp **qp,
1453				int alloc)
1454{
1455	struct mlx4_priv *priv = mlx4_priv(dev);
1456	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1457	struct res_qp *r;
1458	int err = 0;
1459
1460	spin_lock_irq(mlx4_tlock(dev));
1461	r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
1462	if (!r)
1463		err = -ENOENT;
1464	else if (r->com.owner != slave)
1465		err = -EPERM;
1466	else {
1467		switch (state) {
1468		case RES_QP_BUSY:
1469			mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
1470				 __func__, (unsigned long long)r->com.res_id);
1471			err = -EBUSY;
1472			break;
1473
1474		case RES_QP_RESERVED:
1475			if (r->com.state == RES_QP_MAPPED && !alloc)
1476				break;
1477
1478			mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", (unsigned long long)r->com.res_id);
1479			err = -EINVAL;
1480			break;
1481
1482		case RES_QP_MAPPED:
1483			if ((r->com.state == RES_QP_RESERVED && alloc) ||
1484			    r->com.state == RES_QP_HW)
1485				break;
1486			else {
1487				mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
1488					  (unsigned long long)r->com.res_id);
1489				err = -EINVAL;
1490			}
1491
1492			break;
1493
1494		case RES_QP_HW:
1495			if (r->com.state != RES_QP_MAPPED)
1496				err = -EINVAL;
1497			break;
1498		default:
1499			err = -EINVAL;
1500		}
1501
1502		if (!err) {
1503			r->com.from_state = r->com.state;
1504			r->com.to_state = state;
1505			r->com.state = RES_QP_BUSY;
1506			if (qp)
1507				*qp = r;
1508		}
1509	}
1510
1511	spin_unlock_irq(mlx4_tlock(dev));
1512
1513	return err;
1514}
1515
1516static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1517				enum res_mpt_states state, struct res_mpt **mpt)
1518{
1519	struct mlx4_priv *priv = mlx4_priv(dev);
1520	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1521	struct res_mpt *r;
1522	int err = 0;
1523
1524	spin_lock_irq(mlx4_tlock(dev));
1525	r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
1526	if (!r)
1527		err = -ENOENT;
1528	else if (r->com.owner != slave)
1529		err = -EPERM;
1530	else {
1531		switch (state) {
1532		case RES_MPT_BUSY:
1533			err = -EINVAL;
1534			break;
1535
1536		case RES_MPT_RESERVED:
1537			if (r->com.state != RES_MPT_MAPPED)
1538				err = -EINVAL;
1539			break;
1540
1541		case RES_MPT_MAPPED:
1542			if (r->com.state != RES_MPT_RESERVED &&
1543			    r->com.state != RES_MPT_HW)
1544				err = -EINVAL;
1545			break;
1546
1547		case RES_MPT_HW:
1548			if (r->com.state != RES_MPT_MAPPED)
1549				err = -EINVAL;
1550			break;
1551		default:
1552			err = -EINVAL;
1553		}
1554
1555		if (!err) {
1556			r->com.from_state = r->com.state;
1557			r->com.to_state = state;
1558			r->com.state = RES_MPT_BUSY;
1559			if (mpt)
1560				*mpt = r;
1561		}
1562	}
1563
1564	spin_unlock_irq(mlx4_tlock(dev));
1565
1566	return err;
1567}
1568
1569static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1570				enum res_eq_states state, struct res_eq **eq)
1571{
1572	struct mlx4_priv *priv = mlx4_priv(dev);
1573	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1574	struct res_eq *r;
1575	int err = 0;
1576
1577	spin_lock_irq(mlx4_tlock(dev));
1578	r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
1579	if (!r)
1580		err = -ENOENT;
1581	else if (r->com.owner != slave)
1582		err = -EPERM;
1583	else {
1584		switch (state) {
1585		case RES_EQ_BUSY:
1586			err = -EINVAL;
1587			break;
1588
1589		case RES_EQ_RESERVED:
1590			if (r->com.state != RES_EQ_HW)
1591				err = -EINVAL;
1592			break;
1593
1594		case RES_EQ_HW:
1595			if (r->com.state != RES_EQ_RESERVED)
1596				err = -EINVAL;
1597			break;
1598
1599		default:
1600			err = -EINVAL;
1601		}
1602
1603		if (!err) {
1604			r->com.from_state = r->com.state;
1605			r->com.to_state = state;
1606			r->com.state = RES_EQ_BUSY;
1607		}
1608	}
1609
1610	spin_unlock_irq(mlx4_tlock(dev));
1611
1612	if (!err && eq)
1613		*eq = r;
1614
1615	return err;
1616}
1617
1618static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
1619				enum res_cq_states state, struct res_cq **cq)
1620{
1621	struct mlx4_priv *priv = mlx4_priv(dev);
1622	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1623	struct res_cq *r;
1624	int err;
1625
1626	spin_lock_irq(mlx4_tlock(dev));
1627	r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
1628	if (!r) {
1629		err = -ENOENT;
1630	} else if (r->com.owner != slave) {
1631		err = -EPERM;
1632	} else if (state == RES_CQ_ALLOCATED) {
1633		if (r->com.state != RES_CQ_HW)
1634			err = -EINVAL;
1635		else if (atomic_read(&r->ref_count))
1636			err = -EBUSY;
1637		else
1638			err = 0;
1639	} else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) {
1640		err = -EINVAL;
1641	} else {
1642		err = 0;
1643	}
1644
1645	if (!err) {
1646		r->com.from_state = r->com.state;
1647		r->com.to_state = state;
1648		r->com.state = RES_CQ_BUSY;
1649		if (cq)
1650			*cq = r;
1651	}
1652
1653	spin_unlock_irq(mlx4_tlock(dev));
1654
1655	return err;
1656}
1657
1658static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1659				 enum res_srq_states state, struct res_srq **srq)
1660{
1661	struct mlx4_priv *priv = mlx4_priv(dev);
1662	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1663	struct res_srq *r;
1664	int err = 0;
1665
1666	spin_lock_irq(mlx4_tlock(dev));
1667	r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
1668	if (!r) {
1669		err = -ENOENT;
1670	} else if (r->com.owner != slave) {
1671		err = -EPERM;
1672	} else if (state == RES_SRQ_ALLOCATED) {
1673		if (r->com.state != RES_SRQ_HW)
1674			err = -EINVAL;
1675		else if (atomic_read(&r->ref_count))
1676			err = -EBUSY;
1677	} else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) {
1678		err = -EINVAL;
1679	}
1680
1681	if (!err) {
1682		r->com.from_state = r->com.state;
1683		r->com.to_state = state;
1684		r->com.state = RES_SRQ_BUSY;
1685		if (srq)
1686			*srq = r;
1687	}
1688
1689	spin_unlock_irq(mlx4_tlock(dev));
1690
1691	return err;
1692}
1693
1694static void res_abort_move(struct mlx4_dev *dev, int slave,
1695			   enum mlx4_resource type, int id)
1696{
1697	struct mlx4_priv *priv = mlx4_priv(dev);
1698	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1699	struct res_common *r;
1700
1701	spin_lock_irq(mlx4_tlock(dev));
1702	r = res_tracker_lookup(&tracker->res_tree[type], id);
1703	if (r && (r->owner == slave))
1704		r->state = r->from_state;
1705	spin_unlock_irq(mlx4_tlock(dev));
1706}
1707
1708static void res_end_move(struct mlx4_dev *dev, int slave,
1709			 enum mlx4_resource type, int id)
1710{
1711	struct mlx4_priv *priv = mlx4_priv(dev);
1712	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1713	struct res_common *r;
1714
1715	spin_lock_irq(mlx4_tlock(dev));
1716	r = res_tracker_lookup(&tracker->res_tree[type], id);
1717	if (r && (r->owner == slave))
1718		r->state = r->to_state;
1719	spin_unlock_irq(mlx4_tlock(dev));
1720}
1721
1722static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
1723{
1724	return mlx4_is_qp_reserved(dev, qpn) &&
1725		(mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
1726}
1727
1728static int fw_reserved(struct mlx4_dev *dev, int qpn)
1729{
1730	return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
1731}
1732
1733static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1734			u64 in_param, u64 *out_param)
1735{
1736	int err;
1737	int count;
1738	int align;
1739	int base;
1740	int qpn;
1741	u8 flags;
1742
1743	switch (op) {
1744	case RES_OP_RESERVE:
1745		count = get_param_l(&in_param) & 0xffffff;
1746		/* Turn off all unsupported QP allocation flags that the
1747		 * slave tries to set.
1748		 */
1749		flags = (get_param_l(&in_param) >> 24) & dev->caps.alloc_res_qp_mask;
1750		align = get_param_h(&in_param);
1751		err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
1752		if (err)
1753			return err;
1754
1755		err = __mlx4_qp_reserve_range(dev, count, align, &base, flags);
1756		if (err) {
1757			mlx4_release_resource(dev, slave, RES_QP, count, 0);
1758			return err;
1759		}
1760
1761		err = add_res_range(dev, slave, base, count, RES_QP, 0);
1762		if (err) {
1763			mlx4_release_resource(dev, slave, RES_QP, count, 0);
1764			__mlx4_qp_release_range(dev, base, count);
1765			return err;
1766		}
1767		set_param_l(out_param, base);
1768		break;
1769	case RES_OP_MAP_ICM:
1770		qpn = get_param_l(&in_param) & 0x7fffff;
1771		if (valid_reserved(dev, slave, qpn)) {
1772			err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
1773			if (err)
1774				return err;
1775		}
1776
1777		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
1778					   NULL, 1);
1779		if (err)
1780			return err;
1781
1782		if (!fw_reserved(dev, qpn)) {
1783			err = __mlx4_qp_alloc_icm(dev, qpn, GFP_KERNEL);
1784			if (err) {
1785				res_abort_move(dev, slave, RES_QP, qpn);
1786				return err;
1787			}
1788		}
1789
1790		res_end_move(dev, slave, RES_QP, qpn);
1791		break;
1792
1793	default:
1794		err = -EINVAL;
1795		break;
1796	}
1797	return err;
1798}
1799
1800static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1801			 u64 in_param, u64 *out_param)
1802{
1803	int err = -EINVAL;
1804	int base;
1805	int order;
1806
1807	if (op != RES_OP_RESERVE_AND_MAP)
1808		return err;
1809
1810	order = get_param_l(&in_param);
1811
1812	err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
1813	if (err)
1814		return err;
1815
1816	base = __mlx4_alloc_mtt_range(dev, order);
1817	if (base == -1) {
1818		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1819		return -ENOMEM;
1820	}
1821
1822	err = add_res_range(dev, slave, base, 1, RES_MTT, order);
1823	if (err) {
1824		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1825		__mlx4_free_mtt_range(dev, base, order);
1826	} else {
1827		set_param_l(out_param, base);
1828	}
1829
1830	return err;
1831}
1832
1833static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1834			 u64 in_param, u64 *out_param)
1835{
1836	int err = -EINVAL;
1837	int index;
1838	int id;
1839	struct res_mpt *mpt;
1840
1841	switch (op) {
1842	case RES_OP_RESERVE:
1843		err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
1844		if (err)
1845			break;
1846
1847		index = __mlx4_mpt_reserve(dev);
1848		if (index == -1) {
1849			mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1850			break;
1851		}
1852		id = index & mpt_mask(dev);
1853
1854		err = add_res_range(dev, slave, id, 1, RES_MPT, index);
1855		if (err) {
1856			mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1857			__mlx4_mpt_release(dev, index);
1858			break;
1859		}
1860		set_param_l(out_param, index);
1861		break;
1862	case RES_OP_MAP_ICM:
1863		index = get_param_l(&in_param);
1864		id = index & mpt_mask(dev);
1865		err = mr_res_start_move_to(dev, slave, id,
1866					   RES_MPT_MAPPED, &mpt);
1867		if (err)
1868			return err;
1869
1870		err = __mlx4_mpt_alloc_icm(dev, mpt->key, GFP_KERNEL);
1871		if (err) {
1872			res_abort_move(dev, slave, RES_MPT, id);
1873			return err;
1874		}
1875
1876		res_end_move(dev, slave, RES_MPT, id);
1877		break;
1878	}
1879	return err;
1880}
1881
1882static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1883			u64 in_param, u64 *out_param)
1884{
1885	int cqn;
1886	int err;
1887
1888	switch (op) {
1889	case RES_OP_RESERVE_AND_MAP:
1890		err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
1891		if (err)
1892			break;
1893
1894		err = __mlx4_cq_alloc_icm(dev, &cqn);
1895		if (err) {
1896			mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1897			break;
1898		}
1899
1900		err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1901		if (err) {
1902			mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1903			__mlx4_cq_free_icm(dev, cqn);
1904			break;
1905		}
1906
1907		set_param_l(out_param, cqn);
1908		break;
1909
1910	default:
1911		err = -EINVAL;
1912	}
1913
1914	return err;
1915}
1916
1917static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1918			 u64 in_param, u64 *out_param)
1919{
1920	int srqn;
1921	int err;
1922
1923	switch (op) {
1924	case RES_OP_RESERVE_AND_MAP:
1925		err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
1926		if (err)
1927			break;
1928
1929		err = __mlx4_srq_alloc_icm(dev, &srqn);
1930		if (err) {
1931			mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1932			break;
1933		}
1934
1935		err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1936		if (err) {
1937			mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1938			__mlx4_srq_free_icm(dev, srqn);
1939			break;
1940		}
1941
1942		set_param_l(out_param, srqn);
1943		break;
1944
1945	default:
1946		err = -EINVAL;
1947	}
1948
1949	return err;
1950}
1951
1952static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port,
1953				     u8 smac_index, u64 *mac)
1954{
1955	struct mlx4_priv *priv = mlx4_priv(dev);
1956	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1957	struct list_head *mac_list =
1958		&tracker->slave_list[slave].res_list[RES_MAC];
1959	struct mac_res *res, *tmp;
1960
1961	list_for_each_entry_safe(res, tmp, mac_list, list) {
1962		if (res->smac_index == smac_index && res->port == (u8) port) {
1963			*mac = res->mac;
1964			return 0;
1965		}
1966	}
1967	return -ENOENT;
1968}
1969
1970static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index)
1971{
1972	struct mlx4_priv *priv = mlx4_priv(dev);
1973	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1974	struct list_head *mac_list =
1975		&tracker->slave_list[slave].res_list[RES_MAC];
1976	struct mac_res *res, *tmp;
1977
1978	list_for_each_entry_safe(res, tmp, mac_list, list) {
1979		if (res->mac == mac && res->port == (u8) port) {
1980			/* mac found. update ref count */
1981			++res->ref_count;
1982			return 0;
1983		}
1984	}
1985
1986	if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
1987		return -EINVAL;
1988	res = kzalloc(sizeof *res, GFP_KERNEL);
1989	if (!res) {
1990		mlx4_release_resource(dev, slave, RES_MAC, 1, port);
1991		return -ENOMEM;
1992	}
1993	res->mac = mac;
1994	res->port = (u8) port;
1995	res->smac_index = smac_index;
1996	res->ref_count = 1;
1997	list_add_tail(&res->list,
1998		      &tracker->slave_list[slave].res_list[RES_MAC]);
1999	return 0;
2000}
2001
2002static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
2003			       int port)
2004{
2005	struct mlx4_priv *priv = mlx4_priv(dev);
2006	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2007	struct list_head *mac_list =
2008		&tracker->slave_list[slave].res_list[RES_MAC];
2009	struct mac_res *res, *tmp;
2010
2011	list_for_each_entry_safe(res, tmp, mac_list, list) {
2012		if (res->mac == mac && res->port == (u8) port) {
2013			if (!--res->ref_count) {
2014				list_del(&res->list);
2015				mlx4_release_resource(dev, slave, RES_MAC, 1, port);
2016				kfree(res);
2017			}
2018			break;
2019		}
2020	}
2021}
2022
2023static void rem_slave_macs(struct mlx4_dev *dev, int slave)
2024{
2025	struct mlx4_priv *priv = mlx4_priv(dev);
2026	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2027	struct list_head *mac_list =
2028		&tracker->slave_list[slave].res_list[RES_MAC];
2029	struct mac_res *res, *tmp;
2030	int i;
2031
2032	list_for_each_entry_safe(res, tmp, mac_list, list) {
2033		list_del(&res->list);
2034		/* dereference the mac the num times the slave referenced it */
2035		for (i = 0; i < res->ref_count; i++)
2036			__mlx4_unregister_mac(dev, res->port, res->mac);
2037		mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
2038		kfree(res);
2039	}
2040}
2041
2042static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2043			 u64 in_param, u64 *out_param, int in_port)
2044{
2045	int err = -EINVAL;
2046	int port;
2047	u64 mac;
2048	u8 smac_index = 0;
2049
2050	if (op != RES_OP_RESERVE_AND_MAP)
2051		return err;
2052
2053	port = !in_port ? get_param_l(out_param) : in_port;
2054	port = mlx4_slave_convert_port(
2055			dev, slave, port);
2056
2057	if (port < 0)
2058		return -EINVAL;
2059	mac = in_param;
2060
2061	err = __mlx4_register_mac(dev, port, mac);
2062	if (err >= 0) {
2063		smac_index = err;
2064		set_param_l(out_param, err);
2065		err = 0;
2066	}
2067
2068	if (!err) {
2069		err = mac_add_to_slave(dev, slave, mac, port, smac_index);
2070		if (err)
2071			__mlx4_unregister_mac(dev, port, mac);
2072	}
2073	return err;
2074}
2075
2076static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
2077			     int port, int vlan_index)
2078{
2079	struct mlx4_priv *priv = mlx4_priv(dev);
2080	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2081	struct list_head *vlan_list =
2082		&tracker->slave_list[slave].res_list[RES_VLAN];
2083	struct vlan_res *res, *tmp;
2084
2085	list_for_each_entry_safe(res, tmp, vlan_list, list) {
2086		if (res->vlan == vlan && res->port == (u8) port) {
2087			/* vlan found. update ref count */
2088			++res->ref_count;
2089			return 0;
2090		}
2091	}
2092
2093	if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
2094		return -EINVAL;
2095	res = kzalloc(sizeof(*res), GFP_KERNEL);
2096	if (!res) {
2097		mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
2098		return -ENOMEM;
2099	}
2100	res->vlan = vlan;
2101	res->port = (u8) port;
2102	res->vlan_index = vlan_index;
2103	res->ref_count = 1;
2104	list_add_tail(&res->list,
2105		      &tracker->slave_list[slave].res_list[RES_VLAN]);
2106	return 0;
2107}
2108
2109
2110static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
2111				int port)
2112{
2113	struct mlx4_priv *priv = mlx4_priv(dev);
2114	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2115	struct list_head *vlan_list =
2116		&tracker->slave_list[slave].res_list[RES_VLAN];
2117	struct vlan_res *res, *tmp;
2118
2119	list_for_each_entry_safe(res, tmp, vlan_list, list) {
2120		if (res->vlan == vlan && res->port == (u8) port) {
2121			if (!--res->ref_count) {
2122				list_del(&res->list);
2123				mlx4_release_resource(dev, slave, RES_VLAN,
2124						      1, port);
2125				kfree(res);
2126			}
2127			break;
2128		}
2129	}
2130}
2131
2132static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
2133{
2134	struct mlx4_priv *priv = mlx4_priv(dev);
2135	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2136	struct list_head *vlan_list =
2137		&tracker->slave_list[slave].res_list[RES_VLAN];
2138	struct vlan_res *res, *tmp;
2139	int i;
2140
2141	list_for_each_entry_safe(res, tmp, vlan_list, list) {
2142		list_del(&res->list);
2143		/* dereference the vlan the num times the slave referenced it */
2144		for (i = 0; i < res->ref_count; i++)
2145			__mlx4_unregister_vlan(dev, res->port, res->vlan);
2146		mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
2147		kfree(res);
2148	}
2149}
2150
2151static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2152			  u64 in_param, u64 *out_param, int in_port)
2153{
2154	struct mlx4_priv *priv = mlx4_priv(dev);
2155	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2156	int err;
2157	u16 vlan;
2158	int vlan_index;
2159	int port;
2160
2161	port = !in_port ? get_param_l(out_param) : in_port;
2162
2163	if (!port || op != RES_OP_RESERVE_AND_MAP)
2164		return -EINVAL;
2165
2166	port = mlx4_slave_convert_port(
2167			dev, slave, port);
2168
2169	if (port < 0)
2170		return -EINVAL;
2171	/* upstream kernels had NOP for reg/unreg vlan. Continue this. */
2172	if (!in_port && port > 0 && port <= dev->caps.num_ports) {
2173		slave_state[slave].old_vlan_api = true;
2174		return 0;
2175	}
2176
2177	vlan = (u16) in_param;
2178
2179	err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
2180	if (!err) {
2181		set_param_l(out_param, (u32) vlan_index);
2182		err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
2183		if (err)
2184			__mlx4_unregister_vlan(dev, port, vlan);
2185	}
2186	return err;
2187}
2188
2189static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2190			     u64 in_param, u64 *out_param, int port)
2191{
2192	u32 index;
2193	int err;
2194
2195	if (op != RES_OP_RESERVE)
2196		return -EINVAL;
2197
2198	err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
2199	if (err)
2200		return err;
2201
2202	err = __mlx4_counter_alloc(dev, &index);
2203	if (err) {
2204		mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2205		return err;
2206	}
2207
2208	err = add_res_range(dev, slave, index, 1, RES_COUNTER, port);
2209	if (err) {
2210		__mlx4_counter_free(dev, index);
2211		mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2212	} else {
2213		set_param_l(out_param, index);
2214	}
2215
2216	return err;
2217}
2218
2219static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2220			   u64 in_param, u64 *out_param)
2221{
2222	u32 xrcdn;
2223	int err;
2224
2225	if (op != RES_OP_RESERVE)
2226		return -EINVAL;
2227
2228	err = __mlx4_xrcd_alloc(dev, &xrcdn);
2229	if (err)
2230		return err;
2231
2232	err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2233	if (err)
2234		__mlx4_xrcd_free(dev, xrcdn);
2235	else
2236		set_param_l(out_param, xrcdn);
2237
2238	return err;
2239}
2240
2241int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
2242			   struct mlx4_vhcr *vhcr,
2243			   struct mlx4_cmd_mailbox *inbox,
2244			   struct mlx4_cmd_mailbox *outbox,
2245			   struct mlx4_cmd_info *cmd)
2246{
2247	int err;
2248	int alop = vhcr->op_modifier;
2249
2250	switch (vhcr->in_modifier & 0xFF) {
2251	case RES_QP:
2252		err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
2253				   vhcr->in_param, &vhcr->out_param);
2254		break;
2255
2256	case RES_MTT:
2257		err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2258				    vhcr->in_param, &vhcr->out_param);
2259		break;
2260
2261	case RES_MPT:
2262		err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2263				    vhcr->in_param, &vhcr->out_param);
2264		break;
2265
2266	case RES_CQ:
2267		err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2268				   vhcr->in_param, &vhcr->out_param);
2269		break;
2270
2271	case RES_SRQ:
2272		err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2273				    vhcr->in_param, &vhcr->out_param);
2274		break;
2275
2276	case RES_MAC:
2277		err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
2278				    vhcr->in_param, &vhcr->out_param,
2279				    (vhcr->in_modifier >> 8) & 0xFF);
2280		break;
2281
2282	case RES_VLAN:
2283		err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
2284				     vhcr->in_param, &vhcr->out_param,
2285				     (vhcr->in_modifier >> 8) & 0xFF);
2286		break;
2287
2288	case RES_COUNTER:
2289		err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
2290					vhcr->in_param, &vhcr->out_param, 0);
2291		break;
2292
2293	case RES_XRCD:
2294		err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
2295				      vhcr->in_param, &vhcr->out_param);
2296		break;
2297
2298	default:
2299		err = -EINVAL;
2300		break;
2301	}
2302
2303	return err;
2304}
2305
2306static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2307		       u64 in_param)
2308{
2309	int err;
2310	int count;
2311	int base;
2312	int qpn;
2313
2314	switch (op) {
2315	case RES_OP_RESERVE:
2316		base = get_param_l(&in_param) & 0x7fffff;
2317		count = get_param_h(&in_param);
2318		err = rem_res_range(dev, slave, base, count, RES_QP, 0);
2319		if (err)
2320			break;
2321		mlx4_release_resource(dev, slave, RES_QP, count, 0);
2322		__mlx4_qp_release_range(dev, base, count);
2323		break;
2324	case RES_OP_MAP_ICM:
2325		qpn = get_param_l(&in_param) & 0x7fffff;
2326		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
2327					   NULL, 0);
2328		if (err)
2329			return err;
2330
2331		if (!fw_reserved(dev, qpn))
2332			__mlx4_qp_free_icm(dev, qpn);
2333
2334		res_end_move(dev, slave, RES_QP, qpn);
2335
2336		if (valid_reserved(dev, slave, qpn))
2337			err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
2338		break;
2339	default:
2340		err = -EINVAL;
2341		break;
2342	}
2343	return err;
2344}
2345
2346static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2347			u64 in_param, u64 *out_param)
2348{
2349	int err = -EINVAL;
2350	int base;
2351	int order;
2352
2353	if (op != RES_OP_RESERVE_AND_MAP)
2354		return err;
2355
2356	base = get_param_l(&in_param);
2357	order = get_param_h(&in_param);
2358	err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
2359	if (!err) {
2360		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
2361		__mlx4_free_mtt_range(dev, base, order);
2362	}
2363	return err;
2364}
2365
2366static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2367			u64 in_param)
2368{
2369	int err = -EINVAL;
2370	int index;
2371	int id;
2372	struct res_mpt *mpt;
2373
2374	switch (op) {
2375	case RES_OP_RESERVE:
2376		index = get_param_l(&in_param);
2377		id = index & mpt_mask(dev);
2378		err = get_res(dev, slave, id, RES_MPT, &mpt);
2379		if (err)
2380			break;
2381		index = mpt->key;
2382		put_res(dev, slave, id, RES_MPT);
2383
2384		err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
2385		if (err)
2386			break;
2387		mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
2388		__mlx4_mpt_release(dev, index);
2389		break;
2390	case RES_OP_MAP_ICM:
2391		index = get_param_l(&in_param);
2392		id = index & mpt_mask(dev);
2393		err = mr_res_start_move_to(dev, slave, id,
2394					   RES_MPT_RESERVED, &mpt);
2395		if (err)
2396			return err;
2397
2398		__mlx4_mpt_free_icm(dev, mpt->key);
2399		res_end_move(dev, slave, RES_MPT, id);
2400		break;
2401	default:
2402		err = -EINVAL;
2403		break;
2404	}
2405	return err;
2406}
2407
2408static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2409		       u64 in_param, u64 *out_param)
2410{
2411	int cqn;
2412	int err;
2413
2414	switch (op) {
2415	case RES_OP_RESERVE_AND_MAP:
2416		cqn = get_param_l(&in_param);
2417		err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
2418		if (err)
2419			break;
2420
2421		mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
2422		__mlx4_cq_free_icm(dev, cqn);
2423		break;
2424
2425	default:
2426		err = -EINVAL;
2427		break;
2428	}
2429
2430	return err;
2431}
2432
2433static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2434			u64 in_param, u64 *out_param)
2435{
2436	int srqn;
2437	int err;
2438
2439	switch (op) {
2440	case RES_OP_RESERVE_AND_MAP:
2441		srqn = get_param_l(&in_param);
2442		err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
2443		if (err)
2444			break;
2445
2446		mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
2447		__mlx4_srq_free_icm(dev, srqn);
2448		break;
2449
2450	default:
2451		err = -EINVAL;
2452		break;
2453	}
2454
2455	return err;
2456}
2457
2458static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2459			    u64 in_param, u64 *out_param, int in_port)
2460{
2461	int port;
2462	int err = 0;
2463
2464	switch (op) {
2465	case RES_OP_RESERVE_AND_MAP:
2466		port = !in_port ? get_param_l(out_param) : in_port;
2467		port = mlx4_slave_convert_port(
2468				dev, slave, port);
2469
2470		if (port < 0)
2471			return -EINVAL;
2472		mac_del_from_slave(dev, slave, in_param, port);
2473		__mlx4_unregister_mac(dev, port, in_param);
2474		break;
2475	default:
2476		err = -EINVAL;
2477		break;
2478	}
2479
2480	return err;
2481
2482}
2483
2484static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2485			    u64 in_param, u64 *out_param, int port)
2486{
2487	struct mlx4_priv *priv = mlx4_priv(dev);
2488	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2489	int err = 0;
2490
2491	port = mlx4_slave_convert_port(
2492			dev, slave, port);
2493
2494	if (port < 0)
2495		return -EINVAL;
2496	switch (op) {
2497	case RES_OP_RESERVE_AND_MAP:
2498		if (slave_state[slave].old_vlan_api)
2499			return 0;
2500		if (!port)
2501			return -EINVAL;
2502		vlan_del_from_slave(dev, slave, in_param, port);
2503		__mlx4_unregister_vlan(dev, port, in_param);
2504		break;
2505	default:
2506		err = -EINVAL;
2507		break;
2508	}
2509
2510	return err;
2511}
2512
2513static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2514			    u64 in_param, u64 *out_param)
2515{
2516	int index;
2517	int err;
2518
2519	if (op != RES_OP_RESERVE)
2520		return -EINVAL;
2521
2522	index = get_param_l(&in_param);
2523	if (index == MLX4_SINK_COUNTER_INDEX(dev))
2524		return 0;
2525
2526	err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
2527	if (err)
2528		return err;
2529
2530	__mlx4_counter_free(dev, index);
2531	mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2532
2533	return err;
2534}
2535
2536static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2537			  u64 in_param, u64 *out_param)
2538{
2539	int xrcdn;
2540	int err;
2541
2542	if (op != RES_OP_RESERVE)
2543		return -EINVAL;
2544
2545	xrcdn = get_param_l(&in_param);
2546	err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2547	if (err)
2548		return err;
2549
2550	__mlx4_xrcd_free(dev, xrcdn);
2551
2552	return err;
2553}
2554
2555int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
2556			  struct mlx4_vhcr *vhcr,
2557			  struct mlx4_cmd_mailbox *inbox,
2558			  struct mlx4_cmd_mailbox *outbox,
2559			  struct mlx4_cmd_info *cmd)
2560{
2561	int err = -EINVAL;
2562	int alop = vhcr->op_modifier;
2563
2564	switch (vhcr->in_modifier & 0xFF) {
2565	case RES_QP:
2566		err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
2567				  vhcr->in_param);
2568		break;
2569
2570	case RES_MTT:
2571		err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
2572				   vhcr->in_param, &vhcr->out_param);
2573		break;
2574
2575	case RES_MPT:
2576		err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
2577				   vhcr->in_param);
2578		break;
2579
2580	case RES_CQ:
2581		err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
2582				  vhcr->in_param, &vhcr->out_param);
2583		break;
2584
2585	case RES_SRQ:
2586		err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
2587				   vhcr->in_param, &vhcr->out_param);
2588		break;
2589
2590	case RES_MAC:
2591		err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
2592				   vhcr->in_param, &vhcr->out_param,
2593				   (vhcr->in_modifier >> 8) & 0xFF);
2594		break;
2595
2596	case RES_VLAN:
2597		err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
2598				    vhcr->in_param, &vhcr->out_param,
2599				    (vhcr->in_modifier >> 8) & 0xFF);
2600		break;
2601
2602	case RES_COUNTER:
2603		err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
2604				       vhcr->in_param, &vhcr->out_param);
2605		break;
2606
2607	case RES_XRCD:
2608		err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
2609				     vhcr->in_param, &vhcr->out_param);
2610
2611	default:
2612		break;
2613	}
2614	return err;
2615}
2616
2617/* ugly but other choices are uglier */
2618static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
2619{
2620	return (be32_to_cpu(mpt->flags) >> 9) & 1;
2621}
2622
2623static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
2624{
2625	return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
2626}
2627
2628static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
2629{
2630	return be32_to_cpu(mpt->mtt_sz);
2631}
2632
2633static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
2634{
2635	return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
2636}
2637
2638static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
2639{
2640	return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
2641}
2642
2643static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
2644{
2645	return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
2646}
2647
2648static int mr_is_region(struct mlx4_mpt_entry *mpt)
2649{
2650	return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
2651}
2652
2653static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
2654{
2655	return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
2656}
2657
2658static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
2659{
2660	return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
2661}
2662
2663static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
2664{
2665	int page_shift = (qpc->log_page_size & 0x3f) + 12;
2666	int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
2667	int log_sq_sride = qpc->sq_size_stride & 7;
2668	int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
2669	int log_rq_stride = qpc->rq_size_stride & 7;
2670	int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
2671	int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
2672	u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
2673	int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
2674	int sq_size;
2675	int rq_size;
2676	int total_pages;
2677	int total_mem;
2678	int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
2679
2680	sq_size = 1 << (log_sq_size + log_sq_sride + 4);
2681	rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
2682	total_mem = sq_size + rq_size;
2683	total_pages =
2684		roundup_pow_of_two((total_mem + (page_offset << 6)) >>
2685				   page_shift);
2686
2687	return total_pages;
2688}
2689
2690static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
2691			   int size, struct res_mtt *mtt)
2692{
2693	int res_start = mtt->com.res_id;
2694	int res_size = (1 << mtt->order);
2695
2696	if (start < res_start || start + size > res_start + res_size)
2697		return -EPERM;
2698	return 0;
2699}
2700
2701int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2702			   struct mlx4_vhcr *vhcr,
2703			   struct mlx4_cmd_mailbox *inbox,
2704			   struct mlx4_cmd_mailbox *outbox,
2705			   struct mlx4_cmd_info *cmd)
2706{
2707	int err;
2708	int index = vhcr->in_modifier;
2709	struct res_mtt *mtt;
2710	struct res_mpt *mpt;
2711	int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
2712	int phys;
2713	int id;
2714	u32 pd;
2715	int pd_slave;
2716
2717	id = index & mpt_mask(dev);
2718	err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
2719	if (err)
2720		return err;
2721
2722	/* Disable memory windows for VFs. */
2723	if (!mr_is_region(inbox->buf)) {
2724		err = -EPERM;
2725		goto ex_abort;
2726	}
2727
2728	/* Make sure that the PD bits related to the slave id are zeros. */
2729	pd = mr_get_pd(inbox->buf);
2730	pd_slave = (pd >> 17) & 0x7f;
2731	if (pd_slave != 0 && --pd_slave != slave) {
2732		err = -EPERM;
2733		goto ex_abort;
2734	}
2735
2736	if (mr_is_fmr(inbox->buf)) {
2737		/* FMR and Bind Enable are forbidden in slave devices. */
2738		if (mr_is_bind_enabled(inbox->buf)) {
2739			err = -EPERM;
2740			goto ex_abort;
2741		}
2742		/* FMR and Memory Windows are also forbidden. */
2743		if (!mr_is_region(inbox->buf)) {
2744			err = -EPERM;
2745			goto ex_abort;
2746		}
2747	}
2748
2749	phys = mr_phys_mpt(inbox->buf);
2750	if (!phys) {
2751		err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2752		if (err)
2753			goto ex_abort;
2754
2755		err = check_mtt_range(dev, slave, mtt_base,
2756				      mr_get_mtt_size(inbox->buf), mtt);
2757		if (err)
2758			goto ex_put;
2759
2760		mpt->mtt = mtt;
2761	}
2762
2763	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2764	if (err)
2765		goto ex_put;
2766
2767	if (!phys) {
2768		atomic_inc(&mtt->ref_count);
2769		put_res(dev, slave, mtt->com.res_id, RES_MTT);
2770	}
2771
2772	res_end_move(dev, slave, RES_MPT, id);
2773	return 0;
2774
2775ex_put:
2776	if (!phys)
2777		put_res(dev, slave, mtt->com.res_id, RES_MTT);
2778ex_abort:
2779	res_abort_move(dev, slave, RES_MPT, id);
2780
2781	return err;
2782}
2783
2784int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2785			   struct mlx4_vhcr *vhcr,
2786			   struct mlx4_cmd_mailbox *inbox,
2787			   struct mlx4_cmd_mailbox *outbox,
2788			   struct mlx4_cmd_info *cmd)
2789{
2790	int err;
2791	int index = vhcr->in_modifier;
2792	struct res_mpt *mpt;
2793	int id;
2794
2795	id = index & mpt_mask(dev);
2796	err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
2797	if (err)
2798		return err;
2799
2800	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2801	if (err)
2802		goto ex_abort;
2803
2804	if (mpt->mtt)
2805		atomic_dec(&mpt->mtt->ref_count);
2806
2807	res_end_move(dev, slave, RES_MPT, id);
2808	return 0;
2809
2810ex_abort:
2811	res_abort_move(dev, slave, RES_MPT, id);
2812
2813	return err;
2814}
2815
2816int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
2817			   struct mlx4_vhcr *vhcr,
2818			   struct mlx4_cmd_mailbox *inbox,
2819			   struct mlx4_cmd_mailbox *outbox,
2820			   struct mlx4_cmd_info *cmd)
2821{
2822	int err;
2823	int index = vhcr->in_modifier;
2824	struct res_mpt *mpt;
2825	int id;
2826
2827	id = index & mpt_mask(dev);
2828	err = get_res(dev, slave, id, RES_MPT, &mpt);
2829	if (err)
2830		return err;
2831
2832	if (mpt->com.from_state == RES_MPT_MAPPED) {
2833		/* In order to allow rereg in SRIOV, we need to alter the MPT entry. To do
2834		 * that, the VF must read the MPT. But since the MPT entry memory is not
2835		 * in the VF's virtual memory space, it must use QUERY_MPT to obtain the
2836		 * entry contents. To guarantee that the MPT cannot be changed, the driver
2837		 * must perform HW2SW_MPT before this query and return the MPT entry to HW
2838		 * ownership fofollowing the change. The change here allows the VF to
2839		 * perform QUERY_MPT also when the entry is in SW ownership.
2840		 */
2841		struct mlx4_mpt_entry *mpt_entry = mlx4_table_find(
2842					&mlx4_priv(dev)->mr_table.dmpt_table,
2843					mpt->key, NULL);
2844
2845		if (NULL == mpt_entry || NULL == outbox->buf) {
2846			err = -EINVAL;
2847			goto out;
2848		}
2849
2850		memcpy(outbox->buf, mpt_entry, sizeof(*mpt_entry));
2851
2852		err = 0;
2853	} else if (mpt->com.from_state == RES_MPT_HW) {
2854		err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2855	} else {
2856		err = -EBUSY;
2857		goto out;
2858	}
2859
2860
2861out:
2862	put_res(dev, slave, id, RES_MPT);
2863	return err;
2864}
2865
2866static int qp_get_rcqn(struct mlx4_qp_context *qpc)
2867{
2868	return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
2869}
2870
2871static int qp_get_scqn(struct mlx4_qp_context *qpc)
2872{
2873	return be32_to_cpu(qpc->cqn_send) & 0xffffff;
2874}
2875
2876static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
2877{
2878	return be32_to_cpu(qpc->srqn) & 0x1ffffff;
2879}
2880
2881static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
2882				  struct mlx4_qp_context *context)
2883{
2884	u32 qpn = vhcr->in_modifier & 0xffffff;
2885	u32 qkey = 0;
2886
2887	if (mlx4_get_parav_qkey(dev, qpn, &qkey))
2888		return;
2889
2890	/* adjust qkey in qp context */
2891	context->qkey = cpu_to_be32(qkey);
2892}
2893
2894static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
2895				 struct mlx4_qp_context *qpc,
2896				 struct mlx4_cmd_mailbox *inbox);
2897
2898int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2899			     struct mlx4_vhcr *vhcr,
2900			     struct mlx4_cmd_mailbox *inbox,
2901			     struct mlx4_cmd_mailbox *outbox,
2902			     struct mlx4_cmd_info *cmd)
2903{
2904	int err;
2905	int qpn = vhcr->in_modifier & 0x7fffff;
2906	struct res_mtt *mtt;
2907	struct res_qp *qp;
2908	struct mlx4_qp_context *qpc = inbox->buf + 8;
2909	int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
2910	int mtt_size = qp_get_mtt_size(qpc);
2911	struct res_cq *rcq;
2912	struct res_cq *scq;
2913	int rcqn = qp_get_rcqn(qpc);
2914	int scqn = qp_get_scqn(qpc);
2915	u32 srqn = qp_get_srqn(qpc) & 0xffffff;
2916	int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
2917	struct res_srq *srq;
2918	int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
2919
2920	err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
2921	if (err)
2922		return err;
2923
2924	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
2925	if (err)
2926		return err;
2927	qp->local_qpn = local_qpn;
2928	qp->sched_queue = 0;
2929	qp->param3 = 0;
2930	qp->vlan_control = 0;
2931	qp->fvl_rx = 0;
2932	qp->pri_path_fl = 0;
2933	qp->vlan_index = 0;
2934	qp->feup = 0;
2935	qp->qpc_flags = be32_to_cpu(qpc->flags);
2936
2937	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2938	if (err)
2939		goto ex_abort;
2940
2941	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2942	if (err)
2943		goto ex_put_mtt;
2944
2945	err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
2946	if (err)
2947		goto ex_put_mtt;
2948
2949	if (scqn != rcqn) {
2950		err = get_res(dev, slave, scqn, RES_CQ, &scq);
2951		if (err)
2952			goto ex_put_rcq;
2953	} else
2954		scq = rcq;
2955
2956	if (use_srq) {
2957		err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2958		if (err)
2959			goto ex_put_scq;
2960	}
2961
2962	adjust_proxy_tun_qkey(dev, vhcr, qpc);
2963	update_pkey_index(dev, slave, inbox);
2964	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2965	if (err)
2966		goto ex_put_srq;
2967	atomic_inc(&mtt->ref_count);
2968	qp->mtt = mtt;
2969	atomic_inc(&rcq->ref_count);
2970	qp->rcq = rcq;
2971	atomic_inc(&scq->ref_count);
2972	qp->scq = scq;
2973
2974	if (scqn != rcqn)
2975		put_res(dev, slave, scqn, RES_CQ);
2976
2977	if (use_srq) {
2978		atomic_inc(&srq->ref_count);
2979		put_res(dev, slave, srqn, RES_SRQ);
2980		qp->srq = srq;
2981	}
2982	put_res(dev, slave, rcqn, RES_CQ);
2983	put_res(dev, slave, mtt_base, RES_MTT);
2984	res_end_move(dev, slave, RES_QP, qpn);
2985
2986	return 0;
2987
2988ex_put_srq:
2989	if (use_srq)
2990		put_res(dev, slave, srqn, RES_SRQ);
2991ex_put_scq:
2992	if (scqn != rcqn)
2993		put_res(dev, slave, scqn, RES_CQ);
2994ex_put_rcq:
2995	put_res(dev, slave, rcqn, RES_CQ);
2996ex_put_mtt:
2997	put_res(dev, slave, mtt_base, RES_MTT);
2998ex_abort:
2999	res_abort_move(dev, slave, RES_QP, qpn);
3000
3001	return err;
3002}
3003
3004static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
3005{
3006	return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
3007}
3008
3009static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
3010{
3011	int log_eq_size = eqc->log_eq_size & 0x1f;
3012	int page_shift = (eqc->log_page_size & 0x3f) + 12;
3013
3014	if (log_eq_size + 5 < page_shift)
3015		return 1;
3016
3017	return 1 << (log_eq_size + 5 - page_shift);
3018}
3019
3020static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
3021{
3022	return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
3023}
3024
3025static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
3026{
3027	int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
3028	int page_shift = (cqc->log_page_size & 0x3f) + 12;
3029
3030	if (log_cq_size + 5 < page_shift)
3031		return 1;
3032
3033	return 1 << (log_cq_size + 5 - page_shift);
3034}
3035
3036int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
3037			  struct mlx4_vhcr *vhcr,
3038			  struct mlx4_cmd_mailbox *inbox,
3039			  struct mlx4_cmd_mailbox *outbox,
3040			  struct mlx4_cmd_info *cmd)
3041{
3042	int err;
3043	int eqn = vhcr->in_modifier;
3044	int res_id = (slave << 10) | eqn;
3045	struct mlx4_eq_context *eqc = inbox->buf;
3046	int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
3047	int mtt_size = eq_get_mtt_size(eqc);
3048	struct res_eq *eq;
3049	struct res_mtt *mtt;
3050
3051	err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3052	if (err)
3053		return err;
3054	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
3055	if (err)
3056		goto out_add;
3057
3058	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3059	if (err)
3060		goto out_move;
3061
3062	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
3063	if (err)
3064		goto out_put;
3065
3066	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3067	if (err)
3068		goto out_put;
3069
3070	atomic_inc(&mtt->ref_count);
3071	eq->mtt = mtt;
3072	put_res(dev, slave, mtt->com.res_id, RES_MTT);
3073	res_end_move(dev, slave, RES_EQ, res_id);
3074	return 0;
3075
3076out_put:
3077	put_res(dev, slave, mtt->com.res_id, RES_MTT);
3078out_move:
3079	res_abort_move(dev, slave, RES_EQ, res_id);
3080out_add:
3081	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3082	return err;
3083}
3084
3085int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave,
3086			    struct mlx4_vhcr *vhcr,
3087			    struct mlx4_cmd_mailbox *inbox,
3088			    struct mlx4_cmd_mailbox *outbox,
3089			    struct mlx4_cmd_info *cmd)
3090{
3091	int err;
3092	u8 get = vhcr->op_modifier;
3093
3094	if (get != 1)
3095		return -EPERM;
3096
3097	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3098
3099	return err;
3100}
3101
3102static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
3103			      int len, struct res_mtt **res)
3104{
3105	struct mlx4_priv *priv = mlx4_priv(dev);
3106	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3107	struct res_mtt *mtt;
3108	int err = -EINVAL;
3109
3110	spin_lock_irq(mlx4_tlock(dev));
3111	list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
3112			    com.list) {
3113		if (!check_mtt_range(dev, slave, start, len, mtt)) {
3114			*res = mtt;
3115			mtt->com.from_state = mtt->com.state;
3116			mtt->com.state = RES_MTT_BUSY;
3117			err = 0;
3118			break;
3119		}
3120	}
3121	spin_unlock_irq(mlx4_tlock(dev));
3122
3123	return err;
3124}
3125
3126static int verify_qp_parameters(struct mlx4_dev *dev,
3127				struct mlx4_vhcr *vhcr,
3128				struct mlx4_cmd_mailbox *inbox,
3129				enum qp_transition transition, u8 slave)
3130{
3131	u32			qp_type;
3132	u32			qpn;
3133	struct mlx4_qp_context	*qp_ctx;
3134	enum mlx4_qp_optpar	optpar;
3135	int port;
3136	int num_gids;
3137
3138	qp_ctx  = inbox->buf + 8;
3139	qp_type	= (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
3140	optpar	= be32_to_cpu(*(__be32 *) inbox->buf);
3141
3142	if (slave != mlx4_master_func_num(dev)) {
3143		qp_ctx->params2 &= ~MLX4_QP_BIT_FPP;
3144		/* setting QP rate-limit is disallowed for VFs */
3145		if (qp_ctx->rate_limit_params)
3146			return -EPERM;
3147	}
3148
3149	switch (qp_type) {
3150	case MLX4_QP_ST_RC:
3151	case MLX4_QP_ST_XRC:
3152	case MLX4_QP_ST_UC:
3153		switch (transition) {
3154		case QP_TRANS_INIT2RTR:
3155		case QP_TRANS_RTR2RTS:
3156		case QP_TRANS_RTS2RTS:
3157		case QP_TRANS_SQD2SQD:
3158		case QP_TRANS_SQD2RTS:
3159			if (slave != mlx4_master_func_num(dev)) {
3160				if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
3161					port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
3162					if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
3163						num_gids = mlx4_get_slave_num_gids(dev, slave, port);
3164					else
3165						num_gids = 1;
3166					if (qp_ctx->pri_path.mgid_index >= num_gids)
3167						return -EINVAL;
3168				}
3169				if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3170					port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
3171					if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
3172						num_gids = mlx4_get_slave_num_gids(dev, slave, port);
3173					else
3174						num_gids = 1;
3175					if (qp_ctx->alt_path.mgid_index >= num_gids)
3176						return -EINVAL;
3177				}
3178			}
3179			break;
3180		default:
3181			break;
3182		}
3183		break;
3184
3185	case MLX4_QP_ST_MLX:
3186		qpn = vhcr->in_modifier & 0x7fffff;
3187		port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
3188		if (transition == QP_TRANS_INIT2RTR &&
3189		    slave != mlx4_master_func_num(dev) &&
3190		    mlx4_is_qp_reserved(dev, qpn) &&
3191		    !mlx4_vf_smi_enabled(dev, slave, port)) {
3192			/* only enabled VFs may create MLX proxy QPs */
3193			mlx4_err(dev, "%s: unprivileged slave %d attempting to create an MLX proxy special QP on port %d\n",
3194				 __func__, slave, port);
3195			return -EPERM;
3196		}
3197		break;
3198
3199	default:
3200		break;
3201	}
3202
3203	return 0;
3204}
3205
3206int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
3207			   struct mlx4_vhcr *vhcr,
3208			   struct mlx4_cmd_mailbox *inbox,
3209			   struct mlx4_cmd_mailbox *outbox,
3210			   struct mlx4_cmd_info *cmd)
3211{
3212	struct mlx4_mtt mtt;
3213	__be64 *page_list = inbox->buf;
3214	u64 *pg_list = (u64 *)page_list;
3215	int i;
3216	struct res_mtt *rmtt = NULL;
3217	int start = be64_to_cpu(page_list[0]);
3218	int npages = vhcr->in_modifier;
3219	int err;
3220
3221	err = get_containing_mtt(dev, slave, start, npages, &rmtt);
3222	if (err)
3223		return err;
3224
3225	/* Call the SW implementation of write_mtt:
3226	 * - Prepare a dummy mtt struct
3227	 * - Translate inbox contents to simple addresses in host endianness */
3228	mtt.offset = 0;  /* TBD this is broken but I don't handle it since
3229			    we don't really use it */
3230	mtt.order = 0;
3231	mtt.page_shift = 0;
3232	for (i = 0; i < npages; ++i)
3233		pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
3234
3235	err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
3236			       ((u64 *)page_list + 2));
3237
3238	if (rmtt)
3239		put_res(dev, slave, rmtt->com.res_id, RES_MTT);
3240
3241	return err;
3242}
3243
3244int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
3245			  struct mlx4_vhcr *vhcr,
3246			  struct mlx4_cmd_mailbox *inbox,
3247			  struct mlx4_cmd_mailbox *outbox,
3248			  struct mlx4_cmd_info *cmd)
3249{
3250	int eqn = vhcr->in_modifier;
3251	int res_id = eqn | (slave << 10);
3252	struct res_eq *eq;
3253	int err;
3254
3255	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
3256	if (err)
3257		return err;
3258
3259	err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
3260	if (err)
3261		goto ex_abort;
3262
3263	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3264	if (err)
3265		goto ex_put;
3266
3267	atomic_dec(&eq->mtt->ref_count);
3268	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3269	res_end_move(dev, slave, RES_EQ, res_id);
3270	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3271
3272	return 0;
3273
3274ex_put:
3275	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3276ex_abort:
3277	res_abort_move(dev, slave, RES_EQ, res_id);
3278
3279	return err;
3280}
3281
3282int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
3283{
3284	struct mlx4_priv *priv = mlx4_priv(dev);
3285	struct mlx4_slave_event_eq_info *event_eq;
3286	struct mlx4_cmd_mailbox *mailbox;
3287	u32 in_modifier = 0;
3288	int err;
3289	int res_id;
3290	struct res_eq *req;
3291
3292	if (!priv->mfunc.master.slave_state)
3293		return -EINVAL;
3294
3295	/* check for slave valid, slave not PF, and slave active */
3296	if (slave < 0 || slave > dev->persist->num_vfs ||
3297	    slave == dev->caps.function ||
3298	    !priv->mfunc.master.slave_state[slave].active)
3299		return 0;
3300
3301	event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
3302
3303	/* Create the event only if the slave is registered */
3304	if (event_eq->eqn < 0)
3305		return 0;
3306
3307	mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3308	res_id = (slave << 10) | event_eq->eqn;
3309	err = get_res(dev, slave, res_id, RES_EQ, &req);
3310	if (err)
3311		goto unlock;
3312
3313	if (req->com.from_state != RES_EQ_HW) {
3314		err = -EINVAL;
3315		goto put;
3316	}
3317
3318	mailbox = mlx4_alloc_cmd_mailbox(dev);
3319	if (IS_ERR(mailbox)) {
3320		err = PTR_ERR(mailbox);
3321		goto put;
3322	}
3323
3324	if (eqe->type == MLX4_EVENT_TYPE_CMD) {
3325		++event_eq->token;
3326		eqe->event.cmd.token = cpu_to_be16(event_eq->token);
3327	}
3328
3329	memcpy(mailbox->buf, (u8 *) eqe, 28);
3330
3331	in_modifier = (slave & 0xff) | ((event_eq->eqn & 0x3ff) << 16);
3332
3333	err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
3334		       MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
3335		       MLX4_CMD_NATIVE);
3336
3337	put_res(dev, slave, res_id, RES_EQ);
3338	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3339	mlx4_free_cmd_mailbox(dev, mailbox);
3340	return err;
3341
3342put:
3343	put_res(dev, slave, res_id, RES_EQ);
3344
3345unlock:
3346	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3347	return err;
3348}
3349
3350int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
3351			  struct mlx4_vhcr *vhcr,
3352			  struct mlx4_cmd_mailbox *inbox,
3353			  struct mlx4_cmd_mailbox *outbox,
3354			  struct mlx4_cmd_info *cmd)
3355{
3356	int eqn = vhcr->in_modifier;
3357	int res_id = eqn | (slave << 10);
3358	struct res_eq *eq;
3359	int err;
3360
3361	err = get_res(dev, slave, res_id, RES_EQ, &eq);
3362	if (err)
3363		return err;
3364
3365	if (eq->com.from_state != RES_EQ_HW) {
3366		err = -EINVAL;
3367		goto ex_put;
3368	}
3369
3370	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3371
3372ex_put:
3373	put_res(dev, slave, res_id, RES_EQ);
3374	return err;
3375}
3376
3377int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3378			  struct mlx4_vhcr *vhcr,
3379			  struct mlx4_cmd_mailbox *inbox,
3380			  struct mlx4_cmd_mailbox *outbox,
3381			  struct mlx4_cmd_info *cmd)
3382{
3383	int err;
3384	int cqn = vhcr->in_modifier;
3385	struct mlx4_cq_context *cqc = inbox->buf;
3386	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3387	struct res_cq *cq = NULL;
3388	struct res_mtt *mtt;
3389
3390	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
3391	if (err)
3392		return err;
3393	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3394	if (err)
3395		goto out_move;
3396	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3397	if (err)
3398		goto out_put;
3399	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3400	if (err)
3401		goto out_put;
3402	atomic_inc(&mtt->ref_count);
3403	cq->mtt = mtt;
3404	put_res(dev, slave, mtt->com.res_id, RES_MTT);
3405	res_end_move(dev, slave, RES_CQ, cqn);
3406	return 0;
3407
3408out_put:
3409	put_res(dev, slave, mtt->com.res_id, RES_MTT);
3410out_move:
3411	res_abort_move(dev, slave, RES_CQ, cqn);
3412	return err;
3413}
3414
3415int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3416			  struct mlx4_vhcr *vhcr,
3417			  struct mlx4_cmd_mailbox *inbox,
3418			  struct mlx4_cmd_mailbox *outbox,
3419			  struct mlx4_cmd_info *cmd)
3420{
3421	int err;
3422	int cqn = vhcr->in_modifier;
3423	struct res_cq *cq = NULL;
3424
3425	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
3426	if (err)
3427		return err;
3428	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3429	if (err)
3430		goto out_move;
3431	atomic_dec(&cq->mtt->ref_count);
3432	res_end_move(dev, slave, RES_CQ, cqn);
3433	return 0;
3434
3435out_move:
3436	res_abort_move(dev, slave, RES_CQ, cqn);
3437	return err;
3438}
3439
3440int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3441			  struct mlx4_vhcr *vhcr,
3442			  struct mlx4_cmd_mailbox *inbox,
3443			  struct mlx4_cmd_mailbox *outbox,
3444			  struct mlx4_cmd_info *cmd)
3445{
3446	int cqn = vhcr->in_modifier;
3447	struct res_cq *cq;
3448	int err;
3449
3450	err = get_res(dev, slave, cqn, RES_CQ, &cq);
3451	if (err)
3452		return err;
3453
3454	if (cq->com.from_state != RES_CQ_HW)
3455		goto ex_put;
3456
3457	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3458ex_put:
3459	put_res(dev, slave, cqn, RES_CQ);
3460
3461	return err;
3462}
3463
3464static int handle_resize(struct mlx4_dev *dev, int slave,
3465			 struct mlx4_vhcr *vhcr,
3466			 struct mlx4_cmd_mailbox *inbox,
3467			 struct mlx4_cmd_mailbox *outbox,
3468			 struct mlx4_cmd_info *cmd,
3469			 struct res_cq *cq)
3470{
3471	int err;
3472	struct res_mtt *orig_mtt;
3473	struct res_mtt *mtt;
3474	struct mlx4_cq_context *cqc = inbox->buf;
3475	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3476
3477	err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
3478	if (err)
3479		return err;
3480
3481	if (orig_mtt != cq->mtt) {
3482		err = -EINVAL;
3483		goto ex_put;
3484	}
3485
3486	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3487	if (err)
3488		goto ex_put;
3489
3490	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3491	if (err)
3492		goto ex_put1;
3493	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3494	if (err)
3495		goto ex_put1;
3496	atomic_dec(&orig_mtt->ref_count);
3497	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3498	atomic_inc(&mtt->ref_count);
3499	cq->mtt = mtt;
3500	put_res(dev, slave, mtt->com.res_id, RES_MTT);
3501	return 0;
3502
3503ex_put1:
3504	put_res(dev, slave, mtt->com.res_id, RES_MTT);
3505ex_put:
3506	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3507
3508	return err;
3509
3510}
3511
3512int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3513			   struct mlx4_vhcr *vhcr,
3514			   struct mlx4_cmd_mailbox *inbox,
3515			   struct mlx4_cmd_mailbox *outbox,
3516			   struct mlx4_cmd_info *cmd)
3517{
3518	int cqn = vhcr->in_modifier;
3519	struct res_cq *cq;
3520	int err;
3521
3522	err = get_res(dev, slave, cqn, RES_CQ, &cq);
3523	if (err)
3524		return err;
3525
3526	if (cq->com.from_state != RES_CQ_HW)
3527		goto ex_put;
3528
3529	if (vhcr->op_modifier == 0) {
3530		err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
3531		goto ex_put;
3532	}
3533
3534	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3535ex_put:
3536	put_res(dev, slave, cqn, RES_CQ);
3537
3538	return err;
3539}
3540
3541static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
3542{
3543	int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
3544	int log_rq_stride = srqc->logstride & 7;
3545	int page_shift = (srqc->log_page_size & 0x3f) + 12;
3546
3547	if (log_srq_size + log_rq_stride + 4 < page_shift)
3548		return 1;
3549
3550	return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
3551}
3552
3553int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3554			   struct mlx4_vhcr *vhcr,
3555			   struct mlx4_cmd_mailbox *inbox,
3556			   struct mlx4_cmd_mailbox *outbox,
3557			   struct mlx4_cmd_info *cmd)
3558{
3559	int err;
3560	int srqn = vhcr->in_modifier;
3561	struct res_mtt *mtt;
3562	struct res_srq *srq = NULL;
3563	struct mlx4_srq_context *srqc = inbox->buf;
3564	int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
3565
3566	if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
3567		return -EINVAL;
3568
3569	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
3570	if (err)
3571		return err;
3572	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3573	if (err)
3574		goto ex_abort;
3575	err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
3576			      mtt);
3577	if (err)
3578		goto ex_put_mtt;
3579
3580	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3581	if (err)
3582		goto ex_put_mtt;
3583
3584	atomic_inc(&mtt->ref_count);
3585	srq->mtt = mtt;
3586	put_res(dev, slave, mtt->com.res_id, RES_MTT);
3587	res_end_move(dev, slave, RES_SRQ, srqn);
3588	return 0;
3589
3590ex_put_mtt:
3591	put_res(dev, slave, mtt->com.res_id, RES_MTT);
3592ex_abort:
3593	res_abort_move(dev, slave, RES_SRQ, srqn);
3594
3595	return err;
3596}
3597
3598int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3599			   struct mlx4_vhcr *vhcr,
3600			   struct mlx4_cmd_mailbox *inbox,
3601			   struct mlx4_cmd_mailbox *outbox,
3602			   struct mlx4_cmd_info *cmd)
3603{
3604	int err;
3605	int srqn = vhcr->in_modifier;
3606	struct res_srq *srq = NULL;
3607
3608	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
3609	if (err)
3610		return err;
3611	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3612	if (err)
3613		goto ex_abort;
3614	atomic_dec(&srq->mtt->ref_count);
3615	if (srq->cq)
3616		atomic_dec(&srq->cq->ref_count);
3617	res_end_move(dev, slave, RES_SRQ, srqn);
3618
3619	return 0;
3620
3621ex_abort:
3622	res_abort_move(dev, slave, RES_SRQ, srqn);
3623
3624	return err;
3625}
3626
3627int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3628			   struct mlx4_vhcr *vhcr,
3629			   struct mlx4_cmd_mailbox *inbox,
3630			   struct mlx4_cmd_mailbox *outbox,
3631			   struct mlx4_cmd_info *cmd)
3632{
3633	int err;
3634	int srqn = vhcr->in_modifier;
3635	struct res_srq *srq;
3636
3637	err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3638	if (err)
3639		return err;
3640	if (srq->com.from_state != RES_SRQ_HW) {
3641		err = -EBUSY;
3642		goto out;
3643	}
3644	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3645out:
3646	put_res(dev, slave, srqn, RES_SRQ);
3647	return err;
3648}
3649
3650int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3651			 struct mlx4_vhcr *vhcr,
3652			 struct mlx4_cmd_mailbox *inbox,
3653			 struct mlx4_cmd_mailbox *outbox,
3654			 struct mlx4_cmd_info *cmd)
3655{
3656	int err;
3657	int srqn = vhcr->in_modifier;
3658	struct res_srq *srq;
3659
3660	err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3661	if (err)
3662		return err;
3663
3664	if (srq->com.from_state != RES_SRQ_HW) {
3665		err = -EBUSY;
3666		goto out;
3667	}
3668
3669	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3670out:
3671	put_res(dev, slave, srqn, RES_SRQ);
3672	return err;
3673}
3674
3675int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
3676			struct mlx4_vhcr *vhcr,
3677			struct mlx4_cmd_mailbox *inbox,
3678			struct mlx4_cmd_mailbox *outbox,
3679			struct mlx4_cmd_info *cmd)
3680{
3681	int err;
3682	int qpn = vhcr->in_modifier & 0x7fffff;
3683	struct res_qp *qp;
3684
3685	err = get_res(dev, slave, qpn, RES_QP, &qp);
3686	if (err)
3687		return err;
3688	if (qp->com.from_state != RES_QP_HW) {
3689		err = -EBUSY;
3690		goto out;
3691	}
3692
3693	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3694out:
3695	put_res(dev, slave, qpn, RES_QP);
3696	return err;
3697}
3698
3699int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
3700			      struct mlx4_vhcr *vhcr,
3701			      struct mlx4_cmd_mailbox *inbox,
3702			      struct mlx4_cmd_mailbox *outbox,
3703			      struct mlx4_cmd_info *cmd)
3704{
3705	struct mlx4_qp_context *context = inbox->buf + 8;
3706	adjust_proxy_tun_qkey(dev, vhcr, context);
3707	update_pkey_index(dev, slave, inbox);
3708	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3709}
3710
3711static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
3712				  struct mlx4_qp_context *qpc,
3713				  struct mlx4_cmd_mailbox *inbox)
3714{
3715	enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf);
3716	u8 pri_sched_queue;
3717	int port = mlx4_slave_convert_port(
3718		   dev, slave, (qpc->pri_path.sched_queue >> 6 & 1) + 1) - 1;
3719
3720	if (port < 0)
3721		return -EINVAL;
3722
3723	pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) |
3724			  ((port & 1) << 6);
3725
3726	if (optpar & (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH | MLX4_QP_OPTPAR_SCHED_QUEUE) ||
3727	    qpc->pri_path.sched_queue || mlx4_is_eth(dev, port + 1)) {
3728		qpc->pri_path.sched_queue = pri_sched_queue;
3729	}
3730
3731	if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3732		port = mlx4_slave_convert_port(
3733				dev, slave, (qpc->alt_path.sched_queue >> 6 & 1)
3734				+ 1) - 1;
3735		if (port < 0)
3736			return -EINVAL;
3737		qpc->alt_path.sched_queue =
3738			(qpc->alt_path.sched_queue & ~(1 << 6)) |
3739			(port & 1) << 6;
3740	}
3741	return 0;
3742}
3743
3744static int roce_verify_mac(struct mlx4_dev *dev, int slave,
3745				struct mlx4_qp_context *qpc,
3746				struct mlx4_cmd_mailbox *inbox)
3747{
3748	u64 mac;
3749	int port;
3750	u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
3751	u8 sched = *(u8 *)(inbox->buf + 64);
3752	u8 smac_ix;
3753
3754	port = (sched >> 6 & 1) + 1;
3755	if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) {
3756		smac_ix = qpc->pri_path.grh_mylmc & 0x7f;
3757		if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac))
3758			return -ENOENT;
3759	}
3760	return 0;
3761}
3762
3763int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
3764			     struct mlx4_vhcr *vhcr,
3765			     struct mlx4_cmd_mailbox *inbox,
3766			     struct mlx4_cmd_mailbox *outbox,
3767			     struct mlx4_cmd_info *cmd)
3768{
3769	int err;
3770	struct mlx4_qp_context *qpc = inbox->buf + 8;
3771	int qpn = vhcr->in_modifier & 0x7fffff;
3772	struct res_qp *qp;
3773	u8 orig_sched_queue;
3774	__be32	orig_param3 = qpc->param3;
3775	u8 orig_vlan_control = qpc->pri_path.vlan_control;
3776	u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
3777	u8 orig_pri_path_fl = qpc->pri_path.fl;
3778	u8 orig_vlan_index = qpc->pri_path.vlan_index;
3779	u8 orig_feup = qpc->pri_path.feup;
3780
3781	err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
3782	if (err)
3783		return err;
3784	err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_INIT2RTR, slave);
3785	if (err)
3786		return err;
3787
3788	if (roce_verify_mac(dev, slave, qpc, inbox))
3789		return -EINVAL;
3790
3791	update_pkey_index(dev, slave, inbox);
3792	update_gid(dev, inbox, (u8)slave);
3793	adjust_proxy_tun_qkey(dev, vhcr, qpc);
3794	orig_sched_queue = qpc->pri_path.sched_queue;
3795
3796	err = get_res(dev, slave, qpn, RES_QP, &qp);
3797	if (err)
3798		return err;
3799	if (qp->com.from_state != RES_QP_HW) {
3800		err = -EBUSY;
3801		goto out;
3802	}
3803
3804	err = update_vport_qp_param(dev, inbox, slave, qpn);
3805	if (err)
3806		goto out;
3807
3808	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3809out:
3810	/* if no error, save sched queue value passed in by VF. This is
3811	 * essentially the QOS value provided by the VF. This will be useful
3812	 * if we allow dynamic changes from VST back to VGT
3813	 */
3814	if (!err) {
3815		qp->sched_queue = orig_sched_queue;
3816		qp->param3	= orig_param3;
3817		qp->vlan_control = orig_vlan_control;
3818		qp->fvl_rx	=  orig_fvl_rx;
3819		qp->pri_path_fl = orig_pri_path_fl;
3820		qp->vlan_index  = orig_vlan_index;
3821		qp->feup	= orig_feup;
3822	}
3823	put_res(dev, slave, qpn, RES_QP);
3824	return err;
3825}
3826
3827int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3828			    struct mlx4_vhcr *vhcr,
3829			    struct mlx4_cmd_mailbox *inbox,
3830			    struct mlx4_cmd_mailbox *outbox,
3831			    struct mlx4_cmd_info *cmd)
3832{
3833	int err;
3834	struct mlx4_qp_context *context = inbox->buf + 8;
3835
3836	err = adjust_qp_sched_queue(dev, slave, context, inbox);
3837	if (err)
3838		return err;
3839	err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTR2RTS, slave);
3840	if (err)
3841		return err;
3842
3843	update_pkey_index(dev, slave, inbox);
3844	update_gid(dev, inbox, (u8)slave);
3845	adjust_proxy_tun_qkey(dev, vhcr, context);
3846	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3847}
3848
3849int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3850			    struct mlx4_vhcr *vhcr,
3851			    struct mlx4_cmd_mailbox *inbox,
3852			    struct mlx4_cmd_mailbox *outbox,
3853			    struct mlx4_cmd_info *cmd)
3854{
3855	int err;
3856	struct mlx4_qp_context *context = inbox->buf + 8;
3857
3858	err = adjust_qp_sched_queue(dev, slave, context, inbox);
3859	if (err)
3860		return err;
3861	err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTS2RTS, slave);
3862	if (err)
3863		return err;
3864
3865	update_pkey_index(dev, slave, inbox);
3866	update_gid(dev, inbox, (u8)slave);
3867	adjust_proxy_tun_qkey(dev, vhcr, context);
3868	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3869}
3870
3871
3872int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3873			      struct mlx4_vhcr *vhcr,
3874			      struct mlx4_cmd_mailbox *inbox,
3875			      struct mlx4_cmd_mailbox *outbox,
3876			      struct mlx4_cmd_info *cmd)
3877{
3878	struct mlx4_qp_context *context = inbox->buf + 8;
3879	int err = adjust_qp_sched_queue(dev, slave, context, inbox);
3880	if (err)
3881		return err;
3882	adjust_proxy_tun_qkey(dev, vhcr, context);
3883	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3884}
3885
3886int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
3887			    struct mlx4_vhcr *vhcr,
3888			    struct mlx4_cmd_mailbox *inbox,
3889			    struct mlx4_cmd_mailbox *outbox,
3890			    struct mlx4_cmd_info *cmd)
3891{
3892	int err;
3893	struct mlx4_qp_context *context = inbox->buf + 8;
3894
3895	err = adjust_qp_sched_queue(dev, slave, context, inbox);
3896	if (err)
3897		return err;
3898	err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2SQD, slave);
3899	if (err)
3900		return err;
3901
3902	adjust_proxy_tun_qkey(dev, vhcr, context);
3903	update_gid(dev, inbox, (u8)slave);
3904	update_pkey_index(dev, slave, inbox);
3905	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3906}
3907
3908int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3909			    struct mlx4_vhcr *vhcr,
3910			    struct mlx4_cmd_mailbox *inbox,
3911			    struct mlx4_cmd_mailbox *outbox,
3912			    struct mlx4_cmd_info *cmd)
3913{
3914	int err;
3915	struct mlx4_qp_context *context = inbox->buf + 8;
3916
3917	err = adjust_qp_sched_queue(dev, slave, context, inbox);
3918	if (err)
3919		return err;
3920	err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2RTS, slave);
3921	if (err)
3922		return err;
3923
3924	adjust_proxy_tun_qkey(dev, vhcr, context);
3925	update_gid(dev, inbox, (u8)slave);
3926	update_pkey_index(dev, slave, inbox);
3927	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3928}
3929
3930int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
3931			 struct mlx4_vhcr *vhcr,
3932			 struct mlx4_cmd_mailbox *inbox,
3933			 struct mlx4_cmd_mailbox *outbox,
3934			 struct mlx4_cmd_info *cmd)
3935{
3936	int err;
3937	int qpn = vhcr->in_modifier & 0x7fffff;
3938	struct res_qp *qp;
3939
3940	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
3941	if (err)
3942		return err;
3943	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3944	if (err)
3945		goto ex_abort;
3946
3947	atomic_dec(&qp->mtt->ref_count);
3948	atomic_dec(&qp->rcq->ref_count);
3949	atomic_dec(&qp->scq->ref_count);
3950	if (qp->srq)
3951		atomic_dec(&qp->srq->ref_count);
3952	res_end_move(dev, slave, RES_QP, qpn);
3953	return 0;
3954
3955ex_abort:
3956	res_abort_move(dev, slave, RES_QP, qpn);
3957
3958	return err;
3959}
3960
3961static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
3962				struct res_qp *rqp, u8 *gid)
3963{
3964	struct res_gid *res;
3965
3966	list_for_each_entry(res, &rqp->mcg_list, list) {
3967		if (!memcmp(res->gid, gid, 16))
3968			return res;
3969	}
3970	return NULL;
3971}
3972
3973static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3974		       u8 *gid, enum mlx4_protocol prot,
3975		       enum mlx4_steer_type steer, u64 reg_id)
3976{
3977	struct res_gid *res;
3978	int err;
3979
3980	res = kzalloc(sizeof *res, GFP_KERNEL);
3981	if (!res)
3982		return -ENOMEM;
3983
3984	spin_lock_irq(&rqp->mcg_spl);
3985	if (find_gid(dev, slave, rqp, gid)) {
3986		kfree(res);
3987		err = -EEXIST;
3988	} else {
3989		memcpy(res->gid, gid, 16);
3990		res->prot = prot;
3991		res->steer = steer;
3992		res->reg_id = reg_id;
3993		list_add_tail(&res->list, &rqp->mcg_list);
3994		err = 0;
3995	}
3996	spin_unlock_irq(&rqp->mcg_spl);
3997
3998	return err;
3999}
4000
4001static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
4002		       u8 *gid, enum mlx4_protocol prot,
4003		       enum mlx4_steer_type steer, u64 *reg_id)
4004{
4005	struct res_gid *res;
4006	int err;
4007
4008	spin_lock_irq(&rqp->mcg_spl);
4009	res = find_gid(dev, slave, rqp, gid);
4010	if (!res || res->prot != prot || res->steer != steer)
4011		err = -EINVAL;
4012	else {
4013		*reg_id = res->reg_id;
4014		list_del(&res->list);
4015		kfree(res);
4016		err = 0;
4017	}
4018	spin_unlock_irq(&rqp->mcg_spl);
4019
4020	return err;
4021}
4022
4023static int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp,
4024		     u8 gid[16], int block_loopback, enum mlx4_protocol prot,
4025		     enum mlx4_steer_type type, u64 *reg_id)
4026{
4027	switch (dev->caps.steering_mode) {
4028	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
4029		int port = mlx4_slave_convert_port(dev, slave, gid[5]);
4030		if (port < 0)
4031			return port;
4032		return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
4033						block_loopback, prot,
4034						reg_id);
4035	}
4036	case MLX4_STEERING_MODE_B0:
4037		if (prot == MLX4_PROT_ETH) {
4038			int port = mlx4_slave_convert_port(dev, slave, gid[5]);
4039			if (port < 0)
4040				return port;
4041			gid[5] = port;
4042		}
4043		return mlx4_qp_attach_common(dev, qp, gid,
4044					    block_loopback, prot, type);
4045	default:
4046		return -EINVAL;
4047	}
4048}
4049
4050static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
4051		     u8 gid[16], enum mlx4_protocol prot,
4052		     enum mlx4_steer_type type, u64 reg_id)
4053{
4054	switch (dev->caps.steering_mode) {
4055	case MLX4_STEERING_MODE_DEVICE_MANAGED:
4056		return mlx4_flow_detach(dev, reg_id);
4057	case MLX4_STEERING_MODE_B0:
4058		return mlx4_qp_detach_common(dev, qp, gid, prot, type);
4059	default:
4060		return -EINVAL;
4061	}
4062}
4063
4064static int mlx4_adjust_port(struct mlx4_dev *dev, int slave,
4065			    u8 *gid, enum mlx4_protocol prot)
4066{
4067	int real_port;
4068
4069	if (prot != MLX4_PROT_ETH)
4070		return 0;
4071
4072	if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 ||
4073	    dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
4074		real_port = mlx4_slave_convert_port(dev, slave, gid[5]);
4075		if (real_port < 0)
4076			return -EINVAL;
4077		gid[5] = real_port;
4078	}
4079
4080	return 0;
4081}
4082
4083int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4084			       struct mlx4_vhcr *vhcr,
4085			       struct mlx4_cmd_mailbox *inbox,
4086			       struct mlx4_cmd_mailbox *outbox,
4087			       struct mlx4_cmd_info *cmd)
4088{
4089	struct mlx4_qp qp; /* dummy for calling attach/detach */
4090	u8 *gid = inbox->buf;
4091	enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
4092	int err;
4093	int qpn;
4094	struct res_qp *rqp;
4095	u64 reg_id = 0;
4096	int attach = vhcr->op_modifier;
4097	int block_loopback = vhcr->in_modifier >> 31;
4098	u8 steer_type_mask = 2;
4099	enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
4100
4101	qpn = vhcr->in_modifier & 0xffffff;
4102	err = get_res(dev, slave, qpn, RES_QP, &rqp);
4103	if (err)
4104		return err;
4105
4106	qp.qpn = qpn;
4107	if (attach) {
4108		err = qp_attach(dev, slave, &qp, gid, block_loopback, prot,
4109				type, &reg_id);
4110		if (err) {
4111			pr_err("Fail to attach rule to qp 0x%x\n", qpn);
4112			goto ex_put;
4113		}
4114		err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
4115		if (err)
4116			goto ex_detach;
4117	} else {
4118		err = mlx4_adjust_port(dev, slave, gid, prot);
4119		if (err)
4120			goto ex_put;
4121
4122		err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
4123		if (err)
4124			goto ex_put;
4125
4126		err = qp_detach(dev, &qp, gid, prot, type, reg_id);
4127		if (err)
4128			pr_err("Fail to detach rule from qp 0x%x reg_id = 0x%llx\n",
4129			       qpn, (unsigned long long)reg_id);
4130	}
4131	put_res(dev, slave, qpn, RES_QP);
4132	return err;
4133
4134ex_detach:
4135	qp_detach(dev, &qp, gid, prot, type, reg_id);
4136ex_put:
4137	put_res(dev, slave, qpn, RES_QP);
4138	return err;
4139}
4140
4141/*
4142 * MAC validation for Flow Steering rules.
4143 * VF can attach rules only with a mac address which is assigned to it.
4144 */
4145static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
4146				   struct list_head *rlist)
4147{
4148	struct mac_res *res, *tmp;
4149	__be64 be_mac;
4150
4151	/* make sure it isn't multicast or broadcast mac*/
4152	if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
4153	    !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
4154		list_for_each_entry_safe(res, tmp, rlist, list) {
4155			be_mac = cpu_to_be64(res->mac << 16);
4156			if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac))
4157				return 0;
4158		}
4159		pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
4160		       eth_header->eth.dst_mac, slave);
4161		return -EINVAL;
4162	}
4163	return 0;
4164}
4165
4166static void handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl,
4167					 struct _rule_hw *eth_header)
4168{
4169	if (is_multicast_ether_addr(eth_header->eth.dst_mac) ||
4170	    is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
4171		struct mlx4_net_trans_rule_hw_eth *eth =
4172			(struct mlx4_net_trans_rule_hw_eth *)eth_header;
4173		struct _rule_hw *next_rule = (struct _rule_hw *)(eth + 1);
4174		bool last_rule = next_rule->size == 0 && next_rule->id == 0 &&
4175			next_rule->rsvd == 0;
4176
4177		if (last_rule)
4178			ctrl->prio = cpu_to_be16(MLX4_DOMAIN_NIC);
4179	}
4180}
4181
4182/*
4183 * In case of missing eth header, append eth header with a MAC address
4184 * assigned to the VF.
4185 */
4186static int add_eth_header(struct mlx4_dev *dev, int slave,
4187			  struct mlx4_cmd_mailbox *inbox,
4188			  struct list_head *rlist, int header_id)
4189{
4190	struct mac_res *res, *tmp;
4191	u8 port;
4192	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
4193	struct mlx4_net_trans_rule_hw_eth *eth_header;
4194	struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
4195	struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
4196	__be64 be_mac = 0;
4197	__be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
4198
4199	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
4200	port = ctrl->port;
4201	eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
4202
4203	/* Clear a space in the inbox for eth header */
4204	switch (header_id) {
4205	case MLX4_NET_TRANS_RULE_ID_IPV4:
4206		ip_header =
4207			(struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
4208		memmove(ip_header, eth_header,
4209			sizeof(*ip_header) + sizeof(*l4_header));
4210		break;
4211	case MLX4_NET_TRANS_RULE_ID_TCP:
4212	case MLX4_NET_TRANS_RULE_ID_UDP:
4213		l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
4214			    (eth_header + 1);
4215		memmove(l4_header, eth_header, sizeof(*l4_header));
4216		break;
4217	default:
4218		return -EINVAL;
4219	}
4220	list_for_each_entry_safe(res, tmp, rlist, list) {
4221		if (port == res->port) {
4222			be_mac = cpu_to_be64(res->mac << 16);
4223			break;
4224		}
4225	}
4226	if (!be_mac) {
4227		pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d\n",
4228		       port);
4229		return -EINVAL;
4230	}
4231
4232	memset(eth_header, 0, sizeof(*eth_header));
4233	eth_header->size = sizeof(*eth_header) >> 2;
4234	eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
4235	memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
4236	memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
4237
4238	return 0;
4239}
4240
4241#define MLX4_UPD_QP_PATH_MASK_SUPPORTED      (                                \
4242	1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX                     |\
4243	1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB)
4244int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
4245			   struct mlx4_vhcr *vhcr,
4246			   struct mlx4_cmd_mailbox *inbox,
4247			   struct mlx4_cmd_mailbox *outbox,
4248			   struct mlx4_cmd_info *cmd_info)
4249{
4250	int err;
4251	u32 qpn = vhcr->in_modifier & 0xffffff;
4252	struct res_qp *rqp;
4253	u64 mac;
4254	unsigned port;
4255	u64 pri_addr_path_mask;
4256	struct mlx4_update_qp_context *cmd;
4257	int smac_index;
4258
4259	cmd = (struct mlx4_update_qp_context *)inbox->buf;
4260
4261	pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
4262	if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
4263	    (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
4264		return -EPERM;
4265
4266	if ((pri_addr_path_mask &
4267	     (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB)) &&
4268		!(dev->caps.flags2 &
4269		  MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB)) {
4270		mlx4_warn(dev, "Src check LB for slave %d isn't supported\n",
4271			  slave);
4272		return -ENOTSUPP;
4273	}
4274
4275	/* Just change the smac for the QP */
4276	err = get_res(dev, slave, qpn, RES_QP, &rqp);
4277	if (err) {
4278		mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
4279		return err;
4280	}
4281
4282	port = (rqp->sched_queue >> 6 & 1) + 1;
4283
4284	if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) {
4285		smac_index = cmd->qp_context.pri_path.grh_mylmc;
4286		err = mac_find_smac_ix_in_slave(dev, slave, port,
4287						smac_index, &mac);
4288
4289		if (err) {
4290			mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
4291				 qpn, smac_index);
4292			goto err_mac;
4293		}
4294	}
4295
4296	err = mlx4_cmd(dev, inbox->dma,
4297		       vhcr->in_modifier, 0,
4298		       MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
4299		       MLX4_CMD_NATIVE);
4300	if (err) {
4301		mlx4_err(dev, "Failed to update qpn on qpn 0x%x, command failed\n", qpn);
4302		goto err_mac;
4303	}
4304
4305err_mac:
4306	put_res(dev, slave, qpn, RES_QP);
4307	return err;
4308}
4309
4310static u32 qp_attach_mbox_size(void *mbox)
4311{
4312	u32 size = sizeof(struct mlx4_net_trans_rule_hw_ctrl);
4313	struct _rule_hw  *rule_header;
4314
4315	rule_header = (struct _rule_hw *)(mbox + size);
4316
4317	while (rule_header->size) {
4318		size += rule_header->size * sizeof(u32);
4319		rule_header += 1;
4320	}
4321	return size;
4322}
4323
4324static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule);
4325
4326int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4327					 struct mlx4_vhcr *vhcr,
4328					 struct mlx4_cmd_mailbox *inbox,
4329					 struct mlx4_cmd_mailbox *outbox,
4330					 struct mlx4_cmd_info *cmd)
4331{
4332
4333	struct mlx4_priv *priv = mlx4_priv(dev);
4334	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4335	struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
4336	int err;
4337	int qpn;
4338	struct res_qp *rqp;
4339	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
4340	struct _rule_hw  *rule_header;
4341	int header_id;
4342	struct res_fs_rule *rrule;
4343	u32 mbox_size;
4344
4345	if (dev->caps.steering_mode !=
4346	    MLX4_STEERING_MODE_DEVICE_MANAGED)
4347		return -EOPNOTSUPP;
4348
4349	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
4350	err = mlx4_slave_convert_port(dev, slave, ctrl->port);
4351	if (err <= 0)
4352		return -EINVAL;
4353	ctrl->port = err;
4354	qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
4355	err = get_res(dev, slave, qpn, RES_QP, &rqp);
4356	if (err) {
4357		pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
4358		return err;
4359	}
4360	rule_header = (struct _rule_hw *)(ctrl + 1);
4361	header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
4362
4363	if (header_id == MLX4_NET_TRANS_RULE_ID_ETH)
4364		handle_eth_header_mcast_prio(ctrl, rule_header);
4365
4366	if (slave == dev->caps.function)
4367		goto execute;
4368
4369	switch (header_id) {
4370	case MLX4_NET_TRANS_RULE_ID_ETH:
4371		if (validate_eth_header_mac(slave, rule_header, rlist)) {
4372			err = -EINVAL;
4373			goto err_put_qp;
4374		}
4375		break;
4376	case MLX4_NET_TRANS_RULE_ID_IB:
4377		break;
4378	case MLX4_NET_TRANS_RULE_ID_IPV4:
4379	case MLX4_NET_TRANS_RULE_ID_TCP:
4380	case MLX4_NET_TRANS_RULE_ID_UDP:
4381		pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
4382		if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
4383			err = -EINVAL;
4384			goto err_put_qp;
4385		}
4386		vhcr->in_modifier +=
4387			sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
4388		break;
4389	default:
4390		pr_err("Corrupted mailbox\n");
4391		err = -EINVAL;
4392		goto err_put_qp;
4393	}
4394
4395execute:
4396	err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
4397			   vhcr->in_modifier, 0,
4398			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
4399			   MLX4_CMD_NATIVE);
4400	if (err)
4401		goto err_put_qp;
4402
4403
4404	err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
4405	if (err) {
4406		mlx4_err(dev, "Fail to add flow steering resources\n");
4407		goto err_detach;
4408	}
4409
4410	err = get_res(dev, slave, vhcr->out_param, RES_FS_RULE, &rrule);
4411	if (err)
4412		goto err_detach;
4413
4414	mbox_size = qp_attach_mbox_size(inbox->buf);
4415	rrule->mirr_mbox = kmalloc(mbox_size, GFP_KERNEL);
4416	if (!rrule->mirr_mbox) {
4417		err = -ENOMEM;
4418		goto err_put_rule;
4419	}
4420	rrule->mirr_mbox_size = mbox_size;
4421	rrule->mirr_rule_id = 0;
4422	memcpy(rrule->mirr_mbox, inbox->buf, mbox_size);
4423
4424	/* set different port */
4425	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)rrule->mirr_mbox;
4426	if (ctrl->port == 1)
4427		ctrl->port = 2;
4428	else
4429		ctrl->port = 1;
4430
4431	if (mlx4_is_bonded(dev))
4432		mlx4_do_mirror_rule(dev, rrule);
4433
4434	atomic_inc(&rqp->ref_count);
4435
4436err_put_rule:
4437	put_res(dev, slave, vhcr->out_param, RES_FS_RULE);
4438err_detach:
4439	/* detach rule on error */
4440	if (err)
4441		mlx4_cmd(dev, vhcr->out_param, 0, 0,
4442			 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4443			 MLX4_CMD_NATIVE);
4444err_put_qp:
4445	put_res(dev, slave, qpn, RES_QP);
4446	return err;
4447}
4448
4449static int mlx4_undo_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule)
4450{
4451	int err;
4452
4453	err = rem_res_range(dev, fs_rule->com.owner, fs_rule->com.res_id, 1, RES_FS_RULE, 0);
4454	if (err) {
4455		mlx4_err(dev, "Fail to remove flow steering resources\n");
4456		return err;
4457	}
4458
4459	mlx4_cmd(dev, fs_rule->com.res_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH,
4460		 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
4461	return 0;
4462}
4463
4464int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
4465					 struct mlx4_vhcr *vhcr,
4466					 struct mlx4_cmd_mailbox *inbox,
4467					 struct mlx4_cmd_mailbox *outbox,
4468					 struct mlx4_cmd_info *cmd)
4469{
4470	int err;
4471	struct res_qp *rqp;
4472	struct res_fs_rule *rrule;
4473	u64 mirr_reg_id;
4474
4475	if (dev->caps.steering_mode !=
4476	    MLX4_STEERING_MODE_DEVICE_MANAGED)
4477		return -EOPNOTSUPP;
4478
4479	err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
4480	if (err)
4481		return err;
4482
4483	if (!rrule->mirr_mbox) {
4484		mlx4_err(dev, "Mirror rules cannot be removed explicitly\n");
4485		put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
4486		return -EINVAL;
4487	}
4488	mirr_reg_id = rrule->mirr_rule_id;
4489	kfree(rrule->mirr_mbox);
4490
4491	/* Release the rule form busy state before removal */
4492	put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
4493	err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
4494	if (err)
4495		return err;
4496
4497	if (mirr_reg_id && mlx4_is_bonded(dev)) {
4498		err = get_res(dev, slave, mirr_reg_id, RES_FS_RULE, &rrule);
4499		if (err) {
4500			mlx4_err(dev, "Fail to get resource of mirror rule\n");
4501		} else {
4502			put_res(dev, slave, mirr_reg_id, RES_FS_RULE);
4503			mlx4_undo_mirror_rule(dev, rrule);
4504		}
4505	}
4506	err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
4507	if (err) {
4508		mlx4_err(dev, "Fail to remove flow steering resources\n");
4509		goto out;
4510	}
4511
4512	err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
4513		       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4514		       MLX4_CMD_NATIVE);
4515	if (!err)
4516		atomic_dec(&rqp->ref_count);
4517out:
4518	put_res(dev, slave, rrule->qpn, RES_QP);
4519	return err;
4520}
4521
4522enum {
4523	BUSY_MAX_RETRIES = 10
4524};
4525
4526int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
4527			       struct mlx4_vhcr *vhcr,
4528			       struct mlx4_cmd_mailbox *inbox,
4529			       struct mlx4_cmd_mailbox *outbox,
4530			       struct mlx4_cmd_info *cmd)
4531{
4532	int err;
4533	int index = vhcr->in_modifier & 0xffff;
4534
4535	err = get_res(dev, slave, index, RES_COUNTER, NULL);
4536	if (err)
4537		return err;
4538
4539	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
4540	put_res(dev, slave, index, RES_COUNTER);
4541	return err;
4542}
4543
4544static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
4545{
4546	struct res_gid *rgid;
4547	struct res_gid *tmp;
4548	struct mlx4_qp qp; /* dummy for calling attach/detach */
4549
4550	list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
4551		switch (dev->caps.steering_mode) {
4552		case MLX4_STEERING_MODE_DEVICE_MANAGED:
4553			mlx4_flow_detach(dev, rgid->reg_id);
4554			break;
4555		case MLX4_STEERING_MODE_B0:
4556			qp.qpn = rqp->local_qpn;
4557			(void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
4558						     rgid->prot, rgid->steer);
4559			break;
4560		}
4561		list_del(&rgid->list);
4562		kfree(rgid);
4563	}
4564}
4565
4566static int _move_all_busy(struct mlx4_dev *dev, int slave,
4567			  enum mlx4_resource type, int print)
4568{
4569	struct mlx4_priv *priv = mlx4_priv(dev);
4570	struct mlx4_resource_tracker *tracker =
4571		&priv->mfunc.master.res_tracker;
4572	struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
4573	struct res_common *r;
4574	struct res_common *tmp;
4575	int busy;
4576
4577	busy = 0;
4578	spin_lock_irq(mlx4_tlock(dev));
4579	list_for_each_entry_safe(r, tmp, rlist, list) {
4580		if (r->owner == slave) {
4581			if (!r->removing) {
4582				if (r->state == RES_ANY_BUSY) {
4583					if (print)
4584						mlx4_dbg(dev,
4585							 "%s id 0x%llx is busy\n",
4586							  resource_str(type),
4587							  (long long)r->res_id);
4588					++busy;
4589				} else {
4590					r->from_state = r->state;
4591					r->state = RES_ANY_BUSY;
4592					r->removing = 1;
4593				}
4594			}
4595		}
4596	}
4597	spin_unlock_irq(mlx4_tlock(dev));
4598
4599	return busy;
4600}
4601
4602static int move_all_busy(struct mlx4_dev *dev, int slave,
4603			 enum mlx4_resource type)
4604{
4605	unsigned long begin;
4606	int busy;
4607
4608	begin = jiffies;
4609	do {
4610		busy = _move_all_busy(dev, slave, type, 0);
4611		if (time_after(jiffies, begin + 5 * HZ))
4612			break;
4613		if (busy)
4614			cond_resched();
4615	} while (busy);
4616
4617	if (busy)
4618		busy = _move_all_busy(dev, slave, type, 1);
4619
4620	return busy;
4621}
4622static void rem_slave_qps(struct mlx4_dev *dev, int slave)
4623{
4624	struct mlx4_priv *priv = mlx4_priv(dev);
4625	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4626	struct list_head *qp_list =
4627		&tracker->slave_list[slave].res_list[RES_QP];
4628	struct res_qp *qp;
4629	struct res_qp *tmp;
4630	int state;
4631	u64 in_param;
4632	int qpn;
4633	int err;
4634
4635	err = move_all_busy(dev, slave, RES_QP);
4636	if (err)
4637		mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
4638			  slave);
4639
4640	spin_lock_irq(mlx4_tlock(dev));
4641	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4642		spin_unlock_irq(mlx4_tlock(dev));
4643		if (qp->com.owner == slave) {
4644			qpn = qp->com.res_id;
4645			detach_qp(dev, slave, qp);
4646			state = qp->com.from_state;
4647			while (state != 0) {
4648				switch (state) {
4649				case RES_QP_RESERVED:
4650					spin_lock_irq(mlx4_tlock(dev));
4651					rb_erase(&qp->com.node,
4652						 &tracker->res_tree[RES_QP]);
4653					list_del(&qp->com.list);
4654					spin_unlock_irq(mlx4_tlock(dev));
4655					if (!valid_reserved(dev, slave, qpn)) {
4656						__mlx4_qp_release_range(dev, qpn, 1);
4657						mlx4_release_resource(dev, slave,
4658								      RES_QP, 1, 0);
4659					}
4660					kfree(qp);
4661					state = 0;
4662					break;
4663				case RES_QP_MAPPED:
4664					if (!valid_reserved(dev, slave, qpn))
4665						__mlx4_qp_free_icm(dev, qpn);
4666					state = RES_QP_RESERVED;
4667					break;
4668				case RES_QP_HW:
4669					in_param = slave;
4670					err = mlx4_cmd(dev, in_param,
4671						       qp->local_qpn, 2,
4672						       MLX4_CMD_2RST_QP,
4673						       MLX4_CMD_TIME_CLASS_A,
4674						       MLX4_CMD_NATIVE);
4675					if (err)
4676						mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
4677							 slave, qp->local_qpn);
4678					atomic_dec(&qp->rcq->ref_count);
4679					atomic_dec(&qp->scq->ref_count);
4680					atomic_dec(&qp->mtt->ref_count);
4681					if (qp->srq)
4682						atomic_dec(&qp->srq->ref_count);
4683					state = RES_QP_MAPPED;
4684					break;
4685				default:
4686					state = 0;
4687				}
4688			}
4689		}
4690		spin_lock_irq(mlx4_tlock(dev));
4691	}
4692	spin_unlock_irq(mlx4_tlock(dev));
4693}
4694
4695static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
4696{
4697	struct mlx4_priv *priv = mlx4_priv(dev);
4698	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4699	struct list_head *srq_list =
4700		&tracker->slave_list[slave].res_list[RES_SRQ];
4701	struct res_srq *srq;
4702	struct res_srq *tmp;
4703	int state;
4704	u64 in_param;
4705	LIST_HEAD(tlist);
4706	int srqn;
4707	int err;
4708
4709	err = move_all_busy(dev, slave, RES_SRQ);
4710	if (err)
4711		mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n",
4712			  slave);
4713
4714	spin_lock_irq(mlx4_tlock(dev));
4715	list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
4716		spin_unlock_irq(mlx4_tlock(dev));
4717		if (srq->com.owner == slave) {
4718			srqn = srq->com.res_id;
4719			state = srq->com.from_state;
4720			while (state != 0) {
4721				switch (state) {
4722				case RES_SRQ_ALLOCATED:
4723					__mlx4_srq_free_icm(dev, srqn);
4724					spin_lock_irq(mlx4_tlock(dev));
4725					rb_erase(&srq->com.node,
4726						 &tracker->res_tree[RES_SRQ]);
4727					list_del(&srq->com.list);
4728					spin_unlock_irq(mlx4_tlock(dev));
4729					mlx4_release_resource(dev, slave,
4730							      RES_SRQ, 1, 0);
4731					kfree(srq);
4732					state = 0;
4733					break;
4734
4735				case RES_SRQ_HW:
4736					in_param = slave;
4737					err = mlx4_cmd(dev, in_param, srqn, 1,
4738						       MLX4_CMD_HW2SW_SRQ,
4739						       MLX4_CMD_TIME_CLASS_A,
4740						       MLX4_CMD_NATIVE);
4741					if (err)
4742						mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
4743							 slave, srqn);
4744
4745					atomic_dec(&srq->mtt->ref_count);
4746					if (srq->cq)
4747						atomic_dec(&srq->cq->ref_count);
4748					state = RES_SRQ_ALLOCATED;
4749					break;
4750
4751				default:
4752					state = 0;
4753				}
4754			}
4755		}
4756		spin_lock_irq(mlx4_tlock(dev));
4757	}
4758	spin_unlock_irq(mlx4_tlock(dev));
4759}
4760
4761static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
4762{
4763	struct mlx4_priv *priv = mlx4_priv(dev);
4764	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4765	struct list_head *cq_list =
4766		&tracker->slave_list[slave].res_list[RES_CQ];
4767	struct res_cq *cq;
4768	struct res_cq *tmp;
4769	int state;
4770	u64 in_param;
4771	LIST_HEAD(tlist);
4772	int cqn;
4773	int err;
4774
4775	err = move_all_busy(dev, slave, RES_CQ);
4776	if (err)
4777		mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n",
4778			  slave);
4779
4780	spin_lock_irq(mlx4_tlock(dev));
4781	list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
4782		spin_unlock_irq(mlx4_tlock(dev));
4783		if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
4784			cqn = cq->com.res_id;
4785			state = cq->com.from_state;
4786			while (state != 0) {
4787				switch (state) {
4788				case RES_CQ_ALLOCATED:
4789					__mlx4_cq_free_icm(dev, cqn);
4790					spin_lock_irq(mlx4_tlock(dev));
4791					rb_erase(&cq->com.node,
4792						 &tracker->res_tree[RES_CQ]);
4793					list_del(&cq->com.list);
4794					spin_unlock_irq(mlx4_tlock(dev));
4795					mlx4_release_resource(dev, slave,
4796							      RES_CQ, 1, 0);
4797					kfree(cq);
4798					state = 0;
4799					break;
4800
4801				case RES_CQ_HW:
4802					in_param = slave;
4803					err = mlx4_cmd(dev, in_param, cqn, 1,
4804						       MLX4_CMD_HW2SW_CQ,
4805						       MLX4_CMD_TIME_CLASS_A,
4806						       MLX4_CMD_NATIVE);
4807					if (err)
4808						mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
4809							 slave, cqn);
4810					atomic_dec(&cq->mtt->ref_count);
4811					state = RES_CQ_ALLOCATED;
4812					break;
4813
4814				default:
4815					state = 0;
4816				}
4817			}
4818		}
4819		spin_lock_irq(mlx4_tlock(dev));
4820	}
4821	spin_unlock_irq(mlx4_tlock(dev));
4822}
4823
4824static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
4825{
4826	struct mlx4_priv *priv = mlx4_priv(dev);
4827	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4828	struct list_head *mpt_list =
4829		&tracker->slave_list[slave].res_list[RES_MPT];
4830	struct res_mpt *mpt;
4831	struct res_mpt *tmp;
4832	int state;
4833	u64 in_param;
4834	LIST_HEAD(tlist);
4835	int mptn;
4836	int err;
4837
4838	err = move_all_busy(dev, slave, RES_MPT);
4839	if (err)
4840		mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n",
4841			  slave);
4842
4843	spin_lock_irq(mlx4_tlock(dev));
4844	list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
4845		spin_unlock_irq(mlx4_tlock(dev));
4846		if (mpt->com.owner == slave) {
4847			mptn = mpt->com.res_id;
4848			state = mpt->com.from_state;
4849			while (state != 0) {
4850				switch (state) {
4851				case RES_MPT_RESERVED:
4852					__mlx4_mpt_release(dev, mpt->key);
4853					spin_lock_irq(mlx4_tlock(dev));
4854					rb_erase(&mpt->com.node,
4855						 &tracker->res_tree[RES_MPT]);
4856					list_del(&mpt->com.list);
4857					spin_unlock_irq(mlx4_tlock(dev));
4858					mlx4_release_resource(dev, slave,
4859							      RES_MPT, 1, 0);
4860					kfree(mpt);
4861					state = 0;
4862					break;
4863
4864				case RES_MPT_MAPPED:
4865					__mlx4_mpt_free_icm(dev, mpt->key);
4866					state = RES_MPT_RESERVED;
4867					break;
4868
4869				case RES_MPT_HW:
4870					in_param = slave;
4871					err = mlx4_cmd(dev, in_param, mptn, 0,
4872						     MLX4_CMD_HW2SW_MPT,
4873						     MLX4_CMD_TIME_CLASS_A,
4874						     MLX4_CMD_NATIVE);
4875					if (err)
4876						mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
4877							 slave, mptn);
4878					if (mpt->mtt)
4879						atomic_dec(&mpt->mtt->ref_count);
4880					state = RES_MPT_MAPPED;
4881					break;
4882				default:
4883					state = 0;
4884				}
4885			}
4886		}
4887		spin_lock_irq(mlx4_tlock(dev));
4888	}
4889	spin_unlock_irq(mlx4_tlock(dev));
4890}
4891
4892static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
4893{
4894	struct mlx4_priv *priv = mlx4_priv(dev);
4895	struct mlx4_resource_tracker *tracker =
4896		&priv->mfunc.master.res_tracker;
4897	struct list_head *mtt_list =
4898		&tracker->slave_list[slave].res_list[RES_MTT];
4899	struct res_mtt *mtt;
4900	struct res_mtt *tmp;
4901	int state;
4902	LIST_HEAD(tlist);
4903	int base;
4904	int err;
4905
4906	err = move_all_busy(dev, slave, RES_MTT);
4907	if (err)
4908		mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts  - too busy for slave %d\n",
4909			  slave);
4910
4911	spin_lock_irq(mlx4_tlock(dev));
4912	list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
4913		spin_unlock_irq(mlx4_tlock(dev));
4914		if (mtt->com.owner == slave) {
4915			base = mtt->com.res_id;
4916			state = mtt->com.from_state;
4917			while (state != 0) {
4918				switch (state) {
4919				case RES_MTT_ALLOCATED:
4920					__mlx4_free_mtt_range(dev, base,
4921							      mtt->order);
4922					spin_lock_irq(mlx4_tlock(dev));
4923					rb_erase(&mtt->com.node,
4924						 &tracker->res_tree[RES_MTT]);
4925					list_del(&mtt->com.list);
4926					spin_unlock_irq(mlx4_tlock(dev));
4927					mlx4_release_resource(dev, slave, RES_MTT,
4928							      1 << mtt->order, 0);
4929					kfree(mtt);
4930					state = 0;
4931					break;
4932
4933				default:
4934					state = 0;
4935				}
4936			}
4937		}
4938		spin_lock_irq(mlx4_tlock(dev));
4939	}
4940	spin_unlock_irq(mlx4_tlock(dev));
4941}
4942
4943static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule)
4944{
4945	struct mlx4_cmd_mailbox *mailbox;
4946	int err;
4947	struct res_fs_rule *mirr_rule;
4948	u64 reg_id;
4949
4950	mailbox = mlx4_alloc_cmd_mailbox(dev);
4951	if (IS_ERR(mailbox))
4952		return PTR_ERR(mailbox);
4953
4954	if (!fs_rule->mirr_mbox) {
4955		mlx4_err(dev, "rule mirroring mailbox is null\n");
4956		return -EINVAL;
4957	}
4958	memcpy(mailbox->buf, fs_rule->mirr_mbox, fs_rule->mirr_mbox_size);
4959	err = mlx4_cmd_imm(dev, mailbox->dma, &reg_id, fs_rule->mirr_mbox_size >> 2, 0,
4960			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
4961			   MLX4_CMD_NATIVE);
4962	mlx4_free_cmd_mailbox(dev, mailbox);
4963
4964	if (err)
4965		goto err;
4966
4967	err = add_res_range(dev, fs_rule->com.owner, reg_id, 1, RES_FS_RULE, fs_rule->qpn);
4968	if (err)
4969		goto err_detach;
4970
4971	err = get_res(dev, fs_rule->com.owner, reg_id, RES_FS_RULE, &mirr_rule);
4972	if (err)
4973		goto err_rem;
4974
4975	fs_rule->mirr_rule_id = reg_id;
4976	mirr_rule->mirr_rule_id = 0;
4977	mirr_rule->mirr_mbox_size = 0;
4978	mirr_rule->mirr_mbox = NULL;
4979	put_res(dev, fs_rule->com.owner, reg_id, RES_FS_RULE);
4980
4981	return 0;
4982err_rem:
4983	rem_res_range(dev, fs_rule->com.owner, reg_id, 1, RES_FS_RULE, 0);
4984err_detach:
4985	mlx4_cmd(dev, reg_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH,
4986		 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
4987err:
4988	return err;
4989}
4990
4991static int mlx4_mirror_fs_rules(struct mlx4_dev *dev, bool bond)
4992{
4993	struct mlx4_priv *priv = mlx4_priv(dev);
4994	struct mlx4_resource_tracker *tracker =
4995		&priv->mfunc.master.res_tracker;
4996	struct rb_root *root = &tracker->res_tree[RES_FS_RULE];
4997	struct rb_node *p;
4998	struct res_fs_rule *fs_rule;
4999	int err = 0;
5000	LIST_HEAD(mirr_list);
5001
5002	for (p = rb_first(root); p; p = rb_next(p)) {
5003		fs_rule = rb_entry(p, struct res_fs_rule, com.node);
5004		if ((bond && fs_rule->mirr_mbox_size) ||
5005		    (!bond && !fs_rule->mirr_mbox_size))
5006			list_add_tail(&fs_rule->mirr_list, &mirr_list);
5007	}
5008
5009	list_for_each_entry(fs_rule, &mirr_list, mirr_list) {
5010		if (bond)
5011			err += mlx4_do_mirror_rule(dev, fs_rule);
5012		else
5013			err += mlx4_undo_mirror_rule(dev, fs_rule);
5014	}
5015	return err;
5016}
5017
5018int mlx4_bond_fs_rules(struct mlx4_dev *dev)
5019{
5020	return mlx4_mirror_fs_rules(dev, true);
5021}
5022
5023int mlx4_unbond_fs_rules(struct mlx4_dev *dev)
5024{
5025	return mlx4_mirror_fs_rules(dev, false);
5026}
5027
5028static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
5029{
5030	struct mlx4_priv *priv = mlx4_priv(dev);
5031	struct mlx4_resource_tracker *tracker =
5032		&priv->mfunc.master.res_tracker;
5033	struct list_head *fs_rule_list =
5034		&tracker->slave_list[slave].res_list[RES_FS_RULE];
5035	struct res_fs_rule *fs_rule;
5036	struct res_fs_rule *tmp;
5037	int state;
5038	u64 base;
5039	int err;
5040
5041	err = move_all_busy(dev, slave, RES_FS_RULE);
5042	if (err)
5043		mlx4_warn(dev, "rem_slave_fs_rule: Could not move all mtts to busy for slave %d\n",
5044			  slave);
5045
5046	spin_lock_irq(mlx4_tlock(dev));
5047	list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
5048		spin_unlock_irq(mlx4_tlock(dev));
5049		if (fs_rule->com.owner == slave) {
5050			base = fs_rule->com.res_id;
5051			state = fs_rule->com.from_state;
5052			while (state != 0) {
5053				switch (state) {
5054				case RES_FS_RULE_ALLOCATED:
5055					/* detach rule */
5056					err = mlx4_cmd(dev, base, 0, 0,
5057						       MLX4_QP_FLOW_STEERING_DETACH,
5058						       MLX4_CMD_TIME_CLASS_A,
5059						       MLX4_CMD_NATIVE);
5060
5061					spin_lock_irq(mlx4_tlock(dev));
5062					rb_erase(&fs_rule->com.node,
5063						 &tracker->res_tree[RES_FS_RULE]);
5064					list_del(&fs_rule->com.list);
5065					spin_unlock_irq(mlx4_tlock(dev));
5066					kfree(fs_rule);
5067					state = 0;
5068					break;
5069
5070				default:
5071					state = 0;
5072				}
5073			}
5074		}
5075		spin_lock_irq(mlx4_tlock(dev));
5076	}
5077	spin_unlock_irq(mlx4_tlock(dev));
5078}
5079
5080static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
5081{
5082	struct mlx4_priv *priv = mlx4_priv(dev);
5083	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
5084	struct list_head *eq_list =
5085		&tracker->slave_list[slave].res_list[RES_EQ];
5086	struct res_eq *eq;
5087	struct res_eq *tmp;
5088	int err;
5089	int state;
5090	LIST_HEAD(tlist);
5091	int eqn;
5092
5093	err = move_all_busy(dev, slave, RES_EQ);
5094	if (err)
5095		mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n",
5096			  slave);
5097
5098	spin_lock_irq(mlx4_tlock(dev));
5099	list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
5100		spin_unlock_irq(mlx4_tlock(dev));
5101		if (eq->com.owner == slave) {
5102			eqn = eq->com.res_id;
5103			state = eq->com.from_state;
5104			while (state != 0) {
5105				switch (state) {
5106				case RES_EQ_RESERVED:
5107					spin_lock_irq(mlx4_tlock(dev));
5108					rb_erase(&eq->com.node,
5109						 &tracker->res_tree[RES_EQ]);
5110					list_del(&eq->com.list);
5111					spin_unlock_irq(mlx4_tlock(dev));
5112					kfree(eq);
5113					state = 0;
5114					break;
5115
5116				case RES_EQ_HW:
5117					err = mlx4_cmd(dev, slave, eqn & 0x3ff,
5118						       1, MLX4_CMD_HW2SW_EQ,
5119						       MLX4_CMD_TIME_CLASS_A,
5120						       MLX4_CMD_NATIVE);
5121					if (err)
5122						mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
5123							 slave, eqn & 0x3ff);
5124					atomic_dec(&eq->mtt->ref_count);
5125					state = RES_EQ_RESERVED;
5126					break;
5127
5128				default:
5129					state = 0;
5130				}
5131			}
5132		}
5133		spin_lock_irq(mlx4_tlock(dev));
5134	}
5135	spin_unlock_irq(mlx4_tlock(dev));
5136}
5137
5138static void rem_slave_counters(struct mlx4_dev *dev, int slave)
5139{
5140	struct mlx4_priv *priv = mlx4_priv(dev);
5141	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
5142	struct list_head *counter_list =
5143		&tracker->slave_list[slave].res_list[RES_COUNTER];
5144	struct res_counter *counter;
5145	struct res_counter *tmp;
5146	int err;
5147	int *counters_arr = NULL;
5148	int i, j;
5149
5150	err = move_all_busy(dev, slave, RES_COUNTER);
5151	if (err)
5152		mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
5153			  slave);
5154
5155	counters_arr = kmalloc_array(dev->caps.max_counters,
5156				     sizeof(*counters_arr), GFP_KERNEL);
5157	if (!counters_arr)
5158		return;
5159
5160	do {
5161		i = 0;
5162		j = 0;
5163		spin_lock_irq(mlx4_tlock(dev));
5164		list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
5165			if (counter->com.owner == slave) {
5166				counters_arr[i++] = counter->com.res_id;
5167				rb_erase(&counter->com.node,
5168					 &tracker->res_tree[RES_COUNTER]);
5169				list_del(&counter->com.list);
5170				kfree(counter);
5171			}
5172		}
5173		spin_unlock_irq(mlx4_tlock(dev));
5174
5175		while (j < i) {
5176			__mlx4_counter_free(dev, counters_arr[j++]);
5177			mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
5178		}
5179	} while (i);
5180
5181	kfree(counters_arr);
5182}
5183
5184static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
5185{
5186	struct mlx4_priv *priv = mlx4_priv(dev);
5187	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
5188	struct list_head *xrcdn_list =
5189		&tracker->slave_list[slave].res_list[RES_XRCD];
5190	struct res_xrcdn *xrcd;
5191	struct res_xrcdn *tmp;
5192	int err;
5193	int xrcdn;
5194
5195	err = move_all_busy(dev, slave, RES_XRCD);
5196	if (err)
5197		mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n",
5198			  slave);
5199
5200	spin_lock_irq(mlx4_tlock(dev));
5201	list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
5202		if (xrcd->com.owner == slave) {
5203			xrcdn = xrcd->com.res_id;
5204			rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
5205			list_del(&xrcd->com.list);
5206			kfree(xrcd);
5207			__mlx4_xrcd_free(dev, xrcdn);
5208		}
5209	}
5210	spin_unlock_irq(mlx4_tlock(dev));
5211}
5212
5213void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
5214{
5215	struct mlx4_priv *priv = mlx4_priv(dev);
5216	mlx4_reset_roce_gids(dev, slave);
5217	mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
5218	rem_slave_vlans(dev, slave);
5219	rem_slave_macs(dev, slave);
5220	rem_slave_fs_rule(dev, slave);
5221	rem_slave_qps(dev, slave);
5222	rem_slave_srqs(dev, slave);
5223	rem_slave_cqs(dev, slave);
5224	rem_slave_mrs(dev, slave);
5225	rem_slave_eqs(dev, slave);
5226	rem_slave_mtts(dev, slave);
5227	rem_slave_counters(dev, slave);
5228	rem_slave_xrcdns(dev, slave);
5229	mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
5230}
5231
5232void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
5233{
5234	struct mlx4_vf_immed_vlan_work *work =
5235		container_of(_work, struct mlx4_vf_immed_vlan_work, work);
5236	struct mlx4_cmd_mailbox *mailbox;
5237	struct mlx4_update_qp_context *upd_context;
5238	struct mlx4_dev *dev = &work->priv->dev;
5239	struct mlx4_resource_tracker *tracker =
5240		&work->priv->mfunc.master.res_tracker;
5241	struct list_head *qp_list =
5242		&tracker->slave_list[work->slave].res_list[RES_QP];
5243	struct res_qp *qp;
5244	struct res_qp *tmp;
5245	u64 qp_path_mask_vlan_ctrl =
5246		       ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
5247		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
5248		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
5249		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
5250		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
5251		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));
5252
5253	u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
5254		       (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
5255		       (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
5256		       (1ULL << MLX4_UPD_QP_PATH_MASK_SV) |
5257		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
5258		       (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
5259		       (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
5260		       (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));
5261
5262	int err;
5263	int port, errors = 0;
5264	u8 vlan_control;
5265
5266	if (mlx4_is_slave(dev)) {
5267		mlx4_warn(dev, "Trying to update-qp in slave %d\n",
5268			  work->slave);
5269		goto out;
5270	}
5271
5272	mailbox = mlx4_alloc_cmd_mailbox(dev);
5273	if (IS_ERR(mailbox))
5274		goto out;
5275	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
5276		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5277			MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
5278			MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
5279			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
5280			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
5281			MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
5282	else if (!work->vlan_id)
5283		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5284			MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
5285	else if (work->vlan_proto == htons(ETH_P_8021AD))
5286		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
5287			MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5288			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
5289			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
5290	else  /* vst 802.1Q */
5291		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
5292			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
5293			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
5294
5295	upd_context = mailbox->buf;
5296	upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD);
5297
5298	spin_lock_irq(mlx4_tlock(dev));
5299	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
5300		spin_unlock_irq(mlx4_tlock(dev));
5301		if (qp->com.owner == work->slave) {
5302			if (qp->com.from_state != RES_QP_HW ||
5303			    !qp->sched_queue ||  /* no INIT2RTR trans yet */
5304			    mlx4_is_qp_reserved(dev, qp->local_qpn) ||
5305			    qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
5306				spin_lock_irq(mlx4_tlock(dev));
5307				continue;
5308			}
5309			port = (qp->sched_queue >> 6 & 1) + 1;
5310			if (port != work->port) {
5311				spin_lock_irq(mlx4_tlock(dev));
5312				continue;
5313			}
5314			if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
5315				upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
5316			else
5317				upd_context->primary_addr_path_mask =
5318					cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
5319			if (work->vlan_id == MLX4_VGT) {
5320				upd_context->qp_context.param3 = qp->param3;
5321				upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
5322				upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
5323				upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
5324				upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
5325				upd_context->qp_context.pri_path.feup = qp->feup;
5326				upd_context->qp_context.pri_path.sched_queue =
5327					qp->sched_queue;
5328			} else {
5329				upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
5330				upd_context->qp_context.pri_path.vlan_control = vlan_control;
5331				upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
5332				upd_context->qp_context.pri_path.fvl_rx =
5333					qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
5334				upd_context->qp_context.pri_path.fl =
5335					qp->pri_path_fl | MLX4_FL_ETH_HIDE_CQE_VLAN;
5336				if (work->vlan_proto == htons(ETH_P_8021AD))
5337					upd_context->qp_context.pri_path.fl |= MLX4_FL_SV;
5338				else
5339					upd_context->qp_context.pri_path.fl |= MLX4_FL_CV;
5340				upd_context->qp_context.pri_path.feup =
5341					qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
5342				upd_context->qp_context.pri_path.sched_queue =
5343					qp->sched_queue & 0xC7;
5344				upd_context->qp_context.pri_path.sched_queue |=
5345					((work->qos & 0x7) << 3);
5346				upd_context->qp_mask |=
5347					cpu_to_be64(1ULL <<
5348						    MLX4_UPD_QP_MASK_QOS_VPP);
5349				upd_context->qp_context.qos_vport =
5350					work->qos_vport;
5351			}
5352
5353			err = mlx4_cmd(dev, mailbox->dma,
5354				       qp->local_qpn & 0xffffff,
5355				       0, MLX4_CMD_UPDATE_QP,
5356				       MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
5357			if (err) {
5358				mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
5359					  work->slave, port, qp->local_qpn, err);
5360				errors++;
5361			}
5362		}
5363		spin_lock_irq(mlx4_tlock(dev));
5364	}
5365	spin_unlock_irq(mlx4_tlock(dev));
5366	mlx4_free_cmd_mailbox(dev, mailbox);
5367
5368	if (errors)
5369		mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
5370			 errors, work->slave, work->port);
5371
5372	/* unregister previous vlan_id if needed and we had no errors
5373	 * while updating the QPs
5374	 */
5375	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
5376	    NO_INDX != work->orig_vlan_ix)
5377		__mlx4_unregister_vlan(&work->priv->dev, work->port,
5378				       work->orig_vlan_id);
5379out:
5380	kfree(work);
5381	return;
5382}
5383
5384