mlx4_resource_tracker.c revision 299179
1/*
2 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
3 * Copyright (c) 2005, 2006, 2007, 2008, 2014 Mellanox Technologies.
4 * All rights reserved.
5 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses.  You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
12 *
13 *     Redistribution and use in source and binary forms, with or
14 *     without modification, are permitted provided that the following
15 *     conditions are met:
16 *
17 *      - Redistributions of source code must retain the above
18 *        copyright notice, this list of conditions and the following
19 *        disclaimer.
20 *
21 *      - Redistributions in binary form must reproduce the above
22 *        copyright notice, this list of conditions and the following
23 *        disclaimer in the documentation and/or other materials
24 *        provided with the distribution.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
34 */
35
36#include <linux/sched.h>
37#include <linux/pci.h>
38#include <linux/errno.h>
39#include <linux/kernel.h>
40#include <linux/io.h>
41#include <linux/slab.h>
42#include <linux/mlx4/cmd.h>
43#include <linux/mlx4/qp.h>
44#include <linux/if_ether.h>
45#include <linux/etherdevice.h>
46
47#include "mlx4.h"
48#include "fw.h"
49
50#define MLX4_MAC_VALID		(1ull << 63)
51
52struct mac_res {
53	struct list_head list;
54	u64 mac;
55	int ref_count;
56	u8 smac_index;
57	u8 port;
58};
59
60struct vlan_res {
61	struct list_head list;
62	u16 vlan;
63	int ref_count;
64	int vlan_index;
65	u8 port;
66};
67
68struct res_common {
69	struct list_head	list;
70	struct rb_node		node;
71	u64		        res_id;
72	int			owner;
73	int			state;
74	int			from_state;
75	int			to_state;
76	int			removing;
77};
78
79enum {
80	RES_ANY_BUSY = 1
81};
82
83struct res_gid {
84	struct list_head	list;
85	u8			gid[16];
86	enum mlx4_protocol	prot;
87	enum mlx4_steer_type	steer;
88	u64			reg_id;
89};
90
91enum res_qp_states {
92	RES_QP_BUSY = RES_ANY_BUSY,
93
94	/* QP number was allocated */
95	RES_QP_RESERVED,
96
97	/* ICM memory for QP context was mapped */
98	RES_QP_MAPPED,
99
100	/* QP is in hw ownership */
101	RES_QP_HW
102};
103
104struct res_qp {
105	struct res_common	com;
106	struct res_mtt	       *mtt;
107	struct res_cq	       *rcq;
108	struct res_cq	       *scq;
109	struct res_srq	       *srq;
110	struct list_head	mcg_list;
111	spinlock_t		mcg_spl;
112	int			local_qpn;
113	atomic_t		ref_count;
114	u32			qpc_flags;
115	/* saved qp params before VST enforcement in order to restore on VGT */
116	u8			sched_queue;
117	__be32			param3;
118	u8			vlan_control;
119	u8			fvl_rx;
120	u8			pri_path_fl;
121	u8			vlan_index;
122	u8			feup;
123};
124
125enum res_mtt_states {
126	RES_MTT_BUSY = RES_ANY_BUSY,
127	RES_MTT_ALLOCATED,
128};
129
130static inline const char *mtt_states_str(enum res_mtt_states state)
131{
132	switch (state) {
133	case RES_MTT_BUSY: return "RES_MTT_BUSY";
134	case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
135	default: return "Unknown";
136	}
137}
138
139struct res_mtt {
140	struct res_common	com;
141	int			order;
142	atomic_t		ref_count;
143};
144
145enum res_mpt_states {
146	RES_MPT_BUSY = RES_ANY_BUSY,
147	RES_MPT_RESERVED,
148	RES_MPT_MAPPED,
149	RES_MPT_HW,
150};
151
152struct res_mpt {
153	struct res_common	com;
154	struct res_mtt	       *mtt;
155	int			key;
156};
157
158enum res_eq_states {
159	RES_EQ_BUSY = RES_ANY_BUSY,
160	RES_EQ_RESERVED,
161	RES_EQ_HW,
162};
163
164struct res_eq {
165	struct res_common	com;
166	struct res_mtt	       *mtt;
167};
168
169enum res_cq_states {
170	RES_CQ_BUSY = RES_ANY_BUSY,
171	RES_CQ_ALLOCATED,
172	RES_CQ_HW,
173};
174
175struct res_cq {
176	struct res_common	com;
177	struct res_mtt	       *mtt;
178	atomic_t		ref_count;
179};
180
181enum res_srq_states {
182	RES_SRQ_BUSY = RES_ANY_BUSY,
183	RES_SRQ_ALLOCATED,
184	RES_SRQ_HW,
185};
186
187struct res_srq {
188	struct res_common	com;
189	struct res_mtt	       *mtt;
190	struct res_cq	       *cq;
191	atomic_t		ref_count;
192};
193
194enum res_counter_states {
195	RES_COUNTER_BUSY = RES_ANY_BUSY,
196	RES_COUNTER_ALLOCATED,
197};
198
199struct res_counter {
200	struct res_common	com;
201	int			port;
202};
203
204enum res_xrcdn_states {
205	RES_XRCD_BUSY = RES_ANY_BUSY,
206	RES_XRCD_ALLOCATED,
207};
208
209struct res_xrcdn {
210	struct res_common	com;
211	int			port;
212};
213
214enum res_fs_rule_states {
215	RES_FS_RULE_BUSY = RES_ANY_BUSY,
216	RES_FS_RULE_ALLOCATED,
217};
218
219struct res_fs_rule {
220	struct res_common	com;
221	int			qpn;
222};
223
224static int mlx4_is_eth(struct mlx4_dev *dev, int port)
225{
226	return dev->caps.port_mask[port] == MLX4_PORT_TYPE_IB ? 0 : 1;
227}
228
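/*
 * Look up a tracked resource by its ID in the per-type red-black tree.
 * Returns the embedded res_common, or NULL if the ID is not tracked.
 * Callers hold the resource tracker lock (mlx4_tlock).
 */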
229static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
230{
231	struct rb_node *node = root->rb_node;
232
233	while (node) {
234		struct res_common *res = container_of(node, struct res_common,
235						      node);
236
237		if (res_id < res->res_id)
238			node = node->rb_left;
239		else if (res_id > res->res_id)
240			node = node->rb_right;
241		else
242			return res;
243	}
244	return NULL;
245}
246
247static int res_tracker_insert(struct rb_root *root, struct res_common *res)
248{
249	struct rb_node **new = &(root->rb_node), *parent = NULL;
250
251	/* Figure out where to put new node */
252	while (*new) {
253		struct res_common *this = container_of(*new, struct res_common,
254						       node);
255
256		parent = *new;
257		if (res->res_id < this->res_id)
258			new = &((*new)->rb_left);
259		else if (res->res_id > this->res_id)
260			new = &((*new)->rb_right);
261		else
262			return -EEXIST;
263	}
264
265	/* Add new node and rebalance tree. */
266	rb_link_node(&res->node, parent, new);
267	rb_insert_color(&res->node, root);
268
269	return 0;
270}
271
272enum qp_transition {
273	QP_TRANS_INIT2RTR,
274	QP_TRANS_RTR2RTS,
275	QP_TRANS_RTS2RTS,
276	QP_TRANS_SQERR2RTS,
277	QP_TRANS_SQD2SQD,
278	QP_TRANS_SQD2RTS
279};
280
281/* For debugging use */
282static const char *ResourceType(enum mlx4_resource rt)
283{
284	switch (rt) {
285	case RES_QP: return "RES_QP";
286	case RES_CQ: return "RES_CQ";
287	case RES_SRQ: return "RES_SRQ";
288	case RES_MPT: return "RES_MPT";
289	case RES_MTT: return "RES_MTT";
290	case RES_MAC: return  "RES_MAC";
291	case RES_VLAN: return  "RES_VLAN";
292	case RES_EQ: return "RES_EQ";
293	case RES_COUNTER: return "RES_COUNTER";
294	case RES_FS_RULE: return "RES_FS_RULE";
295	case RES_XRCD: return "RES_XRCD";
296	default: return "Unknown resource type !!!";
297	}
298}
299
300static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
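/*
 * Try to charge 'count' instances of 'res_type' to 'slave' (against the
 * per-port pool when port > 0).  The request is refused if it would exceed
 * the slave's quota or eat into other functions' guaranteed reservations;
 * on success the allocated/free counters are updated accordingly.
 */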
301static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
302				      enum mlx4_resource res_type, int count,
303				      int port)
304{
305	struct mlx4_priv *priv = mlx4_priv(dev);
306	struct resource_allocator *res_alloc =
307		&priv->mfunc.master.res_tracker.res_alloc[res_type];
308	int err = -EINVAL;
309	int allocated, free, reserved, guaranteed, from_free;
310
311	spin_lock(&res_alloc->alloc_lock);
312	allocated = (port > 0) ?
313		res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] :
314		res_alloc->allocated[slave];
315	free = (port > 0) ? res_alloc->res_port_free[port - 1] :
316		res_alloc->res_free;
317	reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
318		res_alloc->res_reserved;
319	guaranteed = res_alloc->guaranteed[slave];
320
321	if (allocated + count > res_alloc->quota[slave])
322		goto out;
323
324	if (allocated + count <= guaranteed) {
325		err = 0;
326	} else {
327		/* part of the request may need to come from the free area */
328		if (guaranteed - allocated > 0)
329			from_free = count - (guaranteed - allocated);
330		else
331			from_free = count;
332
333		if (free - from_free > reserved)
334			err = 0;
335	}
336
337	if (!err) {
338		/* grant the request */
339		if (port > 0) {
340			res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] += count;
341			res_alloc->res_port_free[port - 1] -= count;
342		} else {
343			res_alloc->allocated[slave] += count;
344			res_alloc->res_free -= count;
345		}
346	}
347
348out:
349	spin_unlock(&res_alloc->alloc_lock);
350	return err;
351
352}
353
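/*
 * Return 'count' previously granted instances of 'res_type' from 'slave'
 * back to the free pool (per-port pool when port > 0).
 */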
354static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
355				    enum mlx4_resource res_type, int count,
356				    int port)
357{
358	struct mlx4_priv *priv = mlx4_priv(dev);
359	struct resource_allocator *res_alloc =
360		&priv->mfunc.master.res_tracker.res_alloc[res_type];
361
362	spin_lock(&res_alloc->alloc_lock);
363	if (port > 0) {
364		res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] -= count;
365		res_alloc->res_port_free[port - 1] += count;
366	} else {
367		res_alloc->allocated[slave] -= count;
368		res_alloc->res_free += count;
369	}
370
371	spin_unlock(&res_alloc->alloc_lock);
372	return;
373}
374
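/*
 * Set the guaranteed share and quota of one resource type for one function:
 * each function is guaranteed 1/(2 * (num_vfs + 1)) of the instances and may
 * allocate up to half of them plus its guarantee.  The PF additionally
 * absorbs the firmware-reserved MTTs into its own allocation.
 */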
375static inline void initialize_res_quotas(struct mlx4_dev *dev,
376					 struct resource_allocator *res_alloc,
377					 enum mlx4_resource res_type,
378					 int vf, int num_instances)
379{
380	res_alloc->guaranteed[vf] = num_instances / (2 * (dev->num_vfs + 1));
381	res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
382	if (vf == mlx4_master_func_num(dev)) {
383		res_alloc->res_free = num_instances;
384		if (res_type == RES_MTT) {
385			/* reserved mtts will be taken out of the PF allocation */
386			res_alloc->res_free += dev->caps.reserved_mtts;
387			res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
388			res_alloc->quota[vf] += dev->caps.reserved_mtts;
389		}
390	}
391}
392
393void mlx4_init_quotas(struct mlx4_dev *dev)
394{
395	struct mlx4_priv *priv = mlx4_priv(dev);
396	int pf;
397
398	/* quotas for VFs are initialized in mlx4_slave_cap */
399	if (mlx4_is_slave(dev))
400		return;
401
402	if (!mlx4_is_mfunc(dev)) {
403		dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
404			mlx4_num_reserved_sqps(dev);
405		dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
406		dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
407		dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
408		dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
409		return;
410	}
411
412	pf = mlx4_master_func_num(dev);
413	dev->quotas.qp =
414		priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
415	dev->quotas.cq =
416		priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
417	dev->quotas.srq =
418		priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
419	dev->quotas.mtt =
420		priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
421	dev->quotas.mpt =
422		priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
423}
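/*
 * Allocate and initialize the master's resource tracker: per-slave resource
 * lists, the per-type rb-trees, and the quota/guarantee accounting for every
 * resource type.
 */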
424int mlx4_init_resource_tracker(struct mlx4_dev *dev)
425{
426	struct mlx4_priv *priv = mlx4_priv(dev);
427	int i, j;
428	int t;
429
430	priv->mfunc.master.res_tracker.slave_list =
431		kzalloc(dev->num_slaves * sizeof(struct slave_list),
432			GFP_KERNEL);
433	if (!priv->mfunc.master.res_tracker.slave_list)
434		return -ENOMEM;
435
436	for (i = 0 ; i < dev->num_slaves; i++) {
437		for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
438			INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
439				       slave_list[i].res_list[t]);
440		mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
441	}
442
443	mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
444		 dev->num_slaves);
445	for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
446		priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;
447
448	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
449		struct resource_allocator *res_alloc =
450			&priv->mfunc.master.res_tracker.res_alloc[i];
451		res_alloc->quota = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
452		res_alloc->guaranteed = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
453		if (i == RES_MAC || i == RES_VLAN)
454			res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
455						       (dev->num_vfs + 1) * sizeof(int),
456							GFP_KERNEL);
457		else
458			res_alloc->allocated = kzalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
459
460		if (!res_alloc->quota || !res_alloc->guaranteed ||
461		    !res_alloc->allocated)
462			goto no_mem_err;
463
464		spin_lock_init(&res_alloc->alloc_lock);
465		for (t = 0; t < dev->num_vfs + 1; t++) {
466			switch (i) {
467			case RES_QP:
468				initialize_res_quotas(dev, res_alloc, RES_QP,
469						      t, dev->caps.num_qps -
470						      dev->caps.reserved_qps -
471						      mlx4_num_reserved_sqps(dev));
472				break;
473			case RES_CQ:
474				initialize_res_quotas(dev, res_alloc, RES_CQ,
475						      t, dev->caps.num_cqs -
476						      dev->caps.reserved_cqs);
477				break;
478			case RES_SRQ:
479				initialize_res_quotas(dev, res_alloc, RES_SRQ,
480						      t, dev->caps.num_srqs -
481						      dev->caps.reserved_srqs);
482				break;
483			case RES_MPT:
484				initialize_res_quotas(dev, res_alloc, RES_MPT,
485						      t, dev->caps.num_mpts -
486						      dev->caps.reserved_mrws);
487				break;
488			case RES_MTT:
489				initialize_res_quotas(dev, res_alloc, RES_MTT,
490						      t, dev->caps.num_mtts -
491						      dev->caps.reserved_mtts);
492				break;
493			case RES_MAC:
494				if (t == mlx4_master_func_num(dev)) {
495					res_alloc->quota[t] =
496						MLX4_MAX_MAC_NUM - 2 * dev->num_vfs;
497					res_alloc->guaranteed[t] = res_alloc->quota[t];
498					for (j = 0; j < MLX4_MAX_PORTS; j++)
499						res_alloc->res_port_free[j] = MLX4_MAX_MAC_NUM;
500				} else {
501					res_alloc->quota[t] = 2;
502					res_alloc->guaranteed[t] = 2;
503				}
504				break;
505			case RES_VLAN:
506				if (t == mlx4_master_func_num(dev)) {
507					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
508					res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
509					for (j = 0; j < MLX4_MAX_PORTS; j++)
510						res_alloc->res_port_free[j] =
511							res_alloc->quota[t];
512				} else {
513					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
514					res_alloc->guaranteed[t] = 0;
515				}
516				break;
517			case RES_COUNTER:
518				res_alloc->quota[t] = dev->caps.max_counters;
519				res_alloc->guaranteed[t] = 0;
520				if (t == mlx4_master_func_num(dev))
521					res_alloc->res_free = res_alloc->quota[t];
522				break;
523			default:
524				break;
525			}
526			if (i == RES_MAC || i == RES_VLAN) {
527				for (j = 0; j < MLX4_MAX_PORTS; j++)
528					res_alloc->res_port_rsvd[j] +=
529						res_alloc->guaranteed[t];
530			} else {
531				res_alloc->res_reserved += res_alloc->guaranteed[t];
532			}
533		}
534	}
535	spin_lock_init(&priv->mfunc.master.res_tracker.lock);
536	return 0;
537
538no_mem_err:
539	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
540		kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
541		priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
542		kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
543		priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
544		kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
545		priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
546	}
547	return -ENOMEM;
548}
549
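/*
 * Tear down the resource tracker.  Depending on 'type', reclaim every
 * slave's outstanding resources (and the master's VLANs) and/or free the
 * tracker's bookkeeping structures.
 */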
550void mlx4_free_resource_tracker(struct mlx4_dev *dev,
551				enum mlx4_res_tracker_free_type type)
552{
553	struct mlx4_priv *priv = mlx4_priv(dev);
554	int i;
555
556	if (priv->mfunc.master.res_tracker.slave_list) {
557		if (type != RES_TR_FREE_STRUCTS_ONLY) {
558			for (i = 0; i < dev->num_slaves; i++) {
559				if (type == RES_TR_FREE_ALL ||
560				    dev->caps.function != i)
561					mlx4_delete_all_resources_for_slave(dev, i);
562			}
563			/* free master's vlans */
564			i = dev->caps.function;
565			mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
566			rem_slave_vlans(dev, i);
567			mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
568		}
569
570		if (type != RES_TR_FREE_SLAVES_ONLY) {
571			for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
572				kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
573				priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
574				kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
575				priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
576				kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
577				priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
578			}
579			kfree(priv->mfunc.master.res_tracker.slave_list);
580			priv->mfunc.master.res_tracker.slave_list = NULL;
581		}
582	}
583}
584
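/*
 * Translate the guest's pkey index in the QP context mailbox into the
 * physical index, using the master's virt2phys_pkey table for this slave
 * and port.
 */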
585static void update_pkey_index(struct mlx4_dev *dev, int slave,
586			      struct mlx4_cmd_mailbox *inbox)
587{
588	u8 sched = *(u8 *)(inbox->buf + 64);
589	u8 orig_index = *(u8 *)(inbox->buf + 35);
590	u8 new_index;
591	struct mlx4_priv *priv = mlx4_priv(dev);
592	int port;
593
594	port = (sched >> 6 & 1) + 1;
595
596	new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
597	*(u8 *)(inbox->buf + 35) = new_index;
598}
599
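/*
 * Translate the guest's GID index (mgid_index) in the QP context into the
 * physical index: Ethernet (RoCE) ports are offset by the slave's base GID
 * index, while IB ports simply encode the slave number.
 */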
600static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
601		       u8 slave)
602{
603	struct mlx4_qp_context	*qp_ctx = inbox->buf + 8;
604	enum mlx4_qp_optpar	optpar = be32_to_cpu(*(__be32 *) inbox->buf);
605	u32			ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
606	int port;
607
608	if (MLX4_QP_ST_UD == ts) {
609		port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
610		if (mlx4_is_eth(dev, port))
611			qp_ctx->pri_path.mgid_index = mlx4_get_base_gid_ix(dev, slave) | 0x80;
612		else
613			qp_ctx->pri_path.mgid_index = 0x80 | slave;
614
615	} else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_UC == ts) {
616		if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
617			port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
618			if (mlx4_is_eth(dev, port)) {
619				qp_ctx->pri_path.mgid_index += mlx4_get_base_gid_ix(dev, slave);
620				qp_ctx->pri_path.mgid_index &= 0x7f;
621			} else {
622				qp_ctx->pri_path.mgid_index = slave & 0x7F;
623			}
624		}
625		if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
626			port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
627			if (mlx4_is_eth(dev, port)) {
628				qp_ctx->alt_path.mgid_index += mlx4_get_base_gid_ix(dev, slave);
629				qp_ctx->alt_path.mgid_index &= 0x7f;
630			} else {
631				qp_ctx->alt_path.mgid_index = slave & 0x7F;
632			}
633		}
634	}
635}
636
637static int check_counter_index_validity(struct mlx4_dev *dev, int slave, int port, int idx)
638{
639	struct mlx4_priv *priv = mlx4_priv(dev);
640	struct counter_index *counter, *tmp_counter;
641
642	if (slave == 0) {
643		list_for_each_entry_safe(counter, tmp_counter,
644					 &priv->counters_table.global_port_list[port - 1],
645					 list) {
646			if (counter->index == idx)
647				return 0;
648		}
649		return -EINVAL;
650	} else {
651		list_for_each_entry_safe(counter, tmp_counter,
652					 &priv->counters_table.vf_list[slave - 1][port - 1],
653					 list) {
654			if (counter->index == idx)
655				return 0;
656		}
657		return -EINVAL;
658	}
659}
660
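/*
 * Adjust a slave's QP context before it is handed to the firmware: validate
 * the counter index, enable source checking for multicast loopback where
 * applicable, and, when the VF operates in VST mode, force the
 * administrator's VLAN/QoS (and spoof-check MAC) settings into the primary
 * path.
 */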
661static int update_vport_qp_param(struct mlx4_dev *dev,
662				 struct mlx4_cmd_mailbox *inbox,
663				 u8 slave, u32 qpn)
664{
665	struct mlx4_qp_context	*qpc = inbox->buf + 8;
666	struct mlx4_vport_oper_state *vp_oper;
667	struct mlx4_priv *priv;
668	u32 qp_type;
669	int port;
670
671	port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
672	priv = mlx4_priv(dev);
673	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
674	qp_type	= (be32_to_cpu(qpc->flags) >> 16) & 0xff;
675
676	if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH &&
677	    qpc->pri_path.counter_index != MLX4_SINK_COUNTER_INDEX) {
678		if (check_counter_index_validity(dev, slave, port,
679						 qpc->pri_path.counter_index))
680			return -EINVAL;
681	}
682
683	mlx4_dbg(dev, "%s: QP counter_index %d for slave %d port %d\n",
684		 __func__, qpc->pri_path.counter_index, slave, port);
685
686	if ((dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_LB_SRC_CHK) &&
687	    dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH &&
688	    !mlx4_is_qp_reserved(dev, qpn) &&
689	    qp_type == MLX4_QP_ST_MLX &&
690	    qpc->pri_path.counter_index != 0xFF) {
691		/* disable multicast loopback to qp with same counter */
692		qpc->pri_path.fl |= MLX4_FL_ETH_SRC_CHECK_MC_LB;
693		qpc->pri_path.vlan_control |=
694			MLX4_VLAN_CTRL_ETH_SRC_CHECK_IF_COUNTER;
695	}
696
697	if (MLX4_VGT != vp_oper->state.default_vlan) {
698		/* the reserved QPs (special, proxy, tunnel)
699		 * do not operate over vlans
700		 */
701		if (mlx4_is_qp_reserved(dev, qpn))
702			return 0;
703
704		/* force strip vlan by clear vsd */
705		qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
706		/* preserve IF_COUNTER flag */
707		qpc->pri_path.vlan_control &=
708			MLX4_VLAN_CTRL_ETH_SRC_CHECK_IF_COUNTER;
709		if (MLX4_QP_ST_RC != qp_type) {
710			if (0 != vp_oper->state.default_vlan) {
711				qpc->pri_path.vlan_control |=
712					MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
713					MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
714					MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
715			} else { /* priority tagged */
716				qpc->pri_path.vlan_control |=
717					MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
718					MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
719			}
720		}
721		qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
722		qpc->pri_path.vlan_index = vp_oper->vlan_idx;
723		qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
724		qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
725		qpc->pri_path.sched_queue &= 0xC7;
726		qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
727	}
728	if (vp_oper->state.spoofchk) {
729		qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
730		qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
731	}
732	return 0;
733}
734
735static int mpt_mask(struct mlx4_dev *dev)
736{
737	return dev->caps.num_mpts - 1;
738}
739
740static void *find_res(struct mlx4_dev *dev, u64 res_id,
741		      enum mlx4_resource type)
742{
743	struct mlx4_priv *priv = mlx4_priv(dev);
744
745	return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
746				  res_id);
747}
748
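/*
 * Take exclusive (busy) ownership of a tracked resource on behalf of
 * 'slave'.  Fails if the resource does not exist, is already busy, or is
 * owned by another slave.  Release it again with put_res().
 */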
749static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
750		   enum mlx4_resource type,
751		   void *res)
752{
753	struct res_common *r;
754	int err = 0;
755
756	spin_lock_irq(mlx4_tlock(dev));
757	r = find_res(dev, res_id, type);
758	if (!r) {
759		err = -ENONET;
760		goto exit;
761	}
762
763	if (r->state == RES_ANY_BUSY) {
764		err = -EBUSY;
765		goto exit;
766	}
767
768	if (r->owner != slave) {
769		err = -EPERM;
770		goto exit;
771	}
772
773	r->from_state = r->state;
774	r->state = RES_ANY_BUSY;
775
776	if (res)
777		*((struct res_common **)res) = r;
778
779exit:
780	spin_unlock_irq(mlx4_tlock(dev));
781	return err;
782}
783
784int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
785				    enum mlx4_resource type,
786				    u64 res_id, int *slave)
787{
788
789	struct res_common *r;
790	int err = -ENOENT;
791	int id = res_id;
792
793	if (type == RES_QP)
794		id &= 0x7fffff;
795	spin_lock(mlx4_tlock(dev));
796
797	r = find_res(dev, id, type);
798	if (r) {
799		*slave = r->owner;
800		err = 0;
801	}
802	spin_unlock(mlx4_tlock(dev));
803
804	return err;
805}
806
807static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
808		    enum mlx4_resource type)
809{
810	struct res_common *r;
811
812	spin_lock_irq(mlx4_tlock(dev));
813	r = find_res(dev, res_id, type);
814	if (r)
815		r->state = r->from_state;
816	spin_unlock_irq(mlx4_tlock(dev));
817}
818
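/*
 * The alloc_*_tr() helpers below allocate a tracker entry for a newly
 * created resource in its initial tracked state; alloc_tr() dispatches on
 * the resource type and records the owning slave.
 */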
819static struct res_common *alloc_qp_tr(int id)
820{
821	struct res_qp *ret;
822
823	ret = kzalloc(sizeof *ret, GFP_KERNEL);
824	if (!ret)
825		return NULL;
826
827	ret->com.res_id = id;
828	ret->com.state = RES_QP_RESERVED;
829	ret->local_qpn = id;
830	INIT_LIST_HEAD(&ret->mcg_list);
831	spin_lock_init(&ret->mcg_spl);
832	atomic_set(&ret->ref_count, 0);
833
834	return &ret->com;
835}
836
837static struct res_common *alloc_mtt_tr(int id, int order)
838{
839	struct res_mtt *ret;
840
841	ret = kzalloc(sizeof *ret, GFP_KERNEL);
842	if (!ret)
843		return NULL;
844
845	ret->com.res_id = id;
846	ret->order = order;
847	ret->com.state = RES_MTT_ALLOCATED;
848	atomic_set(&ret->ref_count, 0);
849
850	return &ret->com;
851}
852
853static struct res_common *alloc_mpt_tr(int id, int key)
854{
855	struct res_mpt *ret;
856
857	ret = kzalloc(sizeof *ret, GFP_KERNEL);
858	if (!ret)
859		return NULL;
860
861	ret->com.res_id = id;
862	ret->com.state = RES_MPT_RESERVED;
863	ret->key = key;
864
865	return &ret->com;
866}
867
868static struct res_common *alloc_eq_tr(int id)
869{
870	struct res_eq *ret;
871
872	ret = kzalloc(sizeof *ret, GFP_KERNEL);
873	if (!ret)
874		return NULL;
875
876	ret->com.res_id = id;
877	ret->com.state = RES_EQ_RESERVED;
878
879	return &ret->com;
880}
881
882static struct res_common *alloc_cq_tr(int id)
883{
884	struct res_cq *ret;
885
886	ret = kzalloc(sizeof *ret, GFP_KERNEL);
887	if (!ret)
888		return NULL;
889
890	ret->com.res_id = id;
891	ret->com.state = RES_CQ_ALLOCATED;
892	atomic_set(&ret->ref_count, 0);
893
894	return &ret->com;
895}
896
897static struct res_common *alloc_srq_tr(int id)
898{
899	struct res_srq *ret;
900
901	ret = kzalloc(sizeof *ret, GFP_KERNEL);
902	if (!ret)
903		return NULL;
904
905	ret->com.res_id = id;
906	ret->com.state = RES_SRQ_ALLOCATED;
907	atomic_set(&ret->ref_count, 0);
908
909	return &ret->com;
910}
911
912static struct res_common *alloc_counter_tr(int id)
913{
914	struct res_counter *ret;
915
916	ret = kzalloc(sizeof *ret, GFP_KERNEL);
917	if (!ret)
918		return NULL;
919
920	ret->com.res_id = id;
921	ret->com.state = RES_COUNTER_ALLOCATED;
922
923	return &ret->com;
924}
925
926static struct res_common *alloc_xrcdn_tr(int id)
927{
928	struct res_xrcdn *ret;
929
930	ret = kzalloc(sizeof *ret, GFP_KERNEL);
931	if (!ret)
932		return NULL;
933
934	ret->com.res_id = id;
935	ret->com.state = RES_XRCD_ALLOCATED;
936
937	return &ret->com;
938}
939
940static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
941{
942	struct res_fs_rule *ret;
943
944	ret = kzalloc(sizeof *ret, GFP_KERNEL);
945	if (!ret)
946		return NULL;
947
948	ret->com.res_id = id;
949	ret->com.state = RES_FS_RULE_ALLOCATED;
950	ret->qpn = qpn;
951	return &ret->com;
952}
953
954static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
955				   int extra)
956{
957	struct res_common *ret;
958
959	switch (type) {
960	case RES_QP:
961		ret = alloc_qp_tr(id);
962		break;
963	case RES_MPT:
964		ret = alloc_mpt_tr(id, extra);
965		break;
966	case RES_MTT:
967		ret = alloc_mtt_tr(id, extra);
968		break;
969	case RES_EQ:
970		ret = alloc_eq_tr(id);
971		break;
972	case RES_CQ:
973		ret = alloc_cq_tr(id);
974		break;
975	case RES_SRQ:
976		ret = alloc_srq_tr(id);
977		break;
978	case RES_MAC:
979		printk(KERN_ERR "implementation missing\n");
980		return NULL;
981	case RES_COUNTER:
982		ret = alloc_counter_tr(id);
983		break;
984	case RES_XRCD:
985		ret = alloc_xrcdn_tr(id);
986		break;
987	case RES_FS_RULE:
988		ret = alloc_fs_rule_tr(id, extra);
989		break;
990	default:
991		return NULL;
992	}
993	if (ret)
994		ret->owner = slave;
995
996	return ret;
997}
998
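/*
 * Allocate tracker entries for 'count' consecutive resource IDs starting at
 * 'base' and insert them into the per-type rb-tree and the slave's resource
 * list.  The operation is all-or-nothing: any duplicate or insertion
 * failure rolls back the entries added so far.
 */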
999static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
1000			 enum mlx4_resource type, int extra)
1001{
1002	int i;
1003	int err;
1004	struct mlx4_priv *priv = mlx4_priv(dev);
1005	struct res_common **res_arr;
1006	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1007	struct rb_root *root = &tracker->res_tree[type];
1008
1009	res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
1010	if (!res_arr)
1011		return -ENOMEM;
1012
1013	for (i = 0; i < count; ++i) {
1014		res_arr[i] = alloc_tr(base + i, type, slave, extra);
1015		if (!res_arr[i]) {
1016			for (--i; i >= 0; --i)
1017				kfree(res_arr[i]);
1018
1019			kfree(res_arr);
1020			return -ENOMEM;
1021		}
1022	}
1023
1024	spin_lock_irq(mlx4_tlock(dev));
1025	for (i = 0; i < count; ++i) {
1026		if (find_res(dev, base + i, type)) {
1027			err = -EEXIST;
1028			goto undo;
1029		}
1030		err = res_tracker_insert(root, res_arr[i]);
1031		if (err)
1032			goto undo;
1033		list_add_tail(&res_arr[i]->list,
1034			      &tracker->slave_list[slave].res_list[type]);
1035	}
1036	spin_unlock_irq(mlx4_tlock(dev));
1037	kfree(res_arr);
1038
1039	return 0;
1040
1041undo:
1042	for (--i; i >= 0; --i) {
1043		rb_erase(&res_arr[i]->node, root);
1044		list_del_init(&res_arr[i]->list);
1045	}
1046
1047	spin_unlock_irq(mlx4_tlock(dev));
1048
1049	for (i = 0; i < count; ++i)
1050		kfree(res_arr[i]);
1051
1052	kfree(res_arr);
1053
1054	return err;
1055}
1056
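/*
 * The remove_*_ok() helpers check that a tracked resource may be removed:
 * it must not be busy, must hold no outstanding references, and must be
 * back in its initial (reserved/allocated) state.
 */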
1057static int remove_qp_ok(struct res_qp *res)
1058{
1059	if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
1060	    !list_empty(&res->mcg_list)) {
1061		pr_err("resource tracker: failed to remove qp, state %d, ref_count %d\n",
1062		       res->com.state, atomic_read(&res->ref_count));
1063		return -EBUSY;
1064	} else if (res->com.state != RES_QP_RESERVED) {
1065		return -EPERM;
1066	}
1067
1068	return 0;
1069}
1070
1071static int remove_mtt_ok(struct res_mtt *res, int order)
1072{
1073	if (res->com.state == RES_MTT_BUSY ||
1074	    atomic_read(&res->ref_count)) {
1075		printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
1076		       __func__, __LINE__,
1077		       mtt_states_str(res->com.state),
1078		       atomic_read(&res->ref_count));
1079		return -EBUSY;
1080	} else if (res->com.state != RES_MTT_ALLOCATED)
1081		return -EPERM;
1082	else if (res->order != order)
1083		return -EINVAL;
1084
1085	return 0;
1086}
1087
1088static int remove_mpt_ok(struct res_mpt *res)
1089{
1090	if (res->com.state == RES_MPT_BUSY)
1091		return -EBUSY;
1092	else if (res->com.state != RES_MPT_RESERVED)
1093		return -EPERM;
1094
1095	return 0;
1096}
1097
1098static int remove_eq_ok(struct res_eq *res)
1099{
1100	if (res->com.state == RES_MPT_BUSY)
1101		return -EBUSY;
1102	else if (res->com.state != RES_MPT_RESERVED)
1103		return -EPERM;
1104
1105	return 0;
1106}
1107
1108static int remove_counter_ok(struct res_counter *res)
1109{
1110	if (res->com.state == RES_COUNTER_BUSY)
1111		return -EBUSY;
1112	else if (res->com.state != RES_COUNTER_ALLOCATED)
1113		return -EPERM;
1114
1115	return 0;
1116}
1117
1118static int remove_xrcdn_ok(struct res_xrcdn *res)
1119{
1120	if (res->com.state == RES_XRCD_BUSY)
1121		return -EBUSY;
1122	else if (res->com.state != RES_XRCD_ALLOCATED)
1123		return -EPERM;
1124
1125	return 0;
1126}
1127
1128static int remove_fs_rule_ok(struct res_fs_rule *res)
1129{
1130	if (res->com.state == RES_FS_RULE_BUSY)
1131		return -EBUSY;
1132	else if (res->com.state != RES_FS_RULE_ALLOCATED)
1133		return -EPERM;
1134
1135	return 0;
1136}
1137
1138static int remove_cq_ok(struct res_cq *res)
1139{
1140	if (res->com.state == RES_CQ_BUSY)
1141		return -EBUSY;
1142	else if (res->com.state != RES_CQ_ALLOCATED)
1143		return -EPERM;
1144
1145	return 0;
1146}
1147
1148static int remove_srq_ok(struct res_srq *res)
1149{
1150	if (res->com.state == RES_SRQ_BUSY)
1151		return -EBUSY;
1152	else if (res->com.state != RES_SRQ_ALLOCATED)
1153		return -EPERM;
1154
1155	return 0;
1156}
1157
1158static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
1159{
1160	switch (type) {
1161	case RES_QP:
1162		return remove_qp_ok((struct res_qp *)res);
1163	case RES_CQ:
1164		return remove_cq_ok((struct res_cq *)res);
1165	case RES_SRQ:
1166		return remove_srq_ok((struct res_srq *)res);
1167	case RES_MPT:
1168		return remove_mpt_ok((struct res_mpt *)res);
1169	case RES_MTT:
1170		return remove_mtt_ok((struct res_mtt *)res, extra);
1171	case RES_MAC:
1172		return -ENOSYS;
1173	case RES_EQ:
1174		return remove_eq_ok((struct res_eq *)res);
1175	case RES_COUNTER:
1176		return remove_counter_ok((struct res_counter *)res);
1177	case RES_XRCD:
1178		return remove_xrcdn_ok((struct res_xrcdn *)res);
1179	case RES_FS_RULE:
1180		return remove_fs_rule_ok((struct res_fs_rule *)res);
1181	default:
1182		return -EINVAL;
1183	}
1184}
1185
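/*
 * Remove 'count' consecutive tracker entries starting at 'base'.  Every
 * entry must exist, be owned by 'slave' and pass remove_ok() before any of
 * them is erased from the rb-tree and freed.
 */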
1186static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
1187			 enum mlx4_resource type, int extra)
1188{
1189	u64 i;
1190	int err;
1191	struct mlx4_priv *priv = mlx4_priv(dev);
1192	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1193	struct res_common *r;
1194
1195	spin_lock_irq(mlx4_tlock(dev));
1196	for (i = base; i < base + count; ++i) {
1197		r = res_tracker_lookup(&tracker->res_tree[type], i);
1198		if (!r) {
1199			err = -ENOENT;
1200			goto out;
1201		}
1202		if (r->owner != slave) {
1203			err = -EPERM;
1204			goto out;
1205		}
1206		err = remove_ok(r, type, extra);
1207		if (err)
1208			goto out;
1209	}
1210
1211	for (i = base; i < base + count; ++i) {
1212		r = res_tracker_lookup(&tracker->res_tree[type], i);
1213		rb_erase(&r->node, &tracker->res_tree[type]);
1214		list_del(&r->list);
1215		kfree(r);
1216	}
1217	err = 0;
1218
1219out:
1220	spin_unlock_irq(mlx4_tlock(dev));
1221
1222	return err;
1223}
1224
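/*
 * The *_res_start_move_to() helpers implement the per-resource state
 * machines: they validate the requested transition, record the from/to
 * states and park the resource in its BUSY state until res_end_move()
 * commits the transition or res_abort_move() rolls it back.
 */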
1225static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
1226				enum res_qp_states state, struct res_qp **qp,
1227				int alloc)
1228{
1229	struct mlx4_priv *priv = mlx4_priv(dev);
1230	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1231	struct res_qp *r;
1232	int err = 0;
1233
1234	spin_lock_irq(mlx4_tlock(dev));
1235	r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
1236	if (!r)
1237		err = -ENOENT;
1238	else if (r->com.owner != slave)
1239		err = -EPERM;
1240	else {
1241		switch (state) {
1242		case RES_QP_BUSY:
1243			mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
1244				 __func__, (unsigned long long)r->com.res_id);
1245			err = -EBUSY;
1246			break;
1247
1248		case RES_QP_RESERVED:
1249			if (r->com.state == RES_QP_MAPPED && !alloc)
1250				break;
1251
1252			mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", (unsigned long long)r->com.res_id);
1253			err = -EINVAL;
1254			break;
1255
1256		case RES_QP_MAPPED:
1257			if ((r->com.state == RES_QP_RESERVED && alloc) ||
1258			    r->com.state == RES_QP_HW)
1259				break;
1260			else {
1261				mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
1262					  (unsigned long long)r->com.res_id);
1263				err = -EINVAL;
1264			}
1265
1266			break;
1267
1268		case RES_QP_HW:
1269			if (r->com.state != RES_QP_MAPPED)
1270				err = -EINVAL;
1271			break;
1272		default:
1273			err = -EINVAL;
1274		}
1275
1276		if (!err) {
1277			r->com.from_state = r->com.state;
1278			r->com.to_state = state;
1279			r->com.state = RES_QP_BUSY;
1280			if (qp)
1281				*qp = r;
1282		}
1283	}
1284
1285	spin_unlock_irq(mlx4_tlock(dev));
1286
1287	return err;
1288}
1289
1290static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1291				enum res_mpt_states state, struct res_mpt **mpt)
1292{
1293	struct mlx4_priv *priv = mlx4_priv(dev);
1294	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1295	struct res_mpt *r;
1296	int err = 0;
1297
1298	spin_lock_irq(mlx4_tlock(dev));
1299	r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
1300	if (!r)
1301		err = -ENOENT;
1302	else if (r->com.owner != slave)
1303		err = -EPERM;
1304	else {
1305		switch (state) {
1306		case RES_MPT_BUSY:
1307			err = -EINVAL;
1308			break;
1309
1310		case RES_MPT_RESERVED:
1311			if (r->com.state != RES_MPT_MAPPED)
1312				err = -EINVAL;
1313			break;
1314
1315		case RES_MPT_MAPPED:
1316			if (r->com.state != RES_MPT_RESERVED &&
1317			    r->com.state != RES_MPT_HW)
1318				err = -EINVAL;
1319			break;
1320
1321		case RES_MPT_HW:
1322			if (r->com.state != RES_MPT_MAPPED)
1323				err = -EINVAL;
1324			break;
1325		default:
1326			err = -EINVAL;
1327		}
1328
1329		if (!err) {
1330			r->com.from_state = r->com.state;
1331			r->com.to_state = state;
1332			r->com.state = RES_MPT_BUSY;
1333			if (mpt)
1334				*mpt = r;
1335		}
1336	}
1337
1338	spin_unlock_irq(mlx4_tlock(dev));
1339
1340	return err;
1341}
1342
1343static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1344				enum res_eq_states state, struct res_eq **eq)
1345{
1346	struct mlx4_priv *priv = mlx4_priv(dev);
1347	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1348	struct res_eq *r;
1349	int err = 0;
1350
1351	spin_lock_irq(mlx4_tlock(dev));
1352	r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
1353	if (!r)
1354		err = -ENOENT;
1355	else if (r->com.owner != slave)
1356		err = -EPERM;
1357	else {
1358		switch (state) {
1359		case RES_EQ_BUSY:
1360			err = -EINVAL;
1361			break;
1362
1363		case RES_EQ_RESERVED:
1364			if (r->com.state != RES_EQ_HW)
1365				err = -EINVAL;
1366			break;
1367
1368		case RES_EQ_HW:
1369			if (r->com.state != RES_EQ_RESERVED)
1370				err = -EINVAL;
1371			break;
1372
1373		default:
1374			err = -EINVAL;
1375		}
1376
1377		if (!err) {
1378			r->com.from_state = r->com.state;
1379			r->com.to_state = state;
1380			r->com.state = RES_EQ_BUSY;
1381			if (eq)
1382				*eq = r;
1383		}
1384	}
1385
1386	spin_unlock_irq(mlx4_tlock(dev));
1387
1388	return err;
1389}
1390
1391static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
1392				enum res_cq_states state, struct res_cq **cq)
1393{
1394	struct mlx4_priv *priv = mlx4_priv(dev);
1395	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1396	struct res_cq *r;
1397	int err;
1398
1399	spin_lock_irq(mlx4_tlock(dev));
1400	r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
1401	if (!r)
1402		err = -ENOENT;
1403	else if (r->com.owner != slave)
1404		err = -EPERM;
1405	else {
1406		switch (state) {
1407		case RES_CQ_BUSY:
1408			err = -EBUSY;
1409			break;
1410
1411		case RES_CQ_ALLOCATED:
1412			if (r->com.state != RES_CQ_HW)
1413				err = -EINVAL;
1414			else if (atomic_read(&r->ref_count))
1415				err = -EBUSY;
1416			else
1417				err = 0;
1418			break;
1419
1420		case RES_CQ_HW:
1421			if (r->com.state != RES_CQ_ALLOCATED)
1422				err = -EINVAL;
1423			else
1424				err = 0;
1425			break;
1426
1427		default:
1428			err = -EINVAL;
1429		}
1430
1431		if (!err) {
1432			r->com.from_state = r->com.state;
1433			r->com.to_state = state;
1434			r->com.state = RES_CQ_BUSY;
1435			if (cq)
1436				*cq = r;
1437		}
1438	}
1439
1440	spin_unlock_irq(mlx4_tlock(dev));
1441
1442	return err;
1443}
1444
1445static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1446				 enum res_srq_states state, struct res_srq **srq)
1447{
1448	struct mlx4_priv *priv = mlx4_priv(dev);
1449	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1450	struct res_srq *r;
1451	int err = 0;
1452
1453	spin_lock_irq(mlx4_tlock(dev));
1454	r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
1455	if (!r)
1456		err = -ENOENT;
1457	else if (r->com.owner != slave)
1458		err = -EPERM;
1459	else {
1460		switch (state) {
1461		case RES_SRQ_BUSY:
1462			err = -EINVAL;
1463			break;
1464
1465		case RES_SRQ_ALLOCATED:
1466			if (r->com.state != RES_SRQ_HW)
1467				err = -EINVAL;
1468			else if (atomic_read(&r->ref_count))
1469				err = -EBUSY;
1470			break;
1471
1472		case RES_SRQ_HW:
1473			if (r->com.state != RES_SRQ_ALLOCATED)
1474				err = -EINVAL;
1475			break;
1476
1477		default:
1478			err = -EINVAL;
1479		}
1480
1481		if (!err) {
1482			r->com.from_state = r->com.state;
1483			r->com.to_state = state;
1484			r->com.state = RES_SRQ_BUSY;
1485			if (srq)
1486				*srq = r;
1487		}
1488	}
1489
1490	spin_unlock_irq(mlx4_tlock(dev));
1491
1492	return err;
1493}
1494
1495static void res_abort_move(struct mlx4_dev *dev, int slave,
1496			   enum mlx4_resource type, int id)
1497{
1498	struct mlx4_priv *priv = mlx4_priv(dev);
1499	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1500	struct res_common *r;
1501
1502	spin_lock_irq(mlx4_tlock(dev));
1503	r = res_tracker_lookup(&tracker->res_tree[type], id);
1504	if (r && (r->owner == slave))
1505		r->state = r->from_state;
1506	spin_unlock_irq(mlx4_tlock(dev));
1507}
1508
1509static void res_end_move(struct mlx4_dev *dev, int slave,
1510			 enum mlx4_resource type, int id)
1511{
1512	struct mlx4_priv *priv = mlx4_priv(dev);
1513	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1514	struct res_common *r;
1515
1516	spin_lock_irq(mlx4_tlock(dev));
1517	r = res_tracker_lookup(&tracker->res_tree[type], id);
1518	if (r && (r->owner == slave))
1519		r->state = r->to_state;
1520	spin_unlock_irq(mlx4_tlock(dev));
1521}
1522
1523static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
1524{
1525	return mlx4_is_qp_reserved(dev, qpn) &&
1526		(mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
1527}
1528
1529static int fw_reserved(struct mlx4_dev *dev, int qpn)
1530{
1531	return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
1532}
1533
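/*
 * Allocate QP resources on behalf of a slave: RES_OP_RESERVE charges the
 * slave's quota and reserves a range of QP numbers, while RES_OP_MAP_ICM
 * moves a specific QPN to the MAPPED state, allocating ICM for it unless it
 * is firmware-reserved.
 */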
1534static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1535			u64 in_param, u64 *out_param)
1536{
1537	int err;
1538	int count;
1539	int align;
1540	int base;
1541	int qpn;
1542	u8 flags;
1543
1544	switch (op) {
1545	case RES_OP_RESERVE:
1546		count = get_param_l(&in_param) & 0xffffff;
1547		flags = get_param_l(&in_param) >> 24;
1548		align = get_param_h(&in_param);
1549		err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
1550		if (err)
1551			return err;
1552
1553		err = __mlx4_qp_reserve_range(dev, count, align, &base, flags);
1554		if (err) {
1555			mlx4_release_resource(dev, slave, RES_QP, count, 0);
1556			return err;
1557		}
1558
1559		err = add_res_range(dev, slave, base, count, RES_QP, 0);
1560		if (err) {
1561			mlx4_release_resource(dev, slave, RES_QP, count, 0);
1562			__mlx4_qp_release_range(dev, base, count);
1563			return err;
1564		}
1565		set_param_l(out_param, base);
1566		break;
1567	case RES_OP_MAP_ICM:
1568		qpn = get_param_l(&in_param) & 0x7fffff;
1569		if (valid_reserved(dev, slave, qpn)) {
1570			err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
1571			if (err)
1572				return err;
1573		}
1574
1575		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
1576					   NULL, 1);
1577		if (err)
1578			return err;
1579
1580		if (!fw_reserved(dev, qpn)) {
1581			err = __mlx4_qp_alloc_icm(dev, qpn);
1582			if (err) {
1583				res_abort_move(dev, slave, RES_QP, qpn);
1584				return err;
1585			}
1586		}
1587
1588		res_end_move(dev, slave, RES_QP, qpn);
1589		break;
1590
1591	default:
1592		err = -EINVAL;
1593		break;
1594	}
1595	return err;
1596}
1597
1598static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1599			 u64 in_param, u64 *out_param)
1600{
1601	int err = -EINVAL;
1602	int base;
1603	int order;
1604
1605	if (op != RES_OP_RESERVE_AND_MAP)
1606		return err;
1607
1608	order = get_param_l(&in_param);
1609
1610	err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
1611	if (err)
1612		return err;
1613
1614	base = __mlx4_alloc_mtt_range(dev, order);
1615	if (base == -1) {
1616		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1617		return -ENOMEM;
1618	}
1619
1620	err = add_res_range(dev, slave, base, 1, RES_MTT, order);
1621	if (err) {
1622		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1623		__mlx4_free_mtt_range(dev, base, order);
1624	} else
1625		set_param_l(out_param, base);
1626
1627	return err;
1628}
1629
1630static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1631			 u64 in_param, u64 *out_param)
1632{
1633	int err = -EINVAL;
1634	int index;
1635	int id;
1636	struct res_mpt *mpt;
1637
1638	switch (op) {
1639	case RES_OP_RESERVE:
1640		err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
1641		if (err)
1642			break;
1643
1644		index = __mlx4_mpt_reserve(dev);
1645		if (index == -1) {
1646			mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1647			break;
1648		}
1649		id = index & mpt_mask(dev);
1650
1651		err = add_res_range(dev, slave, id, 1, RES_MPT, index);
1652		if (err) {
1653			mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1654			__mlx4_mpt_release(dev, index);
1655			break;
1656		}
1657		set_param_l(out_param, index);
1658		break;
1659	case RES_OP_MAP_ICM:
1660		index = get_param_l(&in_param);
1661		id = index & mpt_mask(dev);
1662		err = mr_res_start_move_to(dev, slave, id,
1663					   RES_MPT_MAPPED, &mpt);
1664		if (err)
1665			return err;
1666
1667		err = __mlx4_mpt_alloc_icm(dev, mpt->key);
1668		if (err) {
1669			res_abort_move(dev, slave, RES_MPT, id);
1670			return err;
1671		}
1672
1673		res_end_move(dev, slave, RES_MPT, id);
1674		break;
1675	}
1676	return err;
1677}
1678
1679static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1680			u64 in_param, u64 *out_param)
1681{
1682	int cqn;
1683	int err;
1684
1685	switch (op) {
1686	case RES_OP_RESERVE_AND_MAP:
1687		err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
1688		if (err)
1689			break;
1690
1691		err = __mlx4_cq_alloc_icm(dev, &cqn);
1692		if (err) {
1693			mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1694			break;
1695		}
1696
1697		err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1698		if (err) {
1699			mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1700			__mlx4_cq_free_icm(dev, cqn);
1701			break;
1702		}
1703
1704		set_param_l(out_param, cqn);
1705		break;
1706
1707	default:
1708		err = -EINVAL;
1709	}
1710
1711	return err;
1712}
1713
1714static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1715			 u64 in_param, u64 *out_param)
1716{
1717	int srqn;
1718	int err;
1719
1720	switch (op) {
1721	case RES_OP_RESERVE_AND_MAP:
1722		err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
1723		if (err)
1724			break;
1725
1726		err = __mlx4_srq_alloc_icm(dev, &srqn);
1727		if (err) {
1728			mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1729			break;
1730		}
1731
1732		err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1733		if (err) {
1734			mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1735			__mlx4_srq_free_icm(dev, srqn);
1736			break;
1737		}
1738
1739		set_param_l(out_param, srqn);
1740		break;
1741
1742	default:
1743		err = -EINVAL;
1744	}
1745
1746	return err;
1747}
1748
1749static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port,
1750				     u8 smac_index, u64 *mac)
1751{
1752	struct mlx4_priv *priv = mlx4_priv(dev);
1753	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1754	struct list_head *mac_list =
1755		&tracker->slave_list[slave].res_list[RES_MAC];
1756	struct mac_res *res, *tmp;
1757
1758	list_for_each_entry_safe(res, tmp, mac_list, list) {
1759		if (res->smac_index == smac_index && res->port == (u8) port) {
1760			*mac = res->mac;
1761			return 0;
1762		}
1763	}
1764	return -ENOENT;
1765}
1766
1767static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index)
1768{
1769	struct mlx4_priv *priv = mlx4_priv(dev);
1770	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1771	struct list_head *mac_list =
1772		&tracker->slave_list[slave].res_list[RES_MAC];
1773	struct mac_res *res, *tmp;
1774
1775	list_for_each_entry_safe(res, tmp, mac_list, list) {
1776		if (res->mac == mac && res->port == (u8) port) {
1777			/* mac found. update ref count */
1778			++res->ref_count;
1779			return 0;
1780		}
1781	}
1782
1783	if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
1784		return -EINVAL;
1785	res = kzalloc(sizeof *res, GFP_KERNEL);
1786	if (!res) {
1787		mlx4_release_resource(dev, slave, RES_MAC, 1, port);
1788		return -ENOMEM;
1789	}
1790	res->mac = mac;
1791	res->port = (u8) port;
1792	res->smac_index = smac_index;
1793	res->ref_count = 1;
1794	list_add_tail(&res->list,
1795		      &tracker->slave_list[slave].res_list[RES_MAC]);
1796	return 0;
1797}
1798
1799
1800static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
1801			       int port)
1802{
1803	struct mlx4_priv *priv = mlx4_priv(dev);
1804	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1805	struct list_head *mac_list =
1806		&tracker->slave_list[slave].res_list[RES_MAC];
1807	struct mac_res *res, *tmp;
1808
1809	list_for_each_entry_safe(res, tmp, mac_list, list) {
1810		if (res->mac == mac && res->port == (u8) port) {
1811			if (!--res->ref_count) {
1812				list_del(&res->list);
1813				mlx4_release_resource(dev, slave, RES_MAC, 1, port);
1814				kfree(res);
1815			}
1816			break;
1817		}
1818	}
1819}
1820
1821static void rem_slave_macs(struct mlx4_dev *dev, int slave)
1822{
1823	struct mlx4_priv *priv = mlx4_priv(dev);
1824	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1825	struct list_head *mac_list =
1826		&tracker->slave_list[slave].res_list[RES_MAC];
1827	struct mac_res *res, *tmp;
1828	int i;
1829
1830	list_for_each_entry_safe(res, tmp, mac_list, list) {
1831		list_del(&res->list);
1832	/* dereference the MAC as many times as the slave referenced it */
1833		for (i = 0; i < res->ref_count; i++)
1834			__mlx4_unregister_mac(dev, res->port, res->mac);
1835		mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
1836		kfree(res);
1837	}
1838}
1839
1840static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1841			 u64 in_param, u64 *out_param, int in_port)
1842{
1843	int err = -EINVAL;
1844	int port;
1845	u64 mac;
1846	u8 smac_index = 0;
1847
1848	if (op != RES_OP_RESERVE_AND_MAP)
1849		return err;
1850
1851	port = !in_port ? get_param_l(out_param) : in_port;
1852	mac = in_param;
1853
1854	err = __mlx4_register_mac(dev, port, mac);
1855	if (err >= 0) {
1856		smac_index = err;
1857		set_param_l(out_param, err);
1858		err = 0;
1859	}
1860
1861	if (!err) {
1862		err = mac_add_to_slave(dev, slave, mac, port, smac_index);
1863		if (err)
1864			__mlx4_unregister_mac(dev, port, mac);
1865	}
1866	return err;
1867}
1868
1869static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
1870			     int port, int vlan_index)
1871{
1872	struct mlx4_priv *priv = mlx4_priv(dev);
1873	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1874	struct list_head *vlan_list =
1875		&tracker->slave_list[slave].res_list[RES_VLAN];
1876	struct vlan_res *res, *tmp;
1877
1878	list_for_each_entry_safe(res, tmp, vlan_list, list) {
1879		if (res->vlan == vlan && res->port == (u8) port) {
1880			/* vlan found. update ref count */
1881			++res->ref_count;
1882			return 0;
1883		}
1884	}
1885
1886	if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
1887		return -EINVAL;
1888	res = kzalloc(sizeof(*res), GFP_KERNEL);
1889	if (!res) {
1890		mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
1891		return -ENOMEM;
1892	}
1893	res->vlan = vlan;
1894	res->port = (u8) port;
1895	res->vlan_index = vlan_index;
1896	res->ref_count = 1;
1897	list_add_tail(&res->list,
1898		      &tracker->slave_list[slave].res_list[RES_VLAN]);
1899	return 0;
1900}
1901
1902
1903static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
1904				int port)
1905{
1906	struct mlx4_priv *priv = mlx4_priv(dev);
1907	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1908	struct list_head *vlan_list =
1909		&tracker->slave_list[slave].res_list[RES_VLAN];
1910	struct vlan_res *res, *tmp;
1911
1912	list_for_each_entry_safe(res, tmp, vlan_list, list) {
1913		if (res->vlan == vlan && res->port == (u8) port) {
1914			if (!--res->ref_count) {
1915				list_del(&res->list);
1916				mlx4_release_resource(dev, slave, RES_VLAN,
1917						      1, port);
1918				kfree(res);
1919			}
1920			break;
1921		}
1922	}
1923}
1924
1925static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
1926{
1927	struct mlx4_priv *priv = mlx4_priv(dev);
1928	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1929	struct list_head *vlan_list =
1930		&tracker->slave_list[slave].res_list[RES_VLAN];
1931	struct vlan_res *res, *tmp;
1932	int i;
1933
1934	list_for_each_entry_safe(res, tmp, vlan_list, list) {
1935		list_del(&res->list);
1936	/* dereference the VLAN as many times as the slave referenced it */
1937		for (i = 0; i < res->ref_count; i++)
1938			__mlx4_unregister_vlan(dev, res->port, res->vlan);
1939		mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
1940		kfree(res);
1941	}
1942}
1943
1944static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1945			  u64 in_param, u64 *out_param, int in_port)
1946{
1947	struct mlx4_priv *priv = mlx4_priv(dev);
1948	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
1949	int err = -EINVAL;
1950	u16 vlan;
1951	int vlan_index;
1952	int port;
1953
1954	port = !in_port ? get_param_l(out_param) : in_port;
1955
1956	if (!port)
1957		return err;
1958
1959	if (op != RES_OP_RESERVE_AND_MAP)
1960		return err;
1961
1962	/* upstream kernels had a NOP for vlan reg/unreg; preserve that behavior */
1963	if (!in_port && port > 0 && port <= dev->caps.num_ports) {
1964		slave_state[slave].old_vlan_api = true;
1965		return 0;
1966	}
1967
1968	vlan = (u16) in_param;
1969
1970	err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
1971	if (!err) {
1972		set_param_l(out_param, (u32) vlan_index);
1973		err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
1974		if (err)
1975			__mlx4_unregister_vlan(dev, port, vlan);
1976	}
1977	return err;
1978}
1979
1980static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1981			     u64 in_param, u64 *out_param, int port)
1982{
1983	u32 index;
1984	int err;
1985
1986	if (op != RES_OP_RESERVE)
1987		return -EINVAL;
1988
1989	err = __mlx4_counter_alloc(dev, slave, port, &index);
1990	if (!err)
1991		set_param_l(out_param, index);
1992
1993	return err;
1994}
1995
1996static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1997			   u64 in_param, u64 *out_param)
1998{
1999	u32 xrcdn;
2000	int err;
2001
2002	if (op != RES_OP_RESERVE)
2003		return -EINVAL;
2004
2005	err = __mlx4_xrcd_alloc(dev, &xrcdn);
2006	if (err)
2007		return err;
2008
2009	err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2010	if (err)
2011		__mlx4_xrcd_free(dev, xrcdn);
2012	else
2013		set_param_l(out_param, xrcdn);
2014
2015	return err;
2016}
2017
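/*
 * Dispatch an ALLOC_RES command from a slave to the per-type allocation
 * handler.  The low byte of in_modifier selects the resource type; bits
 * 8-15 optionally carry the port number (MAC, VLAN and counters).
 */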
2018int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
2019			   struct mlx4_vhcr *vhcr,
2020			   struct mlx4_cmd_mailbox *inbox,
2021			   struct mlx4_cmd_mailbox *outbox,
2022			   struct mlx4_cmd_info *cmd)
2023{
2024	int err;
2025	int alop = vhcr->op_modifier;
2026
2027	switch (vhcr->in_modifier & 0xFF) {
2028	case RES_QP:
2029		err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
2030				   vhcr->in_param, &vhcr->out_param);
2031		break;
2032
2033	case RES_MTT:
2034		err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2035				    vhcr->in_param, &vhcr->out_param);
2036		break;
2037
2038	case RES_MPT:
2039		err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2040				    vhcr->in_param, &vhcr->out_param);
2041		break;
2042
2043	case RES_CQ:
2044		err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2045				   vhcr->in_param, &vhcr->out_param);
2046		break;
2047
2048	case RES_SRQ:
2049		err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2050				    vhcr->in_param, &vhcr->out_param);
2051		break;
2052
2053	case RES_MAC:
2054		err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
2055				    vhcr->in_param, &vhcr->out_param,
2056				    (vhcr->in_modifier >> 8) & 0xFF);
2057		break;
2058
2059	case RES_VLAN:
2060		err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
2061				     vhcr->in_param, &vhcr->out_param,
2062				     (vhcr->in_modifier >> 8) & 0xFF);
2063		break;
2064
2065	case RES_COUNTER:
2066		err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
2067					vhcr->in_param, &vhcr->out_param,
2068					(vhcr->in_modifier >> 8) & 0xFF);
2069		break;
2070
2071	case RES_XRCD:
2072		err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
2073				      vhcr->in_param, &vhcr->out_param);
2074		break;
2075
2076	default:
2077		err = -EINVAL;
2078		break;
2079	}
2080
2081	return err;
2082}
2083
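/*
 * The *_free_res() handlers below undo the corresponding *_alloc_res()
 * operations: they drop the tracker entries, return the quota via
 * mlx4_release_resource() and release the underlying hardware resource.
 */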
2084static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2085		       u64 in_param)
2086{
2087	int err;
2088	int count;
2089	int base;
2090	int qpn;
2091
2092	switch (op) {
2093	case RES_OP_RESERVE:
2094		base = get_param_l(&in_param) & 0x7fffff;
2095		count = get_param_h(&in_param);
2096		err = rem_res_range(dev, slave, base, count, RES_QP, 0);
2097		if (err)
2098			break;
2099		mlx4_release_resource(dev, slave, RES_QP, count, 0);
2100		__mlx4_qp_release_range(dev, base, count);
2101		break;
2102	case RES_OP_MAP_ICM:
2103		qpn = get_param_l(&in_param) & 0x7fffff;
2104		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
2105					   NULL, 0);
2106		if (err)
2107			return err;
2108
2109		if (!fw_reserved(dev, qpn))
2110			__mlx4_qp_free_icm(dev, qpn);
2111
2112		res_end_move(dev, slave, RES_QP, qpn);
2113
2114		if (valid_reserved(dev, slave, qpn))
2115			err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
2116		break;
2117	default:
2118		err = -EINVAL;
2119		break;
2120	}
2121	return err;
2122}
2123
2124static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2125			u64 in_param, u64 *out_param)
2126{
2127	int err = -EINVAL;
2128	int base;
2129	int order;
2130
2131	if (op != RES_OP_RESERVE_AND_MAP)
2132		return err;
2133
2134	base = get_param_l(&in_param);
2135	order = get_param_h(&in_param);
2136	err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
2137	if (!err) {
2138		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
2139		__mlx4_free_mtt_range(dev, base, order);
2140	}
2141	return err;
2142}
2143
2144static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2145			u64 in_param)
2146{
2147	int err = -EINVAL;
2148	int index;
2149	int id;
2150	struct res_mpt *mpt;
2151
2152	switch (op) {
2153	case RES_OP_RESERVE:
2154		index = get_param_l(&in_param);
2155		id = index & mpt_mask(dev);
2156		err = get_res(dev, slave, id, RES_MPT, &mpt);
2157		if (err)
2158			break;
2159		index = mpt->key;
2160		put_res(dev, slave, id, RES_MPT);
2161
2162		err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
2163		if (err)
2164			break;
2165		mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
2166		__mlx4_mpt_release(dev, index);
2167		break;
2168	case RES_OP_MAP_ICM:
2169		index = get_param_l(&in_param);
2170		id = index & mpt_mask(dev);
2171		err = mr_res_start_move_to(dev, slave, id,
2172					   RES_MPT_RESERVED, &mpt);
2173		if (err)
2174			return err;
2175
2176		__mlx4_mpt_free_icm(dev, mpt->key);
2177		res_end_move(dev, slave, RES_MPT, id);
2178		return err;
2180	default:
2181		err = -EINVAL;
2182		break;
2183	}
2184	return err;
2185}
2186
2187static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2188		       u64 in_param, u64 *out_param)
2189{
2190	int cqn;
2191	int err;
2192
2193	switch (op) {
2194	case RES_OP_RESERVE_AND_MAP:
2195		cqn = get_param_l(&in_param);
2196		err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
2197		if (err)
2198			break;
2199
2200		mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
2201		__mlx4_cq_free_icm(dev, cqn);
2202		break;
2203
2204	default:
2205		err = -EINVAL;
2206		break;
2207	}
2208
2209	return err;
2210}
2211
2212static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2213			u64 in_param, u64 *out_param)
2214{
2215	int srqn;
2216	int err;
2217
2218	switch (op) {
2219	case RES_OP_RESERVE_AND_MAP:
2220		srqn = get_param_l(&in_param);
2221		err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
2222		if (err)
2223			break;
2224
2225		mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
2226		__mlx4_srq_free_icm(dev, srqn);
2227		break;
2228
2229	default:
2230		err = -EINVAL;
2231		break;
2232	}
2233
2234	return err;
2235}
2236
2237static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2238			    u64 in_param, u64 *out_param, int in_port)
2239{
2240	int port;
2241	int err = 0;
2242
2243	switch (op) {
2244	case RES_OP_RESERVE_AND_MAP:
2245		port = !in_port ? get_param_l(out_param) : in_port;
2246		mac_del_from_slave(dev, slave, in_param, port);
2247		__mlx4_unregister_mac(dev, port, in_param);
2248		break;
2249	default:
2250		err = -EINVAL;
2251		break;
2252	}
2253
2254	return err;
2255
2256}
2257
2258static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2259			    u64 in_param, u64 *out_param, int port)
2260{
2261	struct mlx4_priv *priv = mlx4_priv(dev);
2262	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2263	int err = 0;
2264
2265	switch (op) {
2266	case RES_OP_RESERVE_AND_MAP:
2267		if (slave_state[slave].old_vlan_api)
2268			return 0;
2269		if (!port)
2270			return -EINVAL;
2271		vlan_del_from_slave(dev, slave, in_param, port);
2272		__mlx4_unregister_vlan(dev, port, in_param);
2273		break;
2274	default:
2275		err = -EINVAL;
2276		break;
2277	}
2278
2279	return err;
2280}
2281
2282static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2283			    u64 in_param, u64 *out_param, int port)
2284{
2285	int index;
2286
2287	if (op != RES_OP_RESERVE)
2288		return -EINVAL;
2289
2290	index = get_param_l(&in_param);
2291
2292	__mlx4_counter_free(dev, slave, port, index);
2293
2294	return 0;
2295}
2296
2297static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2298			  u64 in_param, u64 *out_param)
2299{
2300	int xrcdn;
2301	int err;
2302
2303	if (op != RES_OP_RESERVE)
2304		return -EINVAL;
2305
2306	xrcdn = get_param_l(&in_param);
2307	err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2308	if (err)
2309		return err;
2310
2311	__mlx4_xrcd_free(dev, xrcdn);
2312
2313	return err;
2314}
2315
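/*
 * Dispatch a FREE_RES command issued by a slave to the per-resource free
 * handler above, mirroring mlx4_ALLOC_RES_wrapper().
 */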
2316int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
2317			  struct mlx4_vhcr *vhcr,
2318			  struct mlx4_cmd_mailbox *inbox,
2319			  struct mlx4_cmd_mailbox *outbox,
2320			  struct mlx4_cmd_info *cmd)
2321{
2322	int err = -EINVAL;
2323	int alop = vhcr->op_modifier;
2324
2325	switch (vhcr->in_modifier & 0xFF) {
2326	case RES_QP:
2327		err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
2328				  vhcr->in_param);
2329		break;
2330
2331	case RES_MTT:
2332		err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
2333				   vhcr->in_param, &vhcr->out_param);
2334		break;
2335
2336	case RES_MPT:
2337		err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
2338				   vhcr->in_param);
2339		break;
2340
2341	case RES_CQ:
2342		err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
2343				  vhcr->in_param, &vhcr->out_param);
2344		break;
2345
2346	case RES_SRQ:
2347		err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
2348				   vhcr->in_param, &vhcr->out_param);
2349		break;
2350
2351	case RES_MAC:
2352		err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
2353				   vhcr->in_param, &vhcr->out_param,
2354				   (vhcr->in_modifier >> 8) & 0xFF);
2355		break;
2356
2357	case RES_VLAN:
2358		err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
2359				    vhcr->in_param, &vhcr->out_param,
2360				    (vhcr->in_modifier >> 8) & 0xFF);
2361		break;
2362
2363	case RES_COUNTER:
2364		err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
2365				       vhcr->in_param, &vhcr->out_param,
2366				       (vhcr->in_modifier >> 8) & 0xFF);
2367		break;
2368
2369	case RES_XRCD:
2370		err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
2371				     vhcr->in_param, &vhcr->out_param);
		break;
2372
2373	default:
2374		break;
2375	}
2376	return err;
2377}
2378
2379/* ugly but other choices are uglier */
2380static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
2381{
2382	return (be32_to_cpu(mpt->flags) >> 9) & 1;
2383}
2384
2385static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
2386{
2387	return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
2388}
2389
2390static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
2391{
2392	return be32_to_cpu(mpt->mtt_sz);
2393}
2394
2395static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
2396{
2397	return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
2398}
2399
2400static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
2401{
2402	return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
2403}
2404
2405static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
2406{
2407	return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
2408}
2409
2410static int mr_is_region(struct mlx4_mpt_entry *mpt)
2411{
2412	return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
2413}
2414
2415static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
2416{
2417	return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
2418}
2419
2420static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
2421{
2422	return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
2423}
2424
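/*
 * Number of MTT pages a QP context needs: SQ and RQ sizes are derived from
 * the log size/stride fields, and the RQ contributes nothing for SRQ, RSS
 * and XRC QPs.
 */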
2425static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
2426{
2427	int page_shift = (qpc->log_page_size & 0x3f) + 12;
2428	int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
2429	int log_sq_stride = qpc->sq_size_stride & 7;
2430	int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
2431	int log_rq_stride = qpc->rq_size_stride & 7;
2432	int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
2433	int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
2434	u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
2435	int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
2436	int sq_size;
2437	int rq_size;
2438	int total_pages;
2439	int total_mem;
2440	int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
2441
2442	sq_size = 1 << (log_sq_size + log_sq_stride + 4);
2443	rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
2444	total_mem = sq_size + rq_size;
2445	total_pages =
2446		roundup_pow_of_two((total_mem + (page_offset << 6)) >>
2447				   page_shift);
2448
2449	return total_pages;
2450}
2451
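/* Ensure [start, start + size) lies entirely within the slave's MTT range. */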
2452static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
2453			   int size, struct res_mtt *mtt)
2454{
2455	int res_start = mtt->com.res_id;
2456	int res_size = (1 << mtt->order);
2457
2458	if (start < res_start || start + size > res_start + res_size)
2459		return -EPERM;
2460	return 0;
2461}
2462
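/*
 * SW2HW_MPT on behalf of a slave: reject memory windows and FMRs with bind
 * enabled, make sure the PD belongs to the calling slave, validate the MTT
 * range and only then pass the command on to the firmware.
 */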
2463int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2464			   struct mlx4_vhcr *vhcr,
2465			   struct mlx4_cmd_mailbox *inbox,
2466			   struct mlx4_cmd_mailbox *outbox,
2467			   struct mlx4_cmd_info *cmd)
2468{
2469	int err;
2470	int index = vhcr->in_modifier;
2471	struct res_mtt *mtt;
2472	struct res_mpt *mpt;
2473	int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
2474	int phys;
2475	int id;
2476	u32 pd;
2477	int pd_slave;
2478
2479	id = index & mpt_mask(dev);
2480	err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
2481	if (err)
2482		return err;
2483
2484	/* Currently disable memory windows since this feature isn't tested yet
2485	 * under virtualization.
2486	 */
2487	if (!mr_is_region(inbox->buf)) {
2488		err = -ENOSYS;
2489		goto ex_abort;
2490	}
2491
2492	/* Make sure that the PD bits related to the slave id are zeros. */
2493	pd = mr_get_pd(inbox->buf);
2494	pd_slave = (pd >> 17) & 0x7f;
2495	if (pd_slave != 0 && pd_slave != slave) {
2496		err = -EPERM;
2497		goto ex_abort;
2498	}
2499
2500	if (mr_is_fmr(inbox->buf)) {
2501		/* FMR and Bind Enable are forbidden in slave devices. */
2502		if (mr_is_bind_enabled(inbox->buf)) {
2503			err = -EPERM;
2504			goto ex_abort;
2505		}
2506		/* FMR and Memory Windows are also forbidden. */
2507		if (!mr_is_region(inbox->buf)) {
2508			err = -EPERM;
2509			goto ex_abort;
2510		}
2511	}
2512
2513	phys = mr_phys_mpt(inbox->buf);
2514	if (!phys) {
2515		err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2516		if (err)
2517			goto ex_abort;
2518
2519		err = check_mtt_range(dev, slave, mtt_base,
2520				      mr_get_mtt_size(inbox->buf), mtt);
2521		if (err)
2522			goto ex_put;
2523
2524		mpt->mtt = mtt;
2525	}
2526
2527	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2528	if (err)
2529		goto ex_put;
2530
2531	if (!phys) {
2532		atomic_inc(&mtt->ref_count);
2533		put_res(dev, slave, mtt->com.res_id, RES_MTT);
2534	}
2535
2536	res_end_move(dev, slave, RES_MPT, id);
2537	return 0;
2538
2539ex_put:
2540	if (!phys)
2541		put_res(dev, slave, mtt->com.res_id, RES_MTT);
2542ex_abort:
2543	res_abort_move(dev, slave, RES_MPT, id);
2544
2545	return err;
2546}
2547
2548int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2549			   struct mlx4_vhcr *vhcr,
2550			   struct mlx4_cmd_mailbox *inbox,
2551			   struct mlx4_cmd_mailbox *outbox,
2552			   struct mlx4_cmd_info *cmd)
2553{
2554	int err;
2555	int index = vhcr->in_modifier;
2556	struct res_mpt *mpt;
2557	int id;
2558
2559	id = index & mpt_mask(dev);
2560	err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
2561	if (err)
2562		return err;
2563
2564	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2565	if (err)
2566		goto ex_abort;
2567
2568	if (mpt->mtt)
2569		atomic_dec(&mpt->mtt->ref_count);
2570
2571	res_end_move(dev, slave, RES_MPT, id);
2572	return 0;
2573
2574ex_abort:
2575	res_abort_move(dev, slave, RES_MPT, id);
2576
2577	return err;
2578}
2579
2580int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
2581			   struct mlx4_vhcr *vhcr,
2582			   struct mlx4_cmd_mailbox *inbox,
2583			   struct mlx4_cmd_mailbox *outbox,
2584			   struct mlx4_cmd_info *cmd)
2585{
2586	int err;
2587	int index = vhcr->in_modifier;
2588	struct res_mpt *mpt;
2589	int id;
2590
2591	id = index & mpt_mask(dev);
2592	err = get_res(dev, slave, id, RES_MPT, &mpt);
2593	if (err)
2594		return err;
2595
2596	if (mpt->com.from_state != RES_MPT_HW) {
2597		err = -EBUSY;
2598		goto out;
2599	}
2600
2601	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2602
2603out:
2604	put_res(dev, slave, id, RES_MPT);
2605	return err;
2606}
2607
2608static int qp_get_rcqn(struct mlx4_qp_context *qpc)
2609{
2610	return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
2611}
2612
2613static int qp_get_scqn(struct mlx4_qp_context *qpc)
2614{
2615	return be32_to_cpu(qpc->cqn_send) & 0xffffff;
2616}
2617
2618static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
2619{
2620	return be32_to_cpu(qpc->srqn) & 0x1ffffff;
2621}
2622
2623static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
2624				  struct mlx4_qp_context *context)
2625{
2626	u32 qpn = vhcr->in_modifier & 0xffffff;
2627	u32 qkey = 0;
2628
2629	if (mlx4_get_parav_qkey(dev, qpn, &qkey))
2630		return;
2631
2632	/* adjust qkey in qp context */
2633	context->qkey = cpu_to_be32(qkey);
2634}
2635
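/*
 * RST2INIT on behalf of a slave: move the QP to hardware ownership and take
 * references on the MTT, receive/send CQs and optional SRQ it points at, so
 * the tracker keeps those resources alive while the QP is in HW.
 */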
2636int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2637			     struct mlx4_vhcr *vhcr,
2638			     struct mlx4_cmd_mailbox *inbox,
2639			     struct mlx4_cmd_mailbox *outbox,
2640			     struct mlx4_cmd_info *cmd)
2641{
2642	int err;
2643	int qpn = vhcr->in_modifier & 0x7fffff;
2644	struct res_mtt *mtt;
2645	struct res_qp *qp;
2646	struct mlx4_qp_context *qpc = inbox->buf + 8;
2647	int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
2648	int mtt_size = qp_get_mtt_size(qpc);
2649	struct res_cq *rcq;
2650	struct res_cq *scq;
2651	int rcqn = qp_get_rcqn(qpc);
2652	int scqn = qp_get_scqn(qpc);
2653	u32 srqn = qp_get_srqn(qpc) & 0xffffff;
2654	int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
2655	struct res_srq *srq;
2656	int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
2657
2658	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
2659	if (err)
2660		return err;
2661	qp->local_qpn = local_qpn;
2662	qp->sched_queue = 0;
2663	qp->param3 = 0;
2664	qp->vlan_control = 0;
2665	qp->fvl_rx = 0;
2666	qp->pri_path_fl = 0;
2667	qp->vlan_index = 0;
2668	qp->feup = 0;
2669	qp->qpc_flags = be32_to_cpu(qpc->flags);
2670
2671	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2672	if (err)
2673		goto ex_abort;
2674
2675	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2676	if (err)
2677		goto ex_put_mtt;
2678
2679	err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
2680	if (err)
2681		goto ex_put_mtt;
2682
2683	if (scqn != rcqn) {
2684		err = get_res(dev, slave, scqn, RES_CQ, &scq);
2685		if (err)
2686			goto ex_put_rcq;
2687	} else
2688		scq = rcq;
2689
2690	if (use_srq) {
2691		err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2692		if (err)
2693			goto ex_put_scq;
2694	}
2695
2696	adjust_proxy_tun_qkey(dev, vhcr, qpc);
2697	update_pkey_index(dev, slave, inbox);
2698	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2699	if (err)
2700		goto ex_put_srq;
2701	atomic_inc(&mtt->ref_count);
2702	qp->mtt = mtt;
2703	atomic_inc(&rcq->ref_count);
2704	qp->rcq = rcq;
2705	atomic_inc(&scq->ref_count);
2706	qp->scq = scq;
2707
2708	if (scqn != rcqn)
2709		put_res(dev, slave, scqn, RES_CQ);
2710
2711	if (use_srq) {
2712		atomic_inc(&srq->ref_count);
2713		put_res(dev, slave, srqn, RES_SRQ);
2714		qp->srq = srq;
2715	}
2716	put_res(dev, slave, rcqn, RES_CQ);
2717	put_res(dev, slave, mtt_base, RES_MTT);
2718	res_end_move(dev, slave, RES_QP, qpn);
2719
2720	return 0;
2721
2722ex_put_srq:
2723	if (use_srq)
2724		put_res(dev, slave, srqn, RES_SRQ);
2725ex_put_scq:
2726	if (scqn != rcqn)
2727		put_res(dev, slave, scqn, RES_CQ);
2728ex_put_rcq:
2729	put_res(dev, slave, rcqn, RES_CQ);
2730ex_put_mtt:
2731	put_res(dev, slave, mtt_base, RES_MTT);
2732ex_abort:
2733	res_abort_move(dev, slave, RES_QP, qpn);
2734
2735	return err;
2736}
2737
2738static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
2739{
2740	return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
2741}
2742
2743static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
2744{
2745	int log_eq_size = eqc->log_eq_size & 0x1f;
2746	int page_shift = (eqc->log_page_size & 0x3f) + 12;
2747
2748	if (log_eq_size + 5 < page_shift)
2749		return 1;
2750
2751	return 1 << (log_eq_size + 5 - page_shift);
2752}
2753
2754static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
2755{
2756	return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
2757}
2758
2759static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
2760{
2761	int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
2762	int page_shift = (cqc->log_page_size & 0x3f) + 12;
2763
2764	if (log_cq_size + 5 < page_shift)
2765		return 1;
2766
2767	return 1 << (log_cq_size + 5 - page_shift);
2768}
2769
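/*
 * SW2HW_EQ on behalf of a slave: track the EQ under a res_id that combines
 * the slave number and EQN, validate its MTT range and pass the command to
 * the firmware.
 */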
2770int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2771			  struct mlx4_vhcr *vhcr,
2772			  struct mlx4_cmd_mailbox *inbox,
2773			  struct mlx4_cmd_mailbox *outbox,
2774			  struct mlx4_cmd_info *cmd)
2775{
2776	int err;
2777	int eqn = vhcr->in_modifier;
2778	int res_id = (slave << 8) | eqn;
2779	struct mlx4_eq_context *eqc = inbox->buf;
2780	int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
2781	int mtt_size = eq_get_mtt_size(eqc);
2782	struct res_eq *eq;
2783	struct res_mtt *mtt;
2784
2785	err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2786	if (err)
2787		return err;
2788	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
2789	if (err)
2790		goto out_add;
2791
2792	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2793	if (err)
2794		goto out_move;
2795
2796	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2797	if (err)
2798		goto out_put;
2799
2800	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2801	if (err)
2802		goto out_put;
2803
2804	atomic_inc(&mtt->ref_count);
2805	eq->mtt = mtt;
2806	put_res(dev, slave, mtt->com.res_id, RES_MTT);
2807	res_end_move(dev, slave, RES_EQ, res_id);
2808	return 0;
2809
2810out_put:
2811	put_res(dev, slave, mtt->com.res_id, RES_MTT);
2812out_move:
2813	res_abort_move(dev, slave, RES_EQ, res_id);
2814out_add:
2815	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2816	return err;
2817}
2818
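/*
 * Find the slave-owned MTT segment that contains [start, start + len) and
 * mark it busy; the caller releases it with put_res().
 */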
2819static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
2820			      int len, struct res_mtt **res)
2821{
2822	struct mlx4_priv *priv = mlx4_priv(dev);
2823	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2824	struct res_mtt *mtt;
2825	int err = -EINVAL;
2826
2827	spin_lock_irq(mlx4_tlock(dev));
2828	list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
2829			    com.list) {
2830		if (!check_mtt_range(dev, slave, start, len, mtt)) {
2831			*res = mtt;
2832			mtt->com.from_state = mtt->com.state;
2833			mtt->com.state = RES_MTT_BUSY;
2834			err = 0;
2835			break;
2836		}
2837	}
2838	spin_unlock_irq(mlx4_tlock(dev));
2839
2840	return err;
2841}
2842
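/*
 * For RC/UC state transitions requested by a slave, check that any primary
 * or alternate path MGID index is within the number of GIDs assigned to
 * that slave.
 */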
2843static int verify_qp_parameters(struct mlx4_dev *dev,
2844				struct mlx4_cmd_mailbox *inbox,
2845				enum qp_transition transition, u8 slave)
2846{
2847	u32			qp_type;
2848	struct mlx4_qp_context	*qp_ctx;
2849	enum mlx4_qp_optpar	optpar;
2850	int port;
2851	int num_gids;
2852
2853	qp_ctx  = inbox->buf + 8;
2854	qp_type	= (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
2855	optpar	= be32_to_cpu(*(__be32 *) inbox->buf);
2856
2857	switch (qp_type) {
2858	case MLX4_QP_ST_RC:
2859	case MLX4_QP_ST_UC:
2860		switch (transition) {
2861		case QP_TRANS_INIT2RTR:
2862		case QP_TRANS_RTR2RTS:
2863		case QP_TRANS_RTS2RTS:
2864		case QP_TRANS_SQD2SQD:
2865		case QP_TRANS_SQD2RTS:
2866			if (slave != mlx4_master_func_num(dev)) {
2867				if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
2868					port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
2869					if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
2870						num_gids = mlx4_get_slave_num_gids(dev, slave);
2871					else
2872						num_gids = 1;
2873					if (qp_ctx->pri_path.mgid_index >= num_gids)
2874						return -EINVAL;
2875				}
2876				if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
2877					port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
2878					if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
2879						num_gids = mlx4_get_slave_num_gids(dev, slave);
2880					else
2881						num_gids = 1;
2882					if (qp_ctx->alt_path.mgid_index >= num_gids)
2883						return -EINVAL;
2884				}
			}
2885			break;
2886		default:
2887			break;
2888		}
2889
2890		break;
2891	default:
2892		break;
2893	}
2894
2895	return 0;
2896}
2897
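/*
 * WRITE_MTT on behalf of a slave: locate the MTT segment covering the
 * written range, convert the mailbox page list to host-endian addresses and
 * use the software __mlx4_write_mtt() path instead of the firmware command.
 */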
2898int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
2899			   struct mlx4_vhcr *vhcr,
2900			   struct mlx4_cmd_mailbox *inbox,
2901			   struct mlx4_cmd_mailbox *outbox,
2902			   struct mlx4_cmd_info *cmd)
2903{
2904	struct mlx4_mtt mtt;
2905	__be64 *page_list = inbox->buf;
2906	u64 *pg_list = (u64 *)page_list;
2907	int i;
2908	struct res_mtt *rmtt = NULL;
2909	int start = be64_to_cpu(page_list[0]);
2910	int npages = vhcr->in_modifier;
2911	int err;
2912
2913	err = get_containing_mtt(dev, slave, start, npages, &rmtt);
2914	if (err)
2915		return err;
2916
2917	/* Call the SW implementation of write_mtt:
2918	 * - Prepare a dummy mtt struct
2919	 * - Translate inbox contents to simple addresses in host endianness */
2920	mtt.offset = 0;  /* TBD this is broken but I don't handle it since
2921			    we don't really use it */
2922	mtt.order = 0;
2923	mtt.page_shift = 0;
2924	for (i = 0; i < npages; ++i)
2925		pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
2926
2927	err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
2928			       ((u64 *)page_list + 2));
2929
2930	if (rmtt)
2931		put_res(dev, slave, rmtt->com.res_id, RES_MTT);
2932
2933	return err;
2934}
2935
2936int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
2937			  struct mlx4_vhcr *vhcr,
2938			  struct mlx4_cmd_mailbox *inbox,
2939			  struct mlx4_cmd_mailbox *outbox,
2940			  struct mlx4_cmd_info *cmd)
2941{
2942	int eqn = vhcr->in_modifier;
2943	int res_id = eqn | (slave << 8);
2944	struct res_eq *eq;
2945	int err;
2946
2947	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
2948	if (err)
2949		return err;
2950
2951	err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
2952	if (err)
2953		goto ex_abort;
2954
2955	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2956	if (err)
2957		goto ex_put;
2958
2959	atomic_dec(&eq->mtt->ref_count);
2960	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2961	res_end_move(dev, slave, RES_EQ, res_id);
2962	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
2963
2964	return 0;
2965
2966ex_put:
2967	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
2968ex_abort:
2969	res_abort_move(dev, slave, RES_EQ, res_id);
2970
2971	return err;
2972}
2973
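/*
 * Generate an event on a slave's event queue.  The slave must be active and
 * must have registered an EQ for this event type; the EQE is copied into a
 * mailbox and delivered through the GEN_EQE firmware command.
 */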
2974int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
2975{
2976	struct mlx4_priv *priv = mlx4_priv(dev);
2977	struct mlx4_slave_event_eq_info *event_eq;
2978	struct mlx4_cmd_mailbox *mailbox;
2979	u32 in_modifier = 0;
2980	int err;
2981	int res_id;
2982	struct res_eq *req;
2983
2984	if (!priv->mfunc.master.slave_state)
2985		return -EINVAL;
2986
2987	/* check for slave valid, slave not PF, and slave active */
2988	if (slave < 0 || slave >= dev->num_slaves ||
2989	    slave == dev->caps.function ||
2990	    !priv->mfunc.master.slave_state[slave].active)
2991		return 0;
2992
2993	event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
2994
2995	/* Create the event only if the slave is registered */
2996	if (event_eq->eqn < 0)
2997		return 0;
2998
2999	mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3000	res_id = (slave << 8) | event_eq->eqn;
3001	err = get_res(dev, slave, res_id, RES_EQ, &req);
3002	if (err)
3003		goto unlock;
3004
3005	if (req->com.from_state != RES_EQ_HW) {
3006		err = -EINVAL;
3007		goto put;
3008	}
3009
3010	mailbox = mlx4_alloc_cmd_mailbox(dev);
3011	if (IS_ERR(mailbox)) {
3012		err = PTR_ERR(mailbox);
3013		goto put;
3014	}
3015
3016	if (eqe->type == MLX4_EVENT_TYPE_CMD) {
3017		++event_eq->token;
3018		eqe->event.cmd.token = cpu_to_be16(event_eq->token);
3019	}
3020
3021	memcpy(mailbox->buf, (u8 *) eqe, 28);
3022
3023	in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);
3024
3025	err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
3026		       MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
3027		       MLX4_CMD_NATIVE);
3028
3029	put_res(dev, slave, res_id, RES_EQ);
3030	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3031	mlx4_free_cmd_mailbox(dev, mailbox);
3032	return err;
3033
3034put:
3035	put_res(dev, slave, res_id, RES_EQ);
3036
3037unlock:
3038	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3039	return err;
3040}
3041
3042int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
3043			  struct mlx4_vhcr *vhcr,
3044			  struct mlx4_cmd_mailbox *inbox,
3045			  struct mlx4_cmd_mailbox *outbox,
3046			  struct mlx4_cmd_info *cmd)
3047{
3048	int eqn = vhcr->in_modifier;
3049	int res_id = eqn | (slave << 8);
3050	struct res_eq *eq;
3051	int err;
3052
3053	err = get_res(dev, slave, res_id, RES_EQ, &eq);
3054	if (err)
3055		return err;
3056
3057	if (eq->com.from_state != RES_EQ_HW) {
3058		err = -EINVAL;
3059		goto ex_put;
3060	}
3061
3062	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3063
3064ex_put:
3065	put_res(dev, slave, res_id, RES_EQ);
3066	return err;
3067}
3068
3069int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3070			  struct mlx4_vhcr *vhcr,
3071			  struct mlx4_cmd_mailbox *inbox,
3072			  struct mlx4_cmd_mailbox *outbox,
3073			  struct mlx4_cmd_info *cmd)
3074{
3075	int err;
3076	int cqn = vhcr->in_modifier;
3077	struct mlx4_cq_context *cqc = inbox->buf;
3078	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3079	struct res_cq *cq;
3080	struct res_mtt *mtt;
3081
3082	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
3083	if (err)
3084		return err;
3085	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3086	if (err)
3087		goto out_move;
3088	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3089	if (err)
3090		goto out_put;
3091	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3092	if (err)
3093		goto out_put;
3094	atomic_inc(&mtt->ref_count);
3095	cq->mtt = mtt;
3096	put_res(dev, slave, mtt->com.res_id, RES_MTT);
3097	res_end_move(dev, slave, RES_CQ, cqn);
3098	return 0;
3099
3100out_put:
3101	put_res(dev, slave, mtt->com.res_id, RES_MTT);
3102out_move:
3103	res_abort_move(dev, slave, RES_CQ, cqn);
3104	return err;
3105}
3106
3107int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3108			  struct mlx4_vhcr *vhcr,
3109			  struct mlx4_cmd_mailbox *inbox,
3110			  struct mlx4_cmd_mailbox *outbox,
3111			  struct mlx4_cmd_info *cmd)
3112{
3113	int err;
3114	int cqn = vhcr->in_modifier;
3115	struct res_cq *cq;
3116
3117	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
3118	if (err)
3119		return err;
3120	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3121	if (err)
3122		goto out_move;
3123	atomic_dec(&cq->mtt->ref_count);
3124	res_end_move(dev, slave, RES_CQ, cqn);
3125	return 0;
3126
3127out_move:
3128	res_abort_move(dev, slave, RES_CQ, cqn);
3129	return err;
3130}
3131
3132int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3133			  struct mlx4_vhcr *vhcr,
3134			  struct mlx4_cmd_mailbox *inbox,
3135			  struct mlx4_cmd_mailbox *outbox,
3136			  struct mlx4_cmd_info *cmd)
3137{
3138	int cqn = vhcr->in_modifier;
3139	struct res_cq *cq;
3140	int err;
3141
3142	err = get_res(dev, slave, cqn, RES_CQ, &cq);
3143	if (err)
3144		return err;
3145
3146	if (cq->com.from_state != RES_CQ_HW)
3147		goto ex_put;
3148
3149	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3150ex_put:
3151	put_res(dev, slave, cqn, RES_CQ);
3152
3153	return err;
3154}
3155
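/*
 * CQ resize on behalf of a slave: after validating the new range, swap the
 * CQ's tracked MTT from the original segment to the one described in the
 * new CQ context.
 */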
3156static int handle_resize(struct mlx4_dev *dev, int slave,
3157			 struct mlx4_vhcr *vhcr,
3158			 struct mlx4_cmd_mailbox *inbox,
3159			 struct mlx4_cmd_mailbox *outbox,
3160			 struct mlx4_cmd_info *cmd,
3161			 struct res_cq *cq)
3162{
3163	int err;
3164	struct res_mtt *orig_mtt;
3165	struct res_mtt *mtt;
3166	struct mlx4_cq_context *cqc = inbox->buf;
3167	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3168
3169	err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
3170	if (err)
3171		return err;
3172
3173	if (orig_mtt != cq->mtt) {
3174		err = -EINVAL;
3175		goto ex_put;
3176	}
3177
3178	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3179	if (err)
3180		goto ex_put;
3181
3182	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3183	if (err)
3184		goto ex_put1;
3185	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3186	if (err)
3187		goto ex_put1;
3188	atomic_dec(&orig_mtt->ref_count);
3189	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3190	atomic_inc(&mtt->ref_count);
3191	cq->mtt = mtt;
3192	put_res(dev, slave, mtt->com.res_id, RES_MTT);
3193	return 0;
3194
3195ex_put1:
3196	put_res(dev, slave, mtt->com.res_id, RES_MTT);
3197ex_put:
3198	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3199
3200	return err;
3201
3202}
3203
3204int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3205			   struct mlx4_vhcr *vhcr,
3206			   struct mlx4_cmd_mailbox *inbox,
3207			   struct mlx4_cmd_mailbox *outbox,
3208			   struct mlx4_cmd_info *cmd)
3209{
3210	int cqn = vhcr->in_modifier;
3211	struct res_cq *cq;
3212	int err;
3213
3214	err = get_res(dev, slave, cqn, RES_CQ, &cq);
3215	if (err)
3216		return err;
3217
3218	if (cq->com.from_state != RES_CQ_HW)
3219		goto ex_put;
3220
3221	if (vhcr->op_modifier == 0) {
3222		err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
3223		goto ex_put;
3224	}
3225
3226	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3227ex_put:
3228	put_res(dev, slave, cqn, RES_CQ);
3229
3230	return err;
3231}
3232
3233static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
3234{
3235	int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
3236	int log_rq_stride = srqc->logstride & 7;
3237	int page_shift = (srqc->log_page_size & 0x3f) + 12;
3238
3239	if (log_srq_size + log_rq_stride + 4 < page_shift)
3240		return 1;
3241
3242	return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
3243}
3244
3245int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3246			   struct mlx4_vhcr *vhcr,
3247			   struct mlx4_cmd_mailbox *inbox,
3248			   struct mlx4_cmd_mailbox *outbox,
3249			   struct mlx4_cmd_info *cmd)
3250{
3251	int err;
3252	int srqn = vhcr->in_modifier;
3253	struct res_mtt *mtt;
3254	struct res_srq *srq;
3255	struct mlx4_srq_context *srqc = inbox->buf;
3256	int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
3257
3258	if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
3259		return -EINVAL;
3260
3261	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
3262	if (err)
3263		return err;
3264	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3265	if (err)
3266		goto ex_abort;
3267	err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
3268			      mtt);
3269	if (err)
3270		goto ex_put_mtt;
3271
3272	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3273	if (err)
3274		goto ex_put_mtt;
3275
3276	atomic_inc(&mtt->ref_count);
3277	srq->mtt = mtt;
3278	put_res(dev, slave, mtt->com.res_id, RES_MTT);
3279	res_end_move(dev, slave, RES_SRQ, srqn);
3280	return 0;
3281
3282ex_put_mtt:
3283	put_res(dev, slave, mtt->com.res_id, RES_MTT);
3284ex_abort:
3285	res_abort_move(dev, slave, RES_SRQ, srqn);
3286
3287	return err;
3288}
3289
3290int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3291			   struct mlx4_vhcr *vhcr,
3292			   struct mlx4_cmd_mailbox *inbox,
3293			   struct mlx4_cmd_mailbox *outbox,
3294			   struct mlx4_cmd_info *cmd)
3295{
3296	int err;
3297	int srqn = vhcr->in_modifier;
3298	struct res_srq *srq;
3299
3300	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
3301	if (err)
3302		return err;
3303	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3304	if (err)
3305		goto ex_abort;
3306	atomic_dec(&srq->mtt->ref_count);
3307	if (srq->cq)
3308		atomic_dec(&srq->cq->ref_count);
3309	res_end_move(dev, slave, RES_SRQ, srqn);
3310
3311	return 0;
3312
3313ex_abort:
3314	res_abort_move(dev, slave, RES_SRQ, srqn);
3315
3316	return err;
3317}
3318
3319int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3320			   struct mlx4_vhcr *vhcr,
3321			   struct mlx4_cmd_mailbox *inbox,
3322			   struct mlx4_cmd_mailbox *outbox,
3323			   struct mlx4_cmd_info *cmd)
3324{
3325	int err;
3326	int srqn = vhcr->in_modifier;
3327	struct res_srq *srq;
3328
3329	err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3330	if (err)
3331		return err;
3332	if (srq->com.from_state != RES_SRQ_HW) {
3333		err = -EBUSY;
3334		goto out;
3335	}
3336	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3337out:
3338	put_res(dev, slave, srqn, RES_SRQ);
3339	return err;
3340}
3341
3342int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3343			 struct mlx4_vhcr *vhcr,
3344			 struct mlx4_cmd_mailbox *inbox,
3345			 struct mlx4_cmd_mailbox *outbox,
3346			 struct mlx4_cmd_info *cmd)
3347{
3348	int err;
3349	int srqn = vhcr->in_modifier;
3350	struct res_srq *srq;
3351
3352	err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3353	if (err)
3354		return err;
3355
3356	if (srq->com.from_state != RES_SRQ_HW) {
3357		err = -EBUSY;
3358		goto out;
3359	}
3360
3361	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3362out:
3363	put_res(dev, slave, srqn, RES_SRQ);
3364	return err;
3365}
3366
3367int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
3368			struct mlx4_vhcr *vhcr,
3369			struct mlx4_cmd_mailbox *inbox,
3370			struct mlx4_cmd_mailbox *outbox,
3371			struct mlx4_cmd_info *cmd)
3372{
3373	int err;
3374	int qpn = vhcr->in_modifier & 0x7fffff;
3375	struct res_qp *qp;
3376
3377	err = get_res(dev, slave, qpn, RES_QP, &qp);
3378	if (err)
3379		return err;
3380	if (qp->com.from_state != RES_QP_HW) {
3381		err = -EBUSY;
3382		goto out;
3383	}
3384
3385	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3386out:
3387	put_res(dev, slave, qpn, RES_QP);
3388	return err;
3389}
3390
3391int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
3392			      struct mlx4_vhcr *vhcr,
3393			      struct mlx4_cmd_mailbox *inbox,
3394			      struct mlx4_cmd_mailbox *outbox,
3395			      struct mlx4_cmd_info *cmd)
3396{
3397	struct mlx4_qp_context *context = inbox->buf + 8;
3398	adjust_proxy_tun_qkey(dev, vhcr, context);
3399	update_pkey_index(dev, slave, inbox);
3400	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3401}
3402
3403static int roce_verify_mac(struct mlx4_dev *dev, int slave,
3404				struct mlx4_qp_context *qpc,
3405				struct mlx4_cmd_mailbox *inbox)
3406{
3407	u64 mac;
3408	int port;
3409	u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
3410	u8 sched = *(u8 *)(inbox->buf + 64);
3411	u8 smac_ix;
3412
3413	port = (sched >> 6 & 1) + 1;
3414	if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) {
3415		smac_ix = qpc->pri_path.grh_mylmc & 0x7f;
3416		if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac))
3417			return -ENOENT;
3418	}
3419	return 0;
3420}
3421
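/*
 * INIT2RTR on behalf of a slave: verify the QP parameters and RoCE source
 * MAC, fix up the pkey/GID/qkey fields, apply per-vport VST overrides
 * (except for RSS QPs) and, on success, save the original scheduling and
 * VLAN fields so they can be restored on a later switch back to VGT.
 */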
3422int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
3423			     struct mlx4_vhcr *vhcr,
3424			     struct mlx4_cmd_mailbox *inbox,
3425			     struct mlx4_cmd_mailbox *outbox,
3426			     struct mlx4_cmd_info *cmd)
3427{
3428	int err;
3429	struct mlx4_qp_context *qpc = inbox->buf + 8;
3430	int qpn = vhcr->in_modifier & 0x7fffff;
3431	struct res_qp *qp;
3432	u8 orig_sched_queue;
3433	__be32	orig_param3 = qpc->param3;
3434	u8 orig_vlan_control = qpc->pri_path.vlan_control;
3435	u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
3436	u8 orig_pri_path_fl = qpc->pri_path.fl;
3437	u8 orig_vlan_index = qpc->pri_path.vlan_index;
3438	u8 orig_feup = qpc->pri_path.feup;
3439
3440	err = verify_qp_parameters(dev, inbox, QP_TRANS_INIT2RTR, slave);
3441	if (err)
3442		return err;
3443
3444	if (roce_verify_mac(dev, slave, qpc, inbox))
3445		return -EINVAL;
3446
3447	update_pkey_index(dev, slave, inbox);
3448	update_gid(dev, inbox, (u8)slave);
3449	adjust_proxy_tun_qkey(dev, vhcr, qpc);
3450	orig_sched_queue = qpc->pri_path.sched_queue;
3451
3452	err = get_res(dev, slave, qpn, RES_QP, &qp);
3453	if (err)
3454		return err;
3455	if (qp->com.from_state != RES_QP_HW) {
3456		err = -EBUSY;
3457		goto out;
3458	}
3459
3460	/* do not modify vport QP params for RSS QPs */
3461	if (!(qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET))) {
3462		err = update_vport_qp_param(dev, inbox, slave, qpn);
3463		if (err)
3464			goto out;
3465	}
3466
3467	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3468out:
3469	/* if no error, save sched queue value passed in by VF. This is
3470	 * essentially the QOS value provided by the VF. This will be useful
3471	 * if we allow dynamic changes from VST back to VGT
3472	 */
3473	if (!err) {
3474		qp->sched_queue = orig_sched_queue;
3475		qp->param3	= orig_param3;
3476		qp->vlan_control = orig_vlan_control;
3477		qp->fvl_rx	=  orig_fvl_rx;
3478		qp->pri_path_fl = orig_pri_path_fl;
3479		qp->vlan_index  = orig_vlan_index;
3480		qp->feup	= orig_feup;
3481	}
3482	put_res(dev, slave, qpn, RES_QP);
3483	return err;
3484}
3485
3486int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3487			    struct mlx4_vhcr *vhcr,
3488			    struct mlx4_cmd_mailbox *inbox,
3489			    struct mlx4_cmd_mailbox *outbox,
3490			    struct mlx4_cmd_info *cmd)
3491{
3492	int err;
3493	struct mlx4_qp_context *context = inbox->buf + 8;
3494
3495	err = verify_qp_parameters(dev, inbox, QP_TRANS_RTR2RTS, slave);
3496	if (err)
3497		return err;
3498
3499	update_pkey_index(dev, slave, inbox);
3500	update_gid(dev, inbox, (u8)slave);
3501	adjust_proxy_tun_qkey(dev, vhcr, context);
3502	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3503}
3504
3505int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3506			    struct mlx4_vhcr *vhcr,
3507			    struct mlx4_cmd_mailbox *inbox,
3508			    struct mlx4_cmd_mailbox *outbox,
3509			    struct mlx4_cmd_info *cmd)
3510{
3511	int err;
3512	struct mlx4_qp_context *context = inbox->buf + 8;
3513
3514	err = verify_qp_parameters(dev, inbox, QP_TRANS_RTS2RTS, slave);
3515	if (err)
3516		return err;
3517
3518	update_pkey_index(dev, slave, inbox);
3519	update_gid(dev, inbox, (u8)slave);
3520	adjust_proxy_tun_qkey(dev, vhcr, context);
3521	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3522}
3523
3524
3525int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3526			      struct mlx4_vhcr *vhcr,
3527			      struct mlx4_cmd_mailbox *inbox,
3528			      struct mlx4_cmd_mailbox *outbox,
3529			      struct mlx4_cmd_info *cmd)
3530{
3531	struct mlx4_qp_context *context = inbox->buf + 8;
3532	adjust_proxy_tun_qkey(dev, vhcr, context);
3533	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3534}
3535
3536int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
3537			    struct mlx4_vhcr *vhcr,
3538			    struct mlx4_cmd_mailbox *inbox,
3539			    struct mlx4_cmd_mailbox *outbox,
3540			    struct mlx4_cmd_info *cmd)
3541{
3542	int err;
3543	struct mlx4_qp_context *context = inbox->buf + 8;
3544
3545	err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2SQD, slave);
3546	if (err)
3547		return err;
3548
3549	adjust_proxy_tun_qkey(dev, vhcr, context);
3550	update_gid(dev, inbox, (u8)slave);
3551	update_pkey_index(dev, slave, inbox);
3552	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3553}
3554
3555int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3556			    struct mlx4_vhcr *vhcr,
3557			    struct mlx4_cmd_mailbox *inbox,
3558			    struct mlx4_cmd_mailbox *outbox,
3559			    struct mlx4_cmd_info *cmd)
3560{
3561	int err;
3562	struct mlx4_qp_context *context = inbox->buf + 8;
3563
3564	err = verify_qp_parameters(dev, inbox, QP_TRANS_SQD2RTS, slave);
3565	if (err)
3566		return err;
3567
3568	adjust_proxy_tun_qkey(dev, vhcr, context);
3569	update_gid(dev, inbox, (u8)slave);
3570	update_pkey_index(dev, slave, inbox);
3571	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3572}
3573
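/*
 * Move a slave's QP back to RESET: return the tracker entry to the MAPPED
 * state and drop the references held on its MTT, CQs and SRQ.
 */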
3574int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
3575			 struct mlx4_vhcr *vhcr,
3576			 struct mlx4_cmd_mailbox *inbox,
3577			 struct mlx4_cmd_mailbox *outbox,
3578			 struct mlx4_cmd_info *cmd)
3579{
3580	int err;
3581	int qpn = vhcr->in_modifier & 0x7fffff;
3582	struct res_qp *qp;
3583
3584	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
3585	if (err)
3586		return err;
3587	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3588	if (err)
3589		goto ex_abort;
3590
3591	atomic_dec(&qp->mtt->ref_count);
3592	atomic_dec(&qp->rcq->ref_count);
3593	atomic_dec(&qp->scq->ref_count);
3594	if (qp->srq)
3595		atomic_dec(&qp->srq->ref_count);
3596	res_end_move(dev, slave, RES_QP, qpn);
3597	return 0;
3598
3599ex_abort:
3600	res_abort_move(dev, slave, RES_QP, qpn);
3601
3602	return err;
3603}
3604
3605static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
3606				struct res_qp *rqp, u8 *gid)
3607{
3608	struct res_gid *res;
3609
3610	list_for_each_entry(res, &rqp->mcg_list, list) {
3611		if (!memcmp(res->gid, gid, 16))
3612			return res;
3613	}
3614	return NULL;
3615}
3616
3617static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3618		       u8 *gid, enum mlx4_protocol prot,
3619		       enum mlx4_steer_type steer, u64 reg_id)
3620{
3621	struct res_gid *res;
3622	int err;
3623
3624	res = kzalloc(sizeof *res, GFP_KERNEL);
3625	if (!res)
3626		return -ENOMEM;
3627
3628	spin_lock_irq(&rqp->mcg_spl);
3629	if (find_gid(dev, slave, rqp, gid)) {
3630		kfree(res);
3631		err = -EEXIST;
3632	} else {
3633		memcpy(res->gid, gid, 16);
3634		res->prot = prot;
3635		res->steer = steer;
3636		res->reg_id = reg_id;
3637		list_add_tail(&res->list, &rqp->mcg_list);
3638		err = 0;
3639	}
3640	spin_unlock_irq(&rqp->mcg_spl);
3641
3642	return err;
3643}
3644
3645static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3646		       u8 *gid, enum mlx4_protocol prot,
3647		       enum mlx4_steer_type steer, u64 *reg_id)
3648{
3649	struct res_gid *res;
3650	int err;
3651
3652	spin_lock_irq(&rqp->mcg_spl);
3653	res = find_gid(dev, slave, rqp, gid);
3654	if (!res || res->prot != prot || res->steer != steer)
3655		err = -EINVAL;
3656	else {
3657		*reg_id = res->reg_id;
3658		list_del(&res->list);
3659		kfree(res);
3660		err = 0;
3661	}
3662	spin_unlock_irq(&rqp->mcg_spl);
3663
3664	return err;
3665}
3666
3667static int qp_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
3668		     int block_loopback, enum mlx4_protocol prot,
3669		     enum mlx4_steer_type type, u64 *reg_id)
3670{
3671	switch (dev->caps.steering_mode) {
3672	case MLX4_STEERING_MODE_DEVICE_MANAGED:
3673		return mlx4_trans_to_dmfs_attach(dev, qp, gid, gid[5],
3674						block_loopback, prot,
3675						reg_id);
3676	case MLX4_STEERING_MODE_B0:
3677		return mlx4_qp_attach_common(dev, qp, gid,
3678					    block_loopback, prot, type);
3679	default:
3680		return -EINVAL;
3681	}
3682}
3683
3684static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
3685		     enum mlx4_protocol prot, enum mlx4_steer_type type,
3686		     u64 reg_id)
3687{
3688	switch (dev->caps.steering_mode) {
3689	case MLX4_STEERING_MODE_DEVICE_MANAGED:
3690		return mlx4_flow_detach(dev, reg_id);
3691	case MLX4_STEERING_MODE_B0:
3692		return mlx4_qp_detach_common(dev, qp, gid, prot, type);
3693	default:
3694		return -EINVAL;
3695	}
3696}
3697
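/*
 * Attach or detach a multicast group on a slave's QP (op_modifier selects
 * which) and mirror the registration in the per-QP mcg_list so it can be
 * undone when the slave goes away.
 */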
3698int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3699			       struct mlx4_vhcr *vhcr,
3700			       struct mlx4_cmd_mailbox *inbox,
3701			       struct mlx4_cmd_mailbox *outbox,
3702			       struct mlx4_cmd_info *cmd)
3703{
3704	struct mlx4_qp qp; /* dummy for calling attach/detach */
3705	u8 *gid = inbox->buf;
3706	enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
3707	int err;
3708	int qpn;
3709	struct res_qp *rqp;
3710	u64 reg_id = 0;
3711	int attach = vhcr->op_modifier;
3712	int block_loopback = vhcr->in_modifier >> 31;
3713	u8 steer_type_mask = 2;
3714	enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
3715
3716	qpn = vhcr->in_modifier & 0xffffff;
3717	err = get_res(dev, slave, qpn, RES_QP, &rqp);
3718	if (err)
3719		return err;
3720
3721	qp.qpn = qpn;
3722	if (attach) {
3723		err = qp_attach(dev, &qp, gid, block_loopback, prot,
3724				type, &reg_id);
3725		if (err) {
3726			pr_err("Failed to attach rule to qp 0x%x\n", qpn);
3727			goto ex_put;
3728		}
3729		err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
3730		if (err)
3731			goto ex_detach;
3732	} else {
3733		err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
3734		if (err)
3735			goto ex_put;
3736
3737		err = qp_detach(dev, &qp, gid, prot, type, reg_id);
3738		if (err)
3739			pr_err("Failed to detach rule from qp 0x%x reg_id = 0x%llx\n",
3740			       qpn, (unsigned long long)reg_id);
3741	}
3742	put_res(dev, slave, qpn, RES_QP);
3743	return err;
3744
3745ex_detach:
3746	qp_detach(dev, &qp, gid, prot, type, reg_id);
3747ex_put:
3748	put_res(dev, slave, qpn, RES_QP);
3749	return err;
3750}
3751
3752/*
3753 * MAC validation for Flow Steering rules.
3754 * A VF can attach rules only with a MAC address that is assigned to it.
3755 */
3756static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
3757				   struct list_head *rlist)
3758{
3759	struct mac_res *res, *tmp;
3760	__be64 be_mac;
3761
3762	/* make sure it isn't multicast or broadcast mac*/
3763	/* make sure it isn't a multicast or broadcast MAC */
3764	    !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
3765		list_for_each_entry_safe(res, tmp, rlist, list) {
3766			be_mac = cpu_to_be64(res->mac << 16);
3767			if (!memcmp(&be_mac, eth_header->eth.dst_mac, ETH_ALEN))
3768				return 0;
3769		}
3770		pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
3771		       eth_header->eth.dst_mac, slave);
3772		return -EINVAL;
3773	}
3774	return 0;
3775}
3776
3777/*
3778 * In case of missing eth header, append eth header with a MAC address
3779 * assigned to the VF.
3780 */
3781static int add_eth_header(struct mlx4_dev *dev, int slave,
3782			  struct mlx4_cmd_mailbox *inbox,
3783			  struct list_head *rlist, int header_id)
3784{
3785	struct mac_res *res, *tmp;
3786	u8 port;
3787	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3788	struct mlx4_net_trans_rule_hw_eth *eth_header;
3789	struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
3790	struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
3791	__be64 be_mac = 0;
3792	__be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
3793
3794	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3795	port = ctrl->port;
3796	eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
3797
3798	/* Clear a space in the inbox for eth header */
3799	switch (header_id) {
3800	case MLX4_NET_TRANS_RULE_ID_IPV4:
3801		ip_header =
3802			(struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
3803		memmove(ip_header, eth_header,
3804			sizeof(*ip_header) + sizeof(*l4_header));
3805		break;
3806	case MLX4_NET_TRANS_RULE_ID_TCP:
3807	case MLX4_NET_TRANS_RULE_ID_UDP:
3808		l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
3809			    (eth_header + 1);
3810		memmove(l4_header, eth_header, sizeof(*l4_header));
3811		break;
3812	default:
3813		return -EINVAL;
3814	}
3815	list_for_each_entry_safe(res, tmp, rlist, list) {
3816		if (port == res->port) {
3817			be_mac = cpu_to_be64(res->mac << 16);
3818			break;
3819		}
3820	}
3821	if (!be_mac) {
3822		pr_err("Failed adding eth header to FS rule, can't find matching MAC for port %d\n",
3823		       port);
3824		return -EINVAL;
3825	}
3826
3827	memset(eth_header, 0, sizeof(*eth_header));
3828	eth_header->size = sizeof(*eth_header) >> 2;
3829	eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
3830	memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
3831	memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
3832
3833	return 0;
3834
3835}
3836
3837int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3838					 struct mlx4_vhcr *vhcr,
3839					 struct mlx4_cmd_mailbox *inbox,
3840					 struct mlx4_cmd_mailbox *outbox,
3841					 struct mlx4_cmd_info *cmd)
3842{
3843
3844	struct mlx4_priv *priv = mlx4_priv(dev);
3845	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3846	struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
3847	int err;
3848	int qpn;
3849	struct res_qp *rqp;
3850	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
3851	struct _rule_hw  *rule_header;
3852	int header_id;
3853
3854	if (dev->caps.steering_mode !=
3855	    MLX4_STEERING_MODE_DEVICE_MANAGED)
3856		return -EOPNOTSUPP;
3857
3858	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
3859	qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
3860	err = get_res(dev, slave, qpn, RES_QP, &rqp);
3861	if (err) {
3862		pr_err("Steering rule with qpn 0x%x rejected.\n", qpn);
3863		return err;
3864	}
3865	rule_header = (struct _rule_hw *)(ctrl + 1);
3866	header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
3867
3868	switch (header_id) {
3869	case MLX4_NET_TRANS_RULE_ID_ETH:
3870		if (validate_eth_header_mac(slave, rule_header, rlist)) {
3871			err = -EINVAL;
3872			goto err_put;
3873		}
3874		break;
3875	case MLX4_NET_TRANS_RULE_ID_IB:
3876		break;
3877	case MLX4_NET_TRANS_RULE_ID_IPV4:
3878	case MLX4_NET_TRANS_RULE_ID_TCP:
3879	case MLX4_NET_TRANS_RULE_ID_UDP:
3880		pr_warn("Can't attach FS rule without L2 headers, adding L2 header.\n");
3881		if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
3882			err = -EINVAL;
3883			goto err_put;
3884		}
3885		vhcr->in_modifier +=
3886			sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
3887		break;
3888	default:
3889		pr_err("Corrupted mailbox.\n");
3890		err = -EINVAL;
3891		goto err_put;
3892	}
3893
3894	err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
3895			   vhcr->in_modifier, 0,
3896			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
3897			   MLX4_CMD_NATIVE);
3898	if (err)
3899		goto err_put;
3900
3901	err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
3902	if (err) {
3903		mlx4_err(dev, "Failed to add flow steering resources\n");
3904		/* detach rule*/
3905		mlx4_cmd(dev, vhcr->out_param, 0, 0,
3906			 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
3907			 MLX4_CMD_NATIVE);
3908		goto err_put;
3909	}
3910	atomic_inc(&rqp->ref_count);
3911err_put:
3912	put_res(dev, slave, qpn, RES_QP);
3913	return err;
3914}
3915
3916int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
3917					 struct mlx4_vhcr *vhcr,
3918					 struct mlx4_cmd_mailbox *inbox,
3919					 struct mlx4_cmd_mailbox *outbox,
3920					 struct mlx4_cmd_info *cmd)
3921{
3922	int err;
3923	struct res_qp *rqp;
3924	struct res_fs_rule *rrule;
3925
3926	if (dev->caps.steering_mode !=
3927	    MLX4_STEERING_MODE_DEVICE_MANAGED)
3928		return -EOPNOTSUPP;
3929
3930	err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
3931	if (err)
3932		return err;
3933	/* Release the rule from busy state before removal */
3934	put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
3935	err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
3936	if (err)
3937		return err;
3938
3939	err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
3940		       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
3941		       MLX4_CMD_NATIVE);
3942	if (!err) {
3943		err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE,
3944				    0);
3945		atomic_dec(&rqp->ref_count);
3946
3947		if (err) {
3948			mlx4_err(dev, "Failed to remove flow steering resources\n");
3949			goto out;
3950		}
3951	}
3952
3953out:
3954	put_res(dev, slave, rrule->qpn, RES_QP);
3955	return err;
3956}
3957
3958enum {
3959	BUSY_MAX_RETRIES = 10
3960};
3961
3962int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
3963			       struct mlx4_vhcr *vhcr,
3964			       struct mlx4_cmd_mailbox *inbox,
3965			       struct mlx4_cmd_mailbox *outbox,
3966			       struct mlx4_cmd_info *cmd)
3967{
3968	int err;
3969
3970	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3971
3972	return err;
3973}
3974
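/* Detach every multicast group still registered on the QP. */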
3975static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
3976{
3977	struct res_gid *rgid;
3978	struct res_gid *tmp;
3979	struct mlx4_qp qp; /* dummy for calling attach/detach */
3980
3981	list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
3982		switch (dev->caps.steering_mode) {
3983		case MLX4_STEERING_MODE_DEVICE_MANAGED:
3984			mlx4_flow_detach(dev, rgid->reg_id);
3985			break;
3986		case MLX4_STEERING_MODE_B0:
3987			qp.qpn = rqp->local_qpn;
3988			(void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
3989						     rgid->prot, rgid->steer);
3990			break;
3991		}
3992		list_del(&rgid->list);
3993		kfree(rgid);
3994	}
3995}
3996
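/*
 * Mark all of a slave's resources of the given type as busy and being
 * removed; move_all_busy() below retries for up to five seconds while some
 * entries are still transiently busy.
 */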
3997static int _move_all_busy(struct mlx4_dev *dev, int slave,
3998			  enum mlx4_resource type, int print)
3999{
4000	struct mlx4_priv *priv = mlx4_priv(dev);
4001	struct mlx4_resource_tracker *tracker =
4002		&priv->mfunc.master.res_tracker;
4003	struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
4004	struct res_common *r;
4005	struct res_common *tmp;
4006	int busy;
4007
4008	busy = 0;
4009	spin_lock_irq(mlx4_tlock(dev));
4010	list_for_each_entry_safe(r, tmp, rlist, list) {
4011		if (r->owner == slave) {
4012			if (!r->removing) {
4013				if (r->state == RES_ANY_BUSY) {
4014					if (print)
4015						mlx4_dbg(dev,
4016							 "%s id 0x%llx is busy\n",
4017							  ResourceType(type),
4018							  (unsigned long long)r->res_id);
4019					++busy;
4020				} else {
4021					r->from_state = r->state;
4022					r->state = RES_ANY_BUSY;
4023					r->removing = 1;
4024				}
4025			}
4026		}
4027	}
4028	spin_unlock_irq(mlx4_tlock(dev));
4029
4030	return busy;
4031}
4032
4033static int move_all_busy(struct mlx4_dev *dev, int slave,
4034			 enum mlx4_resource type)
4035{
4036	unsigned long begin;
4037	int busy;
4038
4039	begin = jiffies;
4040	do {
4041		busy = _move_all_busy(dev, slave, type, 0);
4042		if (time_after(jiffies, begin + 5 * HZ))
4043			break;
4044		if (busy)
4045			cond_resched();
4046	} while (busy);
4047
4048	if (busy)
4049		busy = _move_all_busy(dev, slave, type, 1);
4050
4051	return busy;
4052}
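
/*
 * Release every QP still owned by a departing slave, walking each one back
 * from HW ownership to RESERVED and freeing it.
 */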
4053static void rem_slave_qps(struct mlx4_dev *dev, int slave)
4054{
4055	struct mlx4_priv *priv = mlx4_priv(dev);
4056	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4057	struct list_head *qp_list =
4058		&tracker->slave_list[slave].res_list[RES_QP];
4059	struct res_qp *qp;
4060	struct res_qp *tmp;
4061	int state;
4062	u64 in_param;
4063	int qpn;
4064	int err;
4065
4066	err = move_all_busy(dev, slave, RES_QP);
4067	if (err)
4068		mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy "
4069			  "for slave %d\n", slave);
4070
4071	spin_lock_irq(mlx4_tlock(dev));
4072	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4073		spin_unlock_irq(mlx4_tlock(dev));
4074		if (qp->com.owner == slave) {
4075			qpn = qp->com.res_id;
4076			detach_qp(dev, slave, qp);
4077			state = qp->com.from_state;
4078			while (state != 0) {
4079				switch (state) {
4080				case RES_QP_RESERVED:
4081					spin_lock_irq(mlx4_tlock(dev));
4082					rb_erase(&qp->com.node,
4083						 &tracker->res_tree[RES_QP]);
4084					list_del(&qp->com.list);
4085					spin_unlock_irq(mlx4_tlock(dev));
4086					if (!valid_reserved(dev, slave, qpn)) {
4087						__mlx4_qp_release_range(dev, qpn, 1);
4088						mlx4_release_resource(dev, slave,
4089								      RES_QP, 1, 0);
4090					}
4091					kfree(qp);
4092					state = 0;
4093					break;
4094				case RES_QP_MAPPED:
4095					if (!valid_reserved(dev, slave, qpn))
4096						__mlx4_qp_free_icm(dev, qpn);
4097					state = RES_QP_RESERVED;
4098					break;
4099				case RES_QP_HW:
4100					in_param = slave;
4101					err = mlx4_cmd(dev, in_param,
4102						       qp->local_qpn, 2,
4103						       MLX4_CMD_2RST_QP,
4104						       MLX4_CMD_TIME_CLASS_A,
4105						       MLX4_CMD_NATIVE);
4106					if (err)
4107						mlx4_dbg(dev,
4108							 "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
4109							 slave,
4110							 qp->local_qpn);
4111					atomic_dec(&qp->rcq->ref_count);
4112					atomic_dec(&qp->scq->ref_count);
4113					atomic_dec(&qp->mtt->ref_count);
4114					if (qp->srq)
4115						atomic_dec(&qp->srq->ref_count);
4116					state = RES_QP_MAPPED;
4117					break;
4118				default:
4119					state = 0;
4120				}
4121			}
4122		}
4123		spin_lock_irq(mlx4_tlock(dev));
4124	}
4125	spin_unlock_irq(mlx4_tlock(dev));
4126}
4127
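/*
 * Free all SRQs still owned by @slave: RES_SRQ_HW issues HW2SW_SRQ to
 * return the SRQ to software ownership and drops the MTT and CQ
 * references, RES_SRQ_ALLOCATED frees the ICM and removes the tracker
 * entry.
 */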
4128static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
4129{
4130	struct mlx4_priv *priv = mlx4_priv(dev);
4131	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4132	struct list_head *srq_list =
4133		&tracker->slave_list[slave].res_list[RES_SRQ];
4134	struct res_srq *srq;
4135	struct res_srq *tmp;
4136	int state;
4137	u64 in_param;
4138	LIST_HEAD(tlist);
4139	int srqn;
4140	int err;
4141
4142	err = move_all_busy(dev, slave, RES_SRQ);
4143	if (err)
4144		mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to busy for slave %d\n",
4145			  slave);
4146
4147	spin_lock_irq(mlx4_tlock(dev));
4148	list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
4149		spin_unlock_irq(mlx4_tlock(dev));
4150		if (srq->com.owner == slave) {
4151			srqn = srq->com.res_id;
4152			state = srq->com.from_state;
4153			while (state != 0) {
4154				switch (state) {
4155				case RES_SRQ_ALLOCATED:
4156					__mlx4_srq_free_icm(dev, srqn);
4157					spin_lock_irq(mlx4_tlock(dev));
4158					rb_erase(&srq->com.node,
4159						 &tracker->res_tree[RES_SRQ]);
4160					list_del(&srq->com.list);
4161					spin_unlock_irq(mlx4_tlock(dev));
4162					mlx4_release_resource(dev, slave,
4163							      RES_SRQ, 1, 0);
4164					kfree(srq);
4165					state = 0;
4166					break;
4167
4168				case RES_SRQ_HW:
4169					in_param = slave;
4170					err = mlx4_cmd(dev, in_param, srqn, 1,
4171						       MLX4_CMD_HW2SW_SRQ,
4172						       MLX4_CMD_TIME_CLASS_A,
4173						       MLX4_CMD_NATIVE);
4174					if (err)
4175						mlx4_dbg(dev,
4176							 "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
4177							 slave,
4178							 srqn);
4179
4180					atomic_dec(&srq->mtt->ref_count);
4181					if (srq->cq)
4182						atomic_dec(&srq->cq->ref_count);
4183					state = RES_SRQ_ALLOCATED;
4184					break;
4185
4186				default:
4187					state = 0;
4188				}
4189			}
4190		}
4191		spin_lock_irq(mlx4_tlock(dev));
4192	}
4193	spin_unlock_irq(mlx4_tlock(dev));
4194}
4195
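/*
 * Free all CQs still owned by @slave.  CQs with a non-zero ref_count
 * (still referenced by QPs or SRQs) are skipped; the rest are moved to
 * software ownership via HW2SW_CQ and their ICM and tracker entries
 * released.
 */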
4196static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
4197{
4198	struct mlx4_priv *priv = mlx4_priv(dev);
4199	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4200	struct list_head *cq_list =
4201		&tracker->slave_list[slave].res_list[RES_CQ];
4202	struct res_cq *cq;
4203	struct res_cq *tmp;
4204	int state;
4205	u64 in_param;
4206	LIST_HEAD(tlist);
4207	int cqn;
4208	int err;
4209
4210	err = move_all_busy(dev, slave, RES_CQ);
4211	if (err)
4212		mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to busy for slave %d\n",
4213			  slave);
4214
4215	spin_lock_irq(mlx4_tlock(dev));
4216	list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
4217		spin_unlock_irq(mlx4_tlock(dev));
4218		if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
4219			cqn = cq->com.res_id;
4220			state = cq->com.from_state;
4221			while (state != 0) {
4222				switch (state) {
4223				case RES_CQ_ALLOCATED:
4224					__mlx4_cq_free_icm(dev, cqn);
4225					spin_lock_irq(mlx4_tlock(dev));
4226					rb_erase(&cq->com.node,
4227						 &tracker->res_tree[RES_CQ]);
4228					list_del(&cq->com.list);
4229					spin_unlock_irq(mlx4_tlock(dev));
4230					mlx4_release_resource(dev, slave,
4231							      RES_CQ, 1, 0);
4232					kfree(cq);
4233					state = 0;
4234					break;
4235
4236				case RES_CQ_HW:
4237					in_param = slave;
4238					err = mlx4_cmd(dev, in_param, cqn, 1,
4239						       MLX4_CMD_HW2SW_CQ,
4240						       MLX4_CMD_TIME_CLASS_A,
4241						       MLX4_CMD_NATIVE);
4242					if (err)
4243						mlx4_dbg(dev,
4244							 "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
4245							 slave,
4246							 cqn);
4247					atomic_dec(&cq->mtt->ref_count);
4248					state = RES_CQ_ALLOCATED;
4249					break;
4250
4251				default:
4252					state = 0;
4253				}
4254			}
4255		}
4256		spin_lock_irq(mlx4_tlock(dev));
4257	}
4258	spin_unlock_irq(mlx4_tlock(dev));
4259}
4260
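/*
 * Free all memory regions (MPTs) still owned by @slave: RES_MPT_HW
 * issues HW2SW_MPT and drops the MTT reference, RES_MPT_MAPPED frees the
 * ICM, RES_MPT_RESERVED releases the key and removes the tracker entry.
 */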
4261static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
4262{
4263	struct mlx4_priv *priv = mlx4_priv(dev);
4264	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4265	struct list_head *mpt_list =
4266		&tracker->slave_list[slave].res_list[RES_MPT];
4267	struct res_mpt *mpt;
4268	struct res_mpt *tmp;
4269	int state;
4270	u64 in_param;
4271	LIST_HEAD(tlist);
4272	int mptn;
4273	int err;
4274
4275	err = move_all_busy(dev, slave, RES_MPT);
4276	if (err)
4277		mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to busy for slave %d\n",
4278			  slave);
4279
4280	spin_lock_irq(mlx4_tlock(dev));
4281	list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
4282		spin_unlock_irq(mlx4_tlock(dev));
4283		if (mpt->com.owner == slave) {
4284			mptn = mpt->com.res_id;
4285			state = mpt->com.from_state;
4286			while (state != 0) {
4287				switch (state) {
4288				case RES_MPT_RESERVED:
4289					__mlx4_mpt_release(dev, mpt->key);
4290					spin_lock_irq(mlx4_tlock(dev));
4291					rb_erase(&mpt->com.node,
4292						 &tracker->res_tree[RES_MPT]);
4293					list_del(&mpt->com.list);
4294					spin_unlock_irq(mlx4_tlock(dev));
4295					mlx4_release_resource(dev, slave,
4296							      RES_MPT, 1, 0);
4297					kfree(mpt);
4298					state = 0;
4299					break;
4300
4301				case RES_MPT_MAPPED:
4302					__mlx4_mpt_free_icm(dev, mpt->key);
4303					state = RES_MPT_RESERVED;
4304					break;
4305
4306				case RES_MPT_HW:
4307					in_param = slave;
4308					err = mlx4_cmd(dev, in_param, mptn, 0,
4309						     MLX4_CMD_HW2SW_MPT,
4310						     MLX4_CMD_TIME_CLASS_A,
4311						     MLX4_CMD_NATIVE);
4312					if (err)
4313						mlx4_dbg(dev,
4314							 "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
4315							 slave,
4316							 mptn);
4317					if (mpt->mtt)
4318						atomic_dec(&mpt->mtt->ref_count);
4319					state = RES_MPT_MAPPED;
4320					break;
4321				default:
4322					state = 0;
4323				}
4324			}
4325		}
4326		spin_lock_irq(mlx4_tlock(dev));
4327	}
4328	spin_unlock_irq(mlx4_tlock(dev));
4329}
4330
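/*
 * Free all MTT ranges still owned by @slave, returning 1 << order
 * entries to the slave's quota for each range and removing the tracker
 * entry.
 */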
4331static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
4332{
4333	struct mlx4_priv *priv = mlx4_priv(dev);
4334	struct mlx4_resource_tracker *tracker =
4335		&priv->mfunc.master.res_tracker;
4336	struct list_head *mtt_list =
4337		&tracker->slave_list[slave].res_list[RES_MTT];
4338	struct res_mtt *mtt;
4339	struct res_mtt *tmp;
4340	int state;
4341	LIST_HEAD(tlist);
4342	int base;
4343	int err;
4344
4345	err = move_all_busy(dev, slave, RES_MTT);
4346	if (err)
4347		mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to busy for slave %d\n",
4348			  slave);
4349
4350	spin_lock_irq(mlx4_tlock(dev));
4351	list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
4352		spin_unlock_irq(mlx4_tlock(dev));
4353		if (mtt->com.owner == slave) {
4354			base = mtt->com.res_id;
4355			state = mtt->com.from_state;
4356			while (state != 0) {
4357				switch (state) {
4358				case RES_MTT_ALLOCATED:
4359					__mlx4_free_mtt_range(dev, base,
4360							      mtt->order);
4361					spin_lock_irq(mlx4_tlock(dev));
4362					rb_erase(&mtt->com.node,
4363						 &tracker->res_tree[RES_MTT]);
4364					list_del(&mtt->com.list);
4365					spin_unlock_irq(mlx4_tlock(dev));
4366					mlx4_release_resource(dev, slave, RES_MTT,
4367							      1 << mtt->order, 0);
4368					kfree(mtt);
4369					state = 0;
4370					break;
4371
4372				default:
4373					state = 0;
4374				}
4375			}
4376		}
4377		spin_lock_irq(mlx4_tlock(dev));
4378	}
4379	spin_unlock_irq(mlx4_tlock(dev));
4380}
4381
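/*
 * Detach and free all flow steering rules still owned by @slave via the
 * MLX4_QP_FLOW_STEERING_DETACH command, removing their tracker entries.
 */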
4382static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
4383{
4384	struct mlx4_priv *priv = mlx4_priv(dev);
4385	struct mlx4_resource_tracker *tracker =
4386		&priv->mfunc.master.res_tracker;
4387	struct list_head *fs_rule_list =
4388		&tracker->slave_list[slave].res_list[RES_FS_RULE];
4389	struct res_fs_rule *fs_rule;
4390	struct res_fs_rule *tmp;
4391	int state;
4392	u64 base;
4393	int err;
4394
4395	err = move_all_busy(dev, slave, RES_FS_RULE);
4396	if (err)
4397		mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n",
4398			  slave);
4399
4400	spin_lock_irq(mlx4_tlock(dev));
4401	list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
4402		spin_unlock_irq(mlx4_tlock(dev));
4403		if (fs_rule->com.owner == slave) {
4404			base = fs_rule->com.res_id;
4405			state = fs_rule->com.from_state;
4406			while (state != 0) {
4407				switch (state) {
4408				case RES_FS_RULE_ALLOCATED:
4409					/* detach rule */
4410					err = mlx4_cmd(dev, base, 0, 0,
4411						       MLX4_QP_FLOW_STEERING_DETACH,
4412						       MLX4_CMD_TIME_CLASS_A,
4413						       MLX4_CMD_NATIVE);
4414
4415					spin_lock_irq(mlx4_tlock(dev));
4416					rb_erase(&fs_rule->com.node,
4417						 &tracker->res_tree[RES_FS_RULE]);
4418					list_del(&fs_rule->com.list);
4419					spin_unlock_irq(mlx4_tlock(dev));
4420					kfree(fs_rule);
4421					state = 0;
4422					break;
4423
4424				default:
4425					state = 0;
4426				}
4427			}
4428		}
4429		spin_lock_irq(mlx4_tlock(dev));
4430	}
4431	spin_unlock_irq(mlx4_tlock(dev));
4432}
4433
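/*
 * Free all EQs still owned by @slave: RES_EQ_HW issues HW2SW_EQ (retrying
 * the mailbox allocation if needed) and drops the MTT reference,
 * RES_EQ_RESERVED removes the tracker entry.
 */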
4434static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
4435{
4436	struct mlx4_priv *priv = mlx4_priv(dev);
4437	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4438	struct list_head *eq_list =
4439		&tracker->slave_list[slave].res_list[RES_EQ];
4440	struct res_eq *eq;
4441	struct res_eq *tmp;
4442	int err;
4443	int state;
4444	LIST_HEAD(tlist);
4445	int eqn;
4446	struct mlx4_cmd_mailbox *mailbox;
4447
4448	err = move_all_busy(dev, slave, RES_EQ);
4449	if (err)
4450		mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to busy for slave %d\n",
4451			  slave);
4452
4453	spin_lock_irq(mlx4_tlock(dev));
4454	list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
4455		spin_unlock_irq(mlx4_tlock(dev));
4456		if (eq->com.owner == slave) {
4457			eqn = eq->com.res_id;
4458			state = eq->com.from_state;
4459			while (state != 0) {
4460				switch (state) {
4461				case RES_EQ_RESERVED:
4462					spin_lock_irq(mlx4_tlock(dev));
4463					rb_erase(&eq->com.node,
4464						 &tracker->res_tree[RES_EQ]);
4465					list_del(&eq->com.list);
4466					spin_unlock_irq(mlx4_tlock(dev));
4467					kfree(eq);
4468					state = 0;
4469					break;
4470
4471				case RES_EQ_HW:
4472					mailbox = mlx4_alloc_cmd_mailbox(dev);
4473					if (IS_ERR(mailbox)) {
4474						cond_resched();
4475						continue;
4476					}
4477					err = mlx4_cmd_box(dev, slave, 0,
4478							   eqn & 0xff, 0,
4479							   MLX4_CMD_HW2SW_EQ,
4480							   MLX4_CMD_TIME_CLASS_A,
4481							   MLX4_CMD_NATIVE);
4482					if (err)
4483						mlx4_dbg(dev,
4484							 "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
4485							 slave, eqn);
4486					mlx4_free_cmd_mailbox(dev, mailbox);
4487					atomic_dec(&eq->mtt->ref_count);
4488					state = RES_EQ_RESERVED;
4489					break;
4490
4491				default:
4492					state = 0;
4493				}
4494			}
4495		}
4496		spin_lock_irq(mlx4_tlock(dev));
4497	}
4498	spin_unlock_irq(mlx4_tlock(dev));
4499}
4500
4501static void rem_slave_counters(struct mlx4_dev *dev, int slave)
4502{
4503	__mlx4_slave_counters_free(dev, slave);
4504}
4505
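/*
 * Free all XRC domains still owned by @slave and remove their tracker
 * entries.
 */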
4506static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
4507{
4508	struct mlx4_priv *priv = mlx4_priv(dev);
4509	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4510	struct list_head *xrcdn_list =
4511		&tracker->slave_list[slave].res_list[RES_XRCD];
4512	struct res_xrcdn *xrcd;
4513	struct res_xrcdn *tmp;
4514	int err;
4515	int xrcdn;
4516
4517	err = move_all_busy(dev, slave, RES_XRCD);
4518	if (err)
4519		mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to busy for slave %d\n",
4520			  slave);
4521
4522	spin_lock_irq(mlx4_tlock(dev));
4523	list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
4524		if (xrcd->com.owner == slave) {
4525			xrcdn = xrcd->com.res_id;
4526			rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
4527			list_del(&xrcd->com.list);
4528			kfree(xrcd);
4529			__mlx4_xrcd_free(dev, xrcdn);
4530		}
4531	}
4532	spin_unlock_irq(mlx4_tlock(dev));
4533}
4534
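/*
 * Release every resource still tracked for @slave, under the slave's
 * tracker mutex.  MACs, VLANs and flow rules are removed first, and QPs
 * are torn down before the SRQs, CQs, MRs, EQs and MTTs so that the
 * references taken on those resources have already been dropped.
 */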
4535void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
4536{
4537	struct mlx4_priv *priv = mlx4_priv(dev);
4538
4539	mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
4540	rem_slave_macs(dev, slave);
4541	rem_slave_vlans(dev, slave);
4542	rem_slave_fs_rule(dev, slave);
4543	rem_slave_qps(dev, slave);
4544	rem_slave_srqs(dev, slave);
4545	rem_slave_cqs(dev, slave);
4546	rem_slave_mrs(dev, slave);
4547	rem_slave_eqs(dev, slave);
4548	rem_slave_mtts(dev, slave);
4549	rem_slave_counters(dev, slave);
4550	rem_slave_xrcdns(dev, slave);
4551	mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
4552}
4553
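/*
 * Deferred work run on the PF to apply an immediate VLAN/QoS change to
 * all eligible QPs of a VF: each QP still in hardware ownership gets an
 * UPDATE_QP command that either restores the parameters saved before VST
 * enforcement (when moving back to VGT) or forces the new VLAN index,
 * QoS priority and vlan_control settings.  On success, and if requested,
 * the previously registered VLAN is unregistered.
 */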
4554void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
4555{
4556	struct mlx4_vf_immed_vlan_work *work =
4557		container_of(_work, struct mlx4_vf_immed_vlan_work, work);
4558	struct mlx4_cmd_mailbox *mailbox;
4559	struct mlx4_update_qp_context *upd_context;
4560	struct mlx4_dev *dev = &work->priv->dev;
4561	struct mlx4_resource_tracker *tracker =
4562		&work->priv->mfunc.master.res_tracker;
4563	struct list_head *qp_list =
4564		&tracker->slave_list[work->slave].res_list[RES_QP];
4565	struct res_qp *qp;
4566	struct res_qp *tmp;
4567	u64 qp_path_mask_vlan_ctrl =
4568		       ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
4569		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
4570		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
4571		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
4572		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
4573		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));
4574
4575	u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
4576		       (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
4577		       (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
4578		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
4579		       (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
4580		       (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
4581		       (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));
4582
4583	int err;
4584	int port, errors = 0;
4585	u8 vlan_control;
4586
4587	if (mlx4_is_slave(dev)) {
4588		mlx4_warn(dev, "Trying to update-qp in slave %d\n",
4589			  work->slave);
4590		goto out;
4591	}
4592
4593	mailbox = mlx4_alloc_cmd_mailbox(dev);
4594	if (IS_ERR(mailbox))
4595		goto out;
4596
4597	if (!work->vlan_id)
4598		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4599			MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
4600	else
4601		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
4602			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
4603			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
4604
4605	upd_context = mailbox->buf;
4606	upd_context->qp_mask = cpu_to_be64(MLX4_UPD_QP_MASK_VSD);
4607
4608	spin_lock_irq(mlx4_tlock(dev));
4609	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4610		spin_unlock_irq(mlx4_tlock(dev));
4611		if (qp->com.owner == work->slave) {
4612			if (qp->com.from_state != RES_QP_HW ||
4613			    !qp->sched_queue ||  /* no INIT2RTR trans yet */
4614			    mlx4_is_qp_reserved(dev, qp->local_qpn) ||
4615			    qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
4616				spin_lock_irq(mlx4_tlock(dev));
4617				continue;
4618			}
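			/* bit 6 of sched_queue selects the port (1-based) */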
4619			port = (qp->sched_queue >> 6 & 1) + 1;
4620			if (port != work->port) {
4621				spin_lock_irq(mlx4_tlock(dev));
4622				continue;
4623			}
4624			if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
4625				upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
4626			else
4627				upd_context->primary_addr_path_mask =
4628					cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
4629			if (work->vlan_id == MLX4_VGT) {
4630				upd_context->qp_context.param3 = qp->param3;
4631				upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
4632				upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
4633				upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
4634				upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
4635				upd_context->qp_context.pri_path.feup = qp->feup;
4636				upd_context->qp_context.pri_path.sched_queue =
4637					qp->sched_queue;
4638			} else {
4639				upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
4640				upd_context->qp_context.pri_path.vlan_control = vlan_control;
4641				upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
4642				upd_context->qp_context.pri_path.fvl_rx =
4643					qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
4644				upd_context->qp_context.pri_path.fl =
4645					qp->pri_path_fl | MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
4646				upd_context->qp_context.pri_path.feup =
4647					qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
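				/* clear priority bits 3-5 of sched_queue (mask 0xC7)
				 * and insert the requested QoS value there
				 */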
4648				upd_context->qp_context.pri_path.sched_queue =
4649					qp->sched_queue & 0xC7;
4650				upd_context->qp_context.pri_path.sched_queue |=
4651					((work->qos & 0x7) << 3);
4652			}
4653
4654			err = mlx4_cmd(dev, mailbox->dma,
4655				       qp->local_qpn & 0xffffff,
4656				       0, MLX4_CMD_UPDATE_QP,
4657				       MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
4658			if (err) {
4659				mlx4_info(dev,
4660					  "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
4661					  work->slave, port, qp->local_qpn,
4662					  err);
4663				errors++;
4664			}
4665		}
4666		spin_lock_irq(mlx4_tlock(dev));
4667	}
4668	spin_unlock_irq(mlx4_tlock(dev));
4669	mlx4_free_cmd_mailbox(dev, mailbox);
4670
4671	if (errors)
4672		mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
4673			 errors, work->slave, work->port);
4674
4675	/* unregister previous vlan_id if needed and we had no errors
4676	 * while updating the QPs
4677	 */
4678	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
4679	    NO_INDX != work->orig_vlan_ix)
4680		__mlx4_unregister_vlan(&work->priv->dev, work->port,
4681				       work->orig_vlan_id);
4682out:
4683	kfree(work);
4685}
4686