cmd.c revision 271127
1/*
2 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
3 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses.  You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 *     Redistribution and use in source and binary forms, with or
13 *     without modification, are permitted provided that the following
14 *     conditions are met:
15 *
16 *      - Redistributions of source code must retain the above
17 *        copyright notice, this list of conditions and the following
18 *        disclaimer.
19 *
20 *      - Redistributions in binary form must reproduce the above
21 *        copyright notice, this list of conditions and the following
22 *        disclaimer in the documentation and/or other materials
23 *        provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/pci.h>
38#include <linux/errno.h>
39
40#include <linux/mlx4/cmd.h>
41#include <linux/semaphore.h>
42#include <rdma/ib_smi.h>
43
44#include <asm/io.h>
45
46#include "mlx4.h"
47#include "fw.h"
48
49#define CMD_POLL_TOKEN 0xffff
50#define INBOX_MASK	0xffffffffffffff00ULL
51
52#define CMD_CHAN_VER 1
53#define CMD_CHAN_IF_REV 1
54
/*
 * Firmware command completion status codes, as returned in the HCR
 * status field / VHCR status byte.  Translated to errnos by
 * mlx4_status_to_errno() and back by mlx4_errno_to_status().
 */
enum {
	/* command completed successfully: */
	CMD_STAT_OK		= 0x00,
	/* Internal error (such as a bus error) occurred while processing command: */
	CMD_STAT_INTERNAL_ERR	= 0x01,
	/* Operation/command not supported or opcode modifier not supported: */
	CMD_STAT_BAD_OP		= 0x02,
	/* Parameter not supported or parameter out of range: */
	CMD_STAT_BAD_PARAM	= 0x03,
	/* System not enabled or bad system state: */
	CMD_STAT_BAD_SYS_STATE	= 0x04,
	/* Attempt to access reserved or unallocated resource: */
	CMD_STAT_BAD_RESOURCE	= 0x05,
	/* Requested resource is currently executing a command, or is otherwise busy: */
	CMD_STAT_RESOURCE_BUSY	= 0x06,
	/* Required capability exceeds device limits: */
	CMD_STAT_EXCEED_LIM	= 0x08,
	/* Resource is not in the appropriate state or ownership: */
	CMD_STAT_BAD_RES_STATE	= 0x09,
	/* Index out of range: */
	CMD_STAT_BAD_INDEX	= 0x0a,
	/* FW image corrupted: */
	CMD_STAT_BAD_NVMEM	= 0x0b,
	/* Error in ICM mapping (e.g. not enough auxiliary ICM pages to execute command): */
	CMD_STAT_ICM_ERROR	= 0x0c,
	/* Attempt to modify a QP/EE which is not in the presumed state: */
	CMD_STAT_BAD_QP_STATE   = 0x10,
	/* Bad segment parameters (Address/Size): */
	CMD_STAT_BAD_SEG_PARAM	= 0x20,
	/* Memory Region has Memory Windows bound to: */
	CMD_STAT_REG_BOUND	= 0x21,
	/* HCA local attached memory not present: */
	CMD_STAT_LAM_NOT_PRE	= 0x22,
	/* Bad management packet (silently discarded): */
	CMD_STAT_BAD_PKT	= 0x30,
	/* More outstanding CQEs in CQ than new CQ size: */
	CMD_STAT_BAD_SIZE	= 0x40,
	/* Multi Function device support required: */
	CMD_STAT_MULTI_FUNC_REQ	= 0x50,
};
95
/*
 * Byte offsets of the fields within the Hardware Command Register
 * (HCR) window, plus bit positions within the final control dword
 * written by mlx4_cmd_post().
 */
enum {
	HCR_IN_PARAM_OFFSET	= 0x00,
	HCR_IN_MODIFIER_OFFSET	= 0x08,
	HCR_OUT_PARAM_OFFSET	= 0x0c,
	HCR_TOKEN_OFFSET	= 0x14,
	HCR_STATUS_OFFSET	= 0x18,

	HCR_OPMOD_SHIFT		= 12,	/* opcode modifier field position */
	HCR_T_BIT		= 21,	/* toggle bit (see cmd_pending()) */
	HCR_E_BIT		= 22,	/* event: interrupt on completion */
	HCR_GO_BIT		= 23	/* set while firmware owns the HCR */
};
108
enum {
	/* How long mlx4_cmd_post() waits (event mode only) for a previous
	 * command to release the GO bit before giving up with -EAGAIN. */
	GO_BIT_TIMEOUT_MSECS	= 10000
};
112
/* Per-command bookkeeping for event-mode (completion-driven) commands. */
struct mlx4_cmd_context {
	struct completion	done;		/* signaled by mlx4_cmd_event() */
	int			result;		/* errno derived from fw_status */
	int			next;		/* free-list link: next free context index */
	u64			out_param;	/* immediate output, if command has one */
	u16			token;		/* matches a completion to this context */
	u8			fw_status;	/* raw firmware status byte */
};
121
122static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
123				    struct mlx4_vhcr_cmd *in_vhcr);
124
125static int mlx4_status_to_errno(u8 status)
126{
127	static const int trans_table[] = {
128		[CMD_STAT_INTERNAL_ERR]	  = -EIO,
129		[CMD_STAT_BAD_OP]	  = -EPERM,
130		[CMD_STAT_BAD_PARAM]	  = -EINVAL,
131		[CMD_STAT_BAD_SYS_STATE]  = -ENXIO,
132		[CMD_STAT_BAD_RESOURCE]	  = -EBADF,
133		[CMD_STAT_RESOURCE_BUSY]  = -EBUSY,
134		[CMD_STAT_EXCEED_LIM]	  = -ENOMEM,
135		[CMD_STAT_BAD_RES_STATE]  = -EBADF,
136		[CMD_STAT_BAD_INDEX]	  = -EBADF,
137		[CMD_STAT_BAD_NVMEM]	  = -EFAULT,
138		[CMD_STAT_ICM_ERROR]	  = -ENFILE,
139		[CMD_STAT_BAD_QP_STATE]   = -EINVAL,
140		[CMD_STAT_BAD_SEG_PARAM]  = -EFAULT,
141		[CMD_STAT_REG_BOUND]	  = -EBUSY,
142		[CMD_STAT_LAM_NOT_PRE]	  = -EAGAIN,
143		[CMD_STAT_BAD_PKT]	  = -EINVAL,
144		[CMD_STAT_BAD_SIZE]	  = -ENOMEM,
145		[CMD_STAT_MULTI_FUNC_REQ] = -EACCES,
146	};
147
148	if (status >= ARRAY_SIZE(trans_table) ||
149	    (status != CMD_STAT_OK && trans_table[status] == 0))
150		return -EIO;
151
152	return trans_table[status];
153}
154
155static u8 mlx4_errno_to_status(int errno)
156{
157	switch (errno) {
158	case -EPERM:
159		return CMD_STAT_BAD_OP;
160	case -EINVAL:
161		return CMD_STAT_BAD_PARAM;
162	case -ENXIO:
163		return CMD_STAT_BAD_SYS_STATE;
164	case -EBUSY:
165		return CMD_STAT_RESOURCE_BUSY;
166	case -ENOMEM:
167		return CMD_STAT_EXCEED_LIM;
168	case -ENFILE:
169		return CMD_STAT_ICM_ERROR;
170	default:
171		return CMD_STAT_INTERNAL_ERR;
172	}
173}
174
/*
 * Return nonzero while the last command posted on the comm channel is
 * still outstanding, i.e. the toggle bit (bit 31) of slave_read has not
 * yet been set by the master to match our comm_toggle (which
 * mlx4_comm_cmd_post() flips on every post).
 */
static int comm_pending(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u32 status = readl(&priv->mfunc.comm->slave_read);

	/* swab32 recovers the big-endian word's toggle bit in bit 31 */
	return (swab32(status) >> 31) != priv->cmd.comm_toggle;
}
182
/*
 * Post a command on the comm channel: flip our toggle and write
 * param/cmd/toggle as one big-endian dword to slave_write.  The
 * toggle flip is what makes comm_pending() report "busy" until the
 * master acknowledges via slave_read.
 */
static void mlx4_comm_cmd_post(struct mlx4_dev *dev, u8 cmd, u16 param)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u32 val;

	priv->cmd.comm_toggle ^= 1;
	val = param | (cmd << 16) | (priv->cmd.comm_toggle << 31);
	__raw_writel((__force u32) cpu_to_be32(val),
		     &priv->mfunc.comm->slave_write);
	/* order this MMIO write before any subsequent ones from other CPUs */
	mmiowb();
}
194
195static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
196		       unsigned long timeout)
197{
198	struct mlx4_priv *priv = mlx4_priv(dev);
199	unsigned long end;
200	int err = 0;
201	int ret_from_pending = 0;
202
203	/* First, verify that the master reports correct status */
204	if (comm_pending(dev)) {
205		mlx4_warn(dev, "Communication channel is not idle."
206			  "my toggle is %d (cmd:0x%x)\n",
207			  priv->cmd.comm_toggle, cmd);
208		return -EAGAIN;
209	}
210
211	/* Write command */
212	down(&priv->cmd.poll_sem);
213	mlx4_comm_cmd_post(dev, cmd, param);
214
215	end = msecs_to_jiffies(timeout) + jiffies;
216	while (comm_pending(dev) && time_before(jiffies, end))
217		cond_resched();
218	ret_from_pending = comm_pending(dev);
219	if (ret_from_pending) {
220		/* check if the slave is trying to boot in the middle of
221		 * FLR process. The only non-zero result in the RESET command
222		 * is MLX4_DELAY_RESET_SLAVE*/
223		if ((MLX4_COMM_CMD_RESET == cmd)) {
224			mlx4_warn(dev, "Got slave FLRed from Communication"
225				  " channel (ret:0x%x)\n", ret_from_pending);
226			err = MLX4_DELAY_RESET_SLAVE;
227		} else {
228			mlx4_warn(dev, "Communication channel timed out\n");
229			err = -ETIMEDOUT;
230		}
231	}
232
233	up(&priv->cmd.poll_sem);
234	return err;
235}
236
/*
 * Post @op on the comm channel and sleep until the master signals
 * completion (delivered through mlx4_cmd_event()) or @timeout msecs
 * elapse.  Event-mode counterpart of mlx4_comm_cmd_poll().
 */
static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 op,
			      u16 param, unsigned long timeout)
{
	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
	struct mlx4_cmd_context *context;
	unsigned long end;
	int err = 0;

	down(&cmd->event_sem);

	/* Grab a free command context and bump its token so that a stale
	 * completion from a previously timed-out use of this slot is
	 * ignored by mlx4_cmd_event(). */
	spin_lock(&cmd->context_lock);
	BUG_ON(cmd->free_head < 0);
	context = &cmd->context[cmd->free_head];
	context->token += cmd->token_mask + 1;
	cmd->free_head = context->next;
	spin_unlock(&cmd->context_lock);

	init_completion(&context->done);

	mlx4_comm_cmd_post(dev, op, param);

	if (!wait_for_completion_timeout(&context->done,
					 msecs_to_jiffies(timeout))) {
		mlx4_warn(dev, "communication channel command 0x%x timed out\n", op);
		err = -EBUSY;
		goto out;
	}

	err = context->result;
	/* MULTI_FUNC_REQ failures are deliberately excluded from the log */
	if (err && context->fw_status != CMD_STAT_MULTI_FUNC_REQ) {
		mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
			 op, context->fw_status);
		goto out;
	}

out:
	/* wait for comm channel ready
	 * this is necessary for prevention the race
	 * when switching between event to polling mode
	 */
	end = msecs_to_jiffies(timeout) + jiffies;
	while (comm_pending(dev) && time_before(jiffies, end))
		cond_resched();

	/* Return the context to the free list. */
	spin_lock(&cmd->context_lock);
	context->next = cmd->free_head;
	cmd->free_head = context - cmd->context;
	spin_unlock(&cmd->context_lock);

	up(&cmd->event_sem);
	return err;
}
289
290int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
291		  unsigned long timeout)
292{
293	if (mlx4_priv(dev)->cmd.use_events)
294		return mlx4_comm_cmd_wait(dev, cmd, param, timeout);
295	return mlx4_comm_cmd_poll(dev, cmd, param, timeout);
296}
297
/*
 * Return nonzero while the HCR is still owned by firmware: the GO bit
 * is set, or the toggle bit still matches our side's toggle (firmware
 * flips it when it takes the command).  Also returns nonzero (-EIO)
 * when the PCI channel is offline, so callers treat that as "busy".
 */
static int cmd_pending(struct mlx4_dev *dev)
{
	u32 status;

	if (pci_channel_offline(dev->pdev))
		return -EIO;

	status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);

	/* swab32(1 << bit) builds masks matching the HCR's byte order */
	return (status & swab32(1 << HCR_GO_BIT)) ||
		(mlx4_priv(dev)->cmd.toggle ==
		 !!(status & swab32(1 << HCR_T_BIT)));
}
311
/*
 * Write a command to the HCR: six big-endian parameter dwords followed
 * by the control dword carrying opcode, opcode modifier, toggle, event
 * bit and the GO bit, which hands ownership to firmware.
 *
 * If a previous command still owns the HCR, event mode waits up to
 * GO_BIT_TIMEOUT_MSECS for it to clear; poll mode fails immediately
 * (end == jiffies, so the first time_after_eq check fires).
 * Serialized by hcr_mutex.  Returns 0, -EAGAIN if the GO bit never
 * cleared, or -EIO if the PCI channel is offline.
 */
static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
			 u32 in_modifier, u8 op_modifier, u16 op, u16 token,
			 int event)
{
	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
	u32 __iomem *hcr = cmd->hcr;
	int ret = -EAGAIN;
	unsigned long end;

	mutex_lock(&cmd->hcr_mutex);

	if (pci_channel_offline(dev->pdev)) {
		/*
		 * Device is going through error recovery
		 * and cannot accept commands.
		 */
		ret = -EIO;
		goto out;
	}

	end = jiffies;
	if (event)
		end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS);

	while (cmd_pending(dev)) {
		if (pci_channel_offline(dev->pdev)) {
			/*
			 * Device is going through error recovery
			 * and cannot accept commands.
			 */
			ret = -EIO;
			goto out;
		}

		if (time_after_eq(jiffies, end)) {
			mlx4_err(dev, "%s:cmd_pending failed\n", __func__);
			goto out;
		}
		cond_resched();
	}

	/*
	 * We use writel (instead of something like memcpy_toio)
	 * because writes of less than 32 bits to the HCR don't work
	 * (and some architectures such as ia64 implement memcpy_toio
	 * in terms of writeb).
	 */
	__raw_writel((__force u32) cpu_to_be32(in_param >> 32),		  hcr + 0);
	__raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful),  hcr + 1);
	__raw_writel((__force u32) cpu_to_be32(in_modifier),		  hcr + 2);
	__raw_writel((__force u32) cpu_to_be32(out_param >> 32),	  hcr + 3);
	__raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), hcr + 4);
	__raw_writel((__force u32) cpu_to_be32(token << 16),		  hcr + 5);

	/* __raw_writel may not order writes. */
	wmb();

	/* The GO bit in this final dword transfers HCR ownership to FW;
	 * it must land after all six parameter dwords above. */
	__raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT)		|
					       (cmd->toggle << HCR_T_BIT)	|
					       (event ? (1 << HCR_E_BIT) : 0)	|
					       (op_modifier << HCR_OPMOD_SHIFT) |
					       op), hcr + 6);

	/*
	 * Make sure that our HCR writes don't get mixed in with
	 * writes from another CPU starting a FW command.
	 */
	mmiowb();

	cmd->toggle = cmd->toggle ^ 1;

	ret = 0;

out:
	mutex_unlock(&cmd->hcr_mutex);
	return ret;
}
389
390static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
391			  int out_is_imm, u32 in_modifier, u8 op_modifier,
392			  u16 op, unsigned long timeout)
393{
394	struct mlx4_priv *priv = mlx4_priv(dev);
395	struct mlx4_vhcr_cmd *vhcr = priv->mfunc.vhcr;
396	int ret;
397
398	mutex_lock(&priv->cmd.slave_cmd_mutex);
399
400	vhcr->in_param = cpu_to_be64(in_param);
401	vhcr->out_param = out_param ? cpu_to_be64(*out_param) : 0;
402	vhcr->in_modifier = cpu_to_be32(in_modifier);
403	vhcr->opcode = cpu_to_be16((((u16) op_modifier) << 12) | (op & 0xfff));
404	vhcr->token = cpu_to_be16(CMD_POLL_TOKEN);
405	vhcr->status = 0;
406	vhcr->flags = !!(priv->cmd.use_events) << 6;
407
408	if (mlx4_is_master(dev)) {
409		ret = mlx4_master_process_vhcr(dev, dev->caps.function, vhcr);
410		if (!ret) {
411			if (out_is_imm) {
412				if (out_param)
413					*out_param =
414						be64_to_cpu(vhcr->out_param);
415				else {
416					mlx4_err(dev, "response expected while"
417						 "output mailbox is NULL for "
418						 "command 0x%x\n", op);
419					vhcr->status = CMD_STAT_BAD_PARAM;
420				}
421			}
422			ret = mlx4_status_to_errno(vhcr->status);
423		}
424	} else {
425		ret = mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_POST, 0,
426				    MLX4_COMM_TIME + timeout);
427		if (!ret) {
428			if (out_is_imm) {
429				if (out_param)
430					*out_param =
431						be64_to_cpu(vhcr->out_param);
432				else {
433					mlx4_err(dev, "response expected while"
434						 "output mailbox is NULL for "
435						 "command 0x%x\n", op);
436					vhcr->status = CMD_STAT_BAD_PARAM;
437				}
438			}
439			ret = mlx4_status_to_errno(vhcr->status);
440		} else
441			mlx4_err(dev, "failed execution of VHCR_POST command"
442				 "opcode 0x%x\n", op);
443	}
444
445	mutex_unlock(&priv->cmd.slave_cmd_mutex);
446	return ret;
447}
448
449static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
450			 int out_is_imm, u32 in_modifier, u8 op_modifier,
451			 u16 op, unsigned long timeout)
452{
453	struct mlx4_priv *priv = mlx4_priv(dev);
454	void __iomem *hcr = priv->cmd.hcr;
455	int err = 0;
456	unsigned long end;
457	u32 stat;
458
459	down(&priv->cmd.poll_sem);
460
461	if (pci_channel_offline(dev->pdev)) {
462		/*
463		 * Device is going through error recovery
464		 * and cannot accept commands.
465		 */
466		err = -EIO;
467		goto out;
468	}
469
470	err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
471			    in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0);
472	if (err)
473		goto out;
474
475	end = msecs_to_jiffies(timeout) + jiffies;
476	while (cmd_pending(dev) && time_before(jiffies, end)) {
477		if (pci_channel_offline(dev->pdev)) {
478			/*
479			 * Device is going through error recovery
480			 * and cannot accept commands.
481			 */
482			err = -EIO;
483			goto out;
484		}
485
486		cond_resched();
487	}
488
489	if (cmd_pending(dev)) {
490		mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n", op);
491		err = -ETIMEDOUT;
492		goto out;
493	}
494
495	if (out_is_imm)
496		*out_param =
497			(u64) be32_to_cpu((__force __be32)
498					  __raw_readl(hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
499			(u64) be32_to_cpu((__force __be32)
500					  __raw_readl(hcr + HCR_OUT_PARAM_OFFSET + 4));
501	stat = be32_to_cpu((__force __be32)
502			   __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24;
503	err = mlx4_status_to_errno(stat);
504	if (err)
505		mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
506			 op, stat);
507
508out:
509	up(&priv->cmd.poll_sem);
510	return err;
511}
512
/*
 * Command-completion handler for event-mode commands: look up the
 * context by token, record the firmware status and immediate output,
 * and wake the waiter in mlx4_cmd_wait()/mlx4_comm_cmd_wait().
 */
void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_context *context =
		&priv->cmd.context[token & priv->cmd.token_mask];

	/* previously timed out command completing at long last */
	if (token != context->token)
		return;

	context->fw_status = status;
	context->result    = mlx4_status_to_errno(status);
	context->out_param = out_param;

	complete(&context->done);
}
529
530static int get_status(struct mlx4_dev *dev, u32 *status, int *go_bit,
531		      int *t_bit)
532{
533	if (pci_channel_offline(dev->pdev))
534		return -EIO;
535
536	*status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);
537	*t_bit = !!(*status & swab32(1 << HCR_T_BIT));
538	*go_bit = !!(*status & swab32(1 << HCR_GO_BIT));
539
540	return 0;
541}
542
543static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
544			 int out_is_imm, u32 in_modifier, u8 op_modifier,
545			 u16 op, unsigned long timeout)
546{
547	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
548	struct mlx4_cmd_context *context;
549	int err = 0;
550	int go_bit = 0, t_bit = 0, stat_err;
551	u32 status = 0;
552
553	down(&cmd->event_sem);
554
555	spin_lock(&cmd->context_lock);
556	BUG_ON(cmd->free_head < 0);
557	context = &cmd->context[cmd->free_head];
558	context->token += cmd->token_mask + 1;
559	cmd->free_head = context->next;
560	spin_unlock(&cmd->context_lock);
561
562	init_completion(&context->done);
563
564	err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
565			    in_modifier, op_modifier, op, context->token, 1);
566	if (err) {
567		mlx4_warn(dev, "command 0x%x could not be posted (%d)\n",
568			  op, err);
569		goto out;
570	}
571
572	if (!wait_for_completion_timeout(&context->done,
573					 msecs_to_jiffies(timeout))) {
574		stat_err = get_status(dev, &status, &go_bit, &t_bit);
575		mlx4_warn(dev, "command 0x%x timed out: "
576			  "get_status err=%d, status=0x%x, go_bit=%d, "
577			  "t_bit=%d, toggle=0x%x\n", op, stat_err, status,
578			  go_bit, t_bit, mlx4_priv(dev)->cmd.toggle);
579		err = -EBUSY;
580		goto out;
581	}
582
583	err = context->result;
584	if (err) {
585		mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
586			 op, context->fw_status);
587		goto out;
588	}
589
590	if (out_is_imm)
591		*out_param = context->out_param;
592
593out:
594	spin_lock(&cmd->context_lock);
595	context->next = cmd->free_head;
596	cmd->free_head = context - cmd->context;
597	spin_unlock(&cmd->context_lock);
598
599	up(&cmd->event_sem);
600	return err;
601}
602
603int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
604	       int out_is_imm, u32 in_modifier, u8 op_modifier,
605	       u16 op, unsigned long timeout, int native)
606{
607	if (pci_channel_offline(dev->pdev))
608		return -EIO;
609
610	if (!mlx4_is_mfunc(dev) || (native && mlx4_is_master(dev))) {
611		if (mlx4_priv(dev)->cmd.use_events)
612			return mlx4_cmd_wait(dev, in_param, out_param,
613					     out_is_imm, in_modifier,
614					     op_modifier, op, timeout);
615		else
616			return mlx4_cmd_poll(dev, in_param, out_param,
617					     out_is_imm, in_modifier,
618					     op_modifier, op, timeout);
619	}
620	return mlx4_slave_cmd(dev, in_param, out_param, out_is_imm,
621			      in_modifier, op_modifier, op, timeout);
622}
623EXPORT_SYMBOL_GPL(__mlx4_cmd);
624
625
/*
 * Arm the communication channel via the ARM_COMM_CHANNEL firmware
 * command (native, no parameters or mailboxes).
 */
static int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_ARM_COMM_CHANNEL,
			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}
631
/*
 * DMA a block between master memory and a slave's memory via the
 * ACCESS_MEM firmware command.  Both addresses must be 4KB-aligned,
 * @slave must fit in 7 bits, and @size must have a zero low byte
 * (the mask checks below reject anything else).
 * Returns 0 or a negative errno.
 */
static int mlx4_ACCESS_MEM(struct mlx4_dev *dev, u64 master_addr,
			   int slave, u64 slave_addr,
			   int size, int is_read)
{
	u64 in_param;
	u64 out_param;

	/* bitwise-OR of all "bad bits": nonzero means a check failed */
	if ((slave_addr & 0xfff) | (master_addr & 0xfff) |
	    (slave & ~0x7f) | (size & 0xff)) {
		mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx "
			      "master_addr:0x%llx slave_id:%d size:%d\n",
			      (long long)slave_addr, (long long)master_addr, slave, size);
		return -EINVAL;
	}

	/* NOTE(review): in_param appears to encode the source side and
	 * out_param the destination (function id OR'd with the 4K-aligned
	 * address) -- confirm against the ACCESS_MEM PRM definition. */
	if (is_read) {
		in_param = (u64) slave | slave_addr;
		out_param = (u64) dev->caps.function | master_addr;
	} else {
		in_param = (u64) dev->caps.function | master_addr;
		out_param = (u64) slave | slave_addr;
	}

	return mlx4_cmd_imm(dev, in_param, &out_param, size, 0,
			    MLX4_CMD_ACCESS_MEM,
			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
659
/*
 * Read one 32-entry pkey block of @port's pkey table with a MAD_IFC
 * command and store it, converted to host order, in @pkey.  The caller
 * supplies the prepared SMP MAD in @inbox; only attr_mod is set here.
 * @index must be a multiple of 32 (a block boundary).
 */
static int query_pkey_block(struct mlx4_dev *dev, u8 port, u16 index, u16 *pkey,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox)
{
	struct ib_smp *in_mad = (struct ib_smp *)(inbox->buf);
	struct ib_smp *out_mad = (struct ib_smp *)(outbox->buf);
	int err;
	int i;

	/* pkey blocks hold 32 entries; index must be block-aligned */
	if (index & 0x1f)
		return -EINVAL;

	/* attr_mod selects the pkey block number */
	in_mad->attr_mod = cpu_to_be32(index / 32);

	err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3,
			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
			   MLX4_CMD_NATIVE);
	if (err)
		return err;

	/* MAD payload is big-endian; convert the block to host order */
	for (i = 0; i < 32; ++i)
		pkey[i] = be16_to_cpu(((__be16 *) out_mad->data)[i]);

	return err;
}
685
686static int get_full_pkey_table(struct mlx4_dev *dev, u8 port, u16 *table,
687			       struct mlx4_cmd_mailbox *inbox,
688			       struct mlx4_cmd_mailbox *outbox)
689{
690	int i;
691	int err;
692
693	for (i = 0; i < dev->caps.pkey_table_len[port]; i += 32) {
694		err = query_pkey_block(dev, port, i, table + i, inbox, outbox);
695		if (err)
696			return err;
697	}
698
699	return 0;
700}
701#define PORT_CAPABILITY_LOCATION_IN_SMP 20
702#define PORT_STATE_OFFSET 32
703
704static enum ib_port_state vf_port_state(struct mlx4_dev *dev, int port, int vf)
705{
706	if (mlx4_get_slave_port_state(dev, vf, port) == SLAVE_PORT_UP)
707		return IB_PORT_ACTIVE;
708	else
709		return IB_PORT_DOWN;
710}
711
712static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
713				struct mlx4_vhcr *vhcr,
714				struct mlx4_cmd_mailbox *inbox,
715				struct mlx4_cmd_mailbox *outbox,
716				struct mlx4_cmd_info *cmd)
717{
718	struct ib_smp *smp = inbox->buf;
719	u32 index;
720	u8 port;
721	u16 *table;
722	int err;
723	int vidx, pidx;
724	struct mlx4_priv *priv = mlx4_priv(dev);
725	struct ib_smp *outsmp = outbox->buf;
726	__be16 *outtab = (__be16 *)(outsmp->data);
727	__be32 slave_cap_mask;
728	__be64 slave_node_guid;
729	port = vhcr->in_modifier;
730
731	if (smp->base_version == 1 &&
732	    smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
733	    smp->class_version == 1) {
734		if (smp->method	== IB_MGMT_METHOD_GET) {
735			if (smp->attr_id == IB_SMP_ATTR_PKEY_TABLE) {
736				index = be32_to_cpu(smp->attr_mod);
737				if (port < 1 || port > dev->caps.num_ports)
738					return -EINVAL;
739				table = kcalloc(dev->caps.pkey_table_len[port], sizeof *table, GFP_KERNEL);
740				if (!table)
741					return -ENOMEM;
742				/* need to get the full pkey table because the paravirtualized
743				 * pkeys may be scattered among several pkey blocks.
744				 */
745				err = get_full_pkey_table(dev, port, table, inbox, outbox);
746				if (!err) {
747					for (vidx = index * 32; vidx < (index + 1) * 32; ++vidx) {
748						pidx = priv->virt2phys_pkey[slave][port - 1][vidx];
749						outtab[vidx % 32] = cpu_to_be16(table[pidx]);
750					}
751				}
752				kfree(table);
753				return err;
754			}
755			if (smp->attr_id == IB_SMP_ATTR_PORT_INFO) {
756				/*get the slave specific caps:*/
757				/*do the command */
758				err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
759					    vhcr->in_modifier, vhcr->op_modifier,
760					    vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
761				/* modify the response for slaves */
762				if (!err && slave != mlx4_master_func_num(dev)) {
763					u8 *state = outsmp->data + PORT_STATE_OFFSET;
764
765					*state = (*state & 0xf0) | vf_port_state(dev, port, slave);
766					slave_cap_mask = priv->mfunc.master.slave_state[slave].ib_cap_mask[port];
767					memcpy(outsmp->data + PORT_CAPABILITY_LOCATION_IN_SMP, &slave_cap_mask, 4);
768				}
769				return err;
770			}
771			if (smp->attr_id == IB_SMP_ATTR_GUID_INFO) {
772				/* compute slave's gid block */
773				smp->attr_mod = cpu_to_be32(slave / 8);
774				/* execute cmd */
775				err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
776					     vhcr->in_modifier, vhcr->op_modifier,
777					     vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
778				if (!err) {
779					/* if needed, move slave gid to index 0 */
780					if (slave % 8)
781						memcpy(outsmp->data,
782						       outsmp->data + (slave % 8) * 8, 8);
783					/* delete all other gids */
784					memset(outsmp->data + 8, 0, 56);
785				}
786				return err;
787			}
788			if (smp->attr_id == IB_SMP_ATTR_NODE_INFO) {
789				err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
790					     vhcr->in_modifier, vhcr->op_modifier,
791					     vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
792				if (!err) {
793					slave_node_guid =  mlx4_get_slave_node_guid(dev, slave);
794					memcpy(outsmp->data + 12, &slave_node_guid, 8);
795				}
796				return err;
797			}
798		}
799	}
800	if (slave != mlx4_master_func_num(dev) &&
801	    ((smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) ||
802	     (smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
803	      smp->method == IB_MGMT_METHOD_SET))) {
804		mlx4_err(dev, "slave %d is trying to execute a Subnet MGMT MAD, "
805			 "class 0x%x, method 0x%x for attr 0x%x. Rejecting\n",
806			 slave, smp->method, smp->mgmt_class,
807			 be16_to_cpu(smp->attr_id));
808		return -EPERM;
809	}
810	/*default:*/
811	return mlx4_cmd_box(dev, inbox->dma, outbox->dma,
812				    vhcr->in_modifier, vhcr->op_modifier,
813				    vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
814}
815
816int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
817		     struct mlx4_vhcr *vhcr,
818		     struct mlx4_cmd_mailbox *inbox,
819		     struct mlx4_cmd_mailbox *outbox,
820		     struct mlx4_cmd_info *cmd)
821{
822	u64 in_param;
823	u64 out_param;
824	int err;
825
826	in_param = cmd->has_inbox ? (u64) inbox->dma : vhcr->in_param;
827	out_param = cmd->has_outbox ? (u64) outbox->dma : vhcr->out_param;
828	if (cmd->encode_slave_id) {
829		in_param &= 0xffffffffffffff00ll;
830		in_param |= slave;
831	}
832
833	err = __mlx4_cmd(dev, in_param, &out_param, cmd->out_is_imm,
834			 vhcr->in_modifier, vhcr->op_modifier, vhcr->op,
835			 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
836
837	if (cmd->out_is_imm)
838		vhcr->out_param = out_param;
839
840	return err;
841}
842
843static struct mlx4_cmd_info cmd_info[] = {
844	{
845		.opcode = MLX4_CMD_QUERY_FW,
846		.has_inbox = false,
847		.has_outbox = true,
848		.out_is_imm = false,
849		.encode_slave_id = false,
850		.verify = NULL,
851		.wrapper = mlx4_QUERY_FW_wrapper
852	},
853	{
854		.opcode = MLX4_CMD_QUERY_HCA,
855		.has_inbox = false,
856		.has_outbox = true,
857		.out_is_imm = false,
858		.encode_slave_id = false,
859		.verify = NULL,
860		.wrapper = NULL
861	},
862	{
863		.opcode = MLX4_CMD_QUERY_DEV_CAP,
864		.has_inbox = false,
865		.has_outbox = true,
866		.out_is_imm = false,
867		.encode_slave_id = false,
868		.verify = NULL,
869		.wrapper = mlx4_QUERY_DEV_CAP_wrapper
870	},
871	{
872		.opcode = MLX4_CMD_QUERY_FUNC_CAP,
873		.has_inbox = false,
874		.has_outbox = true,
875		.out_is_imm = false,
876		.encode_slave_id = false,
877		.verify = NULL,
878		.wrapper = mlx4_QUERY_FUNC_CAP_wrapper
879	},
880	{
881		.opcode = MLX4_CMD_QUERY_ADAPTER,
882		.has_inbox = false,
883		.has_outbox = true,
884		.out_is_imm = false,
885		.encode_slave_id = false,
886		.verify = NULL,
887		.wrapper = NULL
888	},
889	{
890		.opcode = MLX4_CMD_INIT_PORT,
891		.has_inbox = false,
892		.has_outbox = false,
893		.out_is_imm = false,
894		.encode_slave_id = false,
895		.verify = NULL,
896		.wrapper = mlx4_INIT_PORT_wrapper
897	},
898	{
899		.opcode = MLX4_CMD_CLOSE_PORT,
900		.has_inbox = false,
901		.has_outbox = false,
902		.out_is_imm  = false,
903		.encode_slave_id = false,
904		.verify = NULL,
905		.wrapper = mlx4_CLOSE_PORT_wrapper
906	},
907	{
908		.opcode = MLX4_CMD_QUERY_PORT,
909		.has_inbox = false,
910		.has_outbox = true,
911		.out_is_imm = false,
912		.encode_slave_id = false,
913		.verify = NULL,
914		.wrapper = mlx4_QUERY_PORT_wrapper
915	},
916	{
917		.opcode = MLX4_CMD_SET_PORT,
918		.has_inbox = true,
919		.has_outbox = false,
920		.out_is_imm = false,
921		.encode_slave_id = false,
922		.verify = NULL,
923		.wrapper = mlx4_SET_PORT_wrapper
924	},
925	{
926		.opcode = MLX4_CMD_MAP_EQ,
927		.has_inbox = false,
928		.has_outbox = false,
929		.out_is_imm = false,
930		.encode_slave_id = false,
931		.verify = NULL,
932		.wrapper = mlx4_MAP_EQ_wrapper
933	},
934	{
935		.opcode = MLX4_CMD_SW2HW_EQ,
936		.has_inbox = true,
937		.has_outbox = false,
938		.out_is_imm = false,
939		.encode_slave_id = true,
940		.verify = NULL,
941		.wrapper = mlx4_SW2HW_EQ_wrapper
942	},
943	{
944		.opcode = MLX4_CMD_HW_HEALTH_CHECK,
945		.has_inbox = false,
946		.has_outbox = false,
947		.out_is_imm = false,
948		.encode_slave_id = false,
949		.verify = NULL,
950		.wrapper = NULL
951	},
952	{
953		.opcode = MLX4_CMD_NOP,
954		.has_inbox = false,
955		.has_outbox = false,
956		.out_is_imm = false,
957		.encode_slave_id = false,
958		.verify = NULL,
959		.wrapper = NULL
960	},
961	{
962		.opcode = MLX4_CMD_ALLOC_RES,
963		.has_inbox = false,
964		.has_outbox = false,
965		.out_is_imm = true,
966		.encode_slave_id = false,
967		.verify = NULL,
968		.wrapper = mlx4_ALLOC_RES_wrapper
969	},
970	{
971		.opcode = MLX4_CMD_FREE_RES,
972		.has_inbox = false,
973		.has_outbox = false,
974		.out_is_imm = false,
975		.encode_slave_id = false,
976		.verify = NULL,
977		.wrapper = mlx4_FREE_RES_wrapper
978	},
979	{
980		.opcode = MLX4_CMD_SW2HW_MPT,
981		.has_inbox = true,
982		.has_outbox = false,
983		.out_is_imm = false,
984		.encode_slave_id = true,
985		.verify = NULL,
986		.wrapper = mlx4_SW2HW_MPT_wrapper
987	},
988	{
989		.opcode = MLX4_CMD_QUERY_MPT,
990		.has_inbox = false,
991		.has_outbox = true,
992		.out_is_imm = false,
993		.encode_slave_id = false,
994		.verify = NULL,
995		.wrapper = mlx4_QUERY_MPT_wrapper
996	},
997	{
998		.opcode = MLX4_CMD_HW2SW_MPT,
999		.has_inbox = false,
1000		.has_outbox = false,
1001		.out_is_imm = false,
1002		.encode_slave_id = false,
1003		.verify = NULL,
1004		.wrapper = mlx4_HW2SW_MPT_wrapper
1005	},
1006	{
1007		.opcode = MLX4_CMD_READ_MTT,
1008		.has_inbox = false,
1009		.has_outbox = true,
1010		.out_is_imm = false,
1011		.encode_slave_id = false,
1012		.verify = NULL,
1013		.wrapper = NULL
1014	},
1015	{
1016		.opcode = MLX4_CMD_WRITE_MTT,
1017		.has_inbox = true,
1018		.has_outbox = false,
1019		.out_is_imm = false,
1020		.encode_slave_id = false,
1021		.verify = NULL,
1022		.wrapper = mlx4_WRITE_MTT_wrapper
1023	},
1024	{
1025		.opcode = MLX4_CMD_SYNC_TPT,
1026		.has_inbox = true,
1027		.has_outbox = false,
1028		.out_is_imm = false,
1029		.encode_slave_id = false,
1030		.verify = NULL,
1031		.wrapper = NULL
1032	},
1033	{
1034		.opcode = MLX4_CMD_HW2SW_EQ,
1035		.has_inbox = false,
1036		.has_outbox = true,
1037		.out_is_imm = false,
1038		.encode_slave_id = true,
1039		.verify = NULL,
1040		.wrapper = mlx4_HW2SW_EQ_wrapper
1041	},
1042	{
1043		.opcode = MLX4_CMD_QUERY_EQ,
1044		.has_inbox = false,
1045		.has_outbox = true,
1046		.out_is_imm = false,
1047		.encode_slave_id = true,
1048		.verify = NULL,
1049		.wrapper = mlx4_QUERY_EQ_wrapper
1050	},
1051	{
1052		.opcode = MLX4_CMD_SW2HW_CQ,
1053		.has_inbox = true,
1054		.has_outbox = false,
1055		.out_is_imm = false,
1056		.encode_slave_id = true,
1057		.verify = NULL,
1058		.wrapper = mlx4_SW2HW_CQ_wrapper
1059	},
1060	{
1061		.opcode = MLX4_CMD_HW2SW_CQ,
1062		.has_inbox = false,
1063		.has_outbox = false,
1064		.out_is_imm = false,
1065		.encode_slave_id = false,
1066		.verify = NULL,
1067		.wrapper = mlx4_HW2SW_CQ_wrapper
1068	},
1069	{
1070		.opcode = MLX4_CMD_QUERY_CQ,
1071		.has_inbox = false,
1072		.has_outbox = true,
1073		.out_is_imm = false,
1074		.encode_slave_id = false,
1075		.verify = NULL,
1076		.wrapper = mlx4_QUERY_CQ_wrapper
1077	},
1078	{
1079		.opcode = MLX4_CMD_MODIFY_CQ,
1080		.has_inbox = true,
1081		.has_outbox = false,
1082		.out_is_imm = true,
1083		.encode_slave_id = false,
1084		.verify = NULL,
1085		.wrapper = mlx4_MODIFY_CQ_wrapper
1086	},
1087	{
1088		.opcode = MLX4_CMD_SW2HW_SRQ,
1089		.has_inbox = true,
1090		.has_outbox = false,
1091		.out_is_imm = false,
1092		.encode_slave_id = true,
1093		.verify = NULL,
1094		.wrapper = mlx4_SW2HW_SRQ_wrapper
1095	},
1096	{
1097		.opcode = MLX4_CMD_HW2SW_SRQ,
1098		.has_inbox = false,
1099		.has_outbox = false,
1100		.out_is_imm = false,
1101		.encode_slave_id = false,
1102		.verify = NULL,
1103		.wrapper = mlx4_HW2SW_SRQ_wrapper
1104	},
1105	{
1106		.opcode = MLX4_CMD_QUERY_SRQ,
1107		.has_inbox = false,
1108		.has_outbox = true,
1109		.out_is_imm = false,
1110		.encode_slave_id = false,
1111		.verify = NULL,
1112		.wrapper = mlx4_QUERY_SRQ_wrapper
1113	},
1114	{
1115		.opcode = MLX4_CMD_ARM_SRQ,
1116		.has_inbox = false,
1117		.has_outbox = false,
1118		.out_is_imm = false,
1119		.encode_slave_id = false,
1120		.verify = NULL,
1121		.wrapper = mlx4_ARM_SRQ_wrapper
1122	},
1123	{
1124		.opcode = MLX4_CMD_RST2INIT_QP,
1125		.has_inbox = true,
1126		.has_outbox = false,
1127		.out_is_imm = false,
1128		.encode_slave_id = true,
1129		.verify = NULL,
1130		.wrapper = mlx4_RST2INIT_QP_wrapper
1131	},
1132	{
1133		.opcode = MLX4_CMD_INIT2INIT_QP,
1134		.has_inbox = true,
1135		.has_outbox = false,
1136		.out_is_imm = false,
1137		.encode_slave_id = false,
1138		.verify = NULL,
1139		.wrapper = mlx4_INIT2INIT_QP_wrapper
1140	},
1141	{
1142		.opcode = MLX4_CMD_INIT2RTR_QP,
1143		.has_inbox = true,
1144		.has_outbox = false,
1145		.out_is_imm = false,
1146		.encode_slave_id = false,
1147		.verify = NULL,
1148		.wrapper = mlx4_INIT2RTR_QP_wrapper
1149	},
1150	{
1151		.opcode = MLX4_CMD_RTR2RTS_QP,
1152		.has_inbox = true,
1153		.has_outbox = false,
1154		.out_is_imm = false,
1155		.encode_slave_id = false,
1156		.verify = NULL,
1157		.wrapper = mlx4_RTR2RTS_QP_wrapper
1158	},
1159	{
1160		.opcode = MLX4_CMD_RTS2RTS_QP,
1161		.has_inbox = true,
1162		.has_outbox = false,
1163		.out_is_imm = false,
1164		.encode_slave_id = false,
1165		.verify = NULL,
1166		.wrapper = mlx4_RTS2RTS_QP_wrapper
1167	},
1168	{
1169		.opcode = MLX4_CMD_SQERR2RTS_QP,
1170		.has_inbox = true,
1171		.has_outbox = false,
1172		.out_is_imm = false,
1173		.encode_slave_id = false,
1174		.verify = NULL,
1175		.wrapper = mlx4_SQERR2RTS_QP_wrapper
1176	},
1177	{
1178		.opcode = MLX4_CMD_2ERR_QP,
1179		.has_inbox = false,
1180		.has_outbox = false,
1181		.out_is_imm = false,
1182		.encode_slave_id = false,
1183		.verify = NULL,
1184		.wrapper = mlx4_GEN_QP_wrapper
1185	},
1186	{
1187		.opcode = MLX4_CMD_RTS2SQD_QP,
1188		.has_inbox = false,
1189		.has_outbox = false,
1190		.out_is_imm = false,
1191		.encode_slave_id = false,
1192		.verify = NULL,
1193		.wrapper = mlx4_GEN_QP_wrapper
1194	},
1195	{
1196		.opcode = MLX4_CMD_SQD2SQD_QP,
1197		.has_inbox = true,
1198		.has_outbox = false,
1199		.out_is_imm = false,
1200		.encode_slave_id = false,
1201		.verify = NULL,
1202		.wrapper = mlx4_SQD2SQD_QP_wrapper
1203	},
1204	{
1205		.opcode = MLX4_CMD_SQD2RTS_QP,
1206		.has_inbox = true,
1207		.has_outbox = false,
1208		.out_is_imm = false,
1209		.encode_slave_id = false,
1210		.verify = NULL,
1211		.wrapper = mlx4_SQD2RTS_QP_wrapper
1212	},
1213	{
1214		.opcode = MLX4_CMD_2RST_QP,
1215		.has_inbox = false,
1216		.has_outbox = false,
1217		.out_is_imm = false,
1218		.encode_slave_id = false,
1219		.verify = NULL,
1220		.wrapper = mlx4_2RST_QP_wrapper
1221	},
1222	{
1223		.opcode = MLX4_CMD_QUERY_QP,
1224		.has_inbox = false,
1225		.has_outbox = true,
1226		.out_is_imm = false,
1227		.encode_slave_id = false,
1228		.verify = NULL,
1229		.wrapper = mlx4_GEN_QP_wrapper
1230	},
1231	{
1232		.opcode = MLX4_CMD_SUSPEND_QP,
1233		.has_inbox = false,
1234		.has_outbox = false,
1235		.out_is_imm = false,
1236		.encode_slave_id = false,
1237		.verify = NULL,
1238		.wrapper = mlx4_GEN_QP_wrapper
1239	},
1240	{
1241		.opcode = MLX4_CMD_UNSUSPEND_QP,
1242		.has_inbox = false,
1243		.has_outbox = false,
1244		.out_is_imm = false,
1245		.encode_slave_id = false,
1246		.verify = NULL,
1247		.wrapper = mlx4_GEN_QP_wrapper
1248	},
1249	{
1250		.opcode = MLX4_CMD_CONF_SPECIAL_QP,
1251		.has_inbox = false,
1252		.has_outbox = false,
1253		.out_is_imm = false,
1254		.encode_slave_id = false,
1255		.verify = NULL, /* XXX verify: only demux can do this */
1256		.wrapper = NULL
1257	},
1258	{
1259		.opcode = MLX4_CMD_MAD_IFC,
1260		.has_inbox = true,
1261		.has_outbox = true,
1262		.out_is_imm = false,
1263		.encode_slave_id = false,
1264		.verify = NULL,
1265		.wrapper = mlx4_MAD_IFC_wrapper
1266	},
1267	{
1268		.opcode = MLX4_CMD_QUERY_IF_STAT,
1269		.has_inbox = false,
1270		.has_outbox = true,
1271		.out_is_imm = false,
1272		.encode_slave_id = false,
1273		.verify = NULL,
1274		.wrapper = mlx4_QUERY_IF_STAT_wrapper
1275	},
1276	/* Native multicast commands are not available for guests */
1277	{
1278		.opcode = MLX4_CMD_QP_ATTACH,
1279		.has_inbox = true,
1280		.has_outbox = false,
1281		.out_is_imm = false,
1282		.encode_slave_id = false,
1283		.verify = NULL,
1284		.wrapper = mlx4_QP_ATTACH_wrapper
1285	},
1286	{
1287		.opcode = MLX4_CMD_PROMISC,
1288		.has_inbox = false,
1289		.has_outbox = false,
1290		.out_is_imm = false,
1291		.encode_slave_id = false,
1292		.verify = NULL,
1293		.wrapper = mlx4_PROMISC_wrapper
1294	},
1295	/* Ethernet specific commands */
1296	{
1297		.opcode = MLX4_CMD_SET_VLAN_FLTR,
1298		.has_inbox = true,
1299		.has_outbox = false,
1300		.out_is_imm = false,
1301		.encode_slave_id = false,
1302		.verify = NULL,
1303		.wrapper = mlx4_SET_VLAN_FLTR_wrapper
1304	},
1305	{
1306		.opcode = MLX4_CMD_SET_MCAST_FLTR,
1307		.has_inbox = false,
1308		.has_outbox = false,
1309		.out_is_imm = false,
1310		.encode_slave_id = false,
1311		.verify = NULL,
1312		.wrapper = mlx4_SET_MCAST_FLTR_wrapper
1313	},
1314	{
1315		.opcode = MLX4_CMD_DUMP_ETH_STATS,
1316		.has_inbox = false,
1317		.has_outbox = true,
1318		.out_is_imm = false,
1319		.encode_slave_id = false,
1320		.verify = NULL,
1321		.wrapper = mlx4_DUMP_ETH_STATS_wrapper
1322	},
1323	{
1324		.opcode = MLX4_CMD_INFORM_FLR_DONE,
1325		.has_inbox = false,
1326		.has_outbox = false,
1327		.out_is_imm = false,
1328		.encode_slave_id = false,
1329		.verify = NULL,
1330		.wrapper = NULL
1331	},
1332	/* flow steering commands */
1333	{
1334		.opcode = MLX4_QP_FLOW_STEERING_ATTACH,
1335		.has_inbox = true,
1336		.has_outbox = false,
1337		.out_is_imm = true,
1338		.encode_slave_id = false,
1339		.verify = NULL,
1340		.wrapper = mlx4_QP_FLOW_STEERING_ATTACH_wrapper
1341	},
1342	{
1343		.opcode = MLX4_QP_FLOW_STEERING_DETACH,
1344		.has_inbox = false,
1345		.has_outbox = false,
1346		.out_is_imm = false,
1347		.encode_slave_id = false,
1348		.verify = NULL,
1349		.wrapper = mlx4_QP_FLOW_STEERING_DETACH_wrapper
1350	},
1351};
1352
/*
 * Execute one virtual-HCR (vHCR) command on behalf of a slave (VF).
 *
 * @dev:     master's device
 * @slave:   index of the issuing slave function
 * @in_vhcr: command image already present in host memory, or NULL to
 *           DMA the image in from the slave's registered vhcr_dma.
 *
 * The opcode is looked up in cmd_info[]; if the entry has a wrapper the
 * wrapper executes it, otherwise the command is passed through to FW via
 * __mlx4_cmd().  Command-level failures are reported to the slave through
 * vhcr_cmd->status; the function's own return value reflects only whether
 * communication with the slave (vHCR/inbox/outbox DMA) succeeded.
 */
static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
				    struct mlx4_vhcr_cmd *in_vhcr)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_info *cmd = NULL;
	/* fall back to the shared vhcr buffer when no image was supplied */
	struct mlx4_vhcr_cmd *vhcr_cmd = in_vhcr ? in_vhcr : priv->mfunc.vhcr;
	struct mlx4_vhcr *vhcr;
	struct mlx4_cmd_mailbox *inbox = NULL;
	struct mlx4_cmd_mailbox *outbox = NULL;
	u64 in_param;
	u64 out_param;
	int ret = 0;
	int i;
	int err = 0;

	/* Create sw representation of Virtual HCR */
	vhcr = kzalloc(sizeof(struct mlx4_vhcr), GFP_KERNEL);
	if (!vhcr)
		return -ENOMEM;

	/* DMA in the vHCR */
	if (!in_vhcr) {
		ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
				      priv->mfunc.master.slave_state[slave].vhcr_dma,
				      ALIGN(sizeof(struct mlx4_vhcr_cmd),
					    MLX4_ACCESS_MEM_ALIGN), 1);
		if (ret) {
			mlx4_err(dev, "%s:Failed reading vhcr"
				 "ret: 0x%x\n", __func__, ret);
			kfree(vhcr);
			return ret;
		}
	}

	/* Fill SW VHCR fields (convert from the big-endian wire layout) */
	vhcr->in_param = be64_to_cpu(vhcr_cmd->in_param);
	vhcr->out_param = be64_to_cpu(vhcr_cmd->out_param);
	vhcr->in_modifier = be32_to_cpu(vhcr_cmd->in_modifier);
	vhcr->token = be16_to_cpu(vhcr_cmd->token);
	/* low 12 bits of the opcode word are the opcode, high 4 the modifier */
	vhcr->op = be16_to_cpu(vhcr_cmd->opcode) & 0xfff;
	vhcr->op_modifier = (u8) (be16_to_cpu(vhcr_cmd->opcode) >> 12);
	/* e_bit set: slave wants a completion EQE instead of polling */
	vhcr->e_bit = vhcr_cmd->flags & (1 << 6);

	/* Lookup command */
	for (i = 0; i < ARRAY_SIZE(cmd_info); ++i) {
		if (vhcr->op == cmd_info[i].opcode) {
			cmd = &cmd_info[i];
			break;
		}
	}
	if (!cmd) {
		mlx4_err(dev, "Unknown command:0x%x accepted from slave:%d\n",
			 vhcr->op, slave);
		vhcr_cmd->status = CMD_STAT_BAD_PARAM;
		goto out_status;
	}

	/* Read inbox */
	if (cmd->has_inbox) {
		vhcr->in_param &= INBOX_MASK;
		inbox = mlx4_alloc_cmd_mailbox(dev);
		if (IS_ERR(inbox)) {
			vhcr_cmd->status = CMD_STAT_BAD_SIZE;
			/* clear so the cleanup path won't free an ERR_PTR */
			inbox = NULL;
			goto out_status;
		}

		/* copy the command's input mailbox from slave memory */
		if (mlx4_ACCESS_MEM(dev, inbox->dma, slave,
				    vhcr->in_param,
				    MLX4_MAILBOX_SIZE, 1)) {
			mlx4_err(dev, "%s: Failed reading inbox (cmd:0x%x)\n",
				 __func__, cmd->opcode);
			vhcr_cmd->status = CMD_STAT_INTERNAL_ERR;
			goto out_status;
		}
	}

	/* Apply permission and bound checks if applicable */
	if (cmd->verify && cmd->verify(dev, slave, vhcr, inbox)) {
		mlx4_warn(dev, "Command:0x%x from slave: %d failed protection "
			  "checks for resource_id:%d\n", vhcr->op, slave,
			  vhcr->in_modifier);
		vhcr_cmd->status = CMD_STAT_BAD_OP;
		goto out_status;
	}

	/* Allocate outbox */
	if (cmd->has_outbox) {
		outbox = mlx4_alloc_cmd_mailbox(dev);
		if (IS_ERR(outbox)) {
			vhcr_cmd->status = CMD_STAT_BAD_SIZE;
			outbox = NULL;
			goto out_status;
		}
	}

	/* Execute the command! */
	if (cmd->wrapper) {
		/* para-virtualized path: wrapper may rewrite IDs/resources */
		err = cmd->wrapper(dev, slave, vhcr, inbox, outbox,
				   cmd);
		if (cmd->out_is_imm)
			vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
	} else {
		/* pass-through path: hand the command straight to FW */
		in_param = cmd->has_inbox ? (u64) inbox->dma :
			vhcr->in_param;
		out_param = cmd->has_outbox ? (u64) outbox->dma :
			vhcr->out_param;
		err = __mlx4_cmd(dev, in_param, &out_param,
				 cmd->out_is_imm, vhcr->in_modifier,
				 vhcr->op_modifier, vhcr->op,
				 MLX4_CMD_TIME_CLASS_A,
				 MLX4_CMD_NATIVE);

		if (cmd->out_is_imm) {
			vhcr->out_param = out_param;
			vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
		}
	}

	if (err) {
		/* NOTE(review): vhcr->errno is never assigned in this function
		 * (vhcr is kzalloc'd above), so the "error" printed here is
		 * always 0 while "status" carries err — the format arguments
		 * look swapped; confirm before relying on this log output. */
		mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with"
			  " error:%d, status %d\n",
			  vhcr->op, slave, vhcr->errno, err);
		vhcr_cmd->status = mlx4_errno_to_status(err);
		goto out_status;
	}


	/* Write outbox if command completed successfully */
	if (cmd->has_outbox && !vhcr_cmd->status) {
		ret = mlx4_ACCESS_MEM(dev, outbox->dma, slave,
				      vhcr->out_param,
				      MLX4_MAILBOX_SIZE, MLX4_CMD_WRAPPED);
		if (ret) {
			/* If we failed to write back the outbox after the
			 *command was successfully executed, we must fail this
			 * slave, as it is now in undefined state */
			mlx4_err(dev, "%s:Failed writing outbox\n", __func__);
			goto out;
		}
	}

out_status:
	/* DMA back vhcr result */
	if (!in_vhcr) {
		ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
				      priv->mfunc.master.slave_state[slave].vhcr_dma,
				      ALIGN(sizeof(struct mlx4_vhcr),
					    MLX4_ACCESS_MEM_ALIGN),
				      MLX4_CMD_WRAPPED);
		if (ret)
			mlx4_err(dev, "%s:Failed writing vhcr result\n",
				 __func__);
		else if (vhcr->e_bit &&
			 mlx4_GEN_EQE(dev, slave, &priv->mfunc.master.cmd_eqe))
				mlx4_warn(dev, "Failed to generate command completion "
					  "eqe for slave %d\n", slave);
	}

out:
	kfree(vhcr);
	mlx4_free_cmd_mailbox(dev, inbox);
	mlx4_free_cmd_mailbox(dev, outbox);
	return ret;
}
1518
1519static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
1520{
1521	int port, err;
1522	struct mlx4_vport_state *vp_admin;
1523	struct mlx4_vport_oper_state *vp_oper;
1524
1525	for (port = 1; port <= MLX4_MAX_PORTS; port++) {
1526		vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
1527		vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
1528		vp_oper->state = *vp_admin;
1529		if (MLX4_VGT != vp_admin->default_vlan) {
1530			err = mlx4_register_vlan(&priv->dev, port,
1531						 vp_admin->default_vlan, &(vp_oper->vlan_idx));
1532			if (err) {
1533				vp_oper->vlan_idx = NO_INDX;
1534				mlx4_warn((&priv->dev),
1535					  "No vlan resorces slave %d, port %d\n",
1536					  slave, port);
1537				return err;
1538			}
1539			mlx4_dbg((&(priv->dev)), "alloc vlan %d idx  %d slave %d port %d\n",
1540				 (int)(vp_oper->state.default_vlan),
1541				 vp_oper->vlan_idx, slave, port);
1542		}
1543		if (vp_admin->spoofchk) {
1544			vp_oper->mac_idx = __mlx4_register_mac(&priv->dev,
1545							       port,
1546							       vp_admin->mac);
1547			if (0 > vp_oper->mac_idx) {
1548				err = vp_oper->mac_idx;
1549				vp_oper->mac_idx = NO_INDX;
1550				mlx4_warn((&priv->dev),
1551					  "No mac resorces slave %d, port %d\n",
1552					  slave, port);
1553				return err;
1554			}
1555			mlx4_dbg((&(priv->dev)), "alloc mac %llx idx  %d slave %d port %d\n",
1556				 (long long)vp_oper->state.mac, vp_oper->mac_idx, slave, port);
1557		}
1558	}
1559	return 0;
1560}
1561
1562static void mlx4_master_deactivate_admin_state(struct mlx4_priv *priv, int slave)
1563{
1564	int port;
1565	struct mlx4_vport_oper_state *vp_oper;
1566
1567	for (port = 1; port <= MLX4_MAX_PORTS; port++) {
1568		vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
1569		if (NO_INDX != vp_oper->vlan_idx) {
1570			__mlx4_unregister_vlan(&priv->dev,
1571					       port, vp_oper->state.default_vlan);
1572			vp_oper->vlan_idx = NO_INDX;
1573		}
1574		if (NO_INDX != vp_oper->mac_idx) {
1575			__mlx4_unregister_mac(&priv->dev, port, vp_oper->state.mac);
1576			vp_oper->mac_idx = NO_INDX;
1577		}
1578	}
1579	return;
1580}
1581
/*
 * Handle one comm-channel command word received from @slave.
 *
 * @cmd:    command byte (bits 23:16 of the slave_write word)
 * @param:  16-bit parameter (low half of the word)
 * @toggle: toggle bit the slave wrote (bit 31)
 *
 * Drives the slave bring-up handshake: RESET, then VHCR0/1/2/VHCR_EN
 * (which deliver the slave's vHCR DMA address in 16-bit chunks), then
 * VHCR_POST to execute posted commands.  On success the updated toggle
 * is written back to slave_read as the ack; on any protocol violation
 * the slave's resources are torn down and its state reset.
 */
static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
			       u16 param, u8 toggle)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
	u32 reply;
	u8 is_going_down = 0;
	int i;
	unsigned long flags;

	/* Advance our toggle; a well-behaved slave's toggle must now match */
	slave_state[slave].comm_toggle ^= 1;
	reply = (u32) slave_state[slave].comm_toggle << 31;
	if (toggle != slave_state[slave].comm_toggle) {
		mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER"
			  "STATE COMPROMISIED ***\n", toggle, slave);
		goto reset_slave;
	}
	if (cmd == MLX4_COMM_CMD_RESET) {
		mlx4_warn(dev, "Received reset from slave:%d\n", slave);
		slave_state[slave].active = false;
		mlx4_master_deactivate_admin_state(priv, slave);
		/* invalidate all async-event EQ subscriptions of this slave */
		for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) {
				slave_state[slave].event_eq[i].eqn = -1;
				slave_state[slave].event_eq[i].token = 0;
		}
		/*check if we are in the middle of FLR process,
		if so return "retry" status to the slave*/
		if (MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd)
			goto inform_slave_state;

		mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_SHUTDOWN, slave);

		/* write the version in the event field */
		reply |= mlx4_comm_get_version();

		goto reset_slave;
	}
	/*command from slave in the middle of FLR*/
	if (cmd != MLX4_COMM_CMD_RESET &&
	    MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
		mlx4_warn(dev, "slave:%d is Trying to run cmd(0x%x) "
			  "in the middle of FLR\n", slave, cmd);
		return;
	}

	/* each VHCR* step is only legal directly after its predecessor */
	switch (cmd) {
	case MLX4_COMM_CMD_VHCR0:
		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_RESET)
			goto reset_slave;
		/* param carries bits 63:48 of the slave's vHCR DMA address */
		slave_state[slave].vhcr_dma = ((u64) param) << 48;
		priv->mfunc.master.slave_state[slave].cookie = 0;
		mutex_init(&priv->mfunc.master.gen_eqe_mutex[slave]);
		break;
	case MLX4_COMM_CMD_VHCR1:
		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR0)
			goto reset_slave;
		/* bits 47:32 of the vHCR DMA address */
		slave_state[slave].vhcr_dma |= ((u64) param) << 32;
		break;
	case MLX4_COMM_CMD_VHCR2:
		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR1)
			goto reset_slave;
		/* bits 31:16 of the vHCR DMA address */
		slave_state[slave].vhcr_dma |= ((u64) param) << 16;
		break;
	case MLX4_COMM_CMD_VHCR_EN:
		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR2)
			goto reset_slave;
		/* final 16 bits; address complete — activate the slave */
		slave_state[slave].vhcr_dma |= param;
		if (mlx4_master_activate_admin_state(priv, slave))
				goto reset_slave;
		slave_state[slave].active = true;
		mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_INIT, slave);
		break;
	case MLX4_COMM_CMD_VHCR_POST:
		if ((slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_EN) &&
		    (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_POST))
			goto reset_slave;

		/* serialize vHCR processing across slaves */
		mutex_lock(&priv->cmd.slave_cmd_mutex);
		if (mlx4_master_process_vhcr(dev, slave, NULL)) {
			mlx4_err(dev, "Failed processing vhcr for slave:%d,"
				 " resetting slave.\n", slave);
			mutex_unlock(&priv->cmd.slave_cmd_mutex);
			goto reset_slave;
		}
		mutex_unlock(&priv->cmd.slave_cmd_mutex);
		break;
	default:
		mlx4_warn(dev, "Bad comm cmd:%d from slave:%d\n", cmd, slave);
		goto reset_slave;
	}
	spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
	if (!slave_state[slave].is_slave_going_down)
		slave_state[slave].last_cmd = cmd;
	else
		is_going_down = 1;
	spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
	if (is_going_down) {
		mlx4_warn(dev, "Slave is going down aborting command(%d)"
			  " executing from slave:%d\n",
			  cmd, slave);
		return;
	}
	/* ack the command by writing the new toggle to slave_read */
	__raw_writel((__force u32) cpu_to_be32(reply),
		     &priv->mfunc.comm[slave].slave_read);
	mmiowb();

	return;

reset_slave:
	/* cleanup any slave resources */
	mlx4_delete_all_resources_for_slave(dev, slave);
	spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
	if (!slave_state[slave].is_slave_going_down)
		slave_state[slave].last_cmd = MLX4_COMM_CMD_RESET;
	spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
	/*with slave in the middle of flr, no need to clean resources again.*/
inform_slave_state:
	memset(&slave_state[slave].event_eq, 0,
	       sizeof(struct mlx4_slave_event_eq_info));
	__raw_writel((__force u32) cpu_to_be32(reply),
		     &priv->mfunc.comm[slave].slave_read);
	wmb();
}
1705
1706/* master command processing */
1707void mlx4_master_comm_channel(struct work_struct *work)
1708{
1709	struct mlx4_mfunc_master_ctx *master =
1710		container_of(work,
1711			     struct mlx4_mfunc_master_ctx,
1712			     comm_work);
1713	struct mlx4_mfunc *mfunc =
1714		container_of(master, struct mlx4_mfunc, master);
1715	struct mlx4_priv *priv =
1716		container_of(mfunc, struct mlx4_priv, mfunc);
1717	struct mlx4_dev *dev = &priv->dev;
1718	__be32 *bit_vec;
1719	u32 comm_cmd;
1720	u32 vec;
1721	int i, j, slave;
1722	int toggle;
1723	int served = 0;
1724	int reported = 0;
1725	u32 slt;
1726
1727	bit_vec = master->comm_arm_bit_vector;
1728	for (i = 0; i < COMM_CHANNEL_BIT_ARRAY_SIZE; i++) {
1729		vec = be32_to_cpu(bit_vec[i]);
1730		for (j = 0; j < 32; j++) {
1731			if (!(vec & (1 << j)))
1732				continue;
1733			++reported;
1734			slave = (i * 32) + j;
1735			comm_cmd = swab32(readl(
1736					  &mfunc->comm[slave].slave_write));
1737			slt = swab32(readl(&mfunc->comm[slave].slave_read))
1738				     >> 31;
1739			toggle = comm_cmd >> 31;
1740			if (toggle != slt) {
1741				if (master->slave_state[slave].comm_toggle
1742				    != slt) {
1743					mlx4_info(dev, "slave %d out of sync."
1744						  " read toggle %d, state toggle %d. "
1745						  "Resynching.\n", slave, slt,
1746						  master->slave_state[slave].comm_toggle);
1747					master->slave_state[slave].comm_toggle =
1748						slt;
1749				}
1750				mlx4_master_do_cmd(dev, slave,
1751						   comm_cmd >> 16 & 0xff,
1752						   comm_cmd & 0xffff, toggle);
1753				++served;
1754			}
1755		}
1756	}
1757
1758	if (reported && reported != served)
1759		mlx4_warn(dev, "Got command event with bitmask from %d slaves"
1760			  " but %d were served\n",
1761			  reported, served);
1762
1763	if (mlx4_ARM_COMM_CHANNEL(dev))
1764		mlx4_warn(dev, "Failed to arm comm channel events\n");
1765}
1766
1767static int sync_toggles(struct mlx4_dev *dev)
1768{
1769	struct mlx4_priv *priv = mlx4_priv(dev);
1770	int wr_toggle;
1771	int rd_toggle;
1772	unsigned long end;
1773
1774	wr_toggle = swab32(readl(&priv->mfunc.comm->slave_write)) >> 31;
1775	end = jiffies + msecs_to_jiffies(5000);
1776
1777	while (time_before(jiffies, end)) {
1778		rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read)) >> 31;
1779		if (rd_toggle == wr_toggle) {
1780			priv->cmd.comm_toggle = rd_toggle;
1781			return 0;
1782		}
1783
1784		cond_resched();
1785	}
1786
1787	/*
1788	 * we could reach here if for example the previous VM using this
1789	 * function misbehaved and left the channel with unsynced state. We
1790	 * should fix this here and give this VM a chance to use a properly
1791	 * synced channel
1792	 */
1793	mlx4_warn(dev, "recovering from previously mis-behaved VM\n");
1794	__raw_writel((__force u32) 0, &priv->mfunc.comm->slave_read);
1795	__raw_writel((__force u32) 0, &priv->mfunc.comm->slave_write);
1796	priv->cmd.comm_toggle = 0;
1797
1798	return 0;
1799}
1800
1801int mlx4_multi_func_init(struct mlx4_dev *dev)
1802{
1803	struct mlx4_priv *priv = mlx4_priv(dev);
1804	struct mlx4_slave_state *s_state;
1805	int i, j, err, port;
1806
1807	if (mlx4_is_master(dev))
1808		priv->mfunc.comm =
1809		ioremap(pci_resource_start(dev->pdev, priv->fw.comm_bar) +
1810			priv->fw.comm_base, MLX4_COMM_PAGESIZE);
1811	else
1812		priv->mfunc.comm =
1813		ioremap(pci_resource_start(dev->pdev, 2) +
1814			MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE);
1815	if (!priv->mfunc.comm) {
1816		mlx4_err(dev, "Couldn't map communication vector.\n");
1817		goto err_vhcr;
1818	}
1819
1820	if (mlx4_is_master(dev)) {
1821		priv->mfunc.master.slave_state =
1822			kzalloc(dev->num_slaves *
1823				sizeof(struct mlx4_slave_state), GFP_KERNEL);
1824		if (!priv->mfunc.master.slave_state)
1825			goto err_comm;
1826
1827		priv->mfunc.master.vf_admin =
1828			kzalloc(dev->num_slaves *
1829				sizeof(struct mlx4_vf_admin_state), GFP_KERNEL);
1830		if (!priv->mfunc.master.vf_admin)
1831			goto err_comm_admin;
1832
1833		priv->mfunc.master.vf_oper =
1834			kzalloc(dev->num_slaves *
1835				sizeof(struct mlx4_vf_oper_state), GFP_KERNEL);
1836		if (!priv->mfunc.master.vf_oper)
1837			goto err_comm_oper;
1838
1839		for (i = 0; i < dev->num_slaves; ++i) {
1840			s_state = &priv->mfunc.master.slave_state[i];
1841			s_state->last_cmd = MLX4_COMM_CMD_RESET;
1842			for (j = 0; j < MLX4_EVENT_TYPES_NUM; ++j)
1843				s_state->event_eq[j].eqn = -1;
1844			__raw_writel((__force u32) 0,
1845				     &priv->mfunc.comm[i].slave_write);
1846			__raw_writel((__force u32) 0,
1847				     &priv->mfunc.comm[i].slave_read);
1848			mmiowb();
1849			for (port = 1; port <= MLX4_MAX_PORTS; port++) {
1850				s_state->vlan_filter[port] =
1851					kzalloc(sizeof(struct mlx4_vlan_fltr),
1852						GFP_KERNEL);
1853				if (!s_state->vlan_filter[port]) {
1854					if (--port)
1855						kfree(s_state->vlan_filter[port]);
1856					goto err_slaves;
1857				}
1858				INIT_LIST_HEAD(&s_state->mcast_filters[port]);
1859				priv->mfunc.master.vf_admin[i].vport[port].default_vlan = MLX4_VGT;
1860				priv->mfunc.master.vf_oper[i].vport[port].state.default_vlan = MLX4_VGT;
1861				priv->mfunc.master.vf_oper[i].vport[port].vlan_idx = NO_INDX;
1862				priv->mfunc.master.vf_oper[i].vport[port].mac_idx = NO_INDX;
1863			}
1864			spin_lock_init(&s_state->lock);
1865		}
1866
1867		memset(&priv->mfunc.master.cmd_eqe, 0, dev->caps.eqe_size);
1868		priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD;
1869		INIT_WORK(&priv->mfunc.master.comm_work,
1870			  mlx4_master_comm_channel);
1871		INIT_WORK(&priv->mfunc.master.slave_event_work,
1872			  mlx4_gen_slave_eqe);
1873		INIT_WORK(&priv->mfunc.master.slave_flr_event_work,
1874			  mlx4_master_handle_slave_flr);
1875		spin_lock_init(&priv->mfunc.master.slave_state_lock);
1876		spin_lock_init(&priv->mfunc.master.slave_eq.event_lock);
1877		priv->mfunc.master.comm_wq =
1878			create_singlethread_workqueue("mlx4_comm");
1879		if (!priv->mfunc.master.comm_wq)
1880			goto err_slaves;
1881
1882		if (mlx4_init_resource_tracker(dev))
1883			goto err_thread;
1884
1885		err = mlx4_ARM_COMM_CHANNEL(dev);
1886		if (err) {
1887			mlx4_err(dev, " Failed to arm comm channel eq: %x\n",
1888				 err);
1889			goto err_resource;
1890		}
1891
1892	} else {
1893		err = sync_toggles(dev);
1894		if (err) {
1895			mlx4_err(dev, "Couldn't sync toggles\n");
1896			goto err_comm;
1897		}
1898	}
1899	return 0;
1900
1901err_resource:
1902	mlx4_free_resource_tracker(dev, RES_TR_FREE_ALL);
1903err_thread:
1904	flush_workqueue(priv->mfunc.master.comm_wq);
1905	destroy_workqueue(priv->mfunc.master.comm_wq);
1906err_slaves:
1907	while (--i) {
1908		for (port = 1; port <= MLX4_MAX_PORTS; port++)
1909			kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
1910	}
1911	kfree(priv->mfunc.master.vf_oper);
1912err_comm_oper:
1913	kfree(priv->mfunc.master.vf_admin);
1914err_comm_admin:
1915	kfree(priv->mfunc.master.slave_state);
1916err_comm:
1917	iounmap(priv->mfunc.comm);
1918err_vhcr:
1919	dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
1920					     priv->mfunc.vhcr,
1921					     priv->mfunc.vhcr_dma);
1922	priv->mfunc.vhcr = NULL;
1923	return -ENOMEM;
1924}
1925
1926int mlx4_cmd_init(struct mlx4_dev *dev)
1927{
1928	struct mlx4_priv *priv = mlx4_priv(dev);
1929
1930	mutex_init(&priv->cmd.hcr_mutex);
1931	mutex_init(&priv->cmd.slave_cmd_mutex);
1932	sema_init(&priv->cmd.poll_sem, 1);
1933	priv->cmd.use_events = 0;
1934	priv->cmd.toggle     = 1;
1935
1936	priv->cmd.hcr = NULL;
1937	priv->mfunc.vhcr = NULL;
1938
1939	if (!mlx4_is_slave(dev)) {
1940		priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) +
1941					MLX4_HCR_BASE, MLX4_HCR_SIZE);
1942		if (!priv->cmd.hcr) {
1943			mlx4_err(dev, "Couldn't map command register.\n");
1944			return -ENOMEM;
1945		}
1946	}
1947
1948	if (mlx4_is_mfunc(dev)) {
1949		priv->mfunc.vhcr = dma_alloc_coherent(&(dev->pdev->dev), PAGE_SIZE,
1950						      &priv->mfunc.vhcr_dma,
1951						      GFP_KERNEL);
1952		if (!priv->mfunc.vhcr) {
1953			mlx4_err(dev, "Couldn't allocate VHCR.\n");
1954			goto err_hcr;
1955		}
1956	}
1957
1958	priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev,
1959					 MLX4_MAILBOX_SIZE,
1960					 MLX4_MAILBOX_SIZE, 0);
1961	if (!priv->cmd.pool)
1962		goto err_vhcr;
1963
1964	return 0;
1965
1966err_vhcr:
1967	if (mlx4_is_mfunc(dev))
1968		dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
1969				  priv->mfunc.vhcr, priv->mfunc.vhcr_dma);
1970	priv->mfunc.vhcr = NULL;
1971
1972err_hcr:
1973	if (!mlx4_is_slave(dev))
1974		iounmap(priv->cmd.hcr);
1975	return -ENOMEM;
1976}
1977
1978void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
1979{
1980	struct mlx4_priv *priv = mlx4_priv(dev);
1981	int i, port;
1982
1983	if (mlx4_is_master(dev)) {
1984		flush_workqueue(priv->mfunc.master.comm_wq);
1985		destroy_workqueue(priv->mfunc.master.comm_wq);
1986		for (i = 0; i < dev->num_slaves; i++) {
1987			for (port = 1; port <= MLX4_MAX_PORTS; port++)
1988				kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
1989		}
1990		kfree(priv->mfunc.master.slave_state);
1991		kfree(priv->mfunc.master.vf_admin);
1992		kfree(priv->mfunc.master.vf_oper);
1993	}
1994
1995	iounmap(priv->mfunc.comm);
1996}
1997
1998void mlx4_cmd_cleanup(struct mlx4_dev *dev)
1999{
2000	struct mlx4_priv *priv = mlx4_priv(dev);
2001
2002	pci_pool_destroy(priv->cmd.pool);
2003
2004	if (!mlx4_is_slave(dev))
2005		iounmap(priv->cmd.hcr);
2006	if (mlx4_is_mfunc(dev))
2007		dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
2008				  priv->mfunc.vhcr, priv->mfunc.vhcr_dma);
2009	priv->mfunc.vhcr = NULL;
2010}
2011
2012/*
2013 * Switch to using events to issue FW commands (can only be called
2014 * after event queue for command events has been initialized).
2015 */
2016int mlx4_cmd_use_events(struct mlx4_dev *dev)
2017{
2018	struct mlx4_priv *priv = mlx4_priv(dev);
2019	int i;
2020	int err = 0;
2021
2022	priv->cmd.context = kmalloc(priv->cmd.max_cmds *
2023				   sizeof (struct mlx4_cmd_context),
2024				   GFP_KERNEL);
2025	if (!priv->cmd.context)
2026		return -ENOMEM;
2027
2028	for (i = 0; i < priv->cmd.max_cmds; ++i) {
2029		priv->cmd.context[i].token = i;
2030		priv->cmd.context[i].next  = i + 1;
2031	}
2032
2033	priv->cmd.context[priv->cmd.max_cmds - 1].next = -1;
2034	priv->cmd.free_head = 0;
2035
2036	sema_init(&priv->cmd.event_sem, priv->cmd.max_cmds);
2037	spin_lock_init(&priv->cmd.context_lock);
2038
2039	for (priv->cmd.token_mask = 1;
2040	     priv->cmd.token_mask < priv->cmd.max_cmds;
2041	     priv->cmd.token_mask <<= 1)
2042		; /* nothing */
2043	--priv->cmd.token_mask;
2044
2045	down(&priv->cmd.poll_sem);
2046	priv->cmd.use_events = 1;
2047
2048	return err;
2049}
2050
2051/*
2052 * Switch back to polling (used when shutting down the device)
2053 */
2054void mlx4_cmd_use_polling(struct mlx4_dev *dev)
2055{
2056	struct mlx4_priv *priv = mlx4_priv(dev);
2057	int i;
2058
2059	priv->cmd.use_events = 0;
2060
2061	for (i = 0; i < priv->cmd.max_cmds; ++i)
2062		down(&priv->cmd.event_sem);
2063
2064	kfree(priv->cmd.context);
2065
2066	up(&priv->cmd.poll_sem);
2067}
2068
2069struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
2070{
2071	struct mlx4_cmd_mailbox *mailbox;
2072
2073	mailbox = kmalloc(sizeof *mailbox, GFP_KERNEL);
2074	if (!mailbox)
2075		return ERR_PTR(-ENOMEM);
2076
2077	mailbox->buf = pci_pool_alloc(mlx4_priv(dev)->cmd.pool, GFP_KERNEL,
2078				      &mailbox->dma);
2079	if (!mailbox->buf) {
2080		kfree(mailbox);
2081		return ERR_PTR(-ENOMEM);
2082	}
2083
2084	return mailbox;
2085}
2086EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox);
2087
2088void mlx4_free_cmd_mailbox(struct mlx4_dev *dev,
2089			   struct mlx4_cmd_mailbox *mailbox)
2090{
2091	if (!mailbox)
2092		return;
2093
2094	pci_pool_free(mlx4_priv(dev)->cmd.pool, mailbox->buf, mailbox->dma);
2095	kfree(mailbox);
2096}
2097EXPORT_SYMBOL_GPL(mlx4_free_cmd_mailbox);
2098
2099u32 mlx4_comm_get_version(void)
2100{
2101	 return ((u32) CMD_CHAN_IF_REV << 8) | (u32) CMD_CHAN_VER;
2102}
2103
2104int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u8 *mac)
2105{
2106	struct mlx4_priv *priv = mlx4_priv(dev);
2107	struct mlx4_vport_state *s_info;
2108
2109	if (!mlx4_is_master(dev))
2110		return -EPROTONOSUPPORT;
2111
2112	if ((vf <= 0) || (vf > dev->num_vfs)) {
2113		mlx4_err(dev, "Bad vf number:%d (max vf activated: %d)\n", vf, dev->num_vfs);
2114		return -EINVAL;
2115	}
2116
2117	s_info = &priv->mfunc.master.vf_admin[vf].vport[port];
2118	s_info->mac = mlx4_mac_to_u64(mac);
2119	mlx4_info(dev, "default mac on vf %d port %d to %llX will take afect only after vf restart\n",
2120		  vf, port, (long long)s_info->mac);
2121	return 0;
2122}
2123EXPORT_SYMBOL_GPL(mlx4_set_vf_mac);
2124
2125int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
2126{
2127	struct mlx4_priv *priv = mlx4_priv(dev);
2128	struct mlx4_vport_state *s_info;
2129
2130	if ((!mlx4_is_master(dev)) ||
2131	    !(dev->caps.flags & MLX4_DEV_CAP_FLAG_ESWITCH_SUPPORT))
2132		return -EPROTONOSUPPORT;
2133
2134	if ((vf <= 0) || (vf > dev->num_vfs) || (vlan > 4095) || (qos > 7))
2135		return -EINVAL;
2136
2137	s_info = &priv->mfunc.master.vf_admin[vf].vport[port];
2138	if ((0 == vlan) && (0 == qos))
2139		s_info->default_vlan = MLX4_VGT;
2140	else
2141		s_info->default_vlan = vlan;
2142	s_info->default_qos = qos;
2143	return 0;
2144}
2145EXPORT_SYMBOL_GPL(mlx4_set_vf_vlan);
2146
2147int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting)
2148{
2149	struct mlx4_priv *priv = mlx4_priv(dev);
2150	struct mlx4_vport_state *s_info;
2151
2152	if ((!mlx4_is_master(dev)) ||
2153	    !(dev->caps.flags & MLX4_DEV_CAP_FLAG_ESWITCH_SUPPORT))
2154		return -EPROTONOSUPPORT;
2155
2156	if ((vf <= 0) || (vf > dev->num_vfs))
2157		return -EINVAL;
2158
2159	s_info = &priv->mfunc.master.vf_admin[vf].vport[port];
2160	s_info->spoofchk = setting;
2161
2162	return 0;
2163}
2164EXPORT_SYMBOL_GPL(mlx4_set_vf_spoofchk);
2165