/*
 * Copyright (c) 2007, 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define	LINUXKPI_PARAM_PREFIX mlx4_

#include <linux/errno.h>
#include <linux/if_ether.h>
#include <linux/module.h>
#include <linux/err.h>

#include <linux/mlx4/cmd.h>
#include <linux/moduleparam.h>
#include "mlx4.h"
#include "mlx4_stats.h"

int mlx4_set_4k_mtu = -1;
module_param_named(set_4k_mtu, mlx4_set_4k_mtu, int, 0444);
MODULE_PARM_DESC(set_4k_mtu,
	"(Obsolete) attempt to set 4K MTU to all ConnectX ports");

#define MLX4_MAC_VALID		(1ull << 63)

#define MLX4_VLAN_VALID		(1u << 31)
#define MLX4_VLAN_MASK		0xfff
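
/*
 * Illustrative note (not from the original source): MAC table entries
 * store the 48-bit MAC in the low bits of a big-endian 64-bit word with
 * MLX4_MAC_VALID set, and VLAN entries store the 12-bit VID with
 * MLX4_VLAN_VALID set. For example, assuming MAC 00:02:c9:aa:bb:cc:
 *
 *	u64 mac = 0x0002c9aabbccULL;
 *	__be64 entry = cpu_to_be64(mac | MLX4_MAC_VALID);
 *	u64 back = be64_to_cpu(entry) & MLX4_MAC_MASK;	(back == mac)
 */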

void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table)
{
	int i;

	mutex_init(&table->mutex);
	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
		table->entries[i] = 0;
		table->refs[i]	 = 0;
	}
	table->max   = 1 << dev->caps.log_num_macs;
	table->total = 0;
}

void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
{
	int i;

	mutex_init(&table->mutex);
	for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
		table->entries[i] = 0;
		table->refs[i]	 = 0;
	}
	table->max   = (1 << dev->caps.log_num_vlans) - MLX4_VLAN_REGULAR;
	table->total = 0;
}

static int validate_index(struct mlx4_dev *dev,
			  struct mlx4_mac_table *table, int index)
{
	int err = 0;

	if (index < 0 || index >= table->max || !table->refs[index]) {
		mlx4_warn(dev, "No valid MAC entry for the given index\n");
		err = -EINVAL;
	}
	return err;
}

static int find_index(struct mlx4_dev *dev,
		      struct mlx4_mac_table *table, u64 mac)
{
	int i;

	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
		if ((mac & MLX4_MAC_MASK) ==
		    (MLX4_MAC_MASK & be64_to_cpu(table->entries[i])))
			return i;
	}
	/* MAC not found */
	return -EINVAL;
}

static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
				   __be64 *entries)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 in_mod;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE);

	in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port;

	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	struct mlx4_mac_table *table = &info->mac_table;
	int i, err = 0;
	int free = -1;

	mlx4_dbg(dev, "Registering MAC: 0x%llx for port %d\n",
		 (unsigned long long) mac, port);

	mutex_lock(&table->mutex);
	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
		if (free < 0 && !table->refs[i]) {
			free = i;
			continue;
		}

		if ((mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) &&
		    table->refs[i]) {
			/* MAC already registered; the table must not hold
			 * duplicates, so just bump the reference count.
			 */
			err = i;
			++table->refs[i];
			goto out;
		}
	}

	mlx4_dbg(dev, "Free MAC index is %d\n", free);

	if (table->total == table->max) {
		/* No free mac entries */
		err = -ENOSPC;
		goto out;
	}

	/* Register new MAC */
	table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID);

	err = mlx4_set_port_mac_table(dev, port, table->entries);
	if (unlikely(err)) {
		mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
			 (unsigned long long) mac);
		table->entries[free] = 0;
		goto out;
	}
	table->refs[free] = 1;

	err = free;
	++table->total;
out:
	mutex_unlock(&table->mutex);
	return err;
}
EXPORT_SYMBOL_GPL(__mlx4_register_mac);

int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
	u64 out_param = 0;
	int err = -EINVAL;

	if (mlx4_is_mfunc(dev)) {
		if (!(dev->flags & MLX4_FLAG_OLD_REG_MAC)) {
			err = mlx4_cmd_imm(dev, mac, &out_param,
					   ((u32) port) << 8 | (u32) RES_MAC,
					   RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
					   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		}
		if (err == -EINVAL && mlx4_is_slave(dev)) {
			/* retry using the old REG_MAC format */
			set_param_l(&out_param, port);
			err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
					   RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
					   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
			if (!err)
				dev->flags |= MLX4_FLAG_OLD_REG_MAC;
		}
		if (err)
			return err;

		return get_param_l(&out_param);
	}
	return __mlx4_register_mac(dev, port, mac);
}
EXPORT_SYMBOL_GPL(mlx4_register_mac);
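
/*
 * Illustrative usage sketch (not part of the original source): a consumer
 * such as the Ethernet netdev code would register its unicast MAC at open
 * time and release it on close. The port number and MAC value below are
 * assumptions.
 *
 *	int index;
 *
 *	index = mlx4_register_mac(dev, 1, 0x0002c9aabbccULL);
 *	if (index < 0)
 *		return index;	(e.g. -ENOSPC when the MAC table is full)
 *	...
 *	mlx4_unregister_mac(dev, 1, 0x0002c9aabbccULL);
 */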

int mlx4_get_base_qpn(struct mlx4_dev *dev, u8 port)
{
	return dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
			(port - 1) * (1 << dev->caps.log_num_macs);
}
EXPORT_SYMBOL_GPL(mlx4_get_base_qpn);
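
/*
 * Worked example (assumed numbers, illustration only): with a reserved
 * Ethernet-address QP base of 0x200 and log_num_macs = 7 (128 MACs per
 * port), the base QPN of port 2 is 0x200 + (2 - 1) * 128 = 0x280.
 */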

void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
	struct mlx4_port_info *info;
	struct mlx4_mac_table *table;
	int index;

	if (port < 1 || port > dev->caps.num_ports) {
		mlx4_warn(dev, "invalid port number (%d), aborting...\n", port);
		return;
	}
	info = &mlx4_priv(dev)->port[port];
	table = &info->mac_table;
	mutex_lock(&table->mutex);

	index = find_index(dev, table, mac);

	if (validate_index(dev, table, index))
		goto out;

	if (--table->refs[index]) {
		mlx4_dbg(dev, "Have more references for index %d, "
			 "no need to modify mac table\n", index);
		goto out;
	}

	table->entries[index] = 0;
	mlx4_set_port_mac_table(dev, port, table->entries);
	--table->total;
out:
	mutex_unlock(&table->mutex);
}
EXPORT_SYMBOL_GPL(__mlx4_unregister_mac);

void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
	u64 out_param = 0;

	if (mlx4_is_mfunc(dev)) {
		if (!(dev->flags & MLX4_FLAG_OLD_REG_MAC)) {
			(void) mlx4_cmd_imm(dev, mac, &out_param,
					    ((u32) port) << 8 | (u32) RES_MAC,
					    RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
					    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		} else {
			/* use the old unregister mac format */
			set_param_l(&out_param, port);
			(void) mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
					    RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
					    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		}
		return;
	}
	__mlx4_unregister_mac(dev, port, mac);
}
EXPORT_SYMBOL_GPL(mlx4_unregister_mac);

int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	struct mlx4_mac_table *table = &info->mac_table;
	int index = qpn - info->base_qpn;
	int err = 0;

	/* CX1 doesn't support multi-functions */
	mutex_lock(&table->mutex);

	err = validate_index(dev, table, index);
	if (err)
		goto out;

	table->entries[index] = cpu_to_be64(new_mac | MLX4_MAC_VALID);

	err = mlx4_set_port_mac_table(dev, port, table->entries);
	if (unlikely(err)) {
		mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
			 (unsigned long long) new_mac);
		table->entries[index] = 0;
	}
out:
	mutex_unlock(&table->mutex);
	return err;
}
EXPORT_SYMBOL_GPL(__mlx4_replace_mac);

static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
				    __be32 *entries)
{
	struct mlx4_cmd_mailbox *mailbox;
	u32 in_mod;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE);
	in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port;
	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev, mailbox);

	return err;
}

int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx)
{
	struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
	int i;

	for (i = 0; i < MLX4_MAX_VLAN_NUM; ++i) {
		if (table->refs[i] &&
		    (vid == (MLX4_VLAN_MASK &
			      be32_to_cpu(table->entries[i])))) {
			/* VLAN found in the cache; return its index */
			*idx = i;
			return 0;
		}
	}

	return -ENOENT;
}
EXPORT_SYMBOL_GPL(mlx4_find_cached_vlan);

int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan,
				int *index)
{
	struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
	int i, err = 0;
	int free = -1;

	mutex_lock(&table->mutex);

	if (table->total == table->max) {
		/* No free vlan entries */
		err = -ENOSPC;
		goto out;
	}

	for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) {
		if (free < 0 && (table->refs[i] == 0)) {
			free = i;
			continue;
		}

		if (table->refs[i] &&
		    (vlan == (MLX4_VLAN_MASK &
			      be32_to_cpu(table->entries[i])))) {
			/* VLAN already registered; increase the reference count */
			*index = i;
			++table->refs[i];
			goto out;
		}
	}

	if (free < 0) {
		err = -ENOMEM;
		goto out;
	}

	/* Register new VLAN */
	table->refs[free] = 1;
	table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID);

	err = mlx4_set_port_vlan_table(dev, port, table->entries);
	if (unlikely(err)) {
		mlx4_warn(dev, "Failed adding vlan: %u\n", vlan);
		table->refs[free] = 0;
		table->entries[free] = 0;
		goto out;
	}

	*index = free;
	++table->total;
out:
	mutex_unlock(&table->mutex);
	return err;
}

int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
{
	u64 out_param = 0;
	int err;

	if (vlan > 4095)
		return -EINVAL;

	if (mlx4_is_mfunc(dev)) {
		err = mlx4_cmd_imm(dev, vlan, &out_param,
				   ((u32) port) << 8 | (u32) RES_VLAN,
				   RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		if (!err)
			*index = get_param_l(&out_param);

		return err;
	}
	return __mlx4_register_vlan(dev, port, vlan, index);
}
EXPORT_SYMBOL_GPL(mlx4_register_vlan);
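
/*
 * Illustrative usage sketch (not from the original source); the port and
 * VLAN ID below are assumptions. On success, vidx holds the VLAN-table
 * index used when building steering rules:
 *
 *	int vidx;
 *	int err = mlx4_register_vlan(dev, 1, 100, &vidx);
 *
 *	if (!err) {
 *		...
 *		mlx4_unregister_vlan(dev, 1, 100);
 *	}
 */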

void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
{
	struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
	int index;

	mutex_lock(&table->mutex);
	if (mlx4_find_cached_vlan(dev, port, vlan, &index)) {
		mlx4_warn(dev, "vlan 0x%x is not in the vlan table\n", vlan);
		goto out;
	}

	if (index < MLX4_VLAN_REGULAR) {
		mlx4_warn(dev, "Trying to free special vlan index %d\n", index);
		goto out;
	}

	if (--table->refs[index]) {
		mlx4_dbg(dev, "Have %d more references for index %d, "
			 "no need to modify vlan table\n", table->refs[index],
			 index);
		goto out;
	}
	table->entries[index] = 0;
	mlx4_set_port_vlan_table(dev, port, table->entries);
	--table->total;
out:
	mutex_unlock(&table->mutex);
}

void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
{
	u64 out_param = 0;

	if (mlx4_is_mfunc(dev)) {
		(void) mlx4_cmd_imm(dev, vlan, &out_param,
				    ((u32) port) << 8 | (u32) RES_VLAN,
				    RES_OP_RESERVE_AND_MAP,
				    MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
				    MLX4_CMD_WRAPPED);
		return;
	}
	__mlx4_unregister_vlan(dev, port, vlan);
}
EXPORT_SYMBOL_GPL(mlx4_unregister_vlan);

int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
{
	struct mlx4_cmd_mailbox *inmailbox, *outmailbox;
	u8 *inbuf, *outbuf;
	int err;

	inmailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(inmailbox))
		return PTR_ERR(inmailbox);

	outmailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(outmailbox)) {
		mlx4_free_cmd_mailbox(dev, inmailbox);
		return PTR_ERR(outmailbox);
	}

	inbuf = inmailbox->buf;
	outbuf = outmailbox->buf;
	memset(inbuf, 0, 256);
	memset(outbuf, 0, 256);
	inbuf[0] = 1;
	inbuf[1] = 1;
	inbuf[2] = 1;
	inbuf[3] = 1;
	*(__be16 *) (&inbuf[16]) = cpu_to_be16(0x0015);
	*(__be32 *) (&inbuf[20]) = cpu_to_be32(port);

	err = mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3,
			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
			   MLX4_CMD_NATIVE);
	if (!err)
		*caps = *(__be32 *) (outbuf + 84);
	mlx4_free_cmd_mailbox(dev, inmailbox);
	mlx4_free_cmd_mailbox(dev, outmailbox);
	return err;
}

static struct mlx4_roce_gid_entry zgid_entry;

int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave)
{
	if (slave == 0)
		return MLX4_ROCE_PF_GIDS;
	if (slave <= ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) % dev->num_vfs))
		return ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / dev->num_vfs) + 1;
	return (MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / dev->num_vfs;
}

int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave)
{
	int gids;
	int vfs;

	gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS;
	vfs = dev->num_vfs;

	if (slave == 0)
		return 0;
	if (slave <= gids % vfs)
		return MLX4_ROCE_PF_GIDS + ((gids / vfs) + 1) * (slave - 1);

	return MLX4_ROCE_PF_GIDS + (gids % vfs) + ((gids / vfs) * (slave - 1));
}
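
/*
 * Worked example (illustration only; the constants are assumed): with
 * MLX4_ROCE_MAX_GIDS = 128, MLX4_ROCE_PF_GIDS = 16 and num_vfs = 3, the
 * remaining 112 GIDs are split among the VFs as 38, 37 and 37 entries
 * (112 / 3 = 37 remainder 1, so slave 1 gets one extra). Their base
 * indices are 16, 54 and 91 respectively, while slave 0 (the PF) owns
 * indices 0..15.
 */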

static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
				u8 op_mod, struct mlx4_cmd_mailbox *inbox)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_port_info *port_info;
	struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master;
	struct mlx4_slave_state *slave_st = &master->slave_state[slave];
	struct mlx4_set_port_rqp_calc_context *qpn_context;
	struct mlx4_set_port_general_context *gen_context;
	struct mlx4_roce_gid_entry *gid_entry_tbl, *gid_entry_mbox, *gid_entry_mb1;
	int reset_qkey_viols;
	int port;
	int is_eth;
	int num_gids;
	int base;
	u32 in_modifier;
	u32 promisc;
	u16 mtu, prev_mtu;
	int err;
	int i, j;
	int offset;
	__be32 agg_cap_mask;
	__be32 slave_cap_mask;
	__be32 new_cap_mask;

	port = in_mod & 0xff;
	in_modifier = (in_mod >> 8) & 0xff;
	is_eth = op_mod;
	port_info = &priv->port[port];

	if (op_mod > 1)
		return -EINVAL;

	/* Slaves cannot perform SET_PORT operations except changing MTU */
	if (is_eth) {
		if (slave != dev->caps.function &&
		    in_modifier != MLX4_SET_PORT_GENERAL &&
		    in_modifier != MLX4_SET_PORT_GID_TABLE) {
			mlx4_warn(dev, "denying SET_PORT for slave:%d, "
				  "port %d, config_select 0x%x\n",
				  slave, port, in_modifier);
			return -EINVAL;
		}
		switch (in_modifier) {
		case MLX4_SET_PORT_RQP_CALC:
			qpn_context = inbox->buf;
			qpn_context->base_qpn =
				cpu_to_be32(port_info->base_qpn);
			qpn_context->n_mac = 0x7;
			promisc = be32_to_cpu(qpn_context->promisc) >>
				SET_PORT_PROMISC_SHIFT;
			qpn_context->promisc = cpu_to_be32(
				promisc << SET_PORT_PROMISC_SHIFT |
				port_info->base_qpn);
			promisc = be32_to_cpu(qpn_context->mcast) >>
				SET_PORT_MC_PROMISC_SHIFT;
			qpn_context->mcast = cpu_to_be32(
				promisc << SET_PORT_MC_PROMISC_SHIFT |
				port_info->base_qpn);
			break;
		case MLX4_SET_PORT_GENERAL:
			gen_context = inbox->buf;
			/* MTU is configured as the max MTU among all
			 * the functions on the port. */
			mtu = be16_to_cpu(gen_context->mtu);
			mtu = min_t(int, mtu, dev->caps.eth_mtu_cap[port] +
				    ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
			prev_mtu = slave_st->mtu[port];
			slave_st->mtu[port] = mtu;
			if (mtu > master->max_mtu[port])
				master->max_mtu[port] = mtu;
			if (mtu < prev_mtu && prev_mtu ==
						master->max_mtu[port]) {
				slave_st->mtu[port] = mtu;
				master->max_mtu[port] = mtu;
				for (i = 0; i < dev->num_slaves; i++) {
					master->max_mtu[port] =
					max(master->max_mtu[port],
					    master->slave_state[i].mtu[port]);
				}
			}

			gen_context->mtu = cpu_to_be16(master->max_mtu[port]);
			break;
		case MLX4_SET_PORT_GID_TABLE:
			/* Handle multiple GID entries: loop over the number
			 * of GIDs owned by this guest.
			 * 1. Check that the GIDs passed by the slave contain
			 *    no duplicates.
			 */
			num_gids = mlx4_get_slave_num_gids(dev, slave);
			base = mlx4_get_base_gid_ix(dev, slave);
			gid_entry_mbox = (struct mlx4_roce_gid_entry *) (inbox->buf);
			for (i = 0; i < num_gids; gid_entry_mbox++, i++) {
				if (!memcmp(gid_entry_mbox->raw, zgid_entry.raw,
					    sizeof(zgid_entry)))
					continue;
				gid_entry_mb1 = gid_entry_mbox + 1;
				for (j = i + 1; j < num_gids; gid_entry_mb1++, j++) {
					if (!memcmp(gid_entry_mb1->raw,
						    zgid_entry.raw, sizeof(zgid_entry)))
						continue;
					if (!memcmp(gid_entry_mb1->raw, gid_entry_mbox->raw,
						    sizeof(gid_entry_mbox->raw))) {
						/* found duplicate */
						return -EINVAL;
					}
				}
			}

			/* 2. Check that there are no duplicates in OTHER
			 *    entries of the port GID table
			 */
			for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) {
				if (i >= base && i < base + num_gids)
					continue; /* don't compare to slave's current gids */
				gid_entry_tbl = &priv->roce_gids[port - 1][i];
				if (!memcmp(gid_entry_tbl->raw, zgid_entry.raw, sizeof(zgid_entry)))
					continue;
				gid_entry_mbox = (struct mlx4_roce_gid_entry *) (inbox->buf);
				for (j = 0; j < num_gids; gid_entry_mbox++, j++) {
					if (!memcmp(gid_entry_mbox->raw, zgid_entry.raw,
						    sizeof(zgid_entry)))
						continue;
					if (!memcmp(gid_entry_mbox->raw, gid_entry_tbl->raw,
						    sizeof(gid_entry_tbl->raw))) {
						/* found duplicate */
						mlx4_warn(dev, "requested gid entry for slave:%d "
							  "is a duplicate of gid at index %d\n",
							  slave, i);
						return -EINVAL;
					}
				}
			}

			/* insert slave GIDs with memcpy, starting at slave's base index */
			gid_entry_mbox = (struct mlx4_roce_gid_entry *) (inbox->buf);
			for (i = 0, offset = base; i < num_gids; gid_entry_mbox++, offset++, i++)
				memcpy(priv->roce_gids[port - 1][offset].raw, gid_entry_mbox->raw, 16);

			/* Now, copy roce port gids table to current mailbox for passing to FW */
			gid_entry_mbox = (struct mlx4_roce_gid_entry *) (inbox->buf);
			for (i = 0; i < MLX4_ROCE_MAX_GIDS; gid_entry_mbox++, i++)
				memcpy(gid_entry_mbox->raw, priv->roce_gids[port - 1][i].raw, 16);

			break;
		}
		return mlx4_cmd(dev, inbox->dma, in_mod & 0xffff, op_mod,
				MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
				MLX4_CMD_NATIVE);
	}

	/* For IB, we only consider:
	 * - The capability mask, which is set to the aggregate of all
	 *   slave function capabilities
	 * - The QKey violation counter - reset according to each request.
	 */

	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		reset_qkey_viols = (*(u8 *) inbox->buf) & 0x40;
		new_cap_mask = ((__be32 *) inbox->buf)[2];
	} else {
		reset_qkey_viols = ((u8 *) inbox->buf)[3] & 0x1;
		new_cap_mask = ((__be32 *) inbox->buf)[1];
	}

	/* slave may not set the IS_SM capability for the port */
	if (slave != mlx4_master_func_num(dev) &&
	    (be32_to_cpu(new_cap_mask) & MLX4_PORT_CAP_IS_SM))
		return -EINVAL;

	/* No DEV_MGMT in multifunc mode */
	if (mlx4_is_mfunc(dev) &&
	    (be32_to_cpu(new_cap_mask) & MLX4_PORT_CAP_DEV_MGMT_SUP))
		return -EINVAL;

	agg_cap_mask = 0;
	slave_cap_mask =
		priv->mfunc.master.slave_state[slave].ib_cap_mask[port];
	priv->mfunc.master.slave_state[slave].ib_cap_mask[port] = new_cap_mask;
	for (i = 0; i < dev->num_slaves; i++)
		agg_cap_mask |=
			priv->mfunc.master.slave_state[i].ib_cap_mask[port];

	/* only clear the mailbox for guests; the master may be setting
	 * MTU or PKEY table size
	 */
	if (slave != dev->caps.function)
		memset(inbox->buf, 0, 256);
	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		*(u8 *) inbox->buf	   |= !!reset_qkey_viols << 6;
		((__be32 *) inbox->buf)[2] = agg_cap_mask;
	} else {
		((u8 *) inbox->buf)[3]     |= !!reset_qkey_viols;
		((__be32 *) inbox->buf)[1] = agg_cap_mask;
	}

	err = mlx4_cmd(dev, inbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
	if (err)
		priv->mfunc.master.slave_state[slave].ib_cap_mask[port] =
			slave_cap_mask;
	return err;
}

int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	return mlx4_common_set_port(dev, slave, vhcr->in_modifier,
				    vhcr->op_modifier, inbox);
}

/* bit locations for set port command with zero op modifier */
enum {
	MLX4_SET_PORT_VL_CAP	 = 4, /* bits 7:4 */
	MLX4_SET_PORT_MTU_CAP	 = 12, /* bits 15:12 */
	MLX4_CHANGE_PORT_PKEY_TBL_SZ = 20,
	MLX4_CHANGE_PORT_VL_CAP	 = 21,
	MLX4_CHANGE_PORT_MTU_CAP = 22,
};

int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err = -EINVAL, vl_cap, pkey_tbl_flag = 0;
	u32 in_mod;

	if (dev->caps.port_type[port] == MLX4_PORT_TYPE_NONE)
		return 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	memset(mailbox->buf, 0, 256);

	if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) {
		in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
		err = mlx4_cmd(dev, mailbox->dma, in_mod, 1,
			       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
			       MLX4_CMD_WRAPPED);
	} else {
		((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port];

		if (pkey_tbl_sz >= 0 && mlx4_is_master(dev)) {
			pkey_tbl_flag = 1;
			((__be16 *) mailbox->buf)[20] = cpu_to_be16(pkey_tbl_sz);
		}

		/* IB VL CAP enum isn't used by the firmware, just numerical values */
		for (vl_cap = dev->caps.vl_cap[port];
				vl_cap >= 1; vl_cap >>= 1) {
			((__be32 *) mailbox->buf)[0] = cpu_to_be32(
				(1 << MLX4_CHANGE_PORT_MTU_CAP) |
				(1 << MLX4_CHANGE_PORT_VL_CAP)  |
				(pkey_tbl_flag << MLX4_CHANGE_PORT_PKEY_TBL_SZ) |
				(dev->caps.port_ib_mtu[port] << MLX4_SET_PORT_MTU_CAP) |
				(vl_cap << MLX4_SET_PORT_VL_CAP));
			err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT,
					MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
			if (err != -ENOMEM)
				break;
		}
	}

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
			  u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_set_port_general_context *context;
	int err;
	u32 in_mod;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	context = mailbox->buf;
	memset(context, 0, sizeof *context);

	context->flags = SET_PORT_GEN_ALL_VALID;
	context->mtu = cpu_to_be16(mtu);
	context->pptx = (pptx * (!pfctx)) << 7;
	context->pfctx = pfctx;
	context->pprx = (pprx * (!pfcrx)) << 7;
	context->pfcrx = pfcrx;

	in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL(mlx4_SET_PORT_general);
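
/*
 * Illustrative call sketch (not from the original source): the Ethernet
 * driver would typically push its effective MTU (including Ethernet, VLAN
 * and FCS overhead) together with the pause/PFC settings, e.g. for a
 * 1500-byte MTU with plain pause enabled in both directions:
 *
 *	err = mlx4_SET_PORT_general(dev, port,
 *				    1500 + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN,
 *				    1, 0, 1, 0);
 *
 * where the last four arguments are pptx, pfctx, pprx and pfcrx.
 */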

int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
			   u8 promisc)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_set_port_rqp_calc_context *context;
	int err;
	u32 in_mod;
	u32 m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ?
		MCAST_DIRECT : MCAST_DEFAULT;

	if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
		return 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	context = mailbox->buf;
	memset(context, 0, sizeof *context);

	context->base_qpn = cpu_to_be32(base_qpn);
	context->n_mac = dev->caps.log_num_macs;
	context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT |
				       base_qpn);
	context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT |
				     base_qpn);
	context->intra_no_vlan = 0;
	context->no_vlan = MLX4_NO_VLAN_IDX;
	context->intra_vlan_miss = 0;
	context->vlan_miss = MLX4_VLAN_MISS_IDX;

	in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port;
	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL(mlx4_SET_PORT_qpn_calc);

int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_set_port_prio2tc_context *context;
	int err;
	u32 in_mod;
	int i;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	context = mailbox->buf;
	memset(context, 0, sizeof *context);

	for (i = 0; i < MLX4_NUM_UP; i += 2)
		context->prio2tc[i >> 1] = prio2tc[i] << 4 | prio2tc[i + 1];

	in_mod = MLX4_SET_PORT_PRIO2TC << 8 | port;
	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL(mlx4_SET_PORT_PRIO2TC);

int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
		u8 *pg, u16 *ratelimit)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_set_port_scheduler_context *context;
	int err;
	u32 in_mod;
	int i;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	context = mailbox->buf;
	memset(context, 0, sizeof *context);

	for (i = 0; i < MLX4_NUM_TC; i++) {
		struct mlx4_port_scheduler_tc_cfg_be *tc = &context->tc[i];
		u16 r;

		if (ratelimit && ratelimit[i]) {
			if (ratelimit[i] <= MLX4_MAX_100M_UNITS_VAL) {
				r = ratelimit[i];
				tc->max_bw_units =
					htons(MLX4_RATELIMIT_100M_UNITS);
			} else {
				r = ratelimit[i] / 10;
				tc->max_bw_units =
					htons(MLX4_RATELIMIT_1G_UNITS);
			}
			tc->max_bw_value = htons(r);
		} else {
			tc->max_bw_value = htons(MLX4_RATELIMIT_DEFAULT);
			tc->max_bw_units = htons(MLX4_RATELIMIT_1G_UNITS);
		}

		tc->pg = htons(pg[i]);
		tc->bw_precentage = htons(tc_tx_bw[i]);
	}

	in_mod = MLX4_SET_PORT_SCHEDULER << 8 | port;
	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL(mlx4_SET_PORT_SCHEDULER);
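
/*
 * Illustrative note (not from the original source): each TC rate limit is
 * encoded as a (value, units) pair. Values that fit within
 * MLX4_MAX_100M_UNITS_VAL are sent as-is in 100 Mbps units; larger values
 * are divided by 10 and sent in 1 Gbps units. For example, a request of
 * 300 (i.e. 30 Gbps expressed in 100 Mbps units) would be programmed as
 * value 30 with MLX4_RATELIMIT_1G_UNITS, assuming 300 exceeds
 * MLX4_MAX_100M_UNITS_VAL.
 */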

int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
				struct mlx4_vhcr *vhcr,
				struct mlx4_cmd_mailbox *inbox,
				struct mlx4_cmd_mailbox *outbox,
				struct mlx4_cmd_info *cmd)
{
	return 0;
}

int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port,
			u64 mac, u64 clear, u8 mode)
{
	return mlx4_cmd(dev, (mac | (clear << 63)), port, mode,
			MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B,
			MLX4_CMD_WRAPPED);
}
EXPORT_SYMBOL(mlx4_SET_MCAST_FLTR);

int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave,
			       struct mlx4_vhcr *vhcr,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox,
			       struct mlx4_cmd_info *cmd)
{
	return 0;
}

int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
				struct mlx4_vhcr *vhcr,
				struct mlx4_cmd_mailbox *inbox,
				struct mlx4_cmd_mailbox *outbox,
				struct mlx4_cmd_info *cmd)
{
	return 0;
}

int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid, int *slave_id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i, found_ix = -1;
	int vf_gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS;

	if (!mlx4_is_mfunc(dev))
		return -EINVAL;

	for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) {
		if (!memcmp(priv->roce_gids[port - 1][i].raw, gid, 16)) {
			found_ix = i;
			break;
		}
	}

	if (found_ix >= 0) {
		if (found_ix < MLX4_ROCE_PF_GIDS)
			*slave_id = 0;
		else if (found_ix < MLX4_ROCE_PF_GIDS + (vf_gids % dev->num_vfs) *
			 (vf_gids / dev->num_vfs + 1))
			*slave_id = ((found_ix - MLX4_ROCE_PF_GIDS) /
				     (vf_gids / dev->num_vfs + 1)) + 1;
		else
			*slave_id =
			((found_ix - MLX4_ROCE_PF_GIDS -
			  ((vf_gids % dev->num_vfs) * ((vf_gids / dev->num_vfs + 1)))) /
			 (vf_gids / dev->num_vfs)) + vf_gids % dev->num_vfs + 1;
	}

	return (found_ix >= 0) ? 0 : -EINVAL;
}
EXPORT_SYMBOL(mlx4_get_slave_from_roce_gid);

int mlx4_get_roce_gid_from_slave(struct mlx4_dev *dev, int port, int slave_id, u8 *gid)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (!mlx4_is_master(dev))
		return -EINVAL;

	memcpy(gid, priv->roce_gids[port - 1][slave_id].raw, 16);
	return 0;
}
EXPORT_SYMBOL(mlx4_get_roce_gid_from_slave);

/* Cable Module Info */
#define MODULE_INFO_MAX_READ 48

#define I2C_ADDR_LOW  0x50
#define I2C_ADDR_HIGH 0x51
#define I2C_PAGE_SIZE 256
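
/*
 * Illustrative note (not from the original source): SFP/QSFP module
 * EEPROMs are addressed as two 256-byte I2C pages. Flat offsets below
 * 256 are read from slave address I2C_ADDR_LOW (0x50); for example, a
 * flat offset of 300 maps to I2C_ADDR_HIGH (0x51) at device offset
 * 300 - 256 = 44, which is how mlx4_get_module_info() below splits it.
 */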

/* Module Info Data */
struct mlx4_cable_info {
	u8	i2c_addr;
	u8	page_num;
	__be16	dev_mem_address;
	__be16	reserved1;
	__be16	size;
	__be32	reserved2[2];
	u8	data[MODULE_INFO_MAX_READ];
};

enum cable_info_err {
	CABLE_INF_INV_PORT	= 0x1,
	CABLE_INF_OP_NOSUP	= 0x2,
	CABLE_INF_NOT_CONN	= 0x3,
	CABLE_INF_NO_EEPRM	= 0x4,
	CABLE_INF_PAGE_ERR	= 0x5,
	CABLE_INF_INV_ADDR	= 0x6,
	CABLE_INF_I2C_ADDR	= 0x7,
	CABLE_INF_QSFP_VIO	= 0x8,
	CABLE_INF_I2C_BUSY	= 0x9,
};

#define MAD_STATUS_2_CABLE_ERR(mad_status) ((mad_status >> 8) & 0xFF)

#ifdef DEBUG
static inline const char *cable_info_mad_err_str(u16 mad_status)
{
	u8 err = MAD_STATUS_2_CABLE_ERR(mad_status);

	switch (err) {
	case CABLE_INF_INV_PORT:
		return "invalid port selected";
	case CABLE_INF_OP_NOSUP:
		return "operation not supported for this port (the port is of type CX4 or internal)";
	case CABLE_INF_NOT_CONN:
		return "cable is not connected";
	case CABLE_INF_NO_EEPRM:
		return "the connected cable has no EPROM (passive copper cable)";
	case CABLE_INF_PAGE_ERR:
		return "page number is greater than 15";
	case CABLE_INF_INV_ADDR:
		return "invalid device_address or size (that is, size equals 0 or address+size is greater than 256)";
	case CABLE_INF_I2C_ADDR:
		return "invalid I2C slave address";
	case CABLE_INF_QSFP_VIO:
		return "at least one cable violates the QSFP specification and ignores the modsel signal";
	case CABLE_INF_I2C_BUSY:
		return "I2C bus is constantly busy";
	}
	return "Unknown Error";
}
#endif /* DEBUG */

/**
 * mlx4_get_module_info - Read cable module EEPROM data
 * @dev: mlx4_dev.
 * @port: port number.
 * @offset: byte offset in the EEPROM to start reading from.
 * @size: number of bytes to read.
 * @data: output buffer for the requested data.
 *
 * Reads cable module EEPROM data and copies it into the @data buffer.
 * Returns the number of bytes read on success or a negative error code.
 */
int mlx4_get_module_info(struct mlx4_dev *dev, u8 port, u16 offset,
    u16 size, u8 *data)
{
	struct mlx4_cmd_mailbox *inbox, *outbox;
	struct mlx4_mad_ifc *inmad, *outmad;
	struct mlx4_cable_info *cable_info;
	u16 i2c_addr;
	int ret;

	if (size > MODULE_INFO_MAX_READ)
		size = MODULE_INFO_MAX_READ;

	inbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(inbox)) {
		mlx4_err(dev,
			 "mlx4_alloc_cmd_mailbox returned with error(%lx)\n", PTR_ERR(inbox));
		return PTR_ERR(inbox);
	}

	outbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(outbox)) {
		mlx4_free_cmd_mailbox(dev, inbox);
		mlx4_err(dev,
			 "mlx4_alloc_cmd_mailbox returned with error(%lx)\n", PTR_ERR(outbox));
		return PTR_ERR(outbox);
	}

	inmad = (struct mlx4_mad_ifc *)(inbox->buf);
	outmad = (struct mlx4_mad_ifc *)(outbox->buf);

	inmad->method = 0x1; /* Get */
	inmad->class_version = 0x1;
	inmad->mgmt_class = 0x1;
	inmad->base_version = 0x1;
	inmad->attr_id = cpu_to_be16(0xFF60); /* Module Info */

	if (offset < I2C_PAGE_SIZE && offset + size > I2C_PAGE_SIZE)
		/* Cross-page reads are not allowed; clamp the read so it
		 * ends at offset 256 of the low page.
		 */
		size -= offset + size - I2C_PAGE_SIZE;

	i2c_addr = I2C_ADDR_LOW;
	if (offset >= I2C_PAGE_SIZE) {
		/* Reset offset to high page */
		i2c_addr = I2C_ADDR_HIGH;
		offset -= I2C_PAGE_SIZE;
	}

	cable_info = (struct mlx4_cable_info *)inmad->data;
	cable_info->dev_mem_address = cpu_to_be16(offset);
	cable_info->page_num = 0;
	cable_info->i2c_addr = i2c_addr;
	cable_info->size = cpu_to_be16(size);

	ret = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3,
	    MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
	if (ret)
		goto out;

	if (be16_to_cpu(outmad->status)) {
		/* MAD returned with bad status */
		ret = be16_to_cpu(outmad->status);
#ifdef DEBUG
		mlx4_warn(dev, "MLX4_CMD_MAD_IFC Get Module info attr(%x) "
		    "port(%d) i2c_addr(%x) offset(%d) size(%d): Response "
		    "Mad Status(%x) - %s\n", 0xFF60, port, i2c_addr, offset,
		    size, ret, cable_info_mad_err_str(ret));
#endif
		if (i2c_addr == I2C_ADDR_HIGH &&
		    MAD_STATUS_2_CABLE_ERR(ret) == CABLE_INF_I2C_ADDR)
			/* Some SFP cables do not support I2C slave
			 * address 0x51 (high page); abort silently.
			 */
			ret = 0;
		else
			ret = -ret;
		goto out;
	}
	cable_info = (struct mlx4_cable_info *)outmad->data;
	memcpy(data, cable_info->data, size);
	ret = size;
out:
	mlx4_free_cmd_mailbox(dev, inbox);
	mlx4_free_cmd_mailbox(dev, outbox);
	return ret;
}
EXPORT_SYMBOL(mlx4_get_module_info);
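
/*
 * Illustrative usage sketch (not from the original source): reading the
 * first bytes of the module EEPROM, e.g. to identify the cable type. The
 * port number is an assumption.
 *
 *	u8 buf[MODULE_INFO_MAX_READ];
 *	int n = mlx4_get_module_info(dev, 1, 0, sizeof(buf), buf);
 *
 *	if (n > 0)
 *		mlx4_dbg(dev, "module identifier byte: 0x%02x\n", buf[0]);
 */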