1/*
2 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses.  You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 *     Redistribution and use in source and binary forms, with or
11 *     without modification, are permitted provided that the following
12 *     conditions are met:
13 *
14 *      - Redistributions of source code must retain the above
15 *        copyright notice, this list of conditions and the following
16 *        disclaimer.
17 *
18 *      - Redistributions in binary form must reproduce the above
19 *        copyright notice, this list of conditions and the following
20 *        disclaimer in the documentation and/or other materials
21 *        provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/errno.h>
34#include <linux/if_ether.h>
35#include <linux/if_vlan.h>
36#include <linux/export.h>
37
38#include <linux/mlx4/cmd.h>
39
40#include "mlx4.h"
41#include "mlx4_stats.h"
42
43#define MLX4_MAC_VALID		(1ull << 63)
44
45#define MLX4_VLAN_VALID		(1u << 31)
46#define MLX4_VLAN_MASK		0xfff
47
48#define MLX4_STATS_TRAFFIC_COUNTERS_MASK	0xfULL
49#define MLX4_STATS_TRAFFIC_DROPS_MASK		0xc0ULL
50#define MLX4_STATS_ERROR_COUNTERS_MASK		0x1ffc30ULL
51#define MLX4_STATS_PORT_COUNTERS_MASK		0x1fe00000ULL
52
53#define MLX4_FLAG2_V_IGNORE_FCS_MASK		BIT(1)
54#define MLX4_FLAG2_V_USER_MTU_MASK		BIT(5)
55#define MLX4_FLAG2_V_USER_MAC_MASK		BIT(6)
56#define MLX4_FLAG_V_MTU_MASK			BIT(0)
57#define MLX4_FLAG_V_PPRX_MASK			BIT(1)
58#define MLX4_FLAG_V_PPTX_MASK			BIT(2)
59#define MLX4_IGNORE_FCS_MASK			0x1
60#define MLX4_TC_MAX_NUMBER			8
61
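/* Per-port MAC, VLAN and RoCE GID tables are kept in software as shadows of
 * the tables programmed into the device with SET_PORT.  The init helpers
 * below clear the shadow entries and reference counts; the usable table size
 * is derived from the device capabilities (log_num_macs / log_num_vlans),
 * with the first MLX4_VLAN_REGULAR VLAN slots reserved for special entries.
 */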
62void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table)
63{
64	int i;
65
66	mutex_init(&table->mutex);
67	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
68		table->entries[i] = 0;
69		table->refs[i]	 = 0;
70		table->is_dup[i] = false;
71	}
72	table->max   = 1 << dev->caps.log_num_macs;
73	table->total = 0;
74}
75
76void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
77{
78	int i;
79
80	mutex_init(&table->mutex);
81	for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
82		table->entries[i] = 0;
83		table->refs[i]	 = 0;
84		table->is_dup[i] = false;
85	}
86	table->max   = (1 << dev->caps.log_num_vlans) - MLX4_VLAN_REGULAR;
87	table->total = 0;
88}
89
90void mlx4_init_roce_gid_table(struct mlx4_dev *dev,
91			      struct mlx4_roce_gid_table *table)
92{
93	int i;
94
95	mutex_init(&table->mutex);
96	for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++)
97		memset(table->roce_gids[i].raw, 0, MLX4_ROCE_GID_ENTRY_SIZE);
98}
99
100static int validate_index(struct mlx4_dev *dev,
101			  struct mlx4_mac_table *table, int index)
102{
103	int err = 0;
104
105	if (index < 0 || index >= table->max || !table->entries[index]) {
		mlx4_warn(dev, "No valid MAC entry for the given index\n");
107		err = -EINVAL;
108	}
109	return err;
110}
111
112static int find_index(struct mlx4_dev *dev,
113		      struct mlx4_mac_table *table, u64 mac)
114{
115	int i;
116
117	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
118		if (table->refs[i] &&
119		    (MLX4_MAC_MASK & mac) ==
120		    (MLX4_MAC_MASK & be64_to_cpu(table->entries[i])))
121			return i;
122	}
	/* MAC not found */
124	return -EINVAL;
125}
126
127static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
128				   __be64 *entries)
129{
130	struct mlx4_cmd_mailbox *mailbox;
131	u32 in_mod;
132	int err;
133
134	mailbox = mlx4_alloc_cmd_mailbox(dev);
135	if (IS_ERR(mailbox))
136		return PTR_ERR(mailbox);
137
138	memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE);
139
140	in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port;
141
142	err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
143		       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
144		       MLX4_CMD_NATIVE);
145
146	mlx4_free_cmd_mailbox(dev, mailbox);
147	return err;
148}
149
150int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx)
151{
152	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
153	struct mlx4_mac_table *table = &info->mac_table;
154	int i;
155
156	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
157		if (!table->refs[i])
158			continue;
159
160		if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
161			*idx = i;
162			return 0;
163		}
164	}
165
166	return -ENOENT;
167}
168EXPORT_SYMBOL_GPL(mlx4_find_cached_mac);
169
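/* Multi-function (mf) bonding mirrors MAC/VLAN entries between the two
 * Ethernet ports so that virtual-function traffic can fail over between
 * them.  It is only relevant when the device is multi-function and exposes
 * exactly two Ethernet ports.
 */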
170static bool mlx4_need_mf_bond(struct mlx4_dev *dev)
171{
172	int i, num_eth_ports = 0;
173
174	if (!mlx4_is_mfunc(dev))
175		return false;
176	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
177		++num_eth_ports;
178
	return num_eth_ports == 2;
180}
181
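/* Add a MAC to the port's MAC table (or take another reference on an
 * existing entry) and return its table index, or a negative errno.
 *
 * When the ports are mf-bonded, the entry is mirrored into the other port's
 * table at the same index and marked is_dup there.  To avoid an AB-BA
 * deadlock, the two table mutexes are always taken in port order: port 1
 * first, then port 2 (using mutex_lock_nested() for the inner lock).
 */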
182int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
183{
184	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
185	struct mlx4_mac_table *table = &info->mac_table;
186	int i, err = 0;
187	int free = -1;
188	int free_for_dup = -1;
189	bool dup = mlx4_is_mf_bonded(dev);
190	u8 dup_port = (port == 1) ? 2 : 1;
191	struct mlx4_mac_table *dup_table = &mlx4_priv(dev)->port[dup_port].mac_table;
192	bool need_mf_bond = mlx4_need_mf_bond(dev);
193	bool can_mf_bond = true;
194
195	mlx4_dbg(dev, "Registering MAC: 0x%llx for port %d %s duplicate\n",
196		 (unsigned long long)mac, port,
197		 dup ? "with" : "without");
198
199	if (need_mf_bond) {
200		if (port == 1) {
201			mutex_lock(&table->mutex);
202			mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
203		} else {
204			mutex_lock(&dup_table->mutex);
205			mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
206		}
207	} else {
208		mutex_lock(&table->mutex);
209	}
210
211	if (need_mf_bond) {
212		int index_at_port = -1;
213		int index_at_dup_port = -1;
214
215		for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
216			if (((MLX4_MAC_MASK & mac) == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))))
217				index_at_port = i;
218			if (((MLX4_MAC_MASK & mac) == (MLX4_MAC_MASK & be64_to_cpu(dup_table->entries[i]))))
219				index_at_dup_port = i;
220		}
221
222		/* check that same mac is not in the tables at different indices */
223		if ((index_at_port != index_at_dup_port) &&
224		    (index_at_port >= 0) &&
225		    (index_at_dup_port >= 0))
226			can_mf_bond = false;
227
228		/* If the mac is already in the primary table, the slot must be
229		 * available in the duplicate table as well.
230		 */
231		if (index_at_port >= 0 && index_at_dup_port < 0 &&
232		    dup_table->refs[index_at_port]) {
233			can_mf_bond = false;
234		}
235		/* If the mac is already in the duplicate table, check that the
236		 * corresponding index is not occupied in the primary table, or
237		 * the primary table already contains the mac at the same index.
238		 * Otherwise, you cannot bond (primary contains a different mac
239		 * at that index).
240		 */
241		if (index_at_dup_port >= 0) {
242			if (!table->refs[index_at_dup_port] ||
243			    ((MLX4_MAC_MASK & mac) == (MLX4_MAC_MASK & be64_to_cpu(table->entries[index_at_dup_port]))))
244				free_for_dup = index_at_dup_port;
245			else
246				can_mf_bond = false;
247		}
248	}
249
250	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
251		if (!table->refs[i]) {
252			if (free < 0)
253				free = i;
254			if (free_for_dup < 0 && need_mf_bond && can_mf_bond) {
255				if (!dup_table->refs[i])
256					free_for_dup = i;
257			}
258			continue;
259		}
260
261		if ((MLX4_MAC_MASK & mac) ==
262		     (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
263			/* MAC already registered, increment ref count */
264			err = i;
265			++table->refs[i];
266			if (dup) {
267				u64 dup_mac = MLX4_MAC_MASK & be64_to_cpu(dup_table->entries[i]);
268
269				if (dup_mac != mac || !dup_table->is_dup[i]) {
270					mlx4_warn(dev, "register mac: expect duplicate mac 0x%llx on port %d index %d\n",
271						  mac, dup_port, i);
272				}
273			}
274			goto out;
275		}
276	}
277
278	if (need_mf_bond && (free_for_dup < 0)) {
279		if (dup) {
			mlx4_warn(dev, "Failed to allocate a duplicate MAC table entry\n");
281			mlx4_warn(dev, "High Availability for virtual functions may not work as expected\n");
282			dup = false;
283		}
284		can_mf_bond = false;
285	}
286
287	if (need_mf_bond && can_mf_bond)
288		free = free_for_dup;
289
290	mlx4_dbg(dev, "Free MAC index is %d\n", free);
291
292	if (table->total == table->max) {
293		/* No free mac entries */
294		err = -ENOSPC;
295		goto out;
296	}
297
298	/* Register new MAC */
299	table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID);
300
301	err = mlx4_set_port_mac_table(dev, port, table->entries);
302	if (unlikely(err)) {
303		mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
304			 (unsigned long long) mac);
305		table->entries[free] = 0;
306		goto out;
307	}
308	table->refs[free] = 1;
309	table->is_dup[free] = false;
310	++table->total;
311	if (dup) {
312		dup_table->refs[free] = 0;
313		dup_table->is_dup[free] = true;
314		dup_table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID);
315
316		err = mlx4_set_port_mac_table(dev, dup_port, dup_table->entries);
317		if (unlikely(err)) {
318			mlx4_warn(dev, "Failed adding duplicate mac: 0x%llx\n", mac);
319			dup_table->is_dup[free] = false;
320			dup_table->entries[free] = 0;
321			goto out;
322		}
323		++dup_table->total;
324	}
325	err = free;
326out:
327	if (need_mf_bond) {
328		if (port == 2) {
329			mutex_unlock(&table->mutex);
330			mutex_unlock(&dup_table->mutex);
331		} else {
332			mutex_unlock(&dup_table->mutex);
333			mutex_unlock(&table->mutex);
334		}
335	} else {
336		mutex_unlock(&table->mutex);
337	}
338	return err;
339}
340EXPORT_SYMBOL_GPL(__mlx4_register_mac);
341
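/* Wrapper used by callers that may run on a VF: slaves go through the
 * ALLOC_RES command (falling back to the old REG_MAC format on -EINVAL),
 * while the PF registers directly via __mlx4_register_mac().
 *
 * Typical caller flow (a sketch; the qpn derivation assumes A0 steering,
 * where each MAC table index maps onto a QP relative to the port's base QPN):
 *
 *	int idx = mlx4_register_mac(dev, port, mac);
 *	if (idx < 0)
 *		return idx;
 *	qpn = mlx4_get_base_qpn(dev, port) + idx;
 *	...
 *	mlx4_unregister_mac(dev, port, mac);
 */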
342int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
343{
344	u64 out_param = 0;
345	int err = -EINVAL;
346
347	if (mlx4_is_mfunc(dev)) {
348		if (!(dev->flags & MLX4_FLAG_OLD_REG_MAC)) {
349			err = mlx4_cmd_imm(dev, mac, &out_param,
350					   ((u32) port) << 8 | (u32) RES_MAC,
351					   RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
352					   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
353		}
		if (err == -EINVAL && mlx4_is_slave(dev)) {
355			/* retry using old REG_MAC format */
356			set_param_l(&out_param, port);
357			err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
358					   RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
359					   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
360			if (!err)
361				dev->flags |= MLX4_FLAG_OLD_REG_MAC;
362		}
363		if (err)
364			return err;
365
366		return get_param_l(&out_param);
367	}
368	return __mlx4_register_mac(dev, port, mac);
369}
370EXPORT_SYMBOL_GPL(mlx4_register_mac);
371
372int mlx4_get_base_qpn(struct mlx4_dev *dev, u8 port)
373{
374	return dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
375			(port - 1) * (1 << dev->caps.log_num_macs);
376}
377EXPORT_SYMBOL_GPL(mlx4_get_base_qpn);
378
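/* Drop one reference on a registered MAC.  The hardware table entry is only
 * cleared once the last reference is gone and the entry is not being kept
 * alive as a bond duplicate of the other port.
 */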
379void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
380{
381	struct mlx4_port_info *info;
382	struct mlx4_mac_table *table;
383	int index;
384	bool dup = mlx4_is_mf_bonded(dev);
385	u8 dup_port = (port == 1) ? 2 : 1;
386	struct mlx4_mac_table *dup_table = &mlx4_priv(dev)->port[dup_port].mac_table;
387
388	if (port < 1 || port > dev->caps.num_ports) {
389		mlx4_warn(dev, "invalid port number (%d), aborting...\n", port);
390		return;
391	}
392	info = &mlx4_priv(dev)->port[port];
393	table = &info->mac_table;
394
395	if (dup) {
396		if (port == 1) {
397			mutex_lock(&table->mutex);
398			mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
399		} else {
400			mutex_lock(&dup_table->mutex);
401			mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
402		}
403	} else {
404		mutex_lock(&table->mutex);
405	}
406
407	index = find_index(dev, table, mac);
408
409	if (validate_index(dev, table, index))
410		goto out;
411
412	if (--table->refs[index] || table->is_dup[index]) {
413		mlx4_dbg(dev, "Have more references for index %d, no need to modify mac table\n",
414			 index);
415		if (!table->refs[index])
416			dup_table->is_dup[index] = false;
417		goto out;
418	}
419
420	table->entries[index] = 0;
421	if (mlx4_set_port_mac_table(dev, port, table->entries))
		mlx4_warn(dev, "Failed to set MAC in port %d during unregister\n", port);
423	--table->total;
424
425	if (dup) {
426		dup_table->is_dup[index] = false;
427		if (dup_table->refs[index])
428			goto out;
429		dup_table->entries[index] = 0;
430		if (mlx4_set_port_mac_table(dev, dup_port, dup_table->entries))
			mlx4_warn(dev, "Failed to set MAC in duplicate port %d during unregister\n", dup_port);
432
		--dup_table->total;
434	}
435out:
436	if (dup) {
437		if (port == 2) {
438			mutex_unlock(&table->mutex);
439			mutex_unlock(&dup_table->mutex);
440		} else {
441			mutex_unlock(&dup_table->mutex);
442			mutex_unlock(&table->mutex);
443		}
444	} else {
445		mutex_unlock(&table->mutex);
446	}
447}
448EXPORT_SYMBOL_GPL(__mlx4_unregister_mac);
449
450void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
451{
452	u64 out_param = 0;
453
454	if (mlx4_is_mfunc(dev)) {
455		if (!(dev->flags & MLX4_FLAG_OLD_REG_MAC)) {
456			(void) mlx4_cmd_imm(dev, mac, &out_param,
457					    ((u32) port) << 8 | (u32) RES_MAC,
458					    RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
459					    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
460		} else {
461			/* use old unregister mac format */
462			set_param_l(&out_param, port);
463			(void) mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
464					    RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
465					    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
466		}
467		return;
468	}
469	__mlx4_unregister_mac(dev, port, mac);
470	return;
471}
472EXPORT_SYMBOL_GPL(mlx4_unregister_mac);
473
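/* Overwrite the MAC at the table index implied by @qpn (qpn - base_qpn)
 * with @new_mac, pushing the updated table to the device and mirroring the
 * change to the bonded port when mf-bonding is active.
 */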
474int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac)
475{
476	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
477	struct mlx4_mac_table *table = &info->mac_table;
478	int index = qpn - info->base_qpn;
479	int err = 0;
480	bool dup = mlx4_is_mf_bonded(dev);
481	u8 dup_port = (port == 1) ? 2 : 1;
482	struct mlx4_mac_table *dup_table = &mlx4_priv(dev)->port[dup_port].mac_table;
483
484	/* CX1 doesn't support multi-functions */
485	if (dup) {
486		if (port == 1) {
487			mutex_lock(&table->mutex);
488			mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
489		} else {
490			mutex_lock(&dup_table->mutex);
491			mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
492		}
493	} else {
494		mutex_lock(&table->mutex);
495	}
496
497	err = validate_index(dev, table, index);
498	if (err)
499		goto out;
500
501	table->entries[index] = cpu_to_be64(new_mac | MLX4_MAC_VALID);
502
503	err = mlx4_set_port_mac_table(dev, port, table->entries);
504	if (unlikely(err)) {
505		mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
506			 (unsigned long long) new_mac);
507		table->entries[index] = 0;
508	} else {
509		if (dup) {
510			dup_table->entries[index] = cpu_to_be64(new_mac | MLX4_MAC_VALID);
511
512			err = mlx4_set_port_mac_table(dev, dup_port, dup_table->entries);
513			if (unlikely(err)) {
514				mlx4_err(dev, "Failed adding duplicate MAC: 0x%llx\n",
515					 (unsigned long long)new_mac);
516				dup_table->entries[index] = 0;
517			}
518		}
519	}
520out:
521	if (dup) {
522		if (port == 2) {
523			mutex_unlock(&table->mutex);
524			mutex_unlock(&dup_table->mutex);
525		} else {
526			mutex_unlock(&dup_table->mutex);
527			mutex_unlock(&table->mutex);
528		}
529	} else {
530		mutex_unlock(&table->mutex);
531	}
532	return err;
533}
534EXPORT_SYMBOL_GPL(__mlx4_replace_mac);
535
536static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
537				    __be32 *entries)
538{
539	struct mlx4_cmd_mailbox *mailbox;
540	u32 in_mod;
541	int err;
542
543	mailbox = mlx4_alloc_cmd_mailbox(dev);
544	if (IS_ERR(mailbox))
545		return PTR_ERR(mailbox);
546
547	memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE);
548	in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port;
549	err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
550		       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
551		       MLX4_CMD_NATIVE);
552
553	mlx4_free_cmd_mailbox(dev, mailbox);
554
555	return err;
556}
557
558int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx)
559{
560	struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
561	int i;
562
563	for (i = 0; i < MLX4_MAX_VLAN_NUM; ++i) {
564		if (table->refs[i] &&
565		    (vid == (MLX4_VLAN_MASK &
566			      be32_to_cpu(table->entries[i])))) {
			/* VLAN found in the table, return its index */
568			*idx = i;
569			return 0;
570		}
571	}
572
573	return -ENOENT;
574}
575EXPORT_SYMBOL_GPL(mlx4_find_cached_vlan);
576
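/* VLAN registration follows the same scheme as MAC registration above:
 * reference-counted shadow table, mirroring into the other port's table
 * under mf-bonding, and the same port-ordered locking.  Search and
 * allocation start at MLX4_VLAN_REGULAR, since the lower indices hold the
 * special entries (e.g. the no-VLAN and VLAN-miss indices).
 */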
577int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan,
578				int *index)
579{
580	struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
581	int i, err = 0;
582	int free = -1;
583	int free_for_dup = -1;
584	bool dup = mlx4_is_mf_bonded(dev);
585	u8 dup_port = (port == 1) ? 2 : 1;
586	struct mlx4_vlan_table *dup_table = &mlx4_priv(dev)->port[dup_port].vlan_table;
587	bool need_mf_bond = mlx4_need_mf_bond(dev);
588	bool can_mf_bond = true;
589
590	mlx4_dbg(dev, "Registering VLAN: %d for port %d %s duplicate\n",
591		 vlan, port,
592		 dup ? "with" : "without");
593
594	if (need_mf_bond) {
595		if (port == 1) {
596			mutex_lock(&table->mutex);
597			mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
598		} else {
599			mutex_lock(&dup_table->mutex);
600			mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
601		}
602	} else {
603		mutex_lock(&table->mutex);
604	}
605
606	if (table->total == table->max) {
607		/* No free vlan entries */
608		err = -ENOSPC;
609		goto out;
610	}
611
612	if (need_mf_bond) {
613		int index_at_port = -1;
614		int index_at_dup_port = -1;
615
616		for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) {
617			if (vlan == (MLX4_VLAN_MASK & be32_to_cpu(table->entries[i])))
618				index_at_port = i;
619			if (vlan == (MLX4_VLAN_MASK & be32_to_cpu(dup_table->entries[i])))
620				index_at_dup_port = i;
621		}
622		/* check that same vlan is not in the tables at different indices */
623		if ((index_at_port != index_at_dup_port) &&
624		    (index_at_port >= 0) &&
625		    (index_at_dup_port >= 0))
626			can_mf_bond = false;
627
628		/* If the vlan is already in the primary table, the slot must be
629		 * available in the duplicate table as well.
630		 */
631		if (index_at_port >= 0 && index_at_dup_port < 0 &&
632		    dup_table->refs[index_at_port]) {
633			can_mf_bond = false;
634		}
635		/* If the vlan is already in the duplicate table, check that the
636		 * corresponding index is not occupied in the primary table, or
637		 * the primary table already contains the vlan at the same index.
638		 * Otherwise, you cannot bond (primary contains a different vlan
639		 * at that index).
640		 */
641		if (index_at_dup_port >= 0) {
642			if (!table->refs[index_at_dup_port] ||
643			    (vlan == (MLX4_VLAN_MASK & be32_to_cpu(dup_table->entries[index_at_dup_port]))))
644				free_for_dup = index_at_dup_port;
645			else
646				can_mf_bond = false;
647		}
648	}
649
650	for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) {
651		if (!table->refs[i]) {
652			if (free < 0)
653				free = i;
654			if (free_for_dup < 0 && need_mf_bond && can_mf_bond) {
655				if (!dup_table->refs[i])
656					free_for_dup = i;
657			}
658		}
659
660		if ((table->refs[i] || table->is_dup[i]) &&
661		    (vlan == (MLX4_VLAN_MASK &
662			      be32_to_cpu(table->entries[i])))) {
			/* VLAN already registered, increase reference count */
664			mlx4_dbg(dev, "vlan %u is already registered.\n", vlan);
665			*index = i;
666			++table->refs[i];
667			if (dup) {
668				u16 dup_vlan = MLX4_VLAN_MASK & be32_to_cpu(dup_table->entries[i]);
669
670				if (dup_vlan != vlan || !dup_table->is_dup[i]) {
671					mlx4_warn(dev, "register vlan: expected duplicate vlan %u on port %d index %d\n",
672						  vlan, dup_port, i);
673				}
674			}
675			goto out;
676		}
677	}
678
679	if (need_mf_bond && (free_for_dup < 0)) {
680		if (dup) {
			mlx4_warn(dev, "Failed to allocate a duplicate VLAN table entry\n");
682			mlx4_warn(dev, "High Availability for virtual functions may not work as expected\n");
683			dup = false;
684		}
685		can_mf_bond = false;
686	}
687
688	if (need_mf_bond && can_mf_bond)
689		free = free_for_dup;
690
691	if (free < 0) {
692		err = -ENOMEM;
693		goto out;
694	}
695
696	/* Register new VLAN */
697	table->refs[free] = 1;
698	table->is_dup[free] = false;
699	table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID);
700
701	err = mlx4_set_port_vlan_table(dev, port, table->entries);
702	if (unlikely(err)) {
703		mlx4_warn(dev, "Failed adding vlan: %u\n", vlan);
704		table->refs[free] = 0;
705		table->entries[free] = 0;
706		goto out;
707	}
708	++table->total;
709	if (dup) {
710		dup_table->refs[free] = 0;
711		dup_table->is_dup[free] = true;
712		dup_table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID);
713
714		err = mlx4_set_port_vlan_table(dev, dup_port, dup_table->entries);
715		if (unlikely(err)) {
716			mlx4_warn(dev, "Failed adding duplicate vlan: %u\n", vlan);
717			dup_table->is_dup[free] = false;
718			dup_table->entries[free] = 0;
719			goto out;
720		}
721		++dup_table->total;
722	}
723
724	*index = free;
725out:
726	if (need_mf_bond) {
727		if (port == 2) {
728			mutex_unlock(&table->mutex);
729			mutex_unlock(&dup_table->mutex);
730		} else {
731			mutex_unlock(&dup_table->mutex);
732			mutex_unlock(&table->mutex);
733		}
734	} else {
735		mutex_unlock(&table->mutex);
736	}
737	return err;
738}
739
740int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
741{
742	u64 out_param = 0;
743	int err;
744
745	if (vlan > 4095)
746		return -EINVAL;
747
748	if (mlx4_is_mfunc(dev)) {
749		err = mlx4_cmd_imm(dev, vlan, &out_param,
750				   ((u32) port) << 8 | (u32) RES_VLAN,
751				   RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
752				   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
753		if (!err)
754			*index = get_param_l(&out_param);
755
756		return err;
757	}
758	return __mlx4_register_vlan(dev, port, vlan, index);
759}
760EXPORT_SYMBOL_GPL(mlx4_register_vlan);
761
762void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
763{
764	struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
765	int index;
766	bool dup = mlx4_is_mf_bonded(dev);
767	u8 dup_port = (port == 1) ? 2 : 1;
768	struct mlx4_vlan_table *dup_table = &mlx4_priv(dev)->port[dup_port].vlan_table;
769
770	if (dup) {
771		if (port == 1) {
772			mutex_lock(&table->mutex);
773			mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
774		} else {
775			mutex_lock(&dup_table->mutex);
776			mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
777		}
778	} else {
779		mutex_lock(&table->mutex);
780	}
781
782	if (mlx4_find_cached_vlan(dev, port, vlan, &index)) {
783		mlx4_warn(dev, "vlan 0x%x is not in the vlan table\n", vlan);
784		goto out;
785	}
786
787	if (index < MLX4_VLAN_REGULAR) {
788		mlx4_warn(dev, "Trying to free special vlan index %d\n", index);
789		goto out;
790	}
791
792	if (--table->refs[index] || table->is_dup[index]) {
793		mlx4_dbg(dev, "Have %d more references for index %d, no need to modify vlan table\n",
794			 table->refs[index], index);
795		if (!table->refs[index])
796			dup_table->is_dup[index] = false;
797		goto out;
798	}
799	table->entries[index] = 0;
800	if (mlx4_set_port_vlan_table(dev, port, table->entries))
		mlx4_warn(dev, "Failed to set VLAN in port %d during unregister\n", port);
802	--table->total;
803	if (dup) {
804		dup_table->is_dup[index] = false;
805		if (dup_table->refs[index])
806			goto out;
807		dup_table->entries[index] = 0;
808		if (mlx4_set_port_vlan_table(dev, dup_port, dup_table->entries))
			mlx4_warn(dev, "Failed to set VLAN in duplicate port %d during unregister\n", dup_port);
810		--dup_table->total;
811	}
812out:
813	if (dup) {
814		if (port == 2) {
815			mutex_unlock(&table->mutex);
816			mutex_unlock(&dup_table->mutex);
817		} else {
818			mutex_unlock(&dup_table->mutex);
819			mutex_unlock(&table->mutex);
820		}
821	} else {
822		mutex_unlock(&table->mutex);
823	}
824}
825
826void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
827{
828	u64 out_param = 0;
829
830	if (mlx4_is_mfunc(dev)) {
831		(void) mlx4_cmd_imm(dev, vlan, &out_param,
832				    ((u32) port) << 8 | (u32) RES_VLAN,
833				    RES_OP_RESERVE_AND_MAP,
834				    MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
835				    MLX4_CMD_WRAPPED);
836		return;
837	}
838	__mlx4_unregister_vlan(dev, port, vlan);
839}
840EXPORT_SYMBOL_GPL(mlx4_unregister_vlan);
841
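/* Bonding/unbonding of the whole MAC (and, below, VLAN) tables for port
 * bonding: mlx4_bond_mac_table() copies every entry that exists on only one
 * port into the other port's table and marks both copies is_dup; it fails
 * with -EINVAL if the two ports hold different values at the same index.
 * mlx4_unbond_mac_table() clears the is_dup marks and removes entries that
 * have no local references, re-writing the hardware tables as needed.
 */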
842int mlx4_bond_mac_table(struct mlx4_dev *dev)
843{
844	struct mlx4_mac_table *t1 = &mlx4_priv(dev)->port[1].mac_table;
845	struct mlx4_mac_table *t2 = &mlx4_priv(dev)->port[2].mac_table;
846	int ret = 0;
847	int i;
848	bool update1 = false;
849	bool update2 = false;
850
851	mutex_lock(&t1->mutex);
852	mutex_lock(&t2->mutex);
853	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
854		if ((t1->entries[i] != t2->entries[i]) &&
855		    t1->entries[i] && t2->entries[i]) {
856			mlx4_warn(dev, "can't duplicate entry %d in mac table\n", i);
857			ret = -EINVAL;
858			goto unlock;
859		}
860	}
861
862	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
863		if (t1->entries[i] && !t2->entries[i]) {
864			t2->entries[i] = t1->entries[i];
865			t2->is_dup[i] = true;
866			update2 = true;
867		} else if (!t1->entries[i] && t2->entries[i]) {
868			t1->entries[i] = t2->entries[i];
869			t1->is_dup[i] = true;
870			update1 = true;
871		} else if (t1->entries[i] && t2->entries[i]) {
872			t1->is_dup[i] = true;
873			t2->is_dup[i] = true;
874		}
875	}
876
877	if (update1) {
878		ret = mlx4_set_port_mac_table(dev, 1, t1->entries);
879		if (ret)
880			mlx4_warn(dev, "failed to set MAC table for port 1 (%d)\n", ret);
881	}
882	if (!ret && update2) {
883		ret = mlx4_set_port_mac_table(dev, 2, t2->entries);
884		if (ret)
885			mlx4_warn(dev, "failed to set MAC table for port 2 (%d)\n", ret);
886	}
887
888	if (ret)
889		mlx4_warn(dev, "failed to create mirror MAC tables\n");
890unlock:
891	mutex_unlock(&t2->mutex);
892	mutex_unlock(&t1->mutex);
893	return ret;
894}
895
896int mlx4_unbond_mac_table(struct mlx4_dev *dev)
897{
898	struct mlx4_mac_table *t1 = &mlx4_priv(dev)->port[1].mac_table;
899	struct mlx4_mac_table *t2 = &mlx4_priv(dev)->port[2].mac_table;
900	int ret = 0;
901	int ret1;
902	int i;
903	bool update1 = false;
904	bool update2 = false;
905
906	mutex_lock(&t1->mutex);
907	mutex_lock(&t2->mutex);
908	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
909		if (t1->entries[i] != t2->entries[i]) {
910			mlx4_warn(dev, "mac table is in an unexpected state when trying to unbond\n");
911			ret = -EINVAL;
912			goto unlock;
913		}
914	}
915
916	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
917		if (!t1->entries[i])
918			continue;
919		t1->is_dup[i] = false;
920		if (!t1->refs[i]) {
921			t1->entries[i] = 0;
922			update1 = true;
923		}
924		t2->is_dup[i] = false;
925		if (!t2->refs[i]) {
926			t2->entries[i] = 0;
927			update2 = true;
928		}
929	}
930
931	if (update1) {
932		ret = mlx4_set_port_mac_table(dev, 1, t1->entries);
933		if (ret)
934			mlx4_warn(dev, "failed to unmirror MAC tables for port 1(%d)\n", ret);
935	}
936	if (update2) {
937		ret1 = mlx4_set_port_mac_table(dev, 2, t2->entries);
938		if (ret1) {
939			mlx4_warn(dev, "failed to unmirror MAC tables for port 2(%d)\n", ret1);
940			ret = ret1;
941		}
942	}
943unlock:
944	mutex_unlock(&t2->mutex);
945	mutex_unlock(&t1->mutex);
946	return ret;
947}
948
949int mlx4_bond_vlan_table(struct mlx4_dev *dev)
950{
951	struct mlx4_vlan_table *t1 = &mlx4_priv(dev)->port[1].vlan_table;
952	struct mlx4_vlan_table *t2 = &mlx4_priv(dev)->port[2].vlan_table;
953	int ret = 0;
954	int i;
955	bool update1 = false;
956	bool update2 = false;
957
958	mutex_lock(&t1->mutex);
959	mutex_lock(&t2->mutex);
960	for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
961		if ((t1->entries[i] != t2->entries[i]) &&
962		    t1->entries[i] && t2->entries[i]) {
963			mlx4_warn(dev, "can't duplicate entry %d in vlan table\n", i);
964			ret = -EINVAL;
965			goto unlock;
966		}
967	}
968
969	for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
970		if (t1->entries[i] && !t2->entries[i]) {
971			t2->entries[i] = t1->entries[i];
972			t2->is_dup[i] = true;
973			update2 = true;
974		} else if (!t1->entries[i] && t2->entries[i]) {
975			t1->entries[i] = t2->entries[i];
976			t1->is_dup[i] = true;
977			update1 = true;
978		} else if (t1->entries[i] && t2->entries[i]) {
979			t1->is_dup[i] = true;
980			t2->is_dup[i] = true;
981		}
982	}
983
984	if (update1) {
985		ret = mlx4_set_port_vlan_table(dev, 1, t1->entries);
986		if (ret)
987			mlx4_warn(dev, "failed to set VLAN table for port 1 (%d)\n", ret);
988	}
989	if (!ret && update2) {
990		ret = mlx4_set_port_vlan_table(dev, 2, t2->entries);
991		if (ret)
992			mlx4_warn(dev, "failed to set VLAN table for port 2 (%d)\n", ret);
993	}
994
995	if (ret)
996		mlx4_warn(dev, "failed to create mirror VLAN tables\n");
997unlock:
998	mutex_unlock(&t2->mutex);
999	mutex_unlock(&t1->mutex);
1000	return ret;
1001}
1002
1003int mlx4_unbond_vlan_table(struct mlx4_dev *dev)
1004{
1005	struct mlx4_vlan_table *t1 = &mlx4_priv(dev)->port[1].vlan_table;
1006	struct mlx4_vlan_table *t2 = &mlx4_priv(dev)->port[2].vlan_table;
1007	int ret = 0;
1008	int ret1;
1009	int i;
1010	bool update1 = false;
1011	bool update2 = false;
1012
1013	mutex_lock(&t1->mutex);
1014	mutex_lock(&t2->mutex);
1015	for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
1016		if (t1->entries[i] != t2->entries[i]) {
1017			mlx4_warn(dev, "vlan table is in an unexpected state when trying to unbond\n");
1018			ret = -EINVAL;
1019			goto unlock;
1020		}
1021	}
1022
1023	for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
1024		if (!t1->entries[i])
1025			continue;
1026		t1->is_dup[i] = false;
1027		if (!t1->refs[i]) {
1028			t1->entries[i] = 0;
1029			update1 = true;
1030		}
1031		t2->is_dup[i] = false;
1032		if (!t2->refs[i]) {
1033			t2->entries[i] = 0;
1034			update2 = true;
1035		}
1036	}
1037
1038	if (update1) {
1039		ret = mlx4_set_port_vlan_table(dev, 1, t1->entries);
1040		if (ret)
1041			mlx4_warn(dev, "failed to unmirror VLAN tables for port 1(%d)\n", ret);
1042	}
1043	if (update2) {
1044		ret1 = mlx4_set_port_vlan_table(dev, 2, t2->entries);
1045		if (ret1) {
1046			mlx4_warn(dev, "failed to unmirror VLAN tables for port 2(%d)\n", ret1);
1047			ret = ret1;
1048		}
1049	}
1050unlock:
1051	mutex_unlock(&t2->mutex);
1052	mutex_unlock(&t1->mutex);
1053	return ret;
1054}
1055
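/* Query the IB port capability mask via a MAD_IFC command: the inbox holds a
 * PortInfo (attribute 0x0015) MAD request for @port, and the capability mask
 * is read back from the returned PortInfo at byte offset 84 of the response
 * mailbox.
 */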
1056int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
1057{
1058	struct mlx4_cmd_mailbox *inmailbox, *outmailbox;
1059	u8 *inbuf, *outbuf;
1060	int err;
1061
1062	inmailbox = mlx4_alloc_cmd_mailbox(dev);
1063	if (IS_ERR(inmailbox))
1064		return PTR_ERR(inmailbox);
1065
1066	outmailbox = mlx4_alloc_cmd_mailbox(dev);
1067	if (IS_ERR(outmailbox)) {
1068		mlx4_free_cmd_mailbox(dev, inmailbox);
1069		return PTR_ERR(outmailbox);
1070	}
1071
1072	inbuf = inmailbox->buf;
1073	outbuf = outmailbox->buf;
1074	inbuf[0] = 1;
1075	inbuf[1] = 1;
1076	inbuf[2] = 1;
1077	inbuf[3] = 1;
1078	*(__be16 *) (&inbuf[16]) = cpu_to_be16(0x0015);
1079	*(__be32 *) (&inbuf[20]) = cpu_to_be32(port);
1080
1081	err = mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3,
1082			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
1083			   MLX4_CMD_NATIVE);
1084	if (!err)
1085		*caps = *(__be32 *) (outbuf + 84);
1086	mlx4_free_cmd_mailbox(dev, inmailbox);
1087	mlx4_free_cmd_mailbox(dev, outmailbox);
1088	return err;
1089}

static struct mlx4_roce_gid_entry zgid_entry;
1091
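/* RoCE GID table partitioning: the PF owns the first MLX4_ROCE_PF_GIDS
 * entries, and the remaining (MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS)
 * entries are split evenly among the VFs active on the port, with the first
 * (gids % vfs) VFs receiving one extra entry.  mlx4_get_slave_num_gids()
 * returns a slave's share and mlx4_get_base_gid_ix() its starting index;
 * both first translate the global slave number into a per-port slave_gid by
 * discounting the slaves counted on the other ports.
 */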
1092int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port)
1093{
1094	int vfs;
1095	int slave_gid = slave;
1096	unsigned i;
1097	struct mlx4_slaves_pport slaves_pport;
1098	struct mlx4_active_ports actv_ports;
1099	unsigned max_port_p_one;
1100
1101	if (slave == 0)
1102		return MLX4_ROCE_PF_GIDS;
1103
1104	/* Slave is a VF */
1105	slaves_pport = mlx4_phys_to_slaves_pport(dev, port);
1106	actv_ports = mlx4_get_active_ports(dev, slave);
1107	max_port_p_one = find_first_bit(actv_ports.ports, dev->caps.num_ports) +
1108		bitmap_weight(actv_ports.ports, dev->caps.num_ports) + 1;
1109
1110	for (i = 1; i < max_port_p_one; i++) {
1111		struct mlx4_active_ports exclusive_ports;
1112		struct mlx4_slaves_pport slaves_pport_actv;
1113		bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
1114		set_bit(i - 1, exclusive_ports.ports);
1115		if (i == port)
1116			continue;
1117		slaves_pport_actv = mlx4_phys_to_slaves_pport_actv(
1118				    dev, &exclusive_ports);
1119		slave_gid -= bitmap_weight(slaves_pport_actv.slaves,
1120					   dev->persist->num_vfs + 1);
1121	}
1122	vfs = bitmap_weight(slaves_pport.slaves, dev->persist->num_vfs + 1) - 1;
1123	if (slave_gid <= ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) % vfs))
1124		return ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs) + 1;
1125	return (MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs;
1126}
1127
1128int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave, int port)
1129{
1130	int gids;
1131	unsigned i;
1132	int slave_gid = slave;
1133	int vfs;
1134
1135	struct mlx4_slaves_pport slaves_pport;
1136	struct mlx4_active_ports actv_ports;
1137	unsigned max_port_p_one;
1138
1139	if (slave == 0)
1140		return 0;
1141
1142	slaves_pport = mlx4_phys_to_slaves_pport(dev, port);
1143	actv_ports = mlx4_get_active_ports(dev, slave);
1144	max_port_p_one = find_first_bit(actv_ports.ports, dev->caps.num_ports) +
1145		bitmap_weight(actv_ports.ports, dev->caps.num_ports) + 1;
1146
1147	for (i = 1; i < max_port_p_one; i++) {
1148		struct mlx4_active_ports exclusive_ports;
1149		struct mlx4_slaves_pport slaves_pport_actv;
1150		bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
1151		set_bit(i - 1, exclusive_ports.ports);
1152		if (i == port)
1153			continue;
1154		slaves_pport_actv = mlx4_phys_to_slaves_pport_actv(
1155				    dev, &exclusive_ports);
1156		slave_gid -= bitmap_weight(slaves_pport_actv.slaves,
1157					   dev->persist->num_vfs + 1);
1158	}
1159	gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS;
1160	vfs = bitmap_weight(slaves_pport.slaves, dev->persist->num_vfs + 1) - 1;
1161	if (slave_gid <= gids % vfs)
1162		return MLX4_ROCE_PF_GIDS + ((gids / vfs) + 1) * (slave_gid - 1);
1163
1164	return MLX4_ROCE_PF_GIDS + (gids % vfs) +
1165		((gids / vfs) * (slave_gid - 1));
1166}
1167EXPORT_SYMBOL_GPL(mlx4_get_base_gid_ix);
1168
1169static int mlx4_reset_roce_port_gids(struct mlx4_dev *dev, int slave,
1170				     int port, struct mlx4_cmd_mailbox *mailbox)
1171{
1172	struct mlx4_roce_gid_entry *gid_entry_mbox;
1173	struct mlx4_priv *priv = mlx4_priv(dev);
1174	int num_gids, base, offset;
1175	int i, err;
1176
1177	num_gids = mlx4_get_slave_num_gids(dev, slave, port);
1178	base = mlx4_get_base_gid_ix(dev, slave, port);
1179
1180	memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);
1181
1182	mutex_lock(&(priv->port[port].gid_table.mutex));
1183	/* Zero-out gids belonging to that slave in the port GID table */
1184	for (i = 0, offset = base; i < num_gids; offset++, i++)
1185		memcpy(priv->port[port].gid_table.roce_gids[offset].raw,
1186		       zgid_entry.raw, MLX4_ROCE_GID_ENTRY_SIZE);
1187
1188	/* Now, copy roce port gids table to mailbox for passing to FW */
1189	gid_entry_mbox = (struct mlx4_roce_gid_entry *)mailbox->buf;
1190	for (i = 0; i < MLX4_ROCE_MAX_GIDS; gid_entry_mbox++, i++)
1191		memcpy(gid_entry_mbox->raw,
1192		       priv->port[port].gid_table.roce_gids[i].raw,
1193		       MLX4_ROCE_GID_ENTRY_SIZE);
1194
1195	err = mlx4_cmd(dev, mailbox->dma,
1196		       ((u32)port) | (MLX4_SET_PORT_GID_TABLE << 8),
1197		       MLX4_SET_PORT_ETH_OPCODE, MLX4_CMD_SET_PORT,
1198		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
1199	mutex_unlock(&(priv->port[port].gid_table.mutex));
1200	return err;
1201}
1202
1203
1204void mlx4_reset_roce_gids(struct mlx4_dev *dev, int slave)
1205{
1206	struct mlx4_active_ports actv_ports;
1207	struct mlx4_cmd_mailbox *mailbox;
1208	int num_eth_ports, err;
1209	int i;
1210
1211	if (slave < 0 || slave > dev->persist->num_vfs)
1212		return;
1213
1214	actv_ports = mlx4_get_active_ports(dev, slave);
1215
1216	for (i = 0, num_eth_ports = 0; i < dev->caps.num_ports; i++) {
1217		if (test_bit(i, actv_ports.ports)) {
1218			if (dev->caps.port_type[i + 1] != MLX4_PORT_TYPE_ETH)
1219				continue;
1220			num_eth_ports++;
1221		}
1222	}
1223
1224	if (!num_eth_ports)
1225		return;
1226
1227	/* have ETH ports.  Alloc mailbox for SET_PORT command */
1228	mailbox = mlx4_alloc_cmd_mailbox(dev);
1229	if (IS_ERR(mailbox))
1230		return;
1231
1232	for (i = 0; i < dev->caps.num_ports; i++) {
1233		if (test_bit(i, actv_ports.ports)) {
1234			if (dev->caps.port_type[i + 1] != MLX4_PORT_TYPE_ETH)
1235				continue;
1236			err = mlx4_reset_roce_port_gids(dev, slave, i + 1, mailbox);
1237			if (err)
1238				mlx4_warn(dev, "Could not reset ETH port GID table for slave %d, port %d (%d)\n",
1239					  slave, i + 1, err);
1240		}
1241	}
1242
1243	mlx4_free_cmd_mailbox(dev, mailbox);
1244	return;
1245}
1246
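/* When a slave issues SET_PORT_GENERAL, the master does not apply the
 * slave's MTU / USER_MTU directly.  It records the per-slave request and
 * rewrites the mailbox with the maximum value requested across all functions
 * on the port, so the effective port setting is always the largest one any
 * function asked for.
 */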
1247static void
1248mlx4_en_set_port_mtu(struct mlx4_dev *dev, int slave, int port,
1249		     struct mlx4_set_port_general_context *gen_context)
1250{
1251	struct mlx4_priv *priv = mlx4_priv(dev);
1252	struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master;
1253	struct mlx4_slave_state *slave_st = &master->slave_state[slave];
1254	u16 mtu, prev_mtu;
1255
	/* MTU is configured as the max MTU among all
	 * the functions on the port.
	 */
1259	mtu = be16_to_cpu(gen_context->mtu);
1260	mtu = min_t(int, mtu, dev->caps.eth_mtu_cap[port] +
1261		    ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
1262	prev_mtu = slave_st->mtu[port];
1263	slave_st->mtu[port] = mtu;
1264	if (mtu > master->max_mtu[port])
1265		master->max_mtu[port] = mtu;
1266	if (mtu < prev_mtu && prev_mtu == master->max_mtu[port]) {
1267		int i;
1268
1269		slave_st->mtu[port] = mtu;
1270		master->max_mtu[port] = mtu;
1271		for (i = 0; i < dev->num_slaves; i++)
1272			master->max_mtu[port] =
1273				max_t(u16, master->max_mtu[port],
1274				      master->slave_state[i].mtu[port]);
1275	}
1276	gen_context->mtu = cpu_to_be16(master->max_mtu[port]);
1277}
1278
1279static void
1280mlx4_en_set_port_user_mtu(struct mlx4_dev *dev, int slave, int port,
1281			  struct mlx4_set_port_general_context *gen_context)
1282{
1283	struct mlx4_priv *priv = mlx4_priv(dev);
1284	struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master;
1285	struct mlx4_slave_state *slave_st = &master->slave_state[slave];
1286	u16 user_mtu, prev_user_mtu;
1287
1288	/* User Mtu is configured as the max USER_MTU among all
1289	 * the functions on the port.
1290	 */
1291	user_mtu = be16_to_cpu(gen_context->user_mtu);
1292	user_mtu = min_t(int, user_mtu, dev->caps.eth_mtu_cap[port]);
1293	prev_user_mtu = slave_st->user_mtu[port];
1294	slave_st->user_mtu[port] = user_mtu;
1295	if (user_mtu > master->max_user_mtu[port])
1296		master->max_user_mtu[port] = user_mtu;
1297	if (user_mtu < prev_user_mtu &&
1298	    prev_user_mtu == master->max_user_mtu[port]) {
1299		int i;
1300
1301		slave_st->user_mtu[port] = user_mtu;
1302		master->max_user_mtu[port] = user_mtu;
1303		for (i = 0; i < dev->num_slaves; i++)
1304			master->max_user_mtu[port] =
1305				max_t(u16, master->max_user_mtu[port],
1306				      master->slave_state[i].user_mtu[port]);
1307	}
1308	gen_context->user_mtu = cpu_to_be16(master->max_user_mtu[port]);
1309}
1310
1311static void
1312mlx4_en_set_port_global_pause(struct mlx4_dev *dev, int slave,
1313			      struct mlx4_set_port_general_context *gen_context)
1314{
1315	struct mlx4_priv *priv = mlx4_priv(dev);
1316	struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master;
1317
1318	/* Slave cannot change Global Pause configuration */
1319	if (slave != mlx4_master_func_num(dev) &&
1320	    (gen_context->pptx != master->pptx ||
1321	     gen_context->pprx != master->pprx)) {
1322		gen_context->pptx = master->pptx;
1323		gen_context->pprx = master->pprx;
1324		mlx4_warn(dev, "denying Global Pause change for slave:%d\n",
1325			  slave);
1326	} else {
1327		master->pptx = gen_context->pptx;
1328		master->pprx = gen_context->pprx;
1329	}
1330}
1331
1332static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
1333				u8 op_mod, struct mlx4_cmd_mailbox *inbox)
1334{
1335	struct mlx4_priv *priv = mlx4_priv(dev);
1336	struct mlx4_port_info *port_info;
1337	struct mlx4_set_port_rqp_calc_context *qpn_context;
1338	struct mlx4_set_port_general_context *gen_context;
1339	struct mlx4_roce_gid_entry *gid_entry_tbl, *gid_entry_mbox, *gid_entry_mb1;
1340	int reset_qkey_viols;
1341	int port;
1342	int is_eth;
1343	int num_gids;
1344	int base;
1345	u32 in_modifier;
1346	u32 promisc;
1347	int err;
1348	int i, j;
1349	int offset;
1350	__be32 agg_cap_mask;
1351	__be32 slave_cap_mask;
1352	__be32 new_cap_mask;
1353
1354	port = in_mod & 0xff;
1355	in_modifier = in_mod >> 8;
1356	is_eth = op_mod;
1357	port_info = &priv->port[port];
1358
	/* Slaves cannot perform SET_PORT operations,
	 * except for setting port general parameters (e.g. MTU, USER_MTU)
	 * and the port GID table.
	 */
1362	if (is_eth) {
1363		if (slave != dev->caps.function &&
1364		    in_modifier != MLX4_SET_PORT_GENERAL &&
1365		    in_modifier != MLX4_SET_PORT_GID_TABLE) {
1366			mlx4_warn(dev, "denying SET_PORT for slave:%d\n",
1367					slave);
1368			return -EINVAL;
1369		}
1370		switch (in_modifier) {
1371		case MLX4_SET_PORT_RQP_CALC:
1372			qpn_context = inbox->buf;
1373			qpn_context->base_qpn =
1374				cpu_to_be32(port_info->base_qpn);
1375			qpn_context->n_mac = 0x7;
1376			promisc = be32_to_cpu(qpn_context->promisc) >>
1377				SET_PORT_PROMISC_SHIFT;
1378			qpn_context->promisc = cpu_to_be32(
1379				promisc << SET_PORT_PROMISC_SHIFT |
1380				port_info->base_qpn);
1381			promisc = be32_to_cpu(qpn_context->mcast) >>
1382				SET_PORT_MC_PROMISC_SHIFT;
1383			qpn_context->mcast = cpu_to_be32(
1384				promisc << SET_PORT_MC_PROMISC_SHIFT |
1385				port_info->base_qpn);
1386			break;
1387		case MLX4_SET_PORT_GENERAL:
1388			gen_context = inbox->buf;
1389
1390			if (gen_context->flags & MLX4_FLAG_V_MTU_MASK)
1391				mlx4_en_set_port_mtu(dev, slave, port,
1392						     gen_context);
1393
1394			if (gen_context->flags2 & MLX4_FLAG2_V_USER_MTU_MASK)
1395				mlx4_en_set_port_user_mtu(dev, slave, port,
1396							  gen_context);
1397
1398			if (gen_context->flags &
1399			    (MLX4_FLAG_V_PPRX_MASK | MLX4_FLAG_V_PPTX_MASK))
1400				mlx4_en_set_port_global_pause(dev, slave,
1401							      gen_context);
1402
1403			break;
1404		case MLX4_SET_PORT_GID_TABLE:
			/* Handle multiple entries: loop over the number of
			 * GIDs assigned to this guest.
			 * 1. Check that the GIDs passed by the slave contain
			 *    no duplicates.
			 */
1409			num_gids = mlx4_get_slave_num_gids(dev, slave, port);
1410			base = mlx4_get_base_gid_ix(dev, slave, port);
1411			gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
1412			for (i = 0; i < num_gids; gid_entry_mbox++, i++) {
1413				if (!memcmp(gid_entry_mbox->raw, zgid_entry.raw,
1414					    sizeof(zgid_entry)))
1415					continue;
1416				gid_entry_mb1 = gid_entry_mbox + 1;
1417				for (j = i + 1; j < num_gids; gid_entry_mb1++, j++) {
1418					if (!memcmp(gid_entry_mb1->raw,
1419						    zgid_entry.raw, sizeof(zgid_entry)))
1420						continue;
1421					if (!memcmp(gid_entry_mb1->raw, gid_entry_mbox->raw,
1422						    sizeof(gid_entry_mbox->raw))) {
1423						/* found duplicate */
1424						return -EINVAL;
1425					}
1426				}
1427			}
1428
			/* 2. Check that the slave's GIDs do not duplicate any
			 *    OTHER entries in the port GID table.
			 */
1432
1433			mutex_lock(&(priv->port[port].gid_table.mutex));
1434			for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) {
1435				if (i >= base && i < base + num_gids)
1436					continue; /* don't compare to slave's current gids */
1437				gid_entry_tbl = &priv->port[port].gid_table.roce_gids[i];
1438				if (!memcmp(gid_entry_tbl->raw, zgid_entry.raw, sizeof(zgid_entry)))
1439					continue;
1440				gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
1441				for (j = 0; j < num_gids; gid_entry_mbox++, j++) {
1442					if (!memcmp(gid_entry_mbox->raw, zgid_entry.raw,
1443						    sizeof(zgid_entry)))
1444						continue;
1445					if (!memcmp(gid_entry_mbox->raw, gid_entry_tbl->raw,
1446						    sizeof(gid_entry_tbl->raw))) {
1447						/* found duplicate */
1448						mlx4_warn(dev, "requested gid entry for slave:%d is a duplicate of gid at index %d\n",
1449							  slave, i);
1450						mutex_unlock(&(priv->port[port].gid_table.mutex));
1451						return -EINVAL;
1452					}
1453				}
1454			}
1455
1456			/* insert slave GIDs with memcpy, starting at slave's base index */
1457			gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
1458			for (i = 0, offset = base; i < num_gids; gid_entry_mbox++, offset++, i++)
1459				memcpy(priv->port[port].gid_table.roce_gids[offset].raw,
1460				       gid_entry_mbox->raw, MLX4_ROCE_GID_ENTRY_SIZE);
1461
1462			/* Now, copy roce port gids table to current mailbox for passing to FW */
1463			gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
1464			for (i = 0; i < MLX4_ROCE_MAX_GIDS; gid_entry_mbox++, i++)
1465				memcpy(gid_entry_mbox->raw,
1466				       priv->port[port].gid_table.roce_gids[i].raw,
1467				       MLX4_ROCE_GID_ENTRY_SIZE);
1468
1469			err = mlx4_cmd(dev, inbox->dma, in_mod & 0xffff, op_mod,
1470				       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
1471				       MLX4_CMD_NATIVE);
1472			mutex_unlock(&(priv->port[port].gid_table.mutex));
1473			return err;
1474		}
1475
1476		return mlx4_cmd(dev, inbox->dma, in_mod & 0xffff, op_mod,
1477				MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
1478				MLX4_CMD_NATIVE);
1479	}
1480
1481	/* Slaves are not allowed to SET_PORT beacon (LED) blink */
1482	if (op_mod == MLX4_SET_PORT_BEACON_OPCODE) {
1483		mlx4_warn(dev, "denying SET_PORT Beacon slave:%d\n", slave);
1484		return -EPERM;
1485	}
1486
1487	/* For IB, we only consider:
1488	 * - The capability mask, which is set to the aggregate of all
1489	 *   slave function capabilities
	 * - The QKey violation counter - reset according to each request.
1491	 */
1492
1493	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
1494		reset_qkey_viols = (*(u8 *) inbox->buf) & 0x40;
1495		new_cap_mask = ((__be32 *) inbox->buf)[2];
1496	} else {
1497		reset_qkey_viols = ((u8 *) inbox->buf)[3] & 0x1;
1498		new_cap_mask = ((__be32 *) inbox->buf)[1];
1499	}
1500
1501	/* slave may not set the IS_SM capability for the port */
1502	if (slave != mlx4_master_func_num(dev) &&
1503	    (be32_to_cpu(new_cap_mask) & MLX4_PORT_CAP_IS_SM))
1504		return -EINVAL;
1505
1506	/* No DEV_MGMT in multifunc mode */
1507	if (mlx4_is_mfunc(dev) &&
1508	    (be32_to_cpu(new_cap_mask) & MLX4_PORT_CAP_DEV_MGMT_SUP))
1509		return -EINVAL;
1510
1511	agg_cap_mask = 0;
1512	slave_cap_mask =
1513		priv->mfunc.master.slave_state[slave].ib_cap_mask[port];
1514	priv->mfunc.master.slave_state[slave].ib_cap_mask[port] = new_cap_mask;
1515	for (i = 0; i < dev->num_slaves; i++)
1516		agg_cap_mask |=
1517			priv->mfunc.master.slave_state[i].ib_cap_mask[port];
1518
	/* Only clear the mailbox for guests.  The master may be setting
	 * MTU or PKEY table size.
	 */
1522	if (slave != dev->caps.function)
1523		memset(inbox->buf, 0, 256);
1524	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
1525		*(u8 *) inbox->buf	   |= !!reset_qkey_viols << 6;
1526		((__be32 *) inbox->buf)[2] = agg_cap_mask;
1527	} else {
1528		((u8 *) inbox->buf)[3]     |= !!reset_qkey_viols;
1529		((__be32 *) inbox->buf)[1] = agg_cap_mask;
1530	}
1531
1532	err = mlx4_cmd(dev, inbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
1533		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
1534	if (err)
1535		priv->mfunc.master.slave_state[slave].ib_cap_mask[port] =
1536			slave_cap_mask;
1537	return err;
1538}
1539
1540int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave,
1541			  struct mlx4_vhcr *vhcr,
1542			  struct mlx4_cmd_mailbox *inbox,
1543			  struct mlx4_cmd_mailbox *outbox,
1544			  struct mlx4_cmd_info *cmd)
1545{
1546	int port = mlx4_slave_convert_port(
1547			dev, slave, vhcr->in_modifier & 0xFF);
1548
1549	if (port < 0)
1550		return -EINVAL;
1551
1552	vhcr->in_modifier = (vhcr->in_modifier & ~0xFF) |
1553			    (port & 0xFF);
1554
1555	return mlx4_common_set_port(dev, slave, vhcr->in_modifier,
1556				    vhcr->op_modifier, inbox);
1557}
1558
1559/* bit locations for set port command with zero op modifier */
1560enum {
1561	MLX4_SET_PORT_VL_CAP	 = 4, /* bits 7:4 */
1562	MLX4_SET_PORT_MTU_CAP	 = 12, /* bits 15:12 */
1563	MLX4_CHANGE_PORT_PKEY_TBL_SZ = 20,
1564	MLX4_CHANGE_PORT_VL_CAP	 = 21,
1565	MLX4_CHANGE_PORT_MTU_CAP = 22,
1566};
1567
1568int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz)
1569{
1570	struct mlx4_cmd_mailbox *mailbox;
1571	int err, vl_cap, pkey_tbl_flag = 0;
1572
1573	if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
1574		return 0;
1575
1576	mailbox = mlx4_alloc_cmd_mailbox(dev);
1577	if (IS_ERR(mailbox))
1578		return PTR_ERR(mailbox);
1579
1580	((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port];
1581
1582	if (pkey_tbl_sz >= 0 && mlx4_is_master(dev)) {
1583		pkey_tbl_flag = 1;
1584		((__be16 *) mailbox->buf)[20] = cpu_to_be16(pkey_tbl_sz);
1585	}
1586
1587	/* IB VL CAP enum isn't used by the firmware, just numerical values */
1588	for (vl_cap = 8; vl_cap >= 1; vl_cap >>= 1) {
1589		((__be32 *) mailbox->buf)[0] = cpu_to_be32(
1590			(1 << MLX4_CHANGE_PORT_MTU_CAP) |
1591			(1 << MLX4_CHANGE_PORT_VL_CAP)  |
1592			(pkey_tbl_flag << MLX4_CHANGE_PORT_PKEY_TBL_SZ) |
1593			(dev->caps.port_ib_mtu[port] << MLX4_SET_PORT_MTU_CAP) |
1594			(vl_cap << MLX4_SET_PORT_VL_CAP));
1595		err = mlx4_cmd(dev, mailbox->dma, port,
1596			       MLX4_SET_PORT_IB_OPCODE, MLX4_CMD_SET_PORT,
1597			       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
1598		if (err != -ENOMEM)
1599			break;
1600	}
1601
1602	mlx4_free_cmd_mailbox(dev, mailbox);
1603	return err;
1604}
1605
1606#define SET_PORT_ROCE_2_FLAGS          0x10
1607#define MLX4_SET_PORT_ROCE_V1_V2       0x2
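/* Program the port "general" context: MTU, global pause (pptx/pprx) and
 * per-priority flow control (pfctx/pfcrx) masks, plus the RoCE v1/v2 mode
 * when the device supports it.  Global pause is only asserted when the
 * corresponding per-priority mask is zero, since the two are mutually
 * exclusive (hence the "pptx * (!pfctx)" construction below).
 */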
1608int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
1609			  u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx)
1610{
1611	struct mlx4_cmd_mailbox *mailbox;
1612	struct mlx4_set_port_general_context *context;
1613	int err;
1614	u32 in_mod;
1615
1616	mailbox = mlx4_alloc_cmd_mailbox(dev);
1617	if (IS_ERR(mailbox))
1618		return PTR_ERR(mailbox);
1619	context = mailbox->buf;
1620	context->flags = SET_PORT_GEN_ALL_VALID;
1621	context->mtu = cpu_to_be16(mtu);
1622	context->pptx = (pptx * (!pfctx)) << 7;
1623	context->pfctx = pfctx;
1624	context->pprx = (pprx * (!pfcrx)) << 7;
1625	context->pfcrx = pfcrx;
1626
1627	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) {
1628		context->flags |= SET_PORT_ROCE_2_FLAGS;
1629		context->roce_mode |=
1630			MLX4_SET_PORT_ROCE_V1_V2 << 4;
1631	}
1632	in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
1633	err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
1634		       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
1635		       MLX4_CMD_WRAPPED);
1636
1637	mlx4_free_cmd_mailbox(dev, mailbox);
1638	return err;
1639}
1640EXPORT_SYMBOL(mlx4_SET_PORT_general);
1641
1642int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
1643			   u8 promisc)
1644{
1645	struct mlx4_cmd_mailbox *mailbox;
1646	struct mlx4_set_port_rqp_calc_context *context;
1647	int err;
1648	u32 in_mod;
1649	u32 m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ?
1650		MCAST_DIRECT : MCAST_DEFAULT;
1651
1652	if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
1653		return 0;
1654
1655	mailbox = mlx4_alloc_cmd_mailbox(dev);
1656	if (IS_ERR(mailbox))
1657		return PTR_ERR(mailbox);
1658	context = mailbox->buf;
1659	context->base_qpn = cpu_to_be32(base_qpn);
1660	context->n_mac = dev->caps.log_num_macs;
1661	context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT |
1662				       base_qpn);
1663	context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT |
1664				     base_qpn);
1665	context->intra_no_vlan = 0;
1666	context->no_vlan = MLX4_NO_VLAN_IDX;
1667	context->intra_vlan_miss = 0;
1668	context->vlan_miss = MLX4_VLAN_MISS_IDX;
1669
1670	in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port;
1671	err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
1672		       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
1673		       MLX4_CMD_WRAPPED);
1674
1675	mlx4_free_cmd_mailbox(dev, mailbox);
1676	return err;
1677}
1678EXPORT_SYMBOL(mlx4_SET_PORT_qpn_calc);
1679
1680int mlx4_SET_PORT_user_mtu(struct mlx4_dev *dev, u8 port, u16 user_mtu)
1681{
1682	struct mlx4_cmd_mailbox *mailbox;
1683	struct mlx4_set_port_general_context *context;
1684	u32 in_mod;
1685	int err;
1686
1687	mailbox = mlx4_alloc_cmd_mailbox(dev);
1688	if (IS_ERR(mailbox))
1689		return PTR_ERR(mailbox);
1690	context = mailbox->buf;
1691	context->flags2 |= MLX4_FLAG2_V_USER_MTU_MASK;
1692	context->user_mtu = cpu_to_be16(user_mtu);
1693
1694	in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
1695	err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
1696		       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
1697		       MLX4_CMD_WRAPPED);
1698
1699	mlx4_free_cmd_mailbox(dev, mailbox);
1700	return err;
1701}
1702EXPORT_SYMBOL(mlx4_SET_PORT_user_mtu);
1703
1704int mlx4_SET_PORT_user_mac(struct mlx4_dev *dev, u8 port, u8 *user_mac)
1705{
1706	struct mlx4_cmd_mailbox *mailbox;
1707	struct mlx4_set_port_general_context *context;
1708	u32 in_mod;
1709	int err;
1710
1711	mailbox = mlx4_alloc_cmd_mailbox(dev);
1712	if (IS_ERR(mailbox))
1713		return PTR_ERR(mailbox);
1714	context = mailbox->buf;
1715	context->flags2 |= MLX4_FLAG2_V_USER_MAC_MASK;
1716	memcpy(context->user_mac, user_mac, sizeof(context->user_mac));
1717
1718	in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
1719	err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
1720		       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
1721		       MLX4_CMD_NATIVE);
1722
1723	mlx4_free_cmd_mailbox(dev, mailbox);
1724	return err;
1725}
1726EXPORT_SYMBOL(mlx4_SET_PORT_user_mac);
1727
1728int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port, u8 ignore_fcs_value)
1729{
1730	struct mlx4_cmd_mailbox *mailbox;
1731	struct mlx4_set_port_general_context *context;
1732	u32 in_mod;
1733	int err;
1734
1735	mailbox = mlx4_alloc_cmd_mailbox(dev);
1736	if (IS_ERR(mailbox))
1737		return PTR_ERR(mailbox);
1738	context = mailbox->buf;
1739	context->flags2 |= MLX4_FLAG2_V_IGNORE_FCS_MASK;
1740	if (ignore_fcs_value)
1741		context->ignore_fcs |= MLX4_IGNORE_FCS_MASK;
1742	else
1743		context->ignore_fcs &= ~MLX4_IGNORE_FCS_MASK;
1744
1745	in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
1746	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
1747		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
1748
1749	mlx4_free_cmd_mailbox(dev, mailbox);
1750	return err;
1751}
1752EXPORT_SYMBOL(mlx4_SET_PORT_fcs_check);
1753
1754enum {
1755	VXLAN_ENABLE_MODIFY	= 1 << 7,
1756	VXLAN_STEERING_MODIFY	= 1 << 6,
1757
1758	VXLAN_ENABLE		= 1 << 7,
1759};
1760
1761struct mlx4_set_port_vxlan_context {
1762	u32	reserved1;
1763	u8	modify_flags;
1764	u8	reserved2;
1765	u8	enable_flags;
1766	u8	steering;
1767};
1768
1769int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering, int enable)
1770{
1771	int err;
1772	u32 in_mod;
1773	struct mlx4_cmd_mailbox *mailbox;
1774	struct mlx4_set_port_vxlan_context  *context;
1775
1776	mailbox = mlx4_alloc_cmd_mailbox(dev);
1777	if (IS_ERR(mailbox))
1778		return PTR_ERR(mailbox);
1779	context = mailbox->buf;
1780	memset(context, 0, sizeof(*context));
1781
1782	context->modify_flags = VXLAN_ENABLE_MODIFY | VXLAN_STEERING_MODIFY;
1783	if (enable)
1784		context->enable_flags = VXLAN_ENABLE;
1785	context->steering  = steering;
1786
1787	in_mod = MLX4_SET_PORT_VXLAN << 8 | port;
1788	err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
1789		       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
1790		       MLX4_CMD_NATIVE);
1791
1792	mlx4_free_cmd_mailbox(dev, mailbox);
1793	return err;
1794}
1795EXPORT_SYMBOL(mlx4_SET_PORT_VXLAN);
1796
1797int mlx4_SET_PORT_BEACON(struct mlx4_dev *dev, u8 port, u16 time)
1798{
1799	int err;
1800	struct mlx4_cmd_mailbox *mailbox;
1801
1802	mailbox = mlx4_alloc_cmd_mailbox(dev);
1803	if (IS_ERR(mailbox))
1804		return PTR_ERR(mailbox);
1805
1806	*((__be32 *)mailbox->buf) = cpu_to_be32(time);
1807
1808	err = mlx4_cmd(dev, mailbox->dma, port, MLX4_SET_PORT_BEACON_OPCODE,
1809		       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
1810		       MLX4_CMD_NATIVE);
1811
1812	mlx4_free_cmd_mailbox(dev, mailbox);
1813	return err;
1814}
1815EXPORT_SYMBOL(mlx4_SET_PORT_BEACON);
1816
1817int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
1818				struct mlx4_vhcr *vhcr,
1819				struct mlx4_cmd_mailbox *inbox,
1820				struct mlx4_cmd_mailbox *outbox,
1821				struct mlx4_cmd_info *cmd)
1822{
1823	int err = 0;
1824
1825	return err;
1826}
1827
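/* SET_MCAST_FLTR passes everything in the command's immediate input parameter
 * rather than a mailbox: the multicast MAC occupies the low bits, bit 63
 * carries the "clear" flag, and 'mode' is sent as the opcode modifier.
 * An illustrative caller (the MLX4_MCAST_* mode constants are assumed to be
 * the ones used elsewhere in the driver) might do:
 *
 *	err = mlx4_SET_MCAST_FLTR(dev, port, mac, 0, MLX4_MCAST_CONFIG);
 *	if (!err)
 *		err = mlx4_SET_MCAST_FLTR(dev, port, 0, 0, MLX4_MCAST_ENABLE);
 */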
1828int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port,
1829			u64 mac, u64 clear, u8 mode)
1830{
1831	return mlx4_cmd(dev, (mac | (clear << 63)), port, mode,
1832			MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B,
1833			MLX4_CMD_WRAPPED);
1834}
1835EXPORT_SYMBOL(mlx4_SET_MCAST_FLTR);
1836
1837int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave,
1838			       struct mlx4_vhcr *vhcr,
1839			       struct mlx4_cmd_mailbox *inbox,
1840			       struct mlx4_cmd_mailbox *outbox,
1841			       struct mlx4_cmd_info *cmd)
1842{
1843	int err = 0;
1844
1845	return err;
1846}
1847
1848int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
1849				struct mlx4_vhcr *vhcr,
1850				struct mlx4_cmd_mailbox *inbox,
1851				struct mlx4_cmd_mailbox *outbox,
1852				struct mlx4_cmd_info *cmd)
1853{
1854	return 0;
1855}
1856
1857int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
1858				 int *slave_id)
1859{
1860	struct mlx4_priv *priv = mlx4_priv(dev);
1861	int i, found_ix = -1;
1862	int vf_gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS;
1863	struct mlx4_slaves_pport slaves_pport;
	unsigned int num_vfs;
1865	int slave_gid;
1866
1867	if (!mlx4_is_mfunc(dev))
1868		return -EINVAL;
1869
1870	slaves_pport = mlx4_phys_to_slaves_pport(dev, port);
1871	num_vfs = bitmap_weight(slaves_pport.slaves,
1872				dev->persist->num_vfs + 1) - 1;
1873
1874	for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) {
1875		if (!memcmp(priv->port[port].gid_table.roce_gids[i].raw, gid,
1876			    MLX4_ROCE_GID_ENTRY_SIZE)) {
1877			found_ix = i;
1878			break;
1879		}
1880	}
1881
1882	if (found_ix >= 0) {
		/* Calculate slave_gid, the slave's ordinal within this port's
		 * GID table; this is not yet a globally unique slave number.
		 */
1886		if (found_ix < MLX4_ROCE_PF_GIDS)
1887			slave_gid = 0;
1888		else if (found_ix < MLX4_ROCE_PF_GIDS + (vf_gids % num_vfs) *
1889			 (vf_gids / num_vfs + 1))
1890			slave_gid = ((found_ix - MLX4_ROCE_PF_GIDS) /
1891				     (vf_gids / num_vfs + 1)) + 1;
1892		else
1893			slave_gid =
1894			((found_ix - MLX4_ROCE_PF_GIDS -
1895			  ((vf_gids % num_vfs) * ((vf_gids / num_vfs + 1)))) /
1896			 (vf_gids / num_vfs)) + vf_gids % num_vfs + 1;
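
		/* Worked example (illustrative values only, assuming
		 * MLX4_ROCE_MAX_GIDS = 128, MLX4_ROCE_PF_GIDS = 16 and
		 * num_vfs = 3): vf_gids = 112, so 112 % 3 = 1 VF gets
		 * 112 / 3 + 1 = 38 GIDs (indices 16..53) and the other two
		 * VFs get 37 each (54..90 and 91..127).  found_ix = 60 then
		 * yields slave_gid = ((60 - 16 - 38) / 37) + 1 + 1 = 2.
		 */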
1897
1898		/* Calculate the globally unique slave id */
1899		if (slave_gid) {
1900			struct mlx4_active_ports exclusive_ports;
1901			struct mlx4_active_ports actv_ports;
1902			struct mlx4_slaves_pport slaves_pport_actv;
			unsigned int max_port_p_one;
1904			int num_vfs_before = 0;
1905			int candidate_slave_gid;
1906
			/* Count how many VFs are assigned to the preceding ports, if any */
1908			for (i = 1; i < port; i++) {
1909				bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
1910				set_bit(i - 1, exclusive_ports.ports);
1911				slaves_pport_actv =
1912					mlx4_phys_to_slaves_pport_actv(
1913							dev, &exclusive_ports);
1914				num_vfs_before += bitmap_weight(
1915						slaves_pport_actv.slaves,
1916						dev->persist->num_vfs + 1);
1917			}
1918
			/* candidate_slave_gid isn't necessarily the correct slave, but
			 * it has the same number of ports and is assigned to the same
			 * ports as the real slave we're looking for. On a dual-port VF,
			 * slave_gid = [number of single-port VFs on port <port>] +
			 * [offset of the current slave from the first dual-port VF] +
			 * 1 (for the PF).
			 */
1926			candidate_slave_gid = slave_gid + num_vfs_before;
1927
1928			actv_ports = mlx4_get_active_ports(dev, candidate_slave_gid);
1929			max_port_p_one = find_first_bit(
1930				actv_ports.ports, dev->caps.num_ports) +
1931				bitmap_weight(actv_ports.ports,
1932					      dev->caps.num_ports) + 1;
1933
1934			/* Calculate the real slave number */
1935			for (i = 1; i < max_port_p_one; i++) {
1936				if (i == port)
1937					continue;
1938				bitmap_zero(exclusive_ports.ports,
1939					    dev->caps.num_ports);
1940				set_bit(i - 1, exclusive_ports.ports);
1941				slaves_pport_actv =
1942					mlx4_phys_to_slaves_pport_actv(
1943						dev, &exclusive_ports);
1944				slave_gid += bitmap_weight(
1945						slaves_pport_actv.slaves,
1946						dev->persist->num_vfs + 1);
1947			}
1948		}
1949		*slave_id = slave_gid;
1950	}
1951
1952	return (found_ix >= 0) ? 0 : -EINVAL;
1953}
1954EXPORT_SYMBOL(mlx4_get_slave_from_roce_gid);
1955
1956int mlx4_get_roce_gid_from_slave(struct mlx4_dev *dev, int port, int slave_id,
1957				 u8 *gid)
1958{
1959	struct mlx4_priv *priv = mlx4_priv(dev);
1960
1961	if (!mlx4_is_master(dev))
1962		return -EINVAL;
1963
1964	memcpy(gid, priv->port[port].gid_table.roce_gids[slave_id].raw,
1965	       MLX4_ROCE_GID_ENTRY_SIZE);
1966	return 0;
1967}
1968EXPORT_SYMBOL(mlx4_get_roce_gid_from_slave);
1969
1970/* Cable Module Info */
1971#define MODULE_INFO_MAX_READ 48
1972
1973#define I2C_ADDR_LOW  0x50
1974#define I2C_ADDR_HIGH 0x51
1975#define I2C_PAGE_SIZE 256
1976#define I2C_HIGH_PAGE_SIZE 128
1977
1978/* Module Info Data */
1979struct mlx4_cable_info {
1980	u8	i2c_addr;
1981	u8	page_num;
1982	__be16	dev_mem_address;
1983	__be16	reserved1;
1984	__be16	size;
1985	__be32	reserved2[2];
1986	u8	data[MODULE_INFO_MAX_READ];
1987};
1988
1989enum cable_info_err {
1990	 CABLE_INF_INV_PORT      = 0x1,
1991	 CABLE_INF_OP_NOSUP      = 0x2,
1992	 CABLE_INF_NOT_CONN      = 0x3,
1993	 CABLE_INF_NO_EEPRM      = 0x4,
1994	 CABLE_INF_PAGE_ERR      = 0x5,
1995	 CABLE_INF_INV_ADDR      = 0x6,
1996	 CABLE_INF_I2C_ADDR      = 0x7,
1997	 CABLE_INF_QSFP_VIO      = 0x8,
1998	 CABLE_INF_I2C_BUSY      = 0x9,
1999};
2000
#define MAD_STATUS_2_CABLE_ERR(mad_status) (((mad_status) >> 8) & 0xFF)
2002
2003static inline const char *cable_info_mad_err_str(u16 mad_status)
2004{
2005	u8 err = MAD_STATUS_2_CABLE_ERR(mad_status);
2006
2007	switch (err) {
2008	case CABLE_INF_INV_PORT:
2009		return "invalid port selected";
2010	case CABLE_INF_OP_NOSUP:
2011		return "operation not supported for this port (the port is of type CX4 or internal)";
2012	case CABLE_INF_NOT_CONN:
2013		return "cable is not connected";
2014	case CABLE_INF_NO_EEPRM:
2015		return "the connected cable has no EPROM (passive copper cable)";
2016	case CABLE_INF_PAGE_ERR:
2017		return "page number is greater than 15";
2018	case CABLE_INF_INV_ADDR:
2019		return "invalid device_address or size (that is, size equals 0 or address+size is greater than 256)";
2020	case CABLE_INF_I2C_ADDR:
2021		return "invalid I2C slave address";
2022	case CABLE_INF_QSFP_VIO:
2023		return "at least one cable violates the QSFP specification and ignores the modsel signal";
2024	case CABLE_INF_I2C_BUSY:
2025		return "I2C bus is constantly busy";
2026	}
2027	return "Unknown Error";
2028}
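
/* Example (purely illustrative): a response status of 0x0300 decodes as
 * MAD_STATUS_2_CABLE_ERR(0x0300) = 0x03 = CABLE_INF_NOT_CONN, so the warnings
 * printed by the callers below would end with "cable is not connected".
 */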
2029
2030static int mlx4_get_module_id(struct mlx4_dev *dev, u8 port, u8 *module_id)
2031{
2032	struct mlx4_cmd_mailbox *inbox, *outbox;
2033	struct mlx4_mad_ifc *inmad, *outmad;
2034	struct mlx4_cable_info *cable_info;
2035	int ret;
2036
2037	inbox = mlx4_alloc_cmd_mailbox(dev);
2038	if (IS_ERR(inbox))
2039		return PTR_ERR(inbox);
2040
2041	outbox = mlx4_alloc_cmd_mailbox(dev);
2042	if (IS_ERR(outbox)) {
2043		mlx4_free_cmd_mailbox(dev, inbox);
2044		return PTR_ERR(outbox);
2045	}
2046
2047	inmad = (struct mlx4_mad_ifc *)(inbox->buf);
2048	outmad = (struct mlx4_mad_ifc *)(outbox->buf);
2049
2050	inmad->method = 0x1; /* Get */
2051	inmad->class_version = 0x1;
2052	inmad->mgmt_class = 0x1;
2053	inmad->base_version = 0x1;
2054	inmad->attr_id = cpu_to_be16(0xFF60); /* Module Info */
2055
2056	cable_info = (struct mlx4_cable_info *)inmad->data;
2057	cable_info->dev_mem_address = 0;
2058	cable_info->page_num = 0;
2059	cable_info->i2c_addr = I2C_ADDR_LOW;
2060	cable_info->size = cpu_to_be16(1);
2061
2062	ret = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3,
2063			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
2064			   MLX4_CMD_NATIVE);
2065	if (ret)
2066		goto out;
2067
2068	if (be16_to_cpu(outmad->status)) {
		/* MAD returned with bad status */
2070		ret = be16_to_cpu(outmad->status);
2071		mlx4_warn(dev,
2072			  "MLX4_CMD_MAD_IFC Get Module ID attr(%x) port(%d) i2c_addr(%x) offset(%d) size(%d): Response Mad Status(%x) - %s\n",
2073			  0xFF60, port, I2C_ADDR_LOW, 0, 1, ret,
2074			  cable_info_mad_err_str(ret));
2075		ret = -ret;
2076		goto out;
2077	}
2078	cable_info = (struct mlx4_cable_info *)outmad->data;
2079	*module_id = cable_info->data[0];
2080out:
2081	mlx4_free_cmd_mailbox(dev, inbox);
2082	mlx4_free_cmd_mailbox(dev, outbox);
2083	return ret;
2084}
2085
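/* SFP/SFP+ EEPROM is read here as two flat 256-byte regions, the first behind
 * I2C address 0x50 and the second behind 0x51 (the SFF-8472 A0h/A2h areas, as
 * assumed here), so page_num stays 0 and only the I2C address and offset are
 * adjusted. For example, a caller-supplied offset of 300 becomes
 * i2c_addr = I2C_ADDR_HIGH, offset = 300 - 256 = 44.
 */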
2086static void mlx4_sfp_eeprom_params_set(u8 *i2c_addr, u8 *page_num, u16 *offset)
2087{
2088	*i2c_addr = I2C_ADDR_LOW;
2089	*page_num = 0;
2090
2091	if (*offset < I2C_PAGE_SIZE)
2092		return;
2093
2094	*i2c_addr = I2C_ADDR_HIGH;
2095	*offset -= I2C_PAGE_SIZE;
2096}
2097
2098static void mlx4_qsfp_eeprom_params_set(u8 *i2c_addr, u8 *page_num, u16 *offset)
2099{
	/* Offsets 0-255 belong to page 0.
	 * Offsets 256-639 belong to pages 01, 02, 03.
	 * For example, offset 400 is page 02: 1 + (400 - 256) / 128 = 2.
	 * The offset is then made page-relative below, so offset 400 on
	 * page 02 becomes 400 - 2 * 128 = 144 (upper pages are mapped at
	 * bytes 128-255 of the module's I2C address space).
	 */
2104	if (*offset < I2C_PAGE_SIZE)
2105		*page_num = 0;
2106	else
2107		*page_num = 1 + (*offset - I2C_PAGE_SIZE) / I2C_HIGH_PAGE_SIZE;
2108	*i2c_addr = I2C_ADDR_LOW;
2109	*offset -= *page_num * I2C_HIGH_PAGE_SIZE;
2110}
2111
/**
 * mlx4_get_module_info - Read cable module EEPROM data
 * @dev: mlx4_dev.
 * @port: port number.
 * @offset: byte offset in the EEPROM to start reading from.
 * @size: number of bytes to read.
 * @data: output buffer for the requested data.
 *
 * Reads cable module EEPROM data and copies it into the @data buffer.
 * Returns the number of bytes read on success, or a negative error code.
 */
2125int mlx4_get_module_info(struct mlx4_dev *dev, u8 port,
2126			 u16 offset, u16 size, u8 *data)
2127{
2128	struct mlx4_cmd_mailbox *inbox, *outbox;
2129	struct mlx4_mad_ifc *inmad, *outmad;
2130	struct mlx4_cable_info *cable_info;
2131	u8 module_id, i2c_addr, page_num;
2132	int ret;
2133
2134	if (size > MODULE_INFO_MAX_READ)
2135		size = MODULE_INFO_MAX_READ;
2136
2137	ret = mlx4_get_module_id(dev, port, &module_id);
2138	if (ret)
2139		return ret;
2140
2141	switch (module_id) {
2142	case MLX4_MODULE_ID_SFP:
2143		mlx4_sfp_eeprom_params_set(&i2c_addr, &page_num, &offset);
2144		break;
2145	case MLX4_MODULE_ID_QSFP:
2146	case MLX4_MODULE_ID_QSFP_PLUS:
2147	case MLX4_MODULE_ID_QSFP28:
2148		mlx4_qsfp_eeprom_params_set(&i2c_addr, &page_num, &offset);
2149		break;
2150	default:
2151		mlx4_err(dev, "Module ID not recognized: %#x\n", module_id);
2152		return -EINVAL;
2153	}
2154
2155	inbox = mlx4_alloc_cmd_mailbox(dev);
2156	if (IS_ERR(inbox))
2157		return PTR_ERR(inbox);
2158
2159	outbox = mlx4_alloc_cmd_mailbox(dev);
2160	if (IS_ERR(outbox)) {
2161		mlx4_free_cmd_mailbox(dev, inbox);
2162		return PTR_ERR(outbox);
2163	}
2164
2165	inmad = (struct mlx4_mad_ifc *)(inbox->buf);
2166	outmad = (struct mlx4_mad_ifc *)(outbox->buf);
2167
2168	inmad->method = 0x1; /* Get */
2169	inmad->class_version = 0x1;
2170	inmad->mgmt_class = 0x1;
2171	inmad->base_version = 0x1;
2172	inmad->attr_id = cpu_to_be16(0xFF60); /* Module Info */
2173
2174	if (offset < I2C_PAGE_SIZE && offset + size > I2C_PAGE_SIZE)
		/* Cross-page reads are not allowed; clamp the read so that
		 * it ends at offset 256, the end of the low page.
		 */
2178		size -= offset + size - I2C_PAGE_SIZE;
2179
2180	cable_info = (struct mlx4_cable_info *)inmad->data;
2181	cable_info->dev_mem_address = cpu_to_be16(offset);
2182	cable_info->page_num = page_num;
2183	cable_info->i2c_addr = i2c_addr;
2184	cable_info->size = cpu_to_be16(size);
2185
2186	ret = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3,
2187			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
2188			   MLX4_CMD_NATIVE);
2189	if (ret)
2190		goto out;
2191
2192	if (be16_to_cpu(outmad->status)) {
		/* MAD returned with bad status */
2194		ret = be16_to_cpu(outmad->status);
2195		mlx4_warn(dev,
2196			  "MLX4_CMD_MAD_IFC Get Module info attr(%x) port(%d) i2c_addr(%x) offset(%d) size(%d): Response Mad Status(%x) - %s\n",
2197			  0xFF60, port, i2c_addr, offset, size,
2198			  ret, cable_info_mad_err_str(ret));
2199
2200		if (i2c_addr == I2C_ADDR_HIGH &&
2201		    MAD_STATUS_2_CABLE_ERR(ret) == CABLE_INF_I2C_ADDR)
			/* Some SFP cables do not support I2C slave
			 * address 0x51 (high page); abort silently.
			 */
2205			ret = 0;
2206		else
2207			ret = -ret;
2208		goto out;
2209	}
2210	cable_info = (struct mlx4_cable_info *)outmad->data;
2211	memcpy(data, cable_info->data, size);
2212	ret = size;
2213out:
2214	mlx4_free_cmd_mailbox(dev, inbox);
2215	mlx4_free_cmd_mailbox(dev, outbox);
2216	return ret;
2217}
2218EXPORT_SYMBOL(mlx4_get_module_info);
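
/* Illustrative usage only (variable names here are hypothetical): a single
 * call reads at most MODULE_INFO_MAX_READ bytes and never crosses the
 * low-page boundary, so callers such as the ethtool get_module_eeprom path
 * are expected to loop, advancing by the returned length:
 *
 *	int i = 0, ret;
 *
 *	while (i < len) {
 *		ret = mlx4_get_module_info(dev, port, off + i, len - i,
 *					   buf + i);
 *		if (ret < 0)
 *			return ret;
 *		if (!ret)		// nothing more to read
 *			break;
 *		i += ret;
 *	}
 */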
2219
2220int mlx4_max_tc(struct mlx4_dev *dev)
2221{
2222	u8 num_tc = dev->caps.max_tc_eth;
2223
2224	if (!num_tc)
2225		num_tc = MLX4_TC_MAX_NUMBER;
2226
2227	return num_tc;
2228}
2229EXPORT_SYMBOL(mlx4_max_tc);
2230