/*-
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/dev/mlx5/mlx5_core/mlx5_vport.c 302139 2016-06-23 09:23:37Z hselasky $
 */
28285307Sed#include <linux/etherdevice.h>
29285307Sed#include <dev/mlx5/driver.h>
30285307Sed#include <dev/mlx5/vport.h>
31285307Sed#include "mlx5_core.h"
32285307Sed
33285307Sedu8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod)
34285307Sed{
35285307Sed	u32 in[MLX5_ST_SZ_DW(query_vport_state_in)];
36285307Sed	u32 out[MLX5_ST_SZ_DW(query_vport_state_out)];
37285307Sed	int err;
38285307Sed
39285307Sed	memset(in, 0, sizeof(in));
40285307Sed
41285307Sed	MLX5_SET(query_vport_state_in, in, opcode,
42285307Sed		 MLX5_CMD_OP_QUERY_VPORT_STATE);
43285307Sed	MLX5_SET(query_vport_state_in, in, op_mod, opmod);
44285307Sed
45285307Sed	err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out,
46285307Sed					 sizeof(out));
47285307Sed	if (err)
48285307Sed		mlx5_core_warn(mdev, "MLX5_CMD_OP_QUERY_VPORT_STATE failed\n");
49285307Sed
50285307Sed	return MLX5_GET(query_vport_state_out, out, state);
51285307Sed}
52285307SedEXPORT_SYMBOL_GPL(mlx5_query_vport_state);
53285307Sed
54285307Sedstatic int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u32 vport,
55285307Sed					u32 *out, int outlen)
56285307Sed{
57285307Sed	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
58285307Sed
59285307Sed	memset(in, 0, sizeof(in));
60285307Sed
61285307Sed	MLX5_SET(query_nic_vport_context_in, in, opcode,
62285307Sed		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
63285307Sed
64285307Sed	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
65285307Sed	if (vport)
66285307Sed		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
67285307Sed
68285307Sed	return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
69285307Sed}
70285307Sed
71285307Sedint mlx5_vport_alloc_q_counter(struct mlx5_core_dev *mdev, int *counter_set_id)
72285307Sed{
73285307Sed	u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)];
74285307Sed	u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)];
75285307Sed	int err;
76285307Sed
77285307Sed	memset(in, 0, sizeof(in));
78285307Sed	memset(out, 0, sizeof(out));
79285307Sed
80285307Sed	MLX5_SET(alloc_q_counter_in, in, opcode,
81285307Sed		 MLX5_CMD_OP_ALLOC_Q_COUNTER);
82285307Sed
83285307Sed	err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
84285307Sed					 out, sizeof(out));
85285307Sed
86285307Sed	if (err)
87285307Sed		return err;
88285307Sed
89285307Sed	*counter_set_id = MLX5_GET(alloc_q_counter_out, out,
90285307Sed				   counter_set_id);
91285307Sed	return err;
92285307Sed}
93285307Sed
94285307Sedint mlx5_vport_dealloc_q_counter(struct mlx5_core_dev *mdev,
95285307Sed				 int counter_set_id)
96285307Sed{
97285307Sed	u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)];
98285307Sed	u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)];
99285307Sed
100285307Sed	memset(in, 0, sizeof(in));
101285307Sed	memset(out, 0, sizeof(out));
102285307Sed
103285307Sed	MLX5_SET(dealloc_q_counter_in, in, opcode,
104285307Sed		 MLX5_CMD_OP_DEALLOC_Q_COUNTER);
105285307Sed	MLX5_SET(dealloc_q_counter_in, in, counter_set_id,
106285307Sed		 counter_set_id);
107285307Sed
108285307Sed	return mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
109285307Sed					  out, sizeof(out));
110285307Sed}
111285307Sed
112285307Sedstatic int mlx5_vport_query_q_counter(struct mlx5_core_dev *mdev,
113285307Sed				      int counter_set_id,
114285307Sed				      int reset,
115285307Sed				      void *out,
116285307Sed				      int out_size)
117285307Sed{
118285307Sed	u32 in[MLX5_ST_SZ_DW(query_q_counter_in)];
119285307Sed
120285307Sed	memset(in, 0, sizeof(in));
121285307Sed
122285307Sed	MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
123285307Sed	MLX5_SET(query_q_counter_in, in, clear, reset);
124285307Sed	MLX5_SET(query_q_counter_in, in, counter_set_id, counter_set_id);
125285307Sed
126285307Sed	return mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
127285307Sed					  out, out_size);
128285307Sed}
129285307Sed
130285307Sedint mlx5_vport_query_out_of_rx_buffer(struct mlx5_core_dev *mdev,
131285307Sed				      int counter_set_id,
132285307Sed				      u32 *out_of_rx_buffer)
133285307Sed{
134285307Sed	u32 out[MLX5_ST_SZ_DW(query_q_counter_out)];
135285307Sed	int err;
136285307Sed
137285307Sed	memset(out, 0, sizeof(out));
138285307Sed
139285307Sed	err = mlx5_vport_query_q_counter(mdev, counter_set_id, 0, out,
140285307Sed					 sizeof(out));
141285307Sed
142285307Sed	if (err)
143285307Sed		return err;
144285307Sed
145285307Sed	*out_of_rx_buffer = MLX5_GET(query_q_counter_out, out,
146285307Sed				     out_of_buffer);
147285307Sed	return err;
148285307Sed}
149285307Sed
150285307Sedint mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
151285307Sed				     u32 vport, u8 *addr)
152285307Sed{
153285307Sed	u32 *out;
154285307Sed	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
155285307Sed	u8 *out_addr;
156	int err;
157
158	out = mlx5_vzalloc(outlen);
159	if (!out)
160		return -ENOMEM;
161
162	out_addr = MLX5_ADDR_OF(query_nic_vport_context_out, out,
163				nic_vport_context.permanent_address);
164
165	err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
166	if (err)
167		goto out;
168
169	ether_addr_copy(addr, &out_addr[2]);
170
171out:
172	kvfree(out);
173	return err;
174}
175EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_address);
176
177int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
178					   u64 *system_image_guid)
179{
180	u32 *out;
181	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
182	int err;
183
184	out = mlx5_vzalloc(outlen);
185	if (!out)
186		return -ENOMEM;
187
188	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
189	if (err)
190		goto out;
191
192	*system_image_guid = MLX5_GET64(query_nic_vport_context_out, out,
193					nic_vport_context.system_image_guid);
194out:
195	kvfree(out);
196	return err;
197}
198EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid);
199
200int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
201{
202	u32 *out;
203	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
204	int err;
205
206	out = mlx5_vzalloc(outlen);
207	if (!out)
208		return -ENOMEM;
209
210	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
211	if (err)
212		goto out;
213
214	*node_guid = MLX5_GET64(query_nic_vport_context_out, out,
215				nic_vport_context.node_guid);
216
217out:
218	kvfree(out);
219	return err;
220}
221EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);
222
223int mlx5_query_nic_vport_port_guid(struct mlx5_core_dev *mdev, u64 *port_guid)
224{
225	u32 *out;
226	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
227	int err;
228
229	out = mlx5_vzalloc(outlen);
230	if (!out)
231		return -ENOMEM;
232
233	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
234	if (err)
235		goto out;
236
237	*port_guid = MLX5_GET64(query_nic_vport_context_out, out,
238				nic_vport_context.port_guid);
239
240out:
241	kvfree(out);
242	return err;
243}
244EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_port_guid);
245
246int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
247					u16 *qkey_viol_cntr)
248{
249	u32 *out;
250	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
251	int err;
252
253	out = mlx5_vzalloc(outlen);
254	if (!out)
255		return -ENOMEM;
256
257	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
258	if (err)
259		goto out;
260
261	*qkey_viol_cntr = MLX5_GET(query_nic_vport_context_out, out,
262				nic_vport_context.qkey_violation_counter);
263
264out:
265	kvfree(out);
266	return err;
267}
268EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_qkey_viol_cntr);
269
270static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
271					 int inlen)
272{
273	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
274
275	MLX5_SET(modify_nic_vport_context_in, in, opcode,
276		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
277
278	memset(out, 0, sizeof(out));
279	return mlx5_cmd_exec_check_status(mdev, in, inlen, out, sizeof(out));
280}
281
282static int mlx5_nic_vport_enable_disable_roce(struct mlx5_core_dev *mdev,
283					      int enable_disable)
284{
285	void *in;
286	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
287	int err;
288
289	in = mlx5_vzalloc(inlen);
290	if (!in) {
291		mlx5_core_warn(mdev, "failed to allocate inbox\n");
292		return -ENOMEM;
293	}
294
295	MLX5_SET(modify_nic_vport_context_in, in, field_select.roce_en, 1);
296	MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.roce_en,
297		 enable_disable);
298
299	err = mlx5_modify_nic_vport_context(mdev, in, inlen);
300
301	kvfree(in);
302
303	return err;
304}
305
306int mlx5_set_nic_vport_current_mac(struct mlx5_core_dev *mdev, int vport,
307				   bool other_vport, u8 *addr)
308{
309	void *in;
310	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)
311		  + MLX5_ST_SZ_BYTES(mac_address_layout);
312	u8  *mac_layout;
313	u8  *mac_ptr;
314	int err;
315
316	in = mlx5_vzalloc(inlen);
317	if (!in) {
318		mlx5_core_warn(mdev, "failed to allocate inbox\n");
319		return -ENOMEM;
320	}
321
322	MLX5_SET(modify_nic_vport_context_in, in,
323		 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
324	MLX5_SET(modify_nic_vport_context_in, in,
325		 vport_number, vport);
326	MLX5_SET(modify_nic_vport_context_in, in,
327		 other_vport, other_vport);
328	MLX5_SET(modify_nic_vport_context_in, in,
329		 field_select.addresses_list, 1);
330	MLX5_SET(modify_nic_vport_context_in, in,
331		 nic_vport_context.allowed_list_type,
332		 MLX5_NIC_VPORT_LIST_TYPE_UC);
333	MLX5_SET(modify_nic_vport_context_in, in,
334		 nic_vport_context.allowed_list_size, 1);
335
336	mac_layout = (u8 *)MLX5_ADDR_OF(modify_nic_vport_context_in, in,
337		nic_vport_context.current_uc_mac_address);
338	mac_ptr = (u8 *)MLX5_ADDR_OF(mac_address_layout, mac_layout,
339		mac_addr_47_32);
340	ether_addr_copy(mac_ptr, addr);
341
342	err = mlx5_modify_nic_vport_context(mdev, in, inlen);
343
344	kvfree(in);
345
346	return err;
347}
348EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_current_mac);
349
350int mlx5_set_nic_vport_vlan_list(struct mlx5_core_dev *dev, u32 vport,
351				 u16 *vlan_list, int list_len)
352{
353	void *in, *ctx;
354	int i, err;
355	int  inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)
356		+ MLX5_ST_SZ_BYTES(vlan_layout) * (int)list_len;
357
358	int max_list_size = 1 << MLX5_CAP_GEN_MAX(dev, log_max_vlan_list);
359
360	if (list_len > max_list_size) {
361		mlx5_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n",
362			       list_len, max_list_size);
363		return -ENOSPC;
364	}
365
366	in = mlx5_vzalloc(inlen);
367	if (!in) {
368		mlx5_core_warn(dev, "failed to allocate inbox\n");
369		return -ENOMEM;
370	}
371
372	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
373	if (vport)
374		MLX5_SET(modify_nic_vport_context_in, in,
375			 other_vport, 1);
376	MLX5_SET(modify_nic_vport_context_in, in,
377		 field_select.addresses_list, 1);
378
379	ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in, nic_vport_context);
380
381	MLX5_SET(nic_vport_context, ctx, allowed_list_type,
382		 MLX5_NIC_VPORT_LIST_TYPE_VLAN);
383	MLX5_SET(nic_vport_context, ctx, allowed_list_size, list_len);
384
385	for (i = 0; i < list_len; i++) {
386		u8 *vlan_lout = MLX5_ADDR_OF(nic_vport_context, ctx,
387					 current_uc_mac_address[i]);
388		MLX5_SET(vlan_layout, vlan_lout, vlan, vlan_list[i]);
389	}
390
391	err = mlx5_modify_nic_vport_context(dev, in, inlen);
392
393	kvfree(in);
394	return err;
395}
396EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_vlan_list);
397
398int mlx5_set_nic_vport_mc_list(struct mlx5_core_dev *mdev, int vport,
399			       u64 *addr_list, size_t addr_list_len)
400{
401	void *in, *ctx;
402	int  inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)
403		  + MLX5_ST_SZ_BYTES(mac_address_layout) * (int)addr_list_len;
404	int err;
405	size_t i;
406	int max_list_sz = 1 << MLX5_CAP_GEN_MAX(mdev, log_max_current_mc_list);
407
408	if ((int)addr_list_len > max_list_sz) {
409		mlx5_core_warn(mdev, "Requested list size (%d) > (%d) max_list_size\n",
410			       (int)addr_list_len, max_list_sz);
411		return -ENOSPC;
412	}
413
414	in = mlx5_vzalloc(inlen);
415	if (!in) {
416		mlx5_core_warn(mdev, "failed to allocate inbox\n");
417		return -ENOMEM;
418	}
419
420	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
421	if (vport)
422		MLX5_SET(modify_nic_vport_context_in, in,
423			 other_vport, 1);
424	MLX5_SET(modify_nic_vport_context_in, in,
425		 field_select.addresses_list, 1);
426
427	ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in, nic_vport_context);
428
429	MLX5_SET(nic_vport_context, ctx, allowed_list_type,
430		 MLX5_NIC_VPORT_LIST_TYPE_MC);
431	MLX5_SET(nic_vport_context, ctx, allowed_list_size, addr_list_len);
432
433	for (i = 0; i < addr_list_len; i++) {
434		u8 *mac_lout = (u8 *)MLX5_ADDR_OF(nic_vport_context, ctx,
435						  current_uc_mac_address[i]);
436		u8 *mac_ptr = (u8 *)MLX5_ADDR_OF(mac_address_layout, mac_lout,
437						 mac_addr_47_32);
438		ether_addr_copy(mac_ptr, (u8 *)&addr_list[i]);
439	}
440
441	err = mlx5_modify_nic_vport_context(mdev, in, inlen);
442
443	kvfree(in);
444
445	return err;
446}
447EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_mc_list);
448
449int mlx5_set_nic_vport_promisc(struct mlx5_core_dev *mdev, int vport,
450			       bool promisc_mc, bool promisc_uc,
451			       bool promisc_all)
452{
453	u8  in[MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)];
454	u8 *ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
455			       nic_vport_context);
456
457	memset(in, 0, MLX5_ST_SZ_BYTES(modify_nic_vport_context_in));
458
459	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
460	if (vport)
461		MLX5_SET(modify_nic_vport_context_in, in,
462			 other_vport, 1);
463	MLX5_SET(modify_nic_vport_context_in, in, field_select.promisc, 1);
464	if (promisc_mc)
465		MLX5_SET(nic_vport_context, ctx, promisc_mc, 1);
466	if (promisc_uc)
467		MLX5_SET(nic_vport_context, ctx, promisc_uc, 1);
468	if (promisc_all)
469		MLX5_SET(nic_vport_context, ctx, promisc_all, 1);
470
471	return mlx5_modify_nic_vport_context(mdev, in, sizeof(in));
472}
473EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_promisc);
474
475int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
476				  u32 vport,
477				  enum mlx5_list_type list_type,
478				  u8 addr_list[][ETH_ALEN],
479				  int *list_size)
480{
481	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
482	void *nic_vport_ctx;
483	int max_list_size;
484	int req_list_size;
485	u8 *mac_addr;
486	int out_sz;
487	void *out;
488	int err;
489	int i;
490
491	req_list_size = *list_size;
492
493	max_list_size = (list_type == MLX5_NIC_VPORT_LIST_TYPE_UC) ?
494			1 << MLX5_CAP_GEN_MAX(dev, log_max_current_uc_list) :
495			1 << MLX5_CAP_GEN_MAX(dev, log_max_current_mc_list);
496
497	if (req_list_size > max_list_size) {
498		mlx5_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n",
499			       req_list_size, max_list_size);
500		req_list_size = max_list_size;
501	}
502
503	out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
504		 req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout);
505
506	memset(in, 0, sizeof(in));
507	out = kzalloc(out_sz, GFP_KERNEL);
508	if (!out)
509		return -ENOMEM;
510
511	MLX5_SET(query_nic_vport_context_in, in, opcode,
512		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
513	MLX5_SET(query_nic_vport_context_in, in, allowed_list_type, list_type);
514	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
515
516	if (vport)
517		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
518
519	err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, out_sz);
520	if (err)
521		goto out;
522
523	nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
524				     nic_vport_context);
525	req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
526				 allowed_list_size);
527
528	*list_size = req_list_size;
529	for (i = 0; i < req_list_size; i++) {
530		mac_addr = MLX5_ADDR_OF(nic_vport_context,
531					nic_vport_ctx,
532					current_uc_mac_address[i]) + 2;
533		ether_addr_copy(addr_list[i], mac_addr);
534	}
535out:
536	kfree(out);
537	return err;
538}
539EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_list);
540
541int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
542				   enum mlx5_list_type list_type,
543				   u8 addr_list[][ETH_ALEN],
544				   int list_size)
545{
546	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
547	void *nic_vport_ctx;
548	int max_list_size;
549	int in_sz;
550	void *in;
551	int err;
552	int i;
553
554	max_list_size = list_type == MLX5_NIC_VPORT_LIST_TYPE_UC ?
555		 1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) :
556		 1 << MLX5_CAP_GEN(dev, log_max_current_mc_list);
557
558	if (list_size > max_list_size)
559		return -ENOSPC;
560
561	in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
562		list_size * MLX5_ST_SZ_BYTES(mac_address_layout);
563
564	memset(out, 0, sizeof(out));
565	in = kzalloc(in_sz, GFP_KERNEL);
566	if (!in)
567		return -ENOMEM;
568
569	MLX5_SET(modify_nic_vport_context_in, in, opcode,
570		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
571	MLX5_SET(modify_nic_vport_context_in, in,
572		 field_select.addresses_list, 1);
573
574	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
575				     nic_vport_context);
576
577	MLX5_SET(nic_vport_context, nic_vport_ctx,
578		 allowed_list_type, list_type);
579	MLX5_SET(nic_vport_context, nic_vport_ctx,
580		 allowed_list_size, list_size);
581
582	for (i = 0; i < list_size; i++) {
583		u8 *curr_mac = MLX5_ADDR_OF(nic_vport_context,
584					    nic_vport_ctx,
585					    current_uc_mac_address[i]) + 2;
586		ether_addr_copy(curr_mac, addr_list[i]);
587	}
588
589	err = mlx5_cmd_exec_check_status(dev, in, in_sz, out, sizeof(out));
590	kfree(in);
591	return err;
592}
593EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_list);
594
595int mlx5_query_nic_vport_vlan_list(struct mlx5_core_dev *dev,
596				   u32 vport,
597				   u16 *vlan_list,
598				   int *list_size)
599{
600	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
601	void *nic_vport_ctx;
602	int max_list_size;
603	int req_list_size;
604	int out_sz;
605	void *out;
606	void *vlan_addr;
607	int err;
608	int i;
609
610	req_list_size = *list_size;
611
612	max_list_size = 1 << MLX5_CAP_GEN_MAX(dev, log_max_vlan_list);
613
614	if (req_list_size > max_list_size) {
615		mlx5_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n",
616			       req_list_size, max_list_size);
617		req_list_size = max_list_size;
618	}
619
620	out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
621		 req_list_size * MLX5_ST_SZ_BYTES(vlan_layout);
622
623	memset(in, 0, sizeof(in));
624	out = kzalloc(out_sz, GFP_KERNEL);
625	if (!out)
626		return -ENOMEM;
627
628	MLX5_SET(query_nic_vport_context_in, in, opcode,
629		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
630	MLX5_SET(query_nic_vport_context_in, in, allowed_list_type,
631		 MLX5_NIC_VPORT_CONTEXT_ALLOWED_LIST_TYPE_VLAN_LIST);
632	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
633
634	if (vport)
635		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
636
637	err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, out_sz);
638	if (err)
639		goto out;
640
641	nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
642				     nic_vport_context);
643	req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
644				 allowed_list_size);
645
646	*list_size = req_list_size;
647	for (i = 0; i < req_list_size; i++) {
648		vlan_addr = MLX5_ADDR_OF(nic_vport_context, nic_vport_ctx,
649					 current_uc_mac_address[i]);
650		vlan_list[i] = MLX5_GET(vlan_layout, vlan_addr, vlan);
651	}
652out:
653	kfree(out);
654	return err;
655}
656EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_vlan_list);
657
658int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
659				u16 vlans[],
660				int list_size)
661{
662	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
663	void *nic_vport_ctx;
664	int max_list_size;
665	int in_sz;
666	void *in;
667	int err;
668	int i;
669
670	max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);
671
672	if (list_size > max_list_size)
673		return -ENOSPC;
674
675	in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
676		list_size * MLX5_ST_SZ_BYTES(vlan_layout);
677
678	memset(out, 0, sizeof(out));
679	in = kzalloc(in_sz, GFP_KERNEL);
680	if (!in)
681		return -ENOMEM;
682
683	MLX5_SET(modify_nic_vport_context_in, in, opcode,
684		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
685	MLX5_SET(modify_nic_vport_context_in, in,
686		 field_select.addresses_list, 1);
687
688	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
689				     nic_vport_context);
690
691	MLX5_SET(nic_vport_context, nic_vport_ctx,
692		 allowed_list_type, MLX5_NIC_VPORT_LIST_TYPE_VLAN);
693	MLX5_SET(nic_vport_context, nic_vport_ctx,
694		 allowed_list_size, list_size);
695
696	for (i = 0; i < list_size; i++) {
697		void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
698					       nic_vport_ctx,
699					       current_uc_mac_address[i]);
700		MLX5_SET(vlan_layout, vlan_addr, vlan, vlans[i]);
701	}
702
703	err = mlx5_cmd_exec_check_status(dev, in, in_sz, out, sizeof(out));
704	kfree(in);
705	return err;
706}
707EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_vlans);
708
709int mlx5_set_nic_vport_permanent_mac(struct mlx5_core_dev *mdev, int vport,
710				     u8 *addr)
711{
712	void *in;
713	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
714	u8  *mac_ptr;
715	int err;
716
717	in = mlx5_vzalloc(inlen);
718	if (!in) {
719		mlx5_core_warn(mdev, "failed to allocate inbox\n");
720		return -ENOMEM;
721	}
722
723	MLX5_SET(modify_nic_vport_context_in, in,
724		 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
725	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
726	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
727	MLX5_SET(modify_nic_vport_context_in, in,
728		 field_select.permanent_address, 1);
729	mac_ptr = (u8 *)MLX5_ADDR_OF(modify_nic_vport_context_in, in,
730		nic_vport_context.permanent_address.mac_addr_47_32);
731	ether_addr_copy(mac_ptr, addr);
732
733	err = mlx5_modify_nic_vport_context(mdev, in, inlen);
734
735	kvfree(in);
736
737	return err;
738}
739EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_permanent_mac);
740
741int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev)
742{
743	return mlx5_nic_vport_enable_disable_roce(mdev, 1);
744}
745EXPORT_SYMBOL_GPL(mlx5_nic_vport_enable_roce);
746
747int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev)
748{
749	return mlx5_nic_vport_enable_disable_roce(mdev, 0);
750}
751EXPORT_SYMBOL_GPL(mlx5_nic_vport_disable_roce);
752
753int mlx5_query_hca_vport_context(struct mlx5_core_dev *mdev,
754				 u8 port_num, u8 vport_num, u32 *out,
755				 int outlen)
756{
757	u32 in[MLX5_ST_SZ_DW(query_hca_vport_context_in)];
758	int is_group_manager;
759
760	is_group_manager = MLX5_CAP_GEN(mdev, vport_group_manager);
761
762	memset(in, 0, sizeof(in));
763
764	MLX5_SET(query_hca_vport_context_in, in, opcode,
765		 MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT);
766
767	if (vport_num) {
768		if (is_group_manager) {
769			MLX5_SET(query_hca_vport_context_in, in, other_vport,
770				 1);
771			MLX5_SET(query_hca_vport_context_in, in, vport_number,
772				 vport_num);
773		} else {
774			return -EPERM;
775		}
776	}
777
778	if (MLX5_CAP_GEN(mdev, num_ports) == 2)
779		MLX5_SET(query_hca_vport_context_in, in, port_num, port_num);
780
781	return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
782}
783
784int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *mdev,
785					   u64 *system_image_guid)
786{
787	u32 *out;
788	int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
789	int err;
790
791	out = mlx5_vzalloc(outlen);
792	if (!out)
793		return -ENOMEM;
794
795	err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
796	if (err)
797		goto out;
798
799	*system_image_guid = MLX5_GET64(query_hca_vport_context_out, out,
800					hca_vport_context.system_image_guid);
801
802out:
803	kvfree(out);
804	return err;
805}
806EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_system_image_guid);
807
808int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
809{
810	u32 *out;
811	int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
812	int err;
813
814	out = mlx5_vzalloc(outlen);
815	if (!out)
816		return -ENOMEM;
817
818	err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
819	if (err)
820		goto out;
821
822	*node_guid = MLX5_GET64(query_hca_vport_context_out, out,
823				hca_vport_context.node_guid);
824
825out:
826	kvfree(out);
827	return err;
828}
829EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid);
830
831int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 port_num,
832			     u16 vport_num, u16 gid_index, union ib_gid *gid)
833{
834	int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_in);
835	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
836	int is_group_manager;
837	void *out = NULL;
838	void *in = NULL;
839	union ib_gid *tmp;
840	int tbsz;
841	int nout;
842	int err;
843
844	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
845	tbsz = mlx5_get_gid_table_len(MLX5_CAP_GEN(dev, gid_table_size));
846
847	if (gid_index > tbsz && gid_index != 0xffff)
848		return -EINVAL;
849
850	if (gid_index == 0xffff)
851		nout = tbsz;
852	else
853		nout = 1;
854
855	out_sz += nout * sizeof(*gid);
856
857	in = mlx5_vzalloc(in_sz);
858	out = mlx5_vzalloc(out_sz);
859	if (!in || !out) {
860		err = -ENOMEM;
861		goto out;
862	}
863
864	MLX5_SET(query_hca_vport_gid_in, in, opcode,
865		 MLX5_CMD_OP_QUERY_HCA_VPORT_GID);
866	if (vport_num) {
867		if (is_group_manager) {
868			MLX5_SET(query_hca_vport_gid_in, in, vport_number,
869				 vport_num);
870			MLX5_SET(query_hca_vport_gid_in, in, other_vport, 1);
871		} else {
872			err = -EPERM;
873			goto out;
874		}
875	}
876
877	MLX5_SET(query_hca_vport_gid_in, in, gid_index, gid_index);
878
879	if (MLX5_CAP_GEN(dev, num_ports) == 2)
880		MLX5_SET(query_hca_vport_gid_in, in, port_num, port_num);
881
882	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
883	if (err)
884		goto out;
885
886	err = mlx5_cmd_status_to_err_v2(out);
887	if (err)
888		goto out;
889
890	tmp = (union ib_gid *)MLX5_ADDR_OF(query_hca_vport_gid_out, out, gid);
891	gid->global.subnet_prefix = tmp->global.subnet_prefix;
892	gid->global.interface_id = tmp->global.interface_id;
893
894out:
895	kvfree(in);
896	kvfree(out);
897	return err;
898}
899EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_gid);
900
901int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
902			      u8 port_num, u16 vf_num, u16 pkey_index,
903			      u16 *pkey)
904{
905	int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_in);
906	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_out);
907	int is_group_manager;
908	void *out = NULL;
909	void *in = NULL;
910	void *pkarr;
911	int nout;
912	int tbsz;
913	int err;
914	int i;
915
916	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
917
918	tbsz = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size));
919	if (pkey_index > tbsz && pkey_index != 0xffff)
920		return -EINVAL;
921
922	if (pkey_index == 0xffff)
923		nout = tbsz;
924	else
925		nout = 1;
926
927	out_sz += nout * MLX5_ST_SZ_BYTES(pkey);
928
929	in = kzalloc(in_sz, GFP_KERNEL);
930	out = kzalloc(out_sz, GFP_KERNEL);
931
932	MLX5_SET(query_hca_vport_pkey_in, in, opcode,
933		 MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY);
934	if (other_vport) {
935		if (is_group_manager) {
936			MLX5_SET(query_hca_vport_pkey_in, in, vport_number,
937				 vf_num);
938			MLX5_SET(query_hca_vport_pkey_in, in, other_vport, 1);
939		} else {
940			err = -EPERM;
941			goto out;
942		}
943	}
944	MLX5_SET(query_hca_vport_pkey_in, in, pkey_index, pkey_index);
945
946	if (MLX5_CAP_GEN(dev, num_ports) == 2)
947		MLX5_SET(query_hca_vport_pkey_in, in, port_num, port_num);
948
949	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
950	if (err)
951		goto out;
952
953	err = mlx5_cmd_status_to_err_v2(out);
954	if (err)
955		goto out;
956
957	pkarr = MLX5_ADDR_OF(query_hca_vport_pkey_out, out, pkey);
958	for (i = 0; i < nout; i++, pkey++,
959	     pkarr += MLX5_ST_SZ_BYTES(pkey))
960		*pkey = MLX5_GET_PR(pkey, pkarr, pkey);
961
962out:
963	kfree(in);
964	kfree(out);
965	return err;
966}
967EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_pkey);
968
969static int mlx5_modify_eswitch_vport_context(struct mlx5_core_dev *mdev,
970					     u16 vport, void *in, int inlen)
971{
972	u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)];
973	int err;
974
975	memset(out, 0, sizeof(out));
976
977	MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
978	if (vport)
979		MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
980
981	MLX5_SET(modify_esw_vport_context_in, in, opcode,
982		 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
983
984	err = mlx5_cmd_exec_check_status(mdev, in, inlen,
985					 out, sizeof(out));
986	if (err)
987		mlx5_core_warn(mdev, "MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT failed\n");
988
989	return err;
990}
991
992int mlx5_set_eswitch_cvlan_info(struct mlx5_core_dev *mdev, u8 vport,
993				u8 insert_mode, u8 strip_mode,
994				u16 vlan, u8 cfi, u8 pcp)
995{
996	u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)];
997
998	memset(in, 0, sizeof(in));
999
1000	if (insert_mode != MLX5_MODIFY_ESW_VPORT_CONTEXT_CVLAN_INSERT_NONE) {
1001		MLX5_SET(modify_esw_vport_context_in, in,
1002			 esw_vport_context.cvlan_cfi, cfi);
1003		MLX5_SET(modify_esw_vport_context_in, in,
1004			 esw_vport_context.cvlan_pcp, pcp);
1005		MLX5_SET(modify_esw_vport_context_in, in,
1006			 esw_vport_context.cvlan_id, vlan);
1007	}
1008
1009	MLX5_SET(modify_esw_vport_context_in, in,
1010		 esw_vport_context.vport_cvlan_insert, insert_mode);
1011
1012	MLX5_SET(modify_esw_vport_context_in, in,
1013		 esw_vport_context.vport_cvlan_strip, strip_mode);
1014
1015	MLX5_SET(modify_esw_vport_context_in, in, field_select,
1016		 MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_CVLAN_STRIP |
1017		 MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_CVLAN_INSERT);
1018
1019	return mlx5_modify_eswitch_vport_context(mdev, vport, in, sizeof(in));
1020}
1021EXPORT_SYMBOL_GPL(mlx5_set_eswitch_cvlan_info);
1022
1023int mlx5_arm_vport_context_events(struct mlx5_core_dev *mdev,
1024				  u8 vport,
1025				  u32 events_mask)
1026{
1027	u32 *in;
1028	u32 inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
1029	void *nic_vport_ctx;
1030	int err;
1031
1032	in = mlx5_vzalloc(inlen);
1033	if (!in)
1034		return -ENOMEM;
1035
1036	MLX5_SET(modify_nic_vport_context_in,
1037		 in,
1038		 opcode,
1039		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
1040	MLX5_SET(modify_nic_vport_context_in,
1041		 in,
1042		 field_select.change_event,
1043		 1);
1044	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
1045	if (vport)
1046		MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
1047	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
1048				     in,
1049				     nic_vport_context);
1050
1051	MLX5_SET(nic_vport_context, nic_vport_ctx, arm_change_event, 1);
1052
1053	if (events_mask & MLX5_UC_ADDR_CHANGE)
1054		MLX5_SET(nic_vport_context,
1055			 nic_vport_ctx,
1056			 event_on_uc_address_change,
1057			 1);
1058	if (events_mask & MLX5_MC_ADDR_CHANGE)
1059		MLX5_SET(nic_vport_context,
1060			 nic_vport_ctx,
1061			 event_on_mc_address_change,
1062			 1);
1063	if (events_mask & MLX5_VLAN_CHANGE)
1064		MLX5_SET(nic_vport_context,
1065			 nic_vport_ctx,
1066			 event_on_vlan_change,
1067			 1);
1068	if (events_mask & MLX5_PROMISC_CHANGE)
1069		MLX5_SET(nic_vport_context,
1070			 nic_vport_ctx,
1071			 event_on_promisc_change,
1072			 1);
1073	if (events_mask & MLX5_MTU_CHANGE)
1074		MLX5_SET(nic_vport_context,
1075			 nic_vport_ctx,
1076			 event_on_mtu,
1077			 1);
1078
1079	err = mlx5_modify_nic_vport_context(mdev, in, inlen);
1080
1081	kvfree(in);
1082	return err;
1083}
1084EXPORT_SYMBOL_GPL(mlx5_arm_vport_context_events);
1085
1086int mlx5_query_vport_promisc(struct mlx5_core_dev *mdev,
1087			     u32 vport,
1088			     u8 *promisc_uc,
1089			     u8 *promisc_mc,
1090			     u8 *promisc_all)
1091{
1092	u32 *out;
1093	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
1094	int err;
1095
1096	out = kzalloc(outlen, GFP_KERNEL);
1097	if (!out)
1098		return -ENOMEM;
1099
1100	err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
1101	if (err)
1102		goto out;
1103
1104	*promisc_uc = MLX5_GET(query_nic_vport_context_out, out,
1105			       nic_vport_context.promisc_uc);
1106	*promisc_mc = MLX5_GET(query_nic_vport_context_out, out,
1107			       nic_vport_context.promisc_mc);
1108	*promisc_all = MLX5_GET(query_nic_vport_context_out, out,
1109				nic_vport_context.promisc_all);
1110
1111out:
1112	kfree(out);
1113	return err;
1114}
1115EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_promisc);
1116
1117int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev,
1118				  int promisc_uc,
1119				  int promisc_mc,
1120				  int promisc_all)
1121{
1122	void *in;
1123	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
1124	int err;
1125
1126	in = mlx5_vzalloc(inlen);
1127	if (!in) {
1128		mlx5_core_err(mdev, "failed to allocate inbox\n");
1129		return -ENOMEM;
1130	}
1131
1132	MLX5_SET(modify_nic_vport_context_in, in, field_select.promisc, 1);
1133	MLX5_SET(modify_nic_vport_context_in, in,
1134		 nic_vport_context.promisc_uc, promisc_uc);
1135	MLX5_SET(modify_nic_vport_context_in, in,
1136		 nic_vport_context.promisc_mc, promisc_mc);
1137	MLX5_SET(modify_nic_vport_context_in, in,
1138		 nic_vport_context.promisc_all, promisc_all);
1139
1140	err = mlx5_modify_nic_vport_context(mdev, in, inlen);
1141	kvfree(in);
1142	return err;
1143}
1144EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_promisc);
1145
1146int mlx5_query_vport_counter(struct mlx5_core_dev *dev,
1147			     u8 port_num, u16 vport_num,
1148			     void *out, int out_size)
1149{
1150	int in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in);
1151	int is_group_manager;
1152	void *in;
1153	int err;
1154
1155	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
1156
1157	in = mlx5_vzalloc(in_sz);
1158	if (!in)
1159		return -ENOMEM;
1160
1161	MLX5_SET(query_vport_counter_in, in, opcode,
1162		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
1163	if (vport_num) {
1164		if (is_group_manager) {
1165			MLX5_SET(query_vport_counter_in, in, other_vport, 1);
1166			MLX5_SET(query_vport_counter_in, in, vport_number,
1167				 vport_num);
1168		} else {
1169			err = -EPERM;
1170			goto ex;
1171		}
1172	}
1173	if (MLX5_CAP_GEN(dev, num_ports) == 2)
1174		MLX5_SET(query_vport_counter_in, in, port_num, port_num);
1175
1176	err = mlx5_cmd_exec(dev, in, in_sz, out,  out_size);
1177	if (err)
1178		goto ex;
1179	err = mlx5_cmd_status_to_err_v2(out);
1180	if (err)
1181		goto ex;
1182
1183ex:
1184	kvfree(in);
1185	return err;
1186}
1187EXPORT_SYMBOL_GPL(mlx5_query_vport_counter);
1188
1189int mlx5_get_vport_counters(struct mlx5_core_dev *dev, u8 port_num,
1190			    struct mlx5_vport_counters *vc)
1191{
1192	int out_sz = MLX5_ST_SZ_BYTES(query_vport_counter_out);
1193	void *out;
1194	int err;
1195
1196	out = mlx5_vzalloc(out_sz);
1197	if (!out)
1198		return -ENOMEM;
1199
1200	err = mlx5_query_vport_counter(dev, port_num, 0, out, out_sz);
1201	if (err)
1202		goto ex;
1203
1204	vc->received_errors.packets =
1205		MLX5_GET64(query_vport_counter_out,
1206			   out, received_errors.packets);
1207	vc->received_errors.octets =
1208		MLX5_GET64(query_vport_counter_out,
1209			   out, received_errors.octets);
1210	vc->transmit_errors.packets =
1211		MLX5_GET64(query_vport_counter_out,
1212			   out, transmit_errors.packets);
1213	vc->transmit_errors.octets =
1214		MLX5_GET64(query_vport_counter_out,
1215			   out, transmit_errors.octets);
1216	vc->received_ib_unicast.packets =
1217		MLX5_GET64(query_vport_counter_out,
1218			   out, received_ib_unicast.packets);
1219	vc->received_ib_unicast.octets =
1220		MLX5_GET64(query_vport_counter_out,
1221			   out, received_ib_unicast.octets);
1222	vc->transmitted_ib_unicast.packets =
1223		MLX5_GET64(query_vport_counter_out,
1224			   out, transmitted_ib_unicast.packets);
1225	vc->transmitted_ib_unicast.octets =
1226		MLX5_GET64(query_vport_counter_out,
1227			   out, transmitted_ib_unicast.octets);
1228	vc->received_ib_multicast.packets =
1229		MLX5_GET64(query_vport_counter_out,
1230			   out, received_ib_multicast.packets);
1231	vc->received_ib_multicast.octets =
1232		MLX5_GET64(query_vport_counter_out,
1233			   out, received_ib_multicast.octets);
1234	vc->transmitted_ib_multicast.packets =
1235		MLX5_GET64(query_vport_counter_out,
1236			   out, transmitted_ib_multicast.packets);
1237	vc->transmitted_ib_multicast.octets =
1238		MLX5_GET64(query_vport_counter_out,
1239			   out, transmitted_ib_multicast.octets);
1240	vc->received_eth_broadcast.packets =
1241		MLX5_GET64(query_vport_counter_out,
1242			   out, received_eth_broadcast.packets);
1243	vc->received_eth_broadcast.octets =
1244		MLX5_GET64(query_vport_counter_out,
1245			   out, received_eth_broadcast.octets);
1246	vc->transmitted_eth_broadcast.packets =
1247		MLX5_GET64(query_vport_counter_out,
1248			   out, transmitted_eth_broadcast.packets);
1249	vc->transmitted_eth_broadcast.octets =
1250		MLX5_GET64(query_vport_counter_out,
1251			   out, transmitted_eth_broadcast.octets);
1252	vc->received_eth_unicast.octets =
1253		MLX5_GET64(query_vport_counter_out,
1254			   out, received_eth_unicast.octets);
1255	vc->received_eth_unicast.packets =
1256		MLX5_GET64(query_vport_counter_out,
1257			   out, received_eth_unicast.packets);
1258	vc->transmitted_eth_unicast.octets =
1259		MLX5_GET64(query_vport_counter_out,
1260			   out, transmitted_eth_unicast.octets);
1261	vc->transmitted_eth_unicast.packets =
1262		MLX5_GET64(query_vport_counter_out,
1263			   out, transmitted_eth_unicast.packets);
1264	vc->received_eth_multicast.octets =
1265		MLX5_GET64(query_vport_counter_out,
1266			   out, received_eth_multicast.octets);
1267	vc->received_eth_multicast.packets =
1268		MLX5_GET64(query_vport_counter_out,
1269			   out, received_eth_multicast.packets);
1270	vc->transmitted_eth_multicast.octets =
1271		MLX5_GET64(query_vport_counter_out,
1272			   out, transmitted_eth_multicast.octets);
1273	vc->transmitted_eth_multicast.packets =
1274		MLX5_GET64(query_vport_counter_out,
1275			   out, transmitted_eth_multicast.packets);
1276
1277ex:
1278	kvfree(out);
1279	return err;
1280}
1281