mlx5_vport.c revision 290650
1/*-
2 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 *    notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 *    notice, this list of conditions and the following disclaimer in the
11 *    documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23 * SUCH DAMAGE.
24 *
25 * $FreeBSD: head/sys/dev/mlx5/mlx5_core/mlx5_vport.c 290650 2015-11-10 12:20:22Z hselasky $
26 */
27
28#include <linux/etherdevice.h>
29#include <dev/mlx5/driver.h>
30#include <dev/mlx5/vport.h>
31#include "mlx5_core.h"
32
33u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod)
34{
35	u32 in[MLX5_ST_SZ_DW(query_vport_state_in)];
36	u32 out[MLX5_ST_SZ_DW(query_vport_state_out)];
37	int err;
38
39	memset(in, 0, sizeof(in));
40
41	MLX5_SET(query_vport_state_in, in, opcode,
42		 MLX5_CMD_OP_QUERY_VPORT_STATE);
43	MLX5_SET(query_vport_state_in, in, op_mod, opmod);
44
45	err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out,
46					 sizeof(out));
47	if (err)
48		mlx5_core_warn(mdev, "MLX5_CMD_OP_QUERY_VPORT_STATE failed\n");
49
50	return MLX5_GET(query_vport_state_out, out, state);
51}
52EXPORT_SYMBOL_GPL(mlx5_query_vport_state);
53
54static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u32 vport,
55					u32 *out, int outlen)
56{
57	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
58
59	memset(in, 0, sizeof(in));
60
61	MLX5_SET(query_nic_vport_context_in, in, opcode,
62		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
63
64	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
65	if (vport)
66		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
67
68	return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
69}
70
71int mlx5_vport_alloc_q_counter(struct mlx5_core_dev *mdev, int *counter_set_id)
72{
73	u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)];
74	u32 out[MLX5_ST_SZ_DW(alloc_q_counter_in)];
75	int err;
76
77	memset(in, 0, sizeof(in));
78	memset(out, 0, sizeof(out));
79
80	MLX5_SET(alloc_q_counter_in, in, opcode,
81		 MLX5_CMD_OP_ALLOC_Q_COUNTER);
82
83	err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
84					 out, sizeof(out));
85
86	if (err)
87		return err;
88
89	*counter_set_id = MLX5_GET(alloc_q_counter_out, out,
90				   counter_set_id);
91	return err;
92}
93
94int mlx5_vport_dealloc_q_counter(struct mlx5_core_dev *mdev,
95				 int counter_set_id)
96{
97	u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)];
98	u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)];
99
100	memset(in, 0, sizeof(in));
101	memset(out, 0, sizeof(out));
102
103	MLX5_SET(dealloc_q_counter_in, in, opcode,
104		 MLX5_CMD_OP_DEALLOC_Q_COUNTER);
105	MLX5_SET(dealloc_q_counter_in, in, counter_set_id,
106		 counter_set_id);
107
108	return mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
109					  out, sizeof(out));
110}
111
112static int mlx5_vport_query_q_counter(struct mlx5_core_dev *mdev,
113				      int counter_set_id,
114				      int reset,
115				      void *out,
116				      int out_size)
117{
118	u32 in[MLX5_ST_SZ_DW(query_q_counter_in)];
119
120	memset(in, 0, sizeof(in));
121
122	MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
123	MLX5_SET(query_q_counter_in, in, clear, reset);
124	MLX5_SET(query_q_counter_in, in, counter_set_id, counter_set_id);
125
126	return mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
127					  out, out_size);
128}
129
130int mlx5_vport_query_out_of_rx_buffer(struct mlx5_core_dev *mdev,
131				      int counter_set_id,
132				      u32 *out_of_rx_buffer)
133{
134	u32 out[MLX5_ST_SZ_DW(query_q_counter_out)];
135	int err;
136
137	memset(out, 0, sizeof(out));
138
139	err = mlx5_vport_query_q_counter(mdev, counter_set_id, 0, out,
140					 sizeof(out));
141
142	if (err)
143		return err;
144
145	*out_of_rx_buffer = MLX5_GET(query_q_counter_out, out,
146				     out_of_buffer);
147	return err;
148}
149
150int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
151				     u32 vport, u8 *addr)
152{
153	u32 *out;
154	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
155	u8 *out_addr;
156	int err;
157
158	out = mlx5_vzalloc(outlen);
159	if (!out)
160		return -ENOMEM;
161
162	out_addr = MLX5_ADDR_OF(query_nic_vport_context_out, out,
163				nic_vport_context.permanent_address);
164
165	err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
166	if (err)
167		goto out;
168
169	ether_addr_copy(addr, &out_addr[2]);
170
171out:
172	kvfree(out);
173	return err;
174}
175EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_address);
176
177int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
178					   u64 *system_image_guid)
179{
180	u32 *out;
181	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
182	int err;
183
184	out = mlx5_vzalloc(outlen);
185	if (!out)
186		return -ENOMEM;
187
188	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
189	if (err)
190		goto out;
191
192	*system_image_guid = MLX5_GET64(query_nic_vport_context_out, out,
193					nic_vport_context.system_image_guid);
194out:
195	kvfree(out);
196	return err;
197}
198EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid);
199
200int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
201{
202	u32 *out;
203	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
204	int err;
205
206	out = mlx5_vzalloc(outlen);
207	if (!out)
208		return -ENOMEM;
209
210	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
211	if (err)
212		goto out;
213
214	*node_guid = MLX5_GET64(query_nic_vport_context_out, out,
215				nic_vport_context.node_guid);
216
217out:
218	kvfree(out);
219	return err;
220}
221EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);
222
223int mlx5_query_nic_vport_port_guid(struct mlx5_core_dev *mdev, u64 *port_guid)
224{
225	u32 *out;
226	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
227	int err;
228
229	out = mlx5_vzalloc(outlen);
230	if (!out)
231		return -ENOMEM;
232
233	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
234	if (err)
235		goto out;
236
237	*port_guid = MLX5_GET64(query_nic_vport_context_out, out,
238				nic_vport_context.port_guid);
239
240out:
241	kvfree(out);
242	return err;
243}
244EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_port_guid);
245
246int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
247					u16 *qkey_viol_cntr)
248{
249	u32 *out;
250	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
251	int err;
252
253	out = mlx5_vzalloc(outlen);
254	if (!out)
255		return -ENOMEM;
256
257	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
258	if (err)
259		goto out;
260
261	*qkey_viol_cntr = MLX5_GET(query_nic_vport_context_out, out,
262				nic_vport_context.qkey_violation_counter);
263
264out:
265	kvfree(out);
266	return err;
267}
268EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_qkey_viol_cntr);
269
270static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
271					 int inlen)
272{
273	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
274
275	MLX5_SET(modify_nic_vport_context_in, in, opcode,
276		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
277
278	memset(out, 0, sizeof(out));
279	return mlx5_cmd_exec_check_status(mdev, in, inlen, out, sizeof(out));
280}
281
282static int mlx5_nic_vport_enable_disable_roce(struct mlx5_core_dev *mdev,
283					      int enable_disable)
284{
285	void *in;
286	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
287	int err;
288
289	in = mlx5_vzalloc(inlen);
290	if (!in) {
291		mlx5_core_warn(mdev, "failed to allocate inbox\n");
292		return -ENOMEM;
293	}
294
295	MLX5_SET(modify_nic_vport_context_in, in, field_select.roce_en, 1);
296	MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.roce_en,
297		 enable_disable);
298
299	err = mlx5_modify_nic_vport_context(mdev, in, inlen);
300
301	kvfree(in);
302
303	return err;
304}
305
306int mlx5_set_nic_vport_current_mac(struct mlx5_core_dev *mdev, int vport,
307				   bool other_vport, u8 *addr)
308{
309	void *in;
310	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)
311		  + MLX5_ST_SZ_BYTES(mac_address_layout);
312	u8  *mac_layout;
313	u8  *mac_ptr;
314	int err;
315
316	in = mlx5_vzalloc(inlen);
317	if (!in) {
318		mlx5_core_warn(mdev, "failed to allocate inbox\n");
319		return -ENOMEM;
320	}
321
322	MLX5_SET(modify_nic_vport_context_in, in,
323		 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
324	MLX5_SET(modify_nic_vport_context_in, in,
325		 vport_number, vport);
326	MLX5_SET(modify_nic_vport_context_in, in,
327		 other_vport, other_vport);
328	MLX5_SET(modify_nic_vport_context_in, in,
329		 field_select.addresses_list, 1);
330	MLX5_SET(modify_nic_vport_context_in, in,
331		 nic_vport_context.allowed_list_type, 0);
332	MLX5_SET(modify_nic_vport_context_in, in,
333		 nic_vport_context.allowed_list_size, 1);
334
335	mac_layout = (u8 *)MLX5_ADDR_OF(modify_nic_vport_context_in, in,
336		nic_vport_context.current_uc_mac_address);
337	mac_ptr = (u8 *)MLX5_ADDR_OF(mac_address_layout, mac_layout,
338		mac_addr_47_32);
339	ether_addr_copy(mac_ptr, addr);
340
341	err = mlx5_modify_nic_vport_context(mdev, in, inlen);
342
343	kvfree(in);
344
345	return err;
346}
347EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_current_mac);
348int mlx5_set_nic_vport_permanent_mac(struct mlx5_core_dev *mdev, int vport,
349				     u8 *addr)
350{
351	void *in;
352	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
353	u8  *mac_ptr;
354	int err;
355
356	in = mlx5_vzalloc(inlen);
357	if (!in) {
358		mlx5_core_warn(mdev, "failed to allocate inbox\n");
359		return -ENOMEM;
360	}
361
362	MLX5_SET(modify_nic_vport_context_in, in,
363		 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
364	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
365	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
366	MLX5_SET(modify_nic_vport_context_in, in,
367		 field_select.permanent_address, 1);
368	mac_ptr = (u8 *)MLX5_ADDR_OF(modify_nic_vport_context_in, in,
369		nic_vport_context.permanent_address.mac_addr_47_32);
370	ether_addr_copy(mac_ptr, addr);
371
372	err = mlx5_modify_nic_vport_context(mdev, in, inlen);
373
374	kvfree(in);
375
376	return err;
377}
378EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_permanent_mac);
379
/* Enable RoCE on our NIC vport (roce_en = 1). */
int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev)
{
	return (mlx5_nic_vport_enable_disable_roce(mdev, 1));
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_enable_roce);
385
/* Disable RoCE on our NIC vport (roce_en = 0). */
int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev)
{
	return (mlx5_nic_vport_enable_disable_roce(mdev, 0));
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_disable_roce);
391
392int mlx5_query_hca_vport_context(struct mlx5_core_dev *mdev,
393				 u8 port_num, u8 vport_num, u32 *out,
394				 int outlen)
395{
396	u32 in[MLX5_ST_SZ_DW(query_hca_vport_context_in)];
397	int is_group_manager;
398
399	is_group_manager = MLX5_CAP_GEN(mdev, vport_group_manager);
400
401	memset(in, 0, sizeof(in));
402
403	MLX5_SET(query_hca_vport_context_in, in, opcode,
404		 MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT);
405
406	if (vport_num) {
407		if (is_group_manager) {
408			MLX5_SET(query_hca_vport_context_in, in, other_vport,
409				 1);
410			MLX5_SET(query_hca_vport_context_in, in, vport_number,
411				 vport_num);
412		} else {
413			return -EPERM;
414		}
415	}
416
417	if (MLX5_CAP_GEN(mdev, num_ports) == 2)
418		MLX5_SET(query_hca_vport_context_in, in, port_num, port_num);
419
420	return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
421}
422
423int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *mdev,
424					   u64 *system_image_guid)
425{
426	u32 *out;
427	int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
428	int err;
429
430	out = mlx5_vzalloc(outlen);
431	if (!out)
432		return -ENOMEM;
433
434	err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
435	if (err)
436		goto out;
437
438	*system_image_guid = MLX5_GET64(query_hca_vport_context_out, out,
439					hca_vport_context.system_image_guid);
440
441out:
442	kvfree(out);
443	return err;
444}
445EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_system_image_guid);
446
447int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
448{
449	u32 *out;
450	int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
451	int err;
452
453	out = mlx5_vzalloc(outlen);
454	if (!out)
455		return -ENOMEM;
456
457	err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
458	if (err)
459		goto out;
460
461	*node_guid = MLX5_GET64(query_hca_vport_context_out, out,
462				hca_vport_context.node_guid);
463
464out:
465	kvfree(out);
466	return err;
467}
468EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid);
469
/*
 * Query GID table entries of an HCA vport.
 *
 * @gid_index: index to read, or 0xffff to request the whole table
 *             (tbsz entries are reserved in the output mailbox).
 * On success, the subnet prefix and interface id of the first returned
 * GID are copied into @gid.  Returns 0 or a negative errno.
 */
int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 port_num,
			     u16 vport_num, u16 gid_index, union ib_gid *gid)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_in);
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
	int is_group_manager;
	void *out = NULL;
	void *in = NULL;
	union ib_gid *tmp;
	int tbsz;
	int nout;
	int err;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
	tbsz = mlx5_get_gid_table_len(MLX5_CAP_GEN(dev, gid_table_size));

	if (gid_index > tbsz && gid_index != 0xffff)
		return -EINVAL;

	/* 0xffff means "query the whole table". */
	if (gid_index == 0xffff)
		nout = tbsz;
	else
		nout = 1;

	/* Extend the output mailbox with room for the requested GIDs. */
	out_sz += nout * sizeof(*gid);

	in = mlx5_vzalloc(in_sz);
	out = mlx5_vzalloc(out_sz);
	if (!in || !out) {
		err = -ENOMEM;
		goto out;
	}

	MLX5_SET(query_hca_vport_gid_in, in, opcode,
		 MLX5_CMD_OP_QUERY_HCA_VPORT_GID);
	if (vport_num) {
		/* Reading another vport's GIDs needs group-manager rights. */
		if (is_group_manager) {
			MLX5_SET(query_hca_vport_gid_in, in, vport_number,
				 vport_num);
			MLX5_SET(query_hca_vport_gid_in, in, other_vport, 1);
		} else {
			err = -EPERM;
			goto out;
		}
	}

	MLX5_SET(query_hca_vport_gid_in, in, gid_index, gid_index);

	/* port_num is only meaningful on dual-port devices. */
	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_hca_vport_gid_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
	if (err)
		goto out;

	err = mlx5_cmd_status_to_err_v2(out);
	if (err)
		goto out;

	/* NOTE(review): only the first returned GID is copied out, even
	 * when the whole table was requested via gid_index == 0xffff. */
	tmp = (union ib_gid *)MLX5_ADDR_OF(query_hca_vport_gid_out, out, gid);
	gid->global.subnet_prefix = tmp->global.subnet_prefix;
	gid->global.interface_id = tmp->global.interface_id;

out:
	kvfree(in);
	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_gid);
539
540int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
541			      u8 port_num, u16 vf_num, u16 pkey_index,
542			      u16 *pkey)
543{
544	int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_in);
545	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_out);
546	int is_group_manager;
547	void *out = NULL;
548	void *in = NULL;
549	void *pkarr;
550	int nout;
551	int tbsz;
552	int err;
553	int i;
554
555	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
556
557	tbsz = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size));
558	if (pkey_index > tbsz && pkey_index != 0xffff)
559		return -EINVAL;
560
561	if (pkey_index == 0xffff)
562		nout = tbsz;
563	else
564		nout = 1;
565
566	out_sz += nout * MLX5_ST_SZ_BYTES(pkey);
567
568	in = kzalloc(in_sz, GFP_KERNEL);
569	out = kzalloc(out_sz, GFP_KERNEL);
570
571	MLX5_SET(query_hca_vport_pkey_in, in, opcode,
572		 MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY);
573	if (other_vport) {
574		if (is_group_manager) {
575			MLX5_SET(query_hca_vport_pkey_in, in, vport_number,
576				 vf_num);
577			MLX5_SET(query_hca_vport_pkey_in, in, other_vport, 1);
578		} else {
579			err = -EPERM;
580			goto out;
581		}
582	}
583	MLX5_SET(query_hca_vport_pkey_in, in, pkey_index, pkey_index);
584
585	if (MLX5_CAP_GEN(dev, num_ports) == 2)
586		MLX5_SET(query_hca_vport_pkey_in, in, port_num, port_num);
587
588	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
589	if (err)
590		goto out;
591
592	err = mlx5_cmd_status_to_err_v2(out);
593	if (err)
594		goto out;
595
596	pkarr = MLX5_ADDR_OF(query_hca_vport_pkey_out, out, pkey);
597	for (i = 0; i < nout; i++, pkey++,
598	     pkarr += MLX5_ST_SZ_BYTES(pkey))
599		*pkey = MLX5_GET_PR(pkey, pkarr, pkey);
600
601out:
602	kfree(in);
603	kfree(out);
604	return err;
605}
606EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_pkey);
607
608static int mlx5_modify_eswitch_vport_context(struct mlx5_core_dev *mdev,
609					     u16 vport, void *in, int inlen)
610{
611	u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)];
612	int err;
613
614	memset(out, 0, sizeof(out));
615
616	MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
617	if (vport)
618		MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
619
620	MLX5_SET(modify_esw_vport_context_in, in, opcode,
621		 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
622
623	err = mlx5_cmd_exec_check_status(mdev, in, inlen,
624					 out, sizeof(out));
625	if (err)
626		mlx5_core_warn(mdev, "MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT failed\n");
627
628	return err;
629}
630
631int mlx5_set_eswitch_cvlan_info(struct mlx5_core_dev *mdev, u8 vport,
632				u8 insert_mode, u8 strip_mode,
633				u16 vlan, u8 cfi, u8 pcp)
634{
635	u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)];
636
637	memset(in, 0, sizeof(in));
638
639	if (insert_mode != MLX5_MODIFY_ESW_VPORT_CONTEXT_CVLAN_INSERT_NONE) {
640		MLX5_SET(modify_esw_vport_context_in, in,
641			 esw_vport_context.cvlan_cfi, cfi);
642		MLX5_SET(modify_esw_vport_context_in, in,
643			 esw_vport_context.cvlan_pcp, pcp);
644		MLX5_SET(modify_esw_vport_context_in, in,
645			 esw_vport_context.cvlan_id, vlan);
646	}
647
648	MLX5_SET(modify_esw_vport_context_in, in,
649		 esw_vport_context.vport_cvlan_insert, insert_mode);
650
651	MLX5_SET(modify_esw_vport_context_in, in,
652		 esw_vport_context.vport_cvlan_strip, strip_mode);
653
654	MLX5_SET(modify_esw_vport_context_in, in, field_select,
655		 MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_CVLAN_STRIP |
656		 MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_CVLAN_INSERT);
657
658	return mlx5_modify_eswitch_vport_context(mdev, vport, in, sizeof(in));
659}
660EXPORT_SYMBOL_GPL(mlx5_set_eswitch_cvlan_info);
661
662int mlx5_query_vport_counter(struct mlx5_core_dev *dev,
663			     u8 port_num, u16 vport_num,
664			     void *out, int out_size)
665{
666	int in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in);
667	int is_group_manager;
668	void *in;
669	int err;
670
671	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
672
673	in = mlx5_vzalloc(in_sz);
674	if (!in)
675		return -ENOMEM;
676
677	MLX5_SET(query_vport_counter_in, in, opcode,
678		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
679	if (vport_num) {
680		if (is_group_manager) {
681			MLX5_SET(query_vport_counter_in, in, other_vport, 1);
682			MLX5_SET(query_vport_counter_in, in, vport_number,
683				 vport_num);
684		} else {
685			err = -EPERM;
686			goto ex;
687		}
688	}
689	if (MLX5_CAP_GEN(dev, num_ports) == 2)
690		MLX5_SET(query_vport_counter_in, in, port_num, port_num);
691
692	err = mlx5_cmd_exec(dev, in, in_sz, out,  out_size);
693	if (err)
694		goto ex;
695	err = mlx5_cmd_status_to_err_v2(out);
696	if (err)
697		goto ex;
698
699ex:
700	kvfree(in);
701	return err;
702}
703EXPORT_SYMBOL_GPL(mlx5_query_vport_counter);
704
/*
 * Query our own vport counters on @port_num and unpack every field of
 * the QUERY_VPORT_COUNTER output into the caller's @vc structure.
 * Returns 0 on success or a negative errno.
 */
int mlx5_get_vport_counters(struct mlx5_core_dev *dev, u8 port_num,
			    struct mlx5_vport_counters *vc)
{
	int out_sz = MLX5_ST_SZ_BYTES(query_vport_counter_out);
	void *out;
	int err;

	out = mlx5_vzalloc(out_sz);
	if (!out)
		return -ENOMEM;

	/* vport_num 0 == our own vport. */
	err = mlx5_query_vport_counter(dev, port_num, 0, out, out_sz);
	if (err)
		goto ex;

	/* Error counters. */
	vc->received_errors.packets =
		MLX5_GET64(query_vport_counter_out,
			   out, received_errors.packets);
	vc->received_errors.octets =
		MLX5_GET64(query_vport_counter_out,
			   out, received_errors.octets);
	vc->transmit_errors.packets =
		MLX5_GET64(query_vport_counter_out,
			   out, transmit_errors.packets);
	vc->transmit_errors.octets =
		MLX5_GET64(query_vport_counter_out,
			   out, transmit_errors.octets);
	/* InfiniBand unicast/multicast traffic. */
	vc->received_ib_unicast.packets =
		MLX5_GET64(query_vport_counter_out,
			   out, received_ib_unicast.packets);
	vc->received_ib_unicast.octets =
		MLX5_GET64(query_vport_counter_out,
			   out, received_ib_unicast.octets);
	vc->transmitted_ib_unicast.packets =
		MLX5_GET64(query_vport_counter_out,
			   out, transmitted_ib_unicast.packets);
	vc->transmitted_ib_unicast.octets =
		MLX5_GET64(query_vport_counter_out,
			   out, transmitted_ib_unicast.octets);
	vc->received_ib_multicast.packets =
		MLX5_GET64(query_vport_counter_out,
			   out, received_ib_multicast.packets);
	vc->received_ib_multicast.octets =
		MLX5_GET64(query_vport_counter_out,
			   out, received_ib_multicast.octets);
	vc->transmitted_ib_multicast.packets =
		MLX5_GET64(query_vport_counter_out,
			   out, transmitted_ib_multicast.packets);
	vc->transmitted_ib_multicast.octets =
		MLX5_GET64(query_vport_counter_out,
			   out, transmitted_ib_multicast.octets);
	/* Ethernet broadcast/unicast/multicast traffic. */
	vc->received_eth_broadcast.packets =
		MLX5_GET64(query_vport_counter_out,
			   out, received_eth_broadcast.packets);
	vc->received_eth_broadcast.octets =
		MLX5_GET64(query_vport_counter_out,
			   out, received_eth_broadcast.octets);
	vc->transmitted_eth_broadcast.packets =
		MLX5_GET64(query_vport_counter_out,
			   out, transmitted_eth_broadcast.packets);
	vc->transmitted_eth_broadcast.octets =
		MLX5_GET64(query_vport_counter_out,
			   out, transmitted_eth_broadcast.octets);
	vc->received_eth_unicast.octets =
		MLX5_GET64(query_vport_counter_out,
			   out, received_eth_unicast.octets);
	vc->received_eth_unicast.packets =
		MLX5_GET64(query_vport_counter_out,
			   out, received_eth_unicast.packets);
	vc->transmitted_eth_unicast.octets =
		MLX5_GET64(query_vport_counter_out,
			   out, transmitted_eth_unicast.octets);
	vc->transmitted_eth_unicast.packets =
		MLX5_GET64(query_vport_counter_out,
			   out, transmitted_eth_unicast.packets);
	vc->received_eth_multicast.octets =
		MLX5_GET64(query_vport_counter_out,
			   out, received_eth_multicast.octets);
	vc->received_eth_multicast.packets =
		MLX5_GET64(query_vport_counter_out,
			   out, received_eth_multicast.packets);
	vc->transmitted_eth_multicast.octets =
		MLX5_GET64(query_vport_counter_out,
			   out, transmitted_eth_multicast.octets);
	vc->transmitted_eth_multicast.packets =
		MLX5_GET64(query_vport_counter_out,
			   out, transmitted_eth_multicast.packets);

ex:
	kvfree(out);
	return err;
}
797