mlx5_vport.c revision 329201
1/*-
2 * Copyright (c) 2013-2017, Mellanox Technologies, Ltd.  All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 *    notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 *    notice, this list of conditions and the following disclaimer in the
11 *    documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23 * SUCH DAMAGE.
24 *
25 * $FreeBSD: stable/11/sys/dev/mlx5/mlx5_core/mlx5_vport.c 329201 2018-02-13 14:45:05Z hselasky $
26 */
27
28#include <linux/etherdevice.h>
29#include <dev/mlx5/driver.h>
30#include <dev/mlx5/vport.h>
31#include "mlx5_core.h"
32
33static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
34					 int inlen);
35
36static int _mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod,
37				   u16 vport, u32 *out, int outlen)
38{
39	int err;
40	u32 in[MLX5_ST_SZ_DW(query_vport_state_in)];
41
42	memset(in, 0, sizeof(in));
43
44	MLX5_SET(query_vport_state_in, in, opcode,
45		 MLX5_CMD_OP_QUERY_VPORT_STATE);
46	MLX5_SET(query_vport_state_in, in, op_mod, opmod);
47	MLX5_SET(query_vport_state_in, in, vport_number, vport);
48	if (vport)
49		MLX5_SET(query_vport_state_in, in, other_vport, 1);
50
51	err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
52	if (err)
53		mlx5_core_warn(mdev, "MLX5_CMD_OP_QUERY_VPORT_STATE failed\n");
54
55	return err;
56}
57
58u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
59{
60	u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};
61
62	_mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out));
63
64	return MLX5_GET(query_vport_state_out, out, state);
65}
66EXPORT_SYMBOL_GPL(mlx5_query_vport_state);
67
68u8 mlx5_query_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
69{
70	u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};
71
72	_mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out));
73
74	return MLX5_GET(query_vport_state_out, out, admin_state);
75}
76EXPORT_SYMBOL(mlx5_query_vport_admin_state);
77
78int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
79				  u16 vport, u8 state)
80{
81	u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)];
82	u32 out[MLX5_ST_SZ_DW(modify_vport_state_out)];
83	int err;
84
85	memset(in, 0, sizeof(in));
86
87	MLX5_SET(modify_vport_state_in, in, opcode,
88		 MLX5_CMD_OP_MODIFY_VPORT_STATE);
89	MLX5_SET(modify_vport_state_in, in, op_mod, opmod);
90	MLX5_SET(modify_vport_state_in, in, vport_number, vport);
91
92	if (vport)
93		MLX5_SET(modify_vport_state_in, in, other_vport, 1);
94
95	MLX5_SET(modify_vport_state_in, in, admin_state, state);
96
97	err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out,
98					 sizeof(out));
99	if (err)
100		mlx5_core_warn(mdev, "MLX5_CMD_OP_MODIFY_VPORT_STATE failed\n");
101
102	return err;
103}
104EXPORT_SYMBOL(mlx5_modify_vport_admin_state);
105
106static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport,
107					u32 *out, int outlen)
108{
109	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
110
111	memset(in, 0, sizeof(in));
112
113	MLX5_SET(query_nic_vport_context_in, in, opcode,
114		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
115
116	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
117	if (vport)
118		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
119
120	return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
121}
122
123static u32 mlx5_vport_max_q_counter_allocator(struct mlx5_core_dev *mdev,
124					      int client_id)
125{
126	switch (client_id) {
127	case MLX5_INTERFACE_PROTOCOL_IB:
128		return (MLX5_CAP_GEN(mdev, max_qp_cnt) -
129			MLX5_QCOUNTER_SETS_NETDEV);
130	case MLX5_INTERFACE_PROTOCOL_ETH:
131		return MLX5_QCOUNTER_SETS_NETDEV;
132	default:
133		mlx5_core_warn(mdev, "Unknown Client: %d\n", client_id);
134		return 0;
135	}
136}
137
138int mlx5_vport_alloc_q_counter(struct mlx5_core_dev *mdev,
139			       int client_id, u16 *counter_set_id)
140{
141	u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)];
142	u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)];
143	int err;
144
145	if (mdev->num_q_counter_allocated[client_id] >
146	    mlx5_vport_max_q_counter_allocator(mdev, client_id))
147		return -EINVAL;
148
149	memset(in, 0, sizeof(in));
150	memset(out, 0, sizeof(out));
151
152	MLX5_SET(alloc_q_counter_in, in, opcode,
153		 MLX5_CMD_OP_ALLOC_Q_COUNTER);
154
155	err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
156					 out, sizeof(out));
157
158	if (!err)
159		*counter_set_id = MLX5_GET(alloc_q_counter_out, out,
160					   counter_set_id);
161
162	mdev->num_q_counter_allocated[client_id]++;
163
164	return err;
165}
166
167int mlx5_vport_dealloc_q_counter(struct mlx5_core_dev *mdev,
168				 int client_id, u16 counter_set_id)
169{
170	u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)];
171	u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)];
172	int err;
173
174	if (mdev->num_q_counter_allocated[client_id] <= 0)
175		return -EINVAL;
176
177	memset(in, 0, sizeof(in));
178	memset(out, 0, sizeof(out));
179
180	MLX5_SET(dealloc_q_counter_in, in, opcode,
181		 MLX5_CMD_OP_DEALLOC_Q_COUNTER);
182	MLX5_SET(dealloc_q_counter_in, in, counter_set_id,
183		 counter_set_id);
184
185	err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
186					 out, sizeof(out));
187
188	mdev->num_q_counter_allocated[client_id]--;
189
190	return err;
191}
192
193int mlx5_vport_query_q_counter(struct mlx5_core_dev *mdev,
194				      u16 counter_set_id,
195				      int reset,
196				      void *out,
197				      int out_size)
198{
199	u32 in[MLX5_ST_SZ_DW(query_q_counter_in)];
200
201	memset(in, 0, sizeof(in));
202
203	MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
204	MLX5_SET(query_q_counter_in, in, clear, reset);
205	MLX5_SET(query_q_counter_in, in, counter_set_id, counter_set_id);
206
207	return mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
208					  out, out_size);
209}
210
211int mlx5_vport_query_out_of_rx_buffer(struct mlx5_core_dev *mdev,
212				      u16 counter_set_id,
213				      u32 *out_of_rx_buffer)
214{
215	u32 out[MLX5_ST_SZ_DW(query_q_counter_out)];
216	int err;
217
218	memset(out, 0, sizeof(out));
219
220	err = mlx5_vport_query_q_counter(mdev, counter_set_id, 0, out,
221					 sizeof(out));
222
223	if (err)
224		return err;
225
226	*out_of_rx_buffer = MLX5_GET(query_q_counter_out, out,
227				     out_of_buffer);
228	return err;
229}
230
231int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
232				     u16 vport, u8 *addr)
233{
234	u32 *out;
235	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
236	u8 *out_addr;
237	int err;
238
239	out = mlx5_vzalloc(outlen);
240	if (!out)
241		return -ENOMEM;
242
243	out_addr = MLX5_ADDR_OF(query_nic_vport_context_out, out,
244				nic_vport_context.permanent_address);
245
246	err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
247	if (err)
248		goto out;
249
250	ether_addr_copy(addr, &out_addr[2]);
251
252out:
253	kvfree(out);
254	return err;
255}
256EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_address);
257
258int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
259				      u16 vport, u8 *addr)
260{
261	void *in;
262	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
263	int err;
264	void *nic_vport_ctx;
265	u8 *perm_mac;
266
267	in = mlx5_vzalloc(inlen);
268	if (!in) {
269		mlx5_core_warn(mdev, "failed to allocate inbox\n");
270		return -ENOMEM;
271	}
272
273	MLX5_SET(modify_nic_vport_context_in, in,
274		 field_select.permanent_address, 1);
275	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
276
277	if (vport)
278		MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
279
280	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
281				     in, nic_vport_context);
282	perm_mac = MLX5_ADDR_OF(nic_vport_context, nic_vport_ctx,
283				permanent_address);
284
285	ether_addr_copy(&perm_mac[2], addr);
286
287	err = mlx5_modify_nic_vport_context(mdev, in, inlen);
288
289	kvfree(in);
290
291	return err;
292}
293EXPORT_SYMBOL(mlx5_modify_nic_vport_mac_address);
294
295int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
296					   u64 *system_image_guid)
297{
298	u32 *out;
299	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
300	int err;
301
302	out = mlx5_vzalloc(outlen);
303	if (!out)
304		return -ENOMEM;
305
306	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
307	if (err)
308		goto out;
309
310	*system_image_guid = MLX5_GET64(query_nic_vport_context_out, out,
311					nic_vport_context.system_image_guid);
312out:
313	kvfree(out);
314	return err;
315}
316EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid);
317
318int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
319{
320	u32 *out;
321	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
322	int err;
323
324	out = mlx5_vzalloc(outlen);
325	if (!out)
326		return -ENOMEM;
327
328	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
329	if (err)
330		goto out;
331
332	*node_guid = MLX5_GET64(query_nic_vport_context_out, out,
333				nic_vport_context.node_guid);
334
335out:
336	kvfree(out);
337	return err;
338}
339EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);
340
341static int mlx5_query_nic_vport_port_guid(struct mlx5_core_dev *mdev,
342					  u64 *port_guid)
343{
344	u32 *out;
345	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
346	int err;
347
348	out = mlx5_vzalloc(outlen);
349	if (!out)
350		return -ENOMEM;
351
352	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
353	if (err)
354		goto out;
355
356	*port_guid = MLX5_GET64(query_nic_vport_context_out, out,
357				nic_vport_context.port_guid);
358
359out:
360	kvfree(out);
361	return err;
362}
363
364int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
365					u16 *qkey_viol_cntr)
366{
367	u32 *out;
368	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
369	int err;
370
371	out = mlx5_vzalloc(outlen);
372	if (!out)
373		return -ENOMEM;
374
375	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
376	if (err)
377		goto out;
378
379	*qkey_viol_cntr = MLX5_GET(query_nic_vport_context_out, out,
380				nic_vport_context.qkey_violation_counter);
381
382out:
383	kvfree(out);
384	return err;
385}
386EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_qkey_viol_cntr);
387
388static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
389					 int inlen)
390{
391	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
392
393	MLX5_SET(modify_nic_vport_context_in, in, opcode,
394		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
395
396	memset(out, 0, sizeof(out));
397	return mlx5_cmd_exec_check_status(mdev, in, inlen, out, sizeof(out));
398}
399
400static int mlx5_nic_vport_enable_disable_roce(struct mlx5_core_dev *mdev,
401					      int enable_disable)
402{
403	void *in;
404	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
405	int err;
406
407	in = mlx5_vzalloc(inlen);
408	if (!in) {
409		mlx5_core_warn(mdev, "failed to allocate inbox\n");
410		return -ENOMEM;
411	}
412
413	MLX5_SET(modify_nic_vport_context_in, in, field_select.roce_en, 1);
414	MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.roce_en,
415		 enable_disable);
416
417	err = mlx5_modify_nic_vport_context(mdev, in, inlen);
418
419	kvfree(in);
420
421	return err;
422}
423
/*
 * Replace the allowed unicast MAC list of "vport" with the single
 * address "addr" (list type UC, list size 1).  When "other_vport" is
 * true the command targets "vport" rather than the issuing function's
 * own vport.  Returns 0 on success or a negative errno on failure.
 */
int mlx5_set_nic_vport_current_mac(struct mlx5_core_dev *mdev, int vport,
				   bool other_vport, u8 *addr)
{
	void *in;
	/* Base command size plus room for exactly one MAC list entry. */
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)
		  + MLX5_ST_SZ_BYTES(mac_address_layout);
	u8  *mac_layout;
	u8  *mac_ptr;
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(mdev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, in,
		 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in,
		 vport_number, vport);
	MLX5_SET(modify_nic_vport_context_in, in,
		 other_vport, other_vport);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.addresses_list, 1);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.allowed_list_type,
		 MLX5_NIC_VPORT_LIST_TYPE_UC);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.allowed_list_size, 1);

	/* The 6-byte MAC starts at mac_addr_47_32 within the 8-byte
	 * mac_address_layout entry. */
	mac_layout = (u8 *)MLX5_ADDR_OF(modify_nic_vport_context_in, in,
		nic_vport_context.current_uc_mac_address);
	mac_ptr = (u8 *)MLX5_ADDR_OF(mac_address_layout, mac_layout,
		mac_addr_47_32);
	ether_addr_copy(mac_ptr, addr);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_current_mac);
467
/*
 * Set the node GUID of another vport ("vport" must be non-zero).  Only
 * the vport group manager may do this, and only when the device reports
 * the nic_vport_node_guid_modify eswitch capability.
 * Returns 0 on success or a negative errno on failure.
 */
int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
				    u32 vport, u64 node_guid)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	int err;
	void *nic_vport_context;

	/* Changing one's own (vport 0) node GUID is not supported here. */
	if (!vport)
		return -EINVAL;
	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
		return -EPERM;
	if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
		return -ENOTSUPP;

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(mdev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.node_guid, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);

	/* vport is known non-zero, so other_vport is always set. */
	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);

	nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
					 in, nic_vport_context);
	MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}
EXPORT_SYMBOL(mlx5_modify_nic_vport_node_guid);
506
/*
 * Set the port GUID of another vport ("vport" must be non-zero).  Only
 * the vport group manager may do this, and only when the device reports
 * the nic_vport_port_guid_modify eswitch capability.
 * Returns 0 on success or a negative errno on failure.
 */
int mlx5_modify_nic_vport_port_guid(struct mlx5_core_dev *mdev,
				    u32 vport, u64 port_guid)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	int err;
	void *nic_vport_context;

	/* Changing one's own (vport 0) port GUID is not supported here. */
	if (!vport)
		return -EINVAL;
	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
		return -EPERM;
	if (!MLX5_CAP_ESW(mdev, nic_vport_port_guid_modify))
		return -ENOTSUPP;

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(mdev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.port_guid, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);

	/* vport is known non-zero, so other_vport is always set. */
	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);

	nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
					 in, nic_vport_context);
	MLX5_SET64(nic_vport_context, nic_vport_context, port_guid, port_guid);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}
EXPORT_SYMBOL(mlx5_modify_nic_vport_port_guid);
545
/*
 * Replace the allowed VLAN list of "vport" with the "list_len" entries
 * of "vlan_list".  Returns 0 on success, -ENOSPC when the request
 * exceeds the device's VLAN list capacity, or a negative errno.
 */
int mlx5_set_nic_vport_vlan_list(struct mlx5_core_dev *dev, u16 vport,
				 u16 *vlan_list, int list_len)
{
	void *in, *ctx;
	int i, err;
	/* Base command size plus one vlan_layout entry per VLAN. */
	int  inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)
		+ MLX5_ST_SZ_BYTES(vlan_layout) * (int)list_len;

	int max_list_size = 1 << MLX5_CAP_GEN_MAX(dev, log_max_vlan_list);

	if (list_len > max_list_size) {
		mlx5_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n",
			       list_len, max_list_size);
		return -ENOSPC;
	}

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(dev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(modify_nic_vport_context_in, in,
			 other_vport, 1);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.addresses_list, 1);

	ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in, nic_vport_context);

	MLX5_SET(nic_vport_context, ctx, allowed_list_type,
		 MLX5_NIC_VPORT_LIST_TYPE_VLAN);
	MLX5_SET(nic_vport_context, ctx, allowed_list_size, list_len);

	/* The current_uc_mac_address[] slots double as generic allowed-list
	 * entries; here each one carries a vlan_layout. */
	for (i = 0; i < list_len; i++) {
		u8 *vlan_lout = MLX5_ADDR_OF(nic_vport_context, ctx,
					 current_uc_mac_address[i]);
		MLX5_SET(vlan_layout, vlan_lout, vlan, vlan_list[i]);
	}

	err = mlx5_modify_nic_vport_context(dev, in, inlen);

	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_vlan_list);
593
/*
 * Replace the allowed multicast MAC list of "vport" with the
 * "addr_list_len" addresses of "addr_list" (each packed in the low
 * bytes of a u64).  Returns 0 on success, -ENOSPC when the request
 * exceeds the device's multicast list capacity, or a negative errno.
 */
int mlx5_set_nic_vport_mc_list(struct mlx5_core_dev *mdev, int vport,
			       u64 *addr_list, size_t addr_list_len)
{
	void *in, *ctx;
	/* Base command size plus one mac_address_layout entry per address. */
	int  inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)
		  + MLX5_ST_SZ_BYTES(mac_address_layout) * (int)addr_list_len;
	int err;
	size_t i;
	int max_list_sz = 1 << MLX5_CAP_GEN_MAX(mdev, log_max_current_mc_list);

	if ((int)addr_list_len > max_list_sz) {
		mlx5_core_warn(mdev, "Requested list size (%d) > (%d) max_list_size\n",
			       (int)addr_list_len, max_list_sz);
		return -ENOSPC;
	}

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(mdev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(modify_nic_vport_context_in, in,
			 other_vport, 1);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.addresses_list, 1);

	ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in, nic_vport_context);

	MLX5_SET(nic_vport_context, ctx, allowed_list_type,
		 MLX5_NIC_VPORT_LIST_TYPE_MC);
	MLX5_SET(nic_vport_context, ctx, allowed_list_size, addr_list_len);

	/* The current_uc_mac_address[] slots double as generic allowed-list
	 * entries; the 6-byte MAC starts at mac_addr_47_32. */
	for (i = 0; i < addr_list_len; i++) {
		u8 *mac_lout = (u8 *)MLX5_ADDR_OF(nic_vport_context, ctx,
						  current_uc_mac_address[i]);
		u8 *mac_ptr = (u8 *)MLX5_ADDR_OF(mac_address_layout, mac_lout,
						 mac_addr_47_32);
		ether_addr_copy(mac_ptr, (u8 *)&addr_list[i]);
	}

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_mc_list);
644
/*
 * Program the promiscuous-mode bits (multicast, unicast, all) of the
 * NIC vport context of "vport".  Bits not requested are cleared, since
 * the whole inbox is zeroed first.
 * Returns 0 on success or a negative errno on failure.
 */
int mlx5_set_nic_vport_promisc(struct mlx5_core_dev *mdev, int vport,
			       bool promisc_mc, bool promisc_uc,
			       bool promisc_all)
{
	u8  in[MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)];
	u8 *ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
			       nic_vport_context);

	memset(in, 0, MLX5_ST_SZ_BYTES(modify_nic_vport_context_in));

	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(modify_nic_vport_context_in, in,
			 other_vport, 1);
	MLX5_SET(modify_nic_vport_context_in, in, field_select.promisc, 1);
	if (promisc_mc)
		MLX5_SET(nic_vport_context, ctx, promisc_mc, 1);
	if (promisc_uc)
		MLX5_SET(nic_vport_context, ctx, promisc_uc, 1);
	if (promisc_all)
		MLX5_SET(nic_vport_context, ctx, promisc_all, 1);

	return mlx5_modify_nic_vport_context(mdev, in, sizeof(in));
}
EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_promisc);
670
/*
 * Read the allowed MAC list of the given type for "vport".  On input
 * *list_size is the capacity of "addr_list"; it is clamped to the
 * device maximum.  On success *list_size is set to the number of
 * entries actually returned and copied into "addr_list".
 * Returns 0 on success or a negative errno on failure.
 */
int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
				  u16 vport,
				  enum mlx5_list_type list_type,
				  u8 addr_list[][ETH_ALEN],
				  int *list_size)
{
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
	void *nic_vport_ctx;
	int max_list_size;
	int req_list_size;
	int out_sz;
	void *out;
	int err;
	int i;

	req_list_size = *list_size;

	max_list_size = (list_type == MLX5_NIC_VPORT_LIST_TYPE_UC) ?
			1 << MLX5_CAP_GEN_MAX(dev, log_max_current_uc_list) :
			1 << MLX5_CAP_GEN_MAX(dev, log_max_current_mc_list);

	if (req_list_size > max_list_size) {
		mlx5_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n",
			       req_list_size, max_list_size);
		req_list_size = max_list_size;
	}

	/* NOTE(review): out_sz is based on modify_nic_vport_context_in
	 * although the buffer is parsed below as
	 * query_nic_vport_context_out.  The modify-in layout appears to be
	 * the larger of the two, so this over-allocates rather than
	 * truncates — confirm against the mlx5_ifc structure layouts. */
	out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
		 req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout);

	memset(in, 0, sizeof(in));
	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	MLX5_SET(query_nic_vport_context_in, in, allowed_list_type, list_type);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);

	if (vport)
		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);

	err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, out_sz);
	if (err)
		goto out;

	nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
				     nic_vport_context);
	/* Firmware reports how many entries it actually returned. */
	req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
				 allowed_list_size);

	*list_size = req_list_size;
	for (i = 0; i < req_list_size; i++) {
		/* +2 skips the leading pad of the 8-byte MAC entry. */
		u8 *mac_addr = MLX5_ADDR_OF(nic_vport_context,
					nic_vport_ctx,
					current_uc_mac_address[i]) + 2;
		ether_addr_copy(addr_list[i], mac_addr);
	}
out:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_list);
735
/*
 * Replace the allowed MAC list of the given type (on the issuing
 * function's own vport) with the "list_size" entries of "addr_list".
 * Returns 0 on success, -ENOSPC when the request exceeds the device
 * capacity, or a negative errno on failure.
 */
int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
				   enum mlx5_list_type list_type,
				   u8 addr_list[][ETH_ALEN],
				   int list_size)
{
	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
	void *nic_vport_ctx;
	int max_list_size;
	int in_sz;
	void *in;
	int err;
	int i;

	/* NOTE(review): this uses MLX5_CAP_GEN while the query path uses
	 * MLX5_CAP_GEN_MAX for the same limits — confirm which is intended. */
	max_list_size = list_type == MLX5_NIC_VPORT_LIST_TYPE_UC ?
		 1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) :
		 1 << MLX5_CAP_GEN(dev, log_max_current_mc_list);

	if (list_size > max_list_size)
		return -ENOSPC;

	/* Base command size plus one mac_address_layout entry per address. */
	in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
		list_size * MLX5_ST_SZ_BYTES(mac_address_layout);

	memset(out, 0, sizeof(out));
	in = kzalloc(in_sz, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.addresses_list, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
				     nic_vport_context);

	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_type, list_type);
	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_size, list_size);

	for (i = 0; i < list_size; i++) {
		/* +2 skips the leading pad of the 8-byte MAC entry. */
		u8 *curr_mac = MLX5_ADDR_OF(nic_vport_context,
					    nic_vport_ctx,
					    current_uc_mac_address[i]) + 2;
		ether_addr_copy(curr_mac, addr_list[i]);
	}

	err = mlx5_cmd_exec_check_status(dev, in, in_sz, out, sizeof(out));
	kfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_list);
789
/*
 * Read the allowed VLAN list of "vport".  On input *size is the
 * capacity of "vlans"; it is clamped to the device maximum.  On success
 * *size is set to the number of entries actually returned.
 * Returns 0 on success or a negative errno on failure.
 */
int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev,
			       u16 vport,
			       u16 vlans[],
			       int *size)
{
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
	void *nic_vport_ctx;
	int req_list_size;
	int max_list_size;
	int out_sz;
	void *out;
	int err;
	int i;

	req_list_size = *size;
	max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);
	if (req_list_size > max_list_size) {
		mlx5_core_warn(dev, "Requested list size (%d) > (%d) max list size\n",
			       req_list_size, max_list_size);
		req_list_size = max_list_size;
	}

	/* NOTE(review): out_sz is based on modify_nic_vport_context_in
	 * although the buffer is parsed below as
	 * query_nic_vport_context_out; the modify-in layout appears to be
	 * the larger one, so this over-allocates — confirm against the
	 * mlx5_ifc structure layouts. */
	out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
		 req_list_size * MLX5_ST_SZ_BYTES(vlan_layout);

	memset(in, 0, sizeof(in));
	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	MLX5_SET(query_nic_vport_context_in, in, allowed_list_type,
		 MLX5_NIC_VPORT_CONTEXT_ALLOWED_LIST_TYPE_VLAN_LIST);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);

	if (vport)
		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);

	err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, out_sz);
	if (err)
		goto out;

	nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
				     nic_vport_context);
	/* Firmware reports how many entries it actually returned. */
	req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
				 allowed_list_size);

	*size = req_list_size;
	for (i = 0; i < req_list_size; i++) {
		/* current_uc_mac_address[] slots double as generic
		 * allowed-list entries; here each carries a vlan_layout. */
		void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
					       nic_vport_ctx,
					 current_uc_mac_address[i]);
		vlans[i] = MLX5_GET(vlan_layout, vlan_addr, vlan);
	}
out:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_vlans);
850
/*
 * Replace the allowed VLAN list (on the issuing function's own vport)
 * with the "list_size" entries of "vlans".  Returns 0 on success,
 * -ENOSPC when the request exceeds the device's VLAN list capacity, or
 * a negative errno on failure.
 */
int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
				u16 vlans[],
				int list_size)
{
	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
	void *nic_vport_ctx;
	int max_list_size;
	int in_sz;
	void *in;
	int err;
	int i;

	max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);

	if (list_size > max_list_size)
		return -ENOSPC;

	/* Base command size plus one vlan_layout entry per VLAN. */
	in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
		list_size * MLX5_ST_SZ_BYTES(vlan_layout);

	memset(out, 0, sizeof(out));
	in = kzalloc(in_sz, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.addresses_list, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
				     nic_vport_context);

	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_type, MLX5_NIC_VPORT_LIST_TYPE_VLAN);
	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_size, list_size);

	/* current_uc_mac_address[] slots double as generic allowed-list
	 * entries; here each one carries a vlan_layout. */
	for (i = 0; i < list_size; i++) {
		void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
					       nic_vport_ctx,
					       current_uc_mac_address[i]);
		MLX5_SET(vlan_layout, vlan_addr, vlan, vlans[i]);
	}

	err = mlx5_cmd_exec_check_status(dev, in, in_sz, out, sizeof(out));
	kfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_vlans);
901
902int mlx5_query_nic_vport_roce_en(struct mlx5_core_dev *mdev, u8 *enable)
903{
904	u32 *out;
905	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
906	int err;
907
908	out = kzalloc(outlen, GFP_KERNEL);
909	if (!out)
910		return -ENOMEM;
911
912	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
913	if (err)
914		goto out;
915
916	*enable = MLX5_GET(query_nic_vport_context_out, out,
917				nic_vport_context.roce_en);
918
919out:
920	kfree(out);
921	return err;
922}
923EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_roce_en);
924
/*
 * Program the permanent MAC address of another vport ("vport") to
 * "addr".  Unlike mlx5_modify_nic_vport_mac_address(), other_vport is
 * always set here, so this must not be used for vport 0.
 * Returns 0 on success or a negative errno on failure.
 */
int mlx5_set_nic_vport_permanent_mac(struct mlx5_core_dev *mdev, int vport,
				     u8 *addr)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	u8  *mac_ptr;
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(mdev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, in,
		 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.permanent_address, 1);
	/* mac_addr_47_32 is the start of the 6-byte MAC within the 8-byte
	 * permanent_address field. */
	mac_ptr = (u8 *)MLX5_ADDR_OF(modify_nic_vport_context_in, in,
		nic_vport_context.permanent_address.mac_addr_47_32);
	ether_addr_copy(mac_ptr, addr);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_permanent_mac);
956
/* Enable RoCE on the NIC vport context of the issuing function. */
int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev)
{
	return (mlx5_nic_vport_enable_disable_roce(mdev, 1));
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_enable_roce);
962
/* Disable RoCE on the NIC vport context of the issuing function. */
int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev)
{
	return (mlx5_nic_vport_enable_disable_roce(mdev, 0));
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_disable_roce);
968
/*
 * Execute QUERY_VPORT_COUNTER.  When "other_vport" is set, the caller
 * must be the vport group manager and VF "vf" is targeted (vport number
 * vf + 1); otherwise the issuing function's own counters are read.
 * "port_num" is only set on dual-port devices.  The raw command output
 * is placed in "out"/"out_sz".
 * Returns 0 on success or a negative errno on failure.
 */
int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
				  int vf, u8 port_num, void *out,
				  size_t out_sz)
{
	int	in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in);
	int	is_group_manager;
	void   *in;
	int	err;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
	in = mlx5_vzalloc(in_sz);
	if (!in) {
		err = -ENOMEM;
		return err;
	}

	MLX5_SET(query_vport_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(query_vport_counter_in, in, other_vport, 1);
			/* VF n is addressed as vport n + 1. */
			MLX5_SET(query_vport_counter_in, in, vport_number, vf + 1);
		} else {
			err = -EPERM;
			goto free;
		}
	}
	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_vport_counter_in, in, port_num, port_num);

	/* Raw mlx5_cmd_exec: the caller inspects the command status. */
	err = mlx5_cmd_exec(dev, in, in_sz, out,  out_sz);
free:
	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_query_vport_counter);
1005
/*
 * Read the HCA vport context of "vport_num" on "port_num" into
 * "out"/"outlen".  Querying a vport other than 0 requires the vport
 * group manager capability; otherwise -EPERM is returned.
 * Returns 0 on success or a negative errno on failure.
 */
int mlx5_query_hca_vport_context(struct mlx5_core_dev *mdev,
				 u8 port_num, u8 vport_num, u32 *out,
				 int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_hca_vport_context_in)];
	int is_group_manager;

	is_group_manager = MLX5_CAP_GEN(mdev, vport_group_manager);

	memset(in, 0, sizeof(in));

	MLX5_SET(query_hca_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT);

	/* Vport 0 means "self"; other vports need group manager rights. */
	if (vport_num) {
		if (is_group_manager) {
			MLX5_SET(query_hca_vport_context_in, in, other_vport,
				 1);
			MLX5_SET(query_hca_vport_context_in, in, vport_number,
				 vport_num);
		} else {
			return -EPERM;
		}
	}

	/* port_num is only meaningful on dual-port devices. */
	if (MLX5_CAP_GEN(mdev, num_ports) == 2)
		MLX5_SET(query_hca_vport_context_in, in, port_num, port_num);

	return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
}
1036
1037int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *mdev,
1038					   u64 *system_image_guid)
1039{
1040	u32 *out;
1041	int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
1042	int err;
1043
1044	out = mlx5_vzalloc(outlen);
1045	if (!out)
1046		return -ENOMEM;
1047
1048	err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
1049	if (err)
1050		goto out;
1051
1052	*system_image_guid = MLX5_GET64(query_hca_vport_context_out, out,
1053					hca_vport_context.system_image_guid);
1054
1055out:
1056	kvfree(out);
1057	return err;
1058}
1059EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_system_image_guid);
1060
1061int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
1062{
1063	u32 *out;
1064	int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
1065	int err;
1066
1067	out = mlx5_vzalloc(outlen);
1068	if (!out)
1069		return -ENOMEM;
1070
1071	err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
1072	if (err)
1073		goto out;
1074
1075	*node_guid = MLX5_GET64(query_hca_vport_context_out, out,
1076				hca_vport_context.node_guid);
1077
1078out:
1079	kvfree(out);
1080	return err;
1081}
1082EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid);
1083
1084static int mlx5_query_hca_vport_port_guid(struct mlx5_core_dev *mdev,
1085					  u64 *port_guid)
1086{
1087	u32 *out;
1088	int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
1089	int err;
1090
1091	out = mlx5_vzalloc(outlen);
1092	if (!out)
1093		return -ENOMEM;
1094
1095	err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
1096	if (err)
1097		goto out;
1098
1099	*port_guid = MLX5_GET64(query_hca_vport_context_out, out,
1100				hca_vport_context.port_guid);
1101
1102out:
1103	kvfree(out);
1104	return err;
1105}
1106
/*
 * Query one GID of an HCA vport, or size the command for the whole GID
 * table when gid_index == 0xffff.
 *
 * Even for a whole-table query only the first returned entry is copied
 * into *gid; the enlarged output buffer exists so the firmware reply
 * does not overrun.  Addressing another vport (vport_num != 0) requires
 * the vport_group_manager capability, otherwise -EPERM is returned.
 */
int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 port_num,
			     u16 vport_num, u16 gid_index, union ib_gid *gid)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_in);
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
	int is_group_manager;
	void *out = NULL;
	void *in = NULL;
	union ib_gid *tmp;
	int tbsz;	/* GID table length in entries */
	int nout;	/* number of entries requested from firmware */
	int err;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
	tbsz = mlx5_get_gid_table_len(MLX5_CAP_GEN(dev, gid_table_size));

	/*
	 * 0xffff is the "entire table" wildcard.
	 * NOTE(review): valid indices are 0..tbsz-1, so this check looks
	 * off-by-one (gid_index == tbsz passes) — matches upstream; confirm.
	 */
	if (gid_index > tbsz && gid_index != 0xffff)
		return -EINVAL;

	if (gid_index == 0xffff)
		nout = tbsz;
	else
		nout = 1;

	/* Room for the requested GID entries behind the fixed header. */
	out_sz += nout * sizeof(*gid);

	in = mlx5_vzalloc(in_sz);
	out = mlx5_vzalloc(out_sz);
	if (!in || !out) {
		err = -ENOMEM;
		goto out;
	}

	MLX5_SET(query_hca_vport_gid_in, in, opcode,
		 MLX5_CMD_OP_QUERY_HCA_VPORT_GID);
	if (vport_num) {
		if (is_group_manager) {
			MLX5_SET(query_hca_vport_gid_in, in, vport_number,
				 vport_num);
			MLX5_SET(query_hca_vport_gid_in, in, other_vport, 1);
		} else {
			err = -EPERM;
			goto out;
		}
	}

	MLX5_SET(query_hca_vport_gid_in, in, gid_index, gid_index);

	/* port_num is only meaningful on dual-port devices. */
	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_hca_vport_gid_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
	if (err)
		goto out;

	/* Translate the firmware status embedded in the reply. */
	err = mlx5_cmd_status_to_err_v2(out);
	if (err)
		goto out;

	/* Copy the first (or only) entry of the reply to the caller. */
	tmp = (union ib_gid *)MLX5_ADDR_OF(query_hca_vport_gid_out, out, gid);
	gid->global.subnet_prefix = tmp->global.subnet_prefix;
	gid->global.interface_id = tmp->global.interface_id;

out:
	kvfree(in);
	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_gid);
1176
1177int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
1178			      u8 port_num, u16 vf_num, u16 pkey_index,
1179			      u16 *pkey)
1180{
1181	int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_in);
1182	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_out);
1183	int is_group_manager;
1184	void *out = NULL;
1185	void *in = NULL;
1186	void *pkarr;
1187	int nout;
1188	int tbsz;
1189	int err;
1190	int i;
1191
1192	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
1193
1194	tbsz = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size));
1195	if (pkey_index > tbsz && pkey_index != 0xffff)
1196		return -EINVAL;
1197
1198	if (pkey_index == 0xffff)
1199		nout = tbsz;
1200	else
1201		nout = 1;
1202
1203	out_sz += nout * MLX5_ST_SZ_BYTES(pkey);
1204
1205	in = kzalloc(in_sz, GFP_KERNEL);
1206	out = kzalloc(out_sz, GFP_KERNEL);
1207
1208	MLX5_SET(query_hca_vport_pkey_in, in, opcode,
1209		 MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY);
1210	if (other_vport) {
1211		if (is_group_manager) {
1212			MLX5_SET(query_hca_vport_pkey_in, in, vport_number,
1213				 vf_num);
1214			MLX5_SET(query_hca_vport_pkey_in, in, other_vport, 1);
1215		} else {
1216			err = -EPERM;
1217			goto out;
1218		}
1219	}
1220	MLX5_SET(query_hca_vport_pkey_in, in, pkey_index, pkey_index);
1221
1222	if (MLX5_CAP_GEN(dev, num_ports) == 2)
1223		MLX5_SET(query_hca_vport_pkey_in, in, port_num, port_num);
1224
1225	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
1226	if (err)
1227		goto out;
1228
1229	err = mlx5_cmd_status_to_err_v2(out);
1230	if (err)
1231		goto out;
1232
1233	pkarr = MLX5_ADDR_OF(query_hca_vport_pkey_out, out, pkey);
1234	for (i = 0; i < nout; i++, pkey++,
1235	     pkarr += MLX5_ST_SZ_BYTES(pkey))
1236		*pkey = MLX5_GET_PR(pkey, pkarr, pkey);
1237
1238out:
1239	kfree(in);
1240	kfree(out);
1241	return err;
1242}
1243EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_pkey);
1244
1245static int mlx5_query_hca_min_wqe_header(struct mlx5_core_dev *mdev,
1246					 int *min_header)
1247{
1248	u32 *out;
1249	u32 outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
1250	int err;
1251
1252	out = mlx5_vzalloc(outlen);
1253	if (!out)
1254		return -ENOMEM;
1255
1256	err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
1257	if (err)
1258		goto out;
1259
1260	*min_header = MLX5_GET(query_hca_vport_context_out, out,
1261			       hca_vport_context.min_wqe_inline_mode);
1262
1263out:
1264	kvfree(out);
1265	return err;
1266}
1267
1268static int mlx5_modify_eswitch_vport_context(struct mlx5_core_dev *mdev,
1269					     u16 vport, void *in, int inlen)
1270{
1271	u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)];
1272	int err;
1273
1274	memset(out, 0, sizeof(out));
1275
1276	MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
1277	if (vport)
1278		MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
1279
1280	MLX5_SET(modify_esw_vport_context_in, in, opcode,
1281		 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
1282
1283	err = mlx5_cmd_exec_check_status(mdev, in, inlen,
1284					 out, sizeof(out));
1285	if (err)
1286		mlx5_core_warn(mdev, "MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT failed\n");
1287
1288	return err;
1289}
1290
1291int mlx5_set_eswitch_cvlan_info(struct mlx5_core_dev *mdev, u8 vport,
1292				u8 insert_mode, u8 strip_mode,
1293				u16 vlan, u8 cfi, u8 pcp)
1294{
1295	u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)];
1296
1297	memset(in, 0, sizeof(in));
1298
1299	if (insert_mode != MLX5_MODIFY_ESW_VPORT_CONTEXT_CVLAN_INSERT_NONE) {
1300		MLX5_SET(modify_esw_vport_context_in, in,
1301			 esw_vport_context.cvlan_cfi, cfi);
1302		MLX5_SET(modify_esw_vport_context_in, in,
1303			 esw_vport_context.cvlan_pcp, pcp);
1304		MLX5_SET(modify_esw_vport_context_in, in,
1305			 esw_vport_context.cvlan_id, vlan);
1306	}
1307
1308	MLX5_SET(modify_esw_vport_context_in, in,
1309		 esw_vport_context.vport_cvlan_insert, insert_mode);
1310
1311	MLX5_SET(modify_esw_vport_context_in, in,
1312		 esw_vport_context.vport_cvlan_strip, strip_mode);
1313
1314	MLX5_SET(modify_esw_vport_context_in, in, field_select,
1315		 MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_CVLAN_STRIP |
1316		 MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_CVLAN_INSERT);
1317
1318	return mlx5_modify_eswitch_vport_context(mdev, vport, in, sizeof(in));
1319}
1320EXPORT_SYMBOL_GPL(mlx5_set_eswitch_cvlan_info);
1321
1322int mlx5_query_vport_mtu(struct mlx5_core_dev *mdev, int *mtu)
1323{
1324	u32 *out;
1325	u32 outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
1326	int err;
1327
1328	out = mlx5_vzalloc(outlen);
1329	if (!out)
1330		return -ENOMEM;
1331
1332	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
1333	if (err)
1334		goto out;
1335
1336	*mtu = MLX5_GET(query_nic_vport_context_out, out,
1337			nic_vport_context.mtu);
1338
1339out:
1340	kvfree(out);
1341	return err;
1342}
1343EXPORT_SYMBOL_GPL(mlx5_query_vport_mtu);
1344
1345int mlx5_set_vport_mtu(struct mlx5_core_dev *mdev, int mtu)
1346{
1347	u32 *in;
1348	u32 inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
1349	int err;
1350
1351	in = mlx5_vzalloc(inlen);
1352	if (!in)
1353		return -ENOMEM;
1354
1355	MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1);
1356	MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu, mtu);
1357
1358	err = mlx5_modify_nic_vport_context(mdev, in, inlen);
1359
1360	kvfree(in);
1361	return err;
1362}
1363EXPORT_SYMBOL_GPL(mlx5_set_vport_mtu);
1364
1365static int mlx5_query_vport_min_wqe_header(struct mlx5_core_dev *mdev,
1366					   int *min_header)
1367{
1368	u32 *out;
1369	u32 outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
1370	int err;
1371
1372	out = mlx5_vzalloc(outlen);
1373	if (!out)
1374		return -ENOMEM;
1375
1376	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
1377	if (err)
1378		goto out;
1379
1380	*min_header = MLX5_GET(query_nic_vport_context_out, out,
1381			       nic_vport_context.min_wqe_inline_mode);
1382
1383out:
1384	kvfree(out);
1385	return err;
1386}
1387
1388int mlx5_set_vport_min_wqe_header(struct mlx5_core_dev *mdev,
1389				  u8 vport, int min_header)
1390{
1391	u32 *in;
1392	u32 inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
1393	int err;
1394
1395	in = mlx5_vzalloc(inlen);
1396	if (!in)
1397		return -ENOMEM;
1398
1399	MLX5_SET(modify_nic_vport_context_in, in,
1400		 field_select.min_wqe_inline_mode, 1);
1401	MLX5_SET(modify_nic_vport_context_in, in,
1402		 nic_vport_context.min_wqe_inline_mode, min_header);
1403	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
1404	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
1405
1406	err = mlx5_modify_nic_vport_context(mdev, in, inlen);
1407
1408	kvfree(in);
1409	return err;
1410}
1411EXPORT_SYMBOL_GPL(mlx5_set_vport_min_wqe_header);
1412
1413int mlx5_query_min_wqe_header(struct mlx5_core_dev *dev, int *min_header)
1414{
1415	switch (MLX5_CAP_GEN(dev, port_type)) {
1416	case MLX5_CMD_HCA_CAP_PORT_TYPE_IB:
1417		return mlx5_query_hca_min_wqe_header(dev, min_header);
1418
1419	case MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET:
1420		return mlx5_query_vport_min_wqe_header(dev, min_header);
1421
1422	default:
1423		return -EINVAL;
1424	}
1425}
1426EXPORT_SYMBOL_GPL(mlx5_query_min_wqe_header);
1427
1428int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
1429				 u16 vport,
1430				 int *promisc_uc,
1431				 int *promisc_mc,
1432				 int *promisc_all)
1433{
1434	u32 *out;
1435	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
1436	int err;
1437
1438	out = kzalloc(outlen, GFP_KERNEL);
1439	if (!out)
1440		return -ENOMEM;
1441
1442	err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
1443	if (err)
1444		goto out;
1445
1446	*promisc_uc = MLX5_GET(query_nic_vport_context_out, out,
1447			       nic_vport_context.promisc_uc);
1448	*promisc_mc = MLX5_GET(query_nic_vport_context_out, out,
1449			       nic_vport_context.promisc_mc);
1450	*promisc_all = MLX5_GET(query_nic_vport_context_out, out,
1451				nic_vport_context.promisc_all);
1452
1453out:
1454	kfree(out);
1455	return err;
1456}
1457EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_promisc);
1458
1459int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev,
1460				  int promisc_uc,
1461				  int promisc_mc,
1462				  int promisc_all)
1463{
1464	void *in;
1465	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
1466	int err;
1467
1468	in = mlx5_vzalloc(inlen);
1469	if (!in) {
1470		mlx5_core_err(mdev, "failed to allocate inbox\n");
1471		return -ENOMEM;
1472	}
1473
1474	MLX5_SET(modify_nic_vport_context_in, in, field_select.promisc, 1);
1475	MLX5_SET(modify_nic_vport_context_in, in,
1476		 nic_vport_context.promisc_uc, promisc_uc);
1477	MLX5_SET(modify_nic_vport_context_in, in,
1478		 nic_vport_context.promisc_mc, promisc_mc);
1479	MLX5_SET(modify_nic_vport_context_in, in,
1480		 nic_vport_context.promisc_all, promisc_all);
1481
1482	err = mlx5_modify_nic_vport_context(mdev, in, inlen);
1483	kvfree(in);
1484	return err;
1485}
1486EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_promisc);
1487
1488int mlx5_query_vport_counter(struct mlx5_core_dev *dev,
1489			     u8 port_num, u16 vport_num,
1490			     void *out, int out_size)
1491{
1492	int in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in);
1493	int is_group_manager;
1494	void *in;
1495	int err;
1496
1497	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
1498
1499	in = mlx5_vzalloc(in_sz);
1500	if (!in)
1501		return -ENOMEM;
1502
1503	MLX5_SET(query_vport_counter_in, in, opcode,
1504		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
1505	if (vport_num) {
1506		if (is_group_manager) {
1507			MLX5_SET(query_vport_counter_in, in, other_vport, 1);
1508			MLX5_SET(query_vport_counter_in, in, vport_number,
1509				 vport_num);
1510		} else {
1511			err = -EPERM;
1512			goto ex;
1513		}
1514	}
1515	if (MLX5_CAP_GEN(dev, num_ports) == 2)
1516		MLX5_SET(query_vport_counter_in, in, port_num, port_num);
1517
1518	err = mlx5_cmd_exec(dev, in, in_sz, out,  out_size);
1519	if (err)
1520		goto ex;
1521	err = mlx5_cmd_status_to_err_v2(out);
1522	if (err)
1523		goto ex;
1524
1525ex:
1526	kvfree(in);
1527	return err;
1528}
1529EXPORT_SYMBOL_GPL(mlx5_query_vport_counter);
1530
1531int mlx5_get_vport_counters(struct mlx5_core_dev *dev, u8 port_num,
1532			    struct mlx5_vport_counters *vc)
1533{
1534	int out_sz = MLX5_ST_SZ_BYTES(query_vport_counter_out);
1535	void *out;
1536	int err;
1537
1538	out = mlx5_vzalloc(out_sz);
1539	if (!out)
1540		return -ENOMEM;
1541
1542	err = mlx5_query_vport_counter(dev, port_num, 0, out, out_sz);
1543	if (err)
1544		goto ex;
1545
1546	vc->received_errors.packets =
1547		MLX5_GET64(query_vport_counter_out,
1548			   out, received_errors.packets);
1549	vc->received_errors.octets =
1550		MLX5_GET64(query_vport_counter_out,
1551			   out, received_errors.octets);
1552	vc->transmit_errors.packets =
1553		MLX5_GET64(query_vport_counter_out,
1554			   out, transmit_errors.packets);
1555	vc->transmit_errors.octets =
1556		MLX5_GET64(query_vport_counter_out,
1557			   out, transmit_errors.octets);
1558	vc->received_ib_unicast.packets =
1559		MLX5_GET64(query_vport_counter_out,
1560			   out, received_ib_unicast.packets);
1561	vc->received_ib_unicast.octets =
1562		MLX5_GET64(query_vport_counter_out,
1563			   out, received_ib_unicast.octets);
1564	vc->transmitted_ib_unicast.packets =
1565		MLX5_GET64(query_vport_counter_out,
1566			   out, transmitted_ib_unicast.packets);
1567	vc->transmitted_ib_unicast.octets =
1568		MLX5_GET64(query_vport_counter_out,
1569			   out, transmitted_ib_unicast.octets);
1570	vc->received_ib_multicast.packets =
1571		MLX5_GET64(query_vport_counter_out,
1572			   out, received_ib_multicast.packets);
1573	vc->received_ib_multicast.octets =
1574		MLX5_GET64(query_vport_counter_out,
1575			   out, received_ib_multicast.octets);
1576	vc->transmitted_ib_multicast.packets =
1577		MLX5_GET64(query_vport_counter_out,
1578			   out, transmitted_ib_multicast.packets);
1579	vc->transmitted_ib_multicast.octets =
1580		MLX5_GET64(query_vport_counter_out,
1581			   out, transmitted_ib_multicast.octets);
1582	vc->received_eth_broadcast.packets =
1583		MLX5_GET64(query_vport_counter_out,
1584			   out, received_eth_broadcast.packets);
1585	vc->received_eth_broadcast.octets =
1586		MLX5_GET64(query_vport_counter_out,
1587			   out, received_eth_broadcast.octets);
1588	vc->transmitted_eth_broadcast.packets =
1589		MLX5_GET64(query_vport_counter_out,
1590			   out, transmitted_eth_broadcast.packets);
1591	vc->transmitted_eth_broadcast.octets =
1592		MLX5_GET64(query_vport_counter_out,
1593			   out, transmitted_eth_broadcast.octets);
1594	vc->received_eth_unicast.octets =
1595		MLX5_GET64(query_vport_counter_out,
1596			   out, received_eth_unicast.octets);
1597	vc->received_eth_unicast.packets =
1598		MLX5_GET64(query_vport_counter_out,
1599			   out, received_eth_unicast.packets);
1600	vc->transmitted_eth_unicast.octets =
1601		MLX5_GET64(query_vport_counter_out,
1602			   out, transmitted_eth_unicast.octets);
1603	vc->transmitted_eth_unicast.packets =
1604		MLX5_GET64(query_vport_counter_out,
1605			   out, transmitted_eth_unicast.packets);
1606	vc->received_eth_multicast.octets =
1607		MLX5_GET64(query_vport_counter_out,
1608			   out, received_eth_multicast.octets);
1609	vc->received_eth_multicast.packets =
1610		MLX5_GET64(query_vport_counter_out,
1611			   out, received_eth_multicast.packets);
1612	vc->transmitted_eth_multicast.octets =
1613		MLX5_GET64(query_vport_counter_out,
1614			   out, transmitted_eth_multicast.octets);
1615	vc->transmitted_eth_multicast.packets =
1616		MLX5_GET64(query_vport_counter_out,
1617			   out, transmitted_eth_multicast.packets);
1618
1619ex:
1620	kvfree(out);
1621	return err;
1622}
1623
1624int mlx5_query_vport_system_image_guid(struct mlx5_core_dev *dev,
1625				       u64 *sys_image_guid)
1626{
1627	switch (MLX5_CAP_GEN(dev, port_type)) {
1628	case MLX5_CMD_HCA_CAP_PORT_TYPE_IB:
1629		return mlx5_query_hca_vport_system_image_guid(dev,
1630							      sys_image_guid);
1631
1632	case MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET:
1633		return mlx5_query_nic_vport_system_image_guid(dev,
1634							      sys_image_guid);
1635
1636	default:
1637		return -EINVAL;
1638	}
1639}
1640EXPORT_SYMBOL_GPL(mlx5_query_vport_system_image_guid);
1641
1642int mlx5_query_vport_node_guid(struct mlx5_core_dev *dev, u64 *node_guid)
1643{
1644	switch (MLX5_CAP_GEN(dev, port_type)) {
1645	case MLX5_CMD_HCA_CAP_PORT_TYPE_IB:
1646		return mlx5_query_hca_vport_node_guid(dev, node_guid);
1647
1648	case MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET:
1649		return mlx5_query_nic_vport_node_guid(dev, node_guid);
1650
1651	default:
1652		return -EINVAL;
1653	}
1654}
1655EXPORT_SYMBOL_GPL(mlx5_query_vport_node_guid);
1656
1657int mlx5_query_vport_port_guid(struct mlx5_core_dev *dev, u64 *port_guid)
1658{
1659	switch (MLX5_CAP_GEN(dev, port_type)) {
1660	case MLX5_CMD_HCA_CAP_PORT_TYPE_IB:
1661		return mlx5_query_hca_vport_port_guid(dev, port_guid);
1662
1663	case MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET:
1664		return mlx5_query_nic_vport_port_guid(dev, port_guid);
1665
1666	default:
1667		return -EINVAL;
1668	}
1669}
1670EXPORT_SYMBOL_GPL(mlx5_query_vport_port_guid);
1671
1672int mlx5_query_hca_vport_state(struct mlx5_core_dev *dev, u8 *vport_state)
1673{
1674	u32 *out;
1675	int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
1676	int err;
1677
1678	out = mlx5_vzalloc(outlen);
1679	if (!out)
1680		return -ENOMEM;
1681
1682	err = mlx5_query_hca_vport_context(dev, 1, 0, out, outlen);
1683	if (err)
1684		goto out;
1685
1686	*vport_state = MLX5_GET(query_hca_vport_context_out, out,
1687				hca_vport_context.vport_state);
1688
1689out:
1690	kvfree(out);
1691	return err;
1692}
1693EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_state);
1694