1/*
2 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses.  You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 *     Redistribution and use in source and binary forms, with or
11 *     without modification, are permitted provided that the following
12 *     conditions are met:
13 *
14 *      - Redistributions of source code must retain the above
15 *        copyright notice, this list of conditions and the following
16 *        disclaimer.
17 *
18 *      - Redistributions in binary form must reproduce the above
19 *        copyright notice, this list of conditions and the following
20 *        disclaimer in the documentation and/or other materials
21 *        provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#include <linux/export.h>
34#include <linux/etherdevice.h>
35#include <linux/mlx5/driver.h>
36#include <linux/mlx5/vport.h>
37#include <linux/mlx5/eswitch.h>
38#include "mlx5_core.h"
39#include "sf/sf.h"
40
41/* Mutex to hold while enabling or disabling RoCE */
42static DEFINE_MUTEX(mlx5_roce_en_lock);
43
44u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
45{
46	u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {};
47	u32 in[MLX5_ST_SZ_DW(query_vport_state_in)] = {};
48	int err;
49
50	MLX5_SET(query_vport_state_in, in, opcode,
51		 MLX5_CMD_OP_QUERY_VPORT_STATE);
52	MLX5_SET(query_vport_state_in, in, op_mod, opmod);
53	MLX5_SET(query_vport_state_in, in, vport_number, vport);
54	if (vport)
55		MLX5_SET(query_vport_state_in, in, other_vport, 1);
56
57	err = mlx5_cmd_exec_inout(mdev, query_vport_state, in, out);
58	if (err)
59		return 0;
60
61	return MLX5_GET(query_vport_state_out, out, state);
62}
63
64int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
65				  u16 vport, u8 other_vport, u8 state)
66{
67	u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)] = {};
68
69	MLX5_SET(modify_vport_state_in, in, opcode,
70		 MLX5_CMD_OP_MODIFY_VPORT_STATE);
71	MLX5_SET(modify_vport_state_in, in, op_mod, opmod);
72	MLX5_SET(modify_vport_state_in, in, vport_number, vport);
73	MLX5_SET(modify_vport_state_in, in, other_vport, other_vport);
74	MLX5_SET(modify_vport_state_in, in, admin_state, state);
75
76	return mlx5_cmd_exec_in(mdev, modify_vport_state, in);
77}
78
79static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport,
80					u32 *out)
81{
82	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {};
83
84	MLX5_SET(query_nic_vport_context_in, in, opcode,
85		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
86	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
87	if (vport)
88		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
89
90	return mlx5_cmd_exec_inout(mdev, query_nic_vport_context, in, out);
91}
92
93int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
94				    u16 vport, u8 *min_inline)
95{
96	u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {};
97	int err;
98
99	err = mlx5_query_nic_vport_context(mdev, vport, out);
100	if (!err)
101		*min_inline = MLX5_GET(query_nic_vport_context_out, out,
102				       nic_vport_context.min_wqe_inline_mode);
103	return err;
104}
105EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_min_inline);
106
/* Determine the device's minimum inline mode requirement for send WQEs.
 * Depending on the wqe_inline_mode capability, either read it from the
 * NIC vport context, fall back to L2 inlining, or report none required.
 * Note: if the capability reports a value outside the handled cases,
 * *min_inline_mode is left unmodified.
 */
void mlx5_query_min_inline(struct mlx5_core_dev *mdev,
			   u8 *min_inline_mode)
{
	switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		/* Prefer the per-vport setting; on query failure fall
		 * through to the conservative L2 default.
		 */
		if (!mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode))
			break;
		fallthrough;
	case MLX5_CAP_INLINE_MODE_L2:
		*min_inline_mode = MLX5_INLINE_MODE_L2;
		break;
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		*min_inline_mode = MLX5_INLINE_MODE_NONE;
		break;
	}
}
EXPORT_SYMBOL_GPL(mlx5_query_min_inline);
124
125int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
126				     u16 vport, u8 min_inline)
127{
128	u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {};
129	void *nic_vport_ctx;
130
131	MLX5_SET(modify_nic_vport_context_in, in,
132		 field_select.min_inline, 1);
133	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
134	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
135
136	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
137				     in, nic_vport_context);
138	MLX5_SET(nic_vport_context, nic_vport_ctx,
139		 min_wqe_inline_mode, min_inline);
140	MLX5_SET(modify_nic_vport_context_in, in, opcode,
141		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
142
143	return mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);
144}
145
146int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
147				     u16 vport, bool other, u8 *addr)
148{
149	u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {};
150	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {};
151	u8 *out_addr;
152	int err;
153
154	out_addr = MLX5_ADDR_OF(query_nic_vport_context_out, out,
155				nic_vport_context.permanent_address);
156
157	MLX5_SET(query_nic_vport_context_in, in, opcode,
158		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
159	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
160	MLX5_SET(query_nic_vport_context_in, in, other_vport, other);
161
162	err = mlx5_cmd_exec_inout(mdev, query_nic_vport_context, in, out);
163	if (!err)
164		ether_addr_copy(addr, &out_addr[2]);
165
166	return err;
167}
168EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_address);
169
/* Convenience wrapper: read this function's own (vport 0) MAC address. */
int mlx5_query_mac_address(struct mlx5_core_dev *mdev, u8 *addr)
{
	return mlx5_query_nic_vport_mac_address(mdev, 0, false, addr);
}
EXPORT_SYMBOL_GPL(mlx5_query_mac_address);
175
176int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
177				      u16 vport, const u8 *addr)
178{
179	void *in;
180	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
181	int err;
182	void *nic_vport_ctx;
183	u8 *perm_mac;
184
185	in = kvzalloc(inlen, GFP_KERNEL);
186	if (!in)
187		return -ENOMEM;
188
189	MLX5_SET(modify_nic_vport_context_in, in,
190		 field_select.permanent_address, 1);
191	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
192	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
193
194	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
195				     in, nic_vport_context);
196	perm_mac = MLX5_ADDR_OF(nic_vport_context, nic_vport_ctx,
197				permanent_address);
198
199	ether_addr_copy(&perm_mac[2], addr);
200	MLX5_SET(modify_nic_vport_context_in, in, opcode,
201		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
202
203	err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);
204
205	kvfree(in);
206
207	return err;
208}
209EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_address);
210
211int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
212{
213	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
214	u32 *out;
215	int err;
216
217	out = kvzalloc(outlen, GFP_KERNEL);
218	if (!out)
219		return -ENOMEM;
220
221	err = mlx5_query_nic_vport_context(mdev, 0, out);
222	if (!err)
223		*mtu = MLX5_GET(query_nic_vport_context_out, out,
224				nic_vport_context.mtu);
225
226	kvfree(out);
227	return err;
228}
229EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mtu);
230
231int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu)
232{
233	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
234	void *in;
235	int err;
236
237	in = kvzalloc(inlen, GFP_KERNEL);
238	if (!in)
239		return -ENOMEM;
240
241	MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1);
242	MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu, mtu);
243	MLX5_SET(modify_nic_vport_context_in, in, opcode,
244		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
245
246	err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);
247
248	kvfree(in);
249	return err;
250}
251EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mtu);
252
/* Read a vport's allowed UC or MC MAC address list.
 * On entry *list_size is the caller's capacity (in entries); on success
 * it is updated to the number of entries actually returned by firmware,
 * and addr_list[0..*list_size-1] holds the addresses.
 * Returns 0 on success or a negative errno.
 */
int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
				  u16 vport,
				  enum mlx5_list_type list_type,
				  u8 addr_list[][ETH_ALEN],
				  int *list_size)
{
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
	void *nic_vport_ctx;
	int max_list_size;
	int req_list_size;
	int out_sz;
	void *out;
	int err;
	int i;

	req_list_size = *list_size;

	/* Device capability caps how many entries can exist per type */
	max_list_size = list_type == MLX5_NVPRT_LIST_TYPE_UC ?
		1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) :
		1 << MLX5_CAP_GEN(dev, log_max_current_mc_list);

	if (req_list_size > max_list_size) {
		mlx5_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n",
			       req_list_size, max_list_size);
		req_list_size = max_list_size;
	}

	/* The output buffer carries the base struct plus one
	 * mac_address_layout per requested entry.
	 */
	out_sz = MLX5_ST_SZ_BYTES(query_nic_vport_context_out) +
			req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout);

	out = kvzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	MLX5_SET(query_nic_vport_context_in, in, allowed_list_type, list_type);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
	/* The ECPF always addresses vports as "other", even vport 0 */
	if (vport || mlx5_core_is_ecpf(dev))
		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
	if (err)
		goto out;

	nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
				     nic_vport_context);
	/* Firmware reports how many entries it actually filled in */
	req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
				 allowed_list_size);

	*list_size = req_list_size;
	for (i = 0; i < req_list_size; i++) {
		/* current_uc_mac_address is the layout used for both UC
		 * and MC lists; +2 skips the 2-byte pad before the MAC.
		 */
		u8 *mac_addr = MLX5_ADDR_OF(nic_vport_context,
					nic_vport_ctx,
					current_uc_mac_address[i]) + 2;
		ether_addr_copy(addr_list[i], mac_addr);
	}
out:
	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_list);
315
/* Replace this function's allowed UC or MC MAC address list with
 * @addr_list (first @list_size entries).
 * Returns 0 on success, -ENOSPC if @list_size exceeds the device
 * capability, or another negative errno.
 */
int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
				   enum mlx5_list_type list_type,
				   u8 addr_list[][ETH_ALEN],
				   int list_size)
{
	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {};
	void *nic_vport_ctx;
	int max_list_size;
	int in_sz;
	void *in;
	int err;
	int i;

	/* Device capability caps how many entries can exist per type */
	max_list_size = list_type == MLX5_NVPRT_LIST_TYPE_UC ?
		 1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) :
		 1 << MLX5_CAP_GEN(dev, log_max_current_mc_list);

	if (list_size > max_list_size)
		return -ENOSPC;

	/* Input buffer carries the base struct plus one
	 * mac_address_layout per entry.
	 */
	in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
		list_size * MLX5_ST_SZ_BYTES(mac_address_layout);

	in = kvzalloc(in_sz, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.addresses_list, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
				     nic_vport_context);

	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_type, list_type);
	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_size, list_size);

	for (i = 0; i < list_size; i++) {
		/* current_uc_mac_address is the layout used for both UC
		 * and MC lists; +2 skips the 2-byte pad before the MAC.
		 */
		u8 *curr_mac = MLX5_ADDR_OF(nic_vport_context,
					    nic_vport_ctx,
					    current_uc_mac_address[i]) + 2;
		ether_addr_copy(curr_mac, addr_list[i]);
	}

	err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_list);
368
369int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
370				u16 vlans[],
371				int list_size)
372{
373	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
374	void *nic_vport_ctx;
375	int max_list_size;
376	int in_sz;
377	void *in;
378	int err;
379	int i;
380
381	max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);
382
383	if (list_size > max_list_size)
384		return -ENOSPC;
385
386	in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
387		list_size * MLX5_ST_SZ_BYTES(vlan_layout);
388
389	memset(out, 0, sizeof(out));
390	in = kvzalloc(in_sz, GFP_KERNEL);
391	if (!in)
392		return -ENOMEM;
393
394	MLX5_SET(modify_nic_vport_context_in, in, opcode,
395		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
396	MLX5_SET(modify_nic_vport_context_in, in,
397		 field_select.addresses_list, 1);
398
399	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
400				     nic_vport_context);
401
402	MLX5_SET(nic_vport_context, nic_vport_ctx,
403		 allowed_list_type, MLX5_NVPRT_LIST_TYPE_VLAN);
404	MLX5_SET(nic_vport_context, nic_vport_ctx,
405		 allowed_list_size, list_size);
406
407	for (i = 0; i < list_size; i++) {
408		void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
409					       nic_vport_ctx,
410					       current_uc_mac_address[i]);
411		MLX5_SET(vlan_layout, vlan_addr, vlan, vlans[i]);
412	}
413
414	err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
415	kvfree(in);
416	return err;
417}
418EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_vlans);
419
420int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
421					   u64 *system_image_guid)
422{
423	u32 *out;
424	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
425	int err;
426
427	out = kvzalloc(outlen, GFP_KERNEL);
428	if (!out)
429		return -ENOMEM;
430
431	err = mlx5_query_nic_vport_context(mdev, 0, out);
432	if (err)
433		goto out;
434
435	*system_image_guid = MLX5_GET64(query_nic_vport_context_out, out,
436					nic_vport_context.system_image_guid);
437out:
438	kvfree(out);
439	return err;
440}
441EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid);
442
443int mlx5_query_nic_vport_sd_group(struct mlx5_core_dev *mdev, u8 *sd_group)
444{
445	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
446	u32 *out;
447	int err;
448
449	out = kvzalloc(outlen, GFP_KERNEL);
450	if (!out)
451		return -ENOMEM;
452
453	err = mlx5_query_nic_vport_context(mdev, 0, out);
454	if (err)
455		goto out;
456
457	*sd_group = MLX5_GET(query_nic_vport_context_out, out,
458			     nic_vport_context.sd_group);
459out:
460	kvfree(out);
461	return err;
462}
463
464int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
465{
466	u32 *out;
467	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
468
469	out = kvzalloc(outlen, GFP_KERNEL);
470	if (!out)
471		return -ENOMEM;
472
473	mlx5_query_nic_vport_context(mdev, 0, out);
474
475	*node_guid = MLX5_GET64(query_nic_vport_context_out, out,
476				nic_vport_context.node_guid);
477
478	kvfree(out);
479
480	return 0;
481}
482EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);
483
484int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
485				    u16 vport, u64 node_guid)
486{
487	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
488	void *nic_vport_context;
489	void *in;
490	int err;
491
492	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
493		return -EACCES;
494
495	in = kvzalloc(inlen, GFP_KERNEL);
496	if (!in)
497		return -ENOMEM;
498
499	MLX5_SET(modify_nic_vport_context_in, in,
500		 field_select.node_guid, 1);
501	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
502	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
503
504	nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
505					 in, nic_vport_context);
506	MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid);
507	MLX5_SET(modify_nic_vport_context_in, in, opcode,
508		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
509
510	err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);
511
512	kvfree(in);
513
514	return err;
515}
516
517int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
518					u16 *qkey_viol_cntr)
519{
520	u32 *out;
521	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
522
523	out = kvzalloc(outlen, GFP_KERNEL);
524	if (!out)
525		return -ENOMEM;
526
527	mlx5_query_nic_vport_context(mdev, 0, out);
528
529	*qkey_viol_cntr = MLX5_GET(query_nic_vport_context_out, out,
530				   nic_vport_context.qkey_violation_counter);
531
532	kvfree(out);
533
534	return 0;
535}
536EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_qkey_viol_cntr);
537
/* Query a GID from an HCA vport's GID table.
 * @other_vport: query @vf_num's table instead of our own (requires the
 *               vport_group_manager capability, else -EPERM).
 * @gid_index:   table index, or 0xffff to size the output for the whole
 *               table (only the first entry is copied back to @gid).
 * Returns 0 on success or a negative errno; *gid is written only on
 * success.
 */
int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
			     u8 port_num, u16  vf_num, u16 gid_index,
			     union ib_gid *gid)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_in);
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
	int is_group_manager;
	void *out = NULL;
	void *in = NULL;
	union ib_gid *tmp;
	int tbsz;
	int nout;
	int err;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
	tbsz = mlx5_get_gid_table_len(MLX5_CAP_GEN(dev, gid_table_size));
	mlx5_core_dbg(dev, "vf_num %d, index %d, gid_table_size %d\n",
		      vf_num, gid_index, tbsz);

	if (gid_index > tbsz && gid_index != 0xffff)
		return -EINVAL;

	/* 0xffff means "whole table": size the output accordingly */
	if (gid_index == 0xffff)
		nout = tbsz;
	else
		nout = 1;

	out_sz += nout * sizeof(*gid);

	in = kvzalloc(in_sz, GFP_KERNEL);
	out = kvzalloc(out_sz, GFP_KERNEL);
	if (!in || !out) {
		err = -ENOMEM;
		goto out;
	}

	MLX5_SET(query_hca_vport_gid_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_GID);
	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(query_hca_vport_gid_in, in, vport_number, vf_num);
			MLX5_SET(query_hca_vport_gid_in, in, other_vport, 1);
		} else {
			err = -EPERM;
			goto out;
		}
	}
	MLX5_SET(query_hca_vport_gid_in, in, gid_index, gid_index);

	/* port_num is only meaningful on dual-port devices */
	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_hca_vport_gid_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
	if (err)
		goto out;

	/* GID data starts right after the fixed-size output struct */
	tmp = out + MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
	gid->global.subnet_prefix = tmp->global.subnet_prefix;
	gid->global.interface_id = tmp->global.interface_id;

out:
	kvfree(in);
	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_gid);
603
/* Query P_Key(s) from an HCA vport's P_Key table.
 * @other_vport: query @vf_num's table instead of our own (requires the
 *               vport_group_manager capability, else -EPERM).
 * @pkey_index:  table index, or 0xffff to read the whole table; @pkey
 *               must then have room for the full table.
 * Returns 0 on success or a negative errno.
 */
int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
			      u8 port_num, u16 vf_num, u16 pkey_index,
			      u16 *pkey)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_in);
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_out);
	int is_group_manager;
	void *out = NULL;
	void *in = NULL;
	void *pkarr;
	int nout;
	int tbsz;
	int err;
	int i;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);

	tbsz = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size));
	if (pkey_index > tbsz && pkey_index != 0xffff)
		return -EINVAL;

	/* 0xffff means "whole table": size the output accordingly */
	if (pkey_index == 0xffff)
		nout = tbsz;
	else
		nout = 1;

	out_sz += nout * MLX5_ST_SZ_BYTES(pkey);

	in = kvzalloc(in_sz, GFP_KERNEL);
	out = kvzalloc(out_sz, GFP_KERNEL);
	if (!in || !out) {
		err = -ENOMEM;
		goto out;
	}

	MLX5_SET(query_hca_vport_pkey_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY);
	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(query_hca_vport_pkey_in, in, vport_number, vf_num);
			MLX5_SET(query_hca_vport_pkey_in, in, other_vport, 1);
		} else {
			err = -EPERM;
			goto out;
		}
	}
	MLX5_SET(query_hca_vport_pkey_in, in, pkey_index, pkey_index);

	/* port_num is only meaningful on dual-port devices */
	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_hca_vport_pkey_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
	if (err)
		goto out;

	/* Copy each returned pkey entry to the caller's array */
	pkarr = MLX5_ADDR_OF(query_hca_vport_pkey_out, out, pkey);
	for (i = 0; i < nout; i++, pkey++, pkarr += MLX5_ST_SZ_BYTES(pkey))
		*pkey = MLX5_GET_PR(pkey, pkarr, pkey);

out:
	kvfree(in);
	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_pkey);
668
669int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
670				 u8 other_vport, u8 port_num,
671				 u16 vf_num,
672				 struct mlx5_hca_vport_context *rep)
673{
674	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
675	int in[MLX5_ST_SZ_DW(query_hca_vport_context_in)] = {};
676	int is_group_manager;
677	void *out;
678	void *ctx;
679	int err;
680
681	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
682
683	out = kvzalloc(out_sz, GFP_KERNEL);
684	if (!out)
685		return -ENOMEM;
686
687	MLX5_SET(query_hca_vport_context_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT);
688
689	if (other_vport) {
690		if (is_group_manager) {
691			MLX5_SET(query_hca_vport_context_in, in, other_vport, 1);
692			MLX5_SET(query_hca_vport_context_in, in, vport_number, vf_num);
693		} else {
694			err = -EPERM;
695			goto ex;
696		}
697	}
698
699	if (MLX5_CAP_GEN(dev, num_ports) == 2)
700		MLX5_SET(query_hca_vport_context_in, in, port_num, port_num);
701
702	err = mlx5_cmd_exec_inout(dev, query_hca_vport_context, in, out);
703	if (err)
704		goto ex;
705
706	ctx = MLX5_ADDR_OF(query_hca_vport_context_out, out, hca_vport_context);
707	rep->field_select = MLX5_GET_PR(hca_vport_context, ctx, field_select);
708	rep->sm_virt_aware = MLX5_GET_PR(hca_vport_context, ctx, sm_virt_aware);
709	rep->has_smi = MLX5_GET_PR(hca_vport_context, ctx, has_smi);
710	rep->has_raw = MLX5_GET_PR(hca_vport_context, ctx, has_raw);
711	rep->policy = MLX5_GET_PR(hca_vport_context, ctx, vport_state_policy);
712	rep->phys_state = MLX5_GET_PR(hca_vport_context, ctx,
713				      port_physical_state);
714	rep->vport_state = MLX5_GET_PR(hca_vport_context, ctx, vport_state);
715	rep->port_physical_state = MLX5_GET_PR(hca_vport_context, ctx,
716					       port_physical_state);
717	rep->port_guid = MLX5_GET64_PR(hca_vport_context, ctx, port_guid);
718	rep->node_guid = MLX5_GET64_PR(hca_vport_context, ctx, node_guid);
719	rep->cap_mask1 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask1);
720	rep->cap_mask1_perm = MLX5_GET_PR(hca_vport_context, ctx,
721					  cap_mask1_field_select);
722	rep->cap_mask2 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask2);
723	rep->cap_mask2_perm = MLX5_GET_PR(hca_vport_context, ctx,
724					  cap_mask2_field_select);
725	rep->lid = MLX5_GET_PR(hca_vport_context, ctx, lid);
726	rep->init_type_reply = MLX5_GET_PR(hca_vport_context, ctx,
727					   init_type_reply);
728	rep->lmc = MLX5_GET_PR(hca_vport_context, ctx, lmc);
729	rep->subnet_timeout = MLX5_GET_PR(hca_vport_context, ctx,
730					  subnet_timeout);
731	rep->sm_lid = MLX5_GET_PR(hca_vport_context, ctx, sm_lid);
732	rep->sm_sl = MLX5_GET_PR(hca_vport_context, ctx, sm_sl);
733	rep->qkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx,
734						  qkey_violation_counter);
735	rep->pkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx,
736						  pkey_violation_counter);
737	rep->grh_required = MLX5_GET_PR(hca_vport_context, ctx, grh_required);
738	rep->sys_image_guid = MLX5_GET64_PR(hca_vport_context, ctx,
739					    system_image_guid);
740
741ex:
742	kvfree(out);
743	return err;
744}
745EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_context);
746
747int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev,
748					   u64 *sys_image_guid)
749{
750	struct mlx5_hca_vport_context *rep;
751	int err;
752
753	rep = kvzalloc(sizeof(*rep), GFP_KERNEL);
754	if (!rep)
755		return -ENOMEM;
756
757	err = mlx5_query_hca_vport_context(dev, 0, 1, 0, rep);
758	if (!err)
759		*sys_image_guid = rep->sys_image_guid;
760
761	kvfree(rep);
762	return err;
763}
764EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_system_image_guid);
765
766int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev,
767				   u64 *node_guid)
768{
769	struct mlx5_hca_vport_context *rep;
770	int err;
771
772	rep = kvzalloc(sizeof(*rep), GFP_KERNEL);
773	if (!rep)
774		return -ENOMEM;
775
776	err = mlx5_query_hca_vport_context(dev, 0, 1, 0, rep);
777	if (!err)
778		*node_guid = rep->node_guid;
779
780	kvfree(rep);
781	return err;
782}
783EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid);
784
785int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
786				 u16 vport,
787				 int *promisc_uc,
788				 int *promisc_mc,
789				 int *promisc_all)
790{
791	u32 *out;
792	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
793	int err;
794
795	out = kvzalloc(outlen, GFP_KERNEL);
796	if (!out)
797		return -ENOMEM;
798
799	err = mlx5_query_nic_vport_context(mdev, vport, out);
800	if (err)
801		goto out;
802
803	*promisc_uc = MLX5_GET(query_nic_vport_context_out, out,
804			       nic_vport_context.promisc_uc);
805	*promisc_mc = MLX5_GET(query_nic_vport_context_out, out,
806			       nic_vport_context.promisc_mc);
807	*promisc_all = MLX5_GET(query_nic_vport_context_out, out,
808				nic_vport_context.promisc_all);
809
810out:
811	kvfree(out);
812	return err;
813}
814EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_promisc);
815
816int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev,
817				  int promisc_uc,
818				  int promisc_mc,
819				  int promisc_all)
820{
821	void *in;
822	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
823	int err;
824
825	in = kvzalloc(inlen, GFP_KERNEL);
826	if (!in)
827		return -ENOMEM;
828
829	MLX5_SET(modify_nic_vport_context_in, in, field_select.promisc, 1);
830	MLX5_SET(modify_nic_vport_context_in, in,
831		 nic_vport_context.promisc_uc, promisc_uc);
832	MLX5_SET(modify_nic_vport_context_in, in,
833		 nic_vport_context.promisc_mc, promisc_mc);
834	MLX5_SET(modify_nic_vport_context_in, in,
835		 nic_vport_context.promisc_all, promisc_all);
836	MLX5_SET(modify_nic_vport_context_in, in, opcode,
837		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
838
839	err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);
840
841	kvfree(in);
842
843	return err;
844}
845EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_promisc);
846
/* Bit positions used by mlx5_nic_vport_query_local_lb() when folding
 * the two disable-local-loopback flags into one value.
 */
enum {
	UC_LOCAL_LB,
	MC_LOCAL_LB
};
851
/* Enable or disable local loopback for this function.
 * Each disable flag is only selected for modification when the
 * corresponding capability (disable_local_lb_mc / disable_local_lb_uc)
 * is present; if neither capability exists this is a successful no-op.
 */
int mlx5_nic_vport_update_local_lb(struct mlx5_core_dev *mdev, bool enable)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *in;
	int err;

	if (!MLX5_CAP_GEN(mdev, disable_local_lb_mc) &&
	    !MLX5_CAP_GEN(mdev, disable_local_lb_uc))
		return 0;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	/* The context carries "disable" flags, hence the inversion */
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.disable_mc_local_lb, !enable);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.disable_uc_local_lb, !enable);

	if (MLX5_CAP_GEN(mdev, disable_local_lb_mc))
		MLX5_SET(modify_nic_vport_context_in, in,
			 field_select.disable_mc_local_lb, 1);

	if (MLX5_CAP_GEN(mdev, disable_local_lb_uc))
		MLX5_SET(modify_nic_vport_context_in, in,
			 field_select.disable_uc_local_lb, 1);
	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);

	err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);

	if (!err)
		mlx5_core_dbg(mdev, "%s local_lb\n",
			      enable ? "enable" : "disable");

	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_update_local_lb);
891
892int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev, bool *status)
893{
894	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
895	u32 *out;
896	int value;
897	int err;
898
899	out = kvzalloc(outlen, GFP_KERNEL);
900	if (!out)
901		return -ENOMEM;
902
903	err = mlx5_query_nic_vport_context(mdev, 0, out);
904	if (err)
905		goto out;
906
907	value = MLX5_GET(query_nic_vport_context_out, out,
908			 nic_vport_context.disable_mc_local_lb) << MC_LOCAL_LB;
909
910	value |= MLX5_GET(query_nic_vport_context_out, out,
911			  nic_vport_context.disable_uc_local_lb) << UC_LOCAL_LB;
912
913	*status = !value;
914
915out:
916	kvfree(out);
917	return err;
918}
919EXPORT_SYMBOL_GPL(mlx5_nic_vport_query_local_lb);
920
/* Values written to nic_vport_context.roce_en by
 * mlx5_nic_vport_update_roce_state().
 */
enum mlx5_vport_roce_state {
	MLX5_VPORT_ROCE_DISABLED = 0,
	MLX5_VPORT_ROCE_ENABLED  = 1,
};
925
926static int mlx5_nic_vport_update_roce_state(struct mlx5_core_dev *mdev,
927					    enum mlx5_vport_roce_state state)
928{
929	void *in;
930	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
931	int err;
932
933	in = kvzalloc(inlen, GFP_KERNEL);
934	if (!in)
935		return -ENOMEM;
936
937	MLX5_SET(modify_nic_vport_context_in, in, field_select.roce_en, 1);
938	MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.roce_en,
939		 state);
940	MLX5_SET(modify_nic_vport_context_in, in, opcode,
941		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
942
943	err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);
944
945	kvfree(in);
946
947	return err;
948}
949
/* Take a reference on RoCE for this device; the firmware state is only
 * changed on the 0 -> 1 transition. The refcount (mdev->roce.roce_en)
 * is protected by mlx5_roce_en_lock, and is not incremented if the
 * firmware update fails.
 */
int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev)
{
	int err = 0;

	mutex_lock(&mlx5_roce_en_lock);
	if (!mdev->roce.roce_en)
		err = mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_ENABLED);

	if (!err)
		mdev->roce.roce_en++;
	mutex_unlock(&mlx5_roce_en_lock);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_enable_roce);
965
/* Drop a reference on RoCE for this device; the firmware state is only
 * changed on the 1 -> 0 transition. If disabling in firmware fails,
 * the refcount is restored so a later call can retry. A call with a
 * zero refcount is a successful no-op.
 */
int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev)
{
	int err = 0;

	mutex_lock(&mlx5_roce_en_lock);
	if (mdev->roce.roce_en) {
		mdev->roce.roce_en--;
		if (mdev->roce.roce_en == 0)
			err = mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_DISABLED);

		if (err)
			mdev->roce.roce_en++;
	}
	mutex_unlock(&mlx5_roce_en_lock);
	return err;
}
EXPORT_SYMBOL(mlx5_nic_vport_disable_roce);
983
/* Query traffic counters for a vport into caller-provided @out.
 * @other_vport: query VF @vf's counters instead of our own (requires
 *               the vport_group_manager capability, else -EPERM).
 *               Note the +1: VF numbering starts at vport 1.
 * @out must be sized for query_vport_counter_out.
 */
int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
				  int vf, u8 port_num, void *out)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in);
	int is_group_manager;
	void *in;
	int err;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
	in = kvzalloc(in_sz, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		return err;
	}

	MLX5_SET(query_vport_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(query_vport_counter_in, in, other_vport, 1);
			/* VF n lives on vport n + 1 (vport 0 is the PF) */
			MLX5_SET(query_vport_counter_in, in, vport_number, vf + 1);
		} else {
			err = -EPERM;
			goto free;
		}
	}
	/* port_num is only meaningful on dual-port devices */
	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_vport_counter_in, in, port_num, port_num);

	err = mlx5_cmd_exec_inout(dev, query_vport_counter, in, out);
free:
	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_query_vport_counter);
1019
1020int mlx5_query_vport_down_stats(struct mlx5_core_dev *mdev, u16 vport,
1021				u8 other_vport, u64 *rx_discard_vport_down,
1022				u64 *tx_discard_vport_down)
1023{
1024	u32 out[MLX5_ST_SZ_DW(query_vnic_env_out)] = {};
1025	u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
1026	int err;
1027
1028	MLX5_SET(query_vnic_env_in, in, opcode,
1029		 MLX5_CMD_OP_QUERY_VNIC_ENV);
1030	MLX5_SET(query_vnic_env_in, in, op_mod, 0);
1031	MLX5_SET(query_vnic_env_in, in, vport_number, vport);
1032	MLX5_SET(query_vnic_env_in, in, other_vport, other_vport);
1033
1034	err = mlx5_cmd_exec_inout(mdev, query_vnic_env, in, out);
1035	if (err)
1036		return err;
1037
1038	*rx_discard_vport_down = MLX5_GET64(query_vnic_env_out, out,
1039					    vport_env.receive_discard_vport_down);
1040	*tx_discard_vport_down = MLX5_GET64(query_vnic_env_out, out,
1041					    vport_env.transmit_discard_vport_down);
1042	return 0;
1043}
1044
/* Modify an HCA vport context from the fields selected in
 * @req->field_select (state policy, port GUID, node GUID), plus
 * cap_mask1 and its field-select, which are always written.
 * @other_vport: target VF @vf instead of our own vport (requires the
 *               vport_group_manager capability, else -EPERM).
 */
int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
				       u8 other_vport, u8 port_num,
				       int vf,
				       struct mlx5_hca_vport_context *req)
{
	int in_sz = MLX5_ST_SZ_BYTES(modify_hca_vport_context_in);
	int is_group_manager;
	void *ctx;
	void *in;
	int err;

	mlx5_core_dbg(dev, "vf %d\n", vf);
	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
	in = kvzalloc(in_sz, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_hca_vport_context_in, in, opcode, MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT);
	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(modify_hca_vport_context_in, in, other_vport, 1);
			MLX5_SET(modify_hca_vport_context_in, in, vport_number, vf);
		} else {
			err = -EPERM;
			goto ex;
		}
	}

	/* port_num is only meaningful on multi-port devices */
	if (MLX5_CAP_GEN(dev, num_ports) > 1)
		MLX5_SET(modify_hca_vport_context_in, in, port_num, port_num);

	ctx = MLX5_ADDR_OF(modify_hca_vport_context_in, in, hca_vport_context);
	MLX5_SET(hca_vport_context, ctx, field_select, req->field_select);
	/* Only copy in the fields the caller asked to modify */
	if (req->field_select & MLX5_HCA_VPORT_SEL_STATE_POLICY)
		MLX5_SET(hca_vport_context, ctx, vport_state_policy,
			 req->policy);
	if (req->field_select & MLX5_HCA_VPORT_SEL_PORT_GUID)
		MLX5_SET64(hca_vport_context, ctx, port_guid, req->port_guid);
	if (req->field_select & MLX5_HCA_VPORT_SEL_NODE_GUID)
		MLX5_SET64(hca_vport_context, ctx, node_guid, req->node_guid);
	MLX5_SET(hca_vport_context, ctx, cap_mask1, req->cap_mask1);
	MLX5_SET(hca_vport_context, ctx, cap_mask1_field_select,
		 req->cap_mask1_perm);
	err = mlx5_cmd_exec_in(dev, modify_hca_vport_context, in);
ex:
	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_modify_hca_vport_context);
1094
1095int mlx5_nic_vport_affiliate_multiport(struct mlx5_core_dev *master_mdev,
1096				       struct mlx5_core_dev *port_mdev)
1097{
1098	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
1099	void *in;
1100	int err;
1101
1102	in = kvzalloc(inlen, GFP_KERNEL);
1103	if (!in)
1104		return -ENOMEM;
1105
1106	err = mlx5_nic_vport_enable_roce(port_mdev);
1107	if (err)
1108		goto free;
1109
1110	MLX5_SET(modify_nic_vport_context_in, in, field_select.affiliation, 1);
1111	if (MLX5_CAP_GEN_2(master_mdev, sw_vhca_id_valid)) {
1112		MLX5_SET(modify_nic_vport_context_in, in,
1113			 nic_vport_context.vhca_id_type, VHCA_ID_TYPE_SW);
1114		MLX5_SET(modify_nic_vport_context_in, in,
1115			 nic_vport_context.affiliated_vhca_id,
1116			 MLX5_CAP_GEN_2(master_mdev, sw_vhca_id));
1117	} else {
1118		MLX5_SET(modify_nic_vport_context_in, in,
1119			 nic_vport_context.affiliated_vhca_id,
1120			 MLX5_CAP_GEN(master_mdev, vhca_id));
1121	}
1122	MLX5_SET(modify_nic_vport_context_in, in,
1123		 nic_vport_context.affiliation_criteria,
1124		 MLX5_CAP_GEN(port_mdev, affiliate_nic_vport_criteria));
1125	MLX5_SET(modify_nic_vport_context_in, in, opcode,
1126		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
1127
1128	err = mlx5_cmd_exec_in(port_mdev, modify_nic_vport_context, in);
1129	if (err)
1130		mlx5_nic_vport_disable_roce(port_mdev);
1131
1132free:
1133	kvfree(in);
1134	return err;
1135}
1136EXPORT_SYMBOL_GPL(mlx5_nic_vport_affiliate_multiport);
1137
1138int mlx5_nic_vport_unaffiliate_multiport(struct mlx5_core_dev *port_mdev)
1139{
1140	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
1141	void *in;
1142	int err;
1143
1144	in = kvzalloc(inlen, GFP_KERNEL);
1145	if (!in)
1146		return -ENOMEM;
1147
1148	MLX5_SET(modify_nic_vport_context_in, in, field_select.affiliation, 1);
1149	MLX5_SET(modify_nic_vport_context_in, in,
1150		 nic_vport_context.affiliated_vhca_id, 0);
1151	MLX5_SET(modify_nic_vport_context_in, in,
1152		 nic_vport_context.affiliation_criteria, 0);
1153	MLX5_SET(modify_nic_vport_context_in, in, opcode,
1154		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
1155
1156	err = mlx5_cmd_exec_in(port_mdev, modify_nic_vport_context, in);
1157	if (!err)
1158		mlx5_nic_vport_disable_roce(port_mdev);
1159
1160	kvfree(in);
1161	return err;
1162}
1163EXPORT_SYMBOL_GPL(mlx5_nic_vport_unaffiliate_multiport);
1164
1165u64 mlx5_query_nic_system_image_guid(struct mlx5_core_dev *mdev)
1166{
1167	int port_type_cap = MLX5_CAP_GEN(mdev, port_type);
1168	u64 tmp;
1169	int err;
1170
1171	if (mdev->sys_image_guid)
1172		return mdev->sys_image_guid;
1173
1174	if (port_type_cap == MLX5_CAP_PORT_TYPE_ETH)
1175		err = mlx5_query_nic_vport_system_image_guid(mdev, &tmp);
1176	else
1177		err = mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
1178
1179	mdev->sys_image_guid = err ? 0 : tmp;
1180
1181	return mdev->sys_image_guid;
1182}
1183EXPORT_SYMBOL_GPL(mlx5_query_nic_system_image_guid);
1184
1185int mlx5_vport_get_other_func_cap(struct mlx5_core_dev *dev, u16 vport, void *out,
1186				  u16 opmod)
1187{
1188	bool ec_vf_func = mlx5_core_is_ec_vf_vport(dev, vport);
1189	u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)] = {};
1190
1191	opmod = (opmod << 1) | (HCA_CAP_OPMOD_GET_MAX & 0x01);
1192	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
1193	MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
1194	MLX5_SET(query_hca_cap_in, in, function_id, mlx5_vport_to_func_id(dev, vport, ec_vf_func));
1195	MLX5_SET(query_hca_cap_in, in, other_function, true);
1196	MLX5_SET(query_hca_cap_in, in, ec_vf_function, ec_vf_func);
1197	return mlx5_cmd_exec_inout(dev, query_hca_cap, in, out);
1198}
1199EXPORT_SYMBOL_GPL(mlx5_vport_get_other_func_cap);
1200
1201int mlx5_vport_set_other_func_cap(struct mlx5_core_dev *dev, const void *hca_cap,
1202				  u16 vport, u16 opmod)
1203{
1204	bool ec_vf_func = mlx5_core_is_ec_vf_vport(dev, vport);
1205	int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
1206	void *set_hca_cap;
1207	void *set_ctx;
1208	int ret;
1209
1210	set_ctx = kzalloc(set_sz, GFP_KERNEL);
1211	if (!set_ctx)
1212		return -ENOMEM;
1213
1214	MLX5_SET(set_hca_cap_in, set_ctx, opcode, MLX5_CMD_OP_SET_HCA_CAP);
1215	MLX5_SET(set_hca_cap_in, set_ctx, op_mod, opmod << 1);
1216	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);
1217	memcpy(set_hca_cap, hca_cap, MLX5_ST_SZ_BYTES(cmd_hca_cap));
1218	MLX5_SET(set_hca_cap_in, set_ctx, function_id,
1219		 mlx5_vport_to_func_id(dev, vport, ec_vf_func));
1220	MLX5_SET(set_hca_cap_in, set_ctx, other_function, true);
1221	MLX5_SET(set_hca_cap_in, set_ctx, ec_vf_function, ec_vf_func);
1222	ret = mlx5_cmd_exec_in(dev, set_hca_cap, set_ctx);
1223
1224	kfree(set_ctx);
1225	return ret;
1226}
1227