/*-
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/10/sys/dev/mlx5/mlx5_core/mlx5_vport.c 337742 2018-08-14 11:19:04Z hselasky $
 */

#include <linux/etherdevice.h>
#include <dev/mlx5/driver.h>
#include <dev/mlx5/vport.h>
#include "mlx5_core.h"

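/*
 * Every firmware command in this file follows the same pattern: build an
 * "in" mailbox with MLX5_SET()/MLX5_SET64(), execute it through
 * mlx5_cmd_exec_check_status() (or mlx5_cmd_exec() plus an explicit
 * status-to-errno conversion), and parse the "out" mailbox with
 * MLX5_GET()/MLX5_GET64(). Mailbox sizes come from the generated
 * interface layouts: MLX5_ST_SZ_DW(...) for u32 stack arrays and
 * MLX5_ST_SZ_BYTES(...) for heap allocations via mlx5_vzalloc().
 */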
static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
					 int inlen);

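/*
 * Common helper for the vport state queries below. A non-zero vport
 * requires the "other_vport" flag so that firmware queries that vport
 * rather than the caller's own.
 */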
static int _mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod,
				   u16 vport, u32 *out, int outlen)
{
	int err;
	u32 in[MLX5_ST_SZ_DW(query_vport_state_in)];

	memset(in, 0, sizeof(in));

	MLX5_SET(query_vport_state_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_STATE);
	MLX5_SET(query_vport_state_in, in, op_mod, opmod);
	MLX5_SET(query_vport_state_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(query_vport_state_in, in, other_vport, 1);

	err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
	if (err)
		mlx5_core_warn(mdev, "MLX5_CMD_OP_QUERY_VPORT_STATE failed\n");

	return err;
}

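/*
 * mlx5_query_vport_state() and mlx5_query_vport_admin_state() issue the
 * same QUERY_VPORT_STATE command and differ only in which output field
 * they return. On command failure the zero-initialized "out" mailbox is
 * still parsed, so callers observe a state of 0 rather than an error.
 */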
u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
{
	u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};

	_mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out));

	return MLX5_GET(query_vport_state_out, out, state);
}
EXPORT_SYMBOL_GPL(mlx5_query_vport_state);

u8 mlx5_query_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
{
	u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};

	_mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out));

	return MLX5_GET(query_vport_state_out, out, admin_state);
}
EXPORT_SYMBOL(mlx5_query_vport_admin_state);

int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
				  u16 vport, u8 state)
{
	u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)];
	u32 out[MLX5_ST_SZ_DW(modify_vport_state_out)];
	int err;

	memset(in, 0, sizeof(in));

	MLX5_SET(modify_vport_state_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_VPORT_STATE);
	MLX5_SET(modify_vport_state_in, in, op_mod, opmod);
	MLX5_SET(modify_vport_state_in, in, vport_number, vport);

	if (vport)
		MLX5_SET(modify_vport_state_in, in, other_vport, 1);

	MLX5_SET(modify_vport_state_in, in, admin_state, state);

	err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out,
					 sizeof(out));
	if (err)
		mlx5_core_warn(mdev, "MLX5_CMD_OP_MODIFY_VPORT_STATE failed\n");

	return err;
}
EXPORT_SYMBOL(mlx5_modify_vport_admin_state);

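/*
 * Read the NIC vport context of "vport" into "out"; most of the
 * single-field query helpers below are thin wrappers around this.
 */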
static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport,
					u32 *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];

	memset(in, 0, sizeof(in));

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);

	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);

	return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
}

static u32 mlx5_vport_max_q_counter_allocator(struct mlx5_core_dev *mdev,
					      int client_id)
{
	switch (client_id) {
	case MLX5_INTERFACE_PROTOCOL_IB:
		return (MLX5_CAP_GEN(mdev, max_qp_cnt) -
			MLX5_QCOUNTER_SETS_NETDEV);
	case MLX5_INTERFACE_PROTOCOL_ETH:
		return MLX5_QCOUNTER_SETS_NETDEV;
	default:
		mlx5_core_warn(mdev, "Unknown Client: %d\n", client_id);
		return 0;
	}
}

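/*
 * Queue-counter sets are partitioned per interface protocol: the
 * Ethernet client gets MLX5_QCOUNTER_SETS_NETDEV sets and the IB client
 * gets the remainder of max_qp_cnt. A minimal usage sketch of the
 * alloc/query/dealloc lifecycle (error handling elided for brevity):
 *
 *	u16 set_id;
 *	u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {0};
 *
 *	mlx5_vport_alloc_q_counter(mdev, MLX5_INTERFACE_PROTOCOL_ETH, &set_id);
 *	mlx5_vport_query_q_counter(mdev, set_id, 0, out, sizeof(out));
 *	mlx5_vport_dealloc_q_counter(mdev, MLX5_INTERFACE_PROTOCOL_ETH, set_id);
 */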
int mlx5_vport_alloc_q_counter(struct mlx5_core_dev *mdev,
			       int client_id, u16 *counter_set_id)
{
	u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)];
	u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)];
	int err;

	/* Refuse the request once the client's quota is exhausted. */
	if (mdev->num_q_counter_allocated[client_id] >=
	    mlx5_vport_max_q_counter_allocator(mdev, client_id))
		return -EINVAL;

	memset(in, 0, sizeof(in));
	memset(out, 0, sizeof(out));

	MLX5_SET(alloc_q_counter_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_Q_COUNTER);

	err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
					 out, sizeof(out));

	/* Only account the counter set if the firmware allocated one. */
	if (!err) {
		*counter_set_id = MLX5_GET(alloc_q_counter_out, out,
					   counter_set_id);
		mdev->num_q_counter_allocated[client_id]++;
	}

	return err;
}

int mlx5_vport_dealloc_q_counter(struct mlx5_core_dev *mdev,
				 int client_id, u16 counter_set_id)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)];
	u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)];
	int err;

	if (mdev->num_q_counter_allocated[client_id] <= 0)
		return -EINVAL;

	memset(in, 0, sizeof(in));
	memset(out, 0, sizeof(out));

	MLX5_SET(dealloc_q_counter_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_Q_COUNTER);
	MLX5_SET(dealloc_q_counter_in, in, counter_set_id,
		 counter_set_id);

	err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
					 out, sizeof(out));

	/* Only drop the accounting if the firmware freed the set. */
	if (!err)
		mdev->num_q_counter_allocated[client_id]--;

	return err;
}

int mlx5_vport_query_q_counter(struct mlx5_core_dev *mdev,
			       u16 counter_set_id,
			       int reset,
			       void *out,
			       int out_size)
{
	u32 in[MLX5_ST_SZ_DW(query_q_counter_in)];

	memset(in, 0, sizeof(in));

	MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
	MLX5_SET(query_q_counter_in, in, clear, reset);
	MLX5_SET(query_q_counter_in, in, counter_set_id, counter_set_id);

	return mlx5_cmd_exec_check_status(mdev, in, sizeof(in),
					  out, out_size);
}

int mlx5_vport_query_out_of_rx_buffer(struct mlx5_core_dev *mdev,
				      u16 counter_set_id,
				      u32 *out_of_rx_buffer)
{
	u32 out[MLX5_ST_SZ_DW(query_q_counter_out)];
	int err;

	memset(out, 0, sizeof(out));

	err = mlx5_vport_query_q_counter(mdev, counter_set_id, 0, out,
					 sizeof(out));

	if (err)
		return err;

	*out_of_rx_buffer = MLX5_GET(query_q_counter_out, out,
				     out_of_buffer);
	return err;
}

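/*
 * Minimum-inline handling: the queries read min_wqe_inline_mode out of
 * the NIC vport context, while mlx5_query_min_inline() first consults
 * the per-device wqe_inline_mode capability to decide whether the vport
 * context needs to be queried at all.
 */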
int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
				    u16 vport, u8 *min_inline)
{
	u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {0};
	int err;

	err = mlx5_query_nic_vport_context(mdev, vport, out, sizeof(out));
	if (!err)
		*min_inline = MLX5_GET(query_nic_vport_context_out, out,
				       nic_vport_context.min_wqe_inline_mode);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_min_inline);

void mlx5_query_min_inline(struct mlx5_core_dev *mdev,
			   u8 *min_inline_mode)
{
	switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_L2:
		*min_inline_mode = MLX5_INLINE_MODE_L2;
		break;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode);
		break;
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		*min_inline_mode = MLX5_INLINE_MODE_NONE;
		break;
	}
}
EXPORT_SYMBOL_GPL(mlx5_query_min_inline);

int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
				     u16 vport, u8 min_inline)
{
	u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {0};
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *nic_vport_ctx;

	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.min_wqe_inline_mode, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
				     in, nic_vport_context);
	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 min_wqe_inline_mode, min_inline);

	return mlx5_modify_nic_vport_context(mdev, in, inlen);
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_min_inline);

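/*
 * The permanent_address field is an 8-byte, right-aligned container for
 * the 6-byte Ethernet MAC, hence the "&out_addr[2]" / "&perm_mac[2]"
 * offsets when copying in either direction.
 */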
int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
				     u16 vport, u8 *addr)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	u8 *out_addr;
	int err;

	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	out_addr = MLX5_ADDR_OF(query_nic_vport_context_out, out,
				nic_vport_context.permanent_address);

	err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
	if (err)
		goto out;

	ether_addr_copy(addr, &out_addr[2]);

out:
	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_address);

int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
				      u16 vport, u8 *addr)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	int err;
	void *nic_vport_ctx;
	u8 *perm_mac;

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(mdev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.permanent_address, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);

	if (vport)
		MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
				     in, nic_vport_context);
	perm_mac = MLX5_ADDR_OF(nic_vport_context, nic_vport_ctx,
				permanent_address);

	ether_addr_copy(&perm_mac[2], addr);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}
EXPORT_SYMBOL(mlx5_modify_nic_vport_mac_address);

int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
					   u64 *system_image_guid)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	int err;

	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
	if (err)
		goto out;

	*system_image_guid = MLX5_GET64(query_nic_vport_context_out, out,
					nic_vport_context.system_image_guid);
out:
	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid);

int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	int err;

	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
	if (err)
		goto out;

	*node_guid = MLX5_GET64(query_nic_vport_context_out, out,
				nic_vport_context.node_guid);

out:
	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);

static int mlx5_query_nic_vport_port_guid(struct mlx5_core_dev *mdev,
					  u64 *port_guid)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	int err;

	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
	if (err)
		goto out;

	*port_guid = MLX5_GET64(query_nic_vport_context_out, out,
				nic_vport_context.port_guid);

out:
	kvfree(out);
	return err;
}

int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
					u16 *qkey_viol_cntr)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	int err;

	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
	if (err)
		goto out;

	*qkey_viol_cntr = MLX5_GET(query_nic_vport_context_out, out,
				   nic_vport_context.qkey_violation_counter);

out:
	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_qkey_viol_cntr);

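/*
 * Shared back-end for all NIC vport context modifications: stamps the
 * MODIFY_NIC_VPORT_CONTEXT opcode into the caller-built mailbox and
 * executes it.
 */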
static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
					 int inlen)
{
	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);

	memset(out, 0, sizeof(out));
	return mlx5_cmd_exec_check_status(mdev, in, inlen, out, sizeof(out));
}

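/*
 * RoCE is toggled through the roce_en bit of the NIC vport context; the
 * exported mlx5_nic_vport_enable_roce()/mlx5_nic_vport_disable_roce()
 * wrappers near the end of this file pass 1 or 0 here.
 */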
static int mlx5_nic_vport_enable_disable_roce(struct mlx5_core_dev *mdev,
					      int enable_disable)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(mdev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, in, field_select.roce_en, 1);
	MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.roce_en,
		 enable_disable);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}

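/*
 * Set the current (unicast) MAC of a vport by installing a one-entry
 * allowed-list of type MLX5_NIC_VPORT_LIST_TYPE_UC. The MAC bytes start
 * at mac_addr_47_32 within the 8-byte mac_address_layout.
 */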
int mlx5_set_nic_vport_current_mac(struct mlx5_core_dev *mdev, int vport,
				   bool other_vport, u8 *addr)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)
		  + MLX5_ST_SZ_BYTES(mac_address_layout);
	u8  *mac_layout;
	u8  *mac_ptr;
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(mdev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, in,
		 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in,
		 vport_number, vport);
	MLX5_SET(modify_nic_vport_context_in, in,
		 other_vport, other_vport);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.addresses_list, 1);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.allowed_list_type,
		 MLX5_NIC_VPORT_LIST_TYPE_UC);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.allowed_list_size, 1);

	mac_layout = (u8 *)MLX5_ADDR_OF(modify_nic_vport_context_in, in,
		nic_vport_context.current_uc_mac_address);
	mac_ptr = (u8 *)MLX5_ADDR_OF(mac_address_layout, mac_layout,
		mac_addr_47_32);
	ether_addr_copy(mac_ptr, addr);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_current_mac);

int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
				    u32 vport, u64 node_guid)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	int err;
	void *nic_vport_context;

	if (!vport)
		return -EINVAL;
	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
		return -EPERM;
	if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
		return -ENOTSUPP;

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(mdev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.node_guid, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);

	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);

	nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
					 in, nic_vport_context);
	MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}
EXPORT_SYMBOL(mlx5_modify_nic_vport_node_guid);

int mlx5_modify_nic_vport_port_guid(struct mlx5_core_dev *mdev,
				    u32 vport, u64 port_guid)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	int err;
	void *nic_vport_context;

	if (!vport)
		return -EINVAL;
	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
		return -EPERM;
	if (!MLX5_CAP_ESW(mdev, nic_vport_port_guid_modify))
		return -ENOTSUPP;

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(mdev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.port_guid, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);

	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);

	nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
					 in, nic_vport_context);
	MLX5_SET64(nic_vport_context, nic_vport_context, port_guid, port_guid);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}
EXPORT_SYMBOL(mlx5_modify_nic_vport_port_guid);

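/*
 * The allowed-list area of the NIC vport context is a union: the same
 * current_uc_mac_address[] slots hold MAC entries or vlan_layout
 * entries depending on allowed_list_type, which is why the VLAN and
 * multicast helpers below index that field directly.
 */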
int mlx5_set_nic_vport_vlan_list(struct mlx5_core_dev *dev, u16 vport,
				 u16 *vlan_list, int list_len)
{
	void *in, *ctx;
	int i, err;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)
		+ MLX5_ST_SZ_BYTES(vlan_layout) * (int)list_len;

	int max_list_size = 1 << MLX5_CAP_GEN_MAX(dev, log_max_vlan_list);

	if (list_len > max_list_size) {
		mlx5_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n",
			       list_len, max_list_size);
		return -ENOSPC;
	}

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(dev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(modify_nic_vport_context_in, in,
			 other_vport, 1);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.addresses_list, 1);

	ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in, nic_vport_context);

	MLX5_SET(nic_vport_context, ctx, allowed_list_type,
		 MLX5_NIC_VPORT_LIST_TYPE_VLAN);
	MLX5_SET(nic_vport_context, ctx, allowed_list_size, list_len);

	for (i = 0; i < list_len; i++) {
		u8 *vlan_lout = MLX5_ADDR_OF(nic_vport_context, ctx,
					     current_uc_mac_address[i]);
		MLX5_SET(vlan_layout, vlan_lout, vlan, vlan_list[i]);
	}

	err = mlx5_modify_nic_vport_context(dev, in, inlen);

	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_vlan_list);

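/*
 * Program a vport's multicast MAC allowed-list. Each entry of addr_list
 * is a u64 whose first six bytes of storage hold the MAC, copied into
 * the right-aligned mac_address_layout entries.
 */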
int mlx5_set_nic_vport_mc_list(struct mlx5_core_dev *mdev, int vport,
			       u64 *addr_list, size_t addr_list_len)
{
	void *in, *ctx;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)
		  + MLX5_ST_SZ_BYTES(mac_address_layout) * (int)addr_list_len;
	int err;
	size_t i;
	int max_list_sz = 1 << MLX5_CAP_GEN_MAX(mdev, log_max_current_mc_list);

	if ((int)addr_list_len > max_list_sz) {
		mlx5_core_warn(mdev, "Requested list size (%d) > (%d) max_list_size\n",
			       (int)addr_list_len, max_list_sz);
		return -ENOSPC;
	}

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(mdev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(modify_nic_vport_context_in, in,
			 other_vport, 1);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.addresses_list, 1);

	ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in, nic_vport_context);

	MLX5_SET(nic_vport_context, ctx, allowed_list_type,
		 MLX5_NIC_VPORT_LIST_TYPE_MC);
	MLX5_SET(nic_vport_context, ctx, allowed_list_size, addr_list_len);

	for (i = 0; i < addr_list_len; i++) {
		u8 *mac_lout = (u8 *)MLX5_ADDR_OF(nic_vport_context, ctx,
						  current_uc_mac_address[i]);
		u8 *mac_ptr = (u8 *)MLX5_ADDR_OF(mac_address_layout, mac_lout,
						 mac_addr_47_32);
		ether_addr_copy(mac_ptr, (u8 *)&addr_list[i]);
	}

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_mc_list);

int mlx5_set_nic_vport_promisc(struct mlx5_core_dev *mdev, int vport,
			       bool promisc_mc, bool promisc_uc,
			       bool promisc_all)
{
	u8  in[MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)];
	u8 *ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
			       nic_vport_context);

	memset(in, 0, MLX5_ST_SZ_BYTES(modify_nic_vport_context_in));

	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(modify_nic_vport_context_in, in,
			 other_vport, 1);
	MLX5_SET(modify_nic_vport_context_in, in, field_select.promisc, 1);
	if (promisc_mc)
		MLX5_SET(nic_vport_context, ctx, promisc_mc, 1);
	if (promisc_uc)
		MLX5_SET(nic_vport_context, ctx, promisc_uc, 1);
	if (promisc_all)
		MLX5_SET(nic_vport_context, ctx, promisc_all, 1);

	return mlx5_modify_nic_vport_context(mdev, in, sizeof(in));
}
EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_promisc);

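/*
 * Query a vport's UC or MC allowed-list. On entry *list_size is the
 * caller's capacity (clamped to the device maximum); on return it holds
 * the number of addresses actually reported by firmware.
 */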
int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
				  u16 vport,
				  enum mlx5_list_type list_type,
				  u8 addr_list[][ETH_ALEN],
				  int *list_size)
{
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
	void *nic_vport_ctx;
	int max_list_size;
	int req_list_size;
	int out_sz;
	void *out;
	int err;
	int i;

	req_list_size = *list_size;

	max_list_size = (list_type == MLX5_NIC_VPORT_LIST_TYPE_UC) ?
			1 << MLX5_CAP_GEN_MAX(dev, log_max_current_uc_list) :
			1 << MLX5_CAP_GEN_MAX(dev, log_max_current_mc_list);

	if (req_list_size > max_list_size) {
		mlx5_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n",
			       req_list_size, max_list_size);
		req_list_size = max_list_size;
	}

	/* Size the reply for the context plus the trailing address list. */
	out_sz = MLX5_ST_SZ_BYTES(query_nic_vport_context_out) +
		 req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout);

	memset(in, 0, sizeof(in));
	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	MLX5_SET(query_nic_vport_context_in, in, allowed_list_type, list_type);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);

	if (vport)
		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);

	err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, out_sz);
	if (err)
		goto out;

	nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
				     nic_vport_context);
	req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
				 allowed_list_size);

	*list_size = req_list_size;
	for (i = 0; i < req_list_size; i++) {
		u8 *mac_addr = MLX5_ADDR_OF(nic_vport_context,
					    nic_vport_ctx,
					    current_uc_mac_address[i]) + 2;
		ether_addr_copy(addr_list[i], mac_addr);
	}
out:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_list);

int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
				   enum mlx5_list_type list_type,
				   u8 addr_list[][ETH_ALEN],
				   int list_size)
{
	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
	void *nic_vport_ctx;
	int max_list_size;
	int in_sz;
	void *in;
	int err;
	int i;

	max_list_size = list_type == MLX5_NIC_VPORT_LIST_TYPE_UC ?
		 1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) :
		 1 << MLX5_CAP_GEN(dev, log_max_current_mc_list);

	if (list_size > max_list_size)
		return -ENOSPC;

	in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
		list_size * MLX5_ST_SZ_BYTES(mac_address_layout);

	memset(out, 0, sizeof(out));
	in = kzalloc(in_sz, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.addresses_list, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
				     nic_vport_context);

	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_type, list_type);
	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_size, list_size);

	for (i = 0; i < list_size; i++) {
		u8 *curr_mac = MLX5_ADDR_OF(nic_vport_context,
					    nic_vport_ctx,
					    current_uc_mac_address[i]) + 2;
		ether_addr_copy(curr_mac, addr_list[i]);
	}

	err = mlx5_cmd_exec_check_status(dev, in, in_sz, out, sizeof(out));
	kfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_list);

int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev,
			       u16 vport,
			       u16 vlans[],
			       int *size)
{
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
	void *nic_vport_ctx;
	int req_list_size;
	int max_list_size;
	int out_sz;
	void *out;
	int err;
	int i;

	req_list_size = *size;
	max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);
	if (req_list_size > max_list_size) {
		mlx5_core_warn(dev, "Requested list size (%d) > (%d) max list size\n",
			       req_list_size, max_list_size);
		req_list_size = max_list_size;
	}

	/* Size the reply for the context plus the trailing VLAN list. */
	out_sz = MLX5_ST_SZ_BYTES(query_nic_vport_context_out) +
		 req_list_size * MLX5_ST_SZ_BYTES(vlan_layout);

	memset(in, 0, sizeof(in));
	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	MLX5_SET(query_nic_vport_context_in, in, allowed_list_type,
		 MLX5_NIC_VPORT_CONTEXT_ALLOWED_LIST_TYPE_VLAN_LIST);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);

	if (vport)
		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);

	err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, out_sz);
	if (err)
		goto out;

	nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
				     nic_vport_context);
	req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
				 allowed_list_size);

	*size = req_list_size;
	for (i = 0; i < req_list_size; i++) {
		void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
					       nic_vport_ctx,
					       current_uc_mac_address[i]);
		vlans[i] = MLX5_GET(vlan_layout, vlan_addr, vlan);
	}
out:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_vlans);

int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
				u16 vlans[],
				int list_size)
{
	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
	void *nic_vport_ctx;
	int max_list_size;
	int in_sz;
	void *in;
	int err;
	int i;

	max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);

	if (list_size > max_list_size)
		return -ENOSPC;

	in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
		list_size * MLX5_ST_SZ_BYTES(vlan_layout);

	memset(out, 0, sizeof(out));
	in = kzalloc(in_sz, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.addresses_list, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
				     nic_vport_context);

	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_type, MLX5_NIC_VPORT_LIST_TYPE_VLAN);
	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_size, list_size);

	for (i = 0; i < list_size; i++) {
		void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
					       nic_vport_ctx,
					       current_uc_mac_address[i]);
		MLX5_SET(vlan_layout, vlan_addr, vlan, vlans[i]);
	}

	err = mlx5_cmd_exec_check_status(dev, in, in_sz, out, sizeof(out));
	kfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_vlans);

int mlx5_query_nic_vport_roce_en(struct mlx5_core_dev *mdev, u8 *enable)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	int err;

	out = kzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
	if (err)
		goto out;

	*enable = MLX5_GET(query_nic_vport_context_out, out,
			   nic_vport_context.roce_en);

out:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_roce_en);

int mlx5_set_nic_vport_permanent_mac(struct mlx5_core_dev *mdev, int vport,
				     u8 *addr)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	u8  *mac_ptr;
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(mdev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, in,
		 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.permanent_address, 1);
	mac_ptr = (u8 *)MLX5_ADDR_OF(modify_nic_vport_context_in, in,
		nic_vport_context.permanent_address.mac_addr_47_32);
	ether_addr_copy(mac_ptr, addr);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_permanent_mac);

int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev)
{
	return mlx5_nic_vport_enable_disable_roce(mdev, 1);
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_enable_roce);

int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev)
{
	return mlx5_nic_vport_enable_disable_roce(mdev, 0);
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_disable_roce);

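/*
 * HCA vport context queries: access to another vport is restricted to
 * group managers (-EPERM otherwise), and port_num is only meaningful on
 * dual-port devices.
 */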
int mlx5_query_hca_vport_context(struct mlx5_core_dev *mdev,
				 u8 port_num, u8 vport_num, u32 *out,
				 int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_hca_vport_context_in)];
	int is_group_manager;

	is_group_manager = MLX5_CAP_GEN(mdev, vport_group_manager);

	memset(in, 0, sizeof(in));

	MLX5_SET(query_hca_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT);

	if (vport_num) {
		if (is_group_manager) {
			MLX5_SET(query_hca_vport_context_in, in, other_vport,
				 1);
			MLX5_SET(query_hca_vport_context_in, in, vport_number,
				 vport_num);
		} else {
			return -EPERM;
		}
	}

	if (MLX5_CAP_GEN(mdev, num_ports) == 2)
		MLX5_SET(query_hca_vport_context_in, in, port_num, port_num);

	return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
}

int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *mdev,
					   u64 *system_image_guid)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
	int err;

	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
	if (err)
		goto out;

	*system_image_guid = MLX5_GET64(query_hca_vport_context_out, out,
					hca_vport_context.system_image_guid);

out:
	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_system_image_guid);

int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
	int err;

	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
	if (err)
		goto out;

	*node_guid = MLX5_GET64(query_hca_vport_context_out, out,
				hca_vport_context.node_guid);

out:
	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid);

static int mlx5_query_hca_vport_port_guid(struct mlx5_core_dev *mdev,
					  u64 *port_guid)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
	int err;

	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
	if (err)
		goto out;

	*port_guid = MLX5_GET64(query_hca_vport_context_out, out,
				hca_vport_context.port_guid);

out:
	kvfree(out);
	return err;
}

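/*
 * Query one GID (or, with gid_index == 0xffff, the whole table) of an
 * HCA vport. Only the first returned entry is copied back to the
 * caller.
 */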
int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 port_num,
			     u16 vport_num, u16 gid_index, union ib_gid *gid)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_in);
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
	int is_group_manager;
	void *out = NULL;
	void *in = NULL;
	union ib_gid *tmp;
	int tbsz;
	int nout;
	int err;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
	tbsz = mlx5_get_gid_table_len(MLX5_CAP_GEN(dev, gid_table_size));

	if (gid_index > tbsz && gid_index != 0xffff)
		return -EINVAL;

	if (gid_index == 0xffff)
		nout = tbsz;
	else
		nout = 1;

	out_sz += nout * sizeof(*gid);

	in = mlx5_vzalloc(in_sz);
	out = mlx5_vzalloc(out_sz);
	if (!in || !out) {
		err = -ENOMEM;
		goto out;
	}

	MLX5_SET(query_hca_vport_gid_in, in, opcode,
		 MLX5_CMD_OP_QUERY_HCA_VPORT_GID);
	if (vport_num) {
		if (is_group_manager) {
			MLX5_SET(query_hca_vport_gid_in, in, vport_number,
				 vport_num);
			MLX5_SET(query_hca_vport_gid_in, in, other_vport, 1);
		} else {
			err = -EPERM;
			goto out;
		}
	}

	MLX5_SET(query_hca_vport_gid_in, in, gid_index, gid_index);

	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_hca_vport_gid_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
	if (err)
		goto out;

	err = mlx5_cmd_status_to_err_v2(out);
	if (err)
		goto out;

	tmp = (union ib_gid *)MLX5_ADDR_OF(query_hca_vport_gid_out, out, gid);
	gid->global.subnet_prefix = tmp->global.subnet_prefix;
	gid->global.interface_id = tmp->global.interface_id;

out:
	kvfree(in);
	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_gid);

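/*
 * Query one P_Key (or, with pkey_index == 0xffff, the whole table) of
 * an HCA vport; entries are copied out one u16 at a time.
 */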
int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
			      u8 port_num, u16 vf_num, u16 pkey_index,
			      u16 *pkey)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_in);
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_out);
	int is_group_manager;
	void *out = NULL;
	void *in = NULL;
	void *pkarr;
	int nout;
	int tbsz;
	int err;
	int i;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);

	tbsz = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size));
	if (pkey_index > tbsz && pkey_index != 0xffff)
		return -EINVAL;

	if (pkey_index == 0xffff)
		nout = tbsz;
	else
		nout = 1;

	out_sz += nout * MLX5_ST_SZ_BYTES(pkey);

	in = kzalloc(in_sz, GFP_KERNEL);
	out = kzalloc(out_sz, GFP_KERNEL);
	/* Either allocation may fail; bail out before building the mailbox. */
	if (!in || !out) {
		err = -ENOMEM;
		goto out;
	}

	MLX5_SET(query_hca_vport_pkey_in, in, opcode,
		 MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY);
	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(query_hca_vport_pkey_in, in, vport_number,
				 vf_num);
			MLX5_SET(query_hca_vport_pkey_in, in, other_vport, 1);
		} else {
			err = -EPERM;
			goto out;
		}
	}
	MLX5_SET(query_hca_vport_pkey_in, in, pkey_index, pkey_index);

	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_hca_vport_pkey_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
	if (err)
		goto out;

	err = mlx5_cmd_status_to_err_v2(out);
	if (err)
		goto out;

	pkarr = MLX5_ADDR_OF(query_hca_vport_pkey_out, out, pkey);
	for (i = 0; i < nout; i++, pkey++,
	     pkarr += MLX5_ST_SZ_BYTES(pkey))
		*pkey = MLX5_GET_PR(pkey, pkarr, pkey);

out:
	kfree(in);
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_pkey);

static int mlx5_query_hca_min_wqe_header(struct mlx5_core_dev *mdev,
					 int *min_header)
{
	u32 *out;
	u32 outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
	int err;

	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
	if (err)
		goto out;

	*min_header = MLX5_GET(query_hca_vport_context_out, out,
			       hca_vport_context.min_wqe_inline_mode);

out:
	kvfree(out);
	return err;
}

static int mlx5_modify_eswitch_vport_context(struct mlx5_core_dev *mdev,
					     u16 vport, void *in, int inlen)
{
	u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)];
	int err;

	memset(out, 0, sizeof(out));

	MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);

	MLX5_SET(modify_esw_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);

	err = mlx5_cmd_exec_check_status(mdev, in, inlen,
					 out, sizeof(out));
	if (err)
		mlx5_core_warn(mdev, "MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT failed\n");

	return err;
}

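/*
 * Configure e-switch CVLAN insert/strip for a vport. The VLAN id, CFI
 * and PCP fields are only written when insertion is actually requested;
 * field_select commits both the strip and insert settings at once.
 */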
int mlx5_set_eswitch_cvlan_info(struct mlx5_core_dev *mdev, u8 vport,
				u8 insert_mode, u8 strip_mode,
				u16 vlan, u8 cfi, u8 pcp)
{
	u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)];

	memset(in, 0, sizeof(in));

	if (insert_mode != MLX5_MODIFY_ESW_VPORT_CONTEXT_CVLAN_INSERT_NONE) {
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.cvlan_cfi, cfi);
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.cvlan_pcp, pcp);
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.cvlan_id, vlan);
	}

	MLX5_SET(modify_esw_vport_context_in, in,
		 esw_vport_context.vport_cvlan_insert, insert_mode);

	MLX5_SET(modify_esw_vport_context_in, in,
		 esw_vport_context.vport_cvlan_strip, strip_mode);

	MLX5_SET(modify_esw_vport_context_in, in, field_select,
		 MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_CVLAN_STRIP |
		 MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_CVLAN_INSERT);

	return mlx5_modify_eswitch_vport_context(mdev, vport, in, sizeof(in));
}
EXPORT_SYMBOL_GPL(mlx5_set_eswitch_cvlan_info);

int mlx5_query_vport_mtu(struct mlx5_core_dev *mdev, int *mtu)
{
	u32 *out;
	u32 outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	int err;

	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
	if (err)
		goto out;

	*mtu = MLX5_GET(query_nic_vport_context_out, out,
			nic_vport_context.mtu);

out:
	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_vport_mtu);

int mlx5_set_vport_mtu(struct mlx5_core_dev *mdev, int mtu)
{
	u32 *in;
	u32 inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1);
	MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu, mtu);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_set_vport_mtu);

static int mlx5_query_vport_min_wqe_header(struct mlx5_core_dev *mdev,
					   int *min_header)
{
	u32 *out;
	u32 outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	int err;

	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
	if (err)
		goto out;

	*min_header = MLX5_GET(query_nic_vport_context_out, out,
			       nic_vport_context.min_wqe_inline_mode);

out:
	kvfree(out);
	return err;
}

int mlx5_set_vport_min_wqe_header(struct mlx5_core_dev *mdev,
				  u8 vport, int min_header)
{
	u32 *in;
	u32 inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.min_wqe_inline_mode, 1);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.min_wqe_inline_mode, min_header);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_set_vport_min_wqe_header);

int mlx5_query_min_wqe_header(struct mlx5_core_dev *dev, int *min_header)
{
	switch (MLX5_CAP_GEN(dev, port_type)) {
	case MLX5_CMD_HCA_CAP_PORT_TYPE_IB:
		return mlx5_query_hca_min_wqe_header(dev, min_header);

	case MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET:
		return mlx5_query_vport_min_wqe_header(dev, min_header);

	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(mlx5_query_min_wqe_header);

int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
				 u16 vport,
				 int *promisc_uc,
				 int *promisc_mc,
				 int *promisc_all)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	int err;

	out = kzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
	if (err)
		goto out;

	*promisc_uc = MLX5_GET(query_nic_vport_context_out, out,
			       nic_vport_context.promisc_uc);
	*promisc_mc = MLX5_GET(query_nic_vport_context_out, out,
			       nic_vport_context.promisc_mc);
	*promisc_all = MLX5_GET(query_nic_vport_context_out, out,
				nic_vport_context.promisc_all);

out:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_promisc);

int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev,
				  int promisc_uc,
				  int promisc_mc,
				  int promisc_all)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_err(mdev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, in, field_select.promisc, 1);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.promisc_uc, promisc_uc);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.promisc_mc, promisc_mc);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.promisc_all, promisc_all);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);
	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_promisc);

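/*
 * Raw QUERY_VPORT_COUNTER execution; the caller supplies and parses the
 * output mailbox. See mlx5_get_vport_counters() below for a worked
 * example that extracts every counter pair with MLX5_GET64().
 */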
int mlx5_query_vport_counter(struct mlx5_core_dev *dev,
			     u8 port_num, u16 vport_num,
			     void *out, int out_size)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in);
	int is_group_manager;
	void *in;
	int err;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);

	in = mlx5_vzalloc(in_sz);
	if (!in)
		return -ENOMEM;

	MLX5_SET(query_vport_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	if (vport_num) {
		if (is_group_manager) {
			MLX5_SET(query_vport_counter_in, in, other_vport, 1);
			MLX5_SET(query_vport_counter_in, in, vport_number,
				 vport_num);
		} else {
			err = -EPERM;
			goto ex;
		}
	}
	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_vport_counter_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, in_sz, out, out_size);
	if (err)
		goto ex;
	err = mlx5_cmd_status_to_err_v2(out);

ex:
	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_vport_counter);

int mlx5_get_vport_counters(struct mlx5_core_dev *dev, u8 port_num,
			    struct mlx5_vport_counters *vc)
{
	int out_sz = MLX5_ST_SZ_BYTES(query_vport_counter_out);
	void *out;
	int err;

	out = mlx5_vzalloc(out_sz);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_vport_counter(dev, port_num, 0, out, out_sz);
	if (err)
		goto ex;

	vc->received_errors.packets =
		MLX5_GET64(query_vport_counter_out,
			   out, received_errors.packets);
	vc->received_errors.octets =
		MLX5_GET64(query_vport_counter_out,
			   out, received_errors.octets);
	vc->transmit_errors.packets =
		MLX5_GET64(query_vport_counter_out,
			   out, transmit_errors.packets);
	vc->transmit_errors.octets =
		MLX5_GET64(query_vport_counter_out,
			   out, transmit_errors.octets);
	vc->received_ib_unicast.packets =
		MLX5_GET64(query_vport_counter_out,
			   out, received_ib_unicast.packets);
	vc->received_ib_unicast.octets =
		MLX5_GET64(query_vport_counter_out,
			   out, received_ib_unicast.octets);
	vc->transmitted_ib_unicast.packets =
		MLX5_GET64(query_vport_counter_out,
			   out, transmitted_ib_unicast.packets);
	vc->transmitted_ib_unicast.octets =
		MLX5_GET64(query_vport_counter_out,
			   out, transmitted_ib_unicast.octets);
	vc->received_ib_multicast.packets =
		MLX5_GET64(query_vport_counter_out,
			   out, received_ib_multicast.packets);
	vc->received_ib_multicast.octets =
		MLX5_GET64(query_vport_counter_out,
			   out, received_ib_multicast.octets);
	vc->transmitted_ib_multicast.packets =
		MLX5_GET64(query_vport_counter_out,
			   out, transmitted_ib_multicast.packets);
	vc->transmitted_ib_multicast.octets =
		MLX5_GET64(query_vport_counter_out,
			   out, transmitted_ib_multicast.octets);
	vc->received_eth_broadcast.packets =
		MLX5_GET64(query_vport_counter_out,
			   out, received_eth_broadcast.packets);
	vc->received_eth_broadcast.octets =
		MLX5_GET64(query_vport_counter_out,
			   out, received_eth_broadcast.octets);
	vc->transmitted_eth_broadcast.packets =
		MLX5_GET64(query_vport_counter_out,
			   out, transmitted_eth_broadcast.packets);
	vc->transmitted_eth_broadcast.octets =
		MLX5_GET64(query_vport_counter_out,
			   out, transmitted_eth_broadcast.octets);
	vc->received_eth_unicast.octets =
		MLX5_GET64(query_vport_counter_out,
			   out, received_eth_unicast.octets);
	vc->received_eth_unicast.packets =
		MLX5_GET64(query_vport_counter_out,
			   out, received_eth_unicast.packets);
	vc->transmitted_eth_unicast.octets =
		MLX5_GET64(query_vport_counter_out,
			   out, transmitted_eth_unicast.octets);
	vc->transmitted_eth_unicast.packets =
		MLX5_GET64(query_vport_counter_out,
			   out, transmitted_eth_unicast.packets);
	vc->received_eth_multicast.octets =
		MLX5_GET64(query_vport_counter_out,
			   out, received_eth_multicast.octets);
	vc->received_eth_multicast.packets =
		MLX5_GET64(query_vport_counter_out,
			   out, received_eth_multicast.packets);
	vc->transmitted_eth_multicast.octets =
		MLX5_GET64(query_vport_counter_out,
			   out, transmitted_eth_multicast.octets);
	vc->transmitted_eth_multicast.packets =
		MLX5_GET64(query_vport_counter_out,
			   out, transmitted_eth_multicast.packets);

ex:
	kvfree(out);
	return err;
}

int mlx5_query_vport_system_image_guid(struct mlx5_core_dev *dev,
				       u64 *sys_image_guid)
{
	switch (MLX5_CAP_GEN(dev, port_type)) {
	case MLX5_CMD_HCA_CAP_PORT_TYPE_IB:
		return mlx5_query_hca_vport_system_image_guid(dev,
							      sys_image_guid);

	case MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET:
		return mlx5_query_nic_vport_system_image_guid(dev,
							      sys_image_guid);

	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(mlx5_query_vport_system_image_guid);

int mlx5_query_vport_node_guid(struct mlx5_core_dev *dev, u64 *node_guid)
{
	switch (MLX5_CAP_GEN(dev, port_type)) {
	case MLX5_CMD_HCA_CAP_PORT_TYPE_IB:
		return mlx5_query_hca_vport_node_guid(dev, node_guid);

	case MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET:
		return mlx5_query_nic_vport_node_guid(dev, node_guid);

	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(mlx5_query_vport_node_guid);

int mlx5_query_vport_port_guid(struct mlx5_core_dev *dev, u64 *port_guid)
{
	switch (MLX5_CAP_GEN(dev, port_type)) {
	case MLX5_CMD_HCA_CAP_PORT_TYPE_IB:
		return mlx5_query_hca_vport_port_guid(dev, port_guid);

	case MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET:
		return mlx5_query_nic_vport_port_guid(dev, port_guid);

	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(mlx5_query_vport_port_guid);

int mlx5_query_hca_vport_state(struct mlx5_core_dev *dev, u8 *vport_state)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
	int err;

	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_hca_vport_context(dev, 1, 0, out, outlen);
	if (err)
		goto out;

	*vport_state = MLX5_GET(query_hca_vport_context_out, out,
				hca_vport_context.vport_state);

out:
	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_state);