/* device.h revision 308678 (FreeBSD stable/11) */
1/*-
2 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 *    notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 *    notice, this list of conditions and the following disclaimer in the
11 *    documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23 * SUCH DAMAGE.
24 *
25 * $FreeBSD: stable/11/sys/dev/mlx5/device.h 308678 2016-11-15 08:53:25Z hselasky $
26 */
27
28#ifndef MLX5_DEVICE_H
29#define MLX5_DEVICE_H
30
31#include <linux/types.h>
32#include <rdma/ib_verbs.h>
33#include <dev/mlx5/mlx5_ifc.h>
34
35#define FW_INIT_TIMEOUT_MILI 2000
36#define FW_INIT_WAIT_MS 2
37
38#if defined(__LITTLE_ENDIAN)
39#define MLX5_SET_HOST_ENDIANNESS	0
40#elif defined(__BIG_ENDIAN)
41#define MLX5_SET_HOST_ENDIANNESS	0x80
42#else
43#error Host endianness not defined
44#endif
45
/* helper macros */
/*
 * The mlx5_ifc.h firmware layouts are described as *_bits structs whose
 * members are sized in bits.  These macros derive bit/byte/dword offsets
 * and masks from those structs at compile time (sizeof on a null-pointer
 * member access is a constant expression; no object is dereferenced).
 */
#define __mlx5_nullp(typ) ((struct mlx5_ifc_##typ##_bits *)0)
/* Field width in bits. */
#define __mlx5_bit_sz(typ, fld) sizeof(__mlx5_nullp(typ)->fld)
/* Field offset in bits from the start of the struct. */
#define __mlx5_bit_off(typ, fld) __offsetof(struct mlx5_ifc_##typ##_bits, fld)
/* Index of the 32-bit word containing the field. */
#define __mlx5_dw_off(typ, fld) (__mlx5_bit_off(typ, fld) / 32)
/* Index of the 64-bit word containing the field. */
#define __mlx5_64_off(typ, fld) (__mlx5_bit_off(typ, fld) / 64)
/* Shift of the field within its dword, counted from the LSB after
 * byte-swapping (firmware layouts are big-endian / MSB-first). */
#define __mlx5_dw_bit_off(typ, fld) (32 - __mlx5_bit_sz(typ, fld) - (__mlx5_bit_off(typ, fld) & 0x1f))
/* Unshifted mask of __mlx5_bit_sz() low bits. */
#define __mlx5_mask(typ, fld) ((u32)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
/* Mask positioned at the field's location within its dword. */
#define __mlx5_dw_mask(typ, fld) (__mlx5_mask(typ, fld) << __mlx5_dw_bit_off(typ, fld))
/* Struct size in bits. */
#define __mlx5_st_sz_bits(typ) sizeof(struct mlx5_ifc_##typ##_bits)

/* Public size/offset helpers, in bytes / 32-bit words / 64-bit words. */
#define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8)
#define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8)
#define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32)
#define MLX5_ST_SZ_QW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 64)
#define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8)
#define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32)
#define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
/* Pointer to the first byte of a field inside buffer p. */
#define MLX5_ADDR_OF(typ, p, fld) ((char *)(p) + MLX5_BYTE_OFF(typ, fld))
65
/* insert a value to a struct */
/*
 * Read-modify-write of one field inside a big-endian firmware layout:
 * load the containing dword, swap to CPU order, clear the field's bits,
 * OR in the (masked) new value at the field's position, swap back.
 * Compile-time checks: struct is dword-granular and field fits in 32 bits
 * (use MLX5_SET64 for 64-bit fields).
 */
#define MLX5_SET(typ, p, fld, v) do { \
	BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32);             \
	BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) > 32); \
	*((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
	cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
		     (~__mlx5_dw_mask(typ, fld))) | (((v) & __mlx5_mask(typ, fld)) \
		     << __mlx5_dw_bit_off(typ, fld))); \
} while (0)
75
/* Like MLX5_SET() with v = all-ones: set every bit of the field. */
#define MLX5_SET_TO_ONES(typ, p, fld) do { \
	BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32);             \
	BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) > 32); \
	*((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
	cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
		     (~__mlx5_dw_mask(typ, fld))) | ((__mlx5_mask(typ, fld)) \
		     << __mlx5_dw_bit_off(typ, fld))); \
} while (0)
84
/* Extract a <=32-bit field from a big-endian firmware layout: load the
 * containing dword, swap to CPU order, shift the field down, mask. */
#define MLX5_GET(typ, p, fld) ((be32_to_cpu(*((__be32 *)(p) +\
__mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \
__mlx5_mask(typ, fld))

/* MLX5_GET() plus a pr_debug() trace of the extracted value. */
#define MLX5_GET_PR(typ, p, fld) ({ \
	u32 ___t = MLX5_GET(typ, p, fld); \
	pr_debug(#fld " = 0x%x\n", ___t); \
	___t; \
})
94
/* Store a 64-bit field.  Compile-time checks require the field to be
 * exactly 64 bits wide and 64-bit aligned, so a plain big-endian qword
 * store suffices (no read-modify-write needed). */
#define MLX5_SET64(typ, p, fld, v) do { \
	BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) != 64); \
	BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \
	*((__be64 *)(p) + __mlx5_64_off(typ, fld)) = cpu_to_be64(v); \
} while (0)

/* Load a 64-bit field (caller must know fld is a 64-bit-aligned qword). */
#define MLX5_GET64(typ, p, fld) be64_to_cpu(*((__be64 *)(p) + __mlx5_64_off(typ, fld)))
102
103enum {
104	MLX5_MAX_COMMANDS		= 32,
105	MLX5_CMD_DATA_BLOCK_SIZE	= 512,
106	MLX5_PCI_CMD_XPORT		= 7,
107	MLX5_MKEY_BSF_OCTO_SIZE		= 4,
108	MLX5_MAX_PSVS			= 4,
109};
110
111enum {
112	MLX5_EXTENDED_UD_AV		= 0x80000000,
113};
114
115enum {
116	MLX5_CQ_FLAGS_OI	= 2,
117};
118
119enum {
120	MLX5_STAT_RATE_OFFSET	= 5,
121};
122
123enum {
124	MLX5_INLINE_SEG = 0x80000000,
125};
126
127enum {
128	MLX5_HW_START_PADDING = MLX5_INLINE_SEG,
129};
130
131enum {
132	MLX5_MIN_PKEY_TABLE_SIZE = 128,
133	MLX5_MAX_LOG_PKEY_TABLE  = 5,
134};
135
enum {
	/* NOTE(review): 1 << 31 left-shifts into the sign bit of int, which
	 * is undefined in strict C; kept as-is to match the hardware bit
	 * position and existing callers (GCC/Clang compile it as expected). */
	MLX5_MKEY_INBOX_PG_ACCESS = 1 << 31
};
139
140enum {
141	MLX5_PERM_LOCAL_READ	= 1 << 2,
142	MLX5_PERM_LOCAL_WRITE	= 1 << 3,
143	MLX5_PERM_REMOTE_READ	= 1 << 4,
144	MLX5_PERM_REMOTE_WRITE	= 1 << 5,
145	MLX5_PERM_ATOMIC	= 1 << 6,
146	MLX5_PERM_UMR_EN	= 1 << 7,
147};
148
149enum {
150	MLX5_PCIE_CTRL_SMALL_FENCE	= 1 << 0,
151	MLX5_PCIE_CTRL_RELAXED_ORDERING	= 1 << 2,
152	MLX5_PCIE_CTRL_NO_SNOOP		= 1 << 3,
153	MLX5_PCIE_CTRL_TLP_PROCE_EN	= 1 << 6,
154	MLX5_PCIE_CTRL_TPH_MASK		= 3 << 4,
155};
156
/* Memory-key (mkey) control flags. */
enum {
	MLX5_MKEY_REMOTE_INVAL	= 1 << 24,
	MLX5_MKEY_FLAG_SYNC_UMR = 1 << 29,
	MLX5_MKEY_BSF_EN	= 1 << 30,
	/* NOTE(review): 1 << 31 shifts into the sign bit (UB in strict C);
	 * preserved for ABI/bit-position compatibility. */
	MLX5_MKEY_LEN64		= 1 << 31,
};
163
164enum {
165	MLX5_EN_RD	= (u64)1,
166	MLX5_EN_WR	= (u64)2
167};
168
169enum {
170	MLX5_BF_REGS_PER_PAGE		= 4,
171	MLX5_MAX_UAR_PAGES		= 1 << 8,
172	MLX5_NON_FP_BF_REGS_PER_PAGE	= 2,
173	MLX5_MAX_UUARS	= MLX5_MAX_UAR_PAGES * MLX5_NON_FP_BF_REGS_PER_PAGE,
174};
175
176enum {
177	MLX5_MKEY_MASK_LEN		= 1ull << 0,
178	MLX5_MKEY_MASK_PAGE_SIZE	= 1ull << 1,
179	MLX5_MKEY_MASK_START_ADDR	= 1ull << 6,
180	MLX5_MKEY_MASK_PD		= 1ull << 7,
181	MLX5_MKEY_MASK_EN_RINVAL	= 1ull << 8,
182	MLX5_MKEY_MASK_EN_SIGERR	= 1ull << 9,
183	MLX5_MKEY_MASK_BSF_EN		= 1ull << 12,
184	MLX5_MKEY_MASK_KEY		= 1ull << 13,
185	MLX5_MKEY_MASK_QPN		= 1ull << 14,
186	MLX5_MKEY_MASK_LR		= 1ull << 17,
187	MLX5_MKEY_MASK_LW		= 1ull << 18,
188	MLX5_MKEY_MASK_RR		= 1ull << 19,
189	MLX5_MKEY_MASK_RW		= 1ull << 20,
190	MLX5_MKEY_MASK_A		= 1ull << 21,
191	MLX5_MKEY_MASK_SMALL_FENCE	= 1ull << 23,
192	MLX5_MKEY_MASK_FREE		= 1ull << 29,
193};
194
195enum {
196	MLX5_UMR_TRANSLATION_OFFSET_EN	= (1 << 4),
197
198	MLX5_UMR_CHECK_NOT_FREE		= (1 << 5),
199	MLX5_UMR_CHECK_FREE		= (2 << 5),
200
201	MLX5_UMR_INLINE			= (1 << 7),
202};
203
204#define MLX5_UMR_MTT_ALIGNMENT 0x40
205#define MLX5_UMR_MTT_MASK      (MLX5_UMR_MTT_ALIGNMENT - 1)
206#define MLX5_UMR_MTT_MIN_CHUNK_SIZE MLX5_UMR_MTT_ALIGNMENT
207
208enum {
209	MLX5_EVENT_QUEUE_TYPE_QP = 0,
210	MLX5_EVENT_QUEUE_TYPE_RQ = 1,
211	MLX5_EVENT_QUEUE_TYPE_SQ = 2,
212};
213
214enum {
215	MLX5_PORT_CHANGE_SUBTYPE_DOWN		= 1,
216	MLX5_PORT_CHANGE_SUBTYPE_ACTIVE		= 4,
217	MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED	= 5,
218	MLX5_PORT_CHANGE_SUBTYPE_LID		= 6,
219	MLX5_PORT_CHANGE_SUBTYPE_PKEY		= 7,
220	MLX5_PORT_CHANGE_SUBTYPE_GUID		= 8,
221	MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG	= 9,
222};
223
224enum {
225	MLX5_DCBX_EVENT_SUBTYPE_ERROR_STATE_DCBX = 1,
226	MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_CHANGE,
227	MLX5_DCBX_EVENT_SUBTYPE_LOCAL_OPER_CHANGE,
228	MLX5_DCBX_EVENT_SUBTYPE_REMOTE_CONFIG_APP_PRIORITY_CHANGE,
229	MLX5_MAX_INLINE_RECEIVE_SIZE		= 64
230};
231
232enum {
233	MLX5_DEV_CAP_FLAG_XRC		= 1LL <<  3,
234	MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR	= 1LL <<  8,
235	MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR	= 1LL <<  9,
236	MLX5_DEV_CAP_FLAG_APM		= 1LL << 17,
237	MLX5_DEV_CAP_FLAG_SCQE_BRK_MOD	= 1LL << 21,
238	MLX5_DEV_CAP_FLAG_BLOCK_MCAST	= 1LL << 23,
239	MLX5_DEV_CAP_FLAG_CQ_MODER	= 1LL << 29,
240	MLX5_DEV_CAP_FLAG_RESIZE_CQ	= 1LL << 30,
241	MLX5_DEV_CAP_FLAG_ATOMIC	= 1LL << 33,
242	MLX5_DEV_CAP_FLAG_ROCE          = 1LL << 34,
243	MLX5_DEV_CAP_FLAG_DCT		= 1LL << 37,
244	MLX5_DEV_CAP_FLAG_SIG_HAND_OVER	= 1LL << 40,
245	MLX5_DEV_CAP_FLAG_CMDIF_CSUM	= 3LL << 46,
246	MLX5_DEV_CAP_FLAG_DRAIN_SIGERR	= 1LL << 48,
247};
248
249enum {
250	MLX5_ROCE_VERSION_1		= 0,
251	MLX5_ROCE_VERSION_1_5		= 1,
252	MLX5_ROCE_VERSION_2		= 2,
253};
254
255enum {
256	MLX5_ROCE_VERSION_1_CAP		= 1 << MLX5_ROCE_VERSION_1,
257	MLX5_ROCE_VERSION_1_5_CAP	= 1 << MLX5_ROCE_VERSION_1_5,
258	MLX5_ROCE_VERSION_2_CAP		= 1 << MLX5_ROCE_VERSION_2,
259};
260
261enum {
262	MLX5_ROCE_L3_TYPE_IPV4		= 0,
263	MLX5_ROCE_L3_TYPE_IPV6		= 1,
264};
265
266enum {
267	MLX5_ROCE_L3_TYPE_IPV4_CAP	= 1 << 1,
268	MLX5_ROCE_L3_TYPE_IPV6_CAP	= 1 << 2,
269};
270
271enum {
272	MLX5_OPCODE_NOP			= 0x00,
273	MLX5_OPCODE_SEND_INVAL		= 0x01,
274	MLX5_OPCODE_RDMA_WRITE		= 0x08,
275	MLX5_OPCODE_RDMA_WRITE_IMM	= 0x09,
276	MLX5_OPCODE_SEND		= 0x0a,
277	MLX5_OPCODE_SEND_IMM		= 0x0b,
278	MLX5_OPCODE_LSO			= 0x0e,
279	MLX5_OPCODE_RDMA_READ		= 0x10,
280	MLX5_OPCODE_ATOMIC_CS		= 0x11,
281	MLX5_OPCODE_ATOMIC_FA		= 0x12,
282	MLX5_OPCODE_ATOMIC_MASKED_CS	= 0x14,
283	MLX5_OPCODE_ATOMIC_MASKED_FA	= 0x15,
284	MLX5_OPCODE_BIND_MW		= 0x18,
285	MLX5_OPCODE_CONFIG_CMD		= 0x1f,
286
287	MLX5_RECV_OPCODE_RDMA_WRITE_IMM	= 0x00,
288	MLX5_RECV_OPCODE_SEND		= 0x01,
289	MLX5_RECV_OPCODE_SEND_IMM	= 0x02,
290	MLX5_RECV_OPCODE_SEND_INVAL	= 0x03,
291
292	MLX5_CQE_OPCODE_ERROR		= 0x1e,
293	MLX5_CQE_OPCODE_RESIZE		= 0x16,
294
295	MLX5_OPCODE_SET_PSV		= 0x20,
296	MLX5_OPCODE_GET_PSV		= 0x21,
297	MLX5_OPCODE_CHECK_PSV		= 0x22,
298	MLX5_OPCODE_RGET_PSV		= 0x26,
299	MLX5_OPCODE_RCHECK_PSV		= 0x27,
300
301	MLX5_OPCODE_UMR			= 0x25,
302
303	MLX5_OPCODE_SIGNATURE_CANCELED	= (1 << 15),
304};
305
306enum {
307	MLX5_SET_PORT_RESET_QKEY	= 0,
308	MLX5_SET_PORT_GUID0		= 16,
309	MLX5_SET_PORT_NODE_GUID		= 17,
310	MLX5_SET_PORT_SYS_GUID		= 18,
311	MLX5_SET_PORT_GID_TABLE		= 19,
312	MLX5_SET_PORT_PKEY_TABLE	= 20,
313};
314
315enum {
316	MLX5_MAX_PAGE_SHIFT		= 31
317};
318
319enum {
320	MLX5_ADAPTER_PAGE_SHIFT		= 12,
321	MLX5_ADAPTER_PAGE_SIZE		= 1 << MLX5_ADAPTER_PAGE_SHIFT,
322};
323
324enum {
325	MLX5_CAP_OFF_CMDIF_CSUM		= 46,
326};
327
328struct mlx5_inbox_hdr {
329	__be16		opcode;
330	u8		rsvd[4];
331	__be16		opmod;
332};
333
334struct mlx5_outbox_hdr {
335	u8		status;
336	u8		rsvd[3];
337	__be32		syndrome;
338};
339
340struct mlx5_cmd_set_dc_cnak_mbox_in {
341	struct mlx5_inbox_hdr	hdr;
342	u8			enable;
343	u8			reserved[47];
344	__be64			pa;
345};
346
347struct mlx5_cmd_set_dc_cnak_mbox_out {
348	struct mlx5_outbox_hdr	hdr;
349	u8			rsvd[8];
350};
351
352struct mlx5_cmd_layout {
353	u8		type;
354	u8		rsvd0[3];
355	__be32		inlen;
356	__be64		in_ptr;
357	__be32		in[4];
358	__be32		out[4];
359	__be64		out_ptr;
360	__be32		outlen;
361	u8		token;
362	u8		sig;
363	u8		rsvd1;
364	u8		status_own;
365};
366
367
/*
 * Firmware health buffer, mapped in the device's initialization segment;
 * read by the health poller to report firmware assert/crash details.
 * Field names and layout are a device ABI — do not reorder or rename.
 */
struct mlx5_health_buffer {
	__be32		assert_var[5];
	__be32		rsvd0[3];
	__be32		assert_exit_ptr;
	__be32		assert_callra;
	__be32		rsvd1[2];
	__be32		fw_ver;
	__be32		hw_id;
	__be32		rsvd2;
	u8		irisc_index;
	u8		synd;
	/* NOTE(review): presumably the extended syndrome ("ext_synd" in
	 * other mlx5 code bases); name kept as-is for source compat. */
	__be16		ext_sync;
};
381
382struct mlx5_init_seg {
383	__be32			fw_rev;
384	__be32			cmdif_rev_fw_sub;
385	__be32			rsvd0[2];
386	__be32			cmdq_addr_h;
387	__be32			cmdq_addr_l_sz;
388	__be32			cmd_dbell;
389	__be32			rsvd1[120];
390	__be32			initializing;
391	struct mlx5_health_buffer  health;
392	__be32			rsvd2[880];
393	__be32			internal_timer_h;
394	__be32			internal_timer_l;
395	__be32			rsvd3[2];
396	__be32			health_counter;
397	__be32			rsvd4[1019];
398	__be64			ieee1588_clk;
399	__be32			ieee1588_clk_type;
400	__be32			clr_intx;
401};
402
403struct mlx5_eqe_comp {
404	__be32	reserved[6];
405	__be32	cqn;
406};
407
408struct mlx5_eqe_qp_srq {
409	__be32	reserved[6];
410	__be32	qp_srq_n;
411};
412
413struct mlx5_eqe_cq_err {
414	__be32	cqn;
415	u8	reserved1[7];
416	u8	syndrome;
417};
418
419struct mlx5_eqe_port_state {
420	u8	reserved0[8];
421	u8	port;
422};
423
424struct mlx5_eqe_gpio {
425	__be32	reserved0[2];
426	__be64	gpio_event;
427};
428
429struct mlx5_eqe_congestion {
430	u8	type;
431	u8	rsvd0;
432	u8	congestion_level;
433};
434
435struct mlx5_eqe_stall_vl {
436	u8	rsvd0[3];
437	u8	port_vl;
438};
439
440struct mlx5_eqe_cmd {
441	__be32	vector;
442	__be32	rsvd[6];
443};
444
445struct mlx5_eqe_page_req {
446	u8		rsvd0[2];
447	__be16		func_id;
448	__be32		num_pages;
449	__be32		rsvd1[5];
450};
451
452struct mlx5_eqe_vport_change {
453	u8		rsvd0[2];
454	__be16		vport_num;
455	__be32		rsvd1[6];
456};
457
458
459#define PORT_MODULE_EVENT_MODULE_STATUS_MASK  0xF
460#define PORT_MODULE_EVENT_ERROR_TYPE_MASK     0xF
461
462enum {
463	MLX5_MODULE_STATUS_PLUGGED    = 0x1,
464	MLX5_MODULE_STATUS_UNPLUGGED  = 0x2,
465	MLX5_MODULE_STATUS_ERROR      = 0x3,
466};
467
468enum {
469	MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED                 = 0x0,
470	MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX_CABLE_MODULE  = 0x1,
471	MLX5_MODULE_EVENT_ERROR_BUS_STUCK                             = 0x2,
472	MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT               = 0x3,
473	MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST              = 0x4,
474	MLX5_MODULE_EVENT_ERROR_UNKNOWN_IDENTIFIER                    = 0x5,
475	MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE                      = 0x6,
476	MLX5_MODULE_EVENT_ERROR_CABLE_IS_SHORTED                      = 0x7,
477};
478
479struct mlx5_eqe_port_module_event {
480	u8        rsvd0;
481	u8        module;
482	u8        rsvd1;
483	u8        module_status;
484	u8        rsvd2[2];
485	u8        error_type;
486};
487
488union ev_data {
489	__be32				raw[7];
490	struct mlx5_eqe_cmd		cmd;
491	struct mlx5_eqe_comp		comp;
492	struct mlx5_eqe_qp_srq		qp_srq;
493	struct mlx5_eqe_cq_err		cq_err;
494	struct mlx5_eqe_port_state	port;
495	struct mlx5_eqe_gpio		gpio;
496	struct mlx5_eqe_congestion	cong;
497	struct mlx5_eqe_stall_vl	stall_vl;
498	struct mlx5_eqe_page_req	req_pages;
499	struct mlx5_eqe_port_module_event port_module_event;
500	struct mlx5_eqe_vport_change	vport_change;
501} __packed;
502
/*
 * Generic event-queue entry (EQE).  'type' selects which member of the
 * ev_data union in 'data' is valid; 'owner' carries the ownership bit
 * used to detect hardware-written entries.  Layout is a device ABI.
 */
struct mlx5_eqe {
	u8		rsvd0;
	u8		type;
	u8		rsvd1;
	u8		sub_type;
	__be32		rsvd2[7];
	union ev_data	data;
	__be16		rsvd3;
	u8		signature;
	u8		owner;
} __packed;
514
515struct mlx5_cmd_prot_block {
516	u8		data[MLX5_CMD_DATA_BLOCK_SIZE];
517	u8		rsvd0[48];
518	__be64		next;
519	__be32		block_num;
520	u8		rsvd1;
521	u8		token;
522	u8		ctrl_sig;
523	u8		sig;
524};
525
526enum {
527	MLX5_CQE_SYND_FLUSHED_IN_ERROR = 5,
528};
529
530struct mlx5_err_cqe {
531	u8	rsvd0[32];
532	__be32	srqn;
533	u8	rsvd1[18];
534	u8	vendor_err_synd;
535	u8	syndrome;
536	__be32	s_wqe_opcode_qpn;
537	__be16	wqe_counter;
538	u8	signature;
539	u8	op_own;
540};
541
/*
 * 64-byte completion-queue entry (CQE) as written by the device.
 * Accessor helpers below decode the packed bit-fields; layout is a
 * device ABI — do not reorder or resize members.
 */
struct mlx5_cqe64 {
	u8		tunneled_etc;		/* bit 0: packet was tunneled (see cqe_is_tunneled) */
	u8		rsvd0[3];
	u8		lro_tcppsh_abort_dupack; /* bit 7: LRO timestamp valid, bit 6: TCP PSH */
	u8		lro_min_ttl;
	__be16		lro_tcp_win;
	__be32		lro_ack_seq_num;
	__be32		rss_hash_result;
	u8		rss_hash_type;		/* also first byte of SMAC overlay (get_cqe_smac) */
	u8		ml_path;
	u8		rsvd20[2];
	__be16		check_sum;
	__be16		slid;
	__be32		flags_rqpn;
	u8		hds_ip_ext;
	u8		l4_hdr_type_etc;	/* bits 4-6: L4 header type, bit 0: VLAN present */
	__be16		vlan_info;		/* low 12 bits: VLAN id (get_cqe_vlan) */
	__be32		srqn; /* [31:24]: lro_num_seg, [23:0]: srqn */
	__be32		imm_inval_pkey;
	u8		rsvd40[4];
	__be32		byte_cnt;
	__be64		timestamp;
	__be32		sop_drop_qpn;
	__be16		wqe_counter;
	u8		signature;
	u8		op_own;			/* opcode + ownership bit */
};
569
570static inline bool get_cqe_lro_timestamp_valid(struct mlx5_cqe64 *cqe)
571{
572	return (cqe->lro_tcppsh_abort_dupack >> 7) & 1;
573}
574
575static inline bool get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
576{
577	return (cqe->lro_tcppsh_abort_dupack >> 6) & 1;
578}
579
580static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe)
581{
582	return (cqe->l4_hdr_type_etc >> 4) & 0x7;
583}
584
585static inline u16 get_cqe_vlan(struct mlx5_cqe64 *cqe)
586{
587	return be16_to_cpu(cqe->vlan_info) & 0xfff;
588}
589
/*
 * Copy the 6-byte source MAC out of the CQE into smac.  The SMAC
 * overlays the rss_hash_type/ml_path/rsvd20 bytes (4) followed by the
 * first 2 bytes of slid in this CQE format.
 * NOTE(review): this relies on the exact field layout of mlx5_cqe64
 * above; confirm the overlay against the device PRM before changing
 * either the struct or these offsets.
 */
static inline void get_cqe_smac(struct mlx5_cqe64 *cqe, u8 *smac)
{
	memcpy(smac, &cqe->rss_hash_type , 4);
	memcpy(smac + 4, &cqe->slid , 2);
}
595
596static inline bool cqe_has_vlan(struct mlx5_cqe64 *cqe)
597{
598	return cqe->l4_hdr_type_etc & 0x1;
599}
600
601static inline bool cqe_is_tunneled(struct mlx5_cqe64 *cqe)
602{
603	return cqe->tunneled_etc & 0x1;
604}
605
606enum {
607	CQE_L4_HDR_TYPE_NONE			= 0x0,
608	CQE_L4_HDR_TYPE_TCP_NO_ACK		= 0x1,
609	CQE_L4_HDR_TYPE_UDP			= 0x2,
610	CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA		= 0x3,
611	CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA	= 0x4,
612};
613
614enum {
615	/* source L3 hash types */
616	CQE_RSS_SRC_HTYPE_IP	= 0x3 << 0,
617	CQE_RSS_SRC_HTYPE_IPV4	= 0x1 << 0,
618	CQE_RSS_SRC_HTYPE_IPV6	= 0x2 << 0,
619
620	/* destination L3 hash types */
621	CQE_RSS_DST_HTYPE_IP	= 0x3 << 2,
622	CQE_RSS_DST_HTYPE_IPV4	= 0x1 << 2,
623	CQE_RSS_DST_HTYPE_IPV6	= 0x2 << 2,
624
625	/* source L4 hash types */
626	CQE_RSS_SRC_HTYPE_L4	= 0x3 << 4,
627	CQE_RSS_SRC_HTYPE_TCP	= 0x1 << 4,
628	CQE_RSS_SRC_HTYPE_UDP	= 0x2 << 4,
629	CQE_RSS_SRC_HTYPE_IPSEC	= 0x3 << 4,
630
631	/* destination L4 hash types */
632	CQE_RSS_DST_HTYPE_L4	= 0x3 << 6,
633	CQE_RSS_DST_HTYPE_TCP	= 0x1 << 6,
634	CQE_RSS_DST_HTYPE_UDP	= 0x2 << 6,
635	CQE_RSS_DST_HTYPE_IPSEC	= 0x3 << 6,
636};
637
638enum {
639	CQE_ROCE_L3_HEADER_TYPE_GRH	= 0x0,
640	CQE_ROCE_L3_HEADER_TYPE_IPV6	= 0x1,
641	CQE_ROCE_L3_HEADER_TYPE_IPV4	= 0x2,
642};
643
644enum {
645	CQE_L2_OK	= 1 << 0,
646	CQE_L3_OK	= 1 << 1,
647	CQE_L4_OK	= 1 << 2,
648};
649
650struct mlx5_sig_err_cqe {
651	u8		rsvd0[16];
652	__be32		expected_trans_sig;
653	__be32		actual_trans_sig;
654	__be32		expected_reftag;
655	__be32		actual_reftag;
656	__be16		syndrome;
657	u8		rsvd22[2];
658	__be32		mkey;
659	__be64		err_offset;
660	u8		rsvd30[8];
661	__be32		qpn;
662	u8		rsvd38[2];
663	u8		signature;
664	u8		op_own;
665};
666
667struct mlx5_wqe_srq_next_seg {
668	u8			rsvd0[2];
669	__be16			next_wqe_index;
670	u8			signature;
671	u8			rsvd1[11];
672};
673
674union mlx5_ext_cqe {
675	struct ib_grh	grh;
676	u8		inl[64];
677};
678
679struct mlx5_cqe128 {
680	union mlx5_ext_cqe	inl_grh;
681	struct mlx5_cqe64	cqe64;
682};
683
684struct mlx5_srq_ctx {
685	u8			state_log_sz;
686	u8			rsvd0[3];
687	__be32			flags_xrcd;
688	__be32			pgoff_cqn;
689	u8			rsvd1[4];
690	u8			log_pg_sz;
691	u8			rsvd2[7];
692	__be32			pd;
693	__be16			lwm;
694	__be16			wqe_cnt;
695	u8			rsvd3[8];
696	__be64			db_record;
697};
698
699struct mlx5_create_srq_mbox_in {
700	struct mlx5_inbox_hdr	hdr;
701	__be32			input_srqn;
702	u8			rsvd0[4];
703	struct mlx5_srq_ctx	ctx;
704	u8			rsvd1[208];
705	__be64			pas[0];
706};
707
708struct mlx5_create_srq_mbox_out {
709	struct mlx5_outbox_hdr	hdr;
710	__be32			srqn;
711	u8			rsvd[4];
712};
713
714struct mlx5_destroy_srq_mbox_in {
715	struct mlx5_inbox_hdr	hdr;
716	__be32			srqn;
717	u8			rsvd[4];
718};
719
720struct mlx5_destroy_srq_mbox_out {
721	struct mlx5_outbox_hdr	hdr;
722	u8			rsvd[8];
723};
724
725struct mlx5_query_srq_mbox_in {
726	struct mlx5_inbox_hdr	hdr;
727	__be32			srqn;
728	u8			rsvd0[4];
729};
730
731struct mlx5_query_srq_mbox_out {
732	struct mlx5_outbox_hdr	hdr;
733	u8			rsvd0[8];
734	struct mlx5_srq_ctx	ctx;
735	u8			rsvd1[32];
736	__be64			pas[0];
737};
738
739struct mlx5_arm_srq_mbox_in {
740	struct mlx5_inbox_hdr	hdr;
741	__be32			srqn;
742	__be16			rsvd;
743	__be16			lwm;
744};
745
746struct mlx5_arm_srq_mbox_out {
747	struct mlx5_outbox_hdr	hdr;
748	u8			rsvd[8];
749};
750
751struct mlx5_cq_context {
752	u8			status;
753	u8			cqe_sz_flags;
754	u8			st;
755	u8			rsvd3;
756	u8			rsvd4[6];
757	__be16			page_offset;
758	__be32			log_sz_usr_page;
759	__be16			cq_period;
760	__be16			cq_max_count;
761	__be16			rsvd20;
762	__be16			c_eqn;
763	u8			log_pg_sz;
764	u8			rsvd25[7];
765	__be32			last_notified_index;
766	__be32			solicit_producer_index;
767	__be32			consumer_counter;
768	__be32			producer_counter;
769	u8			rsvd48[8];
770	__be64			db_record_addr;
771};
772
773struct mlx5_create_cq_mbox_in {
774	struct mlx5_inbox_hdr	hdr;
775	__be32			input_cqn;
776	u8			rsvdx[4];
777	struct mlx5_cq_context	ctx;
778	u8			rsvd6[192];
779	__be64			pas[0];
780};
781
782struct mlx5_create_cq_mbox_out {
783	struct mlx5_outbox_hdr	hdr;
784	__be32			cqn;
785	u8			rsvd0[4];
786};
787
788struct mlx5_destroy_cq_mbox_in {
789	struct mlx5_inbox_hdr	hdr;
790	__be32			cqn;
791	u8			rsvd0[4];
792};
793
794struct mlx5_destroy_cq_mbox_out {
795	struct mlx5_outbox_hdr	hdr;
796	u8			rsvd0[8];
797};
798
799struct mlx5_query_cq_mbox_in {
800	struct mlx5_inbox_hdr	hdr;
801	__be32			cqn;
802	u8			rsvd0[4];
803};
804
805struct mlx5_query_cq_mbox_out {
806	struct mlx5_outbox_hdr	hdr;
807	u8			rsvd0[8];
808	struct mlx5_cq_context	ctx;
809	u8			rsvd6[16];
810	__be64			pas[0];
811};
812
813struct mlx5_modify_cq_mbox_in {
814	struct mlx5_inbox_hdr	hdr;
815	__be32			cqn;
816	__be32			field_select;
817	struct mlx5_cq_context	ctx;
818	u8			rsvd[192];
819	__be64			pas[0];
820};
821
822struct mlx5_modify_cq_mbox_out {
823	struct mlx5_outbox_hdr	hdr;
824	u8			rsvd[8];
825};
826
/*
 * Event-queue context as exchanged with firmware in create/query EQ
 * commands.  Layout is a device ABI.
 * NOTE(review): the field names "page_pffset" (page_offset) and
 * "produser_counter" (producer_counter) are historical typos; they are
 * referenced by other translation units, so renaming them here would
 * break the build — left as-is.
 */
struct mlx5_eq_context {
	u8			status;
	u8			ec_oi;
	u8			st;
	u8			rsvd2[7];
	__be16			page_pffset;
	__be32			log_sz_usr_page;
	u8			rsvd3[7];
	u8			intr;
	u8			log_page_size;
	u8			rsvd4[15];
	__be32			consumer_counter;
	__be32			produser_counter;
	u8			rsvd5[16];
};
842
843struct mlx5_create_eq_mbox_in {
844	struct mlx5_inbox_hdr	hdr;
845	u8			rsvd0[3];
846	u8			input_eqn;
847	u8			rsvd1[4];
848	struct mlx5_eq_context	ctx;
849	u8			rsvd2[8];
850	__be64			events_mask;
851	u8			rsvd3[176];
852	__be64			pas[0];
853};
854
855struct mlx5_create_eq_mbox_out {
856	struct mlx5_outbox_hdr	hdr;
857	u8			rsvd0[3];
858	u8			eq_number;
859	u8			rsvd1[4];
860};
861
862struct mlx5_map_eq_mbox_in {
863	struct mlx5_inbox_hdr	hdr;
864	__be64			mask;
865	u8			mu;
866	u8			rsvd0[2];
867	u8			eqn;
868	u8			rsvd1[24];
869};
870
871struct mlx5_map_eq_mbox_out {
872	struct mlx5_outbox_hdr	hdr;
873	u8			rsvd[8];
874};
875
876struct mlx5_query_eq_mbox_in {
877	struct mlx5_inbox_hdr	hdr;
878	u8			rsvd0[3];
879	u8			eqn;
880	u8			rsvd1[4];
881};
882
883struct mlx5_query_eq_mbox_out {
884	struct mlx5_outbox_hdr	hdr;
885	u8			rsvd[8];
886	struct mlx5_eq_context	ctx;
887};
888
889enum {
890	MLX5_MKEY_STATUS_FREE = 1 << 6,
891};
892
893struct mlx5_mkey_seg {
894	/* This is a two bit field occupying bits 31-30.
895	 * bit 31 is always 0,
896	 * bit 30 is zero for regular MRs and 1 (e.g free) for UMRs that do not have tanslation
897	 */
898	u8		status;
899	u8		pcie_control;
900	u8		flags;
901	u8		version;
902	__be32		qpn_mkey7_0;
903	u8		rsvd1[4];
904	__be32		flags_pd;
905	__be64		start_addr;
906	__be64		len;
907	__be32		bsfs_octo_size;
908	u8		rsvd2[16];
909	__be32		xlt_oct_size;
910	u8		rsvd3[3];
911	u8		log2_page_size;
912	u8		rsvd4[4];
913};
914
915struct mlx5_query_special_ctxs_mbox_in {
916	struct mlx5_inbox_hdr	hdr;
917	u8			rsvd[8];
918};
919
920struct mlx5_query_special_ctxs_mbox_out {
921	struct mlx5_outbox_hdr	hdr;
922	__be32			dump_fill_mkey;
923	__be32			reserved_lkey;
924};
925
926struct mlx5_create_mkey_mbox_in {
927	struct mlx5_inbox_hdr	hdr;
928	__be32			input_mkey_index;
929	__be32			flags;
930	struct mlx5_mkey_seg	seg;
931	u8			rsvd1[16];
932	__be32			xlat_oct_act_size;
933	__be32			rsvd2;
934	u8			rsvd3[168];
935	__be64			pas[0];
936};
937
938struct mlx5_create_mkey_mbox_out {
939	struct mlx5_outbox_hdr	hdr;
940	__be32			mkey;
941	u8			rsvd[4];
942};
943
944struct mlx5_query_mkey_mbox_in {
945	struct mlx5_inbox_hdr	hdr;
946	__be32			mkey;
947};
948
949struct mlx5_query_mkey_mbox_out {
950	struct mlx5_outbox_hdr	hdr;
951	__be64			pas[0];
952};
953
954struct mlx5_modify_mkey_mbox_in {
955	struct mlx5_inbox_hdr	hdr;
956	__be32			mkey;
957	__be64			pas[0];
958};
959
960struct mlx5_modify_mkey_mbox_out {
961	struct mlx5_outbox_hdr	hdr;
962	u8			rsvd[8];
963};
964
965struct mlx5_dump_mkey_mbox_in {
966	struct mlx5_inbox_hdr	hdr;
967};
968
969struct mlx5_dump_mkey_mbox_out {
970	struct mlx5_outbox_hdr	hdr;
971	__be32			mkey;
972};
973
974struct mlx5_mad_ifc_mbox_in {
975	struct mlx5_inbox_hdr	hdr;
976	__be16			remote_lid;
977	u8			rsvd0;
978	u8			port;
979	u8			rsvd1[4];
980	u8			data[256];
981};
982
983struct mlx5_mad_ifc_mbox_out {
984	struct mlx5_outbox_hdr	hdr;
985	u8			rsvd[8];
986	u8			data[256];
987};
988
989struct mlx5_access_reg_mbox_in {
990	struct mlx5_inbox_hdr		hdr;
991	u8				rsvd0[2];
992	__be16				register_id;
993	__be32				arg;
994	__be32				data[0];
995};
996
997struct mlx5_access_reg_mbox_out {
998	struct mlx5_outbox_hdr		hdr;
999	u8				rsvd[8];
1000	__be32				data[0];
1001};
1002
1003#define MLX5_ATTR_EXTENDED_PORT_INFO	cpu_to_be16(0xff90)
1004
1005enum {
1006	MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO	= 1 <<  0
1007};
1008
1009struct mlx5_allocate_psv_in {
1010	struct mlx5_inbox_hdr   hdr;
1011	__be32			npsv_pd;
1012	__be32			rsvd_psv0;
1013};
1014
1015struct mlx5_allocate_psv_out {
1016	struct mlx5_outbox_hdr  hdr;
1017	u8			rsvd[8];
1018	__be32			psv_idx[4];
1019};
1020
1021struct mlx5_destroy_psv_in {
1022	struct mlx5_inbox_hdr	hdr;
1023	__be32                  psv_number;
1024	u8                      rsvd[4];
1025};
1026
1027struct mlx5_destroy_psv_out {
1028	struct mlx5_outbox_hdr  hdr;
1029	u8                      rsvd[8];
1030};
1031
/* Compile-time host endianness probe: returns 1 on little-endian hosts,
 * 0 on big-endian.  Mirrors the MLX5_SET_HOST_ENDIANNESS definition at
 * the top of this header; any third case is rejected at compile time. */
static inline int mlx5_host_is_le(void)
{
#if defined(__LITTLE_ENDIAN)
	return 1;
#elif defined(__BIG_ENDIAN)
	return 0;
#else
#error Host endianness not defined
#endif
}
1042
1043#define MLX5_CMD_OP_MAX 0x939
1044
1045enum {
1046	VPORT_STATE_DOWN		= 0x0,
1047	VPORT_STATE_UP			= 0x1,
1048};
1049
1050enum {
1051	MLX5_L3_PROT_TYPE_IPV4		= 0,
1052	MLX5_L3_PROT_TYPE_IPV6		= 1,
1053};
1054
1055enum {
1056	MLX5_L4_PROT_TYPE_TCP		= 0,
1057	MLX5_L4_PROT_TYPE_UDP		= 1,
1058};
1059
1060enum {
1061	MLX5_HASH_FIELD_SEL_SRC_IP	= 1 << 0,
1062	MLX5_HASH_FIELD_SEL_DST_IP	= 1 << 1,
1063	MLX5_HASH_FIELD_SEL_L4_SPORT	= 1 << 2,
1064	MLX5_HASH_FIELD_SEL_L4_DPORT	= 1 << 3,
1065	MLX5_HASH_FIELD_SEL_IPSEC_SPI	= 1 << 4,
1066};
1067
1068enum {
1069	MLX5_MATCH_OUTER_HEADERS	= 1 << 0,
1070	MLX5_MATCH_MISC_PARAMETERS	= 1 << 1,
1071	MLX5_MATCH_INNER_HEADERS	= 1 << 2,
1072
1073};
1074
1075enum {
1076	MLX5_FLOW_TABLE_TYPE_NIC_RCV	 = 0,
1077	MLX5_FLOW_TABLE_TYPE_EGRESS_ACL  = 2,
1078	MLX5_FLOW_TABLE_TYPE_INGRESS_ACL = 3,
1079	MLX5_FLOW_TABLE_TYPE_ESWITCH	 = 4,
1080	MLX5_FLOW_TABLE_TYPE_SNIFFER_RX	 = 5,
1081	MLX5_FLOW_TABLE_TYPE_SNIFFER_TX	 = 6,
1082};
1083
1084enum {
1085	MLX5_MODIFY_ESW_VPORT_CONTEXT_CVLAN_INSERT_NONE	      = 0,
1086	MLX5_MODIFY_ESW_VPORT_CONTEXT_CVLAN_INSERT_IF_NO_VLAN = 1,
1087	MLX5_MODIFY_ESW_VPORT_CONTEXT_CVLAN_INSERT_OVERWRITE  = 2
1088};
1089
1090enum {
1091	MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_SVLAN_STRIP  = 1 << 0,
1092	MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_CVLAN_STRIP  = 1 << 1,
1093	MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_SVLAN_INSERT = 1 << 2,
1094	MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_CVLAN_INSERT = 1 << 3
1095};
1096
1097enum {
1098	MLX5_UC_ADDR_CHANGE = (1 << 0),
1099	MLX5_MC_ADDR_CHANGE = (1 << 1),
1100	MLX5_VLAN_CHANGE    = (1 << 2),
1101	MLX5_PROMISC_CHANGE = (1 << 3),
1102	MLX5_MTU_CHANGE     = (1 << 4),
1103};
1104
1105enum mlx5_list_type {
1106	MLX5_NIC_VPORT_LIST_TYPE_UC   = 0x0,
1107	MLX5_NIC_VPORT_LIST_TYPE_MC   = 0x1,
1108	MLX5_NIC_VPORT_LIST_TYPE_VLAN = 0x2,
1109};
1110
1111enum {
1112	MLX5_ESW_VPORT_ADMIN_STATE_DOWN  = 0x0,
1113	MLX5_ESW_VPORT_ADMIN_STATE_UP    = 0x1,
1114	MLX5_ESW_VPORT_ADMIN_STATE_AUTO  = 0x2,
1115};
1116
1117/* MLX5 DEV CAPs */
1118
1119/* TODO: EAT.ME */
1120enum mlx5_cap_mode {
1121	HCA_CAP_OPMOD_GET_MAX	= 0,
1122	HCA_CAP_OPMOD_GET_CUR	= 1,
1123};
1124
1125enum mlx5_cap_type {
1126	MLX5_CAP_GENERAL = 0,
1127	MLX5_CAP_ETHERNET_OFFLOADS,
1128	MLX5_CAP_ODP,
1129	MLX5_CAP_ATOMIC,
1130	MLX5_CAP_ROCE,
1131	MLX5_CAP_IPOIB_OFFLOADS,
1132	MLX5_CAP_EOIB_OFFLOADS,
1133	MLX5_CAP_FLOW_TABLE,
1134	MLX5_CAP_ESWITCH_FLOW_TABLE,
1135	MLX5_CAP_ESWITCH,
1136	MLX5_CAP_SNAPSHOT,
1137	MLX5_CAP_VECTOR_CALC,
1138	MLX5_CAP_QOS,
1139	MLX5_CAP_DEBUG,
1140	/* NUM OF CAP Types */
1141	MLX5_CAP_NUM
1142};
1143
1144/* GET Dev Caps macros */
1145#define MLX5_CAP_GEN(mdev, cap) \
1146	MLX5_GET(cmd_hca_cap, mdev->hca_caps_cur[MLX5_CAP_GENERAL], cap)
1147
1148#define MLX5_CAP_GEN_MAX(mdev, cap) \
1149	MLX5_GET(cmd_hca_cap, mdev->hca_caps_max[MLX5_CAP_GENERAL], cap)
1150
1151#define MLX5_CAP_ETH(mdev, cap) \
1152	MLX5_GET(per_protocol_networking_offload_caps,\
1153		 mdev->hca_caps_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap)
1154
1155#define MLX5_CAP_ETH_MAX(mdev, cap) \
1156	MLX5_GET(per_protocol_networking_offload_caps,\
1157		 mdev->hca_caps_max[MLX5_CAP_ETHERNET_OFFLOADS], cap)
1158
1159#define MLX5_CAP_ROCE(mdev, cap) \
1160	MLX5_GET(roce_cap, mdev->hca_caps_cur[MLX5_CAP_ROCE], cap)
1161
1162#define MLX5_CAP_ROCE_MAX(mdev, cap) \
1163	MLX5_GET(roce_cap, mdev->hca_caps_max[MLX5_CAP_ROCE], cap)
1164
1165#define MLX5_CAP_ATOMIC(mdev, cap) \
1166	MLX5_GET(atomic_caps, mdev->hca_caps_cur[MLX5_CAP_ATOMIC], cap)
1167
1168#define MLX5_CAP_ATOMIC_MAX(mdev, cap) \
1169	MLX5_GET(atomic_caps, mdev->hca_caps_max[MLX5_CAP_ATOMIC], cap)
1170
1171#define MLX5_CAP_FLOWTABLE(mdev, cap) \
1172	MLX5_GET(flow_table_nic_cap, mdev->hca_caps_cur[MLX5_CAP_FLOW_TABLE], cap)
1173
1174#define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
1175	MLX5_GET(flow_table_nic_cap, mdev->hca_caps_max[MLX5_CAP_FLOW_TABLE], cap)
1176
1177#define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
1178	MLX5_GET(flow_table_eswitch_cap, \
1179		 mdev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
1180
1181#define MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, cap) \
1182	MLX5_GET(flow_table_eswitch_cap, \
1183		 mdev->hca_caps_max[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
1184
/*
 * Capability accessor helpers.  Each MLX5_CAP_*(mdev, cap) macro reads
 * the named field "cap" out of a cached HCA capability page: the plain
 * form reads the currently enabled values (hca_caps_cur) and the *_MAX
 * form reads the device's maximum supported values (hca_caps_max).
 */

/* E-switch FDB (forwarding database) flow-table properties. */
#define MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_nic_esw_fdb.cap)

#define MLX5_CAP_ESW_FLOWTABLE_FDB_MAX(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_nic_esw_fdb.cap)

/* E-switch egress ACL flow-table properties. */
#define MLX5_CAP_ESW_EGRESS_ACL(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_egress.cap)

#define MLX5_CAP_ESW_EGRESS_ACL_MAX(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_egress.cap)

/* E-switch ingress ACL flow-table properties. */
#define MLX5_CAP_ESW_INGRESS_ACL(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_ingress.cap)

#define MLX5_CAP_ESW_INGRESS_ACL_MAX(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_ingress.cap)

/* General e-switch capabilities. */
#define MLX5_CAP_ESW(mdev, cap) \
	MLX5_GET(e_switch_cap, \
		 mdev->hca_caps_cur[MLX5_CAP_ESWITCH], cap)

#define MLX5_CAP_ESW_MAX(mdev, cap) \
	MLX5_GET(e_switch_cap, \
		 mdev->hca_caps_max[MLX5_CAP_ESWITCH], cap)

/* On-demand paging (ODP) capabilities. */
#define MLX5_CAP_ODP(mdev, cap)\
	MLX5_GET(odp_cap, mdev->hca_caps_cur[MLX5_CAP_ODP], cap)

#define MLX5_CAP_ODP_MAX(mdev, cap)\
	MLX5_GET(odp_cap, mdev->hca_caps_max[MLX5_CAP_ODP], cap)

/* Snapshot capabilities. */
#define MLX5_CAP_SNAPSHOT(mdev, cap) \
	MLX5_GET(snapshot_cap, \
		 mdev->hca_caps_cur[MLX5_CAP_SNAPSHOT], cap)

#define MLX5_CAP_SNAPSHOT_MAX(mdev, cap) \
	MLX5_GET(snapshot_cap, \
		 mdev->hca_caps_max[MLX5_CAP_SNAPSHOT], cap)

/* EoIB per-protocol networking offload capabilities. */
#define MLX5_CAP_EOIB_OFFLOADS(mdev, cap) \
	MLX5_GET(per_protocol_networking_offload_caps,\
		 mdev->hca_caps_cur[MLX5_CAP_EOIB_OFFLOADS], cap)

#define MLX5_CAP_EOIB_OFFLOADS_MAX(mdev, cap) \
	MLX5_GET(per_protocol_networking_offload_caps,\
		 mdev->hca_caps_max[MLX5_CAP_EOIB_OFFLOADS], cap)

/* Debug capabilities. */
#define MLX5_CAP_DEBUG(mdev, cap) \
	MLX5_GET(debug_cap, \
		 mdev->hca_caps_cur[MLX5_CAP_DEBUG], cap)

#define MLX5_CAP_DEBUG_MAX(mdev, cap) \
	MLX5_GET(debug_cap, \
		 mdev->hca_caps_max[MLX5_CAP_DEBUG], cap)

/* QoS capabilities. */
#define MLX5_CAP_QOS(mdev, cap) \
	MLX5_GET(qos_cap,\
		 mdev->hca_caps_cur[MLX5_CAP_QOS], cap)

#define MLX5_CAP_QOS_MAX(mdev, cap) \
	MLX5_GET(qos_cap,\
		 mdev->hca_caps_max[MLX5_CAP_QOS], cap)
1248
/*
 * Status codes returned by firmware in command interface completions;
 * MLX5_CMD_STAT_OK (0) means success.  The values are assigned by the
 * device interface and are deliberately non-contiguous.
 */
enum {
	MLX5_CMD_STAT_OK			= 0x0,
	MLX5_CMD_STAT_INT_ERR			= 0x1,
	MLX5_CMD_STAT_BAD_OP_ERR		= 0x2,
	MLX5_CMD_STAT_BAD_PARAM_ERR		= 0x3,
	MLX5_CMD_STAT_BAD_SYS_STATE_ERR		= 0x4,
	MLX5_CMD_STAT_BAD_RES_ERR		= 0x5,
	MLX5_CMD_STAT_RES_BUSY			= 0x6,
	MLX5_CMD_STAT_LIM_ERR			= 0x8,
	MLX5_CMD_STAT_BAD_RES_STATE_ERR		= 0x9,
	MLX5_CMD_STAT_IX_ERR			= 0xa,
	MLX5_CMD_STAT_NO_RES_ERR		= 0xf,
	MLX5_CMD_STAT_BAD_INP_LEN_ERR		= 0x50,
	MLX5_CMD_STAT_BAD_OUTP_LEN_ERR		= 0x51,
	MLX5_CMD_STAT_BAD_QP_STATE_ERR		= 0x10,
	MLX5_CMD_STAT_BAD_PKT_ERR		= 0x30,
	MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR	= 0x40,
};
1267
/*
 * Counter group selectors for port statistics queries.  Values are
 * device-assigned group IDs, hence the gaps in the numbering.
 * NOTE(review): group contents inferred from the names (IEEE 802.3,
 * RFC 2863/2819/3635 MIB groups, etc.) — confirm against the PRM.
 */
enum {
	MLX5_IEEE_802_3_COUNTERS_GROUP	      = 0x0,
	MLX5_RFC_2863_COUNTERS_GROUP	      = 0x1,
	MLX5_RFC_2819_COUNTERS_GROUP	      = 0x2,
	MLX5_RFC_3635_COUNTERS_GROUP	      = 0x3,
	MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP = 0x5,
	MLX5_ETHERNET_DISCARD_COUNTERS_GROUP  = 0x6,
	MLX5_PER_PRIORITY_COUNTERS_GROUP      = 0x10,
	MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11,
	MLX5_PHYSICAL_LAYER_COUNTERS_GROUP    = 0x12,
	MLX5_INFINIBAND_PORT_COUNTERS_GROUP = 0x20,
};
1280
/* Counter group selectors for PCIe statistics queries. */
enum {
	MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP       = 0x0,
	MLX5_PCIE_LANE_COUNTERS_GROUP	      = 0x1,
	MLX5_PCIE_TIMERS_AND_STATES_COUNTERS_GROUP = 0x2,
};
1286
/*
 * Micro-UAR (UUAR) accounting: UUARs available per UAR page (derived
 * from the number of non-fast-path blue-flame registers per page) and
 * the default total across the default set of pages.
 */
enum {
	MLX5_NUM_UUARS_PER_PAGE = MLX5_NON_FP_BF_REGS_PER_PAGE,
	MLX5_DEF_TOT_UUARS = 8 * MLX5_NUM_UUARS_PER_PAGE,
};
1291
/*
 * UAR pages reserved for driver use and the number of UUARs set aside
 * for low-latency traffic.
 * NOTE(review): semantics inferred from the names — verify at call sites.
 */
enum {
	NUM_DRIVER_UARS = 4,
	NUM_LOW_LAT_UUARS = 4,
};
1296
/* Port type as reported in the HCA capabilities: InfiniBand or Ethernet. */
enum {
	MLX5_CAP_PORT_TYPE_IB  = 0x0,
	MLX5_CAP_PORT_TYPE_ETH = 0x1,
};
1301
/*
 * HCA capability values describing how the minimum WQE inline mode is
 * determined: fixed at L2, configured per vport, or not required at all.
 */
enum {
	MLX5_CMD_HCA_CAP_MIN_WQE_INLINE_MODE_L2           = 0x0,
	MLX5_CMD_HCA_CAP_MIN_WQE_INLINE_MODE_VPORT_CONFIG = 0x1,
	MLX5_CMD_HCA_CAP_MIN_WQE_INLINE_MODE_NOT_REQUIRED = 0x2
};
1307
/*
 * QUERY_VPORT_STATE output value: the vport state follows the physical
 * port state rather than being forced up or down.
 */
enum {
	MLX5_QUERY_VPORT_STATE_OUT_STATE_FOLLOW = 0x2,
};
1311
1312static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
1313{
1314	if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE)
1315		return 0;
1316	return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz;
1317}
1318
/*
 * Layout of the MCIA register, used with MLX5_CMD_OP_QUERY_EEPROM to
 * access cable/module EEPROM data over i2c.  Per the mlx5_ifc
 * convention (see the __mlx5_nullp helpers), each array length is a
 * field width in BITS, not bytes.
 * NOTE(review): individual field semantics inferred from their names —
 * confirm against the device's register documentation (PRM).
 */
struct mlx5_ifc_mcia_reg_bits {
	u8         l[0x1];
	u8         reserved_0[0x7];
	u8         module[0x8];
	u8         reserved_1[0x8];
	u8         status[0x8];

	u8         i2c_device_address[0x8];
	u8         page_number[0x8];
	u8         device_address[0x10];

	u8         reserved_2[0x10];
	u8         size[0x10];

	u8         reserved_3[0x20];

	/* Data area: 12 x 32-bit dwords (48 bytes). */
	u8         dword_0[0x20];
	u8         dword_1[0x20];
	u8         dword_2[0x20];
	u8         dword_3[0x20];
	u8         dword_4[0x20];
	u8         dword_5[0x20];
	u8         dword_6[0x20];
	u8         dword_7[0x20];
	u8         dword_8[0x20];
	u8         dword_9[0x20];
	u8         dword_10[0x20];
	u8         dword_11[0x20];
};
1348
/* Opcode of the QUERY_EEPROM command (module EEPROM access). */
#define MLX5_CMD_OP_QUERY_EEPROM 0x93c
1350
/*
 * Compressed ("mini") 8-byte CQE used when CQE compression is enabled
 * (see MLX5_COMPRESSED).  The union carries whichever per-packet field
 * the selected compression format preserves: the RX hash result, the
 * checksum, or the send-WQE info (cf. enum mlx5_exp_cqe_zip_recv_type).
 * All multi-byte fields are big-endian, matching device byte order.
 */
struct mlx5_mini_cqe8 {
	union {
		__be32 rx_hash_result;
		__be16 checksum;
		__be16 rsvd;
		struct {
			__be16 wqe_counter;
			u8  s_wqe_opcode;
			u8  reserved;
		} s_wqe_info;
	};
	__be32 byte_cnt;
};
1364
/*
 * CQE format values as returned by mlx5_get_cqe_format():
 * no inline data, 32- or 64-byte inline data segment, or a compressed
 * (mini-CQE) completion.
 */
enum {
	MLX5_NO_INLINE_DATA,
	MLX5_INLINE_DATA32_SEG,
	MLX5_INLINE_DATA64_SEG,
	MLX5_COMPRESSED,
};
1371
/*
 * Which per-packet field mini-CQEs carry when receive CQE compression
 * ("zipping") is enabled: the RX hash result or the checksum (cf. the
 * union in struct mlx5_mini_cqe8).
 */
enum mlx5_exp_cqe_zip_recv_type {
	MLX5_CQE_FORMAT_HASH,
	MLX5_CQE_FORMAT_CSUM,
};
1376
1377#define MLX5E_CQE_FORMAT_MASK 0xc
1378static inline int mlx5_get_cqe_format(const struct mlx5_cqe64 *cqe)
1379{
1380	return (cqe->op_own & MLX5E_CQE_FORMAT_MASK) >> 2;
1381}
1382
/* Number of bypass flow tables: 8 regular priorities + 1 for multicast. */
#define MLX5_NUM_BYPASS_FTS	9
1385
1386#endif /* MLX5_DEVICE_H */
1387