/*-
 * Copyright (c) 2015-2019 Mellanox Technologies. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/11/sys/dev/mlx5/mlx5_en/en.h 347873 2019-05-16 18:23:28Z hselasky $
 */

#ifndef _MLX5_EN_H_
#define	_MLX5_EN_H_

#include <linux/kmod.h>
#include <linux/page.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/if_ether.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ktime.h>
#include <linux/net_dim.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/tcp_lro.h>
#include <netinet/udp.h>
#include <net/ethernet.h>
#include <sys/buf_ring.h>

#include "opt_rss.h"

#ifdef	RSS
#include <net/rss_config.h>
#include <netinet/in_rss.h>
#endif

#include <machine/bus.h>

#include <dev/mlx5/driver.h>
#include <dev/mlx5/qp.h>
#include <dev/mlx5/cq.h>
#include <dev/mlx5/port.h>
#include <dev/mlx5/vport.h>
#include <dev/mlx5/diagnostics.h>

#include <dev/mlx5/mlx5_core/wq.h>
#include <dev/mlx5/mlx5_core/transobj.h>
#include <dev/mlx5/mlx5_core/mlx5_core.h>

#define	MLX5E_MAX_PRIORITY 8

/* IEEE 802.1Qaz standard supported values */
#define	IEEE_8021QAZ_MAX_TCS	8

#define	MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE                0x7
#define	MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE                0xa
#define	MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE                0xe

#define	MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE                0x7
#define	MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE                0xa
#define	MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE                0xe

#define	MLX5E_MAX_BUSDMA_RX_SEGS 15

#ifndef MLX5E_MAX_RX_BYTES
#define	MLX5E_MAX_RX_BYTES MCLBYTES
#endif

#define	MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ \
    MIN(65535, 7 * MLX5E_MAX_RX_BYTES)

#define	MLX5E_DIM_DEFAULT_PROFILE 3
#define	MLX5E_DIM_MAX_RX_CQ_MODERATION_PKTS_WITH_LRO	16
#define	MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC      0x10
#define	MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE	0x3
#define	MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS      0x20
#define	MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC      0x10
#define	MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS      0x20
#define	MLX5E_PARAMS_DEFAULT_MIN_RX_WQES                0x80
#define	MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ         0x7
#define	MLX5E_CACHELINE_SIZE CACHE_LINE_SIZE
#define	MLX5E_HW2SW_MTU(hwmtu) \
    ((hwmtu) - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ETHER_CRC_LEN))
#define	MLX5E_SW2HW_MTU(swmtu) \
    ((swmtu) + (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ETHER_CRC_LEN))
#define	MLX5E_SW2MB_MTU(swmtu) \
    (MLX5E_SW2HW_MTU(swmtu) + MLX5E_NET_IP_ALIGN)
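/*
 * Editor's note: a worked example of the MTU conversion macros above,
 * assuming a software MTU of 1500 bytes:
 *
 *   MLX5E_SW2HW_MTU(1500) = 1500 + 14 + 4 + 4 = 1522 bytes on the wire
 *     (Ethernet header, VLAN tag and CRC included), and
 *   MLX5E_SW2MB_MTU(1500) = 1522 + MLX5E_NET_IP_ALIGN (2) = 1524 bytes
 *     of mbuf space, keeping the IP header 4-byte aligned.
 *
 * MLX5E_HW2SW_MTU() performs the inverse conversion.
 */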
#define	MLX5E_MTU_MIN		72	/* Min MTU allowed by the kernel */
#define	MLX5E_MTU_MAX		MIN(ETHERMTU_JUMBO, MJUM16BYTES)	/* Max MTU of Ethernet
									 * jumbo frames */

#define	MLX5E_BUDGET_MAX	8192	/* RX and TX */
#define	MLX5E_RX_BUDGET_MAX	256
#define	MLX5E_SQ_BF_BUDGET	16
#define	MLX5E_SQ_TX_QUEUE_SIZE	4096	/* SQ drbr queue size */

#define	MLX5E_MAX_TX_NUM_TC	8	/* units */
#define	MLX5E_MAX_TX_HEADER	128	/* bytes */
#define	MLX5E_MAX_TX_PAYLOAD_SIZE	65536	/* bytes */
#define	MLX5E_MAX_TX_MBUF_SIZE	65536	/* bytes */
#define	MLX5E_MAX_TX_MBUF_FRAGS	\
    ((MLX5_SEND_WQE_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS) - \
    (MLX5E_MAX_TX_HEADER / MLX5_SEND_WQE_DS) - \
    1 /* the maximum value of the DS counter is 0x3F and not 0x40 */)	/* units */
#define	MLX5E_MAX_TX_INLINE \
  (MLX5E_MAX_TX_HEADER - sizeof(struct mlx5e_tx_wqe) + \
  sizeof(((struct mlx5e_tx_wqe *)0)->eth.inline_hdr_start))	/* bytes */

#define	MLX5E_100MB (100000)
#define	MLX5E_1GB   (1000000)

MALLOC_DECLARE(M_MLX5EN);

struct mlx5_core_dev;
struct mlx5e_cq;

typedef void (mlx5e_cq_comp_t)(struct mlx5_core_cq *);

#define	MLX5E_STATS_COUNT(a, ...) a
#define	MLX5E_STATS_VAR(a, b, c, ...) b c;
#define	MLX5E_STATS_DESC(a, b, c, d, e, ...) d, e,

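/*
 * Editor's note: the three helpers above implement an X-macro pattern.
 * Each statistics table below, for example MLX5E_VPORT_STATS(m), is a
 * list of m(...) rows; substituting a different helper for "m" selects
 * what every row expands to:
 *
 *   MLX5E_STATS_COUNT -> "+1" per row, used to count table entries,
 *   MLX5E_STATS_VAR   -> "u64 <name>;" per row, used to declare fields,
 *   MLX5E_STATS_DESC  -> "<name>", "<description>", per row, used to
 *                        build sysctl name/description string arrays.
 *
 * For instance, MLX5E_VPORT_STATS(MLX5E_STATS_VAR) expands to
 * "u64 rx_packets; u64 rx_bytes; ..." and
 * (0 MLX5E_VPORT_STATS(MLX5E_STATS_COUNT)) expands to "0 +1 +1 ...",
 * i.e. the number of counters in the table.
 */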
#define	MLX5E_VPORT_STATS(m)						\
  /* HW counters */							\
  m(+1, u64, rx_packets, "rx_packets", "Received packets")		\
  m(+1, u64, rx_bytes, "rx_bytes", "Received bytes")			\
  m(+1, u64, tx_packets, "tx_packets", "Transmitted packets")		\
  m(+1, u64, tx_bytes, "tx_bytes", "Transmitted bytes")			\
  m(+1, u64, rx_error_packets, "rx_error_packets", "Received error packets") \
  m(+1, u64, rx_error_bytes, "rx_error_bytes", "Received error bytes")	\
  m(+1, u64, tx_error_packets, "tx_error_packets", "Transmitted error packets") \
  m(+1, u64, tx_error_bytes, "tx_error_bytes", "Transmitted error bytes") \
  m(+1, u64, rx_unicast_packets, "rx_unicast_packets", "Received unicast packets") \
  m(+1, u64, rx_unicast_bytes, "rx_unicast_bytes", "Received unicast bytes") \
  m(+1, u64, tx_unicast_packets, "tx_unicast_packets", "Transmitted unicast packets") \
  m(+1, u64, tx_unicast_bytes, "tx_unicast_bytes", "Transmitted unicast bytes") \
  m(+1, u64, rx_multicast_packets, "rx_multicast_packets", "Received multicast packets") \
  m(+1, u64, rx_multicast_bytes, "rx_multicast_bytes", "Received multicast bytes") \
  m(+1, u64, tx_multicast_packets, "tx_multicast_packets", "Transmitted multicast packets") \
  m(+1, u64, tx_multicast_bytes, "tx_multicast_bytes", "Transmitted multicast bytes") \
  m(+1, u64, rx_broadcast_packets, "rx_broadcast_packets", "Received broadcast packets") \
  m(+1, u64, rx_broadcast_bytes, "rx_broadcast_bytes", "Received broadcast bytes") \
  m(+1, u64, tx_broadcast_packets, "tx_broadcast_packets", "Transmitted broadcast packets") \
  m(+1, u64, tx_broadcast_bytes, "tx_broadcast_bytes", "Transmitted broadcast bytes") \
  m(+1, u64, rx_out_of_buffer, "rx_out_of_buffer", "Receive out of buffer, no recv wqes events") \
  /* SW counters */							\
  m(+1, u64, tso_packets, "tso_packets", "Transmitted TSO packets")	\
  m(+1, u64, tso_bytes, "tso_bytes", "Transmitted TSO bytes")		\
  m(+1, u64, lro_packets, "lro_packets", "Received LRO packets")		\
  m(+1, u64, lro_bytes, "lro_bytes", "Received LRO bytes")		\
  m(+1, u64, sw_lro_queued, "sw_lro_queued", "Packets queued for SW LRO")	\
  m(+1, u64, sw_lro_flushed, "sw_lro_flushed", "Packets flushed from SW LRO")	\
  m(+1, u64, rx_csum_good, "rx_csum_good", "Received checksum valid packets") \
  m(+1, u64, rx_csum_none, "rx_csum_none", "Received no checksum packets") \
  m(+1, u64, tx_csum_offload, "tx_csum_offload", "Transmit checksum offload packets") \
  m(+1, u64, tx_queue_dropped, "tx_queue_dropped", "Transmit queue dropped") \
  m(+1, u64, tx_defragged, "tx_defragged", "Transmit queue defragged") \
  m(+1, u64, rx_wqe_err, "rx_wqe_err", "Receive WQE errors") \
  m(+1, u64, tx_jumbo_packets, "tx_jumbo_packets", "TX packets greater than 1518 octets") \
  m(+1, u64, rx_steer_missed_packets, "rx_steer_missed_packets", "RX packets dropped by steering rule(s)")

#define	MLX5E_VPORT_STATS_NUM (0 MLX5E_VPORT_STATS(MLX5E_STATS_COUNT))

struct mlx5e_vport_stats {
	struct	sysctl_ctx_list ctx;
	u64	arg [0];
	MLX5E_VPORT_STATS(MLX5E_STATS_VAR)
};
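/*
 * Editor's note: the zero-length "arg[0]" member marks the start of the
 * counter block, so the macro-generated u64 fields following it can also
 * be addressed as a flat array (for example "vport.arg[i]") when the
 * counters are copied from a firmware query or exported through sysctl.
 * The other *_stats structures below use the same layout trick.
 */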

#define	MLX5E_PPORT_IEEE802_3_STATS(m)					\
  m(+1, u64, frames_tx, "frames_tx", "Frames transmitted")		\
  m(+1, u64, frames_rx, "frames_rx", "Frames received")			\
  m(+1, u64, check_seq_err, "check_seq_err", "Sequence errors")		\
  m(+1, u64, alignment_err, "alignment_err", "Alignment errors")	\
  m(+1, u64, octets_tx, "octets_tx", "Bytes transmitted")		\
  m(+1, u64, octets_received, "octets_received", "Bytes received")	\
  m(+1, u64, multicast_xmitted, "multicast_xmitted", "Multicast transmitted") \
  m(+1, u64, broadcast_xmitted, "broadcast_xmitted", "Broadcast transmitted") \
  m(+1, u64, multicast_rx, "multicast_rx", "Multicast received")	\
  m(+1, u64, broadcast_rx, "broadcast_rx", "Broadcast received")	\
  m(+1, u64, in_range_len_errors, "in_range_len_errors", "In range length errors") \
  m(+1, u64, out_of_range_len, "out_of_range_len", "Out of range length errors") \
  m(+1, u64, too_long_errors, "too_long_errors", "Too long errors")	\
  m(+1, u64, symbol_err, "symbol_err", "Symbol errors")			\
  m(+1, u64, mac_control_tx, "mac_control_tx", "MAC control transmitted") \
  m(+1, u64, mac_control_rx, "mac_control_rx", "MAC control received")	\
  m(+1, u64, unsupported_op_rx, "unsupported_op_rx", "Unsupported operation received") \
  m(+1, u64, pause_ctrl_rx, "pause_ctrl_rx", "Pause control received")	\
  m(+1, u64, pause_ctrl_tx, "pause_ctrl_tx", "Pause control transmitted")

#define	MLX5E_PPORT_RFC2819_STATS(m)					\
  m(+1, u64, drop_events, "drop_events", "Dropped events")		\
  m(+1, u64, octets, "octets", "Octets")					\
  m(+1, u64, pkts, "pkts", "Packets")					\
  m(+1, u64, broadcast_pkts, "broadcast_pkts", "Broadcast packets")	\
  m(+1, u64, multicast_pkts, "multicast_pkts", "Multicast packets")	\
  m(+1, u64, crc_align_errors, "crc_align_errors", "CRC alignment errors") \
  m(+1, u64, undersize_pkts, "undersize_pkts", "Undersized packets")	\
  m(+1, u64, oversize_pkts, "oversize_pkts", "Oversized packets")	\
  m(+1, u64, fragments, "fragments", "Fragments")			\
  m(+1, u64, jabbers, "jabbers", "Jabbers")				\
  m(+1, u64, collisions, "collisions", "Collisions")

#define	MLX5E_PPORT_RFC2819_STATS_DEBUG(m)				\
  m(+1, u64, p64octets, "p64octets", "Bytes")				\
  m(+1, u64, p65to127octets, "p65to127octets", "Bytes")			\
  m(+1, u64, p128to255octets, "p128to255octets", "Bytes")		\
  m(+1, u64, p256to511octets, "p256to511octets", "Bytes")		\
  m(+1, u64, p512to1023octets, "p512to1023octets", "Bytes")		\
  m(+1, u64, p1024to1518octets, "p1024to1518octets", "Bytes")		\
  m(+1, u64, p1519to2047octets, "p1519to2047octets", "Bytes")		\
  m(+1, u64, p2048to4095octets, "p2048to4095octets", "Bytes")		\
  m(+1, u64, p4096to8191octets, "p4096to8191octets", "Bytes")		\
  m(+1, u64, p8192to10239octets, "p8192to10239octets", "Bytes")

#define	MLX5E_PPORT_RFC2863_STATS_DEBUG(m)				\
  m(+1, u64, in_octets, "in_octets", "In octets")			\
  m(+1, u64, in_ucast_pkts, "in_ucast_pkts", "In unicast packets")	\
  m(+1, u64, in_discards, "in_discards", "In discards")			\
  m(+1, u64, in_errors, "in_errors", "In errors")			\
  m(+1, u64, in_unknown_protos, "in_unknown_protos", "In unknown protocols") \
  m(+1, u64, out_octets, "out_octets", "Out octets")			\
  m(+1, u64, out_ucast_pkts, "out_ucast_pkts", "Out unicast packets")	\
  m(+1, u64, out_discards, "out_discards", "Out discards")		\
  m(+1, u64, out_errors, "out_errors", "Out errors")			\
  m(+1, u64, in_multicast_pkts, "in_multicast_pkts", "In multicast packets") \
  m(+1, u64, in_broadcast_pkts, "in_broadcast_pkts", "In broadcast packets") \
  m(+1, u64, out_multicast_pkts, "out_multicast_pkts", "Out multicast packets") \
  m(+1, u64, out_broadcast_pkts, "out_broadcast_pkts", "Out broadcast packets")

#define	MLX5E_PPORT_ETHERNET_EXTENDED_STATS_DEBUG(m)				\
  m(+1, u64, port_transmit_wait_high, "port_transmit_wait_high", "Port transmit wait high") \
  m(+1, u64, ecn_marked, "ecn_marked", "ECN marked")			\
  m(+1, u64, no_buffer_discard_mc, "no_buffer_discard_mc", "No buffer discard mc") \
  m(+1, u64, rx_ebp, "rx_ebp", "RX EBP")					\
  m(+1, u64, tx_ebp, "tx_ebp", "TX EBP")					\
  m(+1, u64, rx_buffer_almost_full, "rx_buffer_almost_full", "RX buffer almost full") \
  m(+1, u64, rx_buffer_full, "rx_buffer_full", "RX buffer full")	\
  m(+1, u64, rx_icrc_encapsulated, "rx_icrc_encapsulated", "RX ICRC encapsulated") \
  m(+1, u64, ex_reserved_0, "ex_reserved_0", "Reserved") \
  m(+1, u64, ex_reserved_1, "ex_reserved_1", "Reserved") \
  m(+1, u64, tx_stat_p64octets, "tx_stat_p64octets", "Bytes")			\
  m(+1, u64, tx_stat_p65to127octets, "tx_stat_p65to127octets", "Bytes")		\
  m(+1, u64, tx_stat_p128to255octets, "tx_stat_p128to255octets", "Bytes")	\
  m(+1, u64, tx_stat_p256to511octets, "tx_stat_p256to511octets", "Bytes")	\
  m(+1, u64, tx_stat_p512to1023octets, "tx_stat_p512to1023octets", "Bytes")	\
  m(+1, u64, tx_stat_p1024to1518octets, "tx_stat_p1024to1518octets", "Bytes")	\
  m(+1, u64, tx_stat_p1519to2047octets, "tx_stat_p1519to2047octets", "Bytes")	\
  m(+1, u64, tx_stat_p2048to4095octets, "tx_stat_p2048to4095octets", "Bytes")	\
  m(+1, u64, tx_stat_p4096to8191octets, "tx_stat_p4096to8191octets", "Bytes")	\
  m(+1, u64, tx_stat_p8192to10239octets, "tx_stat_p8192to10239octets", "Bytes")

#define	MLX5E_PPORT_STATISTICAL_DEBUG(m)				\
  m(+1, u64, phy_time_since_last_clear, "phy_time_since_last_clear",	\
    "Time since last clear in milliseconds")				\
  m(+1, u64, phy_received_bits, "phy_received_bits",			\
    "Total amount of traffic received in bits before error correction")	\
  m(+1, u64, phy_symbol_errors, "phy_symbol_errors",			\
    "Total number of symbol errors before error correction")		\
  m(+1, u64, phy_corrected_bits, "phy_corrected_bits",			\
    "Total number of corrected bits")					\
  m(+1, u64, phy_corrected_bits_lane0, "phy_corrected_bits_lane0",	\
    "Total number of corrected bits for lane 0")			\
  m(+1, u64, phy_corrected_bits_lane1, "phy_corrected_bits_lane1",	\
    "Total number of corrected bits for lane 1")			\
  m(+1, u64, phy_corrected_bits_lane2, "phy_corrected_bits_lane2",	\
    "Total number of corrected bits for lane 2")			\
  m(+1, u64, phy_corrected_bits_lane3, "phy_corrected_bits_lane3",	\
    "Total number of corrected bits for lane 3")

#define	MLX5E_PPORT_PHYSICAL_LAYER_STATS_DEBUG(m)			\
  m(+1, u64, time_since_last_clear, "time_since_last_clear",		\
    "Time since the last counters clear event (msec)")			\
  m(+1, u64, symbol_errors, "symbol_errors", "Symbol errors")		\
  m(+1, u64, sync_headers_errors, "sync_headers_errors",		\
    "Sync header error counter")					\
  m(+1, u64, bip_errors_lane0, "edpl_bip_errors_lane0",			\
    "Indicates the number of PRBS errors on lane 0")			\
  m(+1, u64, bip_errors_lane1, "edpl_bip_errors_lane1",			\
    "Indicates the number of PRBS errors on lane 1")			\
  m(+1, u64, bip_errors_lane2, "edpl_bip_errors_lane2",			\
    "Indicates the number of PRBS errors on lane 2")			\
  m(+1, u64, bip_errors_lane3, "edpl_bip_errors_lane3",			\
    "Indicates the number of PRBS errors on lane 3")			\
  m(+1, u64, fc_corrected_blocks_lane0, "fc_corrected_blocks_lane0",	\
    "FEC correctable block counter lane 0")				\
  m(+1, u64, fc_corrected_blocks_lane1, "fc_corrected_blocks_lane1",	\
    "FEC correctable block counter lane 1")				\
  m(+1, u64, fc_corrected_blocks_lane2, "fc_corrected_blocks_lane2",	\
    "FEC correctable block counter lane 2")				\
  m(+1, u64, fc_corrected_blocks_lane3, "fc_corrected_blocks_lane3",	\
    "FEC correctable block counter lane 3")				\
  m(+1, u64, rs_corrected_blocks, "rs_corrected_blocks",		\
    "FEC correctable block counter")					\
  m(+1, u64, rs_uncorrectable_blocks, "rs_uncorrectable_blocks",	\
    "FEC uncorrectable block counter")					\
  m(+1, u64, rs_no_errors_blocks, "rs_no_errors_blocks",		\
    "The number of RS-FEC blocks received that had no errors")		\
  m(+1, u64, rs_single_error_blocks, "rs_single_error_blocks",		\
    "The number of corrected RS-FEC blocks received that had "		\
    "exactly 1 error symbol")						\
  m(+1, u64, rs_corrected_symbols_total, "rs_corrected_symbols_total",	\
    "Port FEC corrected symbol counter")				\
  m(+1, u64, rs_corrected_symbols_lane0, "rs_corrected_symbols_lane0",	\
    "FEC corrected symbol counter lane 0")				\
  m(+1, u64, rs_corrected_symbols_lane1, "rs_corrected_symbols_lane1",	\
    "FEC corrected symbol counter lane 1")				\
  m(+1, u64, rs_corrected_symbols_lane2, "rs_corrected_symbols_lane2",	\
    "FEC corrected symbol counter lane 2")				\
  m(+1, u64, rs_corrected_symbols_lane3, "rs_corrected_symbols_lane3",	\
    "FEC corrected symbol counter lane 3")

/* Per priority statistics for PFC */
#define	MLX5E_PPORT_PER_PRIO_STATS_SUB(m,n,p)			\
  m(n, p, +1, u64, rx_octets, "rx_octets", "Received octets")		\
  m(n, p, +1, u64, reserved_0, "reserved_0", "Reserved")		\
  m(n, p, +1, u64, reserved_1, "reserved_1", "Reserved")		\
  m(n, p, +1, u64, reserved_2, "reserved_2", "Reserved")		\
  m(n, p, +1, u64, rx_frames, "rx_frames", "Received frames")		\
  m(n, p, +1, u64, tx_octets, "tx_octets", "Transmitted octets")	\
  m(n, p, +1, u64, reserved_3, "reserved_3", "Reserved")		\
  m(n, p, +1, u64, reserved_4, "reserved_4", "Reserved")		\
  m(n, p, +1, u64, reserved_5, "reserved_5", "Reserved")		\
  m(n, p, +1, u64, tx_frames, "tx_frames", "Transmitted frames")	\
  m(n, p, +1, u64, rx_pause, "rx_pause", "Received pause frames")	\
  m(n, p, +1, u64, rx_pause_duration, "rx_pause_duration",		\
	"Received pause duration")					\
  m(n, p, +1, u64, tx_pause, "tx_pause", "Transmitted pause frames")	\
  m(n, p, +1, u64, tx_pause_duration, "tx_pause_duration",		\
	"Transmitted pause duration")					\
  m(n, p, +1, u64, rx_pause_transition, "rx_pause_transition",		\
	"Received pause transitions")					\
  m(n, p, +1, u64, rx_discards, "rx_discards", "Discarded received frames") \
  m(n, p, +1, u64, device_stall_minor_watermark,			\
	"device_stall_minor_watermark", "Device stall minor watermark")	\
  m(n, p, +1, u64, device_stall_critical_watermark,			\
	"device_stall_critical_watermark", "Device stall critical watermark")

#define	MLX5E_PPORT_PER_PRIO_STATS_PREFIX(m,p,c,t,f,s,d) \
  m(c, t, pri_##p##_##f, "prio" #p "_" s, "Priority " #p " - " d)

#define	MLX5E_PPORT_PER_PRIO_STATS_NUM_PRIO 8

#define	MLX5E_PPORT_PER_PRIO_STATS(m) \
  MLX5E_PPORT_PER_PRIO_STATS_SUB(MLX5E_PPORT_PER_PRIO_STATS_PREFIX,m,0) \
  MLX5E_PPORT_PER_PRIO_STATS_SUB(MLX5E_PPORT_PER_PRIO_STATS_PREFIX,m,1) \
  MLX5E_PPORT_PER_PRIO_STATS_SUB(MLX5E_PPORT_PER_PRIO_STATS_PREFIX,m,2) \
  MLX5E_PPORT_PER_PRIO_STATS_SUB(MLX5E_PPORT_PER_PRIO_STATS_PREFIX,m,3) \
  MLX5E_PPORT_PER_PRIO_STATS_SUB(MLX5E_PPORT_PER_PRIO_STATS_PREFIX,m,4) \
  MLX5E_PPORT_PER_PRIO_STATS_SUB(MLX5E_PPORT_PER_PRIO_STATS_PREFIX,m,5) \
  MLX5E_PPORT_PER_PRIO_STATS_SUB(MLX5E_PPORT_PER_PRIO_STATS_PREFIX,m,6) \
  MLX5E_PPORT_PER_PRIO_STATS_SUB(MLX5E_PPORT_PER_PRIO_STATS_PREFIX,m,7)
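/*
 * Editor's note: MLX5E_PPORT_PER_PRIO_STATS_PREFIX glues the priority
 * number onto every per-priority counter, so expanding the table above
 * yields one full set of MLX5E_PPORT_PER_PRIO_STATS_SUB() rows per
 * priority 0..7, with field names such as pri_0_rx_octets, sysctl names
 * such as "prio0_rx_octets" and descriptions such as
 * "Priority 0 - Received octets".
 */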

#define	MLX5E_PCIE_PERFORMANCE_COUNTERS_64(m)				\
  m(+1, u64, life_time_counter_high, "life_time_counter",		\
    "Life time counter.", pcie_perf_counters)				\
  m(+1, u64, tx_overflow_buffer_pkt, "tx_overflow_buffer_pkt",		\
    "The number of packets dropped due to lack of PCIe buffers "	\
    "in receive path from NIC port toward the hosts.",			\
    pcie_perf_counters)							\
  m(+1, u64, tx_overflow_buffer_marked_pkt,				\
    "tx_overflow_buffer_marked_pkt",					\
    "The number of packets marked due to lack of PCIe buffers "		\
    "in receive path from NIC port toward the hosts.",			\
    pcie_perf_counters)

#define	MLX5E_PCIE_PERFORMANCE_COUNTERS_32(m)				\
  m(+1, u64, rx_errors, "rx_errors",					\
    "Number of transitions to recovery due to Framing "			\
    "errors and CRC errors.", pcie_perf_counters)			\
  m(+1, u64, tx_errors, "tx_errors", "Number of transitions "		\
    "to recovery due to EIEOS and TS errors.", pcie_perf_counters)	\
  m(+1, u64, l0_to_recovery_eieos, "l0_to_recovery_eieos", "Number of "	\
    "transitions to recovery due to getting EIEOS.", pcie_perf_counters)\
  m(+1, u64, l0_to_recovery_ts, "l0_to_recovery_ts", "Number of "	\
    "transitions to recovery due to getting TS.", pcie_perf_counters)	\
  m(+1, u64, l0_to_recovery_framing, "l0_to_recovery_framing", "Number "\
    "of transitions to recovery due to identifying framing "		\
    "errors at gen3/4.", pcie_perf_counters)				\
  m(+1, u64, l0_to_recovery_retrain, "l0_to_recovery_retrain",		\
    "Number of transitions to recovery due to link retrain request "	\
    "from data link.", pcie_perf_counters)				\
  m(+1, u64, crc_error_dllp, "crc_error_dllp", "Number of transitions "	\
    "to recovery due to identifying CRC DLLP errors.",			\
    pcie_perf_counters)							\
  m(+1, u64, crc_error_tlp, "crc_error_tlp", "Number of transitions to "\
    "recovery due to identifying CRC TLP errors.", pcie_perf_counters)	\
  m(+1, u64, outbound_stalled_reads, "outbound_stalled_reads",		\
    "The percentage of time within the last second that the NIC had "	\
    "outbound non-posted read requests but could not perform the "	\
    "operation due to insufficient non-posted credits.",		\
    pcie_perf_counters)							\
  m(+1, u64, outbound_stalled_writes, "outbound_stalled_writes",	\
    "The percentage of time within the last second that the NIC had "	\
    "outbound posted write requests but could not perform the "	\
    "operation due to insufficient posted credits.",			\
    pcie_perf_counters)							\
  m(+1, u64, outbound_stalled_reads_events,				\
    "outbound_stalled_reads_events", "The number of events where "	\
    "outbound_stalled_reads was above a threshold.",			\
    pcie_perf_counters)							\
  m(+1, u64, outbound_stalled_writes_events,				\
    "outbound_stalled_writes_events",					\
    "The number of events where outbound_stalled_writes was above "	\
    "a threshold.", pcie_perf_counters)

#define	MLX5E_PCIE_TIMERS_AND_STATES_COUNTERS_32(m)			\
  m(+1, u64, time_to_boot_image_start, "time_to_boot_image_start",	\
    "Time from start until FW boot image starts running in usec.",	\
    pcie_timers_states)							\
  m(+1, u64, time_to_link_image, "time_to_link_image",			\
    "Time from start until FW pci_link image starts running in usec.",	\
    pcie_timers_states)							\
  m(+1, u64, calibration_time, "calibration_time",			\
    "Time it took FW to do calibration in usec.",			\
    pcie_timers_states)							\
  m(+1, u64, time_to_first_perst, "time_to_first_perst",		\
    "Time from start until FW handles first perst. in usec.",		\
    pcie_timers_states)							\
  m(+1, u64, time_to_detect_state, "time_to_detect_state",		\
    "Time from start until first transition to LTSSM.Detect_Q in usec",	\
    pcie_timers_states)							\
  m(+1, u64, time_to_l0, "time_to_l0",					\
    "Time from start until first transition to LTSSM.L0 in usec",	\
    pcie_timers_states)							\
  m(+1, u64, time_to_crs_en, "time_to_crs_en",				\
    "Time from start until crs is enabled in usec",			\
    pcie_timers_states)							\
  m(+1, u64, time_to_plastic_image_start, "time_to_plastic_image_start",\
    "Time from start until FW plastic image starts running in usec.",	\
    pcie_timers_states)							\
  m(+1, u64, time_to_iron_image_start, "time_to_iron_image_start",	\
    "Time from start until FW iron image starts running in usec.",	\
    pcie_timers_states)							\
  m(+1, u64, perst_handler, "perst_handler",				\
    "Number of persts arrived.", pcie_timers_states)			\
  m(+1, u64, times_in_l1, "times_in_l1",				\
    "Number of times LTSSM entered L1 flow.", pcie_timers_states)	\
  m(+1, u64, times_in_l23, "times_in_l23",				\
    "Number of times LTSSM entered L23 flow.", pcie_timers_states)	\
  m(+1, u64, dl_down, "dl_down",					\
    "Number of moves from DL_active to DL_down.", pcie_timers_states)	\
  m(+1, u64, config_cycle1usec, "config_cycle1usec",			\
    "Number of configuration requests that firmware "			\
    "handled in less than 1 usec.", pcie_timers_states)			\
  m(+1, u64, config_cycle2to7usec, "config_cycle2to7usec",		\
    "Number of configuration requests that firmware "			\
    "handled within 2 to 7 usec.", pcie_timers_states)			\
  m(+1, u64, config_cycle8to15usec, "config_cycle8to15usec",		\
    "Number of configuration requests that firmware "			\
    "handled within 8 to 15 usec.", pcie_timers_states)			\
  m(+1, u64, config_cycle16to63usec, "config_cycle16to63usec",		\
    "Number of configuration requests that firmware "			\
    "handled within 16 to 63 usec.", pcie_timers_states)		\
  m(+1, u64, config_cycle64usec, "config_cycle64usec",			\
    "Number of configuration requests that firmware "			\
    "handled in more than 64 usec.", pcie_timers_states)		\
  m(+1, u64, correctable_err_msg_sent, "correctable_err_msg_sent",	\
    "Number of correctable error messages sent.", pcie_timers_states)	\
  m(+1, u64, non_fatal_err_msg_sent, "non_fatal_err_msg_sent",		\
    "Number of non-Fatal error msg sent.", pcie_timers_states)		\
  m(+1, u64, fatal_err_msg_sent, "fatal_err_msg_sent",			\
    "Number of fatal error msg sent.", pcie_timers_states)

#define	MLX5E_PCIE_LANE_COUNTERS_32(m)				\
  m(+1, u64, error_counter_lane0, "error_counter_lane0",	\
    "Error counter for PCI lane 0", pcie_lanes_counters)	\
  m(+1, u64, error_counter_lane1, "error_counter_lane1",	\
    "Error counter for PCI lane 1", pcie_lanes_counters)	\
  m(+1, u64, error_counter_lane2, "error_counter_lane2",	\
    "Error counter for PCI lane 2", pcie_lanes_counters)	\
  m(+1, u64, error_counter_lane3, "error_counter_lane3",	\
    "Error counter for PCI lane 3", pcie_lanes_counters)	\
  m(+1, u64, error_counter_lane4, "error_counter_lane4",	\
    "Error counter for PCI lane 4", pcie_lanes_counters)	\
  m(+1, u64, error_counter_lane5, "error_counter_lane5",	\
    "Error counter for PCI lane 5", pcie_lanes_counters)	\
  m(+1, u64, error_counter_lane6, "error_counter_lane6",	\
    "Error counter for PCI lane 6", pcie_lanes_counters)	\
  m(+1, u64, error_counter_lane7, "error_counter_lane7",	\
    "Error counter for PCI lane 7", pcie_lanes_counters)	\
  m(+1, u64, error_counter_lane8, "error_counter_lane8",	\
    "Error counter for PCI lane 8", pcie_lanes_counters)	\
  m(+1, u64, error_counter_lane9, "error_counter_lane9",	\
    "Error counter for PCI lane 9", pcie_lanes_counters)	\
  m(+1, u64, error_counter_lane10, "error_counter_lane10",	\
    "Error counter for PCI lane 10", pcie_lanes_counters)	\
  m(+1, u64, error_counter_lane11, "error_counter_lane11",	\
    "Error counter for PCI lane 11", pcie_lanes_counters)	\
  m(+1, u64, error_counter_lane12, "error_counter_lane12",	\
    "Error counter for PCI lane 12", pcie_lanes_counters)	\
  m(+1, u64, error_counter_lane13, "error_counter_lane13",	\
    "Error counter for PCI lane 13", pcie_lanes_counters)	\
  m(+1, u64, error_counter_lane14, "error_counter_lane14",	\
    "Error counter for PCI lane 14", pcie_lanes_counters)	\
  m(+1, u64, error_counter_lane15, "error_counter_lane15",	\
    "Error counter for PCI lane 15", pcie_lanes_counters)

/*
 * Make sure to update mlx5e_update_pport_counters()
 * when adding a new MLX5E_PPORT_STATS block
 */
#define	MLX5E_PPORT_STATS(m)			\
  MLX5E_PPORT_PER_PRIO_STATS(m)		\
  MLX5E_PPORT_IEEE802_3_STATS(m)		\
  MLX5E_PPORT_RFC2819_STATS(m)

#define	MLX5E_PORT_STATS_DEBUG(m)		\
  MLX5E_PPORT_RFC2819_STATS_DEBUG(m)		\
  MLX5E_PPORT_RFC2863_STATS_DEBUG(m)		\
  MLX5E_PPORT_PHYSICAL_LAYER_STATS_DEBUG(m)	\
  MLX5E_PPORT_ETHERNET_EXTENDED_STATS_DEBUG(m)	\
  MLX5E_PPORT_STATISTICAL_DEBUG(m)		\
  MLX5E_PCIE_PERFORMANCE_COUNTERS_64(m) \
  MLX5E_PCIE_PERFORMANCE_COUNTERS_32(m) \
  MLX5E_PCIE_TIMERS_AND_STATES_COUNTERS_32(m) \
  MLX5E_PCIE_LANE_COUNTERS_32(m)

#define	MLX5E_PPORT_IEEE802_3_STATS_NUM \
  (0 MLX5E_PPORT_IEEE802_3_STATS(MLX5E_STATS_COUNT))
#define	MLX5E_PPORT_RFC2819_STATS_NUM \
  (0 MLX5E_PPORT_RFC2819_STATS(MLX5E_STATS_COUNT))
#define	MLX5E_PPORT_STATS_NUM \
  (0 MLX5E_PPORT_STATS(MLX5E_STATS_COUNT))

#define	MLX5E_PPORT_PER_PRIO_STATS_NUM \
  (0 MLX5E_PPORT_PER_PRIO_STATS(MLX5E_STATS_COUNT))
#define	MLX5E_PPORT_RFC2819_STATS_DEBUG_NUM \
  (0 MLX5E_PPORT_RFC2819_STATS_DEBUG(MLX5E_STATS_COUNT))
#define	MLX5E_PPORT_RFC2863_STATS_DEBUG_NUM \
  (0 MLX5E_PPORT_RFC2863_STATS_DEBUG(MLX5E_STATS_COUNT))
#define	MLX5E_PPORT_PHYSICAL_LAYER_STATS_DEBUG_NUM \
  (0 MLX5E_PPORT_PHYSICAL_LAYER_STATS_DEBUG(MLX5E_STATS_COUNT))
#define	MLX5E_PPORT_ETHERNET_EXTENDED_STATS_DEBUG_NUM \
  (0 MLX5E_PPORT_ETHERNET_EXTENDED_STATS_DEBUG(MLX5E_STATS_COUNT))
#define	MLX5E_PPORT_STATISTICAL_DEBUG_NUM \
  (0 MLX5E_PPORT_STATISTICAL_DEBUG(MLX5E_STATS_COUNT))
#define	MLX5E_PORT_STATS_DEBUG_NUM \
  (0 MLX5E_PORT_STATS_DEBUG(MLX5E_STATS_COUNT))

struct mlx5e_pport_stats {
	struct	sysctl_ctx_list ctx;
	u64	arg [0];
	MLX5E_PPORT_STATS(MLX5E_STATS_VAR)
};

struct mlx5e_port_stats_debug {
	struct	sysctl_ctx_list ctx;
	u64	arg [0];
	MLX5E_PORT_STATS_DEBUG(MLX5E_STATS_VAR)
};

#define	MLX5E_RQ_STATS(m)					\
  m(+1, u64, packets, "packets", "Received packets")		\
  m(+1, u64, bytes, "bytes", "Received bytes")			\
  m(+1, u64, csum_none, "csum_none", "Received packets without checksum offload")		\
  m(+1, u64, lro_packets, "lro_packets", "Received LRO packets")	\
  m(+1, u64, lro_bytes, "lro_bytes", "Received LRO bytes")	\
  m(+1, u64, sw_lro_queued, "sw_lro_queued", "Packets queued for SW LRO")	\
  m(+1, u64, sw_lro_flushed, "sw_lro_flushed", "Packets flushed from SW LRO")	\
  m(+1, u64, wqe_err, "wqe_err", "Receive WQE errors")

#define	MLX5E_RQ_STATS_NUM (0 MLX5E_RQ_STATS(MLX5E_STATS_COUNT))

struct mlx5e_rq_stats {
	struct	sysctl_ctx_list ctx;
	u64	arg [0];
	MLX5E_RQ_STATS(MLX5E_STATS_VAR)
};

#define	MLX5E_SQ_STATS(m)						\
  m(+1, u64, packets, "packets", "Transmitted packets")			\
  m(+1, u64, bytes, "bytes", "Transmitted bytes")			\
  m(+1, u64, tso_packets, "tso_packets", "Transmitted TSO packets")		\
  m(+1, u64, tso_bytes, "tso_bytes", "Transmitted TSO bytes")		\
  m(+1, u64, csum_offload_none, "csum_offload_none", "Transmitted packets without checksum offload")	\
  m(+1, u64, defragged, "defragged", "Defragmented transmit mbuf chains")		\
  m(+1, u64, dropped, "dropped", "Dropped transmit packets")			\
  m(+1, u64, nop, "nop", "Transmitted NOP requests")

#define	MLX5E_SQ_STATS_NUM (0 MLX5E_SQ_STATS(MLX5E_STATS_COUNT))

struct mlx5e_sq_stats {
	struct	sysctl_ctx_list ctx;
	u64	arg [0];
	MLX5E_SQ_STATS(MLX5E_STATS_VAR)
};

struct mlx5e_stats {
	struct mlx5e_vport_stats vport;
	struct mlx5e_pport_stats pport;
	struct mlx5e_port_stats_debug port_stats_debug;
};

struct mlx5e_rq_param {
	u32	rqc [MLX5_ST_SZ_DW(rqc)];
	struct mlx5_wq_param wq;
};

struct mlx5e_sq_param {
	u32	sqc [MLX5_ST_SZ_DW(sqc)];
	struct mlx5_wq_param wq;
};

struct mlx5e_cq_param {
	u32	cqc [MLX5_ST_SZ_DW(cqc)];
	struct mlx5_wq_param wq;
};

struct mlx5e_params {
	u8	log_sq_size;
	u8	log_rq_size;
	u16	num_channels;
	u8	default_vlan_prio;
	u8	num_tc;
	u8	rx_cq_moderation_mode;
	u8	tx_cq_moderation_mode;
	u16	rx_cq_moderation_usec;
	u16	rx_cq_moderation_pkts;
	u16	tx_cq_moderation_usec;
	u16	tx_cq_moderation_pkts;
	u16	min_rx_wqes;
	bool	hw_lro_en;
	bool	cqe_zipping_en;
	u32	lro_wqe_sz;
	u16	rx_hash_log_tbl_sz;
	u32	tx_pauseframe_control __aligned(4);
	u32	rx_pauseframe_control __aligned(4);
	u16	tx_max_inline;
	u8	tx_min_inline_mode;
	u8	tx_priority_flow_control;
	u8	rx_priority_flow_control;
	u8	channels_rsss;
};

#define	MLX5E_PARAMS(m)							\
  m(+1, u64, tx_queue_size_max, "tx_queue_size_max", "Max send queue size") \
  m(+1, u64, rx_queue_size_max, "rx_queue_size_max", "Max receive queue size") \
  m(+1, u64, tx_queue_size, "tx_queue_size", "Default send queue size")	\
  m(+1, u64, rx_queue_size, "rx_queue_size", "Default receive queue size") \
  m(+1, u64, channels, "channels", "Default number of channels")		\
  m(+1, u64, channels_rsss, "channels_rsss", "Default channels receive side scaling stride") \
  m(+1, u64, coalesce_usecs_max, "coalesce_usecs_max", "Maximum usecs for joining packets") \
  m(+1, u64, coalesce_pkts_max, "coalesce_pkts_max", "Maximum packets to join") \
  m(+1, u64, rx_coalesce_usecs, "rx_coalesce_usecs", "Limit in usec for joining rx packets") \
  m(+1, u64, rx_coalesce_pkts, "rx_coalesce_pkts", "Maximum number of rx packets to join") \
  m(+1, u64, rx_coalesce_mode, "rx_coalesce_mode", "0: EQE fixed mode 1: CQE fixed mode 2: EQE auto mode 3: CQE auto mode") \
  m(+1, u64, tx_coalesce_usecs, "tx_coalesce_usecs", "Limit in usec for joining tx packets") \
  m(+1, u64, tx_coalesce_pkts, "tx_coalesce_pkts", "Maximum number of tx packets to join") \
  m(+1, u64, tx_coalesce_mode, "tx_coalesce_mode", "0: EQE mode 1: CQE mode") \
  m(+1, u64, tx_completion_fact, "tx_completion_fact", "1..MAX: Completion event ratio") \
  m(+1, u64, tx_completion_fact_max, "tx_completion_fact_max", "Maximum completion event ratio") \
  m(+1, u64, hw_lro, "hw_lro", "set to enable hw_lro") \
  m(+1, u64, cqe_zipping, "cqe_zipping", "0 : CQE zipping disabled") \
  m(+1, u64, modify_tx_dma, "modify_tx_dma", "0: Enable TX 1: Disable TX") \
  m(+1, u64, modify_rx_dma, "modify_rx_dma", "0: Enable RX 1: Disable RX") \
  m(+1, u64, diag_pci_enable, "diag_pci_enable", "0: Disabled 1: Enabled") \
  m(+1, u64, diag_general_enable, "diag_general_enable", "0: Disabled 1: Enabled") \
  m(+1, u64, hw_mtu, "hw_mtu", "Current hardware MTU value") \
  m(+1, u64, mc_local_lb, "mc_local_lb", "0: Local multicast loopback enabled 1: Disabled") \
  m(+1, u64, uc_local_lb, "uc_local_lb", "0: Local unicast loopback enabled 1: Disabled")

#define	MLX5E_PARAMS_NUM (0 MLX5E_PARAMS(MLX5E_STATS_COUNT))

struct mlx5e_params_ethtool {
	u64	arg [0];
	MLX5E_PARAMS(MLX5E_STATS_VAR)
	u64	max_bw_value[IEEE_8021QAZ_MAX_TCS];
	u8	max_bw_share[IEEE_8021QAZ_MAX_TCS];
	u8	prio_tc[MLX5E_MAX_PRIORITY];
	u8	dscp2prio[MLX5_MAX_SUPPORTED_DSCP];
	u8	trust_state;
};

/* EEPROM Standards for plug in modules */
#ifndef MLX5E_ETH_MODULE_SFF_8472
#define	MLX5E_ETH_MODULE_SFF_8472	0x1
#define	MLX5E_ETH_MODULE_SFF_8472_LEN	128
#endif

#ifndef MLX5E_ETH_MODULE_SFF_8636
#define	MLX5E_ETH_MODULE_SFF_8636	0x2
#define	MLX5E_ETH_MODULE_SFF_8636_LEN	256
#endif

#ifndef MLX5E_ETH_MODULE_SFF_8436
#define	MLX5E_ETH_MODULE_SFF_8436	0x3
#define	MLX5E_ETH_MODULE_SFF_8436_LEN	256
#endif

/* EEPROM I2C Addresses */
#define	MLX5E_I2C_ADDR_LOW		0x50
#define	MLX5E_I2C_ADDR_HIGH		0x51

#define	MLX5E_EEPROM_LOW_PAGE		0x0
#define	MLX5E_EEPROM_HIGH_PAGE		0x3

#define	MLX5E_EEPROM_HIGH_PAGE_OFFSET	128
#define	MLX5E_EEPROM_PAGE_LENGTH	256

#define	MLX5E_EEPROM_INFO_BYTES		0x3

struct mlx5e_cq {
	/* data path - accessed per cqe */
	struct mlx5_cqwq wq;

	/* data path - accessed per HW polling */
	struct mlx5_core_cq mcq;

	/* control */
	struct mlx5e_priv *priv;
	struct mlx5_wq_ctrl wq_ctrl;
} __aligned(MLX5E_CACHELINE_SIZE);

struct mlx5e_rq_mbuf {
	bus_dmamap_t	dma_map;
	caddr_t		data;
	struct mbuf	*mbuf;
};

struct mlx5e_rq {
	/* data path */
	struct mlx5_wq_ll wq;
	struct mtx mtx;
	bus_dma_tag_t dma_tag;
	u32	wqe_sz;
	u32	nsegs;
	struct mlx5e_rq_mbuf *mbuf;
	struct ifnet *ifp;
	struct mlx5e_rq_stats stats;
	struct mlx5e_cq cq;
	struct lro_ctrl lro;
	volatile int enabled;
	int	ix;

	/* Dynamic Interrupt Moderation */
	struct net_dim dim;

	/* control */
	struct mlx5_wq_ctrl wq_ctrl;
	u32	rqn;
	struct mlx5e_channel *channel;
	struct callout watchdog;
} __aligned(MLX5E_CACHELINE_SIZE);

struct mlx5e_sq_mbuf {
	bus_dmamap_t dma_map;
	struct mbuf *mbuf;
	u32	num_bytes;
	u32	num_wqebbs;
};

enum {
	MLX5E_SQ_READY,
	MLX5E_SQ_FULL
};

struct mlx5e_sq {
	/* data path */
	struct	mtx lock;
	bus_dma_tag_t dma_tag;
	struct	mtx comp_lock;

	/* dirtied @completion */
	u16	cc;

	/* dirtied @xmit */
	u16	pc __aligned(MLX5E_CACHELINE_SIZE);
	u16	bf_offset;
	u16	cev_counter;		/* completion event counter */
	u16	cev_factor;		/* completion event factor */
	u16	cev_next_state;		/* next completion event state */
#define	MLX5E_CEV_STATE_INITIAL 0	/* timer not started */
#define	MLX5E_CEV_STATE_SEND_NOPS 1	/* send NOPs */
#define	MLX5E_CEV_STATE_HOLD_NOPS 2	/* don't send NOPs yet */
	u16	running;		/* set if SQ is running */
	struct callout cev_callout;
	union {
		u32	d32[2];
		u64	d64;
	} doorbell;
	struct	mlx5e_sq_stats stats;

	struct	mlx5e_cq cq;

	/* pointers to per packet info: write@xmit, read@completion */
	struct	mlx5e_sq_mbuf *mbuf;
	struct	buf_ring *br;

	/* read only */
	struct	mlx5_wq_cyc wq;
	struct	mlx5_uar uar;
	struct	ifnet *ifp;
	u32	sqn;
	u32	bf_buf_size;
	u32	mkey_be;
	u16	max_inline;
	u8	min_inline_mode;
	u8	min_insert_caps;
#define	MLX5E_INSERT_VLAN 1
#define	MLX5E_INSERT_NON_VLAN 2

	/* control path */
	struct	mlx5_wq_ctrl wq_ctrl;
	struct	mlx5e_priv *priv;
	int	tc;
} __aligned(MLX5E_CACHELINE_SIZE);

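/*
 * Editor's note: the helper below computes free space in the send queue
 * ring.  "pc" (producer counter) and "cc" (consumer counter) are free
 * running 16-bit counters, so masking (cc - pc) with sz_m1 (ring size
 * minus one, a power of two) yields the number of free entries modulo
 * the ring size; the extra "cc == pc" test distinguishes a completely
 * empty ring (all entries free) from a completely full one.
 */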
static inline bool
mlx5e_sq_has_room_for(struct mlx5e_sq *sq, u16 n)
{
	u16 cc = sq->cc;
	u16 pc = sq->pc;

	return ((sq->wq.sz_m1 & (cc - pc)) >= n || cc == pc);
}

struct mlx5e_channel {
	/* data path */
	struct mlx5e_rq rq;
	struct mlx5e_sq sq[MLX5E_MAX_TX_NUM_TC];
	struct ifnet *ifp;
	u32	mkey_be;
	u8	num_tc;

	/* control */
	struct mlx5e_priv *priv;
	int	ix;
	int	cpu;
} __aligned(MLX5E_CACHELINE_SIZE);

enum mlx5e_traffic_types {
	MLX5E_TT_IPV4_TCP,
	MLX5E_TT_IPV6_TCP,
	MLX5E_TT_IPV4_UDP,
	MLX5E_TT_IPV6_UDP,
	MLX5E_TT_IPV4_IPSEC_AH,
	MLX5E_TT_IPV6_IPSEC_AH,
	MLX5E_TT_IPV4_IPSEC_ESP,
	MLX5E_TT_IPV6_IPSEC_ESP,
	MLX5E_TT_IPV4,
	MLX5E_TT_IPV6,
	MLX5E_TT_ANY,
	MLX5E_NUM_TT,
};

enum {
	MLX5E_RQT_SPREADING = 0,
	MLX5E_RQT_DEFAULT_RQ = 1,
	MLX5E_NUM_RQT = 2,
};

struct mlx5_flow_rule;

struct mlx5e_eth_addr_info {
	u8	addr [ETH_ALEN + 2];
	u32	tt_vec;
	/* flow table rule per traffic type */
	struct mlx5_flow_rule	*ft_rule[MLX5E_NUM_TT];
};

#define	MLX5E_ETH_ADDR_HASH_SIZE (1 << BITS_PER_BYTE)

struct mlx5e_eth_addr_hash_node;

struct mlx5e_eth_addr_hash_head {
	struct mlx5e_eth_addr_hash_node *lh_first;
};

struct mlx5e_eth_addr_db {
	struct mlx5e_eth_addr_hash_head if_uc[MLX5E_ETH_ADDR_HASH_SIZE];
	struct mlx5e_eth_addr_hash_head if_mc[MLX5E_ETH_ADDR_HASH_SIZE];
	struct mlx5e_eth_addr_info broadcast;
	struct mlx5e_eth_addr_info allmulti;
	struct mlx5e_eth_addr_info promisc;
	bool	broadcast_enabled;
	bool	allmulti_enabled;
	bool	promisc_enabled;
};

enum {
	MLX5E_STATE_ASYNC_EVENTS_ENABLE,
	MLX5E_STATE_OPENED,
};

enum {
	MLX5_BW_NO_LIMIT   = 0,
	MLX5_100_MBPS_UNIT = 3,
	MLX5_GBPS_UNIT     = 4,
};

struct mlx5e_vlan_db {
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
	struct mlx5_flow_rule	*active_vlans_ft_rule[VLAN_N_VID];
	struct mlx5_flow_rule	*untagged_ft_rule;
	struct mlx5_flow_rule	*any_cvlan_ft_rule;
	struct mlx5_flow_rule	*any_svlan_ft_rule;
	bool	filter_disabled;
};

struct mlx5e_flow_table {
	int num_groups;
	struct mlx5_flow_table *t;
	struct mlx5_flow_group **g;
};

struct mlx5e_flow_tables {
	struct mlx5_flow_namespace *ns;
	struct mlx5e_flow_table vlan;
	struct mlx5e_flow_table main;
	struct mlx5e_flow_table inner_rss;
};

struct mlx5e_priv {
	struct mlx5_core_dev *mdev;     /* must be first */

	/* priv data path fields - start */
	int	order_base_2_num_channels;
	int	queue_mapping_channel_mask;
	int	num_tc;
	int	default_vlan_prio;
	/* priv data path fields - end */

	unsigned long state;
	int	gone;
#define	PRIV_LOCK(priv) sx_xlock(&(priv)->state_lock)
#define	PRIV_UNLOCK(priv) sx_xunlock(&(priv)->state_lock)
#define	PRIV_LOCKED(priv) sx_xlocked(&(priv)->state_lock)
	struct sx state_lock;		/* Protects Interface state */
	struct mlx5_uar cq_uar;
	u32	pdn;
	u32	tdn;
	struct mlx5_core_mr mr;

	u32	tisn[MLX5E_MAX_TX_NUM_TC];
	u32	rqtn;
	u32	tirn[MLX5E_NUM_TT];

	struct mlx5e_flow_tables fts;
	struct mlx5e_eth_addr_db eth_addr;
	struct mlx5e_vlan_db vlan;

	struct mlx5e_params params;
	struct mlx5e_params_ethtool params_ethtool;
	union mlx5_core_pci_diagnostics params_pci;
	union mlx5_core_general_diagnostics params_general;
	struct mtx async_events_mtx;	/* sync hw events */
	struct work_struct update_stats_work;
	struct work_struct update_carrier_work;
	struct work_struct set_rx_mode_work;
	MLX5_DECLARE_DOORBELL_LOCK(doorbell_lock)

	struct ifnet *ifp;
	struct sysctl_ctx_list sysctl_ctx;
	struct sysctl_oid *sysctl_ifnet;
	struct sysctl_oid *sysctl_hw;
	int	sysctl_debug;
	struct mlx5e_stats stats;
	int	counter_set_id;

	struct workqueue_struct *wq;

	eventhandler_tag vlan_detach;
	eventhandler_tag vlan_attach;
	struct ifmedia media;
	int	media_status_last;
	int	media_active_last;

	struct callout watchdog;

	struct mlx5e_channel channel[];
};

#define	MLX5E_NET_IP_ALIGN 2

struct mlx5e_tx_wqe {
	struct mlx5_wqe_ctrl_seg ctrl;
	struct mlx5_wqe_eth_seg eth;
};

struct mlx5e_rx_wqe {
	struct mlx5_wqe_srq_next_seg next;
	struct mlx5_wqe_data_seg data[];
};

/* the size of the structure above must be a power of two */
CTASSERT(powerof2(sizeof(struct mlx5e_rx_wqe)));

struct mlx5e_eeprom {
	int	lock_bit;
	int	i2c_addr;
	int	page_num;
	int	device_addr;
	int	module_num;
	int	len;
	int	type;
	int	page_valid;
	u32	*data;
};

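/*
 * Editor's note: the macro below evaluates to the largest value that
 * fits in the named register field, using the field width reported by
 * __mlx5_bit_sz(); for an 8-bit field it yields 255.
 */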
#define	MLX5E_FLD_MAX(typ, fld) ((1ULL << __mlx5_bit_sz(typ, fld)) - 1ULL)

int	mlx5e_xmit(struct ifnet *, struct mbuf *);

int	mlx5e_open_locked(struct ifnet *);
int	mlx5e_close_locked(struct ifnet *);

void	mlx5e_cq_error_event(struct mlx5_core_cq *mcq, int event);
void	mlx5e_rx_cq_comp(struct mlx5_core_cq *);
void	mlx5e_tx_cq_comp(struct mlx5_core_cq *);
struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq);

void	mlx5e_dim_work(struct work_struct *);
void	mlx5e_dim_build_cq_param(struct mlx5e_priv *, struct mlx5e_cq_param *);

int	mlx5e_open_flow_table(struct mlx5e_priv *priv);
void	mlx5e_close_flow_table(struct mlx5e_priv *priv);
void	mlx5e_set_rx_mode_core(struct mlx5e_priv *priv);
void	mlx5e_set_rx_mode_work(struct work_struct *work);

void	mlx5e_vlan_rx_add_vid(void *, struct ifnet *, u16);
void	mlx5e_vlan_rx_kill_vid(void *, struct ifnet *, u16);
void	mlx5e_enable_vlan_filter(struct mlx5e_priv *priv);
void	mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
int	mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv);
void	mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv);

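/*
 * Editor's note: the inline function below rings the hardware doorbell
 * after new WQEs have been written to the send queue.  The doorbell
 * record in host memory is updated first; then, if "bf_sz" is non-zero,
 * the WQE itself is copied into the BlueFlame area of the UAR (a
 * write-combining mapping that lets small WQEs reach the device without
 * a separate DMA read), otherwise a plain 64-bit doorbell write is used.
 * "bf_offset" alternates between the two BlueFlame buffers.
 */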
static inline void
mlx5e_tx_notify_hw(struct mlx5e_sq *sq, u32 *wqe, int bf_sz)
{
	u16 ofst = MLX5_BF_OFFSET + sq->bf_offset;

	/* ensure wqe is visible to device before updating doorbell record */
	wmb();

	*sq->wq.db = cpu_to_be32(sq->pc);

	/*
	 * Ensure the doorbell record is visible to device before ringing
	 * the doorbell:
	 */
	wmb();

	if (bf_sz) {
		__iowrite64_copy(sq->uar.bf_map + ofst, wqe, bf_sz);

		/* flush the write-combining mapped buffer */
		wmb();

	} else {
		mlx5_write64(wqe, sq->uar.map + ofst,
		    MLX5_GET_DOORBELL_LOCK(&sq->priv->doorbell_lock));
	}

	sq->bf_offset ^= sq->bf_buf_size;
}

static inline void
mlx5e_cq_arm(struct mlx5e_cq *cq, spinlock_t *dblock)
{
	struct mlx5_core_cq *mcq;

	mcq = &cq->mcq;
	mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, dblock, cq->wq.cc);
}

extern const struct ethtool_ops mlx5e_ethtool_ops;
void	mlx5e_create_ethtool(struct mlx5e_priv *);
void	mlx5e_create_stats(struct sysctl_ctx_list *,
    struct sysctl_oid_list *, const char *,
    const char **, unsigned, u64 *);
void	mlx5e_send_nop(struct mlx5e_sq *, u32);
void	mlx5e_sq_cev_timeout(void *);
int	mlx5e_refresh_channel_params(struct mlx5e_priv *);
int	mlx5e_open_cq(struct mlx5e_priv *, struct mlx5e_cq_param *,
    struct mlx5e_cq *, mlx5e_cq_comp_t *, int eq_ix);
void	mlx5e_close_cq(struct mlx5e_cq *);
void	mlx5e_free_sq_db(struct mlx5e_sq *);
int	mlx5e_alloc_sq_db(struct mlx5e_sq *);
int	mlx5e_enable_sq(struct mlx5e_sq *, struct mlx5e_sq_param *, int tis_num);
int	mlx5e_modify_sq(struct mlx5e_sq *, int curr_state, int next_state);
void	mlx5e_disable_sq(struct mlx5e_sq *);
void	mlx5e_drain_sq(struct mlx5e_sq *);
void	mlx5e_modify_tx_dma(struct mlx5e_priv *priv, uint8_t value);
void	mlx5e_modify_rx_dma(struct mlx5e_priv *priv, uint8_t value);
void	mlx5e_resume_sq(struct mlx5e_sq *sq);
void	mlx5e_update_sq_inline(struct mlx5e_sq *sq);
void	mlx5e_refresh_sq_inline(struct mlx5e_priv *priv);

#endif					/* _MLX5_EN_H_ */