/*
 * Copyright (c) 2007, 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#ifndef _MLX4_EN_H_
#define _MLX4_EN_H_

#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/kobject.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/if_ether.h>
#ifdef CONFIG_MLX4_EN_DCB
#include <linux/dcbnl.h>
#endif

#include <linux/mlx4/device.h>
#include <linux/mlx4/qp.h>
#include <linux/mlx4/cq.h>
#include <linux/mlx4/srq.h>
#include <linux/mlx4/doorbell.h>
#include <linux/mlx4/cmd.h>

#include <netinet/tcp_lro.h>

#include "en_port.h"
#include "mlx4_stats.h"

#define DRV_NAME	"mlx4_en"

#define MLX4_EN_MSG_LEVEL	(NETIF_MSG_LINK | NETIF_MSG_IFDOWN)

/*
 * Device constants
 */


#define MLX4_EN_PAGE_SHIFT	12
#define MLX4_EN_PAGE_SIZE	(1 << MLX4_EN_PAGE_SHIFT)
#define	MLX4_NET_IP_ALIGN	2	/* bytes */
#define DEF_RX_RINGS		16
#define MAX_RX_RINGS		128
#define MIN_RX_RINGS		4
#define TXBB_SIZE		64
#define HEADROOM		(2048 / TXBB_SIZE + 1)
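/*
 * Illustrative note (not from the original sources): with TXBB_SIZE of 64
 * bytes, HEADROOM evaluates to 2048 / 64 + 1 = 33 TXBBs.  It appears to be
 * used as reserved slack when checking how full a TX ring is allowed to get.
 */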
#define STAMP_STRIDE		64
#define STAMP_DWORDS		(STAMP_STRIDE / 4)
#define STAMP_SHIFT		31
#define STAMP_VAL		0x7fffffff
#define STATS_DELAY		(HZ / 4)
#define SERVICE_TASK_DELAY	(HZ / 4)
#define MAX_NUM_OF_FS_RULES	256

#define MLX4_EN_FILTER_HASH_SHIFT 4
#define MLX4_EN_FILTER_EXPIRY_QUOTA 60

#ifdef CONFIG_NET_RX_BUSY_POLL
#define LL_EXTENDED_STATS
#endif

/* vlan valid range */
#define VLAN_MIN_VALUE		1
#define VLAN_MAX_VALUE		4094

/*
 * OS related constants and tunables
 */

#define MLX4_EN_WATCHDOG_TIMEOUT	(15 * HZ)

#define MLX4_EN_ALLOC_SIZE     PAGE_ALIGN(PAGE_SIZE)
#define MLX4_EN_ALLOC_ORDER    get_order(MLX4_EN_ALLOC_SIZE)

enum mlx4_en_alloc_type {
	MLX4_EN_ALLOC_NEW = 0,
	MLX4_EN_ALLOC_REPLACEMENT = 1,
};

111219820Sjeff/* Maximum ring sizes */
112272407Shselasky#define MLX4_EN_DEF_TX_QUEUE_SIZE       4096
113272407Shselasky
114272407Shselasky/* Minimum packet number till arming the CQ */
115272407Shselasky#define MLX4_EN_MIN_RX_ARM	2048
116272407Shselasky#define MLX4_EN_MIN_TX_ARM	2048
117272407Shselasky
/* Maximum ring sizes */
#define MLX4_EN_MAX_TX_SIZE	8192
#define MLX4_EN_MAX_RX_SIZE	8192

/* Minimum ring sizes */
#define MLX4_EN_MIN_RX_SIZE	(4096 / TXBB_SIZE)
#define MLX4_EN_MIN_TX_SIZE	(4096 / TXBB_SIZE)

#define MLX4_EN_SMALL_PKT_SIZE		64

#define MLX4_EN_MAX_TX_RING_P_UP	32
#define MLX4_EN_NUM_UP			1

#define MAX_TX_RINGS			(MLX4_EN_MAX_TX_RING_P_UP * \
					MLX4_EN_NUM_UP)

#define MLX4_EN_DEF_TX_RING_SIZE	1024
#define MLX4_EN_DEF_RX_RING_SIZE	1024

/* Target number of bytes to coalesce with interrupt moderation */
#define MLX4_EN_RX_COAL_TARGET	0x20000
#define MLX4_EN_RX_COAL_TIME	0x10

#define MLX4_EN_TX_COAL_PKTS	64
#define MLX4_EN_TX_COAL_TIME	64

#define MLX4_EN_RX_RATE_LOW		400000
#define MLX4_EN_RX_COAL_TIME_LOW	0
#define MLX4_EN_RX_RATE_HIGH		450000
#define MLX4_EN_RX_COAL_TIME_HIGH	128
#define MLX4_EN_RX_SIZE_THRESH		1024
#define MLX4_EN_RX_RATE_THRESH		(1000000 / MLX4_EN_RX_COAL_TIME_HIGH)
#define MLX4_EN_SAMPLE_INTERVAL		0
#define MLX4_EN_AVG_PKT_SMALL		256
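/*
 * Illustrative note (not from the original sources): these constants drive
 * adaptive RX interrupt moderation.  For example, with
 * MLX4_EN_RX_COAL_TIME_HIGH of 128 usec, MLX4_EN_RX_RATE_THRESH works out to
 * 1000000 / 128 = 7812 packets/sec.  Rings sustaining rates above
 * MLX4_EN_RX_RATE_HIGH tend toward the longer coalescing time, while rates
 * below MLX4_EN_RX_RATE_LOW fall back to MLX4_EN_RX_COAL_TIME_LOW.
 */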

#define MLX4_EN_AUTO_CONF	0xffff

#define MLX4_EN_DEF_RX_PAUSE	1
#define MLX4_EN_DEF_TX_PAUSE	1

/* Interval between successive polls in the Tx routine when polling is used
   instead of interrupts (in per-core Tx rings) - should be power of 2 */
#define MLX4_EN_TX_POLL_MODER	16
#define MLX4_EN_TX_POLL_TIMEOUT	(HZ / 4)

#define MLX4_EN_64_ALIGN	(64 - NET_SKB_PAD)
#define SMALL_PACKET_SIZE      (256 - NET_IP_ALIGN)
#define HEADER_COPY_SIZE       (128)
#define MLX4_LOOPBACK_TEST_PAYLOAD (HEADER_COPY_SIZE - ETHER_HDR_LEN)

#define MLX4_EN_MIN_MTU		46
#define ETH_BCAST		0xffffffffffffULL

#define MLX4_EN_LOOPBACK_RETRIES	5
#define MLX4_EN_LOOPBACK_TIMEOUT	100

#ifdef MLX4_EN_PERF_STAT
/* Number of samples to 'average' */
#define AVG_SIZE			128
#define AVG_FACTOR			1024

#define INC_PERF_COUNTER(cnt)		(++(cnt))
#define ADD_PERF_COUNTER(cnt, add)	((cnt) += (add))
#define AVG_PERF_COUNTER(cnt, sample) \
	((cnt) = ((cnt) * (AVG_SIZE - 1) + (sample) * AVG_FACTOR) / AVG_SIZE)
#define GET_PERF_COUNTER(cnt)		(cnt)
#define GET_AVG_PERF_COUNTER(cnt)	((cnt) / AVG_FACTOR)

#else

#define INC_PERF_COUNTER(cnt)		do {} while (0)
#define ADD_PERF_COUNTER(cnt, add)	do {} while (0)
#define AVG_PERF_COUNTER(cnt, sample)	do {} while (0)
#define GET_PERF_COUNTER(cnt)		(0)
#define GET_AVG_PERF_COUNTER(cnt)	(0)
#endif /* MLX4_EN_PERF_STAT */
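/*
 * Illustrative note (not from the original sources): AVG_PERF_COUNTER()
 * maintains an exponential moving average scaled by AVG_FACTOR so it can be
 * kept with integer arithmetic:
 *
 *	cnt = (cnt * (AVG_SIZE - 1) + sample * AVG_FACTOR) / AVG_SIZE
 *
 * e.g. starting from cnt = 0 and feeding a constant sample of 100, the
 * counter converges toward 100 * AVG_FACTOR = 102400, and
 * GET_AVG_PERF_COUNTER() divides that back down to roughly 100.
 */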

/*
 * Configurables
 */

enum cq_type {
	RX = 0,
	TX = 1,
};


/*
 * Useful macros
 */
#define ROUNDUP_LOG2(x)		ilog2(roundup_pow_of_two(x))
#define XNOR(x, y)		(!(x) == !(y))
#define ILLEGAL_MAC(addr)	((addr) == 0xffffffffffffULL || (addr) == 0x0)
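/*
 * Illustrative examples (not from the original sources):
 *	ROUNDUP_LOG2(1000)	-> ilog2(1024) == 10
 *	XNOR(0, 0)		-> 1,  XNOR(3, 0) -> 0
 *	ILLEGAL_MAC(0x0ULL)	-> true (all-zero and broadcast MACs are rejected)
 */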

struct mlx4_en_tx_info {
	bus_dmamap_t dma_map;
	struct mbuf *mb;
	u32 nr_txbb;
	u32 nr_bytes;
};


#define MLX4_EN_BIT_DESC_OWN	0x80000000
#define CTRL_SIZE	sizeof(struct mlx4_wqe_ctrl_seg)
#define MLX4_EN_MEMTYPE_PAD	0x100
#define DS_SIZE		sizeof(struct mlx4_wqe_data_seg)


struct mlx4_en_tx_desc {
	struct mlx4_wqe_ctrl_seg ctrl;
	union {
		struct mlx4_wqe_data_seg data; /* at least one data segment */
		struct mlx4_wqe_lso_seg lso;
		struct mlx4_wqe_inline_seg inl;
	};
};
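/*
 * Illustrative note (not from the original sources): a TX WQE begins with a
 * control segment (CTRL_SIZE bytes) followed by either an LSO header
 * segment, an inline segment, or one or more data segments of DS_SIZE bytes
 * each; the whole descriptor occupies a multiple of TXBB_SIZE bytes.
 */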

#define MLX4_EN_USE_SRQ		0x01000000

#define MLX4_EN_RX_BUDGET 64

#define	MLX4_EN_TX_MAX_DESC_SIZE 512	/* bytes */
#define	MLX4_EN_TX_MAX_MBUF_SIZE 65536	/* bytes */
#define	MLX4_EN_TX_MAX_PAYLOAD_SIZE 65536	/* bytes */
#define	MLX4_EN_TX_MAX_MBUF_FRAGS \
    ((MLX4_EN_TX_MAX_DESC_SIZE - 128) / DS_SIZE_ALIGNMENT) /* units */
#define	MLX4_EN_TX_WQE_MAX_WQEBBS			\
    (MLX4_EN_TX_MAX_DESC_SIZE / TXBB_SIZE) /* units */
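/*
 * Illustrative note (not from the original sources): with
 * MLX4_EN_TX_MAX_DESC_SIZE of 512 bytes and TXBB_SIZE of 64 bytes, a single
 * TX WQE spans at most 512 / 64 = 8 WQEBBs; the 128 bytes subtracted when
 * computing MLX4_EN_TX_MAX_MBUF_FRAGS presumably leave room for the control
 * segment and a possible LSO/inline header.
 */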

#define MLX4_EN_CX3_LOW_ID	0x1000
#define MLX4_EN_CX3_HIGH_ID	0x1005

struct mlx4_en_tx_ring {
	spinlock_t tx_lock;
	bus_dma_tag_t dma_tag;
	struct mlx4_hwq_resources wqres;
	u32 size; /* number of TXBBs */
	u32 size_mask;
	u16 stride;
	u16 cqn;	/* index of port CQ associated with this ring */
	u32 prod;
	u32 cons;
	u32 buf_size;
	u32 doorbell_qpn;
	u8 *buf;
	u16 poll_cnt;
	int blocked;
	struct mlx4_en_tx_info *tx_info;
	u8 queue_index;
	cpuset_t affinity_mask;
	struct buf_ring *br;
	u32 last_nr_txbb;
	struct mlx4_qp qp;
	struct mlx4_qp_context context;
	int qpn;
	enum mlx4_qp_state qp_state;
	struct mlx4_srq dummy;
	unsigned long bytes;
	unsigned long packets;
	unsigned long tx_csum;
	unsigned long queue_stopped;
	unsigned long oversized_packets;
	unsigned long wake_queue;
	unsigned long tso_packets;
	unsigned long defrag_attempts;
	struct mlx4_bf bf;
	bool bf_enabled;
	int hwtstamp_tx_type;
	spinlock_t comp_lock;
	int inline_thold;
	u64 watchdog_time;
};

struct mlx4_en_rx_desc {
	/* actual number of entries depends on rx ring stride */
	struct mlx4_wqe_data_seg data[0];
};

struct mlx4_en_rx_mbuf {
	bus_dmamap_t dma_map;
	struct mbuf *mbuf;
};

struct mlx4_en_rx_spare {
	bus_dmamap_t dma_map;
	struct mbuf *mbuf;
	u64 paddr_be;
};

struct mlx4_en_rx_ring {
	struct mlx4_hwq_resources wqres;
	bus_dma_tag_t dma_tag;
	struct mlx4_en_rx_spare spare;
	u32 size;	/* number of Rx descs */
	u32 actual_size;
	u32 size_mask;
	u16 stride;
	u16 log_stride;
	u16 cqn;	/* index of port CQ associated with this ring */
	u32 prod;
	u32 cons;
	u32 buf_size;
	u8  fcs_del;
	u32 rx_mb_size;
	int qpn;
	u8 *buf;
	struct mlx4_en_rx_mbuf *mbuf;
	unsigned long errors;
	unsigned long bytes;
	unsigned long packets;
#ifdef LL_EXTENDED_STATS
	unsigned long yields;
	unsigned long misses;
	unsigned long cleaned;
#endif
	unsigned long csum_ok;
	unsigned long csum_none;
	int hwtstamp_rx_filter;
	int numa_node;
	struct lro_ctrl lro;
};

static inline int mlx4_en_can_lro(__be16 status)
{
	const __be16 status_all = cpu_to_be16(
			MLX4_CQE_STATUS_IPV4    |
			MLX4_CQE_STATUS_IPV4F   |
			MLX4_CQE_STATUS_IPV6    |
			MLX4_CQE_STATUS_IPV4OPT |
			MLX4_CQE_STATUS_TCP     |
			MLX4_CQE_STATUS_UDP     |
			MLX4_CQE_STATUS_IPOK);
	const __be16 status_ipv4_ipok_tcp = cpu_to_be16(
			MLX4_CQE_STATUS_IPV4    |
			MLX4_CQE_STATUS_IPOK    |
			MLX4_CQE_STATUS_TCP);
	const __be16 status_ipv6_ipok_tcp = cpu_to_be16(
			MLX4_CQE_STATUS_IPV6    |
			MLX4_CQE_STATUS_IPOK    |
			MLX4_CQE_STATUS_TCP);

	status &= status_all;
	return (status == status_ipv4_ipok_tcp ||
			status == status_ipv6_ipok_tcp);
}
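/*
 * Illustrative note (not from the original sources): the helper masks the
 * CQE status word down to the IP/TCP/UDP bits and accepts only clean IPv4
 * or IPv6 TCP completions.  A hypothetical use in the RX completion path
 * would look like:
 *
 *	if (mlx4_en_can_lro(cqe->status))
 *		... candidate for tcp_lro_rx() aggregation ...
 *	else
 *		... pass the mbuf straight up the stack ...
 */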

struct mlx4_en_cq {
	struct mlx4_cq          mcq;
	struct mlx4_hwq_resources wqres;
	int                     ring;
	spinlock_t              lock;
	struct net_device      *dev;
	/* Per-core Tx cq processing support */
	struct timer_list timer;
	int size;
	int buf_size;
	unsigned vector;
	enum cq_type is_tx;
	u16 moder_time;
	u16 moder_cnt;
	struct mlx4_cqe *buf;
	struct task cq_task;
	struct taskqueue *tq;
#define MLX4_EN_OPCODE_ERROR	0x1e
	u32 tot_rx;
	u32 tot_tx;
	u32 curr_poll_rx_cpu_id;

#ifdef CONFIG_NET_RX_BUSY_POLL
	unsigned int state;
#define MLX4_EN_CQ_STATEIDLE		0
#define MLX4_EN_CQ_STATENAPI		1	/* NAPI owns this CQ */
#define MLX4_EN_CQ_STATEPOLL		2	/* poll owns this CQ */
#define MLX4_CQ_LOCKED (MLX4_EN_CQ_STATENAPI | MLX4_EN_CQ_STATEPOLL)
#define MLX4_EN_CQ_STATENAPI_YIELD	4	/* NAPI yielded this CQ */
#define MLX4_EN_CQ_STATEPOLL_YIELD	8	/* poll yielded this CQ */
#define CQ_YIELD (MLX4_EN_CQ_STATENAPI_YIELD | MLX4_EN_CQ_STATEPOLL_YIELD)
#define CQ_USER_PEND (MLX4_EN_CQ_STATEPOLL | MLX4_EN_CQ_STATEPOLL_YIELD)
	spinlock_t poll_lock; /* protects from LLS/napi conflicts */
#endif  /* CONFIG_NET_RX_BUSY_POLL */
};

struct mlx4_en_port_profile {
	u32 flags;
	u32 tx_ring_num;
	u32 rx_ring_num;
	u32 tx_ring_size;
	u32 rx_ring_size;
	u8 rx_pause;
	u8 rx_ppp;
	u8 tx_pause;
	u8 tx_ppp;
	int rss_rings;
};

struct mlx4_en_profile {
	int rss_xor;
	int udp_rss;
	u8 rss_mask;
	u32 active_ports;
	u32 small_pkt_int;
	u8 no_reset;
	u8 num_tx_rings_p_up;
	struct mlx4_en_port_profile prof[MLX4_MAX_PORTS + 1];
};

struct mlx4_en_dev {
	struct mlx4_dev		*dev;
	struct pci_dev		*pdev;
	struct mutex		state_lock;
	struct net_device	*pndev[MLX4_MAX_PORTS + 1];
	u32			port_cnt;
	bool			device_up;
	struct mlx4_en_profile	profile;
	u32			LSO_support;
	struct workqueue_struct *workqueue;
	struct device		*dma_device;
	void __iomem		*uar_map;
	struct mlx4_uar		priv_uar;
	struct mlx4_mr		mr;
	u32			priv_pdn;
	spinlock_t		uar_lock;
	u8			mac_removed[MLX4_MAX_PORTS + 1];
	unsigned long		last_overflow_check;
	unsigned long		overflow_period;
};


struct mlx4_en_rss_map {
	int base_qpn;
	struct mlx4_qp qps[MAX_RX_RINGS];
	enum mlx4_qp_state state[MAX_RX_RINGS];
	struct mlx4_qp indir_qp;
	enum mlx4_qp_state indir_state;
};

struct mlx4_en_port_state {
	int link_state;
	int link_speed;
	int transciver;
	int autoneg;
};

enum mlx4_en_mclist_act {
	MCLIST_NONE,
	MCLIST_REM,
	MCLIST_ADD,
};

struct mlx4_en_mc_list {
	struct list_head	list;
	enum mlx4_en_mclist_act	action;
	u8			addr[ETH_ALEN];
	u64			reg_id;
};

#ifdef CONFIG_MLX4_EN_DCB
/* Minimal TC BW - setting to 0 will block traffic */
#define MLX4_EN_BW_MIN 1
#define MLX4_EN_BW_MAX 100 /* Utilize 100% of the line */

#define MLX4_EN_TC_ETS 7

#endif


enum {
	MLX4_EN_FLAG_PROMISC		= (1 << 0),
	MLX4_EN_FLAG_MC_PROMISC		= (1 << 1),
	/* whether we need to enable hardware loopback by putting dmac
	 * in Tx WQE
	 */
	MLX4_EN_FLAG_ENABLE_HW_LOOPBACK	= (1 << 2),
	/* whether we need to drop packets that hardware loopback-ed */
	MLX4_EN_FLAG_RX_FILTER_NEEDED	= (1 << 3),
	MLX4_EN_FLAG_FORCE_PROMISC	= (1 << 4),
#ifdef CONFIG_MLX4_EN_DCB
	MLX4_EN_FLAG_DCB_ENABLED	= (1 << 5)
#endif
};

#define MLX4_EN_MAC_HASH_SIZE (1 << BITS_PER_BYTE)
#define MLX4_EN_MAC_HASH_IDX 5

struct en_port {
	struct kobject		kobj;
	struct mlx4_dev		*dev;
	u8			port_num;
	u8			vport_num;
};

struct mlx4_en_priv {
	struct mlx4_en_dev *mdev;
	struct mlx4_en_port_profile *prof;
	struct net_device *dev;
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
	struct mlx4_en_port_state port_state;
	spinlock_t stats_lock;
	/* To allow rules removal while port is going down */
	struct list_head ethtool_list;

	unsigned long last_moder_packets[MAX_RX_RINGS];
	unsigned long last_moder_tx_packets;
	unsigned long last_moder_bytes[MAX_RX_RINGS];
	unsigned long last_moder_jiffies;
	int last_moder_time[MAX_RX_RINGS];
	u16 rx_usecs;
	u16 rx_frames;
	u16 tx_usecs;
	u16 tx_frames;
	u32 pkt_rate_low;
	u32 rx_usecs_low;
	u32 pkt_rate_high;
	u32 rx_usecs_high;
	u32 sample_interval;
	u32 adaptive_rx_coal;
	u32 msg_enable;
	u32 loopback_ok;
	u32 validate_loopback;

	struct mlx4_hwq_resources res;
	int link_state;
	int last_link_state;
	bool port_up;
	int port;
	int registered;
	int allocated;
	int stride;
	unsigned char current_mac[ETH_ALEN + 2];
	u64 mac;
	int mac_index;
	unsigned max_mtu;
	int base_qpn;
	int cqe_factor;

	struct mlx4_en_rss_map rss_map;
	u32 flags;
	u8 num_tx_rings_p_up;
	u32 tx_ring_num;
	u32 rx_ring_num;
	u32 rx_mb_size;

	struct mlx4_en_tx_ring **tx_ring;
	struct mlx4_en_rx_ring *rx_ring[MAX_RX_RINGS];
	struct mlx4_en_cq **tx_cq;
	struct mlx4_en_cq *rx_cq[MAX_RX_RINGS];
	struct mlx4_qp drop_qp;
	struct work_struct rx_mode_task;
	struct work_struct watchdog_task;
	struct work_struct linkstate_task;
	struct delayed_work stats_task;
	struct delayed_work service_task;
	struct mlx4_en_perf_stats pstats;
	struct mlx4_en_pkt_stats pkstats;
	struct mlx4_en_flow_stats flowstats[MLX4_NUM_PRIORITIES];
	struct mlx4_en_port_stats port_stats;
	struct mlx4_en_vport_stats vport_stats;
	struct mlx4_en_vf_stats vf_stats;
	struct list_head mc_list;
	struct list_head curr_list;
	u64 broadcast_id;
	struct mlx4_en_stat_out_mbox hw_stats;
	int vids[128];
	bool wol;
	struct device *ddev;
	struct dentry *dev_root;
	u32 counter_index;
	eventhandler_tag vlan_attach;
	eventhandler_tag vlan_detach;
	struct callout watchdog_timer;
	struct ifmedia media;
	volatile int blocked;
	struct sysctl_oid *conf_sysctl;
	struct sysctl_oid *stat_sysctl;
	struct sysctl_ctx_list conf_ctx;
	struct sysctl_ctx_list stat_ctx;
#define MLX4_EN_MAC_HASH_IDX 5
	struct hlist_head mac_hash[MLX4_EN_MAC_HASH_SIZE];

#ifdef CONFIG_MLX4_EN_DCB
	struct ieee_ets ets;
	u16 maxrate[IEEE_8021QAZ_MAX_TCS];
	u8 dcbx_cap;
#endif
#ifdef CONFIG_RFS_ACCEL
	spinlock_t filters_lock;
	int last_filter_id;
	struct list_head filters;
	struct hlist_head filter_hash[1 << MLX4_EN_FILTER_HASH_SHIFT];
#endif
	struct en_port *vf_ports[MLX4_MAX_NUM_VF];
	unsigned long last_ifq_jiffies;
	u64 if_counters_rx_errors;
	u64 if_counters_rx_no_buffer;
};

enum mlx4_en_wol {
	MLX4_EN_WOL_MAGIC = (1ULL << 61),
	MLX4_EN_WOL_ENABLED = (1ULL << 62),
};

struct mlx4_mac_entry {
	struct hlist_node hlist;
	unsigned char mac[ETH_ALEN + 2];
	u64 reg_id;
};

#ifdef CONFIG_NET_RX_BUSY_POLL
static inline void mlx4_en_cq_init_lock(struct mlx4_en_cq *cq)
{
	spin_lock_init(&cq->poll_lock);
	cq->state = MLX4_EN_CQ_STATEIDLE;
}

/* called from the device poll routine to get ownership of a cq */
static inline bool mlx4_en_cq_lock_napi(struct mlx4_en_cq *cq)
{
	int rc = true;
	spin_lock(&cq->poll_lock);
	if (cq->state & MLX4_CQ_LOCKED) {
		WARN_ON(cq->state & MLX4_EN_CQ_STATENAPI);
		cq->state |= MLX4_EN_CQ_STATENAPI_YIELD;
		rc = false;
	} else
		/* we don't care if someone yielded */
		cq->state = MLX4_EN_CQ_STATENAPI;
	spin_unlock(&cq->poll_lock);
	return rc;
}

/* returns true if someone tried to get the cq while napi had it */
static inline bool mlx4_en_cq_unlock_napi(struct mlx4_en_cq *cq)
{
	int rc = false;
	spin_lock(&cq->poll_lock);
	WARN_ON(cq->state & (MLX4_EN_CQ_STATEPOLL |
			     MLX4_EN_CQ_STATENAPI_YIELD));

	if (cq->state & MLX4_EN_CQ_STATEPOLL_YIELD)
		rc = true;
	cq->state = MLX4_EN_CQ_STATEIDLE;
	spin_unlock(&cq->poll_lock);
	return rc;
}

/* called from mlx4_en_low_latency_poll() */
static inline bool mlx4_en_cq_lock_poll(struct mlx4_en_cq *cq)
{
	int rc = true;
	spin_lock_bh(&cq->poll_lock);
	if ((cq->state & MLX4_CQ_LOCKED)) {
		struct net_device *dev = cq->dev;
		struct mlx4_en_priv *priv = netdev_priv(dev);
		struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring];

		cq->state |= MLX4_EN_CQ_STATEPOLL_YIELD;
		rc = false;
#ifdef LL_EXTENDED_STATS
		rx_ring->yields++;
#endif
	} else
		/* preserve yield marks */
		cq->state |= MLX4_EN_CQ_STATEPOLL;
	spin_unlock_bh(&cq->poll_lock);
	return rc;
}

/* returns true if someone tried to get the cq while it was locked */
static inline bool mlx4_en_cq_unlock_poll(struct mlx4_en_cq *cq)
{
	int rc = false;
	spin_lock_bh(&cq->poll_lock);
	WARN_ON(cq->state & (MLX4_EN_CQ_STATENAPI));

	if (cq->state & MLX4_EN_CQ_STATEPOLL_YIELD)
		rc = true;
	cq->state = MLX4_EN_CQ_STATEIDLE;
	spin_unlock_bh(&cq->poll_lock);
	return rc;
}

/* true if a socket is polling, even if it did not get the lock */
static inline bool mlx4_en_cq_ll_polling(struct mlx4_en_cq *cq)
{
	WARN_ON(!(cq->state & MLX4_CQ_LOCKED));
	return cq->state & CQ_USER_PEND;
}
#else
static inline void mlx4_en_cq_init_lock(struct mlx4_en_cq *cq)
{
}

static inline bool mlx4_en_cq_lock_napi(struct mlx4_en_cq *cq)
{
	return true;
}

static inline bool mlx4_en_cq_unlock_napi(struct mlx4_en_cq *cq)
{
	return false;
}

static inline bool mlx4_en_cq_lock_poll(struct mlx4_en_cq *cq)
{
	return false;
}

static inline bool mlx4_en_cq_unlock_poll(struct mlx4_en_cq *cq)
{
	return false;
}

static inline bool mlx4_en_cq_ll_polling(struct mlx4_en_cq *cq)
{
	return false;
}
#endif /* CONFIG_NET_RX_BUSY_POLL */
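/*
 * Illustrative usage sketch (not from the original sources): the helpers
 * above implement a small ownership state machine around a CQ so that the
 * interrupt-driven path and a busy-polling socket do not process the same
 * completion queue concurrently.  A consumer would typically do:
 *
 *	if (!mlx4_en_cq_lock_napi(cq))
 *		return;		// busy-poll owns it; the yield is recorded
 *	... process completions ...
 *	if (mlx4_en_cq_unlock_napi(cq))
 *		... someone yielded; consider re-polling or re-arming ...
 */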

#define MLX4_EN_WOL_DO_MODIFY (1ULL << 63)

void mlx4_en_destroy_netdev(struct net_device *dev);
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
			struct mlx4_en_port_profile *prof);

int mlx4_en_start_port(struct net_device *dev);
void mlx4_en_stop_port(struct net_device *dev);

void mlx4_en_free_resources(struct mlx4_en_priv *priv);
int mlx4_en_alloc_resources(struct mlx4_en_priv *priv);

int mlx4_en_pre_config(struct mlx4_en_priv *priv);
int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq,
		      int entries, int ring, enum cq_type mode, int node);
void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq);
int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
			int cq_idx);
void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);

void mlx4_en_tx_irq(struct mlx4_cq *mcq);
u16 mlx4_en_select_queue(struct net_device *dev, struct mbuf *mb);

int mlx4_en_transmit(struct ifnet *dev, struct mbuf *m);
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
			   struct mlx4_en_tx_ring **pring,
			   u32 size, u16 stride, int node, int queue_idx);
void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_tx_ring **pring);
int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_tx_ring *ring,
			     int cq, int user_prio);
void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
				struct mlx4_en_tx_ring *ring);
void mlx4_en_qflush(struct ifnet *dev);

int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
			   struct mlx4_en_rx_ring **pring,
			   u32 size, int node);
void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_rx_ring **pring,
			     u32 size, u16 stride);
void mlx4_en_tx_que(void *context, int pending);
void mlx4_en_rx_que(void *context, int pending);
int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv);
void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
				struct mlx4_en_rx_ring *ring);
int mlx4_en_process_rx_cq(struct net_device *dev,
			  struct mlx4_en_cq *cq,
			  int budget);
void mlx4_en_poll_tx_cq(unsigned long data);
void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
		int is_tx, int rss, int qpn, int cqn, int user_prio,
		struct mlx4_qp_context *context);
void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event);
int mlx4_en_map_buffer(struct mlx4_buf *buf);
void mlx4_en_unmap_buffer(struct mlx4_buf *buf);
void mlx4_en_calc_rx_buf(struct net_device *dev);

int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv);
void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv);
int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv);
void mlx4_en_destroy_drop_qp(struct mlx4_en_priv *priv);
int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring);
void mlx4_en_rx_irq(struct mlx4_cq *mcq);

int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode);
int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv);

int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset);
int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port);
int mlx4_en_get_vport_stats(struct mlx4_en_dev *mdev, u8 port);
void mlx4_en_create_debug_files(struct mlx4_en_priv *priv);
void mlx4_en_delete_debug_files(struct mlx4_en_priv *priv);
int mlx4_en_register_debugfs(void);
void mlx4_en_unregister_debugfs(void);

#ifdef CONFIG_MLX4_EN_DCB
extern const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops;
extern const struct dcbnl_rtnl_ops mlx4_en_dcbnl_pfc_ops;
#endif

int mlx4_en_setup_tc(struct net_device *dev, u8 up);

#ifdef CONFIG_RFS_ACCEL
void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv,
			     struct mlx4_en_rx_ring *rx_ring);
#endif

#define MLX4_EN_NUM_SELF_TEST	5
void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf);
void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev);

/*
 * Functions for time stamping
 */
#define SKBTX_HW_TSTAMP (1 << 0)
#define SKBTX_IN_PROGRESS (1 << 2)

u64 mlx4_en_get_cqe_ts(struct mlx4_cqe *cqe);

/* Functions for caching and restoring statistics */
int mlx4_en_get_sset_count(struct net_device *dev, int sset);
void mlx4_en_restore_ethtool_stats(struct mlx4_en_priv *priv,
				    u64 *data);

/*
 * Globals
 */
extern const struct ethtool_ops mlx4_en_ethtool_ops;

/*
 * Defines for link speed - needed by selftest
 */
#define MLX4_EN_LINK_SPEED_1G	1000
#define MLX4_EN_LINK_SPEED_10G	10000
#define MLX4_EN_LINK_SPEED_40G	40000

enum {
	NETIF_MSG_DRV		= 0x0001,
	NETIF_MSG_PROBE		= 0x0002,
	NETIF_MSG_LINK		= 0x0004,
	NETIF_MSG_TIMER		= 0x0008,
	NETIF_MSG_IFDOWN	= 0x0010,
	NETIF_MSG_IFUP		= 0x0020,
	NETIF_MSG_RX_ERR	= 0x0040,
	NETIF_MSG_TX_ERR	= 0x0080,
	NETIF_MSG_TX_QUEUED	= 0x0100,
	NETIF_MSG_INTR		= 0x0200,
	NETIF_MSG_TX_DONE	= 0x0400,
	NETIF_MSG_RX_STATUS	= 0x0800,
	NETIF_MSG_PKTDATA	= 0x1000,
	NETIF_MSG_HW		= 0x2000,
	NETIF_MSG_WOL		= 0x4000,
};


/*
 * printk / logging functions
 */

#define en_print(level, priv, format, arg...)			\
	{							\
		if ((priv)->registered)				\
			printk(level "%s: %s: " format, DRV_NAME, \
			    (priv->dev)->if_xname, ## arg);	\
		else						\
			printk(level "%s: %s: Port %d: " format, \
			    DRV_NAME, dev_name(&priv->mdev->pdev->dev), \
			    (priv)->port, ## arg);		\
	}


#define en_dbg(mlevel, priv, format, arg...)			\
do {								\
	if (NETIF_MSG_##mlevel & priv->msg_enable)		\
		en_print(KERN_DEBUG, priv, format, ##arg);	\
} while (0)
#define en_warn(priv, format, arg...)			\
	en_print(KERN_WARNING, priv, format, ##arg)
#define en_err(priv, format, arg...)			\
	en_print(KERN_ERR, priv, format, ##arg)
#define en_info(priv, format, arg...)			\
	en_print(KERN_INFO, priv, format, ## arg)
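
/*
 * Illustrative usage (not from the original sources):
 *
 *	en_dbg(DRV, priv, "ring %d allocated\n", ring_index);
 *	en_err(priv, "Failed to allocate TX ring\n");
 *
 * en_dbg() is gated by priv->msg_enable, so only the message classes
 * enabled via MLX4_EN_MSG_LEVEL (or changed later at runtime) are printed.
 */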

#define mlx4_err(mdev, format, arg...)			\
	pr_err("%s %s: " format, DRV_NAME,		\
	       dev_name(&mdev->pdev->dev), ##arg)
#define mlx4_info(mdev, format, arg...)			\
	pr_info("%s %s: " format, DRV_NAME,		\
		dev_name(&mdev->pdev->dev), ##arg)
#define mlx4_warn(mdev, format, arg...)			\
	pr_warning("%s %s: " format, DRV_NAME,		\
		   dev_name(&mdev->pdev->dev), ##arg)

#endif /* _MLX4_EN_H_ */