/*
 * Copyright (c) 2007, 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#ifndef _MLX4_EN_H_
#define _MLX4_EN_H_

#include <linux/mlx4/device.h>
#include <linux/mlx4/qp.h>

#include <linux/bitops.h>
#include <linux/if_vlan.h>
#include <linux/if_ether.h>
#include <linux/list.h>
#include <linux/timer.h>

#include "mlx4_stats.h"
#include "en_port.h"

#include <devif/queue_interface.h>

/*
 #include <linux/bitops.h>
 #include <linux/compiler.h>
 #include <linux/list.h>
 #include <linux/mutex.h>
 #include <linux/kobject.h>
 #include <linux/netdevice.h>
 #include <linux/if_vlan.h>
 #include <linux/if_ether.h>
 #ifdef CONFIG_MLX4_EN_DCB
 #include <linux/dcbnl.h>
 #endif

 #include <linux/mlx4/device.h>
 #include <linux/mlx4/qp.h>
 #include <linux/mlx4/cq.h>
 #include <linux/mlx4/srq.h>
 #include <linux/mlx4/doorbell.h>
 #include <linux/mlx4/cmd.h>

 #include <netinet/tcp_lro.h>

 #include "en_port.h"
 #include "mlx4_stats.h"

 */
// #define DRV_NAME	"mlx4_en"

#define MLX4_EN_MSG_LEVEL	(NETIF_MSG_LINK | NETIF_MSG_IFDOWN)

/* Device constants */

#define MLX4_EN_PAGE_SHIFT	12
#define MLX4_EN_PAGE_SIZE	(1 << MLX4_EN_PAGE_SHIFT)
#define	MLX4_NET_IP_ALIGN	2	/* bytes */
#define DEF_RX_RINGS		16
#define MAX_RX_RINGS		128
#define MIN_RX_RINGS		4
#define TXBB_SIZE		64
#define HEADROOM		(2048 / TXBB_SIZE + 1)
#define STAMP_STRIDE		64
#define STAMP_DWORDS		(STAMP_STRIDE / 4)
#define STAMP_SHIFT		31
#define STAMP_VAL		0x7fffffff
#define STATS_DELAY		(HZ / 4)
#define SERVICE_TASK_DELAY	(HZ / 4)
#define MAX_NUM_OF_FS_RULES	256

#define MLX4_EN_FILTER_HASH_SHIFT 4
#define MLX4_EN_FILTER_EXPIRY_QUOTA 60

#ifdef CONFIG_NET_RX_BUSY_POLL
#define LL_EXTENDED_STATS
#endif

/* VLAN valid range */
#define VLAN_MIN_VALUE		1
#define VLAN_MAX_VALUE		4094

/* OS related constants and tunables */

#define MLX4_EN_WATCHDOG_TIMEOUT	(15 * HZ)

#define MLX4_EN_ALLOC_SIZE     PAGE_ALIGN(PAGE_SIZE)
#define MLX4_EN_ALLOC_ORDER    get_order(MLX4_EN_ALLOC_SIZE)

enum mlx4_en_alloc_type {
	MLX4_EN_ALLOC_NEW = 0,
	MLX4_EN_ALLOC_REPLACEMENT = 1,
};

/* Default TX queue size */
#define MLX4_EN_DEF_TX_QUEUE_SIZE       4096

/* Minimum packet number till arming the CQ */
#define MLX4_EN_MIN_RX_ARM	2048
#define MLX4_EN_MIN_TX_ARM	2048

/* Maximum ring sizes */
#define MLX4_EN_MAX_TX_SIZE	8192
#define MLX4_EN_MAX_RX_SIZE	8192

/* Minimum ring sizes */
#define MLX4_EN_MIN_RX_SIZE	(4096 / TXBB_SIZE)
#define MLX4_EN_MIN_TX_SIZE	(4096 / TXBB_SIZE)

#define MLX4_EN_SMALL_PKT_SIZE		64

#define MLX4_EN_MAX_TX_RING_P_UP	32
#define MLX4_EN_NUM_UP			1

#define MAX_TX_RINGS			(MLX4_EN_MAX_TX_RING_P_UP * \
					 MLX4_EN_NUM_UP)

#define MLX4_EN_DEF_TX_RING_SIZE	1024
#define MLX4_EN_DEF_RX_RING_SIZE	4096

/* Target number of bytes to coalesce with interrupt moderation */
#define MLX4_EN_RX_COAL_TARGET	0x20000
#define MLX4_EN_RX_COAL_TIME	0x10

#define MLX4_EN_TX_COAL_PKTS	64
#define MLX4_EN_TX_COAL_TIME	64

#define MLX4_EN_RX_RATE_LOW		400000
#define MLX4_EN_RX_COAL_TIME_LOW	0
#define MLX4_EN_RX_RATE_HIGH		450000
#define MLX4_EN_RX_COAL_TIME_HIGH	128
#define MLX4_EN_RX_SIZE_THRESH		1024
#define MLX4_EN_RX_RATE_THRESH		(1000000 / MLX4_EN_RX_COAL_TIME_HIGH)
#define MLX4_EN_SAMPLE_INTERVAL		0
#define MLX4_EN_AVG_PKT_SMALL		256
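
/*
 * Sketch only (added for clarity, not part of the original driver code): the
 * LOW/HIGH rate and coalescing-time values above are the kind of operating
 * points an adaptive moderation scheme interpolates between.  The helper
 * below is hypothetical and merely illustrates that mapping.
 */
static inline u16 mlx4_en_example_moder_time(u32 pkt_rate)
{
	if (pkt_rate < MLX4_EN_RX_RATE_LOW)
		return MLX4_EN_RX_COAL_TIME_LOW;
	if (pkt_rate > MLX4_EN_RX_RATE_HIGH)
		return MLX4_EN_RX_COAL_TIME_HIGH;
	/* Linear interpolation between the two operating points */
	return (u16)(MLX4_EN_RX_COAL_TIME_LOW +
		     (pkt_rate - MLX4_EN_RX_RATE_LOW) *
		     (MLX4_EN_RX_COAL_TIME_HIGH - MLX4_EN_RX_COAL_TIME_LOW) /
		     (MLX4_EN_RX_RATE_HIGH - MLX4_EN_RX_RATE_LOW));
}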

#define MLX4_EN_AUTO_CONF	0xffff

#define MLX4_EN_DEF_RX_PAUSE	1
#define MLX4_EN_DEF_TX_PAUSE	1

/* Interval between successive polls in the Tx routine when polling is used
 * instead of interrupts (in per-core Tx rings) - should be power of 2 */
#define MLX4_EN_TX_POLL_MODER	16
#define MLX4_EN_TX_POLL_TIMEOUT	(HZ / 4)

#define MLX4_EN_64_ALIGN	(64 - NET_SKB_PAD)
#define SMALL_PACKET_SIZE      (256 - NET_IP_ALIGN)
#define HEADER_COPY_SIZE       (128)
#define MLX4_LOOPBACK_TEST_PAYLOAD (HEADER_COPY_SIZE - ETHER_HDR_LEN)

#define MLX4_EN_MIN_MTU		46
#define ETH_BCAST		0xffffffffffffULL

#define MLX4_EN_LOOPBACK_RETRIES	5
#define MLX4_EN_LOOPBACK_TIMEOUT	100

#ifdef MLX4_EN_PERF_STAT
/* Number of samples to 'average' */
#define AVG_SIZE			128
#define AVG_FACTOR			1024

#define INC_PERF_COUNTER(cnt)		(++(cnt))
#define ADD_PERF_COUNTER(cnt, add)	((cnt) += (add))
#define AVG_PERF_COUNTER(cnt, sample) \
	((cnt) = ((cnt) * (AVG_SIZE - 1) + (sample) * AVG_FACTOR) / AVG_SIZE)
#define GET_PERF_COUNTER(cnt)		(cnt)
#define GET_AVG_PERF_COUNTER(cnt)	((cnt) / AVG_FACTOR)

#else

#define INC_PERF_COUNTER(cnt)		do {} while (0)
#define ADD_PERF_COUNTER(cnt, add)	do {} while (0)
#define AVG_PERF_COUNTER(cnt, sample)	do {} while (0)
#define GET_PERF_COUNTER(cnt)		(0)
#define GET_AVG_PERF_COUNTER(cnt)	(0)
#endif /* MLX4_EN_PERF_STAT */
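
/*
 * Illustrative note (added for clarity, not from the original header):
 * AVG_PERF_COUNTER keeps a fixed-point exponential moving average scaled by
 * AVG_FACTOR so integer division does not discard the fraction.  Feeding a
 * steady sample of 100 drives the counter toward 100 * AVG_FACTOR = 102400,
 * and GET_AVG_PERF_COUNTER() then reports 102400 / 1024 = 100.
 */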

/* Configurables */

enum cq_type {
	RX = 0,
	TX = 1,
};

/* Useful macros */

#define ROUNDUP_LOG2(x)		ilog2(roundup_pow_of_two(x))
#define XNOR(x, y)		(!(x) == !(y))
#define ILLEGAL_MAC(addr)	(addr == 0xffffffffffffULL || addr == 0x0)
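
/*
 * Example (added for clarity, not in the original header): ROUNDUP_LOG2(1000)
 * rounds 1000 up to the next power of two (1024) and returns its log, 10.
 * XNOR(x, y) is true when both arguments are zero or both are non-zero.
 */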

struct mlx4_en_tx_info {
	/* bus_dmamap_t dma_map; */
	// struct mbuf *mb;
	u32 nr_txbb;
	u32 nr_bytes;
	genoffset_t offset;
	genoffset_t length;
};

#define MLX4_EN_BIT_DESC_OWN	0x80000000
#define CTRL_SIZE	sizeof(struct mlx4_wqe_ctrl_seg)
#define MLX4_EN_MEMTYPE_PAD	0x100
#define DS_SIZE		sizeof(struct mlx4_wqe_data_seg)

struct mlx4_en_tx_desc {
	struct mlx4_wqe_ctrl_seg ctrl;
	union {
		struct mlx4_wqe_data_seg data; /* at least one data segment */
		struct mlx4_wqe_lso_seg lso;
		struct mlx4_wqe_inline_seg inl;
	};
};

#define MLX4_EN_USE_SRQ		0x01000000

#define MLX4_EN_RX_BUDGET 64

#define	MLX4_EN_TX_MAX_DESC_SIZE 512		/* bytes */
#define	MLX4_EN_TX_MAX_MBUF_SIZE 65536		/* bytes */
#define	MLX4_EN_TX_MAX_PAYLOAD_SIZE 65536	/* bytes */
#define	MLX4_EN_TX_MAX_MBUF_FRAGS \
    ((MLX4_EN_TX_MAX_DESC_SIZE - 128) / DS_SIZE_ALIGNMENT)	/* units */
#define	MLX4_EN_TX_WQE_MAX_WQEBBS			\
    (MLX4_EN_TX_MAX_DESC_SIZE / TXBB_SIZE)	/* units */
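
/*
 * Worked example (added for clarity): with MLX4_EN_TX_MAX_DESC_SIZE of 512
 * bytes and TXBB_SIZE of 64 bytes, a single send WQE spans at most
 * 512 / 64 = 8 basic blocks (MLX4_EN_TX_WQE_MAX_WQEBBS).  The fragment limit
 * reserves 128 bytes of the descriptor for the control/LSO portion and
 * divides the remaining 384 bytes by the aligned data-segment size.
 */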

#define MLX4_EN_CX3_LOW_ID	0x1000
#define MLX4_EN_CX3_HIGH_ID	0x1005

struct mlx4_en_tx_ring {
	spinlock_t tx_lock;
	/* bus_dma_tag_t dma_tag; */
	struct mlx4_hwq_resources wqres;
	u32 size;		/* number of TXBBs */
	u32 size_mask;
	u16 stride;
	u16 cqn;		/* index of port CQ associated with this ring */
	u32 prod;
	u32 cons;
	u32 buf_size;
	u32 doorbell_qpn;
	void *buf;
	u16 poll_cnt;
	int blocked;
	struct mlx4_en_tx_info *tx_info;
	u8 queue_index;
	/* cpuset_t affinity_mask; */
	struct buf_ring *br;
	u32 last_nr_txbb;
	struct mlx4_qp qp;
	struct mlx4_qp_context context;
	int qpn;
	enum mlx4_qp_state qp_state;
	struct mlx4_srq dummy;
	unsigned long bytes;
	unsigned long packets;
	unsigned long tx_csum;
	unsigned long queue_stopped;
	unsigned long oversized_packets;
	unsigned long wake_queue;
	struct mlx4_bf bf;
	bool bf_enabled;
	int hwtstamp_tx_type;
	spinlock_t comp_lock;
	int inline_thold;
	u64 watchdog_time;
};

struct mlx4_en_rx_desc {
	/* actual number of entries depends on rx ring stride */
	struct mlx4_wqe_data_seg data[0];
};
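
/*
 * Illustrative note (added for clarity, not from the original header): the
 * flexible data[] array is sized by the ring stride.  A 64-byte RX descriptor
 * stride, for instance, would hold 64 / DS_SIZE scatter entries per
 * descriptor; the exact stride is chosen at ring-creation time.
 */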

struct mlx4_en_rx_mbuf {
	/* bus_dmamap_t dma_map; */
	// struct mbuf *mbuf;
	void *buffer;
	uint64_t offset, length;
};

struct mlx4_en_rx_spare {
	/* bus_dmamap_t dma_map; */
	// struct mbuf *mbuf;
	void *buffer;
	u64 paddr_be;
};

struct mlx4_en_rx_ring {
	struct mlx4_hwq_resources wqres;
	/* bus_dma_tag_t dma_tag; */
	struct mlx4_en_rx_spare spare;
	u32 size;		/* number of Rx descs */
	u32 actual_size;
	u32 size_mask;
	u16 stride;
	u16 log_stride;
	u16 cqn;		/* index of port CQ associated with this ring */
	u32 prod;
	u32 cons;
	u32 buf_size;
	u8 fcs_del;
	u32 rx_mb_size;
	int qpn;
	u8 *buf;
	struct mlx4_en_rx_mbuf *mbuf;
	unsigned long errors;
	unsigned long bytes;
	unsigned long packets;
#ifdef LL_EXTENDED_STATS
	unsigned long yields;
	unsigned long misses;
	unsigned long cleaned;
#endif
	unsigned long csum_ok;
	unsigned long csum_none;
	int hwtstamp_rx_filter;
	int numa_node;
	/* struct lro_ctrl lro; */
};
/*
 static inline int mlx4_en_can_lro(__be16 status)
 {
 const __be16 status_all = cpu_to_be16(
 MLX4_CQE_STATUS_IPV4    |
 MLX4_CQE_STATUS_IPV4F   |
 MLX4_CQE_STATUS_IPV6    |
 MLX4_CQE_STATUS_IPV4OPT |
 MLX4_CQE_STATUS_TCP     |
 MLX4_CQE_STATUS_UDP     |
 MLX4_CQE_STATUS_IPOK);
 const __be16 status_ipv4_ipok_tcp = cpu_to_be16(
 MLX4_CQE_STATUS_IPV4    |
 MLX4_CQE_STATUS_IPOK    |
 MLX4_CQE_STATUS_TCP);
 const __be16 status_ipv6_ipok_tcp = cpu_to_be16(
 MLX4_CQE_STATUS_IPV6    |
 MLX4_CQE_STATUS_IPOK    |
 MLX4_CQE_STATUS_TCP);

 status &= status_all;
 return (status == status_ipv4_ipok_tcp ||
 status == status_ipv6_ipok_tcp);
 }
 */
struct mlx4_en_cq {
	struct mlx4_cq mcq;
	struct mlx4_hwq_resources wqres;
	int ring;
	spinlock_t lock;
	void *dev;
	/* Per-core Tx cq processing support */
	struct timer_list timer;
	int size;
	int buf_size;
	unsigned vector;
	enum cq_type is_tx;
	u16 moder_time;
	u16 moder_cnt;
	struct mlx4_cqe *buf;
	/* struct task cq_task;
	 struct taskqueue *tq; */
#define MLX4_EN_OPCODE_ERROR	0x1e
	u32 tot_rx;
	u32 tot_tx;
	u32 curr_poll_rx_cpu_id;

#ifdef CONFIG_NET_RX_BUSY_POLL
	unsigned int state;
#define MLX4_EN_CQ_STATEIDLE		0
#define MLX4_EN_CQ_STATENAPI		1	/* NAPI owns this CQ */
#define MLX4_EN_CQ_STATEPOLL		2	/* poll owns this CQ */
#define MLX4_CQ_LOCKED (MLX4_EN_CQ_STATENAPI | MLX4_EN_CQ_STATEPOLL)
#define MLX4_EN_CQ_STATENAPI_YIELD	4	/* NAPI yielded this CQ */
#define MLX4_EN_CQ_STATEPOLL_YIELD	8	/* poll yielded this CQ */
#define CQ_YIELD (MLX4_EN_CQ_STATENAPI_YIELD | MLX4_EN_CQ_STATEPOLL_YIELD)
#define CQ_USER_PEND (MLX4_EN_CQ_STATEPOLL | MLX4_EN_CQ_STATEPOLL_YIELD)
	spinlock_t poll_lock;	/* protects from LLS/napi conflicts */
#endif /* CONFIG_NET_RX_BUSY_POLL */
};

struct mlx4_en_port_profile {
	u32 flags;
	u32 tx_ring_num;
	u32 rx_ring_num;
	u32 tx_ring_size;
	u32 rx_ring_size;
	u8 rx_pause;
	u8 rx_ppp;
	u8 tx_pause;
	u8 tx_ppp;
	int rss_rings;
};

struct mlx4_en_profile {
	int rss_xor;
	int udp_rss;
	u8 rss_mask;
	u32 active_ports;
	u32 small_pkt_int;
	u8 no_reset;
	u8 num_tx_rings_p_up;
	struct mlx4_en_port_profile prof[MLX4_MAX_PORTS + 1];
};

struct mlx4_en_dev {
	struct mlx4_dev *dev;
	struct pci_dev *pdev;
	/* struct mutex state_lock; */
	struct mlx4_queue *port_queue[MLX4_MAX_PORTS + 1];
	u32 port_cnt;
	bool device_up;
	struct mlx4_en_profile profile;
	u32 LSO_support;
	struct workqueue_struct *workqueue;
	struct device *dma_device;
	void /*__iomem*/ *uar_map;
	struct mlx4_uar priv_uar;
	struct mlx4_mr mr;
	u32 priv_pdn;
	spinlock_t uar_lock;
	u8 mac_removed[MLX4_MAX_PORTS + 1];
	unsigned long last_overflow_check;
	unsigned long overflow_period;
};

struct mlx4_en_rss_map {
	int base_qpn;
	struct mlx4_qp qps[MAX_RX_RINGS];
	enum mlx4_qp_state state[MAX_RX_RINGS];
	struct mlx4_qp indir_qp;
	enum mlx4_qp_state indir_state;
};

struct mlx4_en_port_state {
	int link_state;
	int link_speed;
	int transciver;
	int autoneg;
};

enum mlx4_en_mclist_act {
	MCLIST_NONE,
	MCLIST_REM,
	MCLIST_ADD,
};
/*
 struct mlx4_en_mc_list {
 struct list_head	list;
 enum mlx4_en_mclist_act	action;
 u8			addr[ETH_ALEN];
 u64			reg_id;
 };

 #ifdef CONFIG_MLX4_EN_DCB
 Minimal TC BW - setting to 0 will block traffic
 #define MLX4_EN_BW_MIN 1
 #define MLX4_EN_BW_MAX 100  Utilize 100% of the line

 #define MLX4_EN_TC_ETS 7

 #endif

 */
enum {
	MLX4_EN_FLAG_PROMISC		= (1 << 0),
	MLX4_EN_FLAG_MC_PROMISC		= (1 << 1),
	/* whether we need to enable hardware loopback by putting dmac
	 * in Tx WQE */
	MLX4_EN_FLAG_ENABLE_HW_LOOPBACK	= (1 << 2),
	/* whether we need to drop packets that hardware loopback-ed */
	MLX4_EN_FLAG_RX_FILTER_NEEDED	= (1 << 3),
	MLX4_EN_FLAG_FORCE_PROMISC	= (1 << 4),
#ifdef CONFIG_MLX4_EN_DCB
	MLX4_EN_FLAG_DCB_ENABLED	= (1 << 5)
#endif
};

#define MLX4_EN_MAC_HASH_SIZE (1 << BITS_PER_BYTE)
#define MLX4_EN_MAC_HASH_IDX 5
/*
 struct en_port {
 struct kobject		kobj;
 struct mlx4_dev		*dev;
 u8			port_num;
 u8			vport_num;
 };
 */

struct mlx4_queue;

struct mlx4_en_priv {
	struct mlx4_en_dev *mdev;
	struct mlx4_en_port_profile *prof;
	struct mlx4_queue *devif_queue;
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
	struct mlx4_en_port_state port_state;
	spinlock_t stats_lock;
	/* To allow rules removal while port is going down */
	struct list_head ethtool_list;

	unsigned long last_moder_packets[MAX_RX_RINGS];
	unsigned long last_moder_tx_packets;
	unsigned long last_moder_bytes[MAX_RX_RINGS];
	unsigned long last_moder_jiffies;
	int last_moder_time[MAX_RX_RINGS];
	u16 rx_usecs;
	u16 rx_frames;
	u16 tx_usecs;
	u16 tx_frames;
	u32 pkt_rate_low;
	u32 rx_usecs_low;
	u32 pkt_rate_high;
	u32 rx_usecs_high;
	u32 sample_interval;
	u32 adaptive_rx_coal;
	u32 msg_enable;
	u32 loopback_ok;
	u32 validate_loopback;

	struct mlx4_hwq_resources res;
	int link_state;
	int last_link_state;
	bool port_up;
	int port;
	int registered;
	int allocated;
	int stride;
	unsigned char current_mac[ETH_ALEN + 2];
	u64 mac;
	int mac_index;
	unsigned max_mtu;
	int base_qpn;
	int cqe_factor;

	struct mlx4_en_rss_map rss_map;
	u32 flags;
	u8 num_tx_rings_p_up;
	u32 tx_ring_num;
	u32 rx_ring_num;
	u32 rx_mb_size;

	struct mlx4_en_tx_ring **tx_ring;
	struct mlx4_en_rx_ring *rx_ring[MAX_RX_RINGS];
	struct mlx4_en_cq **tx_cq;
	struct mlx4_en_cq *rx_cq[MAX_RX_RINGS];
	struct mlx4_qp drop_qp;
	/* struct work_struct rx_mode_task;
	 struct work_struct watchdog_task;
	 struct work_struct linkstate_task;
	 struct delayed_work stats_task;
	 struct delayed_work service_task; */
	struct mlx4_en_perf_stats pstats;
	struct mlx4_en_pkt_stats pkstats;
	struct mlx4_en_flow_stats flowstats[MLX4_NUM_PRIORITIES];
	struct mlx4_en_port_stats port_stats;
	struct mlx4_en_vport_stats vport_stats;
	struct mlx4_en_vf_stats vf_stats;
	DECLARE_BITMAP(stats_bitmap, NUM_ALL_STATS);
	struct list_head mc_list;
	struct list_head curr_list;
	u64 broadcast_id;
	struct mlx4_en_stat_out_mbox hw_stats;
	int vids[128];
	bool wol;
	struct device *ddev;
	struct dentry *dev_root;
	u32 counter_index;
	/* eventhandler_tag vlan_attach;
	 eventhandler_tag vlan_detach;
	 struct callout watchdog_timer;
	 struct ifmedia media; */
	volatile int blocked;
	/* struct sysctl_oid *sysctl;
	 struct sysctl_ctx_list conf_ctx;
	 struct sysctl_ctx_list stat_ctx; */
#define MLX4_EN_MAC_HASH_IDX 5
	struct hlist_head mac_hash[MLX4_EN_MAC_HASH_SIZE];

#ifdef CONFIG_MLX4_EN_DCB
	struct ieee_ets ets;
	u16 maxrate[IEEE_8021QAZ_MAX_TCS];
	u8 dcbx_cap;
#endif
#ifdef CONFIG_RFS_ACCEL
	spinlock_t filters_lock;
	int last_filter_id;
	struct list_head filters;
	struct hlist_head filter_hash[1 << MLX4_EN_FILTER_HASH_SHIFT];
#endif
	struct en_port *vf_ports[MLX4_MAX_NUM_VF];
	unsigned long last_ifq_jiffies;
	u64 if_counters_rx_errors;
	u64 if_counters_rx_no_buffer;

	/*********** VLAD ************/
	unsigned int if_mtu;
};
/*
 enum mlx4_en_wol {
 MLX4_EN_WOL_MAGIC = (1ULL << 61),
 MLX4_EN_WOL_ENABLED = (1ULL << 62),
 };

 struct mlx4_mac_entry {
 struct hlist_node hlist;
 unsigned char mac[ETH_ALEN + 2];
 u64 reg_id;
 };

 #ifdef CONFIG_NET_RX_BUSY_POLL
 static inline void mlx4_en_cq_init_lock(struct mlx4_en_cq *cq)
 {
 spin_lock_init(&cq->poll_lock);
 cq->state = MLX4_EN_CQ_STATEIDLE;
 }

 called from the device poll routine to get ownership of a cq
 static inline bool mlx4_en_cq_lock_napi(struct mlx4_en_cq *cq)
 {
 int rc = true;
 spin_lock(&cq->poll_lock);
 if (cq->state & MLX4_CQ_LOCKED) {
 WARN_ON(cq->state & MLX4_EN_CQ_STATENAPI);
 cq->state |= MLX4_EN_CQ_STATENAPI_YIELD;
 rc = false;
 } else
 we don't care if someone yielded
 cq->state = MLX4_EN_CQ_STATENAPI;
 spin_unlock(&cq->poll_lock);
 return rc;
 }

 returns true if someone tried to get the cq while napi had it
 static inline bool mlx4_en_cq_unlock_napi(struct mlx4_en_cq *cq)
 {
 int rc = false;
 spin_lock(&cq->poll_lock);
 WARN_ON(cq->state & (MLX4_EN_CQ_STATEPOLL |
 MLX4_EN_CQ_STATENAPI_YIELD));

 if (cq->state & MLX4_EN_CQ_STATEPOLL_YIELD)
 rc = true;
 cq->state = MLX4_EN_CQ_STATEIDLE;
 spin_unlock(&cq->poll_lock);
 return rc;
 }

 called from mlx4_en_low_latency_poll()
 static inline bool mlx4_en_cq_lock_poll(struct mlx4_en_cq *cq)
 {
 int rc = true;
 spin_lock_bh(&cq->poll_lock);
 if ((cq->state & MLX4_CQ_LOCKED)) {
 struct net_device *dev = cq->dev;
 struct mlx4_en_priv *priv = netdev_priv(dev);
 struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring];

 cq->state |= MLX4_EN_CQ_STATEPOLL_YIELD;
 rc = false;
 #ifdef LL_EXTENDED_STATS
 rx_ring->yields++;
 #endif
 } else
 preserve yield marks
 cq->state |= MLX4_EN_CQ_STATEPOLL;
 spin_unlock_bh(&cq->poll_lock);
 return rc;
 }

 returns true if someone tried to get the cq while it was locked
 static inline bool mlx4_en_cq_unlock_poll(struct mlx4_en_cq *cq)
 {
 int rc = false;
 spin_lock_bh(&cq->poll_lock);
 WARN_ON(cq->state & (MLX4_EN_CQ_STATENAPI));

 if (cq->state & MLX4_EN_CQ_STATEPOLL_YIELD)
 rc = true;
 cq->state = MLX4_EN_CQ_STATEIDLE;
 spin_unlock_bh(&cq->poll_lock);
 return rc;
 }

 true if a socket is polling, even if it did not get the lock
 static inline bool mlx4_en_cq_ll_polling(struct mlx4_en_cq *cq)
 {
 WARN_ON(!(cq->state & MLX4_CQ_LOCKED));
 return cq->state & CQ_USER_PEND;
 }
 #else
 static inline void mlx4_en_cq_init_lock(struct mlx4_en_cq *cq)
 {
 }

 static inline bool mlx4_en_cq_lock_napi(struct mlx4_en_cq *cq)
 {
 return true;
 }

 static inline bool mlx4_en_cq_unlock_napi(struct mlx4_en_cq *cq)
 {
 return false;
 }

 static inline bool mlx4_en_cq_lock_poll(struct mlx4_en_cq *cq)
 {
 return false;
 }

 static inline bool mlx4_en_cq_unlock_poll(struct mlx4_en_cq *cq)
 {
 return false;
 }

 static inline bool mlx4_en_cq_ll_polling(struct mlx4_en_cq *cq)
 {
 return false;
 }
 #endif  CONFIG_NET_RX_BUSY_POLL

 #define MLX4_EN_WOL_DO_MODIFY (1ULL << 63)

 void mlx4_en_destroy_netdev(struct net_device *dev);
 */

struct mlx4_queue;
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
			struct mlx4_en_port_profile *prof,
			struct mlx4_queue *queue);
int mlx4_en_start_port(struct mlx4_en_priv *priv);
/*
 void mlx4_en_stop_port(struct net_device *dev);

 void mlx4_en_free_resources(struct mlx4_en_priv *priv);
 */
int mlx4_en_alloc_resources(struct mlx4_en_priv *priv);
/*
 int mlx4_en_pre_config(struct mlx4_en_priv *priv);
 */
int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq,
		      int entries, int ring, enum cq_type mode, int node);
/*
 void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq);
 */
int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
			int cq_idx);
/*
 void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
 int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
 */
int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
void mlx4_en_tx_irq(struct mlx4_cq *mcq);
/*
 u16 mlx4_en_select_queue(struct net_device *dev, struct mbuf *mb);

 int mlx4_en_transmit(struct ifnet *dev, struct mbuf *m);
 */
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
			   struct mlx4_en_tx_ring **pring, u32 size,
			   u16 stride, int node, int queue_idx);
/*
 void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
 struct mlx4_en_tx_ring **pring);
 */
int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_tx_ring *ring, int cq,
			     int user_prio);
/*
 void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
 struct mlx4_en_tx_ring *ring);
 void mlx4_en_qflush(struct ifnet *dev);
 */
int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
			   struct mlx4_en_rx_ring **pring, u32 size, int node);
/*
 void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
 struct mlx4_en_rx_ring **pring,
 u32 size, u16 stride);
 */
void mlx4_en_tx_que(void *context, int pending);
void mlx4_en_rx_que(void *context, int pending);
int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv);
/*
 void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
 struct mlx4_en_rx_ring *ring);
 */
int mlx4_en_process_rx_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
			  int budget);
/*
 void mlx4_en_poll_tx_cq(unsigned long data);
 */
void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
			     int is_tx, int rss, int qpn, int cqn,
			     int user_prio, struct mlx4_qp_context *context);
void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event);
int mlx4_en_map_buffer(struct mlx4_buf *buf);
/*
 void mlx4_en_unmap_buffer(struct mlx4_buf *buf);
 */
void mlx4_en_calc_rx_buf(struct mlx4_en_priv *priv);
int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv);
/*
 void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv);
 */
int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv);
/*
 void mlx4_en_destroy_drop_qp(struct mlx4_en_priv *priv);
 int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring);
 */
void mlx4_en_rx_irq(struct mlx4_cq *mcq);
/*
 int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode);
 int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv);
 */
int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_priv *priv, u8 port, u8 reset);
int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port);
/*
 int mlx4_en_get_vport_stats(struct mlx4_en_dev *mdev, u8 port);
 void mlx4_en_create_debug_files(struct mlx4_en_priv *priv);
 void mlx4_en_delete_debug_files(struct mlx4_en_priv *priv);
 int mlx4_en_register_debugfs(void);
 void mlx4_en_unregister_debugfs(void);

 #ifdef CONFIG_MLX4_EN_DCB
 extern const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops;
 extern const struct dcbnl_rtnl_ops mlx4_en_dcbnl_pfc_ops;
 #endif

 int mlx4_en_setup_tc(struct net_device *dev, u8 up);

 #ifdef CONFIG_RFS_ACCEL
 void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv,
 struct mlx4_en_rx_ring *rx_ring);
 #endif

 #define MLX4_EN_NUM_SELF_TEST	5
 void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf);
 void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev);

 * Functions for time stamping

 #define SKBTX_HW_TSTAMP (1 << 0)
 #define SKBTX_IN_PROGRESS (1 << 2)

 u64 mlx4_en_get_cqe_ts(struct mlx4_cqe *cqe);

 Functions for caching and restoring statistics
 int mlx4_en_get_sset_count(struct net_device *dev, int sset);
 void mlx4_en_restore_ethtool_stats(struct mlx4_en_priv *priv,
 u64 *data);

 * Globals

 extern const struct ethtool_ops mlx4_en_ethtool_ops;

 * Defines for link speed - needed by selftest
 */
#define MLX4_EN_LINK_SPEED_1G	1000
#define MLX4_EN_LINK_SPEED_10G	10000
#define MLX4_EN_LINK_SPEED_40G	40000

enum {
	NETIF_MSG_DRV		= 0x0001,
	NETIF_MSG_PROBE		= 0x0002,
	NETIF_MSG_LINK		= 0x0004,
	NETIF_MSG_TIMER		= 0x0008,
	NETIF_MSG_IFDOWN	= 0x0010,
	NETIF_MSG_IFUP		= 0x0020,
	NETIF_MSG_RX_ERR	= 0x0040,
	NETIF_MSG_TX_ERR	= 0x0080,
	NETIF_MSG_TX_QUEUED	= 0x0100,
	NETIF_MSG_INTR		= 0x0200,
	NETIF_MSG_TX_DONE	= 0x0400,
	NETIF_MSG_RX_STATUS	= 0x0800,
	NETIF_MSG_PKTDATA	= 0x1000,
	NETIF_MSG_HW		= 0x2000,
	NETIF_MSG_WOL		= 0x4000,
};
/*
 * printk / logging functions

 #define en_print(level, priv, format, arg...)                   \
        {                                                       \
        if ((priv)->registered)                                 \
                printk(level "%s: %s: " format, DRV_NAME,       \
                        (priv->dev)->if_xname, ## arg); \
        else                                                    \
                printk(level "%s: %s: Port %d: " format,        \
                        DRV_NAME, dev_name(&priv->mdev->pdev->dev), \
                        (priv)->port, ## arg);                  \
        }

 #define en_dbg(mlevel, priv, format, arg...)			\
do {								\
	if (NETIF_MSG_##mlevel & priv->msg_enable)		\
		en_print(KERN_DEBUG, priv, format, ##arg);	\
} while (0)
 #define en_warn(priv, format, arg...)			\
	en_print(KERN_WARNING, priv, format, ##arg)
 #define en_err(priv, format, arg...)			\
	en_print(KERN_ERR, priv, format, ##arg)
 #define en_info(priv, format, arg...)			\
	en_print(KERN_INFO, priv, format, ## arg)

 #define mlx4_err(mdev, format, arg...)			\
	pr_err("%s %s: " format, DRV_NAME,		\
	       dev_name(&mdev->pdev->dev), ##arg)
 #define mlx4_info(mdev, format, arg...)			\
	pr_info("%s %s: " format, DRV_NAME,		\
		dev_name(&mdev->pdev->dev), ##arg)
 #define mlx4_warn(mdev, format, arg...)			\
	pr_warning("%s %s: " format, DRV_NAME,		\
		   dev_name(&mdev->pdev->dev), ##arg)
 */

#endif /* _MLX4_EN_H_ */