/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2007 - 2018 Intel Corporation. */

/* Linux PRO/1000 Ethernet Driver main header file */

#ifndef _IGB_H_
#define _IGB_H_

#include "e1000_mac.h"
#include "e1000_82575.h"

#include <linux/timecounter.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/bitops.h>
#include <linux/if_vlan.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/pci.h>
#include <linux/mdio.h>

#include <net/xdp.h>

struct igb_adapter;

#define E1000_PCS_CFG_IGN_SD	1

/* Interrupt defines */
#define IGB_START_ITR		648 /* ~6000 ints/sec */
#define IGB_4K_ITR		980
#define IGB_20K_ITR		196
#define IGB_70K_ITR		56
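/* These are interval values written to the per-vector EITR interrupt throttle
 * registers: a larger value means a longer minimum gap between interrupts and
 * therefore a lower interrupt rate.  The 4K/20K/70K names correspond roughly
 * to 4000, 20000 and 70000 interrupts per second.
 */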

/* TX/RX descriptor defines */
#define IGB_DEFAULT_TXD		256
#define IGB_DEFAULT_TX_WORK	128
#define IGB_MIN_TXD		64
#define IGB_MAX_TXD		4096

#define IGB_DEFAULT_RXD		256
#define IGB_MIN_RXD		64
#define IGB_MAX_RXD		4096

#define IGB_DEFAULT_ITR		3 /* dynamic */
#define IGB_MAX_ITR_USECS	10000
#define IGB_MIN_ITR_USECS	10
#define NON_Q_VECTORS		1
#define MAX_Q_VECTORS		8
#define MAX_MSIX_ENTRIES	10

/* Transmit and receive queues */
#define IGB_MAX_RX_QUEUES	8
#define IGB_MAX_RX_QUEUES_82575	4
#define IGB_MAX_RX_QUEUES_I211	2
#define IGB_MAX_TX_QUEUES	8
#define IGB_MAX_VF_MC_ENTRIES	30
#define IGB_MAX_VF_FUNCTIONS	8
#define IGB_MAX_VFTA_ENTRIES	128
#define IGB_82576_VF_DEV_ID	0x10CA
#define IGB_I350_VF_DEV_ID	0x1520

/* NVM version defines */
#define IGB_MAJOR_MASK		0xF000
#define IGB_MINOR_MASK		0x0FF0
#define IGB_BUILD_MASK		0x000F
#define IGB_COMB_VER_MASK	0x00FF
#define IGB_MAJOR_SHIFT		12
#define IGB_MINOR_SHIFT		4
#define IGB_COMB_VER_SHFT	8
#define IGB_NVM_VER_INVALID	0xFFFF
#define IGB_ETRACK_SHIFT	16
#define NVM_ETRACK_WORD		0x0042
#define NVM_COMB_VER_OFF	0x0083
#define NVM_COMB_VER_PTR	0x003d
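/* Worked example (illustrative): for an NVM version word of 0x1234,
 * major = (0x1234 & IGB_MAJOR_MASK) >> IGB_MAJOR_SHIFT = 1,
 * minor = (0x1234 & IGB_MINOR_MASK) >> IGB_MINOR_SHIFT = 0x23,
 * build = 0x1234 & IGB_BUILD_MASK = 4.
 * These fields are consumed when building the fw_version string.
 */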

/* Transmit and receive latency (for PTP timestamps) */
#define IGB_I210_TX_LATENCY_10		9542
#define IGB_I210_TX_LATENCY_100		1024
#define IGB_I210_TX_LATENCY_1000	178
#define IGB_I210_RX_LATENCY_10		20662
#define IGB_I210_RX_LATENCY_100		2213
#define IGB_I210_RX_LATENCY_1000	448
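/* Link-speed dependent i210 PHY latencies (values in nanoseconds) that the
 * PTP code uses to adjust hardware Tx/Rx timestamps for time spent in the
 * PHY, so reported times correspond to the wire rather than the MAC.
 */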

/* XDP */
#define IGB_XDP_PASS		0
#define IGB_XDP_CONSUMED	BIT(0)
#define IGB_XDP_TX		BIT(1)
#define IGB_XDP_REDIR		BIT(2)
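/* Illustrative only: the Rx cleanup path ORs these verdict bits together over
 * a whole NAPI poll so that XDP Tx tail bumps and redirect flushes only need
 * to happen once per poll, roughly:
 *
 *	unsigned int xdp_xmit = 0;
 *	...per packet: xdp_xmit |= IGB_XDP_TX or IGB_XDP_REDIR...
 *	if (xdp_xmit & IGB_XDP_REDIR)
 *		xdp_do_flush();
 */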

struct vf_data_storage {
	unsigned char vf_mac_addresses[ETH_ALEN];
	u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES];
	u16 num_vf_mc_hashes;
	u32 flags;
	unsigned long last_nack;
	u16 pf_vlan; /* When set, guest VLAN config not allowed. */
	u16 pf_qos;
	u16 tx_rate;
	bool spoofchk_enabled;
	bool trusted;
};

/* Number of unicast MAC filters reserved for the PF in the RAR registers */
#define IGB_PF_MAC_FILTERS_RESERVED	3

struct vf_mac_filter {
	struct list_head l;
	int vf;
	bool free;
	u8 vf_mac[ETH_ALEN];
};

#define IGB_VF_FLAG_CTS            0x00000001 /* VF is clear to send data */
#define IGB_VF_FLAG_UNI_PROMISC    0x00000002 /* VF has unicast promisc */
#define IGB_VF_FLAG_MULTI_PROMISC  0x00000004 /* VF has multicast promisc */
#define IGB_VF_FLAG_PF_SET_MAC     0x00000008 /* PF has set MAC address */

/* RX descriptor control thresholds.
 * PTHRESH - MAC will consider prefetch if it has fewer than this number of
 *           descriptors available in its onboard memory.
 *           Setting this to 0 disables RX descriptor prefetch.
 * HTHRESH - MAC will only prefetch if there are at least this many descriptors
 *           available in host memory.
 *           If PTHRESH is 0, this should also be 0.
 * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back
 *           descriptors until either it has this many to write back, or the
 *           ITR timer expires.
 */
#define IGB_RX_PTHRESH	((hw->mac.type == e1000_i354) ? 12 : 8)
#define IGB_RX_HTHRESH	8
#define IGB_TX_PTHRESH	((hw->mac.type == e1000_i354) ? 20 : 8)
#define IGB_TX_HTHRESH	1
#define IGB_RX_WTHRESH	((hw->mac.type == e1000_82576 && \
			  (adapter->flags & IGB_FLAG_HAS_MSIX)) ? 1 : 4)
#define IGB_TX_WTHRESH	((hw->mac.type == e1000_82576 && \
			  (adapter->flags & IGB_FLAG_HAS_MSIX)) ? 1 : 16)

/* this is the size past which hardware will drop packets when setting LPE=0 */
#define MAXIMUM_ETHERNET_VLAN_SIZE 1522

#define IGB_ETH_PKT_HDR_PAD	(ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
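/* Arithmetic behind the two sizes above: 1522 = 1500 byte payload + 14
 * (ETH_HLEN) + 4 (VLAN_HLEN) + 4 (ETH_FCS_LEN); the header pad works out to
 * 14 + 4 + 2 * 4 = 26 bytes, leaving room for a double (QinQ) VLAN tag.
 */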

/* Supported Rx Buffer Sizes */
#define IGB_RXBUFFER_256	256
#define IGB_RXBUFFER_1536	1536
#define IGB_RXBUFFER_2048	2048
#define IGB_RXBUFFER_3072	3072
#define IGB_RX_HDR_LEN		IGB_RXBUFFER_256
#define IGB_TS_HDR_LEN		16

/* Attempt to maximize the headroom available for incoming frames.  We
 * use a 2K buffer for receives and need 1536/1534 to store the data for
 * the frame.  This leaves us with 512 bytes of room.  From that we need
 * to deduct the space needed for the shared info and the padding needed
 * to IP align the frame.
 *
 * Note: For cache line sizes 256 or larger this value is going to end
 *	 up negative.  In these cases we should fall back to the 3K
 *	 buffers.
 */
#if (PAGE_SIZE < 8192)
#define IGB_MAX_FRAME_BUILD_SKB (IGB_RXBUFFER_1536 - NET_IP_ALIGN)
#define IGB_2K_TOO_SMALL_WITH_PADDING \
((NET_SKB_PAD + IGB_TS_HDR_LEN + IGB_RXBUFFER_1536) > SKB_WITH_OVERHEAD(IGB_RXBUFFER_2048))
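/* Rough numbers behind the check above (illustrative; all of these are
 * architecture dependent): SKB_WITH_OVERHEAD(2048) is 2048 bytes minus the
 * struct skb_shared_info footprint (~320 bytes on 64-bit), i.e. roughly 1728
 * usable bytes.  With a typical NET_SKB_PAD of 32-64 bytes plus IGB_TS_HDR_LEN
 * (16) plus a 1536 byte frame the data still fits, but with a large cache
 * line (and hence a large NET_SKB_PAD) the sum exceeds the usable space and
 * the driver must fall back to 3K buffers.
 */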

static inline int igb_compute_pad(int rx_buf_len)
{
	int page_size, pad_size;

	page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
	pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;

	return pad_size;
}

static inline int igb_skb_pad(void)
{
	int rx_buf_len;

	/* If a 2K buffer cannot handle a standard Ethernet frame then
	 * optimize padding for a 3K buffer instead of a 1.5K buffer.
	 *
	 * For a 3K buffer we need to add enough padding to allow for
	 * tailroom due to NET_IP_ALIGN possibly shifting us out of
	 * cache-line alignment.
	 */
	if (IGB_2K_TOO_SMALL_WITH_PADDING)
		rx_buf_len = IGB_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
	else
		rx_buf_len = IGB_RXBUFFER_1536;

	/* if needed make room for NET_IP_ALIGN */
	rx_buf_len -= NET_IP_ALIGN;

	return igb_compute_pad(rx_buf_len);
}

#define IGB_SKB_PAD	igb_skb_pad()
#else
#define IGB_SKB_PAD	(NET_SKB_PAD + NET_IP_ALIGN)
#endif

/* How many Rx Buffers do we bundle into one write to the hardware? */
#define IGB_RX_BUFFER_WRITE	16 /* Must be power of 2 */

#define IGB_RX_DMA_ATTR \
	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

#define AUTO_ALL_MODES		0
#define IGB_EEPROM_APME		0x0400

#ifndef IGB_MASTER_SLAVE
/* Switch to override PHY master/slave setting */
#define IGB_MASTER_SLAVE	e1000_ms_hw_default
#endif

#define IGB_MNG_VLAN_NONE	-1

enum igb_tx_flags {
	/* cmd_type flags */
	IGB_TX_FLAGS_VLAN	= 0x01,
	IGB_TX_FLAGS_TSO	= 0x02,
	IGB_TX_FLAGS_TSTAMP	= 0x04,

	/* olinfo flags */
	IGB_TX_FLAGS_IPV4	= 0x10,
	IGB_TX_FLAGS_CSUM	= 0x20,
};

/* VLAN info */
#define IGB_TX_FLAGS_VLAN_MASK	0xffff0000
#define IGB_TX_FLAGS_VLAN_SHIFT	16
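/* Illustrative use (hypothetical local variables): the 16-bit 802.1Q tag is
 * carried in the upper half of tx_flags, e.g.
 *
 *	tx_flags |= IGB_TX_FLAGS_VLAN;
 *	tx_flags |= (u32)vlan_tci << IGB_TX_FLAGS_VLAN_SHIFT;
 *
 * and is recovered later with
 * (tx_flags & IGB_TX_FLAGS_VLAN_MASK) >> IGB_TX_FLAGS_VLAN_SHIFT.
 */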

/* The largest size we can write to the descriptor is 65535.  In order to
 * maintain a power of two alignment we have to limit ourselves to 32K.
 */
#define IGB_MAX_TXD_PWR	15
#define IGB_MAX_DATA_PER_TXD	(1u << IGB_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGB_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
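/* Worked example (illustrative): a 48 KiB contiguous buffer needs
 * TXD_USE_COUNT(49152) = DIV_ROUND_UP(49152, 32768) = 2 data descriptors.
 * DESC_NEEDED is the worst-case reservation checked before queueing a frame:
 * one descriptor per possible fragment plus slack for the linear head, a
 * context descriptor, and the gap kept between tail and head.
 */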

/* EEPROM byte offsets */
#define IGB_SFF_8472_SWAP		0x5C
#define IGB_SFF_8472_COMP		0x5E

/* Bitmasks */
#define IGB_SFF_ADDRESSING_MODE		0x4
#define IGB_SFF_8472_UNSUP		0x00

/* TX resources are shared between XDP and netstack
 * and we need to tag the buffer type to distinguish them
 */
enum igb_tx_buf_type {
	IGB_TYPE_SKB = 0,
	IGB_TYPE_XDP,
};

/* wrapper around a pointer to a socket buffer,
 * so a DMA handle can be stored along with the buffer
 */
struct igb_tx_buffer {
	union e1000_adv_tx_desc *next_to_watch;
	unsigned long time_stamp;
	enum igb_tx_buf_type type;
	union {
		struct sk_buff *skb;
		struct xdp_frame *xdpf;
	};
	unsigned int bytecount;
	u16 gso_segs;
	__be16 protocol;

	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
	u32 tx_flags;
};

struct igb_rx_buffer {
	dma_addr_t dma;
	struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 page_offset;
#else
	__u16 page_offset;
#endif
	__u16 pagecnt_bias;
};

struct igb_tx_queue_stats {
	u64 packets;
	u64 bytes;
	u64 restart_queue;
	u64 restart_queue2;
};

struct igb_rx_queue_stats {
	u64 packets;
	u64 bytes;
	u64 drops;
	u64 csum_err;
	u64 alloc_failed;
};

struct igb_ring_container {
	struct igb_ring *ring;		/* pointer to linked list of rings */
	unsigned int total_bytes;	/* total bytes processed this int */
	unsigned int total_packets;	/* total packets processed this int */
	u16 work_limit;			/* total work allowed per interrupt */
	u8 count;			/* total number of rings in vector */
	u8 itr;				/* current ITR setting for ring */
};

struct igb_ring {
	struct igb_q_vector *q_vector;	/* backlink to q_vector */
	struct net_device *netdev;	/* back pointer to net_device */
	struct bpf_prog *xdp_prog;
	struct device *dev;		/* device pointer for dma mapping */
	union {				/* array of buffer info structs */
		struct igb_tx_buffer *tx_buffer_info;
		struct igb_rx_buffer *rx_buffer_info;
	};
	void *desc;			/* descriptor ring memory */
	unsigned long flags;		/* ring specific flags */
	void __iomem *tail;		/* pointer to ring tail register */
	dma_addr_t dma;			/* phys address of the ring */
	unsigned int size;		/* length of desc. ring in bytes */

	u16 count;			/* number of desc. in the ring */
	u8 queue_index;			/* logical index of the ring */
	u8 reg_idx;			/* physical index of the ring */
	bool launchtime_enable;		/* true if LaunchTime is enabled */
	bool cbs_enable;		/* indicates if CBS is enabled */
	s32 idleslope;			/* idleSlope in kbps */
	s32 sendslope;			/* sendSlope in kbps */
	s32 hicredit;			/* hiCredit in bytes */
	s32 locredit;			/* loCredit in bytes */
	/* everything past this point is written often */
	u16 next_to_clean;
	u16 next_to_use;
	u16 next_to_alloc;

	union {
		/* TX */
		struct {
			struct igb_tx_queue_stats tx_stats;
			struct u64_stats_sync tx_syncp;
			struct u64_stats_sync tx_syncp2;
		};
		/* RX */
		struct {
			struct sk_buff *skb;
			struct igb_rx_queue_stats rx_stats;
			struct u64_stats_sync rx_syncp;
		};
	};
	struct xdp_rxq_info xdp_rxq;
} ____cacheline_internodealigned_in_smp;

struct igb_q_vector {
	struct igb_adapter *adapter;	/* backlink */
	int cpu;			/* CPU for DCA */
	u32 eims_value;			/* EIMS mask value */

	u16 itr_val;
	u8 set_itr;
	void __iomem *itr_register;

	struct igb_ring_container rx, tx;

	struct napi_struct napi;
	struct rcu_head rcu;	/* to avoid race with update stats on free */
	char name[IFNAMSIZ + 9];

	/* for dynamic allocation of rings associated with this q_vector */
	struct igb_ring ring[] ____cacheline_internodealigned_in_smp;
};

enum e1000_ring_flags_t {
	IGB_RING_FLAG_RX_3K_BUFFER,
	IGB_RING_FLAG_RX_BUILD_SKB_ENABLED,
	IGB_RING_FLAG_RX_SCTP_CSUM,
	IGB_RING_FLAG_RX_LB_VLAN_BSWAP,
	IGB_RING_FLAG_TX_CTX_IDX,
	IGB_RING_FLAG_TX_DETECT_HANG
};

#define ring_uses_large_buffer(ring) \
	test_bit(IGB_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)
#define set_ring_uses_large_buffer(ring) \
	set_bit(IGB_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)
#define clear_ring_uses_large_buffer(ring) \
	clear_bit(IGB_RING_FLAG_RX_3K_BUFFER, &(ring)->flags)

#define ring_uses_build_skb(ring) \
	test_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
#define set_ring_build_skb_enabled(ring) \
	set_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
#define clear_ring_build_skb_enabled(ring) \
	clear_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)

static inline unsigned int igb_rx_bufsz(struct igb_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (ring_uses_large_buffer(ring))
		return IGB_RXBUFFER_3072;

	if (ring_uses_build_skb(ring))
		return IGB_MAX_FRAME_BUILD_SKB;
#endif
	return IGB_RXBUFFER_2048;
}

static inline unsigned int igb_rx_pg_order(struct igb_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (ring_uses_large_buffer(ring))
		return 1;
#endif
	return 0;
}

#define igb_rx_pg_size(_ring) (PAGE_SIZE << igb_rx_pg_order(_ring))
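/* With 4K pages, "large buffer" rings use order-1 (8 KiB) pages so that two
 * 3K buffers fit per page (2 * 3072 <= 8192), while the default layout packs
 * two 2K buffers into a single 4K page.  On architectures with pages of
 * 8 KiB or more, order-0 pages are always sufficient.
 */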

#define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS)

#define IGB_RX_DESC(R, i)	\
	(&(((union e1000_adv_rx_desc *)((R)->desc))[i]))
#define IGB_TX_DESC(R, i)	\
	(&(((union e1000_adv_tx_desc *)((R)->desc))[i]))
#define IGB_TX_CTXTDESC(R, i)	\
	(&(((struct e1000_adv_tx_context_desc *)((R)->desc))[i]))

/* igb_test_staterr - tests bits within Rx descriptor status and error fields */
static inline __le32 igb_test_staterr(union e1000_adv_rx_desc *rx_desc,
				      const u32 stat_err_bits)
{
	return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
}
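
/* Illustrative use of the descriptor accessors above (hypothetical ring and
 * index variables):
 *
 *	union e1000_adv_rx_desc *rx_desc = IGB_RX_DESC(rx_ring, i);
 *
 *	if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_DD))
 *		break;		(descriptor not yet written back by hardware)
 */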

/* igb_desc_unused - calculate if we have unused descriptors */
static inline int igb_desc_unused(struct igb_ring *ring)
{
	if (ring->next_to_clean > ring->next_to_use)
		return ring->next_to_clean - ring->next_to_use - 1;

	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
}
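
/* Example: with count = 256, next_to_use = 10 and next_to_clean = 5, there
 * are 256 + 5 - 10 - 1 = 250 free descriptors.  The "- 1" keeps next_to_use
 * from ever advancing onto next_to_clean, so a completely full ring cannot be
 * confused with an empty one.
 */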

#ifdef CONFIG_IGB_HWMON

#define IGB_HWMON_TYPE_LOC	0
#define IGB_HWMON_TYPE_TEMP	1
#define IGB_HWMON_TYPE_CAUTION	2
#define IGB_HWMON_TYPE_MAX	3

struct hwmon_attr {
	struct device_attribute dev_attr;
	struct e1000_hw *hw;
	struct e1000_thermal_diode_data *sensor;
	char name[12];
};

struct hwmon_buff {
	struct attribute_group group;
	const struct attribute_group *groups[2];
	struct attribute *attrs[E1000_MAX_SENSORS * 4 + 1];
	struct hwmon_attr hwmon_list[E1000_MAX_SENSORS * 4];
	unsigned int n_hwmon;
};
#endif

/* The number of L2 ether-type filter registers.  Index 3 is reserved
 * for the PTP 1588 timestamp filter.
 */
#define MAX_ETYPE_FILTER	(4 - 1)
/* ETQF filter list: one static filter per filter consumer. This is
 * to avoid filter collisions later. Add new filters here!!
 *
 * Current filters:		Filter 3
 */
#define IGB_ETQF_FILTER_1588	3

#define IGB_N_EXTTS	2
#define IGB_N_PEROUT	2
#define IGB_N_SDP	4
#define IGB_RETA_SIZE	128

enum igb_filter_match_flags {
	IGB_FILTER_FLAG_ETHER_TYPE = 0x1,
	IGB_FILTER_FLAG_VLAN_TCI   = 0x2,
	IGB_FILTER_FLAG_SRC_MAC_ADDR   = 0x4,
	IGB_FILTER_FLAG_DST_MAC_ADDR   = 0x8,
};

#define IGB_MAX_RXNFC_FILTERS 16

/* RX network flow classification data structure */
struct igb_nfc_input {
	/* Byte layout in order, all values with MSB first:
	 * match_flags - 1 byte
	 * etype - 2 bytes
	 * vlan_tci - 2 bytes
	 */
	u8 match_flags;
	__be16 etype;
	__be16 vlan_tci;
	u8 src_addr[ETH_ALEN];
	u8 dst_addr[ETH_ALEN];
};

struct igb_nfc_filter {
	struct hlist_node nfc_node;
	struct igb_nfc_input filter;
	unsigned long cookie;
	u16 etype_reg_index;
	u16 sw_idx;
	u16 action;
};

struct igb_mac_addr {
	u8 addr[ETH_ALEN];
	u8 queue;
	u8 state; /* bitmask */
};

#define IGB_MAC_STATE_DEFAULT	0x1
#define IGB_MAC_STATE_IN_USE	0x2
#define IGB_MAC_STATE_SRC_ADDR	0x4
#define IGB_MAC_STATE_QUEUE_STEERING 0x8

/* board specific private data structure */
struct igb_adapter {
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];

	struct net_device *netdev;
	struct bpf_prog *xdp_prog;

	unsigned long state;
	unsigned int flags;

	unsigned int num_q_vectors;
	struct msix_entry msix_entries[MAX_MSIX_ENTRIES];

	/* Interrupt Throttle Rate */
	u32 rx_itr_setting;
	u32 tx_itr_setting;
	u16 tx_itr;
	u16 rx_itr;

	/* TX */
	u16 tx_work_limit;
	u32 tx_timeout_count;
	int num_tx_queues;
	struct igb_ring *tx_ring[16];

	/* RX */
	int num_rx_queues;
	struct igb_ring *rx_ring[16];

	u32 max_frame_size;
	u32 min_frame_size;

	struct timer_list watchdog_timer;
	struct timer_list phy_info_timer;

	u16 mng_vlan_id;
	u32 bd_number;
	u32 wol;
	u32 en_mng_pt;
	u16 link_speed;
	u16 link_duplex;

	u8 __iomem *io_addr; /* Mainly for iounmap use */

	struct work_struct reset_task;
	struct work_struct watchdog_task;
	bool fc_autoneg;
	u8  tx_timeout_factor;
	struct timer_list blink_timer;
	unsigned long led_status;

	/* OS defined structs */
	struct pci_dev *pdev;

	spinlock_t stats64_lock;
	struct rtnl_link_stats64 stats64;

	/* structs defined in e1000_hw.h */
	struct e1000_hw hw;
	struct e1000_hw_stats stats;
	struct e1000_phy_info phy_info;

	u32 test_icr;
	struct igb_ring test_tx_ring;
	struct igb_ring test_rx_ring;

	int msg_enable;

	struct igb_q_vector *q_vector[MAX_Q_VECTORS];
	u32 eims_enable_mask;
	u32 eims_other;

	/* to not mess up cache alignment, always add to the bottom */
	u16 tx_ring_count;
	u16 rx_ring_count;
	unsigned int vfs_allocated_count;
	struct vf_data_storage *vf_data;
	int vf_rate_link_speed;
	u32 rss_queues;
	u32 wvbr;
	u32 *shadow_vfta;

	struct ptp_clock *ptp_clock;
	struct ptp_clock_info ptp_caps;
	struct delayed_work ptp_overflow_work;
	struct work_struct ptp_tx_work;
	struct sk_buff *ptp_tx_skb;
	struct hwtstamp_config tstamp_config;
	unsigned long ptp_tx_start;
	unsigned long last_rx_ptp_check;
	unsigned long last_rx_timestamp;
	unsigned int ptp_flags;
	spinlock_t tmreg_lock;
	struct cyclecounter cc;
	struct timecounter tc;
	u32 tx_hwtstamp_timeouts;
	u32 tx_hwtstamp_skipped;
	u32 rx_hwtstamp_cleared;
	bool pps_sys_wrap_on;

	struct ptp_pin_desc sdp_config[IGB_N_SDP];
	struct {
		struct timespec64 start;
		struct timespec64 period;
	} perout[IGB_N_PEROUT];

	char fw_version[48];
#ifdef CONFIG_IGB_HWMON
	struct hwmon_buff *igb_hwmon_buff;
	bool ets;
#endif
	struct i2c_algo_bit_data i2c_algo;
	struct i2c_adapter i2c_adap;
	struct i2c_client *i2c_client;
	u32 rss_indir_tbl_init;
	u8 rss_indir_tbl[IGB_RETA_SIZE];

	unsigned long link_check_timeout;
	int copper_tries;
	struct e1000_info ei;
	u16 eee_advert;

	/* RX network flow classification support */
	struct hlist_head nfc_filter_list;
	struct hlist_head cls_flower_list;
	unsigned int nfc_filter_count;
	/* lock for RX network flow classification filter */
	spinlock_t nfc_lock;
	bool etype_bitmap[MAX_ETYPE_FILTER];

	struct igb_mac_addr *mac_table;
	struct vf_mac_filter vf_macs;
	struct vf_mac_filter *vf_mac_list;
	/* lock for VF resources */
	spinlock_t vfs_lock;
};

/* flags controlling PTP/1588 function */
#define IGB_PTP_ENABLED		BIT(0)
#define IGB_PTP_OVERFLOW_CHECK	BIT(1)

#define IGB_FLAG_HAS_MSI		BIT(0)
#define IGB_FLAG_DCA_ENABLED		BIT(1)
#define IGB_FLAG_QUAD_PORT_A		BIT(2)
#define IGB_FLAG_QUEUE_PAIRS		BIT(3)
#define IGB_FLAG_DMAC			BIT(4)
#define IGB_FLAG_RSS_FIELD_IPV4_UDP	BIT(6)
#define IGB_FLAG_RSS_FIELD_IPV6_UDP	BIT(7)
#define IGB_FLAG_WOL_SUPPORTED		BIT(8)
#define IGB_FLAG_NEED_LINK_UPDATE	BIT(9)
#define IGB_FLAG_MEDIA_RESET		BIT(10)
#define IGB_FLAG_MAS_CAPABLE		BIT(11)
#define IGB_FLAG_MAS_ENABLE		BIT(12)
#define IGB_FLAG_HAS_MSIX		BIT(13)
#define IGB_FLAG_EEE			BIT(14)
#define IGB_FLAG_VLAN_PROMISC		BIT(15)
#define IGB_FLAG_RX_LEGACY		BIT(16)
#define IGB_FLAG_FQTSS			BIT(17)

/* Media Auto Sense */
#define IGB_MAS_ENABLE_0		0x0001
#define IGB_MAS_ENABLE_1		0x0002
#define IGB_MAS_ENABLE_2		0x0004
#define IGB_MAS_ENABLE_3		0x0008

/* DMA Coalescing defines */
#define IGB_MIN_TXPBSIZE	20408
#define IGB_TX_BUF_4096		4096
#define IGB_DMCTLX_DCFLUSH_DIS	0x80000000  /* Disable DMA Coal Flush */

#define IGB_82576_TSYNC_SHIFT	19
enum e1000_state_t {
	__IGB_TESTING,
	__IGB_RESETTING,
	__IGB_DOWN,
	__IGB_PTP_TX_IN_PROGRESS,
};

enum igb_boards {
	board_82575,
};

extern char igb_driver_name[];

int igb_xmit_xdp_ring(struct igb_adapter *adapter,
		      struct igb_ring *ring,
		      struct xdp_frame *xdpf);
int igb_open(struct net_device *netdev);
int igb_close(struct net_device *netdev);
int igb_up(struct igb_adapter *);
void igb_down(struct igb_adapter *);
void igb_reinit_locked(struct igb_adapter *);
void igb_reset(struct igb_adapter *);
int igb_reinit_queues(struct igb_adapter *);
void igb_write_rss_indir_tbl(struct igb_adapter *);
int igb_set_spd_dplx(struct igb_adapter *, u32, u8);
int igb_setup_tx_resources(struct igb_ring *);
int igb_setup_rx_resources(struct igb_ring *);
void igb_free_tx_resources(struct igb_ring *);
void igb_free_rx_resources(struct igb_ring *);
void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *);
void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
void igb_setup_tctl(struct igb_adapter *);
void igb_setup_rctl(struct igb_adapter *);
void igb_setup_srrctl(struct igb_adapter *, struct igb_ring *);
netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *);
void igb_alloc_rx_buffers(struct igb_ring *, u16);
void igb_update_stats(struct igb_adapter *);
bool igb_has_link(struct igb_adapter *adapter);
void igb_set_ethtool_ops(struct net_device *);
void igb_power_up_link(struct igb_adapter *);
void igb_set_fw_version(struct igb_adapter *);
void igb_ptp_init(struct igb_adapter *adapter);
void igb_ptp_stop(struct igb_adapter *adapter);
void igb_ptp_reset(struct igb_adapter *adapter);
void igb_ptp_suspend(struct igb_adapter *adapter);
void igb_ptp_rx_hang(struct igb_adapter *adapter);
void igb_ptp_tx_hang(struct igb_adapter *adapter);
void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb);
int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
			ktime_t *timestamp);
int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
void igb_set_flag_queue_pairs(struct igb_adapter *, const u32);
unsigned int igb_get_max_rss_queues(struct igb_adapter *);
#ifdef CONFIG_IGB_HWMON
void igb_sysfs_exit(struct igb_adapter *adapter);
int igb_sysfs_init(struct igb_adapter *adapter);
#endif
static inline s32 igb_reset_phy(struct e1000_hw *hw)
{
	if (hw->phy.ops.reset)
		return hw->phy.ops.reset(hw);

	return 0;
}

static inline s32 igb_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data)
{
	if (hw->phy.ops.read_reg)
		return hw->phy.ops.read_reg(hw, offset, data);

	return 0;
}

static inline s32 igb_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data)
{
	if (hw->phy.ops.write_reg)
		return hw->phy.ops.write_reg(hw, offset, data);

	return 0;
}

static inline s32 igb_get_phy_info(struct e1000_hw *hw)
{
	if (hw->phy.ops.get_phy_info)
		return hw->phy.ops.get_phy_info(hw);

	return 0;
}
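
/* The phy.ops wrappers above treat a missing callback as a successful no-op:
 * not every MAC/media combination (for example SerDes-only devices) populates
 * every PHY operation, so callers may use these helpers unconditionally.
 */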

static inline struct netdev_queue *txring_txq(const struct igb_ring *tx_ring)
{
	return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index);
}

int igb_add_filter(struct igb_adapter *adapter,
		   struct igb_nfc_filter *input);
int igb_erase_filter(struct igb_adapter *adapter,
		     struct igb_nfc_filter *input);

int igb_add_mac_steering_filter(struct igb_adapter *adapter,
				const u8 *addr, u8 queue, u8 flags);
int igb_del_mac_steering_filter(struct igb_adapter *adapter,
				const u8 *addr, u8 queue, u8 flags);

#endif /* _IGB_H_ */