/* bnx2x.h: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 */

#ifndef BNX2X_H
#define BNX2X_H

/* compilation time flags */

/* define this to make the driver freeze on error to allow getting debug info
 * (you will need to reboot afterwards) */
/* #define BNX2X_STOP_ON_ERROR */

#define DRV_MODULE_VERSION      "1.52.53-4"
#define DRV_MODULE_RELDATE      "2010/16/08"
#define BNX2X_BC_VER            0x040200

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define BCM_VLAN			1
#endif

#define BNX2X_MULTI_QUEUE

#define BNX2X_NEW_NAPI



#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
#define BCM_CNIC 1
#include "../cnic_if.h"
#endif


#ifdef BCM_CNIC
#define BNX2X_MIN_MSIX_VEC_CNT 3
#define BNX2X_MSIX_VEC_FP_START 2
#else
#define BNX2X_MIN_MSIX_VEC_CNT 2
#define BNX2X_MSIX_VEC_FP_START 1
#endif

#include <linux/mdio.h>
#include <linux/pci.h>
#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x_stats.h"

/* error/debug prints */

#define DRV_MODULE_NAME		"bnx2x"

/* for messages that are currently off */
#define BNX2X_MSG_OFF			0
#define BNX2X_MSG_MCP			0x010000 /* was: NETIF_MSG_HW */
#define BNX2X_MSG_STATS			0x020000 /* was: NETIF_MSG_TIMER */
#define BNX2X_MSG_NVM			0x040000 /* was: NETIF_MSG_HW */
#define BNX2X_MSG_DMAE			0x080000 /* was: NETIF_MSG_HW */
#define BNX2X_MSG_SP			0x100000 /* was: NETIF_MSG_INTR */
#define BNX2X_MSG_FP			0x200000 /* was: NETIF_MSG_INTR */

#define DP_LEVEL			KERN_NOTICE	/* was: KERN_DEBUG */

/* regular debug print */
#define DP(__mask, __fmt, __args...)				\
do {								\
	if (bp->msg_enable & (__mask))				\
		printk(DP_LEVEL "[%s:%d(%s)]" __fmt,		\
		       __func__, __LINE__,			\
		       bp->dev ? (bp->dev->name) : "?",		\
		       ##__args);				\
} while (0)
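/*
 * Illustrative usage (added commentary, not part of the original source):
 *
 *	DP(BNX2X_MSG_SP, "slow path event on cid %d\n", cid);
 *
 * prints only when the BNX2X_MSG_SP bit is set in bp->msg_enable; the
 * macro expects a local variable 'bp' to be in scope.
 */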

/* errors debug print */
#define BNX2X_DBG_ERR(__fmt, __args...)				\
do {								\
	if (netif_msg_probe(bp))				\
		pr_err("[%s:%d(%s)]" __fmt,			\
		       __func__, __LINE__,			\
		       bp->dev ? (bp->dev->name) : "?",		\
		       ##__args);				\
} while (0)

/* for errors (never masked) */
#define BNX2X_ERR(__fmt, __args...)				\
do {								\
	pr_err("[%s:%d(%s)]" __fmt,				\
	       __func__, __LINE__,				\
	       bp->dev ? (bp->dev->name) : "?",			\
	       ##__args);					\
	} while (0)

#define BNX2X_ERROR(__fmt, __args...) do { \
	pr_err("[%s:%d]" __fmt, __func__, __LINE__, ##__args); \
	} while (0)


/* before we have dev->name, use dev_info() */
#define BNX2X_DEV_INFO(__fmt, __args...)			 \
do {								 \
	if (netif_msg_probe(bp))				 \
		dev_info(&bp->pdev->dev, __fmt, ##__args);	 \
} while (0)

void bnx2x_panic_dump(struct bnx2x *bp);

#ifdef BNX2X_STOP_ON_ERROR
#define bnx2x_panic() do { \
		bp->panic = 1; \
		BNX2X_ERR("driver assert\n"); \
		bnx2x_int_disable(bp); \
		bnx2x_panic_dump(bp); \
	} while (0)
#else
#define bnx2x_panic() do { \
		bp->panic = 1; \
		BNX2X_ERR("driver assert\n"); \
		bnx2x_panic_dump(bp); \
	} while (0)
#endif


#define U64_LO(x)			(u32)(((u64)(x)) & 0xffffffff)
#define U64_HI(x)			(u32)(((u64)(x)) >> 32)
#define HILO_U64(hi, lo)		((((u64)(hi)) << 32) + (lo))
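/*
 * Added example (not part of the original source): these helpers split and
 * rebuild 64-bit DMA addresses around the 32-bit hi/lo pairs used by the
 * hardware descriptors, e.g.
 *
 *	bd->addr_hi = cpu_to_le32(U64_HI(mapping));
 *	bd->addr_lo = cpu_to_le32(U64_LO(mapping));
 *
 * BD_UNMAP_ADDR() further below recombines the two halves with HILO_U64().
 */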


#define REG_ADDR(bp, offset)		(bp->regview + offset)

#define REG_RD(bp, offset)		readl(REG_ADDR(bp, offset))
#define REG_RD8(bp, offset)		readb(REG_ADDR(bp, offset))

#define REG_WR(bp, offset, val)		writel((u32)val, REG_ADDR(bp, offset))
#define REG_WR8(bp, offset, val)	writeb((u8)val, REG_ADDR(bp, offset))
#define REG_WR16(bp, offset, val)	writew((u16)val, REG_ADDR(bp, offset))

#define REG_RD_IND(bp, offset)		bnx2x_reg_rd_ind(bp, offset)
#define REG_WR_IND(bp, offset, val)	bnx2x_reg_wr_ind(bp, offset, val)

#define REG_RD_DMAE(bp, offset, valp, len32) \
	do { \
		bnx2x_read_dmae(bp, offset, len32);\
		memcpy(valp, bnx2x_sp(bp, wb_data[0]), (len32) * 4); \
	} while (0)

#define REG_WR_DMAE(bp, offset, valp, len32) \
	do { \
		memcpy(bnx2x_sp(bp, wb_data[0]), valp, (len32) * 4); \
		bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), \
				 offset, len32); \
	} while (0)

#define VIRT_WR_DMAE_LEN(bp, data, addr, len32, le32_swap) \
	do { \
		memcpy(GUNZIP_BUF(bp), data, (len32) * 4); \
		bnx2x_write_big_buf_wb(bp, addr, len32); \
	} while (0)
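/*
 * Added commentary (not part of the original source): REG_RD_DMAE and
 * REG_WR_DMAE move (len32) 32-bit words between the chip and the slowpath
 * wb_data[] bounce buffer via the DMAE engine, so valp can be an ordinary
 * kernel buffer.  VIRT_WR_DMAE_LEN stages its data through the gunzip
 * buffer instead; note that its le32_swap argument is accepted but unused
 * in this version.
 */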

#define SHMEM_ADDR(bp, field)		(bp->common.shmem_base + \
					 offsetof(struct shmem_region, field))
#define SHMEM_RD(bp, field)		REG_RD(bp, SHMEM_ADDR(bp, field))
#define SHMEM_WR(bp, field, val)	REG_WR(bp, SHMEM_ADDR(bp, field), val)

#define SHMEM2_ADDR(bp, field)		(bp->common.shmem2_base + \
					 offsetof(struct shmem2_region, field))
#define SHMEM2_RD(bp, field)		REG_RD(bp, SHMEM2_ADDR(bp, field))
#define SHMEM2_WR(bp, field, val)	REG_WR(bp, SHMEM2_ADDR(bp, field), val)

#define MF_CFG_RD(bp, field)		SHMEM_RD(bp, mf_cfg.field)
#define MF_CFG_WR(bp, field, val)	SHMEM_WR(bp, mf_cfg.field, val)

#define EMAC_RD(bp, reg)		REG_RD(bp, emac_base + reg)
#define EMAC_WR(bp, reg, val)		REG_WR(bp, emac_base + reg, val)

#define AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR \
	AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR


/* fast path */

struct sw_rx_bd {
	struct sk_buff	*skb;
	DEFINE_DMA_UNMAP_ADDR(mapping);
};

struct sw_tx_bd {
	struct sk_buff	*skb;
	u16		first_bd;
	u8		flags;
/* Set on the first BD descriptor when there is a split BD */
#define BNX2X_TSO_SPLIT_BD		(1<<0)
};

struct sw_rx_page {
	struct page	*page;
	DEFINE_DMA_UNMAP_ADDR(mapping);
};

union db_prod {
	struct doorbell_set_prod data;
	u32		raw;
};


/* MC hsi */
#define BCM_PAGE_SHIFT			12
#define BCM_PAGE_SIZE			(1 << BCM_PAGE_SHIFT)
#define BCM_PAGE_MASK			(~(BCM_PAGE_SIZE - 1))
#define BCM_PAGE_ALIGN(addr)	(((addr) + BCM_PAGE_SIZE - 1) & BCM_PAGE_MASK)
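/*
 * Worked example (added, not part of the original source): with
 * BCM_PAGE_SHIFT = 12 the MC page size is 4 KB, so
 * BCM_PAGE_ALIGN(0x1001) == 0x2000 and BCM_PAGE_ALIGN(0x2000) == 0x2000,
 * i.e. an address is rounded up to the next 4 KB boundary.
 */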

#define PAGES_PER_SGE_SHIFT		0
#define PAGES_PER_SGE			(1 << PAGES_PER_SGE_SHIFT)
#define SGE_PAGE_SIZE			PAGE_SIZE
#define SGE_PAGE_SHIFT			PAGE_SHIFT
#define SGE_PAGE_ALIGN(addr)		PAGE_ALIGN((typeof(PAGE_SIZE))(addr))

/* SGE ring related macros */
#define NUM_RX_SGE_PAGES		2
#define RX_SGE_CNT		(BCM_PAGE_SIZE / sizeof(struct eth_rx_sge))
#define MAX_RX_SGE_CNT			(RX_SGE_CNT - 2)
/* RX_SGE_CNT is promised to be a power of 2 */
#define RX_SGE_MASK			(RX_SGE_CNT - 1)
#define NUM_RX_SGE			(RX_SGE_CNT * NUM_RX_SGE_PAGES)
#define MAX_RX_SGE			(NUM_RX_SGE - 1)
#define NEXT_SGE_IDX(x)		((((x) & RX_SGE_MASK) == \
				  (MAX_RX_SGE_CNT - 1)) ? (x) + 3 : (x) + 1)
#define RX_SGE(x)			((x) & MAX_RX_SGE)
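/*
 * Added commentary (not part of the original source): MAX_RX_SGE_CNT is
 * RX_SGE_CNT - 2 because the last two entries of each SGE page are reserved
 * for the link to the next page, so NEXT_SGE_IDX() advances by 3 instead of
 * 1 when the index sits on the last usable entry of a page.
 */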

/* SGE producer mask related macros */
/* Number of bits in one sge_mask array element */
#define RX_SGE_MASK_ELEM_SZ		64
#define RX_SGE_MASK_ELEM_SHIFT		6
#define RX_SGE_MASK_ELEM_MASK		((u64)RX_SGE_MASK_ELEM_SZ - 1)

/* Creates a bitmask of all ones in the less significant bits.
   idx - index of the most significant bit in the created mask */
#define RX_SGE_ONES_MASK(idx) \
		(((u64)0x1 << (((idx) & RX_SGE_MASK_ELEM_MASK) + 1)) - 1)
#define RX_SGE_MASK_ELEM_ONE_MASK	((u64)(~0))

/* Number of u64 elements in SGE mask array */
#define RX_SGE_MASK_LEN			((NUM_RX_SGE_PAGES * RX_SGE_CNT) / \
					 RX_SGE_MASK_ELEM_SZ)
#define RX_SGE_MASK_LEN_MASK		(RX_SGE_MASK_LEN - 1)
#define NEXT_SGE_MASK_ELEM(el)		(((el) + 1) & RX_SGE_MASK_LEN_MASK)


struct bnx2x_fastpath {

	struct napi_struct	napi;
	struct host_status_block *status_blk;
	dma_addr_t		status_blk_mapping;

	struct sw_tx_bd		*tx_buf_ring;

	union eth_tx_bd_types	*tx_desc_ring;
	dma_addr_t		tx_desc_mapping;

	struct sw_rx_bd		*rx_buf_ring;	/* BDs mappings ring */
	struct sw_rx_page	*rx_page_ring;	/* SGE pages mappings ring */

	struct eth_rx_bd	*rx_desc_ring;
	dma_addr_t		rx_desc_mapping;

	union eth_rx_cqe	*rx_comp_ring;
	dma_addr_t		rx_comp_mapping;

	/* SGE ring */
	struct eth_rx_sge	*rx_sge_ring;
	dma_addr_t		rx_sge_mapping;

	u64			sge_mask[RX_SGE_MASK_LEN];

	int			state;
#define BNX2X_FP_STATE_CLOSED		0
#define BNX2X_FP_STATE_IRQ		0x80000
#define BNX2X_FP_STATE_OPENING		0x90000
#define BNX2X_FP_STATE_OPEN		0xa0000
#define BNX2X_FP_STATE_HALTING		0xb0000
#define BNX2X_FP_STATE_HALTED		0xc0000

	u8			index;	/* number in fp array */
	u8			cl_id;	/* eth client id */
	u8			sb_id;	/* status block number in HW */

	union db_prod		tx_db;

	u16			tx_pkt_prod;
	u16			tx_pkt_cons;
	u16			tx_bd_prod;
	u16			tx_bd_cons;
	__le16			*tx_cons_sb;

	__le16			fp_c_idx;
	__le16			fp_u_idx;

	u16			rx_bd_prod;
	u16			rx_bd_cons;
	u16			rx_comp_prod;
	u16			rx_comp_cons;
	u16			rx_sge_prod;
	/* The last maximal completed SGE */
	u16			last_max_sge;
	__le16			*rx_cons_sb;
	__le16			*rx_bd_cons_sb;


	unsigned long		tx_pkt,
				rx_pkt,
				rx_calls;

	/* TPA related */
	struct sw_rx_bd		tpa_pool[ETH_MAX_AGGREGATION_QUEUES_E1H];
	u8			tpa_state[ETH_MAX_AGGREGATION_QUEUES_E1H];
#define BNX2X_TPA_START			1
#define BNX2X_TPA_STOP			2
	u8			disable_tpa;
#ifdef BNX2X_STOP_ON_ERROR
	u64			tpa_queue_used;
#endif

	struct tstorm_per_client_stats old_tclient;
	struct ustorm_per_client_stats old_uclient;
	struct xstorm_per_client_stats old_xclient;
	struct bnx2x_eth_q_stats eth_q_stats;

	/* The size is calculated using the following:
	     sizeof name field from netdev structure +
	     4 ('-Xx-' string) +
	     4 (for the digits and to make it DWORD aligned) */
#define FP_NAME_SIZE		(sizeof(((struct net_device *)0)->name) + 8)
	char			name[FP_NAME_SIZE];
	struct bnx2x		*bp; /* parent */
};

#define bnx2x_fp(bp, nr, var)		(bp->fp[nr].var)
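/*
 * Added example (not part of the original source):
 * bnx2x_fp(bp, i, rx_comp_cons) expands to bp->fp[i].rx_comp_cons, i.e. a
 * field of the i-th fastpath instance.
 */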


/* MC hsi */
#define MAX_FETCH_BD			13	/* HW max BDs per packet */
#define RX_COPY_THRESH			92

#define NUM_TX_RINGS			16
#define TX_DESC_CNT		(BCM_PAGE_SIZE / sizeof(union eth_tx_bd_types))
#define MAX_TX_DESC_CNT			(TX_DESC_CNT - 1)
#define NUM_TX_BD			(TX_DESC_CNT * NUM_TX_RINGS)
#define MAX_TX_BD			(NUM_TX_BD - 1)
#define MAX_TX_AVAIL			(MAX_TX_DESC_CNT * NUM_TX_RINGS - 2)
#define NEXT_TX_IDX(x)		((((x) & MAX_TX_DESC_CNT) == \
				  (MAX_TX_DESC_CNT - 1)) ? (x) + 2 : (x) + 1)
#define TX_BD(x)			((x) & MAX_TX_BD)
#define TX_BD_POFF(x)			((x) & MAX_TX_DESC_CNT)
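/*
 * Added commentary (not part of the original source): each TX page holds
 * TX_DESC_CNT descriptors and the last one is used as a link to the next
 * page, hence MAX_TX_DESC_CNT = TX_DESC_CNT - 1 and NEXT_TX_IDX() skips it
 * by advancing 2 instead of 1 at the end of a page.
 */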

/* The RX BD ring is special: each BD is 8 bytes, but the last one is 16 */
#define NUM_RX_RINGS			8
#define RX_DESC_CNT		(BCM_PAGE_SIZE / sizeof(struct eth_rx_bd))
#define MAX_RX_DESC_CNT			(RX_DESC_CNT - 2)
#define RX_DESC_MASK			(RX_DESC_CNT - 1)
#define NUM_RX_BD			(RX_DESC_CNT * NUM_RX_RINGS)
#define MAX_RX_BD			(NUM_RX_BD - 1)
#define MAX_RX_AVAIL			(MAX_RX_DESC_CNT * NUM_RX_RINGS - 2)
#define NEXT_RX_IDX(x)		((((x) & RX_DESC_MASK) == \
				  (MAX_RX_DESC_CNT - 1)) ? (x) + 3 : (x) + 1)
#define RX_BD(x)			((x) & MAX_RX_BD)

/* Since a CQE is 4 times bigger than a BD entry we have to allocate
   4 times more pages for the CQ ring in order to keep it balanced with
   the BD ring */
#define NUM_RCQ_RINGS			(NUM_RX_RINGS * 4)
#define RCQ_DESC_CNT		(BCM_PAGE_SIZE / sizeof(union eth_rx_cqe))
#define MAX_RCQ_DESC_CNT		(RCQ_DESC_CNT - 1)
#define NUM_RCQ_BD			(RCQ_DESC_CNT * NUM_RCQ_RINGS)
#define MAX_RCQ_BD			(NUM_RCQ_BD - 1)
#define MAX_RCQ_AVAIL			(MAX_RCQ_DESC_CNT * NUM_RCQ_RINGS - 2)
#define NEXT_RCQ_IDX(x)		((((x) & MAX_RCQ_DESC_CNT) == \
				  (MAX_RCQ_DESC_CNT - 1)) ? (x) + 2 : (x) + 1)
#define RCQ_BD(x)			((x) & MAX_RCQ_BD)


/* This is needed for determining last_max */
#define SUB_S16(a, b)			(s16)((s16)(a) - (s16)(b))

#define __SGE_MASK_SET_BIT(el, bit) \
	do { \
		el = ((el) | ((u64)0x1 << (bit))); \
	} while (0)

#define __SGE_MASK_CLEAR_BIT(el, bit) \
	do { \
		el = ((el) & (~((u64)0x1 << (bit)))); \
	} while (0)

#define SGE_MASK_SET_BIT(fp, idx) \
	__SGE_MASK_SET_BIT(fp->sge_mask[(idx) >> RX_SGE_MASK_ELEM_SHIFT], \
			   ((idx) & RX_SGE_MASK_ELEM_MASK))

#define SGE_MASK_CLEAR_BIT(fp, idx) \
	__SGE_MASK_CLEAR_BIT(fp->sge_mask[(idx) >> RX_SGE_MASK_ELEM_SHIFT], \
			     ((idx) & RX_SGE_MASK_ELEM_MASK))
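/*
 * Added commentary (not part of the original source): sge_mask[] is a
 * bitmap with one bit per SGE; an index selects the array element with
 * (idx >> RX_SGE_MASK_ELEM_SHIFT) and the bit within it with
 * (idx & RX_SGE_MASK_ELEM_MASK), while SUB_S16() above gives a wrap-safe
 * signed distance between two 16-bit ring indices when last_max is updated.
 */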


/* used on a CID received from the HW */
#define SW_CID(x)			(le32_to_cpu(x) & \
					 (COMMON_RAMROD_ETH_RX_CQE_CID >> 7))
#define CQE_CMD(x)			(le32_to_cpu(x) >> \
					COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT)

#define BD_UNMAP_ADDR(bd)		HILO_U64(le32_to_cpu((bd)->addr_hi), \
						 le32_to_cpu((bd)->addr_lo))
#define BD_UNMAP_LEN(bd)		(le16_to_cpu((bd)->nbytes))


#define DPM_TRIGER_TYPE			0x40
#define DOORBELL(bp, cid, val) \
	do { \
		writel((u32)(val), bp->doorbells + (BCM_PAGE_SIZE * (cid)) + \
		       DPM_TRIGER_TYPE); \
	} while (0)
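/*
 * Added commentary (not part of the original source): every connection
 * (cid) owns a BCM_PAGE_SIZE-sized window in the doorbell BAR, so
 * DOORBELL() writes the new producer value at offset cid * BCM_PAGE_SIZE
 * (plus the DPM trigger offset) to kick the hardware for that connection.
 */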


/* TX CSUM helpers */
#define SKB_CS_OFF(skb)		(offsetof(struct tcphdr, check) - \
				 skb->csum_offset)
#define SKB_CS(skb)		(*(u16 *)(skb_transport_header(skb) + \
					  skb->csum_offset))

#define pbd_tcp_flags(skb)	(ntohl(tcp_flag_word(tcp_hdr(skb)))>>16 & 0xff)

#define XMIT_PLAIN			0
#define XMIT_CSUM_V4			0x1
#define XMIT_CSUM_V6			0x2
#define XMIT_CSUM_TCP			0x4
#define XMIT_GSO_V4			0x8
#define XMIT_GSO_V6			0x10

#define XMIT_CSUM			(XMIT_CSUM_V4 | XMIT_CSUM_V6)
#define XMIT_GSO			(XMIT_GSO_V4 | XMIT_GSO_V6)


/* stuff added to make the code fit 80Col */

#define CQE_TYPE(cqe_fp_flags)	((cqe_fp_flags) & ETH_FAST_PATH_RX_CQE_TYPE)

#define TPA_TYPE_START			ETH_FAST_PATH_RX_CQE_START_FLG
#define TPA_TYPE_END			ETH_FAST_PATH_RX_CQE_END_FLG
#define TPA_TYPE(cqe_fp_flags)		((cqe_fp_flags) & \
					 (TPA_TYPE_START | TPA_TYPE_END))

#define ETH_RX_ERROR_FALGS		ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG

#define BNX2X_IP_CSUM_ERR(cqe) \
			(!((cqe)->fast_path_cqe.status_flags & \
			   ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG) && \
			 ((cqe)->fast_path_cqe.type_error_flags & \
			  ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG))

#define BNX2X_L4_CSUM_ERR(cqe) \
			(!((cqe)->fast_path_cqe.status_flags & \
			   ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG) && \
			 ((cqe)->fast_path_cqe.type_error_flags & \
			  ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))

#define BNX2X_RX_CSUM_OK(cqe) \
			(!(BNX2X_L4_CSUM_ERR(cqe) || BNX2X_IP_CSUM_ERR(cqe)))

#define BNX2X_PRS_FLAG_OVERETH_IPV4(flags) \
				(((le16_to_cpu(flags) & \
				   PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) >> \
				  PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT) \
				 == PRS_FLAG_OVERETH_IPV4)
#define BNX2X_RX_SUM_FIX(cqe) \
	BNX2X_PRS_FLAG_OVERETH_IPV4(cqe->fast_path_cqe.pars_flags.flags)


#define FP_USB_FUNC_OFF			(2 + 2*HC_USTORM_SB_NUM_INDICES)
#define FP_CSB_FUNC_OFF			(2 + 2*HC_CSTORM_SB_NUM_INDICES)

#define U_SB_ETH_RX_CQ_INDEX		HC_INDEX_U_ETH_RX_CQ_CONS
#define U_SB_ETH_RX_BD_INDEX		HC_INDEX_U_ETH_RX_BD_CONS
#define C_SB_ETH_TX_CQ_INDEX		HC_INDEX_C_ETH_TX_CQ_CONS

#define BNX2X_RX_SB_INDEX \
	(&fp->status_blk->u_status_block.index_values[U_SB_ETH_RX_CQ_INDEX])

#define BNX2X_RX_SB_BD_INDEX \
	(&fp->status_blk->u_status_block.index_values[U_SB_ETH_RX_BD_INDEX])

#define BNX2X_RX_SB_INDEX_NUM \
		(((U_SB_ETH_RX_CQ_INDEX << \
		   USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER_SHIFT) & \
		  USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER) | \
		 ((U_SB_ETH_RX_BD_INDEX << \
		   USTORM_ETH_ST_CONTEXT_CONFIG_BD_SB_INDEX_NUMBER_SHIFT) & \
		  USTORM_ETH_ST_CONTEXT_CONFIG_BD_SB_INDEX_NUMBER))

#define BNX2X_TX_SB_INDEX \
	(&fp->status_blk->c_status_block.index_values[C_SB_ETH_TX_CQ_INDEX])


/* end of fast path */

/* common */

struct bnx2x_common {

	u32			chip_id;
/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
#define CHIP_ID(bp)			(bp->common.chip_id & 0xfffffff0)

#define CHIP_NUM(bp)			(bp->common.chip_id >> 16)
#define CHIP_NUM_57710			0x164e
#define CHIP_NUM_57711			0x164f
#define CHIP_NUM_57711E			0x1650
#define CHIP_IS_E1(bp)			(CHIP_NUM(bp) == CHIP_NUM_57710)
#define CHIP_IS_57711(bp)		(CHIP_NUM(bp) == CHIP_NUM_57711)
#define CHIP_IS_57711E(bp)		(CHIP_NUM(bp) == CHIP_NUM_57711E)
#define CHIP_IS_E1H(bp)			(CHIP_IS_57711(bp) || \
					 CHIP_IS_57711E(bp))
#define IS_E1H_OFFSET			CHIP_IS_E1H(bp)

#define CHIP_REV(bp)			(bp->common.chip_id & 0x0000f000)
#define CHIP_REV_Ax			0x00000000
/* assume maximum 5 revisions */
#define CHIP_REV_IS_SLOW(bp)		(CHIP_REV(bp) > 0x00005000)
/* Emul versions are A=>0xe, B=>0xc, C=>0xa, D=>8, E=>6 */
#define CHIP_REV_IS_EMUL(bp)		((CHIP_REV_IS_SLOW(bp)) && \
					 !(CHIP_REV(bp) & 0x00001000))
/* FPGA versions are A=>0xf, B=>0xd, C=>0xb, D=>9, E=>7 */
#define CHIP_REV_IS_FPGA(bp)		((CHIP_REV_IS_SLOW(bp)) && \
					 (CHIP_REV(bp) & 0x00001000))

#define CHIP_TIME(bp)			((CHIP_REV_IS_EMUL(bp)) ? 2000 : \
					((CHIP_REV_IS_FPGA(bp)) ? 200 : 1))

#define CHIP_METAL(bp)			(bp->common.chip_id & 0x00000ff0)
#define CHIP_BOND_ID(bp)		(bp->common.chip_id & 0x0000000f)

	int			flash_size;
#define NVRAM_1MB_SIZE			0x20000	/* 1M bit in bytes */
#define NVRAM_TIMEOUT_COUNT		30000
#define NVRAM_PAGE_SIZE			256

	u32			shmem_base;
	u32			shmem2_base;

	u32			hw_config;

	u32			bc_ver;
};


/* end of common */

/* port */

struct bnx2x_port {
	u32			pmf;

	u32			link_config;

	u32			supported;
/* link settings - missing defines */
#define SUPPORTED_2500baseX_Full	(1 << 15)

	u32			advertising;
/* link settings - missing defines */
#define ADVERTISED_2500baseX_Full	(1 << 15)

	u32			phy_addr;

	/* used to synchronize phy accesses */
	struct mutex		phy_mutex;
	int			need_hw_lock;

	u32			port_stx;

	struct nig_stats	old_nig_stats;
};

/* end of port */



#ifdef BCM_CNIC
#define MAX_CONTEXT			15
#else
#define MAX_CONTEXT			16
#endif

union cdu_context {
	struct eth_context eth;
	char pad[1024];
};

#define MAX_DMAE_C			8

/* DMA memory not used in fastpath */
struct bnx2x_slowpath {
	union cdu_context		context[MAX_CONTEXT];
	struct eth_stats_query		fw_stats;
	struct mac_configuration_cmd	mac_config;
	struct mac_configuration_cmd	mcast_config;

	/* used by dmae command executer */
	struct dmae_command		dmae[MAX_DMAE_C];

	u32				stats_comp;
	union mac_stats			mac_stats;
	struct nig_stats		nig_stats;
	struct host_port_stats		port_stats;
	struct host_func_stats		func_stats;
	struct host_func_stats		func_stats_base;

	u32				wb_comp;
	u32				wb_data[4];
};

#define bnx2x_sp(bp, var)		(&bp->slowpath->var)
#define bnx2x_sp_mapping(bp, var) \
		(bp->slowpath_mapping + offsetof(struct bnx2x_slowpath, var))


/* attn group wiring */
#define MAX_DYNAMIC_ATTN_GRPS		8

struct attn_route {
	u32	sig[4];
};

typedef enum {
	BNX2X_RECOVERY_DONE,
	BNX2X_RECOVERY_INIT,
	BNX2X_RECOVERY_WAIT,
} bnx2x_recovery_state_t;

struct bnx2x {
	/* Fields used in the tx and intr/napi performance paths
	 * are grouped together in the beginning of the structure
	 */
	struct bnx2x_fastpath	fp[MAX_CONTEXT];
	void __iomem		*regview;
	void __iomem		*doorbells;
#ifdef BCM_CNIC
#define BNX2X_DB_SIZE		(18*BCM_PAGE_SIZE)
#else
#define BNX2X_DB_SIZE		(16*BCM_PAGE_SIZE)
#endif

	struct net_device	*dev;
	struct pci_dev		*pdev;

	atomic_t		intr_sem;

	bnx2x_recovery_state_t	recovery_state;
	int			is_leader;
#ifdef BCM_CNIC
	struct msix_entry	msix_table[MAX_CONTEXT+2];
#else
	struct msix_entry	msix_table[MAX_CONTEXT+1];
#endif
#define INT_MODE_INTx			1
#define INT_MODE_MSI			2

	int			tx_ring_size;

#ifdef BCM_VLAN
	struct vlan_group	*vlgrp;
#endif

	u32			rx_csum;
	u32			rx_buf_size;
#define ETH_OVREHEAD			(ETH_HLEN + 8)	/* 8 for CRC + VLAN */
#define ETH_MIN_PACKET_SIZE		60
#define ETH_MAX_PACKET_SIZE		1500
#define ETH_MAX_JUMBO_PACKET_SIZE	9600

	/* Max supported alignment is 256 (8 shift) */
#define BNX2X_RX_ALIGN_SHIFT		((L1_CACHE_SHIFT < 8) ? \
					 L1_CACHE_SHIFT : 8)
#define BNX2X_RX_ALIGN			(1 << BNX2X_RX_ALIGN_SHIFT)

	struct host_def_status_block *def_status_blk;
#define DEF_SB_ID			16
	__le16			def_c_idx;
	__le16			def_u_idx;
	__le16			def_x_idx;
	__le16			def_t_idx;
	__le16			def_att_idx;
	u32			attn_state;
	struct attn_route	attn_group[MAX_DYNAMIC_ATTN_GRPS];

	/* slow path ring */
	struct eth_spe		*spq;
	dma_addr_t		spq_mapping;
	u16			spq_prod_idx;
	struct eth_spe		*spq_prod_bd;
	struct eth_spe		*spq_last_bd;
	__le16			*dsb_sp_prod;
	u16			spq_left; /* serialize spq */
	/* used to synchronize spq accesses */
	spinlock_t		spq_lock;

	/* Flags for marking that there is a STAT_QUERY or
	   SET_MAC ramrod pending */
	int			stats_pending;
	int			set_mac_pending;

	/* End of fields used in the performance code paths */

	int			panic;
	int			msg_enable;

	u32			flags;
#define PCIX_FLAG			1
#define PCI_32BIT_FLAG			2
#define ONE_PORT_FLAG			4
#define NO_WOL_FLAG			8
#define USING_DAC_FLAG			0x10
#define USING_MSIX_FLAG			0x20
#define USING_MSI_FLAG			0x40
#define TPA_ENABLE_FLAG			0x80
#define NO_MCP_FLAG			0x100
#define BP_NOMCP(bp)			(bp->flags & NO_MCP_FLAG)
#define HW_VLAN_TX_FLAG			0x400
#define HW_VLAN_RX_FLAG			0x800
#define MF_FUNC_DIS			0x1000

	int			func;
#define BP_PORT(bp)			(bp->func % PORT_MAX)
#define BP_FUNC(bp)			(bp->func)
#define BP_E1HVN(bp)			(bp->func >> 1)
#define BP_L_ID(bp)			(BP_E1HVN(bp) << 2)
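/*
 * Added commentary (not part of the original source): in E1H multi-function
 * mode the PCI function number encodes both the port and the virtual NIC,
 * so BP_PORT() is the function number modulo the port count and BP_E1HVN()
 * the remaining upper bits; BP_L_ID() derives the base logical client id
 * from the VN number.
 */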

#ifdef BCM_CNIC
#define BCM_CNIC_CID_START		16
#define BCM_ISCSI_ETH_CL_ID		17
#endif

	int			pm_cap;
	int			pcie_cap;
	int			mrrs;

	struct delayed_work	sp_task;
	struct delayed_work	reset_task;
	struct timer_list	timer;
	int			current_interval;

	u16			fw_seq;
	u16			fw_drv_pulse_wr_seq;
	u32			func_stx;

	struct link_params	link_params;
	struct link_vars	link_vars;
	struct mdio_if_info	mdio;

	struct bnx2x_common	common;
	struct bnx2x_port	port;

	struct cmng_struct_per_port cmng;
	u32			vn_weight_sum;

	u32			mf_config;
	u16			e1hov;
	u8			e1hmf;
#define IS_E1HMF(bp)			(bp->e1hmf != 0)

	u8			wol;

	int			rx_ring_size;

	u16			tx_quick_cons_trip_int;
	u16			tx_quick_cons_trip;
	u16			tx_ticks_int;
	u16			tx_ticks;

	u16			rx_quick_cons_trip_int;
	u16			rx_quick_cons_trip;
	u16			rx_ticks_int;
	u16			rx_ticks;
/* Maximal coalescing timeout in us */
#define BNX2X_MAX_COALESCE_TOUT		(0xf0*12)

	u32			lin_cnt;

	int			state;
#define BNX2X_STATE_CLOSED		0
#define BNX2X_STATE_OPENING_WAIT4_LOAD	0x1000
#define BNX2X_STATE_OPENING_WAIT4_PORT	0x2000
#define BNX2X_STATE_OPEN		0x3000
#define BNX2X_STATE_CLOSING_WAIT4_HALT	0x4000
#define BNX2X_STATE_CLOSING_WAIT4_DELETE 0x5000
#define BNX2X_STATE_CLOSING_WAIT4_UNLOAD 0x6000
#define BNX2X_STATE_DIAG		0xe000
#define BNX2X_STATE_ERROR		0xf000

	int			multi_mode;
	int			num_queues;
	int			disable_tpa;
	int			int_mode;

	u32			rx_mode;
#define BNX2X_RX_MODE_NONE		0
#define BNX2X_RX_MODE_NORMAL		1
#define BNX2X_RX_MODE_ALLMULTI		2
#define BNX2X_RX_MODE_PROMISC		3
#define BNX2X_MAX_MULTICAST		64
#define BNX2X_MAX_EMUL_MULTI		16

	u32 			rx_mode_cl_mask;

	dma_addr_t		def_status_blk_mapping;

	struct bnx2x_slowpath	*slowpath;
	dma_addr_t		slowpath_mapping;

	int			dropless_fc;

#ifdef BCM_CNIC
	u32			cnic_flags;
#define BNX2X_CNIC_FLAG_MAC_SET		1

	void			*t1;
	dma_addr_t		t1_mapping;
	void			*t2;
	dma_addr_t		t2_mapping;
	void			*timers;
	dma_addr_t		timers_mapping;
	void			*qm;
	dma_addr_t		qm_mapping;
	struct cnic_ops		*cnic_ops;
	void			*cnic_data;
	u32			cnic_tag;
	struct cnic_eth_dev	cnic_eth_dev;
	struct host_status_block *cnic_sb;
	dma_addr_t		cnic_sb_mapping;
#define CNIC_SB_ID(bp)			BP_L_ID(bp)
	struct eth_spe		*cnic_kwq;
	struct eth_spe		*cnic_kwq_prod;
	struct eth_spe		*cnic_kwq_cons;
	struct eth_spe		*cnic_kwq_last;
	u16			cnic_kwq_pending;
	u16			cnic_spq_pending;
	struct mutex		cnic_mutex;
	u8			iscsi_mac[6];
#endif

	int			dmae_ready;
	/* used to synchronize dmae accesses */
	struct mutex		dmae_mutex;

	/* used to protect the FW mail box */
	struct mutex		fw_mb_mutex;

	/* used to synchronize stats collecting */
	int			stats_state;

	/* used to synchronize statistics handling between concurrent threads */
	spinlock_t		stats_lock;

	/* used by dmae command loader */
	struct dmae_command	stats_dmae;
	int			executer_idx;

	u16			stats_counter;
	struct bnx2x_eth_stats	eth_stats;

	struct z_stream_s	*strm;
	void			*gunzip_buf;
	dma_addr_t		gunzip_mapping;
	int			gunzip_outlen;
#define FW_BUF_SIZE			0x8000
#define GUNZIP_BUF(bp)			(bp->gunzip_buf)
#define GUNZIP_PHYS(bp)			(bp->gunzip_mapping)
#define GUNZIP_OUTLEN(bp)		(bp->gunzip_outlen)

	struct raw_op		*init_ops;
	/* Init blocks offsets inside init_ops */
	u16			*init_ops_offsets;
	/* Data blob - has 32 bit granularity */
	u32			*init_data;
	/* Zipped PRAM blobs - raw data */
	const u8		*tsem_int_table_data;
	const u8		*tsem_pram_data;
	const u8		*usem_int_table_data;
	const u8		*usem_pram_data;
	const u8		*xsem_int_table_data;
	const u8		*xsem_pram_data;
	const u8		*csem_int_table_data;
	const u8		*csem_pram_data;
#define INIT_OPS(bp)			(bp->init_ops)
#define INIT_OPS_OFFSETS(bp)		(bp->init_ops_offsets)
#define INIT_DATA(bp)			(bp->init_data)
#define INIT_TSEM_INT_TABLE_DATA(bp)	(bp->tsem_int_table_data)
#define INIT_TSEM_PRAM_DATA(bp)		(bp->tsem_pram_data)
#define INIT_USEM_INT_TABLE_DATA(bp)	(bp->usem_int_table_data)
#define INIT_USEM_PRAM_DATA(bp)		(bp->usem_pram_data)
#define INIT_XSEM_INT_TABLE_DATA(bp)	(bp->xsem_int_table_data)
#define INIT_XSEM_PRAM_DATA(bp)		(bp->xsem_pram_data)
#define INIT_CSEM_INT_TABLE_DATA(bp)	(bp->csem_int_table_data)
#define INIT_CSEM_PRAM_DATA(bp)		(bp->csem_pram_data)

	char			fw_ver[32];
	const struct firmware	*firmware;
};


#define BNX2X_MAX_QUEUES(bp)	(IS_E1HMF(bp) ? (MAX_CONTEXT/E1HVN_MAX) \
					      : MAX_CONTEXT)
#define BNX2X_NUM_QUEUES(bp)	(bp->num_queues)
#define is_multi(bp)		(BNX2X_NUM_QUEUES(bp) > 1)

#define for_each_queue(bp, var) \
			for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++)
#define for_each_nondefault_queue(bp, var) \
			for (var = 1; var < BNX2X_NUM_QUEUES(bp); var++)


void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32);
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32);
int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port);
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command);
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val);
void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
			       u32 addr, u32 len);
void bnx2x_calc_fc_adv(struct bnx2x *bp);
int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
		  u32 data_hi, u32 data_lo, int common);
void bnx2x_update_coalesce(struct bnx2x *bp);

static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
			   int wait)
{
	u32 val;

	do {
		val = REG_RD(bp, reg);
		if (val == expected)
			break;
		ms -= wait;
		msleep(wait);

	} while (ms > 0);

	return val;
}
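/*
 * Added usage note (not part of the original source): reg_poll() re-reads
 * a register every 'wait' milliseconds for at most 'ms' milliseconds and
 * returns the last value read, e.g.
 *
 *	val = reg_poll(bp, reg_offset, 1, 50, 5);
 *
 * where reg_offset stands for any GRC register offset (placeholder only).
 */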


/* load/unload mode */
#define LOAD_NORMAL			0
#define LOAD_OPEN			1
#define LOAD_DIAG			2
#define UNLOAD_NORMAL			0
#define UNLOAD_CLOSE			1
#define UNLOAD_RECOVERY                 2


/* DMAE command defines */
#define DMAE_CMD_SRC_PCI		0
#define DMAE_CMD_SRC_GRC		DMAE_COMMAND_SRC

#define DMAE_CMD_DST_PCI		(1 << DMAE_COMMAND_DST_SHIFT)
#define DMAE_CMD_DST_GRC		(2 << DMAE_COMMAND_DST_SHIFT)

#define DMAE_CMD_C_DST_PCI		0
#define DMAE_CMD_C_DST_GRC		(1 << DMAE_COMMAND_C_DST_SHIFT)

#define DMAE_CMD_C_ENABLE		DMAE_COMMAND_C_TYPE_ENABLE

#define DMAE_CMD_ENDIANITY_NO_SWAP	(0 << DMAE_COMMAND_ENDIANITY_SHIFT)
#define DMAE_CMD_ENDIANITY_B_SWAP	(1 << DMAE_COMMAND_ENDIANITY_SHIFT)
#define DMAE_CMD_ENDIANITY_DW_SWAP	(2 << DMAE_COMMAND_ENDIANITY_SHIFT)
#define DMAE_CMD_ENDIANITY_B_DW_SWAP	(3 << DMAE_COMMAND_ENDIANITY_SHIFT)

#define DMAE_CMD_PORT_0			0
#define DMAE_CMD_PORT_1			DMAE_COMMAND_PORT

#define DMAE_CMD_SRC_RESET		DMAE_COMMAND_SRC_RESET
#define DMAE_CMD_DST_RESET		DMAE_COMMAND_DST_RESET
#define DMAE_CMD_E1HVN_SHIFT		DMAE_COMMAND_E1HVN_SHIFT

#define DMAE_LEN32_RD_MAX		0x80
#define DMAE_LEN32_WR_MAX(bp)		(CHIP_IS_E1(bp) ? 0x400 : 0x2000)

#define DMAE_COMP_VAL			0xe0d0d0ae

#define MAX_DMAE_C_PER_PORT		8
#define INIT_DMAE_C(bp)			(BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
					 BP_E1HVN(bp))
#define PMF_DMAE_C(bp)			(BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
					 E1HVN_MAX)


/* PCIE link and speed */
#define PCICFG_LINK_WIDTH		0x1f00000
#define PCICFG_LINK_WIDTH_SHIFT		20
#define PCICFG_LINK_SPEED		0xf0000
#define PCICFG_LINK_SPEED_SHIFT		16


#define BNX2X_NUM_TESTS			7

#define BNX2X_PHY_LOOPBACK		0
#define BNX2X_MAC_LOOPBACK		1
#define BNX2X_PHY_LOOPBACK_FAILED	1
#define BNX2X_MAC_LOOPBACK_FAILED	2
#define BNX2X_LOOPBACK_FAILED		(BNX2X_MAC_LOOPBACK_FAILED | \
					 BNX2X_PHY_LOOPBACK_FAILED)


#define STROM_ASSERT_ARRAY_SIZE		50


/* must be used on a CID before placing it on a HW ring */
#define HW_CID(bp, x)			((BP_PORT(bp) << 23) | \
					 (BP_E1HVN(bp) << 17) | (x))

#define SP_DESC_CNT		(BCM_PAGE_SIZE / sizeof(struct eth_spe))
#define MAX_SP_DESC_CNT			(SP_DESC_CNT - 1)


#define BNX2X_BTR			1
#define MAX_SPQ_PENDING			8


/* CMNG constants
   derived from lab experiments, and not from system spec calculations !!! */
#define DEF_MIN_RATE			100
/* resolution of the rate shaping timer - 100 usec */
#define RS_PERIODIC_TIMEOUT_USEC	100
/* resolution of fairness algorithm in usecs -
   coefficient for calculating the actual t fair */
#define T_FAIR_COEF			10000000
/* number of bytes in single QM arbitration cycle -
   coefficient for calculating the fairness timer */
#define QM_ARB_BYTES			40000
#define FAIR_MEM			2


#define ATTN_NIG_FOR_FUNC		(1L << 8)
#define ATTN_SW_TIMER_4_FUNC		(1L << 9)
#define GPIO_2_FUNC			(1L << 10)
#define GPIO_3_FUNC			(1L << 11)
#define GPIO_4_FUNC			(1L << 12)
#define ATTN_GENERAL_ATTN_1		(1L << 13)
#define ATTN_GENERAL_ATTN_2		(1L << 14)
#define ATTN_GENERAL_ATTN_3		(1L << 15)
#define ATTN_GENERAL_ATTN_4		(1L << 13)
#define ATTN_GENERAL_ATTN_5		(1L << 14)
#define ATTN_GENERAL_ATTN_6		(1L << 15)

#define ATTN_HARD_WIRED_MASK		0xff00
#define ATTENTION_ID			4


/* stuff added to make the code fit 80Col */

#define BNX2X_PMF_LINK_ASSERT \
	GENERAL_ATTEN_OFFSET(LINK_SYNC_ATTENTION_BIT_FUNC_0 + BP_FUNC(bp))

#define BNX2X_MC_ASSERT_BITS \
	(GENERAL_ATTEN_OFFSET(TSTORM_FATAL_ASSERT_ATTENTION_BIT) | \
	 GENERAL_ATTEN_OFFSET(USTORM_FATAL_ASSERT_ATTENTION_BIT) | \
	 GENERAL_ATTEN_OFFSET(CSTORM_FATAL_ASSERT_ATTENTION_BIT) | \
	 GENERAL_ATTEN_OFFSET(XSTORM_FATAL_ASSERT_ATTENTION_BIT))

#define BNX2X_MCP_ASSERT \
	GENERAL_ATTEN_OFFSET(MCP_FATAL_ASSERT_ATTENTION_BIT)

#define BNX2X_GRC_TIMEOUT	GENERAL_ATTEN_OFFSET(LATCHED_ATTN_TIMEOUT_GRC)
#define BNX2X_GRC_RSV		(GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCR) | \
				 GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCT) | \
				 GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCN) | \
				 GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCU) | \
				 GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCP) | \
				 GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RSVD_GRC))

#define HW_INTERRUT_ASSERT_SET_0 \
				(AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT | \
				 AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT | \
				 AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT | \
				 AEU_INPUTS_ATTN_BITS_PBF_HW_INTERRUPT)
#define HW_PRTY_ASSERT_SET_0	(AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR | \
				 AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR | \
				 AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR | \
				 AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR |\
				 AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR)
#define HW_INTERRUT_ASSERT_SET_1 \
				(AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT | \
				 AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT | \
				 AEU_INPUTS_ATTN_BITS_XSDM_HW_INTERRUPT | \
				 AEU_INPUTS_ATTN_BITS_XCM_HW_INTERRUPT | \
				 AEU_INPUTS_ATTN_BITS_XSEMI_HW_INTERRUPT | \
				 AEU_INPUTS_ATTN_BITS_USDM_HW_INTERRUPT | \
				 AEU_INPUTS_ATTN_BITS_UCM_HW_INTERRUPT | \
				 AEU_INPUTS_ATTN_BITS_USEMI_HW_INTERRUPT | \
				 AEU_INPUTS_ATTN_BITS_UPB_HW_INTERRUPT | \
				 AEU_INPUTS_ATTN_BITS_CSDM_HW_INTERRUPT | \
				 AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT)
#define HW_PRTY_ASSERT_SET_1	(AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR |\
				 AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR | \
				 AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR | \
				 AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR | \
				 AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR |\
			     AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR |\
				 AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR | \
				 AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR | \
				 AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR | \
				 AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR | \
				 AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR)
#define HW_INTERRUT_ASSERT_SET_2 \
				(AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT | \
				 AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT | \
				 AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT | \
			AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT |\
				 AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT)
#define HW_PRTY_ASSERT_SET_2	(AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR | \
				 AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR | \
			AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR |\
				 AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR | \
				 AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR | \
				 AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR | \
				 AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR)

#define HW_PRTY_ASSERT_SET_3 (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
		AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
		AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \
		AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)

#define RSS_FLAGS(bp) \
		(TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY | \
		 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY | \
		 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY | \
		 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY | \
		 (bp->multi_mode << \
		  TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT))
#define MULTI_MASK			0x7f


#define DEF_USB_FUNC_OFF		(2 + 2*HC_USTORM_DEF_SB_NUM_INDICES)
#define DEF_CSB_FUNC_OFF		(2 + 2*HC_CSTORM_DEF_SB_NUM_INDICES)
#define DEF_XSB_FUNC_OFF		(2 + 2*HC_XSTORM_DEF_SB_NUM_INDICES)
#define DEF_TSB_FUNC_OFF		(2 + 2*HC_TSTORM_DEF_SB_NUM_INDICES)

#define C_DEF_SB_SP_INDEX		HC_INDEX_DEF_C_ETH_SLOW_PATH

#define BNX2X_SP_DSB_INDEX \
(&bp->def_status_blk->c_def_status_block.index_values[C_DEF_SB_SP_INDEX])


#define CAM_IS_INVALID(x) \
(x.target_table_entry.flags == TSTORM_CAM_TARGET_TABLE_ENTRY_ACTION_TYPE)

#define CAM_INVALIDATE(x) \
	(x.target_table_entry.flags = TSTORM_CAM_TARGET_TABLE_ENTRY_ACTION_TYPE)


/* Number of u32 elements in MC hash array */
#define MC_HASH_SIZE			8
#define MC_HASH_OFFSET(bp, i)		(BAR_TSTRORM_INTMEM + \
	TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(BP_FUNC(bp)) + i*4)


#ifndef PXP2_REG_PXP2_INT_STS
#define PXP2_REG_PXP2_INT_STS		PXP2_REG_PXP2_INT_STS_0
#endif

#define BNX2X_VPD_LEN			128
#define VENDOR_ID_LEN			4

#ifdef BNX2X_MAIN
#define BNX2X_EXTERN
#else
#define BNX2X_EXTERN extern
#endif

BNX2X_EXTERN int load_count[3]; /* 0-common, 1-port0, 2-port1 */

/* MISC_REG_RESET_REG - this is here for the hsi to work, don't touch */

extern void bnx2x_set_ethtool_ops(struct net_device *netdev);

void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx);

#endif /* bnx2x.h */
