#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/stringify.h>

#define BNX2X_MAIN
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"


#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1		"bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x-e1h-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

static int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
				" (default is the number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
				"(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

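/* write len32 dwords from host memory at dma_addr to GRC address dst_addr
 * via the DMAE engine; falls back to indirect writes while DMAE is not yet
 * ready, and busy-waits on the slowpath wb_comp word for completion
 */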
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
		   "  using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_lock(&bp->dmae_mutex);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}

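/* read len32 dwords from GRC address src_addr into the slowpath wb_data
 * buffer via the DMAE engine; falls back to indirect register reads while
 * DMAE is not yet ready
 */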
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
		   "  using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

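/* DMAE-write an arbitrarily long buffer by splitting it into chunks of at
 * most DMAE_LEN32_WR_MAX dwords
 */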
void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
			       u32 addr, u32 len)
{
	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
	int offset = 0;

	while (len > dmae_wr_max) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, dmae_wr_max);
		offset += dmae_wr_max * 4;
		len -= dmae_wr_max;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

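/* scan the assert lists of all four storm processors (X/T/C/U) and print
 * every valid entry; returns the number of asserts found
 */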
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

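/* dump the MCP firmware trace from scratchpad memory to the kernel log,
 * starting at the last recorded mark and wrapping back around to it
 */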
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 addr;
	u32 mark, offset;
	__be32 data[9];
	int word;

	if (BP_NOMCP(bp)) {
		BNX2X_ERR("NO MCP - can not dump\n");
		return;
	}

	addr = bp->common.shmem_base - 0x0800 + 4;
	mark = REG_RD(bp, addr);
	mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
	pr_err("begin fw dump (mark 0x%x)\n", mark);

	pr_err("");
	for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	pr_err("end of fw dump\n");
}

void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(0x%x)  def_u_idx(0x%x)  def_x_idx(0x%x)"
		  "  def_t_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)"
		  "  spq_prod_idx(0x%x)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)"
			  "  *rx_bd_cons_sb(0x%x)  rx_comp_prod(0x%x)"
			  "  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)"
			  "  fp_u_idx(0x%x) *sb_u_idx(0x%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)"
			  "  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)"
			  "  *tx_cons_sb(0x%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("     fp_c_idx(0x%x)  *sb_c_idx(0x%x)"
			  "  tx_db_prod(0x%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  fp->tx_db.data.prod);
	}

	/* Rings */
	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

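/* program the HC configuration register for the active interrupt mode
 * (MSI-X, MSI or INTx) and, on E1H, set up the leading/trailing edge
 * attention registers
 */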
void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		offset++;
#endif
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

/* Returns true if the lock was acquired successfully */
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		/* bool function: returning -EINVAL here would read as true */
		return false;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Try to acquire the lock */
	REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit)
		return true;

	DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
	return false;
}


#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif

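/* handle a ramrod completion arriving on the fastpath CQE ring: advance the
 * per-queue or global state machine and return the SPQ credit
 */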
void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d)  "
				  "fp[%d] state is %x\n",
				  command, fp->index, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

#ifdef BCM_CNIC
	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
		bnx2x_cnic_cfc_comp(bp, cid);
		break;
#endif

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

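/* INTx/MSI interrupt handler: ack the IGU, schedule NAPI for every fastpath
 * whose status bit is set, pass CNIC bits to the CNIC handler and queue
 * sp_task for slowpath (bit 0) events
 */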
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << fp->sb_id;
		if (status & mask) {
			/* Handle Rx and Tx according to SB id */
			prefetch(fp->rx_cons_sb);
			prefetch(&fp->status_blk->u_status_block.
						status_block_index);
			prefetch(fp->tx_cons_sb);
			prefetch(&fp->status_blk->c_status_block.
						status_block_index);
			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
			status &= ~mask;
		}
	}

#ifdef BCM_CNIC
	mask = 0x2 << CNIC_SB_ID(bp);
	if (status & (mask | 0x1)) {
		struct cnic_ops *c_ops = NULL;

		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();

		status &= ~mask;
	}
#endif

	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	return IRQ_HANDLED;
}

/* end of fast path */


/* Link */

/*
 * General service functions
 */

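/* acquire a HW resource lock through the per-function MISC driver control
 * register, retrying for up to 5 seconds
 */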
int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 seconds, polling every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}

int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}


int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);

	return value;
}

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}

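/* translate the negotiated IEEE pause settings in link_vars into the
 * ethtool ADVERTISED_* pause flags
 */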
void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;

	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}


u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG)
			bp->link_params.loopback_mode = LOOPBACK_XGXS_10;

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
	return -EINVAL;
}

void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not set link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not reset link\n");
}

u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc = 0;

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not test link\n");

	return rc;
}

static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
	u32 r_param = bp->link_vars.line_speed / 8;
	u32 fair_periodic_timeout_usec;
	u32 t_fair;

	memset(&(bp->cmng.rs_vars), 0,
	       sizeof(struct rate_shaping_vars_per_port));
	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

	/* this is the threshold below which no timer arming will occur.
	   The 1.25 coefficient makes the threshold a little bigger than
	   the real time, to compensate for timer inaccuracy */
	bp->cmng.rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	/* resolution of fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
	/* for 10G it is 1000usec. for 1G it is 10000usec. */
	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

	/* this is the threshold below which we won't arm the timer anymore */
	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

	/* we multiply by 1e3/8 to get bytes/msec.
	   We don't want the credits to pass a credit
	   of the t_fair*FAIR_MEM (algorithm resolution) */
	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
	/* since each tick is 4 usec */
	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}

/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.
   Returns:
     sum of vn_min_rates.
       or
     0 - if all the min_rates are 0.
     In the latter case the fairness algorithm should be deactivated.
     If not all min_rates are zero then those that are zeroes will be set to 1.
 */
static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
{
	int all_zero = 1;
	int port = BP_PORT(bp);
	int vn;

	bp->vn_weight_sum = 0;
	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		int func = 2*vn + port;
		u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;

		/* Skip hidden vns */
		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
			continue;

		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		else
			all_zero = 0;

		bp->vn_weight_sum += vn_min_rate;
	}

	/* ... only if all min rates are zeros - disable fairness */
	if (all_zero) {
		bp->cmng.flags.cmng_enables &=
					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
		   "  fairness will be disabled\n");
	} else
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
}

static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}
	DP(NETIF_MSG_IFUP,
	   "func %d: vn_min_rate %d  vn_max_rate %d  vn_weight_sum %d\n",
	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

	if (bp->vn_weight_sum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   vn_weight_sum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
		   than zero */
		m_fair_vn.vn_credit_delta =
			max_t(u32, (vn_min_rate * (T_FAIR_COEF /
						   (8 * bp->vn_weight_sum))),
			      (bp->cmng.fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
		   m_fair_vn.vn_credit_delta);
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}


/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	u32 prev_link_status = bp->link_vars.link_status;
	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up) {

		/* dropless flow control */
		if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
			int port = BP_PORT(bp);
			u32 pause_enabled = 0;

			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
				pause_enabled = 1;

			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
			       pause_enabled);
		}

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if (bp->state == BNX2X_STATE_OPEN)
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status only if link status actually changed */
	if (prev_link_status != bp->link_vars.link_status)
		bnx2x_link_report(bp);

	if (IS_E1HMF(bp)) {
		int port = BP_PORT(bp);
		int func;
		int vn;

		/* Set the attention towards other drivers on the same port */
		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		if (bp->link_vars.link_up) {
			int i;

			/* Init rate shaping and fairness contexts */
			bnx2x_init_port_minmax(bp);

			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port);

			/* Store it to internal memory */
			for (i = 0;
			     i < sizeof(struct cmng_struct_per_port) / 4; i++)
				REG_WR(bp, BAR_XSTRORM_INTMEM +
				  XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
				       ((u32 *)(&bp->cmng))[i]);
		}
	}
}

void bnx2x__link_status_update(struct bnx2x *bp)
{
	if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_calc_vn_weight_sum(bp);

	/* indicate link status */
	bnx2x_link_report(bp);
}

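/* take over Port Management Function duty: enable nig attention for this
 * function and kick the statistics state machine
 */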
static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

/* end of Link */

/* slow path */

/*
 * General service functions
 */

/* send the MCP a request, block until there is a reply */
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
{
	int func = BP_FUNC(bp);
	u32 seq = ++bp->fw_seq;
	u32 rc = 0;
	u32 cnt = 1;
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	mutex_lock(&bp->fw_mb_mutex);
	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	do {
		/* let the FW do its magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);

		/* Give the FW up to 5 seconds (500*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
		rc &= FW_MSG_CODE_MASK;
	else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}
	mutex_unlock(&bp->fw_mb_mutex);

	return rc;
}

static void bnx2x_e1h_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	netif_tx_disable(bp->dev);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

	netif_carrier_off(bp->dev);
}

static void bnx2x_e1h_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);

	/* Tx queues should only be re-enabled */
	netif_tx_wake_all_queues(bp->dev);

	/*
	 * Should not call netif_carrier_on since it will be called if the link
	 * is up when checking for link state
	 */
}

static void bnx2x_update_min_max(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int vn, i;

	/* Init rate shaping and fairness contexts */
	bnx2x_init_port_minmax(bp);

	bnx2x_calc_vn_weight_sum(bp);

	for (vn = VN_0; vn < E1HVN_MAX; vn++)
		bnx2x_init_vn_minmax(bp, 2*vn + port);

	if (bp->port.pmf) {
		int func;

		/* Set the attention towards other drivers on the same port */
		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		/* Store it to internal memory */
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
			       ((u32 *)(&bp->cmng))[i]);
	}
}

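/* handle a Dynamic Configuration Change notification from the MCP:
 * enable/disable this PF and/or refresh the bandwidth min/max configuration,
 * then report the result back to the MCP
 */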
static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
{
	DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);

	if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {

		/*
		 * This is the only place besides the function initialization
		 * where the bp->flags can change so it is done without any
		 * locks
		 */
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
			bp->flags |= MF_FUNC_DIS;

			bnx2x_e1h_disable(bp);
		} else {
			DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
			bp->flags &= ~MF_FUNC_DIS;

			bnx2x_e1h_enable(bp);
		}
		dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
	}
	if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {

		bnx2x_update_min_max(bp);
		dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
	}

	/* Report results to MCP */
	if (dcc_event)
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
	else
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
}

/* must be called under the spq lock */
static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
{
	struct eth_spe *next_spe = bp->spq_prod_bd;

	if (bp->spq_prod_bd == bp->spq_last_bd) {
		bp->spq_prod_bd = bp->spq;
		bp->spq_prod_idx = 0;
		DP(NETIF_MSG_TIMER, "end of spq\n");
	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}
	return next_spe;
}

/* must be called under the spq lock */
static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	/* Make sure that BD data is updated before writing the producer */
	wmb();

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
	mmiowb();
}

/* the slow path queue is odd since completions arrive on the fastpath ring */
int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
			 u32 data_hi, u32 data_lo, int common)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	if (!bp->spq_left) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	spe = bnx2x_sp_get_next(bp);

	/* CID needs port number to be encoded in it */
	spe->hdr.conn_and_cmd_data =
			cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
				    HW_CID(bp, cid));
	spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
	if (common)
		spe->hdr.type |=
			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));

	spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
	spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);

	bp->spq_left--;

	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE[%x] (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
	   bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
	   (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);

	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
	return 0;
}

/* acquire split MCP access lock register */
static int bnx2x_acquire_alr(struct bnx2x *bp)
{
	u32 j, val;
	int rc = 0;

	might_sleep();
	for (j = 0; j < 1000; j++) {
		val = (1UL << 31);
		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
		if (val & (1L << 31))
			break;

		msleep(5);
	}
	if (!(val & (1L << 31))) {
		BNX2X_ERR("Cannot acquire MCP access lock register\n");
		rc = -EBUSY;
	}

	return rc;
}

/* release split MCP access lock register */
static void bnx2x_release_alr(struct bnx2x *bp)
{
	REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
}

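/* latch the default status block indices written by the chip; returns a
 * bitmask of the indices that changed since the last call
 */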
1815static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
1816{
1817	struct host_def_status_block *def_sb = bp->def_status_blk;
1818	u16 rc = 0;
1819
1820	barrier(); /* status block is written to by the chip */
1821	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
1822		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
1823		rc |= 1;
1824	}
1825	if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
1826		bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
1827		rc |= 2;
1828	}
1829	if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
1830		bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
1831		rc |= 4;
1832	}
1833	if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
1834		bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
1835		rc |= 8;
1836	}
1837	if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
1838		bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
1839		rc |= 16;
1840	}
1841	return rc;
1842}
1843
1844/*
1845 * slow path service functions
1846 */
1847
1848static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
1849{
1850	int port = BP_PORT(bp);
1851	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
1852		       COMMAND_REG_ATTN_BITS_SET);
1853	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
1854			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
1855	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
1856				       NIG_REG_MASK_INTERRUPT_PORT0;
1857	u32 aeu_mask;
1858	u32 nig_mask = 0;
1859
1860	if (bp->attn_state & asserted)
1861		BNX2X_ERR("IGU ERROR\n");
1862
1863	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
1864	aeu_mask = REG_RD(bp, aeu_addr);
1865
1866	DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
1867	   aeu_mask, asserted);
1868	aeu_mask &= ~(asserted & 0x3ff);
1869	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
1870
1871	REG_WR(bp, aeu_addr, aeu_mask);
1872	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
1873
1874	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
1875	bp->attn_state |= asserted;
1876	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
1877
1878	if (asserted & ATTN_HARD_WIRED_MASK) {
1879		if (asserted & ATTN_NIG_FOR_FUNC) {
1880
1881			bnx2x_acquire_phy_lock(bp);
1882
1883			/* save nig interrupt mask */
1884			nig_mask = REG_RD(bp, nig_int_mask_addr);
1885			REG_WR(bp, nig_int_mask_addr, 0);
1886
1887			bnx2x_link_attn(bp);
1888
1889			/* handle unicore attn? */
1890		}
1891		if (asserted & ATTN_SW_TIMER_4_FUNC)
1892			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
1893
1894		if (asserted & GPIO_2_FUNC)
1895			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
1896
1897		if (asserted & GPIO_3_FUNC)
1898			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
1899
1900		if (asserted & GPIO_4_FUNC)
1901			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
1902
1903		if (port == 0) {
1904			if (asserted & ATTN_GENERAL_ATTN_1) {
1905				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
1906				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
1907			}
1908			if (asserted & ATTN_GENERAL_ATTN_2) {
1909				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
1910				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
1911			}
1912			if (asserted & ATTN_GENERAL_ATTN_3) {
1913				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
1914				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
1915			}
1916		} else {
1917			if (asserted & ATTN_GENERAL_ATTN_4) {
1918				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
1919				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
1920			}
1921			if (asserted & ATTN_GENERAL_ATTN_5) {
1922				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
1923				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
1924			}
1925			if (asserted & ATTN_GENERAL_ATTN_6) {
1926				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
1927				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
1928			}
1929		}
1930
1931	} /* if hardwired */
1932
1933	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
1934	   asserted, hc_addr);
1935	REG_WR(bp, hc_addr, asserted);
1936
1937	/* now set back the mask */
1938	if (asserted & ATTN_NIG_FOR_FUNC) {
1939		REG_WR(bp, nig_int_mask_addr, nig_mask);
1940		bnx2x_release_phy_lock(bp);
1941	}
1942}
1943
1944static inline void bnx2x_fan_failure(struct bnx2x *bp)
1945{
1946	int port = BP_PORT(bp);
1947
1948	/* mark the failure */
1949	bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
1950	bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
1951	SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
1952		 bp->link_params.ext_phy_config);
1953
1954	/* log the failure */
1955	netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
1956	       " the driver to shut down the card to prevent permanent"
1957	       " damage. Please contact OEM Support for assistance\n");
1958}
1959
1960static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
1961{
1962	int port = BP_PORT(bp);
1963	int reg_offset;
1964	u32 val, swap_val, swap_override;
1965
1966	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
1967			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
1968
1969	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
1970
1971		val = REG_RD(bp, reg_offset);
1972		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
1973		REG_WR(bp, reg_offset, val);
1974
1975		BNX2X_ERR("SPIO5 hw attention\n");
1976
1977		/* Fan failure attention */
1978		switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
1979		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
1980			/* Low power mode is controlled by GPIO 2 */
1981			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
1982				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
1983			/* The PHY reset is controlled by GPIO 1 */
1984			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
1985				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
1986			break;
1987
1988		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
1989			/* The PHY reset is controlled by GPIO 1 */
1990			/* fake the port number to cancel the swap done in
1991			   set_gpio() */
1992			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
1993			swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
1994			port = (swap_val && swap_override) ^ 1;
1995			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
1996				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
1997			break;
1998
1999		default:
2000			break;
2001		}
2002		bnx2x_fan_failure(bp);
2003	}
2004
2005	if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2006		    AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2007		bnx2x_acquire_phy_lock(bp);
2008		bnx2x_handle_module_detect_int(&bp->link_params);
2009		bnx2x_release_phy_lock(bp);
2010	}
2011
2012	if (attn & HW_INTERRUT_ASSERT_SET_0) {
2013
2014		val = REG_RD(bp, reg_offset);
2015		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2016		REG_WR(bp, reg_offset, val);
2017
2018		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2019			  (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
2020		bnx2x_panic();
2021	}
2022}
2023
2024static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2025{
2026	u32 val;
2027
2028	if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2029
2030		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2031		BNX2X_ERR("DB hw attention 0x%x\n", val);
2032		/* DORQ discard attention */
2033		if (val & 0x2)
2034			BNX2X_ERR("FATAL error from DORQ\n");
2035	}
2036
2037	if (attn & HW_INTERRUT_ASSERT_SET_1) {
2038
2039		int port = BP_PORT(bp);
2040		int reg_offset;
2041
2042		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2043				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2044
2045		val = REG_RD(bp, reg_offset);
2046		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2047		REG_WR(bp, reg_offset, val);
2048
2049		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2050			  (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
2051		bnx2x_panic();
2052	}
2053}
2054
2055static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2056{
2057	u32 val;
2058
2059	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2060
2061		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2062		BNX2X_ERR("CFC hw attention 0x%x\n", val);
2063		/* CFC error attention */
2064		if (val & 0x2)
2065			BNX2X_ERR("FATAL error from CFC\n");
2066	}
2067
2068	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2069
2070		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2071		BNX2X_ERR("PXP hw attention 0x%x\n", val);
2072		/* RQ_USDMDP_FIFO_OVERFLOW */
2073		if (val & 0x18000)
2074			BNX2X_ERR("FATAL error from PXP\n");
2075	}
2076
2077	if (attn & HW_INTERRUT_ASSERT_SET_2) {
2078
2079		int port = BP_PORT(bp);
2080		int reg_offset;
2081
2082		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2083				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2084
2085		val = REG_RD(bp, reg_offset);
2086		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2087		REG_WR(bp, reg_offset, val);
2088
2089		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2090			  (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
2091		bnx2x_panic();
2092	}
2093}
2094
2095static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2096{
2097	u32 val;
2098
2099	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2100
2101		if (attn & BNX2X_PMF_LINK_ASSERT) {
2102			int func = BP_FUNC(bp);
2103
2104			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2105			bp->mf_config = SHMEM_RD(bp,
2106					   mf_cfg.func_mf_config[func].config);
2107			val = SHMEM_RD(bp, func_mb[func].drv_status);
2108			if (val & DRV_STATUS_DCC_EVENT_MASK)
2109				bnx2x_dcc_event(bp,
2110					    (val & DRV_STATUS_DCC_EVENT_MASK));
2111			bnx2x__link_status_update(bp);
2112			if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
2113				bnx2x_pmf_update(bp);
2114
2115		} else if (attn & BNX2X_MC_ASSERT_BITS) {
2116
2117			BNX2X_ERR("MC assert!\n");
2118			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2119			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2120			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2121			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2122			bnx2x_panic();
2123
2124		} else if (attn & BNX2X_MCP_ASSERT) {
2125
2126			BNX2X_ERR("MCP assert!\n");
2127			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2128			bnx2x_fw_dump(bp);
2129
2130		} else
2131			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2132	}
2133
2134	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2135		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2136		if (attn & BNX2X_GRC_TIMEOUT) {
2137			val = CHIP_IS_E1H(bp) ?
2138				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2139			BNX2X_ERR("GRC time-out 0x%08x\n", val);
2140		}
2141		if (attn & BNX2X_GRC_RSV) {
2142			val = CHIP_IS_E1H(bp) ?
2143				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2144			BNX2X_ERR("GRC reserved 0x%08x\n", val);
2145		}
2146		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2147	}
2148}
2149
2150#define BNX2X_MISC_GEN_REG      MISC_REG_GENERIC_POR_1
2151#define LOAD_COUNTER_BITS	16 /* Number of bits for load counter */
2152#define LOAD_COUNTER_MASK	(((u32)0x1 << LOAD_COUNTER_BITS) - 1)
2153#define RESET_DONE_FLAG_MASK	(~LOAD_COUNTER_MASK)
2154#define RESET_DONE_FLAG_SHIFT	LOAD_COUNTER_BITS
2155#define CHIP_PARITY_SUPPORTED(bp)   (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
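
/*
 * Layout of the BNX2X_MISC_GEN_REG scratch register implied by the masks
 * above:
 *
 *	bits 15:0  - load counter, one tick per function that loaded
 *	bits 31:16 - RESET_DONE_FLAG_MASK; in practice only bit 16
 *		     (1 << RESET_DONE_FLAG_SHIFT) is ever set, marking
 *		     "reset in progress"
 *
 * bnx2x_reset_is_done() below reports true only while those upper bits are
 * all clear.
 */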
2156/*
2157 * should be run under rtnl lock
2158 */
2159static inline void bnx2x_set_reset_done(struct bnx2x *bp)
2160{
2161	u32 val	= REG_RD(bp, BNX2X_MISC_GEN_REG);
2162	val &= ~(1 << RESET_DONE_FLAG_SHIFT);
2163	REG_WR(bp, BNX2X_MISC_GEN_REG, val);
2164	barrier();
2165	mmiowb();
2166}
2167
2168/*
2169 * should be run under rtnl lock
2170 */
2171static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
2172{
2173	u32 val	= REG_RD(bp, BNX2X_MISC_GEN_REG);
2174	val |= (1 << RESET_DONE_FLAG_SHIFT);
2175	REG_WR(bp, BNX2X_MISC_GEN_REG, val);
2176	barrier();
2177	mmiowb();
2178}
2179
2180/*
2181 * should be run under rtnl lock
2182 */
2183bool bnx2x_reset_is_done(struct bnx2x *bp)
2184{
2185	u32 val	= REG_RD(bp, BNX2X_MISC_GEN_REG);
2186	DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
2187	return (val & RESET_DONE_FLAG_MASK) ? false : true;
2188}
2189
2190/*
2191 * should be run under rtnl lock
2192 */
2193inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
2194{
2195	u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2196
2197	DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
2198
2199	val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
2200	REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
2201	barrier();
2202	mmiowb();
2203}
2204
2205/*
2206 * should be run under rtnl lock
2207 */
2208u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
2209{
2210	u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2211
2212	DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
2213
2214	val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
2215	REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
2216	barrier();
2217	mmiowb();
2218
2219	return val1;
2220}
2221
2222/*
2223 * should be run under rtnl lock
2224 */
2225static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
2226{
2227	return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
2228}
2229
2230static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
2231{
2232	u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2233	REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
2234}
2235
2236static inline void _print_next_block(int idx, const char *blk)
2237{
2238	if (idx)
2239		pr_cont(", ");
2240	pr_cont("%s", blk);
2241}
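
/*
 * _print_next_block() relies on pr_cont(), so the block names join the
 * KERN_ERR line started in bnx2x_parity_attn() below.  A hypothetical
 * two-block failure would print as:
 *
 *	eth0: Parity errors detected in blocks: BRB, PARSER
 */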
2242
2243static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
2244{
2245	int i = 0;
2246	u32 cur_bit = 0;
2247	for (i = 0; sig; i++) {
2248		cur_bit = ((u32)0x1 << i);
2249		if (sig & cur_bit) {
2250			switch (cur_bit) {
2251			case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
2252				_print_next_block(par_num++, "BRB");
2253				break;
2254			case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
2255				_print_next_block(par_num++, "PARSER");
2256				break;
2257			case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
2258				_print_next_block(par_num++, "TSDM");
2259				break;
2260			case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
2261				_print_next_block(par_num++, "SEARCHER");
2262				break;
2263			case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
2264				_print_next_block(par_num++, "TSEMI");
2265				break;
2266			}
2267
2268			/* Clear the bit */
2269			sig &= ~cur_bit;
2270		}
2271	}
2272
2273	return par_num;
2274}
2275
2276static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
2277{
2278	int i = 0;
2279	u32 cur_bit = 0;
2280	for (i = 0; sig; i++) {
2281		cur_bit = ((u32)0x1 << i);
2282		if (sig & cur_bit) {
2283			switch (cur_bit) {
2284			case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
2285				_print_next_block(par_num++, "PBCLIENT");
2286				break;
2287			case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
2288				_print_next_block(par_num++, "QM");
2289				break;
2290			case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
2291				_print_next_block(par_num++, "XSDM");
2292				break;
2293			case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
2294				_print_next_block(par_num++, "XSEMI");
2295				break;
2296			case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
2297				_print_next_block(par_num++, "DOORBELLQ");
2298				break;
2299			case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
2300				_print_next_block(par_num++, "VAUX PCI CORE");
2301				break;
2302			case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
2303				_print_next_block(par_num++, "DEBUG");
2304				break;
2305			case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
2306				_print_next_block(par_num++, "USDM");
2307				break;
2308			case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
2309				_print_next_block(par_num++, "USEMI");
2310				break;
2311			case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
2312				_print_next_block(par_num++, "UPB");
2313				break;
2314			case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
2315				_print_next_block(par_num++, "CSDM");
2316				break;
2317			}
2318
2319			/* Clear the bit */
2320			sig &= ~cur_bit;
2321		}
2322	}
2323
2324	return par_num;
2325}
2326
2327static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
2328{
2329	int i = 0;
2330	u32 cur_bit = 0;
2331	for (i = 0; sig; i++) {
2332		cur_bit = ((u32)0x1 << i);
2333		if (sig & cur_bit) {
2334			switch (cur_bit) {
2335			case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
2336				_print_next_block(par_num++, "CSEMI");
2337				break;
2338			case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
2339				_print_next_block(par_num++, "PXP");
2340				break;
2341			case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
2342				_print_next_block(par_num++,
2343					"PXPPCICLOCKCLIENT");
2344				break;
2345			case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
2346				_print_next_block(par_num++, "CFC");
2347				break;
2348			case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
2349				_print_next_block(par_num++, "CDU");
2350				break;
2351			case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
2352				_print_next_block(par_num++, "IGU");
2353				break;
2354			case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
2355				_print_next_block(par_num++, "MISC");
2356				break;
2357			}
2358
2359			/* Clear the bit */
2360			sig &= ~cur_bit;
2361		}
2362	}
2363
2364	return par_num;
2365}
2366
2367static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
2368{
2369	int i = 0;
2370	u32 cur_bit = 0;
2371	for (i = 0; sig; i++) {
2372		cur_bit = ((u32)0x1 << i);
2373		if (sig & cur_bit) {
2374			switch (cur_bit) {
2375			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
2376				_print_next_block(par_num++, "MCP ROM");
2377				break;
2378			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
2379				_print_next_block(par_num++, "MCP UMP RX");
2380				break;
2381			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
2382				_print_next_block(par_num++, "MCP UMP TX");
2383				break;
2384			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
2385				_print_next_block(par_num++, "MCP SCPAD");
2386				break;
2387			}
2388
2389			/* Clear the bit */
2390			sig &= ~cur_bit;
2391		}
2392	}
2393
2394	return par_num;
2395}
2396
2397static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
2398				     u32 sig2, u32 sig3)
2399{
2400	if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
2401	    (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
2402		int par_num = 0;
2403		DP(NETIF_MSG_HW, "Parity error: HW block parity attention: "
2404			"[0]:0x%08x [1]:0x%08x "
2405			"[2]:0x%08x [3]:0x%08x\n",
2406			  sig0 & HW_PRTY_ASSERT_SET_0,
2407			  sig1 & HW_PRTY_ASSERT_SET_1,
2408			  sig2 & HW_PRTY_ASSERT_SET_2,
2409			  sig3 & HW_PRTY_ASSERT_SET_3);
2410		printk(KERN_ERR "%s: Parity errors detected in blocks: ",
2411		       bp->dev->name);
2412		par_num = bnx2x_print_blocks_with_parity0(
2413			sig0 & HW_PRTY_ASSERT_SET_0, par_num);
2414		par_num = bnx2x_print_blocks_with_parity1(
2415			sig1 & HW_PRTY_ASSERT_SET_1, par_num);
2416		par_num = bnx2x_print_blocks_with_parity2(
2417			sig2 & HW_PRTY_ASSERT_SET_2, par_num);
2418		par_num = bnx2x_print_blocks_with_parity3(
2419			sig3 & HW_PRTY_ASSERT_SET_3, par_num);
2420		pr_cont("\n");
2421		return true;
2422	} else
2423		return false;
2424}
2425
2426bool bnx2x_chk_parity_attn(struct bnx2x *bp)
2427{
2428	struct attn_route attn;
2429	int port = BP_PORT(bp);
2430
2431	attn.sig[0] = REG_RD(bp,
2432		MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
2433			     port*4);
2434	attn.sig[1] = REG_RD(bp,
2435		MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
2436			     port*4);
2437	attn.sig[2] = REG_RD(bp,
2438		MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
2439			     port*4);
2440	attn.sig[3] = REG_RD(bp,
2441		MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
2442			     port*4);
2443
2444	return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
2445					attn.sig[3]);
2446}
2447
2448static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2449{
2450	struct attn_route attn, *group_mask;
2451	int port = BP_PORT(bp);
2452	int index;
2453	u32 reg_addr;
2454	u32 val;
2455	u32 aeu_mask;
2456
2457	/* need to take HW lock because MCP or other port might also
2458	   try to handle this event */
2459	bnx2x_acquire_alr(bp);
2460
2461	if (bnx2x_chk_parity_attn(bp)) {
2462		bp->recovery_state = BNX2X_RECOVERY_INIT;
2463		bnx2x_set_reset_in_progress(bp);
2464		schedule_delayed_work(&bp->reset_task, 0);
2465		/* Disable HW interrupts */
2466		bnx2x_int_disable(bp);
2467		bnx2x_release_alr(bp);
2468		/* In case of parity errors don't handle attentions so that
2469		 * the other function will also "see" the parity errors.
2470		 */
2471		return;
2472	}
2473
2474	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2475	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2476	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2477	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2478	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2479	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2480
2481	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2482		if (deasserted & (1 << index)) {
2483			group_mask = &bp->attn_group[index];
2484
2485			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2486			   index, group_mask->sig[0], group_mask->sig[1],
2487			   group_mask->sig[2], group_mask->sig[3]);
2488
2489			bnx2x_attn_int_deasserted3(bp,
2490					attn.sig[3] & group_mask->sig[3]);
2491			bnx2x_attn_int_deasserted1(bp,
2492					attn.sig[1] & group_mask->sig[1]);
2493			bnx2x_attn_int_deasserted2(bp,
2494					attn.sig[2] & group_mask->sig[2]);
2495			bnx2x_attn_int_deasserted0(bp,
2496					attn.sig[0] & group_mask->sig[0]);
2497		}
2498	}
2499
2500	bnx2x_release_alr(bp);
2501
2502	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2503
2504	val = ~deasserted;
2505	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2506	   val, reg_addr);
2507	REG_WR(bp, reg_addr, val);
2508
2509	if (~bp->attn_state & deasserted)
2510		BNX2X_ERR("IGU ERROR\n");
2511
2512	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2513			  MISC_REG_AEU_MASK_ATTN_FUNC_0;
2514
2515	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2516	aeu_mask = REG_RD(bp, reg_addr);
2517
2518	DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
2519	   aeu_mask, deasserted);
2520	aeu_mask |= (deasserted & 0x3ff);
2521	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2522
2523	REG_WR(bp, reg_addr, aeu_mask);
2524	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2525
2526	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2527	bp->attn_state &= ~deasserted;
2528	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2529}
2530
2531static void bnx2x_attn_int(struct bnx2x *bp)
2532{
2533	/* read local copy of bits */
2534	u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2535								attn_bits);
2536	u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2537								attn_bits_ack);
2538	u32 attn_state = bp->attn_state;
2539
2540	/* look for changed bits */
2541	u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
2542	u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
2543
2544	DP(NETIF_MSG_HW,
2545	   "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
2546	   attn_bits, attn_ack, asserted, deasserted);
2547
2548	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2549		BNX2X_ERR("BAD attention state\n");
2550
2551	/* handle bits that were raised */
2552	if (asserted)
2553		bnx2x_attn_int_asserted(bp, asserted);
2554
2555	if (deasserted)
2556		bnx2x_attn_int_deasserted(bp, deasserted);
2557}
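
/*
 * The assert/deassert derivation above is a per-bit, three-way compare:
 *
 *	attn_bits  attn_ack  attn_state -> meaning
 *	    1         0          0         newly asserted
 *	    0         1          1         newly deasserted
 *
 * The "BAD attention state" check fires when the chip reports a line as
 * settled (attn_bits == attn_ack) while the driver's cached attn_state
 * disagrees.
 */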
2558
2559static void bnx2x_sp_task(struct work_struct *work)
2560{
2561	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2562	u16 status;
2563
2564	/* Return here if interrupt is disabled */
2565	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2566		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2567		return;
2568	}
2569
2570	status = bnx2x_update_dsb_idx(bp);
2571/*	if (status == 0)				     */
2572/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */
2573
2574	DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
2575
2576	/* HW attentions */
2577	if (status & 0x1) {
2578		bnx2x_attn_int(bp);
2579		status &= ~0x1;
2580	}
2581
2582	/* CStorm events: STAT_QUERY */
2583	if (status & 0x2) {
2584		DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n");
2585		status &= ~0x2;
2586	}
2587
2588	if (unlikely(status))
2589		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
2590		   status);
2591
2592	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2593		     IGU_INT_NOP, 1);
2594	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2595		     IGU_INT_NOP, 1);
2596	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2597		     IGU_INT_NOP, 1);
2598	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2599		     IGU_INT_NOP, 1);
2600	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2601		     IGU_INT_ENABLE, 1);
2602}
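
/*
 * Note the ack ordering above: the first four bnx2x_ack_sb() calls update
 * consumer indices with IGU_INT_NOP, and only the final TSTORM ack uses
 * IGU_INT_ENABLE.  That last ack re-enables the slow-path interrupt line
 * that bnx2x_msix_sp_int() below disables before queueing this work item.
 */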
2603
2604irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2605{
2606	struct net_device *dev = dev_instance;
2607	struct bnx2x *bp = netdev_priv(dev);
2608
2609	/* Return here if interrupt is disabled */
2610	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2611		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2612		return IRQ_HANDLED;
2613	}
2614
2615	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
2616
2617#ifdef BNX2X_STOP_ON_ERROR
2618	if (unlikely(bp->panic))
2619		return IRQ_HANDLED;
2620#endif
2621
2622#ifdef BCM_CNIC
2623	{
2624		struct cnic_ops *c_ops;
2625
2626		rcu_read_lock();
2627		c_ops = rcu_dereference(bp->cnic_ops);
2628		if (c_ops)
2629			c_ops->cnic_handler(bp->cnic_data, NULL);
2630		rcu_read_unlock();
2631	}
2632#endif
2633	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2634
2635	return IRQ_HANDLED;
2636}
2637
2638/* end of slow path */
2639
2640static void bnx2x_timer(unsigned long data)
2641{
2642	struct bnx2x *bp = (struct bnx2x *) data;
2643
2644	if (!netif_running(bp->dev))
2645		return;
2646
2647	if (atomic_read(&bp->intr_sem) != 0)
2648		goto timer_restart;
2649
2650	if (poll) {
2651		struct bnx2x_fastpath *fp = &bp->fp[0];
2652		int rc;
2653
2654		bnx2x_tx_int(fp);
2655		rc = bnx2x_rx_int(fp, 1000);
2656	}
2657
2658	if (!BP_NOMCP(bp)) {
2659		int func = BP_FUNC(bp);
2660		u32 drv_pulse;
2661		u32 mcp_pulse;
2662
2663		++bp->fw_drv_pulse_wr_seq;
2664		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
2665		/* TBD - add SYSTEM_TIME */
2666		drv_pulse = bp->fw_drv_pulse_wr_seq;
2667		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
2668
2669		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
2670			     MCP_PULSE_SEQ_MASK);
2671		/* The delta between driver pulse and mcp response
2672		 * should be 1 (before mcp response) or 0 (after mcp response)
2673		 */
2674		if ((drv_pulse != mcp_pulse) &&
2675		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
2676			/* someone lost a heartbeat... */
2677			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
2678				  drv_pulse, mcp_pulse);
2679		}
2680	}
2681
2682	if (bp->state == BNX2X_STATE_OPEN)
2683		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
2684
2685timer_restart:
2686	mod_timer(&bp->timer, jiffies + bp->current_interval);
2687}
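
/*
 * Heartbeat example with hypothetical sequence numbers: the driver writes
 * drv_pulse = 0x12 and the MCP echoes mcp_pulse = 0x12 once it has seen it.
 * Sampled in between, mcp_pulse may still read 0x11, so deltas of both 0
 * and 1 (modulo MCP_PULSE_SEQ_MASK) are accepted; anything else means a
 * missed beat and is logged above.
 */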
2688
2689/* end of Statistics */
2690
2691/* nic init */
2692
2693/*
2694 * nic init service functions
2695 */
2696
2697static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
2698{
2699	int port = BP_PORT(bp);
2700
2701	/* "CSTORM" */
2702	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2703			CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
2704			CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
2705	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2706			CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
2707			CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
2708}
2709
2710void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
2711			  dma_addr_t mapping, int sb_id)
2712{
2713	int port = BP_PORT(bp);
2714	int func = BP_FUNC(bp);
2715	int index;
2716	u64 section;
2717
2718	/* USTORM */
2719	section = ((u64)mapping) + offsetof(struct host_status_block,
2720					    u_status_block);
2721	sb->u_status_block.status_block_id = sb_id;
2722
2723	REG_WR(bp, BAR_CSTRORM_INTMEM +
2724	       CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
2725	REG_WR(bp, BAR_CSTRORM_INTMEM +
2726	       ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
2727	       U64_HI(section));
2728	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
2729		CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
2730
2731	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
2732		REG_WR16(bp, BAR_CSTRORM_INTMEM +
2733			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
2734
2735	/* CSTORM */
2736	section = ((u64)mapping) + offsetof(struct host_status_block,
2737					    c_status_block);
2738	sb->c_status_block.status_block_id = sb_id;
2739
2740	REG_WR(bp, BAR_CSTRORM_INTMEM +
2741	       CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
2742	REG_WR(bp, BAR_CSTRORM_INTMEM +
2743	       ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
2744	       U64_HI(section));
2745	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
2746		CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
2747
2748	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
2749		REG_WR16(bp, BAR_CSTRORM_INTMEM +
2750			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
2751
2752	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
2753}
2754
2755static void bnx2x_zero_def_sb(struct bnx2x *bp)
2756{
2757	int func = BP_FUNC(bp);
2758
2759	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
2760			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
2761			sizeof(struct tstorm_def_status_block)/4);
2762	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2763			CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
2764			sizeof(struct cstorm_def_status_block_u)/4);
2765	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2766			CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
2767			sizeof(struct cstorm_def_status_block_c)/4);
2768	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
2769			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
2770			sizeof(struct xstorm_def_status_block)/4);
2771}
2772
2773static void bnx2x_init_def_sb(struct bnx2x *bp,
2774			      struct host_def_status_block *def_sb,
2775			      dma_addr_t mapping, int sb_id)
2776{
2777	int port = BP_PORT(bp);
2778	int func = BP_FUNC(bp);
2779	int index, val, reg_offset;
2780	u64 section;
2781
2782	/* ATTN */
2783	section = ((u64)mapping) + offsetof(struct host_def_status_block,
2784					    atten_status_block);
2785	def_sb->atten_status_block.status_block_id = sb_id;
2786
2787	bp->attn_state = 0;
2788
2789	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2790			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2791
2792	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2793		bp->attn_group[index].sig[0] = REG_RD(bp,
2794						     reg_offset + 0x10*index);
2795		bp->attn_group[index].sig[1] = REG_RD(bp,
2796					       reg_offset + 0x4 + 0x10*index);
2797		bp->attn_group[index].sig[2] = REG_RD(bp,
2798					       reg_offset + 0x8 + 0x10*index);
2799		bp->attn_group[index].sig[3] = REG_RD(bp,
2800					       reg_offset + 0xc + 0x10*index);
2801	}
2802
2803	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
2804			     HC_REG_ATTN_MSG0_ADDR_L);
2805
2806	REG_WR(bp, reg_offset, U64_LO(section));
2807	REG_WR(bp, reg_offset + 4, U64_HI(section));
2808
2809	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
2810
2811	val = REG_RD(bp, reg_offset);
2812	val |= sb_id;
2813	REG_WR(bp, reg_offset, val);
2814
2815	/* USTORM */
2816	section = ((u64)mapping) + offsetof(struct host_def_status_block,
2817					    u_def_status_block);
2818	def_sb->u_def_status_block.status_block_id = sb_id;
2819
2820	REG_WR(bp, BAR_CSTRORM_INTMEM +
2821	       CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
2822	REG_WR(bp, BAR_CSTRORM_INTMEM +
2823	       ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
2824	       U64_HI(section));
2825	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
2826		CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
2827
2828	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
2829		REG_WR16(bp, BAR_CSTRORM_INTMEM +
2830			 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
2831
2832	/* CSTORM */
2833	section = ((u64)mapping) + offsetof(struct host_def_status_block,
2834					    c_def_status_block);
2835	def_sb->c_def_status_block.status_block_id = sb_id;
2836
2837	REG_WR(bp, BAR_CSTRORM_INTMEM +
2838	       CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
2839	REG_WR(bp, BAR_CSTRORM_INTMEM +
2840	       ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
2841	       U64_HI(section));
2842	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
2843		CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
2844
2845	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
2846		REG_WR16(bp, BAR_CSTRORM_INTMEM +
2847			 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
2848
2849	/* TSTORM */
2850	section = ((u64)mapping) + offsetof(struct host_def_status_block,
2851					    t_def_status_block);
2852	def_sb->t_def_status_block.status_block_id = sb_id;
2853
2854	REG_WR(bp, BAR_TSTRORM_INTMEM +
2855	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
2856	REG_WR(bp, BAR_TSTRORM_INTMEM +
2857	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
2858	       U64_HI(section));
2859	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
2860		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
2861
2862	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
2863		REG_WR16(bp, BAR_TSTRORM_INTMEM +
2864			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
2865
2866	/* XSTORM */
2867	section = ((u64)mapping) + offsetof(struct host_def_status_block,
2868					    x_def_status_block);
2869	def_sb->x_def_status_block.status_block_id = sb_id;
2870
2871	REG_WR(bp, BAR_XSTRORM_INTMEM +
2872	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
2873	REG_WR(bp, BAR_XSTRORM_INTMEM +
2874	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
2875	       U64_HI(section));
2876	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
2877		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
2878
2879	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
2880		REG_WR16(bp, BAR_XSTRORM_INTMEM +
2881			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
2882
2883	bp->stats_pending = 0;
2884	bp->set_mac_pending = 0;
2885
2886	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
2887}
2888
2889void bnx2x_update_coalesce(struct bnx2x *bp)
2890{
2891	int port = BP_PORT(bp);
2892	int i;
2893
2894	for_each_queue(bp, i) {
2895		int sb_id = bp->fp[i].sb_id;
2896
2897		/* HC_INDEX_U_ETH_RX_CQ_CONS */
2898		REG_WR8(bp, BAR_CSTRORM_INTMEM +
2899			CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
2900						      U_SB_ETH_RX_CQ_INDEX),
2901			bp->rx_ticks/(4 * BNX2X_BTR));
2902		REG_WR16(bp, BAR_CSTRORM_INTMEM +
2903			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
2904						       U_SB_ETH_RX_CQ_INDEX),
2905			 (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
2906
2907		/* HC_INDEX_C_ETH_TX_CQ_CONS */
2908		REG_WR8(bp, BAR_CSTRORM_INTMEM +
2909			CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
2910						      C_SB_ETH_TX_CQ_INDEX),
2911			bp->tx_ticks/(4 * BNX2X_BTR));
2912		REG_WR16(bp, BAR_CSTRORM_INTMEM +
2913			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
2914						       C_SB_ETH_TX_CQ_INDEX),
2915			 (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
2916	}
2917}
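
/*
 * The timeouts above appear to be programmed in units of 4 * BNX2X_BTR
 * ticks, and zero doubles as "off": the paired REG_WR16 writes set the
 * HC_DISABLE flag for an index exactly when its computed timeout rounds
 * down to zero.
 */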
2918
2919static void bnx2x_init_sp_ring(struct bnx2x *bp)
2920{
2921	int func = BP_FUNC(bp);
2922
2923	spin_lock_init(&bp->spq_lock);
2924
2925	bp->spq_left = MAX_SPQ_PENDING;
2926	bp->spq_prod_idx = 0;
2927	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
2928	bp->spq_prod_bd = bp->spq;
2929	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
2930
2931	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
2932	       U64_LO(bp->spq_mapping));
2933	REG_WR(bp,
2934	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
2935	       U64_HI(bp->spq_mapping));
2936
2937	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
2938	       bp->spq_prod_idx);
2939}
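
/*
 * This is the ring consumed by bnx2x_sp_post() earlier in this file:
 * spq_prod_bd walks the page up to spq_last_bd, spq_left caps the number
 * of outstanding entries at MAX_SPQ_PENDING, and the XSTORM producer
 * offset written last is the one bnx2x_sp_prod_update() advances at
 * runtime.
 */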
2940
2941static void bnx2x_init_context(struct bnx2x *bp)
2942{
2943	int i;
2944
2945	/* Rx */
2946	for_each_queue(bp, i) {
2947		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
2948		struct bnx2x_fastpath *fp = &bp->fp[i];
2949		u8 cl_id = fp->cl_id;
2950
2951		context->ustorm_st_context.common.sb_index_numbers =
2952						BNX2X_RX_SB_INDEX_NUM;
2953		context->ustorm_st_context.common.clientId = cl_id;
2954		context->ustorm_st_context.common.status_block_id = fp->sb_id;
2955		context->ustorm_st_context.common.flags =
2956			(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
2957			 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
2958		context->ustorm_st_context.common.statistics_counter_id =
2959						cl_id;
2960		context->ustorm_st_context.common.mc_alignment_log_size =
2961						BNX2X_RX_ALIGN_SHIFT;
2962		context->ustorm_st_context.common.bd_buff_size =
2963						bp->rx_buf_size;
2964		context->ustorm_st_context.common.bd_page_base_hi =
2965						U64_HI(fp->rx_desc_mapping);
2966		context->ustorm_st_context.common.bd_page_base_lo =
2967						U64_LO(fp->rx_desc_mapping);
2968		if (!fp->disable_tpa) {
2969			context->ustorm_st_context.common.flags |=
2970				USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
2971			context->ustorm_st_context.common.sge_buff_size =
2972				(u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE,
2973					   0xffff);
2974			context->ustorm_st_context.common.sge_page_base_hi =
2975						U64_HI(fp->rx_sge_mapping);
2976			context->ustorm_st_context.common.sge_page_base_lo =
2977						U64_LO(fp->rx_sge_mapping);
2978
2979			context->ustorm_st_context.common.max_sges_for_packet =
2980				SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
2981			context->ustorm_st_context.common.max_sges_for_packet =
2982				((context->ustorm_st_context.common.
2983				  max_sges_for_packet + PAGES_PER_SGE - 1) &
2984				 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
2985		}
2986
2987		context->ustorm_ag_context.cdu_usage =
2988			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
2989					       CDU_REGION_NUMBER_UCM_AG,
2990					       ETH_CONNECTION_TYPE);
2991
2992		context->xstorm_ag_context.cdu_reserved =
2993			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
2994					       CDU_REGION_NUMBER_XCM_AG,
2995					       ETH_CONNECTION_TYPE);
2996	}
2997
2998	/* Tx */
2999	for_each_queue(bp, i) {
3000		struct bnx2x_fastpath *fp = &bp->fp[i];
3001		struct eth_context *context =
3002			bnx2x_sp(bp, context[i].eth);
3003
3004		context->cstorm_st_context.sb_index_number =
3005						C_SB_ETH_TX_CQ_INDEX;
3006		context->cstorm_st_context.status_block_id = fp->sb_id;
3007
3008		context->xstorm_st_context.tx_bd_page_base_hi =
3009						U64_HI(fp->tx_desc_mapping);
3010		context->xstorm_st_context.tx_bd_page_base_lo =
3011						U64_LO(fp->tx_desc_mapping);
3012		context->xstorm_st_context.statistics_data = (fp->cl_id |
3013				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
3014	}
3015}
3016
3017static void bnx2x_init_ind_table(struct bnx2x *bp)
3018{
3019	int func = BP_FUNC(bp);
3020	int i;
3021
3022	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
3023		return;
3024
3025	DP(NETIF_MSG_IFUP,
3026	   "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
3027	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
3028		REG_WR8(bp, BAR_TSTRORM_INTMEM +
3029			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
3030			bp->fp->cl_id + (i % bp->num_queues));
3031}
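
/*
 * The indirection table is filled round-robin.  With hypothetical values
 * num_queues = 4 and a leading cl_id of 0, the TSTORM table becomes
 * 0, 1, 2, 3, 0, 1, 2, 3, ... across all TSTORM_INDIRECTION_TABLE_SIZE
 * entries, spreading RSS hash buckets evenly over the Rx queues.
 */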
3032
3033void bnx2x_set_client_config(struct bnx2x *bp)
3034{
3035	struct tstorm_eth_client_config tstorm_client = {0};
3036	int port = BP_PORT(bp);
3037	int i;
3038
3039	tstorm_client.mtu = bp->dev->mtu;
3040	tstorm_client.config_flags =
3041				(TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
3042				 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
3043#ifdef BCM_VLAN
3044	if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
3045		tstorm_client.config_flags |=
3046				TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
3047		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
3048	}
3049#endif
3050
3051	for_each_queue(bp, i) {
3052		tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
3053
3054		REG_WR(bp, BAR_TSTRORM_INTMEM +
3055		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
3056		       ((u32 *)&tstorm_client)[0]);
3057		REG_WR(bp, BAR_TSTRORM_INTMEM +
3058		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
3059		       ((u32 *)&tstorm_client)[1]);
3060	}
3061
3062	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
3063	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
3064}
3065
3066void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
3067{
3068	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
3069	int mode = bp->rx_mode;
3070	int mask = bp->rx_mode_cl_mask;
3071	int func = BP_FUNC(bp);
3072	int port = BP_PORT(bp);
3073	int i;
3074	/* All but management unicast packets should pass to the host as well */
3075	u32 llh_mask =
3076		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
3077		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
3078		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
3079		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
3080
3081	DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
3082
3083	switch (mode) {
3084	case BNX2X_RX_MODE_NONE: /* no Rx */
3085		tstorm_mac_filter.ucast_drop_all = mask;
3086		tstorm_mac_filter.mcast_drop_all = mask;
3087		tstorm_mac_filter.bcast_drop_all = mask;
3088		break;
3089
3090	case BNX2X_RX_MODE_NORMAL:
3091		tstorm_mac_filter.bcast_accept_all = mask;
3092		break;
3093
3094	case BNX2X_RX_MODE_ALLMULTI:
3095		tstorm_mac_filter.mcast_accept_all = mask;
3096		tstorm_mac_filter.bcast_accept_all = mask;
3097		break;
3098
3099	case BNX2X_RX_MODE_PROMISC:
3100		tstorm_mac_filter.ucast_accept_all = mask;
3101		tstorm_mac_filter.mcast_accept_all = mask;
3102		tstorm_mac_filter.bcast_accept_all = mask;
3103		/* pass management unicast packets as well */
3104		llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
3105		break;
3106
3107	default:
3108		BNX2X_ERR("BAD rx mode (%d)\n", mode);
3109		break;
3110	}
3111
3112	REG_WR(bp,
3113	       (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
3114	       llh_mask);
3115
3116	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
3117		REG_WR(bp, BAR_TSTRORM_INTMEM +
3118		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
3119		       ((u32 *)&tstorm_mac_filter)[i]);
3120
3121/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
3122		   ((u32 *)&tstorm_mac_filter)[i]); */
3123	}
3124
3125	if (mode != BNX2X_RX_MODE_NONE)
3126		bnx2x_set_client_config(bp);
3127}
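
/*
 * Summary of the rx-mode to TSTORM filter mapping configured above
 * (mask selects which clients the setting applies to):
 *
 *	NONE     - drop all unicast, multicast and broadcast
 *	NORMAL   - accept broadcast only; unicast/multicast presumably
 *		   fall through to the regular MAC filters
 *	ALLMULTI - accept all multicast plus broadcast
 *	PROMISC  - accept everything, and open llh_mask so management
 *		   unicast reaches the host too
 */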
3128
3129static void bnx2x_init_internal_common(struct bnx2x *bp)
3130{
3131	int i;
3132
3133	/* Zero this manually as its initialization is
3134	   currently missing in the initTool */
3135	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
3136		REG_WR(bp, BAR_USTRORM_INTMEM +
3137		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
3138}
3139
3140static void bnx2x_init_internal_port(struct bnx2x *bp)
3141{
3142	int port = BP_PORT(bp);
3143
3144	REG_WR(bp,
3145	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
3146	REG_WR(bp,
3147	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
3148	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
3149	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
3150}
3151
3152static void bnx2x_init_internal_func(struct bnx2x *bp)
3153{
3154	struct tstorm_eth_function_common_config tstorm_config = {0};
3155	struct stats_indication_flags stats_flags = {0};
3156	int port = BP_PORT(bp);
3157	int func = BP_FUNC(bp);
3158	int i, j;
3159	u32 offset;
3160	u16 max_agg_size;
3161
3162	tstorm_config.config_flags = RSS_FLAGS(bp);
3163
3164	if (is_multi(bp))
3165		tstorm_config.rss_result_mask = MULTI_MASK;
3166
3167	/* Enable TPA if needed */
3168	if (bp->flags & TPA_ENABLE_FLAG)
3169		tstorm_config.config_flags |=
3170			TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
3171
3172	if (IS_E1HMF(bp))
3173		tstorm_config.config_flags |=
3174				TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
3175
3176	tstorm_config.leading_client_id = BP_L_ID(bp);
3177
3178	REG_WR(bp, BAR_TSTRORM_INTMEM +
3179	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
3180	       (*(u32 *)&tstorm_config));
3181
3182	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
3183	bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
3184	bnx2x_set_storm_rx_mode(bp);
3185
3186	for_each_queue(bp, i) {
3187		u8 cl_id = bp->fp[i].cl_id;
3188
3189		/* reset xstorm per client statistics */
3190		offset = BAR_XSTRORM_INTMEM +
3191			 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
3192		for (j = 0;
3193		     j < sizeof(struct xstorm_per_client_stats) / 4; j++)
3194			REG_WR(bp, offset + j*4, 0);
3195
3196		/* reset tstorm per client statistics */
3197		offset = BAR_TSTRORM_INTMEM +
3198			 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
3199		for (j = 0;
3200		     j < sizeof(struct tstorm_per_client_stats) / 4; j++)
3201			REG_WR(bp, offset + j*4, 0);
3202
3203		/* reset ustorm per client statistics */
3204		offset = BAR_USTRORM_INTMEM +
3205			 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
3206		for (j = 0;
3207		     j < sizeof(struct ustorm_per_client_stats) / 4; j++)
3208			REG_WR(bp, offset + j*4, 0);
3209	}
3210
3211	/* Init statistics related context */
3212	stats_flags.collect_eth = 1;
3213
3214	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
3215	       ((u32 *)&stats_flags)[0]);
3216	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
3217	       ((u32 *)&stats_flags)[1]);
3218
3219	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
3220	       ((u32 *)&stats_flags)[0]);
3221	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
3222	       ((u32 *)&stats_flags)[1]);
3223
3224	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
3225	       ((u32 *)&stats_flags)[0]);
3226	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
3227	       ((u32 *)&stats_flags)[1]);
3228
3229	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
3230	       ((u32 *)&stats_flags)[0]);
3231	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
3232	       ((u32 *)&stats_flags)[1]);
3233
3234	REG_WR(bp, BAR_XSTRORM_INTMEM +
3235	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
3236	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3237	REG_WR(bp, BAR_XSTRORM_INTMEM +
3238	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
3239	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
3240
3241	REG_WR(bp, BAR_TSTRORM_INTMEM +
3242	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
3243	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3244	REG_WR(bp, BAR_TSTRORM_INTMEM +
3245	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
3246	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
3247
3248	REG_WR(bp, BAR_USTRORM_INTMEM +
3249	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
3250	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3251	REG_WR(bp, BAR_USTRORM_INTMEM +
3252	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
3253	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
3254
3255	if (CHIP_IS_E1H(bp)) {
3256		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
3257			IS_E1HMF(bp));
3258		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
3259			IS_E1HMF(bp));
3260		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
3261			IS_E1HMF(bp));
3262		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
3263			IS_E1HMF(bp));
3264
3265		REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
3266			 bp->e1hov);
3267	}
3268
3269	/* Init CQ ring mapping and aggregation size; the FW limit is 8 frags */
3270	max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) *
3271				   SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
3272	for_each_queue(bp, i) {
3273		struct bnx2x_fastpath *fp = &bp->fp[i];
3274
3275		REG_WR(bp, BAR_USTRORM_INTMEM +
3276		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
3277		       U64_LO(fp->rx_comp_mapping));
3278		REG_WR(bp, BAR_USTRORM_INTMEM +
3279		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
3280		       U64_HI(fp->rx_comp_mapping));
3281
3282		/* Next page */
3283		REG_WR(bp, BAR_USTRORM_INTMEM +
3284		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
3285		       U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
3286		REG_WR(bp, BAR_USTRORM_INTMEM +
3287		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
3288		       U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
3289
3290		REG_WR16(bp, BAR_USTRORM_INTMEM +
3291			 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
3292			 max_agg_size);
3293	}
3294
3295	/* dropless flow control */
3296	if (CHIP_IS_E1H(bp)) {
3297		struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
3298
3299		rx_pause.bd_thr_low = 250;
3300		rx_pause.cqe_thr_low = 250;
3301		rx_pause.cos = 1;
3302		rx_pause.sge_thr_low = 0;
3303		rx_pause.bd_thr_high = 350;
3304		rx_pause.cqe_thr_high = 350;
3305		rx_pause.sge_thr_high = 0;
3306
3307		for_each_queue(bp, i) {
3308			struct bnx2x_fastpath *fp = &bp->fp[i];
3309
3310			if (!fp->disable_tpa) {
3311				rx_pause.sge_thr_low = 150;
3312				rx_pause.sge_thr_high = 250;
3313			}
3314
3316			offset = BAR_USTRORM_INTMEM +
3317				 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
3318								   fp->cl_id);
3319			for (j = 0;
3320			     j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
3321			     j++)
3322				REG_WR(bp, offset + j*4,
3323				       ((u32 *)&rx_pause)[j]);
3324		}
3325	}
3326
3327	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
3328
3329	/* Init rate shaping and fairness contexts */
3330	if (IS_E1HMF(bp)) {
3331		int vn;
3332
3333		/* During init there is no active link.
3334		   Until link is up, set the link rate to 10Gbps */
3335		bp->link_vars.line_speed = SPEED_10000;
3336		bnx2x_init_port_minmax(bp);
3337
3338		if (!BP_NOMCP(bp))
3339			bp->mf_config =
3340			      SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
3341		bnx2x_calc_vn_weight_sum(bp);
3342
3343		for (vn = VN_0; vn < E1HVN_MAX; vn++)
3344			bnx2x_init_vn_minmax(bp, 2*vn + port);
3345
3346		/* Enable rate shaping and fairness */
3347		bp->cmng.flags.cmng_enables |=
3348					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
3349
3350	} else {
3351		/* rate shaping and fairness are disabled */
3352		DP(NETIF_MSG_IFUP,
3353		   "single function mode  minmax will be disabled\n");
3354	}
3355
3357	/* Store cmng structures to internal memory */
3358	if (bp->port.pmf)
3359		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
3360			REG_WR(bp, BAR_XSTRORM_INTMEM +
3361			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
3362			       ((u32 *)(&bp->cmng))[i]);
3363}
3364
3365static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
3366{
3367	switch (load_code) {
3368	case FW_MSG_CODE_DRV_LOAD_COMMON:
3369		bnx2x_init_internal_common(bp);
3370		/* no break */
3371
3372	case FW_MSG_CODE_DRV_LOAD_PORT:
3373		bnx2x_init_internal_port(bp);
3374		/* no break */
3375
3376	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
3377		bnx2x_init_internal_func(bp);
3378		break;
3379
3380	default:
3381		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
3382		break;
3383	}
3384}
3385
3386void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
3387{
3388	int i;
3389
3390	for_each_queue(bp, i) {
3391		struct bnx2x_fastpath *fp = &bp->fp[i];
3392
3393		fp->bp = bp;
3394		fp->state = BNX2X_FP_STATE_CLOSED;
3395		fp->index = i;
3396		fp->cl_id = BP_L_ID(bp) + i;
3397#ifdef BCM_CNIC
3398		fp->sb_id = fp->cl_id + 1;
3399#else
3400		fp->sb_id = fp->cl_id;
3401#endif
3402		DP(NETIF_MSG_IFUP,
3403		   "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
3404		   i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
3405		bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
3406			      fp->sb_id);
3407		bnx2x_update_fpsb_idx(fp);
3408	}
3409
3410	/* ensure status block indices were read */
3411	rmb();
3412
3414	bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
3415			  DEF_SB_ID);
3416	bnx2x_update_dsb_idx(bp);
3417	bnx2x_update_coalesce(bp);
3418	bnx2x_init_rx_rings(bp);
3419	bnx2x_init_tx_ring(bp);
3420	bnx2x_init_sp_ring(bp);
3421	bnx2x_init_context(bp);
3422	bnx2x_init_internal(bp, load_code);
3423	bnx2x_init_ind_table(bp);
3424	bnx2x_stats_init(bp);
3425
3426	/* At this point, we are ready for interrupts */
3427	atomic_set(&bp->intr_sem, 0);
3428
3429	/* flush all before enabling interrupts */
3430	mb();
3431	mmiowb();
3432
3433	bnx2x_int_enable(bp);
3434
3435	/* Check for SPIO5 */
3436	bnx2x_attn_int_deasserted0(bp,
3437		REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
3438				   AEU_INPUTS_ATTN_BITS_SPIO5);
3439}
3440
3441/* end of nic init */
3442
3443/*
3444 * gzip service functions
3445 */
3446
3447static int bnx2x_gunzip_init(struct bnx2x *bp)
3448{
3449	bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
3450					    &bp->gunzip_mapping, GFP_KERNEL);
3451	if (bp->gunzip_buf  == NULL)
3452		goto gunzip_nomem1;
3453
3454	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
3455	if (bp->strm  == NULL)
3456		goto gunzip_nomem2;
3457
3458	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
3459				      GFP_KERNEL);
3460	if (bp->strm->workspace == NULL)
3461		goto gunzip_nomem3;
3462
3463	return 0;
3464
3465gunzip_nomem3:
3466	kfree(bp->strm);
3467	bp->strm = NULL;
3468
3469gunzip_nomem2:
3470	dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
3471			  bp->gunzip_mapping);
3472	bp->gunzip_buf = NULL;
3473
3474gunzip_nomem1:
3475	netdev_err(bp->dev, "Cannot allocate firmware buffer for"
3476	       " decompression\n");
3477	return -ENOMEM;
3478}
3479
3480static void bnx2x_gunzip_end(struct bnx2x *bp)
3481{
3482	kfree(bp->strm->workspace);
3483
3484	kfree(bp->strm);
3485	bp->strm = NULL;
3486
3487	if (bp->gunzip_buf) {
3488		dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
3489				  bp->gunzip_mapping);
3490		bp->gunzip_buf = NULL;
3491	}
3492}
3493
3494static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
3495{
3496	int n, rc;
3497
3498	/* check gzip header */
3499	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
3500		BNX2X_ERR("Bad gzip header\n");
3501		return -EINVAL;
3502	}
3503
3504	n = 10;
3505
3506#define FNAME				0x8
3507
3508	if (zbuf[3] & FNAME)
3509		while ((zbuf[n++] != 0) && (n < len));
3510
3511	bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
3512	bp->strm->avail_in = len - n;
3513	bp->strm->next_out = bp->gunzip_buf;
3514	bp->strm->avail_out = FW_BUF_SIZE;
3515
3516	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
3517	if (rc != Z_OK)
3518		return rc;
3519
3520	rc = zlib_inflate(bp->strm, Z_FINISH);
3521	if ((rc != Z_OK) && (rc != Z_STREAM_END))
3522		netdev_err(bp->dev, "Firmware decompression error: %s\n",
3523			   bp->strm->msg);
3524
3525	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
3526	if (bp->gunzip_outlen & 0x3)
3527		netdev_err(bp->dev, "Firmware decompression error:"
3528				    " gunzip_outlen (%d) not aligned\n",
3529				bp->gunzip_outlen);
3530	bp->gunzip_outlen >>= 2;
3531
3532	zlib_inflateEnd(bp->strm);
3533
3534	if (rc == Z_STREAM_END)
3535		return 0;
3536
3537	return rc;
3538}
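
/*
 * The header handling above follows RFC 1952: the fixed 10-byte gzip
 * header is skipped manually (plus the NUL-terminated file name when the
 * FNAME flag is set), and zlib_inflateInit2() is called with a negative
 * windowBits (-MAX_WBITS), which tells zlib to expect a raw deflate
 * stream with no zlib header or trailer of its own.
 */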
3539
3540/* nic load/unload */
3541
3542/*
3543 * General service functions
3544 */
3545
3546/* send a NIG loopback debug packet */
3547static void bnx2x_lb_pckt(struct bnx2x *bp)
3548{
3549	u32 wb_write[3];
3550
3551	/* Ethernet source and destination addresses */
3552	wb_write[0] = 0x55555555;
3553	wb_write[1] = 0x55555555;
3554	wb_write[2] = 0x20;		/* SOP */
3555	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
3556
3557	/* NON-IP protocol */
3558	wb_write[0] = 0x09000000;
3559	wb_write[1] = 0x55555555;
3560	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
3561	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
3562}
3563
3564/* Some of the internal memories are not directly
3565 * readable from the driver; to test them we send
3566 * debug packets.
3567 */
3568static int bnx2x_int_mem_test(struct bnx2x *bp)
3569{
3570	int factor;
3571	int count, i;
3572	u32 val = 0;
3573
3574	if (CHIP_REV_IS_FPGA(bp))
3575		factor = 120;
3576	else if (CHIP_REV_IS_EMUL(bp))
3577		factor = 200;
3578	else
3579		factor = 1;
3580
3581	DP(NETIF_MSG_HW, "start part1\n");
3582
3583	/* Disable inputs of parser neighbor blocks */
3584	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
3585	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
3586	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3587	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
3588
3589	/*  Write 0 to parser credits for CFC search request */
3590	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
3591
3592	/* send Ethernet packet */
3593	bnx2x_lb_pckt(bp);
3594
3595	/* TODO: do I reset the NIG statistics? */
3596	/* Wait until NIG register shows 1 packet of size 0x10 */
3597	count = 1000 * factor;
3598	while (count) {
3599
3600		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
3601		val = *bnx2x_sp(bp, wb_data[0]);
3602		if (val == 0x10)
3603			break;
3604
3605		msleep(10);
3606		count--;
3607	}
3608	if (val != 0x10) {
3609		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
3610		return -1;
3611	}
3612
3613	/* Wait until PRS register shows 1 packet */
3614	count = 1000 * factor;
3615	while (count) {
3616		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
3617		if (val == 1)
3618			break;
3619
3620		msleep(10);
3621		count--;
3622	}
3623	if (val != 0x1) {
3624		BNX2X_ERR("PRS timeout val = 0x%x\n", val);
3625		return -2;
3626	}
3627
3628	/* Reset and init BRB, PRS */
3629	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
3630	msleep(50);
3631	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
3632	msleep(50);
3633	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
3634	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
3635
3636	DP(NETIF_MSG_HW, "part2\n");
3637
3638	/* Disable inputs of parser neighbor blocks */
3639	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
3640	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
3641	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3642	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
3643
3644	/* Write 0 to parser credits for CFC search request */
3645	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
3646
3647	/* send 10 Ethernet packets */
3648	for (i = 0; i < 10; i++)
3649		bnx2x_lb_pckt(bp);
3650
3651	/* Wait until NIG register shows 10 + 1
3652	   packets of size 11*0x10 = 0xb0 */
3653	count = 1000 * factor;
3654	while (count) {
3655
3656		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
3657		val = *bnx2x_sp(bp, wb_data[0]);
3658		if (val == 0xb0)
3659			break;
3660
3661		msleep(10);
3662		count--;
3663	}
3664	if (val != 0xb0) {
3665		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
3666		return -3;
3667	}
3668
3669	/* Wait until PRS register shows 2 packets */
3670	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
3671	if (val != 2)
3672		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
3673
3674	/* Write 1 to parser credits for CFC search request */
3675	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
3676
3677	/* Wait until PRS register shows 3 packets */
3678	msleep(10 * factor);
3680	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
3681	if (val != 3)
3682		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
3683
3684	/* clear NIG EOP FIFO */
3685	for (i = 0; i < 11; i++)
3686		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
3687	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
3688	if (val != 1) {
3689		BNX2X_ERR("clear of NIG failed\n");
3690		return -4;
3691	}
3692
3693	/* Reset and init BRB, PRS, NIG */
3694	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
3695	msleep(50);
3696	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
3697	msleep(50);
3698	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
3699	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
3700#ifndef BCM_CNIC
3701	/* set NIC mode */
3702	REG_WR(bp, PRS_REG_NIC_MODE, 1);
3703#endif
3704
3705	/* Enable inputs of parser neighbor blocks */
3706	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
3707	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
3708	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3709	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
3710
3711	DP(NETIF_MSG_HW, "done\n");
3712
3713	return 0; /* OK */
3714}
3715
3716static void enable_blocks_attention(struct bnx2x *bp)
3717{
3718	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
3719	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
3720	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
3721	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
3722	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
3723	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
3724	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
3725	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
3726	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
3727/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
3728/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
3729	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
3730	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
3731	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
3732/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
3733/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
3734	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
3735	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
3736	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
3737	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
3738/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
3739/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
3740	if (CHIP_REV_IS_FPGA(bp))
3741		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
3742	else
3743		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
3744	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
3745	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
3746	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
3747/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
3748/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
3749	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
3750	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
3751/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
3752	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);		/* bits 3,4 masked */
3753}
3754
3755static const struct {
3756	u32 addr;
3757	u32 mask;
3758} bnx2x_parity_mask[] = {
3759	{PXP_REG_PXP_PRTY_MASK, 0xffffffff},
3760	{PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
3761	{PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
3762	{HC_REG_HC_PRTY_MASK, 0xffffffff},
3763	{MISC_REG_MISC_PRTY_MASK, 0xffffffff},
3764	{QM_REG_QM_PRTY_MASK, 0x0},
3765	{DORQ_REG_DORQ_PRTY_MASK, 0x0},
3766	{GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
3767	{GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
3768	{SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
3769	{CDU_REG_CDU_PRTY_MASK, 0x0},
3770	{CFC_REG_CFC_PRTY_MASK, 0x0},
3771	{DBG_REG_DBG_PRTY_MASK, 0x0},
3772	{DMAE_REG_DMAE_PRTY_MASK, 0x0},
3773	{BRB1_REG_BRB1_PRTY_MASK, 0x0},
3774	{PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
3775	{TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */
3776	{CSDM_REG_CSDM_PRTY_MASK, 0x8},	/* bit 3 */
3777	{USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */
3778	{XSDM_REG_XSDM_PRTY_MASK, 0x8},	/* bit 3 */
3779	{TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
3780	{TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
3781	{USEM_REG_USEM_PRTY_MASK_0, 0x0},
3782	{USEM_REG_USEM_PRTY_MASK_1, 0x0},
3783	{CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
3784	{CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
3785	{XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
3786	{XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
3787};
3788
3789static void enable_blocks_parity(struct bnx2x *bp)
3790{
3791	int i, mask_arr_len = ARRAY_SIZE(bnx2x_parity_mask);
3793
3794	for (i = 0; i < mask_arr_len; i++)
3795		REG_WR(bp, bnx2x_parity_mask[i].addr,
3796			bnx2x_parity_mask[i].mask);
3797}
3798
3799
3800static void bnx2x_reset_common(struct bnx2x *bp)
3801{
3802	/* reset_common */
3803	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
3804	       0xd3ffff7f);
3805	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
3806}
3807
3808static void bnx2x_init_pxp(struct bnx2x *bp)
3809{
3810	u16 devctl;
3811	int r_order, w_order;
3812
3813	pci_read_config_word(bp->pdev,
3814			     bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
3815	DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
3816	w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
3817	if (bp->mrrs == -1)
3818		r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
3819	else {
3820		DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
3821		r_order = bp->mrrs;
3822	}
3823
3824	bnx2x_init_pxp_arb(bp, r_order, w_order);
3825}
3826
3827static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
3828{
3829	int is_required;
3830	u32 val;
3831	int port;
3832
3833	if (BP_NOMCP(bp))
3834		return;
3835
3836	is_required = 0;
3837	val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
3838	      SHARED_HW_CFG_FAN_FAILURE_MASK;
3839
3840	if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
3841		is_required = 1;
3842
3843	/*
3844	 * The fan failure mechanism is usually related to the PHY type since
3845	 * the power consumption of the board is affected by the PHY. Currently,
3846	 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
3847	 */
3848	else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
3849		for (port = PORT_0; port < PORT_MAX; port++) {
3850			u32 phy_type =
3851				SHMEM_RD(bp, dev_info.port_hw_config[port].
3852					 external_phy_config) &
3853				PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
3854			is_required |=
3855				((phy_type ==
3856				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
3857				 (phy_type ==
3858				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
3859				 (phy_type ==
3860				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
3861		}
3862
3863	DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
3864
3865	if (is_required == 0)
3866		return;
3867
3868	/* Fan failure is indicated by SPIO 5 */
3869	bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
3870		       MISC_REGISTERS_SPIO_INPUT_HI_Z);
3871
3872	/* set to active low mode */
3873	val = REG_RD(bp, MISC_REG_SPIO_INT);
3874	val |= ((1 << MISC_REGISTERS_SPIO_5) <<
3875					MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
3876	REG_WR(bp, MISC_REG_SPIO_INT, val);
3877
3878	/* enable interrupt to signal the IGU */
3879	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
3880	val |= (1 << MISC_REGISTERS_SPIO_5);
3881	REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
3882}
3883
3884static int bnx2x_init_common(struct bnx2x *bp)
3885{
3886	u32 val, i;
3887#ifdef BCM_CNIC
3888	u32 wb_write[2];
3889#endif
3890
3891	DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
3892
3893	bnx2x_reset_common(bp);
3894	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
3895	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
3896
3897	bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
3898	if (CHIP_IS_E1H(bp))
3899		REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
3900
3901	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
3902	msleep(30);
3903	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
3904
3905	bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
3906	if (CHIP_IS_E1(bp)) {
3907		/* enable HW interrupt from PXP on USDM overflow
3908		   bit 16 on INT_MASK_0 */
3909		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
3910	}
3911
3912	bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
3913	bnx2x_init_pxp(bp);
3914
3915#ifdef __BIG_ENDIAN
3916	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
3917	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
3918	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
3919	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
3920	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
3921	/* make sure this value is 0 */
3922	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
3923
3924/*	REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
3925	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
3926	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
3927	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
3928	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
3929#endif
3930
3931	REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
3932#ifdef BCM_CNIC
3933	REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
3934	REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
3935	REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
3936#endif
3937
3938	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
3939		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
3940
3941	/* let the HW do its magic ... */
3942	msleep(100);
3943	/* finish PXP init */
3944	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
3945	if (val != 1) {
3946		BNX2X_ERR("PXP2 CFG failed\n");
3947		return -EBUSY;
3948	}
3949	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
3950	if (val != 1) {
3951		BNX2X_ERR("PXP2 RD_INIT failed\n");
3952		return -EBUSY;
3953	}
3954
3955	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
3956	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
3957
3958	bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
3959
3960	/* clean the DMAE memory */
3961	bp->dmae_ready = 1;
3962	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
3963
3964	bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
3965	bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
3966	bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
3967	bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
3968
3969	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
3970	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
3971	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
3972	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
3973
3974	bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
3975
3976#ifdef BCM_CNIC
3977	wb_write[0] = 0;
3978	wb_write[1] = 0;
3979	for (i = 0; i < 64; i++) {
3980		REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
3981		bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
3982
3983		if (CHIP_IS_E1H(bp)) {
3984			REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
3985			bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
3986					  wb_write, 2);
3987		}
3988	}
3989#endif
3990	/* soft reset pulse */
3991	REG_WR(bp, QM_REG_SOFT_RESET, 1);
3992	REG_WR(bp, QM_REG_SOFT_RESET, 0);
3993
3994#ifdef BCM_CNIC
3995	bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
3996#endif
3997
3998	bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
3999	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
4000	if (!CHIP_REV_IS_SLOW(bp)) {
4001		/* enable hw interrupt from doorbell Q */
4002		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
4003	}
4004
4005	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
4006	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
4007	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
4008#ifndef BCM_CNIC
4009	/* set NIC mode */
4010	REG_WR(bp, PRS_REG_NIC_MODE, 1);
4011#endif
4012	if (CHIP_IS_E1H(bp))
4013		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
4014
4015	bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
4016	bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
4017	bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
4018	bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
4019
4020	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4021	bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4022	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4023	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4024
4025	bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
4026	bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
4027	bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
4028	bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
4029
4030	/* sync semi rtc */
4031	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
4032	       0x80000000);
4033	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
4034	       0x80000000);
4035
4036	bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
4037	bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
4038	bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
4039
4040	REG_WR(bp, SRC_REG_SOFT_RST, 1);
4041	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
4042		REG_WR(bp, i, random32());
4043	bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
4044#ifdef BCM_CNIC
4045	REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
4046	REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
4047	REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
4048	REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
4049	REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
4050	REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
4051	REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
4052	REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
4053	REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
4054	REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
4055#endif
4056	REG_WR(bp, SRC_REG_SOFT_RST, 0);
4057
4058	if (sizeof(union cdu_context) != 1024)
4059		/* we currently assume that a context is 1024 bytes */
4060		dev_alert(&bp->pdev->dev, "please adjust the size "
4061					  "of cdu_context(%ld)\n",
4062			 (long)sizeof(union cdu_context));
4063
4064	bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
4065	val = (4 << 24) + (0 << 12) + 1024;
4066	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
4067
4068	bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
4069	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
4070	/* enable context validation interrupt from CFC */
4071	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
4072
4073	/* set the thresholds to prevent CFC/CDU race */
4074	REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
4075
4076	bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
4077	bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
4078
4079	bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
4080	/* Reset PCIE errors for debug */
4081	REG_WR(bp, 0x2814, 0xffffffff);
4082	REG_WR(bp, 0x3820, 0xffffffff);
4083
4084	bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
4085	bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
4086	bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
4087	bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
4088
4089	bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
4090	if (CHIP_IS_E1H(bp)) {
4091		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
4092		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
4093	}
4094
4095	if (CHIP_REV_IS_SLOW(bp))
4096		msleep(200);
4097
4098	/* finish CFC init */
4099	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
4100	if (val != 1) {
4101		BNX2X_ERR("CFC LL_INIT failed\n");
4102		return -EBUSY;
4103	}
4104	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
4105	if (val != 1) {
4106		BNX2X_ERR("CFC AC_INIT failed\n");
4107		return -EBUSY;
4108	}
4109	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
4110	if (val != 1) {
4111		BNX2X_ERR("CFC CAM_INIT failed\n");
4112		return -EBUSY;
4113	}
4114	REG_WR(bp, CFC_REG_DEBUG0, 0);
4115
4116	/* read the NIG statistic to see if this is
4117	   the first load since power-up */
4118	bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4119	val = *bnx2x_sp(bp, wb_data[0]);
4120
4121	/* do internal memory self test */
4122	if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
4123		BNX2X_ERR("internal mem self test failed\n");
4124		return -EBUSY;
4125	}
4126
4127	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
4128	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
4129	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
4130	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4131	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
4132		bp->port.need_hw_lock = 1;
4133		break;
4134
4135	default:
4136		break;
4137	}
4138
4139	bnx2x_setup_fan_failure_detection(bp);
4140
4141	/* clear PXP2 attentions */
4142	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
4143
4144	enable_blocks_attention(bp);
4145	if (CHIP_PARITY_SUPPORTED(bp))
4146		enable_blocks_parity(bp);
4147
4148	if (!BP_NOMCP(bp)) {
4149		bnx2x_acquire_phy_lock(bp);
4150		bnx2x_common_init_phy(bp, bp->common.shmem_base);
4151		bnx2x_release_phy_lock(bp);
4152	} else
4153		BNX2X_ERR("Bootcode is missing - can not initialize link\n");
4154
4155	return 0;
4156}
4157
4158static int bnx2x_init_port(struct bnx2x *bp)
4159{
4160	int port = BP_PORT(bp);
4161	int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
4162	u32 low, high;
4163	u32 val;
4164
4165	DP(BNX2X_MSG_MCP, "starting port init  port %d\n", port);
4166
4167	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
4168
4169	bnx2x_init_block(bp, PXP_BLOCK, init_stage);
4170	bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
4171
4172	bnx2x_init_block(bp, TCM_BLOCK, init_stage);
4173	bnx2x_init_block(bp, UCM_BLOCK, init_stage);
4174	bnx2x_init_block(bp, CCM_BLOCK, init_stage);
4175	bnx2x_init_block(bp, XCM_BLOCK, init_stage);
4176
4177#ifdef BCM_CNIC
4178	REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
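	/* 1024/16 - 1 = 63; with CNIC_CTX_PER_ILT == 16, this is
	 * presumably the number of 16-connection pages minus one */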
4179
4180	bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
4181	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
4182	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
4183#endif
4184
4185	bnx2x_init_block(bp, DQ_BLOCK, init_stage);
4186
4187	bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
4188	if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
4189		/* no pause for emulation and FPGA */
4190		low = 0;
4191		high = 513;
4192	} else {
4193		if (IS_E1HMF(bp))
4194			low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
4195		else if (bp->dev->mtu > 4096) {
4196			if (bp->flags & ONE_PORT_FLAG)
4197				low = 160;
4198			else {
4199				val = bp->dev->mtu;
4200				/* (24*1024 + val*4)/256 */
4201				low = 96 + (val/64) + ((val % 64) ? 1 : 0);
4202			}
4203		} else
4204			low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
4205		high = low + 56;	/* 14*1024/256 */
4206	}
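	/* a worked example of the formula above, for single-function
	 * mode with a 9000-byte MTU: low = 96 + 9000/64 + 1 = 237
	 * blocks of 256 bytes (matching (24*1024 + 9000*4)/256
	 * rounded up), and high = 237 + 56 = 293
	 */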
4207	REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
4208	REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
4209
4210
4211	bnx2x_init_block(bp, PRS_BLOCK, init_stage);
4212
4213	bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
4214	bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
4215	bnx2x_init_block(bp, USDM_BLOCK, init_stage);
4216	bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
4217
4218	bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
4219	bnx2x_init_block(bp, USEM_BLOCK, init_stage);
4220	bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
4221	bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
4222
4223	bnx2x_init_block(bp, UPB_BLOCK, init_stage);
4224	bnx2x_init_block(bp, XPB_BLOCK, init_stage);
4225
4226	bnx2x_init_block(bp, PBF_BLOCK, init_stage);
4227
4228	/* configure PBF to work without PAUSE mtu 9000 */
4229	REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
4230
4231	/* update threshold */
4232	REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
4233	/* update init credit */
4234	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
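	/* i.e. for the 9000-byte MTU mentioned above: a threshold of
	 * 9040/16 = 565 and an initial credit of 565 + 553 - 22 = 1096;
	 * the division by 16 suggests 16-byte units */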
4235
4236	/* probe changes */
4237	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
4238	msleep(5);
4239	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
4240
4241#ifdef BCM_CNIC
4242	bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
4243#endif
4244	bnx2x_init_block(bp, CDU_BLOCK, init_stage);
4245	bnx2x_init_block(bp, CFC_BLOCK, init_stage);
4246
4247	if (CHIP_IS_E1(bp)) {
4248		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
4249		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
4250	}
4251	bnx2x_init_block(bp, HC_BLOCK, init_stage);
4252
4253	bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
4254	/* init aeu_mask_attn_func_0/1:
4255	 *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
4256	 *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
4257	 *             bits 4-7 are used for "per vn group attention" */
4258	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
4259	       (IS_E1HMF(bp) ? 0xF7 : 0x7));
4260
4261	bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
4262	bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
4263	bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
4264	bnx2x_init_block(bp, DBU_BLOCK, init_stage);
4265	bnx2x_init_block(bp, DBG_BLOCK, init_stage);
4266
4267	bnx2x_init_block(bp, NIG_BLOCK, init_stage);
4268
4269	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
4270
4271	if (CHIP_IS_E1H(bp)) {
4272		/* 0x2 disable e1hov, 0x1 enable */
4273		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
4274		       (IS_E1HMF(bp) ? 0x1 : 0x2));
4275
4277		REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
4278		REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
4279		REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
4281	}
4282
4283	bnx2x_init_block(bp, MCP_BLOCK, init_stage);
4284	bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
4285
4286	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
4287	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4288		{
4289		u32 swap_val, swap_override, aeu_gpio_mask, offset;
4290
4291		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
4292			       MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
4293
4294		/* The GPIO should be swapped if the swap register is
4295		   set and active */
4296		swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
4297		swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
4298
4299		/* Select function upon port-swap configuration */
4300		if (port == 0) {
4301			offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
4302			aeu_gpio_mask = (swap_val && swap_override) ?
4303				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
4304				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
4305		} else {
4306			offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
4307			aeu_gpio_mask = (swap_val && swap_override) ?
4308				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
4309				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
4310		}
4311		val = REG_RD(bp, offset);
4312		/* add GPIO3 to group */
4313		val |= aeu_gpio_mask;
4314		REG_WR(bp, offset, val);
4315		}
4316		bp->port.need_hw_lock = 1;
4317		break;
4318
4319	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
4320		bp->port.need_hw_lock = 1;
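		/* fall through */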
4321	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
4322		/* add SPIO 5 to group 0 */
4323		{
4324		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4325				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4326		val = REG_RD(bp, reg_addr);
4327		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
4328		REG_WR(bp, reg_addr, val);
4329		}
4330		break;
4331	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
4332	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
4333		bp->port.need_hw_lock = 1;
4334		break;
4335	default:
4336		break;
4337	}
4338
4339	bnx2x__link_reset(bp);
4340
4341	return 0;
4342}
4343
4344#define ILT_PER_FUNC		(768/2)
4345#define FUNC_ILT_BASE(func)	(func * ILT_PER_FUNC)
4346/* The physical address is shifted right by 12 bits and a
4347   valid bit (1) is set in the 53rd bit of the result;
4348   since this is a wide register we split it into
4349   two 32-bit writes
4350 */
4351#define ONCHIP_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
4352#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
4353#define PXP_ONE_ILT(x)		(((x) << 10) | x)
4354#define PXP_ILT_RANGE(f, l)	(((l) << 10) | f)
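
/* A worked example of the ONCHIP_ADDR macros above, for a
 * hypothetical DMA address x = 0x0000001234567000ULL:
 *	ONCHIP_ADDR1(x) = ((x >> 12) & 0xFFFFFFFF) = 0x01234567
 *	ONCHIP_ADDR2(x) = ((1 << 20) | (x >> 44)) = 0x00100000
 * The two 32-bit writes together carry bits 12..63 of the address,
 * with the valid bit at bit 20 of the high word (the 53rd bit of
 * the combined value).
 */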
4355
4356#ifdef BCM_CNIC
4357#define CNIC_ILT_LINES		127
4358#define CNIC_CTX_PER_ILT	16
4359#else
4360#define CNIC_ILT_LINES		0
4361#endif
4362
4363static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
4364{
4365	int reg;
4366
4367	if (CHIP_IS_E1H(bp))
4368		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
4369	else /* E1 */
4370		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
4371
4372	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
4373}
4374
4375static int bnx2x_init_func(struct bnx2x *bp)
4376{
4377	int port = BP_PORT(bp);
4378	int func = BP_FUNC(bp);
4379	u32 addr, val;
4380	int i;
4381
4382	DP(BNX2X_MSG_MCP, "starting func init  func %d\n", func);
4383
4384	/* set MSI reconfigure capability */
4385	addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
4386	val = REG_RD(bp, addr);
4387	val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
4388	REG_WR(bp, addr, val);
4389
4390	i = FUNC_ILT_BASE(func);
4391
4392	bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
4393	if (CHIP_IS_E1H(bp)) {
4394		REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
4395		REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
4396	} else /* E1 */
4397		REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
4398		       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
4399
4400#ifdef BCM_CNIC
4401	i += 1 + CNIC_ILT_LINES;
4402	bnx2x_ilt_wr(bp, i, bp->timers_mapping);
4403	if (CHIP_IS_E1(bp))
4404		REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
4405	else {
4406		REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
4407		REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
4408	}
4409
4410	i++;
4411	bnx2x_ilt_wr(bp, i, bp->qm_mapping);
4412	if (CHIP_IS_E1(bp))
4413		REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
4414	else {
4415		REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
4416		REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
4417	}
4418
4419	i++;
4420	bnx2x_ilt_wr(bp, i, bp->t1_mapping);
4421	if (CHIP_IS_E1(bp))
4422		REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
4423	else {
4424		REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
4425		REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
4426	}
4427
4428	/* tell the searcher where the T2 table is */
4429	REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
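	/* 16*1024/64 = 256: the 16 KB T2 table allocated in
	 * bnx2x_alloc_mem() holds 256 entries of 64 bytes each */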
4430
4431	bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
4432		    U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
4433
4434	bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
4435		    U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
4436		    U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
4437
4438	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
4439#endif
4440
4441	if (CHIP_IS_E1H(bp)) {
4442		bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
4443		bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
4444		bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
4445		bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
4446		bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
4447		bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
4448		bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
4449		bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
4450		bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
4451
4452		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
4453		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
4454	}
4455
4456	/* HC init per function */
4457	if (CHIP_IS_E1H(bp)) {
4458		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
4459
4460		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
4461		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
4462	}
4463	bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
4464
4465	/* Reset PCIE errors for debug */
4466	REG_WR(bp, 0x2114, 0xffffffff);
4467	REG_WR(bp, 0x2120, 0xffffffff);
4468
4469	return 0;
4470}
4471
4472int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
4473{
4474	int i, rc = 0;
4475
4476	DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
4477	   BP_FUNC(bp), load_code);
4478
4479	bp->dmae_ready = 0;
4480	mutex_init(&bp->dmae_mutex);
4481	rc = bnx2x_gunzip_init(bp);
4482	if (rc)
4483		return rc;
4484
4485	switch (load_code) {
4486	case FW_MSG_CODE_DRV_LOAD_COMMON:
4487		rc = bnx2x_init_common(bp);
4488		if (rc)
4489			goto init_hw_err;
4490		/* no break */
4491
4492	case FW_MSG_CODE_DRV_LOAD_PORT:
4493		bp->dmae_ready = 1;
4494		rc = bnx2x_init_port(bp);
4495		if (rc)
4496			goto init_hw_err;
4497		/* no break */
4498
4499	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4500		bp->dmae_ready = 1;
4501		rc = bnx2x_init_func(bp);
4502		if (rc)
4503			goto init_hw_err;
4504		break;
4505
4506	default:
4507		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4508		break;
4509	}
4510
4511	if (!BP_NOMCP(bp)) {
4512		int func = BP_FUNC(bp);
4513
4514		bp->fw_drv_pulse_wr_seq =
4515				(SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
4516				 DRV_PULSE_SEQ_MASK);
4517		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
4518	}
4519
4520	/* this needs to be done before gunzip end */
4521	bnx2x_zero_def_sb(bp);
4522	for_each_queue(bp, i)
4523		bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
4524#ifdef BCM_CNIC
4525	bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
4526#endif
4527
4528init_hw_err:
4529	bnx2x_gunzip_end(bp);
4530
4531	return rc;
4532}
4533
4534void bnx2x_free_mem(struct bnx2x *bp)
4535{
4536
4537#define BNX2X_PCI_FREE(x, y, size) \
4538	do { \
4539		if (x) { \
4540			dma_free_coherent(&bp->pdev->dev, size, x, y); \
4541			x = NULL; \
4542			y = 0; \
4543		} \
4544	} while (0)
4545
4546#define BNX2X_FREE(x) \
4547	do { \
4548		if (x) { \
4549			vfree(x); \
4550			x = NULL; \
4551		} \
4552	} while (0)
4553
4554	int i;
4555
4556	/* fastpath */
4557	/* Common */
4558	for_each_queue(bp, i) {
4559
4560		/* status blocks */
4561		BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
4562			       bnx2x_fp(bp, i, status_blk_mapping),
4563			       sizeof(struct host_status_block));
4564	}
4565	/* Rx */
4566	for_each_queue(bp, i) {
4567
4568		/* fastpath rx rings: rx_buf rx_desc rx_comp */
4569		BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
4570		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
4571			       bnx2x_fp(bp, i, rx_desc_mapping),
4572			       sizeof(struct eth_rx_bd) * NUM_RX_BD);
4573
4574		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
4575			       bnx2x_fp(bp, i, rx_comp_mapping),
4576			       sizeof(struct eth_fast_path_rx_cqe) *
4577			       NUM_RCQ_BD);
4578
4579		/* SGE ring */
4580		BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
4581		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
4582			       bnx2x_fp(bp, i, rx_sge_mapping),
4583			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4584	}
4585	/* Tx */
4586	for_each_queue(bp, i) {
4587
4588		/* fastpath tx rings: tx_buf tx_desc */
4589		BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
4590		BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
4591			       bnx2x_fp(bp, i, tx_desc_mapping),
4592			       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4593	}
4594	/* end of fastpath */
4595
4596	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
4597		       sizeof(struct host_def_status_block));
4598
4599	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
4600		       sizeof(struct bnx2x_slowpath));
4601
4602#ifdef BCM_CNIC
4603	BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
4604	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
4605	BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
4606	BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
4607	BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
4608		       sizeof(struct host_status_block));
4609#endif
4610	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
4611
4612#undef BNX2X_PCI_FREE
4613#undef BNX2X_FREE
4614}
4615
4616int bnx2x_alloc_mem(struct bnx2x *bp)
4617{
4618
4619#define BNX2X_PCI_ALLOC(x, y, size) \
4620	do { \
4621		x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
4622		if (x == NULL) \
4623			goto alloc_mem_err; \
4624		memset(x, 0, size); \
4625	} while (0)
4626
4627#define BNX2X_ALLOC(x, size) \
4628	do { \
4629		x = vmalloc(size); \
4630		if (x == NULL) \
4631			goto alloc_mem_err; \
4632		memset(x, 0, size); \
4633	} while (0)
4634
4635	int i;
4636
4637	/* fastpath */
4638	/* Common */
4639	for_each_queue(bp, i) {
4640		bnx2x_fp(bp, i, bp) = bp;
4641
4642		/* status blocks */
4643		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
4644				&bnx2x_fp(bp, i, status_blk_mapping),
4645				sizeof(struct host_status_block));
4646	}
4647	/* Rx */
4648	for_each_queue(bp, i) {
4649
4650		/* fastpath rx rings: rx_buf rx_desc rx_comp */
4651		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
4652				sizeof(struct sw_rx_bd) * NUM_RX_BD);
4653		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
4654				&bnx2x_fp(bp, i, rx_desc_mapping),
4655				sizeof(struct eth_rx_bd) * NUM_RX_BD);
4656
4657		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
4658				&bnx2x_fp(bp, i, rx_comp_mapping),
4659				sizeof(struct eth_fast_path_rx_cqe) *
4660				NUM_RCQ_BD);
4661
4662		/* SGE ring */
4663		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
4664				sizeof(struct sw_rx_page) * NUM_RX_SGE);
4665		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
4666				&bnx2x_fp(bp, i, rx_sge_mapping),
4667				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4668	}
4669	/* Tx */
4670	for_each_queue(bp, i) {
4671
4672		/* fastpath tx rings: tx_buf tx_desc */
4673		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
4674				sizeof(struct sw_tx_bd) * NUM_TX_BD);
4675		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
4676				&bnx2x_fp(bp, i, tx_desc_mapping),
4677				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4678	}
4679	/* end of fastpath */
4680
4681	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
4682			sizeof(struct host_def_status_block));
4683
4684	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
4685			sizeof(struct bnx2x_slowpath));
4686
4687#ifdef BCM_CNIC
4688	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
4689
4690	/* allocate the searcher T2 table;
4691	   we allocate 1/4 of the T1 size for T2
4692	   (which is not entered into the ILT) */
4693	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
4694
4695	/* Initialize T2 (for 1024 connections) */
4696	for (i = 0; i < 16*1024; i += 64)
4697		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
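	/* the loop above chains the 64-byte T2 entries into a free
	 * list: the last 8 bytes (offset 56) of each entry hold the
	 * physical address of the next entry */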
4698
4699	/* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
4700	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
4701
4702	/* QM queues (128*MAX_CONN) */
4703	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
4704
4705	BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
4706			sizeof(struct host_status_block));
4707#endif
4708
4709	/* Slow path ring */
4710	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
4711
4712	return 0;
4713
4714alloc_mem_err:
4715	bnx2x_free_mem(bp);
4716	return -ENOMEM;
4717
4718#undef BNX2X_PCI_ALLOC
4719#undef BNX2X_ALLOC
4720}
4721
4722
4723/*
4724 * Init service functions
4725 */
4726
4727/**
4728 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
4729 *
4730 * @param bp driver descriptor
4731 * @param set set or clear an entry (1 or 0)
4732 * @param mac pointer to a buffer containing a MAC
4733 * @param cl_bit_vec bit vector of clients to register a MAC for
4734 * @param cam_offset offset in a CAM to use
4735 * @param with_bcast set broadcast MAC as well
4736 */
4737static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
4738				      u32 cl_bit_vec, u8 cam_offset,
4739				      u8 with_bcast)
4740{
4741	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
4742	int port = BP_PORT(bp);
4743
4744	/* CAM allocation
4745	 * unicasts 0-31:port0 32-63:port1
4746	 * multicast 64-127:port0 128-191:port1
4747	 */
4748	config->hdr.length = 1 + (with_bcast ? 1 : 0);
4749	config->hdr.offset = cam_offset;
4750	config->hdr.client_id = 0xff;
4751	config->hdr.reserved1 = 0;
4752
4753	/* primary MAC */
4754	config->config_table[0].cam_entry.msb_mac_addr =
4755					swab16(*(u16 *)&mac[0]);
4756	config->config_table[0].cam_entry.middle_mac_addr =
4757					swab16(*(u16 *)&mac[2]);
4758	config->config_table[0].cam_entry.lsb_mac_addr =
4759					swab16(*(u16 *)&mac[4]);
4760	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
4761	if (set)
4762		config->config_table[0].target_table_entry.flags = 0;
4763	else
4764		CAM_INVALIDATE(config->config_table[0]);
4765	config->config_table[0].target_table_entry.clients_bit_vector =
4766						cpu_to_le32(cl_bit_vec);
4767	config->config_table[0].target_table_entry.vlan_id = 0;
4768
4769	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
4770	   (set ? "setting" : "clearing"),
4771	   config->config_table[0].cam_entry.msb_mac_addr,
4772	   config->config_table[0].cam_entry.middle_mac_addr,
4773	   config->config_table[0].cam_entry.lsb_mac_addr);
4774
4775	/* broadcast */
4776	if (with_bcast) {
4777		config->config_table[1].cam_entry.msb_mac_addr =
4778			cpu_to_le16(0xffff);
4779		config->config_table[1].cam_entry.middle_mac_addr =
4780			cpu_to_le16(0xffff);
4781		config->config_table[1].cam_entry.lsb_mac_addr =
4782			cpu_to_le16(0xffff);
4783		config->config_table[1].cam_entry.flags = cpu_to_le16(port);
4784		if (set)
4785			config->config_table[1].target_table_entry.flags =
4786					TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
4787		else
4788			CAM_INVALIDATE(config->config_table[1]);
4789		config->config_table[1].target_table_entry.clients_bit_vector =
4790							cpu_to_le32(cl_bit_vec);
4791		config->config_table[1].target_table_entry.vlan_id = 0;
4792	}
4793
4794	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
4795		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
4796		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
4797}
4798
4799/**
4800 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
4801 *
4802 * @param bp driver descriptor
4803 * @param set set or clear an entry (1 or 0)
4804 * @param mac pointer to a buffer containing a MAC
4805 * @param cl_bit_vec bit vector of clients to register a MAC for
4806 * @param cam_offset offset in a CAM to use
4807 */
4808static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
4809				       u32 cl_bit_vec, u8 cam_offset)
4810{
4811	struct mac_configuration_cmd_e1h *config =
4812		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
4813
4814	config->hdr.length = 1;
4815	config->hdr.offset = cam_offset;
4816	config->hdr.client_id = 0xff;
4817	config->hdr.reserved1 = 0;
4818
4819	/* primary MAC */
4820	config->config_table[0].msb_mac_addr =
4821					swab16(*(u16 *)&mac[0]);
4822	config->config_table[0].middle_mac_addr =
4823					swab16(*(u16 *)&mac[2]);
4824	config->config_table[0].lsb_mac_addr =
4825					swab16(*(u16 *)&mac[4]);
4826	config->config_table[0].clients_bit_vector =
4827					cpu_to_le32(cl_bit_vec);
4828	config->config_table[0].vlan_id = 0;
4829	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
4830	if (set)
4831		config->config_table[0].flags = BP_PORT(bp);
4832	else
4833		config->config_table[0].flags =
4834				MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
4835
4836	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID mask %d\n",
4837	   (set ? "setting" : "clearing"),
4838	   config->config_table[0].msb_mac_addr,
4839	   config->config_table[0].middle_mac_addr,
4840	   config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
4841
4842	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
4843		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
4844		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
4845}
4846
4847static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
4848			     int *state_p, int poll)
4849{
4850	/* can take a while if any port is running */
4851	int cnt = 5000;
4852
4853	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
4854	   poll ? "polling" : "waiting", state, idx);
4855
4856	might_sleep();
4857	while (cnt--) {
4858		if (poll) {
4859			bnx2x_rx_int(bp->fp, 10);
4860			/* if the index differs from 0,
4861			 * the reply for some commands will
4862			 * arrive on a non-default queue
4863			 */
4864			if (idx)
4865				bnx2x_rx_int(&bp->fp[idx], 10);
4866		}
4867
4868		mb(); /* state is changed by bnx2x_sp_event() */
4869		if (*state_p == state) {
4870#ifdef BNX2X_STOP_ON_ERROR
4871			DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
4872#endif
4873			return 0;
4874		}
4875
4876		msleep(1);
4877
4878		if (bp->panic)
4879			return -EIO;
4880	}
4881
4882	/* timeout! */
4883	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
4884		  poll ? "polling" : "waiting", state, idx);
4885#ifdef BNX2X_STOP_ON_ERROR
4886	bnx2x_panic();
4887#endif
4888
4889	return -EBUSY;
4890}
4891
4892void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
4893{
4894	bp->set_mac_pending++;
4895	smp_wmb();
4896
4897	bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
4898				   (1 << bp->fp->cl_id), BP_FUNC(bp));
4899
4900	/* Wait for a completion */
4901	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
4902}
4903
4904void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
4905{
4906	bp->set_mac_pending++;
4907	smp_wmb();
4908
4909	bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
4910				  (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
4911				  1);
4912
4913	/* Wait for a completion */
4914	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
4915}
4916
4917#ifdef BCM_CNIC
4918/**
4919 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
4920 * MAC(s). This function will wait until the ramrod completion
4921 * returns.
4922 *
4923 * @param bp driver handle
4924 * @param set set or clear the CAM entry
4925 *
4926 * @return 0 on success, -ENODEV if the ramrod doesn't return.
4927 */
4928int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
4929{
4930	u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
4931
4932	bp->set_mac_pending++;
4933	smp_wmb();
4934
4935	/* Send a SET_MAC ramrod */
4936	if (CHIP_IS_E1(bp))
4937		bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
4938				  cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
4939				  1);
4940	else
4941		/* CAM allocation for E1H
4942		 * unicasts: by func number
4943		 * multicast: 20+FUNC*20, 20 each
4944		 */
4945		bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
4946				   cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
4947
4948	/* Wait for a completion when setting */
4949	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
4950
4951	return 0;
4952}
4953#endif
4954
4955int bnx2x_setup_leading(struct bnx2x *bp)
4956{
4957	int rc;
4958
4959	/* reset IGU state */
4960	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4961
4962	/* SETUP ramrod */
4963	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
4964
4965	/* Wait for completion */
4966	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
4967
4968	return rc;
4969}
4970
4971int bnx2x_setup_multi(struct bnx2x *bp, int index)
4972{
4973	struct bnx2x_fastpath *fp = &bp->fp[index];
4974
4975	/* reset IGU state */
4976	bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4977
4978	/* SETUP ramrod */
4979	fp->state = BNX2X_FP_STATE_OPENING;
4980	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
4981		      fp->cl_id, 0);
4982
4983	/* Wait for completion */
4984	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
4985				 &(fp->state), 0);
4986}
4987
4988
4989void bnx2x_set_num_queues_msix(struct bnx2x *bp)
4990{
4991
4992	switch (bp->multi_mode) {
4993	case ETH_RSS_MODE_DISABLED:
4994		bp->num_queues = 1;
4995		break;
4996
4997	case ETH_RSS_MODE_REGULAR:
4998		if (num_queues)
4999			bp->num_queues = min_t(u32, num_queues,
5000						  BNX2X_MAX_QUEUES(bp));
5001		else
5002			bp->num_queues = min_t(u32, num_online_cpus(),
5003						  BNX2X_MAX_QUEUES(bp));
5004		break;
5005
5006
5007	default:
5008		bp->num_queues = 1;
5009		break;
5010	}
5011}
5012
5015static int bnx2x_stop_multi(struct bnx2x *bp, int index)
5016{
5017	struct bnx2x_fastpath *fp = &bp->fp[index];
5018	int rc;
5019
5020	/* halt the connection */
5021	fp->state = BNX2X_FP_STATE_HALTING;
5022	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
5023
5024	/* Wait for completion */
5025	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
5026			       &(fp->state), 1);
5027	if (rc) /* timeout */
5028		return rc;
5029
5030	/* delete cfc entry */
5031	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
5032
5033	/* Wait for completion */
5034	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
5035			       &(fp->state), 1);
5036	return rc;
5037}
5038
5039static int bnx2x_stop_leading(struct bnx2x *bp)
5040{
5041	__le16 dsb_sp_prod_idx;
5042	/* if the other port is handling traffic,
5043	   this can take a lot of time */
5044	int cnt = 500;
5045	int rc;
5046
5047	might_sleep();
5048
5049	/* Send HALT ramrod */
5050	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
5051	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
5052
5053	/* Wait for completion */
5054	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
5055			       &(bp->fp[0].state), 1);
5056	if (rc) /* timeout */
5057		return rc;
5058
5059	dsb_sp_prod_idx = *bp->dsb_sp_prod;
5060
5061	/* Send PORT_DELETE ramrod */
5062	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
5063
5064	/* Wait for the completion to arrive on the default status
5065	   block; we are going to reset the chip anyway, so there
5066	   is not much to do if this times out
5067	 */
5068	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
5069		if (!cnt) {
5070			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
5071			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
5072			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
5073#ifdef BNX2X_STOP_ON_ERROR
5074			bnx2x_panic();
5075#endif
5076			rc = -EBUSY;
5077			break;
5078		}
5079		cnt--;
5080		msleep(1);
5081		rmb(); /* Refresh the dsb_sp_prod */
5082	}
5083	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
5084	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
5085
5086	return rc;
5087}
5088
5089static void bnx2x_reset_func(struct bnx2x *bp)
5090{
5091	int port = BP_PORT(bp);
5092	int func = BP_FUNC(bp);
5093	int base, i;
5094
5095	/* Configure IGU */
5096	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5097	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5098
5099#ifdef BCM_CNIC
5100	/* Disable Timer scan */
5101	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
5102	/*
5103	 * Wait for at least 10 ms and up to 2 seconds for the timers
5104	 * scan to complete
5105	 */
5106	for (i = 0; i < 200; i++) {
5107		msleep(10);
5108		if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
5109			break;
5110	}
5111#endif
5112	/* Clear ILT */
5113	base = FUNC_ILT_BASE(func);
5114	for (i = base; i < base + ILT_PER_FUNC; i++)
5115		bnx2x_ilt_wr(bp, i, 0);
5116}
5117
5118static void bnx2x_reset_port(struct bnx2x *bp)
5119{
5120	int port = BP_PORT(bp);
5121	u32 val;
5122
5123	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5124
5125	/* Do not rcv packets to BRB */
5126	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
5127	/* Do not direct rcv packets that are not for MCP to the BRB */
5128	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
5129			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
5130
5131	/* Configure AEU */
5132	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
5133
5134	msleep(100);
5135	/* Check for BRB port occupancy */
5136	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
5137	if (val)
5138		DP(NETIF_MSG_IFDOWN,
5139		   "BRB1 is not empty  %d blocks are occupied\n", val);
5140
5141	/* TODO: Close Doorbell port? */
5142}
5143
5144static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
5145{
5146	DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
5147	   BP_FUNC(bp), reset_code);
5148
5149	switch (reset_code) {
5150	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5151		bnx2x_reset_port(bp);
5152		bnx2x_reset_func(bp);
5153		bnx2x_reset_common(bp);
5154		break;
5155
5156	case FW_MSG_CODE_DRV_UNLOAD_PORT:
5157		bnx2x_reset_port(bp);
5158		bnx2x_reset_func(bp);
5159		break;
5160
5161	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5162		bnx2x_reset_func(bp);
5163		break;
5164
5165	default:
5166		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
5167		break;
5168	}
5169}
5170
5171void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
5172{
5173	int port = BP_PORT(bp);
5174	u32 reset_code = 0;
5175	int i, cnt, rc;
5176
5177	/* Wait until tx fastpath tasks complete */
5178	for_each_queue(bp, i) {
5179		struct bnx2x_fastpath *fp = &bp->fp[i];
5180
5181		cnt = 1000;
5182		while (bnx2x_has_tx_work_unload(fp)) {
5183
5184			bnx2x_tx_int(fp);
5185			if (!cnt) {
5186				BNX2X_ERR("timeout waiting for queue[%d]\n",
5187					  i);
5188#ifdef BNX2X_STOP_ON_ERROR
5189				bnx2x_panic();
5190				return; /* bnx2x_chip_cleanup() is void */
5191#else
5192				break;
5193#endif
5194			}
5195			cnt--;
5196			msleep(1);
5197		}
5198	}
5199	/* Give HW time to discard old tx messages */
5200	msleep(1);
5201
5202	if (CHIP_IS_E1(bp)) {
5203		struct mac_configuration_cmd *config =
5204						bnx2x_sp(bp, mcast_config);
5205
5206		bnx2x_set_eth_mac_addr_e1(bp, 0);
5207
5208		for (i = 0; i < config->hdr.length; i++)
5209			CAM_INVALIDATE(config->config_table[i]);
5210
5211		config->hdr.length = i;
5212		if (CHIP_REV_IS_SLOW(bp))
5213			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
5214		else
5215			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
5216		config->hdr.client_id = bp->fp->cl_id;
5217		config->hdr.reserved1 = 0;
5218
5219		bp->set_mac_pending++;
5220		smp_wmb();
5221
5222		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
5223			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
5224			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
5225
5226	} else { /* E1H */
5227		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
5228
5229		bnx2x_set_eth_mac_addr_e1h(bp, 0);
5230
5231		for (i = 0; i < MC_HASH_SIZE; i++)
5232			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
5233
5234		REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
5235	}
5236#ifdef BCM_CNIC
5237	/* Clear iSCSI L2 MAC */
5238	mutex_lock(&bp->cnic_mutex);
5239	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
5240		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
5241		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
5242	}
5243	mutex_unlock(&bp->cnic_mutex);
5244#endif
5245
5246	if (unload_mode == UNLOAD_NORMAL)
5247		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5248
5249	else if (bp->flags & NO_WOL_FLAG)
5250		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
5251
5252	else if (bp->wol) {
5253		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
5254		u8 *mac_addr = bp->dev->dev_addr;
5255		u32 val;
5256		/* The mac address is written to entries 1-4 to
5257		   preserve entry 0 which is used by the PMF */
5258		u8 entry = (BP_E1HVN(bp) + 1)*8;
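		/* each MAC-match entry is two 32-bit registers, i.e.
		 * 8 bytes, so VNs 0-3 land on entries 1-4 */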
5259
5260		val = (mac_addr[0] << 8) | mac_addr[1];
5261		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
5262
5263		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
5264		      (mac_addr[4] << 8) | mac_addr[5];
5265		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
5266
5267		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
5268
5269	} else
5270		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5271
5272	/* Close the multi and leading connections;
5273	   ramrod completions are collected synchronously */
5274	for_each_nondefault_queue(bp, i)
5275		if (bnx2x_stop_multi(bp, i))
5276			goto unload_error;
5277
5278	rc = bnx2x_stop_leading(bp);
5279	if (rc) {
5280		BNX2X_ERR("Stop leading failed!\n");
5281#ifdef BNX2X_STOP_ON_ERROR
5282		return; /* bnx2x_chip_cleanup() is void */
5283#else
5284		goto unload_error;
5285#endif
5286	}
5287
5288unload_error:
5289	if (!BP_NOMCP(bp))
5290		reset_code = bnx2x_fw_command(bp, reset_code);
5291	else {
5292		DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
5293		   load_count[0], load_count[1], load_count[2]);
5294		load_count[0]--;
5295		load_count[1 + port]--;
5296		DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
5297		   load_count[0], load_count[1], load_count[2]);
5298		if (load_count[0] == 0)
5299			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
5300		else if (load_count[1 + port] == 0)
5301			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
5302		else
5303			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
5304	}
5305
5306	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
5307	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
5308		bnx2x__link_reset(bp);
5309
5310	/* Reset the chip */
5311	bnx2x_reset_chip(bp, reset_code);
5312
5313	/* Report UNLOAD_DONE to MCP */
5314	if (!BP_NOMCP(bp))
5315		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
5316
5317}
5318
5319void bnx2x_disable_close_the_gate(struct bnx2x *bp)
5320{
5321	u32 val;
5322
5323	DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
5324
5325	if (CHIP_IS_E1(bp)) {
5326		int port = BP_PORT(bp);
5327		u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
5328			MISC_REG_AEU_MASK_ATTN_FUNC_0;
5329
5330		val = REG_RD(bp, addr);
5331		val &= ~(0x300);
5332		REG_WR(bp, addr, val);
5333	} else if (CHIP_IS_E1H(bp)) {
5334		val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
5335		val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
5336			 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
5337		REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
5338	}
5339}
5340
5341
5342/* Close gates #2, #3 and #4: */
5343static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
5344{
5345	u32 val, addr;
5346
5347	/* Gates #2 and #4a are closed/opened for "not E1" only */
5348	if (!CHIP_IS_E1(bp)) {
5349		/* #4 */
5350		val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
5351		REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
5352		       close ? (val | 0x1) : (val & (~(u32)1)));
5353		/* #2 */
5354		val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
5355		REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
5356		       close ? (val | 0x1) : (val & (~(u32)1)));
5357	}
5358
5359	/* #3 */
5360	addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
5361	val = REG_RD(bp, addr);
5362	REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
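	/* note the inverted sense relative to gates #2/#4: closing
	 * gate #3 clears bit 0 of HC_REG_CONFIG_x, whereas the other
	 * gates are closed by setting their discard bits */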
5363
5364	DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
5365		close ? "closing" : "opening");
5366	mmiowb();
5367}
5368
5369#define SHARED_MF_CLP_MAGIC  0x80000000 /* `magic' bit */
5370
5371static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
5372{
5373	/* Do some magic... */
5374	u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
5375	*magic_val = val & SHARED_MF_CLP_MAGIC;
5376	MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
5377}
5378
5379/* Restore the value of the `magic' bit.
5380 *
5381 * @param bp Driver handle.
5382 * @param magic_val Old value of the `magic' bit.
5383 */
5384static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
5385{
5386	/* Restore the `magic' bit value... */
5390	u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
5391	MF_CFG_WR(bp, shared_mf_config.clp_mb,
5392		(val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
5393}
5394
5395/* Prepares for MCP reset: takes care of CLP configurations.
5396 *
5397 * @param bp
5398 * @param magic_val Old value of 'magic' bit.
5399 */
5400static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
5401{
5402	u32 shmem;
5403	u32 validity_offset;
5404
5405	DP(NETIF_MSG_HW, "Starting\n");
5406
5407	/* Set `magic' bit in order to save MF config */
5408	if (!CHIP_IS_E1(bp))
5409		bnx2x_clp_reset_prep(bp, magic_val);
5410
5411	/* Get shmem offset */
5412	shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
5413	validity_offset = offsetof(struct shmem_region, validity_map[0]);
5414
5415	/* Clear validity map flags */
5416	if (shmem > 0)
5417		REG_WR(bp, shmem + validity_offset, 0);
5418}
5419
5420#define MCP_TIMEOUT      5000   /* 5 seconds (in ms) */
5421#define MCP_ONE_TIMEOUT  100    /* 100 ms */
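
/* i.e. up to MCP_TIMEOUT/MCP_ONE_TIMEOUT = 50 polling iterations,
 * each MCP_ONE_TIMEOUT long (10 times longer on emulation/FPGA,
 * see bnx2x_mcp_wait_one() below)
 */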
5422
5423/* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
5424 * depending on the HW type.
5425 *
5426 * @param bp
5427 */
5428static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
5429{
5430	/* special handling for emulation and FPGA,
5431	   wait 10 times longer */
5432	if (CHIP_REV_IS_SLOW(bp))
5433		msleep(MCP_ONE_TIMEOUT*10);
5434	else
5435		msleep(MCP_ONE_TIMEOUT);
5436}
5437
5438static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
5439{
5440	u32 shmem, cnt, validity_offset, val;
5441	int rc = 0;
5442
5443	msleep(100);
5444
5445	/* Get shmem offset */
5446	shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
5447	if (shmem == 0) {
5448		BNX2X_ERR("Shmem address read back as 0\n");
5449		rc = -ENOTTY;
5450		goto exit_lbl;
5451	}
5452
5453	validity_offset = offsetof(struct shmem_region, validity_map[0]);
5454
5455	/* Wait for MCP to come up */
5456	for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
5457		/* TBD: it's best to check the validity map of the
5458		 * last port; currently port 0 is checked.
5459		 */
5460		val = REG_RD(bp, shmem + validity_offset);
5461		DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
5462		   shmem + validity_offset, val);
5463
5464		/* check that shared memory is valid. */
5465		if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
5466		    == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
5467			break;
5468
5469		bnx2x_mcp_wait_one(bp);
5470	}
5471
5472	DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
5473
5474	/* Check that shared memory is valid. This indicates that MCP is up. */
5475	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
5476	    (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
5477		BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
5478		rc = -ENOTTY;
5479		goto exit_lbl;
5480	}
5481
5482exit_lbl:
5483	/* Restore the `magic' bit value */
5484	if (!CHIP_IS_E1(bp))
5485		bnx2x_clp_reset_done(bp, magic_val);
5486
5487	return rc;
5488}
5489
5490static void bnx2x_pxp_prep(struct bnx2x *bp)
5491{
5492	if (!CHIP_IS_E1(bp)) {
5493		REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
5494		REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
5495		REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
5496		mmiowb();
5497	}
5498}
5499
5500/*
5501 * Reset the whole chip except for:
5502 *      - PCIE core
5503 *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
5504 *              one reset bit)
5505 *      - IGU
5506 *      - MISC (including AEU)
5507 *      - GRC
5508 *      - RBCN, RBCP
5509 */
5510static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
5511{
5512	u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
5513
5514	not_reset_mask1 =
5515		MISC_REGISTERS_RESET_REG_1_RST_HC |
5516		MISC_REGISTERS_RESET_REG_1_RST_PXPV |
5517		MISC_REGISTERS_RESET_REG_1_RST_PXP;
5518
5519	not_reset_mask2 =
5520		MISC_REGISTERS_RESET_REG_2_RST_MDIO |
5521		MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
5522		MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
5523		MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
5524		MISC_REGISTERS_RESET_REG_2_RST_RBCN |
5525		MISC_REGISTERS_RESET_REG_2_RST_GRC  |
5526		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
5527		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
5528
5529	reset_mask1 = 0xffffffff;
5530
5531	if (CHIP_IS_E1(bp))
5532		reset_mask2 = 0xffff;
5533	else
5534		reset_mask2 = 0x1ffff;
5535
5536	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5537	       reset_mask1 & (~not_reset_mask1));
5538	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
5539	       reset_mask2 & (~not_reset_mask2));
5540
5541	barrier();
5542	mmiowb();
5543
5544	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
5545	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
5546	mmiowb();
5547}
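
/* Reset-register semantics in brief: writing a mask to *_CLEAR puts the
 * selected blocks into reset, writing it to *_SET releases them again.
 * For example, the function above effectively computes
 *
 *	reset_mask1 & ~not_reset_mask1
 *
 * i.e. 0xffffffff with the HC, PXPV and PXP bits carved out, so those
 * blocks stay out of reset across the "process kill".
 */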
5548
5549static int bnx2x_process_kill(struct bnx2x *bp)
5550{
5551	int cnt = 1000;
5552	u32 val = 0;
5553	u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
5554
5555
5556	/* Empty the Tetris buffer, wait for 1s */
5557	do {
5558		sr_cnt  = REG_RD(bp, PXP2_REG_RD_SR_CNT);
5559		blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
5560		port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
5561		port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
5562		pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
5563		if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
5564		    ((port_is_idle_0 & 0x1) == 0x1) &&
5565		    ((port_is_idle_1 & 0x1) == 0x1) &&
5566		    (pgl_exp_rom2 == 0xffffffff))
5567			break;
5568		msleep(1);
5569	} while (cnt-- > 0);
5570
5571	if (cnt < 0) {	/* cnt goes to -1 only when the poll timed out */
5572		DP(NETIF_MSG_HW, "Tetris buffer didn't empty or there"
5573			  " are still"
5574			  " outstanding read requests after 1s!\n");
5575		DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
5576			  " port_is_idle_0=0x%08x,"
5577			  " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
5578			  sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
5579			  pgl_exp_rom2);
5580		return -EAGAIN;
5581	}
5582
5583	barrier();
5584
5585	/* Close gates #2, #3 and #4 */
5586	bnx2x_set_234_gates(bp, true);
5587
5588	/* TBD: Indicate that "process kill" is in progress to MCP */
5589
5590	/* Clear "unprepared" bit */
5591	REG_WR(bp, MISC_REG_UNPREPARED, 0);
5592	barrier();
5593
5594	/* Make sure all is written to the chip before the reset */
5595	mmiowb();
5596
5597	/* Wait for 1ms to empty GLUE and PCI-E core queues,
5598	 * PSWHST, GRC and PSWRD Tetris buffer.
5599	 */
5600	msleep(1);
5601
5602	/* Prepare for chip reset: */
5603	/* MCP */
5604	bnx2x_reset_mcp_prep(bp, &val);
5605
5606	/* PXP */
5607	bnx2x_pxp_prep(bp);
5608	barrier();
5609
5610	/* reset the chip */
5611	bnx2x_process_kill_chip_reset(bp);
5612	barrier();
5613
5614	/* Recover after reset: */
5615	/* MCP */
5616	if (bnx2x_reset_mcp_comp(bp, val))
5617		return -EAGAIN;
5618
5619	/* PXP */
5620	bnx2x_pxp_prep(bp);
5621
5622	/* Open the gates #2, #3 and #4 */
5623	bnx2x_set_234_gates(bp, false);
5624
5625	/* TBD: IGU/AEU preparation - bring the AEU/IGU back to a
5626	 * reset state, re-enable attentions. */
5627
5628	return 0;
5629}
5630
5631static int bnx2x_leader_reset(struct bnx2x *bp)
5632{
5633	int rc = 0;
5634	/* Try to recover after the failure */
5635	if (bnx2x_process_kill(bp)) {
5636		printk(KERN_ERR "%s: Something bad has happened! Aii!\n",
5637		       bp->dev->name);
5638		rc = -EAGAIN;
5639		goto exit_leader_reset;
5640	}
5641
5642	/* Clear "reset is in progress" bit and update the driver state */
5643	bnx2x_set_reset_done(bp);
5644	bp->recovery_state = BNX2X_RECOVERY_DONE;
5645
5646exit_leader_reset:
5647	bp->is_leader = 0;
5648	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
5649	smp_wmb();
5650	return rc;
5651}
5652
5653/* Assumption: runs under rtnl lock. This, together with the fact
5654 * that it's called only from bnx2x_reset_task(), ensures that it
5655 * will never be called when netif_running(bp->dev) is false.
5656 */
5657static void bnx2x_parity_recover(struct bnx2x *bp)
5658{
5659	DP(NETIF_MSG_HW, "Handling parity\n");
5660	while (1) {
5661		switch (bp->recovery_state) {
5662		case BNX2X_RECOVERY_INIT:
5663			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
5664			/* Try to get a LEADER_LOCK HW lock */
5665			if (bnx2x_trylock_hw_lock(bp,
5666				HW_LOCK_RESOURCE_RESERVED_08))
5667				bp->is_leader = 1;
5668
5669			/* Stop the driver */
5670			/* If interface has been removed - break */
5671			if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
5672				return;
5673
5674			bp->recovery_state = BNX2X_RECOVERY_WAIT;
5675			/* Ensure "is_leader" and "recovery_state"
5676			 *  update values are seen on other CPUs
5677			 */
5678			smp_wmb();
5679			break;
5680
5681		case BNX2X_RECOVERY_WAIT:
5682			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
5683			if (bp->is_leader) {
5684				u32 load_counter = bnx2x_get_load_cnt(bp);
5685				if (load_counter) {
5686					/* Wait until all other functions get
5687					 * down.
5688					 */
5689					schedule_delayed_work(&bp->reset_task,
5690								HZ/10);
5691					return;
5692				} else {
5693					/* If all other functions got down -
5694					 * try to bring the chip back to
5695					 * normal. In any case it's an exit
5696					 * point for a leader.
5697					 */
5698					if (bnx2x_leader_reset(bp) ||
5699					bnx2x_nic_load(bp, LOAD_NORMAL)) {
5700						printk(KERN_ERR"%s: Recovery "
5701						"has failed. Power cycle is "
5702						"needed.\n", bp->dev->name);
5703						/* Disconnect this device */
5704						netif_device_detach(bp->dev);
5705						/* Block ifup for all function
5706						 * of this ASIC until
5707						 * "process kill" or power
5708						 * cycle.
5709						 */
5710						bnx2x_set_reset_in_progress(bp);
5711						/* Shut down the power */
5712						bnx2x_set_power_state(bp,
5713								PCI_D3hot);
5714						return;
5715					}
5716
5717					return;
5718				}
5719			} else { /* non-leader */
5720				if (!bnx2x_reset_is_done(bp)) {
5721					/* Try to get a LEADER_LOCK HW lock,
5722					 * since a former leader may have
5723					 * been unloaded by the user or may
5724					 * have released leadership for some
5725					 * other reason.
5726					 */
5727					if (bnx2x_trylock_hw_lock(bp,
5728					    HW_LOCK_RESOURCE_RESERVED_08)) {
5729						/* I'm a leader now! Restart a
5730						 * switch case.
5731						 */
5732						bp->is_leader = 1;
5733						break;
5734					}
5735
5736					schedule_delayed_work(&bp->reset_task,
5737								HZ/10);
5738					return;
5739
5740				} else { /* A leader has completed
5741					  * the "process kill". It's an exit
5742					  * point for a non-leader.
5743					  */
5744					bnx2x_nic_load(bp, LOAD_NORMAL);
5745					bp->recovery_state =
5746						BNX2X_RECOVERY_DONE;
5747					smp_wmb();
5748					return;
5749				}
5750			}
5751		default:
5752			return;
5753		}
5754	}
5755}
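
/* Summary of the state machine above (informational only):
 *
 *	INIT: take leader lock if free, unload NIC      -> WAIT
 *	WAIT (leader):     other functions still loaded -> retry in HZ/10
 *	                   all functions down -> process kill -> DONE
 *	WAIT (non-leader): reset done   -> reload NIC   -> DONE
 *	                   reset not done -> try to become leader,
 *	                                    otherwise retry in HZ/10
 */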
5756
5757/* bnx2x_nic_unload() flushes the bnx2x_wq, so the reset task is
5758 * scheduled on the generic system workqueue to prevent a deadlock.
5759 */
5760static void bnx2x_reset_task(struct work_struct *work)
5761{
5762	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
5763
5764#ifdef BNX2X_STOP_ON_ERROR
5765	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
5766		  " so reset not done to allow debug dump,\n"
5767	 KERN_ERR " you will need to reboot when done\n");
5768	return;
5769#endif
5770
5771	rtnl_lock();
5772
5773	if (!netif_running(bp->dev))
5774		goto reset_task_exit;
5775
5776	if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
5777		bnx2x_parity_recover(bp);
5778	else {
5779		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
5780		bnx2x_nic_load(bp, LOAD_NORMAL);
5781	}
5782
5783reset_task_exit:
5784	rtnl_unlock();
5785}
5786
5787/* end of nic load/unload */
5788
5789/*
5790 * Init service functions
5791 */
5792
5793static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
5794{
5795	switch (func) {
5796	case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
5797	case 1:	return PXP2_REG_PGL_PRETEND_FUNC_F1;
5798	case 2:	return PXP2_REG_PGL_PRETEND_FUNC_F2;
5799	case 3:	return PXP2_REG_PGL_PRETEND_FUNC_F3;
5800	case 4:	return PXP2_REG_PGL_PRETEND_FUNC_F4;
5801	case 5:	return PXP2_REG_PGL_PRETEND_FUNC_F5;
5802	case 6:	return PXP2_REG_PGL_PRETEND_FUNC_F6;
5803	case 7:	return PXP2_REG_PGL_PRETEND_FUNC_F7;
5804	default:
5805		BNX2X_ERR("Unsupported function index: %d\n", func);
5806		return (u32)(-1);
5807	}
5808}
5809
5810static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
5811{
5812	u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
5813
5814	/* Flush all outstanding writes */
5815	mmiowb();
5816
5817	/* Pretend to be function 0 */
5818	REG_WR(bp, reg, 0);
5819	/* Flush the GRC transaction (in the chip) */
5820	new_val = REG_RD(bp, reg);
5821	if (new_val != 0) {
5822		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
5823			  new_val);
5824		BUG();
5825	}
5826
5827	/* From now on we are in the "like-E1" mode */
5828	bnx2x_int_disable(bp);
5829
5830	/* Flush all outstanding writes */
5831	mmiowb();
5832
5833	/* Restore the original function settings */
5834	REG_WR(bp, reg, orig_func);
5835	new_val = REG_RD(bp, reg);
5836	if (new_val != orig_func) {
5837		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
5838			  orig_func, new_val);
5839		BUG();
5840	}
5841}
5842
5843static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
5844{
5845	if (CHIP_IS_E1H(bp))
5846		bnx2x_undi_int_disable_e1h(bp, func);
5847	else
5848		bnx2x_int_disable(bp);
5849}
5850
5851static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
5852{
5853	u32 val;
5854
5855	/* Check if there is any driver already loaded */
5856	val = REG_RD(bp, MISC_REG_UNPREPARED);
5857	if (val == 0x1) {
5858		/* Check if it is the UNDI driver
5859		 * UNDI driver initializes CID offset for normal bell to 0x7
5860		 */
5861		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
5862		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
5863		if (val == 0x7) {
5864			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5865			/* save our func */
5866			int func = BP_FUNC(bp);
5867			u32 swap_en;
5868			u32 swap_val;
5869
5870			/* clear the UNDI indication */
5871			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
5872
5873			BNX2X_DEV_INFO("UNDI is active! reset device\n");
5874
5875			/* try to unload UNDI on port 0 */
5876			bp->func = 0;
5877			bp->fw_seq =
5878			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
5879				DRV_MSG_SEQ_NUMBER_MASK);
5880			reset_code = bnx2x_fw_command(bp, reset_code);
5881
5882			/* if UNDI is loaded on the other port */
5883			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
5884
5885				/* send "DONE" for previous unload */
5886				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
5887
5888				/* unload UNDI on port 1 */
5889				bp->func = 1;
5890				bp->fw_seq =
5891			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
5892					DRV_MSG_SEQ_NUMBER_MASK);
5893				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5894
5895				bnx2x_fw_command(bp, reset_code);
5896			}
5897
5898			/* now it's safe to release the lock */
5899			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
5900
5901			bnx2x_undi_int_disable(bp, func);
5902
5903			/* close input traffic and wait for it */
5904			/* Do not rcv packets to BRB */
5905			REG_WR(bp,
5906			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
5907					     NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
5908			/* Do not direct rcv packets that are not for MCP to
5909			 * the BRB */
5910			REG_WR(bp,
5911			       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
5912					      NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
5913			/* clear AEU */
5914			REG_WR(bp,
5915			     (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
5916					    MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
5917			msleep(10);
5918
5919			/* save NIG port swap info */
5920			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
5921			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
5922			/* reset device */
5923			REG_WR(bp,
5924			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5925			       0xd3ffffff);
5926			REG_WR(bp,
5927			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
5928			       0x1403);
5929			/* take the NIG out of reset and restore swap values */
5930			REG_WR(bp,
5931			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5932			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
5933			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
5934			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
5935
5936			/* send unload done to the MCP */
5937			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
5938
5939			/* restore our func and fw_seq */
5940			bp->func = func;
5941			bp->fw_seq =
5942			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
5943				DRV_MSG_SEQ_NUMBER_MASK);
5944
5945		} else
5946			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
5947	}
5948}
5949
5950static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
5951{
5952	u32 val, val2, val3, val4, id;
5953	u16 pmc;
5954
5955	/* Get the chip revision id and number. */
5956	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
5957	val = REG_RD(bp, MISC_REG_CHIP_NUM);
5958	id = ((val & 0xffff) << 16);
5959	val = REG_RD(bp, MISC_REG_CHIP_REV);
5960	id |= ((val & 0xf) << 12);
5961	val = REG_RD(bp, MISC_REG_CHIP_METAL);
5962	id |= ((val & 0xff) << 4);
5963	val = REG_RD(bp, MISC_REG_BOND_ID);
5964	id |= (val & 0xf);
5965	bp->common.chip_id = id;
5966	bp->link_params.chip_id = bp->common.chip_id;
5967	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
5968
5969	val = (REG_RD(bp, 0x2874) & 0x55);
5970	if ((bp->common.chip_id & 0x1) ||
5971	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
5972		bp->flags |= ONE_PORT_FLAG;
5973		BNX2X_DEV_INFO("single port device\n");
5974	}
5975
5976	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
5977	bp->common.flash_size = (NVRAM_1MB_SIZE <<
5978				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
5979	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
5980		       bp->common.flash_size, bp->common.flash_size);
5981
5982	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
5983	bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
5984	bp->link_params.shmem_base = bp->common.shmem_base;
5985	BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
5986		       bp->common.shmem_base, bp->common.shmem2_base);
5987
5988	if (!bp->common.shmem_base ||
5989	    (bp->common.shmem_base < 0xA0000) ||
5990	    (bp->common.shmem_base >= 0xC0000)) {
5991		BNX2X_DEV_INFO("MCP not active\n");
5992		bp->flags |= NO_MCP_FLAG;
5993		return;
5994	}
5995
5996	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
5997	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
5998		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
5999		BNX2X_ERROR("BAD MCP validity signature\n");
6000
6001	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
6002	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
6003
6004	bp->link_params.hw_led_mode = ((bp->common.hw_config &
6005					SHARED_HW_CFG_LED_MODE_MASK) >>
6006				       SHARED_HW_CFG_LED_MODE_SHIFT);
6007
6008	bp->link_params.feature_config_flags = 0;
6009	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
6010	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
6011		bp->link_params.feature_config_flags |=
6012				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
6013	else
6014		bp->link_params.feature_config_flags &=
6015				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
6016
6017	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
6018	bp->common.bc_ver = val;
6019	BNX2X_DEV_INFO("bc_ver %X\n", val);
6020	if (val < BNX2X_BC_VER) {
6021		/* for now only warn;
6022		 * later we might need to enforce this */
6023		BNX2X_ERROR("This driver needs bc_ver %X but found %X, "
6024			    "please upgrade BC\n", BNX2X_BC_VER, val);
6025	}
6026	bp->link_params.feature_config_flags |=
6027		(val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
6028		FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
6029
6030	if (BP_E1HVN(bp) == 0) {
6031		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
6032		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
6033	} else {
6034		/* no WOL capability for E1HVN != 0 */
6035		bp->flags |= NO_WOL_FLAG;
6036	}
6037	BNX2X_DEV_INFO("%sWoL capable\n",
6038		       (bp->flags & NO_WOL_FLAG) ? "not " : "");
6039
6040	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
6041	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
6042	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
6043	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
6044
6045	dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
6046		 val, val2, val3, val4);
6047}
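
/* Worked example of the chip id packing above: a hypothetical part with
 * CHIP_NUM 0x164e, REV 0x1, METAL 0x00 and BOND_ID 0x0 yields
 *
 *	id = (0x164e << 16) | (0x1 << 12) | (0x00 << 4) | 0x0 = 0x164e1000
 *
 * matching the "chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3"
 * layout noted above.
 */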
6048
6049static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
6050						    u32 switch_cfg)
6051{
6052	int port = BP_PORT(bp);
6053	u32 ext_phy_type;
6054
6055	switch (switch_cfg) {
6056	case SWITCH_CFG_1G:
6057		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
6058
6059		ext_phy_type =
6060			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6061		switch (ext_phy_type) {
6062		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
6063			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6064				       ext_phy_type);
6065
6066			bp->port.supported |= (SUPPORTED_10baseT_Half |
6067					       SUPPORTED_10baseT_Full |
6068					       SUPPORTED_100baseT_Half |
6069					       SUPPORTED_100baseT_Full |
6070					       SUPPORTED_1000baseT_Full |
6071					       SUPPORTED_2500baseX_Full |
6072					       SUPPORTED_TP |
6073					       SUPPORTED_FIBRE |
6074					       SUPPORTED_Autoneg |
6075					       SUPPORTED_Pause |
6076					       SUPPORTED_Asym_Pause);
6077			break;
6078
6079		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
6080			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
6081				       ext_phy_type);
6082
6083			bp->port.supported |= (SUPPORTED_10baseT_Half |
6084					       SUPPORTED_10baseT_Full |
6085					       SUPPORTED_100baseT_Half |
6086					       SUPPORTED_100baseT_Full |
6087					       SUPPORTED_1000baseT_Full |
6088					       SUPPORTED_TP |
6089					       SUPPORTED_FIBRE |
6090					       SUPPORTED_Autoneg |
6091					       SUPPORTED_Pause |
6092					       SUPPORTED_Asym_Pause);
6093			break;
6094
6095		default:
6096			BNX2X_ERR("NVRAM config error. "
6097				  "BAD SerDes ext_phy_config 0x%x\n",
6098				  bp->link_params.ext_phy_config);
6099			return;
6100		}
6101
6102		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
6103					   port*0x10);
6104		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
6105		break;
6106
6107	case SWITCH_CFG_10G:
6108		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
6109
6110		ext_phy_type =
6111			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6112		switch (ext_phy_type) {
6113		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
6114			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6115				       ext_phy_type);
6116
6117			bp->port.supported |= (SUPPORTED_10baseT_Half |
6118					       SUPPORTED_10baseT_Full |
6119					       SUPPORTED_100baseT_Half |
6120					       SUPPORTED_100baseT_Full |
6121					       SUPPORTED_1000baseT_Full |
6122					       SUPPORTED_2500baseX_Full |
6123					       SUPPORTED_10000baseT_Full |
6124					       SUPPORTED_TP |
6125					       SUPPORTED_FIBRE |
6126					       SUPPORTED_Autoneg |
6127					       SUPPORTED_Pause |
6128					       SUPPORTED_Asym_Pause);
6129			break;
6130
6131		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6132			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
6133				       ext_phy_type);
6134
6135			bp->port.supported |= (SUPPORTED_10000baseT_Full |
6136					       SUPPORTED_1000baseT_Full |
6137					       SUPPORTED_FIBRE |
6138					       SUPPORTED_Autoneg |
6139					       SUPPORTED_Pause |
6140					       SUPPORTED_Asym_Pause);
6141			break;
6142
6143		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6144			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
6145				       ext_phy_type);
6146
6147			bp->port.supported |= (SUPPORTED_10000baseT_Full |
6148					       SUPPORTED_2500baseX_Full |
6149					       SUPPORTED_1000baseT_Full |
6150					       SUPPORTED_FIBRE |
6151					       SUPPORTED_Autoneg |
6152					       SUPPORTED_Pause |
6153					       SUPPORTED_Asym_Pause);
6154			break;
6155
6156		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
6157			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
6158				       ext_phy_type);
6159
6160			bp->port.supported |= (SUPPORTED_10000baseT_Full |
6161					       SUPPORTED_FIBRE |
6162					       SUPPORTED_Pause |
6163					       SUPPORTED_Asym_Pause);
6164			break;
6165
6166		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
6167			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
6168				       ext_phy_type);
6169
6170			bp->port.supported |= (SUPPORTED_10000baseT_Full |
6171					       SUPPORTED_1000baseT_Full |
6172					       SUPPORTED_FIBRE |
6173					       SUPPORTED_Pause |
6174					       SUPPORTED_Asym_Pause);
6175			break;
6176
6177		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6178			BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
6179				       ext_phy_type);
6180
6181			bp->port.supported |= (SUPPORTED_10000baseT_Full |
6182					       SUPPORTED_1000baseT_Full |
6183					       SUPPORTED_Autoneg |
6184					       SUPPORTED_FIBRE |
6185					       SUPPORTED_Pause |
6186					       SUPPORTED_Asym_Pause);
6187			break;
6188
6189		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6190			BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
6191				       ext_phy_type);
6192
6193			bp->port.supported |= (SUPPORTED_10000baseT_Full |
6194					       SUPPORTED_1000baseT_Full |
6195					       SUPPORTED_Autoneg |
6196					       SUPPORTED_FIBRE |
6197					       SUPPORTED_Pause |
6198					       SUPPORTED_Asym_Pause);
6199			break;
6200
6201		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6202			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
6203				       ext_phy_type);
6204
6205			bp->port.supported |= (SUPPORTED_10000baseT_Full |
6206					       SUPPORTED_TP |
6207					       SUPPORTED_Autoneg |
6208					       SUPPORTED_Pause |
6209					       SUPPORTED_Asym_Pause);
6210			break;
6211
6212		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
6213			BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
6214				       ext_phy_type);
6215
6216			bp->port.supported |= (SUPPORTED_10baseT_Half |
6217					       SUPPORTED_10baseT_Full |
6218					       SUPPORTED_100baseT_Half |
6219					       SUPPORTED_100baseT_Full |
6220					       SUPPORTED_1000baseT_Full |
6221					       SUPPORTED_10000baseT_Full |
6222					       SUPPORTED_TP |
6223					       SUPPORTED_Autoneg |
6224					       SUPPORTED_Pause |
6225					       SUPPORTED_Asym_Pause);
6226			break;
6227
6228		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
6229			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
6230				  bp->link_params.ext_phy_config);
6231			break;
6232
6233		default:
6234			BNX2X_ERR("NVRAM config error. "
6235				  "BAD XGXS ext_phy_config 0x%x\n",
6236				  bp->link_params.ext_phy_config);
6237			return;
6238		}
6239
6240		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
6241					   port*0x18);
6242		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
6243
6244		break;
6245
6246	default:
6247		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
6248			  bp->port.link_config);
6249		return;
6250	}
6251	bp->link_params.phy_addr = bp->port.phy_addr;
6252
6253	/* mask what we support according to speed_cap_mask */
6254	if (!(bp->link_params.speed_cap_mask &
6255				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
6256		bp->port.supported &= ~SUPPORTED_10baseT_Half;
6257
6258	if (!(bp->link_params.speed_cap_mask &
6259				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
6260		bp->port.supported &= ~SUPPORTED_10baseT_Full;
6261
6262	if (!(bp->link_params.speed_cap_mask &
6263				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
6264		bp->port.supported &= ~SUPPORTED_100baseT_Half;
6265
6266	if (!(bp->link_params.speed_cap_mask &
6267				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
6268		bp->port.supported &= ~SUPPORTED_100baseT_Full;
6269
6270	if (!(bp->link_params.speed_cap_mask &
6271					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
6272		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
6273					SUPPORTED_1000baseT_Full);
6274
6275	if (!(bp->link_params.speed_cap_mask &
6276					PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
6277		bp->port.supported &= ~SUPPORTED_2500baseX_Full;
6278
6279	if (!(bp->link_params.speed_cap_mask &
6280					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
6281		bp->port.supported &= ~SUPPORTED_10000baseT_Full;
6282
6283	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
6284}
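
/* Example of the masking above: a speed_cap_mask that advertises only
 * the 1G and 10G capability bits clears every 10/100M and 2.5G
 * SUPPORTED_* flag, leaving e.g. SUPPORTED_1000baseT_Full and
 * SUPPORTED_10000baseT_Full (plus the media/pause bits) in
 * bp->port.supported.
 */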
6285
6286static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
6287{
6288	bp->link_params.req_duplex = DUPLEX_FULL;
6289
6290	switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
6291	case PORT_FEATURE_LINK_SPEED_AUTO:
6292		if (bp->port.supported & SUPPORTED_Autoneg) {
6293			bp->link_params.req_line_speed = SPEED_AUTO_NEG;
6294			bp->port.advertising = bp->port.supported;
6295		} else {
6296			u32 ext_phy_type =
6297			    XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6298
6299			if ((ext_phy_type ==
6300			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
6301			    (ext_phy_type ==
6302			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
6303				/* force 10G, no AN */
6304				bp->link_params.req_line_speed = SPEED_10000;
6305				bp->port.advertising =
6306						(ADVERTISED_10000baseT_Full |
6307						 ADVERTISED_FIBRE);
6308				break;
6309			}
6310			BNX2X_ERR("NVRAM config error. "
6311				  "Invalid link_config 0x%x"
6312				  "  Autoneg not supported\n",
6313				  bp->port.link_config);
6314			return;
6315		}
6316		break;
6317
6318	case PORT_FEATURE_LINK_SPEED_10M_FULL:
6319		if (bp->port.supported & SUPPORTED_10baseT_Full) {
6320			bp->link_params.req_line_speed = SPEED_10;
6321			bp->port.advertising = (ADVERTISED_10baseT_Full |
6322						ADVERTISED_TP);
6323		} else {
6324			BNX2X_ERROR("NVRAM config error. "
6325				    "Invalid link_config 0x%x"
6326				    "  speed_cap_mask 0x%x\n",
6327				    bp->port.link_config,
6328				    bp->link_params.speed_cap_mask);
6329			return;
6330		}
6331		break;
6332
6333	case PORT_FEATURE_LINK_SPEED_10M_HALF:
6334		if (bp->port.supported & SUPPORTED_10baseT_Half) {
6335			bp->link_params.req_line_speed = SPEED_10;
6336			bp->link_params.req_duplex = DUPLEX_HALF;
6337			bp->port.advertising = (ADVERTISED_10baseT_Half |
6338						ADVERTISED_TP);
6339		} else {
6340			BNX2X_ERROR("NVRAM config error. "
6341				    "Invalid link_config 0x%x"
6342				    "  speed_cap_mask 0x%x\n",
6343				    bp->port.link_config,
6344				    bp->link_params.speed_cap_mask);
6345			return;
6346		}
6347		break;
6348
6349	case PORT_FEATURE_LINK_SPEED_100M_FULL:
6350		if (bp->port.supported & SUPPORTED_100baseT_Full) {
6351			bp->link_params.req_line_speed = SPEED_100;
6352			bp->port.advertising = (ADVERTISED_100baseT_Full |
6353						ADVERTISED_TP);
6354		} else {
6355			BNX2X_ERROR("NVRAM config error. "
6356				    "Invalid link_config 0x%x"
6357				    "  speed_cap_mask 0x%x\n",
6358				    bp->port.link_config,
6359				    bp->link_params.speed_cap_mask);
6360			return;
6361		}
6362		break;
6363
6364	case PORT_FEATURE_LINK_SPEED_100M_HALF:
6365		if (bp->port.supported & SUPPORTED_100baseT_Half) {
6366			bp->link_params.req_line_speed = SPEED_100;
6367			bp->link_params.req_duplex = DUPLEX_HALF;
6368			bp->port.advertising = (ADVERTISED_100baseT_Half |
6369						ADVERTISED_TP);
6370		} else {
6371			BNX2X_ERROR("NVRAM config error. "
6372				    "Invalid link_config 0x%x"
6373				    "  speed_cap_mask 0x%x\n",
6374				    bp->port.link_config,
6375				    bp->link_params.speed_cap_mask);
6376			return;
6377		}
6378		break;
6379
6380	case PORT_FEATURE_LINK_SPEED_1G:
6381		if (bp->port.supported & SUPPORTED_1000baseT_Full) {
6382			bp->link_params.req_line_speed = SPEED_1000;
6383			bp->port.advertising = (ADVERTISED_1000baseT_Full |
6384						ADVERTISED_TP);
6385		} else {
6386			BNX2X_ERROR("NVRAM config error. "
6387				    "Invalid link_config 0x%x"
6388				    "  speed_cap_mask 0x%x\n",
6389				    bp->port.link_config,
6390				    bp->link_params.speed_cap_mask);
6391			return;
6392		}
6393		break;
6394
6395	case PORT_FEATURE_LINK_SPEED_2_5G:
6396		if (bp->port.supported & SUPPORTED_2500baseX_Full) {
6397			bp->link_params.req_line_speed = SPEED_2500;
6398			bp->port.advertising = (ADVERTISED_2500baseX_Full |
6399						ADVERTISED_TP);
6400		} else {
6401			BNX2X_ERROR("NVRAM config error. "
6402				    "Invalid link_config 0x%x"
6403				    "  speed_cap_mask 0x%x\n",
6404				    bp->port.link_config,
6405				    bp->link_params.speed_cap_mask);
6406			return;
6407		}
6408		break;
6409
6410	case PORT_FEATURE_LINK_SPEED_10G_CX4:
6411	case PORT_FEATURE_LINK_SPEED_10G_KX4:
6412	case PORT_FEATURE_LINK_SPEED_10G_KR:
6413		if (bp->port.supported & SUPPORTED_10000baseT_Full) {
6414			bp->link_params.req_line_speed = SPEED_10000;
6415			bp->port.advertising = (ADVERTISED_10000baseT_Full |
6416						ADVERTISED_FIBRE);
6417		} else {
6418			BNX2X_ERROR("NVRAM config error. "
6419				    "Invalid link_config 0x%x"
6420				    "  speed_cap_mask 0x%x\n",
6421				    bp->port.link_config,
6422				    bp->link_params.speed_cap_mask);
6423			return;
6424		}
6425		break;
6426
6427	default:
6428		BNX2X_ERROR("NVRAM config error. "
6429			    "BAD link speed link_config 0x%x\n",
6430			    bp->port.link_config);
6431		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
6432		bp->port.advertising = bp->port.supported;
6433		break;
6434	}
6435
6436	bp->link_params.req_flow_ctrl = (bp->port.link_config &
6437					 PORT_FEATURE_FLOW_CONTROL_MASK);
6438	if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
6439	    !(bp->port.supported & SUPPORTED_Autoneg))
6440		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
6441
6442	BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
6443		       "  advertising 0x%x\n",
6444		       bp->link_params.req_line_speed,
6445		       bp->link_params.req_duplex,
6446		       bp->link_params.req_flow_ctrl, bp->port.advertising);
6447}
6448
6449static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
6450{
6451	mac_hi = cpu_to_be16(mac_hi);
6452	mac_lo = cpu_to_be32(mac_lo);
6453	memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
6454	memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
6455}
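
/* Worked example (made-up address): mac_hi = 0x0010, mac_lo = 0x18010203
 * become the big-endian bytes {00 10} and {18 01 02 03} after the
 * conversions above, so mac_buf[] ends up as 00:10:18:01:02:03 - the
 * usual on-wire MAC byte order.
 */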
6456
6457static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
6458{
6459	int port = BP_PORT(bp);
6460	u32 val, val2;
6461	u32 config;
6462	u16 i;
6463	u32 ext_phy_type;
6464
6465	bp->link_params.bp = bp;
6466	bp->link_params.port = port;
6467
6468	bp->link_params.lane_config =
6469		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
6470	bp->link_params.ext_phy_config =
6471		SHMEM_RD(bp,
6472			 dev_info.port_hw_config[port].external_phy_config);
6473	/* BCM8727_NOC => BCM8727 no over current */
6474	if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
6475	    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
6476		bp->link_params.ext_phy_config &=
6477			~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6478		bp->link_params.ext_phy_config |=
6479			PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
6480		bp->link_params.feature_config_flags |=
6481			FEATURE_CONFIG_BCM8727_NOC;
6482	}
6483
6484	bp->link_params.speed_cap_mask =
6485		SHMEM_RD(bp,
6486			 dev_info.port_hw_config[port].speed_capability_mask);
6487
6488	bp->port.link_config =
6489		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
6490
6491	/* Get the 4 lanes xgxs config rx and tx */
6492	for (i = 0; i < 2; i++) {
6493		val = SHMEM_RD(bp,
6494			   dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
6495		bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
6496		bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
6497
6498		val = SHMEM_RD(bp,
6499			   dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
6500		bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
6501		bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
6502	}
6503
6504	/* If the device is capable of WoL, set the default state according
6505	 * to the HW
6506	 */
6507	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
6508	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
6509		   (config & PORT_FEATURE_WOL_ENABLED));
6510
6511	BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
6512		       "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
6513		       bp->link_params.lane_config,
6514		       bp->link_params.ext_phy_config,
6515		       bp->link_params.speed_cap_mask, bp->port.link_config);
6516
6517	bp->link_params.switch_cfg |= (bp->port.link_config &
6518				       PORT_FEATURE_CONNECTED_SWITCH_MASK);
6519	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
6520
6521	bnx2x_link_settings_requested(bp);
6522
6523	/*
6524	 * If connected directly, work with the internal PHY, otherwise, work
6525	 * with the external PHY
6526	 */
6527	ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6528	if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
6529		bp->mdio.prtad = bp->link_params.phy_addr;
6530
6531	else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
6532		 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
6533		bp->mdio.prtad =
6534			XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
6535
6536	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
6537	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
6538	bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
6539	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
6540	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
6541
6542#ifdef BCM_CNIC
6543	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
6544	val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
6545	bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
6546#endif
6547}
6548
6549static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
6550{
6551	int func = BP_FUNC(bp);
6552	u32 val, val2;
6553	int rc = 0;
6554
6555	bnx2x_get_common_hwinfo(bp);
6556
6557	bp->e1hov = 0;
6558	bp->e1hmf = 0;
6559	if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {
6560		bp->mf_config =
6561			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
6562
6563		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
6564		       FUNC_MF_CFG_E1HOV_TAG_MASK);
6565		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
6566			bp->e1hmf = 1;
6567		BNX2X_DEV_INFO("%s function mode\n",
6568			       IS_E1HMF(bp) ? "multi" : "single");
6569
6570		if (IS_E1HMF(bp)) {
6571			val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
6572								e1hov_tag) &
6573			       FUNC_MF_CFG_E1HOV_TAG_MASK);
6574			if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
6575				bp->e1hov = val;
6576				BNX2X_DEV_INFO("E1HOV for func %d is %d "
6577					       "(0x%04x)\n",
6578					       func, bp->e1hov, bp->e1hov);
6579			} else {
6580				BNX2X_ERROR("No valid E1HOV for func %d,"
6581					    "  aborting\n", func);
6582				rc = -EPERM;
6583			}
6584		} else {
6585			if (BP_E1HVN(bp)) {
6586				BNX2X_ERROR("VN %d in single function mode,"
6587					    "  aborting\n", BP_E1HVN(bp));
6588				rc = -EPERM;
6589			}
6590		}
6591	}
6592
6593	if (!BP_NOMCP(bp)) {
6594		bnx2x_get_port_hwinfo(bp);
6595
6596		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
6597			      DRV_MSG_SEQ_NUMBER_MASK);
6598		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
6599	}
6600
6601	if (IS_E1HMF(bp)) {
6602		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
6603		val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
6604		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
6605		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
6606			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
6607			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
6608			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
6609			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
6610			bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
6611			bp->dev->dev_addr[5] = (u8)(val & 0xff);
6612			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
6613			       ETH_ALEN);
6614			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
6615			       ETH_ALEN);
6616		}
6617
6618		return rc;
6619	}
6620
6621	if (BP_NOMCP(bp)) {
6622		/* only supposed to happen on emulation/FPGA */
6623		BNX2X_ERROR("warning: random MAC workaround active\n");
6624		random_ether_addr(bp->dev->dev_addr);
6625		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
6626	}
6627
6628	return rc;
6629}
6630
6631static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
6632{
6633	int cnt, i, block_end, rodi;
6634	char vpd_data[BNX2X_VPD_LEN+1];
6635	char str_id_reg[VENDOR_ID_LEN+1];
6636	char str_id_cap[VENDOR_ID_LEN+1];
6637	u8 len;
6638
6639	cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
6640	memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
6641
6642	if (cnt < BNX2X_VPD_LEN)
6643		goto out_not_found;
6644
6645	i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
6646			     PCI_VPD_LRDT_RO_DATA);
6647	if (i < 0)
6648		goto out_not_found;
6649
6650
6651	block_end = i + PCI_VPD_LRDT_TAG_SIZE +
6652		    pci_vpd_lrdt_size(&vpd_data[i]);
6653
6654	i += PCI_VPD_LRDT_TAG_SIZE;
6655
6656	if (block_end > BNX2X_VPD_LEN)
6657		goto out_not_found;
6658
6659	rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
6660				   PCI_VPD_RO_KEYWORD_MFR_ID);
6661	if (rodi < 0)
6662		goto out_not_found;
6663
6664	len = pci_vpd_info_field_size(&vpd_data[rodi]);
6665
6666	if (len != VENDOR_ID_LEN)
6667		goto out_not_found;
6668
6669	rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
6670
6671	/* vendor specific info */
6672	snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
6673	snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
6674	if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
6675	    !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
6676
6677		rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
6678						PCI_VPD_RO_KEYWORD_VENDOR0);
6679		if (rodi >= 0) {
6680			len = pci_vpd_info_field_size(&vpd_data[rodi]);
6681
6682			rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
6683
6684			if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
6685				memcpy(bp->fw_ver, &vpd_data[rodi], len);
6686				bp->fw_ver[len] = ' ';
6687			}
6688		}
6689		return;
6690	}
6691out_not_found:
6692	return;
6693}
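
/* VPD walk above in brief: locate the read-only long data tag, bound the
 * scan by its length header, find the "MN" (manufacturer id) keyword
 * and, only when it matches the Dell vendor id, copy the free-form "V0"
 * (vendor specific) field into bp->fw_ver.
 */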
6694
6695static int __devinit bnx2x_init_bp(struct bnx2x *bp)
6696{
6697	int func = BP_FUNC(bp);
6698	int timer_interval;
6699	int rc;
6700
6701	/* Disable interrupt handling until HW is initialized */
6702	atomic_set(&bp->intr_sem, 1);
6703	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
6704
6705	mutex_init(&bp->port.phy_mutex);
6706	mutex_init(&bp->fw_mb_mutex);
6707	spin_lock_init(&bp->stats_lock);
6708#ifdef BCM_CNIC
6709	mutex_init(&bp->cnic_mutex);
6710#endif
6711
6712	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
6713	INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
6714
6715	rc = bnx2x_get_hwinfo(bp);
6716
6717	bnx2x_read_fwinfo(bp);
6718	/* need to reset chip if undi was active */
6719	if (!BP_NOMCP(bp))
6720		bnx2x_undi_unload(bp);
6721
6722	if (CHIP_REV_IS_FPGA(bp))
6723		dev_err(&bp->pdev->dev, "FPGA detected\n");
6724
6725	if (BP_NOMCP(bp) && (func == 0))
6726		dev_err(&bp->pdev->dev, "MCP disabled, "
6727					"must load devices in order!\n");
6728
6729	/* Set multi queue mode */
6730	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
6731	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
6732		dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
6733					"requested is not MSI-X\n");
6734		multi_mode = ETH_RSS_MODE_DISABLED;
6735	}
6736	bp->multi_mode = multi_mode;
6737	bp->int_mode = int_mode;
6738
6739	bp->dev->features |= NETIF_F_GRO;
6740
6741	/* Set TPA flags */
6742	if (disable_tpa) {
6743		bp->flags &= ~TPA_ENABLE_FLAG;
6744		bp->dev->features &= ~NETIF_F_LRO;
6745	} else {
6746		bp->flags |= TPA_ENABLE_FLAG;
6747		bp->dev->features |= NETIF_F_LRO;
6748	}
6749	bp->disable_tpa = disable_tpa;
6750
6751	if (CHIP_IS_E1(bp))
6752		bp->dropless_fc = 0;
6753	else
6754		bp->dropless_fc = dropless_fc;
6755
6756	bp->mrrs = mrrs;
6757
6758	bp->tx_ring_size = MAX_TX_AVAIL;
6759	bp->rx_ring_size = MAX_RX_AVAIL;
6760
6761	bp->rx_csum = 1;
6762
6763	/* make sure that the numbers are in the right granularity */
6764	bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
6765	bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
6766
6767	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
6768	bp->current_interval = (poll ? poll : timer_interval);
6769
6770	init_timer(&bp->timer);
6771	bp->timer.expires = jiffies + bp->current_interval;
6772	bp->timer.data = (unsigned long) bp;
6773	bp->timer.function = bnx2x_timer;
6774
6775	return rc;
6776}
6777
6778
6779/****************************************************************************
6780* General service functions
6781****************************************************************************/
6782
6783/* called with rtnl_lock */
6784static int bnx2x_open(struct net_device *dev)
6785{
6786	struct bnx2x *bp = netdev_priv(dev);
6787
6788	netif_carrier_off(dev);
6789
6790	bnx2x_set_power_state(bp, PCI_D0);
6791
6792	if (!bnx2x_reset_is_done(bp)) {
6793		do {
6794			/* Reset the MCP mailbox sequence if there is an
6795			 * ongoing recovery
6796			 */
6797			bp->fw_seq = 0;
6798
6799			/* If this is the first function to load and "reset
6800			 * done" is still not set, a recovery may be unfinished.
6801			 * We don't check the attention state here because it
6802			 * may have already been cleared by a "common" reset,
6803			 * but we shall proceed with "process kill" anyway.
6804			 */
6805			if ((bnx2x_get_load_cnt(bp) == 0) &&
6806				bnx2x_trylock_hw_lock(bp,
6807				HW_LOCK_RESOURCE_RESERVED_08) &&
6808				(!bnx2x_leader_reset(bp))) {
6809				DP(NETIF_MSG_HW, "Recovered in open\n");
6810				break;
6811			}
6812
6813			bnx2x_set_power_state(bp, PCI_D3hot);
6814
6815			printk(KERN_ERR"%s: Recovery flow hasn't been properly"
6816			" completed yet. Try again later. If you still see this"
6817			" message after a few retries then a power cycle is"
6818			" required.\n", bp->dev->name);
6819
6820			return -EAGAIN;
6821		} while (0);
6822	}
6823
6824	bp->recovery_state = BNX2X_RECOVERY_DONE;
6825
6826	return bnx2x_nic_load(bp, LOAD_OPEN);
6827}
6828
6829/* called with rtnl_lock */
6830static int bnx2x_close(struct net_device *dev)
6831{
6832	struct bnx2x *bp = netdev_priv(dev);
6833
6834	/* Unload the driver, release IRQs */
6835	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
6836	bnx2x_set_power_state(bp, PCI_D3hot);
6837
6838	return 0;
6839}
6840
6841/* called with netif_tx_lock from dev_mcast.c */
6842void bnx2x_set_rx_mode(struct net_device *dev)
6843{
6844	struct bnx2x *bp = netdev_priv(dev);
6845	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
6846	int port = BP_PORT(bp);
6847
6848	if (bp->state != BNX2X_STATE_OPEN) {
6849		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6850		return;
6851	}
6852
6853	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
6854
6855	if (dev->flags & IFF_PROMISC)
6856		rx_mode = BNX2X_RX_MODE_PROMISC;
6857
6858	else if ((dev->flags & IFF_ALLMULTI) ||
6859		 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
6860		  CHIP_IS_E1(bp)))
6861		rx_mode = BNX2X_RX_MODE_ALLMULTI;
6862
6863	else { /* some multicasts */
6864		if (CHIP_IS_E1(bp)) {
6865			int i, old, offset;
6866			struct netdev_hw_addr *ha;
6867			struct mac_configuration_cmd *config =
6868						bnx2x_sp(bp, mcast_config);
6869
6870			i = 0;
6871			netdev_for_each_mc_addr(ha, dev) {
6872				config->config_table[i].
6873					cam_entry.msb_mac_addr =
6874					swab16(*(u16 *)&ha->addr[0]);
6875				config->config_table[i].
6876					cam_entry.middle_mac_addr =
6877					swab16(*(u16 *)&ha->addr[2]);
6878				config->config_table[i].
6879					cam_entry.lsb_mac_addr =
6880					swab16(*(u16 *)&ha->addr[4]);
6881				config->config_table[i].cam_entry.flags =
6882							cpu_to_le16(port);
6883				config->config_table[i].
6884					target_table_entry.flags = 0;
6885				config->config_table[i].target_table_entry.
6886					clients_bit_vector =
6887						cpu_to_le32(1 << BP_L_ID(bp));
6888				config->config_table[i].
6889					target_table_entry.vlan_id = 0;
6890
6891				DP(NETIF_MSG_IFUP,
6892				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
6893				   config->config_table[i].
6894						cam_entry.msb_mac_addr,
6895				   config->config_table[i].
6896						cam_entry.middle_mac_addr,
6897				   config->config_table[i].
6898						cam_entry.lsb_mac_addr);
6899				i++;
6900			}
6901			old = config->hdr.length;
6902			if (old > i) {
6903				for (; i < old; i++) {
6904					if (CAM_IS_INVALID(config->
6905							   config_table[i])) {
6906						/* already invalidated */
6907						break;
6908					}
6909					/* invalidate */
6910					CAM_INVALIDATE(config->
6911						       config_table[i]);
6912				}
6913			}
6914
6915			if (CHIP_REV_IS_SLOW(bp))
6916				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6917			else
6918				offset = BNX2X_MAX_MULTICAST*(1 + port);
6919
6920			config->hdr.length = i;
6921			config->hdr.offset = offset;
6922			config->hdr.client_id = bp->fp->cl_id;
6923			config->hdr.reserved1 = 0;
6924
6925			bp->set_mac_pending++;
6926			smp_wmb();
6927
6928			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6929				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6930				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
6931				      0);
6932		} else { /* E1H */
6933			/* Accept one or more multicasts */
6934			struct netdev_hw_addr *ha;
6935			u32 mc_filter[MC_HASH_SIZE];
6936			u32 crc, bit, regidx;
6937			int i;
6938
6939			memset(mc_filter, 0, 4 * MC_HASH_SIZE);
6940
6941			netdev_for_each_mc_addr(ha, dev) {
6942				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
6943				   ha->addr);
6944
6945				crc = crc32c_le(0, ha->addr, ETH_ALEN);
6946				bit = (crc >> 24) & 0xff;
6947				regidx = bit >> 5;
6948				bit &= 0x1f;
6949				mc_filter[regidx] |= (1 << bit);
6950			}
6951
6952			for (i = 0; i < MC_HASH_SIZE; i++)
6953				REG_WR(bp, MC_HASH_OFFSET(bp, i),
6954				       mc_filter[i]);
6955		}
6956	}
6957
6958	bp->rx_mode = rx_mode;
6959	bnx2x_set_storm_rx_mode(bp);
6960}
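
/* Worked example of the E1H multicast hash above: if crc32c_le() over
 * some (hypothetical) multicast address is 0xa7xxxxxx, then
 *
 *	bit    = (crc >> 24) & 0xff = 0xa7 = 167
 *	regidx = 167 >> 5 = 5,  bit &= 0x1f -> 7
 *
 * so bit 7 of mc_filter[5] is set: a 256-bit filter spread across the
 * MC_HASH_SIZE 32-bit registers.
 */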
6961
6962
6963/* called with rtnl_lock */
6964static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
6965			   int devad, u16 addr)
6966{
6967	struct bnx2x *bp = netdev_priv(netdev);
6968	u16 value;
6969	int rc;
6970	u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6971
6972	DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
6973	   prtad, devad, addr);
6974
6975	if (prtad != bp->mdio.prtad) {
6976		DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
6977		   prtad, bp->mdio.prtad);
6978		return -EINVAL;
6979	}
6980
6981	/* The HW expects different devad if CL22 is used */
6982	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
6983
6984	bnx2x_acquire_phy_lock(bp);
6985	rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
6986			     devad, addr, &value);
6987	bnx2x_release_phy_lock(bp);
6988	DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
6989
6990	if (!rc)
6991		rc = value;
6992	return rc;
6993}
6994
6995/* called with rtnl_lock */
6996static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
6997			    u16 addr, u16 value)
6998{
6999	struct bnx2x *bp = netdev_priv(netdev);
7000	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7001	int rc;
7002
7003	DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
7004			   " value 0x%x\n", prtad, devad, addr, value);
7005
7006	if (prtad != bp->mdio.prtad) {
7007		DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
7008		   prtad, bp->mdio.prtad);
7009		return -EINVAL;
7010	}
7011
7012	/* The HW expects different devad if CL22 is used */
7013	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
7014
7015	bnx2x_acquire_phy_lock(bp);
7016	rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
7017			      devad, addr, value);
7018	bnx2x_release_phy_lock(bp);
7019	return rc;
7020}
7021
7022/* called with rtnl_lock */
7023static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7024{
7025	struct bnx2x *bp = netdev_priv(dev);
7026	struct mii_ioctl_data *mdio = if_mii(ifr);
7027
7028	DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
7029	   mdio->phy_id, mdio->reg_num, mdio->val_in);
7030
7031	if (!netif_running(dev))
7032		return -EAGAIN;
7033
7034	return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
7035}
7036
7037#ifdef CONFIG_NET_POLL_CONTROLLER
7038static void poll_bnx2x(struct net_device *dev)
7039{
7040	struct bnx2x *bp = netdev_priv(dev);
7041
7042	disable_irq(bp->pdev->irq);
7043	bnx2x_interrupt(bp->pdev->irq, dev);
7044	enable_irq(bp->pdev->irq);
7045}
7046#endif
7047
7048static const struct net_device_ops bnx2x_netdev_ops = {
7049	.ndo_open		= bnx2x_open,
7050	.ndo_stop		= bnx2x_close,
7051	.ndo_start_xmit		= bnx2x_start_xmit,
7052	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
7053	.ndo_set_mac_address	= bnx2x_change_mac_addr,
7054	.ndo_validate_addr	= eth_validate_addr,
7055	.ndo_do_ioctl		= bnx2x_ioctl,
7056	.ndo_change_mtu		= bnx2x_change_mtu,
7057	.ndo_tx_timeout		= bnx2x_tx_timeout,
7058#ifdef BCM_VLAN
7059	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
7060#endif
7061#ifdef CONFIG_NET_POLL_CONTROLLER
7062	.ndo_poll_controller	= poll_bnx2x,
7063#endif
7064};
7065
7066static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
7067				    struct net_device *dev)
7068{
7069	struct bnx2x *bp;
7070	int rc;
7071
7072	SET_NETDEV_DEV(dev, &pdev->dev);
7073	bp = netdev_priv(dev);
7074
7075	bp->dev = dev;
7076	bp->pdev = pdev;
7077	bp->flags = 0;
7078	bp->func = PCI_FUNC(pdev->devfn);
7079
7080	rc = pci_enable_device(pdev);
7081	if (rc) {
7082		dev_err(&bp->pdev->dev,
7083			"Cannot enable PCI device, aborting\n");
7084		goto err_out;
7085	}
7086
7087	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7088		dev_err(&bp->pdev->dev,
7089			"Cannot find PCI device base address, aborting\n");
7090		rc = -ENODEV;
7091		goto err_out_disable;
7092	}
7093
7094	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
7095		dev_err(&bp->pdev->dev, "Cannot find second PCI device"
7096		       " base address, aborting\n");
7097		rc = -ENODEV;
7098		goto err_out_disable;
7099	}
7100
7101	if (atomic_read(&pdev->enable_cnt) == 1) {
7102		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7103		if (rc) {
7104			dev_err(&bp->pdev->dev,
7105				"Cannot obtain PCI resources, aborting\n");
7106			goto err_out_disable;
7107		}
7108
7109		pci_set_master(pdev);
7110		pci_save_state(pdev);
7111	}
7112
7113	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7114	if (bp->pm_cap == 0) {
7115		dev_err(&bp->pdev->dev,
7116			"Cannot find power management capability, aborting\n");
7117		rc = -EIO;
7118		goto err_out_release;
7119	}
7120
7121	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
7122	if (bp->pcie_cap == 0) {
7123		dev_err(&bp->pdev->dev,
7124			"Cannot find PCI Express capability, aborting\n");
7125		rc = -EIO;
7126		goto err_out_release;
7127	}
7128
7129	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
7130		bp->flags |= USING_DAC_FLAG;
7131		if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
7132			dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
7133			       " failed, aborting\n");
7134			rc = -EIO;
7135			goto err_out_release;
7136		}
7137
7138	} else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
7139		dev_err(&bp->pdev->dev,
7140			"System does not support DMA, aborting\n");
7141		rc = -EIO;
7142		goto err_out_release;
7143	}
7144
7145	dev->mem_start = pci_resource_start(pdev, 0);
7146	dev->base_addr = dev->mem_start;
7147	dev->mem_end = pci_resource_end(pdev, 0);
7148
7149	dev->irq = pdev->irq;
7150
7151	bp->regview = pci_ioremap_bar(pdev, 0);
7152	if (!bp->regview) {
7153		dev_err(&bp->pdev->dev,
7154			"Cannot map register space, aborting\n");
7155		rc = -ENOMEM;
7156		goto err_out_release;
7157	}
7158
7159	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
7160					min_t(u64, BNX2X_DB_SIZE,
7161					      pci_resource_len(pdev, 2)));
7162	if (!bp->doorbells) {
7163		dev_err(&bp->pdev->dev,
7164			"Cannot map doorbell space, aborting\n");
7165		rc = -ENOMEM;
7166		goto err_out_unmap;
7167	}
7168
7169	bnx2x_set_power_state(bp, PCI_D0);
7170
7171	/* clean indirect addresses */
7172	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
7173			       PCICFG_VENDOR_ID_OFFSET);
7174	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
7175	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
7176	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
7177	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
7178
7179	/* Reset the load counter */
7180	bnx2x_clear_load_cnt(bp);
7181
7182	dev->watchdog_timeo = TX_TIMEOUT;
7183
7184	dev->netdev_ops = &bnx2x_netdev_ops;
7185	bnx2x_set_ethtool_ops(dev);
7186	dev->features |= NETIF_F_SG;
7187	dev->features |= NETIF_F_HW_CSUM;
7188	if (bp->flags & USING_DAC_FLAG)
7189		dev->features |= NETIF_F_HIGHDMA;
7190	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
7191	dev->features |= NETIF_F_TSO6;
7192#ifdef BCM_VLAN
7193	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
7194	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
7195
7196	dev->vlan_features |= NETIF_F_SG;
7197	dev->vlan_features |= NETIF_F_HW_CSUM;
7198	if (bp->flags & USING_DAC_FLAG)
7199		dev->vlan_features |= NETIF_F_HIGHDMA;
7200	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
7201	dev->vlan_features |= NETIF_F_TSO6;
7202#endif
7203
7204	/* bnx2x_get_port_hwinfo() will set prtad and mmds properly */
7205	bp->mdio.prtad = MDIO_PRTAD_NONE;
7206	bp->mdio.mmds = 0;
7207	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
7208	bp->mdio.dev = dev;
7209	bp->mdio.mdio_read = bnx2x_mdio_read;
7210	bp->mdio.mdio_write = bnx2x_mdio_write;
7211
7212	return 0;
7213
7214err_out_unmap:
7215	if (bp->regview) {
7216		iounmap(bp->regview);
7217		bp->regview = NULL;
7218	}
7219	if (bp->doorbells) {
7220		iounmap(bp->doorbells);
7221		bp->doorbells = NULL;
7222	}
7223
7224err_out_release:
7225	if (atomic_read(&pdev->enable_cnt) == 1)
7226		pci_release_regions(pdev);
7227
7228err_out_disable:
7229	pci_disable_device(pdev);
7230	pci_set_drvdata(pdev, NULL);
7231
7232err_out:
7233	return rc;
7234}
7235
7236static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
7237						 int *width, int *speed)
7238{
7239	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
7240
7241	*width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
7242
7243	/* return value of 1=2.5GHz 2=5GHz */
7244	*speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
7245}
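
/* Example decode: a x8 link trained at Gen1 reads back as *width = 8 and
 * *speed = 1 (2.5GHz), while *speed = 2 would indicate a 5GHz (Gen2)
 * link, per the comment above.
 */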

static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
{
	const struct firmware *firmware = bp->firmware;
	struct bnx2x_fw_file_hdr *fw_hdr;
	struct bnx2x_fw_file_section *sections;
	u32 offset, len, num_ops;
	u16 *ops_offsets;
	int i;
	const u8 *fw_ver;

	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
		return -EINVAL;

	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
	sections = (struct bnx2x_fw_file_section *)fw_hdr;

	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data */
	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
		offset = be32_to_cpu(sections[i].offset);
		len = be32_to_cpu(sections[i].len);
		if (offset + len > firmware->size) {
			dev_err(&bp->pdev->dev,
				"Section %d length is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Likewise for the init_ops offsets */
	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
	ops_offsets = (u16 *)(firmware->data + offset);
	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
			dev_err(&bp->pdev->dev,
				"Section offset %d is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Check FW version */
	offset = be32_to_cpu(fw_hdr->fw_version.offset);
	fw_ver = firmware->data + offset;
	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
		dev_err(&bp->pdev->dev,
			"Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
		       fw_ver[0], fw_ver[1], fw_ver[2],
		       fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
		       BCM_5710_FW_MINOR_VERSION,
		       BCM_5710_FW_REVISION_VERSION,
		       BCM_5710_FW_ENGINEERING_VERSION);
		return -EINVAL;
	}

	return 0;
}
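
/*
 * Rough sketch of the file layout these checks assume (the authoritative
 * definitions live in bnx2x_fw_file_hdr.h): the file opens with a header
 * of big-endian {offset, len} section descriptors; init_ops_offsets is
 * an array of big-endian u16 indices into the init_ops array; and the
 * fw_version section carries the four version bytes compared against the
 * BCM_5710_FW_* constants this driver was built with.
 */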

static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	u32 *target = (u32 *)_target;
	u32 i;

	for (i = 0; i < n/4; i++)
		target[i] = be32_to_cpu(source[i]);
}

/*
 * Ops array is stored in the following format:
 * {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	struct raw_op *target = (struct raw_op *)_target;
	u32 i, j, tmp;

	for (i = 0, j = 0; i < n/8; i++, j += 2) {
		tmp = be32_to_cpu(source[j]);
		target[i].op = (tmp >> 24) & 0xff;
		target[i].offset = tmp & 0xffffff;
		target[i].raw_data = be32_to_cpu(source[j + 1]);
	}
}
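
/*
 * Worked example with made-up values: for the big-endian record
 * {0x06112233, 0xdeadbeef}, bnx2x_prep_ops() yields op == 0x06,
 * offset == 0x112233 and raw_data == 0xdeadbeef in host byte order.
 */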

static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be16 *source = (const __be16 *)_source;
	u16 *target = (u16 *)_target;
	u32 i;

	for (i = 0; i < n/2; i++)
		target[i] = be16_to_cpu(source[i]);
}

#define BNX2X_ALLOC_AND_SET(arr, lbl, func)				\
do {									\
	u32 len = be32_to_cpu(fw_hdr->arr.len);				\
	bp->arr = kmalloc(len, GFP_KERNEL);				\
	if (!bp->arr) {							\
		pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
		goto lbl;						\
	}								\
	func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset),	\
	     (u8 *)bp->arr, len);					\
} while (0)
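
/*
 * For reference, BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit,
 * be32_to_cpu_n) expands to roughly (error print omitted):
 *
 *	u32 len = be32_to_cpu(fw_hdr->init_data.len);
 *	bp->init_data = kmalloc(len, GFP_KERNEL);
 *	if (!bp->init_data)
 *		goto request_firmware_exit;
 *	be32_to_cpu_n(bp->firmware->data +
 *		      be32_to_cpu(fw_hdr->init_data.offset),
 *		      (u8 *)bp->init_data, len);
 *
 * i.e. each init array is copied out of the firmware image and byte
 * swapped to host order in a single step.
 */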

static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
{
	const char *fw_file_name;
	struct bnx2x_fw_file_hdr *fw_hdr;
	int rc;

	if (CHIP_IS_E1(bp))
		fw_file_name = FW_FILE_NAME_E1;
	else if (CHIP_IS_E1H(bp))
		fw_file_name = FW_FILE_NAME_E1H;
	else {
		dev_err(dev, "Unsupported chip revision\n");
		return -EINVAL;
	}

	dev_info(dev, "Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, dev);
	if (rc) {
		dev_err(dev, "Can't load firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		dev_err(dev, "Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware */
	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_pram_data.offset);
	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_pram_data.offset);

	return 0;

init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}
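
/*
 * Note that the error labels above unwind in reverse allocation order:
 * a failed init_ops_offsets allocation falls through and frees init_ops,
 * then init_data, before releasing the firmware, so no partially built
 * init array is leaked.
 */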


static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int pcie_width, pcie_speed;
	int rc;

	/* dev zeroed in alloc_etherdev_mq() */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		dev_err(&pdev->dev, "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msg_enable = debug;

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* Set init arrays */
	rc = bnx2x_init_firmware(bp, &pdev->dev);
	if (rc) {
		dev_err(&pdev->dev, "Error loading firmware\n");
		goto init_one_exit;
	}

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
	netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", board_info[ent->driver_data].name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	pr_cont("node addr %pM\n", dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}

static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	/* Make sure RESET task is not scheduled before continuing */
	cancel_delayed_work_sync(&bp->reset_task);

	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);
	netif_carrier_off(bp->dev);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp, false);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	return 0;
}

static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return;
	}

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};
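
/*
 * Recovery sequence as wired above: the PCI error-recovery core calls
 * .error_detected first (detach the netdev and unload the NIC), then
 * .slot_reset after the bus reset (re-enable and restore the device),
 * and finally .resume (re-read shmem state via bnx2x_eeh_recover() and
 * reload the NIC) once traffic may flow again.
 */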

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
	int ret;

	pr_info("%s", version);

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		pr_err("Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		pr_err("Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}
	return ret;
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);

#ifdef BCM_CNIC

/* count denotes the number of new completions we have seen */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	spin_lock_bh(&bp->spq_lock);
	bp->cnic_spq_pending -= count;

	for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
	     bp->cnic_spq_pending++) {

		if (!bp->cnic_kwq_pending)
			break;

		spe = bnx2x_sp_get_next(bp);
		*spe = *bp->cnic_kwq_cons;

		bp->cnic_kwq_pending--;

		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
			bp->cnic_kwq_cons = bp->cnic_kwq;
		else
			bp->cnic_kwq_cons++;
	}
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
}
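
/*
 * Accounting sketch: cnic_spq_pending counts entries already posted to
 * the slow-path queue but not yet completed, while cnic_kwq_pending
 * counts entries still waiting in the driver's kwq ring. The loop above
 * drains the kwq into the SPQ until the kwq empties or max_kwqe_pending
 * entries are outstanding, wrapping cnic_kwq_cons back to the ring base
 * when it reaches cnic_kwq_last.
 */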

static int bnx2x_cnic_sp_queue(struct net_device *dev,
			       struct kwqe_16 *kwqes[], u32 count)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	for (i = 0; i < count; i++) {
		struct eth_spe *spe = (struct eth_spe *)kwqes[i];

		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
			break;

		*bp->cnic_kwq_prod = *spe;

		bp->cnic_kwq_pending++;

		DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
		   spe->data.mac_config_addr.hi,
		   spe->data.mac_config_addr.lo,
		   bp->cnic_kwq_pending);

		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
			bp->cnic_kwq_prod = bp->cnic_kwq;
		else
			bp->cnic_kwq_prod++;
	}

	spin_unlock_bh(&bp->spq_lock);

	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
		bnx2x_cnic_sp_post(bp, 0);

	return i;
}

static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	mutex_lock(&bp->cnic_mutex);
	c_ops = bp->cnic_ops;
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	mutex_unlock(&bp->cnic_mutex);

	return rc;
}

static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	rcu_read_unlock();

	return rc;
}

/*
 * for commands that have no data
 */
int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
{
	struct cnic_ctl_info ctl = {0};

	ctl.cmd = cmd;

	return bnx2x_cnic_ctl_send(bp, &ctl);
}

static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
{
	struct cnic_ctl_info ctl;

	/* first we tell CNIC and only then we count this as a completion */
	ctl.cmd = CNIC_CTL_COMPLETION_CMD;
	ctl.data.comp.cid = cid;

	bnx2x_cnic_ctl_send_bh(bp, &ctl);
	bnx2x_cnic_sp_post(bp, 1);
}

static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	switch (ctl->cmd) {
	case DRV_CTL_CTXTBL_WR_CMD: {
		u32 index = ctl->data.io.offset;
		dma_addr_t addr = ctl->data.io.dma_addr;

		bnx2x_ilt_wr(bp, index, addr);
		break;
	}

	case DRV_CTL_COMPLETION_CMD: {
		int count = ctl->data.comp.comp_count;

		bnx2x_cnic_sp_post(bp, count);
		break;
	}

	/* rtnl_lock is held.  */
	case DRV_CTL_START_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		bp->rx_mode_cl_mask |= (1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	/* rtnl_lock is held.  */
	case DRV_CTL_STOP_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		bp->rx_mode_cl_mask &= ~(1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	default:
		BNX2X_ERR("unknown command %x\n", ctl->cmd);
		rc = -EINVAL;
	}

	return rc;
}

void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (bp->flags & USING_MSIX_FLAG) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
		cp->irq_arr[0].vector = bp->msix_table[1].vector;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}
	cp->irq_arr[0].status_blk = bp->cnic_sb;
	cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
	cp->irq_arr[1].status_blk = bp->def_status_blk;
	cp->irq_arr[1].status_blk_num = DEF_SB_ID;

	cp->num_irq = 2;
}
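
/*
 * In MSI-X mode CNIC is handed vector 1 of the MSI-X table (vector 0
 * remains with the driver's default status block). Either way CNIC sees
 * two status blocks: its own CNIC SB in irq_arr[0] and the default SB
 * in irq_arr[1].
 */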

static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (atomic_read(&bp->intr_sem) != 0)
		return -EBUSY;

	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));

	bnx2x_setup_cnic_irq_info(bp);
	bnx2x_set_iscsi_eth_mac_addr(bp, 1);
	bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}

static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
	}
	cp->drv_state = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	synchronize_rcu();
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}

struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = CHIP_ID(bp);
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->io_base2 = bp->doorbells;
	cp->max_kwqe_pending = 8;
	cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
	cp->ctx_tbl_len = CNIC_ILT_LINES;
	cp->starting_cid = BCM_CNIC_CID_START;
	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
	cp->drv_ctl = bnx2x_drv_ctl;
	cp->drv_register_cnic = bnx2x_register_cnic;
	cp->drv_unregister_cnic = bnx2x_unregister_cnic;

	return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);
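
/*
 * bnx2x_cnic_probe() is the handshake point with the separate cnic
 * module (outside this file): cnic resolves this exported symbol and
 * then drives the device through the callbacks filled in above (KWQE
 * submission, drv_ctl and register/unregister).
 */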

#endif /* BCM_CNIC */
