/* bnx2.c: Broadcom NX2 network driver.
 *
 * Copyright (c) 2004-2007 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Michael Chan  (mchan@broadcom.com)
 */


#include <linux/module.h>
#include <linux/moduleparam.h>

#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
#include <linux/if_vlan.h>
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/zlib.h>

#include "bnx2.h"
#include "bnx2_fw.h"
#include "bnx2_fw2.h"

#define DRV_MODULE_NAME		"bnx2"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.5.11"
#define DRV_MODULE_RELDATE	"June 4, 2007"

#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static const char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");

typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
} board_t;

/* indexed by board_t, above */
static const struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	};

static struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ 0, }
};

static struct flash_spec flash_table[] =
{
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};

MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);

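/* Return the number of free transmit BDs.  The ring uses 256 indices
 * for 255 entries, so one index must always be left unused; see the
 * comment in the body.
 */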
static inline u32 bnx2_tx_avail(struct bnx2 *bp)
{
	u32 diff;

	smp_mb();

	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
	diff = bp->tx_prod - bp->tx_cons;
	if (unlikely(diff >= TX_DESC_CNT)) {
		diff &= 0xffff;
		if (diff == TX_DESC_CNT)
			diff = MAX_TX_DESC_CNT;
	}
	return (bp->tx_ring_size - diff);
}

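/* Indirect register access: write the target offset to the PCICFG
 * register window address, then read or write the data through the
 * window.  indirect_lock serializes the two-step sequence.
 */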
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}

static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}

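/* Write one 32-bit word of on-chip context memory.  The 5709 posts a
 * CTX_CTRL write request that is polled for completion; older chips
 * use a simple address/data register pair.
 */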
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		for (i = 0; i < 5; i++) {
			u32 val;
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}

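/* MDIO access to the PHY.  Hardware auto-polling is paused around the
 * transaction and re-enabled afterwards, and the COMM register is
 * polled until the START_BUSY bit clears.
 */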
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}

static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}

static void
bnx2_disable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}

static void
bnx2_enable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}

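/* Mask interrupts and wait for any in-flight handler to finish.
 * intr_sem stays elevated until bnx2_netif_start() drops it back to
 * zero and re-enables interrupts.
 */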
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	synchronize_irq(bp->pdev->irq);
}

static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		netif_poll_disable(bp->dev);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
	}
}

static void
bnx2_netif_start(struct bnx2 *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_wake_queue(bp->dev);
			netif_poll_enable(bp->dev);
			bnx2_enable_int(bp);
		}
	}
}

static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bp->status_blk) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bp->status_blk, bp->status_blk_mapping);
		bp->status_blk = NULL;
		bp->stats_blk = NULL;
	}
	if (bp->tx_desc_ring) {
		pci_free_consistent(bp->pdev,
				    sizeof(struct tx_bd) * TX_DESC_CNT,
				    bp->tx_desc_ring, bp->tx_desc_mapping);
		bp->tx_desc_ring = NULL;
	}
	kfree(bp->tx_buf_ring);
	bp->tx_buf_ring = NULL;
	for (i = 0; i < bp->rx_max_ring; i++) {
		if (bp->rx_desc_ring[i])
			pci_free_consistent(bp->pdev,
					    sizeof(struct rx_bd) * RX_DESC_CNT,
					    bp->rx_desc_ring[i],
					    bp->rx_desc_mapping[i]);
		bp->rx_desc_ring[i] = NULL;
	}
	vfree(bp->rx_buf_ring);
	bp->rx_buf_ring = NULL;
}

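/* Allocate the TX/RX rings, the combined status + statistics block,
 * and (on the 5709) the context memory pages.  On any failure all
 * partial allocations are released via bnx2_free_mem().
 */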
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size;

	bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
				  GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)
		return -ENOMEM;

	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
						sizeof(struct tx_bd) *
						TX_DESC_CNT,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)
		goto alloc_mem_err;

	bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
				  bp->rx_max_ring);
	if (bp->rx_buf_ring == NULL)
		goto alloc_mem_err;

	memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
				   bp->rx_max_ring);

	for (i = 0; i < bp->rx_max_ring; i++) {
		bp->rx_desc_ring[i] =
			pci_alloc_consistent(bp->pdev,
					     sizeof(struct rx_bd) * RX_DESC_CNT,
					     &bp->rx_desc_mapping[i]);
		if (bp->rx_desc_ring[i] == NULL)
			goto alloc_mem_err;

	}

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, bp->status_stats_size);

	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
				  status_blk_size);

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}
	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}

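/* Report the resolved link state to the firmware through the
 * BNX2_LINK_STATUS field in shared memory.
 */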
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}

static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}

	bnx2_report_fw_link(bp);
}

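/* Resolve the TX/RX pause configuration from the local and remote
 * advertisements, or from the forced settings when flow control
 * autonegotiation is disabled.
 */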
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}

static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
		case MII_BNX2_GP_TOP_AN_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_1G:
		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
			bp->line_speed = SPEED_1000;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}

static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
		case BCM5708S_1000X_STAT1_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case BCM5708S_1000X_STAT1_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case BCM5708S_1000X_STAT1_SPEED_1G:
			bp->line_speed = SPEED_1000;
			break;
		case BCM5708S_1000X_STAT1_SPEED_2G5:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}

static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}

static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}

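/* Program the EMAC for the current link: port mode, duplex, and
 * TX/RX pause enables, then acknowledge the link-change attention.
 */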
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}

static void
bnx2_enable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_GP_STATUS);
}

static void
bnx2_disable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
}

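/* Enable the 2.5G advertisement bit in the UP1 register if it is not
 * already set.  Returns 1 if it was already enabled and 0 if it had to
 * be turned on; the caller uses this to decide whether the link must
 * be forced down for renegotiation.
 */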
static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return 0;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}

static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
		val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr |= BCM5708S_BMCR_FORCE_2500;
	}

	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	}

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}

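/* Central link-state update: read the status register twice (some
 * status bits are latched), dispatch to the chip-specific linkup
 * handler, resolve flow control, and reprogram the MAC.
 */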
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	link_up = bp->link_up;

	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}

static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
		if (!(reg & BMCR_RESET)) {
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}

static u32
bnx2_phy_get_pause_adv(struct bnx2 *bp)
{
	u32 adv = 0;

	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
		(FLOW_CTRL_RX | FLOW_CTRL_TX)) {

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPAUSE;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_ASYM;
		}
	}
	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
		}
		else {
			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
		}
	}
	return adv;
}

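/* Configure the SerDes PHY for either forced speed/duplex or
 * autonegotiation, forcing a link-down visible to the partner when
 * the advertisement or BMCR must change.
 */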
static int
bnx2_setup_serdes_phy(struct bnx2 *bp)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate, which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}

#define ETHTOOL_ALL_FIBRE_SPEED						\
	(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)

static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is set up after the link has
		 * gone down and up again. In some cases, the link will not
		 * go down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}

static int
bnx2_setup_phy(struct bnx2 *bp)
{
	if (bp->loopback == MAC_LOOPBACK)
		return 0;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		return (bnx2_setup_serdes_phy(bp));
	}
	else {
		return (bnx2_setup_copper_phy(bp));
	}
}

static int
bnx2_init_5709s_phy(struct bnx2 *bp)
{
	u32 val;

	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}

static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}

static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bnx2_reset_phy(bp);

	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}

static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}


static int
bnx2_init_phy(struct bnx2 *bp)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
	bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;

	bp->mii_bmcr = MII_BMCR;
	bp->mii_bmsr = MII_BMSR;
	bp->mii_bmsr1 = MII_BMSR;
	bp->mii_adv = MII_ADVERTISE;
	bp->mii_lpa = MII_LPA;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5709)
			rc = bnx2_init_5709s_phy(bp);
	}
	else {
		rc = bnx2_init_copper_phy(bp);
	}

	bnx2_setup_phy(bp);

	return rc;
}

static int
bnx2_set_mac_loopback(struct bnx2 *bp)
{
	u32 mac_mode;

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~BNX2_EMAC_MODE_PORT;
	mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}

static int bnx2_test_link(struct bnx2 *);

static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}

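/* Post a message to the firmware through the DRV_MB mailbox in shared
 * memory and poll FW_MB for the matching acknowledgement sequence.  On
 * timeout, the firmware is notified via BNX2_DRV_MSG_CODE_FW_TIMEOUT.
 */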
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}

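/* Initialize the 5709 context memory and point the chip's host page
 * table at the DMA-mapped context pages, polling each write request
 * for completion.
 */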
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}

static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				CTX_WR(bp, 0x00, offset, 0);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
		}
	}
}

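/* Drain the internal RX buffer pool, remembering the good buffers and
 * leaving the known-bad ones (bit 9 set in the allocated address)
 * permanently allocated, then free only the good buffers back to the
 * pool.
 */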
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		val = (val << 9) | val | 1;

		REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}

static void
bnx2_set_mac_addr(struct bnx2 *bp)
{
	u32 val;
	u8 *mac_addr = bp->dev->dev_addr;

	val = (mac_addr[0] << 8) | mac_addr[1];

	REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);

	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		(mac_addr[4] << 8) | mac_addr[5];

	REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
}

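/* Allocate and DMA-map a new receive skb and attach it to the RX BD
 * at the given ring index.
 */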
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}

static int
bnx2_phy_event_is_set(struct bnx2 *bp, u32 event)
{
	struct status_block *sblk = bp->status_blk;
	u32 new_link_state, old_link_state;
	int is_set = 1;

	new_link_state = sblk->status_attn_bits & event;
	old_link_state = sblk->status_attn_bits_ack & event;
	if (new_link_state != old_link_state) {
		if (new_link_state)
			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
		else
			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
	} else
		is_set = 0;

	return is_set;
}

static void
bnx2_phy_int(struct bnx2 *bp)
{
	if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_LINK_STATE)) {
		spin_lock(&bp->phy_lock);
		bnx2_set_link(bp);
		spin_unlock(&bp->phy_lock);
	}
}

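/* Reclaim transmit BDs that the hardware has completed, unmapping and
 * freeing their skbs, then wake the queue if enough room has opened up.
 */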
1989static void
1990bnx2_tx_int(struct bnx2 *bp)
1991{
1992	struct status_block *sblk = bp->status_blk;
1993	u16 hw_cons, sw_cons, sw_ring_cons;
1994	int tx_free_bd = 0;
1995
1996	hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
1997	if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
1998		hw_cons++;
1999	}
2000	sw_cons = bp->tx_cons;
2001
2002	while (sw_cons != hw_cons) {
2003		struct sw_bd *tx_buf;
2004		struct sk_buff *skb;
2005		int i, last;
2006
2007		sw_ring_cons = TX_RING_IDX(sw_cons);
2008
2009		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
2010		skb = tx_buf->skb;
2011
2012		/* partial BD completions possible with TSO packets */
2013		if (skb_is_gso(skb)) {
2014			u16 last_idx, last_ring_idx;
2015
2016			last_idx = sw_cons +
2017				skb_shinfo(skb)->nr_frags + 1;
2018			last_ring_idx = sw_ring_cons +
2019				skb_shinfo(skb)->nr_frags + 1;
2020			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2021				last_idx++;
2022			}
2023			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2024				break;
2025			}
2026		}
2027
2028		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
2029			skb_headlen(skb), PCI_DMA_TODEVICE);
2030
2031		tx_buf->skb = NULL;
2032		last = skb_shinfo(skb)->nr_frags;
2033
2034		for (i = 0; i < last; i++) {
2035			sw_cons = NEXT_TX_BD(sw_cons);
2036
2037			pci_unmap_page(bp->pdev,
2038				pci_unmap_addr(
2039					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
2040					mapping),
2041				skb_shinfo(skb)->frags[i].size,
2042				PCI_DMA_TODEVICE);
2043		}
2044
2045		sw_cons = NEXT_TX_BD(sw_cons);
2046
2047		tx_free_bd += last + 1;
2048
2049		dev_kfree_skb(skb);
2050
2051		hw_cons = bp->hw_tx_cons =
2052			sblk->status_tx_quick_consumer_index0;
2053
2054		if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
2055			hw_cons++;
2056		}
2057	}
2058
2059	bp->tx_cons = sw_cons;
2060	/* Need to make the tx_cons update visible to bnx2_start_xmit()
2061	 * before checking for netif_queue_stopped().  Without the
2062	 * memory barrier, there is a small possibility that bnx2_start_xmit()
2063	 * will miss it and cause the queue to be stopped forever.
2064	 */
2065	smp_mb();
2066
2067	if (unlikely(netif_queue_stopped(bp->dev)) &&
2068		     (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
2069		netif_tx_lock(bp->dev);
2070		if ((netif_queue_stopped(bp->dev)) &&
2071		    (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
2072			netif_wake_queue(bp->dev);
2073		netif_tx_unlock(bp->dev);
2074	}
2075}
2076
2077static inline void
2078bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
2079	u16 cons, u16 prod)
2080{
2081	struct sw_bd *cons_rx_buf, *prod_rx_buf;
2082	struct rx_bd *cons_bd, *prod_bd;
2083
2084	cons_rx_buf = &bp->rx_buf_ring[cons];
2085	prod_rx_buf = &bp->rx_buf_ring[prod];
2086
2087	pci_dma_sync_single_for_device(bp->pdev,
2088		pci_unmap_addr(cons_rx_buf, mapping),
2089		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2090
2091	bp->rx_prod_bseq += bp->rx_buf_use_size;
2092
2093	prod_rx_buf->skb = skb;
2094
2095	if (cons == prod)
2096		return;
2097
2098	pci_unmap_addr_set(prod_rx_buf, mapping,
2099			pci_unmap_addr(cons_rx_buf, mapping));
2100
2101	cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2102	prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2103	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2104	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2105}
2106
2107static int
2108bnx2_rx_int(struct bnx2 *bp, int budget)
2109{
2110	struct status_block *sblk = bp->status_blk;
2111	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2112	struct l2_fhdr *rx_hdr;
2113	int rx_pkt = 0;
2114
2115	hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
2116	if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
2117		hw_cons++;
2118	}
2119	sw_cons = bp->rx_cons;
2120	sw_prod = bp->rx_prod;
2121
2122	/* Memory barrier necessary as speculative reads of the rx
2123	 * buffer can be ahead of the index in the status block
2124	 */
2125	rmb();
2126	while (sw_cons != hw_cons) {
2127		unsigned int len;
2128		u32 status;
2129		struct sw_bd *rx_buf;
2130		struct sk_buff *skb;
2131		dma_addr_t dma_addr;
2132
2133		sw_ring_cons = RX_RING_IDX(sw_cons);
2134		sw_ring_prod = RX_RING_IDX(sw_prod);
2135
2136		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
2137		skb = rx_buf->skb;
2138
2139		rx_buf->skb = NULL;
2140
2141		dma_addr = pci_unmap_addr(rx_buf, mapping);
2142
2143		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
2144			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2145
2146		rx_hdr = (struct l2_fhdr *) skb->data;
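		/* l2_fhdr_pkt_len includes the 4-byte frame CRC (FCS),
		 * which is stripped before the skb is passed up.
		 */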
2147		len = rx_hdr->l2_fhdr_pkt_len - 4;
2148
2149		if ((status = rx_hdr->l2_fhdr_status) &
2150			(L2_FHDR_ERRORS_BAD_CRC |
2151			L2_FHDR_ERRORS_PHY_DECODE |
2152			L2_FHDR_ERRORS_ALIGNMENT |
2153			L2_FHDR_ERRORS_TOO_SHORT |
2154			L2_FHDR_ERRORS_GIANT_FRAME)) {
2155
2156			goto reuse_rx;
2157		}
2158
2159		/* Since we don't have a jumbo ring, copy small packets
2160		 * if mtu > 1500
2161		 */
2162		if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
2163			struct sk_buff *new_skb;
2164
2165			new_skb = netdev_alloc_skb(bp->dev, len + 2);
2166			if (new_skb == NULL)
2167				goto reuse_rx;
2168
2169			/* aligned copy */
2170			skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
2171				      new_skb->data, len + 2);
2172			skb_reserve(new_skb, 2);
2173			skb_put(new_skb, len);
2174
2175			bnx2_reuse_rx_skb(bp, skb,
2176				sw_ring_cons, sw_ring_prod);
2177
2178			skb = new_skb;
2179		}
2180		else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
2181			pci_unmap_single(bp->pdev, dma_addr,
2182				bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
2183
2184			skb_reserve(skb, bp->rx_offset);
2185			skb_put(skb, len);
2186		}
2187		else {
2188reuse_rx:
2189			bnx2_reuse_rx_skb(bp, skb,
2190				sw_ring_cons, sw_ring_prod);
2191			goto next_rx;
2192		}
2193
2194		skb->protocol = eth_type_trans(skb, bp->dev);
2195
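		/* Drop oversized frames, but allow 802.1Q VLAN-tagged
		 * frames (ethertype 0x8100), which legitimately carry 4
		 * extra bytes.
		 */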
2196		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
2197			(ntohs(skb->protocol) != 0x8100)) {
2198
2199			dev_kfree_skb(skb);
2200			goto next_rx;
2201
2202		}
2203
2204		skb->ip_summed = CHECKSUM_NONE;
2205		if (bp->rx_csum &&
2206			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
2207			L2_FHDR_STATUS_UDP_DATAGRAM))) {
2208
2209			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
2210					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
2211				skb->ip_summed = CHECKSUM_UNNECESSARY;
2212		}
2213
2214#ifdef BCM_VLAN
2215		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && bp->vlgrp) {
2216			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
2217				rx_hdr->l2_fhdr_vlan_tag);
2218		}
2219		else
2220#endif
2221			netif_receive_skb(skb);
2222
2223		bp->dev->last_rx = jiffies;
2224		rx_pkt++;
2225
2226next_rx:
2227		sw_cons = NEXT_RX_BD(sw_cons);
2228		sw_prod = NEXT_RX_BD(sw_prod);
2229
2230		if (rx_pkt == budget)
2231			break;
2232
2233		/* Refresh hw_cons to see if there is new work */
2234		if (sw_cons == hw_cons) {
2235			hw_cons = bp->hw_rx_cons =
2236				sblk->status_rx_quick_consumer_index0;
2237			if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
2238				hw_cons++;
2239			rmb();
2240		}
2241	}
2242	bp->rx_cons = sw_cons;
2243	bp->rx_prod = sw_prod;
2244
2245	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
2246
2247	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
2248
2249	mmiowb();
2250
2251	return rx_pkt;
2253}
2254
2255/* MSI ISR - The only difference between this and the INTx ISR
2256 * is that the MSI interrupt is always serviced.
2257 */
2258static irqreturn_t
2259bnx2_msi(int irq, void *dev_instance)
2260{
2261	struct net_device *dev = dev_instance;
2262	struct bnx2 *bp = netdev_priv(dev);
2263
2264	prefetch(bp->status_blk);
2265	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2266		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2267		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2268
2269	/* Return here if interrupt is disabled. */
2270	if (unlikely(atomic_read(&bp->intr_sem) != 0))
2271		return IRQ_HANDLED;
2272
2273	netif_rx_schedule(dev);
2274
2275	return IRQ_HANDLED;
2276}
2277
2278static irqreturn_t
2279bnx2_msi_1shot(int irq, void *dev_instance)
2280{
2281	struct net_device *dev = dev_instance;
2282	struct bnx2 *bp = netdev_priv(dev);
2283
2284	prefetch(bp->status_blk);
2285
2286	/* Return here if interrupt is disabled. */
2287	if (unlikely(atomic_read(&bp->intr_sem) != 0))
2288		return IRQ_HANDLED;
2289
2290	netif_rx_schedule(dev);
2291
2292	return IRQ_HANDLED;
2293}
2294
2295static irqreturn_t
2296bnx2_interrupt(int irq, void *dev_instance)
2297{
2298	struct net_device *dev = dev_instance;
2299	struct bnx2 *bp = netdev_priv(dev);
2300
2301	/* When using INTx, it is possible for the interrupt to arrive
2302	 * at the CPU before the status block posted prior to the
2303	 * interrupt. Reading a register will flush the status block.
2304	 * When using MSI, the MSI message will always complete after
2305	 * the status block write.
2306	 */
2307	if ((bp->status_blk->status_idx == bp->last_status_idx) &&
2308	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
2309	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
2310		return IRQ_NONE;
2311
2312	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2313		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2314		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2315
2316	/* Return here if interrupt is shared and is disabled. */
2317	if (unlikely(atomic_read(&bp->intr_sem) != 0))
2318		return IRQ_HANDLED;
2319
2320	netif_rx_schedule(dev);
2321
2322	return IRQ_HANDLED;
2323}
2324
2325#define STATUS_ATTN_EVENTS	STATUS_ATTN_BITS_LINK_STATE
2326
2327static inline int
2328bnx2_has_work(struct bnx2 *bp)
2329{
2330	struct status_block *sblk = bp->status_blk;
2331
2332	if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
2333	    (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2334		return 1;
2335
2336	if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2337	    (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
2338		return 1;
2339
2340	return 0;
2341}
2342
2343static int
2344bnx2_poll(struct net_device *dev, int *budget)
2345{
2346	struct bnx2 *bp = netdev_priv(dev);
2347	struct status_block *sblk = bp->status_blk;
2348	u32 status_attn_bits = sblk->status_attn_bits;
2349	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
2350
2351	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
2352	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
2353
2354		bnx2_phy_int(bp);
2355
2356		/* This is needed to take care of transient status
2357		 * during link changes.
2358		 */
2359		REG_WR(bp, BNX2_HC_COMMAND,
2360		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
2361		REG_RD(bp, BNX2_HC_COMMAND);
2362	}
2363
2364	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
2365		bnx2_tx_int(bp);
2366
2367	if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
2368		int orig_budget = *budget;
2369		int work_done;
2370
2371		if (orig_budget > dev->quota)
2372			orig_budget = dev->quota;
2373
2374		work_done = bnx2_rx_int(bp, orig_budget);
2375		*budget -= work_done;
2376		dev->quota -= work_done;
2377	}
2378
2379	bp->last_status_idx = bp->status_blk->status_idx;
2380	rmb();
2381
2382	if (!bnx2_has_work(bp)) {
2383		netif_rx_complete(dev);
2384		if (likely(bp->flags & USING_MSI_FLAG)) {
2385			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2386			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2387			       bp->last_status_idx);
2388			return 0;
2389		}
2390		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2391		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2392		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
2393		       bp->last_status_idx);
2394
2395		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2396		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2397		       bp->last_status_idx);
2398		return 0;
2399	}
2400
2401	return 1;
2402}
2403
2404/* Called with rtnl_lock from vlan functions and also netif_tx_lock
2405 * from set_multicast.
2406 */
2407static void
2408bnx2_set_rx_mode(struct net_device *dev)
2409{
2410	struct bnx2 *bp = netdev_priv(dev);
2411	u32 rx_mode, sort_mode;
2412	int i;
2413
2414	spin_lock_bh(&bp->phy_lock);
2415
2416	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
2417				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
2418	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
2419#ifdef BCM_VLAN
2420	if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
2421		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2422#else
2423	if (!(bp->flags & ASF_ENABLE_FLAG))
2424		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2425#endif
2426	if (dev->flags & IFF_PROMISC) {
2427		/* Promiscuous mode. */
2428		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
2429		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
2430			     BNX2_RPM_SORT_USER0_PROM_VLAN;
2431	}
2432	else if (dev->flags & IFF_ALLMULTI) {
2433		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2434			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2435			       0xffffffff);
2436		}
2437		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
2438	}
2439	else {
2440		/* Accept one or more multicast(s). */
2441		struct dev_mc_list *mclist;
2442		u32 mc_filter[NUM_MC_HASH_REGISTERS];
2443		u32 regidx;
2444		u32 bit;
2445		u32 crc;
2446
2447		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
2448
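		/* Each address hashes to one bit of the 8 x 32-bit filter:
		 * CRC bits 7:5 select the register and bits 4:0 the bit.
		 * Worked example (illustrative): crc = 0x1234abcd gives
		 * bit = 0xcd, so regidx = 6 and bit 13 of
		 * BNX2_EMAC_MULTICAST_HASH6 is set.
		 */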
2449		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2450		     i++, mclist = mclist->next) {
2451
2452			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
2453			bit = crc & 0xff;
2454			regidx = (bit & 0xe0) >> 5;
2455			bit &= 0x1f;
2456			mc_filter[regidx] |= (1 << bit);
2457		}
2458
2459		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2460			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2461			       mc_filter[i]);
2462		}
2463
2464		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
2465	}
2466
2467	if (rx_mode != bp->rx_mode) {
2468		bp->rx_mode = rx_mode;
2469		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
2470	}
2471
2472	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2473	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
2474	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
2475
2476	spin_unlock_bh(&bp->phy_lock);
2477}
2478
2479#define FW_BUF_SIZE	0x8000
2480
2481static int
2482bnx2_gunzip_init(struct bnx2 *bp)
2483{
2484	if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2485		goto gunzip_nomem1;
2486
2487	if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2488		goto gunzip_nomem2;
2489
2490	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2491	if (bp->strm->workspace == NULL)
2492		goto gunzip_nomem3;
2493
2494	return 0;
2495
2496gunzip_nomem3:
2497	kfree(bp->strm);
2498	bp->strm = NULL;
2499
2500gunzip_nomem2:
2501	vfree(bp->gunzip_buf);
2502	bp->gunzip_buf = NULL;
2503
2504gunzip_nomem1:
2505	printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2506			    "decompression.\n", bp->dev->name);
2507	return -ENOMEM;
2508}
2509
2510static void
2511bnx2_gunzip_end(struct bnx2 *bp)
2512{
2513	kfree(bp->strm->workspace);
2514
2515	kfree(bp->strm);
2516	bp->strm = NULL;
2517
2518	if (bp->gunzip_buf) {
2519		vfree(bp->gunzip_buf);
2520		bp->gunzip_buf = NULL;
2521	}
2522}
2523
2524static int
2525bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
2526{
2527	int n, rc;
2528
2529	/* check gzip header */
2530	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
2531		return -EINVAL;
2532
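	/* A gzip member begins with a 10-byte fixed header (RFC 1952).
	 * If the FNAME flag is set, a NUL-terminated original file name
	 * follows and must be skipped to reach the deflate stream; the
	 * other optional header fields are not handled here.
	 */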
2533	n = 10;
2534
2535#define FNAME	0x8
2536	if (zbuf[3] & FNAME)
2537		while ((zbuf[n++] != 0) && (n < len));
2538
2539	bp->strm->next_in = zbuf + n;
2540	bp->strm->avail_in = len - n;
2541	bp->strm->next_out = bp->gunzip_buf;
2542	bp->strm->avail_out = FW_BUF_SIZE;
2543
2544	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
2545	if (rc != Z_OK)
2546		return rc;
2547
2548	rc = zlib_inflate(bp->strm, Z_FINISH);
2549
2550	*outlen = FW_BUF_SIZE - bp->strm->avail_out;
2551	*outbuf = bp->gunzip_buf;
2552
2553	if ((rc != Z_OK) && (rc != Z_STREAM_END))
2554		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
2555		       bp->dev->name, bp->strm->msg);
2556
2557	zlib_inflateEnd(bp->strm);
2558
2559	if (rc == Z_STREAM_END)
2560		return 0;
2561
2562	return rc;
2563}
2564
2565static void
2566load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2567	u32 rv2p_proc)
2568{
2569	int i;
2570	u32 val;
2571
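	/* Each RV2P instruction is 64 bits, written as a high/low pair of
	 * 32-bit words; the ADDR_CMD write then commits the pair to
	 * instruction slot i / 8.
	 */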
2573	for (i = 0; i < rv2p_code_len; i += 8) {
2574		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
2575		rv2p_code++;
2576		REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
2577		rv2p_code++;
2578
2579		if (rv2p_proc == RV2P_PROC1) {
2580			val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2581			REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2582		}
2583		else {
2584			val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2585			REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2586		}
2587	}
2588
2589	/* Reset the processor; it is un-stalled later. */
2590	if (rv2p_proc == RV2P_PROC1) {
2591		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2592	}
2593	else {
2594		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
2595	}
2596}
2597
2598static int
2599load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2600{
2601	u32 offset;
2602	u32 val;
2603	int rc;
2604
2605	/* Halt the CPU. */
2606	val = REG_RD_IND(bp, cpu_reg->mode);
2607	val |= cpu_reg->mode_value_halt;
2608	REG_WR_IND(bp, cpu_reg->mode, val);
2609	REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2610
2611	/* Load the Text area. */
2612	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2613	if (fw->gz_text) {
2614		u32 text_len;
2615		void *text;
2616
2617		rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
2618				 &text_len);
2619		if (rc)
2620			return rc;
2621
2622		fw->text = text;
2623	}
2624	if (fw->gz_text) {
2625		int j;
2626
2627		for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2628			REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
2629		}
2630	}
2631
2632	/* Load the Data area. */
2633	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2634	if (fw->data) {
2635		int j;
2636
2637		for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2638			REG_WR_IND(bp, offset, fw->data[j]);
2639		}
2640	}
2641
2642	/* Load the SBSS area. */
2643	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2644	if (fw->sbss) {
2645		int j;
2646
2647		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2648			REG_WR_IND(bp, offset, fw->sbss[j]);
2649		}
2650	}
2651
2652	/* Load the BSS area. */
2653	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2654	if (fw->bss) {
2655		int j;
2656
2657		for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2658			REG_WR_IND(bp, offset, fw->bss[j]);
2659		}
2660	}
2661
2662	/* Load the Read-Only area. */
2663	offset = cpu_reg->spad_base +
2664		(fw->rodata_addr - cpu_reg->mips_view_base);
2665	if (fw->rodata) {
2666		int j;
2667
2668		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2669			REG_WR_IND(bp, offset, fw->rodata[j]);
2670		}
2671	}
2672
2673	/* Clear the pre-fetch instruction. */
2674	REG_WR_IND(bp, cpu_reg->inst, 0);
2675	REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2676
2677	/* Start the CPU. */
2678	val = REG_RD_IND(bp, cpu_reg->mode);
2679	val &= ~cpu_reg->mode_value_halt;
2680	REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2681	REG_WR_IND(bp, cpu_reg->mode, val);
2682
2683	return 0;
2684}
2685
2686static int
2687bnx2_init_cpus(struct bnx2 *bp)
2688{
2689	struct cpu_reg cpu_reg;
2690	struct fw_info *fw;
2691	int rc = 0;
2692	void *text;
2693	u32 text_len;
2694
2695	if ((rc = bnx2_gunzip_init(bp)) != 0)
2696		return rc;
2697
2698	/* Initialize the RV2P processor. */
2699	rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
2700			 &text_len);
2701	if (rc)
2702		goto init_cpu_err;
2703
2704	load_rv2p_fw(bp, text, text_len, RV2P_PROC1);
2705
2706	rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
2707			 &text_len);
2708	if (rc)
2709		goto init_cpu_err;
2710
2711	load_rv2p_fw(bp, text, text_len, RV2P_PROC2);
2712
2713	/* Initialize the RX Processor. */
2714	cpu_reg.mode = BNX2_RXP_CPU_MODE;
2715	cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
2716	cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
2717	cpu_reg.state = BNX2_RXP_CPU_STATE;
2718	cpu_reg.state_value_clear = 0xffffff;
2719	cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
2720	cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
2721	cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
2722	cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
2723	cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
2724	cpu_reg.spad_base = BNX2_RXP_SCRATCH;
2725	cpu_reg.mips_view_base = 0x8000000;
2726
2727	if (CHIP_NUM(bp) == CHIP_NUM_5709)
2728		fw = &bnx2_rxp_fw_09;
2729	else
2730		fw = &bnx2_rxp_fw_06;
2731
2732	rc = load_cpu_fw(bp, &cpu_reg, fw);
2733	if (rc)
2734		goto init_cpu_err;
2735
2736	/* Initialize the TX Processor. */
2737	cpu_reg.mode = BNX2_TXP_CPU_MODE;
2738	cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
2739	cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
2740	cpu_reg.state = BNX2_TXP_CPU_STATE;
2741	cpu_reg.state_value_clear = 0xffffff;
2742	cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
2743	cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
2744	cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
2745	cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
2746	cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
2747	cpu_reg.spad_base = BNX2_TXP_SCRATCH;
2748	cpu_reg.mips_view_base = 0x8000000;
2749
2750	if (CHIP_NUM(bp) == CHIP_NUM_5709)
2751		fw = &bnx2_txp_fw_09;
2752	else
2753		fw = &bnx2_txp_fw_06;
2754
2755	rc = load_cpu_fw(bp, &cpu_reg, fw);
2756	if (rc)
2757		goto init_cpu_err;
2758
2759	/* Initialize the TX Patch-up Processor. */
2760	cpu_reg.mode = BNX2_TPAT_CPU_MODE;
2761	cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
2762	cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
2763	cpu_reg.state = BNX2_TPAT_CPU_STATE;
2764	cpu_reg.state_value_clear = 0xffffff;
2765	cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
2766	cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
2767	cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
2768	cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
2769	cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
2770	cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
2771	cpu_reg.mips_view_base = 0x8000000;
2772
2773	if (CHIP_NUM(bp) == CHIP_NUM_5709)
2774		fw = &bnx2_tpat_fw_09;
2775	else
2776		fw = &bnx2_tpat_fw_06;
2777
2778	rc = load_cpu_fw(bp, &cpu_reg, fw);
2779	if (rc)
2780		goto init_cpu_err;
2781
2782	/* Initialize the Completion Processor. */
2783	cpu_reg.mode = BNX2_COM_CPU_MODE;
2784	cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
2785	cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
2786	cpu_reg.state = BNX2_COM_CPU_STATE;
2787	cpu_reg.state_value_clear = 0xffffff;
2788	cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
2789	cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
2790	cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
2791	cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
2792	cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
2793	cpu_reg.spad_base = BNX2_COM_SCRATCH;
2794	cpu_reg.mips_view_base = 0x8000000;
2795
2796	if (CHIP_NUM(bp) == CHIP_NUM_5709)
2797		fw = &bnx2_com_fw_09;
2798	else
2799		fw = &bnx2_com_fw_06;
2800
2801	rc = load_cpu_fw(bp, &cpu_reg, fw);
2802	if (rc)
2803		goto init_cpu_err;
2804
2805	/* Initialize the Command Processor. */
2806	cpu_reg.mode = BNX2_CP_CPU_MODE;
2807	cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
2808	cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
2809	cpu_reg.state = BNX2_CP_CPU_STATE;
2810	cpu_reg.state_value_clear = 0xffffff;
2811	cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
2812	cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
2813	cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
2814	cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
2815	cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
2816	cpu_reg.spad_base = BNX2_CP_SCRATCH;
2817	cpu_reg.mips_view_base = 0x8000000;
2818
2819	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
2820		fw = &bnx2_cp_fw_09;
2821
2822		rc = load_cpu_fw(bp, &cpu_reg, fw);
2823		if (rc)
2824			goto init_cpu_err;
2825	}
2826init_cpu_err:
2827	bnx2_gunzip_end(bp);
2828	return rc;
2829}
2830
2831static int
2832bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
2833{
2834	u16 pmcsr;
2835
2836	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2837
2838	switch (state) {
2839	case PCI_D0: {
2840		u32 val;
2841
2842		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2843			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2844			PCI_PM_CTRL_PME_STATUS);
2845
2846		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2847			/* delay required during transition out of D3hot */
2848			msleep(20);
2849
2850		val = REG_RD(bp, BNX2_EMAC_MODE);
2851		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
2852		val &= ~BNX2_EMAC_MODE_MPKT;
2853		REG_WR(bp, BNX2_EMAC_MODE, val);
2854
2855		val = REG_RD(bp, BNX2_RPM_CONFIG);
2856		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2857		REG_WR(bp, BNX2_RPM_CONFIG, val);
2858		break;
2859	}
2860	case PCI_D3hot: {
2861		int i;
2862		u32 val, wol_msg;
2863
2864		if (bp->wol) {
2865			u32 advertising;
2866			u8 autoneg;
2867
2868			autoneg = bp->autoneg;
2869			advertising = bp->advertising;
2870
2871			bp->autoneg = AUTONEG_SPEED;
2872			bp->advertising = ADVERTISED_10baseT_Half |
2873				ADVERTISED_10baseT_Full |
2874				ADVERTISED_100baseT_Half |
2875				ADVERTISED_100baseT_Full |
2876				ADVERTISED_Autoneg;
2877
2878			bnx2_setup_copper_phy(bp);
2879
2880			bp->autoneg = autoneg;
2881			bp->advertising = advertising;
2882
2883			bnx2_set_mac_addr(bp);
2884
2885			val = REG_RD(bp, BNX2_EMAC_MODE);
2886
2887			/* Enable port mode. */
2888			val &= ~BNX2_EMAC_MODE_PORT;
2889			val |= BNX2_EMAC_MODE_PORT_MII |
2890			       BNX2_EMAC_MODE_MPKT_RCVD |
2891			       BNX2_EMAC_MODE_ACPI_RCVD |
2892			       BNX2_EMAC_MODE_MPKT;
2893
2894			REG_WR(bp, BNX2_EMAC_MODE, val);
2895
2896			/* receive all multicast */
2897			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2898				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2899				       0xffffffff);
2900			}
2901			REG_WR(bp, BNX2_EMAC_RX_MODE,
2902			       BNX2_EMAC_RX_MODE_SORT_MODE);
2903
2904			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
2905			      BNX2_RPM_SORT_USER0_MC_EN;
2906			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2907			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
2908			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
2909			       BNX2_RPM_SORT_USER0_ENA);
2910
2911			/* Need to enable EMAC and RPM for WOL. */
2912			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2913			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
2914			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
2915			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
2916
2917			val = REG_RD(bp, BNX2_RPM_CONFIG);
2918			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2919			REG_WR(bp, BNX2_RPM_CONFIG, val);
2920
2921			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
2922		}
2923		else {
2924			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
2925		}
2926
2927		if (!(bp->flags & NO_WOL_FLAG))
2928			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
2929
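		/* The PCI_PM_CTRL state field value 3 selects D3hot; 5706
		 * A0/A1 parts are only moved out of D0 when WOL is
		 * enabled.
		 */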
2930		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2931		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
2932		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
2933
2934			if (bp->wol)
2935				pmcsr |= 3;
2936		}
2937		else {
2938			pmcsr |= 3;
2939		}
2940		if (bp->wol) {
2941			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2942		}
2943		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2944				      pmcsr);
2945
2946		/* No more memory access after this point until
2947		 * device is brought back to D0.
2948		 */
2949		udelay(50);
2950		break;
2951	}
2952	default:
2953		return -EINVAL;
2954	}
2955	return 0;
2956}
2957
2958static int
2959bnx2_acquire_nvram_lock(struct bnx2 *bp)
2960{
2961	u32 val;
2962	int j;
2963
2964	/* Request access to the flash interface. */
2965	REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2966	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2967		val = REG_RD(bp, BNX2_NVM_SW_ARB);
2968		if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2969			break;
2970
2971		udelay(5);
2972	}
2973
2974	if (j >= NVRAM_TIMEOUT_COUNT)
2975		return -EBUSY;
2976
2977	return 0;
2978}
2979
2980static int
2981bnx2_release_nvram_lock(struct bnx2 *bp)
2982{
2983	int j;
2984	u32 val;
2985
2986	/* Relinquish nvram interface. */
2987	REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2988
2989	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2990		val = REG_RD(bp, BNX2_NVM_SW_ARB);
2991		if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2992			break;
2993
2994		udelay(5);
2995	}
2996
2997	if (j >= NVRAM_TIMEOUT_COUNT)
2998		return -EBUSY;
2999
3000	return 0;
3001}
3002
3004static int
3005bnx2_enable_nvram_write(struct bnx2 *bp)
3006{
3007	u32 val;
3008
3009	val = REG_RD(bp, BNX2_MISC_CFG);
3010	REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3011
3012	if (!bp->flash_info->buffered) {
3013		int j;
3014
3015		REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3016		REG_WR(bp, BNX2_NVM_COMMAND,
3017		       BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3018
3019		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3020			udelay(5);
3021
3022			val = REG_RD(bp, BNX2_NVM_COMMAND);
3023			if (val & BNX2_NVM_COMMAND_DONE)
3024				break;
3025		}
3026
3027		if (j >= NVRAM_TIMEOUT_COUNT)
3028			return -EBUSY;
3029	}
3030	return 0;
3031}
3032
3033static void
3034bnx2_disable_nvram_write(struct bnx2 *bp)
3035{
3036	u32 val;
3037
3038	val = REG_RD(bp, BNX2_MISC_CFG);
3039	REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3040}
3041
3043static void
3044bnx2_enable_nvram_access(struct bnx2 *bp)
3045{
3046	u32 val;
3047
3048	val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3049	/* Enable both bits, even on read. */
3050	REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3051	       val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3052}
3053
3054static void
3055bnx2_disable_nvram_access(struct bnx2 *bp)
3056{
3057	u32 val;
3058
3059	val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3060	/* Disable both bits, even after read. */
3061	REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3062		val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3063			BNX2_NVM_ACCESS_ENABLE_WR_EN));
3064}
3065
3066static int
3067bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3068{
3069	u32 cmd;
3070	int j;
3071
3072	if (bp->flash_info->buffered)
3073		/* Buffered flash, no erase needed */
3074		return 0;
3075
3076	/* Build an erase command */
3077	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3078	      BNX2_NVM_COMMAND_DOIT;
3079
3080	/* Need to clear DONE bit separately. */
3081	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3082
3083	/* Address of the NVRAM sector to erase. */
3084	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3085
3086	/* Issue an erase command. */
3087	REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3088
3089	/* Wait for completion. */
3090	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3091		u32 val;
3092
3093		udelay(5);
3094
3095		val = REG_RD(bp, BNX2_NVM_COMMAND);
3096		if (val & BNX2_NVM_COMMAND_DONE)
3097			break;
3098	}
3099
3100	if (j >= NVRAM_TIMEOUT_COUNT)
3101		return -EBUSY;
3102
3103	return 0;
3104}
3105
3106static int
3107bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3108{
3109	u32 cmd;
3110	int j;
3111
3112	/* Build the command word. */
3113	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3114
3115	/* Translate to the physical offset within a buffered flash. */
3116	if (bp->flash_info->buffered) {
3117		offset = ((offset / bp->flash_info->page_size) <<
3118			   bp->flash_info->page_bits) +
3119			  (offset % bp->flash_info->page_size);
3120	}
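	/* Worked example (illustrative), assuming a buffered part with
	 * 264-byte pages addressed through 9 page bits: offset 600 maps
	 * to page 2, byte 72, i.e. (2 << 9) + 72 = 0x448.
	 */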
3121
3122	/* Need to clear DONE bit separately. */
3123	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3124
3125	/* Address of the NVRAM to read from. */
3126	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3127
3128	/* Issue a read command. */
3129	REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3130
3131	/* Wait for completion. */
3132	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3133		u32 val;
3134
3135		udelay(5);
3136
3137		val = REG_RD(bp, BNX2_NVM_COMMAND);
3138		if (val & BNX2_NVM_COMMAND_DONE) {
3139			val = REG_RD(bp, BNX2_NVM_READ);
3140
3141			val = be32_to_cpu(val);
3142			memcpy(ret_val, &val, 4);
3143			break;
3144		}
3145	}
3146	if (j >= NVRAM_TIMEOUT_COUNT)
3147		return -EBUSY;
3148
3149	return 0;
3150}
3151
3153static int
3154bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3155{
3156	u32 cmd, val32;
3157	int j;
3158
3159	/* Build the command word. */
3160	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3161
3162	/* Translate to the physical offset within a buffered flash. */
3163	if (bp->flash_info->buffered) {
3164		offset = ((offset / bp->flash_info->page_size) <<
3165			  bp->flash_info->page_bits) +
3166			 (offset % bp->flash_info->page_size);
3167	}
3168
3169	/* Need to clear DONE bit separately. */
3170	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3171
3172	memcpy(&val32, val, 4);
3173	val32 = cpu_to_be32(val32);
3174
3175	/* Write the data. */
3176	REG_WR(bp, BNX2_NVM_WRITE, val32);
3177
3178	/* Address of the NVRAM to write to. */
3179	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3180
3181	/* Issue the write command. */
3182	REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3183
3184	/* Wait for completion. */
3185	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3186		udelay(5);
3187
3188		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3189			break;
3190	}
3191	if (j >= NVRAM_TIMEOUT_COUNT)
3192		return -EBUSY;
3193
3194	return 0;
3195}
3196
3197static int
3198bnx2_init_nvram(struct bnx2 *bp)
3199{
3200	u32 val;
3201	int j, entry_count, rc;
3202	struct flash_spec *flash;
3203
3204	/* Determine the selected interface. */
3205	val = REG_RD(bp, BNX2_NVM_CFG1);
3206
3207	entry_count = ARRAY_SIZE(flash_table);
3208
3209	rc = 0;
3210	if (val & 0x40000000) {
3211
3212		/* Flash interface has been reconfigured */
3213		for (j = 0, flash = &flash_table[0]; j < entry_count;
3214		     j++, flash++) {
3215			if ((val & FLASH_BACKUP_STRAP_MASK) ==
3216			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
3217				bp->flash_info = flash;
3218				break;
3219			}
3220		}
3221	}
3222	else {
3223		u32 mask;
3224		/* Flash interface has not yet been reconfigured */
3225
3226		if (val & (1 << 23))
3227			mask = FLASH_BACKUP_STRAP_MASK;
3228		else
3229			mask = FLASH_STRAP_MASK;
3230
3231		for (j = 0, flash = &flash_table[0]; j < entry_count;
3232			j++, flash++) {
3233
3234			if ((val & mask) == (flash->strapping & mask)) {
3235				bp->flash_info = flash;
3236
3237				/* Request access to the flash interface. */
3238				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3239					return rc;
3240
3241				/* Enable access to flash interface */
3242				bnx2_enable_nvram_access(bp);
3243
3244				/* Reconfigure the flash interface */
3245				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
3246				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
3247				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
3248				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
3249
3250				/* Disable access to flash interface */
3251				bnx2_disable_nvram_access(bp);
3252				bnx2_release_nvram_lock(bp);
3253
3254				break;
3255			}
3256		}
3257	} /* if (val & 0x40000000) */
3258
3259	if (j == entry_count) {
3260		bp->flash_info = NULL;
3261		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
3262		return -ENODEV;
3263	}
3264
3265	val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
3266	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
3267	if (val)
3268		bp->flash_size = val;
3269	else
3270		bp->flash_size = bp->flash_info->total_size;
3271
3272	return rc;
3273}
3274
3275static int
3276bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3277		int buf_size)
3278{
3279	int rc = 0;
3280	u32 cmd_flags, offset32, len32, extra;
3281
3282	if (buf_size == 0)
3283		return 0;
3284
3285	/* Request access to the flash interface. */
3286	if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3287		return rc;
3288
3289	/* Enable access to flash interface */
3290	bnx2_enable_nvram_access(bp);
3291
3292	len32 = buf_size;
3293	offset32 = offset;
3294	extra = 0;
3295
3296	cmd_flags = 0;
3297
3298	if (offset32 & 3) {
3299		u8 buf[4];
3300		u32 pre_len;
3301
3302		offset32 &= ~3;
3303		pre_len = 4 - (offset & 3);
3304
3305		if (pre_len >= len32) {
3306			pre_len = len32;
3307			cmd_flags = BNX2_NVM_COMMAND_FIRST |
3308				    BNX2_NVM_COMMAND_LAST;
3309		}
3310		else {
3311			cmd_flags = BNX2_NVM_COMMAND_FIRST;
3312		}
3313
3314		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3315
3316		if (rc)
3317			return rc;
3318
3319		memcpy(ret_buf, buf + (offset & 3), pre_len);
3320
3321		offset32 += 4;
3322		ret_buf += pre_len;
3323		len32 -= pre_len;
3324	}
3325	if (len32 & 3) {
3326		extra = 4 - (len32 & 3);
3327		len32 = (len32 + 4) & ~3;
3328	}
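	/* e.g. (illustrative) a 6-byte tail rounds len32 up to 8 with
	 * extra = 2; the final memcpy below drops the 2 surplus bytes.
	 */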
3329
3330	if (len32 == 4) {
3331		u8 buf[4];
3332
3333		if (cmd_flags)
3334			cmd_flags = BNX2_NVM_COMMAND_LAST;
3335		else
3336			cmd_flags = BNX2_NVM_COMMAND_FIRST |
3337				    BNX2_NVM_COMMAND_LAST;
3338
3339		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3340
3341		memcpy(ret_buf, buf, 4 - extra);
3342	}
3343	else if (len32 > 0) {
3344		u8 buf[4];
3345
3346		/* Read the first word. */
3347		if (cmd_flags)
3348			cmd_flags = 0;
3349		else
3350			cmd_flags = BNX2_NVM_COMMAND_FIRST;
3351
3352		rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3353
3354		/* Advance to the next dword. */
3355		offset32 += 4;
3356		ret_buf += 4;
3357		len32 -= 4;
3358
3359		while (len32 > 4 && rc == 0) {
3360			rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3361
3362			/* Advance to the next dword. */
3363			offset32 += 4;
3364			ret_buf += 4;
3365			len32 -= 4;
3366		}
3367
3368		if (rc)
3369			return rc;
3370
3371		cmd_flags = BNX2_NVM_COMMAND_LAST;
3372		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3373
3374		memcpy(ret_buf, buf, 4 - extra);
3375	}
3376
3377	/* Disable access to flash interface */
3378	bnx2_disable_nvram_access(bp);
3379
3380	bnx2_release_nvram_lock(bp);
3381
3382	return rc;
3383}
3384
3385static int
3386bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3387		int buf_size)
3388{
3389	u32 written, offset32, len32;
3390	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
3391	int rc = 0;
3392	int align_start, align_end;
3393
3394	buf = data_buf;
3395	offset32 = offset;
3396	len32 = buf_size;
3397	align_start = align_end = 0;
3398
3399	if ((align_start = (offset32 & 3))) {
3400		offset32 &= ~3;
3401		len32 += align_start;
3402		if (len32 < 4)
3403			len32 = 4;
3404		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3405			return rc;
3406	}
3407
3408	if (len32 & 3) {
3409		align_end = 4 - (len32 & 3);
3410		len32 += align_end;
3411		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
3412			return rc;
3413	}
3414
3415	if (align_start || align_end) {
3416		align_buf = kmalloc(len32, GFP_KERNEL);
3417		if (align_buf == NULL)
3418			return -ENOMEM;
3419		if (align_start) {
3420			memcpy(align_buf, start, 4);
3421		}
3422		if (align_end) {
3423			memcpy(align_buf + len32 - 4, end, 4);
3424		}
3425		memcpy(align_buf + align_start, data_buf, buf_size);
3426		buf = align_buf;
3427	}
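	/* Worked example (illustrative): writing 5 bytes at offset 6
	 * yields align_start = 2 and align_end = 1, so an 8-byte buffer
	 * covering offsets 4-11 is assembled from the two dwords read
	 * back above, with the new data merged in at buffer offset 2.
	 */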
3428
3429	if (bp->flash_info->buffered == 0) {
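		/* Scratch buffer for read-modify-write of one flash page;
		 * 264 bytes is assumed to be at least the largest page
		 * size among the supported non-buffered parts.
		 */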
3430		flash_buffer = kmalloc(264, GFP_KERNEL);
3431		if (flash_buffer == NULL) {
3432			rc = -ENOMEM;
3433			goto nvram_write_end;
3434		}
3435	}
3436
3437	written = 0;
3438	while ((written < len32) && (rc == 0)) {
3439		u32 page_start, page_end, data_start, data_end;
3440		u32 addr, cmd_flags;
3441		int i;
3442
3443		/* Find the page_start addr */
3444		page_start = offset32 + written;
3445		page_start -= (page_start % bp->flash_info->page_size);
3446		/* Find the page_end addr */
3447		page_end = page_start + bp->flash_info->page_size;
3448		/* Find the data_start addr */
3449		data_start = (written == 0) ? offset32 : page_start;
3450		/* Find the data_end addr */
3451		data_end = (page_end > offset32 + len32) ?
3452			(offset32 + len32) : page_end;
3453
3454		/* Request access to the flash interface. */
3455		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3456			goto nvram_write_end;
3457
3458		/* Enable access to flash interface */
3459		bnx2_enable_nvram_access(bp);
3460
3461		cmd_flags = BNX2_NVM_COMMAND_FIRST;
3462		if (bp->flash_info->buffered == 0) {
3463			int j;
3464
3465			/* Read the whole page into the buffer
3466			 * (non-buffered flash only) */
3467			for (j = 0; j < bp->flash_info->page_size; j += 4) {
3468				if (j == (bp->flash_info->page_size - 4)) {
3469					cmd_flags |= BNX2_NVM_COMMAND_LAST;
3470				}
3471				rc = bnx2_nvram_read_dword(bp,
3472					page_start + j,
3473					&flash_buffer[j],
3474					cmd_flags);
3475
3476				if (rc)
3477					goto nvram_write_end;
3478
3479				cmd_flags = 0;
3480			}
3481		}
3482
3483		/* Enable writes to flash interface (unlock write-protect) */
3484		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3485			goto nvram_write_end;
3486
3487		/* Loop to write back the buffer data from page_start to
3488		 * data_start */
3489		i = 0;
3490		if (bp->flash_info->buffered == 0) {
3491			/* Erase the page */
3492			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3493				goto nvram_write_end;
3494
3495			/* Re-enable writes for the actual write */
3496			bnx2_enable_nvram_write(bp);
3497
3498			for (addr = page_start; addr < data_start;
3499				addr += 4, i += 4) {
3500
3501				rc = bnx2_nvram_write_dword(bp, addr,
3502					&flash_buffer[i], cmd_flags);
3503
3504				if (rc != 0)
3505					goto nvram_write_end;
3506
3507				cmd_flags = 0;
3508			}
3509		}
3510
3511		/* Loop to write the new data from data_start to data_end */
3512		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
3513			if ((addr == page_end - 4) ||
3514				((bp->flash_info->buffered) &&
3515				 (addr == data_end - 4))) {
3516
3517				cmd_flags |= BNX2_NVM_COMMAND_LAST;
3518			}
3519			rc = bnx2_nvram_write_dword(bp, addr, buf,
3520				cmd_flags);
3521
3522			if (rc != 0)
3523				goto nvram_write_end;
3524
3525			cmd_flags = 0;
3526			buf += 4;
3527		}
3528
3529		/* Loop to write back the buffer data from data_end
3530		 * to page_end */
3531		if (bp->flash_info->buffered == 0) {
3532			for (addr = data_end; addr < page_end;
3533				addr += 4, i += 4) {
3534
3535				if (addr == page_end - 4) {
3536					cmd_flags = BNX2_NVM_COMMAND_LAST;
3537				}
3538				rc = bnx2_nvram_write_dword(bp, addr,
3539					&flash_buffer[i], cmd_flags);
3540
3541				if (rc != 0)
3542					goto nvram_write_end;
3543
3544				cmd_flags = 0;
3545			}
3546		}
3547
3548		/* Disable writes to flash interface (lock write-protect) */
3549		bnx2_disable_nvram_write(bp);
3550
3551		/* Disable access to flash interface */
3552		bnx2_disable_nvram_access(bp);
3553		bnx2_release_nvram_lock(bp);
3554
3555		/* Increment written */
3556		written += data_end - data_start;
3557	}
3558
3559nvram_write_end:
3560	kfree(flash_buffer);
3561	kfree(align_buf);
3562	return rc;
3563}
3564
3565static int
3566bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3567{
3568	u32 val;
3569	int i, rc = 0;
3570
3571	/* Wait for the current PCI transaction to complete before
3572	 * issuing a reset. */
3573	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3574	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3575	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3576	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3577	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3578	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3579	udelay(5);
3580
3581	/* Wait for the firmware to tell us it is ok to issue a reset. */
3582	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3583
3584	/* Deposit a driver reset signature so the firmware knows that
3585	 * this is a soft reset. */
3586	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
3587		   BNX2_DRV_RESET_SIGNATURE_MAGIC);
3588
3589	/* Do a dummy read to force the chip to complete all current
3590	 * transactions before we issue a reset. */
3591	val = REG_RD(bp, BNX2_MISC_ID);
3592
3593	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3594		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
3595		REG_RD(bp, BNX2_MISC_COMMAND);
3596		udelay(5);
3597
3598		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3599		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3600
3601		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
3602
3603	} else {
3604		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3605		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3606		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3607
3608		/* Chip reset. */
3609		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3610
3611		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3612		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3613			current->state = TASK_UNINTERRUPTIBLE;
3614			schedule_timeout(HZ / 50);
3615		}
3616
3617		/* Reset takes approximately 30 usec */
3618		for (i = 0; i < 10; i++) {
3619			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3620			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3621				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3622				break;
3623			udelay(10);
3624		}
3625
3626		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3627			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3628			printk(KERN_ERR PFX "Chip reset did not complete\n");
3629			return -EBUSY;
3630		}
3631	}
3632
3633	/* Make sure byte swapping is properly configured. */
3634	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3635	if (val != 0x01020304) {
3636		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3637		return -ENODEV;
3638	}
3639
3640	/* Wait for the firmware to finish its initialization. */
3641	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3642	if (rc)
3643		return rc;
3644
3645	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3646		/* Adjust the voltage regulator two steps lower.  The default
3647		 * value of this register is 0x0000000e. */
3648		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3649
3650		/* Remove bad rbuf memory from the free pool. */
3651		rc = bnx2_alloc_bad_rbuf(bp);
3652	}
3653
3654	return rc;
3655}
3656
3657static int
3658bnx2_init_chip(struct bnx2 *bp)
3659{
3660	u32 val;
3661	int rc;
3662
3663	/* Make sure the interrupt is not active. */
3664	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3665
3666	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
3667	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
3668#ifdef __BIG_ENDIAN
3669	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
3670#endif
3671	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
3672	      DMA_READ_CHANS << 12 |
3673	      DMA_WRITE_CHANS << 16;
3674
3675	val |= (0x2 << 20) | (1 << 11);
3676
3677	if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
3678		val |= (1 << 23);
3679
3680	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
3681	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
3682		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
3683
3684	REG_WR(bp, BNX2_DMA_CONFIG, val);
3685
3686	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3687		val = REG_RD(bp, BNX2_TDMA_CONFIG);
3688		val |= BNX2_TDMA_CONFIG_ONE_DMA;
3689		REG_WR(bp, BNX2_TDMA_CONFIG, val);
3690	}
3691
3692	if (bp->flags & PCIX_FLAG) {
3693		u16 val16;
3694
3695		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3696				     &val16);
3697		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3698				      val16 & ~PCI_X_CMD_ERO);
3699	}
3700
3701	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3702	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3703	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3704	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3705
3706	/* Initialize context mapping and zero out the quick contexts.  The
3707	 * context block must have already been enabled. */
3708	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3709		rc = bnx2_init_5709_context(bp);
3710		if (rc)
3711			return rc;
3712	} else
3713		bnx2_init_context(bp);
3714
3715	if ((rc = bnx2_init_cpus(bp)) != 0)
3716		return rc;
3717
3718	bnx2_init_nvram(bp);
3719
3720	bnx2_set_mac_addr(bp);
3721
3722	val = REG_RD(bp, BNX2_MQ_CONFIG);
3723	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3724	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3725	if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
3726		val |= BNX2_MQ_CONFIG_HALT_DIS;
3727
3728	REG_WR(bp, BNX2_MQ_CONFIG, val);
3729
3730	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3731	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
3732	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
3733
3734	val = (BCM_PAGE_BITS - 8) << 24;
3735	REG_WR(bp, BNX2_RV2P_CONFIG, val);
3736
3737	/* Configure page size. */
3738	val = REG_RD(bp, BNX2_TBDR_CONFIG);
3739	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
3740	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3741	REG_WR(bp, BNX2_TBDR_CONFIG, val);
3742
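	/* Seed the transmit backoff pseudo-random generator from the MAC
	 * address so that nodes sharing a segment tend to pick different
	 * backoff slots; the two 24-bit halves of the address are summed
	 * to form the seed.
	 */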
3743	val = bp->mac_addr[0] +
3744	      (bp->mac_addr[1] << 8) +
3745	      (bp->mac_addr[2] << 16) +
3746	      bp->mac_addr[3] +
3747	      (bp->mac_addr[4] << 8) +
3748	      (bp->mac_addr[5] << 16);
3749	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
3750
3751	/* Program the MTU.  Also include 4 bytes for CRC32. */
3752	val = bp->dev->mtu + ETH_HLEN + 4;
3753	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
3754		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
3755	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
3756
3757	bp->last_status_idx = 0;
3758	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
3759
3760	/* Set up how to generate a link change interrupt. */
3761	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
3762
3763	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
3764	       (u64) bp->status_blk_mapping & 0xffffffff);
3765	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
3766
3767	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
3768	       (u64) bp->stats_blk_mapping & 0xffffffff);
3769	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
3770	       (u64) bp->stats_blk_mapping >> 32);
3771
3772	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
3773	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
3774
3775	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
3776	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
3777
3778	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
3779	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
3780
3781	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
3782
3783	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
3784
3785	REG_WR(bp, BNX2_HC_COM_TICKS,
3786	       (bp->com_ticks_int << 16) | bp->com_ticks);
3787
3788	REG_WR(bp, BNX2_HC_CMD_TICKS,
3789	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
3790
3791	if (CHIP_NUM(bp) == CHIP_NUM_5708)
3792		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
3793	else
3794		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
3795	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
3796
3797	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
3798		val = BNX2_HC_CONFIG_COLLECT_STATS;
3799	else {
3800		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
3801		      BNX2_HC_CONFIG_COLLECT_STATS;
3802	}
3803
3804	if (bp->flags & ONE_SHOT_MSI_FLAG)
3805		val |= BNX2_HC_CONFIG_ONE_SHOT;
3806
3807	REG_WR(bp, BNX2_HC_CONFIG, val);
3808
3809	/* Clear internal stats counters. */
3810	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
3811
3812	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
3813
3814	if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
3815	    BNX2_PORT_FEATURE_ASF_ENABLED)
3816		bp->flags |= ASF_ENABLE_FLAG;
3817
3818	/* Initialize the receive filter. */
3819	bnx2_set_rx_mode(bp->dev);
3820
3821	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3822		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
3823		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
3824		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
3825	}
3826	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
3827			  0);
3828
3829	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
3830	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
3831
3832	udelay(20);
3833
3834	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
3835
3836	return rc;
3837}
3838
3839static void
3840bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
3841{
3842	u32 val, offset0, offset1, offset2, offset3;
3843
3844	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3845		offset0 = BNX2_L2CTX_TYPE_XI;
3846		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
3847		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
3848		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
3849	} else {
3850		offset0 = BNX2_L2CTX_TYPE;
3851		offset1 = BNX2_L2CTX_CMD_TYPE;
3852		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
3853		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
3854	}
3855	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
3856	CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
3857
3858	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3859	CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
3860
3861	val = (u64) bp->tx_desc_mapping >> 32;
3862	CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
3863
3864	val = (u64) bp->tx_desc_mapping & 0xffffffff;
3865	CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
3866}
3867
3868static void
3869bnx2_init_tx_ring(struct bnx2 *bp)
3870{
3871	struct tx_bd *txbd;
3872	u32 cid;
3873
3874	bp->tx_wake_thresh = bp->tx_ring_size / 2;
3875
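	/* The last BD of the page is a chain BD pointing back to the
	 * start of the same page, making the TX ring circular.
	 */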
3876	txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
3877
3878	txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
3879	txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
3880
3881	bp->tx_prod = 0;
3882	bp->tx_cons = 0;
3883	bp->hw_tx_cons = 0;
3884	bp->tx_prod_bseq = 0;
3885
3886	cid = TX_CID;
3887	bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
3888	bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
3889
3890	bnx2_init_tx_context(bp, cid);
3891}
3892
3893static void
3894bnx2_init_rx_ring(struct bnx2 *bp)
3895{
3896	struct rx_bd *rxbd;
3897	int i;
3898	u16 prod, ring_prod;
3899	u32 val;
3900
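	/* rx_buf_use_size covers the Ethernet header, the MTU, the
	 * rx_offset bytes the chip prepends (assumed to hold the receive
	 * frame header) and 8 bytes for the CRC plus a possible VLAN tag;
	 * BNX2_RX_ALIGN leaves room for the alignment fixup in
	 * bnx2_alloc_rx_skb().
	 */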
3901	/* 8 for CRC and VLAN */
3902	bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
3903	/* hw alignment */
3904	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
3905
3906	ring_prod = prod = bp->rx_prod = 0;
3907	bp->rx_cons = 0;
3908	bp->hw_rx_cons = 0;
3909	bp->rx_prod_bseq = 0;
3910
3911	for (i = 0; i < bp->rx_max_ring; i++) {
3912		int j;
3913
3914		rxbd = &bp->rx_desc_ring[i][0];
3915		for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
3916			rxbd->rx_bd_len = bp->rx_buf_use_size;
3917			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
3918		}
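		/* The last BD of each page chains to the next page, and
		 * the final page wraps back to page 0, forming a single
		 * circular RX ring.
		 */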
3919		if (i == (bp->rx_max_ring - 1))
3920			j = 0;
3921		else
3922			j = i + 1;
3923		rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
3924		rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
3925				       0xffffffff;
3926	}
3927
3928	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
3929	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
3930	val |= 0x02 << 8;
3931	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);
3932
3933	val = (u64) bp->rx_desc_mapping[0] >> 32;
3934	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);
3935
3936	val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
3937	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);
3938
3939	for (i = 0; i < bp->rx_ring_size; i++) {
3940		if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
3941			break;
3942		}
3943		prod = NEXT_RX_BD(prod);
3944		ring_prod = RX_RING_IDX(prod);
3945	}
3946	bp->rx_prod = prod;
3947
3948	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
3949
3950	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
3951}
3952
3953static void
3954bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3955{
3956	u32 num_rings, max;
3957
3958	bp->rx_ring_size = size;
3959	num_rings = 1;
3960	while (size > MAX_RX_DESC_CNT) {
3961		size -= MAX_RX_DESC_CNT;
3962		num_rings++;
3963	}
3964	/* round to next power of 2 */
3965	max = MAX_RX_RINGS;
3966	while ((max & num_rings) == 0)
3967		max >>= 1;
3968
3969	if (num_rings != max)
3970		max <<= 1;
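	/* e.g. (illustrative) num_rings = 3: max shifts down to 2, the
	 * highest power of 2 not above 3, then doubles to the final 4.
	 */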
3971
3972	bp->rx_max_ring = max;
3973	bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
3974}
3975
3976static void
3977bnx2_free_tx_skbs(struct bnx2 *bp)
3978{
3979	int i;
3980
3981	if (bp->tx_buf_ring == NULL)
3982		return;
3983
3984	for (i = 0; i < TX_DESC_CNT; ) {
3985		struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
3986		struct sk_buff *skb = tx_buf->skb;
3987		int j, last;
3988
3989		if (skb == NULL) {
3990			i++;
3991			continue;
3992		}
3993
3994		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
3995			skb_headlen(skb), PCI_DMA_TODEVICE);
3996
3997		tx_buf->skb = NULL;
3998
3999		last = skb_shinfo(skb)->nr_frags;
4000		for (j = 0; j < last; j++) {
4001			tx_buf = &bp->tx_buf_ring[i + j + 1];
4002			pci_unmap_page(bp->pdev,
4003				pci_unmap_addr(tx_buf, mapping),
4004				skb_shinfo(skb)->frags[j].size,
4005				PCI_DMA_TODEVICE);
4006		}
4007		dev_kfree_skb(skb);
4008		i += j + 1;
4009	}
4011}
4012
4013static void
4014bnx2_free_rx_skbs(struct bnx2 *bp)
4015{
4016	int i;
4017
4018	if (bp->rx_buf_ring == NULL)
4019		return;
4020
4021	for (i = 0; i < bp->rx_max_ring_idx; i++) {
4022		struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
4023		struct sk_buff *skb = rx_buf->skb;
4024
4025		if (skb == NULL)
4026			continue;
4027
4028		pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
4029			bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
4030
4031		rx_buf->skb = NULL;
4032
4033		dev_kfree_skb(skb);
4034	}
4035}
4036
4037static void
4038bnx2_free_skbs(struct bnx2 *bp)
4039{
4040	bnx2_free_tx_skbs(bp);
4041	bnx2_free_rx_skbs(bp);
4042}
4043
4044static int
4045bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4046{
4047	int rc;
4048
4049	rc = bnx2_reset_chip(bp, reset_code);
4050	bnx2_free_skbs(bp);
4051	if (rc)
4052		return rc;
4053
4054	if ((rc = bnx2_init_chip(bp)) != 0)
4055		return rc;
4056
4057	bnx2_init_tx_ring(bp);
4058	bnx2_init_rx_ring(bp);
4059	return 0;
4060}
4061
4062static int
4063bnx2_init_nic(struct bnx2 *bp)
4064{
4065	int rc;
4066
4067	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4068		return rc;
4069
4070	spin_lock_bh(&bp->phy_lock);
4071	bnx2_init_phy(bp);
4072	spin_unlock_bh(&bp->phy_lock);
4073	bnx2_set_link(bp);
4074	return 0;
4075}
4076
4077static int
4078bnx2_test_registers(struct bnx2 *bp)
4079{
4080	int ret;
4081	int i, is_5709;
4082	static const struct {
4083		u16   offset;
4084		u16   flags;
4085#define BNX2_FL_NOT_5709	1
4086		u32   rw_mask;
4087		u32   ro_mask;
4088	} reg_tbl[] = {
4089		{ 0x006c, 0, 0x00000000, 0x0000003f },
4090		{ 0x0090, 0, 0xffffffff, 0x00000000 },
4091		{ 0x0094, 0, 0x00000000, 0x00000000 },
4092
4093		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
4094		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4095		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4096		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
4097		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
4098		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4099		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
4100		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4101		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4102
4103		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4104		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4105		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4106		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4107		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4108		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4109
4110		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4111		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
4112		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },
4113
4114		{ 0x1000, 0, 0x00000000, 0x00000001 },
4115		{ 0x1004, 0, 0x00000000, 0x000f0001 },
4116
4117		{ 0x1408, 0, 0x01c00800, 0x00000000 },
4118		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
4119		{ 0x14a8, 0, 0x00000000, 0x000001ff },
4120		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
4121		{ 0x14b0, 0, 0x00000002, 0x00000001 },
4122		{ 0x14b8, 0, 0x00000000, 0x00000000 },
4123		{ 0x14c0, 0, 0x00000000, 0x00000009 },
4124		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
4125		{ 0x14cc, 0, 0x00000000, 0x00000001 },
4126		{ 0x14d0, 0, 0xffffffff, 0x00000000 },
4127
4128		{ 0x1800, 0, 0x00000000, 0x00000001 },
4129		{ 0x1804, 0, 0x00000000, 0x00000003 },
4130
4131		{ 0x2800, 0, 0x00000000, 0x00000001 },
4132		{ 0x2804, 0, 0x00000000, 0x00003f01 },
4133		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
4134		{ 0x2810, 0, 0xffff0000, 0x00000000 },
4135		{ 0x2814, 0, 0xffff0000, 0x00000000 },
4136		{ 0x2818, 0, 0xffff0000, 0x00000000 },
4137		{ 0x281c, 0, 0xffff0000, 0x00000000 },
4138		{ 0x2834, 0, 0xffffffff, 0x00000000 },
4139		{ 0x2840, 0, 0x00000000, 0xffffffff },
4140		{ 0x2844, 0, 0x00000000, 0xffffffff },
4141		{ 0x2848, 0, 0xffffffff, 0x00000000 },
4142		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },
4143
4144		{ 0x2c00, 0, 0x00000000, 0x00000011 },
4145		{ 0x2c04, 0, 0x00000000, 0x00030007 },
4146
4147		{ 0x3c00, 0, 0x00000000, 0x00000001 },
4148		{ 0x3c04, 0, 0x00000000, 0x00070000 },
4149		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
4150		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
4151		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
4152		{ 0x3c14, 0, 0x00000000, 0xffffffff },
4153		{ 0x3c18, 0, 0x00000000, 0xffffffff },
4154		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
4155		{ 0x3c20, 0, 0xffffff00, 0x00000000 },
4156
4157		{ 0x5004, 0, 0x00000000, 0x0000007f },
4158		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },
4159
4160		{ 0x5c00, 0, 0x00000000, 0x00000001 },
4161		{ 0x5c04, 0, 0x00000000, 0x0003000f },
4162		{ 0x5c08, 0, 0x00000003, 0x00000000 },
4163		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
4164		{ 0x5c10, 0, 0x00000000, 0xffffffff },
4165		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
4166		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
4167		{ 0x5c88, 0, 0x00000000, 0x00077373 },
4168		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },
4169
4170		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
4171		{ 0x680c, 0, 0xffffffff, 0x00000000 },
4172		{ 0x6810, 0, 0xffffffff, 0x00000000 },
4173		{ 0x6814, 0, 0xffffffff, 0x00000000 },
4174		{ 0x6818, 0, 0xffffffff, 0x00000000 },
4175		{ 0x681c, 0, 0xffffffff, 0x00000000 },
4176		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
4177		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
4178		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
4179		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
4180		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
4181		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
4182		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
4183		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
4184		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
4185		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
4186		{ 0x684c, 0, 0xffffffff, 0x00000000 },
4187		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
4188		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
4189		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
4190		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
4191		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
4192		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },
4193
4194		{ 0xffff, 0, 0x00000000, 0x00000000 },
4195	};
4196
4197	ret = 0;
4198	is_5709 = 0;
4199	if (CHIP_NUM(bp) == CHIP_NUM_5709)
4200		is_5709 = 1;
4201
4202	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
4203		u32 offset, rw_mask, ro_mask, save_val, val;
4204		u16 flags = reg_tbl[i].flags;
4205
4206		if (is_5709 && (flags & BNX2_FL_NOT_5709))
4207			continue;
4208
4209		offset = (u32) reg_tbl[i].offset;
4210		rw_mask = reg_tbl[i].rw_mask;
4211		ro_mask = reg_tbl[i].ro_mask;
4212
4213		save_val = readl(bp->regview + offset);
4214
4215		writel(0, bp->regview + offset);
4216
4217		val = readl(bp->regview + offset);
4218		if ((val & rw_mask) != 0) {
4219			goto reg_test_err;
4220		}
4221
4222		if ((val & ro_mask) != (save_val & ro_mask)) {
4223			goto reg_test_err;
4224		}
4225
4226		writel(0xffffffff, bp->regview + offset);
4227
4228		val = readl(bp->regview + offset);
4229		if ((val & rw_mask) != rw_mask) {
4230			goto reg_test_err;
4231		}
4232
4233		if ((val & ro_mask) != (save_val & ro_mask)) {
4234			goto reg_test_err;
4235		}
4236
4237		writel(save_val, bp->regview + offset);
4238		continue;
4239
4240reg_test_err:
4241		writel(save_val, bp->regview + offset);
4242		ret = -ENODEV;
4243		break;
4244	}
4245	return ret;
4246}
4247
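/* Walk a block of on-chip memory through the indirect register
 * interface, writing each test pattern to every 32-bit word and
 * verifying the readback.
 */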
4248static int
4249bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4250{
4251	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
4252		0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
4253	int i;
4254
4255	for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4256		u32 offset;
4257
4258		for (offset = 0; offset < size; offset += 4) {
4259
4260			REG_WR_IND(bp, start + offset, test_pattern[i]);
4261
4262			if (REG_RD_IND(bp, start + offset) !=
4263				test_pattern[i]) {
4264				return -ENODEV;
4265			}
4266		}
4267	}
4268	return 0;
4269}
4270
4271static int
4272bnx2_test_memory(struct bnx2 *bp)
4273{
4274	int ret = 0;
4275	int i;
4276	static struct mem_entry {
4277		u32   offset;
4278		u32   len;
4279	} mem_tbl_5706[] = {
4280		{ 0x60000,  0x4000 },
4281		{ 0xa0000,  0x3000 },
4282		{ 0xe0000,  0x4000 },
4283		{ 0x120000, 0x4000 },
4284		{ 0x1a0000, 0x4000 },
4285		{ 0x160000, 0x4000 },
4286		{ 0xffffffff, 0    },
4287	},
4288	mem_tbl_5709[] = {
4289		{ 0x60000,  0x4000 },
4290		{ 0xa0000,  0x3000 },
4291		{ 0xe0000,  0x4000 },
4292		{ 0x120000, 0x4000 },
4293		{ 0x1a0000, 0x4000 },
4294		{ 0xffffffff, 0    },
4295	};
4296	struct mem_entry *mem_tbl;
4297
4298	if (CHIP_NUM(bp) == CHIP_NUM_5709)
4299		mem_tbl = mem_tbl_5709;
4300	else
4301		mem_tbl = mem_tbl_5706;
4302
4303	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4304		if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
4305			mem_tbl[i].len)) != 0) {
4306			return ret;
4307		}
4308	}
4309
4310	return ret;
4311}
4312
4313#define BNX2_MAC_LOOPBACK	0
4314#define BNX2_PHY_LOOPBACK	1
4315
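/* Send one full-size (1514 byte) frame addressed to ourselves through
 * the MAC or PHY loopback path.  The HC_COMMAND coalesce-now writes
 * force status block updates so the tx and rx consumer indices can be
 * polled, and the received frame is then checked against the l2_fhdr
 * error bits, the expected length, and the byte pattern in the
 * payload.
 */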
4316static int
4317bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
4318{
4319	unsigned int pkt_size, num_pkts, i;
4320	struct sk_buff *skb, *rx_skb;
4321	unsigned char *packet;
4322	u16 rx_start_idx, rx_idx;
4323	dma_addr_t map;
4324	struct tx_bd *txbd;
4325	struct sw_bd *rx_buf;
4326	struct l2_fhdr *rx_hdr;
4327	int ret = -ENODEV;
4328
4329	if (loopback_mode == BNX2_MAC_LOOPBACK) {
4330		bp->loopback = MAC_LOOPBACK;
4331		bnx2_set_mac_loopback(bp);
4332	}
4333	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
4334		bp->loopback = PHY_LOOPBACK;
4335		bnx2_set_phy_loopback(bp);
4336	}
4337	else
4338		return -EINVAL;
4339
4340	pkt_size = 1514;
4341	skb = netdev_alloc_skb(bp->dev, pkt_size);
4342	if (!skb)
4343		return -ENOMEM;
4344	packet = skb_put(skb, pkt_size);
4345	memcpy(packet, bp->dev->dev_addr, 6);
4346	memset(packet + 6, 0x0, 8);
4347	for (i = 14; i < pkt_size; i++)
4348		packet[i] = (unsigned char) (i & 0xff);
4349
4350	map = pci_map_single(bp->pdev, skb->data, pkt_size,
4351		PCI_DMA_TODEVICE);
4352
4353	REG_WR(bp, BNX2_HC_COMMAND,
4354	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4355
4356	REG_RD(bp, BNX2_HC_COMMAND);
4357
4358	udelay(5);
4359	rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;
4360
4361	num_pkts = 0;
4362
4363	txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
4364
4365	txbd->tx_bd_haddr_hi = (u64) map >> 32;
4366	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
4367	txbd->tx_bd_mss_nbytes = pkt_size;
4368	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
4369
4370	num_pkts++;
4371	bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
4372	bp->tx_prod_bseq += pkt_size;
4373
4374	REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
4375	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
4376
4377	udelay(100);
4378
4379	REG_WR(bp, BNX2_HC_COMMAND,
4380	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4381
4382	REG_RD(bp, BNX2_HC_COMMAND);
4383
4384	udelay(5);
4385
4386	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
4387	dev_kfree_skb(skb);
4388
4389	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
4390		goto loopback_test_done;
4391	}
4392
4393	rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
4394	if (rx_idx != rx_start_idx + num_pkts) {
4395		goto loopback_test_done;
4396	}
4397
4398	rx_buf = &bp->rx_buf_ring[rx_start_idx];
4399	rx_skb = rx_buf->skb;
4400
4401	rx_hdr = (struct l2_fhdr *) rx_skb->data;
4402	skb_reserve(rx_skb, bp->rx_offset);
4403
4404	pci_dma_sync_single_for_cpu(bp->pdev,
4405		pci_unmap_addr(rx_buf, mapping),
4406		bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4407
4408	if (rx_hdr->l2_fhdr_status &
4409		(L2_FHDR_ERRORS_BAD_CRC |
4410		L2_FHDR_ERRORS_PHY_DECODE |
4411		L2_FHDR_ERRORS_ALIGNMENT |
4412		L2_FHDR_ERRORS_TOO_SHORT |
4413		L2_FHDR_ERRORS_GIANT_FRAME)) {
4414
4415		goto loopback_test_done;
4416	}
4417
4418	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
4419		goto loopback_test_done;
4420	}
4421
4422	for (i = 14; i < pkt_size; i++) {
4423		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
4424			goto loopback_test_done;
4425		}
4426	}
4427
4428	ret = 0;
4429
4430loopback_test_done:
4431	bp->loopback = 0;
4432	return ret;
4433}
4434
4435#define BNX2_MAC_LOOPBACK_FAILED	1
4436#define BNX2_PHY_LOOPBACK_FAILED	2
4437#define BNX2_LOOPBACK_FAILED		(BNX2_MAC_LOOPBACK_FAILED |	\
4438					 BNX2_PHY_LOOPBACK_FAILED)
4439
4440static int
4441bnx2_test_loopback(struct bnx2 *bp)
4442{
4443	int rc = 0;
4444
4445	if (!netif_running(bp->dev))
4446		return BNX2_LOOPBACK_FAILED;
4447
4448	bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4449	spin_lock_bh(&bp->phy_lock);
4450	bnx2_init_phy(bp);
4451	spin_unlock_bh(&bp->phy_lock);
4452	if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4453		rc |= BNX2_MAC_LOOPBACK_FAILED;
4454	if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4455		rc |= BNX2_PHY_LOOPBACK_FAILED;
4456	return rc;
4457}
4458
4459#define NVRAM_SIZE 0x200
4460#define CRC32_RESIDUAL 0xdebb20e3
4461
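/* NVRAM self-test.  The first word must contain the 0x669955aa magic
 * value.  Each of the two 0x100-byte halves read from offset 0x100
 * ends in its own stored CRC32, so running ether_crc_le() over a full
 * half (data plus stored CRC) yields the constant CRC32 residual
 * 0xdebb20e3 when the block is intact.
 */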
4462static int
4463bnx2_test_nvram(struct bnx2 *bp)
4464{
4465	u32 buf[NVRAM_SIZE / 4];
4466	u8 *data = (u8 *) buf;
4467	int rc = 0;
4468	u32 magic, csum;
4469
4470	if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4471		goto test_nvram_done;
4472
	magic = be32_to_cpu(buf[0]);
4474	if (magic != 0x669955aa) {
4475		rc = -ENODEV;
4476		goto test_nvram_done;
4477	}
4478
4479	if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4480		goto test_nvram_done;
4481
4482	csum = ether_crc_le(0x100, data);
4483	if (csum != CRC32_RESIDUAL) {
4484		rc = -ENODEV;
4485		goto test_nvram_done;
4486	}
4487
4488	csum = ether_crc_le(0x100, data + 0x100);
4489	if (csum != CRC32_RESIDUAL) {
4490		rc = -ENODEV;
4491	}
4492
4493test_nvram_done:
4494	return rc;
4495}
4496
4497static int
4498bnx2_test_link(struct bnx2 *bp)
4499{
4500	u32 bmsr;
4501
4502	spin_lock_bh(&bp->phy_lock);
4503	bnx2_enable_bmsr1(bp);
4504	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4505	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4506	bnx2_disable_bmsr1(bp);
4507	spin_unlock_bh(&bp->phy_lock);
4508
4509	if (bmsr & BMSR_LSTATUS) {
4510		return 0;
4511	}
4512	return -ENODEV;
4513}
4514
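/* Interrupt self-test: sample the status block index from the INT_ACK
 * register, force an immediate coalescing event, then poll for up to
 * ~100 ms waiting for the index to advance.  If it never changes, the
 * interrupt presumably never arrived; bnx2_open() uses this to decide
 * whether MSI delivery works.
 */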
4515static int
4516bnx2_test_intr(struct bnx2 *bp)
4517{
4518	int i;
4519	u16 status_idx;
4520
4521	if (!netif_running(bp->dev))
4522		return -ENODEV;
4523
4524	status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4525
4526	/* This register is not touched during run-time. */
4527	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
4528	REG_RD(bp, BNX2_HC_COMMAND);
4529
4530	for (i = 0; i < 10; i++) {
4531		if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4532			status_idx) {
4533
4534			break;
4535		}
4536
4537		msleep_interruptible(10);
4538	}
4539	if (i < 10)
4540		return 0;
4541
4542	return -ENODEV;
4543}
4544
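/* 5706 SerDes workaround, run from the driver timer.  If autoneg has
 * not brought the link up but a signal is detected and the link
 * partner is not sending config words, fall back to forced 1 Gb full
 * duplex (parallel detect).  Once the partner starts sending configs
 * again, re-enable autoneg.
 */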
4545static void
4546bnx2_5706_serdes_timer(struct bnx2 *bp)
4547{
4548	spin_lock(&bp->phy_lock);
4549	if (bp->serdes_an_pending)
4550		bp->serdes_an_pending--;
4551	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4552		u32 bmcr;
4553
4554		bp->current_interval = bp->timer_interval;
4555
4556		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
4557
4558		if (bmcr & BMCR_ANENABLE) {
4559			u32 phy1, phy2;
4560
4561			bnx2_write_phy(bp, 0x1c, 0x7c00);
4562			bnx2_read_phy(bp, 0x1c, &phy1);
4563
4564			bnx2_write_phy(bp, 0x17, 0x0f01);
4565			bnx2_read_phy(bp, 0x15, &phy2);
4566			bnx2_write_phy(bp, 0x17, 0x0f01);
4567			bnx2_read_phy(bp, 0x15, &phy2);
4568
4569			if ((phy1 & 0x10) &&	/* SIGNAL DETECT */
4570				!(phy2 & 0x20)) {	/* no CONFIG */
4571
4572				bmcr &= ~BMCR_ANENABLE;
4573				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4574				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
4575				bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
4576			}
4577		}
4578	}
4579	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
4580		 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
4581		u32 phy2;
4582
4583		bnx2_write_phy(bp, 0x17, 0x0f01);
4584		bnx2_read_phy(bp, 0x15, &phy2);
4585		if (phy2 & 0x20) {
4586			u32 bmcr;
4587
4588			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
4589			bmcr |= BMCR_ANENABLE;
4590			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
4591
4592			bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
4593		}
4594	} else
4595		bp->current_interval = bp->timer_interval;
4596
4597	spin_unlock(&bp->phy_lock);
4598}
4599
4600static void
4601bnx2_5708_serdes_timer(struct bnx2 *bp)
4602{
4603	if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
4604		bp->serdes_an_pending = 0;
4605		return;
4606	}
4607
4608	spin_lock(&bp->phy_lock);
4609	if (bp->serdes_an_pending)
4610		bp->serdes_an_pending--;
4611	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4612		u32 bmcr;
4613
4614		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
4615		if (bmcr & BMCR_ANENABLE) {
4616			bnx2_enable_forced_2g5(bp);
4617			bp->current_interval = SERDES_FORCED_TIMEOUT;
4618		} else {
4619			bnx2_disable_forced_2g5(bp);
4620			bp->serdes_an_pending = 2;
4621			bp->current_interval = bp->timer_interval;
4622		}
4623
4624	} else
4625		bp->current_interval = bp->timer_interval;
4626
4627	spin_unlock(&bp->phy_lock);
4628}
4629
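/* Periodic driver timer.  It writes an incrementing pulse sequence to
 * the shared memory mailbox as a driver heartbeat for the firmware,
 * picks up the firmware rx drop count, kicks a stats update on the
 * 5708 (apparently a workaround), and runs the SerDes timers above.
 * It does nothing while intr_sem is raised.
 */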
4630static void
4631bnx2_timer(unsigned long data)
4632{
4633	struct bnx2 *bp = (struct bnx2 *) data;
4634	u32 msg;
4635
4636	if (!netif_running(bp->dev))
4637		return;
4638
4639	if (atomic_read(&bp->intr_sem) != 0)
4640		goto bnx2_restart_timer;
4641
4642	msg = (u32) ++bp->fw_drv_pulse_wr_seq;
4643	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);
4644
4645	bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
4646
4647	if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
4648		REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
4649					    BNX2_HC_COMMAND_STATS_NOW);
4650
4651	if (bp->phy_flags & PHY_SERDES_FLAG) {
4652		if (CHIP_NUM(bp) == CHIP_NUM_5706)
4653			bnx2_5706_serdes_timer(bp);
4654		else
4655			bnx2_5708_serdes_timer(bp);
4656	}
4657
4658bnx2_restart_timer:
4659	mod_timer(&bp->timer, jiffies + bp->current_interval);
4660}
4661
4662static int
4663bnx2_request_irq(struct bnx2 *bp)
4664{
4665	struct net_device *dev = bp->dev;
4666	int rc = 0;
4667
4668	if (bp->flags & USING_MSI_FLAG) {
4669		irq_handler_t	fn = bnx2_msi;
4670
4671		if (bp->flags & ONE_SHOT_MSI_FLAG)
4672			fn = bnx2_msi_1shot;
4673
4674		rc = request_irq(bp->pdev->irq, fn, 0, dev->name, dev);
4675	} else
4676		rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4677				 IRQF_SHARED, dev->name, dev);
4678	return rc;
4679}
4680
4681static void
4682bnx2_free_irq(struct bnx2 *bp)
4683{
4684	struct net_device *dev = bp->dev;
4685
4686	if (bp->flags & USING_MSI_FLAG) {
4687		free_irq(bp->pdev->irq, dev);
4688		pci_disable_msi(bp->pdev);
4689		bp->flags &= ~(USING_MSI_FLAG | ONE_SHOT_MSI_FLAG);
4690	} else
4691		free_irq(bp->pdev->irq, dev);
4692}
4693
4694/* Called with rtnl_lock */
4695static int
4696bnx2_open(struct net_device *dev)
4697{
4698	struct bnx2 *bp = netdev_priv(dev);
4699	int rc;
4700
4701	netif_carrier_off(dev);
4702
4703	bnx2_set_power_state(bp, PCI_D0);
4704	bnx2_disable_int(bp);
4705
4706	rc = bnx2_alloc_mem(bp);
4707	if (rc)
4708		return rc;
4709
4710	if ((bp->flags & MSI_CAP_FLAG) && !disable_msi) {
4711		if (pci_enable_msi(bp->pdev) == 0) {
4712			bp->flags |= USING_MSI_FLAG;
4713			if (CHIP_NUM(bp) == CHIP_NUM_5709)
4714				bp->flags |= ONE_SHOT_MSI_FLAG;
4715		}
4716	}
4717	rc = bnx2_request_irq(bp);
4718
4719	if (rc) {
4720		bnx2_free_mem(bp);
4721		return rc;
4722	}
4723
4724	rc = bnx2_init_nic(bp);
4725
4726	if (rc) {
4727		bnx2_free_irq(bp);
4728		bnx2_free_skbs(bp);
4729		bnx2_free_mem(bp);
4730		return rc;
4731	}
4732
4733	mod_timer(&bp->timer, jiffies + bp->current_interval);
4734
4735	atomic_set(&bp->intr_sem, 0);
4736
4737	bnx2_enable_int(bp);
4738
4739	if (bp->flags & USING_MSI_FLAG) {
		/* Test MSI to make sure it is working.
		 * If the MSI test fails, go back to INTx mode.
		 */
4743		if (bnx2_test_intr(bp) != 0) {
4744			printk(KERN_WARNING PFX "%s: No interrupt was generated"
4745			       " using MSI, switching to INTx mode. Please"
4746			       " report this failure to the PCI maintainer"
4747			       " and include system chipset information.\n",
4748			       bp->dev->name);
4749
4750			bnx2_disable_int(bp);
4751			bnx2_free_irq(bp);
4752
4753			rc = bnx2_init_nic(bp);
4754
4755			if (!rc)
4756				rc = bnx2_request_irq(bp);
4757
4758			if (rc) {
4759				bnx2_free_skbs(bp);
4760				bnx2_free_mem(bp);
4761				del_timer_sync(&bp->timer);
4762				return rc;
4763			}
4764			bnx2_enable_int(bp);
4765		}
4766	}
4767	if (bp->flags & USING_MSI_FLAG) {
4768		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
4769	}
4770
4771	netif_start_queue(dev);
4772
4773	return 0;
4774}
4775
4776static void
4777bnx2_reset_task(struct work_struct *work)
4778{
4779	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
4780
4781	if (!netif_running(bp->dev))
4782		return;
4783
4784	bp->in_reset_task = 1;
4785	bnx2_netif_stop(bp);
4786
4787	bnx2_init_nic(bp);
4788
4789	atomic_set(&bp->intr_sem, 1);
4790	bnx2_netif_start(bp);
4791	bp->in_reset_task = 0;
4792}
4793
4794static void
4795bnx2_tx_timeout(struct net_device *dev)
4796{
4797	struct bnx2 *bp = netdev_priv(dev);
4798
	/* This allows the netif to be shut down gracefully before resetting */
4800	schedule_work(&bp->reset_task);
4801}
4802
4803#ifdef BCM_VLAN
4804/* Called with rtnl_lock */
4805static void
4806bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
4807{
4808	struct bnx2 *bp = netdev_priv(dev);
4809
4810	bnx2_netif_stop(bp);
4811
4812	bp->vlgrp = vlgrp;
4813	bnx2_set_rx_mode(dev);
4814
4815	bnx2_netif_start(bp);
4816}
4817#endif
4818
4819/* Called with netif_tx_lock.
4820 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
4821 * netif_wake_queue().
4822 */
4823static int
4824bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4825{
4826	struct bnx2 *bp = netdev_priv(dev);
4827	dma_addr_t mapping;
4828	struct tx_bd *txbd;
4829	struct sw_bd *tx_buf;
4830	u32 len, vlan_tag_flags, last_frag, mss;
4831	u16 prod, ring_prod;
4832	int i;
4833
4834	if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
4835		netif_stop_queue(dev);
4836		printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
4837			dev->name);
4838
4839		return NETDEV_TX_BUSY;
4840	}
4841	len = skb_headlen(skb);
4842	prod = bp->tx_prod;
4843	ring_prod = TX_RING_IDX(prod);
4844
4845	vlan_tag_flags = 0;
4846	if (skb->ip_summed == CHECKSUM_PARTIAL) {
4847		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4848	}
4849
4850	if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
4851		vlan_tag_flags |=
4852			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
4853	}
4854	if ((mss = skb_shinfo(skb)->gso_size)) {
4855		u32 tcp_opt_len, ip_tcp_len;
4856		struct iphdr *iph;
4857
4858		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
4859
4860		tcp_opt_len = tcp_optlen(skb);
4861
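		/* For TSO over IPv6, the transport header offset (in
		 * 8-byte units, relative to the end of a standard
		 * Ethernet + IPv6 header) is scattered across several
		 * small fields in the BD flags and mss words below.
		 */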
4862		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
4863			u32 tcp_off = skb_transport_offset(skb) -
4864				      sizeof(struct ipv6hdr) - ETH_HLEN;
4865
4866			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
4867					  TX_BD_FLAGS_SW_FLAGS;
4868			if (likely(tcp_off == 0))
4869				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
4870			else {
4871				tcp_off >>= 3;
4872				vlan_tag_flags |= ((tcp_off & 0x3) <<
4873						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
4874						  ((tcp_off & 0x10) <<
4875						   TX_BD_FLAGS_TCP6_OFF4_SHL);
4876				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
4877			}
4878		} else {
4879			if (skb_header_cloned(skb) &&
4880			    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4881				dev_kfree_skb(skb);
4882				return NETDEV_TX_OK;
4883			}
4884
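			/* For IPv4 TSO, seed the headers for the chip:
			 * tot_len is set to the length of one full
			 * segment, the IP checksum is cleared, and the
			 * TCP checksum is primed with the pseudo-header
			 * sum so the hardware can finish it per segment.
			 */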
4885			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
4886
4887			iph = ip_hdr(skb);
4888			iph->check = 0;
4889			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
4890			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
4891								 iph->daddr, 0,
4892								 IPPROTO_TCP,
4893								 0);
4894			if (tcp_opt_len || (iph->ihl > 5)) {
4895				vlan_tag_flags |= ((iph->ihl - 5) +
4896						   (tcp_opt_len >> 2)) << 8;
4897			}
4898		}
4899	} else
4900		mss = 0;
4901
4902	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4903
4904	tx_buf = &bp->tx_buf_ring[ring_prod];
4905	tx_buf->skb = skb;
4906	pci_unmap_addr_set(tx_buf, mapping, mapping);
4907
4908	txbd = &bp->tx_desc_ring[ring_prod];
4909
4910	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4911	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4912	txbd->tx_bd_mss_nbytes = len | (mss << 16);
4913	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
4914
4915	last_frag = skb_shinfo(skb)->nr_frags;
4916
4917	for (i = 0; i < last_frag; i++) {
4918		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4919
4920		prod = NEXT_TX_BD(prod);
4921		ring_prod = TX_RING_IDX(prod);
4922		txbd = &bp->tx_desc_ring[ring_prod];
4923
4924		len = frag->size;
4925		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
4926			len, PCI_DMA_TODEVICE);
4927		pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
4928				mapping, mapping);
4929
4930		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4931		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4932		txbd->tx_bd_mss_nbytes = len | (mss << 16);
4933		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
4934
4935	}
4936	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
4937
4938	prod = NEXT_TX_BD(prod);
4939	bp->tx_prod_bseq += skb->len;
4940
4941	REG_WR16(bp, bp->tx_bidx_addr, prod);
4942	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
4943
4944	mmiowb();
4945
4946	bp->tx_prod = prod;
4947	dev->trans_start = jiffies;
4948
4949	if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
4950		netif_stop_queue(dev);
4951		if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
4952			netif_wake_queue(dev);
4953	}
4954
4955	return NETDEV_TX_OK;
4956}
4957
4958/* Called with rtnl_lock */
4959static int
4960bnx2_close(struct net_device *dev)
4961{
4962	struct bnx2 *bp = netdev_priv(dev);
4963	u32 reset_code;
4964
	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to
	 * take the rtnl_lock, which we are holding.
	 */
4969	while (bp->in_reset_task)
4970		msleep(1);
4971
4972	bnx2_netif_stop(bp);
4973	del_timer_sync(&bp->timer);
4974	if (bp->flags & NO_WOL_FLAG)
4975		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
4976	else if (bp->wol)
4977		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
4978	else
4979		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
4980	bnx2_reset_chip(bp, reset_code);
4981	bnx2_free_irq(bp);
4982	bnx2_free_skbs(bp);
4983	bnx2_free_mem(bp);
4984	bp->link_up = 0;
4985	netif_carrier_off(bp->dev);
4986	bnx2_set_power_state(bp, PCI_D3hot);
4987	return 0;
4988}
4989
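/* The chip keeps 64-bit counters as _hi/_lo register pairs.  On
 * 64-bit hosts both halves are combined; on 32-bit hosts only the low
 * 32 bits fit in the unsigned long netdev stats fields.
 */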
#define GET_NET_STATS64(ctr)					\
	((unsigned long) ((unsigned long) (ctr##_hi) << 32) +	\
	 (unsigned long) (ctr##_lo))
4993
4994#define GET_NET_STATS32(ctr)		\
4995	(ctr##_lo)
4996
4997#if (BITS_PER_LONG == 64)
4998#define GET_NET_STATS	GET_NET_STATS64
4999#else
5000#define GET_NET_STATS	GET_NET_STATS32
5001#endif
5002
5003static struct net_device_stats *
5004bnx2_get_stats(struct net_device *dev)
5005{
5006	struct bnx2 *bp = netdev_priv(dev);
5007	struct statistics_block *stats_blk = bp->stats_blk;
5008	struct net_device_stats *net_stats = &bp->net_stats;
5009
5010	if (bp->stats_blk == NULL) {
5011		return net_stats;
5012	}
5013	net_stats->rx_packets =
5014		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
5015		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
5016		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
5017
5018	net_stats->tx_packets =
5019		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
5020		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
5021		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
5022
5023	net_stats->rx_bytes =
5024		GET_NET_STATS(stats_blk->stat_IfHCInOctets);
5025
5026	net_stats->tx_bytes =
5027		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
5028
5029	net_stats->multicast =
5030		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
5031
5032	net_stats->collisions =
5033		(unsigned long) stats_blk->stat_EtherStatsCollisions;
5034
5035	net_stats->rx_length_errors =
5036		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
5037		stats_blk->stat_EtherStatsOverrsizePkts);
5038
5039	net_stats->rx_over_errors =
5040		(unsigned long) stats_blk->stat_IfInMBUFDiscards;
5041
5042	net_stats->rx_frame_errors =
5043		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
5044
5045	net_stats->rx_crc_errors =
5046		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
5047
5048	net_stats->rx_errors = net_stats->rx_length_errors +
5049		net_stats->rx_over_errors + net_stats->rx_frame_errors +
5050		net_stats->rx_crc_errors;
5051
	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);
5055
5056	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
5057	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
5058		net_stats->tx_carrier_errors = 0;
5059	else {
5060		net_stats->tx_carrier_errors =
5061			(unsigned long)
5062			stats_blk->stat_Dot3StatsCarrierSenseErrors;
5063	}
5064
	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors +
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;
5071
5072	net_stats->rx_missed_errors =
5073		(unsigned long) (stats_blk->stat_IfInMBUFDiscards +
5074		stats_blk->stat_FwRxDrop);
5075
5076	return net_stats;
5077}
5078
5079/* All ethtool functions called with rtnl_lock */
5080
5081static int
5082bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5083{
5084	struct bnx2 *bp = netdev_priv(dev);
5085
5086	cmd->supported = SUPPORTED_Autoneg;
5087	if (bp->phy_flags & PHY_SERDES_FLAG) {
5088		cmd->supported |= SUPPORTED_1000baseT_Full |
5089			SUPPORTED_FIBRE;
5090		if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
5091			cmd->supported |= SUPPORTED_2500baseX_Full;
5092
5093		cmd->port = PORT_FIBRE;
5094	}
5095	else {
5096		cmd->supported |= SUPPORTED_10baseT_Half |
5097			SUPPORTED_10baseT_Full |
5098			SUPPORTED_100baseT_Half |
5099			SUPPORTED_100baseT_Full |
5100			SUPPORTED_1000baseT_Full |
5101			SUPPORTED_TP;
5102
5103		cmd->port = PORT_TP;
5104	}
5105
5106	cmd->advertising = bp->advertising;
5107
5108	if (bp->autoneg & AUTONEG_SPEED) {
5109		cmd->autoneg = AUTONEG_ENABLE;
5110	}
5111	else {
5112		cmd->autoneg = AUTONEG_DISABLE;
5113	}
5114
5115	if (netif_carrier_ok(dev)) {
5116		cmd->speed = bp->line_speed;
5117		cmd->duplex = bp->duplex;
5118	}
5119	else {
5120		cmd->speed = -1;
5121		cmd->duplex = -1;
5122	}
5123
5124	cmd->transceiver = XCVR_INTERNAL;
5125	cmd->phy_address = bp->phy_addr;
5126
5127	return 0;
5128}
5129
5130static int
5131bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5132{
5133	struct bnx2 *bp = netdev_priv(dev);
5134	u8 autoneg = bp->autoneg;
5135	u8 req_duplex = bp->req_duplex;
5136	u16 req_line_speed = bp->req_line_speed;
5137	u32 advertising = bp->advertising;
5138
5139	if (cmd->autoneg == AUTONEG_ENABLE) {
5140		autoneg |= AUTONEG_SPEED;
5141
5142		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
5143
		/* allow advertising exactly one speed */
5145		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
5146			(cmd->advertising == ADVERTISED_10baseT_Full) ||
5147			(cmd->advertising == ADVERTISED_100baseT_Half) ||
5148			(cmd->advertising == ADVERTISED_100baseT_Full)) {
5149
5150			if (bp->phy_flags & PHY_SERDES_FLAG)
5151				return -EINVAL;
5152
5153			advertising = cmd->advertising;
5154
5155		} else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
5156			if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
5157				return -EINVAL;
5158		} else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
5159			advertising = cmd->advertising;
5160		}
5161		else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
5162			return -EINVAL;
5163		}
5164		else {
5165			if (bp->phy_flags & PHY_SERDES_FLAG) {
5166				advertising = ETHTOOL_ALL_FIBRE_SPEED;
5167			}
5168			else {
5169				advertising = ETHTOOL_ALL_COPPER_SPEED;
5170			}
5171		}
5172		advertising |= ADVERTISED_Autoneg;
5173	}
5174	else {
5175		if (bp->phy_flags & PHY_SERDES_FLAG) {
5176			if ((cmd->speed != SPEED_1000 &&
5177			     cmd->speed != SPEED_2500) ||
5178			    (cmd->duplex != DUPLEX_FULL))
5179				return -EINVAL;
5180
5181			if (cmd->speed == SPEED_2500 &&
5182			    !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
5183				return -EINVAL;
5184		}
5185		else if (cmd->speed == SPEED_1000) {
5186			return -EINVAL;
5187		}
5188		autoneg &= ~AUTONEG_SPEED;
5189		req_line_speed = cmd->speed;
5190		req_duplex = cmd->duplex;
5191		advertising = 0;
5192	}
5193
5194	bp->autoneg = autoneg;
5195	bp->advertising = advertising;
5196	bp->req_line_speed = req_line_speed;
5197	bp->req_duplex = req_duplex;
5198
5199	spin_lock_bh(&bp->phy_lock);
5200
5201	bnx2_setup_phy(bp);
5202
5203	spin_unlock_bh(&bp->phy_lock);
5204
5205	return 0;
5206}
5207
5208static void
5209bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5210{
5211	struct bnx2 *bp = netdev_priv(dev);
5212
5213	strcpy(info->driver, DRV_MODULE_NAME);
5214	strcpy(info->version, DRV_MODULE_VERSION);
5215	strcpy(info->bus_info, pci_name(bp->pdev));
5216	info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
5217	info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
5218	info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
5219	info->fw_version[1] = info->fw_version[3] = '.';
5220	info->fw_version[5] = 0;
5221}
5222
5223#define BNX2_REGDUMP_LEN		(32 * 1024)
5224
5225static int
5226bnx2_get_regs_len(struct net_device *dev)
5227{
5228	return BNX2_REGDUMP_LEN;
5229}
5230
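/* Dump the register space for ethtool -d.  reg_boundaries[] lists
 * alternating start/end offsets of readable ranges; the holes between
 * them are left zeroed in the output buffer instead of being read
 * (reading unimplemented offsets is presumably unsafe).
 */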
5231static void
5232bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
5233{
5234	u32 *p = _p, i, offset;
5235	u8 *orig_p = _p;
5236	struct bnx2 *bp = netdev_priv(dev);
5237	u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
5238				 0x0800, 0x0880, 0x0c00, 0x0c10,
5239				 0x0c30, 0x0d08, 0x1000, 0x101c,
5240				 0x1040, 0x1048, 0x1080, 0x10a4,
5241				 0x1400, 0x1490, 0x1498, 0x14f0,
5242				 0x1500, 0x155c, 0x1580, 0x15dc,
5243				 0x1600, 0x1658, 0x1680, 0x16d8,
5244				 0x1800, 0x1820, 0x1840, 0x1854,
5245				 0x1880, 0x1894, 0x1900, 0x1984,
5246				 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
5247				 0x1c80, 0x1c94, 0x1d00, 0x1d84,
5248				 0x2000, 0x2030, 0x23c0, 0x2400,
5249				 0x2800, 0x2820, 0x2830, 0x2850,
5250				 0x2b40, 0x2c10, 0x2fc0, 0x3058,
5251				 0x3c00, 0x3c94, 0x4000, 0x4010,
5252				 0x4080, 0x4090, 0x43c0, 0x4458,
5253				 0x4c00, 0x4c18, 0x4c40, 0x4c54,
5254				 0x4fc0, 0x5010, 0x53c0, 0x5444,
5255				 0x5c00, 0x5c18, 0x5c80, 0x5c90,
5256				 0x5fc0, 0x6000, 0x6400, 0x6428,
5257				 0x6800, 0x6848, 0x684c, 0x6860,
5258				 0x6888, 0x6910, 0x8000 };
5259
5260	regs->version = 0;
5261
5262	memset(p, 0, BNX2_REGDUMP_LEN);
5263
5264	if (!netif_running(bp->dev))
5265		return;
5266
5267	i = 0;
5268	offset = reg_boundaries[0];
5269	p += offset;
5270	while (offset < BNX2_REGDUMP_LEN) {
5271		*p++ = REG_RD(bp, offset);
5272		offset += 4;
5273		if (offset == reg_boundaries[i + 1]) {
5274			offset = reg_boundaries[i + 2];
5275			p = (u32 *) (orig_p + offset);
5276			i += 2;
5277		}
5278	}
5279}
5280
5281static void
5282bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5283{
5284	struct bnx2 *bp = netdev_priv(dev);
5285
5286	if (bp->flags & NO_WOL_FLAG) {
5287		wol->supported = 0;
5288		wol->wolopts = 0;
5289	}
5290	else {
5291		wol->supported = WAKE_MAGIC;
5292		if (bp->wol)
5293			wol->wolopts = WAKE_MAGIC;
5294		else
5295			wol->wolopts = 0;
5296	}
5297	memset(&wol->sopass, 0, sizeof(wol->sopass));
5298}
5299
5300static int
5301bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5302{
5303	struct bnx2 *bp = netdev_priv(dev);
5304
5305	if (wol->wolopts & ~WAKE_MAGIC)
5306		return -EINVAL;
5307
5308	if (wol->wolopts & WAKE_MAGIC) {
5309		if (bp->flags & NO_WOL_FLAG)
5310			return -EINVAL;
5311
5312		bp->wol = 1;
5313	}
5314	else {
5315		bp->wol = 0;
5316	}
5317	return 0;
5318}
5319
5320static int
5321bnx2_nway_reset(struct net_device *dev)
5322{
5323	struct bnx2 *bp = netdev_priv(dev);
5324	u32 bmcr;
5325
5326	if (!(bp->autoneg & AUTONEG_SPEED)) {
5327		return -EINVAL;
5328	}
5329
5330	spin_lock_bh(&bp->phy_lock);
5331
	/* Force a link down that is visible on the other side */
5333	if (bp->phy_flags & PHY_SERDES_FLAG) {
5334		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
5335		spin_unlock_bh(&bp->phy_lock);
5336
5337		msleep(20);
5338
5339		spin_lock_bh(&bp->phy_lock);
5340
5341		bp->current_interval = SERDES_AN_TIMEOUT;
5342		bp->serdes_an_pending = 1;
5343		mod_timer(&bp->timer, jiffies + bp->current_interval);
5344	}
5345
5346	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5347	bmcr &= ~BMCR_LOOPBACK;
5348	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
5349
5350	spin_unlock_bh(&bp->phy_lock);
5351
5352	return 0;
5353}
5354
5355static int
5356bnx2_get_eeprom_len(struct net_device *dev)
5357{
5358	struct bnx2 *bp = netdev_priv(dev);
5359
5360	if (bp->flash_info == NULL)
5361		return 0;
5362
5363	return (int) bp->flash_size;
5364}
5365
5366static int
5367bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5368		u8 *eebuf)
5369{
5370	struct bnx2 *bp = netdev_priv(dev);
5371	int rc;
5372
5373	/* parameters already validated in ethtool_get_eeprom */
5374
5375	rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5376
5377	return rc;
5378}
5379
5380static int
5381bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5382		u8 *eebuf)
5383{
5384	struct bnx2 *bp = netdev_priv(dev);
5385	int rc;
5386
5387	/* parameters already validated in ethtool_set_eeprom */
5388
5389	rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5390
5391	return rc;
5392}
5393
5394static int
5395bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5396{
5397	struct bnx2 *bp = netdev_priv(dev);
5398
5399	memset(coal, 0, sizeof(struct ethtool_coalesce));
5400
5401	coal->rx_coalesce_usecs = bp->rx_ticks;
5402	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5403	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5404	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5405
5406	coal->tx_coalesce_usecs = bp->tx_ticks;
5407	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
5408	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
5409	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
5410
5411	coal->stats_block_coalesce_usecs = bp->stats_ticks;
5412
5413	return 0;
5414}
5415
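/* Clamp the ethtool coalescing parameters to what the host coalescing
 * block can hold: the tick fields appear to be 10 bits wide (max
 * 0x3ff) and the frame-count trip fields 8 bits wide (max 0xff).  The
 * nic is re-initialized for the new values to take effect.
 */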
5416static int
5417bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5418{
5419	struct bnx2 *bp = netdev_priv(dev);
5420
5421	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
5422	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
5423
5424	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
5425	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
5426
5427	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
5428	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
5429
5430	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
5431	if (bp->rx_quick_cons_trip_int > 0xff)
5432		bp->rx_quick_cons_trip_int = 0xff;
5433
5434	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5435	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5436
5437	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5438	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5439
5440	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5441	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5442
5443	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5444	if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
5445		0xff;
5446
5447	bp->stats_ticks = coal->stats_block_coalesce_usecs;
5448	if (CHIP_NUM(bp) == CHIP_NUM_5708) {
5449		if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
5450			bp->stats_ticks = USEC_PER_SEC;
5451	}
5452	if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
5453	bp->stats_ticks &= 0xffff00;
5454
5455	if (netif_running(bp->dev)) {
5456		bnx2_netif_stop(bp);
5457		bnx2_init_nic(bp);
5458		bnx2_netif_start(bp);
5459	}
5460
5461	return 0;
5462}
5463
5464static void
5465bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5466{
5467	struct bnx2 *bp = netdev_priv(dev);
5468
5469	ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
5470	ering->rx_mini_max_pending = 0;
5471	ering->rx_jumbo_max_pending = 0;
5472
5473	ering->rx_pending = bp->rx_ring_size;
5474	ering->rx_mini_pending = 0;
5475	ering->rx_jumbo_pending = 0;
5476
5477	ering->tx_max_pending = MAX_TX_DESC_CNT;
5478	ering->tx_pending = bp->tx_ring_size;
5479}
5480
5481static int
5482bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5483{
5484	struct bnx2 *bp = netdev_priv(dev);
5485
5486	if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
5487		(ering->tx_pending > MAX_TX_DESC_CNT) ||
5488		(ering->tx_pending <= MAX_SKB_FRAGS)) {
5489
5490		return -EINVAL;
5491	}
5492	if (netif_running(bp->dev)) {
5493		bnx2_netif_stop(bp);
5494		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5495		bnx2_free_skbs(bp);
5496		bnx2_free_mem(bp);
5497	}
5498
5499	bnx2_set_rx_ring_size(bp, ering->rx_pending);
5500	bp->tx_ring_size = ering->tx_pending;
5501
5502	if (netif_running(bp->dev)) {
5503		int rc;
5504
5505		rc = bnx2_alloc_mem(bp);
5506		if (rc)
5507			return rc;
5508		bnx2_init_nic(bp);
5509		bnx2_netif_start(bp);
5510	}
5511
5512	return 0;
5513}
5514
5515static void
5516bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5517{
5518	struct bnx2 *bp = netdev_priv(dev);
5519
5520	epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5521	epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5522	epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5523}
5524
5525static int
5526bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5527{
5528	struct bnx2 *bp = netdev_priv(dev);
5529
5530	bp->req_flow_ctrl = 0;
5531	if (epause->rx_pause)
5532		bp->req_flow_ctrl |= FLOW_CTRL_RX;
5533	if (epause->tx_pause)
5534		bp->req_flow_ctrl |= FLOW_CTRL_TX;
5535
5536	if (epause->autoneg) {
5537		bp->autoneg |= AUTONEG_FLOW_CTRL;
5538	}
5539	else {
5540		bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5541	}
5542
5543	spin_lock_bh(&bp->phy_lock);
5544
5545	bnx2_setup_phy(bp);
5546
5547	spin_unlock_bh(&bp->phy_lock);
5548
5549	return 0;
5550}
5551
5552static u32
5553bnx2_get_rx_csum(struct net_device *dev)
5554{
5555	struct bnx2 *bp = netdev_priv(dev);
5556
5557	return bp->rx_csum;
5558}
5559
5560static int
5561bnx2_set_rx_csum(struct net_device *dev, u32 data)
5562{
5563	struct bnx2 *bp = netdev_priv(dev);
5564
5565	bp->rx_csum = data;
5566	return 0;
5567}
5568
5569static int
5570bnx2_set_tso(struct net_device *dev, u32 data)
5571{
5572	struct bnx2 *bp = netdev_priv(dev);
5573
5574	if (data) {
5575		dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
5576		if (CHIP_NUM(bp) == CHIP_NUM_5709)
5577			dev->features |= NETIF_F_TSO6;
5578	} else
5579		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
5580				   NETIF_F_TSO_ECN);
5581	return 0;
5582}
5583
5584#define BNX2_NUM_STATS 46
5585
5586static struct {
5587	char string[ETH_GSTRING_LEN];
5588} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
5589	{ "rx_bytes" },
5590	{ "rx_error_bytes" },
5591	{ "tx_bytes" },
5592	{ "tx_error_bytes" },
5593	{ "rx_ucast_packets" },
5594	{ "rx_mcast_packets" },
5595	{ "rx_bcast_packets" },
5596	{ "tx_ucast_packets" },
5597	{ "tx_mcast_packets" },
5598	{ "tx_bcast_packets" },
5599	{ "tx_mac_errors" },
5600	{ "tx_carrier_errors" },
5601	{ "rx_crc_errors" },
5602	{ "rx_align_errors" },
5603	{ "tx_single_collisions" },
5604	{ "tx_multi_collisions" },
5605	{ "tx_deferred" },
5606	{ "tx_excess_collisions" },
5607	{ "tx_late_collisions" },
5608	{ "tx_total_collisions" },
5609	{ "rx_fragments" },
5610	{ "rx_jabbers" },
5611	{ "rx_undersize_packets" },
5612	{ "rx_oversize_packets" },
5613	{ "rx_64_byte_packets" },
5614	{ "rx_65_to_127_byte_packets" },
5615	{ "rx_128_to_255_byte_packets" },
5616	{ "rx_256_to_511_byte_packets" },
5617	{ "rx_512_to_1023_byte_packets" },
5618	{ "rx_1024_to_1522_byte_packets" },
5619	{ "rx_1523_to_9022_byte_packets" },
5620	{ "tx_64_byte_packets" },
5621	{ "tx_65_to_127_byte_packets" },
5622	{ "tx_128_to_255_byte_packets" },
5623	{ "tx_256_to_511_byte_packets" },
5624	{ "tx_512_to_1023_byte_packets" },
5625	{ "tx_1024_to_1522_byte_packets" },
5626	{ "tx_1523_to_9022_byte_packets" },
5627	{ "rx_xon_frames" },
5628	{ "rx_xoff_frames" },
5629	{ "tx_xon_frames" },
5630	{ "tx_xoff_frames" },
5631	{ "rx_mac_ctrl_frames" },
5632	{ "rx_filtered_packets" },
5633	{ "rx_discards" },
5634	{ "rx_fw_discards" },
5635};
5636
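/* Offsets of the counters above within statistics_block, in 32-bit
 * words.  This table, the string table above, and the length tables
 * below must be kept in the same order.
 */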
5637#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
5638
5639static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
5640    STATS_OFFSET32(stat_IfHCInOctets_hi),
5641    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
5642    STATS_OFFSET32(stat_IfHCOutOctets_hi),
5643    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
5644    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
5645    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
5646    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
5647    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
5648    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
5649    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
5650    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
5651    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
5652    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
5653    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
5654    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
5655    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
5656    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
5657    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
5658    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
5659    STATS_OFFSET32(stat_EtherStatsCollisions),
5660    STATS_OFFSET32(stat_EtherStatsFragments),
5661    STATS_OFFSET32(stat_EtherStatsJabbers),
5662    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
5663    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
5664    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
5665    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
5666    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
5667    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
5668    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
5669    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
5670    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
5671    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
5672    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
5673    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
5674    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
5675    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
5676    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
5677    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
5678    STATS_OFFSET32(stat_XonPauseFramesReceived),
5679    STATS_OFFSET32(stat_XoffPauseFramesReceived),
5680    STATS_OFFSET32(stat_OutXonSent),
5681    STATS_OFFSET32(stat_OutXoffSent),
5682    STATS_OFFSET32(stat_MacControlFramesReceived),
5683    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
5684    STATS_OFFSET32(stat_IfInMBUFDiscards),
5685    STATS_OFFSET32(stat_FwRxDrop),
5686};
5687
5688/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
5689 * skipped because of errata.
5690 */
5691static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
5692	8,0,8,8,8,8,8,8,8,8,
5693	4,0,4,4,4,4,4,4,4,4,
5694	4,4,4,4,4,4,4,4,4,4,
5695	4,4,4,4,4,4,4,4,4,4,
5696	4,4,4,4,4,4,
5697};
5698
5699static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
5700	8,0,8,8,8,8,8,8,8,8,
5701	4,4,4,4,4,4,4,4,4,4,
5702	4,4,4,4,4,4,4,4,4,4,
5703	4,4,4,4,4,4,4,4,4,4,
5704	4,4,4,4,4,4,
5705};
5706
5707#define BNX2_NUM_TESTS 6
5708
5709static struct {
5710	char string[ETH_GSTRING_LEN];
5711} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
5712	{ "register_test (offline)" },
5713	{ "memory_test (offline)" },
5714	{ "loopback_test (offline)" },
5715	{ "nvram_test (online)" },
5716	{ "interrupt_test (online)" },
5717	{ "link_test (online)" },
5718};
5719
5720static int
5721bnx2_self_test_count(struct net_device *dev)
5722{
5723	return BNX2_NUM_TESTS;
5724}
5725
5726static void
5727bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
5728{
5729	struct bnx2 *bp = netdev_priv(dev);
5730
5731	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
5732	if (etest->flags & ETH_TEST_FL_OFFLINE) {
5733		int i;
5734
5735		bnx2_netif_stop(bp);
5736		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
5737		bnx2_free_skbs(bp);
5738
5739		if (bnx2_test_registers(bp) != 0) {
5740			buf[0] = 1;
5741			etest->flags |= ETH_TEST_FL_FAILED;
5742		}
5743		if (bnx2_test_memory(bp) != 0) {
5744			buf[1] = 1;
5745			etest->flags |= ETH_TEST_FL_FAILED;
5746		}
5747		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
5748			etest->flags |= ETH_TEST_FL_FAILED;
5749
5750		if (!netif_running(bp->dev)) {
5751			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5752		}
5753		else {
5754			bnx2_init_nic(bp);
5755			bnx2_netif_start(bp);
5756		}
5757
5758		/* wait for link up */
5759		for (i = 0; i < 7; i++) {
5760			if (bp->link_up)
5761				break;
5762			msleep_interruptible(1000);
5763		}
5764	}
5765
5766	if (bnx2_test_nvram(bp) != 0) {
5767		buf[3] = 1;
5768		etest->flags |= ETH_TEST_FL_FAILED;
5769	}
5770	if (bnx2_test_intr(bp) != 0) {
5771		buf[4] = 1;
5772		etest->flags |= ETH_TEST_FL_FAILED;
5773	}
5774
5775	if (bnx2_test_link(bp) != 0) {
5776		buf[5] = 1;
5777		etest->flags |= ETH_TEST_FL_FAILED;
5778
5779	}
5780}
5781
5782static void
5783bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
5784{
5785	switch (stringset) {
5786	case ETH_SS_STATS:
5787		memcpy(buf, bnx2_stats_str_arr,
5788			sizeof(bnx2_stats_str_arr));
5789		break;
5790	case ETH_SS_TEST:
5791		memcpy(buf, bnx2_tests_str_arr,
5792			sizeof(bnx2_tests_str_arr));
5793		break;
5794	}
5795}
5796
5797static int
5798bnx2_get_stats_count(struct net_device *dev)
5799{
5800	return BNX2_NUM_STATS;
5801}
5802
5803static void
5804bnx2_get_ethtool_stats(struct net_device *dev,
5805		struct ethtool_stats *stats, u64 *buf)
5806{
5807	struct bnx2 *bp = netdev_priv(dev);
5808	int i;
5809	u32 *hw_stats = (u32 *) bp->stats_blk;
5810	u8 *stats_len_arr = NULL;
5811
5812	if (hw_stats == NULL) {
5813		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
5814		return;
5815	}
5816
5817	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
5818	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
5819	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
5820	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
5821		stats_len_arr = bnx2_5706_stats_len_arr;
5822	else
5823		stats_len_arr = bnx2_5708_stats_len_arr;
5824
5825	for (i = 0; i < BNX2_NUM_STATS; i++) {
5826		if (stats_len_arr[i] == 0) {
5827			/* skip this counter */
5828			buf[i] = 0;
5829			continue;
5830		}
5831		if (stats_len_arr[i] == 4) {
5832			/* 4-byte counter */
5833			buf[i] = (u64)
5834				*(hw_stats + bnx2_stats_offset_arr[i]);
5835			continue;
5836		}
5837		/* 8-byte counter */
5838		buf[i] = (((u64) *(hw_stats +
5839					bnx2_stats_offset_arr[i])) << 32) +
5840				*(hw_stats + bnx2_stats_offset_arr[i] + 1);
5841	}
5842}
5843
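/* ethtool port identify: blink the LEDs for the requested number of
 * seconds (default 2) by toggling the EMAC LED override bits every
 * 500 ms, then restore the original LED mode.
 */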
5844static int
5845bnx2_phys_id(struct net_device *dev, u32 data)
5846{
5847	struct bnx2 *bp = netdev_priv(dev);
5848	int i;
5849	u32 save;
5850
5851	if (data == 0)
5852		data = 2;
5853
5854	save = REG_RD(bp, BNX2_MISC_CFG);
5855	REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
5856
5857	for (i = 0; i < (data * 2); i++) {
5858		if ((i % 2) == 0) {
5859			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
5860		}
5861		else {
5862			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
5863				BNX2_EMAC_LED_1000MB_OVERRIDE |
5864				BNX2_EMAC_LED_100MB_OVERRIDE |
5865				BNX2_EMAC_LED_10MB_OVERRIDE |
5866				BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
5867				BNX2_EMAC_LED_TRAFFIC);
5868		}
5869		msleep_interruptible(500);
5870		if (signal_pending(current))
5871			break;
5872	}
5873	REG_WR(bp, BNX2_EMAC_LED, 0);
5874	REG_WR(bp, BNX2_MISC_CFG, save);
5875	return 0;
5876}
5877
5878static int
5879bnx2_set_tx_csum(struct net_device *dev, u32 data)
5880{
5881	struct bnx2 *bp = netdev_priv(dev);
5882
5883	if (CHIP_NUM(bp) == CHIP_NUM_5709)
5884		return (ethtool_op_set_tx_hw_csum(dev, data));
5885	else
5886		return (ethtool_op_set_tx_csum(dev, data));
5887}
5888
5889static const struct ethtool_ops bnx2_ethtool_ops = {
5890	.get_settings		= bnx2_get_settings,
5891	.set_settings		= bnx2_set_settings,
5892	.get_drvinfo		= bnx2_get_drvinfo,
5893	.get_regs_len		= bnx2_get_regs_len,
5894	.get_regs		= bnx2_get_regs,
5895	.get_wol		= bnx2_get_wol,
5896	.set_wol		= bnx2_set_wol,
5897	.nway_reset		= bnx2_nway_reset,
5898	.get_link		= ethtool_op_get_link,
5899	.get_eeprom_len		= bnx2_get_eeprom_len,
5900	.get_eeprom		= bnx2_get_eeprom,
5901	.set_eeprom		= bnx2_set_eeprom,
5902	.get_coalesce		= bnx2_get_coalesce,
5903	.set_coalesce		= bnx2_set_coalesce,
5904	.get_ringparam		= bnx2_get_ringparam,
5905	.set_ringparam		= bnx2_set_ringparam,
5906	.get_pauseparam		= bnx2_get_pauseparam,
5907	.set_pauseparam		= bnx2_set_pauseparam,
5908	.get_rx_csum		= bnx2_get_rx_csum,
5909	.set_rx_csum		= bnx2_set_rx_csum,
5910	.get_tx_csum		= ethtool_op_get_tx_csum,
5911	.set_tx_csum		= bnx2_set_tx_csum,
5912	.get_sg			= ethtool_op_get_sg,
5913	.set_sg			= ethtool_op_set_sg,
5914	.get_tso		= ethtool_op_get_tso,
5915	.set_tso		= bnx2_set_tso,
5916	.self_test_count	= bnx2_self_test_count,
5917	.self_test		= bnx2_self_test,
5918	.get_strings		= bnx2_get_strings,
5919	.phys_id		= bnx2_phys_id,
5920	.get_stats_count	= bnx2_get_stats_count,
5921	.get_ethtool_stats	= bnx2_get_ethtool_stats,
5922	.get_perm_addr		= ethtool_op_get_perm_addr,
5923};
5924
5925/* Called with rtnl_lock */
5926static int
5927bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5928{
5929	struct mii_ioctl_data *data = if_mii(ifr);
5930	struct bnx2 *bp = netdev_priv(dev);
5931	int err;
5932
5933	switch(cmd) {
5934	case SIOCGMIIPHY:
5935		data->phy_id = bp->phy_addr;
5936
5937		/* fallthru */
5938	case SIOCGMIIREG: {
5939		u32 mii_regval;
5940
5941		if (!netif_running(dev))
5942			return -EAGAIN;
5943
5944		spin_lock_bh(&bp->phy_lock);
5945		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
5946		spin_unlock_bh(&bp->phy_lock);
5947
5948		data->val_out = mii_regval;
5949
5950		return err;
5951	}
5952
5953	case SIOCSMIIREG:
5954		if (!capable(CAP_NET_ADMIN))
5955			return -EPERM;
5956
5957		if (!netif_running(dev))
5958			return -EAGAIN;
5959
5960		spin_lock_bh(&bp->phy_lock);
5961		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
5962		spin_unlock_bh(&bp->phy_lock);
5963
5964		return err;
5965
5966	default:
5967		/* do nothing */
5968		break;
5969	}
5970	return -EOPNOTSUPP;
5971}
5972
5973/* Called with rtnl_lock */
5974static int
5975bnx2_change_mac_addr(struct net_device *dev, void *p)
5976{
5977	struct sockaddr *addr = p;
5978	struct bnx2 *bp = netdev_priv(dev);
5979
5980	if (!is_valid_ether_addr(addr->sa_data))
5981		return -EINVAL;
5982
5983	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5984	if (netif_running(dev))
5985		bnx2_set_mac_addr(bp);
5986
5987	return 0;
5988}
5989
5990/* Called with rtnl_lock */
5991static int
5992bnx2_change_mtu(struct net_device *dev, int new_mtu)
5993{
5994	struct bnx2 *bp = netdev_priv(dev);
5995
5996	if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
5997		((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
5998		return -EINVAL;
5999
6000	dev->mtu = new_mtu;
6001	if (netif_running(dev)) {
6002		bnx2_netif_stop(bp);
6003
6004		bnx2_init_nic(bp);
6005
6006		bnx2_netif_start(bp);
6007	}
6008	return 0;
6009}
6010
6011#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6012static void
6013poll_bnx2(struct net_device *dev)
6014{
6015	struct bnx2 *bp = netdev_priv(dev);
6016
6017	disable_irq(bp->pdev->irq);
6018	bnx2_interrupt(bp->pdev->irq, dev);
6019	enable_irq(bp->pdev->irq);
6020}
6021#endif
6022
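/* On the dual-media 5709, work out whether this PCI function is wired
 * to copper or SerDes: the bond id bits decide directly for the 'C'
 * (copper) and 'S' (SerDes) bond options; otherwise the media strap,
 * or its override, is decoded per PCI function.
 */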
6023static void __devinit
6024bnx2_get_5709_media(struct bnx2 *bp)
6025{
6026	u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
6027	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
6028	u32 strap;
6029
6030	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
6031		return;
6032	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
6033		bp->phy_flags |= PHY_SERDES_FLAG;
6034		return;
6035	}
6036
6037	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
6038		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
6039	else
6040		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
6041
6042	if (PCI_FUNC(bp->pdev->devfn) == 0) {
6043		switch (strap) {
6044		case 0x4:
6045		case 0x5:
6046		case 0x6:
6047			bp->phy_flags |= PHY_SERDES_FLAG;
6048			return;
6049		}
6050	} else {
6051		switch (strap) {
6052		case 0x1:
6053		case 0x2:
6054		case 0x4:
6055			bp->phy_flags |= PHY_SERDES_FLAG;
6056			return;
6057		}
6058	}
6059}
6060
6061static void __devinit
6062bnx2_get_pci_speed(struct bnx2 *bp)
6063{
6064	u32 reg;
6065
6066	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
6067	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
6068		u32 clkreg;
6069
6070		bp->flags |= PCIX_FLAG;
6071
6072		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
6073
6074		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
6075		switch (clkreg) {
6076		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
6077			bp->bus_speed_mhz = 133;
6078			break;
6079
6080		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
6081			bp->bus_speed_mhz = 100;
6082			break;
6083
6084		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
6085		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
6086			bp->bus_speed_mhz = 66;
6087			break;
6088
6089		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
6090		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
6091			bp->bus_speed_mhz = 50;
6092			break;
6093
6094		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
6095		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
6096		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
6097			bp->bus_speed_mhz = 33;
6098			break;
6099		}
	} else {
6102		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
6103			bp->bus_speed_mhz = 66;
6104		else
6105			bp->bus_speed_mhz = 33;
6106	}
6107
	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= PCI_32BIT_FLAG;
}
6112
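/* One-time board setup: enable and map the PCI device, size the DMA
 * masks, read the permanent MAC address from shared memory, and choose
 * default ring, coalescing, and PHY parameters.
 */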
6113static int __devinit
6114bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
6115{
6116	struct bnx2 *bp;
6117	unsigned long mem_len;
6118	int rc;
6119	u32 reg;
6120	u64 dma_mask, persist_dma_mask;
6121
6122	SET_MODULE_OWNER(dev);
6123	SET_NETDEV_DEV(dev, &pdev->dev);
6124	bp = netdev_priv(dev);
6125
6126	bp->flags = 0;
6127	bp->phy_flags = 0;
6128
6129	/* enable device (incl. PCI PM wakeup), and bus-mastering */
6130	rc = pci_enable_device(pdev);
6131	if (rc) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
6133		goto err_out;
6134	}
6135
6136	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
6137		dev_err(&pdev->dev,
6138			"Cannot find PCI device base address, aborting.\n");
6139		rc = -ENODEV;
6140		goto err_out_disable;
6141	}
6142
6143	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6144	if (rc) {
6145		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
6146		goto err_out_disable;
6147	}
6148
6149	pci_set_master(pdev);
6150
6151	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
6152	if (bp->pm_cap == 0) {
6153		dev_err(&pdev->dev,
6154			"Cannot find power management capability, aborting.\n");
6155		rc = -EIO;
6156		goto err_out_release;
6157	}
6158
6159	bp->dev = dev;
6160	bp->pdev = pdev;
6161
6162	spin_lock_init(&bp->phy_lock);
6163	spin_lock_init(&bp->indirect_lock);
6164	INIT_WORK(&bp->reset_task, bnx2_reset_task);
6165
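	/* Map BAR 0 up to and including the TX connection's mailbox. */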
6166	dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
6167	mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
6168	dev->mem_end = dev->mem_start + mem_len;
6169	dev->irq = pdev->irq;
6170
6171	bp->regview = ioremap_nocache(dev->base_addr, mem_len);
6172
6173	if (!bp->regview) {
6174		dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
6175		rc = -ENOMEM;
6176		goto err_out_release;
6177	}
6178
	/* Configure byte swap and enable write to the reg_window registers.
	 * Rely on the CPU to do target byte swapping on big endian systems;
	 * the chip's target access swapping will not swap all accesses.
	 */
6183	pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
6184			       BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
6185			       BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
6186
6187	bnx2_set_power_state(bp, PCI_D0);
6188
6189	bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
6190
6191	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6192		if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
6193			dev_err(&pdev->dev,
6194				"Cannot find PCIE capability, aborting.\n");
6195			rc = -EIO;
6196			goto err_out_unmap;
6197		}
6198		bp->flags |= PCIE_FLAG;
6199	} else {
6200		bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
6201		if (bp->pcix_cap == 0) {
6202			dev_err(&pdev->dev,
6203				"Cannot find PCIX capability, aborting.\n");
6204			rc = -EIO;
6205			goto err_out_unmap;
6206		}
6207	}
6208
6209	if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
6210		if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
6211			bp->flags |= MSI_CAP_FLAG;
6212	}
6213
6214	/* 5708 cannot support DMA addresses > 40-bit.  */
6215	if (CHIP_NUM(bp) == CHIP_NUM_5708)
6216		persist_dma_mask = dma_mask = DMA_40BIT_MASK;
6217	else
6218		persist_dma_mask = dma_mask = DMA_64BIT_MASK;
6219
6220	/* Configure DMA attributes. */
6221	if (pci_set_dma_mask(pdev, dma_mask) == 0) {
6222		dev->features |= NETIF_F_HIGHDMA;
6223		rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
6224		if (rc) {
6225			dev_err(&pdev->dev,
6226				"pci_set_consistent_dma_mask failed, aborting.\n");
6227			goto err_out_unmap;
6228		}
6229	} else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
6230		dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
6231		goto err_out_unmap;
6232	}
6233
6234	if (!(bp->flags & PCIE_FLAG))
6235		bnx2_get_pci_speed(bp);
6236
6237	/* 5706A0 may falsely detect SERR and PERR. */
6238	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6239		reg = REG_RD(bp, PCI_COMMAND);
6240		reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
6241		REG_WR(bp, PCI_COMMAND, reg);
	} else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
		   !(bp->flags & PCIX_FLAG)) {
		dev_err(&pdev->dev,
			"5706 A1 can only be used in a PCIX bus, aborting.\n");
		/* rc may still be 0 here; return an explicit error. */
		rc = -EPERM;
		goto err_out_unmap;
	}
6250
6251	bnx2_init_nvram(bp);
6252
6253	reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
6254
6255	if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
6256	    BNX2_SHM_HDR_SIGNATURE_SIG) {
6257		u32 off = PCI_FUNC(pdev->devfn) << 2;
6258
6259		bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
6260	} else
6261		bp->shmem_base = HOST_VIEW_SHMEM_BASE;
6262
6263	/* Get the permanent MAC address.  First we need to make sure the
6264	 * firmware is actually running.
6265	 */
6266	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
6267
6268	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
6269	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
6270		dev_err(&pdev->dev, "Firmware not running, aborting.\n");
6271		rc = -ENODEV;
6272		goto err_out_unmap;
6273	}
6274
6275	bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
6276
6277	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
6278	bp->mac_addr[0] = (u8) (reg >> 8);
6279	bp->mac_addr[1] = (u8) reg;
6280
6281	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
6282	bp->mac_addr[2] = (u8) (reg >> 24);
6283	bp->mac_addr[3] = (u8) (reg >> 16);
6284	bp->mac_addr[4] = (u8) (reg >> 8);
6285	bp->mac_addr[5] = (u8) reg;
6286
6287	bp->tx_ring_size = MAX_TX_DESC_CNT;
6288	bnx2_set_rx_ring_size(bp, 255);
6289
6290	bp->rx_csum = 1;
6291
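	/* Received frames are preceded by an l2_fhdr status header; the
	 * extra 2 bytes 32-bit-align the IP header that follows the
	 * 14-byte Ethernet header.
	 */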
6292	bp->rx_offset = sizeof(struct l2_fhdr) + 2;
6293
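	/* Default interrupt coalescing parameters; tick values are in usec. */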
6294	bp->tx_quick_cons_trip_int = 20;
6295	bp->tx_quick_cons_trip = 20;
6296	bp->tx_ticks_int = 80;
6297	bp->tx_ticks = 80;
6298
6299	bp->rx_quick_cons_trip_int = 6;
6300	bp->rx_quick_cons_trip = 6;
6301	bp->rx_ticks_int = 18;
6302	bp->rx_ticks = 18;
6303
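	/* Statistics update interval in usec; the hardware does not use the
	 * low 8 bits of the tick count, so they are masked off.
	 */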
6304	bp->stats_ticks = 1000000 & 0xffff00;
6305
6306	bp->timer_interval =  HZ;
6307	bp->current_interval =  HZ;
6308
6309	bp->phy_addr = 1;
6310
	/* Determine the PHY media type.  WOL is not supported on SERDES
	 * chips and is disabled below when SERDES is detected.
	 */
6312	if (CHIP_NUM(bp) == CHIP_NUM_5709)
6313		bnx2_get_5709_media(bp);
6314	else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
6315		bp->phy_flags |= PHY_SERDES_FLAG;
6316
6317	if (bp->phy_flags & PHY_SERDES_FLAG) {
6318		bp->flags |= NO_WOL_FLAG;
6319		if (CHIP_NUM(bp) != CHIP_NUM_5706) {
6320			bp->phy_addr = 2;
6321			reg = REG_RD_IND(bp, bp->shmem_base +
6322					 BNX2_SHARED_HW_CFG_CONFIG);
6323			if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
6324				bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
6325		}
6326	} else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
6327		   CHIP_NUM(bp) == CHIP_NUM_5708)
6328		bp->phy_flags |= PHY_CRC_FIX_FLAG;
6329	else if (CHIP_ID(bp) == CHIP_ID_5709_A0)
6330		bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
6331
6332	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
6333	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
6334	    (CHIP_ID(bp) == CHIP_ID_5708_B1))
6335		bp->flags |= NO_WOL_FLAG;
6336
6337	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6338		bp->tx_quick_cons_trip_int =
6339			bp->tx_quick_cons_trip;
6340		bp->tx_ticks_int = bp->tx_ticks;
6341		bp->rx_quick_cons_trip_int =
6342			bp->rx_quick_cons_trip;
6343		bp->rx_ticks_int = bp->rx_ticks;
6344		bp->comp_prod_trip_int = bp->comp_prod_trip;
6345		bp->com_ticks_int = bp->com_ticks;
6346		bp->cmd_ticks_int = bp->cmd_ticks;
6347	}
6348
	/* Disable MSI on the 5706 if an AMD 8132 bridge is found.
	 *
	 * MSI is defined to be a 32-bit write.  The 5706 does 64-bit MSI
	 * writes with byte enables disabled on the unused 32-bit word.
	 * This is legal but causes problems on the AMD 8132, which will
	 * eventually stop responding after a while.
	 *
	 * AMD believes this incompatibility is unique to the 5706, and
	 * prefers to locally disable MSI rather than globally disabling it.
	 */
6359	if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
6360		struct pci_dev *amd_8132 = NULL;
6361
6362		while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
6363						  PCI_DEVICE_ID_AMD_8132_BRIDGE,
6364						  amd_8132))) {
6365			u8 rev;
6366
6367			pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev);
6368			if (rev >= 0x10 && rev <= 0x13) {
6369				disable_msi = 1;
6370				pci_dev_put(amd_8132);
6371				break;
6372			}
6373		}
6374	}
6375
6376	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
6377	bp->req_line_speed = 0;
6378	if (bp->phy_flags & PHY_SERDES_FLAG) {
6379		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
6380
6381		reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
6382		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
6383		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
6384			bp->autoneg = 0;
6385			bp->req_line_speed = bp->line_speed = SPEED_1000;
6386			bp->req_duplex = DUPLEX_FULL;
6387		}
	} else {
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
	}
6392
6393	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
6394
6395	init_timer(&bp->timer);
6396	bp->timer.expires = RUN_AT(bp->timer_interval);
6397	bp->timer.data = (unsigned long) bp;
6398	bp->timer.function = bnx2_timer;
6399
6400	return 0;
6401
6402err_out_unmap:
6403	if (bp->regview) {
6404		iounmap(bp->regview);
6405		bp->regview = NULL;
6406	}
6407
6408err_out_release:
6409	pci_release_regions(pdev);
6410
6411err_out_disable:
6412	pci_disable_device(pdev);
6413	pci_set_drvdata(pdev, NULL);
6414
6415err_out:
6416	return rc;
6417}
6418
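/* Format a human-readable bus description, e.g. "PCI-X 64-bit 133MHz"
 * or "PCI Express", into the caller-provided buffer.
 */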
6419static char * __devinit
6420bnx2_bus_string(struct bnx2 *bp, char *str)
6421{
6422	char *s = str;
6423
6424	if (bp->flags & PCIE_FLAG) {
6425		s += sprintf(s, "PCI Express");
6426	} else {
6427		s += sprintf(s, "PCI");
6428		if (bp->flags & PCIX_FLAG)
6429			s += sprintf(s, "-X");
6430		if (bp->flags & PCI_32BIT_FLAG)
6431			s += sprintf(s, " 32-bit");
6432		else
6433			s += sprintf(s, " 64-bit");
6434		s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
6435	}
6436	return str;
6437}
6438
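/* PCI probe entry point: allocate the net_device, initialize the board,
 * hook up the netdev callbacks, and register with the network stack.
 */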
6439static int __devinit
6440bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6441{
6442	static int version_printed = 0;
6443	struct net_device *dev = NULL;
6444	struct bnx2 *bp;
6445	int rc, i;
6446	char str[40];
6447
6448	if (version_printed++ == 0)
6449		printk(KERN_INFO "%s", version);
6450
	/* dev is zeroed by alloc_etherdev() */
6452	dev = alloc_etherdev(sizeof(*bp));
6453
6454	if (!dev)
6455		return -ENOMEM;
6456
6457	rc = bnx2_init_board(pdev, dev);
6458	if (rc < 0) {
6459		free_netdev(dev);
6460		return rc;
6461	}
6462
6463	dev->open = bnx2_open;
6464	dev->hard_start_xmit = bnx2_start_xmit;
6465	dev->stop = bnx2_close;
6466	dev->get_stats = bnx2_get_stats;
6467	dev->set_multicast_list = bnx2_set_rx_mode;
6468	dev->do_ioctl = bnx2_ioctl;
6469	dev->set_mac_address = bnx2_change_mac_addr;
6470	dev->change_mtu = bnx2_change_mtu;
6471	dev->tx_timeout = bnx2_tx_timeout;
6472	dev->watchdog_timeo = TX_TIMEOUT;
6473#ifdef BCM_VLAN
6474	dev->vlan_rx_register = bnx2_vlan_rx_register;
6475#endif
6476	dev->poll = bnx2_poll;
6477	dev->ethtool_ops = &bnx2_ethtool_ops;
6478	dev->weight = 64;
6479
6480	bp = netdev_priv(dev);
6481
6482#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6483	dev->poll_controller = poll_bnx2;
6484#endif
6485
6486	pci_set_drvdata(pdev, dev);
6487
	memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN);
	memcpy(dev->perm_addr, bp->mac_addr, ETH_ALEN);
6490	bp->name = board_info[ent->driver_data].name;
6491
6492	if (CHIP_NUM(bp) == CHIP_NUM_5709)
6493		dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
6494	else
6495		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
6496#ifdef BCM_VLAN
6497	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6498#endif
6499	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6500	if (CHIP_NUM(bp) == CHIP_NUM_5709)
6501		dev->features |= NETIF_F_TSO6;
6502
6503	if ((rc = register_netdev(dev))) {
6504		dev_err(&pdev->dev, "Cannot register net device\n");
6505		if (bp->regview)
6506			iounmap(bp->regview);
6507		pci_release_regions(pdev);
6508		pci_disable_device(pdev);
6509		pci_set_drvdata(pdev, NULL);
6510		free_netdev(dev);
6511		return rc;
6512	}
6513
6514	printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
6515		"IRQ %d, ",
6516		dev->name,
6517		bp->name,
6518		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
6519		((CHIP_ID(bp) & 0x0ff0) >> 4),
6520		bnx2_bus_string(bp, str),
6521		dev->base_addr,
6522		bp->pdev->irq);
6523
6524	printk("node addr ");
6525	for (i = 0; i < 6; i++)
6526		printk("%2.2x", dev->dev_addr[i]);
6527	printk("\n");
6528
6529	return 0;
6530}
6531
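/* PCI removal: unwind everything bnx2_init_one() set up. */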
6532static void __devexit
6533bnx2_remove_one(struct pci_dev *pdev)
6534{
6535	struct net_device *dev = pci_get_drvdata(pdev);
6536	struct bnx2 *bp = netdev_priv(dev);
6537
6538	flush_scheduled_work();
6539
6540	unregister_netdev(dev);
6541
6542	if (bp->regview)
6543		iounmap(bp->regview);
6544
6545	free_netdev(dev);
6546	pci_release_regions(pdev);
6547	pci_disable_device(pdev);
6548	pci_set_drvdata(pdev, NULL);
6549}
6550
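/* Suspend: quiesce the NIC, tell the firmware whether wake-on-LAN is
 * armed, and drop to the requested PCI power state.
 */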
6551static int
6552bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
6553{
6554	struct net_device *dev = pci_get_drvdata(pdev);
6555	struct bnx2 *bp = netdev_priv(dev);
6556	u32 reset_code;
6557
6558	if (!netif_running(dev))
6559		return 0;
6560
6561	flush_scheduled_work();
6562	bnx2_netif_stop(bp);
6563	netif_device_detach(dev);
6564	del_timer_sync(&bp->timer);
6565	if (bp->flags & NO_WOL_FLAG)
6566		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
6567	else if (bp->wol)
6568		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
6569	else
6570		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
6571	bnx2_reset_chip(bp, reset_code);
6572	bnx2_free_skbs(bp);
6573	pci_save_state(pdev);
6574	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
6575	return 0;
6576}
6577
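/* Resume: restore PCI state, return to D0, and reinitialize the NIC. */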
6578static int
6579bnx2_resume(struct pci_dev *pdev)
6580{
6581	struct net_device *dev = pci_get_drvdata(pdev);
6582	struct bnx2 *bp = netdev_priv(dev);
6583
6584	if (!netif_running(dev))
6585		return 0;
6586
6587	pci_restore_state(pdev);
6588	bnx2_set_power_state(bp, PCI_D0);
6589	netif_device_attach(dev);
6590	bnx2_init_nic(bp);
6591	bnx2_netif_start(bp);
6592	return 0;
6593}
6594
6595static struct pci_driver bnx2_pci_driver = {
6596	.name		= DRV_MODULE_NAME,
6597	.id_table	= bnx2_pci_tbl,
6598	.probe		= bnx2_init_one,
6599	.remove		= __devexit_p(bnx2_remove_one),
6600	.suspend	= bnx2_suspend,
6601	.resume		= bnx2_resume,
6602};
6603
6604static int __init bnx2_init(void)
6605{
6606	return pci_register_driver(&bnx2_pci_driver);
6607}
6608
6609static void __exit bnx2_cleanup(void)
6610{
6611	pci_unregister_driver(&bnx2_pci_driver);
6612}
6613
6614module_init(bnx2_init);
6615module_exit(bnx2_cleanup);
6616