• Home
  • History
  • Annotate
  • Line#
  • Navigate
  • Raw
  • Download
  • only in /asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6.36/drivers/net/
1/* bnx2.c: Broadcom NX2 network driver.
2 *
3 * Copyright (c) 2004-2010 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Michael Chan  (mchan@broadcom.com)
10 */
11
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
14#include <linux/module.h>
15#include <linux/moduleparam.h>
16
17#include <linux/kernel.h>
18#include <linux/timer.h>
19#include <linux/errno.h>
20#include <linux/ioport.h>
21#include <linux/slab.h>
22#include <linux/vmalloc.h>
23#include <linux/interrupt.h>
24#include <linux/pci.h>
25#include <linux/init.h>
26#include <linux/netdevice.h>
27#include <linux/etherdevice.h>
28#include <linux/skbuff.h>
29#include <linux/dma-mapping.h>
30#include <linux/bitops.h>
31#include <asm/io.h>
32#include <asm/irq.h>
33#include <linux/delay.h>
34#include <asm/byteorder.h>
35#include <asm/page.h>
36#include <linux/time.h>
37#include <linux/ethtool.h>
38#include <linux/mii.h>
39#include <linux/if_vlan.h>
40#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
41#define BCM_VLAN 1
42#endif
43#include <net/ip.h>
44#include <net/tcp.h>
45#include <net/checksum.h>
46#include <linux/workqueue.h>
47#include <linux/crc32.h>
48#include <linux/prefetch.h>
49#include <linux/cache.h>
50#include <linux/firmware.h>
51#include <linux/log2.h>
52
53#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
54#define BCM_CNIC 1
55#include "cnic_if.h"
56#endif
57#include "bnx2.h"
58#include "bnx2_fw.h"
59
60#define DRV_MODULE_NAME		"bnx2"
61#define DRV_MODULE_VERSION	"2.0.17"
62#define DRV_MODULE_RELDATE	"July 18, 2010"
63#define FW_MIPS_FILE_06		"bnx2/bnx2-mips-06-5.0.0.j6.fw"
64#define FW_RV2P_FILE_06		"bnx2/bnx2-rv2p-06-5.0.0.j3.fw"
65#define FW_MIPS_FILE_09		"bnx2/bnx2-mips-09-5.0.0.j15.fw"
66#define FW_RV2P_FILE_09_Ax	"bnx2/bnx2-rv2p-09ax-5.0.0.j10.fw"
67#define FW_RV2P_FILE_09		"bnx2/bnx2-rv2p-09-5.0.0.j10.fw"
68
69#define RUN_AT(x) (jiffies + (x))
70
71/* Time in jiffies before concluding the transmitter is hung. */
72#define TX_TIMEOUT  (5*HZ)
73
74static char version[] __devinitdata =
75	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
76
77MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
78MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
79MODULE_LICENSE("GPL");
80MODULE_VERSION(DRV_MODULE_VERSION);
81MODULE_FIRMWARE(FW_MIPS_FILE_06);
82MODULE_FIRMWARE(FW_RV2P_FILE_06);
83MODULE_FIRMWARE(FW_MIPS_FILE_09);
84MODULE_FIRMWARE(FW_RV2P_FILE_09);
85MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);
86
87static int disable_msi = 0;
88
89module_param(disable_msi, int, 0);
90MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
91
92typedef enum {
93	BCM5706 = 0,
94	NC370T,
95	NC370I,
96	BCM5706S,
97	NC370F,
98	BCM5708,
99	BCM5708S,
100	BCM5709,
101	BCM5709S,
102	BCM5716,
103	BCM5716S,
104} board_t;
105
106/* indexed by board_t, above */
107static struct {
108	char *name;
109} board_info[] __devinitdata = {
110	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
111	{ "HP NC370T Multifunction Gigabit Server Adapter" },
112	{ "HP NC370i Multifunction Gigabit Server Adapter" },
113	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
114	{ "HP NC370F Multifunction Gigabit Server Adapter" },
115	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
116	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
117	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
118	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
119	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
120	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
121	};
122
123static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
124	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
125	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
126	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
127	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
128	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
129	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
130	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
131	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
132	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
133	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
134	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
135	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
136	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
137	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
138	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
139	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
140	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
141	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
142	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
143	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
144	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
145	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
146	{ 0, }
147};
148
149static const struct flash_spec flash_table[] =
150{
151#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
152#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
153	/* Slow EEPROM */
154	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
155	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
156	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
157	 "EEPROM - slow"},
158	/* Expansion entry 0001 */
159	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
160	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
161	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
162	 "Entry 0001"},
163	/* Saifun SA25F010 (non-buffered flash) */
164	/* strap, cfg1, & write1 need updates */
165	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
166	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
167	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
168	 "Non-buffered flash (128kB)"},
169	/* Saifun SA25F020 (non-buffered flash) */
170	/* strap, cfg1, & write1 need updates */
171	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
172	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
173	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
174	 "Non-buffered flash (256kB)"},
175	/* Expansion entry 0100 */
176	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
177	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
178	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
179	 "Entry 0100"},
180	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
181	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
182	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
183	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
184	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
185	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
186	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
187	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
188	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
189	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
190	/* Saifun SA25F005 (non-buffered flash) */
191	/* strap, cfg1, & write1 need updates */
192	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
193	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
194	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
195	 "Non-buffered flash (64kB)"},
196	/* Fast EEPROM */
197	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
198	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
199	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
200	 "EEPROM - fast"},
201	/* Expansion entry 1001 */
202	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
203	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
204	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
205	 "Entry 1001"},
206	/* Expansion entry 1010 */
207	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
208	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
209	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
210	 "Entry 1010"},
211	/* ATMEL AT45DB011B (buffered flash) */
212	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
213	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
214	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
215	 "Buffered flash (128kB)"},
216	/* Expansion entry 1100 */
217	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
218	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
219	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
220	 "Entry 1100"},
221	/* Expansion entry 1101 */
222	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
223	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
224	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
225	 "Entry 1101"},
226	/* Ateml Expansion entry 1110 */
227	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
228	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
229	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
230	 "Entry 1110 (Atmel)"},
231	/* ATMEL AT45DB021B (buffered flash) */
232	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
233	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
234	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
235	 "Buffered flash (256kB)"},
236};
237
238static const struct flash_spec flash_5709 = {
239	.flags		= BNX2_NV_BUFFERED,
240	.page_bits	= BCM5709_FLASH_PAGE_BITS,
241	.page_size	= BCM5709_FLASH_PAGE_SIZE,
242	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
243	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
244	.name		= "5709 Buffered flash (256kB)",
245};
246
247MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
248
249static void bnx2_init_napi(struct bnx2 *bp);
250static void bnx2_del_napi(struct bnx2 *bp);
251
252static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
253{
254	u32 diff;
255
256	/* Tell compiler to fetch tx_prod and tx_cons from memory. */
257	barrier();
258
259	/* The ring uses 256 indices for 255 entries, one of them
260	 * needs to be skipped.
261	 */
262	diff = txr->tx_prod - txr->tx_cons;
263	if (unlikely(diff >= TX_DESC_CNT)) {
264		diff &= 0xffff;
265		if (diff == TX_DESC_CNT)
266			diff = MAX_TX_DESC_CNT;
267	}
268	return (bp->tx_ring_size - diff);
269}
270
271static u32
272bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
273{
274	u32 val;
275
276	spin_lock_bh(&bp->indirect_lock);
277	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
278	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
279	spin_unlock_bh(&bp->indirect_lock);
280	return val;
281}
282
283static void
284bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
285{
286	spin_lock_bh(&bp->indirect_lock);
287	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
288	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
289	spin_unlock_bh(&bp->indirect_lock);
290}
291
292static void
293bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
294{
295	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
296}
297
298static u32
299bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
300{
301	return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
302}
303
304static void
305bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
306{
307	offset += cid_addr;
308	spin_lock_bh(&bp->indirect_lock);
309	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
310		int i;
311
312		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
313		REG_WR(bp, BNX2_CTX_CTX_CTRL,
314		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
315		for (i = 0; i < 5; i++) {
316			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
317			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
318				break;
319			udelay(5);
320		}
321	} else {
322		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
323		REG_WR(bp, BNX2_CTX_DATA, val);
324	}
325	spin_unlock_bh(&bp->indirect_lock);
326}
327
328#ifdef BCM_CNIC
329static int
330bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
331{
332	struct bnx2 *bp = netdev_priv(dev);
333	struct drv_ctl_io *io = &info->data.io;
334
335	switch (info->cmd) {
336	case DRV_CTL_IO_WR_CMD:
337		bnx2_reg_wr_ind(bp, io->offset, io->data);
338		break;
339	case DRV_CTL_IO_RD_CMD:
340		io->data = bnx2_reg_rd_ind(bp, io->offset);
341		break;
342	case DRV_CTL_CTX_WR_CMD:
343		bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
344		break;
345	default:
346		return -EINVAL;
347	}
348	return 0;
349}
350
351static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
352{
353	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
354	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
355	int sb_id;
356
357	if (bp->flags & BNX2_FLAG_USING_MSIX) {
358		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
359		bnapi->cnic_present = 0;
360		sb_id = bp->irq_nvecs;
361		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
362	} else {
363		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
364		bnapi->cnic_tag = bnapi->last_status_idx;
365		bnapi->cnic_present = 1;
366		sb_id = 0;
367		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
368	}
369
370	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
371	cp->irq_arr[0].status_blk = (void *)
372		((unsigned long) bnapi->status_blk.msi +
373		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
374	cp->irq_arr[0].status_blk_num = sb_id;
375	cp->num_irq = 1;
376}
377
378static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
379			      void *data)
380{
381	struct bnx2 *bp = netdev_priv(dev);
382	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
383
384	if (ops == NULL)
385		return -EINVAL;
386
387	if (cp->drv_state & CNIC_DRV_STATE_REGD)
388		return -EBUSY;
389
390	bp->cnic_data = data;
391	rcu_assign_pointer(bp->cnic_ops, ops);
392
393	cp->num_irq = 0;
394	cp->drv_state = CNIC_DRV_STATE_REGD;
395
396	bnx2_setup_cnic_irq_info(bp);
397
398	return 0;
399}
400
401static int bnx2_unregister_cnic(struct net_device *dev)
402{
403	struct bnx2 *bp = netdev_priv(dev);
404	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
405	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
406
407	mutex_lock(&bp->cnic_lock);
408	cp->drv_state = 0;
409	bnapi->cnic_present = 0;
410	rcu_assign_pointer(bp->cnic_ops, NULL);
411	mutex_unlock(&bp->cnic_lock);
412	synchronize_rcu();
413	return 0;
414}
415
416struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
417{
418	struct bnx2 *bp = netdev_priv(dev);
419	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
420
421	cp->drv_owner = THIS_MODULE;
422	cp->chip_id = bp->chip_id;
423	cp->pdev = bp->pdev;
424	cp->io_base = bp->regview;
425	cp->drv_ctl = bnx2_drv_ctl;
426	cp->drv_register_cnic = bnx2_register_cnic;
427	cp->drv_unregister_cnic = bnx2_unregister_cnic;
428
429	return cp;
430}
431EXPORT_SYMBOL(bnx2_cnic_probe);
432
433static void
434bnx2_cnic_stop(struct bnx2 *bp)
435{
436	struct cnic_ops *c_ops;
437	struct cnic_ctl_info info;
438
439	mutex_lock(&bp->cnic_lock);
440	c_ops = bp->cnic_ops;
441	if (c_ops) {
442		info.cmd = CNIC_CTL_STOP_CMD;
443		c_ops->cnic_ctl(bp->cnic_data, &info);
444	}
445	mutex_unlock(&bp->cnic_lock);
446}
447
448static void
449bnx2_cnic_start(struct bnx2 *bp)
450{
451	struct cnic_ops *c_ops;
452	struct cnic_ctl_info info;
453
454	mutex_lock(&bp->cnic_lock);
455	c_ops = bp->cnic_ops;
456	if (c_ops) {
457		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
458			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
459
460			bnapi->cnic_tag = bnapi->last_status_idx;
461		}
462		info.cmd = CNIC_CTL_START_CMD;
463		c_ops->cnic_ctl(bp->cnic_data, &info);
464	}
465	mutex_unlock(&bp->cnic_lock);
466}
467
468#else
469
470static void
471bnx2_cnic_stop(struct bnx2 *bp)
472{
473}
474
475static void
476bnx2_cnic_start(struct bnx2 *bp)
477{
478}
479
480#endif
481
482static int
483bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
484{
485	u32 val1;
486	int i, ret;
487
488	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
489		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
490		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
491
492		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
493		REG_RD(bp, BNX2_EMAC_MDIO_MODE);
494
495		udelay(40);
496	}
497
498	val1 = (bp->phy_addr << 21) | (reg << 16) |
499		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
500		BNX2_EMAC_MDIO_COMM_START_BUSY;
501	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
502
503	for (i = 0; i < 50; i++) {
504		udelay(10);
505
506		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
507		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
508			udelay(5);
509
510			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
511			val1 &= BNX2_EMAC_MDIO_COMM_DATA;
512
513			break;
514		}
515	}
516
517	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
518		*val = 0x0;
519		ret = -EBUSY;
520	}
521	else {
522		*val = val1;
523		ret = 0;
524	}
525
526	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
527		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
528		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
529
530		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
531		REG_RD(bp, BNX2_EMAC_MDIO_MODE);
532
533		udelay(40);
534	}
535
536	return ret;
537}
538
539static int
540bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
541{
542	u32 val1;
543	int i, ret;
544
545	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
546		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
547		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
548
549		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
550		REG_RD(bp, BNX2_EMAC_MDIO_MODE);
551
552		udelay(40);
553	}
554
555	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
556		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
557		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
558	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
559
560	for (i = 0; i < 50; i++) {
561		udelay(10);
562
563		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
564		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
565			udelay(5);
566			break;
567		}
568	}
569
570	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
571        	ret = -EBUSY;
572	else
573		ret = 0;
574
575	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
576		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
577		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
578
579		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
580		REG_RD(bp, BNX2_EMAC_MDIO_MODE);
581
582		udelay(40);
583	}
584
585	return ret;
586}
587
588static void
589bnx2_disable_int(struct bnx2 *bp)
590{
591	int i;
592	struct bnx2_napi *bnapi;
593
594	for (i = 0; i < bp->irq_nvecs; i++) {
595		bnapi = &bp->bnx2_napi[i];
596		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
597		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
598	}
599	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
600}
601
602static void
603bnx2_enable_int(struct bnx2 *bp)
604{
605	int i;
606	struct bnx2_napi *bnapi;
607
608	for (i = 0; i < bp->irq_nvecs; i++) {
609		bnapi = &bp->bnx2_napi[i];
610
611		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
612		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
613		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
614		       bnapi->last_status_idx);
615
616		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
617		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
618		       bnapi->last_status_idx);
619	}
620	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
621}
622
623static void
624bnx2_disable_int_sync(struct bnx2 *bp)
625{
626	int i;
627
628	atomic_inc(&bp->intr_sem);
629	if (!netif_running(bp->dev))
630		return;
631
632	bnx2_disable_int(bp);
633	for (i = 0; i < bp->irq_nvecs; i++)
634		synchronize_irq(bp->irq_tbl[i].vector);
635}
636
637static void
638bnx2_napi_disable(struct bnx2 *bp)
639{
640	int i;
641
642	for (i = 0; i < bp->irq_nvecs; i++)
643		napi_disable(&bp->bnx2_napi[i].napi);
644}
645
646static void
647bnx2_napi_enable(struct bnx2 *bp)
648{
649	int i;
650
651	for (i = 0; i < bp->irq_nvecs; i++)
652		napi_enable(&bp->bnx2_napi[i].napi);
653}
654
655static void
656bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
657{
658	if (stop_cnic)
659		bnx2_cnic_stop(bp);
660	if (netif_running(bp->dev)) {
661		bnx2_napi_disable(bp);
662		netif_tx_disable(bp->dev);
663	}
664	bnx2_disable_int_sync(bp);
665	netif_carrier_off(bp->dev);	/* prevent tx timeout */
666}
667
668static void
669bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
670{
671	if (atomic_dec_and_test(&bp->intr_sem)) {
672		if (netif_running(bp->dev)) {
673			netif_tx_wake_all_queues(bp->dev);
674			spin_lock_bh(&bp->phy_lock);
675			if (bp->link_up)
676				netif_carrier_on(bp->dev);
677			spin_unlock_bh(&bp->phy_lock);
678			bnx2_napi_enable(bp);
679			bnx2_enable_int(bp);
680			if (start_cnic)
681				bnx2_cnic_start(bp);
682		}
683	}
684}
685
686static void
687bnx2_free_tx_mem(struct bnx2 *bp)
688{
689	int i;
690
691	for (i = 0; i < bp->num_tx_rings; i++) {
692		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
693		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
694
695		if (txr->tx_desc_ring) {
696			dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
697					  txr->tx_desc_ring,
698					  txr->tx_desc_mapping);
699			txr->tx_desc_ring = NULL;
700		}
701		kfree(txr->tx_buf_ring);
702		txr->tx_buf_ring = NULL;
703	}
704}
705
706static void
707bnx2_free_rx_mem(struct bnx2 *bp)
708{
709	int i;
710
711	for (i = 0; i < bp->num_rx_rings; i++) {
712		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
713		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
714		int j;
715
716		for (j = 0; j < bp->rx_max_ring; j++) {
717			if (rxr->rx_desc_ring[j])
718				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
719						  rxr->rx_desc_ring[j],
720						  rxr->rx_desc_mapping[j]);
721			rxr->rx_desc_ring[j] = NULL;
722		}
723		vfree(rxr->rx_buf_ring);
724		rxr->rx_buf_ring = NULL;
725
726		for (j = 0; j < bp->rx_max_pg_ring; j++) {
727			if (rxr->rx_pg_desc_ring[j])
728				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
729						  rxr->rx_pg_desc_ring[j],
730						  rxr->rx_pg_desc_mapping[j]);
731			rxr->rx_pg_desc_ring[j] = NULL;
732		}
733		vfree(rxr->rx_pg_ring);
734		rxr->rx_pg_ring = NULL;
735	}
736}
737
738static int
739bnx2_alloc_tx_mem(struct bnx2 *bp)
740{
741	int i;
742
743	for (i = 0; i < bp->num_tx_rings; i++) {
744		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
745		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
746
747		txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
748		if (txr->tx_buf_ring == NULL)
749			return -ENOMEM;
750
751		txr->tx_desc_ring =
752			dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
753					   &txr->tx_desc_mapping, GFP_KERNEL);
754		if (txr->tx_desc_ring == NULL)
755			return -ENOMEM;
756	}
757	return 0;
758}
759
760static int
761bnx2_alloc_rx_mem(struct bnx2 *bp)
762{
763	int i;
764
765	for (i = 0; i < bp->num_rx_rings; i++) {
766		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
767		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
768		int j;
769
770		rxr->rx_buf_ring =
771			vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
772		if (rxr->rx_buf_ring == NULL)
773			return -ENOMEM;
774
775		memset(rxr->rx_buf_ring, 0,
776		       SW_RXBD_RING_SIZE * bp->rx_max_ring);
777
778		for (j = 0; j < bp->rx_max_ring; j++) {
779			rxr->rx_desc_ring[j] =
780				dma_alloc_coherent(&bp->pdev->dev,
781						   RXBD_RING_SIZE,
782						   &rxr->rx_desc_mapping[j],
783						   GFP_KERNEL);
784			if (rxr->rx_desc_ring[j] == NULL)
785				return -ENOMEM;
786
787		}
788
789		if (bp->rx_pg_ring_size) {
790			rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
791						  bp->rx_max_pg_ring);
792			if (rxr->rx_pg_ring == NULL)
793				return -ENOMEM;
794
795			memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
796			       bp->rx_max_pg_ring);
797		}
798
799		for (j = 0; j < bp->rx_max_pg_ring; j++) {
800			rxr->rx_pg_desc_ring[j] =
801				dma_alloc_coherent(&bp->pdev->dev,
802						   RXBD_RING_SIZE,
803						   &rxr->rx_pg_desc_mapping[j],
804						   GFP_KERNEL);
805			if (rxr->rx_pg_desc_ring[j] == NULL)
806				return -ENOMEM;
807
808		}
809	}
810	return 0;
811}
812
813static void
814bnx2_free_mem(struct bnx2 *bp)
815{
816	int i;
817	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
818
819	bnx2_free_tx_mem(bp);
820	bnx2_free_rx_mem(bp);
821
822	for (i = 0; i < bp->ctx_pages; i++) {
823		if (bp->ctx_blk[i]) {
824			dma_free_coherent(&bp->pdev->dev, BCM_PAGE_SIZE,
825					  bp->ctx_blk[i],
826					  bp->ctx_blk_mapping[i]);
827			bp->ctx_blk[i] = NULL;
828		}
829	}
830	if (bnapi->status_blk.msi) {
831		dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
832				  bnapi->status_blk.msi,
833				  bp->status_blk_mapping);
834		bnapi->status_blk.msi = NULL;
835		bp->stats_blk = NULL;
836	}
837}
838
839static int
840bnx2_alloc_mem(struct bnx2 *bp)
841{
842	int i, status_blk_size, err;
843	struct bnx2_napi *bnapi;
844	void *status_blk;
845
846	/* Combine status and statistics blocks into one allocation. */
847	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
848	if (bp->flags & BNX2_FLAG_MSIX_CAP)
849		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
850						 BNX2_SBLK_MSIX_ALIGN_SIZE);
851	bp->status_stats_size = status_blk_size +
852				sizeof(struct statistics_block);
853
854	status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
855					&bp->status_blk_mapping, GFP_KERNEL);
856	if (status_blk == NULL)
857		goto alloc_mem_err;
858
859	memset(status_blk, 0, bp->status_stats_size);
860
861	bnapi = &bp->bnx2_napi[0];
862	bnapi->status_blk.msi = status_blk;
863	bnapi->hw_tx_cons_ptr =
864		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
865	bnapi->hw_rx_cons_ptr =
866		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
867	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
868		for (i = 1; i < bp->irq_nvecs; i++) {
869			struct status_block_msix *sblk;
870
871			bnapi = &bp->bnx2_napi[i];
872
873			sblk = (void *) (status_blk +
874					 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
875			bnapi->status_blk.msix = sblk;
876			bnapi->hw_tx_cons_ptr =
877				&sblk->status_tx_quick_consumer_index;
878			bnapi->hw_rx_cons_ptr =
879				&sblk->status_rx_quick_consumer_index;
880			bnapi->int_num = i << 24;
881		}
882	}
883
884	bp->stats_blk = status_blk + status_blk_size;
885
886	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
887
888	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
889		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
890		if (bp->ctx_pages == 0)
891			bp->ctx_pages = 1;
892		for (i = 0; i < bp->ctx_pages; i++) {
893			bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
894						BCM_PAGE_SIZE,
895						&bp->ctx_blk_mapping[i],
896						GFP_KERNEL);
897			if (bp->ctx_blk[i] == NULL)
898				goto alloc_mem_err;
899		}
900	}
901
902	err = bnx2_alloc_rx_mem(bp);
903	if (err)
904		goto alloc_mem_err;
905
906	err = bnx2_alloc_tx_mem(bp);
907	if (err)
908		goto alloc_mem_err;
909
910	return 0;
911
912alloc_mem_err:
913	bnx2_free_mem(bp);
914	return -ENOMEM;
915}
916
917static void
918bnx2_report_fw_link(struct bnx2 *bp)
919{
920	u32 fw_link_status = 0;
921
922	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
923		return;
924
925	if (bp->link_up) {
926		u32 bmsr;
927
928		switch (bp->line_speed) {
929		case SPEED_10:
930			if (bp->duplex == DUPLEX_HALF)
931				fw_link_status = BNX2_LINK_STATUS_10HALF;
932			else
933				fw_link_status = BNX2_LINK_STATUS_10FULL;
934			break;
935		case SPEED_100:
936			if (bp->duplex == DUPLEX_HALF)
937				fw_link_status = BNX2_LINK_STATUS_100HALF;
938			else
939				fw_link_status = BNX2_LINK_STATUS_100FULL;
940			break;
941		case SPEED_1000:
942			if (bp->duplex == DUPLEX_HALF)
943				fw_link_status = BNX2_LINK_STATUS_1000HALF;
944			else
945				fw_link_status = BNX2_LINK_STATUS_1000FULL;
946			break;
947		case SPEED_2500:
948			if (bp->duplex == DUPLEX_HALF)
949				fw_link_status = BNX2_LINK_STATUS_2500HALF;
950			else
951				fw_link_status = BNX2_LINK_STATUS_2500FULL;
952			break;
953		}
954
955		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
956
957		if (bp->autoneg) {
958			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
959
960			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
961			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
962
963			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
964			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
965				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
966			else
967				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
968		}
969	}
970	else
971		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
972
973	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
974}
975
976static char *
977bnx2_xceiver_str(struct bnx2 *bp)
978{
979	return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
980		((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
981		 "Copper"));
982}
983
984static void
985bnx2_report_link(struct bnx2 *bp)
986{
987	if (bp->link_up) {
988		netif_carrier_on(bp->dev);
989		netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
990			    bnx2_xceiver_str(bp),
991			    bp->line_speed,
992			    bp->duplex == DUPLEX_FULL ? "full" : "half");
993
994		if (bp->flow_ctrl) {
995			if (bp->flow_ctrl & FLOW_CTRL_RX) {
996				pr_cont(", receive ");
997				if (bp->flow_ctrl & FLOW_CTRL_TX)
998					pr_cont("& transmit ");
999			}
1000			else {
1001				pr_cont(", transmit ");
1002			}
1003			pr_cont("flow control ON");
1004		}
1005		pr_cont("\n");
1006	} else {
1007		netif_carrier_off(bp->dev);
1008		netdev_err(bp->dev, "NIC %s Link is Down\n",
1009			   bnx2_xceiver_str(bp));
1010	}
1011
1012	bnx2_report_fw_link(bp);
1013}
1014
1015static void
1016bnx2_resolve_flow_ctrl(struct bnx2 *bp)
1017{
1018	u32 local_adv, remote_adv;
1019
1020	bp->flow_ctrl = 0;
1021	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1022		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1023
1024		if (bp->duplex == DUPLEX_FULL) {
1025			bp->flow_ctrl = bp->req_flow_ctrl;
1026		}
1027		return;
1028	}
1029
1030	if (bp->duplex != DUPLEX_FULL) {
1031		return;
1032	}
1033
1034	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1035	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
1036		u32 val;
1037
1038		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1039		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
1040			bp->flow_ctrl |= FLOW_CTRL_TX;
1041		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
1042			bp->flow_ctrl |= FLOW_CTRL_RX;
1043		return;
1044	}
1045
1046	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1047	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1048
1049	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1050		u32 new_local_adv = 0;
1051		u32 new_remote_adv = 0;
1052
1053		if (local_adv & ADVERTISE_1000XPAUSE)
1054			new_local_adv |= ADVERTISE_PAUSE_CAP;
1055		if (local_adv & ADVERTISE_1000XPSE_ASYM)
1056			new_local_adv |= ADVERTISE_PAUSE_ASYM;
1057		if (remote_adv & ADVERTISE_1000XPAUSE)
1058			new_remote_adv |= ADVERTISE_PAUSE_CAP;
1059		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
1060			new_remote_adv |= ADVERTISE_PAUSE_ASYM;
1061
1062		local_adv = new_local_adv;
1063		remote_adv = new_remote_adv;
1064	}
1065
1066	/* See Table 28B-3 of 802.3ab-1999 spec. */
1067	if (local_adv & ADVERTISE_PAUSE_CAP) {
1068		if(local_adv & ADVERTISE_PAUSE_ASYM) {
1069	                if (remote_adv & ADVERTISE_PAUSE_CAP) {
1070				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1071			}
1072			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
1073				bp->flow_ctrl = FLOW_CTRL_RX;
1074			}
1075		}
1076		else {
1077			if (remote_adv & ADVERTISE_PAUSE_CAP) {
1078				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1079			}
1080		}
1081	}
1082	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1083		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
1084			(remote_adv & ADVERTISE_PAUSE_ASYM)) {
1085
1086			bp->flow_ctrl = FLOW_CTRL_TX;
1087		}
1088	}
1089}
1090
1091static int
1092bnx2_5709s_linkup(struct bnx2 *bp)
1093{
1094	u32 val, speed;
1095
1096	bp->link_up = 1;
1097
1098	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
1099	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
1100	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1101
1102	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
1103		bp->line_speed = bp->req_line_speed;
1104		bp->duplex = bp->req_duplex;
1105		return 0;
1106	}
1107	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
1108	switch (speed) {
1109		case MII_BNX2_GP_TOP_AN_SPEED_10:
1110			bp->line_speed = SPEED_10;
1111			break;
1112		case MII_BNX2_GP_TOP_AN_SPEED_100:
1113			bp->line_speed = SPEED_100;
1114			break;
1115		case MII_BNX2_GP_TOP_AN_SPEED_1G:
1116		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
1117			bp->line_speed = SPEED_1000;
1118			break;
1119		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
1120			bp->line_speed = SPEED_2500;
1121			break;
1122	}
1123	if (val & MII_BNX2_GP_TOP_AN_FD)
1124		bp->duplex = DUPLEX_FULL;
1125	else
1126		bp->duplex = DUPLEX_HALF;
1127	return 0;
1128}
1129
1130static int
1131bnx2_5708s_linkup(struct bnx2 *bp)
1132{
1133	u32 val;
1134
1135	bp->link_up = 1;
1136	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1137	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
1138		case BCM5708S_1000X_STAT1_SPEED_10:
1139			bp->line_speed = SPEED_10;
1140			break;
1141		case BCM5708S_1000X_STAT1_SPEED_100:
1142			bp->line_speed = SPEED_100;
1143			break;
1144		case BCM5708S_1000X_STAT1_SPEED_1G:
1145			bp->line_speed = SPEED_1000;
1146			break;
1147		case BCM5708S_1000X_STAT1_SPEED_2G5:
1148			bp->line_speed = SPEED_2500;
1149			break;
1150	}
1151	if (val & BCM5708S_1000X_STAT1_FD)
1152		bp->duplex = DUPLEX_FULL;
1153	else
1154		bp->duplex = DUPLEX_HALF;
1155
1156	return 0;
1157}
1158
1159static int
1160bnx2_5706s_linkup(struct bnx2 *bp)
1161{
1162	u32 bmcr, local_adv, remote_adv, common;
1163
1164	bp->link_up = 1;
1165	bp->line_speed = SPEED_1000;
1166
1167	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1168	if (bmcr & BMCR_FULLDPLX) {
1169		bp->duplex = DUPLEX_FULL;
1170	}
1171	else {
1172		bp->duplex = DUPLEX_HALF;
1173	}
1174
1175	if (!(bmcr & BMCR_ANENABLE)) {
1176		return 0;
1177	}
1178
1179	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1180	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1181
1182	common = local_adv & remote_adv;
1183	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1184
1185		if (common & ADVERTISE_1000XFULL) {
1186			bp->duplex = DUPLEX_FULL;
1187		}
1188		else {
1189			bp->duplex = DUPLEX_HALF;
1190		}
1191	}
1192
1193	return 0;
1194}
1195
1196static int
1197bnx2_copper_linkup(struct bnx2 *bp)
1198{
1199	u32 bmcr;
1200
1201	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1202	if (bmcr & BMCR_ANENABLE) {
1203		u32 local_adv, remote_adv, common;
1204
1205		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
1206		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
1207
1208		common = local_adv & (remote_adv >> 2);
1209		if (common & ADVERTISE_1000FULL) {
1210			bp->line_speed = SPEED_1000;
1211			bp->duplex = DUPLEX_FULL;
1212		}
1213		else if (common & ADVERTISE_1000HALF) {
1214			bp->line_speed = SPEED_1000;
1215			bp->duplex = DUPLEX_HALF;
1216		}
1217		else {
1218			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1219			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1220
1221			common = local_adv & remote_adv;
1222			if (common & ADVERTISE_100FULL) {
1223				bp->line_speed = SPEED_100;
1224				bp->duplex = DUPLEX_FULL;
1225			}
1226			else if (common & ADVERTISE_100HALF) {
1227				bp->line_speed = SPEED_100;
1228				bp->duplex = DUPLEX_HALF;
1229			}
1230			else if (common & ADVERTISE_10FULL) {
1231				bp->line_speed = SPEED_10;
1232				bp->duplex = DUPLEX_FULL;
1233			}
1234			else if (common & ADVERTISE_10HALF) {
1235				bp->line_speed = SPEED_10;
1236				bp->duplex = DUPLEX_HALF;
1237			}
1238			else {
1239				bp->line_speed = 0;
1240				bp->link_up = 0;
1241			}
1242		}
1243	}
1244	else {
1245		if (bmcr & BMCR_SPEED100) {
1246			bp->line_speed = SPEED_100;
1247		}
1248		else {
1249			bp->line_speed = SPEED_10;
1250		}
1251		if (bmcr & BMCR_FULLDPLX) {
1252			bp->duplex = DUPLEX_FULL;
1253		}
1254		else {
1255			bp->duplex = DUPLEX_HALF;
1256		}
1257	}
1258
1259	return 0;
1260}
1261
1262static void
1263bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
1264{
1265	u32 val, rx_cid_addr = GET_CID_ADDR(cid);
1266
1267	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
1268	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
1269	val |= 0x02 << 8;
1270
1271	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1272		u32 lo_water, hi_water;
1273
1274		if (bp->flow_ctrl & FLOW_CTRL_TX)
1275			lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
1276		else
1277			lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
1278		if (lo_water >= bp->rx_ring_size)
1279			lo_water = 0;
1280
1281		hi_water = min_t(int, bp->rx_ring_size / 4, lo_water + 16);
1282
1283		if (hi_water <= lo_water)
1284			lo_water = 0;
1285
1286		hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
1287		lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;
1288
1289		if (hi_water > 0xf)
1290			hi_water = 0xf;
1291		else if (hi_water == 0)
1292			lo_water = 0;
1293		val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
1294	}
1295	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
1296}
1297
1298static void
1299bnx2_init_all_rx_contexts(struct bnx2 *bp)
1300{
1301	int i;
1302	u32 cid;
1303
1304	for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1305		if (i == 1)
1306			cid = RX_RSS_CID;
1307		bnx2_init_rx_context(bp, cid);
1308	}
1309}
1310
1311static void
1312bnx2_set_mac_link(struct bnx2 *bp)
1313{
1314	u32 val;
1315
1316	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
1317	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
1318		(bp->duplex == DUPLEX_HALF)) {
1319		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
1320	}
1321
1322	/* Configure the EMAC mode register. */
1323	val = REG_RD(bp, BNX2_EMAC_MODE);
1324
1325	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1326		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1327		BNX2_EMAC_MODE_25G_MODE);
1328
1329	if (bp->link_up) {
1330		switch (bp->line_speed) {
1331			case SPEED_10:
1332				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
1333					val |= BNX2_EMAC_MODE_PORT_MII_10M;
1334					break;
1335				}
1336				/* fall through */
1337			case SPEED_100:
1338				val |= BNX2_EMAC_MODE_PORT_MII;
1339				break;
1340			case SPEED_2500:
1341				val |= BNX2_EMAC_MODE_25G_MODE;
1342				/* fall through */
1343			case SPEED_1000:
1344				val |= BNX2_EMAC_MODE_PORT_GMII;
1345				break;
1346		}
1347	}
1348	else {
1349		val |= BNX2_EMAC_MODE_PORT_GMII;
1350	}
1351
1352	/* Set the MAC to operate in the appropriate duplex mode. */
1353	if (bp->duplex == DUPLEX_HALF)
1354		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
1355	REG_WR(bp, BNX2_EMAC_MODE, val);
1356
1357	/* Enable/disable rx PAUSE. */
1358	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
1359
1360	if (bp->flow_ctrl & FLOW_CTRL_RX)
1361		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
1362	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
1363
1364	/* Enable/disable tx PAUSE. */
1365	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
1366	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
1367
1368	if (bp->flow_ctrl & FLOW_CTRL_TX)
1369		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
1370	REG_WR(bp, BNX2_EMAC_TX_MODE, val);
1371
1372	/* Acknowledge the interrupt. */
1373	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
1374
1375	if (CHIP_NUM(bp) == CHIP_NUM_5709)
1376		bnx2_init_all_rx_contexts(bp);
1377}
1378
1379static void
1380bnx2_enable_bmsr1(struct bnx2 *bp)
1381{
1382	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1383	    (CHIP_NUM(bp) == CHIP_NUM_5709))
1384		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1385			       MII_BNX2_BLK_ADDR_GP_STATUS);
1386}
1387
1388static void
1389bnx2_disable_bmsr1(struct bnx2 *bp)
1390{
1391	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1392	    (CHIP_NUM(bp) == CHIP_NUM_5709))
1393		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1394			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1395}
1396
1397static int
1398bnx2_test_and_enable_2g5(struct bnx2 *bp)
1399{
1400	u32 up1;
1401	int ret = 1;
1402
1403	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1404		return 0;
1405
1406	if (bp->autoneg & AUTONEG_SPEED)
1407		bp->advertising |= ADVERTISED_2500baseX_Full;
1408
1409	if (CHIP_NUM(bp) == CHIP_NUM_5709)
1410		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1411
1412	bnx2_read_phy(bp, bp->mii_up1, &up1);
1413	if (!(up1 & BCM5708S_UP1_2G5)) {
1414		up1 |= BCM5708S_UP1_2G5;
1415		bnx2_write_phy(bp, bp->mii_up1, up1);
1416		ret = 0;
1417	}
1418
1419	if (CHIP_NUM(bp) == CHIP_NUM_5709)
1420		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1421			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1422
1423	return ret;
1424}
1425
1426static int
1427bnx2_test_and_disable_2g5(struct bnx2 *bp)
1428{
1429	u32 up1;
1430	int ret = 0;
1431
1432	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1433		return 0;
1434
1435	if (CHIP_NUM(bp) == CHIP_NUM_5709)
1436		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1437
1438	bnx2_read_phy(bp, bp->mii_up1, &up1);
1439	if (up1 & BCM5708S_UP1_2G5) {
1440		up1 &= ~BCM5708S_UP1_2G5;
1441		bnx2_write_phy(bp, bp->mii_up1, up1);
1442		ret = 1;
1443	}
1444
1445	if (CHIP_NUM(bp) == CHIP_NUM_5709)
1446		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1447			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1448
1449	return ret;
1450}
1451
1452static void
1453bnx2_enable_forced_2g5(struct bnx2 *bp)
1454{
1455	u32 uninitialized_var(bmcr);
1456	int err;
1457
1458	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1459		return;
1460
1461	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1462		u32 val;
1463
1464		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1465			       MII_BNX2_BLK_ADDR_SERDES_DIG);
1466		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
1467			val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1468			val |= MII_BNX2_SD_MISC1_FORCE |
1469				MII_BNX2_SD_MISC1_FORCE_2_5G;
1470			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1471		}
1472
1473		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1474			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1475		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1476
1477	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1478		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1479		if (!err)
1480			bmcr |= BCM5708S_BMCR_FORCE_2500;
1481	} else {
1482		return;
1483	}
1484
1485	if (err)
1486		return;
1487
1488	if (bp->autoneg & AUTONEG_SPEED) {
1489		bmcr &= ~BMCR_ANENABLE;
1490		if (bp->req_duplex == DUPLEX_FULL)
1491			bmcr |= BMCR_FULLDPLX;
1492	}
1493	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1494}
1495
1496static void
1497bnx2_disable_forced_2g5(struct bnx2 *bp)
1498{
1499	u32 uninitialized_var(bmcr);
1500	int err;
1501
1502	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1503		return;
1504
1505	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1506		u32 val;
1507
1508		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1509			       MII_BNX2_BLK_ADDR_SERDES_DIG);
1510		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
1511			val &= ~MII_BNX2_SD_MISC1_FORCE;
1512			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1513		}
1514
1515		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1516			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1517		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1518
1519	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1520		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1521		if (!err)
1522			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1523	} else {
1524		return;
1525	}
1526
1527	if (err)
1528		return;
1529
1530	if (bp->autoneg & AUTONEG_SPEED)
1531		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1532	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1533}
1534
1535static void
1536bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1537{
1538	u32 val;
1539
1540	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1541	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1542	if (start)
1543		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1544	else
1545		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1546}
1547
1548static int
1549bnx2_set_link(struct bnx2 *bp)
1550{
1551	u32 bmsr;
1552	u8 link_up;
1553
1554	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
1555		bp->link_up = 1;
1556		return 0;
1557	}
1558
1559	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1560		return 0;
1561
1562	link_up = bp->link_up;
1563
1564	bnx2_enable_bmsr1(bp);
1565	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1566	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1567	bnx2_disable_bmsr1(bp);
1568
1569	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1570	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
1571		u32 val, an_dbg;
1572
1573		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
1574			bnx2_5706s_force_link_dn(bp, 0);
1575			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
1576		}
1577		val = REG_RD(bp, BNX2_EMAC_STATUS);
1578
1579		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
1580		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1581		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1582
1583		if ((val & BNX2_EMAC_STATUS_LINK) &&
1584		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
1585			bmsr |= BMSR_LSTATUS;
1586		else
1587			bmsr &= ~BMSR_LSTATUS;
1588	}
1589
1590	if (bmsr & BMSR_LSTATUS) {
1591		bp->link_up = 1;
1592
1593		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1594			if (CHIP_NUM(bp) == CHIP_NUM_5706)
1595				bnx2_5706s_linkup(bp);
1596			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1597				bnx2_5708s_linkup(bp);
1598			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1599				bnx2_5709s_linkup(bp);
1600		}
1601		else {
1602			bnx2_copper_linkup(bp);
1603		}
1604		bnx2_resolve_flow_ctrl(bp);
1605	}
1606	else {
1607		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1608		    (bp->autoneg & AUTONEG_SPEED))
1609			bnx2_disable_forced_2g5(bp);
1610
1611		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
1612			u32 bmcr;
1613
1614			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1615			bmcr |= BMCR_ANENABLE;
1616			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1617
1618			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
1619		}
1620		bp->link_up = 0;
1621	}
1622
1623	if (bp->link_up != link_up) {
1624		bnx2_report_link(bp);
1625	}
1626
1627	bnx2_set_mac_link(bp);
1628
1629	return 0;
1630}
1631
1632static int
1633bnx2_reset_phy(struct bnx2 *bp)
1634{
1635	int i;
1636	u32 reg;
1637
1638        bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1639
1640#define PHY_RESET_MAX_WAIT 100
1641	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1642		udelay(10);
1643
1644		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1645		if (!(reg & BMCR_RESET)) {
1646			udelay(20);
1647			break;
1648		}
1649	}
1650	if (i == PHY_RESET_MAX_WAIT) {
1651		return -EBUSY;
1652	}
1653	return 0;
1654}
1655
1656static u32
1657bnx2_phy_get_pause_adv(struct bnx2 *bp)
1658{
1659	u32 adv = 0;
1660
1661	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1662		(FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1663
1664		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1665			adv = ADVERTISE_1000XPAUSE;
1666		}
1667		else {
1668			adv = ADVERTISE_PAUSE_CAP;
1669		}
1670	}
1671	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1672		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1673			adv = ADVERTISE_1000XPSE_ASYM;
1674		}
1675		else {
1676			adv = ADVERTISE_PAUSE_ASYM;
1677		}
1678	}
1679	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1680		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1681			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1682		}
1683		else {
1684			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1685		}
1686	}
1687	return adv;
1688}
1689
1690static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1691
1692static int
1693bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1694__releases(&bp->phy_lock)
1695__acquires(&bp->phy_lock)
1696{
1697	u32 speed_arg = 0, pause_adv;
1698
1699	pause_adv = bnx2_phy_get_pause_adv(bp);
1700
1701	if (bp->autoneg & AUTONEG_SPEED) {
1702		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1703		if (bp->advertising & ADVERTISED_10baseT_Half)
1704			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1705		if (bp->advertising & ADVERTISED_10baseT_Full)
1706			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1707		if (bp->advertising & ADVERTISED_100baseT_Half)
1708			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1709		if (bp->advertising & ADVERTISED_100baseT_Full)
1710			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1711		if (bp->advertising & ADVERTISED_1000baseT_Full)
1712			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1713		if (bp->advertising & ADVERTISED_2500baseX_Full)
1714			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1715	} else {
1716		if (bp->req_line_speed == SPEED_2500)
1717			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1718		else if (bp->req_line_speed == SPEED_1000)
1719			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1720		else if (bp->req_line_speed == SPEED_100) {
1721			if (bp->req_duplex == DUPLEX_FULL)
1722				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1723			else
1724				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1725		} else if (bp->req_line_speed == SPEED_10) {
1726			if (bp->req_duplex == DUPLEX_FULL)
1727				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1728			else
1729				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1730		}
1731	}
1732
1733	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1734		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1735	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
1736		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1737
1738	if (port == PORT_TP)
1739		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1740			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1741
1742	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);
1743
1744	spin_unlock_bh(&bp->phy_lock);
1745	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
1746	spin_lock_bh(&bp->phy_lock);
1747
1748	return 0;
1749}
1750
1751static int
1752bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
1753__releases(&bp->phy_lock)
1754__acquires(&bp->phy_lock)
1755{
1756	u32 adv, bmcr;
1757	u32 new_adv = 0;
1758
1759	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1760		return (bnx2_setup_remote_phy(bp, port));
1761
1762	if (!(bp->autoneg & AUTONEG_SPEED)) {
1763		u32 new_bmcr;
1764		int force_link_down = 0;
1765
1766		if (bp->req_line_speed == SPEED_2500) {
1767			if (!bnx2_test_and_enable_2g5(bp))
1768				force_link_down = 1;
1769		} else if (bp->req_line_speed == SPEED_1000) {
1770			if (bnx2_test_and_disable_2g5(bp))
1771				force_link_down = 1;
1772		}
1773		bnx2_read_phy(bp, bp->mii_adv, &adv);
1774		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1775
1776		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1777		new_bmcr = bmcr & ~BMCR_ANENABLE;
1778		new_bmcr |= BMCR_SPEED1000;
1779
1780		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1781			if (bp->req_line_speed == SPEED_2500)
1782				bnx2_enable_forced_2g5(bp);
1783			else if (bp->req_line_speed == SPEED_1000) {
1784				bnx2_disable_forced_2g5(bp);
1785				new_bmcr &= ~0x2000;
1786			}
1787
1788		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1789			if (bp->req_line_speed == SPEED_2500)
1790				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1791			else
1792				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
1793		}
1794
1795		if (bp->req_duplex == DUPLEX_FULL) {
1796			adv |= ADVERTISE_1000XFULL;
1797			new_bmcr |= BMCR_FULLDPLX;
1798		}
1799		else {
1800			adv |= ADVERTISE_1000XHALF;
1801			new_bmcr &= ~BMCR_FULLDPLX;
1802		}
1803		if ((new_bmcr != bmcr) || (force_link_down)) {
1804			/* Force a link down visible on the other side */
1805			if (bp->link_up) {
1806				bnx2_write_phy(bp, bp->mii_adv, adv &
1807					       ~(ADVERTISE_1000XFULL |
1808						 ADVERTISE_1000XHALF));
1809				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
1810					BMCR_ANRESTART | BMCR_ANENABLE);
1811
1812				bp->link_up = 0;
1813				netif_carrier_off(bp->dev);
1814				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1815				bnx2_report_link(bp);
1816			}
1817			bnx2_write_phy(bp, bp->mii_adv, adv);
1818			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1819		} else {
1820			bnx2_resolve_flow_ctrl(bp);
1821			bnx2_set_mac_link(bp);
1822		}
1823		return 0;
1824	}
1825
1826	bnx2_test_and_enable_2g5(bp);
1827
1828	if (bp->advertising & ADVERTISED_1000baseT_Full)
1829		new_adv |= ADVERTISE_1000XFULL;
1830
1831	new_adv |= bnx2_phy_get_pause_adv(bp);
1832
1833	bnx2_read_phy(bp, bp->mii_adv, &adv);
1834	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1835
1836	bp->serdes_an_pending = 0;
1837	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1838		/* Force a link down visible on the other side */
1839		if (bp->link_up) {
1840			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1841			spin_unlock_bh(&bp->phy_lock);
1842			msleep(20);
1843			spin_lock_bh(&bp->phy_lock);
1844		}
1845
1846		bnx2_write_phy(bp, bp->mii_adv, new_adv);
1847		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
1848			BMCR_ANENABLE);
1849		/* Speed up link-up time when the link partner
1850		 * does not autonegotiate which is very common
1851		 * in blade servers. Some blade servers use
1852		 * IPMI for kerboard input and it's important
1853		 * to minimize link disruptions. Autoneg. involves
1854		 * exchanging base pages plus 3 next pages and
1855		 * normally completes in about 120 msec.
1856		 */
1857		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
1858		bp->serdes_an_pending = 1;
1859		mod_timer(&bp->timer, jiffies + bp->current_interval);
1860	} else {
1861		bnx2_resolve_flow_ctrl(bp);
1862		bnx2_set_mac_link(bp);
1863	}
1864
1865	return 0;
1866}
1867
1868#define ETHTOOL_ALL_FIBRE_SPEED						\
1869	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
1870		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1871		(ADVERTISED_1000baseT_Full)
1872
1873#define ETHTOOL_ALL_COPPER_SPEED					\
1874	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
1875	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
1876	ADVERTISED_1000baseT_Full)
1877
1878#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1879	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1880
1881#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1882
1883static void
1884bnx2_set_default_remote_link(struct bnx2 *bp)
1885{
1886	u32 link;
1887
1888	if (bp->phy_port == PORT_TP)
1889		link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1890	else
1891		link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1892
1893	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1894		bp->req_line_speed = 0;
1895		bp->autoneg |= AUTONEG_SPEED;
1896		bp->advertising = ADVERTISED_Autoneg;
1897		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1898			bp->advertising |= ADVERTISED_10baseT_Half;
1899		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1900			bp->advertising |= ADVERTISED_10baseT_Full;
1901		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1902			bp->advertising |= ADVERTISED_100baseT_Half;
1903		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1904			bp->advertising |= ADVERTISED_100baseT_Full;
1905		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1906			bp->advertising |= ADVERTISED_1000baseT_Full;
1907		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1908			bp->advertising |= ADVERTISED_2500baseX_Full;
1909	} else {
1910		bp->autoneg = 0;
1911		bp->advertising = 0;
1912		bp->req_duplex = DUPLEX_FULL;
1913		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1914			bp->req_line_speed = SPEED_10;
1915			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1916				bp->req_duplex = DUPLEX_HALF;
1917		}
1918		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1919			bp->req_line_speed = SPEED_100;
1920			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1921				bp->req_duplex = DUPLEX_HALF;
1922		}
1923		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1924			bp->req_line_speed = SPEED_1000;
1925		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1926			bp->req_line_speed = SPEED_2500;
1927	}
1928}
1929
1930static void
1931bnx2_set_default_link(struct bnx2 *bp)
1932{
1933	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1934		bnx2_set_default_remote_link(bp);
1935		return;
1936	}
1937
1938	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1939	bp->req_line_speed = 0;
1940	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1941		u32 reg;
1942
1943		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1944
1945		reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1946		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1947		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1948			bp->autoneg = 0;
1949			bp->req_line_speed = bp->line_speed = SPEED_1000;
1950			bp->req_duplex = DUPLEX_FULL;
1951		}
1952	} else
1953		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1954}
1955
1956static void
1957bnx2_send_heart_beat(struct bnx2 *bp)
1958{
1959	u32 msg;
1960	u32 addr;
1961
1962	spin_lock(&bp->indirect_lock);
1963	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1964	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1965	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1966	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1967	spin_unlock(&bp->indirect_lock);
1968}
1969
1970static void
1971bnx2_remote_phy_event(struct bnx2 *bp)
1972{
1973	u32 msg;
1974	u8 link_up = bp->link_up;
1975	u8 old_port;
1976
1977	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
1978
1979	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1980		bnx2_send_heart_beat(bp);
1981
1982	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1983
1984	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1985		bp->link_up = 0;
1986	else {
1987		u32 speed;
1988
1989		bp->link_up = 1;
1990		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1991		bp->duplex = DUPLEX_FULL;
1992		switch (speed) {
1993			case BNX2_LINK_STATUS_10HALF:
1994				bp->duplex = DUPLEX_HALF;
1995			case BNX2_LINK_STATUS_10FULL:
1996				bp->line_speed = SPEED_10;
1997				break;
1998			case BNX2_LINK_STATUS_100HALF:
1999				bp->duplex = DUPLEX_HALF;
2000			case BNX2_LINK_STATUS_100BASE_T4:
2001			case BNX2_LINK_STATUS_100FULL:
2002				bp->line_speed = SPEED_100;
2003				break;
2004			case BNX2_LINK_STATUS_1000HALF:
2005				bp->duplex = DUPLEX_HALF;
2006			case BNX2_LINK_STATUS_1000FULL:
2007				bp->line_speed = SPEED_1000;
2008				break;
2009			case BNX2_LINK_STATUS_2500HALF:
2010				bp->duplex = DUPLEX_HALF;
2011			case BNX2_LINK_STATUS_2500FULL:
2012				bp->line_speed = SPEED_2500;
2013				break;
2014			default:
2015				bp->line_speed = 0;
2016				break;
2017		}
2018
2019		bp->flow_ctrl = 0;
2020		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
2021		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
2022			if (bp->duplex == DUPLEX_FULL)
2023				bp->flow_ctrl = bp->req_flow_ctrl;
2024		} else {
2025			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
2026				bp->flow_ctrl |= FLOW_CTRL_TX;
2027			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
2028				bp->flow_ctrl |= FLOW_CTRL_RX;
2029		}
2030
2031		old_port = bp->phy_port;
2032		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
2033			bp->phy_port = PORT_FIBRE;
2034		else
2035			bp->phy_port = PORT_TP;
2036
2037		if (old_port != bp->phy_port)
2038			bnx2_set_default_link(bp);
2039
2040	}
2041	if (bp->link_up != link_up)
2042		bnx2_report_link(bp);
2043
2044	bnx2_set_mac_link(bp);
2045}
2046
2047static int
2048bnx2_set_remote_link(struct bnx2 *bp)
2049{
2050	u32 evt_code;
2051
2052	evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2053	switch (evt_code) {
2054		case BNX2_FW_EVT_CODE_LINK_EVENT:
2055			bnx2_remote_phy_event(bp);
2056			break;
2057		case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2058		default:
2059			bnx2_send_heart_beat(bp);
2060			break;
2061	}
2062	return 0;
2063}
2064
2065static int
2066bnx2_setup_copper_phy(struct bnx2 *bp)
2067__releases(&bp->phy_lock)
2068__acquires(&bp->phy_lock)
2069{
2070	u32 bmcr;
2071	u32 new_bmcr;
2072
2073	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
2074
2075	if (bp->autoneg & AUTONEG_SPEED) {
2076		u32 adv_reg, adv1000_reg;
2077		u32 new_adv_reg = 0;
2078		u32 new_adv1000_reg = 0;
2079
2080		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
2081		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
2082			ADVERTISE_PAUSE_ASYM);
2083
2084		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
2085		adv1000_reg &= PHY_ALL_1000_SPEED;
2086
2087		if (bp->advertising & ADVERTISED_10baseT_Half)
2088			new_adv_reg |= ADVERTISE_10HALF;
2089		if (bp->advertising & ADVERTISED_10baseT_Full)
2090			new_adv_reg |= ADVERTISE_10FULL;
2091		if (bp->advertising & ADVERTISED_100baseT_Half)
2092			new_adv_reg |= ADVERTISE_100HALF;
2093		if (bp->advertising & ADVERTISED_100baseT_Full)
2094			new_adv_reg |= ADVERTISE_100FULL;
2095		if (bp->advertising & ADVERTISED_1000baseT_Full)
2096			new_adv1000_reg |= ADVERTISE_1000FULL;
2097
2098		new_adv_reg |= ADVERTISE_CSMA;
2099
2100		new_adv_reg |= bnx2_phy_get_pause_adv(bp);
2101
2102		if ((adv1000_reg != new_adv1000_reg) ||
2103			(adv_reg != new_adv_reg) ||
2104			((bmcr & BMCR_ANENABLE) == 0)) {
2105
2106			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
2107			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
2108			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
2109				BMCR_ANENABLE);
2110		}
2111		else if (bp->link_up) {
2112			/* Flow ctrl may have changed from auto to forced */
2113			/* or vice-versa. */
2114
2115			bnx2_resolve_flow_ctrl(bp);
2116			bnx2_set_mac_link(bp);
2117		}
2118		return 0;
2119	}
2120
2121	new_bmcr = 0;
2122	if (bp->req_line_speed == SPEED_100) {
2123		new_bmcr |= BMCR_SPEED100;
2124	}
2125	if (bp->req_duplex == DUPLEX_FULL) {
2126		new_bmcr |= BMCR_FULLDPLX;
2127	}
2128	if (new_bmcr != bmcr) {
2129		u32 bmsr;
2130
2131		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2132		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2133
2134		if (bmsr & BMSR_LSTATUS) {
2135			/* Force link down */
2136			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
2137			spin_unlock_bh(&bp->phy_lock);
2138			msleep(50);
2139			spin_lock_bh(&bp->phy_lock);
2140
2141			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2142			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2143		}
2144
2145		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
2146
2147		/* Normally, the new speed is setup after the link has
2148		 * gone down and up again. In some cases, link will not go
2149		 * down so we need to set up the new speed here.
2150		 */
2151		if (bmsr & BMSR_LSTATUS) {
2152			bp->line_speed = bp->req_line_speed;
2153			bp->duplex = bp->req_duplex;
2154			bnx2_resolve_flow_ctrl(bp);
2155			bnx2_set_mac_link(bp);
2156		}
2157	} else {
2158		bnx2_resolve_flow_ctrl(bp);
2159		bnx2_set_mac_link(bp);
2160	}
2161	return 0;
2162}
2163
2164static int
2165bnx2_setup_phy(struct bnx2 *bp, u8 port)
2166__releases(&bp->phy_lock)
2167__acquires(&bp->phy_lock)
2168{
2169	if (bp->loopback == MAC_LOOPBACK)
2170		return 0;
2171
2172	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2173		return (bnx2_setup_serdes_phy(bp, port));
2174	}
2175	else {
2176		return (bnx2_setup_copper_phy(bp));
2177	}
2178}
2179
2180static int
2181bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
2182{
2183	u32 val;
2184
2185	bp->mii_bmcr = MII_BMCR + 0x10;
2186	bp->mii_bmsr = MII_BMSR + 0x10;
2187	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
2188	bp->mii_adv = MII_ADVERTISE + 0x10;
2189	bp->mii_lpa = MII_LPA + 0x10;
2190	bp->mii_up1 = MII_BNX2_OVER1G_UP1;
2191
2192	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
2193	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
2194
2195	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2196	if (reset_phy)
2197		bnx2_reset_phy(bp);
2198
2199	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
2200
2201	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
2202	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
2203	val |= MII_BNX2_SD_1000XCTL1_FIBER;
2204	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
2205
2206	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
2207	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
2208	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
2209		val |= BCM5708S_UP1_2G5;
2210	else
2211		val &= ~BCM5708S_UP1_2G5;
2212	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
2213
2214	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
2215	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
2216	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
2217	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
2218
2219	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
2220
2221	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
2222	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
2223	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
2224
2225	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2226
2227	return 0;
2228}
2229
2230static int
2231bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
2232{
2233	u32 val;
2234
2235	if (reset_phy)
2236		bnx2_reset_phy(bp);
2237
2238	bp->mii_up1 = BCM5708S_UP1;
2239
2240	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
2241	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
2242	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2243
2244	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
2245	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
2246	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
2247
2248	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
2249	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
2250	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
2251
2252	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
2253		bnx2_read_phy(bp, BCM5708S_UP1, &val);
2254		val |= BCM5708S_UP1_2G5;
2255		bnx2_write_phy(bp, BCM5708S_UP1, val);
2256	}
2257
2258	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
2259	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
2260	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
2261		/* increase tx signal amplitude */
2262		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2263			       BCM5708S_BLK_ADDR_TX_MISC);
2264		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
2265		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
2266		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
2267		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2268	}
2269
2270	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
2271	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
2272
2273	if (val) {
2274		u32 is_backplane;
2275
2276		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
2277		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
2278			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2279				       BCM5708S_BLK_ADDR_TX_MISC);
2280			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
2281			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2282				       BCM5708S_BLK_ADDR_DIG);
2283		}
2284	}
2285	return 0;
2286}
2287
2288static int
2289bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
2290{
2291	if (reset_phy)
2292		bnx2_reset_phy(bp);
2293
2294	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
2295
2296	if (CHIP_NUM(bp) == CHIP_NUM_5706)
2297        	REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
2298
2299	if (bp->dev->mtu > 1500) {
2300		u32 val;
2301
2302		/* Set extended packet length bit */
2303		bnx2_write_phy(bp, 0x18, 0x7);
2304		bnx2_read_phy(bp, 0x18, &val);
2305		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
2306
2307		bnx2_write_phy(bp, 0x1c, 0x6c00);
2308		bnx2_read_phy(bp, 0x1c, &val);
2309		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
2310	}
2311	else {
2312		u32 val;
2313
2314		bnx2_write_phy(bp, 0x18, 0x7);
2315		bnx2_read_phy(bp, 0x18, &val);
2316		bnx2_write_phy(bp, 0x18, val & ~0x4007);
2317
2318		bnx2_write_phy(bp, 0x1c, 0x6c00);
2319		bnx2_read_phy(bp, 0x1c, &val);
2320		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
2321	}
2322
2323	return 0;
2324}
2325
2326static int
2327bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
2328{
2329	u32 val;
2330
2331	if (reset_phy)
2332		bnx2_reset_phy(bp);
2333
2334	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
2335		bnx2_write_phy(bp, 0x18, 0x0c00);
2336		bnx2_write_phy(bp, 0x17, 0x000a);
2337		bnx2_write_phy(bp, 0x15, 0x310b);
2338		bnx2_write_phy(bp, 0x17, 0x201f);
2339		bnx2_write_phy(bp, 0x15, 0x9506);
2340		bnx2_write_phy(bp, 0x17, 0x401f);
2341		bnx2_write_phy(bp, 0x15, 0x14e2);
2342		bnx2_write_phy(bp, 0x18, 0x0400);
2343	}
2344
2345	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
2346		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
2347			       MII_BNX2_DSP_EXPAND_REG | 0x8);
2348		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
2349		val &= ~(1 << 8);
2350		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
2351	}
2352
2353	if (bp->dev->mtu > 1500) {
2354		/* Set extended packet length bit */
2355		bnx2_write_phy(bp, 0x18, 0x7);
2356		bnx2_read_phy(bp, 0x18, &val);
2357		bnx2_write_phy(bp, 0x18, val | 0x4000);
2358
2359		bnx2_read_phy(bp, 0x10, &val);
2360		bnx2_write_phy(bp, 0x10, val | 0x1);
2361	}
2362	else {
2363		bnx2_write_phy(bp, 0x18, 0x7);
2364		bnx2_read_phy(bp, 0x18, &val);
2365		bnx2_write_phy(bp, 0x18, val & ~0x4007);
2366
2367		bnx2_read_phy(bp, 0x10, &val);
2368		bnx2_write_phy(bp, 0x10, val & ~0x1);
2369	}
2370
2371	/* ethernet@wirespeed */
2372	bnx2_write_phy(bp, 0x18, 0x7007);
2373	bnx2_read_phy(bp, 0x18, &val);
2374	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
2375	return 0;
2376}
2377
2378
2379static int
2380bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2381__releases(&bp->phy_lock)
2382__acquires(&bp->phy_lock)
2383{
2384	u32 val;
2385	int rc = 0;
2386
2387	bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2388	bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2389
2390	bp->mii_bmcr = MII_BMCR;
2391	bp->mii_bmsr = MII_BMSR;
2392	bp->mii_bmsr1 = MII_BMSR;
2393	bp->mii_adv = MII_ADVERTISE;
2394	bp->mii_lpa = MII_LPA;
2395
2396        REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2397
2398	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2399		goto setup_phy;
2400
2401	bnx2_read_phy(bp, MII_PHYSID1, &val);
2402	bp->phy_id = val << 16;
2403	bnx2_read_phy(bp, MII_PHYSID2, &val);
2404	bp->phy_id |= val & 0xffff;
2405
2406	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2407		if (CHIP_NUM(bp) == CHIP_NUM_5706)
2408			rc = bnx2_init_5706s_phy(bp, reset_phy);
2409		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2410			rc = bnx2_init_5708s_phy(bp, reset_phy);
2411		else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2412			rc = bnx2_init_5709s_phy(bp, reset_phy);
2413	}
2414	else {
2415		rc = bnx2_init_copper_phy(bp, reset_phy);
2416	}
2417
2418setup_phy:
2419	if (!rc)
2420		rc = bnx2_setup_phy(bp, bp->phy_port);
2421
2422	return rc;
2423}
2424
2425static int
2426bnx2_set_mac_loopback(struct bnx2 *bp)
2427{
2428	u32 mac_mode;
2429
2430	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2431	mac_mode &= ~BNX2_EMAC_MODE_PORT;
2432	mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2433	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2434	bp->link_up = 1;
2435	return 0;
2436}
2437
2438static int bnx2_test_link(struct bnx2 *);
2439
2440static int
2441bnx2_set_phy_loopback(struct bnx2 *bp)
2442{
2443	u32 mac_mode;
2444	int rc, i;
2445
2446	spin_lock_bh(&bp->phy_lock);
2447	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2448			    BMCR_SPEED1000);
2449	spin_unlock_bh(&bp->phy_lock);
2450	if (rc)
2451		return rc;
2452
2453	for (i = 0; i < 10; i++) {
2454		if (bnx2_test_link(bp) == 0)
2455			break;
2456		msleep(100);
2457	}
2458
2459	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2460	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2461		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2462		      BNX2_EMAC_MODE_25G_MODE);
2463
2464	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2465	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2466	bp->link_up = 1;
2467	return 0;
2468}
2469
2470static int
2471bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
2472{
2473	int i;
2474	u32 val;
2475
2476	bp->fw_wr_seq++;
2477	msg_data |= bp->fw_wr_seq;
2478
2479	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2480
2481	if (!ack)
2482		return 0;
2483
2484	/* wait for an acknowledgement. */
2485	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
2486		msleep(10);
2487
2488		val = bnx2_shmem_rd(bp, BNX2_FW_MB);
2489
2490		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2491			break;
2492	}
2493	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2494		return 0;
2495
2496	/* If we timed out, inform the firmware that this is the case. */
2497	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2498		if (!silent)
2499			pr_err("fw sync timeout, reset code = %x\n", msg_data);
2500
2501		msg_data &= ~BNX2_DRV_MSG_CODE;
2502		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2503
2504		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2505
2506		return -EBUSY;
2507	}
2508
2509	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2510		return -EIO;
2511
2512	return 0;
2513}
2514
2515static int
2516bnx2_init_5709_context(struct bnx2 *bp)
2517{
2518	int i, ret = 0;
2519	u32 val;
2520
2521	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2522	val |= (BCM_PAGE_BITS - 8) << 16;
2523	REG_WR(bp, BNX2_CTX_COMMAND, val);
2524	for (i = 0; i < 10; i++) {
2525		val = REG_RD(bp, BNX2_CTX_COMMAND);
2526		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2527			break;
2528		udelay(2);
2529	}
2530	if (val & BNX2_CTX_COMMAND_MEM_INIT)
2531		return -EBUSY;
2532
2533	for (i = 0; i < bp->ctx_pages; i++) {
2534		int j;
2535
2536		if (bp->ctx_blk[i])
2537			memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
2538		else
2539			return -ENOMEM;
2540
2541		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2542		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
2543		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2544		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2545		       (u64) bp->ctx_blk_mapping[i] >> 32);
2546		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2547		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2548		for (j = 0; j < 10; j++) {
2549
2550			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2551			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2552				break;
2553			udelay(5);
2554		}
2555		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2556			ret = -EBUSY;
2557			break;
2558		}
2559	}
2560	return ret;
2561}
2562
2563static void
2564bnx2_init_context(struct bnx2 *bp)
2565{
2566	u32 vcid;
2567
2568	vcid = 96;
2569	while (vcid) {
2570		u32 vcid_addr, pcid_addr, offset;
2571		int i;
2572
2573		vcid--;
2574
2575		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2576			u32 new_vcid;
2577
2578			vcid_addr = GET_PCID_ADDR(vcid);
2579			if (vcid & 0x8) {
2580				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2581			}
2582			else {
2583				new_vcid = vcid;
2584			}
2585			pcid_addr = GET_PCID_ADDR(new_vcid);
2586		}
2587		else {
2588	    		vcid_addr = GET_CID_ADDR(vcid);
2589			pcid_addr = vcid_addr;
2590		}
2591
2592		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2593			vcid_addr += (i << PHY_CTX_SHIFT);
2594			pcid_addr += (i << PHY_CTX_SHIFT);
2595
2596			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2597			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2598
2599			/* Zero out the context. */
2600			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2601				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
2602		}
2603	}
2604}
2605
2606static int
2607bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2608{
2609	u16 *good_mbuf;
2610	u32 good_mbuf_cnt;
2611	u32 val;
2612
2613	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2614	if (good_mbuf == NULL) {
2615		pr_err("Failed to allocate memory in %s\n", __func__);
2616		return -ENOMEM;
2617	}
2618
2619	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2620		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2621
2622	good_mbuf_cnt = 0;
2623
2624	/* Allocate a bunch of mbufs and save the good ones in an array. */
2625	val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2626	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2627		bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2628				BNX2_RBUF_COMMAND_ALLOC_REQ);
2629
2630		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2631
2632		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2633
2634		/* The addresses with Bit 9 set are bad memory blocks. */
2635		if (!(val & (1 << 9))) {
2636			good_mbuf[good_mbuf_cnt] = (u16) val;
2637			good_mbuf_cnt++;
2638		}
2639
2640		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2641	}
2642
2643	/* Free the good ones back to the mbuf pool thus discarding
2644	 * all the bad ones. */
2645	while (good_mbuf_cnt) {
2646		good_mbuf_cnt--;
2647
2648		val = good_mbuf[good_mbuf_cnt];
2649		val = (val << 9) | val | 1;
2650
2651		bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2652	}
2653	kfree(good_mbuf);
2654	return 0;
2655}
2656
2657static void
2658bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2659{
2660	u32 val;
2661
2662	val = (mac_addr[0] << 8) | mac_addr[1];
2663
2664	REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2665
2666	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2667		(mac_addr[4] << 8) | mac_addr[5];
2668
2669	REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2670}
2671
2672static inline int
2673bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2674{
2675	dma_addr_t mapping;
2676	struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2677	struct rx_bd *rxbd =
2678		&rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2679	struct page *page = alloc_page(gfp);
2680
2681	if (!page)
2682		return -ENOMEM;
2683	mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
2684			       PCI_DMA_FROMDEVICE);
2685	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2686		__free_page(page);
2687		return -EIO;
2688	}
2689
2690	rx_pg->page = page;
2691	dma_unmap_addr_set(rx_pg, mapping, mapping);
2692	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2693	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2694	return 0;
2695}
2696
2697static void
2698bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2699{
2700	struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2701	struct page *page = rx_pg->page;
2702
2703	if (!page)
2704		return;
2705
2706	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
2707		       PAGE_SIZE, PCI_DMA_FROMDEVICE);
2708
2709	__free_page(page);
2710	rx_pg->page = NULL;
2711}
2712
2713static inline int
2714bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2715{
2716	struct sk_buff *skb;
2717	struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2718	dma_addr_t mapping;
2719	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2720	unsigned long align;
2721
2722	skb = __netdev_alloc_skb(bp->dev, bp->rx_buf_size, gfp);
2723	if (skb == NULL) {
2724		return -ENOMEM;
2725	}
2726
2727	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2728		skb_reserve(skb, BNX2_RX_ALIGN - align);
2729
2730	mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_use_size,
2731				 PCI_DMA_FROMDEVICE);
2732	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2733		dev_kfree_skb(skb);
2734		return -EIO;
2735	}
2736
2737	rx_buf->skb = skb;
2738	rx_buf->desc = (struct l2_fhdr *) skb->data;
2739	dma_unmap_addr_set(rx_buf, mapping, mapping);
2740
2741	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2742	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2743
2744	rxr->rx_prod_bseq += bp->rx_buf_use_size;
2745
2746	return 0;
2747}
2748
2749static int
2750bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2751{
2752	struct status_block *sblk = bnapi->status_blk.msi;
2753	u32 new_link_state, old_link_state;
2754	int is_set = 1;
2755
2756	new_link_state = sblk->status_attn_bits & event;
2757	old_link_state = sblk->status_attn_bits_ack & event;
2758	if (new_link_state != old_link_state) {
2759		if (new_link_state)
2760			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2761		else
2762			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2763	} else
2764		is_set = 0;
2765
2766	return is_set;
2767}
2768
2769static void
2770bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2771{
2772	spin_lock(&bp->phy_lock);
2773
2774	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2775		bnx2_set_link(bp);
2776	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2777		bnx2_set_remote_link(bp);
2778
2779	spin_unlock(&bp->phy_lock);
2780
2781}
2782
2783static inline u16
2784bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2785{
2786	u16 cons;
2787
2788	/* Tell compiler that status block fields can change. */
2789	barrier();
2790	cons = *bnapi->hw_tx_cons_ptr;
2791	barrier();
2792	if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2793		cons++;
2794	return cons;
2795}
2796
2797static int
2798bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2799{
2800	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
2801	u16 hw_cons, sw_cons, sw_ring_cons;
2802	int tx_pkt = 0, index;
2803	struct netdev_queue *txq;
2804
2805	index = (bnapi - bp->bnx2_napi);
2806	txq = netdev_get_tx_queue(bp->dev, index);
2807
2808	hw_cons = bnx2_get_hw_tx_cons(bnapi);
2809	sw_cons = txr->tx_cons;
2810
2811	while (sw_cons != hw_cons) {
2812		struct sw_tx_bd *tx_buf;
2813		struct sk_buff *skb;
2814		int i, last;
2815
2816		sw_ring_cons = TX_RING_IDX(sw_cons);
2817
2818		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
2819		skb = tx_buf->skb;
2820
2821		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
2822		prefetch(&skb->end);
2823
2824		/* partial BD completions possible with TSO packets */
2825		if (tx_buf->is_gso) {
2826			u16 last_idx, last_ring_idx;
2827
2828			last_idx = sw_cons + tx_buf->nr_frags + 1;
2829			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
2830			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2831				last_idx++;
2832			}
2833			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2834				break;
2835			}
2836		}
2837
2838		dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
2839			skb_headlen(skb), PCI_DMA_TODEVICE);
2840
2841		tx_buf->skb = NULL;
2842		last = tx_buf->nr_frags;
2843
2844		for (i = 0; i < last; i++) {
2845			sw_cons = NEXT_TX_BD(sw_cons);
2846
2847			dma_unmap_page(&bp->pdev->dev,
2848				dma_unmap_addr(
2849					&txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
2850					mapping),
2851				skb_shinfo(skb)->frags[i].size,
2852				PCI_DMA_TODEVICE);
2853		}
2854
2855		sw_cons = NEXT_TX_BD(sw_cons);
2856
2857		dev_kfree_skb(skb);
2858		tx_pkt++;
2859		if (tx_pkt == budget)
2860			break;
2861
2862		if (hw_cons == sw_cons)
2863			hw_cons = bnx2_get_hw_tx_cons(bnapi);
2864	}
2865
2866	txr->hw_tx_cons = hw_cons;
2867	txr->tx_cons = sw_cons;
2868
2869	/* Need to make the tx_cons update visible to bnx2_start_xmit()
2870	 * before checking for netif_tx_queue_stopped().  Without the
2871	 * memory barrier, there is a small possibility that bnx2_start_xmit()
2872	 * will miss it and cause the queue to be stopped forever.
2873	 */
2874	smp_mb();
2875
2876	if (unlikely(netif_tx_queue_stopped(txq)) &&
2877		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
2878		__netif_tx_lock(txq, smp_processor_id());
2879		if ((netif_tx_queue_stopped(txq)) &&
2880		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
2881			netif_tx_wake_queue(txq);
2882		__netif_tx_unlock(txq);
2883	}
2884
2885	return tx_pkt;
2886}
2887
2888static void
2889bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2890			struct sk_buff *skb, int count)
2891{
2892	struct sw_pg *cons_rx_pg, *prod_rx_pg;
2893	struct rx_bd *cons_bd, *prod_bd;
2894	int i;
2895	u16 hw_prod, prod;
2896	u16 cons = rxr->rx_pg_cons;
2897
2898	cons_rx_pg = &rxr->rx_pg_ring[cons];
2899
2900	/* The caller was unable to allocate a new page to replace the
2901	 * last one in the frags array, so we need to recycle that page
2902	 * and then free the skb.
2903	 */
2904	if (skb) {
2905		struct page *page;
2906		struct skb_shared_info *shinfo;
2907
2908		shinfo = skb_shinfo(skb);
2909		shinfo->nr_frags--;
2910		page = shinfo->frags[shinfo->nr_frags].page;
2911		shinfo->frags[shinfo->nr_frags].page = NULL;
2912
2913		cons_rx_pg->page = page;
2914		dev_kfree_skb(skb);
2915	}
2916
2917	hw_prod = rxr->rx_pg_prod;
2918
2919	for (i = 0; i < count; i++) {
2920		prod = RX_PG_RING_IDX(hw_prod);
2921
2922		prod_rx_pg = &rxr->rx_pg_ring[prod];
2923		cons_rx_pg = &rxr->rx_pg_ring[cons];
2924		cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2925		prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2926
2927		if (prod != cons) {
2928			prod_rx_pg->page = cons_rx_pg->page;
2929			cons_rx_pg->page = NULL;
2930			dma_unmap_addr_set(prod_rx_pg, mapping,
2931				dma_unmap_addr(cons_rx_pg, mapping));
2932
2933			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2934			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2935
2936		}
2937		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
2938		hw_prod = NEXT_RX_BD(hw_prod);
2939	}
2940	rxr->rx_pg_prod = hw_prod;
2941	rxr->rx_pg_cons = cons;
2942}
2943
2944static inline void
2945bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2946		  struct sk_buff *skb, u16 cons, u16 prod)
2947{
2948	struct sw_bd *cons_rx_buf, *prod_rx_buf;
2949	struct rx_bd *cons_bd, *prod_bd;
2950
2951	cons_rx_buf = &rxr->rx_buf_ring[cons];
2952	prod_rx_buf = &rxr->rx_buf_ring[prod];
2953
2954	dma_sync_single_for_device(&bp->pdev->dev,
2955		dma_unmap_addr(cons_rx_buf, mapping),
2956		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2957
2958	rxr->rx_prod_bseq += bp->rx_buf_use_size;
2959
2960	prod_rx_buf->skb = skb;
2961	prod_rx_buf->desc = (struct l2_fhdr *) skb->data;
2962
2963	if (cons == prod)
2964		return;
2965
2966	dma_unmap_addr_set(prod_rx_buf, mapping,
2967			dma_unmap_addr(cons_rx_buf, mapping));
2968
2969	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2970	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2971	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2972	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2973}
2974
2975static int
2976bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
2977	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
2978	    u32 ring_idx)
2979{
2980	int err;
2981	u16 prod = ring_idx & 0xffff;
2982
2983	err = bnx2_alloc_rx_skb(bp, rxr, prod, GFP_ATOMIC);
2984	if (unlikely(err)) {
2985		bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
2986		if (hdr_len) {
2987			unsigned int raw_len = len + 4;
2988			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
2989
2990			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
2991		}
2992		return err;
2993	}
2994
2995	skb_reserve(skb, BNX2_RX_OFFSET);
2996	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
2997			 PCI_DMA_FROMDEVICE);
2998
2999	if (hdr_len == 0) {
3000		skb_put(skb, len);
3001		return 0;
3002	} else {
3003		unsigned int i, frag_len, frag_size, pages;
3004		struct sw_pg *rx_pg;
3005		u16 pg_cons = rxr->rx_pg_cons;
3006		u16 pg_prod = rxr->rx_pg_prod;
3007
3008		frag_size = len + 4 - hdr_len;
3009		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
3010		skb_put(skb, hdr_len);
3011
3012		for (i = 0; i < pages; i++) {
3013			dma_addr_t mapping_old;
3014
3015			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
3016			if (unlikely(frag_len <= 4)) {
3017				unsigned int tail = 4 - frag_len;
3018
3019				rxr->rx_pg_cons = pg_cons;
3020				rxr->rx_pg_prod = pg_prod;
3021				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
3022							pages - i);
3023				skb->len -= tail;
3024				if (i == 0) {
3025					skb->tail -= tail;
3026				} else {
3027					skb_frag_t *frag =
3028						&skb_shinfo(skb)->frags[i - 1];
3029					frag->size -= tail;
3030					skb->data_len -= tail;
3031					skb->truesize -= tail;
3032				}
3033				return 0;
3034			}
3035			rx_pg = &rxr->rx_pg_ring[pg_cons];
3036
3037			/* Don't unmap yet.  If we're unable to allocate a new
3038			 * page, we need to recycle the page and the DMA addr.
3039			 */
3040			mapping_old = dma_unmap_addr(rx_pg, mapping);
3041			if (i == pages - 1)
3042				frag_len -= 4;
3043
3044			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
3045			rx_pg->page = NULL;
3046
3047			err = bnx2_alloc_rx_page(bp, rxr,
3048						 RX_PG_RING_IDX(pg_prod),
3049						 GFP_ATOMIC);
3050			if (unlikely(err)) {
3051				rxr->rx_pg_cons = pg_cons;
3052				rxr->rx_pg_prod = pg_prod;
3053				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
3054							pages - i);
3055				return err;
3056			}
3057
3058			dma_unmap_page(&bp->pdev->dev, mapping_old,
3059				       PAGE_SIZE, PCI_DMA_FROMDEVICE);
3060
3061			frag_size -= frag_len;
3062			skb->data_len += frag_len;
3063			skb->truesize += frag_len;
3064			skb->len += frag_len;
3065
3066			pg_prod = NEXT_RX_BD(pg_prod);
3067			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
3068		}
3069		rxr->rx_pg_prod = pg_prod;
3070		rxr->rx_pg_cons = pg_cons;
3071	}
3072	return 0;
3073}
3074
3075static inline u16
3076bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3077{
3078	u16 cons;
3079
3080	/* Tell compiler that status block fields can change. */
3081	barrier();
3082	cons = *bnapi->hw_rx_cons_ptr;
3083	barrier();
3084	if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
3085		cons++;
3086	return cons;
3087}
3088
3089static int
3090bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3091{
3092	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3093	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
3094	struct l2_fhdr *rx_hdr;
3095	int rx_pkt = 0, pg_ring_used = 0;
3096
3097	hw_cons = bnx2_get_hw_rx_cons(bnapi);
3098	sw_cons = rxr->rx_cons;
3099	sw_prod = rxr->rx_prod;
3100
3101	/* Memory barrier necessary as speculative reads of the rx
3102	 * buffer can be ahead of the index in the status block
3103	 */
3104	rmb();
3105	while (sw_cons != hw_cons) {
3106		unsigned int len, hdr_len;
3107		u32 status;
3108		struct sw_bd *rx_buf, *next_rx_buf;
3109		struct sk_buff *skb;
3110		dma_addr_t dma_addr;
3111		u16 vtag = 0;
3112		int hw_vlan __maybe_unused = 0;
3113
3114		sw_ring_cons = RX_RING_IDX(sw_cons);
3115		sw_ring_prod = RX_RING_IDX(sw_prod);
3116
3117		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
3118		skb = rx_buf->skb;
3119		prefetchw(skb);
3120
3121		next_rx_buf =
3122			&rxr->rx_buf_ring[RX_RING_IDX(NEXT_RX_BD(sw_cons))];
3123		prefetch(next_rx_buf->desc);
3124
3125		rx_buf->skb = NULL;
3126
3127		dma_addr = dma_unmap_addr(rx_buf, mapping);
3128
3129		dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
3130			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
3131			PCI_DMA_FROMDEVICE);
3132
3133		rx_hdr = rx_buf->desc;
3134		len = rx_hdr->l2_fhdr_pkt_len;
3135		status = rx_hdr->l2_fhdr_status;
3136
3137		hdr_len = 0;
3138		if (status & L2_FHDR_STATUS_SPLIT) {
3139			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
3140			pg_ring_used = 1;
3141		} else if (len > bp->rx_jumbo_thresh) {
3142			hdr_len = bp->rx_jumbo_thresh;
3143			pg_ring_used = 1;
3144		}
3145
3146		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
3147				       L2_FHDR_ERRORS_PHY_DECODE |
3148				       L2_FHDR_ERRORS_ALIGNMENT |
3149				       L2_FHDR_ERRORS_TOO_SHORT |
3150				       L2_FHDR_ERRORS_GIANT_FRAME))) {
3151
3152			bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
3153					  sw_ring_prod);
3154			if (pg_ring_used) {
3155				int pages;
3156
3157				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;
3158
3159				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3160			}
3161			goto next_rx;
3162		}
3163
3164		len -= 4;
3165
3166		if (len <= bp->rx_copy_thresh) {
3167			struct sk_buff *new_skb;
3168
3169			new_skb = netdev_alloc_skb(bp->dev, len + 6);
3170			if (new_skb == NULL) {
3171				bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
3172						  sw_ring_prod);
3173				goto next_rx;
3174			}
3175
3176			/* aligned copy */
3177			skb_copy_from_linear_data_offset(skb,
3178							 BNX2_RX_OFFSET - 6,
3179				      new_skb->data, len + 6);
3180			skb_reserve(new_skb, 6);
3181			skb_put(new_skb, len);
3182
3183			bnx2_reuse_rx_skb(bp, rxr, skb,
3184				sw_ring_cons, sw_ring_prod);
3185
3186			skb = new_skb;
3187		} else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
3188			   dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
3189			goto next_rx;
3190
3191		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
3192		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
3193			vtag = rx_hdr->l2_fhdr_vlan_tag;
3194#ifdef BCM_VLAN
3195			if (bp->vlgrp)
3196				hw_vlan = 1;
3197			else
3198#endif
3199			{
3200				struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
3201					__skb_push(skb, 4);
3202
3203				memmove(ve, skb->data + 4, ETH_ALEN * 2);
3204				ve->h_vlan_proto = htons(ETH_P_8021Q);
3205				ve->h_vlan_TCI = htons(vtag);
3206				len += 4;
3207			}
3208		}
3209
3210		skb->protocol = eth_type_trans(skb, bp->dev);
3211
3212		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
3213			(ntohs(skb->protocol) != 0x8100)) {
3214
3215			dev_kfree_skb(skb);
3216			goto next_rx;
3217
3218		}
3219
3220		skb->ip_summed = CHECKSUM_NONE;
3221		if (bp->rx_csum &&
3222			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
3223			L2_FHDR_STATUS_UDP_DATAGRAM))) {
3224
3225			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
3226					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
3227				skb->ip_summed = CHECKSUM_UNNECESSARY;
3228		}
3229		if ((bp->dev->features & NETIF_F_RXHASH) &&
3230		    ((status & L2_FHDR_STATUS_USE_RXHASH) ==
3231		     L2_FHDR_STATUS_USE_RXHASH))
3232			skb->rxhash = rx_hdr->l2_fhdr_hash;
3233
3234		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
3235
3236#ifdef BCM_VLAN
3237		if (hw_vlan)
3238			vlan_gro_receive(&bnapi->napi, bp->vlgrp, vtag, skb);
3239		else
3240#endif
3241			napi_gro_receive(&bnapi->napi, skb);
3242
3243		rx_pkt++;
3244
3245next_rx:
3246		sw_cons = NEXT_RX_BD(sw_cons);
3247		sw_prod = NEXT_RX_BD(sw_prod);
3248
3249		if ((rx_pkt == budget))
3250			break;
3251
3252		/* Refresh hw_cons to see if there is new work */
3253		if (sw_cons == hw_cons) {
3254			hw_cons = bnx2_get_hw_rx_cons(bnapi);
3255			rmb();
3256		}
3257	}
3258	rxr->rx_cons = sw_cons;
3259	rxr->rx_prod = sw_prod;
3260
3261	if (pg_ring_used)
3262		REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
3263
3264	REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);
3265
3266	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
3267
3268	mmiowb();
3269
3270	return rx_pkt;
3271
3272}
3273
3274/* MSI ISR - The only difference between this and the INTx ISR
3275 * is that the MSI interrupt is always serviced.
3276 */
3277static irqreturn_t
3278bnx2_msi(int irq, void *dev_instance)
3279{
3280	struct bnx2_napi *bnapi = dev_instance;
3281	struct bnx2 *bp = bnapi->bp;
3282
3283	prefetch(bnapi->status_blk.msi);
3284	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3285		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3286		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3287
3288	/* Return here if interrupt is disabled. */
3289	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3290		return IRQ_HANDLED;
3291
3292	napi_schedule(&bnapi->napi);
3293
3294	return IRQ_HANDLED;
3295}
3296
3297static irqreturn_t
3298bnx2_msi_1shot(int irq, void *dev_instance)
3299{
3300	struct bnx2_napi *bnapi = dev_instance;
3301	struct bnx2 *bp = bnapi->bp;
3302
3303	prefetch(bnapi->status_blk.msi);
3304
3305	/* Return here if interrupt is disabled. */
3306	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3307		return IRQ_HANDLED;
3308
3309	napi_schedule(&bnapi->napi);
3310
3311	return IRQ_HANDLED;
3312}
3313
3314static irqreturn_t
3315bnx2_interrupt(int irq, void *dev_instance)
3316{
3317	struct bnx2_napi *bnapi = dev_instance;
3318	struct bnx2 *bp = bnapi->bp;
3319	struct status_block *sblk = bnapi->status_blk.msi;
3320
3321	/* When using INTx, it is possible for the interrupt to arrive
3322	 * at the CPU before the status block posted prior to the
3323	 * interrupt. Reading a register will flush the status block.
3324	 * When using MSI, the MSI message will always complete after
3325	 * the status block write.
3326	 */
3327	if ((sblk->status_idx == bnapi->last_status_idx) &&
3328	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
3329	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
3330		return IRQ_NONE;
3331
3332	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3333		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3334		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3335
3336	/* Read back to deassert IRQ immediately to avoid too many
3337	 * spurious interrupts.
3338	 */
3339	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
3340
3341	/* Return here if interrupt is shared and is disabled. */
3342	if (unlikely(atomic_read(&bp->intr_sem) != 0))
3343		return IRQ_HANDLED;
3344
3345	if (napi_schedule_prep(&bnapi->napi)) {
3346		bnapi->last_status_idx = sblk->status_idx;
3347		__napi_schedule(&bnapi->napi);
3348	}
3349
3350	return IRQ_HANDLED;
3351}
3352
3353static inline int
3354bnx2_has_fast_work(struct bnx2_napi *bnapi)
3355{
3356	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3357	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3358
3359	if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3360	    (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3361		return 1;
3362	return 0;
3363}
3364
3365#define STATUS_ATTN_EVENTS	(STATUS_ATTN_BITS_LINK_STATE | \
3366				 STATUS_ATTN_BITS_TIMER_ABORT)
3367
3368static inline int
3369bnx2_has_work(struct bnx2_napi *bnapi)
3370{
3371	struct status_block *sblk = bnapi->status_blk.msi;
3372
3373	if (bnx2_has_fast_work(bnapi))
3374		return 1;
3375
3376#ifdef BCM_CNIC
3377	if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3378		return 1;
3379#endif
3380
3381	if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3382	    (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3383		return 1;
3384
3385	return 0;
3386}
3387
3388static void
3389bnx2_chk_missed_msi(struct bnx2 *bp)
3390{
3391	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
3392	u32 msi_ctrl;
3393
3394	if (bnx2_has_work(bnapi)) {
3395		msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
3396		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
3397			return;
3398
3399		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
3400			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
3401			       ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
3402			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
3403			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
3404		}
3405	}
3406
3407	bp->idle_chk_status_idx = bnapi->last_status_idx;
3408}
3409
3410#ifdef BCM_CNIC
3411static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
3412{
3413	struct cnic_ops *c_ops;
3414
3415	if (!bnapi->cnic_present)
3416		return;
3417
3418	rcu_read_lock();
3419	c_ops = rcu_dereference(bp->cnic_ops);
3420	if (c_ops)
3421		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
3422						      bnapi->status_blk.msi);
3423	rcu_read_unlock();
3424}
3425#endif
3426
3427static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3428{
3429	struct status_block *sblk = bnapi->status_blk.msi;
3430	u32 status_attn_bits = sblk->status_attn_bits;
3431	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3432
3433	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3434	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3435
3436		bnx2_phy_int(bp, bnapi);
3437
3438		/* This is needed to take care of transient status
3439		 * during link changes.
3440		 */
3441		REG_WR(bp, BNX2_HC_COMMAND,
3442		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3443		REG_RD(bp, BNX2_HC_COMMAND);
3444	}
3445}
3446
3447static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3448			  int work_done, int budget)
3449{
3450	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3451	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3452
3453	if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3454		bnx2_tx_int(bp, bnapi, 0);
3455
3456	if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3457		work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3458
3459	return work_done;
3460}
3461
3462static int bnx2_poll_msix(struct napi_struct *napi, int budget)
3463{
3464	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3465	struct bnx2 *bp = bnapi->bp;
3466	int work_done = 0;
3467	struct status_block_msix *sblk = bnapi->status_blk.msix;
3468
3469	while (1) {
3470		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3471		if (unlikely(work_done >= budget))
3472			break;
3473
3474		bnapi->last_status_idx = sblk->status_idx;
3475		/* status idx must be read before checking for more work. */
3476		rmb();
3477		if (likely(!bnx2_has_fast_work(bnapi))) {
3478
3479			napi_complete(napi);
3480			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3481			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3482			       bnapi->last_status_idx);
3483			break;
3484		}
3485	}
3486	return work_done;
3487}
3488
3489static int bnx2_poll(struct napi_struct *napi, int budget)
3490{
3491	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3492	struct bnx2 *bp = bnapi->bp;
3493	int work_done = 0;
3494	struct status_block *sblk = bnapi->status_blk.msi;
3495
3496	while (1) {
3497		bnx2_poll_link(bp, bnapi);
3498
3499		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3500
3501#ifdef BCM_CNIC
3502		bnx2_poll_cnic(bp, bnapi);
3503#endif
3504
3505		/* bnapi->last_status_idx is used below to tell the hw how
3506		 * much work has been processed, so we must read it before
3507		 * checking for more work.
3508		 */
3509		bnapi->last_status_idx = sblk->status_idx;
3510
3511		if (unlikely(work_done >= budget))
3512			break;
3513
3514		rmb();
3515		if (likely(!bnx2_has_work(bnapi))) {
3516			napi_complete(napi);
3517			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3518				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3519				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3520				       bnapi->last_status_idx);
3521				break;
3522			}
3523			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3524			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3525			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
3526			       bnapi->last_status_idx);
3527
3528			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3529			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3530			       bnapi->last_status_idx);
3531			break;
3532		}
3533	}
3534
3535	return work_done;
3536}
3537
3538/* Called with rtnl_lock from vlan functions and also netif_tx_lock
3539 * from set_multicast.
3540 */
3541static void
3542bnx2_set_rx_mode(struct net_device *dev)
3543{
3544	struct bnx2 *bp = netdev_priv(dev);
3545	u32 rx_mode, sort_mode;
3546	struct netdev_hw_addr *ha;
3547	int i;
3548
3549	if (!netif_running(dev))
3550		return;
3551
3552	spin_lock_bh(&bp->phy_lock);
3553
3554	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3555				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3556	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3557#ifdef BCM_VLAN
3558	if (!bp->vlgrp && (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
3559		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3560#else
3561	if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
3562		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3563#endif
3564	if (dev->flags & IFF_PROMISC) {
3565		/* Promiscuous mode. */
3566		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3567		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3568			     BNX2_RPM_SORT_USER0_PROM_VLAN;
3569	}
3570	else if (dev->flags & IFF_ALLMULTI) {
3571		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3572			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3573			       0xffffffff);
3574        	}
3575		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3576	}
3577	else {
3578		/* Accept one or more multicast(s). */
3579		u32 mc_filter[NUM_MC_HASH_REGISTERS];
3580		u32 regidx;
3581		u32 bit;
3582		u32 crc;
3583
3584		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3585
3586		netdev_for_each_mc_addr(ha, dev) {
3587			crc = ether_crc_le(ETH_ALEN, ha->addr);
3588			bit = crc & 0xff;
3589			regidx = (bit & 0xe0) >> 5;
3590			bit &= 0x1f;
3591			mc_filter[regidx] |= (1 << bit);
3592		}
3593
3594		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3595			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3596			       mc_filter[i]);
3597		}
3598
3599		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3600	}
3601
3602	if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
3603		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3604		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3605			     BNX2_RPM_SORT_USER0_PROM_VLAN;
3606	} else if (!(dev->flags & IFF_PROMISC)) {
3607		/* Add all entries into to the match filter list */
3608		i = 0;
3609		netdev_for_each_uc_addr(ha, dev) {
3610			bnx2_set_mac_addr(bp, ha->addr,
3611					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
3612			sort_mode |= (1 <<
3613				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
3614			i++;
3615		}
3616
3617	}
3618
3619	if (rx_mode != bp->rx_mode) {
3620		bp->rx_mode = rx_mode;
3621		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3622	}
3623
3624	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3625	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3626	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3627
3628	spin_unlock_bh(&bp->phy_lock);
3629}
3630
3631static int __devinit
3632check_fw_section(const struct firmware *fw,
3633		 const struct bnx2_fw_file_section *section,
3634		 u32 alignment, bool non_empty)
3635{
3636	u32 offset = be32_to_cpu(section->offset);
3637	u32 len = be32_to_cpu(section->len);
3638
3639	if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3640		return -EINVAL;
3641	if ((non_empty && len == 0) || len > fw->size - offset ||
3642	    len & (alignment - 1))
3643		return -EINVAL;
3644	return 0;
3645}
3646
3647static int __devinit
3648check_mips_fw_entry(const struct firmware *fw,
3649		    const struct bnx2_mips_fw_file_entry *entry)
3650{
3651	if (check_fw_section(fw, &entry->text, 4, true) ||
3652	    check_fw_section(fw, &entry->data, 4, false) ||
3653	    check_fw_section(fw, &entry->rodata, 4, false))
3654		return -EINVAL;
3655	return 0;
3656}
3657
3658static int __devinit
3659bnx2_request_firmware(struct bnx2 *bp)
3660{
3661	const char *mips_fw_file, *rv2p_fw_file;
3662	const struct bnx2_mips_fw_file *mips_fw;
3663	const struct bnx2_rv2p_fw_file *rv2p_fw;
3664	int rc;
3665
3666	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3667		mips_fw_file = FW_MIPS_FILE_09;
3668		if ((CHIP_ID(bp) == CHIP_ID_5709_A0) ||
3669		    (CHIP_ID(bp) == CHIP_ID_5709_A1))
3670			rv2p_fw_file = FW_RV2P_FILE_09_Ax;
3671		else
3672			rv2p_fw_file = FW_RV2P_FILE_09;
3673	} else {
3674		mips_fw_file = FW_MIPS_FILE_06;
3675		rv2p_fw_file = FW_RV2P_FILE_06;
3676	}
3677
3678	rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3679	if (rc) {
3680		pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
3681		return rc;
3682	}
3683
3684	rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3685	if (rc) {
3686		pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
3687		return rc;
3688	}
3689	mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3690	rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3691	if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3692	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3693	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3694	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3695	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3696	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3697		pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
3698		return -EINVAL;
3699	}
3700	if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3701	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3702	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3703		pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
3704		return -EINVAL;
3705	}
3706
3707	return 0;
3708}
3709
3710static u32
3711rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3712{
3713	switch (idx) {
3714	case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3715		rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3716		rv2p_code |= RV2P_BD_PAGE_SIZE;
3717		break;
3718	}
3719	return rv2p_code;
3720}
3721
3722static int
3723load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
3724	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
3725{
3726	u32 rv2p_code_len, file_offset;
3727	__be32 *rv2p_code;
3728	int i;
3729	u32 val, cmd, addr;
3730
3731	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
3732	file_offset = be32_to_cpu(fw_entry->rv2p.offset);
3733
3734	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3735
3736	if (rv2p_proc == RV2P_PROC1) {
3737		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3738		addr = BNX2_RV2P_PROC1_ADDR_CMD;
3739	} else {
3740		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3741		addr = BNX2_RV2P_PROC2_ADDR_CMD;
3742	}
3743
3744	for (i = 0; i < rv2p_code_len; i += 8) {
3745		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
3746		rv2p_code++;
3747		REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
3748		rv2p_code++;
3749
3750		val = (i / 8) | cmd;
3751		REG_WR(bp, addr, val);
3752	}
3753
3754	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3755	for (i = 0; i < 8; i++) {
3756		u32 loc, code;
3757
3758		loc = be32_to_cpu(fw_entry->fixup[i]);
3759		if (loc && ((loc * 4) < rv2p_code_len)) {
3760			code = be32_to_cpu(*(rv2p_code + loc - 1));
3761			REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
3762			code = be32_to_cpu(*(rv2p_code + loc));
3763			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
3764			REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);
3765
3766			val = (loc / 2) | cmd;
3767			REG_WR(bp, addr, val);
3768		}
3769	}
3770
3771	/* Reset the processor, un-stall is done later. */
3772	if (rv2p_proc == RV2P_PROC1) {
3773		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3774	}
3775	else {
3776		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3777	}
3778
3779	return 0;
3780}
3781
3782static int
3783load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3784	    const struct bnx2_mips_fw_file_entry *fw_entry)
3785{
3786	u32 addr, len, file_offset;
3787	__be32 *data;
3788	u32 offset;
3789	u32 val;
3790
3791	/* Halt the CPU. */
3792	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3793	val |= cpu_reg->mode_value_halt;
3794	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3795	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3796
3797	/* Load the Text area. */
3798	addr = be32_to_cpu(fw_entry->text.addr);
3799	len = be32_to_cpu(fw_entry->text.len);
3800	file_offset = be32_to_cpu(fw_entry->text.offset);
3801	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3802
3803	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3804	if (len) {
3805		int j;
3806
3807		for (j = 0; j < (len / 4); j++, offset += 4)
3808			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3809	}
3810
3811	/* Load the Data area. */
3812	addr = be32_to_cpu(fw_entry->data.addr);
3813	len = be32_to_cpu(fw_entry->data.len);
3814	file_offset = be32_to_cpu(fw_entry->data.offset);
3815	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3816
3817	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3818	if (len) {
3819		int j;
3820
3821		for (j = 0; j < (len / 4); j++, offset += 4)
3822			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3823	}
3824
3825	/* Load the Read-Only area. */
3826	addr = be32_to_cpu(fw_entry->rodata.addr);
3827	len = be32_to_cpu(fw_entry->rodata.len);
3828	file_offset = be32_to_cpu(fw_entry->rodata.offset);
3829	data = (__be32 *)(bp->mips_firmware->data + file_offset);
3830
3831	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3832	if (len) {
3833		int j;
3834
3835		for (j = 0; j < (len / 4); j++, offset += 4)
3836			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3837	}
3838
3839	/* Clear the pre-fetch instruction. */
3840	bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3841
3842	val = be32_to_cpu(fw_entry->start_addr);
3843	bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3844
3845	/* Start the CPU. */
3846	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3847	val &= ~cpu_reg->mode_value_halt;
3848	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3849	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3850
3851	return 0;
3852}
3853
3854static int
3855bnx2_init_cpus(struct bnx2 *bp)
3856{
3857	const struct bnx2_mips_fw_file *mips_fw =
3858		(const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3859	const struct bnx2_rv2p_fw_file *rv2p_fw =
3860		(const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3861	int rc;
3862
3863	/* Initialize the RV2P processor. */
3864	load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3865	load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3866
3867	/* Initialize the RX Processor. */
3868	rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3869	if (rc)
3870		goto init_cpu_err;
3871
3872	/* Initialize the TX Processor. */
3873	rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3874	if (rc)
3875		goto init_cpu_err;
3876
3877	/* Initialize the TX Patch-up Processor. */
3878	rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3879	if (rc)
3880		goto init_cpu_err;
3881
3882	/* Initialize the Completion Processor. */
3883	rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3884	if (rc)
3885		goto init_cpu_err;
3886
3887	/* Initialize the Command Processor. */
3888	rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3889
3890init_cpu_err:
3891	return rc;
3892}
3893
3894static int
3895bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3896{
3897	u16 pmcsr;
3898
3899	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3900
3901	switch (state) {
3902	case PCI_D0: {
3903		u32 val;
3904
3905		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3906			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3907			PCI_PM_CTRL_PME_STATUS);
3908
3909		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3910			/* delay required during transition out of D3hot */
3911			msleep(20);
3912
3913		val = REG_RD(bp, BNX2_EMAC_MODE);
3914		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3915		val &= ~BNX2_EMAC_MODE_MPKT;
3916		REG_WR(bp, BNX2_EMAC_MODE, val);
3917
3918		val = REG_RD(bp, BNX2_RPM_CONFIG);
3919		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3920		REG_WR(bp, BNX2_RPM_CONFIG, val);
3921		break;
3922	}
3923	case PCI_D3hot: {
3924		int i;
3925		u32 val, wol_msg;
3926
3927		if (bp->wol) {
3928			u32 advertising;
3929			u8 autoneg;
3930
3931			autoneg = bp->autoneg;
3932			advertising = bp->advertising;
3933
3934			if (bp->phy_port == PORT_TP) {
3935				bp->autoneg = AUTONEG_SPEED;
3936				bp->advertising = ADVERTISED_10baseT_Half |
3937					ADVERTISED_10baseT_Full |
3938					ADVERTISED_100baseT_Half |
3939					ADVERTISED_100baseT_Full |
3940					ADVERTISED_Autoneg;
3941			}
3942
3943			spin_lock_bh(&bp->phy_lock);
3944			bnx2_setup_phy(bp, bp->phy_port);
3945			spin_unlock_bh(&bp->phy_lock);
3946
3947			bp->autoneg = autoneg;
3948			bp->advertising = advertising;
3949
3950			bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
3951
3952			val = REG_RD(bp, BNX2_EMAC_MODE);
3953
3954			/* Enable port mode. */
3955			val &= ~BNX2_EMAC_MODE_PORT;
3956			val |= BNX2_EMAC_MODE_MPKT_RCVD |
3957			       BNX2_EMAC_MODE_ACPI_RCVD |
3958			       BNX2_EMAC_MODE_MPKT;
3959			if (bp->phy_port == PORT_TP)
3960				val |= BNX2_EMAC_MODE_PORT_MII;
3961			else {
3962				val |= BNX2_EMAC_MODE_PORT_GMII;
3963				if (bp->line_speed == SPEED_2500)
3964					val |= BNX2_EMAC_MODE_25G_MODE;
3965			}
3966
3967			REG_WR(bp, BNX2_EMAC_MODE, val);
3968
3969			/* receive all multicast */
3970			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3971				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3972				       0xffffffff);
3973			}
3974			REG_WR(bp, BNX2_EMAC_RX_MODE,
3975			       BNX2_EMAC_RX_MODE_SORT_MODE);
3976
3977			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3978			      BNX2_RPM_SORT_USER0_MC_EN;
3979			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3980			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3981			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3982			       BNX2_RPM_SORT_USER0_ENA);
3983
3984			/* Need to enable EMAC and RPM for WOL. */
3985			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3986			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3987			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3988			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3989
3990			val = REG_RD(bp, BNX2_RPM_CONFIG);
3991			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3992			REG_WR(bp, BNX2_RPM_CONFIG, val);
3993
3994			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3995		}
3996		else {
3997			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3998		}
3999
4000		if (!(bp->flags & BNX2_FLAG_NO_WOL))
4001			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
4002				     1, 0);
4003
4004		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
4005		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
4006		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
4007
4008			if (bp->wol)
4009				pmcsr |= 3;
4010		}
4011		else {
4012			pmcsr |= 3;
4013		}
4014		if (bp->wol) {
4015			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
4016		}
4017		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
4018				      pmcsr);
4019
4020		/* No more memory access after this point until
4021		 * device is brought back to D0.
4022		 */
4023		udelay(50);
4024		break;
4025	}
4026	default:
4027		return -EINVAL;
4028	}
4029	return 0;
4030}
4031
4032static int
4033bnx2_acquire_nvram_lock(struct bnx2 *bp)
4034{
4035	u32 val;
4036	int j;
4037
4038	/* Request access to the flash interface. */
4039	REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4040	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4041		val = REG_RD(bp, BNX2_NVM_SW_ARB);
4042		if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4043			break;
4044
4045		udelay(5);
4046	}
4047
4048	if (j >= NVRAM_TIMEOUT_COUNT)
4049		return -EBUSY;
4050
4051	return 0;
4052}
4053
4054static int
4055bnx2_release_nvram_lock(struct bnx2 *bp)
4056{
4057	int j;
4058	u32 val;
4059
4060	/* Relinquish nvram interface. */
4061	REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4062
4063	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4064		val = REG_RD(bp, BNX2_NVM_SW_ARB);
4065		if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4066			break;
4067
4068		udelay(5);
4069	}
4070
4071	if (j >= NVRAM_TIMEOUT_COUNT)
4072		return -EBUSY;
4073
4074	return 0;
4075}
4076
4077
4078static int
4079bnx2_enable_nvram_write(struct bnx2 *bp)
4080{
4081	u32 val;
4082
4083	val = REG_RD(bp, BNX2_MISC_CFG);
4084	REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4085
4086	if (bp->flash_info->flags & BNX2_NV_WREN) {
4087		int j;
4088
4089		REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4090		REG_WR(bp, BNX2_NVM_COMMAND,
4091		       BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4092
4093		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4094			udelay(5);
4095
4096			val = REG_RD(bp, BNX2_NVM_COMMAND);
4097			if (val & BNX2_NVM_COMMAND_DONE)
4098				break;
4099		}
4100
4101		if (j >= NVRAM_TIMEOUT_COUNT)
4102			return -EBUSY;
4103	}
4104	return 0;
4105}
4106
4107static void
4108bnx2_disable_nvram_write(struct bnx2 *bp)
4109{
4110	u32 val;
4111
4112	val = REG_RD(bp, BNX2_MISC_CFG);
4113	REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4114}
4115
4116
4117static void
4118bnx2_enable_nvram_access(struct bnx2 *bp)
4119{
4120	u32 val;
4121
4122	val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4123	/* Enable both bits, even on read. */
4124	REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4125	       val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4126}
4127
4128static void
4129bnx2_disable_nvram_access(struct bnx2 *bp)
4130{
4131	u32 val;
4132
4133	val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4134	/* Disable both bits, even after read. */
4135	REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4136		val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4137			BNX2_NVM_ACCESS_ENABLE_WR_EN));
4138}
4139
4140static int
4141bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4142{
4143	u32 cmd;
4144	int j;
4145
4146	if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4147		/* Buffered flash, no erase needed */
4148		return 0;
4149
4150	/* Build an erase command */
4151	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4152	      BNX2_NVM_COMMAND_DOIT;
4153
4154	/* Need to clear DONE bit separately. */
4155	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4156
4157	/* Address of the NVRAM to read from. */
4158	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4159
4160	/* Issue an erase command. */
4161	REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4162
4163	/* Wait for completion. */
4164	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4165		u32 val;
4166
4167		udelay(5);
4168
4169		val = REG_RD(bp, BNX2_NVM_COMMAND);
4170		if (val & BNX2_NVM_COMMAND_DONE)
4171			break;
4172	}
4173
4174	if (j >= NVRAM_TIMEOUT_COUNT)
4175		return -EBUSY;
4176
4177	return 0;
4178}
4179
4180static int
4181bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4182{
4183	u32 cmd;
4184	int j;
4185
4186	/* Build the command word. */
4187	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
4188
4189	/* Calculate an offset of a buffered flash, not needed for 5709. */
4190	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4191		offset = ((offset / bp->flash_info->page_size) <<
4192			   bp->flash_info->page_bits) +
4193			  (offset % bp->flash_info->page_size);
4194	}
4195
4196	/* Need to clear DONE bit separately. */
4197	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4198
4199	/* Address of the NVRAM to read from. */
4200	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4201
4202	/* Issue a read command. */
4203	REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4204
4205	/* Wait for completion. */
4206	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4207		u32 val;
4208
4209		udelay(5);
4210
4211		val = REG_RD(bp, BNX2_NVM_COMMAND);
4212		if (val & BNX2_NVM_COMMAND_DONE) {
4213			__be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
4214			memcpy(ret_val, &v, 4);
4215			break;
4216		}
4217	}
4218	if (j >= NVRAM_TIMEOUT_COUNT)
4219		return -EBUSY;
4220
4221	return 0;
4222}
4223
4224
4225static int
4226bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4227{
4228	u32 cmd;
4229	__be32 val32;
4230	int j;
4231
4232	/* Build the command word. */
4233	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4234
4235	/* Calculate an offset of a buffered flash, not needed for 5709. */
4236	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4237		offset = ((offset / bp->flash_info->page_size) <<
4238			  bp->flash_info->page_bits) +
4239			 (offset % bp->flash_info->page_size);
4240	}
4241
4242	/* Need to clear DONE bit separately. */
4243	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4244
4245	memcpy(&val32, val, 4);
4246
4247	/* Write the data. */
4248	REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4249
4250	/* Address of the NVRAM to write to. */
4251	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4252
4253	/* Issue the write command. */
4254	REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4255
4256	/* Wait for completion. */
4257	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4258		udelay(5);
4259
4260		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4261			break;
4262	}
4263	if (j >= NVRAM_TIMEOUT_COUNT)
4264		return -EBUSY;
4265
4266	return 0;
4267}
4268
4269static int
4270bnx2_init_nvram(struct bnx2 *bp)
4271{
4272	u32 val;
4273	int j, entry_count, rc = 0;
4274	const struct flash_spec *flash;
4275
4276	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4277		bp->flash_info = &flash_5709;
4278		goto get_flash_size;
4279	}
4280
4281	/* Determine the selected interface. */
4282	val = REG_RD(bp, BNX2_NVM_CFG1);
4283
4284	entry_count = ARRAY_SIZE(flash_table);
4285
4286	if (val & 0x40000000) {
4287
4288		/* Flash interface has been reconfigured */
4289		for (j = 0, flash = &flash_table[0]; j < entry_count;
4290		     j++, flash++) {
4291			if ((val & FLASH_BACKUP_STRAP_MASK) ==
4292			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
4293				bp->flash_info = flash;
4294				break;
4295			}
4296		}
4297	}
4298	else {
4299		u32 mask;
4300		/* Not yet been reconfigured */
4301
4302		if (val & (1 << 23))
4303			mask = FLASH_BACKUP_STRAP_MASK;
4304		else
4305			mask = FLASH_STRAP_MASK;
4306
4307		for (j = 0, flash = &flash_table[0]; j < entry_count;
4308			j++, flash++) {
4309
4310			if ((val & mask) == (flash->strapping & mask)) {
4311				bp->flash_info = flash;
4312
4313				/* Request access to the flash interface. */
4314				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4315					return rc;
4316
4317				/* Enable access to flash interface */
4318				bnx2_enable_nvram_access(bp);
4319
4320				/* Reconfigure the flash interface */
4321				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
4322				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
4323				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
4324				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
4325
4326				/* Disable access to flash interface */
4327				bnx2_disable_nvram_access(bp);
4328				bnx2_release_nvram_lock(bp);
4329
4330				break;
4331			}
4332		}
4333	} /* if (val & 0x40000000) */
4334
4335	if (j == entry_count) {
4336		bp->flash_info = NULL;
4337		pr_alert("Unknown flash/EEPROM type\n");
4338		return -ENODEV;
4339	}
4340
4341get_flash_size:
4342	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
4343	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
4344	if (val)
4345		bp->flash_size = val;
4346	else
4347		bp->flash_size = bp->flash_info->total_size;
4348
4349	return rc;
4350}
4351
4352static int
4353bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4354		int buf_size)
4355{
4356	int rc = 0;
4357	u32 cmd_flags, offset32, len32, extra;
4358
4359	if (buf_size == 0)
4360		return 0;
4361
4362	/* Request access to the flash interface. */
4363	if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4364		return rc;
4365
4366	/* Enable access to flash interface */
4367	bnx2_enable_nvram_access(bp);
4368
4369	len32 = buf_size;
4370	offset32 = offset;
4371	extra = 0;
4372
4373	cmd_flags = 0;
4374
4375	if (offset32 & 3) {
4376		u8 buf[4];
4377		u32 pre_len;
4378
4379		offset32 &= ~3;
4380		pre_len = 4 - (offset & 3);
4381
4382		if (pre_len >= len32) {
4383			pre_len = len32;
4384			cmd_flags = BNX2_NVM_COMMAND_FIRST |
4385				    BNX2_NVM_COMMAND_LAST;
4386		}
4387		else {
4388			cmd_flags = BNX2_NVM_COMMAND_FIRST;
4389		}
4390
4391		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4392
4393		if (rc)
4394			return rc;
4395
4396		memcpy(ret_buf, buf + (offset & 3), pre_len);
4397
4398		offset32 += 4;
4399		ret_buf += pre_len;
4400		len32 -= pre_len;
4401	}
4402	if (len32 & 3) {
4403		extra = 4 - (len32 & 3);
4404		len32 = (len32 + 4) & ~3;
4405	}
4406
4407	if (len32 == 4) {
4408		u8 buf[4];
4409
4410		if (cmd_flags)
4411			cmd_flags = BNX2_NVM_COMMAND_LAST;
4412		else
4413			cmd_flags = BNX2_NVM_COMMAND_FIRST |
4414				    BNX2_NVM_COMMAND_LAST;
4415
4416		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4417
4418		memcpy(ret_buf, buf, 4 - extra);
4419	}
4420	else if (len32 > 0) {
4421		u8 buf[4];
4422
4423		/* Read the first word. */
4424		if (cmd_flags)
4425			cmd_flags = 0;
4426		else
4427			cmd_flags = BNX2_NVM_COMMAND_FIRST;
4428
4429		rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4430
4431		/* Advance to the next dword. */
4432		offset32 += 4;
4433		ret_buf += 4;
4434		len32 -= 4;
4435
4436		while (len32 > 4 && rc == 0) {
4437			rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4438
4439			/* Advance to the next dword. */
4440			offset32 += 4;
4441			ret_buf += 4;
4442			len32 -= 4;
4443		}
4444
4445		if (rc)
4446			return rc;
4447
4448		cmd_flags = BNX2_NVM_COMMAND_LAST;
4449		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4450
4451		memcpy(ret_buf, buf, 4 - extra);
4452	}
4453
4454	/* Disable access to flash interface */
4455	bnx2_disable_nvram_access(bp);
4456
4457	bnx2_release_nvram_lock(bp);
4458
4459	return rc;
4460}
4461
4462static int
4463bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4464		int buf_size)
4465{
4466	u32 written, offset32, len32;
4467	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4468	int rc = 0;
4469	int align_start, align_end;
4470
4471	buf = data_buf;
4472	offset32 = offset;
4473	len32 = buf_size;
4474	align_start = align_end = 0;
4475
4476	if ((align_start = (offset32 & 3))) {
4477		offset32 &= ~3;
4478		len32 += align_start;
4479		if (len32 < 4)
4480			len32 = 4;
4481		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4482			return rc;
4483	}
4484
4485	if (len32 & 3) {
4486		align_end = 4 - (len32 & 3);
4487		len32 += align_end;
4488		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4489			return rc;
4490	}
4491
4492	if (align_start || align_end) {
4493		align_buf = kmalloc(len32, GFP_KERNEL);
4494		if (align_buf == NULL)
4495			return -ENOMEM;
4496		if (align_start) {
4497			memcpy(align_buf, start, 4);
4498		}
4499		if (align_end) {
4500			memcpy(align_buf + len32 - 4, end, 4);
4501		}
4502		memcpy(align_buf + align_start, data_buf, buf_size);
4503		buf = align_buf;
4504	}
4505
4506	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4507		flash_buffer = kmalloc(264, GFP_KERNEL);
4508		if (flash_buffer == NULL) {
4509			rc = -ENOMEM;
4510			goto nvram_write_end;
4511		}
4512	}
4513
4514	written = 0;
4515	while ((written < len32) && (rc == 0)) {
4516		u32 page_start, page_end, data_start, data_end;
4517		u32 addr, cmd_flags;
4518		int i;
4519
4520	        /* Find the page_start addr */
4521		page_start = offset32 + written;
4522		page_start -= (page_start % bp->flash_info->page_size);
4523		/* Find the page_end addr */
4524		page_end = page_start + bp->flash_info->page_size;
4525		/* Find the data_start addr */
4526		data_start = (written == 0) ? offset32 : page_start;
4527		/* Find the data_end addr */
4528		data_end = (page_end > offset32 + len32) ?
4529			(offset32 + len32) : page_end;
4530
4531		/* Request access to the flash interface. */
4532		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4533			goto nvram_write_end;
4534
4535		/* Enable access to flash interface */
4536		bnx2_enable_nvram_access(bp);
4537
4538		cmd_flags = BNX2_NVM_COMMAND_FIRST;
4539		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4540			int j;
4541
4542			/* Read the whole page into the buffer
4543			 * (non-buffer flash only) */
4544			for (j = 0; j < bp->flash_info->page_size; j += 4) {
4545				if (j == (bp->flash_info->page_size - 4)) {
4546					cmd_flags |= BNX2_NVM_COMMAND_LAST;
4547				}
4548				rc = bnx2_nvram_read_dword(bp,
4549					page_start + j,
4550					&flash_buffer[j],
4551					cmd_flags);
4552
4553				if (rc)
4554					goto nvram_write_end;
4555
4556				cmd_flags = 0;
4557			}
4558		}
4559
4560		/* Enable writes to flash interface (unlock write-protect) */
4561		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4562			goto nvram_write_end;
4563
4564		/* Loop to write back the buffer data from page_start to
4565		 * data_start */
4566		i = 0;
4567		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4568			/* Erase the page */
4569			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4570				goto nvram_write_end;
4571
4572			/* Re-enable the write again for the actual write */
4573			bnx2_enable_nvram_write(bp);
4574
4575			for (addr = page_start; addr < data_start;
4576				addr += 4, i += 4) {
4577
4578				rc = bnx2_nvram_write_dword(bp, addr,
4579					&flash_buffer[i], cmd_flags);
4580
4581				if (rc != 0)
4582					goto nvram_write_end;
4583
4584				cmd_flags = 0;
4585			}
4586		}
4587
4588		/* Loop to write the new data from data_start to data_end */
4589		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4590			if ((addr == page_end - 4) ||
4591				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4592				 (addr == data_end - 4))) {
4593
4594				cmd_flags |= BNX2_NVM_COMMAND_LAST;
4595			}
4596			rc = bnx2_nvram_write_dword(bp, addr, buf,
4597				cmd_flags);
4598
4599			if (rc != 0)
4600				goto nvram_write_end;
4601
4602			cmd_flags = 0;
4603			buf += 4;
4604		}
4605
4606		/* Loop to write back the buffer data from data_end
4607		 * to page_end */
4608		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4609			for (addr = data_end; addr < page_end;
4610				addr += 4, i += 4) {
4611
4612				if (addr == page_end-4) {
4613					cmd_flags = BNX2_NVM_COMMAND_LAST;
4614                		}
4615				rc = bnx2_nvram_write_dword(bp, addr,
4616					&flash_buffer[i], cmd_flags);
4617
4618				if (rc != 0)
4619					goto nvram_write_end;
4620
4621				cmd_flags = 0;
4622			}
4623		}
4624
4625		/* Disable writes to flash interface (lock write-protect) */
4626		bnx2_disable_nvram_write(bp);
4627
4628		/* Disable access to flash interface */
4629		bnx2_disable_nvram_access(bp);
4630		bnx2_release_nvram_lock(bp);
4631
4632		/* Increment written */
4633		written += data_end - data_start;
4634	}
4635
4636nvram_write_end:
4637	kfree(flash_buffer);
4638	kfree(align_buf);
4639	return rc;
4640}
4641
4642static void
4643bnx2_init_fw_cap(struct bnx2 *bp)
4644{
4645	u32 val, sig = 0;
4646
4647	bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4648	bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4649
4650	if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4651		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4652
4653	val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4654	if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4655		return;
4656
4657	if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4658		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4659		sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4660	}
4661
4662	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4663	    (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4664		u32 link;
4665
4666		bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4667
4668		link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4669		if (link & BNX2_LINK_STATUS_SERDES_LINK)
4670			bp->phy_port = PORT_FIBRE;
4671		else
4672			bp->phy_port = PORT_TP;
4673
4674		sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4675		       BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4676	}
4677
4678	if (netif_running(bp->dev) && sig)
4679		bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4680}
4681
4682static void
4683bnx2_setup_msix_tbl(struct bnx2 *bp)
4684{
4685	REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4686
4687	REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4688	REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4689}
4690
4691static int
4692bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4693{
4694	u32 val;
4695	int i, rc = 0;
4696	u8 old_port;
4697
4698	/* Wait for the current PCI transaction to complete before
4699	 * issuing a reset. */
4700	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4701	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4702	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4703	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4704	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4705	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4706	udelay(5);
4707
4708	/* Wait for the firmware to tell us it is ok to issue a reset. */
4709	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
4710
4711	/* Deposit a driver reset signature so the firmware knows that
4712	 * this is a soft reset. */
4713	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
4714		      BNX2_DRV_RESET_SIGNATURE_MAGIC);
4715
4716	/* Do a dummy read to force the chip to complete all current transaction
4717	 * before we issue a reset. */
4718	val = REG_RD(bp, BNX2_MISC_ID);
4719
4720	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4721		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4722		REG_RD(bp, BNX2_MISC_COMMAND);
4723		udelay(5);
4724
4725		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4726		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4727
4728		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
4729
4730	} else {
4731		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4732		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4733		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4734
4735		/* Chip reset. */
4736		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4737
4738		/* Reading back any register after chip reset will hang the
4739		 * bus on 5706 A0 and A1.  The msleep below provides plenty
4740		 * of margin for write posting.
4741		 */
4742		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
4743		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
4744			msleep(20);
4745
4746		/* Reset takes approximate 30 usec */
4747		for (i = 0; i < 10; i++) {
4748			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4749			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4750				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4751				break;
4752			udelay(10);
4753		}
4754
4755		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4756			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4757			pr_err("Chip reset did not complete\n");
4758			return -EBUSY;
4759		}
4760	}
4761
4762	/* Make sure byte swapping is properly configured. */
4763	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
4764	if (val != 0x01020304) {
4765		pr_err("Chip not in correct endian mode\n");
4766		return -ENODEV;
4767	}
4768
4769	/* Wait for the firmware to finish its initialization. */
4770	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
4771	if (rc)
4772		return rc;
4773
4774	spin_lock_bh(&bp->phy_lock);
4775	old_port = bp->phy_port;
4776	bnx2_init_fw_cap(bp);
4777	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4778	    old_port != bp->phy_port)
4779		bnx2_set_default_remote_link(bp);
4780	spin_unlock_bh(&bp->phy_lock);
4781
4782	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4783		/* Adjust the voltage regular to two steps lower.  The default
4784		 * of this register is 0x0000000e. */
4785		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4786
4787		/* Remove bad rbuf memory from the free pool. */
4788		rc = bnx2_alloc_bad_rbuf(bp);
4789	}
4790
4791	if (bp->flags & BNX2_FLAG_USING_MSIX) {
4792		bnx2_setup_msix_tbl(bp);
4793		/* Prevent MSIX table reads and write from timing out */
4794		REG_WR(bp, BNX2_MISC_ECO_HW_CTL,
4795			BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
4796	}
4797
4798	return rc;
4799}
4800
4801static int
4802bnx2_init_chip(struct bnx2 *bp)
4803{
4804	u32 val, mtu;
4805	int rc, i;
4806
4807	/* Make sure the interrupt is not active. */
4808	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4809
4810	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4811	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4812#ifdef __BIG_ENDIAN
4813	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4814#endif
4815	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4816	      DMA_READ_CHANS << 12 |
4817	      DMA_WRITE_CHANS << 16;
4818
4819	val |= (0x2 << 20) | (1 << 11);
4820
4821	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4822		val |= (1 << 23);
4823
4824	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4825	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
4826		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4827
4828	REG_WR(bp, BNX2_DMA_CONFIG, val);
4829
4830	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4831		val = REG_RD(bp, BNX2_TDMA_CONFIG);
4832		val |= BNX2_TDMA_CONFIG_ONE_DMA;
4833		REG_WR(bp, BNX2_TDMA_CONFIG, val);
4834	}
4835
4836	if (bp->flags & BNX2_FLAG_PCIX) {
4837		u16 val16;
4838
4839		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4840				     &val16);
4841		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4842				      val16 & ~PCI_X_CMD_ERO);
4843	}
4844
4845	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4846	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4847	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4848	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4849
4850	/* Initialize context mapping and zero out the quick contexts.  The
4851	 * context block must have already been enabled. */
4852	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4853		rc = bnx2_init_5709_context(bp);
4854		if (rc)
4855			return rc;
4856	} else
4857		bnx2_init_context(bp);
4858
4859	if ((rc = bnx2_init_cpus(bp)) != 0)
4860		return rc;
4861
4862	bnx2_init_nvram(bp);
4863
4864	bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4865
4866	val = REG_RD(bp, BNX2_MQ_CONFIG);
4867	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4868	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4869	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4870		val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4871		if (CHIP_REV(bp) == CHIP_REV_Ax)
4872			val |= BNX2_MQ_CONFIG_HALT_DIS;
4873	}
4874
4875	REG_WR(bp, BNX2_MQ_CONFIG, val);
4876
4877	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4878	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4879	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4880
4881	val = (BCM_PAGE_BITS - 8) << 24;
4882	REG_WR(bp, BNX2_RV2P_CONFIG, val);
4883
4884	/* Configure page size. */
4885	val = REG_RD(bp, BNX2_TBDR_CONFIG);
4886	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4887	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4888	REG_WR(bp, BNX2_TBDR_CONFIG, val);
4889
4890	val = bp->mac_addr[0] +
4891	      (bp->mac_addr[1] << 8) +
4892	      (bp->mac_addr[2] << 16) +
4893	      bp->mac_addr[3] +
4894	      (bp->mac_addr[4] << 8) +
4895	      (bp->mac_addr[5] << 16);
4896	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4897
4898	/* Program the MTU.  Also include 4 bytes for CRC32. */
4899	mtu = bp->dev->mtu;
4900	val = mtu + ETH_HLEN + ETH_FCS_LEN;
4901	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4902		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4903	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4904
4905	if (mtu < 1500)
4906		mtu = 1500;
4907
4908	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
4909	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
4910	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
4911
4912	memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
4913	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4914		bp->bnx2_napi[i].last_status_idx = 0;
4915
4916	bp->idle_chk_status_idx = 0xffff;
4917
4918	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4919
4920	/* Set up how to generate a link change interrupt. */
4921	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4922
4923	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4924	       (u64) bp->status_blk_mapping & 0xffffffff);
4925	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4926
4927	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4928	       (u64) bp->stats_blk_mapping & 0xffffffff);
4929	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4930	       (u64) bp->stats_blk_mapping >> 32);
4931
4932	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4933	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4934
4935	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4936	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4937
4938	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4939	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4940
4941	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4942
4943	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4944
4945	REG_WR(bp, BNX2_HC_COM_TICKS,
4946	       (bp->com_ticks_int << 16) | bp->com_ticks);
4947
4948	REG_WR(bp, BNX2_HC_CMD_TICKS,
4949	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4950
4951	if (bp->flags & BNX2_FLAG_BROKEN_STATS)
4952		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4953	else
4954		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4955	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
4956
4957	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4958		val = BNX2_HC_CONFIG_COLLECT_STATS;
4959	else {
4960		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4961		      BNX2_HC_CONFIG_COLLECT_STATS;
4962	}
4963
4964	if (bp->flags & BNX2_FLAG_USING_MSIX) {
4965		REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
4966		       BNX2_HC_MSIX_BIT_VECTOR_VAL);
4967
4968		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
4969	}
4970
4971	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
4972		val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
4973
4974	REG_WR(bp, BNX2_HC_CONFIG, val);
4975
4976	for (i = 1; i < bp->irq_nvecs; i++) {
4977		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4978			   BNX2_HC_SB_CONFIG_1;
4979
4980		REG_WR(bp, base,
4981			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
4982			BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
4983			BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4984
4985		REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
4986			(bp->tx_quick_cons_trip_int << 16) |
4987			 bp->tx_quick_cons_trip);
4988
4989		REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
4990			(bp->tx_ticks_int << 16) | bp->tx_ticks);
4991
4992		REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
4993		       (bp->rx_quick_cons_trip_int << 16) |
4994			bp->rx_quick_cons_trip);
4995
4996		REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
4997			(bp->rx_ticks_int << 16) | bp->rx_ticks);
4998	}
4999
5000	/* Clear internal stats counters. */
5001	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
5002
5003	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
5004
5005	/* Initialize the receive filter. */
5006	bnx2_set_rx_mode(bp->dev);
5007
5008	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5009		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
5010		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
5011		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
5012	}
5013	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
5014			  1, 0);
5015
5016	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
5017	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
5018
5019	udelay(20);
5020
5021	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
5022
5023	return rc;
5024}
5025
5026static void
5027bnx2_clear_ring_states(struct bnx2 *bp)
5028{
5029	struct bnx2_napi *bnapi;
5030	struct bnx2_tx_ring_info *txr;
5031	struct bnx2_rx_ring_info *rxr;
5032	int i;
5033
5034	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5035		bnapi = &bp->bnx2_napi[i];
5036		txr = &bnapi->tx_ring;
5037		rxr = &bnapi->rx_ring;
5038
5039		txr->tx_cons = 0;
5040		txr->hw_tx_cons = 0;
5041		rxr->rx_prod_bseq = 0;
5042		rxr->rx_prod = 0;
5043		rxr->rx_cons = 0;
5044		rxr->rx_pg_prod = 0;
5045		rxr->rx_pg_cons = 0;
5046	}
5047}
5048
5049static void
5050bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5051{
5052	u32 val, offset0, offset1, offset2, offset3;
5053	u32 cid_addr = GET_CID_ADDR(cid);
5054
5055	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5056		offset0 = BNX2_L2CTX_TYPE_XI;
5057		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5058		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5059		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5060	} else {
5061		offset0 = BNX2_L2CTX_TYPE;
5062		offset1 = BNX2_L2CTX_CMD_TYPE;
5063		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5064		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5065	}
5066	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5067	bnx2_ctx_wr(bp, cid_addr, offset0, val);
5068
5069	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5070	bnx2_ctx_wr(bp, cid_addr, offset1, val);
5071
5072	val = (u64) txr->tx_desc_mapping >> 32;
5073	bnx2_ctx_wr(bp, cid_addr, offset2, val);
5074
5075	val = (u64) txr->tx_desc_mapping & 0xffffffff;
5076	bnx2_ctx_wr(bp, cid_addr, offset3, val);
5077}
5078
5079static void
5080bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5081{
5082	struct tx_bd *txbd;
5083	u32 cid = TX_CID;
5084	struct bnx2_napi *bnapi;
5085	struct bnx2_tx_ring_info *txr;
5086
5087	bnapi = &bp->bnx2_napi[ring_num];
5088	txr = &bnapi->tx_ring;
5089
5090	if (ring_num == 0)
5091		cid = TX_CID;
5092	else
5093		cid = TX_TSS_CID + ring_num - 1;
5094
5095	bp->tx_wake_thresh = bp->tx_ring_size / 2;
5096
5097	txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
5098
5099	txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5100	txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5101
5102	txr->tx_prod = 0;
5103	txr->tx_prod_bseq = 0;
5104
5105	txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5106	txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5107
5108	bnx2_init_tx_context(bp, cid, txr);
5109}
5110
5111static void
5112bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
5113		     int num_rings)
5114{
5115	int i;
5116	struct rx_bd *rxbd;
5117
5118	for (i = 0; i < num_rings; i++) {
5119		int j;
5120
5121		rxbd = &rx_ring[i][0];
5122		for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
5123			rxbd->rx_bd_len = buf_size;
5124			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5125		}
5126		if (i == (num_rings - 1))
5127			j = 0;
5128		else
5129			j = i + 1;
5130		rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5131		rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5132	}
5133}
5134
5135static void
5136bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
5137{
5138	int i;
5139	u16 prod, ring_prod;
5140	u32 cid, rx_cid_addr, val;
5141	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
5142	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5143
5144	if (ring_num == 0)
5145		cid = RX_CID;
5146	else
5147		cid = RX_RSS_CID + ring_num - 1;
5148
5149	rx_cid_addr = GET_CID_ADDR(cid);
5150
5151	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
5152			     bp->rx_buf_use_size, bp->rx_max_ring);
5153
5154	bnx2_init_rx_context(bp, cid);
5155
5156	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5157		val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
5158		REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
5159	}
5160
5161	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
5162	if (bp->rx_pg_ring_size) {
5163		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
5164				     rxr->rx_pg_desc_mapping,
5165				     PAGE_SIZE, bp->rx_max_pg_ring);
5166		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
5167		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
5168		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
5169		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);
5170
5171		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
5172		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
5173
5174		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
5175		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
5176
5177		if (CHIP_NUM(bp) == CHIP_NUM_5709)
5178			REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
5179	}
5180
5181	val = (u64) rxr->rx_desc_mapping[0] >> 32;
5182	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
5183
5184	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
5185	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
5186
5187	ring_prod = prod = rxr->rx_pg_prod;
5188	for (i = 0; i < bp->rx_pg_ring_size; i++) {
5189		if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5190			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
5191				    ring_num, i, bp->rx_pg_ring_size);
5192			break;
5193		}
5194		prod = NEXT_RX_BD(prod);
5195		ring_prod = RX_PG_RING_IDX(prod);
5196	}
5197	rxr->rx_pg_prod = prod;
5198
5199	ring_prod = prod = rxr->rx_prod;
5200	for (i = 0; i < bp->rx_ring_size; i++) {
5201		if (bnx2_alloc_rx_skb(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5202			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
5203				    ring_num, i, bp->rx_ring_size);
5204			break;
5205		}
5206		prod = NEXT_RX_BD(prod);
5207		ring_prod = RX_RING_IDX(prod);
5208	}
5209	rxr->rx_prod = prod;
5210
5211	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
5212	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
5213	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
5214
5215	REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
5216	REG_WR16(bp, rxr->rx_bidx_addr, prod);
5217
5218	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
5219}
5220
5221static void
5222bnx2_init_all_rings(struct bnx2 *bp)
5223{
5224	int i;
5225	u32 val;
5226
5227	bnx2_clear_ring_states(bp);
5228
5229	REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
5230	for (i = 0; i < bp->num_tx_rings; i++)
5231		bnx2_init_tx_ring(bp, i);
5232
5233	if (bp->num_tx_rings > 1)
5234		REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
5235		       (TX_TSS_CID << 7));
5236
5237	REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
5238	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
5239
5240	for (i = 0; i < bp->num_rx_rings; i++)
5241		bnx2_init_rx_ring(bp, i);
5242
5243	if (bp->num_rx_rings > 1) {
5244		u32 tbl_32;
5245		u8 *tbl = (u8 *) &tbl_32;
5246
5247		bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
5248				BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);
5249
5250		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
5251			tbl[i % 4] = i % (bp->num_rx_rings - 1);
5252			if ((i % 4) == 3)
5253				bnx2_reg_wr_ind(bp,
5254						BNX2_RXP_SCRATCH_RSS_TBL + i,
5255						cpu_to_be32(tbl_32));
5256		}
5257
5258		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
5259		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
5260
5261		REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
5262
5263	}
5264}
5265
5266static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5267{
5268	u32 max, num_rings = 1;
5269
5270	while (ring_size > MAX_RX_DESC_CNT) {
5271		ring_size -= MAX_RX_DESC_CNT;
5272		num_rings++;
5273	}
5274	/* round to next power of 2 */
5275	max = max_size;
5276	while ((max & num_rings) == 0)
5277		max >>= 1;
5278
5279	if (num_rings != max)
5280		max <<= 1;
5281
5282	return max;
5283}
5284
5285static void
5286bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5287{
5288	u32 rx_size, rx_space, jumbo_size;
5289
5290	/* 8 for CRC and VLAN */
5291	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5292
5293	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
5294		sizeof(struct skb_shared_info);
5295
5296	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
5297	bp->rx_pg_ring_size = 0;
5298	bp->rx_max_pg_ring = 0;
5299	bp->rx_max_pg_ring_idx = 0;
5300	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
5301		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5302
5303		jumbo_size = size * pages;
5304		if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
5305			jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
5306
5307		bp->rx_pg_ring_size = jumbo_size;
5308		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5309							MAX_RX_PG_RINGS);
5310		bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
5311		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5312		bp->rx_copy_thresh = 0;
5313	}
5314
5315	bp->rx_buf_use_size = rx_size;
5316	/* hw alignment */
5317	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
5318	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5319	bp->rx_ring_size = size;
5320	bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
5321	bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
5322}
5323
5324static void
5325bnx2_free_tx_skbs(struct bnx2 *bp)
5326{
5327	int i;
5328
5329	for (i = 0; i < bp->num_tx_rings; i++) {
5330		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5331		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5332		int j;
5333
5334		if (txr->tx_buf_ring == NULL)
5335			continue;
5336
5337		for (j = 0; j < TX_DESC_CNT; ) {
5338			struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
5339			struct sk_buff *skb = tx_buf->skb;
5340			int k, last;
5341
5342			if (skb == NULL) {
5343				j++;
5344				continue;
5345			}
5346
5347			dma_unmap_single(&bp->pdev->dev,
5348					 dma_unmap_addr(tx_buf, mapping),
5349					 skb_headlen(skb),
5350					 PCI_DMA_TODEVICE);
5351
5352			tx_buf->skb = NULL;
5353
5354			last = tx_buf->nr_frags;
5355			j++;
5356			for (k = 0; k < last; k++, j++) {
5357				tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
5358				dma_unmap_page(&bp->pdev->dev,
5359					dma_unmap_addr(tx_buf, mapping),
5360					skb_shinfo(skb)->frags[k].size,
5361					PCI_DMA_TODEVICE);
5362			}
5363			dev_kfree_skb(skb);
5364		}
5365	}
5366}
5367
5368static void
5369bnx2_free_rx_skbs(struct bnx2 *bp)
5370{
5371	int i;
5372
5373	for (i = 0; i < bp->num_rx_rings; i++) {
5374		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5375		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5376		int j;
5377
5378		if (rxr->rx_buf_ring == NULL)
5379			return;
5380
5381		for (j = 0; j < bp->rx_max_ring_idx; j++) {
5382			struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5383			struct sk_buff *skb = rx_buf->skb;
5384
5385			if (skb == NULL)
5386				continue;
5387
5388			dma_unmap_single(&bp->pdev->dev,
5389					 dma_unmap_addr(rx_buf, mapping),
5390					 bp->rx_buf_use_size,
5391					 PCI_DMA_FROMDEVICE);
5392
5393			rx_buf->skb = NULL;
5394
5395			dev_kfree_skb(skb);
5396		}
5397		for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5398			bnx2_free_rx_page(bp, rxr, j);
5399	}
5400}
5401
5402static void
5403bnx2_free_skbs(struct bnx2 *bp)
5404{
5405	bnx2_free_tx_skbs(bp);
5406	bnx2_free_rx_skbs(bp);
5407}
5408
5409static int
5410bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5411{
5412	int rc;
5413
5414	rc = bnx2_reset_chip(bp, reset_code);
5415	bnx2_free_skbs(bp);
5416	if (rc)
5417		return rc;
5418
5419	if ((rc = bnx2_init_chip(bp)) != 0)
5420		return rc;
5421
5422	bnx2_init_all_rings(bp);
5423	return 0;
5424}
5425
5426static int
5427bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5428{
5429	int rc;
5430
5431	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5432		return rc;
5433
5434	spin_lock_bh(&bp->phy_lock);
5435	bnx2_init_phy(bp, reset_phy);
5436	bnx2_set_link(bp);
5437	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5438		bnx2_remote_phy_event(bp);
5439	spin_unlock_bh(&bp->phy_lock);
5440	return 0;
5441}
5442
5443static int
5444bnx2_shutdown_chip(struct bnx2 *bp)
5445{
5446	u32 reset_code;
5447
5448	if (bp->flags & BNX2_FLAG_NO_WOL)
5449		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5450	else if (bp->wol)
5451		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5452	else
5453		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5454
5455	return bnx2_reset_chip(bp, reset_code);
5456}
5457
5458static int
5459bnx2_test_registers(struct bnx2 *bp)
5460{
5461	int ret;
5462	int i, is_5709;
5463	static const struct {
5464		u16   offset;
5465		u16   flags;
5466#define BNX2_FL_NOT_5709	1
5467		u32   rw_mask;
5468		u32   ro_mask;
5469	} reg_tbl[] = {
5470		{ 0x006c, 0, 0x00000000, 0x0000003f },
5471		{ 0x0090, 0, 0xffffffff, 0x00000000 },
5472		{ 0x0094, 0, 0x00000000, 0x00000000 },
5473
5474		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
5475		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5476		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5477		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
5478		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
5479		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5480		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
5481		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5482		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5483
5484		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5485		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5486		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5487		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5488		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5489		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5490
5491		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5492		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
5493		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },
5494
5495		{ 0x1000, 0, 0x00000000, 0x00000001 },
5496		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
5497
5498		{ 0x1408, 0, 0x01c00800, 0x00000000 },
5499		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
5500		{ 0x14a8, 0, 0x00000000, 0x000001ff },
5501		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
5502		{ 0x14b0, 0, 0x00000002, 0x00000001 },
5503		{ 0x14b8, 0, 0x00000000, 0x00000000 },
5504		{ 0x14c0, 0, 0x00000000, 0x00000009 },
5505		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
5506		{ 0x14cc, 0, 0x00000000, 0x00000001 },
5507		{ 0x14d0, 0, 0xffffffff, 0x00000000 },
5508
5509		{ 0x1800, 0, 0x00000000, 0x00000001 },
5510		{ 0x1804, 0, 0x00000000, 0x00000003 },
5511
5512		{ 0x2800, 0, 0x00000000, 0x00000001 },
5513		{ 0x2804, 0, 0x00000000, 0x00003f01 },
5514		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
5515		{ 0x2810, 0, 0xffff0000, 0x00000000 },
5516		{ 0x2814, 0, 0xffff0000, 0x00000000 },
5517		{ 0x2818, 0, 0xffff0000, 0x00000000 },
5518		{ 0x281c, 0, 0xffff0000, 0x00000000 },
5519		{ 0x2834, 0, 0xffffffff, 0x00000000 },
5520		{ 0x2840, 0, 0x00000000, 0xffffffff },
5521		{ 0x2844, 0, 0x00000000, 0xffffffff },
5522		{ 0x2848, 0, 0xffffffff, 0x00000000 },
5523		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },
5524
5525		{ 0x2c00, 0, 0x00000000, 0x00000011 },
5526		{ 0x2c04, 0, 0x00000000, 0x00030007 },
5527
5528		{ 0x3c00, 0, 0x00000000, 0x00000001 },
5529		{ 0x3c04, 0, 0x00000000, 0x00070000 },
5530		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
5531		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
5532		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
5533		{ 0x3c14, 0, 0x00000000, 0xffffffff },
5534		{ 0x3c18, 0, 0x00000000, 0xffffffff },
5535		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
5536		{ 0x3c20, 0, 0xffffff00, 0x00000000 },
5537
5538		{ 0x5004, 0, 0x00000000, 0x0000007f },
5539		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },
5540
5541		{ 0x5c00, 0, 0x00000000, 0x00000001 },
5542		{ 0x5c04, 0, 0x00000000, 0x0003000f },
5543		{ 0x5c08, 0, 0x00000003, 0x00000000 },
5544		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
5545		{ 0x5c10, 0, 0x00000000, 0xffffffff },
5546		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
5547		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
5548		{ 0x5c88, 0, 0x00000000, 0x00077373 },
5549		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },
5550
5551		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
5552		{ 0x680c, 0, 0xffffffff, 0x00000000 },
5553		{ 0x6810, 0, 0xffffffff, 0x00000000 },
5554		{ 0x6814, 0, 0xffffffff, 0x00000000 },
5555		{ 0x6818, 0, 0xffffffff, 0x00000000 },
5556		{ 0x681c, 0, 0xffffffff, 0x00000000 },
5557		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
5558		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
5559		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
5560		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
5561		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
5562		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
5563		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
5564		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
5565		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
5566		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
5567		{ 0x684c, 0, 0xffffffff, 0x00000000 },
5568		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5569		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5570		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5571		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5572		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
5573		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5574
5575		{ 0xffff, 0, 0x00000000, 0x00000000 },
5576	};
5577
5578	ret = 0;
5579	is_5709 = 0;
5580	if (CHIP_NUM(bp) == CHIP_NUM_5709)
5581		is_5709 = 1;
5582
5583	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5584		u32 offset, rw_mask, ro_mask, save_val, val;
5585		u16 flags = reg_tbl[i].flags;
5586
5587		if (is_5709 && (flags & BNX2_FL_NOT_5709))
5588			continue;
5589
5590		offset = (u32) reg_tbl[i].offset;
5591		rw_mask = reg_tbl[i].rw_mask;
5592		ro_mask = reg_tbl[i].ro_mask;
5593
5594		save_val = readl(bp->regview + offset);
5595
5596		writel(0, bp->regview + offset);
5597
5598		val = readl(bp->regview + offset);
5599		if ((val & rw_mask) != 0) {
5600			goto reg_test_err;
5601		}
5602
5603		if ((val & ro_mask) != (save_val & ro_mask)) {
5604			goto reg_test_err;
5605		}
5606
5607		writel(0xffffffff, bp->regview + offset);
5608
5609		val = readl(bp->regview + offset);
5610		if ((val & rw_mask) != rw_mask) {
5611			goto reg_test_err;
5612		}
5613
5614		if ((val & ro_mask) != (save_val & ro_mask)) {
5615			goto reg_test_err;
5616		}
5617
5618		writel(save_val, bp->regview + offset);
5619		continue;
5620
5621reg_test_err:
5622		writel(save_val, bp->regview + offset);
5623		ret = -ENODEV;
5624		break;
5625	}
5626	return ret;
5627}
5628
5629static int
5630bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5631{
5632	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5633		0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5634	int i;
5635
5636	for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5637		u32 offset;
5638
5639		for (offset = 0; offset < size; offset += 4) {
5640
5641			bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5642
5643			if (bnx2_reg_rd_ind(bp, start + offset) !=
5644				test_pattern[i]) {
5645				return -ENODEV;
5646			}
5647		}
5648	}
5649	return 0;
5650}
5651
5652static int
5653bnx2_test_memory(struct bnx2 *bp)
5654{
5655	int ret = 0;
5656	int i;
5657	static struct mem_entry {
5658		u32   offset;
5659		u32   len;
5660	} mem_tbl_5706[] = {
5661		{ 0x60000,  0x4000 },
5662		{ 0xa0000,  0x3000 },
5663		{ 0xe0000,  0x4000 },
5664		{ 0x120000, 0x4000 },
5665		{ 0x1a0000, 0x4000 },
5666		{ 0x160000, 0x4000 },
5667		{ 0xffffffff, 0    },
5668	},
5669	mem_tbl_5709[] = {
5670		{ 0x60000,  0x4000 },
5671		{ 0xa0000,  0x3000 },
5672		{ 0xe0000,  0x4000 },
5673		{ 0x120000, 0x4000 },
5674		{ 0x1a0000, 0x4000 },
5675		{ 0xffffffff, 0    },
5676	};
5677	struct mem_entry *mem_tbl;
5678
5679	if (CHIP_NUM(bp) == CHIP_NUM_5709)
5680		mem_tbl = mem_tbl_5709;
5681	else
5682		mem_tbl = mem_tbl_5706;
5683
5684	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5685		if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5686			mem_tbl[i].len)) != 0) {
5687			return ret;
5688		}
5689	}
5690
5691	return ret;
5692}
5693
5694#define BNX2_MAC_LOOPBACK	0
5695#define BNX2_PHY_LOOPBACK	1
5696
5697static int
5698bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5699{
5700	unsigned int pkt_size, num_pkts, i;
5701	struct sk_buff *skb, *rx_skb;
5702	unsigned char *packet;
5703	u16 rx_start_idx, rx_idx;
5704	dma_addr_t map;
5705	struct tx_bd *txbd;
5706	struct sw_bd *rx_buf;
5707	struct l2_fhdr *rx_hdr;
5708	int ret = -ENODEV;
5709	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5710	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5711	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5712
5713	tx_napi = bnapi;
5714
5715	txr = &tx_napi->tx_ring;
5716	rxr = &bnapi->rx_ring;
5717	if (loopback_mode == BNX2_MAC_LOOPBACK) {
5718		bp->loopback = MAC_LOOPBACK;
5719		bnx2_set_mac_loopback(bp);
5720	}
5721	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5722		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5723			return 0;
5724
5725		bp->loopback = PHY_LOOPBACK;
5726		bnx2_set_phy_loopback(bp);
5727	}
5728	else
5729		return -EINVAL;
5730
5731	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5732	skb = netdev_alloc_skb(bp->dev, pkt_size);
5733	if (!skb)
5734		return -ENOMEM;
5735	packet = skb_put(skb, pkt_size);
5736	memcpy(packet, bp->dev->dev_addr, 6);
5737	memset(packet + 6, 0x0, 8);
5738	for (i = 14; i < pkt_size; i++)
5739		packet[i] = (unsigned char) (i & 0xff);
5740
5741	map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
5742			     PCI_DMA_TODEVICE);
5743	if (dma_mapping_error(&bp->pdev->dev, map)) {
5744		dev_kfree_skb(skb);
5745		return -EIO;
5746	}
5747
5748	REG_WR(bp, BNX2_HC_COMMAND,
5749	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5750
5751	REG_RD(bp, BNX2_HC_COMMAND);
5752
5753	udelay(5);
5754	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5755
5756	num_pkts = 0;
5757
5758	txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];
5759
5760	txbd->tx_bd_haddr_hi = (u64) map >> 32;
5761	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5762	txbd->tx_bd_mss_nbytes = pkt_size;
5763	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5764
5765	num_pkts++;
5766	txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
5767	txr->tx_prod_bseq += pkt_size;
5768
5769	REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5770	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5771
5772	udelay(100);
5773
5774	REG_WR(bp, BNX2_HC_COMMAND,
5775	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5776
5777	REG_RD(bp, BNX2_HC_COMMAND);
5778
5779	udelay(5);
5780
5781	dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
5782	dev_kfree_skb(skb);
5783
5784	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
5785		goto loopback_test_done;
5786
5787	rx_idx = bnx2_get_hw_rx_cons(bnapi);
5788	if (rx_idx != rx_start_idx + num_pkts) {
5789		goto loopback_test_done;
5790	}
5791
5792	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5793	rx_skb = rx_buf->skb;
5794
5795	rx_hdr = rx_buf->desc;
5796	skb_reserve(rx_skb, BNX2_RX_OFFSET);
5797
5798	dma_sync_single_for_cpu(&bp->pdev->dev,
5799		dma_unmap_addr(rx_buf, mapping),
5800		bp->rx_buf_size, PCI_DMA_FROMDEVICE);
5801
5802	if (rx_hdr->l2_fhdr_status &
5803		(L2_FHDR_ERRORS_BAD_CRC |
5804		L2_FHDR_ERRORS_PHY_DECODE |
5805		L2_FHDR_ERRORS_ALIGNMENT |
5806		L2_FHDR_ERRORS_TOO_SHORT |
5807		L2_FHDR_ERRORS_GIANT_FRAME)) {
5808
5809		goto loopback_test_done;
5810	}
5811
5812	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5813		goto loopback_test_done;
5814	}
5815
5816	for (i = 14; i < pkt_size; i++) {
5817		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
5818			goto loopback_test_done;
5819		}
5820	}
5821
5822	ret = 0;
5823
5824loopback_test_done:
5825	bp->loopback = 0;
5826	return ret;
5827}
5828
5829#define BNX2_MAC_LOOPBACK_FAILED	1
5830#define BNX2_PHY_LOOPBACK_FAILED	2
5831#define BNX2_LOOPBACK_FAILED		(BNX2_MAC_LOOPBACK_FAILED |	\
5832					 BNX2_PHY_LOOPBACK_FAILED)
5833
5834static int
5835bnx2_test_loopback(struct bnx2 *bp)
5836{
5837	int rc = 0;
5838
5839	if (!netif_running(bp->dev))
5840		return BNX2_LOOPBACK_FAILED;
5841
5842	bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5843	spin_lock_bh(&bp->phy_lock);
5844	bnx2_init_phy(bp, 1);
5845	spin_unlock_bh(&bp->phy_lock);
5846	if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5847		rc |= BNX2_MAC_LOOPBACK_FAILED;
5848	if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5849		rc |= BNX2_PHY_LOOPBACK_FAILED;
5850	return rc;
5851}
5852
5853#define NVRAM_SIZE 0x200
5854#define CRC32_RESIDUAL 0xdebb20e3
5855
5856static int
5857bnx2_test_nvram(struct bnx2 *bp)
5858{
5859	__be32 buf[NVRAM_SIZE / 4];
5860	u8 *data = (u8 *) buf;
5861	int rc = 0;
5862	u32 magic, csum;
5863
5864	if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5865		goto test_nvram_done;
5866
5867        magic = be32_to_cpu(buf[0]);
5868	if (magic != 0x669955aa) {
5869		rc = -ENODEV;
5870		goto test_nvram_done;
5871	}
5872
5873	if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5874		goto test_nvram_done;
5875
5876	csum = ether_crc_le(0x100, data);
5877	if (csum != CRC32_RESIDUAL) {
5878		rc = -ENODEV;
5879		goto test_nvram_done;
5880	}
5881
5882	csum = ether_crc_le(0x100, data + 0x100);
5883	if (csum != CRC32_RESIDUAL) {
5884		rc = -ENODEV;
5885	}
5886
5887test_nvram_done:
5888	return rc;
5889}
5890
5891static int
5892bnx2_test_link(struct bnx2 *bp)
5893{
5894	u32 bmsr;
5895
5896	if (!netif_running(bp->dev))
5897		return -ENODEV;
5898
5899	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5900		if (bp->link_up)
5901			return 0;
5902		return -ENODEV;
5903	}
5904	spin_lock_bh(&bp->phy_lock);
5905	bnx2_enable_bmsr1(bp);
5906	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5907	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5908	bnx2_disable_bmsr1(bp);
5909	spin_unlock_bh(&bp->phy_lock);
5910
5911	if (bmsr & BMSR_LSTATUS) {
5912		return 0;
5913	}
5914	return -ENODEV;
5915}
5916
5917static int
5918bnx2_test_intr(struct bnx2 *bp)
5919{
5920	int i;
5921	u16 status_idx;
5922
5923	if (!netif_running(bp->dev))
5924		return -ENODEV;
5925
5926	status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5927
5928	/* This register is not touched during run-time. */
5929	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5930	REG_RD(bp, BNX2_HC_COMMAND);
5931
5932	for (i = 0; i < 10; i++) {
5933		if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5934			status_idx) {
5935
5936			break;
5937		}
5938
5939		msleep_interruptible(10);
5940	}
5941	if (i < 10)
5942		return 0;
5943
5944	return -ENODEV;
5945}
5946
5947/* Determining link for parallel detection. */
5948static int
5949bnx2_5706_serdes_has_link(struct bnx2 *bp)
5950{
5951	u32 mode_ctl, an_dbg, exp;
5952
5953	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
5954		return 0;
5955
5956	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
5957	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
5958
5959	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
5960		return 0;
5961
5962	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5963	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5964	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5965
5966	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
5967		return 0;
5968
5969	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
5970	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5971	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5972
5973	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
5974		return 0;
5975
5976	return 1;
5977}
5978
5979static void
5980bnx2_5706_serdes_timer(struct bnx2 *bp)
5981{
5982	int check_link = 1;
5983
5984	spin_lock(&bp->phy_lock);
5985	if (bp->serdes_an_pending) {
5986		bp->serdes_an_pending--;
5987		check_link = 0;
5988	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5989		u32 bmcr;
5990
5991		bp->current_interval = BNX2_TIMER_INTERVAL;
5992
5993		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5994
5995		if (bmcr & BMCR_ANENABLE) {
5996			if (bnx2_5706_serdes_has_link(bp)) {
5997				bmcr &= ~BMCR_ANENABLE;
5998				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5999				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6000				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
6001			}
6002		}
6003	}
6004	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
6005		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
6006		u32 phy2;
6007
6008		bnx2_write_phy(bp, 0x17, 0x0f01);
6009		bnx2_read_phy(bp, 0x15, &phy2);
6010		if (phy2 & 0x20) {
6011			u32 bmcr;
6012
6013			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6014			bmcr |= BMCR_ANENABLE;
6015			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6016
6017			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
6018		}
6019	} else
6020		bp->current_interval = BNX2_TIMER_INTERVAL;
6021
6022	if (check_link) {
6023		u32 val;
6024
6025		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
6026		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6027		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6028
6029		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
6030			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
6031				bnx2_5706s_force_link_dn(bp, 1);
6032				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
6033			} else
6034				bnx2_set_link(bp);
6035		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
6036			bnx2_set_link(bp);
6037	}
6038	spin_unlock(&bp->phy_lock);
6039}
6040
6041static void
6042bnx2_5708_serdes_timer(struct bnx2 *bp)
6043{
6044	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
6045		return;
6046
6047	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
6048		bp->serdes_an_pending = 0;
6049		return;
6050	}
6051
6052	spin_lock(&bp->phy_lock);
6053	if (bp->serdes_an_pending)
6054		bp->serdes_an_pending--;
6055	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6056		u32 bmcr;
6057
6058		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6059		if (bmcr & BMCR_ANENABLE) {
6060			bnx2_enable_forced_2g5(bp);
6061			bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
6062		} else {
6063			bnx2_disable_forced_2g5(bp);
6064			bp->serdes_an_pending = 2;
6065			bp->current_interval = BNX2_TIMER_INTERVAL;
6066		}
6067
6068	} else
6069		bp->current_interval = BNX2_TIMER_INTERVAL;
6070
6071	spin_unlock(&bp->phy_lock);
6072}
6073
6074static void
6075bnx2_timer(unsigned long data)
6076{
6077	struct bnx2 *bp = (struct bnx2 *) data;
6078
6079	if (!netif_running(bp->dev))
6080		return;
6081
6082	if (atomic_read(&bp->intr_sem) != 0)
6083		goto bnx2_restart_timer;
6084
6085	if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
6086	     BNX2_FLAG_USING_MSI)
6087		bnx2_chk_missed_msi(bp);
6088
6089	bnx2_send_heart_beat(bp);
6090
6091	bp->stats_blk->stat_FwRxDrop =
6092		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
6093
6094	if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
6095		REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
6096					    BNX2_HC_COMMAND_STATS_NOW);
6097
6098	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6099		if (CHIP_NUM(bp) == CHIP_NUM_5706)
6100			bnx2_5706_serdes_timer(bp);
6101		else
6102			bnx2_5708_serdes_timer(bp);
6103	}
6104
6105bnx2_restart_timer:
6106	mod_timer(&bp->timer, jiffies + bp->current_interval);
6107}
6108
6109static int
6110bnx2_request_irq(struct bnx2 *bp)
6111{
6112	unsigned long flags;
6113	struct bnx2_irq *irq;
6114	int rc = 0, i;
6115
6116	if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6117		flags = 0;
6118	else
6119		flags = IRQF_SHARED;
6120
6121	for (i = 0; i < bp->irq_nvecs; i++) {
6122		irq = &bp->irq_tbl[i];
6123		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6124				 &bp->bnx2_napi[i]);
6125		if (rc)
6126			break;
6127		irq->requested = 1;
6128	}
6129	return rc;
6130}
6131
6132static void
6133bnx2_free_irq(struct bnx2 *bp)
6134{
6135	struct bnx2_irq *irq;
6136	int i;
6137
6138	for (i = 0; i < bp->irq_nvecs; i++) {
6139		irq = &bp->irq_tbl[i];
6140		if (irq->requested)
6141			free_irq(irq->vector, &bp->bnx2_napi[i]);
6142		irq->requested = 0;
6143	}
6144	if (bp->flags & BNX2_FLAG_USING_MSI)
6145		pci_disable_msi(bp->pdev);
6146	else if (bp->flags & BNX2_FLAG_USING_MSIX)
6147		pci_disable_msix(bp->pdev);
6148
6149	bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6150}
6151
6152static void
6153bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6154{
6155	int i, total_vecs, rc;
6156	struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
6157	struct net_device *dev = bp->dev;
6158	const int len = sizeof(bp->irq_tbl[0].name);
6159
6160	bnx2_setup_msix_tbl(bp);
6161	REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
6162	REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
6163	REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
6164
6165	/*  Need to flush the previous three writes to ensure MSI-X
6166	 *  is setup properly */
6167	REG_RD(bp, BNX2_PCI_MSIX_CONTROL);
6168
6169	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6170		msix_ent[i].entry = i;
6171		msix_ent[i].vector = 0;
6172	}
6173
6174	total_vecs = msix_vecs;
6175#ifdef BCM_CNIC
6176	total_vecs++;
6177#endif
6178	rc = -ENOSPC;
6179	while (total_vecs >= BNX2_MIN_MSIX_VEC) {
6180		rc = pci_enable_msix(bp->pdev, msix_ent, total_vecs);
6181		if (rc <= 0)
6182			break;
6183		if (rc > 0)
6184			total_vecs = rc;
6185	}
6186
6187	if (rc != 0)
6188		return;
6189
6190	msix_vecs = total_vecs;
6191#ifdef BCM_CNIC
6192	msix_vecs--;
6193#endif
6194	bp->irq_nvecs = msix_vecs;
6195	bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
6196	for (i = 0; i < total_vecs; i++) {
6197		bp->irq_tbl[i].vector = msix_ent[i].vector;
6198		snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
6199		bp->irq_tbl[i].handler = bnx2_msi_1shot;
6200	}
6201}
6202
6203static void
6204bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
6205{
6206	int cpus = num_online_cpus();
6207	int msix_vecs = min(cpus + 1, RX_MAX_RINGS);
6208
6209	bp->irq_tbl[0].handler = bnx2_interrupt;
6210	strcpy(bp->irq_tbl[0].name, bp->dev->name);
6211	bp->irq_nvecs = 1;
6212	bp->irq_tbl[0].vector = bp->pdev->irq;
6213
6214	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
6215		bnx2_enable_msix(bp, msix_vecs);
6216
6217	if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
6218	    !(bp->flags & BNX2_FLAG_USING_MSIX)) {
6219		if (pci_enable_msi(bp->pdev) == 0) {
6220			bp->flags |= BNX2_FLAG_USING_MSI;
6221			if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6222				bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
6223				bp->irq_tbl[0].handler = bnx2_msi_1shot;
6224			} else
6225				bp->irq_tbl[0].handler = bnx2_msi;
6226
6227			bp->irq_tbl[0].vector = bp->pdev->irq;
6228		}
6229	}
6230
6231	bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
6232	bp->dev->real_num_tx_queues = bp->num_tx_rings;
6233
6234	bp->num_rx_rings = bp->irq_nvecs;
6235}
6236
6237/* Called with rtnl_lock */
6238static int
6239bnx2_open(struct net_device *dev)
6240{
6241	struct bnx2 *bp = netdev_priv(dev);
6242	int rc;
6243
6244	netif_carrier_off(dev);
6245
6246	bnx2_set_power_state(bp, PCI_D0);
6247	bnx2_disable_int(bp);
6248
6249	bnx2_setup_int_mode(bp, disable_msi);
6250	bnx2_init_napi(bp);
6251	bnx2_napi_enable(bp);
6252	rc = bnx2_alloc_mem(bp);
6253	if (rc)
6254		goto open_err;
6255
6256	rc = bnx2_request_irq(bp);
6257	if (rc)
6258		goto open_err;
6259
6260	rc = bnx2_init_nic(bp, 1);
6261	if (rc)
6262		goto open_err;
6263
6264	mod_timer(&bp->timer, jiffies + bp->current_interval);
6265
6266	atomic_set(&bp->intr_sem, 0);
6267
6268	memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));
6269
6270	bnx2_enable_int(bp);
6271
6272	if (bp->flags & BNX2_FLAG_USING_MSI) {
6273		/* Test MSI to make sure it is working
6274		 * If MSI test fails, go back to INTx mode
6275		 */
6276		if (bnx2_test_intr(bp) != 0) {
6277			netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");
6278
6279			bnx2_disable_int(bp);
6280			bnx2_free_irq(bp);
6281
6282			bnx2_setup_int_mode(bp, 1);
6283
6284			rc = bnx2_init_nic(bp, 0);
6285
6286			if (!rc)
6287				rc = bnx2_request_irq(bp);
6288
6289			if (rc) {
6290				del_timer_sync(&bp->timer);
6291				goto open_err;
6292			}
6293			bnx2_enable_int(bp);
6294		}
6295	}
6296	if (bp->flags & BNX2_FLAG_USING_MSI)
6297		netdev_info(dev, "using MSI\n");
6298	else if (bp->flags & BNX2_FLAG_USING_MSIX)
6299		netdev_info(dev, "using MSIX\n");
6300
6301	netif_tx_start_all_queues(dev);
6302
6303	return 0;
6304
6305open_err:
6306	bnx2_napi_disable(bp);
6307	bnx2_free_skbs(bp);
6308	bnx2_free_irq(bp);
6309	bnx2_free_mem(bp);
6310	bnx2_del_napi(bp);
6311	return rc;
6312}
6313
6314static void
6315bnx2_reset_task(struct work_struct *work)
6316{
6317	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
6318
6319	rtnl_lock();
6320	if (!netif_running(bp->dev)) {
6321		rtnl_unlock();
6322		return;
6323	}
6324
6325	bnx2_netif_stop(bp, true);
6326
6327	bnx2_init_nic(bp, 1);
6328
6329	atomic_set(&bp->intr_sem, 1);
6330	bnx2_netif_start(bp, true);
6331	rtnl_unlock();
6332}
6333
6334static void
6335bnx2_dump_state(struct bnx2 *bp)
6336{
6337	struct net_device *dev = bp->dev;
6338	u32 mcp_p0, mcp_p1, val1, val2;
6339
6340	pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
6341	netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
6342		   atomic_read(&bp->intr_sem), val1);
6343	pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
6344	pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
6345	netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
6346	netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
6347		   REG_RD(bp, BNX2_EMAC_TX_STATUS),
6348		   REG_RD(bp, BNX2_EMAC_RX_STATUS));
6349	netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
6350		   REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
6351	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6352		mcp_p0 = BNX2_MCP_STATE_P0;
6353		mcp_p1 = BNX2_MCP_STATE_P1;
6354	} else {
6355		mcp_p0 = BNX2_MCP_STATE_P0_5708;
6356		mcp_p1 = BNX2_MCP_STATE_P1_5708;
6357	}
6358	netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
6359		   bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
6360	netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
6361		   REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
6362	if (bp->flags & BNX2_FLAG_USING_MSIX)
6363		netdev_err(dev, "DEBUG: PBA[%08x]\n",
6364			   REG_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
6365}
6366
6367static void
6368bnx2_tx_timeout(struct net_device *dev)
6369{
6370	struct bnx2 *bp = netdev_priv(dev);
6371
6372	bnx2_dump_state(bp);
6373
6374	/* This allows the netif to be shutdown gracefully before resetting */
6375	schedule_work(&bp->reset_task);
6376}
6377
6378#ifdef BCM_VLAN
6379/* Called with rtnl_lock */
6380static void
6381bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
6382{
6383	struct bnx2 *bp = netdev_priv(dev);
6384
6385	if (netif_running(dev))
6386		bnx2_netif_stop(bp, false);
6387
6388	bp->vlgrp = vlgrp;
6389
6390	if (!netif_running(dev))
6391		return;
6392
6393	bnx2_set_rx_mode(dev);
6394	if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
6395		bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
6396
6397	bnx2_netif_start(bp, false);
6398}
6399#endif
6400
6401/* Called with netif_tx_lock.
6402 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
6403 * netif_wake_queue().
6404 */
6405static netdev_tx_t
6406bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6407{
6408	struct bnx2 *bp = netdev_priv(dev);
6409	dma_addr_t mapping;
6410	struct tx_bd *txbd;
6411	struct sw_tx_bd *tx_buf;
6412	u32 len, vlan_tag_flags, last_frag, mss;
6413	u16 prod, ring_prod;
6414	int i;
6415	struct bnx2_napi *bnapi;
6416	struct bnx2_tx_ring_info *txr;
6417	struct netdev_queue *txq;
6418
6419	/*  Determine which tx ring we will be placed on */
6420	i = skb_get_queue_mapping(skb);
6421	bnapi = &bp->bnx2_napi[i];
6422	txr = &bnapi->tx_ring;
6423	txq = netdev_get_tx_queue(dev, i);
6424
6425	if (unlikely(bnx2_tx_avail(bp, txr) <
6426	    (skb_shinfo(skb)->nr_frags + 1))) {
6427		netif_tx_stop_queue(txq);
6428		netdev_err(dev, "BUG! Tx ring full when queue awake!\n");
6429
6430		return NETDEV_TX_BUSY;
6431	}
6432	len = skb_headlen(skb);
6433	prod = txr->tx_prod;
6434	ring_prod = TX_RING_IDX(prod);
6435
6436	vlan_tag_flags = 0;
6437	if (skb->ip_summed == CHECKSUM_PARTIAL) {
6438		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6439	}
6440
6441#ifdef BCM_VLAN
6442	if (bp->vlgrp && vlan_tx_tag_present(skb)) {
6443		vlan_tag_flags |=
6444			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
6445	}
6446#endif
6447	if ((mss = skb_shinfo(skb)->gso_size)) {
6448		u32 tcp_opt_len;
6449		struct iphdr *iph;
6450
6451		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
6452
6453		tcp_opt_len = tcp_optlen(skb);
6454
6455		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
6456			u32 tcp_off = skb_transport_offset(skb) -
6457				      sizeof(struct ipv6hdr) - ETH_HLEN;
6458
6459			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
6460					  TX_BD_FLAGS_SW_FLAGS;
6461			if (likely(tcp_off == 0))
6462				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
6463			else {
6464				tcp_off >>= 3;
6465				vlan_tag_flags |= ((tcp_off & 0x3) <<
6466						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
6467						  ((tcp_off & 0x10) <<
6468						   TX_BD_FLAGS_TCP6_OFF4_SHL);
6469				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
6470			}
6471		} else {
6472			iph = ip_hdr(skb);
6473			if (tcp_opt_len || (iph->ihl > 5)) {
6474				vlan_tag_flags |= ((iph->ihl - 5) +
6475						   (tcp_opt_len >> 2)) << 8;
6476			}
6477		}
6478	} else
6479		mss = 0;
6480
6481	mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
6482	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
6483		dev_kfree_skb(skb);
6484		return NETDEV_TX_OK;
6485	}
6486
6487	tx_buf = &txr->tx_buf_ring[ring_prod];
6488	tx_buf->skb = skb;
6489	dma_unmap_addr_set(tx_buf, mapping, mapping);
6490
6491	txbd = &txr->tx_desc_ring[ring_prod];
6492
6493	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6494	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6495	txbd->tx_bd_mss_nbytes = len | (mss << 16);
6496	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
6497
6498	last_frag = skb_shinfo(skb)->nr_frags;
6499	tx_buf->nr_frags = last_frag;
6500	tx_buf->is_gso = skb_is_gso(skb);
6501
6502	for (i = 0; i < last_frag; i++) {
6503		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6504
6505		prod = NEXT_TX_BD(prod);
6506		ring_prod = TX_RING_IDX(prod);
6507		txbd = &txr->tx_desc_ring[ring_prod];
6508
6509		len = frag->size;
6510		mapping = dma_map_page(&bp->pdev->dev, frag->page, frag->page_offset,
6511				       len, PCI_DMA_TODEVICE);
6512		if (dma_mapping_error(&bp->pdev->dev, mapping))
6513			goto dma_error;
6514		dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
6515				   mapping);
6516
6517		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6518		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6519		txbd->tx_bd_mss_nbytes = len | (mss << 16);
6520		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
6521
6522	}
6523	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
6524
6525	prod = NEXT_TX_BD(prod);
6526	txr->tx_prod_bseq += skb->len;
6527
6528	REG_WR16(bp, txr->tx_bidx_addr, prod);
6529	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
6530
6531	mmiowb();
6532
6533	txr->tx_prod = prod;
6534
6535	if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
6536		netif_tx_stop_queue(txq);
6537
6538		/* netif_tx_stop_queue() must be done before checking
6539		 * tx index in bnx2_tx_avail() below, because in
6540		 * bnx2_tx_int(), we update tx index before checking for
6541		 * netif_tx_queue_stopped().
6542		 */
6543		smp_mb();
6544		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
6545			netif_tx_wake_queue(txq);
6546	}
6547
6548	return NETDEV_TX_OK;
6549dma_error:
6550	/* save value of frag that failed */
6551	last_frag = i;
6552
6553	/* start back at beginning and unmap skb */
6554	prod = txr->tx_prod;
6555	ring_prod = TX_RING_IDX(prod);
6556	tx_buf = &txr->tx_buf_ring[ring_prod];
6557	tx_buf->skb = NULL;
6558	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6559			 skb_headlen(skb), PCI_DMA_TODEVICE);
6560
6561	/* unmap remaining mapped pages */
6562	for (i = 0; i < last_frag; i++) {
6563		prod = NEXT_TX_BD(prod);
6564		ring_prod = TX_RING_IDX(prod);
6565		tx_buf = &txr->tx_buf_ring[ring_prod];
6566		dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6567			       skb_shinfo(skb)->frags[i].size,
6568			       PCI_DMA_TODEVICE);
6569	}
6570
6571	dev_kfree_skb(skb);
6572	return NETDEV_TX_OK;
6573}
6574
6575/* Called with rtnl_lock */
6576static int
6577bnx2_close(struct net_device *dev)
6578{
6579	struct bnx2 *bp = netdev_priv(dev);
6580
6581	cancel_work_sync(&bp->reset_task);
6582
6583	bnx2_disable_int_sync(bp);
6584	bnx2_napi_disable(bp);
6585	del_timer_sync(&bp->timer);
6586	bnx2_shutdown_chip(bp);
6587	bnx2_free_irq(bp);
6588	bnx2_free_skbs(bp);
6589	bnx2_free_mem(bp);
6590	bnx2_del_napi(bp);
6591	bp->link_up = 0;
6592	netif_carrier_off(bp->dev);
6593	bnx2_set_power_state(bp, PCI_D3hot);
6594	return 0;
6595}
6596
6597static void
6598bnx2_save_stats(struct bnx2 *bp)
6599{
6600	u32 *hw_stats = (u32 *) bp->stats_blk;
6601	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6602	int i;
6603
6604	/* The 1st 10 counters are 64-bit counters */
6605	for (i = 0; i < 20; i += 2) {
6606		u32 hi;
6607		u64 lo;
6608
6609		hi = temp_stats[i] + hw_stats[i];
6610		lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6611		if (lo > 0xffffffff)
6612			hi++;
6613		temp_stats[i] = hi;
6614		temp_stats[i + 1] = lo & 0xffffffff;
6615	}
6616
6617	for ( ; i < sizeof(struct statistics_block) / 4; i++)
6618		temp_stats[i] += hw_stats[i];
6619}
6620
6621#define GET_64BIT_NET_STATS64(ctr)		\
6622	(((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))
6623
6624#define GET_64BIT_NET_STATS(ctr)				\
6625	GET_64BIT_NET_STATS64(bp->stats_blk->ctr) +		\
6626	GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
6627
6628#define GET_32BIT_NET_STATS(ctr)				\
6629	(unsigned long) (bp->stats_blk->ctr +			\
6630			 bp->temp_stats_blk->ctr)
6631
6632static struct rtnl_link_stats64 *
6633bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
6634{
6635	struct bnx2 *bp = netdev_priv(dev);
6636
6637	if (bp->stats_blk == NULL)
6638		return net_stats;
6639
6640	net_stats->rx_packets =
6641		GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
6642		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
6643		GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);
6644
6645	net_stats->tx_packets =
6646		GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
6647		GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
6648		GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);
6649
6650	net_stats->rx_bytes =
6651		GET_64BIT_NET_STATS(stat_IfHCInOctets);
6652
6653	net_stats->tx_bytes =
6654		GET_64BIT_NET_STATS(stat_IfHCOutOctets);
6655
6656	net_stats->multicast =
6657		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts);
6658
6659	net_stats->collisions =
6660		GET_32BIT_NET_STATS(stat_EtherStatsCollisions);
6661
6662	net_stats->rx_length_errors =
6663		GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
6664		GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);
6665
6666	net_stats->rx_over_errors =
6667		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6668		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);
6669
6670	net_stats->rx_frame_errors =
6671		GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);
6672
6673	net_stats->rx_crc_errors =
6674		GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);
6675
6676	net_stats->rx_errors = net_stats->rx_length_errors +
6677		net_stats->rx_over_errors + net_stats->rx_frame_errors +
6678		net_stats->rx_crc_errors;
6679
6680	net_stats->tx_aborted_errors =
6681		GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
6682		GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);
6683
6684	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
6685	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
6686		net_stats->tx_carrier_errors = 0;
6687	else {
6688		net_stats->tx_carrier_errors =
6689			GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
6690	}
6691
6692	net_stats->tx_errors =
6693		GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
6694		net_stats->tx_aborted_errors +
6695		net_stats->tx_carrier_errors;
6696
6697	net_stats->rx_missed_errors =
6698		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6699		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
6700		GET_32BIT_NET_STATS(stat_FwRxDrop);
6701
6702	return net_stats;
6703}
6704
6705/* All ethtool functions called with rtnl_lock */
6706
6707static int
6708bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6709{
6710	struct bnx2 *bp = netdev_priv(dev);
6711	int support_serdes = 0, support_copper = 0;
6712
6713	cmd->supported = SUPPORTED_Autoneg;
6714	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6715		support_serdes = 1;
6716		support_copper = 1;
6717	} else if (bp->phy_port == PORT_FIBRE)
6718		support_serdes = 1;
6719	else
6720		support_copper = 1;
6721
6722	if (support_serdes) {
6723		cmd->supported |= SUPPORTED_1000baseT_Full |
6724			SUPPORTED_FIBRE;
6725		if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6726			cmd->supported |= SUPPORTED_2500baseX_Full;
6727
6728	}
6729	if (support_copper) {
6730		cmd->supported |= SUPPORTED_10baseT_Half |
6731			SUPPORTED_10baseT_Full |
6732			SUPPORTED_100baseT_Half |
6733			SUPPORTED_100baseT_Full |
6734			SUPPORTED_1000baseT_Full |
6735			SUPPORTED_TP;
6736
6737	}
6738
6739	spin_lock_bh(&bp->phy_lock);
6740	cmd->port = bp->phy_port;
6741	cmd->advertising = bp->advertising;
6742
6743	if (bp->autoneg & AUTONEG_SPEED) {
6744		cmd->autoneg = AUTONEG_ENABLE;
6745	}
6746	else {
6747		cmd->autoneg = AUTONEG_DISABLE;
6748	}
6749
6750	if (netif_carrier_ok(dev)) {
6751		cmd->speed = bp->line_speed;
6752		cmd->duplex = bp->duplex;
6753	}
6754	else {
6755		cmd->speed = -1;
6756		cmd->duplex = -1;
6757	}
6758	spin_unlock_bh(&bp->phy_lock);
6759
6760	cmd->transceiver = XCVR_INTERNAL;
6761	cmd->phy_address = bp->phy_addr;
6762
6763	return 0;
6764}
6765
6766static int
6767bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6768{
6769	struct bnx2 *bp = netdev_priv(dev);
6770	u8 autoneg = bp->autoneg;
6771	u8 req_duplex = bp->req_duplex;
6772	u16 req_line_speed = bp->req_line_speed;
6773	u32 advertising = bp->advertising;
6774	int err = -EINVAL;
6775
6776	spin_lock_bh(&bp->phy_lock);
6777
6778	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
6779		goto err_out_unlock;
6780
6781	if (cmd->port != bp->phy_port &&
6782	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
6783		goto err_out_unlock;
6784
6785	/* If device is down, we can store the settings only if the user
6786	 * is setting the currently active port.
6787	 */
6788	if (!netif_running(dev) && cmd->port != bp->phy_port)
6789		goto err_out_unlock;
6790
6791	if (cmd->autoneg == AUTONEG_ENABLE) {
6792		autoneg |= AUTONEG_SPEED;
6793
6794		advertising = cmd->advertising;
6795		if (cmd->port == PORT_TP) {
6796			advertising &= ETHTOOL_ALL_COPPER_SPEED;
6797			if (!advertising)
6798				advertising = ETHTOOL_ALL_COPPER_SPEED;
6799		} else {
6800			advertising &= ETHTOOL_ALL_FIBRE_SPEED;
6801			if (!advertising)
6802				advertising = ETHTOOL_ALL_FIBRE_SPEED;
6803		}
6804		advertising |= ADVERTISED_Autoneg;
6805	}
6806	else {
6807		if (cmd->port == PORT_FIBRE) {
6808			if ((cmd->speed != SPEED_1000 &&
6809			     cmd->speed != SPEED_2500) ||
6810			    (cmd->duplex != DUPLEX_FULL))
6811				goto err_out_unlock;
6812
6813			if (cmd->speed == SPEED_2500 &&
6814			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
6815				goto err_out_unlock;
6816		}
6817		else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
6818			goto err_out_unlock;
6819
6820		autoneg &= ~AUTONEG_SPEED;
6821		req_line_speed = cmd->speed;
6822		req_duplex = cmd->duplex;
6823		advertising = 0;
6824	}
6825
6826	bp->autoneg = autoneg;
6827	bp->advertising = advertising;
6828	bp->req_line_speed = req_line_speed;
6829	bp->req_duplex = req_duplex;
6830
6831	err = 0;
6832	/* If device is down, the new settings will be picked up when it is
6833	 * brought up.
6834	 */
6835	if (netif_running(dev))
6836		err = bnx2_setup_phy(bp, cmd->port);
6837
6838err_out_unlock:
6839	spin_unlock_bh(&bp->phy_lock);
6840
6841	return err;
6842}
6843
6844static void
6845bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6846{
6847	struct bnx2 *bp = netdev_priv(dev);
6848
6849	strcpy(info->driver, DRV_MODULE_NAME);
6850	strcpy(info->version, DRV_MODULE_VERSION);
6851	strcpy(info->bus_info, pci_name(bp->pdev));
6852	strcpy(info->fw_version, bp->fw_version);
6853}
6854
6855#define BNX2_REGDUMP_LEN		(32 * 1024)
6856
6857static int
6858bnx2_get_regs_len(struct net_device *dev)
6859{
6860	return BNX2_REGDUMP_LEN;
6861}
6862
6863static void
6864bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6865{
6866	u32 *p = _p, i, offset;
6867	u8 *orig_p = _p;
6868	struct bnx2 *bp = netdev_priv(dev);
6869	u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6870				 0x0800, 0x0880, 0x0c00, 0x0c10,
6871				 0x0c30, 0x0d08, 0x1000, 0x101c,
6872				 0x1040, 0x1048, 0x1080, 0x10a4,
6873				 0x1400, 0x1490, 0x1498, 0x14f0,
6874				 0x1500, 0x155c, 0x1580, 0x15dc,
6875				 0x1600, 0x1658, 0x1680, 0x16d8,
6876				 0x1800, 0x1820, 0x1840, 0x1854,
6877				 0x1880, 0x1894, 0x1900, 0x1984,
6878				 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6879				 0x1c80, 0x1c94, 0x1d00, 0x1d84,
6880				 0x2000, 0x2030, 0x23c0, 0x2400,
6881				 0x2800, 0x2820, 0x2830, 0x2850,
6882				 0x2b40, 0x2c10, 0x2fc0, 0x3058,
6883				 0x3c00, 0x3c94, 0x4000, 0x4010,
6884				 0x4080, 0x4090, 0x43c0, 0x4458,
6885				 0x4c00, 0x4c18, 0x4c40, 0x4c54,
6886				 0x4fc0, 0x5010, 0x53c0, 0x5444,
6887				 0x5c00, 0x5c18, 0x5c80, 0x5c90,
6888				 0x5fc0, 0x6000, 0x6400, 0x6428,
6889				 0x6800, 0x6848, 0x684c, 0x6860,
6890				 0x6888, 0x6910, 0x8000 };
6891
6892	regs->version = 0;
6893
6894	memset(p, 0, BNX2_REGDUMP_LEN);
6895
6896	if (!netif_running(bp->dev))
6897		return;
6898
6899	i = 0;
6900	offset = reg_boundaries[0];
6901	p += offset;
6902	while (offset < BNX2_REGDUMP_LEN) {
6903		*p++ = REG_RD(bp, offset);
6904		offset += 4;
6905		if (offset == reg_boundaries[i + 1]) {
6906			offset = reg_boundaries[i + 2];
6907			p = (u32 *) (orig_p + offset);
6908			i += 2;
6909		}
6910	}
6911}
6912
6913static void
6914bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6915{
6916	struct bnx2 *bp = netdev_priv(dev);
6917
6918	if (bp->flags & BNX2_FLAG_NO_WOL) {
6919		wol->supported = 0;
6920		wol->wolopts = 0;
6921	}
6922	else {
6923		wol->supported = WAKE_MAGIC;
6924		if (bp->wol)
6925			wol->wolopts = WAKE_MAGIC;
6926		else
6927			wol->wolopts = 0;
6928	}
6929	memset(&wol->sopass, 0, sizeof(wol->sopass));
6930}
6931
6932static int
6933bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6934{
6935	struct bnx2 *bp = netdev_priv(dev);
6936
6937	if (wol->wolopts & ~WAKE_MAGIC)
6938		return -EINVAL;
6939
6940	if (wol->wolopts & WAKE_MAGIC) {
6941		if (bp->flags & BNX2_FLAG_NO_WOL)
6942			return -EINVAL;
6943
6944		bp->wol = 1;
6945	}
6946	else {
6947		bp->wol = 0;
6948	}
6949	return 0;
6950}
6951
6952static int
6953bnx2_nway_reset(struct net_device *dev)
6954{
6955	struct bnx2 *bp = netdev_priv(dev);
6956	u32 bmcr;
6957
6958	if (!netif_running(dev))
6959		return -EAGAIN;
6960
6961	if (!(bp->autoneg & AUTONEG_SPEED)) {
6962		return -EINVAL;
6963	}
6964
6965	spin_lock_bh(&bp->phy_lock);
6966
6967	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6968		int rc;
6969
6970		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
6971		spin_unlock_bh(&bp->phy_lock);
6972		return rc;
6973	}
6974
6975	/* Force a link down visible on the other side */
6976	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6977		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
6978		spin_unlock_bh(&bp->phy_lock);
6979
6980		msleep(20);
6981
6982		spin_lock_bh(&bp->phy_lock);
6983
6984		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
6985		bp->serdes_an_pending = 1;
6986		mod_timer(&bp->timer, jiffies + bp->current_interval);
6987	}
6988
6989	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6990	bmcr &= ~BMCR_LOOPBACK;
6991	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
6992
6993	spin_unlock_bh(&bp->phy_lock);
6994
6995	return 0;
6996}
6997
6998static u32
6999bnx2_get_link(struct net_device *dev)
7000{
7001	struct bnx2 *bp = netdev_priv(dev);
7002
7003	return bp->link_up;
7004}
7005
7006static int
7007bnx2_get_eeprom_len(struct net_device *dev)
7008{
7009	struct bnx2 *bp = netdev_priv(dev);
7010
7011	if (bp->flash_info == NULL)
7012		return 0;
7013
7014	return (int) bp->flash_size;
7015}
7016
7017static int
7018bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7019		u8 *eebuf)
7020{
7021	struct bnx2 *bp = netdev_priv(dev);
7022	int rc;
7023
7024	if (!netif_running(dev))
7025		return -EAGAIN;
7026
7027	/* parameters already validated in ethtool_get_eeprom */
7028
7029	rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7030
7031	return rc;
7032}
7033
7034static int
7035bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7036		u8 *eebuf)
7037{
7038	struct bnx2 *bp = netdev_priv(dev);
7039	int rc;
7040
7041	if (!netif_running(dev))
7042		return -EAGAIN;
7043
7044	/* parameters already validated in ethtool_set_eeprom */
7045
7046	rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
7047
7048	return rc;
7049}
7050
7051static int
7052bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7053{
7054	struct bnx2 *bp = netdev_priv(dev);
7055
7056	memset(coal, 0, sizeof(struct ethtool_coalesce));
7057
7058	coal->rx_coalesce_usecs = bp->rx_ticks;
7059	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
7060	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
7061	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
7062
7063	coal->tx_coalesce_usecs = bp->tx_ticks;
7064	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
7065	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
7066	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
7067
7068	coal->stats_block_coalesce_usecs = bp->stats_ticks;
7069
7070	return 0;
7071}
7072
7073static int
7074bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7075{
7076	struct bnx2 *bp = netdev_priv(dev);
7077
7078	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7079	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
7080
7081	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
7082	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
7083
7084	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
7085	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
7086
7087	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
7088	if (bp->rx_quick_cons_trip_int > 0xff)
7089		bp->rx_quick_cons_trip_int = 0xff;
7090
7091	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7092	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
7093
7094	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
7095	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
7096
7097	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
7098	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
7099
7100	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
7101	if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
7102		0xff;
7103
7104	bp->stats_ticks = coal->stats_block_coalesce_usecs;
7105	if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
7106		if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
7107			bp->stats_ticks = USEC_PER_SEC;
7108	}
7109	if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
7110		bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7111	bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7112
7113	if (netif_running(bp->dev)) {
7114		bnx2_netif_stop(bp, true);
7115		bnx2_init_nic(bp, 0);
7116		bnx2_netif_start(bp, true);
7117	}
7118
7119	return 0;
7120}
7121
7122static void
7123bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7124{
7125	struct bnx2 *bp = netdev_priv(dev);
7126
7127	ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
7128	ering->rx_mini_max_pending = 0;
7129	ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
7130
7131	ering->rx_pending = bp->rx_ring_size;
7132	ering->rx_mini_pending = 0;
7133	ering->rx_jumbo_pending = bp->rx_pg_ring_size;
7134
7135	ering->tx_max_pending = MAX_TX_DESC_CNT;
7136	ering->tx_pending = bp->tx_ring_size;
7137}
7138
7139static int
7140bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
7141{
7142	if (netif_running(bp->dev)) {
7143		/* Reset will erase chipset stats; save them */
7144		bnx2_save_stats(bp);
7145
7146		bnx2_netif_stop(bp, true);
7147		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
7148		bnx2_free_skbs(bp);
7149		bnx2_free_mem(bp);
7150	}
7151
7152	bnx2_set_rx_ring_size(bp, rx);
7153	bp->tx_ring_size = tx;
7154
7155	if (netif_running(bp->dev)) {
7156		int rc;
7157
7158		rc = bnx2_alloc_mem(bp);
7159		if (!rc)
7160			rc = bnx2_init_nic(bp, 0);
7161
7162		if (rc) {
7163			bnx2_napi_enable(bp);
7164			dev_close(bp->dev);
7165			return rc;
7166		}
7167#ifdef BCM_CNIC
7168		mutex_lock(&bp->cnic_lock);
7169		/* Let cnic know about the new status block. */
7170		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
7171			bnx2_setup_cnic_irq_info(bp);
7172		mutex_unlock(&bp->cnic_lock);
7173#endif
7174		bnx2_netif_start(bp, true);
7175	}
7176	return 0;
7177}
7178
7179static int
7180bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7181{
7182	struct bnx2 *bp = netdev_priv(dev);
7183	int rc;
7184
7185	if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
7186		(ering->tx_pending > MAX_TX_DESC_CNT) ||
7187		(ering->tx_pending <= MAX_SKB_FRAGS)) {
7188
7189		return -EINVAL;
7190	}
7191	rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
7192	return rc;
7193}
7194
7195static void
7196bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7197{
7198	struct bnx2 *bp = netdev_priv(dev);
7199
7200	epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7201	epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7202	epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7203}
7204
7205static int
7206bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7207{
7208	struct bnx2 *bp = netdev_priv(dev);
7209
7210	bp->req_flow_ctrl = 0;
7211	if (epause->rx_pause)
7212		bp->req_flow_ctrl |= FLOW_CTRL_RX;
7213	if (epause->tx_pause)
7214		bp->req_flow_ctrl |= FLOW_CTRL_TX;
7215
7216	if (epause->autoneg) {
7217		bp->autoneg |= AUTONEG_FLOW_CTRL;
7218	}
7219	else {
7220		bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7221	}
7222
7223	if (netif_running(dev)) {
7224		spin_lock_bh(&bp->phy_lock);
7225		bnx2_setup_phy(bp, bp->phy_port);
7226		spin_unlock_bh(&bp->phy_lock);
7227	}
7228
7229	return 0;
7230}
7231
7232static u32
7233bnx2_get_rx_csum(struct net_device *dev)
7234{
7235	struct bnx2 *bp = netdev_priv(dev);
7236
7237	return bp->rx_csum;
7238}
7239
7240static int
7241bnx2_set_rx_csum(struct net_device *dev, u32 data)
7242{
7243	struct bnx2 *bp = netdev_priv(dev);
7244
7245	bp->rx_csum = data;
7246	return 0;
7247}
7248
7249static int
7250bnx2_set_tso(struct net_device *dev, u32 data)
7251{
7252	struct bnx2 *bp = netdev_priv(dev);
7253
7254	if (data) {
7255		dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
7256		if (CHIP_NUM(bp) == CHIP_NUM_5709)
7257			dev->features |= NETIF_F_TSO6;
7258	} else
7259		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
7260				   NETIF_F_TSO_ECN);
7261	return 0;
7262}
7263
7264static struct {
7265	char string[ETH_GSTRING_LEN];
7266} bnx2_stats_str_arr[] = {
7267	{ "rx_bytes" },
7268	{ "rx_error_bytes" },
7269	{ "tx_bytes" },
7270	{ "tx_error_bytes" },
7271	{ "rx_ucast_packets" },
7272	{ "rx_mcast_packets" },
7273	{ "rx_bcast_packets" },
7274	{ "tx_ucast_packets" },
7275	{ "tx_mcast_packets" },
7276	{ "tx_bcast_packets" },
7277	{ "tx_mac_errors" },
7278	{ "tx_carrier_errors" },
7279	{ "rx_crc_errors" },
7280	{ "rx_align_errors" },
7281	{ "tx_single_collisions" },
7282	{ "tx_multi_collisions" },
7283	{ "tx_deferred" },
7284	{ "tx_excess_collisions" },
7285	{ "tx_late_collisions" },
7286	{ "tx_total_collisions" },
7287	{ "rx_fragments" },
7288	{ "rx_jabbers" },
7289	{ "rx_undersize_packets" },
7290	{ "rx_oversize_packets" },
7291	{ "rx_64_byte_packets" },
7292	{ "rx_65_to_127_byte_packets" },
7293	{ "rx_128_to_255_byte_packets" },
7294	{ "rx_256_to_511_byte_packets" },
7295	{ "rx_512_to_1023_byte_packets" },
7296	{ "rx_1024_to_1522_byte_packets" },
7297	{ "rx_1523_to_9022_byte_packets" },
7298	{ "tx_64_byte_packets" },
7299	{ "tx_65_to_127_byte_packets" },
7300	{ "tx_128_to_255_byte_packets" },
7301	{ "tx_256_to_511_byte_packets" },
7302	{ "tx_512_to_1023_byte_packets" },
7303	{ "tx_1024_to_1522_byte_packets" },
7304	{ "tx_1523_to_9022_byte_packets" },
7305	{ "rx_xon_frames" },
7306	{ "rx_xoff_frames" },
7307	{ "tx_xon_frames" },
7308	{ "tx_xoff_frames" },
7309	{ "rx_mac_ctrl_frames" },
7310	{ "rx_filtered_packets" },
7311	{ "rx_ftq_discards" },
7312	{ "rx_discards" },
7313	{ "rx_fw_discards" },
7314};
7315
7316#define BNX2_NUM_STATS (sizeof(bnx2_stats_str_arr)/\
7317			sizeof(bnx2_stats_str_arr[0]))
7318
7319#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
7320
7321static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
7322    STATS_OFFSET32(stat_IfHCInOctets_hi),
7323    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
7324    STATS_OFFSET32(stat_IfHCOutOctets_hi),
7325    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
7326    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
7327    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
7328    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
7329    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
7330    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
7331    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
7332    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
7333    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
7334    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
7335    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
7336    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
7337    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
7338    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
7339    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
7340    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
7341    STATS_OFFSET32(stat_EtherStatsCollisions),
7342    STATS_OFFSET32(stat_EtherStatsFragments),
7343    STATS_OFFSET32(stat_EtherStatsJabbers),
7344    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
7345    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
7346    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
7347    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
7348    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
7349    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
7350    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
7351    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
7352    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
7353    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
7354    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
7355    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
7356    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
7357    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
7358    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
7359    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
7360    STATS_OFFSET32(stat_XonPauseFramesReceived),
7361    STATS_OFFSET32(stat_XoffPauseFramesReceived),
7362    STATS_OFFSET32(stat_OutXonSent),
7363    STATS_OFFSET32(stat_OutXoffSent),
7364    STATS_OFFSET32(stat_MacControlFramesReceived),
7365    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
7366    STATS_OFFSET32(stat_IfInFTQDiscards),
7367    STATS_OFFSET32(stat_IfInMBUFDiscards),
7368    STATS_OFFSET32(stat_FwRxDrop),
7369};
7370
7371/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
7372 * skipped because of errata.
7373 */
7374static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
7375	8,0,8,8,8,8,8,8,8,8,
7376	4,0,4,4,4,4,4,4,4,4,
7377	4,4,4,4,4,4,4,4,4,4,
7378	4,4,4,4,4,4,4,4,4,4,
7379	4,4,4,4,4,4,4,
7380};
7381
7382static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
7383	8,0,8,8,8,8,8,8,8,8,
7384	4,4,4,4,4,4,4,4,4,4,
7385	4,4,4,4,4,4,4,4,4,4,
7386	4,4,4,4,4,4,4,4,4,4,
7387	4,4,4,4,4,4,4,
7388};
7389
7390#define BNX2_NUM_TESTS 6
7391
7392static struct {
7393	char string[ETH_GSTRING_LEN];
7394} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
7395	{ "register_test (offline)" },
7396	{ "memory_test (offline)" },
7397	{ "loopback_test (offline)" },
7398	{ "nvram_test (online)" },
7399	{ "interrupt_test (online)" },
7400	{ "link_test (online)" },
7401};
7402
7403static int
7404bnx2_get_sset_count(struct net_device *dev, int sset)
7405{
7406	switch (sset) {
7407	case ETH_SS_TEST:
7408		return BNX2_NUM_TESTS;
7409	case ETH_SS_STATS:
7410		return BNX2_NUM_STATS;
7411	default:
7412		return -EOPNOTSUPP;
7413	}
7414}
7415
7416static void
7417bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
7418{
7419	struct bnx2 *bp = netdev_priv(dev);
7420
7421	bnx2_set_power_state(bp, PCI_D0);
7422
7423	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
7424	if (etest->flags & ETH_TEST_FL_OFFLINE) {
7425		int i;
7426
7427		bnx2_netif_stop(bp, true);
7428		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
7429		bnx2_free_skbs(bp);
7430
7431		if (bnx2_test_registers(bp) != 0) {
7432			buf[0] = 1;
7433			etest->flags |= ETH_TEST_FL_FAILED;
7434		}
7435		if (bnx2_test_memory(bp) != 0) {
7436			buf[1] = 1;
7437			etest->flags |= ETH_TEST_FL_FAILED;
7438		}
7439		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
7440			etest->flags |= ETH_TEST_FL_FAILED;
7441
7442		if (!netif_running(bp->dev))
7443			bnx2_shutdown_chip(bp);
7444		else {
7445			bnx2_init_nic(bp, 1);
7446			bnx2_netif_start(bp, true);
7447		}
7448
7449		/* wait for link up */
7450		for (i = 0; i < 7; i++) {
7451			if (bp->link_up)
7452				break;
7453			msleep_interruptible(1000);
7454		}
7455	}
7456
7457	if (bnx2_test_nvram(bp) != 0) {
7458		buf[3] = 1;
7459		etest->flags |= ETH_TEST_FL_FAILED;
7460	}
7461	if (bnx2_test_intr(bp) != 0) {
7462		buf[4] = 1;
7463		etest->flags |= ETH_TEST_FL_FAILED;
7464	}
7465
7466	if (bnx2_test_link(bp) != 0) {
7467		buf[5] = 1;
7468		etest->flags |= ETH_TEST_FL_FAILED;
7469
7470	}
7471	if (!netif_running(bp->dev))
7472		bnx2_set_power_state(bp, PCI_D3hot);
7473}
7474
7475static void
7476bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7477{
7478	switch (stringset) {
7479	case ETH_SS_STATS:
7480		memcpy(buf, bnx2_stats_str_arr,
7481			sizeof(bnx2_stats_str_arr));
7482		break;
7483	case ETH_SS_TEST:
7484		memcpy(buf, bnx2_tests_str_arr,
7485			sizeof(bnx2_tests_str_arr));
7486		break;
7487	}
7488}
7489
7490static void
7491bnx2_get_ethtool_stats(struct net_device *dev,
7492		struct ethtool_stats *stats, u64 *buf)
7493{
7494	struct bnx2 *bp = netdev_priv(dev);
7495	int i;
7496	u32 *hw_stats = (u32 *) bp->stats_blk;
7497	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
7498	u8 *stats_len_arr = NULL;
7499
7500	if (hw_stats == NULL) {
7501		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7502		return;
7503	}
7504
7505	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
7506	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
7507	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
7508	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
7509		stats_len_arr = bnx2_5706_stats_len_arr;
7510	else
7511		stats_len_arr = bnx2_5708_stats_len_arr;
7512
7513	for (i = 0; i < BNX2_NUM_STATS; i++) {
7514		unsigned long offset;
7515
7516		if (stats_len_arr[i] == 0) {
7517			/* skip this counter */
7518			buf[i] = 0;
7519			continue;
7520		}
7521
7522		offset = bnx2_stats_offset_arr[i];
7523		if (stats_len_arr[i] == 4) {
7524			/* 4-byte counter */
7525			buf[i] = (u64) *(hw_stats + offset) +
7526				 *(temp_stats + offset);
7527			continue;
7528		}
7529		/* 8-byte counter */
7530		buf[i] = (((u64) *(hw_stats + offset)) << 32) +
7531			 *(hw_stats + offset + 1) +
7532			 (((u64) *(temp_stats + offset)) << 32) +
7533			 *(temp_stats + offset + 1);
7534	}
7535}
7536
7537static int
7538bnx2_phys_id(struct net_device *dev, u32 data)
7539{
7540	struct bnx2 *bp = netdev_priv(dev);
7541	int i;
7542	u32 save;
7543
7544	bnx2_set_power_state(bp, PCI_D0);
7545
7546	if (data == 0)
7547		data = 2;
7548
7549	save = REG_RD(bp, BNX2_MISC_CFG);
7550	REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7551
7552	for (i = 0; i < (data * 2); i++) {
7553		if ((i % 2) == 0) {
7554			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7555		}
7556		else {
7557			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7558				BNX2_EMAC_LED_1000MB_OVERRIDE |
7559				BNX2_EMAC_LED_100MB_OVERRIDE |
7560				BNX2_EMAC_LED_10MB_OVERRIDE |
7561				BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7562				BNX2_EMAC_LED_TRAFFIC);
7563		}
7564		msleep_interruptible(500);
7565		if (signal_pending(current))
7566			break;
7567	}
7568	REG_WR(bp, BNX2_EMAC_LED, 0);
7569	REG_WR(bp, BNX2_MISC_CFG, save);
7570
7571	if (!netif_running(dev))
7572		bnx2_set_power_state(bp, PCI_D3hot);
7573
7574	return 0;
7575}
7576
7577static int
7578bnx2_set_tx_csum(struct net_device *dev, u32 data)
7579{
7580	struct bnx2 *bp = netdev_priv(dev);
7581
7582	if (CHIP_NUM(bp) == CHIP_NUM_5709)
7583		return (ethtool_op_set_tx_ipv6_csum(dev, data));
7584	else
7585		return (ethtool_op_set_tx_csum(dev, data));
7586}
7587
7588static int
7589bnx2_set_flags(struct net_device *dev, u32 data)
7590{
7591	return ethtool_op_set_flags(dev, data, ETH_FLAG_RXHASH);
7592}
7593
7594static const struct ethtool_ops bnx2_ethtool_ops = {
7595	.get_settings		= bnx2_get_settings,
7596	.set_settings		= bnx2_set_settings,
7597	.get_drvinfo		= bnx2_get_drvinfo,
7598	.get_regs_len		= bnx2_get_regs_len,
7599	.get_regs		= bnx2_get_regs,
7600	.get_wol		= bnx2_get_wol,
7601	.set_wol		= bnx2_set_wol,
7602	.nway_reset		= bnx2_nway_reset,
7603	.get_link		= bnx2_get_link,
7604	.get_eeprom_len		= bnx2_get_eeprom_len,
7605	.get_eeprom		= bnx2_get_eeprom,
7606	.set_eeprom		= bnx2_set_eeprom,
7607	.get_coalesce		= bnx2_get_coalesce,
7608	.set_coalesce		= bnx2_set_coalesce,
7609	.get_ringparam		= bnx2_get_ringparam,
7610	.set_ringparam		= bnx2_set_ringparam,
7611	.get_pauseparam		= bnx2_get_pauseparam,
7612	.set_pauseparam		= bnx2_set_pauseparam,
7613	.get_rx_csum		= bnx2_get_rx_csum,
7614	.set_rx_csum		= bnx2_set_rx_csum,
7615	.set_tx_csum		= bnx2_set_tx_csum,
7616	.set_sg			= ethtool_op_set_sg,
7617	.set_tso		= bnx2_set_tso,
7618	.self_test		= bnx2_self_test,
7619	.get_strings		= bnx2_get_strings,
7620	.phys_id		= bnx2_phys_id,
7621	.get_ethtool_stats	= bnx2_get_ethtool_stats,
7622	.get_sset_count		= bnx2_get_sset_count,
7623	.set_flags		= bnx2_set_flags,
7624	.get_flags		= ethtool_op_get_flags,
7625};
7626
7627/* Called with rtnl_lock */
7628static int
7629bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7630{
7631	struct mii_ioctl_data *data = if_mii(ifr);
7632	struct bnx2 *bp = netdev_priv(dev);
7633	int err;
7634
7635	switch(cmd) {
7636	case SIOCGMIIPHY:
7637		data->phy_id = bp->phy_addr;
7638
7639		/* fallthru */
7640	case SIOCGMIIREG: {
7641		u32 mii_regval;
7642
7643		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7644			return -EOPNOTSUPP;
7645
7646		if (!netif_running(dev))
7647			return -EAGAIN;
7648
7649		spin_lock_bh(&bp->phy_lock);
7650		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
7651		spin_unlock_bh(&bp->phy_lock);
7652
7653		data->val_out = mii_regval;
7654
7655		return err;
7656	}
7657
7658	case SIOCSMIIREG:
7659		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7660			return -EOPNOTSUPP;
7661
7662		if (!netif_running(dev))
7663			return -EAGAIN;
7664
7665		spin_lock_bh(&bp->phy_lock);
7666		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
7667		spin_unlock_bh(&bp->phy_lock);
7668
7669		return err;
7670
7671	default:
7672		/* do nothing */
7673		break;
7674	}
7675	return -EOPNOTSUPP;
7676}
7677
7678/* Called with rtnl_lock */
7679static int
7680bnx2_change_mac_addr(struct net_device *dev, void *p)
7681{
7682	struct sockaddr *addr = p;
7683	struct bnx2 *bp = netdev_priv(dev);
7684
7685	if (!is_valid_ether_addr(addr->sa_data))
7686		return -EINVAL;
7687
7688	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7689	if (netif_running(dev))
7690		bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7691
7692	return 0;
7693}
7694
7695/* Called with rtnl_lock */
7696static int
7697bnx2_change_mtu(struct net_device *dev, int new_mtu)
7698{
7699	struct bnx2 *bp = netdev_priv(dev);
7700
7701	if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7702		((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7703		return -EINVAL;
7704
7705	dev->mtu = new_mtu;
7706	return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
7707}
7708
7709#ifdef CONFIG_NET_POLL_CONTROLLER
7710static void
7711poll_bnx2(struct net_device *dev)
7712{
7713	struct bnx2 *bp = netdev_priv(dev);
7714	int i;
7715
7716	for (i = 0; i < bp->irq_nvecs; i++) {
7717		struct bnx2_irq *irq = &bp->irq_tbl[i];
7718
7719		disable_irq(irq->vector);
7720		irq->handler(irq->vector, &bp->bnx2_napi[i]);
7721		enable_irq(irq->vector);
7722	}
7723}
7724#endif
7725
7726static void __devinit
7727bnx2_get_5709_media(struct bnx2 *bp)
7728{
7729	u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7730	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7731	u32 strap;
7732
7733	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7734		return;
7735	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7736		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7737		return;
7738	}
7739
7740	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7741		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7742	else
7743		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7744
7745	if (PCI_FUNC(bp->pdev->devfn) == 0) {
7746		switch (strap) {
7747		case 0x4:
7748		case 0x5:
7749		case 0x6:
7750			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7751			return;
7752		}
7753	} else {
7754		switch (strap) {
7755		case 0x1:
7756		case 0x2:
7757		case 0x4:
7758			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7759			return;
7760		}
7761	}
7762}
7763
7764static void __devinit
7765bnx2_get_pci_speed(struct bnx2 *bp)
7766{
7767	u32 reg;
7768
7769	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
7770	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7771		u32 clkreg;
7772
7773		bp->flags |= BNX2_FLAG_PCIX;
7774
7775		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7776
7777		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
7778		switch (clkreg) {
7779		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
7780			bp->bus_speed_mhz = 133;
7781			break;
7782
7783		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
7784			bp->bus_speed_mhz = 100;
7785			break;
7786
7787		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
7788		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
7789			bp->bus_speed_mhz = 66;
7790			break;
7791
7792		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
7793		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
7794			bp->bus_speed_mhz = 50;
7795			break;
7796
7797		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
7798		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
7799		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
7800			bp->bus_speed_mhz = 33;
7801			break;
7802		}
7803	}
7804	else {
7805		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
7806			bp->bus_speed_mhz = 66;
7807		else
7808			bp->bus_speed_mhz = 33;
7809	}
7810
7811	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
7812		bp->flags |= BNX2_FLAG_PCI_32BIT;
7813
7814}
7815
7816static void __devinit
7817bnx2_read_vpd_fw_ver(struct bnx2 *bp)
7818{
7819	int rc, i, j;
7820	u8 *data;
7821	unsigned int block_end, rosize, len;
7822
7823#define BNX2_VPD_NVRAM_OFFSET	0x300
7824#define BNX2_VPD_LEN		128
7825#define BNX2_MAX_VER_SLEN	30
7826
7827	data = kmalloc(256, GFP_KERNEL);
7828	if (!data)
7829		return;
7830
7831	rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
7832			     BNX2_VPD_LEN);
7833	if (rc)
7834		goto vpd_done;
7835
7836	for (i = 0; i < BNX2_VPD_LEN; i += 4) {
7837		data[i] = data[i + BNX2_VPD_LEN + 3];
7838		data[i + 1] = data[i + BNX2_VPD_LEN + 2];
7839		data[i + 2] = data[i + BNX2_VPD_LEN + 1];
7840		data[i + 3] = data[i + BNX2_VPD_LEN];
7841	}
7842
7843	i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
7844	if (i < 0)
7845		goto vpd_done;
7846
7847	rosize = pci_vpd_lrdt_size(&data[i]);
7848	i += PCI_VPD_LRDT_TAG_SIZE;
7849	block_end = i + rosize;
7850
7851	if (block_end > BNX2_VPD_LEN)
7852		goto vpd_done;
7853
7854	j = pci_vpd_find_info_keyword(data, i, rosize,
7855				      PCI_VPD_RO_KEYWORD_MFR_ID);
7856	if (j < 0)
7857		goto vpd_done;
7858
7859	len = pci_vpd_info_field_size(&data[j]);
7860
7861	j += PCI_VPD_INFO_FLD_HDR_SIZE;
7862	if (j + len > block_end || len != 4 ||
7863	    memcmp(&data[j], "1028", 4))
7864		goto vpd_done;
7865
7866	j = pci_vpd_find_info_keyword(data, i, rosize,
7867				      PCI_VPD_RO_KEYWORD_VENDOR0);
7868	if (j < 0)
7869		goto vpd_done;
7870
7871	len = pci_vpd_info_field_size(&data[j]);
7872
7873	j += PCI_VPD_INFO_FLD_HDR_SIZE;
7874	if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
7875		goto vpd_done;
7876
7877	memcpy(bp->fw_version, &data[j], len);
7878	bp->fw_version[len] = ' ';
7879
7880vpd_done:
7881	kfree(data);
7882}
7883
7884static int __devinit
7885bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7886{
7887	struct bnx2 *bp;
7888	unsigned long mem_len;
7889	int rc, i, j;
7890	u32 reg;
7891	u64 dma_mask, persist_dma_mask;
7892
7893	SET_NETDEV_DEV(dev, &pdev->dev);
7894	bp = netdev_priv(dev);
7895
7896	bp->flags = 0;
7897	bp->phy_flags = 0;
7898
7899	bp->temp_stats_blk =
7900		kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
7901
7902	if (bp->temp_stats_blk == NULL) {
7903		rc = -ENOMEM;
7904		goto err_out;
7905	}
7906
7907	/* enable device (incl. PCI PM wakeup), and bus-mastering */
7908	rc = pci_enable_device(pdev);
7909	if (rc) {
7910		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
7911		goto err_out;
7912	}
7913
7914	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7915		dev_err(&pdev->dev,
7916			"Cannot find PCI device base address, aborting\n");
7917		rc = -ENODEV;
7918		goto err_out_disable;
7919	}
7920
7921	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7922	if (rc) {
7923		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
7924		goto err_out_disable;
7925	}
7926
7927	pci_set_master(pdev);
7928	pci_save_state(pdev);
7929
7930	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7931	if (bp->pm_cap == 0) {
7932		dev_err(&pdev->dev,
7933			"Cannot find power management capability, aborting\n");
7934		rc = -EIO;
7935		goto err_out_release;
7936	}
7937
7938	bp->dev = dev;
7939	bp->pdev = pdev;
7940
7941	spin_lock_init(&bp->phy_lock);
7942	spin_lock_init(&bp->indirect_lock);
7943#ifdef BCM_CNIC
7944	mutex_init(&bp->cnic_lock);
7945#endif
7946	INIT_WORK(&bp->reset_task, bnx2_reset_task);
7947
7948	dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7949	mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1);
7950	dev->mem_end = dev->mem_start + mem_len;
7951	dev->irq = pdev->irq;
7952
7953	bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7954
7955	if (!bp->regview) {
7956		dev_err(&pdev->dev, "Cannot map register space, aborting\n");
7957		rc = -ENOMEM;
7958		goto err_out_release;
7959	}
7960
7961	/* Configure byte swap and enable write to the reg_window registers.
7962	 * Rely on CPU to do target byte swapping on big endian systems
7963	 * The chip's target access swapping will not swap all accesses
7964	 */
7965	pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7966			       BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7967			       BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7968
7969	bnx2_set_power_state(bp, PCI_D0);
7970
7971	bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7972
7973	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7974		if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7975			dev_err(&pdev->dev,
7976				"Cannot find PCIE capability, aborting\n");
7977			rc = -EIO;
7978			goto err_out_unmap;
7979		}
7980		bp->flags |= BNX2_FLAG_PCIE;
7981		if (CHIP_REV(bp) == CHIP_REV_Ax)
7982			bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
7983	} else {
7984		bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7985		if (bp->pcix_cap == 0) {
7986			dev_err(&pdev->dev,
7987				"Cannot find PCIX capability, aborting\n");
7988			rc = -EIO;
7989			goto err_out_unmap;
7990		}
7991		bp->flags |= BNX2_FLAG_BROKEN_STATS;
7992	}
7993
7994	if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7995		if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
7996			bp->flags |= BNX2_FLAG_MSIX_CAP;
7997	}
7998
7999	if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
8000		if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
8001			bp->flags |= BNX2_FLAG_MSI_CAP;
8002	}
8003
8004	/* 5708 cannot support DMA addresses > 40-bit.  */
8005	if (CHIP_NUM(bp) == CHIP_NUM_5708)
8006		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
8007	else
8008		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
8009
8010	/* Configure DMA attributes. */
8011	if (pci_set_dma_mask(pdev, dma_mask) == 0) {
8012		dev->features |= NETIF_F_HIGHDMA;
8013		rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
8014		if (rc) {
8015			dev_err(&pdev->dev,
8016				"pci_set_consistent_dma_mask failed, aborting\n");
8017			goto err_out_unmap;
8018		}
8019	} else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
8020		dev_err(&pdev->dev, "System does not support DMA, aborting\n");
8021		goto err_out_unmap;
8022	}
8023
8024	if (!(bp->flags & BNX2_FLAG_PCIE))
8025		bnx2_get_pci_speed(bp);
8026
8027	/* 5706A0 may falsely detect SERR and PERR. */
8028	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
8029		reg = REG_RD(bp, PCI_COMMAND);
8030		reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
8031		REG_WR(bp, PCI_COMMAND, reg);
8032	}
8033	else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
8034		!(bp->flags & BNX2_FLAG_PCIX)) {
8035
8036		dev_err(&pdev->dev,
8037			"5706 A1 can only be used in a PCIX bus, aborting\n");
8038		goto err_out_unmap;
8039	}
8040
8041	bnx2_init_nvram(bp);
8042
8043	reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
8044
8045	if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
8046	    BNX2_SHM_HDR_SIGNATURE_SIG) {
8047		u32 off = PCI_FUNC(pdev->devfn) << 2;
8048
8049		bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
8050	} else
8051		bp->shmem_base = HOST_VIEW_SHMEM_BASE;
8052
8053	/* Get the permanent MAC address.  First we need to make sure the
8054	 * firmware is actually running.
8055	 */
8056	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
8057
8058	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
8059	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
8060		dev_err(&pdev->dev, "Firmware not running, aborting\n");
8061		rc = -ENODEV;
8062		goto err_out_unmap;
8063	}
8064
8065	bnx2_read_vpd_fw_ver(bp);
8066
8067	j = strlen(bp->fw_version);
8068	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
8069	for (i = 0; i < 3 && j < 24; i++) {
8070		u8 num, k, skip0;
8071
8072		if (i == 0) {
8073			bp->fw_version[j++] = 'b';
8074			bp->fw_version[j++] = 'c';
8075			bp->fw_version[j++] = ' ';
8076		}
8077		num = (u8) (reg >> (24 - (i * 8)));
8078		for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8079			if (num >= k || !skip0 || k == 1) {
8080				bp->fw_version[j++] = (num / k) + '0';
8081				skip0 = 0;
8082			}
8083		}
8084		if (i != 2)
8085			bp->fw_version[j++] = '.';
8086	}
8087	reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
8088	if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8089		bp->wol = 1;
8090
8091	if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
8092		bp->flags |= BNX2_FLAG_ASF_ENABLE;
8093
8094		for (i = 0; i < 30; i++) {
8095			reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8096			if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8097				break;
8098			msleep(10);
8099		}
8100	}
8101	reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8102	reg &= BNX2_CONDITION_MFW_RUN_MASK;
8103	if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8104	    reg != BNX2_CONDITION_MFW_RUN_NONE) {
8105		u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
8106
8107		if (j < 32)
8108			bp->fw_version[j++] = ' ';
8109		for (i = 0; i < 3 && j < 28; i++) {
8110			reg = bnx2_reg_rd_ind(bp, addr + i * 4);
8111			reg = swab32(reg);
8112			memcpy(&bp->fw_version[j], &reg, 4);
8113			j += 4;
8114		}
8115	}
8116
8117	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
8118	bp->mac_addr[0] = (u8) (reg >> 8);
8119	bp->mac_addr[1] = (u8) reg;
8120
8121	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
8122	bp->mac_addr[2] = (u8) (reg >> 24);
8123	bp->mac_addr[3] = (u8) (reg >> 16);
8124	bp->mac_addr[4] = (u8) (reg >> 8);
8125	bp->mac_addr[5] = (u8) reg;
8126
8127	bp->tx_ring_size = MAX_TX_DESC_CNT;
8128	bnx2_set_rx_ring_size(bp, 255);
8129
8130	bp->rx_csum = 1;
8131
8132	bp->tx_quick_cons_trip_int = 2;
8133	bp->tx_quick_cons_trip = 20;
8134	bp->tx_ticks_int = 18;
8135	bp->tx_ticks = 80;
8136
8137	bp->rx_quick_cons_trip_int = 2;
8138	bp->rx_quick_cons_trip = 12;
8139	bp->rx_ticks_int = 18;
8140	bp->rx_ticks = 18;
8141
8142	bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
8143
8144	bp->current_interval = BNX2_TIMER_INTERVAL;
8145
8146	bp->phy_addr = 1;
8147
8148	/* Disable WOL support if we are running on a SERDES chip. */
8149	if (CHIP_NUM(bp) == CHIP_NUM_5709)
8150		bnx2_get_5709_media(bp);
8151	else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
8152		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
8153
8154	bp->phy_port = PORT_TP;
8155	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
8156		bp->phy_port = PORT_FIBRE;
8157		reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
8158		if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
8159			bp->flags |= BNX2_FLAG_NO_WOL;
8160			bp->wol = 0;
8161		}
8162		if (CHIP_NUM(bp) == CHIP_NUM_5706) {
8163			/* Don't do parallel detect on this board because of
8164			 * some board problems.  The link will not go down
8165			 * if we do parallel detect.
8166			 */
8167			if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8168			    pdev->subsystem_device == 0x310c)
8169				bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8170		} else {
8171			bp->phy_addr = 2;
8172			if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
8173				bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
8174		}
8175	} else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
8176		   CHIP_NUM(bp) == CHIP_NUM_5708)
8177		bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8178	else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
8179		 (CHIP_REV(bp) == CHIP_REV_Ax ||
8180		  CHIP_REV(bp) == CHIP_REV_Bx))
8181		bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8182
8183	bnx2_init_fw_cap(bp);
8184
8185	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
8186	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
8187	    (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
8188	    !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8189		bp->flags |= BNX2_FLAG_NO_WOL;
8190		bp->wol = 0;
8191	}
8192
8193	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
8194		bp->tx_quick_cons_trip_int =
8195			bp->tx_quick_cons_trip;
8196		bp->tx_ticks_int = bp->tx_ticks;
8197		bp->rx_quick_cons_trip_int =
8198			bp->rx_quick_cons_trip;
8199		bp->rx_ticks_int = bp->rx_ticks;
8200		bp->comp_prod_trip_int = bp->comp_prod_trip;
8201		bp->com_ticks_int = bp->com_ticks;
8202		bp->cmd_ticks_int = bp->cmd_ticks;
8203	}
8204
8205	/* Disable MSI on 5706 if AMD 8132 bridge is found.
8206	 *
8207	 * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
8208	 * with byte enables disabled on the unused 32-bit word.  This is legal
8209	 * but causes problems on the AMD 8132 which will eventually stop
8210	 * responding after a while.
8211	 *
8212	 * AMD believes this incompatibility is unique to the 5706, and
8213	 * prefers to locally disable MSI rather than globally disabling it.
8214	 */
8215	if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
8216		struct pci_dev *amd_8132 = NULL;
8217
8218		while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
8219						  PCI_DEVICE_ID_AMD_8132_BRIDGE,
8220						  amd_8132))) {
8221
8222			if (amd_8132->revision >= 0x10 &&
8223			    amd_8132->revision <= 0x13) {
8224				disable_msi = 1;
8225				pci_dev_put(amd_8132);
8226				break;
8227			}
8228		}
8229	}
8230
8231	bnx2_set_default_link(bp);
8232	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8233
8234	init_timer(&bp->timer);
8235	bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
8236	bp->timer.data = (unsigned long) bp;
8237	bp->timer.function = bnx2_timer;
8238
8239	return 0;
8240
8241err_out_unmap:
8242	if (bp->regview) {
8243		iounmap(bp->regview);
8244		bp->regview = NULL;
8245	}
8246
8247err_out_release:
8248	pci_release_regions(pdev);
8249
8250err_out_disable:
8251	pci_disable_device(pdev);
8252	pci_set_drvdata(pdev, NULL);
8253
8254err_out:
8255	return rc;
8256}
8257
8258static char * __devinit
8259bnx2_bus_string(struct bnx2 *bp, char *str)
8260{
8261	char *s = str;
8262
8263	if (bp->flags & BNX2_FLAG_PCIE) {
8264		s += sprintf(s, "PCI Express");
8265	} else {
8266		s += sprintf(s, "PCI");
8267		if (bp->flags & BNX2_FLAG_PCIX)
8268			s += sprintf(s, "-X");
8269		if (bp->flags & BNX2_FLAG_PCI_32BIT)
8270			s += sprintf(s, " 32-bit");
8271		else
8272			s += sprintf(s, " 64-bit");
8273		s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8274	}
8275	return str;
8276}
8277
8278static void
8279bnx2_del_napi(struct bnx2 *bp)
8280{
8281	int i;
8282
8283	for (i = 0; i < bp->irq_nvecs; i++)
8284		netif_napi_del(&bp->bnx2_napi[i].napi);
8285}
8286
8287static void
8288bnx2_init_napi(struct bnx2 *bp)
8289{
8290	int i;
8291
8292	for (i = 0; i < bp->irq_nvecs; i++) {
8293		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8294		int (*poll)(struct napi_struct *, int);
8295
8296		if (i == 0)
8297			poll = bnx2_poll;
8298		else
8299			poll = bnx2_poll_msix;
8300
8301		netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
8302		bnapi->bp = bp;
8303	}
8304}
8305
8306static const struct net_device_ops bnx2_netdev_ops = {
8307	.ndo_open		= bnx2_open,
8308	.ndo_start_xmit		= bnx2_start_xmit,
8309	.ndo_stop		= bnx2_close,
8310	.ndo_get_stats64	= bnx2_get_stats64,
8311	.ndo_set_rx_mode	= bnx2_set_rx_mode,
8312	.ndo_do_ioctl		= bnx2_ioctl,
8313	.ndo_validate_addr	= eth_validate_addr,
8314	.ndo_set_mac_address	= bnx2_change_mac_addr,
8315	.ndo_change_mtu		= bnx2_change_mtu,
8316	.ndo_tx_timeout		= bnx2_tx_timeout,
8317#ifdef BCM_VLAN
8318	.ndo_vlan_rx_register	= bnx2_vlan_rx_register,
8319#endif
8320#ifdef CONFIG_NET_POLL_CONTROLLER
8321	.ndo_poll_controller	= poll_bnx2,
8322#endif
8323};
8324
8325static void inline vlan_features_add(struct net_device *dev, unsigned long flags)
8326{
8327#ifdef BCM_VLAN
8328	dev->vlan_features |= flags;
8329#endif
8330}
8331
8332static int __devinit
8333bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8334{
8335	static int version_printed = 0;
8336	struct net_device *dev = NULL;
8337	struct bnx2 *bp;
8338	int rc;
8339	char str[40];
8340
8341	if (version_printed++ == 0)
8342		pr_info("%s", version);
8343
8344	/* dev zeroed in init_etherdev */
8345	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
8346
8347	if (!dev)
8348		return -ENOMEM;
8349
8350	rc = bnx2_init_board(pdev, dev);
8351	if (rc < 0) {
8352		free_netdev(dev);
8353		return rc;
8354	}
8355
8356	dev->netdev_ops = &bnx2_netdev_ops;
8357	dev->watchdog_timeo = TX_TIMEOUT;
8358	dev->ethtool_ops = &bnx2_ethtool_ops;
8359
8360	bp = netdev_priv(dev);
8361
8362	pci_set_drvdata(pdev, dev);
8363
8364	rc = bnx2_request_firmware(bp);
8365	if (rc)
8366		goto error;
8367
8368	memcpy(dev->dev_addr, bp->mac_addr, 6);
8369	memcpy(dev->perm_addr, bp->mac_addr, 6);
8370
8371	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_GRO |
8372			 NETIF_F_RXHASH;
8373	vlan_features_add(dev, NETIF_F_IP_CSUM | NETIF_F_SG);
8374	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
8375		dev->features |= NETIF_F_IPV6_CSUM;
8376		vlan_features_add(dev, NETIF_F_IPV6_CSUM);
8377	}
8378#ifdef BCM_VLAN
8379	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
8380#endif
8381	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
8382	vlan_features_add(dev, NETIF_F_TSO | NETIF_F_TSO_ECN);
8383	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
8384		dev->features |= NETIF_F_TSO6;
8385		vlan_features_add(dev, NETIF_F_TSO6);
8386	}
8387	if ((rc = register_netdev(dev))) {
8388		dev_err(&pdev->dev, "Cannot register net device\n");
8389		goto error;
8390	}
8391
8392	netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, node addr %pM\n",
8393		    board_info[ent->driver_data].name,
8394		    ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
8395		    ((CHIP_ID(bp) & 0x0ff0) >> 4),
8396		    bnx2_bus_string(bp, str),
8397		    dev->base_addr,
8398		    bp->pdev->irq, dev->dev_addr);
8399
8400	return 0;
8401
8402error:
8403	if (bp->mips_firmware)
8404		release_firmware(bp->mips_firmware);
8405	if (bp->rv2p_firmware)
8406		release_firmware(bp->rv2p_firmware);
8407
8408	if (bp->regview)
8409		iounmap(bp->regview);
8410	pci_release_regions(pdev);
8411	pci_disable_device(pdev);
8412	pci_set_drvdata(pdev, NULL);
8413	free_netdev(dev);
8414	return rc;
8415}
8416
8417static void __devexit
8418bnx2_remove_one(struct pci_dev *pdev)
8419{
8420	struct net_device *dev = pci_get_drvdata(pdev);
8421	struct bnx2 *bp = netdev_priv(dev);
8422
8423	flush_scheduled_work();
8424
8425	unregister_netdev(dev);
8426
8427	if (bp->mips_firmware)
8428		release_firmware(bp->mips_firmware);
8429	if (bp->rv2p_firmware)
8430		release_firmware(bp->rv2p_firmware);
8431
8432	if (bp->regview)
8433		iounmap(bp->regview);
8434
8435	kfree(bp->temp_stats_blk);
8436
8437	free_netdev(dev);
8438	pci_release_regions(pdev);
8439	pci_disable_device(pdev);
8440	pci_set_drvdata(pdev, NULL);
8441}
8442
8443static int
8444bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
8445{
8446	struct net_device *dev = pci_get_drvdata(pdev);
8447	struct bnx2 *bp = netdev_priv(dev);
8448
8449	/* PCI register 4 needs to be saved whether netif_running() or not.
8450	 * MSI address and data need to be saved if using MSI and
8451	 * netif_running().
8452	 */
8453	pci_save_state(pdev);
8454	if (!netif_running(dev))
8455		return 0;
8456
8457	flush_scheduled_work();
8458	bnx2_netif_stop(bp, true);
8459	netif_device_detach(dev);
8460	del_timer_sync(&bp->timer);
8461	bnx2_shutdown_chip(bp);
8462	bnx2_free_skbs(bp);
8463	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
8464	return 0;
8465}
8466
8467static int
8468bnx2_resume(struct pci_dev *pdev)
8469{
8470	struct net_device *dev = pci_get_drvdata(pdev);
8471	struct bnx2 *bp = netdev_priv(dev);
8472
8473	pci_restore_state(pdev);
8474	if (!netif_running(dev))
8475		return 0;
8476
8477	bnx2_set_power_state(bp, PCI_D0);
8478	netif_device_attach(dev);
8479	bnx2_init_nic(bp, 1);
8480	bnx2_netif_start(bp, true);
8481	return 0;
8482}
8483
8484/**
8485 * bnx2_io_error_detected - called when PCI error is detected
8486 * @pdev: Pointer to PCI device
8487 * @state: The current pci connection state
8488 *
8489 * This function is called after a PCI bus error affecting
8490 * this device has been detected.
8491 */
8492static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
8493					       pci_channel_state_t state)
8494{
8495	struct net_device *dev = pci_get_drvdata(pdev);
8496	struct bnx2 *bp = netdev_priv(dev);
8497
8498	rtnl_lock();
8499	netif_device_detach(dev);
8500
8501	if (state == pci_channel_io_perm_failure) {
8502		rtnl_unlock();
8503		return PCI_ERS_RESULT_DISCONNECT;
8504	}
8505
8506	if (netif_running(dev)) {
8507		bnx2_netif_stop(bp, true);
8508		del_timer_sync(&bp->timer);
8509		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
8510	}
8511
8512	pci_disable_device(pdev);
8513	rtnl_unlock();
8514
8515	/* Request a slot slot reset. */
8516	return PCI_ERS_RESULT_NEED_RESET;
8517}
8518
8519/**
8520 * bnx2_io_slot_reset - called after the pci bus has been reset.
8521 * @pdev: Pointer to PCI device
8522 *
8523 * Restart the card from scratch, as if from a cold-boot.
8524 */
8525static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8526{
8527	struct net_device *dev = pci_get_drvdata(pdev);
8528	struct bnx2 *bp = netdev_priv(dev);
8529
8530	rtnl_lock();
8531	if (pci_enable_device(pdev)) {
8532		dev_err(&pdev->dev,
8533			"Cannot re-enable PCI device after reset\n");
8534		rtnl_unlock();
8535		return PCI_ERS_RESULT_DISCONNECT;
8536	}
8537	pci_set_master(pdev);
8538	pci_restore_state(pdev);
8539	pci_save_state(pdev);
8540
8541	if (netif_running(dev)) {
8542		bnx2_set_power_state(bp, PCI_D0);
8543		bnx2_init_nic(bp, 1);
8544	}
8545
8546	rtnl_unlock();
8547	return PCI_ERS_RESULT_RECOVERED;
8548}
8549
8550/**
8551 * bnx2_io_resume - called when traffic can start flowing again.
8552 * @pdev: Pointer to PCI device
8553 *
8554 * This callback is called when the error recovery driver tells us that
8555 * its OK to resume normal operation.
8556 */
8557static void bnx2_io_resume(struct pci_dev *pdev)
8558{
8559	struct net_device *dev = pci_get_drvdata(pdev);
8560	struct bnx2 *bp = netdev_priv(dev);
8561
8562	rtnl_lock();
8563	if (netif_running(dev))
8564		bnx2_netif_start(bp, true);
8565
8566	netif_device_attach(dev);
8567	rtnl_unlock();
8568}
8569
8570static struct pci_error_handlers bnx2_err_handler = {
8571	.error_detected	= bnx2_io_error_detected,
8572	.slot_reset	= bnx2_io_slot_reset,
8573	.resume		= bnx2_io_resume,
8574};
8575
8576static struct pci_driver bnx2_pci_driver = {
8577	.name		= DRV_MODULE_NAME,
8578	.id_table	= bnx2_pci_tbl,
8579	.probe		= bnx2_init_one,
8580	.remove		= __devexit_p(bnx2_remove_one),
8581	.suspend	= bnx2_suspend,
8582	.resume		= bnx2_resume,
8583	.err_handler	= &bnx2_err_handler,
8584};
8585
8586static int __init bnx2_init(void)
8587{
8588	return pci_register_driver(&bnx2_pci_driver);
8589}
8590
8591static void __exit bnx2_cleanup(void)
8592{
8593	pci_unregister_driver(&bnx2_pci_driver);
8594}
8595
8596module_init(bnx2_init);
8597module_exit(bnx2_cleanup);
8598