1/*
2 * tg3.c: Broadcom Tigon3 ethernet driver.
3 *
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2007 Broadcom Corporation.
8 *
9 * Firmware is:
10 *	Derived from proprietary unpublished source code,
11 *	Copyright (C) 2000-2003 Broadcom Corporation.
12 *
13 *	Permission is hereby granted for the distribution of this firmware
14 *	data in hexadecimal or equivalent format, provided this copyright
15 *	notice is accompanying it.
16 */
17
18
19#include <linux/module.h>
20#include <linux/moduleparam.h>
21#include <linux/kernel.h>
22#include <linux/types.h>
23#include <linux/compiler.h>
24#include <linux/slab.h>
25#include <linux/delay.h>
26#include <linux/in.h>
27#include <linux/init.h>
28#include <linux/ioport.h>
29#include <linux/pci.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
33#include <linux/ethtool.h>
34#include <linux/mii.h>
35#include <linux/if_vlan.h>
36#include <linux/ip.h>
37#include <linux/tcp.h>
38#include <linux/workqueue.h>
39#include <linux/prefetch.h>
40#include <linux/dma-mapping.h>
41
42#include <net/checksum.h>
43#include <net/ip.h>
44
45#include <asm/system.h>
46#include <asm/io.h>
47#include <asm/byteorder.h>
48#include <asm/uaccess.h>
49
50#ifdef CONFIG_SPARC
51#include <asm/idprom.h>
52#include <asm/prom.h>
53#endif
54
55#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
56#define TG3_VLAN_TAG_USED 1
57#else
58#define TG3_VLAN_TAG_USED 0
59#endif
60
61#define TG3_TSO_SUPPORT	1
62
63#include "tg3.h"
64
65#define DRV_MODULE_NAME		"tg3"
66#define PFX DRV_MODULE_NAME	": "
67#define DRV_MODULE_VERSION	"3.77"
68#define DRV_MODULE_RELDATE	"May 31, 2007"
69
70#define TG3_DEF_MAC_MODE	0
71#define TG3_DEF_RX_MODE		0
72#define TG3_DEF_TX_MODE		0
73#define TG3_DEF_MSG_ENABLE	  \
74	(NETIF_MSG_DRV		| \
75	 NETIF_MSG_PROBE	| \
76	 NETIF_MSG_LINK		| \
77	 NETIF_MSG_TIMER	| \
78	 NETIF_MSG_IFDOWN	| \
79	 NETIF_MSG_IFUP		| \
80	 NETIF_MSG_RX_ERR	| \
81	 NETIF_MSG_TX_ERR)
82
83/* length of time before we decide the hardware is borked,
84 * and dev->tx_timeout() should be called to fix the problem
85 */
86#define TG3_TX_TIMEOUT			(5 * HZ)
87
88/* hardware minimum and maximum for a single frame's data payload */
89#define TG3_MIN_MTU			60
90#define TG3_MAX_MTU(tp)	\
91	((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
92
93/* These numbers seem to be hard coded in the NIC firmware somehow.
94 * You can't change the ring sizes, but you can change where you place
95 * them in the NIC onboard memory.
96 */
97#define TG3_RX_RING_SIZE		512
98#define TG3_DEF_RX_RING_PENDING		200
99#define TG3_RX_JUMBO_RING_SIZE		256
100#define TG3_DEF_RX_JUMBO_RING_PENDING	100
101
102/* Do not place this n-ring entries value into the tp struct itself,
103 * we really want to expose these constants to GCC so that modulo et
104 * al.  operations are done with shifts and masks instead of with
105 * hw multiply/modulo instructions.  Another solution would be to
106 * replace things like '% foo' with '& (foo - 1)'.
107 */
108#define TG3_RX_RCB_RING_SIZE(tp)	\
109	((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
110
111#define TG3_TX_RING_SIZE		512
112#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)
113
114#define TG3_RX_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
115				 TG3_RX_RING_SIZE)
116#define TG3_RX_JUMBO_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
117			         TG3_RX_JUMBO_RING_SIZE)
118#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
119			           TG3_RX_RCB_RING_SIZE(tp))
120#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
121				 TG3_TX_RING_SIZE)
122#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
123
124#define RX_PKT_BUF_SZ		(1536 + tp->rx_offset + 64)
125#define RX_JUMBO_PKT_BUF_SZ	(9046 + tp->rx_offset + 64)
126
127/* minimum number of free TX descriptors required to wake up TX process */
128#define TG3_TX_WAKEUP_THRESH(tp)		((tp)->tx_pending / 4)
129
130/* number of ETHTOOL_GSTATS u64's */
131#define TG3_NUM_STATS		(sizeof(struct tg3_ethtool_stats)/sizeof(u64))
132
133#define TG3_NUM_TEST		6
134
135static char version[] __devinitdata =
136	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
137
138MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
139MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
140MODULE_LICENSE("GPL");
141MODULE_VERSION(DRV_MODULE_VERSION);
142
143static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
144module_param(tg3_debug, int, 0);
145MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
146
/* PCI vendor/device IDs this driver binds to.  The empty initializer
 * at the end is the required terminating sentinel for the PCI core.
 */
static struct pci_device_id tg3_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{}	/* terminating entry */
};
210
211MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
212
/* Statistic names reported via "ethtool -S" (ETHTOOL_GSTRINGS).
 * NOTE(review): ordering is assumed to match the u64 layout of
 * struct tg3_ethtool_stats (TG3_NUM_STATS entries) — verify against
 * tg3.h before reordering or inserting entries.
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};
293
/* Names of the ethtool self-tests (TG3_NUM_TEST entries).
 * NOTE(review): ordering is assumed to match the results array filled
 * by the driver's self-test implementation — verify before reordering.
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};
304
305static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
306{
307	writel(val, tp->regs + off);
308}
309
310static u32 tg3_read32(struct tg3 *tp, u32 off)
311{
312	return (readl(tp->regs + off));
313}
314
315static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
316{
317	unsigned long flags;
318
319	spin_lock_irqsave(&tp->indirect_lock, flags);
320	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
321	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
322	spin_unlock_irqrestore(&tp->indirect_lock, flags);
323}
324
325static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
326{
327	writel(val, tp->regs + off);
328	readl(tp->regs + off);
329}
330
331static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
332{
333	unsigned long flags;
334	u32 val;
335
336	spin_lock_irqsave(&tp->indirect_lock, flags);
337	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
338	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
339	spin_unlock_irqrestore(&tp->indirect_lock, flags);
340	return val;
341}
342
/* Write a mailbox register when direct MMIO mailbox access cannot be
 * used.  Two hot mailboxes (RX return consumer, RX std producer) have
 * dedicated config-space registers; everything else goes through the
 * shared indirect window (mailbox space sits at register offset
 * +0x5600).
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	/* Fast paths: these mailboxes have their own config-space regs. */
	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	/* Generic path: indirect window, serialized by indirect_lock. */
	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
372
373static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
374{
375	unsigned long flags;
376	u32 val;
377
378	spin_lock_irqsave(&tp->indirect_lock, flags);
379	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
380	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
381	spin_unlock_irqrestore(&tp->indirect_lock, flags);
382	return val;
383}
384
385/* usec_wait specifies the wait time in usec when writing to certain registers
386 * where it is unsafe to read back the register without some delay.
387 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
388 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
389 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);	/* read back to flush the posted write */
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
409
410static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
411{
412	tp->write32_mbox(tp, off, val);
413	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
414	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
415		tp->read32_mbox(tp, off);
416}
417
418static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
419{
420	void __iomem *mbox = tp->regs + off;
421	writel(val, mbox);
422	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
423		writel(val, mbox);
424	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
425		readl(mbox);
426}
427
428static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
429{
430	return (readl(tp->regs + off + GRCMBOX_BASE));
431}
432
433static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
434{
435	writel(val, tp->regs + off + GRCMBOX_BASE);
436}
437
438#define tw32_mailbox(reg, val)	tp->write32_mbox(tp, reg, val)
439#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
440#define tw32_rx_mbox(reg, val)	tp->write32_rx_mbox(tp, reg, val)
441#define tw32_tx_mbox(reg, val)	tp->write32_tx_mbox(tp, reg, val)
442#define tr32_mailbox(reg)	tp->read32_mbox(tp, reg)
443
444#define tw32(reg,val)		tp->write32(tp, reg, val)
445#define tw32_f(reg,val)		_tw32_flush(tp,(reg),(val), 0)
446#define tw32_wait_f(reg,val,us)	_tw32_flush(tp,(reg),(val), (us))
447#define tr32(reg)		tp->read32(tp, reg)
448
/* Write one 32-bit word of NIC on-chip SRAM at offset @off.
 *
 * On 5906 parts, writes into the stats-block range are skipped
 * entirely.  Depending on TG3_FLAG_SRAM_USE_CONFIG the access goes
 * through the PCI config-space memory window or the MMIO window
 * registers; either way the window base is parked back at zero
 * afterwards and the whole sequence is serialized by indirect_lock.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
473
/* Read one 32-bit word of NIC on-chip SRAM at offset @off into *val.
 *
 * Mirror of tg3_write_mem(): the 5906 stats-block range is not
 * accessed and yields *val = 0.  The memory window (config-space or
 * MMIO variant) is used under indirect_lock and re-zeroed afterwards.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
500
/* Mask chip interrupts: set the PCI interrupt mask bit in misc host
 * control, then write 1 to interrupt mailbox 0 (flushed write).
 */
static void tg3_disable_ints(struct tg3 *tp)
{
	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
}
507
/* Nudge the chip so any pending work generates an interrupt: when not
 * using tagged status and the status block is already flagged updated,
 * use the GRC SETINT trigger; otherwise ask the coalescing engine for
 * an immediate update.
 */
static inline void tg3_cond_int(struct tg3 *tp)
{
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
517
/* Re-enable chip interrupts (inverse of tg3_disable_ints).
 * irq_sync is cleared first; the wmb() orders that store before the
 * register writes that let interrupts fire again.  Interrupt mailbox 0
 * is written with the last processed status tag (<< 24), and a second
 * time on 1-shot MSI parts.  Finally tg3_cond_int() pokes the chip in
 * case work was already pending.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		       (tp->last_tag << 24));
	if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       (tp->last_tag << 24));
	tg3_cond_int(tp);
}
532
533static inline unsigned int tg3_has_work(struct tg3 *tp)
534{
535	struct tg3_hw_status *sblk = tp->hw_status;
536	unsigned int work_exists = 0;
537
538	/* check for phy events */
539	if (!(tp->tg3_flags &
540	      (TG3_FLAG_USE_LINKCHG_REG |
541	       TG3_FLAG_POLL_SERDES))) {
542		if (sblk->status & SD_STATUS_LINK_CHG)
543			work_exists = 1;
544	}
545	/* check for RX/TX work to do */
546	if (sblk->idx[0].tx_consumer != tp->tx_cons ||
547	    sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
548		work_exists = 1;
549
550	return work_exists;
551}
552
553/* tg3_restart_ints
554 *  similar to tg3_enable_ints, but it accurately determines whether there
555 *  is new work pending and can return without flushing the PIO write
556 *  which reenables interrupts
557 */
static void tg3_restart_ints(struct tg3 *tp)
{
	/* Tell the chip which status tag we have processed and re-arm
	 * the interrupt mailbox (no flush needed here).
	 */
	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
		     tp->last_tag << 24);
	mmiowb();	/* keep the mailbox write ordered before later MMIO */

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tp))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
}
573
/* Quiesce the data path: refresh trans_start so the TX watchdog does
 * not fire while the queue is stopped, then disable polling and TX.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	netif_poll_disable(tp->dev);
	netif_tx_disable(tp->dev);
}
580
/* Restart the data path after tg3_netif_stop(): wake TX, re-enable
 * polling, force the status-updated bit and re-enable interrupts so
 * anything that arrived while stopped is serviced promptly.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
	netif_wake_queue(tp->dev);
	/* NOTE: unconditional netif_wake_queue is only appropriate
	 * so long as all callers are assured to have free tx slots
	 * (such as after tg3_init_hw)
	 */
	netif_poll_enable(tp->dev);
	tp->hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
592
/* Switch the chip core clock down to its base rate.  Each write uses
 * tw32_wait_f with a 40us wait because TG3PCI_CLOCK_CTRL is unsafe to
 * read back immediately (see the _tw32_flush comment above).
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
	u32 orig_clock_ctrl;

	/* 5780-class chips are left alone. */
	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
		return;

	orig_clock_ctrl = clock_ctrl;
	/* Preserve only the CLKRUN control bits and the low 5 bits. */
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		/* Step down from the 44MHz core clock in two writes. */
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
623
624#define PHY_BUSY_LOOPS	5000
625
/* Read PHY register @reg over the MI (MDIO) interface.
 *
 * MI auto-polling is paused for the duration if it was enabled.  The
 * read command is issued via MAC_MI_COM and completion is busy-waited
 * for up to PHY_BUSY_LOOPS iterations.
 *
 * Returns 0 and stores the register value in *val on success,
 * -EBUSY on timeout (*val is left as 0).
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	/* Pause MI auto-polling while we own MAC_MI_COM. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	/* Build the MI frame: PHY address, register number, read cmd. */
	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Busy-wait for the transaction to complete. */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	/* Restore auto-polling if it was on when we entered. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
674
/* Write @val to PHY register @reg over the MI (MDIO) interface.
 *
 * On 5906 parts, writes to MII_TG3_CTRL and MII_TG3_AUX_CTRL are
 * silently ignored (returns 0 without touching the hardware).  As in
 * tg3_readphy(), MI auto-polling is paused around the transaction and
 * completion is busy-waited for up to PHY_BUSY_LOOPS iterations.
 *
 * Returns 0 on success, -EBUSY on timeout.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	/* Pause MI auto-polling while we own MAC_MI_COM. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	/* Build the MI frame: PHY address, register, data, write cmd. */
	frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Busy-wait for the transaction to complete. */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	/* Restore auto-polling if it was on when we entered. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
723
724static void tg3_phy_set_wirespeed(struct tg3 *tp)
725{
726	u32 val;
727
728	if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
729		return;
730
731	if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
732	    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
733		tg3_writephy(tp, MII_TG3_AUX_CTRL,
734			     (val | (1 << 15) | (1 << 4)));
735}
736
737static int tg3_bmcr_reset(struct tg3 *tp)
738{
739	u32 phy_control;
740	int limit, err;
741
742	/* OK, reset it, and poll the BMCR_RESET bit until it
743	 * clears or we time out.
744	 */
745	phy_control = BMCR_RESET;
746	err = tg3_writephy(tp, MII_BMCR, phy_control);
747	if (err != 0)
748		return -EBUSY;
749
750	limit = 5000;
751	while (limit--) {
752		err = tg3_readphy(tp, MII_BMCR, &phy_control);
753		if (err != 0)
754			return -EBUSY;
755
756		if ((phy_control & BMCR_RESET) == 0) {
757			udelay(40);
758			break;
759		}
760		udelay(10);
761	}
762	if (limit <= 0)
763		return -EBUSY;
764
765	return 0;
766}
767
768static int tg3_wait_macro_done(struct tg3 *tp)
769{
770	int limit = 100;
771
772	while (limit--) {
773		u32 tmp32;
774
775		if (!tg3_readphy(tp, 0x16, &tmp32)) {
776			if ((tmp32 & 0x1000) == 0)
777				break;
778		}
779	}
780	if (limit <= 0)
781		return -EBUSY;
782
783	return 0;
784}
785
/* Write a fixed test pattern into the PHY DSP for each of the 4
 * channels and read it back to verify.  On any macro-done timeout,
 * *resetp is set so the caller knows to reset the PHY and retry.
 *
 * Returns 0 if the pattern verified on all channels, -EBUSY otherwise.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	/* 6 words (low/high pairs) of pattern per channel. */
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Select this channel's DSP block and write the pattern. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Re-select the channel and switch to read-back mode. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, 0x16, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Read back and compare each low/high word pair. */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				/* Mismatch: poke the DSP recovery values
				 * and report failure (no PHY reset needed).
				 */
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
851
/* Clear (zero out) the DSP test pattern on all 4 channels — the undo
 * of the pattern written by tg3_phy_write_and_check_testpat().
 *
 * Returns 0 on success, -EBUSY if a channel's macro never finishes.
 */
static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, 0x16, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, 0x16, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}
871
872static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
873{
874	u32 reg32, phy9_orig;
875	int retries, do_phy_reset, err;
876
877	retries = 10;
878	do_phy_reset = 1;
879	do {
880		if (do_phy_reset) {
881			err = tg3_bmcr_reset(tp);
882			if (err)
883				return err;
884			do_phy_reset = 0;
885		}
886
887		/* Disable transmitter and interrupt.  */
888		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
889			continue;
890
891		reg32 |= 0x3000;
892		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
893
894		/* Set full-duplex, 1000 mbps.  */
895		tg3_writephy(tp, MII_BMCR,
896			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
897
898		/* Set to master mode.  */
899		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
900			continue;
901
902		tg3_writephy(tp, MII_TG3_CTRL,
903			     (MII_TG3_CTRL_AS_MASTER |
904			      MII_TG3_CTRL_ENABLE_AS_MASTER));
905
906		/* Enable SM_DSP_CLOCK and 6dB.  */
907		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
908
909		/* Block the PHY control access.  */
910		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
911		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
912
913		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
914		if (!err)
915			break;
916	} while (--retries);
917
918	err = tg3_phy_reset_chanpat(tp);
919	if (err)
920		return err;
921
922	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
923	tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
924
925	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
926	tg3_writephy(tp, 0x16, 0x0000);
927
928	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
929	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
930		/* Set Extended packet length bit for jumbo frames */
931		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
932	}
933	else {
934		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
935	}
936
937	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
938
939	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
940		reg32 &= ~0x3000;
941		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
942	} else if (!err)
943		err = -EBUSY;
944
945	return err;
946}
947
static void tg3_link_report(struct tg3 *);

/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 phy_status;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		/* Bring the 5906 internal PHY out of IDDQ (low power)
		 * before touching it.
		 */
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	/* Read BMSR twice; MII status bits are latched, so the second
	 * read reflects the current state.
	 */
	err  = tg3_readphy(tp, MII_BMSR, &phy_status);
	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
	if (err != 0)
		return -EBUSY;

	/* Report link down before the reset takes the link away. */
	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		/* These chips need the DSP test-pattern reset sequence. */
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

out:
	/* Post-reset, chip-specific DSP fixups (vendor magic values). */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8d68);
	}
	if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
			tg3_writephy(tp, MII_TG3_TEST1,
				     MII_TG3_TEST1_TRIM_EN | 0x4);
		} else
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
		u32 phy_reg;

		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
		    tg3_writephy(tp, MII_TG3_EXT_CTRL,
				 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 phy_reg;

		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);

		if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phy_reg)) {
			u32 phy_reg2;

			/* Open the shadow register window, set the
			 * auto-MDIX bit in shadow reg 0x10, then restore.
			 */
			tg3_writephy(tp, MII_TG3_EPHY_TEST,
				     phy_reg | MII_TG3_EPHY_SHADOW_EN);
			/* Enable auto-MDIX */
			if (!tg3_readphy(tp, 0x10, &phy_reg2))
				tg3_writephy(tp, 0x10, phy_reg2 | 0x4000);
			tg3_writephy(tp, MII_TG3_EPHY_TEST, phy_reg);
		}
	}

	tg3_phy_set_wirespeed(tp);
	return 0;
}
1068
/* Configure the GRC local-control GPIOs that switch the board's
 * auxiliary (Vaux) power, so the chip stays powered when WOL or ASF
 * requires it.  On dual-port devices (5704/5714) the peer port's
 * flags are consulted too since the GPIOs are shared; only one port
 * performs the sequencing.  No-op for non-NIC (LOM) configurations.
 */
static void tg3_frob_aux_power(struct tg3 *tp)
{
	struct tg3 *tp_peer = tp;

	if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
		return;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);
		/* remove_one() may have been run on the peer. */
		if (!dev_peer)
			tp_peer = tp;
		else
			tp_peer = netdev_priv(dev_peer);
	}

	/* Aux power is needed if WOL or ASF is enabled on either port. */
	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
	    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
				    100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl = 0;

			/* If the peer port is up it already owns the GPIOs. */
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				    NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					 GRC_LCLCTRL_GPIO_OE1 |
					 GRC_LCLCTRL_GPIO_OE2 |
					 GRC_LCLCTRL_GPIO_OUTPUT1 |
					 GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			/* Three-step GPIO sequence, each with a 100us wait;
			 * the ordering is required by the hardware.
			 */
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
		}
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			/* If the peer port is up it already owns the GPIOs. */
			if (tp_peer != tp &&
			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
				return;

			/* Pulse GPIO1 to switch aux power off. */
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    GRC_LCLCTRL_GPIO_OE1, 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
		}
	}
}
1163
1164static int tg3_setup_phy(struct tg3 *, int);
1165
1166#define RESET_KIND_SHUTDOWN	0
1167#define RESET_KIND_INIT		1
1168#define RESET_KIND_SUSPEND	2
1169
1170static void tg3_write_sig_post_reset(struct tg3 *, int);
1171static int tg3_halt_cpu(struct tg3 *, u32);
1172static int tg3_nvram_lock(struct tg3 *);
1173static void tg3_nvram_unlock(struct tg3 *);
1174
/* Put the PHY into its lowest safe power state.  SERDES devices only
 * get the 5704-specific SERDES shutdown; the 5906 EPHY is placed in
 * IDDQ instead of BMCR power-down; and some chips (5700, 5704, MII
 * SERDES 5780) must never be powered down at all due to hardware
 * bugs, so they return before the final BMCR_PDOWN write.
 */
static void tg3_power_down_phy(struct tg3 *tp)
{
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			/* bit 15 meaning is undocumented here -- vendor
			 * magic; TODO confirm against Broadcom docs.
			 */
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		/* Reset the EPHY, then drop it into IDDQ low-power mode. */
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else {
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x01b2);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
		return;
	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
1214
/* Move the device into the requested PCI power state.
 *
 * For D0 the PM control register is written and, on NICs, aux power
 * is switched off.  For D1/D2/D3hot the function downshifts the link
 * (non-SERDES), sets up WOL/ASF state, gates clocks, powers down the
 * PHY when allowed, sequences aux power and finally writes the new
 * power state to PCI config space.
 *
 * Returns 0 on success, -EINVAL for an unsupported target state.
 */
static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
{
	u32 misc_host_ctrl;
	u16 power_control, power_caps;
	int pm = tp->pm_cap;

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Clear any pending PME and the current power-state bits. */
	pci_read_config_word(tp->pdev,
			     pm + PCI_PM_CTRL,
			     &power_control);
	power_control |= PCI_PM_CTRL_PME_STATUS;
	power_control &= ~(PCI_PM_CTRL_STATE_MASK);
	switch (state) {
	case PCI_D0:
		power_control |= 0;	/* D0 encoding is 0; kept for symmetry */
		pci_write_config_word(tp->pdev,
				      pm + PCI_PM_CTRL,
				      power_control);
		udelay(100);	/* Delay after power state change */

		/* Switch out of Vaux if it is a NIC */
		if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);

		return 0;

	case PCI_D1:
		power_control |= 1;
		break;

	case PCI_D2:
		power_control |= 2;
		break;

	case PCI_D3hot:
		power_control |= 3;
		break;

	default:
		printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
		       "requested.\n",
		       tp->dev->name, state);
		return -EINVAL;
	};

	power_control |= PCI_PM_CTRL_PME_ENABLE;

	/* Mask PCI interrupts while we reconfigure the chip. */
	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	/* Save the current link settings so they can be restored on
	 * the way back to D0.
	 */
	if (tp->link_config.phy_is_low_power == 0) {
		tp->link_config.phy_is_low_power = 1;
		tp->link_config.orig_speed = tp->link_config.speed;
		tp->link_config.orig_duplex = tp->link_config.duplex;
		tp->link_config.orig_autoneg = tp->link_config.autoneg;
	}

	/* Drop the copper link to 10/half to minimize power draw. */
	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
		tp->link_config.speed = SPEED_10;
		tp->link_config.duplex = DUPLEX_HALF;
		tp->link_config.autoneg = AUTONEG_ENABLE;
		tg3_setup_phy(tp, 0);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		int i;
		u32 val;

		/* Poll (up to ~200ms) for the firmware status mailbox
		 * to signal readiness -- presumably so the firmware has
		 * quiesced before power-down; TODO confirm.
		 */
		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);

	/* When WOL is enabled, keep the MAC and receiver alive and
	 * arm magic-packet detection.
	 */
	if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
		u32 mac_mode;

		if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
			tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
			udelay(40);

			if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
			    !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
				mac_mode |= MAC_MODE_LINK_POLARITY;
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
		     (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
			mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	/* Clock gating: the applicable scheme depends on chip family. */
	if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
		   (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
		/* do nothing */
	} else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
		     (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		/* Two staged writes, each with a 40us settle time. */
		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	/* PHY may only be powered down when neither WOL nor ASF
	 * needs it.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		tg3_power_down_phy(tp);

	tg3_frob_aux_power(tp);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
			int err;

			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	/* Finally, set the new power state. */
	pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
	udelay(100);	/* Delay after power state change */

	return 0;
}
1428
1429static void tg3_link_report(struct tg3 *tp)
1430{
1431	if (!netif_carrier_ok(tp->dev)) {
1432		if (netif_msg_link(tp))
1433			printk(KERN_INFO PFX "%s: Link is down.\n",
1434			       tp->dev->name);
1435	} else if (netif_msg_link(tp)) {
1436		printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1437		       tp->dev->name,
1438		       (tp->link_config.active_speed == SPEED_1000 ?
1439			1000 :
1440			(tp->link_config.active_speed == SPEED_100 ?
1441			 100 : 10)),
1442		       (tp->link_config.active_duplex == DUPLEX_FULL ?
1443			"full" : "half"));
1444
1445		printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1446		       "%s for RX.\n",
1447		       tp->dev->name,
1448		       (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1449		       (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1450	}
1451}
1452
/* Resolve TX/RX flow control (pause) from the local and link-partner
 * advertisement words and program MAC_RX_MODE / MAC_TX_MODE to match.
 * When pause autonegotiation is disabled, the previously configured
 * TG3_FLAG_{RX,TX}_PAUSE flags are used unchanged.  The registers are
 * only rewritten when the resolved mode actually changed.
 */
static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
{
	u32 new_tg3_flags = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {

		/* Convert 1000BaseX flow control bits to 1000BaseT
		 * bits before resolving flow control.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
			local_adv &= ~(ADVERTISE_PAUSE_CAP |
				       ADVERTISE_PAUSE_ASYM);
			remote_adv &= ~(LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

			if (local_adv & ADVERTISE_1000XPAUSE)
				local_adv |= ADVERTISE_PAUSE_CAP;
			if (local_adv & ADVERTISE_1000XPSE_ASYM)
				local_adv |= ADVERTISE_PAUSE_ASYM;
			if (remote_adv & LPA_1000XPAUSE)
				remote_adv |= LPA_PAUSE_CAP;
			if (remote_adv & LPA_1000XPAUSE_ASYM)
				remote_adv |= LPA_PAUSE_ASYM;
		}

		/* Standard pause resolution over the four combinations
		 * of symmetric/asymmetric pause bits.
		 */
		if (local_adv & ADVERTISE_PAUSE_CAP) {
			if (local_adv & ADVERTISE_PAUSE_ASYM) {
				if (remote_adv & LPA_PAUSE_CAP)
					new_tg3_flags |=
						(TG3_FLAG_RX_PAUSE |
					 	TG3_FLAG_TX_PAUSE);
				else if (remote_adv & LPA_PAUSE_ASYM)
					new_tg3_flags |=
						(TG3_FLAG_RX_PAUSE);
			} else {
				if (remote_adv & LPA_PAUSE_CAP)
					new_tg3_flags |=
						(TG3_FLAG_RX_PAUSE |
					 	TG3_FLAG_TX_PAUSE);
			}
		} else if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if ((remote_adv & LPA_PAUSE_CAP) &&
		    	(remote_adv & LPA_PAUSE_ASYM))
				new_tg3_flags |= TG3_FLAG_TX_PAUSE;
		}

		tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
		tp->tg3_flags |= new_tg3_flags;
	} else {
		/* Autoneg of pause disabled: keep the configured flags. */
		new_tg3_flags = tp->tg3_flags;
	}

	if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode) {
		tw32_f(MAC_RX_MODE, tp->rx_mode);
	}

	if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode) {
		tw32_f(MAC_TX_MODE, tp->tx_mode);
	}
}
1524
1525static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1526{
1527	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1528	case MII_TG3_AUX_STAT_10HALF:
1529		*speed = SPEED_10;
1530		*duplex = DUPLEX_HALF;
1531		break;
1532
1533	case MII_TG3_AUX_STAT_10FULL:
1534		*speed = SPEED_10;
1535		*duplex = DUPLEX_FULL;
1536		break;
1537
1538	case MII_TG3_AUX_STAT_100HALF:
1539		*speed = SPEED_100;
1540		*duplex = DUPLEX_HALF;
1541		break;
1542
1543	case MII_TG3_AUX_STAT_100FULL:
1544		*speed = SPEED_100;
1545		*duplex = DUPLEX_FULL;
1546		break;
1547
1548	case MII_TG3_AUX_STAT_1000HALF:
1549		*speed = SPEED_1000;
1550		*duplex = DUPLEX_HALF;
1551		break;
1552
1553	case MII_TG3_AUX_STAT_1000FULL:
1554		*speed = SPEED_1000;
1555		*duplex = DUPLEX_FULL;
1556		break;
1557
1558	default:
1559		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1560			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
1561				 SPEED_10;
1562			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
1563				  DUPLEX_HALF;
1564			break;
1565		}
1566		*speed = SPEED_INVALID;
1567		*duplex = DUPLEX_INVALID;
1568		break;
1569	};
1570}
1571
/* Program the copper PHY advertisement registers according to
 * tp->link_config and (re)start autonegotiation -- or, when autoneg
 * is disabled with a valid speed, force the requested speed/duplex
 * via BMCR.  In low power mode only 10Mb (plus 100Mb when needed for
 * WOL) is advertised.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->link_config.phy_is_low_power) {
		/* Entering low power mode.  Disable gigabit and
		 * 100baseT advertisements.
		 */
		tg3_writephy(tp, MII_TG3_CTRL, 0);

		new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
			new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);

		tg3_writephy(tp, MII_ADVERTISE, new_adv);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		/* No specific speed requested: advertise everything the
		 * configuration allows.
		 */
		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

		new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
			new_adv |= ADVERTISE_10HALF;
		if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
			new_adv |= ADVERTISE_10FULL;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
			new_adv |= ADVERTISE_100HALF;
		if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
			new_adv |= ADVERTISE_100FULL;
		tg3_writephy(tp, MII_ADVERTISE, new_adv);

		if (tp->link_config.advertising &
		    (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
			new_adv = 0;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
				new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
				new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
			/* 5701 A0/B0 must advertise as master (chip
			 * erratum workaround).
			 */
			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
			    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);
		}
	} else {
		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
			tg3_writephy(tp, MII_ADVERTISE, new_adv);

			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = MII_TG3_CTRL_ADV_1000_FULL;
			else
				new_adv = MII_TG3_CTRL_ADV_1000_HALF;
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				new_adv |= (MII_TG3_CTRL_AS_MASTER |
					    MII_TG3_CTRL_ENABLE_AS_MASTER);
			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
		} else {
			tg3_writephy(tp, MII_TG3_CTRL, 0);

			new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
			if (tp->link_config.speed == SPEED_100) {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_100FULL;
				else
					new_adv |= ADVERTISE_100HALF;
			} else {
				if (tp->link_config.duplex == DUPLEX_FULL)
					new_adv |= ADVERTISE_10FULL;
				else
					new_adv |= ADVERTISE_10HALF;
			}
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
		}
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= TG3_BMCR_SPEED1000;
			break;
		};

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		/* Only rewrite BMCR when it actually changes.  Drop the
		 * link via loopback first and wait (up to 15ms) for it
		 * to go down before forcing the new mode.
		 */
		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}
1704
1705static int tg3_init_5401phy_dsp(struct tg3 *tp)
1706{
1707	int err;
1708
1709	/* Turn off tap power management. */
1710	/* Set Extended packet length bit */
1711	err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1712
1713	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1714	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1715
1716	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1717	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1718
1719	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1720	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1721
1722	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1723	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1724
1725	err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1726	err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1727
1728	udelay(40);
1729
1730	return err;
1731}
1732
1733static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
1734{
1735	u32 adv_reg, all_mask = 0;
1736
1737	if (mask & ADVERTISED_10baseT_Half)
1738		all_mask |= ADVERTISE_10HALF;
1739	if (mask & ADVERTISED_10baseT_Full)
1740		all_mask |= ADVERTISE_10FULL;
1741	if (mask & ADVERTISED_100baseT_Half)
1742		all_mask |= ADVERTISE_100HALF;
1743	if (mask & ADVERTISED_100baseT_Full)
1744		all_mask |= ADVERTISE_100FULL;
1745
1746	if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1747		return 0;
1748
1749	if ((adv_reg & all_mask) != all_mask)
1750		return 0;
1751	if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1752		u32 tg3_ctrl;
1753
1754		all_mask = 0;
1755		if (mask & ADVERTISED_1000baseT_Half)
1756			all_mask |= ADVERTISE_1000HALF;
1757		if (mask & ADVERTISED_1000baseT_Full)
1758			all_mask |= ADVERTISE_1000FULL;
1759
1760		if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1761			return 0;
1762
1763		if ((tg3_ctrl & all_mask) != all_mask)
1764			return 0;
1765	}
1766	return 1;
1767}
1768
/* Bring up / re-evaluate the copper PHY link.
 *
 * Optionally hard-resets the PHY first (forced, or when certain chips
 * see the link go down), applies 5401/5701 PHY workarounds, polls
 * BMSR/AUX_STAT for link, speed and duplex, resolves flow control,
 * programs MAC_MODE and interrupt masks to match, and finally updates
 * the net_device carrier state.  Always returns 0.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up;
	u32 bmsr, dummy;
	u16 current_speed;
	u8 current_duplex;
	int i, err;

	tw32(MAC_EVENT, 0);

	/* Ack any latched MAC status changes before reconfiguring. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	tp->mi_mode = MAC_MI_MODE_BASE;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);

	/* Some third-party PHYs need to be reset on link going
	 * down.
	 */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
	    netif_carrier_ok(tp->dev)) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    !(bmsr & BMSR_LSTATUS))
			force_reset = 1;
	}
	if (force_reset)
		tg3_phy_reset(tp);

	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
		/* BMSR is latched; the second read gives current state. */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
		    !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
			bmsr = 0;

		if (!(bmsr & BMSR_LSTATUS)) {
			err = tg3_init_5401phy_dsp(tp);
			if (err)
				return err;

			/* Give the link up to ~10ms to come back. */
			tg3_readphy(tp, MII_BMSR, &bmsr);
			for (i = 0; i < 1000; i++) {
				udelay(10);
				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
				    (bmsr & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}

			/* 5401 B0 at gigabit may need a second reset plus
			 * DSP reload before the link returns.
			 */
			if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
			    !(bmsr & BMSR_LSTATUS) &&
			    tp->link_config.active_speed == SPEED_1000) {
				err = tg3_phy_reset(tp);
				if (!err)
					err = tg3_init_5401phy_dsp(tp);
				if (err)
					return err;
			}
		}
	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
		/* 5701 A0/B0 PHY workaround writes (vendor magic). */
		tg3_writephy(tp, 0x15, 0x0a75);
		tg3_writephy(tp, 0x1c, 0x8c68);
		tg3_writephy(tp, 0x1c, 0x8d68);
		tg3_writephy(tp, 0x1c, 0x8c68);
	}

	/* Clear pending interrupts... */
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);

	if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
		tg3_writephy(tp, MII_TG3_IMASK, ~0);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		else
			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
	}

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
		u32 val;

		/* Ensure bit 10 of shadow register 0x4007 is set; if it
		 * was not, setting it restarts link detection, so skip
		 * straight to relink.  Meaning of the bit is vendor
		 * magic -- TODO confirm.
		 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
		tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
		if (!(val & (1 << 10))) {
			val |= (1 << 10);
			tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
			goto relink;
		}
	}

	/* Poll (up to 100 double reads) for link-up in BMSR. */
	bmsr = 0;
	for (i = 0; i < 100; i++) {
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			break;
		udelay(40);
	}

	if (bmsr & BMSR_LSTATUS) {
		u32 aux_stat, bmcr;

		/* Wait for AUX_STAT to report a non-zero value. */
		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
		for (i = 0; i < 2000; i++) {
			udelay(10);
			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
			    aux_stat)
				break;
		}

		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
					     &current_speed,
					     &current_duplex);

		/* Wait for a plausible (non-zero, non-0x7fff) BMCR. */
		bmcr = 0;
		for (i = 0; i < 200; i++) {
			tg3_readphy(tp, MII_BMCR, &bmcr);
			if (tg3_readphy(tp, MII_BMCR, &bmcr))
				continue;
			if (bmcr && bmcr != 0x7fff)
				break;
			udelay(10);
		}

		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
			if (bmcr & BMCR_ANENABLE) {
				current_link_up = 1;

				/* Force autoneg restart if we are exiting
				 * low power mode.
				 */
				if (!tg3_copper_is_advertising_all(tp,
						tp->link_config.advertising))
					current_link_up = 0;
			} else {
				current_link_up = 0;
			}
		} else {
			if (!(bmcr & BMCR_ANENABLE) &&
			    tp->link_config.speed == current_speed &&
			    tp->link_config.duplex == current_duplex) {
				current_link_up = 1;
			} else {
				current_link_up = 0;
			}
		}

		tp->link_config.active_speed = current_speed;
		tp->link_config.active_duplex = current_duplex;
	}

	if (current_link_up == 1 &&
	    (tp->link_config.active_duplex == DUPLEX_FULL) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 local_adv, remote_adv;

		if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
			local_adv = 0;
		local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

		if (tg3_readphy(tp, MII_LPA, &remote_adv))
			remote_adv = 0;

		remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);

		/* If we are not advertising full pause capability,
		 * something is wrong.  Bring the link down and reconfigure.
		 */
		if (local_adv != ADVERTISE_PAUSE_CAP) {
			current_link_up = 0;
		} else {
			tg3_setup_flow_control(tp, local_adv, remote_adv);
		}
	}
relink:
	if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
		u32 tmp;

		tg3_phy_copper_begin(tp);

		tg3_readphy(tp, MII_BMSR, &tmp);
		if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
		    (tmp & BMSR_LSTATUS))
			current_link_up = 1;
	}

	/* Mirror the negotiated port mode/duplex into MAC_MODE. */
	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
	if (current_link_up == 1) {
		if (tp->link_config.active_speed == SPEED_100 ||
		    tp->link_config.active_speed == SPEED_10)
			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	} else
		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
		if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
		    (current_link_up == 1 &&
		     tp->link_config.active_speed == SPEED_10))
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	} else {
		if (current_link_up == 1)
			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
	}

	/* ??? Without this setting Netgear GA302T PHY does not
	 * ??? send/receive packets...
	 */
	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
		/* Polled via timer. */
		tw32_f(MAC_EVENT, 0);
	} else {
		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	}
	udelay(40);

	/* 5700 at gigabit on PCI-X / high-speed PCI: notify firmware
	 * via its mailbox after the link settles.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
	    current_link_up == 1 &&
	    tp->link_config.active_speed == SPEED_1000 &&
	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
	     (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
		udelay(120);
		tw32_f(MAC_STATUS,
		     (MAC_STATUS_SYNC_CHANGED |
		      MAC_STATUS_CFG_CHANGED));
		udelay(40);
		tg3_write_mem(tp,
			      NIC_SRAM_FIRMWARE_MBOX,
			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
	}

	/* Propagate a link-state transition to the stack and log it. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	return 0;
}
2047
/* Software state for the 1000BASE-X (fiber) autonegotiation state
 * machine implemented by tg3_fiber_aneg_smachine().  Used when the
 * driver negotiates the link "by hand" instead of letting the SERDES
 * hardware do it (see tg3_setup_fiber_by_hand/fiber_autoneg).
 */
struct tg3_fiber_aneginfo {
	int state;	/* current ANEG_STATE_* of the arbitration machine */
#define ANEG_STATE_UNKNOWN		0
#define ANEG_STATE_AN_ENABLE		1
#define ANEG_STATE_RESTART_INIT		2
#define ANEG_STATE_RESTART		3
#define ANEG_STATE_DISABLE_LINK_OK	4
#define ANEG_STATE_ABILITY_DETECT_INIT	5
#define ANEG_STATE_ABILITY_DETECT	6
#define ANEG_STATE_ACK_DETECT_INIT	7
#define ANEG_STATE_ACK_DETECT		8
#define ANEG_STATE_COMPLETE_ACK_INIT	9
#define ANEG_STATE_COMPLETE_ACK		10
#define ANEG_STATE_IDLE_DETECT_INIT	11
#define ANEG_STATE_IDLE_DETECT		12
#define ANEG_STATE_LINK_OK		13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
#define ANEG_STATE_NEXT_PAGE_WAIT	15

	u32 flags;	/* MR_* control/result bits below */
#define MR_AN_ENABLE		0x00000001
#define MR_RESTART_AN		0x00000002
#define MR_AN_COMPLETE		0x00000004
#define MR_PAGE_RX		0x00000008
#define MR_NP_LOADED		0x00000010
#define MR_TOGGLE_TX		0x00000020
#define MR_LP_ADV_FULL_DUPLEX	0x00000040
#define MR_LP_ADV_HALF_DUPLEX	0x00000080
#define MR_LP_ADV_SYM_PAUSE	0x00000100
#define MR_LP_ADV_ASYM_PAUSE	0x00000200
#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
#define MR_LP_ADV_NEXT_PAGE	0x00001000
#define MR_TOGGLE_RX		0x00002000
#define MR_NP_RX		0x00004000

#define MR_LINK_OK		0x80000000

	/* Tick counters maintained by the state machine; cur_time is
	 * incremented once per smachine call, link_time records when
	 * the current state was entered (for settle timeouts).
	 */
	unsigned long link_time, cur_time;

	u32 ability_match_cfg;		/* last rx config word being matched */
	int ability_match_count;	/* consecutive identical config words */

	/* Boolean match flags derived from the received config stream. */
	char ability_match, idle_match, ack_match;

	u32 txconfig, rxconfig;	/* tx/rx config code words (ANEG_CFG_* bits) */
#define ANEG_CFG_NP		0x00000080
#define ANEG_CFG_ACK		0x00000040
#define ANEG_CFG_RF2		0x00000020
#define ANEG_CFG_RF1		0x00000010
#define ANEG_CFG_PS2		0x00000001
#define ANEG_CFG_PS1		0x00008000
#define ANEG_CFG_HD		0x00004000
#define ANEG_CFG_FD		0x00002000
#define ANEG_CFG_INVAL		0x00001f06

};
2105#define ANEG_OK		0
2106#define ANEG_DONE	1
2107#define ANEG_TIMER_ENAB	2
2108#define ANEG_FAILED	-1
2109
2110#define ANEG_STATE_SETTLE_TIME	10000
2111
/* Advance the software 1000BASE-X autonegotiation state machine by one
 * tick.  Each call samples the received config word from the MAC,
 * updates the ability/ack/idle match state, then performs at most one
 * transition of the arbitration machine held in ap->state.
 *
 * Returns ANEG_OK to keep ticking, ANEG_TIMER_ENAB when the caller
 * should continue polling on a timer, ANEG_DONE when negotiation has
 * finished, or ANEG_FAILED on error.
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
				   struct tg3_fiber_aneginfo *ap)
{
	unsigned long delta;
	u32 rx_cfg_reg;
	int ret;

	/* First tick: clear all accumulated match state. */
	if (ap->state == ANEG_STATE_UNKNOWN) {
		ap->rxconfig = 0;
		ap->link_time = 0;
		ap->cur_time = 0;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->idle_match = 0;
		ap->ack_match = 0;
	}
	ap->cur_time++;

	/* Sample the incoming config word (if any) and derive the
	 * ability/ack/idle match flags from it.
	 */
	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

		if (rx_cfg_reg != ap->ability_match_cfg) {
			/* Config word changed; restart the match count. */
			ap->ability_match_cfg = rx_cfg_reg;
			ap->ability_match = 0;
			ap->ability_match_count = 0;
		} else {
			/* Ability match requires more than one identical
			 * config word in a row.
			 */
			if (++ap->ability_match_count > 1) {
				ap->ability_match = 1;
				ap->ability_match_cfg = rx_cfg_reg;
			}
		}
		if (rx_cfg_reg & ANEG_CFG_ACK)
			ap->ack_match = 1;
		else
			ap->ack_match = 0;

		ap->idle_match = 0;
	} else {
		/* No config words being received: idle. */
		ap->idle_match = 1;
		ap->ability_match_cfg = 0;
		ap->ability_match_count = 0;
		ap->ability_match = 0;
		ap->ack_match = 0;

		rx_cfg_reg = 0;
	}

	ap->rxconfig = rx_cfg_reg;
	ret = ANEG_OK;

	switch(ap->state) {
	case ANEG_STATE_UNKNOWN:
		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
			ap->state = ANEG_STATE_AN_ENABLE;

		/* fallthru */
	case ANEG_STATE_AN_ENABLE:
		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
		if (ap->flags & MR_AN_ENABLE) {
			/* Restart negotiation from scratch. */
			ap->link_time = 0;
			ap->cur_time = 0;
			ap->ability_match_cfg = 0;
			ap->ability_match_count = 0;
			ap->ability_match = 0;
			ap->idle_match = 0;
			ap->ack_match = 0;

			ap->state = ANEG_STATE_RESTART_INIT;
		} else {
			ap->state = ANEG_STATE_DISABLE_LINK_OK;
		}
		break;

	case ANEG_STATE_RESTART_INIT:
		/* Send an all-zero config word while restarting. */
		ap->link_time = ap->cur_time;
		ap->flags &= ~(MR_NP_LOADED);
		ap->txconfig = 0;
		tw32(MAC_TX_AUTO_NEG, 0);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ret = ANEG_TIMER_ENAB;
		ap->state = ANEG_STATE_RESTART;

		/* fallthru */
	case ANEG_STATE_RESTART:
		/* Wait for the link to settle before ability detect. */
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
		} else {
			ret = ANEG_TIMER_ENAB;
		}
		break;

	case ANEG_STATE_DISABLE_LINK_OK:
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_ABILITY_DETECT_INIT:
		/* Advertise full duplex + symmetric pause. */
		ap->flags &= ~(MR_TOGGLE_TX);
		ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ABILITY_DETECT;
		break;

	case ANEG_STATE_ABILITY_DETECT:
		if (ap->ability_match != 0 && ap->rxconfig != 0) {
			ap->state = ANEG_STATE_ACK_DETECT_INIT;
		}
		break;

	case ANEG_STATE_ACK_DETECT_INIT:
		/* Acknowledge the partner's config word. */
		ap->txconfig |= ANEG_CFG_ACK;
		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_ACK_DETECT;

		/* fallthru */
	case ANEG_STATE_ACK_DETECT:
		if (ap->ack_match != 0) {
			/* Ack seen; make sure the ability word did not
			 * change underneath us (ignoring the ACK bit).
			 */
			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
			} else {
				ap->state = ANEG_STATE_AN_ENABLE;
			}
		} else if (ap->ability_match != 0 &&
			   ap->rxconfig == 0) {
			/* Partner restarted negotiation. */
			ap->state = ANEG_STATE_AN_ENABLE;
		}
		break;

	case ANEG_STATE_COMPLETE_ACK_INIT:
		if (ap->rxconfig & ANEG_CFG_INVAL) {
			ret = ANEG_FAILED;
			break;
		}
		/* Decode the link partner's advertised abilities into
		 * the MR_LP_ADV_* flags.
		 */
		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
			       MR_LP_ADV_HALF_DUPLEX |
			       MR_LP_ADV_SYM_PAUSE |
			       MR_LP_ADV_ASYM_PAUSE |
			       MR_LP_ADV_REMOTE_FAULT1 |
			       MR_LP_ADV_REMOTE_FAULT2 |
			       MR_LP_ADV_NEXT_PAGE |
			       MR_TOGGLE_RX |
			       MR_NP_RX);
		if (ap->rxconfig & ANEG_CFG_FD)
			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_HD)
			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
		if (ap->rxconfig & ANEG_CFG_PS1)
			ap->flags |= MR_LP_ADV_SYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_PS2)
			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
		if (ap->rxconfig & ANEG_CFG_RF1)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
		if (ap->rxconfig & ANEG_CFG_RF2)
			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_LP_ADV_NEXT_PAGE;

		ap->link_time = ap->cur_time;

		ap->flags ^= (MR_TOGGLE_TX);
		if (ap->rxconfig & 0x0008)
			ap->flags |= MR_TOGGLE_RX;
		if (ap->rxconfig & ANEG_CFG_NP)
			ap->flags |= MR_NP_RX;
		ap->flags |= MR_PAGE_RX;

		ap->state = ANEG_STATE_COMPLETE_ACK;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_COMPLETE_ACK:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			/* Partner restarted negotiation. */
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
			} else {
				/* Next-page exchange is not implemented
				 * (see the unimplemented states below), so
				 * only proceed when neither side wants it.
				 */
				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
				    !(ap->flags & MR_NP_RX)) {
					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
				} else {
					ret = ANEG_FAILED;
				}
			}
		}
		break;

	case ANEG_STATE_IDLE_DETECT_INIT:
		/* Stop sending config words and wait for idle. */
		ap->link_time = ap->cur_time;
		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);

		ap->state = ANEG_STATE_IDLE_DETECT;
		ret = ANEG_TIMER_ENAB;
		break;

	case ANEG_STATE_IDLE_DETECT:
		if (ap->ability_match != 0 &&
		    ap->rxconfig == 0) {
			/* Partner restarted negotiation. */
			ap->state = ANEG_STATE_AN_ENABLE;
			break;
		}
		delta = ap->cur_time - ap->link_time;
		if (delta > ANEG_STATE_SETTLE_TIME) {
			ap->state = ANEG_STATE_LINK_OK;
		}
		break;

	case ANEG_STATE_LINK_OK:
		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
		ret = ANEG_DONE;
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
		/* ??? unimplemented */
		break;

	case ANEG_STATE_NEXT_PAGE_WAIT:
		/* ??? unimplemented */
		break;

	default:
		ret = ANEG_FAILED;
		break;
	};

	return ret;
}
2358
2359static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2360{
2361	int res = 0;
2362	struct tg3_fiber_aneginfo aninfo;
2363	int status = ANEG_FAILED;
2364	unsigned int tick;
2365	u32 tmp;
2366
2367	tw32_f(MAC_TX_AUTO_NEG, 0);
2368
2369	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2370	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2371	udelay(40);
2372
2373	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2374	udelay(40);
2375
2376	memset(&aninfo, 0, sizeof(aninfo));
2377	aninfo.flags |= MR_AN_ENABLE;
2378	aninfo.state = ANEG_STATE_UNKNOWN;
2379	aninfo.cur_time = 0;
2380	tick = 0;
2381	while (++tick < 195000) {
2382		status = tg3_fiber_aneg_smachine(tp, &aninfo);
2383		if (status == ANEG_DONE || status == ANEG_FAILED)
2384			break;
2385
2386		udelay(1);
2387	}
2388
2389	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2390	tw32_f(MAC_MODE, tp->mac_mode);
2391	udelay(40);
2392
2393	*flags = aninfo.flags;
2394
2395	if (status == ANEG_DONE &&
2396	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2397			     MR_LP_ADV_FULL_DUPLEX)))
2398		res = 1;
2399
2400	return res;
2401}
2402
/* One-time initialization sequence for the BCM8002 fiber PHY.  The
 * register numbers and values below are vendor-specific; the inline
 * comments preserved from the original describe each step.  The
 * sequence is timing-sensitive, hence the explicit delay loops.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
	u32 mac_status = tr32(MAC_STATUS);
	int i;

	/* Reset when initting first time or we have a link. */
	if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
	    !(mac_status & MAC_STATUS_PCS_SYNCED))
		return;

	/* Set PLL lock range. */
	tg3_writephy(tp, 0x16, 0x8007);

	/* SW reset */
	tg3_writephy(tp, MII_BMCR, BMCR_RESET);

	/* Wait for reset to complete. */
	/* ~5ms busy-wait; keep as-is, timing is part of the sequence. */
	for (i = 0; i < 500; i++)
		udelay(10);

	/* Config mode; select PMA/Ch 1 regs. */
	tg3_writephy(tp, 0x10, 0x8411);

	/* Enable auto-lock and comdet, select txclk for tx. */
	tg3_writephy(tp, 0x11, 0x0a10);

	tg3_writephy(tp, 0x18, 0x00a0);
	tg3_writephy(tp, 0x16, 0x41ff);

	/* Assert and deassert POR. */
	tg3_writephy(tp, 0x13, 0x0400);
	udelay(40);
	tg3_writephy(tp, 0x13, 0x0000);

	tg3_writephy(tp, 0x11, 0x0a50);
	udelay(40);
	tg3_writephy(tp, 0x11, 0x0a10);

	/* Wait for signal to stabilize */
	/* ~150ms busy-wait. */
	for (i = 0; i < 15000; i++)
		udelay(10);

	/* Deselect the channel register so we can read the PHYID
	 * later.
	 */
	tg3_writephy(tp, 0x10, 0x8011);
}
2450
/* Fiber link setup for chips whose SERDES hardware performs the
 * 1000BASE-X autonegotiation (the SG_DIG block, e.g. 5704S).
 *
 * @mac_status: caller's snapshot of MAC_STATUS.
 * Returns 1 when the link is up, 0 otherwise.
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
	u32 sg_dig_ctrl, sg_dig_status;
	u32 serdes_cfg, expected_sg_dig_ctrl;
	int workaround, port_a;
	int current_link_up;

	serdes_cfg = 0;
	expected_sg_dig_ctrl = 0;
	workaround = 0;
	port_a = 1;
	current_link_up = 0;

	/* All revs other than 5704 A0/A1 need the SERDES_CFG
	 * workaround writes below.
	 */
	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
		workaround = 1;
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			port_a = 0;

		/* preserve bits 0-11,13,14 for signal pre-emphasis */
		/* preserve bits 20-23 for voltage regulator */
		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
	}

	sg_dig_ctrl = tr32(SG_DIG_CTRL);

	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
		/* Forced mode: if hardware autoneg is currently enabled
		 * (bit 31), disable it.  NOTE(review): the 0xc010000 /
		 * 0x4010000 / 0x01388400 values are undocumented magic
		 * per-port SERDES settings.
		 */
		if (sg_dig_ctrl & (1 << 31)) {
			if (workaround) {
				u32 val = serdes_cfg;

				if (port_a)
					val |= 0xc010000;
				else
					val |= 0x4010000;
				tw32_f(MAC_SERDES_CFG, val);
			}
			tw32_f(SG_DIG_CTRL, 0x01388400);
		}
		if (mac_status & MAC_STATUS_PCS_SYNCED) {
			tg3_setup_flow_control(tp, 0, 0);
			current_link_up = 1;
		}
		goto out;
	}

	/* Want auto-negotiation.  */
	expected_sg_dig_ctrl = 0x81388400;

	/* Pause capability */
	expected_sg_dig_ctrl |= (1 << 11);

	/* Asymettric pause */
	expected_sg_dig_ctrl |= (1 << 12);

	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
		/* If we got link via parallel detection, keep it up for a
		 * while before kicking off autoneg again.
		 */
		if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
		    tp->serdes_counter &&
		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
				    MAC_STATUS_RCVD_CFG)) ==
		     MAC_STATUS_PCS_SYNCED)) {
			tp->serdes_counter--;
			current_link_up = 1;
			goto out;
		}
restart_autoneg:
		/* Kick off (or restart) hardware autoneg: pulse the
		 * restart bit (bit 30) in SG_DIG_CTRL.
		 */
		if (workaround)
			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
		udelay(5);
		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
				 MAC_STATUS_SIGNAL_DET)) {
		sg_dig_status = tr32(SG_DIG_STATUS);
		mac_status = tr32(MAC_STATUS);

		/* SG_DIG_STATUS bit 1 = autoneg complete;
		 * bits 19/20 = partner pause / asym pause ability.
		 */
		if ((sg_dig_status & (1 << 1)) &&
		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
			u32 local_adv, remote_adv;

			local_adv = ADVERTISE_PAUSE_CAP;
			remote_adv = 0;
			if (sg_dig_status & (1 << 19))
				remote_adv |= LPA_PAUSE_CAP;
			if (sg_dig_status & (1 << 20))
				remote_adv |= LPA_PAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);
			current_link_up = 1;
			tp->serdes_counter = 0;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		} else if (!(sg_dig_status & (1 << 1))) {
			/* Autoneg not complete yet; count down before
			 * falling back to parallel detection.
			 */
			if (tp->serdes_counter)
				tp->serdes_counter--;
			else {
				if (workaround) {
					u32 val = serdes_cfg;

					if (port_a)
						val |= 0xc010000;
					else
						val |= 0x4010000;

					tw32_f(MAC_SERDES_CFG, val);
				}

				tw32_f(SG_DIG_CTRL, 0x01388400);
				udelay(40);

				/* Link parallel detection - link is up */
				/* only if we have PCS_SYNC and not */
				/* receiving config code words */
				mac_status = tr32(MAC_STATUS);
				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
					tg3_setup_flow_control(tp, 0, 0);
					current_link_up = 1;
					tp->tg3_flags2 |=
						TG3_FLG2_PARALLEL_DETECT;
					tp->serdes_counter =
						SERDES_PARALLEL_DET_TIMEOUT;
				} else
					goto restart_autoneg;
			}
		}
	} else {
		/* No sync/signal: rearm the autoneg timeout. */
		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
	}

out:
	return current_link_up;
}
2587
/* Fiber link setup using the software autoneg state machine
 * (fiber_autoneg), or a forced 1000-full link when autoneg is off.
 *
 * @mac_status: caller's snapshot of MAC_STATUS.
 * Returns 1 when the link is up, 0 otherwise.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	/* No PCS sync, no link. */
	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 flags;
		int i;

		if (fiber_autoneg(tp, &flags)) {
			u32 local_adv, remote_adv;

			/* Translate the partner's MR_* pause bits into
			 * MII-style LPA bits for flow control setup.
			 */
			local_adv = ADVERTISE_PAUSE_CAP;
			remote_adv = 0;
			if (flags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_PAUSE_CAP;
			if (flags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_PAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = 1;
		}
		/* Ack sync/config-change events until they stop arriving
		 * (bounded to 30 iterations).
		 */
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		/* Autoneg failed but we are synced and receiving no
		 * config words: partner likely has autoneg disabled,
		 * treat the link as up (parallel detection).
		 */
		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		/* Forcing 1000FD link up. */
		current_link_up = 1;

		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);
	}

out:
	return current_link_up;
}
2641
/* Top-level link setup for TBI/fiber ports.  Selects hardware or
 * by-hand autoneg, programs MAC mode and link LEDs, updates the
 * carrier state and reports link changes.  Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Remember pause/speed/duplex so we can report a change even
	 * when the carrier state itself does not flip.
	 */
	orig_pause_cfg =
		(tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
				  TG3_FLAG_TX_PAUSE));
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: software-autoneg port that is already up and
	 * stable (synced, signal, no pending config) — just ack the
	 * change bits and return.
	 */
	if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Put the port in TBI mode. */
	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* Clear the stale link-change bit in the status block. */
	tp->hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack sync/config-change events until quiet (bounded). */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		/* Lost sync after autoneg timed out: pulse SEND_CONFIGS
		 * to nudge the link partner.
		 */
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	/* Fiber is always 1000-full when up; drive the link LED. */
	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	/* Report carrier flips, or a pause/speed/duplex change on an
	 * otherwise unchanged carrier.
	 */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		u32 now_pause_cfg =
			tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
					 TG3_FLAG_TX_PAUSE);
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
2757
/* Link setup for fiber ports attached through an MII/GMII SERDES PHY
 * (e.g. the 5714S path — see the ASIC_REV_5714 special cases below).
 * Handles autoneg, forced mode, and the parallel-detection hold-off.
 * Returns the OR of all tg3_readphy() error results (0 on success).
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	/* Ack any pending status-change events. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	/* Read BMSR twice so the latched link-status bit reflects the
	 * current state.  On 5714 trust MAC_TX_STATUS instead.
	 */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, new_adv;

		/* Build the 1000BASE-X advertisement word. */
		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				  ADVERTISE_1000XPAUSE |
				  ADVERTISE_1000XPSE_ASYM |
				  ADVERTISE_SLCT);

		/* Always advertise symmetric PAUSE just like copper */
		new_adv |= ADVERTISE_1000XPAUSE;

		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
			new_adv |= ADVERTISE_1000XHALF;
		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
			new_adv |= ADVERTISE_1000XFULL;

		/* (Re)start autoneg if the advertisement changed or
		 * autoneg was not enabled; result is picked up later
		 * once SERDES_AN_TIMEOUT_5714S expires.
		 */
		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		/* Forced mode: 1000 Mb/s with configured duplex. */
		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				/* Clear the advertisement and restart
				 * autoneg so the partner drops the link.
				 */
				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Re-read latched link status (5714: use MAC). */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		if (bmcr & BMCR_ANENABLE) {
			u32 local_adv, remote_adv, common;

			/* Resolve duplex and flow control from the
			 * intersection of both advertisements.
			 */
			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;

				tg3_setup_flow_control(tp, local_adv,
						       remote_adv);
			}
			else
				current_link_up = 0;
		}
	}

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
2924
/* Periodic helper for MII SERDES ports implementing link parallel
 * detection: when autoneg fails to bring the link up but we have
 * signal detect and are not receiving config code words, force a
 * 1000-full link; once config words reappear, re-enable autoneg.
 * The PHY register/value pairs below (0x1c/0x7c00, 0x17/0x0f01, 0x15)
 * are vendor shadow/expansion register accesses, per the inline
 * comments.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}
	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, 0x1c, 0x7c00);
			tg3_readphy(tp, 0x1c, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, 0x17, 0x0f01);
			tg3_readphy(tp, 0x15, &phy2);
			tg3_readphy(tp, 0x15, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
			}
		}
	}
	else if (netif_carrier_ok(tp->dev) &&
		 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, 0x17, 0x0f01);
		tg3_readphy(tp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;

		}
	}
}
2982
2983static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2984{
2985	int err;
2986
2987	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2988		err = tg3_setup_fiber_phy(tp, force_reset);
2989	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
2990		err = tg3_setup_fiber_mii_phy(tp, force_reset);
2991	} else {
2992		err = tg3_setup_copper_phy(tp, force_reset);
2993	}
2994
2995	if (tp->link_config.active_speed == SPEED_1000 &&
2996	    tp->link_config.active_duplex == DUPLEX_HALF)
2997		tw32(MAC_TX_LENGTHS,
2998		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2999		      (6 << TX_LENGTHS_IPG_SHIFT) |
3000		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
3001	else
3002		tw32(MAC_TX_LENGTHS,
3003		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3004		      (6 << TX_LENGTHS_IPG_SHIFT) |
3005		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
3006
3007	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
3008		if (netif_carrier_ok(tp->dev)) {
3009			tw32(HOSTCC_STAT_COAL_TICKS,
3010			     tp->coal.stats_block_coalesce_usecs);
3011		} else {
3012			tw32(HOSTCC_STAT_COAL_TICKS, 0);
3013		}
3014	}
3015
3016	if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
3017		u32 val = tr32(PCIE_PWR_MGMT_THRESH);
3018		if (!netif_carrier_ok(tp->dev))
3019			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
3020			      tp->pwrmgmt_thresh;
3021		else
3022			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
3023		tw32(PCIE_PWR_MGMT_THRESH, val);
3024	}
3025
3026	return err;
3027}
3028
3029/* This is called whenever we suspect that the system chipset is re-
3030 * ordering the sequence of MMIO to the tx send mailbox. The symptom
3031 * is bogus tx completions. We try to recover by setting the
3032 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
3033 * in the workqueue.
3034 */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* Must not already be in reorder-workaround mode, and must be
	 * using direct (not indirect) mailbox writes.
	 */
	BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
	       "mapped I/O cycles to the network device, attempting to "
	       "recover. Please report the problem to the driver maintainer "
	       "and include system chipset information.\n", tp->dev->name);

	/* Flag the pending recovery; the actual chip reset happens
	 * later from the workqueue.
	 */
	spin_lock(&tp->lock);
	tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
	spin_unlock(&tp->lock);
}
3049
3050static inline u32 tg3_tx_avail(struct tg3 *tp)
3051{
3052	smp_mb();
3053	return (tp->tx_pending -
3054		((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3055}
3056
3057/* Tigon3 never reports partial packet sends.  So we do not
3058 * need special logic to handle SKBs that have not had all
3059 * of their frags sent yet, like SunGEM does.
3060 */
/* Reclaim completed TX descriptors: walk the ring from the software
 * consumer index up to the hardware consumer index, DMA-unmapping and
 * freeing each skb (head buffer plus its page fragments).  A NULL skb
 * or a fragment overlapping the hardware index indicates bogus
 * completions, handled via tg3_tx_recover().
 */
static void tg3_tx(struct tg3 *tp)
{
	u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tp->tx_cons;

	while (sw_idx != hw_idx) {
		struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A completed slot with no skb means the chip reported
		 * a completion we never posted — recover.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		/* Unmap the linear (head) portion of the skb. */
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		sw_idx = NEXT_TX(sw_idx);

		/* Unmap each page fragment, one ring slot per frag. */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tp->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(ri, mapping),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);

			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	tp->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Re-check under the tx lock to avoid racing with the xmit
	 * path before waking the queue.
	 */
	if (unlikely(netif_queue_stopped(tp->dev) &&
		     (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
		netif_tx_lock(tp->dev);
		if (netif_queue_stopped(tp->dev) &&
		    (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
			netif_wake_queue(tp->dev);
		netif_tx_unlock(tp->dev);
	}
}
3124
3125/* Returns size of skb allocated or < 0 on error.
3126 *
3127 * We only need to fill in the address because the other members
3128 * of the RX descriptor are invariant, see tg3_init_rings.
3129 *
3130 * Note the purposeful assymetry of cpu vs. chip accesses.  For
3131 * posting buffers we only dirty the first cache line of the RX
3132 * descriptor (containing the address).  Whereas for the RX status
3133 * buffers the cpu only reads the last cacheline of the RX descriptor
3134 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
3135 */
/* Allocate and DMA-map a fresh RX skb for ring slot dest_idx_unmasked.
 *
 * @opaque_key: RXD_OPAQUE_RING_STD or RXD_OPAQUE_RING_JUMBO, selects
 *              which ring/buffer array and buffer size to use.
 * @src_idx:    if >= 0, the source slot whose skb pointer is cleared
 *              once the new buffer is committed (recycle path).
 * Returns the allocated skb size, or -EINVAL/-ENOMEM on error.
 * See the block comment above for the commit-ordering contract.
 */
static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
			    int src_idx, u32 dest_idx_unmasked)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map, *src_map;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int skb_size, dest_idx;

	src_map = NULL;
	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
		desc = &tp->rx_std[dest_idx];
		map = &tp->rx_std_buffers[dest_idx];
		if (src_idx >= 0)
			src_map = &tp->rx_std_buffers[src_idx];
		skb_size = tp->rx_pkt_buf_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
		desc = &tp->rx_jumbo[dest_idx];
		map = &tp->rx_jumbo_buffers[dest_idx];
		if (src_idx >= 0)
			src_map = &tp->rx_jumbo_buffers[src_idx];
		skb_size = RX_JUMBO_PKT_BUF_SZ;
		break;

	default:
		return -EINVAL;
	};

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb = netdev_alloc_skb(tp->dev, skb_size);
	if (skb == NULL)
		return -ENOMEM;

	skb_reserve(skb, tp->rx_offset);

	mapping = pci_map_single(tp->pdev, skb->data,
				 skb_size - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);

	/* Commit: record the new skb and its DMA address. */
	map->skb = skb;
	pci_unmap_addr_set(map, mapping, mapping);

	if (src_map != NULL)
		src_map->skb = NULL;

	/* Only the address words of the descriptor change; the rest
	 * are invariant (see tg3_init_rings).
	 */
	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return skb_size;
}
3196
3197/* We only need to move over in the address because the other
3198 * members of the RX descriptor are invariant.  See notes above
3199 * tg3_alloc_rx_skb for full details.
3200 */
3201static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
3202			   int src_idx, u32 dest_idx_unmasked)
3203{
3204	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
3205	struct ring_info *src_map, *dest_map;
3206	int dest_idx;
3207
3208	switch (opaque_key) {
3209	case RXD_OPAQUE_RING_STD:
3210		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
3211		dest_desc = &tp->rx_std[dest_idx];
3212		dest_map = &tp->rx_std_buffers[dest_idx];
3213		src_desc = &tp->rx_std[src_idx];
3214		src_map = &tp->rx_std_buffers[src_idx];
3215		break;
3216
3217	case RXD_OPAQUE_RING_JUMBO:
3218		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
3219		dest_desc = &tp->rx_jumbo[dest_idx];
3220		dest_map = &tp->rx_jumbo_buffers[dest_idx];
3221		src_desc = &tp->rx_jumbo[src_idx];
3222		src_map = &tp->rx_jumbo_buffers[src_idx];
3223		break;
3224
3225	default:
3226		return;
3227	};
3228
3229	dest_map->skb = src_map->skb;
3230	pci_unmap_addr_set(dest_map, mapping,
3231			   pci_unmap_addr(src_map, mapping));
3232	dest_desc->addr_hi = src_desc->addr_hi;
3233	dest_desc->addr_lo = src_desc->addr_lo;
3234
3235	src_map->skb = NULL;
3236}
3237
#if TG3_VLAN_TAG_USED
/* Hand a VLAN-tagged RX packet to the stack via the VLAN accel path. */
static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
{
	return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
}
#endif
3244
3245/* The RX ring scheme is composed of multiple rings which post fresh
3246 * buffers to the chip, and one special ring the chip uses to report
3247 * status back to the host.
3248 *
3249 * The special ring reports the status of received packets to the
3250 * host.  The chip does not write into the original descriptor the
3251 * RX buffer was obtained from.  The chip simply takes the original
3252 * descriptor as provided by the host, updates the status and length
3253 * field, then writes this into the next status ring entry.
3254 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
 * it is first placed into the on-chip RAM.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
3261 *
3262 * The "separate ring for rx status" scheme may sound queer, but it makes
3263 * sense from a cache coherency perspective.  If only the host writes
3264 * to the buffer post rings, and only the chip writes to the rx status
3265 * rings, then cache lines never move beyond shared-modified state.
3266 * If both the host and chip were to write into the same ring, cache line
3267 * eviction could occur since both entities want it in an exclusive state.
3268 */
static int tg3_rx(struct tg3 *tp, int budget)
{
	u32 work_mask, rx_std_posted = 0;
	u32 sw_idx = tp->rx_rcb_ptr;
	u16 hw_idx;
	int received;

	hw_idx = tp->hw_status->idx[0].rx_producer;
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	/* Walk the status ring from our consumer index up to the chip's
	 * producer index, bounded by the NAPI budget.
	 */
	while (sw_idx != hw_idx && budget > 0) {
		struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		/* The opaque cookie identifies which buffer ring and
		 * which slot the packet's buffer came from.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
						  mapping);
			skb = tp->rx_std_buffers[desc_idx].skb;
			post_ptr = &tp->rx_std_ptr;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
						  mapping);
			skb = tp->rx_jumbo_buffers[desc_idx].skb;
			post_ptr = &tp->rx_jumbo_ptr;
		}
		else {
			goto next_pkt_nopost;
		}

		work_mask |= opaque_key;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->net_stats.rx_dropped++;
			goto next_pkt;
		}

		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */

		/* Large packet: pass the existing buffer up and post a
		 * fresh one.  Small packet: copy into a new skb and
		 * recycle the original buffer back to the chip.
		 */
		if (len > RX_COPY_THRESHOLD
			&& tp->rx_offset == 2
			/* rx_offset != 2 iff this is a 5701 card running
			 * in PCI-X mode [see tg3_get_invariants()] */
		) {
			int skb_size;

			skb_size = tg3_alloc_rx_skb(tp, opaque_key,
						    desc_idx, *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr,
					 skb_size - tp->rx_offset,
					 PCI_DMA_FROMDEVICE);

			skb_put(skb, len);
		} else {
			struct sk_buff *copy_skb;

			tg3_recycle_rx(tp, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = netdev_alloc_skb(tp->dev, len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(skb, copy_skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		/* Accept the hardware checksum only when the chip fully
		 * verified it (csum field reads 0xffff).
		 */
		if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		skb->protocol = eth_type_trans(skb, tp->dev);
#if TG3_VLAN_TAG_USED
		if (tp->vlgrp != NULL &&
		    desc->type_flags & RXD_FLAG_VLAN) {
			tg3_vlan_rx(tp, skb,
				    desc->err_vlan & RXD_VLAN_MASK);
		} else
#endif
			netif_receive_skb(skb);

		tp->dev->last_rx = jiffies;
		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Periodically kick the standard-ring producer mailbox so
		 * the chip does not run out of buffers within one poll.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			u32 idx = *post_ptr % TG3_RX_RING_SIZE;

			tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
				     TG3_64BIT_REG_LOW, idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = tp->hw_status->idx[0].rx_producer;
			rmb();
		}
	}

	/* ACK the status ring. */
	tp->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);

	/* Refill RX ring(s). */
	if (work_mask & RXD_OPAQUE_RING_STD) {
		sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	if (work_mask & RXD_OPAQUE_RING_JUMBO) {
		sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
		tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
			     sw_idx);
	}
	mmiowb();

	return received;
}
3424
/* NAPI poll: handle link events, reap completed TX, receive up to
 * *budget packets, then re-enable interrupts once no work remains.
 * Returns 0 when done, 1 when the poll should be rescheduled.
 */
static int tg3_poll(struct net_device *netdev, int *budget)
{
	struct tg3 *tp = netdev_priv(netdev);
	struct tg3_hw_status *sblk = tp->hw_status;
	int done;

	/* handle link change and other phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG) {
			sblk->status = SD_STATUS_UPDATED |
				(sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}

	/* run TX completion thread */
	if (sblk->idx[0].tx_consumer != tp->tx_cons) {
		tg3_tx(tp);
		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) {
			/* TX reclaim found an inconsistency; schedule a
			 * full chip reset and stop polling.
			 */
			netif_rx_complete(netdev);
			schedule_work(&tp->reset_task);
			return 0;
		}
	}

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with dev->poll()
	 */
	if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
		int orig_budget = *budget;
		int work_done;

		if (orig_budget > netdev->quota)
			orig_budget = netdev->quota;

		work_done = tg3_rx(tp, orig_budget);

		*budget -= work_done;
		netdev->quota -= work_done;
	}

	if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
		/* Record the status tag we have consumed; used when
		 * interrupts are re-enabled below via tg3_restart_ints().
		 */
		tp->last_tag = sblk->status_tag;
		rmb();
	} else
		sblk->status &= ~SD_STATUS_UPDATED;

	/* if no more work, tell net stack and NIC we're done */
	done = !tg3_has_work(tp);
	if (done) {
		netif_rx_complete(netdev);
		tg3_restart_ints(tp);
	}

	return (done ? 0 : 1);
}
3486
/* Quiesce interrupt processing: set irq_sync (checked by the ISRs via
 * tg3_irq_sync()) and wait for any handler already running on another
 * CPU to complete.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();	/* make irq_sync visible before waiting on the IRQ */

	synchronize_irq(tp->pdev->irq);
}
3496
/* Nonzero while interrupt processing is quiesced (see tg3_irq_quiesce);
 * ISRs check this and refrain from scheduling NAPI.
 */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
3501
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	/* Quiesce the IRQ handler first so it cannot race with us. */
	if (irq_sync)
		tg3_irq_quiesce(tp);
	spin_lock_bh(&tp->lock);
}
3513
/* Release the lock taken by tg3_full_lock().  This does not undo
 * tg3_irq_quiesce(); callers clear tp->irq_sync separately (see
 * tg3_restart_hw()).
 */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
3518
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);

	/* Warm up the cachelines the poll handler will touch first. */
	prefetch(tp->hw_status);
	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);

	if (likely(!tg3_irq_sync(tp)))
		netif_rx_schedule(dev);		/* schedule NAPI poll */

	return IRQ_HANDLED;
}
3535
3536/* MSI ISR - No need to check for interrupt sharing and no need to
3537 * flush status block and interrupt mailbox. PCI ordering rules
3538 * guarantee that MSI will arrive after the status block.
3539 */
3540static irqreturn_t tg3_msi(int irq, void *dev_id)
3541{
3542	struct net_device *dev = dev_id;
3543	struct tg3 *tp = netdev_priv(dev);
3544
3545	prefetch(tp->hw_status);
3546	prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
3547	/*
3548	 * Writing any value to intr-mbox-0 clears PCI INTA# and
3549	 * chip-internal interrupt pending events.
3550	 * Writing non-zero to intr-mbox-0 additional tells the
3551	 * NIC to stop sending us irqs, engaging "in-intr-handler"
3552	 * event coalescing.
3553	 */
3554	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
3555	if (likely(!tg3_irq_sync(tp)))
3556		netif_rx_schedule(dev);		/* schedule NAPI poll */
3557
3558	return IRQ_RETVAL(1);
3559}
3560
/* INTx/legacy interrupt handler.  The line may be shared with other
 * devices, so first determine whether the interrupt is really ours.
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tp))) {
		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
		netif_rx_schedule(dev);		/* schedule NAPI poll */
	} else {
		/* No work, shared interrupt perhaps?  re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
			       0x00000000);
	}
out:
	return IRQ_RETVAL(handled);
}
3609
/* INTx interrupt handler for chips using tagged status.  The status
 * tag is compared against the last one we processed to detect stale
 * interrupts without having to clear SD_STATUS_UPDATED.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct tg3 *tp = netdev_priv(dev);
	struct tg3_hw_status *sblk = tp->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tp->last_tag)) {
		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
			handled = 0;
			goto out;
		}
	}

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additionally tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 * event coalescing.
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts.  The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
		goto out;
	if (netif_rx_schedule_prep(dev)) {
		prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
		/* Update last_tag to mark that this status has been
		 * seen. Because interrupt may be shared, we may be
		 * racing with tg3_poll(), so only update last_tag
		 * if tg3_poll() is not scheduled.
		 */
		tp->last_tag = sblk->status_tag;
		__netif_rx_schedule(dev);
	}
out:
	return IRQ_RETVAL(handled);
}
3657
3658/* ISR for interrupt test */
3659static irqreturn_t tg3_test_isr(int irq, void *dev_id)
3660{
3661	struct net_device *dev = dev_id;
3662	struct tg3 *tp = netdev_priv(dev);
3663	struct tg3_hw_status *sblk = tp->hw_status;
3664
3665	if ((sblk->status & SD_STATUS_UPDATED) ||
3666	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
3667		tg3_disable_ints(tp);
3668		return IRQ_RETVAL(1);
3669	}
3670	return IRQ_RETVAL(0);
3671}
3672
3673static int tg3_init_hw(struct tg3 *, int);
3674static int tg3_halt(struct tg3 *, int, int);
3675
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.  Returns 0 on success; on failure the
 * chip is shut down and the device closed.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
		       "aborting.\n", tp->dev->name);
		/* Init failed: halt the chip, then drop tp->lock so
		 * dev_close() can run, and retake it before returning
		 * so the caller's locking expectations still hold.
		 */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		del_timer_sync(&tp->timer);
		tp->irq_sync = 0;
		netif_poll_enable(tp->dev);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
3697
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' - used by netpoll clients (e.g. netconsole) to
 * drive the device when normal interrupts cannot be relied upon.
 */
static void tg3_poll_controller(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	tg3_interrupt(tp->pdev->irq, dev);
}
#endif
3706
/* Workqueue handler that halts and re-initializes the chip.  Scheduled
 * from the TX timeout and TX recovery paths.
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	unsigned int restart_timer;

	tg3_full_lock(tp, 0);

	if (!netif_running(tp->dev)) {
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	/* Stop the datapath before re-taking the lock with IRQ sync. */
	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;

	if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
		/* After a TX recovery, switch the TX/RX mailbox writers
		 * to the flush-based variants (see
		 * TG3_FLAG_MBOX_WRITE_REORDER).
		 */
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
		tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	if (tg3_init_hw(tp, 1))
		goto out;

	tg3_netif_start(tp);

	if (restart_timer)
		mod_timer(&tp->timer, jiffies + 1);

out:
	tg3_full_unlock(tp);
}
3747
/* Log a short MAC/DMA status register dump (used on TX timeout). */
static void tg3_dump_short_state(struct tg3 *tp)
{
	printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
	       tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
	printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
	       tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
}
3755
/* net_device watchdog callback: the TX queue appears hung, so log some
 * chip state and schedule a full reset from process context.
 */
static void tg3_tx_timeout(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
		       dev->name);
		tg3_dump_short_state(tp);
	}

	schedule_work(&tp->reset_task);
}
3768
/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
{
	u32 base = (u32) mapping & 0xffffffff;

	/* True when base + len + 8 wraps past a 4GB boundary.
	 * NOTE(review): the 0xffffdcc0 pre-filter rejects only bases
	 * within 0x2340 (9024) bytes of the boundary — presumably an
	 * upper bound on any buffer this driver maps; confirm against
	 * the jumbo buffer size.
	 */
	return ((base > 0xffffdcc0) &&
		(base + len + 8 < base));
}
3777
3778/* Test for DMA addresses > 40-bit */
3779static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
3780					  int len)
3781{
3782#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
3783	if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
3784		return (((u64) mapping + len) > DMA_40BIT_MASK);
3785	return 0;
3786#else
3787	return 0;
3788#endif
3789}
3790
3791static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3792
3793static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3794				       u32 last_plus_one, u32 *start,
3795				       u32 base_flags, u32 mss)
3796{
3797	struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3798	dma_addr_t new_addr = 0;
3799	u32 entry = *start;
3800	int i, ret = 0;
3801
3802	if (!new_skb) {
3803		ret = -1;
3804	} else {
3805		/* New SKB is guaranteed to be linear. */
3806		entry = *start;
3807		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3808					  PCI_DMA_TODEVICE);
3809		/* Make sure new skb does not cross any 4G boundaries.
3810		 * Drop the packet if it does.
3811		 */
3812		if (tg3_4g_overflow_test(new_addr, new_skb->len)) {
3813			ret = -1;
3814			dev_kfree_skb(new_skb);
3815			new_skb = NULL;
3816		} else {
3817			tg3_set_txd(tp, entry, new_addr, new_skb->len,
3818				    base_flags, 1 | (mss << 1));
3819			*start = NEXT_TX(entry);
3820		}
3821	}
3822
3823	/* Now clean up the sw ring entries. */
3824	i = 0;
3825	while (entry != last_plus_one) {
3826		int len;
3827
3828		if (i == 0)
3829			len = skb_headlen(skb);
3830		else
3831			len = skb_shinfo(skb)->frags[i-1].size;
3832		pci_unmap_single(tp->pdev,
3833				 pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3834				 len, PCI_DMA_TODEVICE);
3835		if (i == 0) {
3836			tp->tx_buffers[entry].skb = new_skb;
3837			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3838		} else {
3839			tp->tx_buffers[entry].skb = NULL;
3840		}
3841		entry = NEXT_TX(entry);
3842		i++;
3843	}
3844
3845	dev_kfree_skb(skb);
3846
3847	return ret;
3848}
3849
3850static void tg3_set_txd(struct tg3 *tp, int entry,
3851			dma_addr_t mapping, int len, u32 flags,
3852			u32 mss_and_is_end)
3853{
3854	struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3855	int is_end = (mss_and_is_end & 0x1);
3856	u32 mss = (mss_and_is_end >> 1);
3857	u32 vlan_tag = 0;
3858
3859	if (is_end)
3860		flags |= TXD_FLAG_END;
3861	if (flags & TXD_FLAG_VLAN) {
3862		vlan_tag = flags >> 16;
3863		flags &= 0xffff;
3864	}
3865	vlan_tag |= (mss << TXD_MSS_SHIFT);
3866
3867	txd->addr_hi = ((u64) mapping >> 32);
3868	txd->addr_lo = ((u64) mapping & 0xffffffff);
3869	txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3870	txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3871}
3872
/* hard_start_xmit for devices that don't have any bugs and
 * support TG3_FLG2_HW_TSO_2 only.
 */
static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	dma_addr_t mapping;
	u32 len, entry, base_flags, mss;

	len = skb_headlen(skb);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		return NETDEV_TX_BUSY;
	}

	entry = tp->tx_prod;
	base_flags = 0;
	mss = 0;
	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
		int tcp_opt_len, ip_tcp_len;

		/* TSO path: the headers are about to be modified, so
		 * they must not be shared with a clone.
		 */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
		else {
			struct iphdr *iph = ip_hdr(skb);

			tcp_opt_len = tcp_optlen(skb);
			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			/* Header length rides in the upper bits of mss. */
			mss |= (ip_tcp_len + tcp_opt_len) << 9;
		}

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		tcp_hdr(skb)->check = 0;

	}
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	/* Queue skb data, a.k.a. the main skb fragment. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);

			tp->tx_buffers[entry].skb = NULL;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

			tg3_set_txd(tp, entry, mapping, len,
				    base_flags, (i == last) | (mss << 1));

			entry = NEXT_TX(entry);
		}
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	tp->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
		/* Not enough room for another maximally-fragmented skb;
		 * stop the queue, then re-check against the wakeup
		 * threshold to close the race with tg3_tx().
		 */
		netif_stop_queue(dev);
		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
			netif_wake_queue(tp->dev);
	}

out_unlock:
    	mmiowb();

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
3991
3992static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
3993
/* Fallback for chips whose TSO cannot handle this packet (see the
 * header-length check in tg3_start_xmit_dma_bug): segment the GSO skb
 * in software and transmit each segment individually.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;

	/* Estimate the number of fragments in the worst case */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
		netif_stop_queue(tp->dev);
		if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
			return NETDEV_TX_BUSY;

		netif_wake_queue(tp->dev);
	}

	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
	if (unlikely(IS_ERR(segs)))
		goto tg3_tso_bug_end;

	/* Queue each segment on its own; the original skb is freed below. */
	do {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		tg3_start_xmit_dma_bug(nskb, tp->dev);
	} while (segs);

tg3_tso_bug_end:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
4023
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
 */
static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	dma_addr_t mapping;
	u32 len, entry, base_flags, mss;
	int would_hit_hwbug;

	len = skb_headlen(skb);

	/* We are running in BH disabled context with netif_tx_lock
	 * and TX reclaim runs via tp->poll inside of a software
	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
	 * no IRQ context deadlocks to worry about either.  Rejoice!
	 */
	if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);

			/* This is a hard error, log it. */
			printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
			       "queue awake!\n", dev->name);
		}
		return NETDEV_TX_BUSY;
	}

	entry = tp->tx_prod;
	base_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		base_flags |= TXD_FLAG_TCPUDP_CSUM;
	mss = 0;
	if ((mss = skb_shinfo(skb)->gso_size) != 0) {
		struct iphdr *iph;
		int tcp_opt_len, ip_tcp_len, hdr_len;

		/* TSO path: the headers are about to be modified, so
		 * they must not be shared with a clone.
		 */
		if (skb_header_cloned(skb) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
			dev_kfree_skb(skb);
			goto out_unlock;
		}

		tcp_opt_len = tcp_optlen(skb);
		ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

		/* Chips with the TSO bug cannot offload packets with
		 * long headers; segment in software instead.
		 */
		hdr_len = ip_tcp_len + tcp_opt_len;
		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
			     (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
			return (tg3_tso_bug(tp, skb));

		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
			       TXD_FLAG_CPU_POST_DMA);

		iph = ip_hdr(skb);
		iph->check = 0;
		iph->tot_len = htons(mss + hdr_len);
		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
			tcp_hdr(skb)->check = 0;
			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
		} else
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);

		/* Communicate IP/TCP option lengths to the chip; the
		 * encoding differs between HW-TSO (and 5705) and the
		 * firmware-TSO chips.
		 */
		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				mss |= (tsflags << 11);
			}
		} else {
			if (tcp_opt_len || iph->ihl > 5) {
				int tsflags;

				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
				base_flags |= tsflags << 12;
			}
		}
	}
#if TG3_VLAN_TAG_USED
	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
		base_flags |= (TXD_FLAG_VLAN |
			       (vlan_tx_tag_get(skb) << 16));
#endif

	/* Queue skb data, a.k.a. the main skb fragment. */
	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	tp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

	would_hit_hwbug = 0;

	if (tg3_4g_overflow_test(mapping, len))
		would_hit_hwbug = 1;

	tg3_set_txd(tp, entry, mapping, len, base_flags,
		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

	entry = NEXT_TX(entry);

	/* Now loop through additional data fragments, and queue them. */
	if (skb_shinfo(skb)->nr_frags > 0) {
		unsigned int i, last;

		last = skb_shinfo(skb)->nr_frags - 1;
		for (i = 0; i <= last; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			len = frag->size;
			mapping = pci_map_page(tp->pdev,
					       frag->page,
					       frag->page_offset,
					       len, PCI_DMA_TODEVICE);

			tp->tx_buffers[entry].skb = NULL;
			pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);

			if (tg3_4g_overflow_test(mapping, len))
				would_hit_hwbug = 1;

			if (tg3_40bit_overflow_test(tp, mapping, len))
				would_hit_hwbug = 1;

			if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last)|(mss << 1));
			else
				tg3_set_txd(tp, entry, mapping, len,
					    base_flags, (i == last));

			entry = NEXT_TX(entry);
		}
	}

	if (would_hit_hwbug) {
		u32 last_plus_one = entry;
		u32 start;

		/* Redo the frame from its first descriptor using a
		 * linearized bounce copy that avoids the DMA bugs.
		 */
		start = entry - 1 - skb_shinfo(skb)->nr_frags;
		start &= (TG3_TX_RING_SIZE - 1);

		if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
						&start, base_flags, mss))
			goto out_unlock;

		entry = start;
	}

	/* Packets are ready, update Tx producer idx local and on card. */
	tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);

	tp->tx_prod = entry;
	if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
		/* Stop the queue, then re-check against the wakeup
		 * threshold to close the race with tg3_tx().
		 */
		netif_stop_queue(dev);
		if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
			netif_wake_queue(tp->dev);
	}

out_unlock:
    	mmiowb();

	dev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
4194
4195static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
4196			       int new_mtu)
4197{
4198	dev->mtu = new_mtu;
4199
4200	if (new_mtu > ETH_DATA_LEN) {
4201		if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
4202			tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
4203			ethtool_op_set_tso(dev, 0);
4204		}
4205		else
4206			tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
4207	} else {
4208		if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
4209			tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
4210		tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
4211	}
4212}
4213
4214static int tg3_change_mtu(struct net_device *dev, int new_mtu)
4215{
4216	struct tg3 *tp = netdev_priv(dev);
4217	int err;
4218
4219	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
4220		return -EINVAL;
4221
4222	if (!netif_running(dev)) {
4223		/* We'll just catch it later when the
4224		 * device is up'd.
4225		 */
4226		tg3_set_mtu(dev, tp, new_mtu);
4227		return 0;
4228	}
4229
4230	tg3_netif_stop(tp);
4231
4232	tg3_full_lock(tp, 1);
4233
4234	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4235
4236	tg3_set_mtu(dev, tp, new_mtu);
4237
4238	err = tg3_restart_hw(tp, 0);
4239
4240	if (!err)
4241		tg3_netif_start(tp);
4242
4243	tg3_full_unlock(tp);
4244
4245	return err;
4246}
4247
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	struct ring_info *rxp;
	int i;

	/* Standard RX ring: unmap the DMA buffer and free every
	 * posted skb.
	 */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		rxp = &tp->rx_std_buffers[i];

		if (rxp->skb == NULL)
			continue;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(rxp, mapping),
				 tp->rx_pkt_buf_sz - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rxp->skb);
		rxp->skb = NULL;
	}

	/* Jumbo RX ring: same treatment with the jumbo buffer size. */
	for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
		rxp = &tp->rx_jumbo_buffers[i];

		if (rxp->skb == NULL)
			continue;
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(rxp, mapping),
				 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
				 PCI_DMA_FROMDEVICE);
		dev_kfree_skb_any(rxp->skb);
		rxp->skb = NULL;
	}

	/* TX ring: an skb occupies one slot for its linear head plus
	 * one slot per page fragment, so i is advanced across all of
	 * them.  Only the head slot carries the skb pointer (fragment
	 * slots are set to NULL in tg3_start_xmit).
	 */
	for (i = 0; i < TG3_TX_RING_SIZE; ) {
		struct tx_ring_info *txp;
		struct sk_buff *skb;
		int j;

		txp = &tp->tx_buffers[i];
		skb = txp->skb;

		if (skb == NULL) {
			i++;
			continue;
		}

		/* Unmap the linear head mapped with pci_map_single(). */
		pci_unmap_single(tp->pdev,
				 pci_unmap_addr(txp, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);
		txp->skb = NULL;

		i++;

		/* Unmap each page fragment; the index wraps with the
		 * ring mask since an skb's slots may cross the ring end.
		 */
		for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
			txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
			pci_unmap_page(tp->pdev,
				       pci_unmap_addr(txp, mapping),
				       skb_shinfo(skb)->frags[j].size,
				       PCI_DMA_TODEVICE);
			i++;
		}

		dev_kfree_skb_any(skb);
	}
}
4319
/* Initialize tx/rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0 on success, -ENOMEM if not even one RX buffer (per
 * enabled ring) could be allocated.
 */
static int tg3_init_rings(struct tg3 *tp)
{
	u32 i;

	/* Free up all the SKBs. */
	tg3_free_rings(tp);

	/* Zero out all descriptors. */
	memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
	memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
	memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);

	/* 5780-class chips running a jumbo MTU use jumbo-sized buffers
	 * in the standard ring (they don't enable the jumbo ring --
	 * see tg3_set_mtu).
	 */
	tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
	if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
	    (tp->dev->mtu > ETH_DATA_LEN))
		tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tp->rx_std[i];
		/* Usable length excludes the rx_offset headroom and a
		 * 64-byte reserve.
		 */
		rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
			<< RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
			struct tg3_rx_buffer_desc *rxd;

			rxd = &tp->rx_jumbo[i];
			rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
				<< RXD_LEN_SHIFT;
			rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				RXD_FLAG_JUMBO;
			rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
		}
	}

	/* Now allocate fresh SKBs for each rx ring.  A partial
	 * allocation shrinks the ring rather than failing, as long as
	 * at least one buffer could be posted.
	 */
	for (i = 0; i < tp->rx_pending; i++) {
		if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
			printk(KERN_WARNING PFX
			       "%s: Using a smaller RX standard ring, "
			       "only %d out of %d buffers were allocated "
			       "successfully.\n",
			       tp->dev->name, i, tp->rx_pending);
			if (i == 0)
				return -ENOMEM;
			tp->rx_pending = i;
			break;
		}
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < tp->rx_jumbo_pending; i++) {
			if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
					     -1, i) < 0) {
				printk(KERN_WARNING PFX
				       "%s: Using a smaller RX jumbo ring, "
				       "only %d out of %d buffers were "
				       "allocated successfully.\n",
				       tp->dev->name, i, tp->rx_jumbo_pending);
				if (i == 0) {
					tg3_free_rings(tp);
					return -ENOMEM;
				}
				tp->rx_jumbo_pending = i;
				break;
			}
		}
	}
	return 0;
}
4409
4410/*
4411 * Must not be invoked with interrupt sources disabled and
4412 * the hardware shutdown down.
4413 */
4414static void tg3_free_consistent(struct tg3 *tp)
4415{
4416	kfree(tp->rx_std_buffers);
4417	tp->rx_std_buffers = NULL;
4418	if (tp->rx_std) {
4419		pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
4420				    tp->rx_std, tp->rx_std_mapping);
4421		tp->rx_std = NULL;
4422	}
4423	if (tp->rx_jumbo) {
4424		pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4425				    tp->rx_jumbo, tp->rx_jumbo_mapping);
4426		tp->rx_jumbo = NULL;
4427	}
4428	if (tp->rx_rcb) {
4429		pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4430				    tp->rx_rcb, tp->rx_rcb_mapping);
4431		tp->rx_rcb = NULL;
4432	}
4433	if (tp->tx_ring) {
4434		pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
4435			tp->tx_ring, tp->tx_desc_mapping);
4436		tp->tx_ring = NULL;
4437	}
4438	if (tp->hw_status) {
4439		pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
4440				    tp->hw_status, tp->status_mapping);
4441		tp->hw_status = NULL;
4442	}
4443	if (tp->hw_stats) {
4444		pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
4445				    tp->hw_stats, tp->stats_mapping);
4446		tp->hw_stats = NULL;
4447	}
4448}
4449
4450/*
4451 * Must not be invoked with interrupt sources disabled and
4452 * the hardware shutdown down.  Can sleep.
4453 */
4454static int tg3_alloc_consistent(struct tg3 *tp)
4455{
4456	tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
4457				      (TG3_RX_RING_SIZE +
4458				       TG3_RX_JUMBO_RING_SIZE)) +
4459				     (sizeof(struct tx_ring_info) *
4460				      TG3_TX_RING_SIZE),
4461				     GFP_KERNEL);
4462	if (!tp->rx_std_buffers)
4463		return -ENOMEM;
4464
4465	tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
4466	tp->tx_buffers = (struct tx_ring_info *)
4467		&tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
4468
4469	tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
4470					  &tp->rx_std_mapping);
4471	if (!tp->rx_std)
4472		goto err_out;
4473
4474	tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
4475					    &tp->rx_jumbo_mapping);
4476
4477	if (!tp->rx_jumbo)
4478		goto err_out;
4479
4480	tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
4481					  &tp->rx_rcb_mapping);
4482	if (!tp->rx_rcb)
4483		goto err_out;
4484
4485	tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
4486					   &tp->tx_desc_mapping);
4487	if (!tp->tx_ring)
4488		goto err_out;
4489
4490	tp->hw_status = pci_alloc_consistent(tp->pdev,
4491					     TG3_HW_STATUS_SIZE,
4492					     &tp->status_mapping);
4493	if (!tp->hw_status)
4494		goto err_out;
4495
4496	tp->hw_stats = pci_alloc_consistent(tp->pdev,
4497					    sizeof(struct tg3_hw_stats),
4498					    &tp->stats_mapping);
4499	if (!tp->hw_stats)
4500		goto err_out;
4501
4502	memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4503	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
4504
4505	return 0;
4506
4507err_out:
4508	tg3_free_consistent(tp);
4509	return -ENOMEM;
4510}
4511
4512#define MAX_WAIT_CNT 1000
4513
4514/* To stop a block, clear the enable bit and poll till it
4515 * clears.  tp->lock is held.
4516 */
4517static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
4518{
4519	unsigned int i;
4520	u32 val;
4521
4522	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
4523		switch (ofs) {
4524		case RCVLSC_MODE:
4525		case DMAC_MODE:
4526		case MBFREE_MODE:
4527		case BUFMGR_MODE:
4528		case MEMARB_MODE:
4529			/* We can't enable/disable these bits of the
4530			 * 5705/5750, just say success.
4531			 */
4532			return 0;
4533
4534		default:
4535			break;
4536		};
4537	}
4538
4539	val = tr32(ofs);
4540	val &= ~enable_bit;
4541	tw32_f(ofs, val);
4542
4543	for (i = 0; i < MAX_WAIT_CNT; i++) {
4544		udelay(100);
4545		val = tr32(ofs);
4546		if ((val & enable_bit) == 0)
4547			break;
4548	}
4549
4550	if (i == MAX_WAIT_CNT && !silent) {
4551		printk(KERN_ERR PFX "tg3_stop_block timed out, "
4552		       "ofs=%lx enable_bit=%x\n",
4553		       ofs, enable_bit);
4554		return -ENODEV;
4555	}
4556
4557	return 0;
4558}
4559
/* tp->lock is held.
 *
 * Quiesce the chip: disable interrupts, then shut down the receive
 * path, the send path, the MAC, and finally the host-side blocks,
 * in that order.  Individual block failures are OR-ed into the
 * return value (0 or a negative errno) but do not abort the
 * sequence.
 */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	/* Stop accepting new frames first. */
	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	/* Receive-side blocks. */
	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	/* Send-side and DMA blocks. */
	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	/* MAC transmit DMA engine off. */
	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* MAC TX mode has no tg3_stop_block() handling; poll it by hand
	 * (up to MAX_WAIT_CNT * 100us).
	 */
	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
		       "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
		       tp->dev->name, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	/* Host coalescing, write DMA and mailbox-free blocks. */
	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	/* Pulse the flow-through queue reset. */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	/* Wipe host-visible status/statistics so stale data isn't read
	 * after the restart.
	 */
	if (tp->hw_status)
		memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
	if (tp->hw_stats)
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return err;
}
4622
4623/* tp->lock is held. */
4624static int tg3_nvram_lock(struct tg3 *tp)
4625{
4626	if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4627		int i;
4628
4629		if (tp->nvram_lock_cnt == 0) {
4630			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
4631			for (i = 0; i < 8000; i++) {
4632				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
4633					break;
4634				udelay(20);
4635			}
4636			if (i == 8000) {
4637				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
4638				return -ENODEV;
4639			}
4640		}
4641		tp->nvram_lock_cnt++;
4642	}
4643	return 0;
4644}
4645
4646/* tp->lock is held. */
4647static void tg3_nvram_unlock(struct tg3 *tp)
4648{
4649	if (tp->tg3_flags & TG3_FLAG_NVRAM) {
4650		if (tp->nvram_lock_cnt > 0)
4651			tp->nvram_lock_cnt--;
4652		if (tp->nvram_lock_cnt == 0)
4653			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
4654	}
4655}
4656
4657/* tp->lock is held. */
4658static void tg3_enable_nvram_access(struct tg3 *tp)
4659{
4660	if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4661	    !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4662		u32 nvaccess = tr32(NVRAM_ACCESS);
4663
4664		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
4665	}
4666}
4667
4668/* tp->lock is held. */
4669static void tg3_disable_nvram_access(struct tg3 *tp)
4670{
4671	if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
4672	    !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
4673		u32 nvaccess = tr32(NVRAM_ACCESS);
4674
4675		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
4676	}
4677}
4678
4679/* tp->lock is held. */
4680static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
4681{
4682	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
4683		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
4684
4685	if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4686		switch (kind) {
4687		case RESET_KIND_INIT:
4688			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4689				      DRV_STATE_START);
4690			break;
4691
4692		case RESET_KIND_SHUTDOWN:
4693			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4694				      DRV_STATE_UNLOAD);
4695			break;
4696
4697		case RESET_KIND_SUSPEND:
4698			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4699				      DRV_STATE_SUSPEND);
4700			break;
4701
4702		default:
4703			break;
4704		};
4705	}
4706}
4707
4708/* tp->lock is held. */
4709static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
4710{
4711	if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
4712		switch (kind) {
4713		case RESET_KIND_INIT:
4714			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4715				      DRV_STATE_START_DONE);
4716			break;
4717
4718		case RESET_KIND_SHUTDOWN:
4719			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4720				      DRV_STATE_UNLOAD_DONE);
4721			break;
4722
4723		default:
4724			break;
4725		};
4726	}
4727}
4728
4729/* tp->lock is held. */
4730static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
4731{
4732	if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
4733		switch (kind) {
4734		case RESET_KIND_INIT:
4735			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4736				      DRV_STATE_START);
4737			break;
4738
4739		case RESET_KIND_SHUTDOWN:
4740			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4741				      DRV_STATE_UNLOAD);
4742			break;
4743
4744		case RESET_KIND_SUSPEND:
4745			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4746				      DRV_STATE_SUSPEND);
4747			break;
4748
4749		default:
4750			break;
4751		};
4752	}
4753}
4754
4755static int tg3_poll_fw(struct tg3 *tp)
4756{
4757	int i;
4758	u32 val;
4759
4760	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
4761		/* Wait up to 20ms for init done. */
4762		for (i = 0; i < 200; i++) {
4763			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
4764				return 0;
4765			udelay(100);
4766		}
4767		return -ENODEV;
4768	}
4769
4770	/* Wait for firmware initialization to complete. */
4771	for (i = 0; i < 100000; i++) {
4772		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4773		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4774			break;
4775		udelay(10);
4776	}
4777
4778	/* Chip might not be fitted with firmware.  Some Sun onboard
4779	 * parts are configured like that.  So don't signal the timeout
4780	 * of the above loop as an error, but do report the lack of
4781	 * running firmware once.
4782	 */
4783	if (i >= 100000 &&
4784	    !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
4785		tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
4786
4787		printk(KERN_INFO PFX "%s: No firmware running.\n",
4788		       tp->dev->name);
4789	}
4790
4791	return 0;
4792}
4793
4794static void tg3_stop_fw(struct tg3 *);
4795
/* tp->lock is held.
 *
 * Perform a full core-clock reset of the chip and restore enough PCI
 * and MAC state afterwards for the driver to continue.  Returns 0 on
 * success or the error from tg3_poll_fw().
 */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int err;

	tg3_nvram_lock(tp);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
		tw32(GRC_FASTBOOT_PC, 0);

	/* Temporarily switch away from the read-back-flush write
	 * routine for the duration of the reset; restored below.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
	if (tp->hw_status) {
		tp->hw_status->status = 0;
		tp->hw_status->status_tag = 0;
	}
	tp->last_tag = 0;
	smp_mb();
	synchronize_irq(tp->pdev->irq);

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* NOTE(review): 0x7e2c appears to be a PCIe-specific
		 * register; the magic values are kept as-is.
		 */
		if (tr32(0x7e2c) == 0x60) {
			tw32(0x7e2c, 0x20);
		}
		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* 5906: flag a driver reset to the VCPU and un-halt it. */
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
	/* This write triggers the reset. */
	tw32(GRC_MISC_CFG, val);

	/* Restore the saved register write method. */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			int i;
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (i = 0; i < 5000; i++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}
		/* Set PCIE max payload size and clear error status.  */
		pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
	}

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	/* Restore the PCI config space saved before the reset. */
	pci_restore_state(tp->pdev);

	/* Config space is back; the irq handler may touch the chip again. */
	tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;

	/* Make sure PCI-X relaxed ordering bit is clear. */
	pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
	val &= ~PCIX_CAPS_RELAXED_ORDERING;
	pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);

	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
		u32 val;

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}

		val = tr32(MEMARB_MODE);
		tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	} else
		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		/* NOTE(review): undocumented 5750_A3 workaround write. */
		tw32(0x5000, 0x400);
	}

	tw32(GRC_MODE, tp->grc_mode);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		u32 val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	/* Re-select the MAC port mode for the PHY type. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		tw32_f(MAC_MODE, tp->mac_mode);
	} else
		tw32_f(MAC_MODE, 0);
	udelay(40);

	/* Wait for on-chip firmware (if any) to come back up. */
	err = tg3_poll_fw(tp);
	if (err)
		return err;

	if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
		u32 val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	/* Reprobe ASF enable state.  */
	tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
	tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
	}

	return 0;
}
5005
5006/* tp->lock is held. */
5007static void tg3_stop_fw(struct tg3 *tp)
5008{
5009	if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5010		u32 val;
5011		int i;
5012
5013		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
5014		val = tr32(GRC_RX_CPU_EVENT);
5015		val |= (1 << 14);
5016		tw32(GRC_RX_CPU_EVENT, val);
5017
5018		/* Wait for RX cpu to ACK the event.  */
5019		for (i = 0; i < 100; i++) {
5020			if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
5021				break;
5022			udelay(1);
5023		}
5024	}
5025}
5026
/* tp->lock is held.
 *
 * Full shutdown path: pause ASF firmware, signal the reset kind,
 * quiesce the hardware blocks, reset the chip, then post the
 * completion signatures.  Returns the tg3_chip_reset() result.
 */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);

	tg3_write_sig_pre_reset(tp, kind);

	tg3_abort_hw(tp, silent);
	err = tg3_chip_reset(tp);

	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	return err;
}
5047
5048#define TG3_FW_RELEASE_MAJOR	0x0
5049#define TG3_FW_RELASE_MINOR	0x0
5050#define TG3_FW_RELEASE_FIX	0x0
5051#define TG3_FW_START_ADDR	0x08000000
5052#define TG3_FW_TEXT_ADDR	0x08000000
5053#define TG3_FW_TEXT_LEN		0x9c0
5054#define TG3_FW_RODATA_ADDR	0x080009c0
5055#define TG3_FW_RODATA_LEN	0x60
5056#define TG3_FW_DATA_ADDR	0x08000a40
5057#define TG3_FW_DATA_LEN		0x20
5058#define TG3_FW_SBSS_ADDR	0x08000a60
5059#define TG3_FW_SBSS_LEN		0xc
5060#define TG3_FW_BSS_ADDR		0x08000a70
5061#define TG3_FW_BSS_LEN		0x10
5062
/* 5701-class RX CPU firmware text segment (MIPS machine code loaded
 * into on-chip scratch memory; see TG3_FW_* layout macros above).
 * Opaque data -- do not edit by hand.
 */
static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
	0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
	0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
	0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
	0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
	0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
	0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
	0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
	0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
	0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
	0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
	0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
	0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
	0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
	0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
	0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
	0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
	0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
	0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
	0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
	0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
	0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
	0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0,
	0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
	0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
	0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
	0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
	0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
	0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
	0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
	0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
	0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
	0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
	0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
	0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
	0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
	0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
	0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
	0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
	0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
	0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
	0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
	0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
	0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
	0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
	0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
	0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
	0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
	0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
	0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
	0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
	0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
	0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
	0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
	0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
	0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
	0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
	0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
	0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
	0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
	0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
	0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
	0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
	0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
	0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
	0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
	0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
	0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
	0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
	0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
	0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
	0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
	0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
	0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
	0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
};
5156
/* .rodata section of the 5701_a0 fix firmware: words of NUL-padded
 * ASCII text (e.g. 0x53774576 0x656e7430 = "SwEvent0"), presumably
 * diagnostic message strings referenced by the firmware code above.
 * Loaded verbatim at TG3_FW_RODATA_ADDR; do not modify.
 */
static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
	0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
	0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
	0x00000000
};
5164
5165
5166#define RX_CPU_SCRATCH_BASE	0x30000
5167#define RX_CPU_SCRATCH_SIZE	0x04000
5168#define TX_CPU_SCRATCH_BASE	0x34000
5169#define TX_CPU_SCRATCH_SIZE	0x04000
5170
5171/* tp->lock is held. */
5172static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
5173{
5174	int i;
5175
5176	BUG_ON(offset == TX_CPU_BASE &&
5177	    (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
5178
5179	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5180		u32 val = tr32(GRC_VCPU_EXT_CTRL);
5181
5182		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
5183		return 0;
5184	}
5185	if (offset == RX_CPU_BASE) {
5186		for (i = 0; i < 10000; i++) {
5187			tw32(offset + CPU_STATE, 0xffffffff);
5188			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5189			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5190				break;
5191		}
5192
5193		tw32(offset + CPU_STATE, 0xffffffff);
5194		tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
5195		udelay(10);
5196	} else {
5197		for (i = 0; i < 10000; i++) {
5198			tw32(offset + CPU_STATE, 0xffffffff);
5199			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
5200			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
5201				break;
5202		}
5203	}
5204
5205	if (i >= 10000) {
5206		printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
5207		       "and %s CPU\n",
5208		       tp->dev->name,
5209		       (offset == RX_CPU_BASE ? "RX" : "TX"));
5210		return -ENODEV;
5211	}
5212
5213	/* Clear firmware's nvram arbitration. */
5214	if (tp->tg3_flags & TG3_FLAG_NVRAM)
5215		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
5216	return 0;
5217}
5218
/* Describes one firmware image to load into an on-chip CPU: the
 * .text, .rodata and .data sections, each with its linked base
 * address, byte length, and payload pointer (NULL means zero-fill;
 * see tg3_load_firmware_cpu).
 */
struct fw_info {
	unsigned int text_base;		/* linked address of .text */
	unsigned int text_len;		/* .text length in bytes */
	const u32 *text_data;		/* .text words, or NULL to zero-fill */
	unsigned int rodata_base;	/* linked address of .rodata */
	unsigned int rodata_len;	/* .rodata length in bytes */
	const u32 *rodata_data;		/* .rodata words, or NULL to zero-fill */
	unsigned int data_base;		/* linked address of .data */
	unsigned int data_len;		/* .data length in bytes */
	const u32 *data_data;		/* .data words, or NULL to zero-fill */
};
5230
5231/* tp->lock is held. */
5232static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
5233				 int cpu_scratch_size, struct fw_info *info)
5234{
5235	int err, lock_err, i;
5236	void (*write_op)(struct tg3 *, u32, u32);
5237
5238	if (cpu_base == TX_CPU_BASE &&
5239	    (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5240		printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
5241		       "TX cpu firmware on %s which is 5705.\n",
5242		       tp->dev->name);
5243		return -EINVAL;
5244	}
5245
5246	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5247		write_op = tg3_write_mem;
5248	else
5249		write_op = tg3_write_indirect_reg32;
5250
5251	/* It is possible that bootcode is still loading at this point.
5252	 * Get the nvram lock first before halting the cpu.
5253	 */
5254	lock_err = tg3_nvram_lock(tp);
5255	err = tg3_halt_cpu(tp, cpu_base);
5256	if (!lock_err)
5257		tg3_nvram_unlock(tp);
5258	if (err)
5259		goto out;
5260
5261	for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
5262		write_op(tp, cpu_scratch_base + i, 0);
5263	tw32(cpu_base + CPU_STATE, 0xffffffff);
5264	tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
5265	for (i = 0; i < (info->text_len / sizeof(u32)); i++)
5266		write_op(tp, (cpu_scratch_base +
5267			      (info->text_base & 0xffff) +
5268			      (i * sizeof(u32))),
5269			 (info->text_data ?
5270			  info->text_data[i] : 0));
5271	for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
5272		write_op(tp, (cpu_scratch_base +
5273			      (info->rodata_base & 0xffff) +
5274			      (i * sizeof(u32))),
5275			 (info->rodata_data ?
5276			  info->rodata_data[i] : 0));
5277	for (i = 0; i < (info->data_len / sizeof(u32)); i++)
5278		write_op(tp, (cpu_scratch_base +
5279			      (info->data_base & 0xffff) +
5280			      (i * sizeof(u32))),
5281			 (info->data_data ?
5282			  info->data_data[i] : 0));
5283
5284	err = 0;
5285
5286out:
5287	return err;
5288}
5289
5290/* tp->lock is held. */
5291static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
5292{
5293	struct fw_info info;
5294	int err, i;
5295
5296	info.text_base = TG3_FW_TEXT_ADDR;
5297	info.text_len = TG3_FW_TEXT_LEN;
5298	info.text_data = &tg3FwText[0];
5299	info.rodata_base = TG3_FW_RODATA_ADDR;
5300	info.rodata_len = TG3_FW_RODATA_LEN;
5301	info.rodata_data = &tg3FwRodata[0];
5302	info.data_base = TG3_FW_DATA_ADDR;
5303	info.data_len = TG3_FW_DATA_LEN;
5304	info.data_data = NULL;
5305
5306	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
5307				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
5308				    &info);
5309	if (err)
5310		return err;
5311
5312	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
5313				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
5314				    &info);
5315	if (err)
5316		return err;
5317
5318	/* Now startup only the RX cpu. */
5319	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5320	tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5321
5322	for (i = 0; i < 5; i++) {
5323		if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
5324			break;
5325		tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5326		tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
5327		tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
5328		udelay(1000);
5329	}
5330	if (i >= 5) {
5331		printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
5332		       "to set RX CPU PC, is %08x should be %08x\n",
5333		       tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
5334		       TG3_FW_TEXT_ADDR);
5335		return -ENODEV;
5336	}
5337	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
5338	tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
5339
5340	return 0;
5341}
5342
5343
5344#define TG3_TSO_FW_RELEASE_MAJOR	0x1
5345#define TG3_TSO_FW_RELASE_MINOR		0x6
5346#define TG3_TSO_FW_RELEASE_FIX		0x0
5347#define TG3_TSO_FW_START_ADDR		0x08000000
5348#define TG3_TSO_FW_TEXT_ADDR		0x08000000
5349#define TG3_TSO_FW_TEXT_LEN		0x1aa0
5350#define TG3_TSO_FW_RODATA_ADDR		0x08001aa0
5351#define TG3_TSO_FW_RODATA_LEN		0x60
5352#define TG3_TSO_FW_DATA_ADDR		0x08001b20
5353#define TG3_TSO_FW_DATA_LEN		0x30
5354#define TG3_TSO_FW_SBSS_ADDR		0x08001b50
5355#define TG3_TSO_FW_SBSS_LEN		0x2c
5356#define TG3_TSO_FW_BSS_ADDR		0x08001b80
5357#define TG3_TSO_FW_BSS_LEN		0x894
5358
5359static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
5360	0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
5361	0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
5362	0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
5363	0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
5364	0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
5365	0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
5366	0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
5367	0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
5368	0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
5369	0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
5370	0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
5371	0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
5372	0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
5373	0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
5374	0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
5375	0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
5376	0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
5377	0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
5378	0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5379	0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
5380	0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
5381	0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
5382	0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
5383	0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
5384	0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
5385	0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
5386	0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
5387	0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
5388	0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
5389	0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
5390	0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
5391	0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
5392	0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
5393	0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
5394	0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
5395	0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
5396	0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
5397	0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
5398	0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
5399	0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
5400	0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
5401	0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
5402	0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
5403	0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
5404	0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
5405	0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
5406	0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
5407	0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5408	0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
5409	0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
5410	0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
5411	0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
5412	0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
5413	0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
5414	0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
5415	0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
5416	0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
5417	0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
5418	0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
5419	0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
5420	0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
5421	0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
5422	0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
5423	0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
5424	0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
5425	0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
5426	0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
5427	0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
5428	0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
5429	0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
5430	0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
5431	0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
5432	0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
5433	0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
5434	0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
5435	0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
5436	0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
5437	0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
5438	0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
5439	0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
5440	0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
5441	0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
5442	0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
5443	0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
5444	0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
5445	0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
5446	0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
5447	0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
5448	0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
5449	0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
5450	0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
5451	0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
5452	0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
5453	0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
5454	0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
5455	0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
5456	0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
5457	0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
5458	0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
5459	0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
5460	0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
5461	0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
5462	0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
5463	0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
5464	0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
5465	0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
5466	0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
5467	0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
5468	0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
5469	0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
5470	0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
5471	0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
5472	0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
5473	0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
5474	0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
5475	0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
5476	0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
5477	0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
5478	0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
5479	0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
5480	0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
5481	0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
5482	0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
5483	0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
5484	0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
5485	0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
5486	0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
5487	0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
5488	0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
5489	0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
5490	0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
5491	0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
5492	0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
5493	0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
5494	0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
5495	0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
5496	0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
5497	0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
5498	0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5499	0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
5500	0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
5501	0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
5502	0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
5503	0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
5504	0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
5505	0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
5506	0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
5507	0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
5508	0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
5509	0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
5510	0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
5511	0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
5512	0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
5513	0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
5514	0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
5515	0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
5516	0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
5517	0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
5518	0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
5519	0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
5520	0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
5521	0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
5522	0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
5523	0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
5524	0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
5525	0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
5526	0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
5527	0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
5528	0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
5529	0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
5530	0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
5531	0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
5532	0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
5533	0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
5534	0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
5535	0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
5536	0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
5537	0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
5538	0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
5539	0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
5540	0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
5541	0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
5542	0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
5543	0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
5544	0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
5545	0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
5546	0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
5547	0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
5548	0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
5549	0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
5550	0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
5551	0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
5552	0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
5553	0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
5554	0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
5555	0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
5556	0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
5557	0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
5558	0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
5559	0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
5560	0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
5561	0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
5562	0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
5563	0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
5564	0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
5565	0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
5566	0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
5567	0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
5568	0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
5569	0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
5570	0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
5571	0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
5572	0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
5573	0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
5574	0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
5575	0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
5576	0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
5577	0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
5578	0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
5579	0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
5580	0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5581	0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
5582	0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
5583	0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
5584	0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
5585	0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
5586	0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
5587	0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
5588	0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
5589	0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
5590	0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
5591	0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
5592	0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
5593	0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
5594	0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
5595	0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
5596	0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
5597	0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
5598	0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
5599	0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
5600	0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
5601	0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
5602	0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
5603	0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
5604	0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
5605	0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
5606	0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
5607	0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
5608	0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
5609	0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
5610	0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
5611	0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
5612	0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
5613	0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
5614	0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
5615	0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
5616	0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
5617	0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
5618	0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
5619	0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
5620	0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
5621	0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
5622	0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
5623	0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
5624	0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
5625	0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
5626	0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
5627	0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
5628	0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
5629	0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
5630	0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
5631	0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
5632	0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
5633	0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
5634	0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
5635	0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
5636	0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
5637	0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
5638	0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
5639	0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
5640	0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
5641	0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
5642	0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
5643	0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
5644};
5645
/* .rodata section of the TSO firmware: words of NUL-padded ASCII text
 * (e.g. 0x4d61696e 0x43707542 = "MainCpuB"), presumably diagnostic
 * message strings.  Loaded verbatim at TG3_TSO_FW_RODATA_ADDR; do not
 * modify.
 */
static const u32 tg3TsoFwRodata[] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
	0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
	0x00000000,
};
5653
/* .data section of the TSO firmware; the non-zero words are the ASCII
 * identification string "stkoffld_v1.6.0".  Loaded verbatim at
 * TG3_TSO_FW_DATA_ADDR; do not modify.
 */
static const u32 tg3TsoFwData[] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000,
};
5659
5660/* 5705 needs a special version of the TSO firmware.  */
5661#define TG3_TSO5_FW_RELEASE_MAJOR	0x1
5662#define TG3_TSO5_FW_RELASE_MINOR	0x2
5663#define TG3_TSO5_FW_RELEASE_FIX		0x0
5664#define TG3_TSO5_FW_START_ADDR		0x00010000
5665#define TG3_TSO5_FW_TEXT_ADDR		0x00010000
5666#define TG3_TSO5_FW_TEXT_LEN		0xe90
5667#define TG3_TSO5_FW_RODATA_ADDR		0x00010e90
5668#define TG3_TSO5_FW_RODATA_LEN		0x50
5669#define TG3_TSO5_FW_DATA_ADDR		0x00010f00
5670#define TG3_TSO5_FW_DATA_LEN		0x20
5671#define TG3_TSO5_FW_SBSS_ADDR		0x00010f20
5672#define TG3_TSO5_FW_SBSS_LEN		0x28
5673#define TG3_TSO5_FW_BSS_ADDR		0x00010f50
5674#define TG3_TSO5_FW_BSS_LEN		0x88
5675
/* .text section of the 5705 TSO firmware: opaque instruction words loaded
 * into NIC SRAM at TG3_TSO5_FW_TEXT_ADDR by tg3_load_tso_firmware().
 * Derived from proprietary Broadcom source (see the copyright notice at
 * the top of this file) — do not edit by hand.  The array is sized
 * TEXT_LEN/4 + 1, i.e. one extra zero word beyond the section length.
 */
static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
	0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
	0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
	0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
	0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
	0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
	0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
	0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
	0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
	0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
	0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
	0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
	0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
	0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
	0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
	0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
	0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
	0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
	0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
	0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
	0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
	0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
	0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
	0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
	0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
	0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
	0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
	0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
	0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
	0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
	0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
	0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
	0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
	0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
	0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
	0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
	0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
	0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
	0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
	0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
	0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
	0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
	0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
	0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
	0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
	0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
	0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
	0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
	0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
	0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
	0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
	0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
	0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
	0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
	0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
	0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
	0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
	0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
	0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
	0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
	0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
	0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
	0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
	0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
	0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
	0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
	0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
	0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
	0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
	0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
	0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
	0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
	0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
	0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
	0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
	0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
	0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
	0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
	0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
	0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
	0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
	0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
	0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
	0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
	0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
	0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
	0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
	0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
	0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
	0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
	0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
	0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
	0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
	0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
	0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
	0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
	0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
	0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
	0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
	0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
	0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
	0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
	0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
	0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
	0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
	0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
	0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
	0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
	0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
	0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
	0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
	0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
	0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
	0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
	0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
	0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
	0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
	0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
	0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
	0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
	0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
	0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
	0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
	0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
	0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
	0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
	0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
	0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
	0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
	0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
	0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
	0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
	0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
	0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
	0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
	0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
	0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
	0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
	0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
	0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
	0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
	0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
	0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
	0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
	0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
	0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
	0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
	0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
	0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
	0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
	0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
	0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
	0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
	0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
	0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
	0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
	0x00000000, 0x00000000, 0x00000000,
};
5834
/* .rodata section of the 5705 TSO firmware, loaded at
 * TG3_TSO5_FW_RODATA_ADDR.  The words are ASCII string constants used
 * by the firmware ("Main", "CpuB"/"CpuA", "stkoffld", "fatalErr", ...).
 * Machine-generated — do not edit by hand.
 */
static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
	0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
	0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
	0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
	0x00000000, 0x00000000, 0x00000000,
};
5841
/* .data section of the 5705 TSO firmware, loaded at TG3_TSO5_FW_DATA_ADDR.
 * Contains the firmware version string "stkoffld_v1.2.0" encoded as u32
 * words.  Machine-generated — do not edit by hand.
 */
static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
	0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000,
};
5846
/* tp->lock is held. */
/* Download the TSO offload firmware into the appropriate on-chip CPU and
 * start it.  On 5705-class chips the TSO5 image runs on the RX CPU and
 * borrows scratch space from the MBUF pool SRAM; on other chips the
 * regular TSO image runs on the TX CPU with its dedicated scratch area.
 *
 * Returns 0 on success, or a negative errno if the firmware could not be
 * loaded or the CPU refused to latch the new program counter.
 */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	struct fw_info info;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err, i;

	/* Chips with TSO in hardware need no firmware download at all. */
	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
		return 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		info.text_base = TG3_TSO5_FW_TEXT_ADDR;
		info.text_len = TG3_TSO5_FW_TEXT_LEN;
		info.text_data = &tg3Tso5FwText[0];
		info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
		info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
		info.rodata_data = &tg3Tso5FwRodata[0];
		info.data_base = TG3_TSO5_FW_DATA_ADDR;
		info.data_len = TG3_TSO5_FW_DATA_LEN;
		info.data_data = &tg3Tso5FwData[0];
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
		/* Scratch must cover all five image sections (.text, .rodata,
		 * .data, .sbss, .bss); the MBUF pool is shrunk by the same
		 * amount in tg3_reset_hw.
		 */
		cpu_scratch_size = (info.text_len +
				    info.rodata_len +
				    info.data_len +
				    TG3_TSO5_FW_SBSS_LEN +
				    TG3_TSO5_FW_BSS_LEN);
	} else {
		info.text_base = TG3_TSO_FW_TEXT_ADDR;
		info.text_len = TG3_TSO_FW_TEXT_LEN;
		info.text_data = &tg3TsoFwText[0];
		info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
		info.rodata_len = TG3_TSO_FW_RODATA_LEN;
		info.rodata_data = &tg3TsoFwRodata[0];
		info.data_base = TG3_TSO_FW_DATA_ADDR;
		info.data_len = TG3_TSO_FW_DATA_LEN;
		info.data_data = &tg3TsoFwData[0];
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    &info);
	if (err)
		return err;

	/* Now startup the cpu. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC,    info.text_base);

	/* Give the CPU up to 5 attempts (1 ms apart) to latch the new PC,
	 * re-halting and rewriting the PC on each failed attempt.
	 */
	for (i = 0; i < 5; i++) {
		if (tr32(cpu_base + CPU_PC) == info.text_base)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC,    info.text_base);
		udelay(1000);
	}
	if (i >= 5) {
		printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
		       "to set CPU PC, is %08x should be %08x\n",
		       tp->dev->name, tr32(cpu_base + CPU_PC),
		       info.text_base);
		return -ENODEV;
	}
	/* Clear CPU_MODE (dropping CPU_MODE_HALT) so the firmware runs. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE,  0x00000000);
	return 0;
}
5918
5919
5920/* tp->lock is held. */
5921static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
5922{
5923	u32 addr_high, addr_low;
5924	int i;
5925
5926	addr_high = ((tp->dev->dev_addr[0] << 8) |
5927		     tp->dev->dev_addr[1]);
5928	addr_low = ((tp->dev->dev_addr[2] << 24) |
5929		    (tp->dev->dev_addr[3] << 16) |
5930		    (tp->dev->dev_addr[4] <<  8) |
5931		    (tp->dev->dev_addr[5] <<  0));
5932	for (i = 0; i < 4; i++) {
5933		if (i == 1 && skip_mac_1)
5934			continue;
5935		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
5936		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
5937	}
5938
5939	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
5940	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5941		for (i = 0; i < 12; i++) {
5942			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
5943			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
5944		}
5945	}
5946
5947	addr_high = (tp->dev->dev_addr[0] +
5948		     tp->dev->dev_addr[1] +
5949		     tp->dev->dev_addr[2] +
5950		     tp->dev->dev_addr[3] +
5951		     tp->dev->dev_addr[4] +
5952		     tp->dev->dev_addr[5]) &
5953		TX_BACKOFF_SEED_MASK;
5954	tw32(MAC_TX_BACKOFF_SEED, addr_high);
5955}
5956
5957static int tg3_set_mac_addr(struct net_device *dev, void *p)
5958{
5959	struct tg3 *tp = netdev_priv(dev);
5960	struct sockaddr *addr = p;
5961	int err = 0, skip_mac_1 = 0;
5962
5963	if (!is_valid_ether_addr(addr->sa_data))
5964		return -EINVAL;
5965
5966	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5967
5968	if (!netif_running(dev))
5969		return 0;
5970
5971	if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5972		u32 addr0_high, addr0_low, addr1_high, addr1_low;
5973
5974		addr0_high = tr32(MAC_ADDR_0_HIGH);
5975		addr0_low = tr32(MAC_ADDR_0_LOW);
5976		addr1_high = tr32(MAC_ADDR_1_HIGH);
5977		addr1_low = tr32(MAC_ADDR_1_LOW);
5978
5979		/* Skip MAC addr 1 if ASF is using it. */
5980		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
5981		    !(addr1_high == 0 && addr1_low == 0))
5982			skip_mac_1 = 1;
5983	}
5984	spin_lock_bh(&tp->lock);
5985	__tg3_set_mac_addr(tp, skip_mac_1);
5986	spin_unlock_bh(&tp->lock);
5987
5988	return err;
5989}
5990
5991/* tp->lock is held. */
5992static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
5993			   dma_addr_t mapping, u32 maxlen_flags,
5994			   u32 nic_addr)
5995{
5996	tg3_write_mem(tp,
5997		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
5998		      ((u64) mapping >> 32));
5999	tg3_write_mem(tp,
6000		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
6001		      ((u64) mapping & 0xffffffff));
6002	tg3_write_mem(tp,
6003		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
6004		       maxlen_flags);
6005
6006	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6007		tg3_write_mem(tp,
6008			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
6009			      nic_addr);
6010}
6011
6012static void __tg3_set_rx_mode(struct net_device *);
6013static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
6014{
6015	tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
6016	tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
6017	tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
6018	tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
6019	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6020		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
6021		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
6022	}
6023	tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
6024	tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
6025	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6026		u32 val = ec->stats_block_coalesce_usecs;
6027
6028		if (!netif_carrier_ok(tp->dev))
6029			val = 0;
6030
6031		tw32(HOSTCC_STAT_COAL_TICKS, val);
6032	}
6033}
6034
6035/* tp->lock is held. */
6036static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
6037{
6038	u32 val, rdmac_mode;
6039	int i, err, limit;
6040
6041	tg3_disable_ints(tp);
6042
6043	tg3_stop_fw(tp);
6044
6045	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
6046
6047	if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
6048		tg3_abort_hw(tp, 1);
6049	}
6050
6051	if (reset_phy)
6052		tg3_phy_reset(tp);
6053
6054	err = tg3_chip_reset(tp);
6055	if (err)
6056		return err;
6057
6058	tg3_write_sig_legacy(tp, RESET_KIND_INIT);
6059
6060	/* This works around an issue with Athlon chipsets on
6061	 * B3 tigon3 silicon.  This bit has no effect on any
6062	 * other revision.  But do not set this on PCI Express
6063	 * chips.
6064	 */
6065	if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
6066		tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
6067	tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
6068
6069	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6070	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
6071		val = tr32(TG3PCI_PCISTATE);
6072		val |= PCISTATE_RETRY_SAME_DMA;
6073		tw32(TG3PCI_PCISTATE, val);
6074	}
6075
6076	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
6077		/* Enable some hw fixes.  */
6078		val = tr32(TG3PCI_MSI_DATA);
6079		val |= (1 << 26) | (1 << 28) | (1 << 29);
6080		tw32(TG3PCI_MSI_DATA, val);
6081	}
6082
6083	/* Descriptor ring init may make accesses to the
6084	 * NIC SRAM area to setup the TX descriptors, so we
6085	 * can only do this after the hardware has been
6086	 * successfully reset.
6087	 */
6088	err = tg3_init_rings(tp);
6089	if (err)
6090		return err;
6091
6092	/* This value is determined during the probe time DMA
6093	 * engine test, tg3_test_dma.
6094	 */
6095	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
6096
6097	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
6098			  GRC_MODE_4X_NIC_SEND_RINGS |
6099			  GRC_MODE_NO_TX_PHDR_CSUM |
6100			  GRC_MODE_NO_RX_PHDR_CSUM);
6101	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
6102
6103	/* Pseudo-header checksum is done by hardware logic and not
6104	 * the offload processers, so make the chip do the pseudo-
6105	 * header checksums on receive.  For transmit it is more
6106	 * convenient to do the pseudo-header checksum in software
6107	 * as Linux does that on transmit for us in all cases.
6108	 */
6109	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
6110
6111	tw32(GRC_MODE,
6112	     tp->grc_mode |
6113	     (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
6114
6115	/* Setup the timer prescalar register.  Clock is always 66Mhz. */
6116	val = tr32(GRC_MISC_CFG);
6117	val &= ~0xff;
6118	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
6119	tw32(GRC_MISC_CFG, val);
6120
6121	/* Initialize MBUF/DESC pool. */
6122	if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6123		/* Do nothing.  */
6124	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
6125		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
6126		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
6127			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
6128		else
6129			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
6130		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
6131		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
6132	}
6133	else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6134		int fw_len;
6135
6136		fw_len = (TG3_TSO5_FW_TEXT_LEN +
6137			  TG3_TSO5_FW_RODATA_LEN +
6138			  TG3_TSO5_FW_DATA_LEN +
6139			  TG3_TSO5_FW_SBSS_LEN +
6140			  TG3_TSO5_FW_BSS_LEN);
6141		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
6142		tw32(BUFMGR_MB_POOL_ADDR,
6143		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
6144		tw32(BUFMGR_MB_POOL_SIZE,
6145		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
6146	}
6147
6148	if (tp->dev->mtu <= ETH_DATA_LEN) {
6149		tw32(BUFMGR_MB_RDMA_LOW_WATER,
6150		     tp->bufmgr_config.mbuf_read_dma_low_water);
6151		tw32(BUFMGR_MB_MACRX_LOW_WATER,
6152		     tp->bufmgr_config.mbuf_mac_rx_low_water);
6153		tw32(BUFMGR_MB_HIGH_WATER,
6154		     tp->bufmgr_config.mbuf_high_water);
6155	} else {
6156		tw32(BUFMGR_MB_RDMA_LOW_WATER,
6157		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
6158		tw32(BUFMGR_MB_MACRX_LOW_WATER,
6159		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
6160		tw32(BUFMGR_MB_HIGH_WATER,
6161		     tp->bufmgr_config.mbuf_high_water_jumbo);
6162	}
6163	tw32(BUFMGR_DMA_LOW_WATER,
6164	     tp->bufmgr_config.dma_low_water);
6165	tw32(BUFMGR_DMA_HIGH_WATER,
6166	     tp->bufmgr_config.dma_high_water);
6167
6168	tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
6169	for (i = 0; i < 2000; i++) {
6170		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
6171			break;
6172		udelay(10);
6173	}
6174	if (i >= 2000) {
6175		printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
6176		       tp->dev->name);
6177		return -ENODEV;
6178	}
6179
6180	/* Setup replenish threshold. */
6181	val = tp->rx_pending / 8;
6182	if (val == 0)
6183		val = 1;
6184	else if (val > tp->rx_std_max_post)
6185		val = tp->rx_std_max_post;
6186	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6187		if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
6188			tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
6189
6190		if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
6191			val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
6192	}
6193
6194	tw32(RCVBDI_STD_THRESH, val);
6195
6196	/* Initialize TG3_BDINFO's at:
6197	 *  RCVDBDI_STD_BD:	standard eth size rx ring
6198	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
6199	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
6200	 *
6201	 * like so:
6202	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
6203	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
6204	 *                              ring attribute flags
6205	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
6206	 *
6207	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
6208	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
6209	 *
6210	 * The size of each ring is fixed in the firmware, but the location is
6211	 * configurable.
6212	 */
6213	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6214	     ((u64) tp->rx_std_mapping >> 32));
6215	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6216	     ((u64) tp->rx_std_mapping & 0xffffffff));
6217	tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
6218	     NIC_SRAM_RX_BUFFER_DESC);
6219
6220	/* Don't even try to program the JUMBO/MINI buffer descriptor
6221	 * configs on 5705.
6222	 */
6223	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6224		tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6225		     RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
6226	} else {
6227		tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
6228		     RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6229
6230		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
6231		     BDINFO_FLAGS_DISABLED);
6232
6233		/* Setup replenish threshold. */
6234		tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
6235
6236		if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
6237			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
6238			     ((u64) tp->rx_jumbo_mapping >> 32));
6239			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
6240			     ((u64) tp->rx_jumbo_mapping & 0xffffffff));
6241			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6242			     RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
6243			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
6244			     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
6245		} else {
6246			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
6247			     BDINFO_FLAGS_DISABLED);
6248		}
6249
6250	}
6251
6252	/* There is only one send ring on 5705/5750, no need to explicitly
6253	 * disable the others.
6254	 */
6255	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6256		/* Clear out send RCB ring in SRAM. */
6257		for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
6258			tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6259				      BDINFO_FLAGS_DISABLED);
6260	}
6261
6262	tp->tx_prod = 0;
6263	tp->tx_cons = 0;
6264	tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6265	tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
6266
6267	tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
6268		       tp->tx_desc_mapping,
6269		       (TG3_TX_RING_SIZE <<
6270			BDINFO_FLAGS_MAXLEN_SHIFT),
6271		       NIC_SRAM_TX_BUFFER_DESC);
6272
6273	/* There is only one receive return ring on 5705/5750, no need
6274	 * to explicitly disable the others.
6275	 */
6276	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6277		for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
6278		     i += TG3_BDINFO_SIZE) {
6279			tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
6280				      BDINFO_FLAGS_DISABLED);
6281		}
6282	}
6283
6284	tp->rx_rcb_ptr = 0;
6285	tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
6286
6287	tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
6288		       tp->rx_rcb_mapping,
6289		       (TG3_RX_RCB_RING_SIZE(tp) <<
6290			BDINFO_FLAGS_MAXLEN_SHIFT),
6291		       0);
6292
6293	tp->rx_std_ptr = tp->rx_pending;
6294	tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
6295		     tp->rx_std_ptr);
6296
6297	tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
6298						tp->rx_jumbo_pending : 0;
6299	tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
6300		     tp->rx_jumbo_ptr);
6301
6302	/* Initialize MAC address and backoff seed. */
6303	__tg3_set_mac_addr(tp, 0);
6304
6305	/* MTU + ethernet header + FCS + optional VLAN tag */
6306	tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
6307
6308	/* The slot time is changed by tg3_setup_phy if we
6309	 * run at gigabit with half duplex.
6310	 */
6311	tw32(MAC_TX_LENGTHS,
6312	     (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6313	     (6 << TX_LENGTHS_IPG_SHIFT) |
6314	     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6315
6316	/* Receive rules. */
6317	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
6318	tw32(RCVLPC_CONFIG, 0x0181);
6319
6320	/* Calculate RDMAC_MODE setting early, we need it to determine
6321	 * the RCVLPC_STATE_ENABLE mask.
6322	 */
6323	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
6324		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
6325		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
6326		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
6327		      RDMAC_MODE_LNGREAD_ENAB);
6328
6329	/* If statement applies to 5705 and 5750 PCI devices only */
6330	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6331	     tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6332	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
6333		if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
6334		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6335			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
6336		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6337			   !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
6338			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6339		}
6340	}
6341
6342	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6343		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
6344
6345	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6346		rdmac_mode |= (1 << 27);
6347
6348	/* Receive/send statistics. */
6349	if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
6350		val = tr32(RCVLPC_STATS_ENABLE);
6351		val &= ~RCVLPC_STATSENAB_DACK_FIX;
6352		tw32(RCVLPC_STATS_ENABLE, val);
6353	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
6354		   (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6355		val = tr32(RCVLPC_STATS_ENABLE);
6356		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
6357		tw32(RCVLPC_STATS_ENABLE, val);
6358	} else {
6359		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
6360	}
6361	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
6362	tw32(SNDDATAI_STATSENAB, 0xffffff);
6363	tw32(SNDDATAI_STATSCTRL,
6364	     (SNDDATAI_SCTRL_ENABLE |
6365	      SNDDATAI_SCTRL_FASTUPD));
6366
6367	/* Setup host coalescing engine. */
6368	tw32(HOSTCC_MODE, 0);
6369	for (i = 0; i < 2000; i++) {
6370		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
6371			break;
6372		udelay(10);
6373	}
6374
6375	__tg3_set_coalesce(tp, &tp->coal);
6376
6377	/* set status block DMA address */
6378	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6379	     ((u64) tp->status_mapping >> 32));
6380	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6381	     ((u64) tp->status_mapping & 0xffffffff));
6382
6383	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6384		/* Status/statistics block address.  See tg3_timer,
6385		 * the tg3_periodic_fetch_stats call there, and
6386		 * tg3_get_stats to see how this works for 5705/5750 chips.
6387		 */
6388		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
6389		     ((u64) tp->stats_mapping >> 32));
6390		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
6391		     ((u64) tp->stats_mapping & 0xffffffff));
6392		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
6393		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
6394	}
6395
6396	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
6397
6398	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
6399	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
6400	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6401		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
6402
6403	/* Clear statistics/status block in chip, and status block in ram. */
6404	for (i = NIC_SRAM_STATS_BLK;
6405	     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
6406	     i += sizeof(u32)) {
6407		tg3_write_mem(tp, i, 0);
6408		udelay(40);
6409	}
6410	memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
6411
6412	if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6413		tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
6414		/* reset to prevent losing 1st rx packet intermittently */
6415		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6416		udelay(10);
6417	}
6418
6419	tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
6420		MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
6421	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
6422	udelay(40);
6423
6424	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
6425	 * If TG3_FLG2_IS_NIC is zero, we should read the
6426	 * register to preserve the GPIO settings for LOMs. The GPIOs,
6427	 * whether used as inputs or outputs, are set by boot code after
6428	 * reset.
6429	 */
6430	if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
6431		u32 gpio_mask;
6432
6433		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
6434			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
6435			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
6436
6437		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
6438			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
6439				     GRC_LCLCTRL_GPIO_OUTPUT3;
6440
6441		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6442			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
6443
6444		tp->grc_local_ctrl &= ~gpio_mask;
6445		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
6446
6447		/* GPIO1 must be driven high for eeprom write protect */
6448		if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
6449			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
6450					       GRC_LCLCTRL_GPIO_OUTPUT1);
6451	}
6452	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6453	udelay(100);
6454
6455	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
6456	tp->last_tag = 0;
6457
6458	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6459		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
6460		udelay(40);
6461	}
6462
6463	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
6464	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
6465	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
6466	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
6467	       WDMAC_MODE_LNGREAD_ENAB);
6468
6469	/* If statement applies to 5705 and 5750 PCI devices only */
6470	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6471	     tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
6472	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
6473		if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
6474		    (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
6475		     tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
6476			/* nothing */
6477		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
6478			   !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
6479			   !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
6480			val |= WDMAC_MODE_RX_ACCEL;
6481		}
6482	}
6483
6484	/* Enable host coalescing bug fix */
6485	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
6486	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787))
6487		val |= (1 << 29);
6488
6489	tw32_f(WDMAC_MODE, val);
6490	udelay(40);
6491
6492	if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
6493		val = tr32(TG3PCI_X_CAPS);
6494		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
6495			val &= ~PCIX_CAPS_BURST_MASK;
6496			val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6497		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
6498			val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
6499			val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
6500		}
6501		tw32(TG3PCI_X_CAPS, val);
6502	}
6503
6504	tw32_f(RDMAC_MODE, rdmac_mode);
6505	udelay(40);
6506
6507	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
6508	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
6509		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
6510	tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
6511	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
6512	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
6513	tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
6514	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
6515	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6516		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
6517	tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
6518	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
6519
6520	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
6521		err = tg3_load_5701_a0_firmware_fix(tp);
6522		if (err)
6523			return err;
6524	}
6525
6526	if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
6527		err = tg3_load_tso_firmware(tp);
6528		if (err)
6529			return err;
6530	}
6531
6532	tp->tx_mode = TX_MODE_ENABLE;
6533	tw32_f(MAC_TX_MODE, tp->tx_mode);
6534	udelay(100);
6535
6536	tp->rx_mode = RX_MODE_ENABLE;
6537	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
6538		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
6539
6540	tw32_f(MAC_RX_MODE, tp->rx_mode);
6541	udelay(10);
6542
6543	if (tp->link_config.phy_is_low_power) {
6544		tp->link_config.phy_is_low_power = 0;
6545		tp->link_config.speed = tp->link_config.orig_speed;
6546		tp->link_config.duplex = tp->link_config.orig_duplex;
6547		tp->link_config.autoneg = tp->link_config.orig_autoneg;
6548	}
6549
6550	tp->mi_mode = MAC_MI_MODE_BASE;
6551	tw32_f(MAC_MI_MODE, tp->mi_mode);
6552	udelay(80);
6553
6554	tw32(MAC_LED_CTRL, tp->led_ctrl);
6555
6556	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
6557	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6558		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6559		udelay(10);
6560	}
6561	tw32_f(MAC_RX_MODE, tp->rx_mode);
6562	udelay(10);
6563
6564	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6565		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6566			!(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
6567			/* Set drive transmission level to 1.2V  */
6568			/* only if the signal pre-emphasis bit is not set  */
6569			val = tr32(MAC_SERDES_CFG);
6570			val &= 0xfffff000;
6571			val |= 0x880;
6572			tw32(MAC_SERDES_CFG, val);
6573		}
6574		if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
6575			tw32(MAC_SERDES_CFG, 0x616000);
6576	}
6577
6578	/* Prevent chip from dropping frames when flow control
6579	 * is enabled.
6580	 */
6581	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
6582
6583	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6584	    (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
6585		/* Use hardware link auto-negotiation */
6586		tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
6587	}
6588
6589	if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
6590	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
6591		u32 tmp;
6592
6593		tmp = tr32(SERDES_RX_CTRL);
6594		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
6595		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
6596		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
6597		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
6598	}
6599
6600	err = tg3_setup_phy(tp, 0);
6601	if (err)
6602		return err;
6603
6604	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6605	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
6606		u32 tmp;
6607
6608		/* Clear CRC stats. */
6609		if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
6610			tg3_writephy(tp, MII_TG3_TEST1,
6611				     tmp | MII_TG3_TEST1_CRC_EN);
6612			tg3_readphy(tp, 0x14, &tmp);
6613		}
6614	}
6615
6616	__tg3_set_rx_mode(tp->dev);
6617
6618	/* Initialize receive rules. */
6619	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
6620	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
6621	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
6622	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
6623
6624	if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6625	    !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6626		limit = 8;
6627	else
6628		limit = 16;
6629	if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
6630		limit -= 4;
6631	switch (limit) {
6632	case 16:
6633		tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
6634	case 15:
6635		tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
6636	case 14:
6637		tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
6638	case 13:
6639		tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
6640	case 12:
6641		tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
6642	case 11:
6643		tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
6644	case 10:
6645		tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
6646	case 9:
6647		tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
6648	case 8:
6649		tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
6650	case 7:
6651		tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
6652	case 6:
6653		tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
6654	case 5:
6655		tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
6656	case 4:
6657		/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
6658	case 3:
6659		/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
6660	case 2:
6661	case 1:
6662
6663	default:
6664		break;
6665	};
6666
6667	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
6668
6669	return 0;
6670}
6671
6672/* Called at device open time to get the chip ready for
6673 * packet processing.  Invoked with tp->lock held.
6674 */
6675static int tg3_init_hw(struct tg3 *tp, int reset_phy)
6676{
6677	int err;
6678
6679	/* Force the chip into D0. */
6680	err = tg3_set_power_state(tp, PCI_D0);
6681	if (err)
6682		goto out;
6683
6684	tg3_switch_clocks(tp);
6685
6686	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6687
6688	err = tg3_reset_hw(tp, reset_phy);
6689
6690out:
6691	return err;
6692}
6693
/* Read the 32-bit statistics register REG and accumulate it into the
 * 64-bit counter PSTAT, propagating a carry into the high word when
 * the low word wraps around.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
6700
/* Accumulate the chip's 32-bit MAC/receive-list statistics registers
 * into the 64-bit counters in tp->hw_stats.  Called once per second
 * from tg3_timer() on 5705-plus chips; does nothing while the link
 * is down.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!netif_carrier_ok(tp->dev))
		return;

	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
6741
/* Per-device periodic timer, re-armed every tp->timer_offset jiffies.
 * Handles the non-tagged-status interrupt race workaround on every
 * tick, once-per-second link polling and statistics collection, and
 * the ASF firmware heartbeat.
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	/* Interrupt synchronization is in progress; skip this tick and
	 * just re-arm the timer.
	 */
	if (tp->irq_sync)
		goto restart_timer;

	spin_lock(&tp->lock);

	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->hw_status->status & SD_STATUS_UPDATED) {
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
		}

		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			/* Write DMA engine stopped unexpectedly; schedule
			 * a full chip reset from process context.
			 */
			tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
			spin_unlock(&tp->lock);
			schedule_work(&tp->reset_task);
			return;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
			tg3_periodic_fetch_stats(tp);

		if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			/* Re-run PHY setup when the link state disagrees
			 * with what the MAC status register reports.
			 */
			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (! netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					tw32_f(MAC_MODE,
					     (tp->mac_mode &
					      ~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_serdes_parallel_detect(tp);

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
			u32 val;

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			/* 5 seconds timeout */
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
			/* Kick the RX CPU to process the firmware command. */
			val = tr32(GRC_RX_CPU_EVENT);
			val |= (1 << 14);
			tw32(GRC_RX_CPU_EVENT, val);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
6861
6862static int tg3_request_irq(struct tg3 *tp)
6863{
6864	irq_handler_t fn;
6865	unsigned long flags;
6866	struct net_device *dev = tp->dev;
6867
6868	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6869		fn = tg3_msi;
6870		if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
6871			fn = tg3_msi_1shot;
6872		flags = IRQF_SAMPLE_RANDOM;
6873	} else {
6874		fn = tg3_interrupt;
6875		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
6876			fn = tg3_interrupt_tagged;
6877		flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
6878	}
6879	return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
6880}
6881
/* Verify that the chip can actually deliver an interrupt: swap in a
 * minimal test ISR, force an immediate interrupt through the host
 * coalescing engine, and poll up to ~50ms for evidence that it fired.
 * The normal handler is reinstalled before returning.  Returns 0 on
 * success, -EIO if no interrupt was observed, or a request_irq()
 * error code.
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	free_irq(tp->pdev->irq, dev);

	err = request_irq(tp->pdev->irq, tg3_test_isr,
			  IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
	if (err)
		return err;

	tp->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	/* Force an immediate interrupt via the coalescing engine. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       HOSTCC_MODE_NOW);

	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
					TG3_64BIT_REG_LOW);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		/* A non-zero interrupt mailbox or a masked PCI interrupt
		 * is taken as proof that the interrupt was delivered.
		 */
		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		msleep(10);
	}

	tg3_disable_ints(tp);

	free_irq(tp->pdev->irq, dev);

	/* Restore the normal production interrupt handler. */
	err = tg3_request_irq(tp);

	if (err)
		return err;

	if (intr_ok)
		return 0;

	return -EIO;
}
6935
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored.  Any other return value means the device is
 * left without a working interrupt and the caller must tear down.
 */
static int tg3_test_msi(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	int err;
	u16 pci_cmd;

	/* Nothing to test unless MSI is actually in use. */
	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	/* Restore the original PCI command word (SERR included). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
	       "switching to INTx mode. Please report this failure to "
	       "the PCI maintainer and include system chipset information.\n",
		       tp->dev->name);

	free_irq(tp->pdev->irq, dev);
	pci_disable_msi(tp->pdev);

	tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;

	err = tg3_request_irq(tp);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->pdev->irq, dev);

	return err;
}
6996
/* net_device open entry point: power up the chip, allocate DMA rings,
 * set up the interrupt (preferring MSI where supported), initialize
 * the hardware, start the periodic timer, and enable interrupts.
 * Every failure path unwinds whatever was set up before it.
 */
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	netif_carrier_off(tp->dev);

	tg3_full_lock(tp, 0);

	/* Chip must be in D0 before any register access below. */
	err = tg3_set_power_state(tp, PCI_D0);
	if (err) {
		tg3_full_unlock(tp);
		return err;
	}

	tg3_disable_ints(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	tg3_full_unlock(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		return err;

	if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
			printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
			       "Not using MSI.\n", tp->dev->name);
		} else if (pci_enable_msi(tp->pdev) == 0) {
			u32 msi_mode;

			msi_mode = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
			tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
		}
	}
	err = tg3_request_irq(tp);

	if (err) {
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			pci_disable_msi(tp->pdev);
			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
		}
		tg3_free_consistent(tp);
		return err;
	}

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, 1);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	} else {
		/* Timer ticks at 1 Hz with tagged status, 10 Hz without;
		 * the counters below convert the tick rate into the
		 * once-per-second and once-per-two-seconds work items
		 * inside tg3_timer().
		 */
		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
			tp->timer_offset = HZ;
		else
			tp->timer_offset = HZ / 10;

		BUG_ON(tp->timer_offset > HZ);
		tp->timer_counter = tp->timer_multiplier =
			(HZ / tp->timer_offset);
		tp->asf_counter = tp->asf_multiplier =
			((HZ / tp->timer_offset) * 2);

		init_timer(&tp->timer);
		tp->timer.expires = jiffies + tp->timer_offset;
		tp->timer.data = (unsigned long) tp;
		tp->timer.function = tg3_timer;
	}

	tg3_full_unlock(tp);

	if (err) {
		free_irq(tp->pdev->irq, dev);
		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			pci_disable_msi(tp->pdev);
			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
		}
		tg3_free_consistent(tp);
		return err;
	}

	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		/* tg3_test_msi() returns 0 when MSI works or when INTx
		 * was successfully restored; any other result leaves the
		 * device unusable, so tear everything down.
		 */
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);

			if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
				pci_disable_msi(tp->pdev);
				tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
			}
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_free_consistent(tp);

			tg3_full_unlock(tp);

			return err;
		}

		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
			if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
				u32 val = tr32(PCIE_TRANSACTION_CFG);

				tw32(PCIE_TRANSACTION_CFG,
				     val | PCIE_TRANS_CFG_1SHOT_MSI);
			}
		}
	}

	tg3_full_lock(tp, 0);

	add_timer(&tp->timer);
	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	tg3_enable_ints(tp);

	tg3_full_unlock(tp);

	netif_start_queue(dev);

	return 0;
}
7127
7128
/* Forward declarations: tg3_close() snapshots these accessors' results,
 * but they are defined further down in the file.
 */
static struct net_device_stats *tg3_get_stats(struct net_device *);
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
7131
/* net_device stop entry point: reverses tg3_open().  Stops the queue
 * and timer, halts the chip, releases the IRQ and MSI vector, snapshots
 * the statistics, frees DMA memory, and drops the chip to D3hot.
 */
static int tg3_close(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	/* Make sure a pending reset_task is not still running. */
	cancel_work_sync(&tp->reset_task);

	netif_stop_queue(dev);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;

	tg3_full_unlock(tp);

	free_irq(tp->pdev->irq, dev);
	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
		pci_disable_msi(tp->pdev);
		tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
	}

	/* Snapshot the running totals before hw_stats is freed, so
	 * counters continue from these values after the next open.
	 */
	memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
	       sizeof(tp->net_stats_prev));
	memcpy(&tp->estats_prev, tg3_get_estats(tp),
	       sizeof(tp->estats_prev));

	tg3_free_consistent(tp);

	tg3_set_power_state(tp, PCI_D3hot);

	netif_carrier_off(tp->dev);

	return 0;
}
7171
7172static inline unsigned long get_stat64(tg3_stat64_t *val)
7173{
7174	unsigned long ret;
7175
7176#if (BITS_PER_LONG == 32)
7177	ret = val->low;
7178#else
7179	ret = ((u64)val->high << 32) | ((u64)val->low);
7180#endif
7181	return ret;
7182}
7183
/* Return the cumulative receive CRC error count.  On 5700/5701 with a
 * non-SerDes PHY the count comes from the PHY's CRC counter (enabled
 * via MII_TG3_TEST1_CRC_EN, read from register 0x14) and is accumulated
 * in software in tp->phy_crc_errors; all other chips report the MAC's
 * rx_fcs_errors hardware statistic.
 */
static unsigned long calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		spin_lock_bh(&tp->lock);
		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			/* NOTE(review): the counter appears to be a delta
			 * that is accumulated below — confirm it clears
			 * on read.
			 */
			tg3_readphy(tp, 0x14, &val);
		} else
			val = 0;
		spin_unlock_bh(&tp->lock);

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
7209
/* Helper for tg3_get_estats(): adds the live 64-bit hardware counter
 * `member' to the value snapshotted into estats_prev at the last close.
 * Relies on the locals `estats', `old_estats' and `hw_stats' in the
 * enclosing function.
 */
#define ESTAT_ADD(member) \
	estats->member =	old_estats->member + \
				get_stat64(&hw_stats->member)
7213
/* Build the ethtool statistics structure by summing the live hardware
 * statistics block with the snapshot saved at the last device close.
 * Returns the saved snapshot unchanged when the hardware stats block
 * is not allocated.
 */
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
	struct tg3_ethtool_stats *estats = &tp->estats;
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_estats;

	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	return estats;
}
7301
/* net_device get_stats: derive the standard interface statistics from
 * the 64-bit hardware statistics block, adding the totals saved at the
 * last close so counters appear continuous across down/up cycles.
 * Returns the saved copy unchanged when the stats block is not
 * allocated.
 */
static struct net_device_stats *tg3_get_stats(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	struct net_device_stats *stats = &tp->net_stats;
	struct net_device_stats *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_stats;

	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	/* CRC errors may come from the PHY on some chips. */
	stats->rx_crc_errors = old_stats->rx_crc_errors +
		calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	return stats;
}
7361
7362static inline u32 calc_crc(unsigned char *buf, int len)
7363{
7364	u32 reg;
7365	u32 tmp;
7366	int j, k;
7367
7368	reg = 0xffffffff;
7369
7370	for (j = 0; j < len; j++) {
7371		reg ^= buf[j];
7372
7373		for (k = 0; k < 8; k++) {
7374			tmp = reg & 0x01;
7375
7376			reg >>= 1;
7377
7378			if (tmp) {
7379				reg ^= 0xedb88320;
7380			}
7381		}
7382	}
7383
7384	return ~reg;
7385}
7386
7387static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
7388{
7389	/* accept or reject all multicast frames */
7390	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
7391	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
7392	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
7393	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
7394}
7395
/* Program the MAC receive mode (promiscuous / VLAN tag stripping) and
 * the multicast hash filters from the device flags and multicast list.
 * Called under the full lock (see tg3_set_rx_mode()).
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
#if TG3_VLAN_TAG_USED
	if (!tp->vlgrp &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#else
	/* By definition, VLAN is disabled always in this
	 * case.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi (tp, 1);
	} else if (dev->mc_count < 1) {
		/* Reject all multicast. */
		tg3_set_multi (tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		unsigned int i;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* Low 7 bits of the inverted CRC select one of the
			 * 128 hash-filter bits (4 registers x 32 bits).
			 */
			crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	/* Only touch the RX mode register when something changed. */
	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
7459
/* net_device set_rx_mode: apply the RX filter settings under the full
 * lock; hardware is only touched while the interface is running.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
7471
/* Size in bytes of the register dump produced by tg3_get_regs(). */
#define TG3_REGDUMP_LEN		(32 * 1024)

/* ethtool get_regs_len: report the fixed register dump size. */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REGDUMP_LEN;
}
7478
/* ethtool get_regs: dump the chip's register blocks into a 32KB buffer
 * at their native offsets; regions not explicitly read below are left
 * zeroed.  Skipped entirely while the PHY is in low-power state.
 */
static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p;
	struct tg3 *tp = netdev_priv(dev);
	u8 *orig_p = _p;
	int i;

	regs->version = 0;

	memset(p, 0, TG3_REGDUMP_LEN);

	if (tp->link_config.phy_is_low_power)
		return;

	tg3_full_lock(tp, 0);

/* Read one register into the dump cursor and advance it. */
#define __GET_REG32(reg)	(*(p)++ = tr32(reg))
/* Dump `len' bytes of registers starting at `base', placed at the
 * same offset within the output buffer.
 */
#define GET_REG32_LOOP(base,len)		\
do {	p = (u32 *)(orig_p + (base));		\
	for (i = 0; i < len; i += 4)		\
		__GET_REG32((base) + i);	\
} while (0)
/* Dump a single register at its own offset. */
#define GET_REG32_1(reg)			\
do {	p = (u32 *)(orig_p + (reg));		\
	__GET_REG32((reg));			\
} while (0)

	GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
	GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
	GET_REG32_LOOP(MAC_MODE, 0x4f0);
	GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
	GET_REG32_1(SNDDATAC_MODE);
	GET_REG32_LOOP(SNDBDS_MODE, 0x80);
	GET_REG32_LOOP(SNDBDI_MODE, 0x48);
	GET_REG32_1(SNDBDC_MODE);
	GET_REG32_LOOP(RCVLPC_MODE, 0x20);
	GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
	GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
	GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
	GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
	GET_REG32_1(RCVDCC_MODE);
	GET_REG32_LOOP(RCVBDI_MODE, 0x20);
	GET_REG32_LOOP(RCVCC_MODE, 0x14);
	GET_REG32_LOOP(RCVLSC_MODE, 0x08);
	GET_REG32_1(MBFREE_MODE);
	GET_REG32_LOOP(HOSTCC_MODE, 0x100);
	GET_REG32_LOOP(MEMARB_MODE, 0x10);
	GET_REG32_LOOP(BUFMGR_MODE, 0x58);
	GET_REG32_LOOP(RDMAC_MODE, 0x08);
	GET_REG32_LOOP(WDMAC_MODE, 0x08);
	GET_REG32_1(RX_CPU_MODE);
	GET_REG32_1(RX_CPU_STATE);
	GET_REG32_1(RX_CPU_PGMCTR);
	GET_REG32_1(RX_CPU_HWBKPT);
	GET_REG32_1(TX_CPU_MODE);
	GET_REG32_1(TX_CPU_STATE);
	GET_REG32_1(TX_CPU_PGMCTR);
	GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
	GET_REG32_LOOP(FTQ_RESET, 0x120);
	GET_REG32_LOOP(MSGINT_MODE, 0x0c);
	GET_REG32_1(DMAC_MODE);
	GET_REG32_LOOP(GRC_MODE, 0x4c);
	if (tp->tg3_flags & TG3_FLAG_NVRAM)
		GET_REG32_LOOP(NVRAM_CMD, 0x24);

#undef __GET_REG32
#undef GET_REG32_LOOP
#undef GET_REG32_1

	tg3_full_unlock(tp);
}
7551
/* ethtool get_eeprom_len handler: NVRAM size is probed at init time
 * and cached in the softc.
 */
static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}
7558
7559static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
7560static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
7561
/* ethtool get_eeprom handler: copy 'eeprom->len' bytes of NVRAM starting
 * at 'eeprom->offset' into 'data'.  NVRAM is read one 32-bit word at a
 * time, so an unaligned head/tail is handled by reading the enclosing
 * word and copying out just the requested bytes.  Returns 0 or a negative
 * errno from tg3_nvram_read(); eeprom->len is updated to the number of
 * bytes actually copied, even on a partial failure.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, val, b_offset, b_count;

	/* NVRAM is not accessible while the chip is powered down. */
	if (tp->link_config.phy_is_low_power)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		/* fix byte order before byte-wise copying out of the word */
		val = cpu_to_le32(val);
		memcpy(data, ((char*)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
	        eeprom->len += b_count;
	}

	/* read bytes upto the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read(tp, offset + i, &val);
		if (ret) {
			/* report how much was copied before the failure */
			eeprom->len += i;
			return ret;
		}
		val = cpu_to_le32(val);
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read(tp, b_offset, &val);
		if (ret)
			return ret;
		val = cpu_to_le32(val);
		memcpy(pd, ((char*)&val), b_count);
		eeprom->len += b_count;
	}
	return 0;
}
7623
7624static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
7625
7626static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
7627{
7628	struct tg3 *tp = netdev_priv(dev);
7629	int ret;
7630	u32 offset, len, b_offset, odd_len, start, end;
7631	u8 *buf;
7632
7633	if (tp->link_config.phy_is_low_power)
7634		return -EAGAIN;
7635
7636	if (eeprom->magic != TG3_EEPROM_MAGIC)
7637		return -EINVAL;
7638
7639	offset = eeprom->offset;
7640	len = eeprom->len;
7641
7642	if ((b_offset = (offset & 3))) {
7643		/* adjustments to start on required 4 byte boundary */
7644		ret = tg3_nvram_read(tp, offset-b_offset, &start);
7645		if (ret)
7646			return ret;
7647		start = cpu_to_le32(start);
7648		len += b_offset;
7649		offset &= ~3;
7650		if (len < 4)
7651			len = 4;
7652	}
7653
7654	odd_len = 0;
7655	if (len & 3) {
7656		/* adjustments to end on required 4 byte boundary */
7657		odd_len = 1;
7658		len = (len + 3) & ~3;
7659		ret = tg3_nvram_read(tp, offset+len-4, &end);
7660		if (ret)
7661			return ret;
7662		end = cpu_to_le32(end);
7663	}
7664
7665	buf = data;
7666	if (b_offset || odd_len) {
7667		buf = kmalloc(len, GFP_KERNEL);
7668		if (buf == 0)
7669			return -ENOMEM;
7670		if (b_offset)
7671			memcpy(buf, &start, 4);
7672		if (odd_len)
7673			memcpy(buf+len-4, &end, 4);
7674		memcpy(buf + b_offset, data, eeprom->len);
7675	}
7676
7677	ret = tg3_nvram_write_block(tp, offset, len, buf);
7678
7679	if (buf != data)
7680		kfree(buf);
7681
7682	return ret;
7683}
7684
7685static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7686{
7687  	struct tg3 *tp = netdev_priv(dev);
7688
7689	cmd->supported = (SUPPORTED_Autoneg);
7690
7691	if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7692		cmd->supported |= (SUPPORTED_1000baseT_Half |
7693				   SUPPORTED_1000baseT_Full);
7694
7695	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
7696		cmd->supported |= (SUPPORTED_100baseT_Half |
7697				  SUPPORTED_100baseT_Full |
7698				  SUPPORTED_10baseT_Half |
7699				  SUPPORTED_10baseT_Full |
7700				  SUPPORTED_MII);
7701		cmd->port = PORT_TP;
7702	} else {
7703		cmd->supported |= SUPPORTED_FIBRE;
7704		cmd->port = PORT_FIBRE;
7705	}
7706
7707	cmd->advertising = tp->link_config.advertising;
7708	if (netif_running(dev)) {
7709		cmd->speed = tp->link_config.active_speed;
7710		cmd->duplex = tp->link_config.active_duplex;
7711	}
7712	cmd->phy_address = PHY_ADDR;
7713	cmd->transceiver = 0;
7714	cmd->autoneg = tp->link_config.autoneg;
7715	cmd->maxtxpkt = 0;
7716	cmd->maxrxpkt = 0;
7717	return 0;
7718}
7719
7720static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7721{
7722	struct tg3 *tp = netdev_priv(dev);
7723
7724	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
7725		/* These are the only valid advertisement bits allowed.  */
7726		if (cmd->autoneg == AUTONEG_ENABLE &&
7727		    (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
7728					  ADVERTISED_1000baseT_Full |
7729					  ADVERTISED_Autoneg |
7730					  ADVERTISED_FIBRE)))
7731			return -EINVAL;
7732		/* Fiber can only do SPEED_1000.  */
7733		else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7734			 (cmd->speed != SPEED_1000))
7735			return -EINVAL;
7736	/* Copper cannot force SPEED_1000.  */
7737	} else if ((cmd->autoneg != AUTONEG_ENABLE) &&
7738		   (cmd->speed == SPEED_1000))
7739		return -EINVAL;
7740	else if ((cmd->speed == SPEED_1000) &&
7741		 (tp->tg3_flags2 & TG3_FLAG_10_100_ONLY))
7742		return -EINVAL;
7743
7744	tg3_full_lock(tp, 0);
7745
7746	tp->link_config.autoneg = cmd->autoneg;
7747	if (cmd->autoneg == AUTONEG_ENABLE) {
7748		tp->link_config.advertising = cmd->advertising;
7749		tp->link_config.speed = SPEED_INVALID;
7750		tp->link_config.duplex = DUPLEX_INVALID;
7751	} else {
7752		tp->link_config.advertising = 0;
7753		tp->link_config.speed = cmd->speed;
7754		tp->link_config.duplex = cmd->duplex;
7755  	}
7756
7757	tp->link_config.orig_speed = tp->link_config.speed;
7758	tp->link_config.orig_duplex = tp->link_config.duplex;
7759	tp->link_config.orig_autoneg = tp->link_config.autoneg;
7760
7761	if (netif_running(dev))
7762		tg3_setup_phy(tp, 1);
7763
7764	tg3_full_unlock(tp);
7765
7766	return 0;
7767}
7768
7769static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
7770{
7771	struct tg3 *tp = netdev_priv(dev);
7772
7773	strcpy(info->driver, DRV_MODULE_NAME);
7774	strcpy(info->version, DRV_MODULE_VERSION);
7775	strcpy(info->fw_version, tp->fw_ver);
7776	strcpy(info->bus_info, pci_name(tp->pdev));
7777}
7778
7779static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7780{
7781	struct tg3 *tp = netdev_priv(dev);
7782
7783	if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
7784		wol->supported = WAKE_MAGIC;
7785	else
7786		wol->supported = 0;
7787	wol->wolopts = 0;
7788	if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
7789		wol->wolopts = WAKE_MAGIC;
7790	memset(&wol->sopass, 0, sizeof(wol->sopass));
7791}
7792
7793static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7794{
7795	struct tg3 *tp = netdev_priv(dev);
7796
7797	if (wol->wolopts & ~WAKE_MAGIC)
7798		return -EINVAL;
7799	if ((wol->wolopts & WAKE_MAGIC) &&
7800	    !(tp->tg3_flags & TG3_FLAG_WOL_CAP))
7801		return -EINVAL;
7802
7803	spin_lock_bh(&tp->lock);
7804	if (wol->wolopts & WAKE_MAGIC)
7805		tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
7806	else
7807		tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
7808	spin_unlock_bh(&tp->lock);
7809
7810	return 0;
7811}
7812
/* ethtool get_msglevel handler: return the NETIF_MSG_* bitmask. */
static u32 tg3_get_msglevel(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return tp->msg_enable;
}
7818
/* ethtool set_msglevel handler: store the new NETIF_MSG_* bitmask. */
static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);
	tp->msg_enable = value;
}
7824
7825static int tg3_set_tso(struct net_device *dev, u32 value)
7826{
7827	struct tg3 *tp = netdev_priv(dev);
7828
7829	if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7830		if (value)
7831			return -EINVAL;
7832		return 0;
7833	}
7834	if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
7835	    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
7836		if (value)
7837			dev->features |= NETIF_F_TSO6;
7838		else
7839			dev->features &= ~NETIF_F_TSO6;
7840	}
7841	return ethtool_op_set_tso(dev, value);
7842}
7843
/* ethtool nway_reset handler: restart autonegotiation on the copper PHY.
 * Returns 0 on success, -EAGAIN if the device is down, -EINVAL on SerDes
 * parts or when autoneg is not enabled.
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 bmcr;
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	/* SerDes links have no MII BMCR to poke. */
	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
		return -EINVAL;

	spin_lock_bh(&tp->lock);
	r = -EINVAL;
	/* NOTE(review): the result of this first read is discarded and the
	 * register is immediately read again below — presumably to flush
	 * latched status bits before the value that is actually checked;
	 * TODO confirm against PHY errata before removing.
	 */
	tg3_readphy(tp, MII_BMCR, &bmcr);
	if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
	    ((bmcr & BMCR_ANENABLE) ||
	     (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
		/* Restart (and force-enable) autonegotiation. */
		tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
					   BMCR_ANENABLE);
		r = 0;
	}
	spin_unlock_bh(&tp->lock);

	return r;
}
7870
7871static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7872{
7873	struct tg3 *tp = netdev_priv(dev);
7874
7875	ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
7876	ering->rx_mini_max_pending = 0;
7877	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7878		ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
7879	else
7880		ering->rx_jumbo_max_pending = 0;
7881
7882	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
7883
7884	ering->rx_pending = tp->rx_pending;
7885	ering->rx_mini_pending = 0;
7886	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
7887		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
7888	else
7889		ering->rx_jumbo_pending = 0;
7890
7891	ering->tx_pending = tp->tx_pending;
7892}
7893
/* ethtool set_ringparam handler: validate and apply new RX/TX ring
 * sizes.  If the interface is up, the chip is halted and reinitialized
 * with the new sizes.  Returns 0, -EINVAL on out-of-range sizes, or a
 * negative errno from the restart.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int irq_sync = 0, err = 0;

	/* TX ring must exceed MAX_SKB_FRAGS (3x that margin on chips
	 * with the TSO-bug workaround).
	 */
	if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
	    (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	/* Some chips cannot post more than 64 standard RX descriptors. */
	if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
	tp->tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		/* Reinitialize the chip so the new ring sizes take effect. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	return err;
}
7933
7934static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7935{
7936	struct tg3 *tp = netdev_priv(dev);
7937
7938	epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
7939	epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
7940	epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
7941}
7942
7943static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7944{
7945	struct tg3 *tp = netdev_priv(dev);
7946	int irq_sync = 0, err = 0;
7947
7948	if (netif_running(dev)) {
7949		tg3_netif_stop(tp);
7950		irq_sync = 1;
7951	}
7952
7953	tg3_full_lock(tp, irq_sync);
7954
7955	if (epause->autoneg)
7956		tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
7957	else
7958		tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
7959	if (epause->rx_pause)
7960		tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
7961	else
7962		tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
7963	if (epause->tx_pause)
7964		tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
7965	else
7966		tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
7967
7968	if (netif_running(dev)) {
7969		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7970		err = tg3_restart_hw(tp, 1);
7971		if (!err)
7972			tg3_netif_start(tp);
7973	}
7974
7975	tg3_full_unlock(tp);
7976
7977	return err;
7978}
7979
7980static u32 tg3_get_rx_csum(struct net_device *dev)
7981{
7982	struct tg3 *tp = netdev_priv(dev);
7983	return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
7984}
7985
7986static int tg3_set_rx_csum(struct net_device *dev, u32 data)
7987{
7988	struct tg3 *tp = netdev_priv(dev);
7989
7990	if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
7991		if (data != 0)
7992			return -EINVAL;
7993  		return 0;
7994  	}
7995
7996	spin_lock_bh(&tp->lock);
7997	if (data)
7998		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
7999	else
8000		tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
8001	spin_unlock_bh(&tp->lock);
8002
8003	return 0;
8004}
8005
8006static int tg3_set_tx_csum(struct net_device *dev, u32 data)
8007{
8008	struct tg3 *tp = netdev_priv(dev);
8009
8010	if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
8011		if (data != 0)
8012			return -EINVAL;
8013  		return 0;
8014  	}
8015
8016	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8017	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8018		ethtool_op_set_tx_hw_csum(dev, data);
8019	else
8020		ethtool_op_set_tx_csum(dev, data);
8021
8022	return 0;
8023}
8024
/* ethtool get_stats_count handler: number of u64 statistics entries. */
static int tg3_get_stats_count (struct net_device *dev)
{
	return TG3_NUM_STATS;
}
8029
/* ethtool self-test count handler: number of self-test results. */
static int tg3_get_test_count (struct net_device *dev)
{
	return TG3_NUM_TEST;
}
8034
8035static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
8036{
8037	switch (stringset) {
8038	case ETH_SS_STATS:
8039		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
8040		break;
8041	case ETH_SS_TEST:
8042		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
8043		break;
8044	default:
8045		WARN_ON(1);	/* we need a WARN() */
8046		break;
8047	}
8048}
8049
8050static int tg3_phys_id(struct net_device *dev, u32 data)
8051{
8052	struct tg3 *tp = netdev_priv(dev);
8053	int i;
8054
8055	if (!netif_running(tp->dev))
8056		return -EAGAIN;
8057
8058	if (data == 0)
8059		data = 2;
8060
8061	for (i = 0; i < (data * 2); i++) {
8062		if ((i % 2) == 0)
8063			tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8064					   LED_CTRL_1000MBPS_ON |
8065					   LED_CTRL_100MBPS_ON |
8066					   LED_CTRL_10MBPS_ON |
8067					   LED_CTRL_TRAFFIC_OVERRIDE |
8068					   LED_CTRL_TRAFFIC_BLINK |
8069					   LED_CTRL_TRAFFIC_LED);
8070
8071		else
8072			tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
8073					   LED_CTRL_TRAFFIC_OVERRIDE);
8074
8075		if (msleep_interruptible(500))
8076			break;
8077	}
8078	tw32(MAC_LED_CTRL, tp->led_ctrl);
8079	return 0;
8080}
8081
/* ethtool get_ethtool_stats handler: copy the structure returned by
 * tg3_get_estats() into the caller's u64 buffer.
 */
static void tg3_get_ethtool_stats (struct net_device *dev,
				   struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);
	memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
}
8088
8089#define NVRAM_TEST_SIZE 0x100
8090#define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14
8091#define NVRAM_SELFBOOT_HW_SIZE 0x20
8092#define NVRAM_SELFBOOT_DATA_SIZE 0x1c
8093
/* Self-test: verify NVRAM contents.  Determines the NVRAM layout from
 * the magic word, reads the relevant region into a scratch buffer, and
 * checks it — an 8-bit sum for selfboot format-1 images, per-byte odd
 * parity for selfboot HW images, or CRC checksums for the standard
 * legacy layout.  Returns 0 if the contents check out, -EIO on a
 * checksum/parity mismatch or read failure, -ENOMEM on allocation
 * failure.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 *buf, csum, magic;
	int i, j, err = 0, size;

	if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
		return -EIO;

	/* Pick how much NVRAM to validate based on the magic word. */
	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & 0xe00000) == 0x200000)
			size = NVRAM_SELFBOOT_FORMAT1_SIZE;
		else
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	/* Pull the region to validate into the scratch buffer. */
	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		u32 val;

		if ((err = tg3_nvram_read(tp, i, &val)) != 0)
			break;
		buf[j] = cpu_to_le32(val);
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	if ((cpu_to_be32(buf[0]) & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		/* Image is valid when all bytes sum to zero (mod 256). */
		for (i = 0; i < size; i++)
			csum8 += buf8[i];

		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((cpu_to_be32(buf[0]) & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
	       	u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;
		int j, k;

		/* Separate the parity bits and the data bytes.  */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				/* Bytes 0 and 8 hold 7 parity bits each. */
				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			else if (i == 16) {
				int l;
				u8 msk;

				/* Bytes 16-17 hold the remaining 6+8 parity bits. */
				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;

				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			data[j++] = buf8[i];
		}

		/* Each data byte must have odd parity with its parity bit. */
		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if(csum != cpu_to_le32(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != cpu_to_le32(buf[0xfc/4]))
		 goto out;

	err = 0;

out:
	kfree(buf);
	return err;
}
8207
8208#define TG3_SERDES_TIMEOUT_SEC	2
8209#define TG3_COPPER_TIMEOUT_SEC	6
8210
8211static int tg3_test_link(struct tg3 *tp)
8212{
8213	int i, max;
8214
8215	if (!netif_running(tp->dev))
8216		return -ENODEV;
8217
8218	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
8219		max = TG3_SERDES_TIMEOUT_SEC;
8220	else
8221		max = TG3_COPPER_TIMEOUT_SEC;
8222
8223	for (i = 0; i < max; i++) {
8224		if (netif_carrier_ok(tp->dev))
8225			return 0;
8226
8227		if (msleep_interruptible(1000))
8228			break;
8229	}
8230
8231	return -EIO;
8232}
8233
8234/* Only test the commonly used registers */
8235static int tg3_test_registers(struct tg3 *tp)
8236{
8237	int i, is_5705, is_5750;
8238	u32 offset, read_mask, write_mask, val, save_val, read_val;
8239	static struct {
8240		u16 offset;
8241		u16 flags;
8242#define TG3_FL_5705	0x1
8243#define TG3_FL_NOT_5705	0x2
8244#define TG3_FL_NOT_5788	0x4
8245#define TG3_FL_NOT_5750	0x8
8246		u32 read_mask;
8247		u32 write_mask;
8248	} reg_tbl[] = {
8249		/* MAC Control Registers */
8250		{ MAC_MODE, TG3_FL_NOT_5705,
8251			0x00000000, 0x00ef6f8c },
8252		{ MAC_MODE, TG3_FL_5705,
8253			0x00000000, 0x01ef6b8c },
8254		{ MAC_STATUS, TG3_FL_NOT_5705,
8255			0x03800107, 0x00000000 },
8256		{ MAC_STATUS, TG3_FL_5705,
8257			0x03800100, 0x00000000 },
8258		{ MAC_ADDR_0_HIGH, 0x0000,
8259			0x00000000, 0x0000ffff },
8260		{ MAC_ADDR_0_LOW, 0x0000,
8261		       	0x00000000, 0xffffffff },
8262		{ MAC_RX_MTU_SIZE, 0x0000,
8263			0x00000000, 0x0000ffff },
8264		{ MAC_TX_MODE, 0x0000,
8265			0x00000000, 0x00000070 },
8266		{ MAC_TX_LENGTHS, 0x0000,
8267			0x00000000, 0x00003fff },
8268		{ MAC_RX_MODE, TG3_FL_NOT_5705,
8269			0x00000000, 0x000007fc },
8270		{ MAC_RX_MODE, TG3_FL_5705,
8271			0x00000000, 0x000007dc },
8272		{ MAC_HASH_REG_0, 0x0000,
8273			0x00000000, 0xffffffff },
8274		{ MAC_HASH_REG_1, 0x0000,
8275			0x00000000, 0xffffffff },
8276		{ MAC_HASH_REG_2, 0x0000,
8277			0x00000000, 0xffffffff },
8278		{ MAC_HASH_REG_3, 0x0000,
8279			0x00000000, 0xffffffff },
8280
8281		/* Receive Data and Receive BD Initiator Control Registers. */
8282		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
8283			0x00000000, 0xffffffff },
8284		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
8285			0x00000000, 0xffffffff },
8286		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
8287			0x00000000, 0x00000003 },
8288		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
8289			0x00000000, 0xffffffff },
8290		{ RCVDBDI_STD_BD+0, 0x0000,
8291			0x00000000, 0xffffffff },
8292		{ RCVDBDI_STD_BD+4, 0x0000,
8293			0x00000000, 0xffffffff },
8294		{ RCVDBDI_STD_BD+8, 0x0000,
8295			0x00000000, 0xffff0002 },
8296		{ RCVDBDI_STD_BD+0xc, 0x0000,
8297			0x00000000, 0xffffffff },
8298
8299		/* Receive BD Initiator Control Registers. */
8300		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
8301			0x00000000, 0xffffffff },
8302		{ RCVBDI_STD_THRESH, TG3_FL_5705,
8303			0x00000000, 0x000003ff },
8304		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
8305			0x00000000, 0xffffffff },
8306
8307		/* Host Coalescing Control Registers. */
8308		{ HOSTCC_MODE, TG3_FL_NOT_5705,
8309			0x00000000, 0x00000004 },
8310		{ HOSTCC_MODE, TG3_FL_5705,
8311			0x00000000, 0x000000f6 },
8312		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
8313			0x00000000, 0xffffffff },
8314		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
8315			0x00000000, 0x000003ff },
8316		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
8317			0x00000000, 0xffffffff },
8318		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
8319			0x00000000, 0x000003ff },
8320		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
8321			0x00000000, 0xffffffff },
8322		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8323			0x00000000, 0x000000ff },
8324		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
8325			0x00000000, 0xffffffff },
8326		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
8327			0x00000000, 0x000000ff },
8328		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
8329			0x00000000, 0xffffffff },
8330		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
8331			0x00000000, 0xffffffff },
8332		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8333			0x00000000, 0xffffffff },
8334		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8335			0x00000000, 0x000000ff },
8336		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
8337			0x00000000, 0xffffffff },
8338		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
8339			0x00000000, 0x000000ff },
8340		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
8341			0x00000000, 0xffffffff },
8342		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
8343			0x00000000, 0xffffffff },
8344		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
8345			0x00000000, 0xffffffff },
8346		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
8347			0x00000000, 0xffffffff },
8348		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
8349			0x00000000, 0xffffffff },
8350		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
8351			0xffffffff, 0x00000000 },
8352		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
8353			0xffffffff, 0x00000000 },
8354
8355		/* Buffer Manager Control Registers. */
8356		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
8357			0x00000000, 0x007fff80 },
8358		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
8359			0x00000000, 0x007fffff },
8360		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
8361			0x00000000, 0x0000003f },
8362		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
8363			0x00000000, 0x000001ff },
8364		{ BUFMGR_MB_HIGH_WATER, 0x0000,
8365			0x00000000, 0x000001ff },
8366		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
8367			0xffffffff, 0x00000000 },
8368		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
8369			0xffffffff, 0x00000000 },
8370
8371		/* Mailbox Registers */
8372		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
8373			0x00000000, 0x000001ff },
8374		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
8375			0x00000000, 0x000001ff },
8376		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
8377			0x00000000, 0x000007ff },
8378		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
8379			0x00000000, 0x000001ff },
8380
8381		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
8382	};
8383
8384	is_5705 = is_5750 = 0;
8385	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
8386		is_5705 = 1;
8387		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
8388			is_5750 = 1;
8389	}
8390
8391	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
8392		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
8393			continue;
8394
8395		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
8396			continue;
8397
8398		if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
8399		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
8400			continue;
8401
8402		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
8403			continue;
8404
8405		offset = (u32) reg_tbl[i].offset;
8406		read_mask = reg_tbl[i].read_mask;
8407		write_mask = reg_tbl[i].write_mask;
8408
8409		/* Save the original register content */
8410		save_val = tr32(offset);
8411
8412		/* Determine the read-only value. */
8413		read_val = save_val & read_mask;
8414
8415		/* Write zero to the register, then make sure the read-only bits
8416		 * are not changed and the read/write bits are all zeros.
8417		 */
8418		tw32(offset, 0);
8419
8420		val = tr32(offset);
8421
8422		/* Test the read-only and read/write bits. */
8423		if (((val & read_mask) != read_val) || (val & write_mask))
8424			goto out;
8425
8426		/* Write ones to all the bits defined by RdMask and WrMask, then
8427		 * make sure the read-only bits are not changed and the
8428		 * read/write bits are all ones.
8429		 */
8430		tw32(offset, read_mask | write_mask);
8431
8432		val = tr32(offset);
8433
8434		/* Test the read-only bits. */
8435		if ((val & read_mask) != read_val)
8436			goto out;
8437
8438		/* Test the read/write bits. */
8439		if ((val & write_mask) != write_mask)
8440			goto out;
8441
8442		tw32(offset, save_val);
8443	}
8444
8445	return 0;
8446
8447out:
8448	if (netif_msg_hw(tp))
8449		printk(KERN_ERR PFX "Register test failed at offset %x\n",
8450		       offset);
8451	tw32(offset, save_val);
8452	return -EIO;
8453}
8454
8455static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
8456{
8457	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
8458	int i;
8459	u32 j;
8460
8461	for (i = 0; i < sizeof(test_pattern)/sizeof(u32); i++) {
8462		for (j = 0; j < len; j += 4) {
8463			u32 val;
8464
8465			tg3_write_mem(tp, offset + j, test_pattern[i]);
8466			tg3_read_mem(tp, offset + j, &val);
8467			if (val != test_pattern[i])
8468				return -EIO;
8469		}
8470	}
8471	return 0;
8472}
8473
/* Self-test: pattern-test the chip's internal memory regions.  The
 * region table is selected by chip family; each table is terminated by
 * an entry with offset 0xffffffff.  Returns 0 on success or the first
 * error from tg3_do_mem_test().
 */
static int tg3_test_memory(struct tg3 *tp)
{
	static struct mem_entry {
		u32 offset;	/* start of region in NIC memory */
		u32 len;	/* region length in bytes */
	} mem_tbl_570x[] = {
		{ 0x00000000, 0x00b50},
		{ 0x00002000, 0x1c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5705[] = {
		{ 0x00000100, 0x0000c},
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x01000},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0e000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5755[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x00800},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5906[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00400},
		{ 0x00006000, 0x00400},
		{ 0x00008000, 0x01000},
		{ 0x00010000, 0x01000},
		{ 0xffffffff, 0x00000}
	};
	struct mem_entry *mem_tbl;
	int err = 0;
	int i;

	/* Pick the memory map that matches this chip family. */
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
			mem_tbl = mem_tbl_5755;
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
			mem_tbl = mem_tbl_5906;
		else
			mem_tbl = mem_tbl_5705;
	} else
		mem_tbl = mem_tbl_570x;

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
		    mem_tbl[i].len)) != 0)
			break;
	}

	return err;
}
8529
8530#define TG3_MAC_LOOPBACK	0
8531#define TG3_PHY_LOOPBACK	1
8532
8533static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
8534{
8535	u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
8536	u32 desc_idx;
8537	struct sk_buff *skb, *rx_skb;
8538	u8 *tx_data;
8539	dma_addr_t map;
8540	int num_pkts, tx_len, rx_len, i, err;
8541	struct tg3_rx_buffer_desc *desc;
8542
8543	if (loopback_mode == TG3_MAC_LOOPBACK) {
8544		/* HW errata - mac loopback fails in some cases on 5780.
8545		 * Normal traffic and PHY loopback are not affected by
8546		 * errata.
8547		 */
8548		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
8549			return 0;
8550
8551		mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8552			   MAC_MODE_PORT_INT_LPBACK | MAC_MODE_LINK_POLARITY;
8553		if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
8554			mac_mode |= MAC_MODE_PORT_MODE_MII;
8555		else
8556			mac_mode |= MAC_MODE_PORT_MODE_GMII;
8557		tw32(MAC_MODE, mac_mode);
8558	} else if (loopback_mode == TG3_PHY_LOOPBACK) {
8559		u32 val;
8560
8561		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
8562			u32 phytest;
8563
8564			if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
8565				u32 phy;
8566
8567				tg3_writephy(tp, MII_TG3_EPHY_TEST,
8568					     phytest | MII_TG3_EPHY_SHADOW_EN);
8569				if (!tg3_readphy(tp, 0x1b, &phy))
8570					tg3_writephy(tp, 0x1b, phy & ~0x20);
8571				if (!tg3_readphy(tp, 0x10, &phy))
8572					tg3_writephy(tp, 0x10, phy & ~0x4000);
8573				tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
8574			}
8575			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
8576		} else
8577			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
8578
8579		tg3_writephy(tp, MII_BMCR, val);
8580		udelay(40);
8581
8582		mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
8583			   MAC_MODE_LINK_POLARITY;
8584		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
8585			tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
8586			mac_mode |= MAC_MODE_PORT_MODE_MII;
8587		} else
8588			mac_mode |= MAC_MODE_PORT_MODE_GMII;
8589
8590		/* reset to prevent losing 1st rx packet intermittently */
8591		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
8592			tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8593			udelay(10);
8594			tw32_f(MAC_RX_MODE, tp->rx_mode);
8595		}
8596		if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
8597			mac_mode &= ~MAC_MODE_LINK_POLARITY;
8598			tg3_writephy(tp, MII_TG3_EXT_CTRL,
8599				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8600		}
8601		tw32(MAC_MODE, mac_mode);
8602	}
8603	else
8604		return -EINVAL;
8605
8606	err = -EIO;
8607
8608	tx_len = 1514;
8609	skb = netdev_alloc_skb(tp->dev, tx_len);
8610	if (!skb)
8611		return -ENOMEM;
8612
8613	tx_data = skb_put(skb, tx_len);
8614	memcpy(tx_data, tp->dev->dev_addr, 6);
8615	memset(tx_data + 6, 0x0, 8);
8616
8617	tw32(MAC_RX_MTU_SIZE, tx_len + 4);
8618
8619	for (i = 14; i < tx_len; i++)
8620		tx_data[i] = (u8) (i & 0xff);
8621
8622	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
8623
8624	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8625	     HOSTCC_MODE_NOW);
8626
8627	udelay(10);
8628
8629	rx_start_idx = tp->hw_status->idx[0].rx_producer;
8630
8631	num_pkts = 0;
8632
8633	tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
8634
8635	tp->tx_prod++;
8636	num_pkts++;
8637
8638	tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
8639		     tp->tx_prod);
8640	tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
8641
8642	udelay(10);
8643
8644	/* 250 usec to allow enough time on some 10/100 Mbps devices.  */
8645	for (i = 0; i < 25; i++) {
8646		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8647		       HOSTCC_MODE_NOW);
8648
8649		udelay(10);
8650
8651		tx_idx = tp->hw_status->idx[0].tx_consumer;
8652		rx_idx = tp->hw_status->idx[0].rx_producer;
8653		if ((tx_idx == tp->tx_prod) &&
8654		    (rx_idx == (rx_start_idx + num_pkts)))
8655			break;
8656	}
8657
8658	pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
8659	dev_kfree_skb(skb);
8660
8661	if (tx_idx != tp->tx_prod)
8662		goto out;
8663
8664	if (rx_idx != rx_start_idx + num_pkts)
8665		goto out;
8666
8667	desc = &tp->rx_rcb[rx_start_idx];
8668	desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
8669	opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
8670	if (opaque_key != RXD_OPAQUE_RING_STD)
8671		goto out;
8672
8673	if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
8674	    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
8675		goto out;
8676
8677	rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
8678	if (rx_len != tx_len)
8679		goto out;
8680
8681	rx_skb = tp->rx_std_buffers[desc_idx].skb;
8682
8683	map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
8684	pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
8685
8686	for (i = 14; i < tx_len; i++) {
8687		if (*(rx_skb->data + i) != (u8) (i & 0xff))
8688			goto out;
8689	}
8690	err = 0;
8691
8692	/* tg3_free_rings will unmap and free the rx_skb */
8693out:
8694	return err;
8695}
8696
8697#define TG3_MAC_LOOPBACK_FAILED		1
8698#define TG3_PHY_LOOPBACK_FAILED		2
8699#define TG3_LOOPBACK_FAILED		(TG3_MAC_LOOPBACK_FAILED |	\
8700					 TG3_PHY_LOOPBACK_FAILED)
8701
/* Run the internal MAC and (copper only) PHY loopback self-tests.
 *
 * Returns 0 on success, or a bitmask of TG3_*_LOOPBACK_FAILED bits
 * identifying which loopback mode(s) failed.  The interface must be
 * up; the chip is reset before the tests run.
 */
static int tg3_test_loopback(struct tg3 *tp)
{
	int err = 0;

	/* Loopback needs a live netdevice; report both modes failed if not. */
	if (!netif_running(tp->dev))
		return TG3_LOOPBACK_FAILED;

	err = tg3_reset_hw(tp, 1);
	if (err)
		return TG3_LOOPBACK_FAILED;

	if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
		err |= TG3_MAC_LOOPBACK_FAILED;
	/* PHY loopback is meaningless on SerDes (fiber) interfaces. */
	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
		if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
			err |= TG3_PHY_LOOPBACK_FAILED;
	}

	return err;
}
8722
/* ethtool self-test handler.  Always runs the NVRAM checksum and link
 * tests; when ETH_TEST_FL_OFFLINE is requested it additionally halts the
 * chip and runs the register, memory, loopback and interrupt tests, then
 * restores the hardware to its previous state.  Per-test results are
 * written to data[0..5] (non-zero == failed) and an overall failure is
 * flagged in etest->flags.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);

	/* Tests need the chip awake; temporarily leave low-power mode. */
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D0);

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	if (tg3_test_link(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		/* Quiesce the chip and its on-board CPUs before poking at
		 * registers and memory directly.
		 */
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}
		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}
		/* data[4] carries the loopback failure bitmask directly. */
		if ((data[4] = tg3_test_loopback(tp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* The interrupt test manages locking itself. */
		tg3_full_unlock(tp);

		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[5] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Reinitialize the hardware for normal operation. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
			if (!tg3_restart_hw(tp, 1))
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}
	/* Drop back into low-power state if that is where we started. */
	if (tp->link_config.phy_is_low_power)
		tg3_set_power_state(tp, PCI_D3hot);

}
8795
/* Network device ioctl handler implementing the MII register access
 * ioctls (SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG).  Register access is
 * refused on SerDes devices (no MDIO-accessible PHY -> -EOPNOTSUPP via
 * the break) and while the PHY is powered down (-EAGAIN).  Writes
 * require CAP_NET_ADMIN.
 */
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = PHY_ADDR;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
			break;			/* We have no PHY */

		if (tp->link_config.phy_is_low_power)
			return -EAGAIN;

		/* Serialize MDIO access against the rest of the driver. */
		spin_lock_bh(&tp->lock);
		err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
			break;			/* We have no PHY */

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (tp->link_config.phy_is_low_power)
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
8847
#if TG3_VLAN_TAG_USED
/* VLAN acceleration hook: record the new vlan_group and refresh the
 * RX_MODE_KEEP_VLAN_TAG configuration under the full lock, with the
 * receive path stopped and restarted around the change.
 */
static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev))
		tg3_netif_stop(tp);

	tg3_full_lock(tp, 0);

	tp->vlgrp = grp;

	/* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
	__tg3_set_rx_mode(dev);

	tg3_full_unlock(tp);

	if (netif_running(dev))
		tg3_netif_start(tp);
}
#endif
8869
8870static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
8871{
8872	struct tg3 *tp = netdev_priv(dev);
8873
8874	memcpy(ec, &tp->coal, sizeof(*ec));
8875	return 0;
8876}
8877
/* ethtool set_coalesce handler: validate and apply new interrupt
 * coalescing parameters.  On 5705-and-later chips the IRQ-context tick
 * limits and statistics-block limits stay at 0, which rejects any
 * non-zero request for features those chips lack.  Returns -EINVAL for
 * out-of-range values or settings that would disable rx or tx
 * interrupts entirely; the new values are pushed to hardware only when
 * the interface is running.
 */
static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;

	/* Only pre-5705 chips support these extra coalescing knobs. */
	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
	}

	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
		return -EINVAL;

	/* No rx interrupts will be generated if both are zero */
	if ((ec->rx_coalesce_usecs == 0) &&
	    (ec->rx_max_coalesced_frames == 0))
		return -EINVAL;

	/* No tx interrupts will be generated if both are zero */
	if ((ec->tx_coalesce_usecs == 0) &&
	    (ec->tx_max_coalesced_frames == 0))
		return -EINVAL;

	/* Only copy relevant parameters, ignore all others. */
	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_coalesce(tp, &tp->coal);
		tg3_full_unlock(tp);
	}
	return 0;
}
8931
/* ethtool operations supported by this driver. */
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.get_rx_csum		= tg3_get_rx_csum,
	.set_rx_csum		= tg3_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= tg3_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= tg3_set_tso,
	.self_test_count	= tg3_get_test_count,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.phys_id		= tg3_phys_id,
	.get_stats_count	= tg3_get_stats_count,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};
8969
/* Determine a serial EEPROM's size by probing for address wrap-around.
 * The signature word at offset 0 is re-read at doubling offsets; when
 * it reappears the address lines have wrapped, so the current offset is
 * the device size.  tp->nvram_size keeps its EEPROM_CHIP_SIZE default
 * if the signature is unrecognized or a read fails.
 */
static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
{
	u32 cursize, val, magic;

	tp->nvram_size = EEPROM_CHIP_SIZE;

	if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
		return;

	/* Only proceed for images carrying a known signature. */
	if ((magic != TG3_EEPROM_MAGIC) &&
	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
		return;

	/*
	 * Size the chip by reading offsets at increasing powers of two.
	 * When we encounter our validation signature, we know the addressing
	 * has wrapped around, and thus have our chip size.
	 */
	cursize = 0x10;

	while (cursize < tp->nvram_size) {
		if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
			return;

		if (val == magic)
			break;

		cursize <<= 1;
	}

	tp->nvram_size = cursize;
}
9003
/* Determine total NVRAM size.  Standard images record the size (in KB)
 * in the upper half of the word at offset 0xf0; selfboot-format images
 * fall back to EEPROM wrap-around probing; 0x80000 (512KB) is assumed
 * when no size is recorded.
 */
static void __devinit tg3_get_nvram_size(struct tg3 *tp)
{
	u32 val;

	if (tg3_nvram_read_swab(tp, 0, &val) != 0)
		return;

	/* Selfboot format */
	if (val != TG3_EEPROM_MAGIC) {
		tg3_get_eeprom_size(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
		if (val != 0) {
			/* Upper 16 bits hold the size in kilobytes. */
			tp->nvram_size = (val >> 16) * 1024;
			return;
		}
	}
	tp->nvram_size = 0x80000;
}
9025
9026static void __devinit tg3_get_nvram_info(struct tg3 *tp)
9027{
9028	u32 nvcfg1;
9029
9030	nvcfg1 = tr32(NVRAM_CFG1);
9031	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
9032		tp->tg3_flags2 |= TG3_FLG2_FLASH;
9033	}
9034	else {
9035		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
9036		tw32(NVRAM_CFG1, nvcfg1);
9037	}
9038
9039	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
9040	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
9041		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
9042			case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
9043				tp->nvram_jedecnum = JEDEC_ATMEL;
9044				tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
9045				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9046				break;
9047			case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
9048				tp->nvram_jedecnum = JEDEC_ATMEL;
9049                         	tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
9050				break;
9051			case FLASH_VENDOR_ATMEL_EEPROM:
9052				tp->nvram_jedecnum = JEDEC_ATMEL;
9053                         	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
9054				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9055				break;
9056			case FLASH_VENDOR_ST:
9057				tp->nvram_jedecnum = JEDEC_ST;
9058				tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
9059				tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9060				break;
9061			case FLASH_VENDOR_SAIFUN:
9062				tp->nvram_jedecnum = JEDEC_SAIFUN;
9063				tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
9064				break;
9065			case FLASH_VENDOR_SST_SMALL:
9066			case FLASH_VENDOR_SST_LARGE:
9067				tp->nvram_jedecnum = JEDEC_SST;
9068				tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
9069				break;
9070		}
9071	}
9072	else {
9073		tp->nvram_jedecnum = JEDEC_ATMEL;
9074		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
9075		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
9076	}
9077}
9078
/* Decode NVRAM_CFG1 on 5752 devices: detect TPM write protection, the
 * attached vendor part (Atmel EEPROM/flash or ST M45PExx flash) and,
 * for flash parts, the page size.  EEPROM parts use the full chip size
 * as the page size and have the compatibility bypass cleared.
 */
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
		case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			break;
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			break;
		case FLASH_5752VENDOR_ST_M45PE10:
		case FLASH_5752VENDOR_ST_M45PE20:
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			break;
	}

	if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
		/* Flash parts report their page size in NVRAM_CFG1. */
		switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
			case FLASH_5752PAGE_SIZE_256:
				tp->nvram_pagesize = 256;
				break;
			case FLASH_5752PAGE_SIZE_512:
				tp->nvram_pagesize = 512;
				break;
			case FLASH_5752PAGE_SIZE_1K:
				tp->nvram_pagesize = 1024;
				break;
			case FLASH_5752PAGE_SIZE_2K:
				tp->nvram_pagesize = 2048;
				break;
			case FLASH_5752PAGE_SIZE_4K:
				tp->nvram_pagesize = 4096;
				break;
			case FLASH_5752PAGE_SIZE_264:
				tp->nvram_pagesize = 264;
				break;
		}
	}
	else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
9139
/* Decode NVRAM_CFG1 on 5755 devices.  In addition to vendor/page-size
 * detection, the specific part also fixes the NVRAM size, which is
 * reduced when TPM protection reserves part of the device.
 */
static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
		case FLASH_5755VENDOR_ATMEL_FLASH_1:
		case FLASH_5755VENDOR_ATMEL_FLASH_2:
		case FLASH_5755VENDOR_ATMEL_FLASH_3:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 264;
			/* Size depends on the exact part and whether TPM
			 * protection reserves the upper region.
			 */
			if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1)
				tp->nvram_size = (protect ? 0x3e200 : 0x80000);
			else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
				tp->nvram_size = (protect ? 0x1f200 : 0x40000);
			else
				tp->nvram_size = (protect ? 0x1f200 : 0x20000);
			break;
		case FLASH_5752VENDOR_ST_M45PE10:
		case FLASH_5752VENDOR_ST_M45PE20:
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 256;
			if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
				tp->nvram_size = (protect ? 0x10000 : 0x20000);
			else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
				tp->nvram_size = (protect ? 0x10000 : 0x40000);
			else
				tp->nvram_size = (protect ? 0x20000 : 0x80000);
			break;
	}
}
9184
/* Decode NVRAM_CFG1 on 5787 devices: distinguish serial EEPROMs
 * (compatibility bypass cleared, chip-sized pages) from Atmel and ST
 * flash parts with their respective page sizes.
 */
static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
		case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
		case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
		case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

			nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
			tw32(NVRAM_CFG1, nvcfg1);
			break;
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_5755VENDOR_ATMEL_FLASH_1:
		case FLASH_5755VENDOR_ATMEL_FLASH_2:
		case FLASH_5755VENDOR_ATMEL_FLASH_3:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 264;
			break;
		case FLASH_5752VENDOR_ST_M45PE10:
		case FLASH_5752VENDOR_ST_M45PE20:
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			tp->tg3_flags2 |= TG3_FLG2_FLASH;
			tp->nvram_pagesize = 256;
			break;
	}
}
9222
/* 5906 devices always use a buffered Atmel serial EEPROM; no config
 * register decode is needed.
 */
static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
{
	tp->nvram_jedecnum = JEDEC_ATMEL;
	tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
}
9229
9230/* Chips other than 5700/5701 use the NVRAM for fetching info. */
9231static void __devinit tg3_nvram_init(struct tg3 *tp)
9232{
9233	tw32_f(GRC_EEPROM_ADDR,
9234	     (EEPROM_ADDR_FSM_RESET |
9235	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
9236	       EEPROM_ADDR_CLKPERD_SHIFT)));
9237
9238	msleep(1);
9239
9240	/* Enable seeprom accesses. */
9241	tw32_f(GRC_LOCAL_CTRL,
9242	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
9243	udelay(100);
9244
9245	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
9246	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
9247		tp->tg3_flags |= TG3_FLAG_NVRAM;
9248
9249		if (tg3_nvram_lock(tp)) {
9250			printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
9251			       "tg3_nvram_init failed.\n", tp->dev->name);
9252			return;
9253		}
9254		tg3_enable_nvram_access(tp);
9255
9256		tp->nvram_size = 0;
9257
9258		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9259			tg3_get_5752_nvram_info(tp);
9260		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9261			tg3_get_5755_nvram_info(tp);
9262		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
9263			tg3_get_5787_nvram_info(tp);
9264		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9265			tg3_get_5906_nvram_info(tp);
9266		else
9267			tg3_get_nvram_info(tp);
9268
9269		if (tp->nvram_size == 0)
9270			tg3_get_nvram_size(tp);
9271
9272		tg3_disable_nvram_access(tp);
9273		tg3_nvram_unlock(tp);
9274
9275	} else {
9276		tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
9277
9278		tg3_get_eeprom_size(tp);
9279	}
9280}
9281
/* Read one 32-bit word through the legacy serial-EEPROM state machine
 * (GRC_EEPROM_ADDR / GRC_EEPROM_DATA).  'offset' must be dword aligned
 * and within the addressable range.  Polls for completion for up to
 * ~1000ms.  Returns 0 on success, -EINVAL for a bad offset, -EBUSY on
 * timeout.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
					u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK ||
	    (offset % 4) != 0)
		return -EINVAL;

	/* Preserve unrelated bits; clear the fields we are about to set. */
	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	*val = tr32(GRC_EEPROM_DATA);
	return 0;
}
9315
9316#define NVRAM_CMD_TIMEOUT 10000
9317
9318static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
9319{
9320	int i;
9321
9322	tw32(NVRAM_CMD, nvram_cmd);
9323	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
9324		udelay(10);
9325		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
9326			udelay(10);
9327			break;
9328		}
9329	}
9330	if (i == NVRAM_CMD_TIMEOUT) {
9331		return -EBUSY;
9332	}
9333	return 0;
9334}
9335
9336static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
9337{
9338	if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9339	    (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9340	    (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9341	    (tp->nvram_jedecnum == JEDEC_ATMEL))
9342
9343		addr = ((addr / tp->nvram_pagesize) <<
9344			ATMEL_AT45DB0X1B_PAGE_POS) +
9345		       (addr % tp->nvram_pagesize);
9346
9347	return addr;
9348}
9349
9350static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
9351{
9352	if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
9353	    (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
9354	    (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
9355	    (tp->nvram_jedecnum == JEDEC_ATMEL))
9356
9357		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
9358			tp->nvram_pagesize) +
9359		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
9360
9361	return addr;
9362}
9363
/* Read one 32-bit word from NVRAM at logical byte offset 'offset'.
 * Devices without an NVRAM interface (5700/5701) go through the legacy
 * EEPROM state machine.  Otherwise the offset is translated to the
 * device's physical addressing, the NVRAM lock is taken, a single read
 * command is executed, and the data register is byte-swapped into *val.
 * Returns 0 or a negative errno.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = swab32(tr32(NVRAM_RDDATA));

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
9395
9396static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
9397{
9398	int err;
9399	u32 tmp;
9400
9401	err = tg3_nvram_read(tp, offset, &tmp);
9402	*val = swab32(tmp);
9403	return err;
9404}
9405
/* Write a block to a legacy serial EEPROM one 32-bit word at a time via
 * the GRC EEPROM state machine.  'offset' and 'len' are byte values and
 * the data is written in dword units; each word is polled to completion
 * for up to ~1000ms.  Returns 0 on success or -EBUSY on timeout.
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr, data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		tw32(GRC_EEPROM_DATA, cpu_to_le32(data));

		/* Ack any previous completion, then start this write. */
		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
9447
/* offset and length are dword aligned */
/* Write 'len' bytes at 'offset' to an unbuffered flash part, i.e. one
 * that needs an explicit page erase before reprogramming.  Each
 * affected page is read into a scratch buffer, merged with the new
 * data, erased (after a write-enable), then reprogrammed one dword at a
 * time with FIRST/LAST framing.  Returns 0 or a negative errno; a
 * write-disable command is issued on all exit paths.
 */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		phy_addr = offset & ~pagemask;

		/* Read the whole page so untouched bytes survive the erase. */
		for (j = 0; j < pagesize; j += 4) {
			if ((ret = tg3_nvram_read(tp, phy_addr + j,
						(u32 *) (tmp + j))))
				break;
		}
		if (ret)
			break;

	        page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		memcpy(tmp + page_off, buf, size);

		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

	        if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Reprogram the page one dword at a time. */
		for (j = 0; j < pagesize; j += 4) {
			u32 data;

	    		data = *((u32 *) (tmp + j));
			tw32(NVRAM_WRDATA, cpu_to_be32(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			/* Frame the burst with FIRST/LAST markers. */
			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
				break;
		}
		if (ret)
			break;
	}

	/* Always leave the part write-disabled. */
	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}
9543
/* offset and length are dword aligned */
/* Write 'len' bytes at 'offset' to a buffered flash or EEPROM part (no
 * explicit page erase needed).  Data is programmed one dword at a time
 * with FIRST/LAST framing at page boundaries; ST parts on chips other
 * than 5752/5755/5787 need an extra write-enable at the start of each
 * page, and EEPROMs frame every word as a complete write.  Returns 0 or
 * a negative errno from the NVRAM command engine.
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 data, page_off, phy_addr, nvram_cmd;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, cpu_to_be32(data));

	        page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

	        if ((page_off == 0) || (i == 0))
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		/* ST parts on older chips need a write-enable before each
		 * page burst.
		 */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {

			if ((ret = tg3_nvram_exec_cmd(tp,
				NVRAM_CMD_WREN | NVRAM_CMD_GO |
				NVRAM_CMD_DONE)))

				break;
		}
		if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
			break;
	}
	return ret;
}
9594
9595/* offset and length are dword aligned */
9596static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
9597{
9598	int ret;
9599
9600	if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9601		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
9602		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
9603		udelay(40);
9604	}
9605
9606	if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
9607		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
9608	}
9609	else {
9610		u32 grc_mode;
9611
9612		ret = tg3_nvram_lock(tp);
9613		if (ret)
9614			return ret;
9615
9616		tg3_enable_nvram_access(tp);
9617		if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
9618		    !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
9619			tw32(NVRAM_WRITE1, 0x406);
9620
9621		grc_mode = tr32(GRC_MODE);
9622		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
9623
9624		if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
9625			!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
9626
9627			ret = tg3_nvram_write_block_buffered(tp, offset, len,
9628				buf);
9629		}
9630		else {
9631			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
9632				buf);
9633		}
9634
9635		grc_mode = tr32(GRC_MODE);
9636		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
9637
9638		tg3_disable_nvram_access(tp);
9639		tg3_nvram_unlock(tp);
9640	}
9641
9642	if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
9643		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9644		udelay(40);
9645	}
9646
9647	return ret;
9648}
9649
/* One row of the board lookup table: a PCI subsystem vendor/device pair
 * and the PHY id present on that board design (0 entries appear to mark
 * boards without a known copper PHY -- not verifiable from this file).
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};
9654
/* Known board designs, keyed by PCI subsystem IDs, used to identify the
 * on-board PHY via lookup_by_subsys().
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },		    /* BCM95700A9 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },		    /* BCM95701A7 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
	{ PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
	{ PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */

	/* 3com boards. */
	{ PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
	{ PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
	{ PCI_VENDOR_ID_3COM, 0x1004, 0 },		/* 3C996SX */
	{ PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
	{ PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */

	/* DELL boards. */
	{ PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
	{ PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
	{ PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
	{ PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */

	/* Compaq boards. */
	{ PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
	{ PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
	{ PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },		  /* CHANGELING */
	{ PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
	{ PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */

	/* IBM boards. */
	{ PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
};
9692
9693static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
9694{
9695	int i;
9696
9697	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
9698		if ((subsys_id_to_phy_id[i].subsys_vendor ==
9699		     tp->pdev->subsystem_vendor) &&
9700		    (subsys_id_to_phy_id[i].subsys_devid ==
9701		     tp->pdev->subsystem_device))
9702			return &subsys_id_to_phy_id[i];
9703	}
9704	return NULL;
9705}
9706
/* tg3_get_eeprom_hw_cfg - pull hardware configuration out of NVRAM/SRAM.
 *
 * Reads the "NIC SRAM data" region that bootcode populated from the
 * EEPROM and uses it to initialize tp->phy_id, tp->led_ctrl and a
 * number of tg3_flags/tg3_flags2 bits (WOL capability, eeprom write
 * protect, ASF, serdes, ASPM workaround, ...).  Runs once at probe
 * time, before tg3_set_power_state().
 */
static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;
	u16 pmcsr;

	/* On some early chips the SRAM cannot be accessed in D3hot state,
	 * so need make sure we're in D0.
	 */
	pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;	/* power state field 0 == D0 */
	pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
	msleep(1);

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed.  Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	/* Defaults in case the SRAM signature check below fails. */
	tp->phy_id = PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default.  */
	tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;

	/* 5906 is handled via chip registers instead of the SRAM-based
	 * parsing below, then we return early.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			/* Not a LOM: treat as NIC with writable eeprom. */
			tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
			tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
		}
		if (tr32(VCPU_CFGSHDW) & VCPU_CFGSHDW_ASPM_DBNC)
			tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
		return;
	}

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		/* CFG_2 is only read on chips newer than 5700/5701/5703
		 * and only for a sane-looking bootcode version; it stays
		 * 0 otherwise.
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		/* Repack the SRAM PHY id into the driver's internal
		 * phy_id layout (same packing tg3_phy_probe() builds
		 * from MII_PHYSID1/MII_PHYSID2).
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
				tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
			else
				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
		}

		/* 5750+ parts keep the LED mode in CFG_2. */
		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			/* 5750 A0/A1 do not get the extra PHY bits. */
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		};

		/* Dell 5700/5701 boards are forced to PHY_2 LED mode
		 * regardless of what bootcode reported.
		 */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
			/* NOTE(review): these two Arima subsystem ids set
			 * the WP cfg bit yet the flag is cleared for them
			 * anyway -- presumably boards that misreport;
			 * confirm against board documentation.
			 */
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
		} else {
			tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
			tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}
		/* Serdes boards only keep WOL if bootcode says they may. */
		if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;

		if (cfg2 & (1 << 17))
			tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;

		if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
				tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
		}
	}
}
9884
9885static int __devinit tg3_phy_probe(struct tg3 *tp)
9886{
9887	u32 hw_phy_id_1, hw_phy_id_2;
9888	u32 hw_phy_id, hw_phy_id_masked;
9889	int err;
9890
9891	/* Reading the PHY ID register can conflict with ASF
9892	 * firwmare access to the PHY hardware.
9893	 */
9894	err = 0;
9895	if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
9896		hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
9897	} else {
9898		/* Now read the physical PHY_ID from the chip and verify
9899		 * that it is sane.  If it doesn't look good, we fall back
9900		 * to either the hard-coded table based PHY_ID and failing
9901		 * that the value found in the eeprom area.
9902		 */
9903		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
9904		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
9905
9906		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
9907		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
9908		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
9909
9910		hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
9911	}
9912
9913	if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
9914		tp->phy_id = hw_phy_id;
9915		if (hw_phy_id_masked == PHY_ID_BCM8002)
9916			tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9917		else
9918			tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
9919	} else {
9920		if (tp->phy_id != PHY_ID_INVALID) {
9921			/* Do nothing, phy ID already set up in
9922			 * tg3_get_eeprom_hw_cfg().
9923			 */
9924		} else {
9925			struct subsys_tbl_ent *p;
9926
9927			/* No eeprom signature?  Try the hardcoded
9928			 * subsys device table.
9929			 */
9930			p = lookup_by_subsys(tp);
9931			if (!p)
9932				return -ENODEV;
9933
9934			tp->phy_id = p->phy_id;
9935			if (!tp->phy_id ||
9936			    tp->phy_id == PHY_ID_BCM8002)
9937				tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
9938		}
9939	}
9940
9941	if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
9942	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
9943		u32 bmsr, adv_reg, tg3_ctrl, mask;
9944
9945		tg3_readphy(tp, MII_BMSR, &bmsr);
9946		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
9947		    (bmsr & BMSR_LSTATUS))
9948			goto skip_phy_reset;
9949
9950		err = tg3_phy_reset(tp);
9951		if (err)
9952			return err;
9953
9954		adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
9955			   ADVERTISE_100HALF | ADVERTISE_100FULL |
9956			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
9957		tg3_ctrl = 0;
9958		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
9959			tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
9960				    MII_TG3_CTRL_ADV_1000_FULL);
9961			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
9962			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
9963				tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
9964					     MII_TG3_CTRL_ENABLE_AS_MASTER);
9965		}
9966
9967		mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
9968			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
9969			ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
9970		if (!tg3_copper_is_advertising_all(tp, mask)) {
9971			tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9972
9973			if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9974				tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9975
9976			tg3_writephy(tp, MII_BMCR,
9977				     BMCR_ANENABLE | BMCR_ANRESTART);
9978		}
9979		tg3_phy_set_wirespeed(tp);
9980
9981		tg3_writephy(tp, MII_ADVERTISE, adv_reg);
9982		if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9983			tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
9984	}
9985
9986skip_phy_reset:
9987	if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
9988		err = tg3_init_5401phy_dsp(tp);
9989		if (err)
9990			return err;
9991	}
9992
9993	if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
9994		err = tg3_init_5401phy_dsp(tp);
9995	}
9996
9997	if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9998		tp->link_config.advertising =
9999			(ADVERTISED_1000baseT_Half |
10000			 ADVERTISED_1000baseT_Full |
10001			 ADVERTISED_Autoneg |
10002			 ADVERTISED_FIBRE);
10003	if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
10004		tp->link_config.advertising &=
10005			~(ADVERTISED_1000baseT_Half |
10006			  ADVERTISED_1000baseT_Full);
10007
10008	return err;
10009}
10010
/* tg3_read_partno - read the board part number string.
 *
 * Fills tp->board_part_number from the PCI Vital Product Data.  The
 * 256-byte VPD image is fetched either directly from NVRAM (when it
 * carries the tg3 EEPROM magic) or through the PCI VPD capability
 * registers, then parsed for a VPD-R "PN" keyword.  Falls back to
 * "BCM95906" / "none" when no part number can be found.
 */
static void __devinit tg3_read_partno(struct tg3 *tp)
{
	unsigned char vpd_data[256];
	unsigned int i;
	u32 magic;

	if (tg3_nvram_read_swab(tp, 0x0, &magic))
		goto out_not_found;

	if (magic == TG3_EEPROM_MAGIC) {
		/* The VPD image lives at NVRAM offset 0x100. */
		for (i = 0; i < 256; i += 4) {
			u32 tmp;

			if (tg3_nvram_read(tp, 0x100 + i, &tmp))
				goto out_not_found;

			vpd_data[i + 0] = ((tmp >>  0) & 0xff);
			vpd_data[i + 1] = ((tmp >>  8) & 0xff);
			vpd_data[i + 2] = ((tmp >> 16) & 0xff);
			vpd_data[i + 3] = ((tmp >> 24) & 0xff);
		}
	} else {
		int vpd_cap;

		/* No tg3 magic: read through the PCI VPD capability,
		 * one dword per address write / completion poll cycle.
		 */
		vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
		for (i = 0; i < 256; i += 4) {
			u32 tmp, j = 0;
			u16 tmp16;

			pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
					      i);
			/* Poll the completion flag (bit 15 of VPD_ADDR)
			 * for up to 100 iterations of ~1ms each.
			 */
			while (j++ < 100) {
				pci_read_config_word(tp->pdev, vpd_cap +
						     PCI_VPD_ADDR, &tmp16);
				if (tmp16 & 0x8000)
					break;
				msleep(1);
			}
			if (!(tmp16 & 0x8000))
				goto out_not_found;

			pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
					      &tmp);
			/* VPD data is little-endian; swab on BE hosts. */
			tmp = cpu_to_le32(tmp);
			memcpy(&vpd_data[i], &tmp, 4);
		}
	}

	/* Now parse and find the part number. */
	for (i = 0; i < 254; ) {
		unsigned char val = vpd_data[i];
		unsigned int block_end;

		/* Large resource tags 0x82 (identifier string) and 0x91
		 * (VPD-W): skip past them via their 16-bit length field.
		 */
		if (val == 0x82 || val == 0x91) {
			i = (i + 3 +
			     (vpd_data[i + 1] +
			      (vpd_data[i + 2] << 8)));
			continue;
		}

		/* Anything but a VPD-R tag (0x90) ends the search. */
		if (val != 0x90)
			goto out_not_found;

		block_end = (i + 3 +
			     (vpd_data[i + 1] +
			      (vpd_data[i + 2] << 8)));
		i += 3;

		if (block_end > 256)
			goto out_not_found;

		/* Walk the VPD-R keyword list looking for "PN". */
		while (i < (block_end - 2)) {
			if (vpd_data[i + 0] == 'P' &&
			    vpd_data[i + 1] == 'N') {
				int partno_len = vpd_data[i + 2];

				i += 3;
				/* NOTE(review): a partno_len of exactly 24
				 * leaves board_part_number without a NUL
				 * terminator -- confirm the field is larger
				 * than 24 or that all readers bound it.
				 */
				if (partno_len > 24 || (partno_len + i) > 256)
					goto out_not_found;

				memcpy(tp->board_part_number,
				       &vpd_data[i], partno_len);

				/* Success. */
				return;
			}
			/* Skip this keyword: 3-byte header + data. */
			i += 3 + vpd_data[i + 2];
		}

		/* Part number not found. */
		goto out_not_found;
	}

out_not_found:
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		strcpy(tp->board_part_number, "BCM95906");
	else
		strcpy(tp->board_part_number, "none");
}
10110
10111static void __devinit tg3_read_fw_ver(struct tg3 *tp)
10112{
10113	u32 val, offset, start;
10114
10115	if (tg3_nvram_read_swab(tp, 0, &val))
10116		return;
10117
10118	if (val != TG3_EEPROM_MAGIC)
10119		return;
10120
10121	if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
10122	    tg3_nvram_read_swab(tp, 0x4, &start))
10123		return;
10124
10125	offset = tg3_nvram_logical_addr(tp, offset);
10126	if (tg3_nvram_read_swab(tp, offset, &val))
10127		return;
10128
10129	if ((val & 0xfc000000) == 0x0c000000) {
10130		u32 ver_offset, addr;
10131		int i;
10132
10133		if (tg3_nvram_read_swab(tp, offset + 4, &val) ||
10134		    tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
10135			return;
10136
10137		if (val != 0)
10138			return;
10139
10140		addr = offset + ver_offset - start;
10141		for (i = 0; i < 16; i += 4) {
10142			if (tg3_nvram_read(tp, addr + i, &val))
10143				return;
10144
10145			val = cpu_to_le32(val);
10146			memcpy(tp->fw_ver + i, &val, 4);
10147		}
10148	}
10149}
10150
10151static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
10152
10153static int __devinit tg3_get_invariants(struct tg3 *tp)
10154{
10155	static struct pci_device_id write_reorder_chipsets[] = {
10156		{ PCI_DEVICE(PCI_VENDOR_ID_AMD,
10157		             PCI_DEVICE_ID_AMD_FE_GATE_700C) },
10158		{ PCI_DEVICE(PCI_VENDOR_ID_AMD,
10159		             PCI_DEVICE_ID_AMD_8131_BRIDGE) },
10160		{ PCI_DEVICE(PCI_VENDOR_ID_VIA,
10161			     PCI_DEVICE_ID_VIA_8385_0) },
10162		{ },
10163	};
10164	u32 misc_ctrl_reg;
10165	u32 cacheline_sz_reg;
10166	u32 pci_state_reg, grc_misc_cfg;
10167	u32 val;
10168	u16 pci_cmd;
10169	int err, pcie_cap;
10170
10171	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10172	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
10173	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10174
10175	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10176			      &misc_ctrl_reg);
10177
10178	tp->pci_chip_rev_id = (misc_ctrl_reg >>
10179			       MISC_HOST_CTRL_CHIPREV_SHIFT);
10180
10181	/* Wrong chip ID in 5752 A0. This code can be removed later
10182	 * as A0 is not in production.
10183	 */
10184	if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
10185		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
10186
10187	if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
10188	    (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
10189		static struct tg3_dev_id {
10190			u32	vendor;
10191			u32	device;
10192			u32	rev;
10193		} ich_chipsets[] = {
10194			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
10195			  PCI_ANY_ID },
10196			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
10197			  PCI_ANY_ID },
10198			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
10199			  0xa },
10200			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
10201			  PCI_ANY_ID },
10202			{ },
10203		};
10204		struct tg3_dev_id *pci_id = &ich_chipsets[0];
10205		struct pci_dev *bridge = NULL;
10206
10207		while (pci_id->vendor != 0) {
10208			bridge = pci_get_device(pci_id->vendor, pci_id->device,
10209						bridge);
10210			if (!bridge) {
10211				pci_id++;
10212				continue;
10213			}
10214			if (pci_id->rev != PCI_ANY_ID) {
10215				u8 rev;
10216
10217				pci_read_config_byte(bridge, PCI_REVISION_ID,
10218						     &rev);
10219				if (rev > pci_id->rev)
10220					continue;
10221			}
10222			if (bridge->subordinate &&
10223			    (bridge->subordinate->number ==
10224			     tp->pdev->bus->number)) {
10225
10226				tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
10227				pci_dev_put(bridge);
10228				break;
10229			}
10230		}
10231	}
10232
10233	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
10234	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
10235		tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
10236		tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10237		tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
10238	}
10239	else {
10240		struct pci_dev *bridge = NULL;
10241
10242		do {
10243			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
10244						PCI_DEVICE_ID_SERVERWORKS_EPB,
10245						bridge);
10246			if (bridge && bridge->subordinate &&
10247			    (bridge->subordinate->number <=
10248			     tp->pdev->bus->number) &&
10249			    (bridge->subordinate->subordinate >=
10250			     tp->pdev->bus->number)) {
10251				tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
10252				pci_dev_put(bridge);
10253				break;
10254			}
10255		} while (bridge);
10256	}
10257
10258	/* Initialize misc host control in PCI block. */
10259	tp->misc_host_ctrl |= (misc_ctrl_reg &
10260			       MISC_HOST_CTRL_CHIPREV);
10261	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10262			       tp->misc_host_ctrl);
10263
10264	pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10265			      &cacheline_sz_reg);
10266
10267	tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
10268	tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
10269	tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
10270	tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
10271
10272	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
10273	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
10274		tp->pdev_peer = tg3_find_peer(tp);
10275
10276	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
10277	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
10278	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10279	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10280	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
10281	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10282		tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
10283
10284	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
10285	    (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
10286		tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
10287
10288	if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
10289		tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
10290		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
10291		    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
10292		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
10293		     tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
10294		     tp->pdev_peer == tp->pdev))
10295			tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
10296
10297		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10298		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10299		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10300			tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
10301			tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
10302		} else {
10303			tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
10304			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
10305				ASIC_REV_5750 &&
10306	     		    tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
10307				tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
10308		}
10309	}
10310
10311	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
10312	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 &&
10313	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
10314	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755 &&
10315	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787 &&
10316	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
10317		tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
10318
10319	pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
10320	if (pcie_cap != 0) {
10321		tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
10322		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10323			u16 lnkctl;
10324
10325			pci_read_config_word(tp->pdev,
10326					     pcie_cap + PCI_EXP_LNKCTL,
10327					     &lnkctl);
10328			if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
10329				tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
10330		}
10331	}
10332
10333	/* If we have an AMD 762 or VIA K8T800 chipset, write
10334	 * reordering to the mailbox registers done by the host
10335	 * controller can cause major troubles.  We read back from
10336	 * every mailbox register write to force the writes to be
10337	 * posted to the chip in order.
10338	 */
10339	if (pci_dev_present(write_reorder_chipsets) &&
10340	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
10341		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
10342
10343	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10344	    tp->pci_lat_timer < 64) {
10345		tp->pci_lat_timer = 64;
10346
10347		cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
10348		cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
10349		cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
10350		cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
10351
10352		pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
10353				       cacheline_sz_reg);
10354	}
10355
10356	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10357			      &pci_state_reg);
10358
10359	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
10360		tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
10361
10362		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
10363			u32 pm_reg;
10364			u16 pci_cmd;
10365
10366			tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10367
10368			/* The chip can have it's power management PCI config
10369			 * space registers clobbered due to this bug.
10370			 * So explicitly force the chip into D0 here.
10371			 */
10372			pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10373					      &pm_reg);
10374			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
10375			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
10376			pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
10377					       pm_reg);
10378
10379			/* Also, force SERR#/PERR# in PCI command. */
10380			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10381			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
10382			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10383		}
10384	}
10385
10386	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
10387		tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
10388
10389	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
10390		tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
10391	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
10392		tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
10393
10394	/* Chip-specific fixup from Broadcom driver */
10395	if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
10396	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
10397		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
10398		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
10399	}
10400
10401	/* Default fast path register access methods */
10402	tp->read32 = tg3_read32;
10403	tp->write32 = tg3_write32;
10404	tp->read32_mbox = tg3_read32;
10405	tp->write32_mbox = tg3_write32;
10406	tp->write32_tx_mbox = tg3_write32;
10407	tp->write32_rx_mbox = tg3_write32;
10408
10409	if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
10410		tp->write32 = tg3_write_indirect_reg32;
10411	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
10412		 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
10413		  tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
10414		tp->write32 = tg3_write_flush_reg32;
10415	}
10416
10417
10418	if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
10419	    (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
10420		tp->write32_tx_mbox = tg3_write32_tx_mbox;
10421		if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
10422			tp->write32_rx_mbox = tg3_write_flush_reg32;
10423	}
10424
10425	if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
10426		tp->read32 = tg3_read_indirect_reg32;
10427		tp->write32 = tg3_write_indirect_reg32;
10428		tp->read32_mbox = tg3_read_indirect_mbox;
10429		tp->write32_mbox = tg3_write_indirect_mbox;
10430		tp->write32_tx_mbox = tg3_write_indirect_mbox;
10431		tp->write32_rx_mbox = tg3_write_indirect_mbox;
10432
10433		iounmap(tp->regs);
10434		tp->regs = NULL;
10435
10436		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10437		pci_cmd &= ~PCI_COMMAND_MEMORY;
10438		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10439	}
10440	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10441		tp->read32_mbox = tg3_read32_mbox_5906;
10442		tp->write32_mbox = tg3_write32_mbox_5906;
10443		tp->write32_tx_mbox = tg3_write32_mbox_5906;
10444		tp->write32_rx_mbox = tg3_write32_mbox_5906;
10445	}
10446
10447	if (tp->write32 == tg3_write_indirect_reg32 ||
10448	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
10449	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10450	      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
10451		tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
10452
10453	/* Get eeprom hw config before calling tg3_set_power_state().
10454	 * In particular, the TG3_FLG2_IS_NIC flag must be
10455	 * determined before calling tg3_set_power_state() so that
10456	 * we know whether or not to switch out of Vaux power.
10457	 * When the flag is set, it means that GPIO1 is used for eeprom
10458	 * write protect and also implies that it is a LOM where GPIOs
10459	 * are not used to switch power.
10460	 */
10461	tg3_get_eeprom_hw_cfg(tp);
10462
10463	/* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
10464	 * GPIO1 driven high will bring 5700's external PHY out of reset.
10465	 * It is also used as eeprom write protect on LOMs.
10466	 */
10467	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
10468	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10469	    (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
10470		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10471				       GRC_LCLCTRL_GPIO_OUTPUT1);
10472	/* Unused GPIO3 must be driven as output on 5752 because there
10473	 * are no pull-up resistors on unused GPIO pins.
10474	 */
10475	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10476		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
10477
10478	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10479		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
10480
10481	/* Force the chip into D0. */
10482	err = tg3_set_power_state(tp, PCI_D0);
10483	if (err) {
10484		printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
10485		       pci_name(tp->pdev));
10486		return err;
10487	}
10488
10489	/* 5700 B0 chips do not support checksumming correctly due
10490	 * to hardware bugs.
10491	 */
10492	if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
10493		tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
10494
10495	/* Derive initial jumbo mode from MTU assigned in
10496	 * ether_setup() via the alloc_etherdev() call
10497	 */
10498	if (tp->dev->mtu > ETH_DATA_LEN &&
10499	    !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
10500		tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
10501
10502	/* Determine WakeOnLan speed to use. */
10503	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10504	    tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
10505	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
10506	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
10507		tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
10508	} else {
10509		tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
10510	}
10511
10512	/* A few boards don't want Ethernet@WireSpeed phy feature */
10513	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
10514	    ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
10515	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
10516	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
10517	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
10518	    (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
10519		tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
10520
10521	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
10522	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
10523		tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
10524	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
10525		tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
10526
10527	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10528		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10529		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) {
10530			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
10531			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
10532				tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
10533			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
10534				tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
10535		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
10536			tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
10537	}
10538
10539	tp->coalesce_mode = 0;
10540	if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
10541	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
10542		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
10543
10544	/* Initialize MAC MI mode, polling disabled. */
10545	tw32_f(MAC_MI_MODE, tp->mi_mode);
10546	udelay(80);
10547
10548	/* Initialize data/descriptor byte/word swapping. */
10549	val = tr32(GRC_MODE);
10550	val &= GRC_MODE_HOST_STACKUP;
10551	tw32(GRC_MODE, val | tp->grc_mode);
10552
10553	tg3_switch_clocks(tp);
10554
10555	/* Clear this out for sanity. */
10556	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10557
10558	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
10559			      &pci_state_reg);
10560	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
10561	    (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
10562		u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
10563
10564		if (chiprevid == CHIPREV_ID_5701_A0 ||
10565		    chiprevid == CHIPREV_ID_5701_B0 ||
10566		    chiprevid == CHIPREV_ID_5701_B2 ||
10567		    chiprevid == CHIPREV_ID_5701_B5) {
10568			void __iomem *sram_base;
10569
10570			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
10571
10572			writel(0x00000000, sram_base);
10573			writel(0x00000000, sram_base + 4);
10574			writel(0xffffffff, sram_base + 4);
10575			if (readl(sram_base) != 0x00000000)
10576				tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
10577		}
10578	}
10579
10580	udelay(50);
10581	tg3_nvram_init(tp);
10582
10583	grc_misc_cfg = tr32(GRC_MISC_CFG);
10584	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
10585
10586	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10587	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
10588	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
10589		tp->tg3_flags2 |= TG3_FLG2_IS_5788;
10590
10591	if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
10592	    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
10593		tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
10594	if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
10595		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
10596				      HOSTCC_MODE_CLRTICK_TXBD);
10597
10598		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
10599		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
10600				       tp->misc_host_ctrl);
10601	}
10602
10603	/* these are limited to 10/100 only */
10604	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
10605	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
10606	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
10607	     tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10608	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
10609	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
10610	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
10611	    (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
10612	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
10613	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
10614	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
10615	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10616		tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
10617
10618	err = tg3_phy_probe(tp);
10619	if (err) {
10620		printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
10621		       pci_name(tp->pdev), err);
10622		/* ... but do not return immediately ... */
10623	}
10624
10625	tg3_read_partno(tp);
10626	tg3_read_fw_ver(tp);
10627
10628	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
10629		tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10630	} else {
10631		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10632			tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
10633		else
10634			tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
10635	}
10636
10637	/* 5700 {AX,BX} chips have a broken status block link
10638	 * change bit implementation, so we must use the
10639	 * status register in those cases.
10640	 */
10641	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
10642		tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
10643	else
10644		tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
10645
10646	/* The led_ctrl is set during tg3_phy_probe, here we might
10647	 * have to force the link status polling mechanism based
10648	 * upon subsystem IDs.
10649	 */
10650	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
10651	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
10652	    !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
10653		tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
10654				  TG3_FLAG_USE_LINKCHG_REG);
10655	}
10656
10657	/* For all SERDES we poll the MAC status register. */
10658	if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10659		tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
10660	else
10661		tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
10662
10663	/* All chips before 5787 can get confused if TX buffers
10664	 * straddle the 4GB address boundary in some cases.
10665	 */
10666	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10667	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10668	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10669		tp->dev->hard_start_xmit = tg3_start_xmit;
10670	else
10671		tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
10672
10673	tp->rx_offset = 2;
10674	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
10675	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
10676		tp->rx_offset = 0;
10677
10678	tp->rx_std_max_post = TG3_RX_RING_SIZE;
10679
10680	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
10681	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
10682	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10683		tp->rx_std_max_post = 8;
10684
10685	/* By default, disable wake-on-lan.  User can change this
10686	 * using ETHTOOL_SWOL.
10687	 */
10688	tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
10689
10690	if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
10691		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
10692				     PCIE_PWR_MGMT_L1_THRESH_MSK;
10693
10694	return err;
10695}
10696
10697#ifdef CONFIG_SPARC
10698static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
10699{
10700	struct net_device *dev = tp->dev;
10701	struct pci_dev *pdev = tp->pdev;
10702	struct device_node *dp = pci_device_to_OF_node(pdev);
10703	const unsigned char *addr;
10704	int len;
10705
10706	addr = of_get_property(dp, "local-mac-address", &len);
10707	if (addr && len == 6) {
10708		memcpy(dev->dev_addr, addr, 6);
10709		memcpy(dev->perm_addr, dev->dev_addr, 6);
10710		return 0;
10711	}
10712	return -ENODEV;
10713}
10714
10715static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
10716{
10717	struct net_device *dev = tp->dev;
10718
10719	memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
10720	memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
10721	return 0;
10722}
10723#endif
10724
10725static int __devinit tg3_get_device_address(struct tg3 *tp)
10726{
10727	struct net_device *dev = tp->dev;
10728	u32 hi, lo, mac_offset;
10729	int addr_ok = 0;
10730
10731#ifdef CONFIG_SPARC
10732	if (!tg3_get_macaddr_sparc(tp))
10733		return 0;
10734#endif
10735
10736	mac_offset = 0x7c;
10737	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
10738	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
10739		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
10740			mac_offset = 0xcc;
10741		if (tg3_nvram_lock(tp))
10742			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
10743		else
10744			tg3_nvram_unlock(tp);
10745	}
10746	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10747		mac_offset = 0x10;
10748
10749	/* First try to get it from MAC address mailbox. */
10750	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
10751	if ((hi >> 16) == 0x484b) {
10752		dev->dev_addr[0] = (hi >>  8) & 0xff;
10753		dev->dev_addr[1] = (hi >>  0) & 0xff;
10754
10755		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
10756		dev->dev_addr[2] = (lo >> 24) & 0xff;
10757		dev->dev_addr[3] = (lo >> 16) & 0xff;
10758		dev->dev_addr[4] = (lo >>  8) & 0xff;
10759		dev->dev_addr[5] = (lo >>  0) & 0xff;
10760
10761		/* Some old bootcode may report a 0 MAC address in SRAM */
10762		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
10763	}
10764	if (!addr_ok) {
10765		/* Next, try NVRAM. */
10766		if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
10767		    !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
10768			dev->dev_addr[0] = ((hi >> 16) & 0xff);
10769			dev->dev_addr[1] = ((hi >> 24) & 0xff);
10770			dev->dev_addr[2] = ((lo >>  0) & 0xff);
10771			dev->dev_addr[3] = ((lo >>  8) & 0xff);
10772			dev->dev_addr[4] = ((lo >> 16) & 0xff);
10773			dev->dev_addr[5] = ((lo >> 24) & 0xff);
10774		}
10775		/* Finally just fetch it out of the MAC control regs. */
10776		else {
10777			hi = tr32(MAC_ADDR_0_HIGH);
10778			lo = tr32(MAC_ADDR_0_LOW);
10779
10780			dev->dev_addr[5] = lo & 0xff;
10781			dev->dev_addr[4] = (lo >> 8) & 0xff;
10782			dev->dev_addr[3] = (lo >> 16) & 0xff;
10783			dev->dev_addr[2] = (lo >> 24) & 0xff;
10784			dev->dev_addr[1] = hi & 0xff;
10785			dev->dev_addr[0] = (hi >> 8) & 0xff;
10786		}
10787	}
10788
10789	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
10790#ifdef CONFIG_SPARC64
10791		if (!tg3_get_default_macaddr_sparc(tp))
10792			return 0;
10793#endif
10794		return -EINVAL;
10795	}
10796	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
10797	return 0;
10798}
10799
10800#define BOUNDARY_SINGLE_CACHELINE	1
10801#define BOUNDARY_MULTI_CACHELINE	2
10802
/* Merge DMA read/write boundary bits into a TG3PCI_DMA_RW_CTRL value.
 *
 * @tp:  device state (chip revision and bus-mode flags)
 * @val: DMA_RW_CTRL value to OR the boundary bits into
 *
 * Returns @val with boundary bits merged in.  On chips other than
 * 5700/5701 that are not PCI Express the hardware ignores these bits,
 * so @val is returned unchanged.  The chosen encoding depends on the
 * bus type (PCI-X / PCI-E / plain PCI) and the PCI cache line size.
 */
static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	/* PCI_CACHE_LINE_SIZE is in 32-bit words; 0 means "not set",
	 * which is treated as the 1024-byte maximum.
	 */
	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
		goto out;

	/* Per-architecture policy: some RISC platforms want bursts
	 * confined to one cache line, others tolerate a few lines,
	 * everything else leaves the boundary bits alone (goal == 0).
	 */
#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
		/* PCI-X: the encoding only distinguishes 128/256/384
		 * byte boundaries, so small cache lines collapse to
		 * the closest supported value.
		 */
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		};
	} else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* PCI-E: only the write boundary is controllable, and
		 * only 64 or 128 bytes are available.
		 */
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		};
	} else {
		/* Conventional PCI: boundaries from 16 bytes up to 1KB.
		 * For BOUNDARY_SINGLE_CACHELINE the case matching the
		 * cache line size breaks out; otherwise we fall through
		 * to larger (multi-line) boundaries.
		 */
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		};
	}

out:
	return val;
}
10939
/* Run one host<->NIC DMA transfer of @size bytes through an internal
 * test descriptor, then poll for completion.
 *
 * @tp:        device state
 * @buf:       kernel virtual address of the test buffer (the hardware
 *             works from @buf_dma; @buf is unused here)
 * @buf_dma:   DMA address of the test buffer
 * @size:      transfer length in bytes
 * @to_device: nonzero = read DMA (host memory -> NIC),
 *             zero = write DMA (NIC -> host memory)
 *
 * Returns 0 when the completion appears within 40 * 100us polls,
 * -ENODEV on timeout.
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Clear the completion FIFOs and DMA engine status first. */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	/* Build the test descriptor pointing at the host buffer. */
	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor word by word into NIC SRAM via the PCI
	 * memory window in config space.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Kick off the transfer by enqueueing the descriptor address. */
	if (to_device) {
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	} else {
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
	}

	/* Poll the matching completion FIFO until our descriptor
	 * address shows up, or give up after ~4ms.
	 */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
11020
11021#define TEST_BUFFER_SIZE	0x2000
11022
/* Choose and program TG3PCI_DMA_RW_CTRL for this chip/bus combination
 * and, on 5700/5701 only, empirically verify DMA with a write/read
 * loopback of an 8KB pattern buffer (those chips have a write DMA bug
 * that corrupts data at large write boundaries).
 *
 * Returns 0 on success, -ENOMEM if the test buffer cannot be
 * allocated, -ENODEV if the DMA test fails even at the smallest
 * (16-byte) write boundary.
 */
static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret;

	buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	/* Base PCI command codes for read/write. */
	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	/* Merge in chip- and bus-specific watermark settings. */
	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	/* 5703/5704: clear the low nibble (minimum DMA size bits). */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);


	/* Only 5700/5701 need the empirical loopback test below. */
	ret = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	/* Loop: test at the current boundary; on corruption, fall back
	 * to the 16-byte boundary and retry once.
	 */
	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
			break;
		}

		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);

			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		static struct pci_device_id dma_wait_state_chipsets[] = {
			{ PCI_DEVICE(PCI_VENDOR_ID_APPLE,
				     PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
			{ },
		};

		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		}
		else
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
11179
11180static void __devinit tg3_init_link_config(struct tg3 *tp)
11181{
11182	tp->link_config.advertising =
11183		(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11184		 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11185		 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
11186		 ADVERTISED_Autoneg | ADVERTISED_MII);
11187	tp->link_config.speed = SPEED_INVALID;
11188	tp->link_config.duplex = DUPLEX_INVALID;
11189	tp->link_config.autoneg = AUTONEG_ENABLE;
11190	tp->link_config.active_speed = SPEED_INVALID;
11191	tp->link_config.active_duplex = DUPLEX_INVALID;
11192	tp->link_config.phy_is_low_power = 0;
11193	tp->link_config.orig_speed = SPEED_INVALID;
11194	tp->link_config.orig_duplex = DUPLEX_INVALID;
11195	tp->link_config.orig_autoneg = AUTONEG_INVALID;
11196}
11197
11198static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
11199{
11200	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11201		tp->bufmgr_config.mbuf_read_dma_low_water =
11202			DEFAULT_MB_RDMA_LOW_WATER_5705;
11203		tp->bufmgr_config.mbuf_mac_rx_low_water =
11204			DEFAULT_MB_MACRX_LOW_WATER_5705;
11205		tp->bufmgr_config.mbuf_high_water =
11206			DEFAULT_MB_HIGH_WATER_5705;
11207		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11208			tp->bufmgr_config.mbuf_mac_rx_low_water =
11209				DEFAULT_MB_MACRX_LOW_WATER_5906;
11210			tp->bufmgr_config.mbuf_high_water =
11211				DEFAULT_MB_HIGH_WATER_5906;
11212		}
11213
11214		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11215			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
11216		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11217			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
11218		tp->bufmgr_config.mbuf_high_water_jumbo =
11219			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
11220	} else {
11221		tp->bufmgr_config.mbuf_read_dma_low_water =
11222			DEFAULT_MB_RDMA_LOW_WATER;
11223		tp->bufmgr_config.mbuf_mac_rx_low_water =
11224			DEFAULT_MB_MACRX_LOW_WATER;
11225		tp->bufmgr_config.mbuf_high_water =
11226			DEFAULT_MB_HIGH_WATER;
11227
11228		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
11229			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
11230		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
11231			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
11232		tp->bufmgr_config.mbuf_high_water_jumbo =
11233			DEFAULT_MB_HIGH_WATER_JUMBO;
11234	}
11235
11236	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
11237	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
11238}
11239
11240static char * __devinit tg3_phy_string(struct tg3 *tp)
11241{
11242	switch (tp->phy_id & PHY_ID_MASK) {
11243	case PHY_ID_BCM5400:	return "5400";
11244	case PHY_ID_BCM5401:	return "5401";
11245	case PHY_ID_BCM5411:	return "5411";
11246	case PHY_ID_BCM5701:	return "5701";
11247	case PHY_ID_BCM5703:	return "5703";
11248	case PHY_ID_BCM5704:	return "5704";
11249	case PHY_ID_BCM5705:	return "5705";
11250	case PHY_ID_BCM5750:	return "5750";
11251	case PHY_ID_BCM5752:	return "5752";
11252	case PHY_ID_BCM5714:	return "5714";
11253	case PHY_ID_BCM5780:	return "5780";
11254	case PHY_ID_BCM5755:	return "5755";
11255	case PHY_ID_BCM5787:	return "5787";
11256	case PHY_ID_BCM5756:	return "5722/5756";
11257	case PHY_ID_BCM5906:	return "5906";
11258	case PHY_ID_BCM8002:	return "8002/serdes";
11259	case 0:			return "serdes";
11260	default:		return "unknown";
11261	};
11262}
11263
11264static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
11265{
11266	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11267		strcpy(str, "PCI Express");
11268		return str;
11269	} else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
11270		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
11271
11272		strcpy(str, "PCIX:");
11273
11274		if ((clock_ctrl == 7) ||
11275		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
11276		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
11277			strcat(str, "133MHz");
11278		else if (clock_ctrl == 0)
11279			strcat(str, "33MHz");
11280		else if (clock_ctrl == 2)
11281			strcat(str, "50MHz");
11282		else if (clock_ctrl == 4)
11283			strcat(str, "66MHz");
11284		else if (clock_ctrl == 6)
11285			strcat(str, "100MHz");
11286	} else {
11287		strcpy(str, "PCI:");
11288		if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
11289			strcat(str, "66MHz");
11290		else
11291			strcat(str, "33MHz");
11292	}
11293	if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
11294		strcat(str, ":32-bit");
11295	else
11296		strcat(str, ":64-bit");
11297	return str;
11298}
11299
/* Locate the other PCI function of a dual-port device (e.g. 5704).
 *
 * Scans all eight functions of tp->pdev's slot and returns the first
 * one that is not tp->pdev itself.  If none is found, returns
 * tp->pdev (the 5704 can be strapped into single-port mode).  The
 * returned pointer deliberately carries no extra refcount; see the
 * comment near the bottom.
 */
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		/* pci_dev_put(NULL) is a no-op, so no NULL check. */
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	/* NOTE(review): if the loop runs to completion (no break) and the
	 * final pci_get_slot() returned tp->pdev, that reference was
	 * already dropped inside the loop, making the pci_dev_put() below
	 * drop one reference too many -- confirm the loop can only exit
	 * via break or with peer == NULL on supported configurations.
	 */
	pci_dev_put(peer);

	return peer;
}
11327
11328static void __devinit tg3_init_coal(struct tg3 *tp)
11329{
11330	struct ethtool_coalesce *ec = &tp->coal;
11331
11332	memset(ec, 0, sizeof(*ec));
11333	ec->cmd = ETHTOOL_GCOALESCE;
11334	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
11335	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
11336	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
11337	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
11338	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
11339	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
11340	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
11341	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
11342	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
11343
11344	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
11345				 HOSTCC_MODE_CLRTICK_TXBD)) {
11346		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
11347		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
11348		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
11349		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
11350	}
11351
11352	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
11353		ec->rx_coalesce_usecs_irq = 0;
11354		ec->tx_coalesce_usecs_irq = 0;
11355		ec->stats_block_coalesce_usecs = 0;
11356	}
11357}
11358
11359static int __devinit tg3_init_one(struct pci_dev *pdev,
11360				  const struct pci_device_id *ent)
11361{
11362	static int tg3_version_printed = 0;
11363	unsigned long tg3reg_base, tg3reg_len;
11364	struct net_device *dev;
11365	struct tg3 *tp;
11366	int i, err, pm_cap;
11367	char str[40];
11368	u64 dma_mask, persist_dma_mask;
11369
11370	if (tg3_version_printed++ == 0)
11371		printk(KERN_INFO "%s", version);
11372
11373	err = pci_enable_device(pdev);
11374	if (err) {
11375		printk(KERN_ERR PFX "Cannot enable PCI device, "
11376		       "aborting.\n");
11377		return err;
11378	}
11379
11380	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11381		printk(KERN_ERR PFX "Cannot find proper PCI device "
11382		       "base address, aborting.\n");
11383		err = -ENODEV;
11384		goto err_out_disable_pdev;
11385	}
11386
11387	err = pci_request_regions(pdev, DRV_MODULE_NAME);
11388	if (err) {
11389		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
11390		       "aborting.\n");
11391		goto err_out_disable_pdev;
11392	}
11393
11394	pci_set_master(pdev);
11395
11396	/* Find power-management capability. */
11397	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11398	if (pm_cap == 0) {
11399		printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
11400		       "aborting.\n");
11401		err = -EIO;
11402		goto err_out_free_res;
11403	}
11404
11405	tg3reg_base = pci_resource_start(pdev, 0);
11406	tg3reg_len = pci_resource_len(pdev, 0);
11407
11408	dev = alloc_etherdev(sizeof(*tp));
11409	if (!dev) {
11410		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
11411		err = -ENOMEM;
11412		goto err_out_free_res;
11413	}
11414
11415	SET_MODULE_OWNER(dev);
11416	SET_NETDEV_DEV(dev, &pdev->dev);
11417
11418#if TG3_VLAN_TAG_USED
11419	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
11420	dev->vlan_rx_register = tg3_vlan_rx_register;
11421#endif
11422
11423	tp = netdev_priv(dev);
11424	tp->pdev = pdev;
11425	tp->dev = dev;
11426	tp->pm_cap = pm_cap;
11427	tp->mac_mode = TG3_DEF_MAC_MODE;
11428	tp->rx_mode = TG3_DEF_RX_MODE;
11429	tp->tx_mode = TG3_DEF_TX_MODE;
11430	tp->mi_mode = MAC_MI_MODE_BASE;
11431	if (tg3_debug > 0)
11432		tp->msg_enable = tg3_debug;
11433	else
11434		tp->msg_enable = TG3_DEF_MSG_ENABLE;
11435
11436	/* The word/byte swap controls here control register access byte
11437	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
11438	 * setting below.
11439	 */
11440	tp->misc_host_ctrl =
11441		MISC_HOST_CTRL_MASK_PCI_INT |
11442		MISC_HOST_CTRL_WORD_SWAP |
11443		MISC_HOST_CTRL_INDIR_ACCESS |
11444		MISC_HOST_CTRL_PCISTATE_RW;
11445
11446	/* The NONFRM (non-frame) byte/word swap controls take effect
11447	 * on descriptor entries, anything which isn't packet data.
11448	 *
11449	 * The StrongARM chips on the board (one for tx, one for rx)
11450	 * are running in big-endian mode.
11451	 */
11452	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
11453			GRC_MODE_WSWAP_NONFRM_DATA);
11454#ifdef __BIG_ENDIAN
11455	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
11456#endif
11457	spin_lock_init(&tp->lock);
11458	spin_lock_init(&tp->indirect_lock);
11459	INIT_WORK(&tp->reset_task, tg3_reset_task);
11460
11461	tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
11462	if (tp->regs == 0UL) {
11463		printk(KERN_ERR PFX "Cannot map device registers, "
11464		       "aborting.\n");
11465		err = -ENOMEM;
11466		goto err_out_free_dev;
11467	}
11468
11469	tg3_init_link_config(tp);
11470
11471	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
11472	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
11473	tp->tx_pending = TG3_DEF_TX_RING_PENDING;
11474
11475	dev->open = tg3_open;
11476	dev->stop = tg3_close;
11477	dev->get_stats = tg3_get_stats;
11478	dev->set_multicast_list = tg3_set_rx_mode;
11479	dev->set_mac_address = tg3_set_mac_addr;
11480	dev->do_ioctl = tg3_ioctl;
11481	dev->tx_timeout = tg3_tx_timeout;
11482	dev->poll = tg3_poll;
11483	dev->ethtool_ops = &tg3_ethtool_ops;
11484	dev->weight = 64;
11485	dev->watchdog_timeo = TG3_TX_TIMEOUT;
11486	dev->change_mtu = tg3_change_mtu;
11487	dev->irq = pdev->irq;
11488#ifdef CONFIG_NET_POLL_CONTROLLER
11489	dev->poll_controller = tg3_poll_controller;
11490#endif
11491
11492	err = tg3_get_invariants(tp);
11493	if (err) {
11494		printk(KERN_ERR PFX "Problem fetching invariants of chip, "
11495		       "aborting.\n");
11496		goto err_out_iounmap;
11497	}
11498
11499	/* The EPB bridge inside 5714, 5715, and 5780 and any
11500	 * device behind the EPB cannot support DMA addresses > 40-bit.
11501	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
11502	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
11503	 * do DMA address check in tg3_start_xmit().
11504	 */
11505	if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
11506		persist_dma_mask = dma_mask = DMA_32BIT_MASK;
11507	else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
11508		persist_dma_mask = dma_mask = DMA_40BIT_MASK;
11509#ifdef CONFIG_HIGHMEM
11510		dma_mask = DMA_64BIT_MASK;
11511#endif
11512	} else
11513		persist_dma_mask = dma_mask = DMA_64BIT_MASK;
11514
11515	/* Configure DMA attributes. */
11516	if (dma_mask > DMA_32BIT_MASK) {
11517		err = pci_set_dma_mask(pdev, dma_mask);
11518		if (!err) {
11519			dev->features |= NETIF_F_HIGHDMA;
11520			err = pci_set_consistent_dma_mask(pdev,
11521							  persist_dma_mask);
11522			if (err < 0) {
11523				printk(KERN_ERR PFX "Unable to obtain 64 bit "
11524				       "DMA for consistent allocations\n");
11525				goto err_out_iounmap;
11526			}
11527		}
11528	}
11529	if (err || dma_mask == DMA_32BIT_MASK) {
11530		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
11531		if (err) {
11532			printk(KERN_ERR PFX "No usable DMA configuration, "
11533			       "aborting.\n");
11534			goto err_out_iounmap;
11535		}
11536	}
11537
11538	tg3_init_bufmgr_config(tp);
11539
11540	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
11541		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
11542	}
11543	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11544	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
11545	    tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
11546	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
11547	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
11548		tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
11549	} else {
11550		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
11551	}
11552
11553	/* TSO is on by default on chips that support hardware TSO.
11554	 * Firmware TSO on older chips gives lower performance, so it
11555	 * is off by default, but can be enabled using ethtool.
11556	 */
11557	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
11558		dev->features |= NETIF_F_TSO;
11559		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
11560		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
11561			dev->features |= NETIF_F_TSO6;
11562	}
11563
11564
11565	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
11566	    !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
11567	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
11568		tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
11569		tp->rx_pending = 63;
11570	}
11571
11572	err = tg3_get_device_address(tp);
11573	if (err) {
11574		printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
11575		       "aborting.\n");
11576		goto err_out_iounmap;
11577	}
11578
11579	/*
11580	 * Reset chip in case UNDI or EFI driver did not shutdown
11581	 * DMA self test will enable WDMAC and we'll see (spurious)
11582	 * pending DMA on the PCI bus at that point.
11583	 */
11584	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
11585	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
11586		pci_save_state(tp->pdev);
11587		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
11588		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11589	}
11590
11591	err = tg3_test_dma(tp);
11592	if (err) {
11593		printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
11594		goto err_out_iounmap;
11595	}
11596
11597	/* Tigon3 can do ipv4 only... and some chips have buggy
11598	 * checksumming.
11599	 */
11600	if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
11601		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
11602		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
11603			dev->features |= NETIF_F_HW_CSUM;
11604		else
11605			dev->features |= NETIF_F_IP_CSUM;
11606		dev->features |= NETIF_F_SG;
11607		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
11608	} else
11609		tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
11610
11611	/* flow control autonegotiation is default behavior */
11612	tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
11613
11614	tg3_init_coal(tp);
11615
11616	/* Now that we have fully setup the chip, save away a snapshot
11617	 * of the PCI config space.  We need to restore this after
11618	 * GRC_MISC_CFG core clock resets and some resume events.
11619	 */
11620	pci_save_state(tp->pdev);
11621
11622	pci_set_drvdata(pdev, dev);
11623
11624	err = register_netdev(dev);
11625	if (err) {
11626		printk(KERN_ERR PFX "Cannot register net device, "
11627		       "aborting.\n");
11628		goto err_out_iounmap;
11629	}
11630
11631	printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (%s) %s Ethernet ",
11632	       dev->name,
11633	       tp->board_part_number,
11634	       tp->pci_chip_rev_id,
11635	       tg3_phy_string(tp),
11636	       tg3_bus_string(tp, str),
11637	       ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
11638		((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
11639		 "10/100/1000Base-T")));
11640
11641	for (i = 0; i < 6; i++)
11642		printk("%2.2x%c", dev->dev_addr[i],
11643		       i == 5 ? '\n' : ':');
11644
11645	printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
11646	       "MIirq[%d] ASF[%d] WireSpeed[%d] TSOcap[%d]\n",
11647	       dev->name,
11648	       (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
11649	       (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
11650	       (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
11651	       (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
11652	       (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
11653	       (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
11654	printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
11655	       dev->name, tp->dma_rwctrl,
11656	       (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
11657	        (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
11658
11659	return 0;
11660
11661err_out_iounmap:
11662	if (tp->regs) {
11663		iounmap(tp->regs);
11664		tp->regs = NULL;
11665	}
11666
11667err_out_free_dev:
11668	free_netdev(dev);
11669
11670err_out_free_res:
11671	pci_release_regions(pdev);
11672
11673err_out_disable_pdev:
11674	pci_disable_device(pdev);
11675	pci_set_drvdata(pdev, NULL);
11676	return err;
11677}
11678
11679static void __devexit tg3_remove_one(struct pci_dev *pdev)
11680{
11681	struct net_device *dev = pci_get_drvdata(pdev);
11682
11683	if (dev) {
11684		struct tg3 *tp = netdev_priv(dev);
11685
11686		flush_scheduled_work();
11687		unregister_netdev(dev);
11688		if (tp->regs) {
11689			iounmap(tp->regs);
11690			tp->regs = NULL;
11691		}
11692		free_netdev(dev);
11693		pci_release_regions(pdev);
11694		pci_disable_device(pdev);
11695		pci_set_drvdata(pdev, NULL);
11696	}
11697}
11698
11699static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
11700{
11701	struct net_device *dev = pci_get_drvdata(pdev);
11702	struct tg3 *tp = netdev_priv(dev);
11703	int err;
11704
11705	if (!netif_running(dev))
11706		return 0;
11707
11708	flush_scheduled_work();
11709	tg3_netif_stop(tp);
11710
11711	del_timer_sync(&tp->timer);
11712
11713	tg3_full_lock(tp, 1);
11714	tg3_disable_ints(tp);
11715	tg3_full_unlock(tp);
11716
11717	netif_device_detach(dev);
11718
11719	tg3_full_lock(tp, 0);
11720	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11721	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
11722	tg3_full_unlock(tp);
11723
11724	/* Save MSI address and data for resume.  */
11725	pci_save_state(pdev);
11726
11727	err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
11728	if (err) {
11729		tg3_full_lock(tp, 0);
11730
11731		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11732		if (tg3_restart_hw(tp, 1))
11733			goto out;
11734
11735		tp->timer.expires = jiffies + tp->timer_offset;
11736		add_timer(&tp->timer);
11737
11738		netif_device_attach(dev);
11739		tg3_netif_start(tp);
11740
11741out:
11742		tg3_full_unlock(tp);
11743	}
11744
11745	return err;
11746}
11747
11748static int tg3_resume(struct pci_dev *pdev)
11749{
11750	struct net_device *dev = pci_get_drvdata(pdev);
11751	struct tg3 *tp = netdev_priv(dev);
11752	int err;
11753
11754	if (!netif_running(dev))
11755		return 0;
11756
11757	pci_restore_state(tp->pdev);
11758
11759	err = tg3_set_power_state(tp, PCI_D0);
11760	if (err)
11761		return err;
11762
11763	netif_device_attach(dev);
11764
11765	tg3_full_lock(tp, 0);
11766
11767	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
11768	err = tg3_restart_hw(tp, 1);
11769	if (err)
11770		goto out;
11771
11772	tp->timer.expires = jiffies + tp->timer_offset;
11773	add_timer(&tp->timer);
11774
11775	tg3_netif_start(tp);
11776
11777out:
11778	tg3_full_unlock(tp);
11779
11780	return err;
11781}
11782
/* PCI driver descriptor: binds the probe/remove and power-
 * management entry points above to the device IDs in tg3_pci_tbl.
 */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.suspend	= tg3_suspend,
	.resume		= tg3_resume
};
11791
/* Module entry point: register the PCI driver with the core. */
static int __init tg3_init(void)
{
	return pci_register_driver(&tg3_driver);
}
11796
/* Module exit point: unregister the driver; the PCI core calls
 * tg3_remove_one for each bound device.
 */
static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}
11801
/* Hook the init/exit functions into the module loader. */
module_init(tg3_init);
module_exit(tg3_cleanup);
11804