/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2010 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#else
#define TG3_VLAN_TAG_USED 0
#endif

#include "tg3.h"

#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			113
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
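/* With TG3_MAJ_NUM == 3 and TG3_MIN_NUM == 113, the stringify
 * expansion above yields the version string "3.113".
 */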
#define DRV_MODULE_RELDATE	"August 2, 2010"

#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE		512
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JUMBO_RING_SIZE		256
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
#define TG3_RSS_INDIR_TBL_SIZE		128

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)	\
	(((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) && \
	  !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) ? 1024 : 512)

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES	(sizeof(struct tg3_ext_rx_buffer_desc) * \
				 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
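/* Because TG3_TX_RING_SIZE is a power of two, the mask above is the
 * '& (foo - 1)' equivalent of '% TG3_TX_RING_SIZE' described in the
 * comment before TG3_RX_RCB_RING_SIZE(): e.g. NEXT_TX(511) ==
 * (512 & 511) == 0, so the index wraps with no hardware modulo.
 */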

#define TG3_RX_DMA_ALIGN		16
#define TG3_RX_HEADROOM			ALIGN(VLAN_HLEN, TG3_RX_DMA_ALIGN)

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
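/* Worked out: TG3_RX_STD_MAP_SZ == 1536 + 64 == 1600 bytes and
 * TG3_RX_JMB_MAP_SZ == 9046 + 64 == 9110 bytes, i.e. each DMA mapping
 * is padded by TG3_DMA_BYTE_ENAB beyond the largest frame its ring
 * can hold.
 */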

#define TG3_RX_STD_BUFF_RING_SIZE \
	(sizeof(struct ring_info) * TG3_RX_RING_SIZE)

#define TG3_RX_JMB_BUFF_RING_SIZE \
	(sizeof(struct ring_info) * TG3_RX_JUMBO_RING_SIZE)

#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)		((tnapi)->tx_pending / 4)
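/* Assuming the default tx_pending of TG3_DEF_TX_RING_PENDING (511),
 * this evaluates to 127 descriptors: the queue is woken once roughly
 * a quarter of the ring is free again.
 */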

#define TG3_RAW_IP_ALIGN 2

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS		(sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST		6

#define TG3_FW_UPDATE_TIMEOUT_SEC	5

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5724)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};

static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
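/* tw32()/tr32() and the mailbox variants dispatch through function
 * pointers in struct tg3 (write32, read32, write32_mbox, and friends),
 * so the access method that suits a given chip's bus quirks, whether
 * direct, flushed, or indirect via PCI config space, is selected once
 * at probe time instead of being tested on every register access.
 */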
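/* Indirect NIC SRAM access works by aiming the PCI memory window base
 * register at the target offset, moving the word through the window's
 * data register, and then parking the base back at zero, which the
 * rest of the driver depends on ("Always leave this as zero").
 */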
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver doesn't hold any stale locks. */
	for (i = 0; i < 8; i++)
		tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return -EINVAL;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, APE_LOCK_REQ_DRIVER);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == APE_LOCK_GRANT_DRIVER)
			break;
		udelay(10);
	}

	if (status != APE_LOCK_GRANT_DRIVER) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off,
				APE_LOCK_GRANT_DRIVER);

		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, APE_LOCK_GRANT_DRIVER);
}

static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

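/* tg3_enable_ints() acknowledges and unmasks by writing the last
 * observed status tag (shifted into bits 31:24) to each vector's
 * interrupt mailbox.  On chips flagged TG3_FLG2_1SHOT_MSI the mailbox
 * write is issued twice, apparently to make sure the one-shot MSI is
 * re-armed.
 */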
static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_napi_disable(struct tg3 *tp)
{
	int i;

	for (i = tp->irq_cnt - 1; i >= 0; i--)
		napi_disable(&tp->napi[i].napi);
}

static void tg3_napi_enable(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++)
		napi_enable(&tp->napi[i].napi);
}

static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_tx_disable(tp->dev);
}

static inline void tg3_netif_start(struct tg3 *tp)
{
	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	tg3_napi_enable(tp);
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000

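/* tg3_readphy()/tg3_writephy() below drive a clause-22 MDIO frame
 * through MAC_MI_COM: the PHY and register addresses are shifted into
 * the MI_COM_PHY_ADDR and MI_COM_REG_ADDR fields, MI_COM_CMD_READ or
 * MI_COM_CMD_WRITE selects the opcode, and MI_COM_START kicks off the
 * transaction.  PHY_BUSY_LOOPS iterations of udelay(10) bound the
 * busy-wait at roughly 50 msec.
 */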
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
		u32 is_serdes;

		tp->phy_addr = PCI_FUNC(tp->pdev->devfn) + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
	    (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state.
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

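	/* The loop below busy-waits udelay(8) per pass, so the remaining
	 * microseconds are converted to a pass count by dividing by 8
	 * (the '>> 3'), plus one pass so the wait is never cut short.
	 */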
	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
	    !(tp->tg3_flags  & TG3_FLAG_ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");
		tg3_ump_link_report(tp);
	}
}

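/* The two helpers below map the driver's FLOW_CTRL_TX/RX bits onto
 * the MII pause advertisement bits, following the IEEE 802.3
 * Annex 28B conventions: symmetric pause advertises PAUSE alone,
 * TX-only advertises ASYM alone, and RX-only advertises both bits
 * (willing to receive pause, and also compatible with symmetric
 * partners).
 */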
static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_PAUSE_CAP;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_PAUSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

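/* Resolve the negotiated 1000BASE-X pause mode from the local and
 * link-partner advertisements per the IEEE 802.3 Annex 28B priority
 * table: symmetric pause when both sides advertise PAUSE, and an
 * asymmetric TX or RX pause when exactly one side pairs PAUSE with
 * ASYM_DIR.
 */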
1332static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1333{
1334	u8 cap = 0;
1335
1336	if (lcladv & ADVERTISE_1000XPAUSE) {
1337		if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1338			if (rmtadv & LPA_1000XPAUSE)
1339				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1340			else if (rmtadv & LPA_1000XPAUSE_ASYM)
1341				cap = FLOW_CTRL_RX;
1342		} else {
1343			if (rmtadv & LPA_1000XPAUSE)
1344				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1345		}
1346	} else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1347		if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1348			cap = FLOW_CTRL_TX;
1349	}
1350
1351	return cap;
1352}
1353
1354static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1355{
1356	u8 autoneg;
1357	u8 flowctrl = 0;
1358	u32 old_rx_mode = tp->rx_mode;
1359	u32 old_tx_mode = tp->tx_mode;
1360
1361	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
1362		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1363	else
1364		autoneg = tp->link_config.autoneg;
1365
1366	if (autoneg == AUTONEG_ENABLE &&
1367	    (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
1368		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1369			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1370		else
1371			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1372	} else
1373		flowctrl = tp->link_config.flowctrl;
1374
1375	tp->link_config.active_flowctrl = flowctrl;
1376
1377	if (flowctrl & FLOW_CTRL_RX)
1378		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1379	else
1380		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1381
1382	if (old_rx_mode != tp->rx_mode)
1383		tw32_f(MAC_RX_MODE, tp->rx_mode);
1384
1385	if (flowctrl & FLOW_CTRL_TX)
1386		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1387	else
1388		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1389
1390	if (old_tx_mode != tp->tx_mode)
1391		tw32_f(MAC_TX_MODE, tp->tx_mode);
1392}
1393
1394static void tg3_adjust_link(struct net_device *dev)
1395{
1396	u8 oldflowctrl, linkmesg = 0;
1397	u32 mac_mode, lcl_adv, rmt_adv;
1398	struct tg3 *tp = netdev_priv(dev);
1399	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1400
1401	spin_lock_bh(&tp->lock);
1402
1403	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1404				    MAC_MODE_HALF_DUPLEX);
1405
1406	oldflowctrl = tp->link_config.active_flowctrl;
1407
1408	if (phydev->link) {
1409		lcl_adv = 0;
1410		rmt_adv = 0;
1411
1412		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1413			mac_mode |= MAC_MODE_PORT_MODE_MII;
1414		else if (phydev->speed == SPEED_1000 ||
1415			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1416			mac_mode |= MAC_MODE_PORT_MODE_GMII;
1417		else
1418			mac_mode |= MAC_MODE_PORT_MODE_MII;
1419
1420		if (phydev->duplex == DUPLEX_HALF)
1421			mac_mode |= MAC_MODE_HALF_DUPLEX;
1422		else {
1423			lcl_adv = tg3_advert_flowctrl_1000T(
1424				  tp->link_config.flowctrl);
1425
1426			if (phydev->pause)
1427				rmt_adv = LPA_PAUSE_CAP;
1428			if (phydev->asym_pause)
1429				rmt_adv |= LPA_PAUSE_ASYM;
1430		}
1431
1432		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1433	} else
1434		mac_mode |= MAC_MODE_PORT_MODE_GMII;
1435
1436	if (mac_mode != tp->mac_mode) {
1437		tp->mac_mode = mac_mode;
1438		tw32_f(MAC_MODE, tp->mac_mode);
1439		udelay(40);
1440	}
1441
1442	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1443		if (phydev->speed == SPEED_10)
1444			tw32(MAC_MI_STAT,
1445			     MAC_MI_STAT_10MBPS_MODE |
1446			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1447		else
1448			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1449	}
1450
1451	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1452		tw32(MAC_TX_LENGTHS,
1453		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1454		      (6 << TX_LENGTHS_IPG_SHIFT) |
1455		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1456	else
1457		tw32(MAC_TX_LENGTHS,
1458		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1459		      (6 << TX_LENGTHS_IPG_SHIFT) |
1460		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1461
1462	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1463	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1464	    phydev->speed != tp->link_config.active_speed ||
1465	    phydev->duplex != tp->link_config.active_duplex ||
1466	    oldflowctrl != tp->link_config.active_flowctrl)
1467		linkmesg = 1;
1468
1469	tp->link_config.active_speed = phydev->speed;
1470	tp->link_config.active_duplex = phydev->duplex;
1471
1472	spin_unlock_bh(&tp->lock);
1473
1474	if (linkmesg)
1475		tg3_link_report(tp);
1476}
1477
1478static int tg3_phy_init(struct tg3 *tp)
1479{
1480	struct phy_device *phydev;
1481
1482	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1483		return 0;
1484
1485	/* Bring the PHY back to a known state. */
1486	tg3_bmcr_reset(tp);
1487
1488	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1489
1490	/* Attach the MAC to the PHY. */
1491	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1492			     phydev->dev_flags, phydev->interface);
1493	if (IS_ERR(phydev)) {
1494		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1495		return PTR_ERR(phydev);
1496	}
1497
1498	/* Mask with MAC supported features. */
1499	switch (phydev->interface) {
1500	case PHY_INTERFACE_MODE_GMII:
1501	case PHY_INTERFACE_MODE_RGMII:
1502		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1503			phydev->supported &= (PHY_GBIT_FEATURES |
1504					      SUPPORTED_Pause |
1505					      SUPPORTED_Asym_Pause);
1506			break;
1507		}
1508		/* fallthru */
1509	case PHY_INTERFACE_MODE_MII:
1510		phydev->supported &= (PHY_BASIC_FEATURES |
1511				      SUPPORTED_Pause |
1512				      SUPPORTED_Asym_Pause);
1513		break;
1514	default:
1515		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1516		return -EINVAL;
1517	}
1518
1519	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1520
1521	phydev->advertising = phydev->supported;
1522
1523	return 0;
1524}
1525
1526static void tg3_phy_start(struct tg3 *tp)
1527{
1528	struct phy_device *phydev;
1529
1530	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1531		return;
1532
1533	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1534
1535	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1536		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1537		phydev->speed = tp->link_config.orig_speed;
1538		phydev->duplex = tp->link_config.orig_duplex;
1539		phydev->autoneg = tp->link_config.orig_autoneg;
1540		phydev->advertising = tp->link_config.orig_advertising;
1541	}
1542
1543	phy_start(phydev);
1544
1545	phy_start_aneg(phydev);
1546}
1547
1548static void tg3_phy_stop(struct tg3 *tp)
1549{
1550	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1551		return;
1552
1553	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1554}
1555
1556static void tg3_phy_fini(struct tg3 *tp)
1557{
1558	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1559		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1560		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1561	}
1562}
1563
1564static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1565{
1566	int err;
1567
1568	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1569	if (!err)
1570		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1571
1572	return err;
1573}
1574
1575static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1576{
1577	u32 phytest;
1578
1579	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1580		u32 phy;
1581
1582		tg3_writephy(tp, MII_TG3_FET_TEST,
1583			     phytest | MII_TG3_FET_SHADOW_EN);
1584		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1585			if (enable)
1586				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1587			else
1588				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1589			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1590		}
1591		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1592	}
1593}
1594
1595static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1596{
1597	u32 reg;
1598
1599	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
1600	    ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
1601	      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) &&
1602	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1603		return;
1604
1605	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1606		tg3_phy_fet_toggle_apd(tp, enable);
1607		return;
1608	}
1609
1610	reg = MII_TG3_MISC_SHDW_WREN |
1611	      MII_TG3_MISC_SHDW_SCR5_SEL |
1612	      MII_TG3_MISC_SHDW_SCR5_LPED |
1613	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1614	      MII_TG3_MISC_SHDW_SCR5_SDTL |
1615	      MII_TG3_MISC_SHDW_SCR5_C125OE;
1616	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
1617		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
1618
1619	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1620
1621
1622	reg = MII_TG3_MISC_SHDW_WREN |
1623	      MII_TG3_MISC_SHDW_APD_SEL |
1624	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
1625	if (enable)
1626		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
1627
1628	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1629}
1630
1631static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1632{
1633	u32 phy;
1634
1635	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
1636	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
1637		return;
1638
1639	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1640		u32 ephy;
1641
1642		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
1643			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
1644
1645			tg3_writephy(tp, MII_TG3_FET_TEST,
1646				     ephy | MII_TG3_FET_SHADOW_EN);
1647			if (!tg3_readphy(tp, reg, &phy)) {
1648				if (enable)
1649					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1650				else
1651					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1652				tg3_writephy(tp, reg, phy);
1653			}
1654			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
1655		}
1656	} else {
1657		phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
1658		      MII_TG3_AUXCTL_SHDWSEL_MISC;
1659		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
1660		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
1661			if (enable)
1662				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1663			else
1664				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1665			phy |= MII_TG3_AUXCTL_MISC_WREN;
1666			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1667		}
1668	}
1669}
1670
1671static void tg3_phy_set_wirespeed(struct tg3 *tp)
1672{
1673	u32 val;
1674
1675	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
1676		return;
1677
1678	if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
1679	    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
1680		tg3_writephy(tp, MII_TG3_AUX_CTRL,
1681			     (val | (1 << 15) | (1 << 4)));
1682}
1683
1684static void tg3_phy_apply_otp(struct tg3 *tp)
1685{
1686	u32 otp, phy;
1687
1688	if (!tp->phy_otp)
1689		return;
1690
1691	otp = tp->phy_otp;
1692
1693	/* Enable SM_DSP clock and tx 6dB coding. */
1694	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1695	      MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
1696	      MII_TG3_AUXCTL_ACTL_TX_6DB;
1697	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1698
1699	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1700	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1701	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1702
1703	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1704	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1705	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1706
1707	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1708	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1709	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1710
1711	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1712	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1713
1714	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1715	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1716
1717	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1718	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1719	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1720
1721	/* Turn off SM_DSP clock. */
1722	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1723	      MII_TG3_AUXCTL_ACTL_TX_6DB;
1724	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1725}
1726
1727static int tg3_wait_macro_done(struct tg3 *tp)
1728{
1729	int limit = 100;
1730
1731	while (limit--) {
1732		u32 tmp32;
1733
1734		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
1735			if ((tmp32 & 0x1000) == 0)
1736				break;
1737		}
1738	}
1739	if (limit < 0)
1740		return -EBUSY;
1741
1742	return 0;
1743}
1744
1745static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1746{
1747	static const u32 test_pat[4][6] = {
1748	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1749	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1750	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1751	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1752	};
1753	int chan;
1754
1755	for (chan = 0; chan < 4; chan++) {
1756		int i;
1757
1758		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1759			     (chan * 0x2000) | 0x0200);
1760		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1761
1762		for (i = 0; i < 6; i++)
1763			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1764				     test_pat[chan][i]);
1765
1766		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1767		if (tg3_wait_macro_done(tp)) {
1768			*resetp = 1;
1769			return -EBUSY;
1770		}
1771
1772		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1773			     (chan * 0x2000) | 0x0200);
1774		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
1775		if (tg3_wait_macro_done(tp)) {
1776			*resetp = 1;
1777			return -EBUSY;
1778		}
1779
1780		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
1781		if (tg3_wait_macro_done(tp)) {
1782			*resetp = 1;
1783			return -EBUSY;
1784		}
1785
1786		for (i = 0; i < 6; i += 2) {
1787			u32 low, high;
1788
1789			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1790			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1791			    tg3_wait_macro_done(tp)) {
1792				*resetp = 1;
1793				return -EBUSY;
1794			}
1795			low &= 0x7fff;
1796			high &= 0x000f;
1797			if (low != test_pat[chan][i] ||
1798			    high != test_pat[chan][i+1]) {
1799				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1800				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1801				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
1802
1803				return -EBUSY;
1804			}
1805		}
1806	}
1807
1808	return 0;
1809}
1810
1811static int tg3_phy_reset_chanpat(struct tg3 *tp)
1812{
1813	int chan;
1814
1815	for (chan = 0; chan < 4; chan++) {
1816		int i;
1817
1818		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1819			     (chan * 0x2000) | 0x0200);
1820		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1821		for (i = 0; i < 6; i++)
1822			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1823		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1824		if (tg3_wait_macro_done(tp))
1825			return -EBUSY;
1826	}
1827
1828	return 0;
1829}
1830
1831static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1832{
1833	u32 reg32, phy9_orig;
1834	int retries, do_phy_reset, err;
1835
1836	retries = 10;
1837	do_phy_reset = 1;
1838	do {
1839		if (do_phy_reset) {
1840			err = tg3_bmcr_reset(tp);
1841			if (err)
1842				return err;
1843			do_phy_reset = 0;
1844		}
1845
1846		/* Disable transmitter and interrupt.  */
1847		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1848			continue;
1849
1850		reg32 |= 0x3000;
1851		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1852
1853		/* Set full-duplex, 1000 Mbps.  */
1854		tg3_writephy(tp, MII_BMCR,
1855			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1856
1857		/* Set to master mode.  */
1858		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1859			continue;
1860
1861		tg3_writephy(tp, MII_TG3_CTRL,
1862			     (MII_TG3_CTRL_AS_MASTER |
1863			      MII_TG3_CTRL_ENABLE_AS_MASTER));
1864
1865		/* Enable SM_DSP_CLOCK and 6dB.  */
1866		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1867
1868		/* Block the PHY control access.  */
1869		tg3_phydsp_write(tp, 0x8005, 0x0800);
1870
1871		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
1872		if (!err)
1873			break;
1874	} while (--retries);
1875
1876	err = tg3_phy_reset_chanpat(tp);
1877	if (err)
1878		return err;
1879
1880	tg3_phydsp_write(tp, 0x8005, 0x0000);
1881
1882	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
1883	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
1884
1885	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1886	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1887		/* Set Extended packet length bit for jumbo frames */
1888		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
1889	} else {
1890		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1891	}
1892
1893	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
1894
1895	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
1896		reg32 &= ~0x3000;
1897		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1898	} else if (!err)
1899		err = -EBUSY;
1900
1901	return err;
1902}
1903
1904/* Reset the tigon3 PHY unconditionally and reapply the
1905 * chip- and PHY-specific workarounds afterwards.
1906 */
1907static int tg3_phy_reset(struct tg3 *tp)
1908{
1909	u32 cpmuctrl;
1910	u32 phy_status;
1911	int err;
1912
1913	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1914		u32 val;
1915
1916		val = tr32(GRC_MISC_CFG);
1917		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
1918		udelay(40);
1919	}
1920	err  = tg3_readphy(tp, MII_BMSR, &phy_status);
1921	err |= tg3_readphy(tp, MII_BMSR, &phy_status);
1922	if (err != 0)
1923		return -EBUSY;
1924
1925	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
1926		netif_carrier_off(tp->dev);
1927		tg3_link_report(tp);
1928	}
1929
1930	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1931	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1932	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1933		err = tg3_phy_reset_5703_4_5(tp);
1934		if (err)
1935			return err;
1936		goto out;
1937	}
1938
1939	cpmuctrl = 0;
1940	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
1941	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
1942		cpmuctrl = tr32(TG3_CPMU_CTRL);
1943		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
1944			tw32(TG3_CPMU_CTRL,
1945			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
1946	}
1947
1948	err = tg3_bmcr_reset(tp);
1949	if (err)
1950		return err;
1951
1952	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
1953		u32 phy;
1954
1955		phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
1956		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);
1957
1958		tw32(TG3_CPMU_CTRL, cpmuctrl);
1959	}
1960
1961	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
1962	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
1963		u32 val;
1964
1965		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
1966		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
1967		    CPMU_LSPD_1000MB_MACCLK_12_5) {
1968			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
1969			udelay(40);
1970			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
1971		}
1972	}
1973
1974	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
1975	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) &&
1976	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
1977		return 0;
1978
1979	tg3_phy_apply_otp(tp);
1980
1981	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
1982		tg3_phy_toggle_apd(tp, true);
1983	else
1984		tg3_phy_toggle_apd(tp, false);
1985
1986out:
1987	if (tp->phy_flags & TG3_PHYFLG_ADC_BUG) {
1988		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1989		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
1990		tg3_phydsp_write(tp, 0x000a, 0x0323);
1991		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1992	}
1993	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
1994		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
1995		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
1996	}
1997	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
1998		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1999		tg3_phydsp_write(tp, 0x000a, 0x310b);
2000		tg3_phydsp_write(tp, 0x201f, 0x9506);
2001		tg3_phydsp_write(tp, 0x401f, 0x14e2);
2002		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
2003	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2004		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
2005		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2006		if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2007			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2008			tg3_writephy(tp, MII_TG3_TEST1,
2009				     MII_TG3_TEST1_TRIM_EN | 0x4);
2010		} else
2011			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2012		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
2013	}
2014	/* Set Extended packet length bit (bit 14) on all chips
2015	 * that support jumbo frames. */
2016	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2017		/* Cannot do read-modify-write on 5401 */
2018		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2019	} else if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
2020		u32 phy_reg;
2021
2022		/* Set bit 14 with read-modify-write to preserve other bits */
2023		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
2024		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
2025			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
2026	}
2027
2028	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
2029	 * jumbo frames transmission.
2030	 */
2031	if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
2032		u32 phy_reg;
2033
2034		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
2035			tg3_writephy(tp, MII_TG3_EXT_CTRL,
2036				     phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2037	}
2038
2039	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2040		/* adjust output voltage */
2041		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2042	}
2043
2044	tg3_phy_toggle_automdix(tp, 1);
2045	tg3_phy_set_wirespeed(tp);
2046	return 0;
2047}
2048
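/* Switch the auxiliary (Vaux) power GPIOs on or off.  If WOL or
 * ASF is enabled on this port or on the peer port of a dual-port
 * device (5704/5714/5717), the GPIOs are walked through a
 * chip-specific sequence that keeps aux power available;
 * otherwise GPIO1 is pulsed to drop it.  The sequence is
 * generally skipped when the peer function has already completed
 * initialization and therefore owns the GPIOs.
 */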
2049static void tg3_frob_aux_power(struct tg3 *tp)
2050{
2051	struct tg3 *tp_peer = tp;
2052
2053	/* The GPIOs do something completely different on 57765 and 5719. */
2054	if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0 ||
2055	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2056	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2057		return;
2058
2059	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2060	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
2061	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
2062		struct net_device *dev_peer;
2063
2064		dev_peer = pci_get_drvdata(tp->pdev_peer);
2065		/* remove_one() may have been run on the peer. */
2066		if (!dev_peer)
2067			tp_peer = tp;
2068		else
2069			tp_peer = netdev_priv(dev_peer);
2070	}
2071
2072	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
2073	    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
2074	    (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
2075	    (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
2076		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2077		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2078			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2079				    (GRC_LCLCTRL_GPIO_OE0 |
2080				     GRC_LCLCTRL_GPIO_OE1 |
2081				     GRC_LCLCTRL_GPIO_OE2 |
2082				     GRC_LCLCTRL_GPIO_OUTPUT0 |
2083				     GRC_LCLCTRL_GPIO_OUTPUT1),
2084				    100);
2085		} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2086			   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2087			/* The 5761 non-E device swaps GPIO 0 and GPIO 2. */
2088			u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2089					     GRC_LCLCTRL_GPIO_OE1 |
2090					     GRC_LCLCTRL_GPIO_OE2 |
2091					     GRC_LCLCTRL_GPIO_OUTPUT0 |
2092					     GRC_LCLCTRL_GPIO_OUTPUT1 |
2093					     tp->grc_local_ctrl;
2094			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2095
2096			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2097			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2098
2099			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2100			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2101		} else {
2102			u32 no_gpio2;
2103			u32 grc_local_ctrl = 0;
2104
2105			if (tp_peer != tp &&
2106			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
2107				return;
2108
2109			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2110			    ASIC_REV_5714) {
2111				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2112				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2113					    grc_local_ctrl, 100);
2114			}
2115
2116			/* On 5753 and variants, GPIO2 cannot be used. */
2117			no_gpio2 = tp->nic_sram_data_cfg &
2118				    NIC_SRAM_DATA_CFG_NO_GPIO2;
2119
2120			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2121					 GRC_LCLCTRL_GPIO_OE1 |
2122					 GRC_LCLCTRL_GPIO_OE2 |
2123					 GRC_LCLCTRL_GPIO_OUTPUT1 |
2124					 GRC_LCLCTRL_GPIO_OUTPUT2;
2125			if (no_gpio2) {
2126				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2127						    GRC_LCLCTRL_GPIO_OUTPUT2);
2128			}
2129			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2130						    grc_local_ctrl, 100);
2131
2132			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2133
2134			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2135						    grc_local_ctrl, 100);
2136
2137			if (!no_gpio2) {
2138				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2139				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2140					    grc_local_ctrl, 100);
2141			}
2142		}
2143	} else {
2144		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
2145		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
2146			if (tp_peer != tp &&
2147			    (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
2148				return;
2149
2150			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2151				    (GRC_LCLCTRL_GPIO_OE1 |
2152				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2153
2154			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2155				    GRC_LCLCTRL_GPIO_OE1, 100);
2156
2157			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2158				    (GRC_LCLCTRL_GPIO_OE1 |
2159				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2160		}
2161	}
2162}
2163
2164static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2165{
2166	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2167		return 1;
2168	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2169		if (speed != SPEED_10)
2170			return 1;
2171	} else if (speed == SPEED_10)
2172		return 1;
2173
2174	return 0;
2175}
2176
2177static int tg3_setup_phy(struct tg3 *, int);
2178
2179#define RESET_KIND_SHUTDOWN	0
2180#define RESET_KIND_INIT		1
2181#define RESET_KIND_SUSPEND	2
2182
2183static void tg3_write_sig_post_reset(struct tg3 *, int);
2184static int tg3_halt_cpu(struct tg3 *, u32);
2185
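/* Power down the PHY along the path appropriate for the chip:
 * serdes parts get a SG_DIG/SERDES_CFG shutdown, the 5906 EPHY is
 * put into IDDQ, FET PHYs use the SBPD bit in the AUXMODE4 shadow
 * register, and other copper PHYs optionally enter the low-power
 * aux-control state before BMCR_PDOWN.  A few chips (5700, 5704,
 * and 5780 in MII-serdes mode) must never see BMCR_PDOWN because
 * of hardware bugs, so the function returns early for them.
 */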
2186static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2187{
2188	u32 val;
2189
2190	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2191		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2192			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2193			u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2194
2195			sg_dig_ctrl |=
2196				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2197			tw32(SG_DIG_CTRL, sg_dig_ctrl);
2198			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2199		}
2200		return;
2201	}
2202
2203	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2204		tg3_bmcr_reset(tp);
2205		val = tr32(GRC_MISC_CFG);
2206		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2207		udelay(40);
2208		return;
2209	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2210		u32 phytest;
2211		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2212			u32 phy;
2213
2214			tg3_writephy(tp, MII_ADVERTISE, 0);
2215			tg3_writephy(tp, MII_BMCR,
2216				     BMCR_ANENABLE | BMCR_ANRESTART);
2217
2218			tg3_writephy(tp, MII_TG3_FET_TEST,
2219				     phytest | MII_TG3_FET_SHADOW_EN);
2220			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2221				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2222				tg3_writephy(tp,
2223					     MII_TG3_FET_SHDW_AUXMODE4,
2224					     phy);
2225			}
2226			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2227		}
2228		return;
2229	} else if (do_low_power) {
2230		tg3_writephy(tp, MII_TG3_EXT_CTRL,
2231			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2232
2233		tg3_writephy(tp, MII_TG3_AUX_CTRL,
2234			     MII_TG3_AUXCTL_SHDWSEL_PWRCTL |
2235			     MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2236			     MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2237			     MII_TG3_AUXCTL_PCTL_VREG_11V);
2238	}
2239
2240	/* The PHY should not be powered down on some chips because
2241	 * of bugs.
2242	 */
2243	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2244	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2245	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2246	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2247		return;
2248
2249	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2250	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2251		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2252		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2253		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2254		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2255	}
2256
2257	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2258}
2259
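/* NVRAM arbitration: SWARB_REQ_SET1/SWARB_GNT1 implement a
 * hardware semaphore shared with the on-chip firmware, and
 * nvram_lock_cnt makes the software side recursive.  Acquisition
 * polls for the grant for up to 8000 * 20us = 160ms.  The usual
 * calling pattern, as in tg3_nvram_read() below, is:
 *
 *	ret = tg3_nvram_lock(tp);
 *	if (ret)
 *		return ret;
 *	tg3_enable_nvram_access(tp);
 *	... issue NVRAM commands ...
 *	tg3_disable_nvram_access(tp);
 *	tg3_nvram_unlock(tp);
 */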
2260/* tp->lock is held. */
2261static int tg3_nvram_lock(struct tg3 *tp)
2262{
2263	if (tp->tg3_flags & TG3_FLAG_NVRAM) {
2264		int i;
2265
2266		if (tp->nvram_lock_cnt == 0) {
2267			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2268			for (i = 0; i < 8000; i++) {
2269				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2270					break;
2271				udelay(20);
2272			}
2273			if (i == 8000) {
2274				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2275				return -ENODEV;
2276			}
2277		}
2278		tp->nvram_lock_cnt++;
2279	}
2280	return 0;
2281}
2282
2283/* tp->lock is held. */
2284static void tg3_nvram_unlock(struct tg3 *tp)
2285{
2286	if (tp->tg3_flags & TG3_FLAG_NVRAM) {
2287		if (tp->nvram_lock_cnt > 0)
2288			tp->nvram_lock_cnt--;
2289		if (tp->nvram_lock_cnt == 0)
2290			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2291	}
2292}
2293
2294/* tp->lock is held. */
2295static void tg3_enable_nvram_access(struct tg3 *tp)
2296{
2297	if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2298	    !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
2299		u32 nvaccess = tr32(NVRAM_ACCESS);
2300
2301		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2302	}
2303}
2304
2305/* tp->lock is held. */
2306static void tg3_disable_nvram_access(struct tg3 *tp)
2307{
2308	if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2309	    !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
2310		u32 nvaccess = tr32(NVRAM_ACCESS);
2311
2312		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2313	}
2314}
2315
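/* Fallback read path for boards without the NVRAM interface: the
 * word offset is programmed into GRC_EEPROM_ADDR together with
 * EEPROM_ADDR_READ | EEPROM_ADDR_START, then EEPROM_ADDR_COMPLETE
 * is polled for up to a second or so.  The result in
 * GRC_EEPROM_DATA is always opposite the native endianness, hence
 * the swab32().
 */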
2316static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2317					u32 offset, u32 *val)
2318{
2319	u32 tmp;
2320	int i;
2321
2322	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2323		return -EINVAL;
2324
2325	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2326					EEPROM_ADDR_DEVID_MASK |
2327					EEPROM_ADDR_READ);
2328	tw32(GRC_EEPROM_ADDR,
2329	     tmp |
2330	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
2331	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2332	      EEPROM_ADDR_ADDR_MASK) |
2333	     EEPROM_ADDR_READ | EEPROM_ADDR_START);
2334
2335	for (i = 0; i < 1000; i++) {
2336		tmp = tr32(GRC_EEPROM_ADDR);
2337
2338		if (tmp & EEPROM_ADDR_COMPLETE)
2339			break;
2340		msleep(1);
2341	}
2342	if (!(tmp & EEPROM_ADDR_COMPLETE))
2343		return -EBUSY;
2344
2345	tmp = tr32(GRC_EEPROM_DATA);
2346
2347	/*
2348	 * The data will always be opposite the native endian
2349	 * format.  Perform a blind byteswap to compensate.
2350	 */
2351	*val = swab32(tmp);
2352
2353	return 0;
2354}
2355
2356#define NVRAM_CMD_TIMEOUT 10000
2357
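/* Issue a command to the NVRAM interface and poll NVRAM_CMD_DONE,
 * giving the hardware NVRAM_CMD_TIMEOUT * 10us (about 100ms) to
 * finish before reporting -EBUSY.
 */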
2358static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2359{
2360	int i;
2361
2362	tw32(NVRAM_CMD, nvram_cmd);
2363	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2364		udelay(10);
2365		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2366			udelay(10);
2367			break;
2368		}
2369	}
2370
2371	if (i == NVRAM_CMD_TIMEOUT)
2372		return -EBUSY;
2373
2374	return 0;
2375}
2376
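/* Convert a linear NVRAM offset into the page-based physical
 * address used by buffered Atmel AT45DB0x1B flashes, whose page
 * size is not a power of two (264 bytes).  For example, assuming
 * the usual 264-byte page size and an ATMEL_AT45DB0X1B_PAGE_POS
 * of 9, offset 1000 falls in page 3 at byte 208, giving a
 * physical address of (3 << 9) + 208 = 1744.
 * tg3_nvram_logical_addr() below is the inverse mapping.
 */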
2377static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2378{
2379	if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
2380	    (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
2381	    (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
2382	   !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
2383	    (tp->nvram_jedecnum == JEDEC_ATMEL))
2384
2385		addr = ((addr / tp->nvram_pagesize) <<
2386			ATMEL_AT45DB0X1B_PAGE_POS) +
2387		       (addr % tp->nvram_pagesize);
2388
2389	return addr;
2390}
2391
2392static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2393{
2394	if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
2395	    (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
2396	    (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
2397	   !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
2398	    (tp->nvram_jedecnum == JEDEC_ATMEL))
2399
2400		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2401			tp->nvram_pagesize) +
2402		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2403
2404	return addr;
2405}
2406
2407/* NOTE: Data read in from NVRAM is byteswapped according to
2408 * the byteswapping settings for all other register accesses.
2409 * tg3 devices are BE devices, so on a BE machine, the data
2410 * returned will be exactly as it is seen in NVRAM.  On a LE
2411 * machine, the 32-bit value will be byteswapped.
2412 */
2413static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2414{
2415	int ret;
2416
2417	if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
2418		return tg3_nvram_read_using_eeprom(tp, offset, val);
2419
2420	offset = tg3_nvram_phys_addr(tp, offset);
2421
2422	if (offset > NVRAM_ADDR_MSK)
2423		return -EINVAL;
2424
2425	ret = tg3_nvram_lock(tp);
2426	if (ret)
2427		return ret;
2428
2429	tg3_enable_nvram_access(tp);
2430
2431	tw32(NVRAM_ADDR, offset);
2432	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2433		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2434
2435	if (ret == 0)
2436		*val = tr32(NVRAM_RDDATA);
2437
2438	tg3_disable_nvram_access(tp);
2439
2440	tg3_nvram_unlock(tp);
2441
2442	return ret;
2443}
2444
2445/* Ensures NVRAM data is in bytestream format. */
2446static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2447{
2448	u32 v;
2449	int res = tg3_nvram_read(tp, offset, &v);
2450	if (!res)
2451		*val = cpu_to_be32(v);
2452	return res;
2453}
2454
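/* Program the station address: the first two octets go into the
 * _HIGH register and the remaining four into _LOW, replicated
 * across the four MAC address slots (skip_mac_1 leaves slot 1
 * alone, presumably because management firmware owns it).  The
 * 5703/5704 also get twelve extended slots, and the sum of the
 * address octets seeds the transmit backoff algorithm.
 */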
2455/* tp->lock is held. */
2456static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2457{
2458	u32 addr_high, addr_low;
2459	int i;
2460
2461	addr_high = ((tp->dev->dev_addr[0] << 8) |
2462		     tp->dev->dev_addr[1]);
2463	addr_low = ((tp->dev->dev_addr[2] << 24) |
2464		    (tp->dev->dev_addr[3] << 16) |
2465		    (tp->dev->dev_addr[4] <<  8) |
2466		    (tp->dev->dev_addr[5] <<  0));
2467	for (i = 0; i < 4; i++) {
2468		if (i == 1 && skip_mac_1)
2469			continue;
2470		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2471		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2472	}
2473
2474	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2475	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2476		for (i = 0; i < 12; i++) {
2477			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2478			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2479		}
2480	}
2481
2482	addr_high = (tp->dev->dev_addr[0] +
2483		     tp->dev->dev_addr[1] +
2484		     tp->dev->dev_addr[2] +
2485		     tp->dev->dev_addr[3] +
2486		     tp->dev->dev_addr[4] +
2487		     tp->dev->dev_addr[5]) &
2488		TX_BACKOFF_SEED_MASK;
2489	tw32(MAC_TX_BACKOFF_SEED, addr_high);
2490}
2491
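/* Move the device into the requested PCI power state.  The D0
 * case disables wake, restores full power, and switches the NIC
 * out of Vaux.  For D1-D3hot the function restores CLKREQ, masks
 * PCI interrupts, drops the PHY advertisement to the low speeds
 * needed for wake-on-LAN, writes the WOL mailbox, configures the
 * MAC for magic-packet reception, gears down the core clocks
 * where the chip allows it, powers down the PHY if neither wake
 * nor ASF needs it, and finally calls pci_set_power_state().
 */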
2492static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2493{
2494	u32 misc_host_ctrl;
2495	bool device_should_wake, do_low_power;
2496
2497	/* Make sure register accesses (indirect or otherwise)
2498	 * will function correctly.
2499	 */
2500	pci_write_config_dword(tp->pdev,
2501			       TG3PCI_MISC_HOST_CTRL,
2502			       tp->misc_host_ctrl);
2503
2504	switch (state) {
2505	case PCI_D0:
2506		pci_enable_wake(tp->pdev, state, false);
2507		pci_set_power_state(tp->pdev, PCI_D0);
2508
2509		/* Switch out of Vaux if it is a NIC */
2510		if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
2511			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
2512
2513		return 0;
2514
2515	case PCI_D1:
2516	case PCI_D2:
2517	case PCI_D3hot:
2518		break;
2519
2520	default:
2521		netdev_err(tp->dev, "Invalid power state (D%d) requested\n",
2522			   state);
2523		return -EINVAL;
2524	}
2525
2526	/* Restore the CLKREQ setting. */
2527	if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
2528		u16 lnkctl;
2529
2530		pci_read_config_word(tp->pdev,
2531				     tp->pcie_cap + PCI_EXP_LNKCTL,
2532				     &lnkctl);
2533		lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2534		pci_write_config_word(tp->pdev,
2535				      tp->pcie_cap + PCI_EXP_LNKCTL,
2536				      lnkctl);
2537	}
2538
2539	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2540	tw32(TG3PCI_MISC_HOST_CTRL,
2541	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2542
2543	device_should_wake = pci_pme_capable(tp->pdev, state) &&
2544			     device_may_wakeup(&tp->pdev->dev) &&
2545			     (tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
2546
2547	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
2548		do_low_power = false;
2549		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
2550		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2551			struct phy_device *phydev;
2552			u32 phyid, advertising;
2553
2554			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2555
2556			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2557
2558			tp->link_config.orig_speed = phydev->speed;
2559			tp->link_config.orig_duplex = phydev->duplex;
2560			tp->link_config.orig_autoneg = phydev->autoneg;
2561			tp->link_config.orig_advertising = phydev->advertising;
2562
2563			advertising = ADVERTISED_TP |
2564				      ADVERTISED_Pause |
2565				      ADVERTISED_Autoneg |
2566				      ADVERTISED_10baseT_Half;
2567
2568			if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2569			    device_should_wake) {
2570				if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2571					advertising |=
2572						ADVERTISED_100baseT_Half |
2573						ADVERTISED_100baseT_Full |
2574						ADVERTISED_10baseT_Full;
2575				else
2576					advertising |= ADVERTISED_10baseT_Full;
2577			}
2578
2579			phydev->advertising = advertising;
2580
2581			phy_start_aneg(phydev);
2582
2583			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2584			if (phyid != PHY_ID_BCMAC131) {
2585				phyid &= PHY_BCM_OUI_MASK;
2586				if (phyid == PHY_BCM_OUI_1 ||
2587				    phyid == PHY_BCM_OUI_2 ||
2588				    phyid == PHY_BCM_OUI_3)
2589					do_low_power = true;
2590			}
2591		}
2592	} else {
2593		do_low_power = true;
2594
2595		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2596			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2597			tp->link_config.orig_speed = tp->link_config.speed;
2598			tp->link_config.orig_duplex = tp->link_config.duplex;
2599			tp->link_config.orig_autoneg = tp->link_config.autoneg;
2600		}
2601
2602		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
2603			tp->link_config.speed = SPEED_10;
2604			tp->link_config.duplex = DUPLEX_HALF;
2605			tp->link_config.autoneg = AUTONEG_ENABLE;
2606			tg3_setup_phy(tp, 0);
2607		}
2608	}
2609
2610	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2611		u32 val;
2612
2613		val = tr32(GRC_VCPU_EXT_CTRL);
2614		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2615	} else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2616		int i;
2617		u32 val;
2618
2619		for (i = 0; i < 200; i++) {
2620			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2621			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2622				break;
2623			msleep(1);
2624		}
2625	}
2626	if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
2627		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2628						     WOL_DRV_STATE_SHUTDOWN |
2629						     WOL_DRV_WOL |
2630						     WOL_SET_MAGIC_PKT);
2631
2632	if (device_should_wake) {
2633		u32 mac_mode;
2634
2635		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
2636			if (do_low_power) {
2637				tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
2638				udelay(40);
2639			}
2640
2641			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2642				mac_mode = MAC_MODE_PORT_MODE_GMII;
2643			else
2644				mac_mode = MAC_MODE_PORT_MODE_MII;
2645
2646			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2647			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2648			    ASIC_REV_5700) {
2649				u32 speed = (tp->tg3_flags &
2650					     TG3_FLAG_WOL_SPEED_100MB) ?
2651					     SPEED_100 : SPEED_10;
2652				if (tg3_5700_link_polarity(tp, speed))
2653					mac_mode |= MAC_MODE_LINK_POLARITY;
2654				else
2655					mac_mode &= ~MAC_MODE_LINK_POLARITY;
2656			}
2657		} else {
2658			mac_mode = MAC_MODE_PORT_MODE_TBI;
2659		}
2660
2661		if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
2662			tw32(MAC_LED_CTRL, tp->led_ctrl);
2663
2664		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2665		if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
2666		    !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) &&
2667		    ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2668		     (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
2669			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2670
2671		if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
2672			mac_mode |= tp->mac_mode &
2673				    (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
2674			if (mac_mode & MAC_MODE_APE_TX_EN)
2675				mac_mode |= MAC_MODE_TDE_ENABLE;
2676		}
2677
2678		tw32_f(MAC_MODE, mac_mode);
2679		udelay(100);
2680
2681		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2682		udelay(10);
2683	}
2684
2685	if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
2686	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2687	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2688		u32 base_val;
2689
2690		base_val = tp->pci_clock_ctrl;
2691		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2692			     CLOCK_CTRL_TXCLK_DISABLE);
2693
2694		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2695			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
2696	} else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
2697		   (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
2698		   (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
2699		/* do nothing */
2700	} else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2701		     (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
2702		u32 newbits1, newbits2;
2703
2704		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2705		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2706			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2707				    CLOCK_CTRL_TXCLK_DISABLE |
2708				    CLOCK_CTRL_ALTCLK);
2709			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2710		} else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
2711			newbits1 = CLOCK_CTRL_625_CORE;
2712			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2713		} else {
2714			newbits1 = CLOCK_CTRL_ALTCLK;
2715			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2716		}
2717
2718		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2719			    40);
2720
2721		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2722			    40);
2723
2724		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2725			u32 newbits3;
2726
2727			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2728			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2729				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2730					    CLOCK_CTRL_TXCLK_DISABLE |
2731					    CLOCK_CTRL_44MHZ_CORE);
2732			} else {
2733				newbits3 = CLOCK_CTRL_44MHZ_CORE;
2734			}
2735
2736			tw32_wait_f(TG3PCI_CLOCK_CTRL,
2737				    tp->pci_clock_ctrl | newbits3, 40);
2738		}
2739	}
2740
2741	if (!(device_should_wake) &&
2742	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
2743		tg3_power_down_phy(tp, do_low_power);
2744
2745	tg3_frob_aux_power(tp);
2746
2747	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2748	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2749		u32 val = tr32(0x7d00);
2750
2751		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2752		tw32(0x7d00, val);
2753		if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2754			int err;
2755
2756			err = tg3_nvram_lock(tp);
2757			tg3_halt_cpu(tp, RX_CPU_BASE);
2758			if (!err)
2759				tg3_nvram_unlock(tp);
2760		}
2761	}
2762
2763	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2764
2765	if (device_should_wake)
2766		pci_enable_wake(tp->pdev, state, true);
2767
2768	/* Finally, set the new power state. */
2769	pci_set_power_state(tp->pdev, state);
2770
2771	return 0;
2772}
2773
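/* Decode the speed/duplex field of the PHY auxiliary status
 * register.  FET PHYs encode speed and duplex in separate bits,
 * handled in the default case; anything unrecognized is reported
 * as SPEED_INVALID/DUPLEX_INVALID.
 */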
2774static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2775{
2776	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2777	case MII_TG3_AUX_STAT_10HALF:
2778		*speed = SPEED_10;
2779		*duplex = DUPLEX_HALF;
2780		break;
2781
2782	case MII_TG3_AUX_STAT_10FULL:
2783		*speed = SPEED_10;
2784		*duplex = DUPLEX_FULL;
2785		break;
2786
2787	case MII_TG3_AUX_STAT_100HALF:
2788		*speed = SPEED_100;
2789		*duplex = DUPLEX_HALF;
2790		break;
2791
2792	case MII_TG3_AUX_STAT_100FULL:
2793		*speed = SPEED_100;
2794		*duplex = DUPLEX_FULL;
2795		break;
2796
2797	case MII_TG3_AUX_STAT_1000HALF:
2798		*speed = SPEED_1000;
2799		*duplex = DUPLEX_HALF;
2800		break;
2801
2802	case MII_TG3_AUX_STAT_1000FULL:
2803		*speed = SPEED_1000;
2804		*duplex = DUPLEX_FULL;
2805		break;
2806
2807	default:
2808		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2809			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2810				 SPEED_10;
2811			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2812				  DUPLEX_HALF;
2813			break;
2814		}
2815		*speed = SPEED_INVALID;
2816		*duplex = DUPLEX_INVALID;
2817		break;
2818	}
2819}
2820
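/* Program the PHY advertisement and (re)start the link.  Three
 * cases: low-power mode advertises 10Mb only (plus 100Mb if WOL
 * needs it), SPEED_INVALID means autoneg with everything in
 * link_config.advertising, and otherwise a single forced mode is
 * advertised.  For a forced link with autoneg disabled, BMCR is
 * written directly, using a brief loopback to drop the old link
 * first.
 */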
2821static void tg3_phy_copper_begin(struct tg3 *tp)
2822{
2823	u32 new_adv;
2824	int i;
2825
2826	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2827		/* Entering low power mode.  Disable gigabit and
2828		 * 100baseT advertisements.
2829		 */
2830		tg3_writephy(tp, MII_TG3_CTRL, 0);
2831
2832		new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
2833			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
2834		if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2835			new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
2836
2837		tg3_writephy(tp, MII_ADVERTISE, new_adv);
2838	} else if (tp->link_config.speed == SPEED_INVALID) {
2839		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
2840			tp->link_config.advertising &=
2841				~(ADVERTISED_1000baseT_Half |
2842				  ADVERTISED_1000baseT_Full);
2843
2844		new_adv = ADVERTISE_CSMA;
2845		if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
2846			new_adv |= ADVERTISE_10HALF;
2847		if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
2848			new_adv |= ADVERTISE_10FULL;
2849		if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
2850			new_adv |= ADVERTISE_100HALF;
2851		if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
2852			new_adv |= ADVERTISE_100FULL;
2853
2854		new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2855
2856		tg3_writephy(tp, MII_ADVERTISE, new_adv);
2857
2858		if (tp->link_config.advertising &
2859		    (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
2860			new_adv = 0;
2861			if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2862				new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2863			if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2864				new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2865			if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY) &&
2866			    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2867			     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
2868				new_adv |= (MII_TG3_CTRL_AS_MASTER |
2869					    MII_TG3_CTRL_ENABLE_AS_MASTER);
2870			tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2871		} else {
2872			tg3_writephy(tp, MII_TG3_CTRL, 0);
2873		}
2874	} else {
2875		new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2876		new_adv |= ADVERTISE_CSMA;
2877
2878		/* Asking for a specific link mode. */
2879		if (tp->link_config.speed == SPEED_1000) {
2880			tg3_writephy(tp, MII_ADVERTISE, new_adv);
2881
2882			if (tp->link_config.duplex == DUPLEX_FULL)
2883				new_adv = MII_TG3_CTRL_ADV_1000_FULL;
2884			else
2885				new_adv = MII_TG3_CTRL_ADV_1000_HALF;
2886			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2887			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2888				new_adv |= (MII_TG3_CTRL_AS_MASTER |
2889					    MII_TG3_CTRL_ENABLE_AS_MASTER);
2890		} else {
2891			if (tp->link_config.speed == SPEED_100) {
2892				if (tp->link_config.duplex == DUPLEX_FULL)
2893					new_adv |= ADVERTISE_100FULL;
2894				else
2895					new_adv |= ADVERTISE_100HALF;
2896			} else {
2897				if (tp->link_config.duplex == DUPLEX_FULL)
2898					new_adv |= ADVERTISE_10FULL;
2899				else
2900					new_adv |= ADVERTISE_10HALF;
2901			}
2902			tg3_writephy(tp, MII_ADVERTISE, new_adv);
2903
2904			new_adv = 0;
2905		}
2906
2907		tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2908	}
2909
2910	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
2911	    tp->link_config.speed != SPEED_INVALID) {
2912		u32 bmcr, orig_bmcr;
2913
2914		tp->link_config.active_speed = tp->link_config.speed;
2915		tp->link_config.active_duplex = tp->link_config.duplex;
2916
2917		bmcr = 0;
2918		switch (tp->link_config.speed) {
2919		default:
2920		case SPEED_10:
2921			break;
2922
2923		case SPEED_100:
2924			bmcr |= BMCR_SPEED100;
2925			break;
2926
2927		case SPEED_1000:
2928			bmcr |= TG3_BMCR_SPEED1000;
2929			break;
2930		}
2931
2932		if (tp->link_config.duplex == DUPLEX_FULL)
2933			bmcr |= BMCR_FULLDPLX;
2934
2935		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
2936		    (bmcr != orig_bmcr)) {
2937			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
2938			for (i = 0; i < 1500; i++) {
2939				u32 tmp;
2940
2941				udelay(10);
2942				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
2943				    tg3_readphy(tp, MII_BMSR, &tmp))
2944					continue;
2945				if (!(tmp & BMSR_LSTATUS)) {
2946					udelay(40);
2947					break;
2948				}
2949			}
2950			tg3_writephy(tp, MII_BMCR, bmcr);
2951			udelay(40);
2952		}
2953	} else {
2954		tg3_writephy(tp, MII_BMCR,
2955			     BMCR_ANENABLE | BMCR_ANRESTART);
2956	}
2957}
2958
2959static int tg3_init_5401phy_dsp(struct tg3 *tp)
2960{
2961	int err;
2962
2963	/* Turn off tap power management and set the
2964	 * Extended packet length bit. */
2965	err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2966
2967	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
2968	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
2969	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
2970	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
2971	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
2972
2973	udelay(40);
2974
2975	return err;
2976}
2977
2978static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
2979{
2980	u32 adv_reg, all_mask = 0;
2981
2982	if (mask & ADVERTISED_10baseT_Half)
2983		all_mask |= ADVERTISE_10HALF;
2984	if (mask & ADVERTISED_10baseT_Full)
2985		all_mask |= ADVERTISE_10FULL;
2986	if (mask & ADVERTISED_100baseT_Half)
2987		all_mask |= ADVERTISE_100HALF;
2988	if (mask & ADVERTISED_100baseT_Full)
2989		all_mask |= ADVERTISE_100FULL;
2990
2991	if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2992		return 0;
2993
2994	if ((adv_reg & all_mask) != all_mask)
2995		return 0;
2996	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2997		u32 tg3_ctrl;
2998
2999		all_mask = 0;
3000		if (mask & ADVERTISED_1000baseT_Half)
3001			all_mask |= ADVERTISE_1000HALF;
3002		if (mask & ADVERTISED_1000baseT_Full)
3003			all_mask |= ADVERTISE_1000FULL;
3004
3005		if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
3006			return 0;
3007
3008		if ((tg3_ctrl & all_mask) != all_mask)
3009			return 0;
3010	}
3011	return 1;
3012}
3013
3014static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3015{
3016	u32 curadv, reqadv;
3017
3018	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3019		return 1;
3020
3021	curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3022	reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3023
3024	if (tp->link_config.active_duplex == DUPLEX_FULL) {
3025		if (curadv != reqadv)
3026			return 0;
3027
3028		if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
3029			tg3_readphy(tp, MII_LPA, rmtadv);
3030	} else {
3031		/* Reprogram the advertisement register, even if it
3032		 * does not affect the current link.  If the link
3033		 * gets renegotiated in the future, we can save an
3034		 * additional renegotiation cycle by advertising
3035		 * it correctly in the first place.
3036		 */
3037		if (curadv != reqadv) {
3038			*lcladv &= ~(ADVERTISE_PAUSE_CAP |
3039				     ADVERTISE_PAUSE_ASYM);
3040			tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3041		}
3042	}
3043
3044	return 1;
3045}
3046
3047static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3048{
3049	int current_link_up;
3050	u32 bmsr, dummy;
3051	u32 lcl_adv, rmt_adv;
3052	u16 current_speed;
3053	u8 current_duplex;
3054	int i, err;
3055
3056	tw32(MAC_EVENT, 0);
3057
3058	tw32_f(MAC_STATUS,
3059	     (MAC_STATUS_SYNC_CHANGED |
3060	      MAC_STATUS_CFG_CHANGED |
3061	      MAC_STATUS_MI_COMPLETION |
3062	      MAC_STATUS_LNKSTATE_CHANGED));
3063	udelay(40);
3064
3065	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3066		tw32_f(MAC_MI_MODE,
3067		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3068		udelay(80);
3069	}
3070
3071	tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
3072
3073	/* Some third-party PHYs need to be reset on link going
3074	 * down.
3075	 */
3076	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3077	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3078	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3079	    netif_carrier_ok(tp->dev)) {
3080		tg3_readphy(tp, MII_BMSR, &bmsr);
3081		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3082		    !(bmsr & BMSR_LSTATUS))
3083			force_reset = 1;
3084	}
3085	if (force_reset)
3086		tg3_phy_reset(tp);
3087
3088	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3089		tg3_readphy(tp, MII_BMSR, &bmsr);
3090		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3091		    !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
3092			bmsr = 0;
3093
3094		if (!(bmsr & BMSR_LSTATUS)) {
3095			err = tg3_init_5401phy_dsp(tp);
3096			if (err)
3097				return err;
3098
3099			tg3_readphy(tp, MII_BMSR, &bmsr);
3100			for (i = 0; i < 1000; i++) {
3101				udelay(10);
3102				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3103				    (bmsr & BMSR_LSTATUS)) {
3104					udelay(40);
3105					break;
3106				}
3107			}
3108
3109			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3110			    TG3_PHY_REV_BCM5401_B0 &&
3111			    !(bmsr & BMSR_LSTATUS) &&
3112			    tp->link_config.active_speed == SPEED_1000) {
3113				err = tg3_phy_reset(tp);
3114				if (!err)
3115					err = tg3_init_5401phy_dsp(tp);
3116				if (err)
3117					return err;
3118			}
3119		}
3120	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3121		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3122		tg3_writephy(tp, 0x15, 0x0a75);
3123		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3124		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3125		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3126	}
3127
3128	/* Clear pending interrupts... */
3129	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
3130	tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
3131
3132	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3133		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3134	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3135		tg3_writephy(tp, MII_TG3_IMASK, ~0);
3136
3137	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3138	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3139		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3140			tg3_writephy(tp, MII_TG3_EXT_CTRL,
3141				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3142		else
3143			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3144	}
3145
3146	current_link_up = 0;
3147	current_speed = SPEED_INVALID;
3148	current_duplex = DUPLEX_INVALID;
3149
3150	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3151		u32 val;
3152
3153		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
3154		tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
3155		if (!(val & (1 << 10))) {
3156			val |= (1 << 10);
3157			tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
3158			goto relink;
3159		}
3160	}
3161
3162	bmsr = 0;
3163	for (i = 0; i < 100; i++) {
3164		tg3_readphy(tp, MII_BMSR, &bmsr);
3165		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3166		    (bmsr & BMSR_LSTATUS))
3167			break;
3168		udelay(40);
3169	}
3170
3171	if (bmsr & BMSR_LSTATUS) {
3172		u32 aux_stat, bmcr;
3173
3174		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3175		for (i = 0; i < 2000; i++) {
3176			udelay(10);
3177			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3178			    aux_stat)
3179				break;
3180		}
3181
3182		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3183					     &current_speed,
3184					     &current_duplex);
3185
3186		bmcr = 0;
3187		for (i = 0; i < 200; i++) {
3188			tg3_readphy(tp, MII_BMCR, &bmcr);
3189			if (tg3_readphy(tp, MII_BMCR, &bmcr))
3190				continue;
3191			if (bmcr && bmcr != 0x7fff)
3192				break;
3193			udelay(10);
3194		}
3195
3196		lcl_adv = 0;
3197		rmt_adv = 0;
3198
3199		tp->link_config.active_speed = current_speed;
3200		tp->link_config.active_duplex = current_duplex;
3201
3202		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3203			if ((bmcr & BMCR_ANENABLE) &&
3204			    tg3_copper_is_advertising_all(tp,
3205						tp->link_config.advertising)) {
3206				if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3207								  &rmt_adv))
3208					current_link_up = 1;
3209			}
3210		} else {
3211			if (!(bmcr & BMCR_ANENABLE) &&
3212			    tp->link_config.speed == current_speed &&
3213			    tp->link_config.duplex == current_duplex &&
3214			    tp->link_config.flowctrl ==
3215			    tp->link_config.active_flowctrl) {
3216				current_link_up = 1;
3217			}
3218		}
3219
3220		if (current_link_up == 1 &&
3221		    tp->link_config.active_duplex == DUPLEX_FULL)
3222			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3223	}
3224
3225relink:
3226	if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3227		u32 tmp;
3228
3229		tg3_phy_copper_begin(tp);
3230
3231		tg3_readphy(tp, MII_BMSR, &tmp);
3232		if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
3233		    (tmp & BMSR_LSTATUS))
3234			current_link_up = 1;
3235	}
3236
3237	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3238	if (current_link_up == 1) {
3239		if (tp->link_config.active_speed == SPEED_100 ||
3240		    tp->link_config.active_speed == SPEED_10)
3241			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3242		else
3243			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3244	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
3245		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3246	else
3247		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3248
3249	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3250	if (tp->link_config.active_duplex == DUPLEX_HALF)
3251		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3252
3253	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3254		if (current_link_up == 1 &&
3255		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3256			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3257		else
3258			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3259	}
3260
3261	/* ??? Without this setting Netgear GA302T PHY does not
3262	 * ??? send/receive packets...
3263	 */
3264	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
3265	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3266		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3267		tw32_f(MAC_MI_MODE, tp->mi_mode);
3268		udelay(80);
3269	}
3270
3271	tw32_f(MAC_MODE, tp->mac_mode);
3272	udelay(40);
3273
3274	if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
3275		/* Polled via timer. */
3276		tw32_f(MAC_EVENT, 0);
3277	} else {
3278		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3279	}
3280	udelay(40);
3281
3282	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3283	    current_link_up == 1 &&
3284	    tp->link_config.active_speed == SPEED_1000 &&
3285	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
3286	     (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
3287		udelay(120);
3288		tw32_f(MAC_STATUS,
3289		     (MAC_STATUS_SYNC_CHANGED |
3290		      MAC_STATUS_CFG_CHANGED));
3291		udelay(40);
3292		tg3_write_mem(tp,
3293			      NIC_SRAM_FIRMWARE_MBOX,
3294			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3295	}
3296
3297	/* Prevent send BD corruption: CLKREQ must stay off at 10/100 Mbps. */
3298	if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
3299		u16 oldlnkctl, newlnkctl;
3300
3301		pci_read_config_word(tp->pdev,
3302				     tp->pcie_cap + PCI_EXP_LNKCTL,
3303				     &oldlnkctl);
3304		if (tp->link_config.active_speed == SPEED_100 ||
3305		    tp->link_config.active_speed == SPEED_10)
3306			newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3307		else
3308			newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3309		if (newlnkctl != oldlnkctl)
3310			pci_write_config_word(tp->pdev,
3311					      tp->pcie_cap + PCI_EXP_LNKCTL,
3312					      newlnkctl);
3313	}
3314
3315	if (current_link_up != netif_carrier_ok(tp->dev)) {
3316		if (current_link_up)
3317			netif_carrier_on(tp->dev);
3318		else
3319			netif_carrier_off(tp->dev);
3320		tg3_link_report(tp);
3321	}
3322
3323	return 0;
3324}
3325
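/* Software implementation of 1000BASE-X autoneg, essentially the
 * IEEE 802.3 Clause 37 arbitration state machine, used when the
 * SERDES hardware cannot negotiate on its own.  The MR_* flags
 * mirror the management register bits defined by the standard.
 */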
3326struct tg3_fiber_aneginfo {
3327	int state;
3328#define ANEG_STATE_UNKNOWN		0
3329#define ANEG_STATE_AN_ENABLE		1
3330#define ANEG_STATE_RESTART_INIT		2
3331#define ANEG_STATE_RESTART		3
3332#define ANEG_STATE_DISABLE_LINK_OK	4
3333#define ANEG_STATE_ABILITY_DETECT_INIT	5
3334#define ANEG_STATE_ABILITY_DETECT	6
3335#define ANEG_STATE_ACK_DETECT_INIT	7
3336#define ANEG_STATE_ACK_DETECT		8
3337#define ANEG_STATE_COMPLETE_ACK_INIT	9
3338#define ANEG_STATE_COMPLETE_ACK		10
3339#define ANEG_STATE_IDLE_DETECT_INIT	11
3340#define ANEG_STATE_IDLE_DETECT		12
3341#define ANEG_STATE_LINK_OK		13
3342#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
3343#define ANEG_STATE_NEXT_PAGE_WAIT	15
3344
3345	u32 flags;
3346#define MR_AN_ENABLE		0x00000001
3347#define MR_RESTART_AN		0x00000002
3348#define MR_AN_COMPLETE		0x00000004
3349#define MR_PAGE_RX		0x00000008
3350#define MR_NP_LOADED		0x00000010
3351#define MR_TOGGLE_TX		0x00000020
3352#define MR_LP_ADV_FULL_DUPLEX	0x00000040
3353#define MR_LP_ADV_HALF_DUPLEX	0x00000080
3354#define MR_LP_ADV_SYM_PAUSE	0x00000100
3355#define MR_LP_ADV_ASYM_PAUSE	0x00000200
3356#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
3357#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
3358#define MR_LP_ADV_NEXT_PAGE	0x00001000
3359#define MR_TOGGLE_RX		0x00002000
3360#define MR_NP_RX		0x00004000
3361
3362#define MR_LINK_OK		0x80000000
3363
3364	unsigned long link_time, cur_time;
3365
3366	u32 ability_match_cfg;
3367	int ability_match_count;
3368
3369	char ability_match, idle_match, ack_match;
3370
3371	u32 txconfig, rxconfig;
3372#define ANEG_CFG_NP		0x00000080
3373#define ANEG_CFG_ACK		0x00000040
3374#define ANEG_CFG_RF2		0x00000020
3375#define ANEG_CFG_RF1		0x00000010
3376#define ANEG_CFG_PS2		0x00000001
3377#define ANEG_CFG_PS1		0x00008000
3378#define ANEG_CFG_HD		0x00004000
3379#define ANEG_CFG_FD		0x00002000
3380#define ANEG_CFG_INVAL		0x00001f06
3381
3382};
3383#define ANEG_OK		0
3384#define ANEG_DONE	1
3385#define ANEG_TIMER_ENAB	2
3386#define ANEG_FAILED	-1
3387
3388#define ANEG_STATE_SETTLE_TIME	10000
3389
3390static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3391				   struct tg3_fiber_aneginfo *ap)
3392{
3393	u16 flowctrl;
3394	unsigned long delta;
3395	u32 rx_cfg_reg;
3396	int ret;
3397
3398	if (ap->state == ANEG_STATE_UNKNOWN) {
3399		ap->rxconfig = 0;
3400		ap->link_time = 0;
3401		ap->cur_time = 0;
3402		ap->ability_match_cfg = 0;
3403		ap->ability_match_count = 0;
3404		ap->ability_match = 0;
3405		ap->idle_match = 0;
3406		ap->ack_match = 0;
3407	}
3408	ap->cur_time++;
3409
3410	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3411		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3412
3413		if (rx_cfg_reg != ap->ability_match_cfg) {
3414			ap->ability_match_cfg = rx_cfg_reg;
3415			ap->ability_match = 0;
3416			ap->ability_match_count = 0;
3417		} else {
3418			if (++ap->ability_match_count > 1) {
3419				ap->ability_match = 1;
3420				ap->ability_match_cfg = rx_cfg_reg;
3421			}
3422		}
3423		if (rx_cfg_reg & ANEG_CFG_ACK)
3424			ap->ack_match = 1;
3425		else
3426			ap->ack_match = 0;
3427
3428		ap->idle_match = 0;
3429	} else {
3430		ap->idle_match = 1;
3431		ap->ability_match_cfg = 0;
3432		ap->ability_match_count = 0;
3433		ap->ability_match = 0;
3434		ap->ack_match = 0;
3435
3436		rx_cfg_reg = 0;
3437	}
3438
3439	ap->rxconfig = rx_cfg_reg;
3440	ret = ANEG_OK;
3441
3442	switch (ap->state) {
3443	case ANEG_STATE_UNKNOWN:
3444		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3445			ap->state = ANEG_STATE_AN_ENABLE;
3446
3447		/* fallthru */
3448	case ANEG_STATE_AN_ENABLE:
3449		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3450		if (ap->flags & MR_AN_ENABLE) {
3451			ap->link_time = 0;
3452			ap->cur_time = 0;
3453			ap->ability_match_cfg = 0;
3454			ap->ability_match_count = 0;
3455			ap->ability_match = 0;
3456			ap->idle_match = 0;
3457			ap->ack_match = 0;
3458
3459			ap->state = ANEG_STATE_RESTART_INIT;
3460		} else {
3461			ap->state = ANEG_STATE_DISABLE_LINK_OK;
3462		}
3463		break;
3464
3465	case ANEG_STATE_RESTART_INIT:
3466		ap->link_time = ap->cur_time;
3467		ap->flags &= ~(MR_NP_LOADED);
3468		ap->txconfig = 0;
3469		tw32(MAC_TX_AUTO_NEG, 0);
3470		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3471		tw32_f(MAC_MODE, tp->mac_mode);
3472		udelay(40);
3473
3474		ret = ANEG_TIMER_ENAB;
3475		ap->state = ANEG_STATE_RESTART;
3476
3477		/* fallthru */
3478	case ANEG_STATE_RESTART:
3479		delta = ap->cur_time - ap->link_time;
3480		if (delta > ANEG_STATE_SETTLE_TIME)
3481			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3482		else
3483			ret = ANEG_TIMER_ENAB;
3484		break;
3485
3486	case ANEG_STATE_DISABLE_LINK_OK:
3487		ret = ANEG_DONE;
3488		break;
3489
3490	case ANEG_STATE_ABILITY_DETECT_INIT:
3491		ap->flags &= ~(MR_TOGGLE_TX);
3492		ap->txconfig = ANEG_CFG_FD;
3493		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3494		if (flowctrl & ADVERTISE_1000XPAUSE)
3495			ap->txconfig |= ANEG_CFG_PS1;
3496		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3497			ap->txconfig |= ANEG_CFG_PS2;
3498		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3499		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3500		tw32_f(MAC_MODE, tp->mac_mode);
3501		udelay(40);
3502
3503		ap->state = ANEG_STATE_ABILITY_DETECT;
3504		break;
3505
3506	case ANEG_STATE_ABILITY_DETECT:
3507		if (ap->ability_match != 0 && ap->rxconfig != 0)
3508			ap->state = ANEG_STATE_ACK_DETECT_INIT;
3509		break;
3510
3511	case ANEG_STATE_ACK_DETECT_INIT:
3512		ap->txconfig |= ANEG_CFG_ACK;
3513		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3514		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3515		tw32_f(MAC_MODE, tp->mac_mode);
3516		udelay(40);
3517
3518		ap->state = ANEG_STATE_ACK_DETECT;
3519
3520		/* fallthru */
3521	case ANEG_STATE_ACK_DETECT:
3522		if (ap->ack_match != 0) {
3523			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3524			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3525				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3526			} else {
3527				ap->state = ANEG_STATE_AN_ENABLE;
3528			}
3529		} else if (ap->ability_match != 0 &&
3530			   ap->rxconfig == 0) {
3531			ap->state = ANEG_STATE_AN_ENABLE;
3532		}
3533		break;
3534
3535	case ANEG_STATE_COMPLETE_ACK_INIT:
3536		if (ap->rxconfig & ANEG_CFG_INVAL) {
3537			ret = ANEG_FAILED;
3538			break;
3539		}
3540		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3541			       MR_LP_ADV_HALF_DUPLEX |
3542			       MR_LP_ADV_SYM_PAUSE |
3543			       MR_LP_ADV_ASYM_PAUSE |
3544			       MR_LP_ADV_REMOTE_FAULT1 |
3545			       MR_LP_ADV_REMOTE_FAULT2 |
3546			       MR_LP_ADV_NEXT_PAGE |
3547			       MR_TOGGLE_RX |
3548			       MR_NP_RX);
3549		if (ap->rxconfig & ANEG_CFG_FD)
3550			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3551		if (ap->rxconfig & ANEG_CFG_HD)
3552			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3553		if (ap->rxconfig & ANEG_CFG_PS1)
3554			ap->flags |= MR_LP_ADV_SYM_PAUSE;
3555		if (ap->rxconfig & ANEG_CFG_PS2)
3556			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3557		if (ap->rxconfig & ANEG_CFG_RF1)
3558			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3559		if (ap->rxconfig & ANEG_CFG_RF2)
3560			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3561		if (ap->rxconfig & ANEG_CFG_NP)
3562			ap->flags |= MR_LP_ADV_NEXT_PAGE;
3563
3564		ap->link_time = ap->cur_time;
3565
3566		ap->flags ^= (MR_TOGGLE_TX);
3567		if (ap->rxconfig & 0x0008)
3568			ap->flags |= MR_TOGGLE_RX;
3569		if (ap->rxconfig & ANEG_CFG_NP)
3570			ap->flags |= MR_NP_RX;
3571		ap->flags |= MR_PAGE_RX;
3572
3573		ap->state = ANEG_STATE_COMPLETE_ACK;
3574		ret = ANEG_TIMER_ENAB;
3575		break;
3576
3577	case ANEG_STATE_COMPLETE_ACK:
3578		if (ap->ability_match != 0 &&
3579		    ap->rxconfig == 0) {
3580			ap->state = ANEG_STATE_AN_ENABLE;
3581			break;
3582		}
3583		delta = ap->cur_time - ap->link_time;
3584		if (delta > ANEG_STATE_SETTLE_TIME) {
3585			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3586				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3587			} else {
3588				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3589				    !(ap->flags & MR_NP_RX)) {
3590					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3591				} else {
3592					ret = ANEG_FAILED;
3593				}
3594			}
3595		}
3596		break;
3597
3598	case ANEG_STATE_IDLE_DETECT_INIT:
3599		ap->link_time = ap->cur_time;
3600		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3601		tw32_f(MAC_MODE, tp->mac_mode);
3602		udelay(40);
3603
3604		ap->state = ANEG_STATE_IDLE_DETECT;
3605		ret = ANEG_TIMER_ENAB;
3606		break;
3607
3608	case ANEG_STATE_IDLE_DETECT:
3609		if (ap->ability_match != 0 &&
3610		    ap->rxconfig == 0) {
3611			ap->state = ANEG_STATE_AN_ENABLE;
3612			break;
3613		}
3614		delta = ap->cur_time - ap->link_time;
3615		if (delta > ANEG_STATE_SETTLE_TIME) {
3616			ap->state = ANEG_STATE_LINK_OK;
3617		}
3618		break;
3619
3620	case ANEG_STATE_LINK_OK:
3621		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3622		ret = ANEG_DONE;
3623		break;
3624
3625	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3626		/* ??? unimplemented */
3627		break;
3628
3629	case ANEG_STATE_NEXT_PAGE_WAIT:
3630		/* ??? unimplemented */
3631		break;
3632
3633	default:
3634		ret = ANEG_FAILED;
3635		break;
3636	}
3637
3638	return ret;
3639}
3640
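/* Run the fiber autoneg state machine, ticking it roughly once
 * per microsecond for at most 195ms.  Returns 1 if the machine
 * finished (ANEG_DONE) and any of the completion, link-OK, or
 * partner-full-duplex flags came back set.
 */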
3641static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3642{
3643	int res = 0;
3644	struct tg3_fiber_aneginfo aninfo;
3645	int status = ANEG_FAILED;
3646	unsigned int tick;
3647	u32 tmp;
3648
3649	tw32_f(MAC_TX_AUTO_NEG, 0);
3650
3651	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3652	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3653	udelay(40);
3654
3655	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3656	udelay(40);
3657
3658	memset(&aninfo, 0, sizeof(aninfo));
3659	aninfo.flags |= MR_AN_ENABLE;
3660	aninfo.state = ANEG_STATE_UNKNOWN;
3661	aninfo.cur_time = 0;
3662	tick = 0;
3663	while (++tick < 195000) {
3664		status = tg3_fiber_aneg_smachine(tp, &aninfo);
3665		if (status == ANEG_DONE || status == ANEG_FAILED)
3666			break;
3667
3668		udelay(1);
3669	}
3670
3671	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3672	tw32_f(MAC_MODE, tp->mac_mode);
3673	udelay(40);
3674
3675	*txflags = aninfo.txconfig;
3676	*rxflags = aninfo.flags;
3677
3678	if (status == ANEG_DONE &&
3679	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3680			     MR_LP_ADV_FULL_DUPLEX)))
3681		res = 1;
3682
3683	return res;
3684}
3685
3686static void tg3_init_bcm8002(struct tg3 *tp)
3687{
3688	u32 mac_status = tr32(MAC_STATUS);
3689	int i;
3690
3691	/* Reset when initializing for the first time, or when we have a link. */
3692	if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
3693	    !(mac_status & MAC_STATUS_PCS_SYNCED))
3694		return;
3695
3696	/* Set PLL lock range. */
3697	tg3_writephy(tp, 0x16, 0x8007);
3698
3699	/* SW reset */
3700	tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3701
3702	/* Wait for reset to complete. */
3703	for (i = 0; i < 500; i++)
3704		udelay(10);
3705
3706	/* Config mode; select PMA/Ch 1 regs. */
3707	tg3_writephy(tp, 0x10, 0x8411);
3708
3709	/* Enable auto-lock and comdet, select txclk for tx. */
3710	tg3_writephy(tp, 0x11, 0x0a10);
3711
3712	tg3_writephy(tp, 0x18, 0x00a0);
3713	tg3_writephy(tp, 0x16, 0x41ff);
3714
3715	/* Assert and deassert POR. */
3716	tg3_writephy(tp, 0x13, 0x0400);
3717	udelay(40);
3718	tg3_writephy(tp, 0x13, 0x0000);
3719
3720	tg3_writephy(tp, 0x11, 0x0a50);
3721	udelay(40);
3722	tg3_writephy(tp, 0x11, 0x0a10);
3723
3724	/* Wait for signal to stabilize */
3725	for (i = 0; i < 15000; i++)
3726		udelay(10);
3727
3728	/* Deselect the channel register so we can read the PHYID
3729	 * later.
3730	 */
3731	tg3_writephy(tp, 0x10, 0x8011);
3732}
3733
3734static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3735{
3736	u16 flowctrl;
3737	u32 sg_dig_ctrl, sg_dig_status;
3738	u32 serdes_cfg, expected_sg_dig_ctrl;
3739	int workaround, port_a;
3740	int current_link_up;
3741
3742	serdes_cfg = 0;
3743	expected_sg_dig_ctrl = 0;
3744	workaround = 0;
3745	port_a = 1;
3746	current_link_up = 0;
3747
3748	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3749	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3750		workaround = 1;
3751		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3752			port_a = 0;
3753
3754		/* preserve bits 0-11,13,14 for signal pre-emphasis */
3755		/* preserve bits 20-23 for voltage regulator */
3756		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3757	}
3758
3759	sg_dig_ctrl = tr32(SG_DIG_CTRL);
3760
3761	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3762		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3763			if (workaround) {
3764				u32 val = serdes_cfg;
3765
3766				if (port_a)
3767					val |= 0xc010000;
3768				else
3769					val |= 0x4010000;
3770				tw32_f(MAC_SERDES_CFG, val);
3771			}
3772
3773			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3774		}
3775		if (mac_status & MAC_STATUS_PCS_SYNCED) {
3776			tg3_setup_flow_control(tp, 0, 0);
3777			current_link_up = 1;
3778		}
3779		goto out;
3780	}
3781
3782	/* Want auto-negotiation.  */
3783	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3784
3785	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3786	if (flowctrl & ADVERTISE_1000XPAUSE)
3787		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3788	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3789		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3790
3791	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3792		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
3793		    tp->serdes_counter &&
3794		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
3795				    MAC_STATUS_RCVD_CFG)) ==
3796		     MAC_STATUS_PCS_SYNCED)) {
3797			tp->serdes_counter--;
3798			current_link_up = 1;
3799			goto out;
3800		}
3801restart_autoneg:
3802		if (workaround)
3803			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3804		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3805		udelay(5);
3806		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3807
3808		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3809		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3810	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3811				 MAC_STATUS_SIGNAL_DET)) {
3812		sg_dig_status = tr32(SG_DIG_STATUS);
3813		mac_status = tr32(MAC_STATUS);
3814
3815		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3816		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
3817			u32 local_adv = 0, remote_adv = 0;
3818
3819			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3820				local_adv |= ADVERTISE_1000XPAUSE;
3821			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3822				local_adv |= ADVERTISE_1000XPSE_ASYM;
3823
3824			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3825				remote_adv |= LPA_1000XPAUSE;
3826			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3827				remote_adv |= LPA_1000XPAUSE_ASYM;
3828
3829			tg3_setup_flow_control(tp, local_adv, remote_adv);
3830			current_link_up = 1;
3831			tp->serdes_counter = 0;
3832			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3833		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3834			if (tp->serdes_counter)
3835				tp->serdes_counter--;
3836			else {
3837				if (workaround) {
3838					u32 val = serdes_cfg;
3839
3840					if (port_a)
3841						val |= 0xc010000;
3842					else
3843						val |= 0x4010000;
3844
3845					tw32_f(MAC_SERDES_CFG, val);
3846				}
3847
3848				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3849				udelay(40);
3850
3851				/* Link parallel detection - link is up only
3852				 * if we have PCS_SYNC and are not receiving
3853				 * config code words. */
3854				mac_status = tr32(MAC_STATUS);
3855				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
3856				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
3857					tg3_setup_flow_control(tp, 0, 0);
3858					current_link_up = 1;
3859					tp->phy_flags |=
3860						TG3_PHYFLG_PARALLEL_DETECT;
3861					tp->serdes_counter =
3862						SERDES_PARALLEL_DET_TIMEOUT;
3863				} else
3864					goto restart_autoneg;
3865			}
3866		}
3867	} else {
3868		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3869		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3870	}
3871
3872out:
3873	return current_link_up;
3874}
3875
3876static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
3877{
3878	int current_link_up = 0;
3879
3880	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
3881		goto out;
3882
3883	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3884		u32 txflags, rxflags;
3885		int i;
3886
3887		if (fiber_autoneg(tp, &txflags, &rxflags)) {
3888			u32 local_adv = 0, remote_adv = 0;
3889
3890			if (txflags & ANEG_CFG_PS1)
3891				local_adv |= ADVERTISE_1000XPAUSE;
3892			if (txflags & ANEG_CFG_PS2)
3893				local_adv |= ADVERTISE_1000XPSE_ASYM;
3894
3895			if (rxflags & MR_LP_ADV_SYM_PAUSE)
3896				remote_adv |= LPA_1000XPAUSE;
3897			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
3898				remote_adv |= LPA_1000XPAUSE_ASYM;
3899
3900			tg3_setup_flow_control(tp, local_adv, remote_adv);
3901
3902			current_link_up = 1;
3903		}
3904		for (i = 0; i < 30; i++) {
3905			udelay(20);
3906			tw32_f(MAC_STATUS,
3907			       (MAC_STATUS_SYNC_CHANGED |
3908				MAC_STATUS_CFG_CHANGED));
3909			udelay(40);
3910			if ((tr32(MAC_STATUS) &
3911			     (MAC_STATUS_SYNC_CHANGED |
3912			      MAC_STATUS_CFG_CHANGED)) == 0)
3913				break;
3914		}
3915
3916		mac_status = tr32(MAC_STATUS);
3917		if (current_link_up == 0 &&
3918		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
3919		    !(mac_status & MAC_STATUS_RCVD_CFG))
3920			current_link_up = 1;
3921	} else {
3922		tg3_setup_flow_control(tp, 0, 0);
3923
3924		/* Forcing 1000FD link up. */
3925		current_link_up = 1;
3926
3927		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
3928		udelay(40);
3929
3930		tw32_f(MAC_MODE, tp->mac_mode);
3931		udelay(40);
3932	}
3933
3934out:
3935	return current_link_up;
3936}
3937
3938static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
3939{
3940	u32 orig_pause_cfg;
3941	u16 orig_active_speed;
3942	u8 orig_active_duplex;
3943	u32 mac_status;
3944	int current_link_up;
3945	int i;
3946
3947	orig_pause_cfg = tp->link_config.active_flowctrl;
3948	orig_active_speed = tp->link_config.active_speed;
3949	orig_active_duplex = tp->link_config.active_duplex;
3950
3951	if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
3952	    netif_carrier_ok(tp->dev) &&
3953	    (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
3954		mac_status = tr32(MAC_STATUS);
3955		mac_status &= (MAC_STATUS_PCS_SYNCED |
3956			       MAC_STATUS_SIGNAL_DET |
3957			       MAC_STATUS_CFG_CHANGED |
3958			       MAC_STATUS_RCVD_CFG);
3959		if (mac_status == (MAC_STATUS_PCS_SYNCED |
3960				   MAC_STATUS_SIGNAL_DET)) {
3961			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3962					    MAC_STATUS_CFG_CHANGED));
3963			return 0;
3964		}
3965	}
3966
3967	tw32_f(MAC_TX_AUTO_NEG, 0);
3968
3969	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
3970	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
3971	tw32_f(MAC_MODE, tp->mac_mode);
3972	udelay(40);
3973
3974	if (tp->phy_id == TG3_PHY_ID_BCM8002)
3975		tg3_init_bcm8002(tp);
3976
3977	/* Enable link change events even when polling the serdes.  */
3978	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3979	udelay(40);
3980
3981	current_link_up = 0;
3982	mac_status = tr32(MAC_STATUS);
3983
3984	if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
3985		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
3986	else
3987		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
3988
3989	tp->napi[0].hw_status->status =
3990		(SD_STATUS_UPDATED |
3991		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
3992
3993	for (i = 0; i < 100; i++) {
3994		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3995				    MAC_STATUS_CFG_CHANGED));
3996		udelay(5);
3997		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
3998					 MAC_STATUS_CFG_CHANGED |
3999					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4000			break;
4001	}
4002
4003	mac_status = tr32(MAC_STATUS);
4004	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4005		current_link_up = 0;
4006		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4007		    tp->serdes_counter == 0) {
4008			tw32_f(MAC_MODE, (tp->mac_mode |
4009					  MAC_MODE_SEND_CONFIGS));
4010			udelay(1);
4011			tw32_f(MAC_MODE, tp->mac_mode);
4012		}
4013	}
4014
4015	if (current_link_up == 1) {
4016		tp->link_config.active_speed = SPEED_1000;
4017		tp->link_config.active_duplex = DUPLEX_FULL;
4018		tw32(MAC_LED_CTRL, (tp->led_ctrl |
4019				    LED_CTRL_LNKLED_OVERRIDE |
4020				    LED_CTRL_1000MBPS_ON));
4021	} else {
4022		tp->link_config.active_speed = SPEED_INVALID;
4023		tp->link_config.active_duplex = DUPLEX_INVALID;
4024		tw32(MAC_LED_CTRL, (tp->led_ctrl |
4025				    LED_CTRL_LNKLED_OVERRIDE |
4026				    LED_CTRL_TRAFFIC_OVERRIDE));
4027	}
4028
4029	if (current_link_up != netif_carrier_ok(tp->dev)) {
4030		if (current_link_up)
4031			netif_carrier_on(tp->dev);
4032		else
4033			netif_carrier_off(tp->dev);
4034		tg3_link_report(tp);
4035	} else {
4036		u32 now_pause_cfg = tp->link_config.active_flowctrl;
4037		if (orig_pause_cfg != now_pause_cfg ||
4038		    orig_active_speed != tp->link_config.active_speed ||
4039		    orig_active_duplex != tp->link_config.active_duplex)
4040			tg3_link_report(tp);
4041	}
4042
4043	return 0;
4044}
4045
4046static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4047{
4048	int current_link_up, err = 0;
4049	u32 bmsr, bmcr;
4050	u16 current_speed;
4051	u8 current_duplex;
4052	u32 local_adv, remote_adv;
4053
4054	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4055	tw32_f(MAC_MODE, tp->mac_mode);
4056	udelay(40);
4057
4058	tw32(MAC_EVENT, 0);
4059
4060	tw32_f(MAC_STATUS,
4061	     (MAC_STATUS_SYNC_CHANGED |
4062	      MAC_STATUS_CFG_CHANGED |
4063	      MAC_STATUS_MI_COMPLETION |
4064	      MAC_STATUS_LNKSTATE_CHANGED));
4065	udelay(40);
4066
4067	if (force_reset)
4068		tg3_phy_reset(tp);
4069
4070	current_link_up = 0;
4071	current_speed = SPEED_INVALID;
4072	current_duplex = DUPLEX_INVALID;
4073
4074	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4075	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4076	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4077		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4078			bmsr |= BMSR_LSTATUS;
4079		else
4080			bmsr &= ~BMSR_LSTATUS;
4081	}
4082
4083	err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4084
4085	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4086	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4087		/* do nothing, just check for link up at the end */
4088	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4089		u32 adv, new_adv;
4090
4091		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4092		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4093				  ADVERTISE_1000XPAUSE |
4094				  ADVERTISE_1000XPSE_ASYM |
4095				  ADVERTISE_SLCT);
4096
4097		new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4098
4099		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4100			new_adv |= ADVERTISE_1000XHALF;
4101		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4102			new_adv |= ADVERTISE_1000XFULL;
4103
4104		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4105			tg3_writephy(tp, MII_ADVERTISE, new_adv);
4106			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4107			tg3_writephy(tp, MII_BMCR, bmcr);
4108
4109			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4110			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4111			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4112
4113			return err;
4114		}
4115	} else {
4116		u32 new_bmcr;
4117
4118		bmcr &= ~BMCR_SPEED1000;
4119		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4120
4121		if (tp->link_config.duplex == DUPLEX_FULL)
4122			new_bmcr |= BMCR_FULLDPLX;
4123
4124		if (new_bmcr != bmcr) {
4125			/* BMCR_SPEED1000 is a reserved bit that needs
4126			 * to be set on write.
4127			 */
4128			new_bmcr |= BMCR_SPEED1000;
4129
4130			/* Force a linkdown */
4131			if (netif_carrier_ok(tp->dev)) {
4132				u32 adv;
4133
4134				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4135				adv &= ~(ADVERTISE_1000XFULL |
4136					 ADVERTISE_1000XHALF |
4137					 ADVERTISE_SLCT);
4138				tg3_writephy(tp, MII_ADVERTISE, adv);
4139				tg3_writephy(tp, MII_BMCR, bmcr |
4140							   BMCR_ANRESTART |
4141							   BMCR_ANENABLE);
4142				udelay(10);
4143				netif_carrier_off(tp->dev);
4144			}
4145			tg3_writephy(tp, MII_BMCR, new_bmcr);
4146			bmcr = new_bmcr;
4147			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4148			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4149			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4150			    ASIC_REV_5714) {
4151				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4152					bmsr |= BMSR_LSTATUS;
4153				else
4154					bmsr &= ~BMSR_LSTATUS;
4155			}
4156			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4157		}
4158	}
4159
4160	if (bmsr & BMSR_LSTATUS) {
4161		current_speed = SPEED_1000;
4162		current_link_up = 1;
4163		if (bmcr & BMCR_FULLDPLX)
4164			current_duplex = DUPLEX_FULL;
4165		else
4166			current_duplex = DUPLEX_HALF;
4167
4168		local_adv = 0;
4169		remote_adv = 0;
4170
4171		if (bmcr & BMCR_ANENABLE) {
4172			u32 common;
4173
4174			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4175			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4176			common = local_adv & remote_adv;
4177			if (common & (ADVERTISE_1000XHALF |
4178				      ADVERTISE_1000XFULL)) {
4179				if (common & ADVERTISE_1000XFULL)
4180					current_duplex = DUPLEX_FULL;
4181				else
4182					current_duplex = DUPLEX_HALF;
4183			} else if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
4184				/* Link is up via parallel detect */
4185			} else {
4186				current_link_up = 0;
4187			}
4188		}
4189	}
4190
4191	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4192		tg3_setup_flow_control(tp, local_adv, remote_adv);
4193
4194	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4195	if (tp->link_config.active_duplex == DUPLEX_HALF)
4196		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4197
4198	tw32_f(MAC_MODE, tp->mac_mode);
4199	udelay(40);
4200
4201	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4202
4203	tp->link_config.active_speed = current_speed;
4204	tp->link_config.active_duplex = current_duplex;
4205
4206	if (current_link_up != netif_carrier_ok(tp->dev)) {
4207		if (current_link_up)
4208			netif_carrier_on(tp->dev);
4209		else {
4210			netif_carrier_off(tp->dev);
4211			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4212		}
4213		tg3_link_report(tp);
4214	}
4215	return err;
4216}
4217
4218static void tg3_serdes_parallel_detect(struct tg3 *tp)
4219{
4220	if (tp->serdes_counter) {
4221		/* Give autoneg time to complete. */
4222		tp->serdes_counter--;
4223		return;
4224	}
4225
4226	if (!netif_carrier_ok(tp->dev) &&
4227	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4228		u32 bmcr;
4229
4230		tg3_readphy(tp, MII_BMCR, &bmcr);
4231		if (bmcr & BMCR_ANENABLE) {
4232			u32 phy1, phy2;
4233
4234			/* Select shadow register 0x1f */
4235			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
4236			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
4237
4238			/* Select expansion interrupt status register */
4239			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4240					 MII_TG3_DSP_EXP1_INT_STAT);
4241			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4242			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4243
4244			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4245				/* We have signal detect and not receiving
4246				 * config code words, link is up by parallel
4247				 * detection.
4248				 */
4249
4250				bmcr &= ~BMCR_ANENABLE;
4251				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4252				tg3_writephy(tp, MII_BMCR, bmcr);
4253				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
4254			}
4255		}
4256	} else if (netif_carrier_ok(tp->dev) &&
4257		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4258		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4259		u32 phy2;
4260
4261		/* Select expansion interrupt status register */
4262		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4263				 MII_TG3_DSP_EXP1_INT_STAT);
4264		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4265		if (phy2 & 0x20) {
4266			u32 bmcr;
4267
4268			/* Config code words received, turn on autoneg. */
4269			tg3_readphy(tp, MII_BMCR, &bmcr);
4270			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4271
4272			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4273
4274		}
4275	}
4276}
4277
4278static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4279{
4280	int err;
4281
4282	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
4283		err = tg3_setup_fiber_phy(tp, force_reset);
4284	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4285		err = tg3_setup_fiber_mii_phy(tp, force_reset);
4286	else
4287		err = tg3_setup_copper_phy(tp, force_reset);
4288
4289	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4290		u32 val, scale;
4291
4292		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4293		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4294			scale = 65;
4295		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4296			scale = 6;
4297		else
4298			scale = 12;
4299
4300		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4301		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4302		tw32(GRC_MISC_CFG, val);
4303	}
4304
4305	if (tp->link_config.active_speed == SPEED_1000 &&
4306	    tp->link_config.active_duplex == DUPLEX_HALF)
4307		tw32(MAC_TX_LENGTHS,
4308		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4309		      (6 << TX_LENGTHS_IPG_SHIFT) |
4310		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
4311	else
4312		tw32(MAC_TX_LENGTHS,
4313		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4314		      (6 << TX_LENGTHS_IPG_SHIFT) |
4315		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
4316
4317	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4318		if (netif_carrier_ok(tp->dev)) {
4319			tw32(HOSTCC_STAT_COAL_TICKS,
4320			     tp->coal.stats_block_coalesce_usecs);
4321		} else {
4322			tw32(HOSTCC_STAT_COAL_TICKS, 0);
4323		}
4324	}
4325
4326	if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
4327		u32 val = tr32(PCIE_PWR_MGMT_THRESH);
4328		if (!netif_carrier_ok(tp->dev))
4329			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4330			      tp->pwrmgmt_thresh;
4331		else
4332			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4333		tw32(PCIE_PWR_MGMT_THRESH, val);
4334	}
4335
4336	return err;
4337}
4338
4339/* This is called whenever we suspect that the system chipset is re-
4340 * ordering the sequence of MMIO to the tx send mailbox. The symptom
4341 * is bogus tx completions. We try to recover by setting the
4342 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4343 * in the workqueue.
4344 */
4345static void tg3_tx_recover(struct tg3 *tp)
4346{
4347	BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
4348	       tp->write32_tx_mbox == tg3_write_indirect_mbox);
4349
4350	netdev_warn(tp->dev,
4351		    "The system may be re-ordering memory-mapped I/O "
4352		    "cycles to the network device, attempting to recover. "
4353		    "Please report the problem to the driver maintainer "
4354		    "and include system chipset information.\n");
4355
4356	spin_lock(&tp->lock);
4357	tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
4358	spin_unlock(&tp->lock);
4359}
4360
4361static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4362{
4363	/* Tell compiler to fetch tx indices from memory. */
4364	barrier();
4365	return tnapi->tx_pending -
4366	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
4367}
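
/* A minimal sketch (not driver code) of the power-of-two ring
 * arithmetic used in tg3_tx_avail() above: because the ring size is a
 * power of two, '(prod - cons) % size' reduces to a mask, and unsigned
 * wraparound keeps the occupancy correct even after the indices wrap.
 * The constants and names below are illustrative, not the driver's.
 */
#if 0	/* illustrative only, never compiled */
#include <stdio.h>

#define DEMO_RING_SIZE	512u		/* hypothetical power-of-two ring */

static unsigned int demo_tx_avail(unsigned int pending,
				  unsigned int prod, unsigned int cons)
{
	/* Occupancy via mask; correct across index wraparound. */
	return pending - ((prod - cons) & (DEMO_RING_SIZE - 1));
}

int main(void)
{
	/* prod has wrapped past cons: occupancy is 7, not negative. */
	printf("%u\n", demo_tx_avail(500u, 5u, 510u));	/* prints 493 */
	return 0;
}
#endif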
4368
4369/* Tigon3 never reports partial packet sends.  So we do not
4370 * need special logic to handle SKBs that have not had all
4371 * of their frags sent yet, like SunGEM does.
4372 */
4373static void tg3_tx(struct tg3_napi *tnapi)
4374{
4375	struct tg3 *tp = tnapi->tp;
4376	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4377	u32 sw_idx = tnapi->tx_cons;
4378	struct netdev_queue *txq;
4379	int index = tnapi - tp->napi;
4380
4381	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
4382		index--;
4383
4384	txq = netdev_get_tx_queue(tp->dev, index);
4385
4386	while (sw_idx != hw_idx) {
4387		struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
4388		struct sk_buff *skb = ri->skb;
4389		int i, tx_bug = 0;
4390
4391		if (unlikely(skb == NULL)) {
4392			tg3_tx_recover(tp);
4393			return;
4394		}
4395
4396		pci_unmap_single(tp->pdev,
4397				 dma_unmap_addr(ri, mapping),
4398				 skb_headlen(skb),
4399				 PCI_DMA_TODEVICE);
4400
4401		ri->skb = NULL;
4402
4403		sw_idx = NEXT_TX(sw_idx);
4404
4405		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4406			ri = &tnapi->tx_buffers[sw_idx];
4407			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4408				tx_bug = 1;
4409
4410			pci_unmap_page(tp->pdev,
4411				       dma_unmap_addr(ri, mapping),
4412				       skb_shinfo(skb)->frags[i].size,
4413				       PCI_DMA_TODEVICE);
4414			sw_idx = NEXT_TX(sw_idx);
4415		}
4416
4417		dev_kfree_skb(skb);
4418
4419		if (unlikely(tx_bug)) {
4420			tg3_tx_recover(tp);
4421			return;
4422		}
4423	}
4424
4425	tnapi->tx_cons = sw_idx;
4426
4427	/* Need to make the tx_cons update visible to tg3_start_xmit()
4428	 * before checking for netif_queue_stopped().  Without the
4429	 * memory barrier, there is a small possibility that tg3_start_xmit()
4430	 * will miss it and cause the queue to be stopped forever.
4431	 */
4432	smp_mb();
4433
4434	if (unlikely(netif_tx_queue_stopped(txq) &&
4435		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4436		__netif_tx_lock(txq, smp_processor_id());
4437		if (netif_tx_queue_stopped(txq) &&
4438		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4439			netif_tx_wake_queue(txq);
4440		__netif_tx_unlock(txq);
4441	}
4442}
4443
4444static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4445{
4446	if (!ri->skb)
4447		return;
4448
4449	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
4450			 map_sz, PCI_DMA_FROMDEVICE);
4451	dev_kfree_skb_any(ri->skb);
4452	ri->skb = NULL;
4453}
4454
4455/* Returns size of skb allocated or < 0 on error.
4456 *
4457 * We only need to fill in the address because the other members
4458 * of the RX descriptor are invariant, see tg3_init_rings.
4459 *
4460 * Note the purposeful asymmetry of cpu vs. chip accesses.  For
4461 * posting buffers we only dirty the first cache line of the RX
4462 * descriptor (containing the address), whereas for the RX status
4463 * buffers the cpu only reads the last cacheline of the RX descriptor
4464 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4465 */
4466static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4467			    u32 opaque_key, u32 dest_idx_unmasked)
4468{
4469	struct tg3_rx_buffer_desc *desc;
4470	struct ring_info *map, *src_map;
4471	struct sk_buff *skb;
4472	dma_addr_t mapping;
4473	int skb_size, dest_idx;
4474
4475	src_map = NULL;
4476	switch (opaque_key) {
4477	case RXD_OPAQUE_RING_STD:
4478		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4479		desc = &tpr->rx_std[dest_idx];
4480		map = &tpr->rx_std_buffers[dest_idx];
4481		skb_size = tp->rx_pkt_map_sz;
4482		break;
4483
4484	case RXD_OPAQUE_RING_JUMBO:
4485		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4486		desc = &tpr->rx_jmb[dest_idx].std;
4487		map = &tpr->rx_jmb_buffers[dest_idx];
4488		skb_size = TG3_RX_JMB_MAP_SZ;
4489		break;
4490
4491	default:
4492		return -EINVAL;
4493	}
4494
4495	/* Do not overwrite any of the map or rp information
4496	 * until we are sure we can commit to a new buffer.
4497	 *
4498	 * Callers depend upon this behavior and assume that
4499	 * we leave everything unchanged if we fail.
4500	 */
4501	skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
4502	if (skb == NULL)
4503		return -ENOMEM;
4504
4505	skb_reserve(skb, tp->rx_offset);
4506
4507	mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4508				 PCI_DMA_FROMDEVICE);
4509	if (pci_dma_mapping_error(tp->pdev, mapping)) {
4510		dev_kfree_skb(skb);
4511		return -EIO;
4512	}
4513
4514	map->skb = skb;
4515	dma_unmap_addr_set(map, mapping, mapping);
4516
4517	desc->addr_hi = ((u64)mapping >> 32);
4518	desc->addr_lo = ((u64)mapping & 0xffffffff);
4519
4520	return skb_size;
4521}
4522
4523/* We only need to move over the address because the other
4524 * members of the RX descriptor are invariant.  See notes above
4525 * tg3_alloc_rx_skb for full details.
4526 */
4527static void tg3_recycle_rx(struct tg3_napi *tnapi,
4528			   struct tg3_rx_prodring_set *dpr,
4529			   u32 opaque_key, int src_idx,
4530			   u32 dest_idx_unmasked)
4531{
4532	struct tg3 *tp = tnapi->tp;
4533	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4534	struct ring_info *src_map, *dest_map;
4535	struct tg3_rx_prodring_set *spr = &tp->prodring[0];
4536	int dest_idx;
4537
4538	switch (opaque_key) {
4539	case RXD_OPAQUE_RING_STD:
4540		dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4541		dest_desc = &dpr->rx_std[dest_idx];
4542		dest_map = &dpr->rx_std_buffers[dest_idx];
4543		src_desc = &spr->rx_std[src_idx];
4544		src_map = &spr->rx_std_buffers[src_idx];
4545		break;
4546
4547	case RXD_OPAQUE_RING_JUMBO:
4548		dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4549		dest_desc = &dpr->rx_jmb[dest_idx].std;
4550		dest_map = &dpr->rx_jmb_buffers[dest_idx];
4551		src_desc = &spr->rx_jmb[src_idx].std;
4552		src_map = &spr->rx_jmb_buffers[src_idx];
4553		break;
4554
4555	default:
4556		return;
4557	}
4558
4559	dest_map->skb = src_map->skb;
4560	dma_unmap_addr_set(dest_map, mapping,
4561			   dma_unmap_addr(src_map, mapping));
4562	dest_desc->addr_hi = src_desc->addr_hi;
4563	dest_desc->addr_lo = src_desc->addr_lo;
4564
4565	/* Ensure that the update to the skb happens after the physical
4566	 * addresses have been transferred to the new BD location.
4567	 */
4568	smp_wmb();
4569
4570	src_map->skb = NULL;
4571}
4572
4573/* The RX ring scheme is composed of multiple rings which post fresh
4574 * buffers to the chip, and one special ring the chip uses to report
4575 * status back to the host.
4576 *
4577 * The special ring reports the status of received packets to the
4578 * host.  The chip does not write into the original descriptor the
4579 * RX buffer was obtained from.  The chip simply takes the original
4580 * descriptor as provided by the host, updates the status and length
4581 * field, then writes this into the next status ring entry.
4582 *
4583 * Each ring the host uses to post buffers to the chip is described
4584 * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet
4585 * arrives, it is first placed into the on-chip RAM.  Once the packet's
4586 * length is known, the chip walks down the TG3_BDINFO entries to
4587 * select the ring: each TG3_BDINFO specifies a MAXLEN field, and the
4588 * first TG3_BDINFO whose MAXLEN covers the new packet's length is chosen.
4589 *
4590 * The "separate ring for rx status" scheme may sound queer, but it makes
4591 * sense from a cache coherency perspective.  If only the host writes
4592 * to the buffer post rings, and only the chip writes to the rx status
4593 * rings, then cache lines never move beyond shared-modified state.
4594 * If both the host and chip were to write into the same ring, cache line
4595 * eviction could occur since both entities want it in an exclusive state.
4596 */
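
/* A minimal sketch (not driver code) of the MAXLEN-based ring
 * selection described in the comment above.  The struct and values are
 * hypothetical stand-ins, not the chip's actual TG3_BDINFO layout.
 */
#if 0	/* illustrative only, never compiled */
struct demo_bdinfo {
	unsigned int maxlen;	/* largest payload this ring accepts */
};

/* Walk the BDINFO entries in order; the first ring whose MAXLEN covers
 * the packet length wins, mirroring the selection described above.
 */
static int demo_select_ring(const struct demo_bdinfo *rings,
			    int nrings, unsigned int pkt_len)
{
	int i;

	for (i = 0; i < nrings; i++)
		if (pkt_len <= rings[i].maxlen)
			return i;
	return -1;	/* no ring can hold this packet */
}

/* With rings of MAXLEN {1536, 9022}, a 64-byte frame selects ring 0
 * (standard) and a 4000-byte frame selects ring 1 (jumbo).
 */
#endif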
4597static int tg3_rx(struct tg3_napi *tnapi, int budget)
4598{
4599	struct tg3 *tp = tnapi->tp;
4600	u32 work_mask, rx_std_posted = 0;
4601	u32 std_prod_idx, jmb_prod_idx;
4602	u32 sw_idx = tnapi->rx_rcb_ptr;
4603	u16 hw_idx;
4604	int received;
4605	struct tg3_rx_prodring_set *tpr = tnapi->prodring;
4606
4607	hw_idx = *(tnapi->rx_rcb_prod_idx);
4608	/*
4609	 * We need to order the read of hw_idx and the read of
4610	 * the opaque cookie.
4611	 */
4612	rmb();
4613	work_mask = 0;
4614	received = 0;
4615	std_prod_idx = tpr->rx_std_prod_idx;
4616	jmb_prod_idx = tpr->rx_jmb_prod_idx;
4617	while (sw_idx != hw_idx && budget > 0) {
4618		struct ring_info *ri;
4619		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
4620		unsigned int len;
4621		struct sk_buff *skb;
4622		dma_addr_t dma_addr;
4623		u32 opaque_key, desc_idx, *post_ptr;
4624		bool hw_vlan __maybe_unused = false;
4625		u16 vtag __maybe_unused = 0;
4626
4627		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4628		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4629		if (opaque_key == RXD_OPAQUE_RING_STD) {
4630			ri = &tp->prodring[0].rx_std_buffers[desc_idx];
4631			dma_addr = dma_unmap_addr(ri, mapping);
4632			skb = ri->skb;
4633			post_ptr = &std_prod_idx;
4634			rx_std_posted++;
4635		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4636			ri = &tp->prodring[0].rx_jmb_buffers[desc_idx];
4637			dma_addr = dma_unmap_addr(ri, mapping);
4638			skb = ri->skb;
4639			post_ptr = &jmb_prod_idx;
4640		} else
4641			goto next_pkt_nopost;
4642
4643		work_mask |= opaque_key;
4644
4645		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4646		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4647		drop_it:
4648			tg3_recycle_rx(tnapi, tpr, opaque_key,
4649				       desc_idx, *post_ptr);
4650		drop_it_no_recycle:
4651			/* Other statistics are tracked by the card. */
4652			tp->rx_dropped++;
4653			goto next_pkt;
4654		}
4655
4656		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
4657		      ETH_FCS_LEN;
4658
4659		if (len > TG3_RX_COPY_THRESH(tp)) {
4660			int skb_size;
4661
4662			skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
4663						    *post_ptr);
4664			if (skb_size < 0)
4665				goto drop_it;
4666
4667			pci_unmap_single(tp->pdev, dma_addr, skb_size,
4668					 PCI_DMA_FROMDEVICE);
4669
4670			/* Ensure that the update to the skb happens
4671			 * after the usage of the old DMA mapping.
4672			 */
4673			smp_wmb();
4674
4675			ri->skb = NULL;
4676
4677			skb_put(skb, len);
4678		} else {
4679			struct sk_buff *copy_skb;
4680
4681			tg3_recycle_rx(tnapi, tpr, opaque_key,
4682				       desc_idx, *post_ptr);
4683
4684			copy_skb = netdev_alloc_skb(tp->dev, len + VLAN_HLEN +
4685						    TG3_RAW_IP_ALIGN);
4686			if (copy_skb == NULL)
4687				goto drop_it_no_recycle;
4688
4689			skb_reserve(copy_skb, TG3_RAW_IP_ALIGN + VLAN_HLEN);
4690			skb_put(copy_skb, len);
4691			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4692			skb_copy_from_linear_data(skb, copy_skb->data, len);
4693			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4694
4695			/* We'll reuse the original ring buffer. */
4696			skb = copy_skb;
4697		}
4698
4699		if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
4700		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4701		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4702		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
4703			skb->ip_summed = CHECKSUM_UNNECESSARY;
4704		else
4705			skb->ip_summed = CHECKSUM_NONE;
4706
4707		skb->protocol = eth_type_trans(skb, tp->dev);
4708
4709		if (len > (tp->dev->mtu + ETH_HLEN) &&
4710		    skb->protocol != htons(ETH_P_8021Q)) {
4711			dev_kfree_skb(skb);
4712			goto drop_it_no_recycle;
4713		}
4714
4715		if (desc->type_flags & RXD_FLAG_VLAN &&
4716		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG)) {
4717			vtag = desc->err_vlan & RXD_VLAN_MASK;
4718#if TG3_VLAN_TAG_USED
4719			if (tp->vlgrp)
4720				hw_vlan = true;
4721			else
4722#endif
4723			{
4724				struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
4725						    __skb_push(skb, VLAN_HLEN);
4726
4727				memmove(ve, skb->data + VLAN_HLEN,
4728					ETH_ALEN * 2);
4729				ve->h_vlan_proto = htons(ETH_P_8021Q);
4730				ve->h_vlan_TCI = htons(vtag);
4731			}
4732		}
4733
4734#if TG3_VLAN_TAG_USED
4735		if (hw_vlan)
4736			vlan_gro_receive(&tnapi->napi, tp->vlgrp, vtag, skb);
4737		else
4738#endif
4739			napi_gro_receive(&tnapi->napi, skb);
4740
4741		received++;
4742		budget--;
4743
4744next_pkt:
4745		(*post_ptr)++;
4746
4747		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
4748			tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
4749			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
4750				     tpr->rx_std_prod_idx);
4751			work_mask &= ~RXD_OPAQUE_RING_STD;
4752			rx_std_posted = 0;
4753		}
4754next_pkt_nopost:
4755		sw_idx++;
4756		sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
4757
4758		/* Refresh hw_idx to see if there is new work */
4759		if (sw_idx == hw_idx) {
4760			hw_idx = *(tnapi->rx_rcb_prod_idx);
4761			rmb();
4762		}
4763	}
4764
4765	/* ACK the status ring. */
4766	tnapi->rx_rcb_ptr = sw_idx;
4767	tw32_rx_mbox(tnapi->consmbox, sw_idx);
4768
4769	/* Refill RX ring(s). */
4770	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) {
4771		if (work_mask & RXD_OPAQUE_RING_STD) {
4772			tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
4773			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
4774				     tpr->rx_std_prod_idx);
4775		}
4776		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
4777			tpr->rx_jmb_prod_idx = jmb_prod_idx %
4778					       TG3_RX_JUMBO_RING_SIZE;
4779			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
4780				     tpr->rx_jmb_prod_idx);
4781		}
4782		mmiowb();
4783	} else if (work_mask) {
4784		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
4785		 * updated before the producer indices can be updated.
4786		 */
4787		smp_wmb();
4788
4789		tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
4790		tpr->rx_jmb_prod_idx = jmb_prod_idx % TG3_RX_JUMBO_RING_SIZE;
4791
4792		if (tnapi != &tp->napi[1])
4793			napi_schedule(&tp->napi[1].napi);
4794	}
4795
4796	return received;
4797}
4798
4799static void tg3_poll_link(struct tg3 *tp)
4800{
4801	/* handle link change and other phy events */
4802	if (!(tp->tg3_flags &
4803	      (TG3_FLAG_USE_LINKCHG_REG |
4804	       TG3_FLAG_POLL_SERDES))) {
4805		struct tg3_hw_status *sblk = tp->napi[0].hw_status;
4806
4807		if (sblk->status & SD_STATUS_LINK_CHG) {
4808			sblk->status = SD_STATUS_UPDATED |
4809				       (sblk->status & ~SD_STATUS_LINK_CHG);
4810			spin_lock(&tp->lock);
4811			if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
4812				tw32_f(MAC_STATUS,
4813				     (MAC_STATUS_SYNC_CHANGED |
4814				      MAC_STATUS_CFG_CHANGED |
4815				      MAC_STATUS_MI_COMPLETION |
4816				      MAC_STATUS_LNKSTATE_CHANGED));
4817				udelay(40);
4818			} else
4819				tg3_setup_phy(tp, 0);
4820			spin_unlock(&tp->lock);
4821		}
4822	}
4823}
4824
4825static int tg3_rx_prodring_xfer(struct tg3 *tp,
4826				struct tg3_rx_prodring_set *dpr,
4827				struct tg3_rx_prodring_set *spr)
4828{
4829	u32 si, di, cpycnt, src_prod_idx;
4830	int i, err = 0;
4831
4832	while (1) {
4833		src_prod_idx = spr->rx_std_prod_idx;
4834
4835		/* Make sure updates to the rx_std_buffers[] entries and the
4836		 * standard producer index are seen in the correct order.
4837		 */
4838		smp_rmb();
4839
4840		if (spr->rx_std_cons_idx == src_prod_idx)
4841			break;
4842
4843		if (spr->rx_std_cons_idx < src_prod_idx)
4844			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
4845		else
4846			cpycnt = TG3_RX_RING_SIZE - spr->rx_std_cons_idx;
4847
4848		cpycnt = min(cpycnt, TG3_RX_RING_SIZE - dpr->rx_std_prod_idx);
4849
4850		si = spr->rx_std_cons_idx;
4851		di = dpr->rx_std_prod_idx;
4852
4853		for (i = di; i < di + cpycnt; i++) {
4854			if (dpr->rx_std_buffers[i].skb) {
4855				cpycnt = i - di;
4856				err = -ENOSPC;
4857				break;
4858			}
4859		}
4860
4861		if (!cpycnt)
4862			break;
4863
4864		/* Ensure that updates to the rx_std_buffers ring and the
4865		 * shadowed hardware producer ring from tg3_recycle_skb() are
4866		 * ordered correctly WRT the skb check above.
4867		 */
4868		smp_rmb();
4869
4870		memcpy(&dpr->rx_std_buffers[di],
4871		       &spr->rx_std_buffers[si],
4872		       cpycnt * sizeof(struct ring_info));
4873
4874		for (i = 0; i < cpycnt; i++, di++, si++) {
4875			struct tg3_rx_buffer_desc *sbd, *dbd;
4876			sbd = &spr->rx_std[si];
4877			dbd = &dpr->rx_std[di];
4878			dbd->addr_hi = sbd->addr_hi;
4879			dbd->addr_lo = sbd->addr_lo;
4880		}
4881
4882		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) %
4883				       TG3_RX_RING_SIZE;
4884		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) %
4885				       TG3_RX_RING_SIZE;
4886	}
4887
4888	while (1) {
4889		src_prod_idx = spr->rx_jmb_prod_idx;
4890
4891		/* Make sure updates to the rx_jmb_buffers[] entries and
4892		 * the jumbo producer index are seen in the correct order.
4893		 */
4894		smp_rmb();
4895
4896		if (spr->rx_jmb_cons_idx == src_prod_idx)
4897			break;
4898
4899		if (spr->rx_jmb_cons_idx < src_prod_idx)
4900			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
4901		else
4902			cpycnt = TG3_RX_JUMBO_RING_SIZE - spr->rx_jmb_cons_idx;
4903
4904		cpycnt = min(cpycnt,
4905			     TG3_RX_JUMBO_RING_SIZE - dpr->rx_jmb_prod_idx);
4906
4907		si = spr->rx_jmb_cons_idx;
4908		di = dpr->rx_jmb_prod_idx;
4909
4910		for (i = di; i < di + cpycnt; i++) {
4911			if (dpr->rx_jmb_buffers[i].skb) {
4912				cpycnt = i - di;
4913				err = -ENOSPC;
4914				break;
4915			}
4916		}
4917
4918		if (!cpycnt)
4919			break;
4920
4921		/* Ensure that updates to the rx_jmb_buffers ring and the
4922		 * shadowed hardware producer ring from tg3_recycle_skb() are
4923		 * ordered correctly WRT the skb check above.
4924		 */
4925		smp_rmb();
4926
4927		memcpy(&dpr->rx_jmb_buffers[di],
4928		       &spr->rx_jmb_buffers[si],
4929		       cpycnt * sizeof(struct ring_info));
4930
4931		for (i = 0; i < cpycnt; i++, di++, si++) {
4932			struct tg3_rx_buffer_desc *sbd, *dbd;
4933			sbd = &spr->rx_jmb[si].std;
4934			dbd = &dpr->rx_jmb[di].std;
4935			dbd->addr_hi = sbd->addr_hi;
4936			dbd->addr_lo = sbd->addr_lo;
4937		}
4938
4939		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) %
4940				       TG3_RX_JUMBO_RING_SIZE;
4941		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) %
4942				       TG3_RX_JUMBO_RING_SIZE;
4943	}
4944
4945	return err;
4946}
4947
4948static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
4949{
4950	struct tg3 *tp = tnapi->tp;
4951
4952	/* run TX completion thread */
4953	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
4954		tg3_tx(tnapi);
4955		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4956			return work_done;
4957	}
4958
4959	/* run RX thread, within the bounds set by NAPI.
4960	 * All RX "locking" is done by ensuring outside
4961	 * code synchronizes with tg3->napi.poll()
4962	 */
4963	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
4964		work_done += tg3_rx(tnapi, budget - work_done);
4965
4966	if ((tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) && tnapi == &tp->napi[1]) {
4967		struct tg3_rx_prodring_set *dpr = &tp->prodring[0];
4968		int i, err = 0;
4969		u32 std_prod_idx = dpr->rx_std_prod_idx;
4970		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
4971
4972		for (i = 1; i < tp->irq_cnt; i++)
4973			err |= tg3_rx_prodring_xfer(tp, dpr,
4974						    tp->napi[i].prodring);
4975
4976		wmb();
4977
4978		if (std_prod_idx != dpr->rx_std_prod_idx)
4979			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
4980				     dpr->rx_std_prod_idx);
4981
4982		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
4983			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
4984				     dpr->rx_jmb_prod_idx);
4985
4986		mmiowb();
4987
4988		if (err)
4989			tw32_f(HOSTCC_MODE, tp->coal_now);
4990	}
4991
4992	return work_done;
4993}
4994
4995static int tg3_poll_msix(struct napi_struct *napi, int budget)
4996{
4997	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
4998	struct tg3 *tp = tnapi->tp;
4999	int work_done = 0;
5000	struct tg3_hw_status *sblk = tnapi->hw_status;
5001
5002	while (1) {
5003		work_done = tg3_poll_work(tnapi, work_done, budget);
5004
5005		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
5006			goto tx_recovery;
5007
5008		if (unlikely(work_done >= budget))
5009			break;
5010
5011		/* tnapi->last_tag is used in the mailbox write below
5012		 * to tell the hw how much work has been processed,
5013		 * so we must read it before checking for more work.
5014		 */
5015		tnapi->last_tag = sblk->status_tag;
5016		tnapi->last_irq_tag = tnapi->last_tag;
5017		rmb();
5018
5019		/* check for RX/TX work to do */
5020		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5021			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5022			napi_complete(napi);
5023			/* Reenable interrupts. */
5024			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5025			mmiowb();
5026			break;
5027		}
5028	}
5029
5030	return work_done;
5031
5032tx_recovery:
5033	/* work_done is guaranteed to be less than budget. */
5034	napi_complete(napi);
5035	schedule_work(&tp->reset_task);
5036	return work_done;
5037}
5038
5039static int tg3_poll(struct napi_struct *napi, int budget)
5040{
5041	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5042	struct tg3 *tp = tnapi->tp;
5043	int work_done = 0;
5044	struct tg3_hw_status *sblk = tnapi->hw_status;
5045
5046	while (1) {
5047		tg3_poll_link(tp);
5048
5049		work_done = tg3_poll_work(tnapi, work_done, budget);
5050
5051		if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
5052			goto tx_recovery;
5053
5054		if (unlikely(work_done >= budget))
5055			break;
5056
5057		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
5058			/* tnapi->last_tag is used in tg3_int_reenable() below
5059			 * to tell the hw how much work has been processed,
5060			 * so we must read it before checking for more work.
5061			 */
5062			tnapi->last_tag = sblk->status_tag;
5063			tnapi->last_irq_tag = tnapi->last_tag;
5064			rmb();
5065		} else
5066			sblk->status &= ~SD_STATUS_UPDATED;
5067
5068		if (likely(!tg3_has_work(tnapi))) {
5069			napi_complete(napi);
5070			tg3_int_reenable(tnapi);
5071			break;
5072		}
5073	}
5074
5075	return work_done;
5076
5077tx_recovery:
5078	/* work_done is guaranteed to be less than budget. */
5079	napi_complete(napi);
5080	schedule_work(&tp->reset_task);
5081	return work_done;
5082}
5083
5084static void tg3_irq_quiesce(struct tg3 *tp)
5085{
5086	int i;
5087
5088	BUG_ON(tp->irq_sync);
5089
5090	tp->irq_sync = 1;
5091	smp_mb();
5092
5093	for (i = 0; i < tp->irq_cnt; i++)
5094		synchronize_irq(tp->napi[i].irq_vec);
5095}
5096
5097static inline int tg3_irq_sync(struct tg3 *tp)
5098{
5099	return tp->irq_sync;
5100}
5101
5102/* Fully shut down all tg3 driver activity elsewhere in the system.
5103 * If irq_sync is non-zero, the IRQ handlers must be quiesced as
5104 * well.  Most of the time this is not necessary, except when
5105 * shutting down the device.
5106 */
5107static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
5108{
5109	spin_lock_bh(&tp->lock);
5110	if (irq_sync)
5111		tg3_irq_quiesce(tp);
5112}
5113
5114static inline void tg3_full_unlock(struct tg3 *tp)
5115{
5116	spin_unlock_bh(&tp->lock);
5117}
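
/* A minimal usage sketch (not driver code) of the locking helpers
 * above; demo_reconfigure() is a hypothetical caller.  Pass a non-zero
 * irq_sync only when the IRQ handlers must also be quiesced, e.g. on
 * shutdown.
 */
#if 0	/* illustrative only, never compiled */
static void demo_reconfigure(struct tg3 *tp)
{
	tg3_full_lock(tp, 1);	/* lock and quiesce the IRQ handlers */
	/* ... touch state that the IRQ/NAPI paths also use ... */
	tg3_full_unlock(tp);

	tg3_full_lock(tp, 0);	/* lock only; IRQs keep running */
	/* ... lighter-weight state changes ... */
	tg3_full_unlock(tp);
}
#endif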
5118
5119/* One-shot MSI handler - the chip automatically disables the interrupt
5120 * after sending the MSI, so the driver doesn't have to do it.
5121 */
5122static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
5123{
5124	struct tg3_napi *tnapi = dev_id;
5125	struct tg3 *tp = tnapi->tp;
5126
5127	prefetch(tnapi->hw_status);
5128	if (tnapi->rx_rcb)
5129		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5130
5131	if (likely(!tg3_irq_sync(tp)))
5132		napi_schedule(&tnapi->napi);
5133
5134	return IRQ_HANDLED;
5135}
5136
5137/* MSI ISR - no need to check for interrupt sharing and no need to
5138 * flush the status block and interrupt mailbox.  PCI ordering rules
5139 * guarantee that the MSI will arrive after the status block.
5140 */
5141static irqreturn_t tg3_msi(int irq, void *dev_id)
5142{
5143	struct tg3_napi *tnapi = dev_id;
5144	struct tg3 *tp = tnapi->tp;
5145
5146	prefetch(tnapi->hw_status);
5147	if (tnapi->rx_rcb)
5148		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5149	/*
5150	 * Writing any value to intr-mbox-0 clears PCI INTA# and
5151	 * chip-internal interrupt pending events.
5152	 * Writing non-zero to intr-mbox-0 additionally tells the
5153	 * NIC to stop sending us irqs, engaging "in-intr-handler"
5154	 * event coalescing.
5155	 */
5156	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5157	if (likely(!tg3_irq_sync(tp)))
5158		napi_schedule(&tnapi->napi);
5159
5160	return IRQ_RETVAL(1);
5161}
5162
5163static irqreturn_t tg3_interrupt(int irq, void *dev_id)
5164{
5165	struct tg3_napi *tnapi = dev_id;
5166	struct tg3 *tp = tnapi->tp;
5167	struct tg3_hw_status *sblk = tnapi->hw_status;
5168	unsigned int handled = 1;
5169
5170	/* In INTx mode, it is possible for the interrupt to arrive at the
5171	 * CPU before the status block posted prior to the interrupt has
5172	 * reached host memory.  Reading the PCI State register will confirm
5173	 * whether the interrupt is ours and will flush the status block.
5174	 */
5175	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
5176		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
5177		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5178			handled = 0;
5179			goto out;
5180		}
5181	}
5182
5183	/*
5184	 * Writing any value to intr-mbox-0 clears PCI INTA# and
5185	 * chip-internal interrupt pending events.
5186	 * Writing non-zero to intr-mbox-0 additionally tells the
5187	 * NIC to stop sending us irqs, engaging "in-intr-handler"
5188	 * event coalescing.
5189	 *
5190	 * Flush the mailbox to de-assert the IRQ immediately to prevent
5191	 * spurious interrupts.  The flush impacts performance but
5192	 * excessive spurious interrupts can be worse in some cases.
5193	 */
5194	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5195	if (tg3_irq_sync(tp))
5196		goto out;
5197	sblk->status &= ~SD_STATUS_UPDATED;
5198	if (likely(tg3_has_work(tnapi))) {
5199		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5200		napi_schedule(&tnapi->napi);
5201	} else {
5202		/* No work, shared interrupt perhaps?  re-enable
5203		 * interrupts, and flush that PCI write
5204		 */
5205		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
5206			       0x00000000);
5207	}
5208out:
5209	return IRQ_RETVAL(handled);
5210}
5211
5212static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
5213{
5214	struct tg3_napi *tnapi = dev_id;
5215	struct tg3 *tp = tnapi->tp;
5216	struct tg3_hw_status *sblk = tnapi->hw_status;
5217	unsigned int handled = 1;
5218
5219	/* In INTx mode, it is possible for the interrupt to arrive at the
5220	 * CPU before the status block posted prior to the interrupt has
5221	 * reached host memory.  Reading the PCI State register will confirm
5222	 * whether the interrupt is ours and will flush the status block.
5223	 */
5224	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
5225		if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
5226		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5227			handled = 0;
5228			goto out;
5229		}
5230	}
5231
5232	/*
5233	 * Writing any value to intr-mbox-0 clears PCI INTA# and
5234	 * chip-internal interrupt pending events.
5235	 * Writing non-zero to intr-mbox-0 additionally tells the
5236	 * NIC to stop sending us irqs, engaging "in-intr-handler"
5237	 * event coalescing.
5238	 *
5239	 * Flush the mailbox to de-assert the IRQ immediately to prevent
5240	 * spurious interrupts.  The flush impacts performance but
5241	 * excessive spurious interrupts can be worse in some cases.
5242	 */
5243	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5244
5245	/*
5246	 * In a shared interrupt configuration, sometimes other devices'
5247	 * interrupts will scream.  We record the current status tag here
5248	 * so that the above check can report that the screaming interrupts
5249	 * are unhandled.  Eventually they will be silenced.
5250	 */
5251	tnapi->last_irq_tag = sblk->status_tag;
5252
5253	if (tg3_irq_sync(tp))
5254		goto out;
5255
5256	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5257
5258	napi_schedule(&tnapi->napi);
5259
5260out:
5261	return IRQ_RETVAL(handled);
5262}
5263
5264/* ISR for interrupt test */
5265static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5266{
5267	struct tg3_napi *tnapi = dev_id;
5268	struct tg3 *tp = tnapi->tp;
5269	struct tg3_hw_status *sblk = tnapi->hw_status;
5270
5271	if ((sblk->status & SD_STATUS_UPDATED) ||
5272	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5273		tg3_disable_ints(tp);
5274		return IRQ_RETVAL(1);
5275	}
5276	return IRQ_RETVAL(0);
5277}
5278
5279static int tg3_init_hw(struct tg3 *, int);
5280static int tg3_halt(struct tg3 *, int, int);
5281
5282/* Restart hardware after configuration changes, self-test, etc.
5283 * Invoked with tp->lock held.
5284 */
5285static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
5286	__releases(tp->lock)
5287	__acquires(tp->lock)
5288{
5289	int err;
5290
5291	err = tg3_init_hw(tp, reset_phy);
5292	if (err) {
5293		netdev_err(tp->dev,
5294			   "Failed to re-initialize device, aborting\n");
5295		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5296		tg3_full_unlock(tp);
5297		del_timer_sync(&tp->timer);
5298		tp->irq_sync = 0;
5299		tg3_napi_enable(tp);
5300		dev_close(tp->dev);
5301		tg3_full_lock(tp, 0);
5302	}
5303	return err;
5304}
5305
5306#ifdef CONFIG_NET_POLL_CONTROLLER
5307static void tg3_poll_controller(struct net_device *dev)
5308{
5309	int i;
5310	struct tg3 *tp = netdev_priv(dev);
5311
5312	for (i = 0; i < tp->irq_cnt; i++)
5313		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
5314}
5315#endif
5316
5317static void tg3_reset_task(struct work_struct *work)
5318{
5319	struct tg3 *tp = container_of(work, struct tg3, reset_task);
5320	int err;
5321	unsigned int restart_timer;
5322
5323	tg3_full_lock(tp, 0);
5324
5325	if (!netif_running(tp->dev)) {
5326		tg3_full_unlock(tp);
5327		return;
5328	}
5329
5330	tg3_full_unlock(tp);
5331
5332	tg3_phy_stop(tp);
5333
5334	tg3_netif_stop(tp);
5335
5336	tg3_full_lock(tp, 1);
5337
5338	restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
5339	tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
5340
5341	if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
5342		tp->write32_tx_mbox = tg3_write32_tx_mbox;
5343		tp->write32_rx_mbox = tg3_write_flush_reg32;
5344		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
5345		tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
5346	}
5347
5348	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5349	err = tg3_init_hw(tp, 1);
5350	if (err)
5351		goto out;
5352
5353	tg3_netif_start(tp);
5354
5355	if (restart_timer)
5356		mod_timer(&tp->timer, jiffies + 1);
5357
5358out:
5359	tg3_full_unlock(tp);
5360
5361	if (!err)
5362		tg3_phy_start(tp);
5363}
5364
5365static void tg3_dump_short_state(struct tg3 *tp)
5366{
5367	netdev_err(tp->dev, "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
5368		   tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
5369	netdev_err(tp->dev, "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
5370		   tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
5371}
5372
5373static void tg3_tx_timeout(struct net_device *dev)
5374{
5375	struct tg3 *tp = netdev_priv(dev);
5376
5377	if (netif_msg_tx_err(tp)) {
5378		netdev_err(dev, "transmit timed out, resetting\n");
5379		tg3_dump_short_state(tp);
5380	}
5381
5382	schedule_work(&tp->reset_task);
5383}
5384
5385/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
5386static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5387{
5388	u32 base = (u32) mapping & 0xffffffff;
5389
5390	return ((base > 0xffffdcc0) &&
5391		(base + len + 8 < base));
5392}
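
/* A worked example of the test above, with illustrative addresses:
 *
 *   base = 0xfffff000, len = 0x2000:
 *     base > 0xffffdcc0, and base + len + 8 = 0x100001008 truncates to
 *     0x00001008 in u32 arithmetic, so (base + len + 8 < base) is true
 *     and the buffer crosses a 4GB boundary.
 *
 *   base = 0x10000000, len = 0x2000:
 *     no wraparound, the test is false, the buffer is safe.
 */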
5393
5394/* Test for DMA addresses > 40-bit */
5395static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5396					  int len)
5397{
5398#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5399	if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
5400		return (((u64) mapping + len) > DMA_BIT_MASK(40));
5401	return 0;
5402#else
5403	return 0;
5404#endif
5405}
5406
5407static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32);
5408
5409static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5410				       struct sk_buff *skb, u32 last_plus_one,
5411				       u32 *start, u32 base_flags, u32 mss)
5412{
5413	struct tg3 *tp = tnapi->tp;
5414	struct sk_buff *new_skb;
5415	dma_addr_t new_addr = 0;
5416	u32 entry = *start;
5417	int i, ret = 0;
5418
5419	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
5420		new_skb = skb_copy(skb, GFP_ATOMIC);
5421	else {
5422		int more_headroom = 4 - ((unsigned long)skb->data & 3);
5423
5424		new_skb = skb_copy_expand(skb,
5425					  skb_headroom(skb) + more_headroom,
5426					  skb_tailroom(skb), GFP_ATOMIC);
5427	}
5428
5429	if (!new_skb) {
5430		ret = -1;
5431	} else {
5432		/* New SKB is guaranteed to be linear. */
5433		entry = *start;
5434		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
5435					  PCI_DMA_TODEVICE);
5436		/* Make sure the mapping succeeded */
5437		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
5438			ret = -1;
5439			dev_kfree_skb(new_skb);
5440			new_skb = NULL;
5441
5442		/* Make sure new skb does not cross any 4G boundaries.
5443		 * Drop the packet if it does.
5444		 */
5445		} else if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
5446			    tg3_4g_overflow_test(new_addr, new_skb->len)) {
5447			pci_unmap_single(tp->pdev, new_addr, new_skb->len,
5448					 PCI_DMA_TODEVICE);
5449			ret = -1;
5450			dev_kfree_skb(new_skb);
5451			new_skb = NULL;
5452		} else {
5453			tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
5454				    base_flags, 1 | (mss << 1));
5455			*start = NEXT_TX(entry);
5456		}
5457	}
5458
5459	/* Now clean up the sw ring entries. */
5460	i = 0;
5461	while (entry != last_plus_one) {
5462		int len;
5463
5464		if (i == 0)
5465			len = skb_headlen(skb);
5466		else
5467			len = skb_shinfo(skb)->frags[i-1].size;
5468
5469		pci_unmap_single(tp->pdev,
5470				 dma_unmap_addr(&tnapi->tx_buffers[entry],
5471						mapping),
5472				 len, PCI_DMA_TODEVICE);
5473		if (i == 0) {
5474			tnapi->tx_buffers[entry].skb = new_skb;
5475			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
5476					   new_addr);
5477		} else {
5478			tnapi->tx_buffers[entry].skb = NULL;
5479		}
5480		entry = NEXT_TX(entry);
5481		i++;
5482	}
5483
5484	dev_kfree_skb(skb);
5485
5486	return ret;
5487}
5488
5489static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5490			dma_addr_t mapping, int len, u32 flags,
5491			u32 mss_and_is_end)
5492{
5493	struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
5494	int is_end = (mss_and_is_end & 0x1);
5495	u32 mss = (mss_and_is_end >> 1);
5496	u32 vlan_tag = 0;
5497
5498	if (is_end)
5499		flags |= TXD_FLAG_END;
5500	if (flags & TXD_FLAG_VLAN) {
5501		vlan_tag = flags >> 16;
5502		flags &= 0xffff;
5503	}
5504	vlan_tag |= (mss << TXD_MSS_SHIFT);
5505
5506	txd->addr_hi = ((u64) mapping >> 32);
5507	txd->addr_lo = ((u64) mapping & 0xffffffff);
5508	txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
5509	txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
5510}
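
/* A minimal sketch (not driver code) of the 'mss_and_is_end' encoding
 * expected by tg3_set_txd() above: bit 0 carries the is-end flag and
 * the remaining bits carry the MSS.  demo_pack_mss_and_is_end() is a
 * hypothetical helper showing the inverse of the decode above.
 */
#if 0	/* illustrative only, never compiled */
static u32 demo_pack_mss_and_is_end(u32 mss, int is_end)
{
	/* tg3_set_txd() recovers is_end as (x & 0x1) and mss as (x >> 1). */
	return (is_end ? 1 : 0) | (mss << 1);
}
#endif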
5511
5512/* hard_start_xmit for devices that don't have any bugs and
5513 * support TG3_FLG2_HW_TSO_2 and TG3_FLG2_HW_TSO_3 only.
5514 */
5515static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5516				  struct net_device *dev)
5517{
5518	struct tg3 *tp = netdev_priv(dev);
5519	u32 len, entry, base_flags, mss;
5520	dma_addr_t mapping;
5521	struct tg3_napi *tnapi;
5522	struct netdev_queue *txq;
5523	unsigned int i, last;
5524
5525	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5526	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5527	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
5528		tnapi++;
5529
5530	/* We are running in BH disabled context with netif_tx_lock
5531	 * and TX reclaim runs via tp->napi.poll inside of a software
5532	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
5533	 * no IRQ context deadlocks to worry about either.  Rejoice!
5534	 */
5535	if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5536		if (!netif_tx_queue_stopped(txq)) {
5537			netif_tx_stop_queue(txq);
5538
5539			/* This is a hard error, log it. */
5540			netdev_err(dev,
5541				   "BUG! Tx Ring full when queue awake!\n");
5542		}
5543		return NETDEV_TX_BUSY;
5544	}
5545
5546	entry = tnapi->tx_prod;
5547	base_flags = 0;
5548	mss = skb_shinfo(skb)->gso_size;
5549	if (mss) {
5550		int tcp_opt_len, ip_tcp_len;
5551		u32 hdrlen;
5552
5553		if (skb_header_cloned(skb) &&
5554		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5555			dev_kfree_skb(skb);
5556			goto out_unlock;
5557		}
5558
5559		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
5560			hdrlen = skb_headlen(skb) - ETH_HLEN;
5561		else {
5562			struct iphdr *iph = ip_hdr(skb);
5563
5564			tcp_opt_len = tcp_optlen(skb);
5565			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5566
5567			iph->check = 0;
5568			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5569			hdrlen = ip_tcp_len + tcp_opt_len;
5570		}
5571
5572		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
5573			mss |= (hdrlen & 0xc) << 12;
5574			if (hdrlen & 0x10)
5575				base_flags |= 0x00000010;
5576			base_flags |= (hdrlen & 0x3e0) << 5;
5577		} else
5578			mss |= hdrlen << 9;
5579
5580		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5581			       TXD_FLAG_CPU_POST_DMA);
5582
5583		tcp_hdr(skb)->check = 0;
5584
5585	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
5586		base_flags |= TXD_FLAG_TCPUDP_CSUM;
5587	}
5588
5589#if TG3_VLAN_TAG_USED
5590	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
5591		base_flags |= (TXD_FLAG_VLAN |
5592			       (vlan_tx_tag_get(skb) << 16));
5593#endif
5594
5595	len = skb_headlen(skb);
5596
5597	/* Queue skb data, a.k.a. the main skb fragment. */
5598	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
5599	if (pci_dma_mapping_error(tp->pdev, mapping)) {
5600		dev_kfree_skb(skb);
5601		goto out_unlock;
5602	}
5603
5604	tnapi->tx_buffers[entry].skb = skb;
5605	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
5606
5607	if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
5608	    !mss && skb->len > ETH_DATA_LEN)
5609		base_flags |= TXD_FLAG_JMB_PKT;
5610
5611	tg3_set_txd(tnapi, entry, mapping, len, base_flags,
5612		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5613
5614	entry = NEXT_TX(entry);
5615
5616	/* Now loop through additional data fragments, and queue them. */
5617	if (skb_shinfo(skb)->nr_frags > 0) {
5618		last = skb_shinfo(skb)->nr_frags - 1;
5619		for (i = 0; i <= last; i++) {
5620			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5621
5622			len = frag->size;
5623			mapping = pci_map_page(tp->pdev,
5624					       frag->page,
5625					       frag->page_offset,
5626					       len, PCI_DMA_TODEVICE);
5627			if (pci_dma_mapping_error(tp->pdev, mapping))
5628				goto dma_error;
5629
5630			tnapi->tx_buffers[entry].skb = NULL;
5631			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
5632					   mapping);
5633
5634			tg3_set_txd(tnapi, entry, mapping, len,
5635				    base_flags, (i == last) | (mss << 1));
5636
5637			entry = NEXT_TX(entry);
5638		}
5639	}
5640
5641	/* Packets are ready, update Tx producer idx local and on card. */
5642	tw32_tx_mbox(tnapi->prodmbox, entry);
5643
5644	tnapi->tx_prod = entry;
5645	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
5646		netif_tx_stop_queue(txq);
5647
5648		/* netif_tx_stop_queue() must be done before checking
5649		 * tx index in tg3_tx_avail() below, because in
5650		 * tg3_tx(), we update tx index before checking for
5651		 * netif_tx_queue_stopped().
5652		 */
5653		smp_mb();
5654		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
5655			netif_tx_wake_queue(txq);
5656	}
5657
5658out_unlock:
5659	mmiowb();
5660
5661	return NETDEV_TX_OK;
5662
5663dma_error:
5664	last = i;
5665	entry = tnapi->tx_prod;
5666	tnapi->tx_buffers[entry].skb = NULL;
5667	pci_unmap_single(tp->pdev,
5668			 dma_unmap_addr(&tnapi->tx_buffers[entry], mapping),
5669			 skb_headlen(skb),
5670			 PCI_DMA_TODEVICE);
5671	for (i = 0; i < last; i++) {
5672		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5673		entry = NEXT_TX(entry);
5674
5675		pci_unmap_page(tp->pdev,
5676			       dma_unmap_addr(&tnapi->tx_buffers[entry],
5677					      mapping),
5678			       frag->size, PCI_DMA_TODEVICE);
5679	}
5680
5681	dev_kfree_skb(skb);
5682	return NETDEV_TX_OK;
5683}
5684
5685static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *,
5686					  struct net_device *);
5687
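/* Software GSO fallback, used when TG3_FLG2_TSO_BUG is set and the
 * headers run past 80 bytes including the Ethernet header: segment
 * the skb on the CPU instead and push each resulting frame through
 * the normal transmit path.  The descriptor estimate assumes a
 * worst case of roughly three descriptors per segment.
 */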
5688static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
5689{
5690	struct sk_buff *segs, *nskb;
5691	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
5692
5693	/* Estimate the number of fragments in the worst case */
5694	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
5695		netif_stop_queue(tp->dev);
5696
5697		/* netif_tx_stop_queue() must be done before checking
5698		 * tx index in tg3_tx_avail() below, because in
5699		 * tg3_tx(), we update tx index before checking for
5700		 * netif_tx_queue_stopped().
5701		 */
5702		smp_mb();
5703		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
5704			return NETDEV_TX_BUSY;
5705
5706		netif_wake_queue(tp->dev);
5707	}
5708
5709	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
5710	if (IS_ERR(segs))
5711		goto tg3_tso_bug_end;
5712
5713	do {
5714		nskb = segs;
5715		segs = segs->next;
5716		nskb->next = NULL;
5717		tg3_start_xmit_dma_bug(nskb, tp->dev);
5718	} while (segs);
5719
5720tg3_tso_bug_end:
5721	dev_kfree_skb(skb);
5722
5723	return NETDEV_TX_OK;
5724}
5725
5726/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
5727 * support only TG3_FLG2_HW_TSO_1 or firmware TSO.
5728 */
5729static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5730					  struct net_device *dev)
5731{
5732	struct tg3 *tp = netdev_priv(dev);
5733	u32 len, entry, base_flags, mss;
5734	int would_hit_hwbug;
5735	dma_addr_t mapping;
5736	struct tg3_napi *tnapi;
5737	struct netdev_queue *txq;
5738	unsigned int i, last;
5739
5740	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5741	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5742	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
5743		tnapi++;
5744
5745	/* We are running in BH disabled context with netif_tx_lock
5746	 * and TX reclaim runs via tp->napi.poll inside of a software
5747	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
5748	 * no IRQ context deadlocks to worry about either.  Rejoice!
5749	 */
5750	if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5751		if (!netif_tx_queue_stopped(txq)) {
5752			netif_tx_stop_queue(txq);
5753
5754			/* This is a hard error, log it. */
5755			netdev_err(dev,
5756				   "BUG! Tx Ring full when queue awake!\n");
5757		}
5758		return NETDEV_TX_BUSY;
5759	}
5760
5761	entry = tnapi->tx_prod;
5762	base_flags = 0;
5763	if (skb->ip_summed == CHECKSUM_PARTIAL)
5764		base_flags |= TXD_FLAG_TCPUDP_CSUM;
5765
5766	mss = skb_shinfo(skb)->gso_size;
5767	if (mss) {
5768		struct iphdr *iph;
5769		u32 tcp_opt_len, hdr_len;
5770
5771		if (skb_header_cloned(skb) &&
5772		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5773			dev_kfree_skb(skb);
5774			goto out_unlock;
5775		}
5776
5777		iph = ip_hdr(skb);
5778		tcp_opt_len = tcp_optlen(skb);
5779
5780		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
5781			hdr_len = skb_headlen(skb) - ETH_HLEN;
5782		} else {
5783			u32 ip_tcp_len;
5784
5785			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5786			hdr_len = ip_tcp_len + tcp_opt_len;
5787
5788			iph->check = 0;
5789			iph->tot_len = htons(mss + hdr_len);
5790		}
5791
5792		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
5793			     (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
5794			return tg3_tso_bug(tp, skb);
5795
5796		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5797			       TXD_FLAG_CPU_POST_DMA);
5798
5799		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
5800			tcp_hdr(skb)->check = 0;
5801			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
5802		} else
5803			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5804								 iph->daddr, 0,
5805								 IPPROTO_TCP,
5806								 0);
5807
5808		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
5809			mss |= (hdr_len & 0xc) << 12;
5810			if (hdr_len & 0x10)
5811				base_flags |= 0x00000010;
5812			base_flags |= (hdr_len & 0x3e0) << 5;
5813		} else if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)
5814			mss |= hdr_len << 9;
5815		else if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_1) ||
5816			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5817			if (tcp_opt_len || iph->ihl > 5) {
5818				int tsflags;
5819
5820				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5821				mss |= (tsflags << 11);
5822			}
5823		} else {
5824			if (tcp_opt_len || iph->ihl > 5) {
5825				int tsflags;
5826
5827				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5828				base_flags |= tsflags << 12;
5829			}
5830		}
5831	}
5832#if TG3_VLAN_TAG_USED
5833	if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
5834		base_flags |= (TXD_FLAG_VLAN |
5835			       (vlan_tx_tag_get(skb) << 16));
5836#endif
5837
5838	if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
5839	    !mss && skb->len > ETH_DATA_LEN)
5840		base_flags |= TXD_FLAG_JMB_PKT;
5841
5842	len = skb_headlen(skb);
5843
5844	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
5845	if (pci_dma_mapping_error(tp->pdev, mapping)) {
5846		dev_kfree_skb(skb);
5847		goto out_unlock;
5848	}
5849
5850	tnapi->tx_buffers[entry].skb = skb;
5851	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
5852
5853	would_hit_hwbug = 0;
5854
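	/* Test the mapping against each DMA erratum: buffers of 8 bytes
	 * or less, buffers straddling a 4GB boundary, and buffers that
	 * cross the 40-bit DMA limit all need the workaround on
	 * afflicted chips.
	 */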
5855	if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) && len <= 8)
5856		would_hit_hwbug = 1;
5857
5858	if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
5859	    tg3_4g_overflow_test(mapping, len))
5860		would_hit_hwbug = 1;
5861
5862	if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) &&
5863	    tg3_40bit_overflow_test(tp, mapping, len))
5864		would_hit_hwbug = 1;
5865
5866	if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
5867		would_hit_hwbug = 1;
5868
5869	tg3_set_txd(tnapi, entry, mapping, len, base_flags,
5870		    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5871
5872	entry = NEXT_TX(entry);
5873
5874	/* Now loop through additional data fragments, and queue them. */
5875	if (skb_shinfo(skb)->nr_frags > 0) {
5876		last = skb_shinfo(skb)->nr_frags - 1;
5877		for (i = 0; i <= last; i++) {
5878			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5879
5880			len = frag->size;
5881			mapping = pci_map_page(tp->pdev,
5882					       frag->page,
5883					       frag->page_offset,
5884					       len, PCI_DMA_TODEVICE);
5885
5886			if (pci_dma_mapping_error(tp->pdev, mapping))
5887				goto dma_error;
5888			tnapi->tx_buffers[entry].skb = NULL;
5889			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
5890					   mapping);
5891
5892			if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) &&
5893			    len <= 8)
5894				would_hit_hwbug = 1;
5895
5896			if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
5897			    tg3_4g_overflow_test(mapping, len))
5898				would_hit_hwbug = 1;
5899
5900			if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) &&
5901			    tg3_40bit_overflow_test(tp, mapping, len))
5902				would_hit_hwbug = 1;
5903
5904			if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5905				tg3_set_txd(tnapi, entry, mapping, len,
5906					    base_flags, (i == last)|(mss << 1));
5907			else
5908				tg3_set_txd(tnapi, entry, mapping, len,
5909					    base_flags, (i == last));
5910
5911			entry = NEXT_TX(entry);
5912		}
5913	}
5914
5915	if (would_hit_hwbug) {
5916		u32 last_plus_one = entry;
5917		u32 start;
5918
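		/* Rewind past the head descriptor and all the frag
		 * descriptors just queued to find the ring index at
		 * which this frame started.
		 */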
5919		start = entry - 1 - skb_shinfo(skb)->nr_frags;
5920		start &= (TG3_TX_RING_SIZE - 1);
5921
5922		if (tigon3_dma_hwbug_workaround(tnapi, skb, last_plus_one,
5923						&start, base_flags, mss))
5924			goto out_unlock;
5925
5926		entry = start;
5927	}
5928
5929	/* Packets are ready, update Tx producer idx local and on card. */
5930	tw32_tx_mbox(tnapi->prodmbox, entry);
5931
5932	tnapi->tx_prod = entry;
5933	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
5934		netif_tx_stop_queue(txq);
5935
5936		/* netif_tx_stop_queue() must be done before checking
5937		 * tx index in tg3_tx_avail() below, because in
5938		 * tg3_tx(), we update tx index before checking for
5939		 * netif_tx_queue_stopped().
5940		 */
5941		smp_mb();
5942		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
5943			netif_tx_wake_queue(txq);
5944	}
5945
5946out_unlock:
5947	mmiowb();
5948
5949	return NETDEV_TX_OK;
5950
5951dma_error:
5952	last = i;
5953	entry = tnapi->tx_prod;
5954	tnapi->tx_buffers[entry].skb = NULL;
5955	pci_unmap_single(tp->pdev,
5956			 dma_unmap_addr(&tnapi->tx_buffers[entry], mapping),
5957			 skb_headlen(skb),
5958			 PCI_DMA_TODEVICE);
5959	for (i = 0; i < last; i++) {
5960		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5961		entry = NEXT_TX(entry);
5962
5963		pci_unmap_page(tp->pdev,
5964			       dma_unmap_addr(&tnapi->tx_buffers[entry],
5965					      mapping),
5966			       frag->size, PCI_DMA_TODEVICE);
5967	}
5968
5969	dev_kfree_skb(skb);
5970	return NETDEV_TX_OK;
5971}
5972
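/* On 5780-class chips a jumbo MTU costs us TSO capability; on all
 * other jumbo-capable chips it simply enables the jumbo RX ring.
 */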
5973static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
5974			       int new_mtu)
5975{
5976	dev->mtu = new_mtu;
5977
5978	if (new_mtu > ETH_DATA_LEN) {
5979		if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
5980			tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
5981			ethtool_op_set_tso(dev, 0);
5982		} else {
5983			tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
5984		}
5985	} else {
5986		if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
5987			tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
5988		tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
5989	}
5990}
5991
5992static int tg3_change_mtu(struct net_device *dev, int new_mtu)
5993{
5994	struct tg3 *tp = netdev_priv(dev);
5995	int err;
5996
5997	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
5998		return -EINVAL;
5999
6000	if (!netif_running(dev)) {
6001		/* We'll just catch it later when the
6002		 * device is brought up.
6003		 */
6004		tg3_set_mtu(dev, tp, new_mtu);
6005		return 0;
6006	}
6007
6008	tg3_phy_stop(tp);
6009
6010	tg3_netif_stop(tp);
6011
6012	tg3_full_lock(tp, 1);
6013
6014	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6015
6016	tg3_set_mtu(dev, tp, new_mtu);
6017
6018	err = tg3_restart_hw(tp, 0);
6019
6020	if (!err)
6021		tg3_netif_start(tp);
6022
6023	tg3_full_unlock(tp);
6024
6025	if (!err)
6026		tg3_phy_start(tp);
6027
6028	return err;
6029}
6030
6031static void tg3_rx_prodring_free(struct tg3 *tp,
6032				 struct tg3_rx_prodring_set *tpr)
6033{
6034	int i;
6035
6036	if (tpr != &tp->prodring[0]) {
6037		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
6038		     i = (i + 1) % TG3_RX_RING_SIZE)
6039			tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6040					tp->rx_pkt_map_sz);
6041
6042		if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
6043			for (i = tpr->rx_jmb_cons_idx;
6044			     i != tpr->rx_jmb_prod_idx;
6045			     i = (i + 1) % TG3_RX_JUMBO_RING_SIZE) {
6046				tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6047						TG3_RX_JMB_MAP_SZ);
6048			}
6049		}
6050
6051		return;
6052	}
6053
6054	for (i = 0; i < TG3_RX_RING_SIZE; i++)
6055		tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6056				tp->rx_pkt_map_sz);
6057
6058	if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
6059		for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++)
6060			tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6061					TG3_RX_JMB_MAP_SZ);
6062	}
6063}
6064
6065/* Initialize rx rings for packet processing.
6066 *
6067 * The chip has been shut down and the driver detached from
6068 * the networking stack, so no interrupts or new tx packets will
6069 * end up in the driver.  tp->{tx,}lock are held and thus
6070 * we may not sleep.
6071 */
6072static int tg3_rx_prodring_alloc(struct tg3 *tp,
6073				 struct tg3_rx_prodring_set *tpr)
6074{
6075	u32 i, rx_pkt_dma_sz;
6076
6077	tpr->rx_std_cons_idx = 0;
6078	tpr->rx_std_prod_idx = 0;
6079	tpr->rx_jmb_cons_idx = 0;
6080	tpr->rx_jmb_prod_idx = 0;
6081
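	/* Only the default ring set has its hardware descriptors
	 * initialized below; secondary ring sets only need their sw
	 * buffer bookkeeping cleared.
	 */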
6082	if (tpr != &tp->prodring[0]) {
6083		memset(&tpr->rx_std_buffers[0], 0, TG3_RX_STD_BUFF_RING_SIZE);
6084		if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE)
6085			memset(&tpr->rx_jmb_buffers[0], 0,
6086			       TG3_RX_JMB_BUFF_RING_SIZE);
6087		goto done;
6088	}
6089
6090	/* Zero out all descriptors. */
6091	memset(tpr->rx_std, 0, TG3_RX_RING_BYTES);
6092
6093	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
6094	if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
6095	    tp->dev->mtu > ETH_DATA_LEN)
6096		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
6097	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
6098
6099	/* Initialize invariants of the rings; we only set this
6100	 * stuff once.  This works because the card does not
6101	 * write into the rx buffer posting rings.
6102	 */
6103	for (i = 0; i < TG3_RX_RING_SIZE; i++) {
6104		struct tg3_rx_buffer_desc *rxd;
6105
6106		rxd = &tpr->rx_std[i];
6107		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
6108		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
6109		rxd->opaque = (RXD_OPAQUE_RING_STD |
6110			       (i << RXD_OPAQUE_INDEX_SHIFT));
6111	}
6112
6113	/* Now allocate fresh SKBs for each rx ring. */
6114	for (i = 0; i < tp->rx_pending; i++) {
6115		if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
6116			netdev_warn(tp->dev,
6117				    "Using a smaller RX standard ring. Only "
6118				    "%d out of %d buffers were allocated "
6119				    "successfully\n", i, tp->rx_pending);
6120			if (i == 0)
6121				goto initfail;
6122			tp->rx_pending = i;
6123			break;
6124		}
6125	}
6126
6127	if (!(tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE))
6128		goto done;
6129
6130	memset(tpr->rx_jmb, 0, TG3_RX_JUMBO_RING_BYTES);
6131
6132	if (!(tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE))
6133		goto done;
6134
6135	for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
6136		struct tg3_rx_buffer_desc *rxd;
6137
6138		rxd = &tpr->rx_jmb[i].std;
6139		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
6140		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
6141				  RXD_FLAG_JUMBO;
6142		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
6143		       (i << RXD_OPAQUE_INDEX_SHIFT));
6144	}
6145
6146	for (i = 0; i < tp->rx_jumbo_pending; i++) {
6147		if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
6148			netdev_warn(tp->dev,
6149				    "Using a smaller RX jumbo ring. Only %d "
6150				    "out of %d buffers were allocated "
6151				    "successfully\n", i, tp->rx_jumbo_pending);
6152			if (i == 0)
6153				goto initfail;
6154			tp->rx_jumbo_pending = i;
6155			break;
6156		}
6157	}
6158
6159done:
6160	return 0;
6161
6162initfail:
6163	tg3_rx_prodring_free(tp, tpr);
6164	return -ENOMEM;
6165}
6166
6167static void tg3_rx_prodring_fini(struct tg3 *tp,
6168				 struct tg3_rx_prodring_set *tpr)
6169{
6170	kfree(tpr->rx_std_buffers);
6171	tpr->rx_std_buffers = NULL;
6172	kfree(tpr->rx_jmb_buffers);
6173	tpr->rx_jmb_buffers = NULL;
6174	if (tpr->rx_std) {
6175		pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
6176				    tpr->rx_std, tpr->rx_std_mapping);
6177		tpr->rx_std = NULL;
6178	}
6179	if (tpr->rx_jmb) {
6180		pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
6181				    tpr->rx_jmb, tpr->rx_jmb_mapping);
6182		tpr->rx_jmb = NULL;
6183	}
6184}
6185
6186static int tg3_rx_prodring_init(struct tg3 *tp,
6187				struct tg3_rx_prodring_set *tpr)
6188{
6189	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE, GFP_KERNEL);
6190	if (!tpr->rx_std_buffers)
6191		return -ENOMEM;
6192
6193	tpr->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
6194					   &tpr->rx_std_mapping);
6195	if (!tpr->rx_std)
6196		goto err_out;
6197
6198	if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
6199		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE,
6200					      GFP_KERNEL);
6201		if (!tpr->rx_jmb_buffers)
6202			goto err_out;
6203
6204		tpr->rx_jmb = pci_alloc_consistent(tp->pdev,
6205						   TG3_RX_JUMBO_RING_BYTES,
6206						   &tpr->rx_jmb_mapping);
6207		if (!tpr->rx_jmb)
6208			goto err_out;
6209	}
6210
6211	return 0;
6212
6213err_out:
6214	tg3_rx_prodring_fini(tp, tpr);
6215	return -ENOMEM;
6216}
6217
6218/* Free up pending packets in all rx/tx rings.
6219 *
6220 * The chip has been shut down and the driver detached from
6221 * the networking, so no interrupts or new tx packets will
6222 * the networking stack, so no interrupts or new tx packets will
6223 * in an interrupt context and thus may sleep.
6224 */
6225static void tg3_free_rings(struct tg3 *tp)
6226{
6227	int i, j;
6228
6229	for (j = 0; j < tp->irq_cnt; j++) {
6230		struct tg3_napi *tnapi = &tp->napi[j];
6231
6232		tg3_rx_prodring_free(tp, &tp->prodring[j]);
6233
6234		if (!tnapi->tx_buffers)
6235			continue;
6236
6237		for (i = 0; i < TG3_TX_RING_SIZE; ) {
6238			struct ring_info *txp;
6239			struct sk_buff *skb;
6240			unsigned int k;
6241
6242			txp = &tnapi->tx_buffers[i];
6243			skb = txp->skb;
6244
6245			if (skb == NULL) {
6246				i++;
6247				continue;
6248			}
6249
6250			pci_unmap_single(tp->pdev,
6251					 dma_unmap_addr(txp, mapping),
6252					 skb_headlen(skb),
6253					 PCI_DMA_TODEVICE);
6254			txp->skb = NULL;
6255
6256			i++;
6257
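			/* Frag descriptors follow the head descriptor
			 * contiguously in the ring, wrapping at the
			 * ring mask.
			 */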
6258			for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
6259				txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
6260				pci_unmap_page(tp->pdev,
6261					       dma_unmap_addr(txp, mapping),
6262					       skb_shinfo(skb)->frags[k].size,
6263					       PCI_DMA_TODEVICE);
6264				i++;
6265			}
6266
6267			dev_kfree_skb_any(skb);
6268		}
6269	}
6270}
6271
6272/* Initialize tx/rx rings for packet processing.
6273 *
6274 * The chip has been shut down and the driver detached from
6275 * the networking, so no interrupts or new tx packets will
6276 * the networking stack, so no interrupts or new tx packets will
6277 * we may not sleep.
6278 */
6279static int tg3_init_rings(struct tg3 *tp)
6280{
6281	int i;
6282
6283	/* Free up all the SKBs. */
6284	tg3_free_rings(tp);
6285
6286	for (i = 0; i < tp->irq_cnt; i++) {
6287		struct tg3_napi *tnapi = &tp->napi[i];
6288
6289		tnapi->last_tag = 0;
6290		tnapi->last_irq_tag = 0;
6291		tnapi->hw_status->status = 0;
6292		tnapi->hw_status->status_tag = 0;
6293		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6294
6295		tnapi->tx_prod = 0;
6296		tnapi->tx_cons = 0;
6297		if (tnapi->tx_ring)
6298			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
6299
6300		tnapi->rx_rcb_ptr = 0;
6301		if (tnapi->rx_rcb)
6302			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6303
6304		if (tg3_rx_prodring_alloc(tp, &tp->prodring[i])) {
6305			tg3_free_rings(tp);
6306			return -ENOMEM;
6307		}
6308	}
6309
6310	return 0;
6311}
6312
6313/*
6314 * Must only be invoked with the device's interrupt sources disabled
6315 * and the hardware shut down.
6316 */
6317static void tg3_free_consistent(struct tg3 *tp)
6318{
6319	int i;
6320
6321	for (i = 0; i < tp->irq_cnt; i++) {
6322		struct tg3_napi *tnapi = &tp->napi[i];
6323
6324		if (tnapi->tx_ring) {
6325			pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
6326				tnapi->tx_ring, tnapi->tx_desc_mapping);
6327			tnapi->tx_ring = NULL;
6328		}
6329
6330		kfree(tnapi->tx_buffers);
6331		tnapi->tx_buffers = NULL;
6332
6333		if (tnapi->rx_rcb) {
6334			pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
6335					    tnapi->rx_rcb,
6336					    tnapi->rx_rcb_mapping);
6337			tnapi->rx_rcb = NULL;
6338		}
6339
6340		if (tnapi->hw_status) {
6341			pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
6342					    tnapi->hw_status,
6343					    tnapi->status_mapping);
6344			tnapi->hw_status = NULL;
6345		}
6346	}
6347
6348	if (tp->hw_stats) {
6349		pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
6350				    tp->hw_stats, tp->stats_mapping);
6351		tp->hw_stats = NULL;
6352	}
6353
6354	for (i = 0; i < tp->irq_cnt; i++)
6355		tg3_rx_prodring_fini(tp, &tp->prodring[i]);
6356}
6357
6358/*
6359 * Must only be invoked with the device's interrupt sources disabled
6360 * and the hardware shut down.  Can sleep.
6361 */
6362static int tg3_alloc_consistent(struct tg3 *tp)
6363{
6364	int i;
6365
6366	for (i = 0; i < tp->irq_cnt; i++) {
6367		if (tg3_rx_prodring_init(tp, &tp->prodring[i]))
6368			goto err_out;
6369	}
6370
6371	tp->hw_stats = pci_alloc_consistent(tp->pdev,
6372					    sizeof(struct tg3_hw_stats),
6373					    &tp->stats_mapping);
6374	if (!tp->hw_stats)
6375		goto err_out;
6376
6377	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6378
6379	for (i = 0; i < tp->irq_cnt; i++) {
6380		struct tg3_napi *tnapi = &tp->napi[i];
6381		struct tg3_hw_status *sblk;
6382
6383		tnapi->hw_status = pci_alloc_consistent(tp->pdev,
6384							TG3_HW_STATUS_SIZE,
6385							&tnapi->status_mapping);
6386		if (!tnapi->hw_status)
6387			goto err_out;
6388
6389		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6390		sblk = tnapi->hw_status;
6391
6392		/* If multivector TSS is enabled, vector 0 does not handle
6393		 * tx interrupts.  Don't allocate any resources for it.
6394		 */
6395		if ((!i && !(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) ||
6396		    (i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS))) {
6397			tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) *
6398						    TG3_TX_RING_SIZE,
6399						    GFP_KERNEL);
6400			if (!tnapi->tx_buffers)
6401				goto err_out;
6402
6403			tnapi->tx_ring = pci_alloc_consistent(tp->pdev,
6404							      TG3_TX_RING_BYTES,
6405						       &tnapi->tx_desc_mapping);
6406			if (!tnapi->tx_ring)
6407				goto err_out;
6408		}
6409
6410		/*
6411		 * When RSS is enabled, the status block format changes
6412		 * slightly.  The "rx_jumbo_consumer", "reserved",
6413		 * and "rx_mini_consumer" members get mapped to the
6414		 * other three rx return ring producer indexes.
6415		 */
6416		switch (i) {
6417		default:
6418			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
6419			break;
6420		case 2:
6421			tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
6422			break;
6423		case 3:
6424			tnapi->rx_rcb_prod_idx = &sblk->reserved;
6425			break;
6426		case 4:
6427			tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
6428			break;
6429		}
6430
6431		tnapi->prodring = &tp->prodring[i];
6432
6433		/*
6434		 * If multivector RSS is enabled, vector 0 does not handle
6435		 * rx or tx interrupts.  Don't allocate any resources for it.
6436		 */
6437		if (!i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS))
6438			continue;
6439
6440		tnapi->rx_rcb = pci_alloc_consistent(tp->pdev,
6441						     TG3_RX_RCB_RING_BYTES(tp),
6442						     &tnapi->rx_rcb_mapping);
6443		if (!tnapi->rx_rcb)
6444			goto err_out;
6445
6446		memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6447	}
6448
6449	return 0;
6450
6451err_out:
6452	tg3_free_consistent(tp);
6453	return -ENOMEM;
6454}
6455
6456#define MAX_WAIT_CNT 1000
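/* 1000 polls at 100 usec apiece caps each stop/wait loop at ~100ms. */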
6457
6458/* To stop a block, clear the enable bit and poll till it
6459 * clears.  tp->lock is held.
6460 */
6461static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
6462{
6463	unsigned int i;
6464	u32 val;
6465
6466	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6467		switch (ofs) {
6468		case RCVLSC_MODE:
6469		case DMAC_MODE:
6470		case MBFREE_MODE:
6471		case BUFMGR_MODE:
6472		case MEMARB_MODE:
6473			/* We can't enable/disable these bits of the
6474			 * 5705/5750, so just report success.
6475			 */
6476			return 0;
6477
6478		default:
6479			break;
6480		}
6481	}
6482
6483	val = tr32(ofs);
6484	val &= ~enable_bit;
6485	tw32_f(ofs, val);
6486
6487	for (i = 0; i < MAX_WAIT_CNT; i++) {
6488		udelay(100);
6489		val = tr32(ofs);
6490		if ((val & enable_bit) == 0)
6491			break;
6492	}
6493
6494	if (i == MAX_WAIT_CNT && !silent) {
6495		dev_err(&tp->pdev->dev,
6496			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
6497			ofs, enable_bit);
6498		return -ENODEV;
6499	}
6500
6501	return 0;
6502}
6503
6504/* tp->lock is held. */
6505static int tg3_abort_hw(struct tg3 *tp, int silent)
6506{
6507	int i, err;
6508
6509	tg3_disable_ints(tp);
6510
6511	tp->rx_mode &= ~RX_MODE_ENABLE;
6512	tw32_f(MAC_RX_MODE, tp->rx_mode);
6513	udelay(10);
6514
6515	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
6516	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
6517	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
6518	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
6519	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
6520	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
6521
6522	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
6523	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
6524	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
6525	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
6526	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
6527	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
6528	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
6529
6530	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
6531	tw32_f(MAC_MODE, tp->mac_mode);
6532	udelay(40);
6533
6534	tp->tx_mode &= ~TX_MODE_ENABLE;
6535	tw32_f(MAC_TX_MODE, tp->tx_mode);
6536
6537	for (i = 0; i < MAX_WAIT_CNT; i++) {
6538		udelay(100);
6539		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
6540			break;
6541	}
6542	if (i >= MAX_WAIT_CNT) {
6543		dev_err(&tp->pdev->dev,
6544			"%s timed out, TX_MODE_ENABLE will not clear "
6545			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
6546		err |= -ENODEV;
6547	}
6548
6549	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
6550	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
6551	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
6552
6553	tw32(FTQ_RESET, 0xffffffff);
6554	tw32(FTQ_RESET, 0x00000000);
6555
6556	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
6557	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
6558
6559	for (i = 0; i < tp->irq_cnt; i++) {
6560		struct tg3_napi *tnapi = &tp->napi[i];
6561		if (tnapi->hw_status)
6562			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6563	}
6564	if (tp->hw_stats)
6565		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6566
6567	return err;
6568}
6569
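/* Post a single event to the APE management processor: wait for any
 * previously latched event to drain, then write the new event word
 * under TG3_APE_LOCK_MEM and kick TG3_APE_EVENT.
 */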
6570static void tg3_ape_send_event(struct tg3 *tp, u32 event)
6571{
6572	int i;
6573	u32 apedata;
6574
6575	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
6576	if (apedata != APE_SEG_SIG_MAGIC)
6577		return;
6578
6579	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
6580	if (!(apedata & APE_FW_STATUS_READY))
6581		return;
6582
6583	/* Wait for up to 1 millisecond for APE to service previous event. */
6584	for (i = 0; i < 10; i++) {
6585		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
6586			return;
6587
6588		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
6589
6590		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6591			tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
6592					event | APE_EVENT_STATUS_EVENT_PENDING);
6593
6594		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
6595
6596		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6597			break;
6598
6599		udelay(100);
6600	}
6601
6602	if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6603		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
6604}
6605
6606static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
6607{
6608	u32 event;
6609	u32 apedata;
6610
6611	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
6612		return;
6613
6614	switch (kind) {
6615	case RESET_KIND_INIT:
6616		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
6617				APE_HOST_SEG_SIG_MAGIC);
6618		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
6619				APE_HOST_SEG_LEN_MAGIC);
6620		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
6621		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
6622		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
6623			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
6624		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
6625				APE_HOST_BEHAV_NO_PHYLOCK);
6626
6627		event = APE_EVENT_STATUS_STATE_START;
6628		break;
6629	case RESET_KIND_SHUTDOWN:
6630		/* With the interface we are currently using,
6631		 * APE does not track driver state.  Wiping
6632		 * out the HOST SEGMENT SIGNATURE forces
6633		 * the APE to assume OS absent status.
6634		 */
6635		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
6636
6637		event = APE_EVENT_STATUS_STATE_UNLOAD;
6638		break;
6639	case RESET_KIND_SUSPEND:
6640		event = APE_EVENT_STATUS_STATE_SUSPEND;
6641		break;
6642	default:
6643		return;
6644	}
6645
6646	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
6647
6648	tg3_ape_send_event(tp, event);
6649}
6650
6651/* tp->lock is held. */
6652static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
6653{
6654	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
6655		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
6656
6657	if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
6658		switch (kind) {
6659		case RESET_KIND_INIT:
6660			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6661				      DRV_STATE_START);
6662			break;
6663
6664		case RESET_KIND_SHUTDOWN:
6665			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6666				      DRV_STATE_UNLOAD);
6667			break;
6668
6669		case RESET_KIND_SUSPEND:
6670			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6671				      DRV_STATE_SUSPEND);
6672			break;
6673
6674		default:
6675			break;
6676		}
6677	}
6678
6679	if (kind == RESET_KIND_INIT ||
6680	    kind == RESET_KIND_SUSPEND)
6681		tg3_ape_driver_state_change(tp, kind);
6682}
6683
6684/* tp->lock is held. */
6685static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
6686{
6687	if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
6688		switch (kind) {
6689		case RESET_KIND_INIT:
6690			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6691				      DRV_STATE_START_DONE);
6692			break;
6693
6694		case RESET_KIND_SHUTDOWN:
6695			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6696				      DRV_STATE_UNLOAD_DONE);
6697			break;
6698
6699		default:
6700			break;
6701		}
6702	}
6703
6704	if (kind == RESET_KIND_SHUTDOWN)
6705		tg3_ape_driver_state_change(tp, kind);
6706}
6707
6708/* tp->lock is held. */
6709static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
6710{
6711	if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6712		switch (kind) {
6713		case RESET_KIND_INIT:
6714			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6715				      DRV_STATE_START);
6716			break;
6717
6718		case RESET_KIND_SHUTDOWN:
6719			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6720				      DRV_STATE_UNLOAD);
6721			break;
6722
6723		case RESET_KIND_SUSPEND:
6724			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6725				      DRV_STATE_SUSPEND);
6726			break;
6727
6728		default:
6729			break;
6730		}
6731	}
6732}
6733
6734static int tg3_poll_fw(struct tg3 *tp)
6735{
6736	int i;
6737	u32 val;
6738
6739	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6740		/* Wait up to 20ms for init done. */
6741		for (i = 0; i < 200; i++) {
6742			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
6743				return 0;
6744			udelay(100);
6745		}
6746		return -ENODEV;
6747	}
6748
6749	/* Wait for firmware initialization to complete. */
6750	for (i = 0; i < 100000; i++) {
6751		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
6752		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
6753			break;
6754		udelay(10);
6755	}
6756
6757	/* Chip might not be fitted with firmware.  Some Sun onboard
6758	 * parts are configured like that.  So don't signal the timeout
6759	 * of the above loop as an error, but do report the lack of
6760	 * running firmware once.
6761	 */
6762	if (i >= 100000 &&
6763	    !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
6764		tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
6765
6766		netdev_info(tp->dev, "No firmware running\n");
6767	}
6768
6769	if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
6770		/* The 57765 A0 needs a little more
6771		 * time to do some important work.
6772		 */
6773		mdelay(10);
6774	}
6775
6776	return 0;
6777}
6778
6779/* Save PCI command register before chip reset */
6780static void tg3_save_pci_state(struct tg3 *tp)
6781{
6782	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
6783}
6784
6785/* Restore PCI state after chip reset */
6786static void tg3_restore_pci_state(struct tg3 *tp)
6787{
6788	u32 val;
6789
6790	/* Re-enable indirect register accesses. */
6791	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
6792			       tp->misc_host_ctrl);
6793
6794	/* Set MAX PCI retry to zero. */
6795	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
6796	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6797	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
6798		val |= PCISTATE_RETRY_SAME_DMA;
6799	/* Allow reads and writes to the APE register and memory space. */
6800	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
6801		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
6802		       PCISTATE_ALLOW_APE_SHMEM_WR |
6803		       PCISTATE_ALLOW_APE_PSPACE_WR;
6804	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
6805
6806	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
6807
6808	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
6809		if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6810			pcie_set_readrq(tp->pdev, 4096);
6811		else {
6812			pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
6813					      tp->pci_cacheline_sz);
6814			pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
6815					      tp->pci_lat_timer);
6816		}
6817	}
6818
6819	/* Make sure PCI-X relaxed ordering bit is clear. */
6820	if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
6821		u16 pcix_cmd;
6822
6823		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6824				     &pcix_cmd);
6825		pcix_cmd &= ~PCI_X_CMD_ERO;
6826		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6827				      pcix_cmd);
6828	}
6829
6830	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
6831
6832		/* Chip reset on 5780 will reset MSI enable bit,
6833		 * so need to restore it.
6834		 */
6835		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6836			u16 ctrl;
6837
6838			pci_read_config_word(tp->pdev,
6839					     tp->msi_cap + PCI_MSI_FLAGS,
6840					     &ctrl);
6841			pci_write_config_word(tp->pdev,
6842					      tp->msi_cap + PCI_MSI_FLAGS,
6843					      ctrl | PCI_MSI_FLAGS_ENABLE);
6844			val = tr32(MSGINT_MODE);
6845			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
6846		}
6847	}
6848}
6849
6850static void tg3_stop_fw(struct tg3 *);
6851
6852/* tp->lock is held. */
6853static int tg3_chip_reset(struct tg3 *tp)
6854{
6855	u32 val;
6856	void (*write_op)(struct tg3 *, u32, u32);
6857	int i, err;
6858
6859	tg3_nvram_lock(tp);
6860
6861	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
6862
6863	/* No matching tg3_nvram_unlock() after this because
6864	 * chip reset below will undo the nvram lock.
6865	 */
6866	tp->nvram_lock_cnt = 0;
6867
6868	/* GRC_MISC_CFG core clock reset will clear the memory
6869	 * enable bit in PCI register 4 and the MSI enable bit
6870	 * on some chips, so we save relevant registers here.
6871	 */
6872	tg3_save_pci_state(tp);
6873
6874	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
6875	    (tp->tg3_flags3 & TG3_FLG3_5755_PLUS))
6876		tw32(GRC_FASTBOOT_PC, 0);
6877
6878	write_op = tp->write32;
6879	if (write_op == tg3_write_flush_reg32)
6880		tp->write32 = tg3_write32;
6881
6882	/* Prevent the irq handler from reading or writing PCI registers
6883	 * during chip reset when the memory enable bit in the PCI command
6884	 * register may be cleared.  The chip does not generate interrupts
6885	 * at this time, but the irq handler may still be called due to irq
6886	 * sharing or irqpoll.
6887	 */
6888	tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
6889	for (i = 0; i < tp->irq_cnt; i++) {
6890		struct tg3_napi *tnapi = &tp->napi[i];
6891		if (tnapi->hw_status) {
6892			tnapi->hw_status->status = 0;
6893			tnapi->hw_status->status_tag = 0;
6894		}
6895		tnapi->last_tag = 0;
6896		tnapi->last_irq_tag = 0;
6897	}
6898	smp_mb();
6899
6900	for (i = 0; i < tp->irq_cnt; i++)
6901		synchronize_irq(tp->napi[i].irq_vec);
6902
6903	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
6904		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
6905		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
6906	}
6907
6908	/* do the reset */
6909	val = GRC_MISC_CFG_CORECLK_RESET;
6910
6911	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
6912		/* Force PCIe 1.0a mode */
6913		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
6914		    !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
6915		    tr32(TG3_PCIE_PHY_TSTCTL) ==
6916		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
6917			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
6918
6919		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
6920			tw32(GRC_MISC_CFG, (1 << 29));
6921			val |= (1 << 29);
6922		}
6923	}
6924
6925	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6926		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
6927		tw32(GRC_VCPU_EXT_CTRL,
6928		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
6929	}
6930
6931	/* Manage gphy power for all PCIe devices that lack a CPMU. */
6932	if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
6933	    !(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT))
6934		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
6935
6936	tw32(GRC_MISC_CFG, val);
6937
6938	tp->write32 = write_op;
6939
6940	/* Unfortunately, we have to delay before the PCI read back.
6941	 * Some 575X chips will not even respond to a PCI cfg access
6942	 * when the reset command is given to the chip.
6943	 *
6944	 * How do these hardware designers expect things to work
6945	 * properly if the PCI write is posted for a long period
6946	 * of time?  It is always necessary to have some method by
6947	 * which a register read back can occur to push the write
6948	 * out which does the reset.
6949	 *
6950	 * For most tg3 variants the trick below was working.
6951	 * Ho hum...
6952	 */
6953	udelay(120);
6954
6955	/* Flush PCI posted writes.  The normal MMIO registers
6956	 * are inaccessible at this time so this is the only
6957	 * way to do this reliably (actually, this is no longer
6958	 * the case, see above).  I tried to use indirect
6959	 * register read/write but this upset some 5701 variants.
6960	 */
6961	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
6962
6963	udelay(120);
6964
6965	if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && tp->pcie_cap) {
6966		u16 val16;
6967
6968		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
6969			int i;
6970			u32 cfg_val;
6971
6972			/* Wait for link training to complete.  */
6973			for (i = 0; i < 5000; i++)
6974				udelay(100);
6975
6976			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
6977			pci_write_config_dword(tp->pdev, 0xc4,
6978					       cfg_val | (1 << 15));
6979		}
6980
6981		/* Clear the "no snoop" and "relaxed ordering" bits. */
6982		pci_read_config_word(tp->pdev,
6983				     tp->pcie_cap + PCI_EXP_DEVCTL,
6984				     &val16);
6985		val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
6986			   PCI_EXP_DEVCTL_NOSNOOP_EN);
6987		/*
6988		 * Older PCIe devices only support the 128 byte
6989		 * MPS setting.  Enforce the restriction.
6990		 */
6991		if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT))
6992			val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
6993		pci_write_config_word(tp->pdev,
6994				      tp->pcie_cap + PCI_EXP_DEVCTL,
6995				      val16);
6996
6997		pcie_set_readrq(tp->pdev, 4096);
6998
6999		/* Clear error status */
7000		pci_write_config_word(tp->pdev,
7001				      tp->pcie_cap + PCI_EXP_DEVSTA,
7002				      PCI_EXP_DEVSTA_CED |
7003				      PCI_EXP_DEVSTA_NFED |
7004				      PCI_EXP_DEVSTA_FED |
7005				      PCI_EXP_DEVSTA_URD);
7006	}
7007
7008	tg3_restore_pci_state(tp);
7009
7010	tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
7011
7012	val = 0;
7013	if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
7014		val = tr32(MEMARB_MODE);
7015	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7016
7017	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7018		tg3_stop_fw(tp);
7019		tw32(0x5000, 0x400);
7020	}
7021
7022	tw32(GRC_MODE, tp->grc_mode);
7023
7024	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7025		val = tr32(0xc4);
7026
7027		tw32(0xc4, val | (1 << 15));
7028	}
7029
7030	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7031	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7032		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7033		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7034			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7035		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7036	}
7037
7038	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7039		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
7040		tw32_f(MAC_MODE, tp->mac_mode);
7041	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7042		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
7043		tw32_f(MAC_MODE, tp->mac_mode);
7044	} else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
7045		tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
7046		if (tp->mac_mode & MAC_MODE_APE_TX_EN)
7047			tp->mac_mode |= MAC_MODE_TDE_ENABLE;
7048		tw32_f(MAC_MODE, tp->mac_mode);
7049	} else
7050		tw32_f(MAC_MODE, 0);
7051	udelay(40);
7052
7053	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7054
7055	err = tg3_poll_fw(tp);
7056	if (err)
7057		return err;
7058
7059	tg3_mdio_start(tp);
7060
7061	if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
7062	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7063	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7064	    !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) {
7065		val = tr32(0x7c00);
7066
7067		tw32(0x7c00, val | (1 << 25));
7068	}
7069
7070	/* Reprobe ASF enable state.  */
7071	tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
7072	tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
7073	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7074	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7075		u32 nic_cfg;
7076
7077		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7078		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7079			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
7080			tp->last_event_jiffies = jiffies;
7081			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
7082				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
7083		}
7084	}
7085
7086	return 0;
7087}
7088
7089/* tp->lock is held. */
7090static void tg3_stop_fw(struct tg3 *tp)
7091{
7092	if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
7093	   !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
7094		/* Wait for RX cpu to ACK the previous event. */
7095		tg3_wait_for_event_ack(tp);
7096
7097		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
7098
7099		tg3_generate_fw_event(tp);
7100
7101		/* Wait for RX cpu to ACK this event. */
7102		tg3_wait_for_event_ack(tp);
7103	}
7104}
7105
7106/* tp->lock is held. */
7107static int tg3_halt(struct tg3 *tp, int kind, int silent)
7108{
7109	int err;
7110
7111	tg3_stop_fw(tp);
7112
7113	tg3_write_sig_pre_reset(tp, kind);
7114
7115	tg3_abort_hw(tp, silent);
7116	err = tg3_chip_reset(tp);
7117
7118	__tg3_set_mac_addr(tp, 0);
7119
7120	tg3_write_sig_legacy(tp, kind);
7121	tg3_write_sig_post_reset(tp, kind);
7122
7123	if (err)
7124		return err;
7125
7126	return 0;
7127}
7128
7129#define RX_CPU_SCRATCH_BASE	0x30000
7130#define RX_CPU_SCRATCH_SIZE	0x04000
7131#define TX_CPU_SCRATCH_BASE	0x34000
7132#define TX_CPU_SCRATCH_SIZE	0x04000
7133
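/* Halting an on-chip CPU means writing the halt bit and polling
 * CPU_MODE until it latches; the 5906's virtual CPU is halted via
 * GRC_VCPU_EXT_CTRL instead.
 */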
7134/* tp->lock is held. */
7135static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
7136{
7137	int i;
7138
7139	BUG_ON(offset == TX_CPU_BASE &&
7140	    (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
7141
7142	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7143		u32 val = tr32(GRC_VCPU_EXT_CTRL);
7144
7145		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
7146		return 0;
7147	}
7148	if (offset == RX_CPU_BASE) {
7149		for (i = 0; i < 10000; i++) {
7150			tw32(offset + CPU_STATE, 0xffffffff);
7151			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
7152			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7153				break;
7154		}
7155
7156		tw32(offset + CPU_STATE, 0xffffffff);
7157		tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
7158		udelay(10);
7159	} else {
7160		for (i = 0; i < 10000; i++) {
7161			tw32(offset + CPU_STATE, 0xffffffff);
7162			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
7163			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7164				break;
7165		}
7166	}
7167
7168	if (i >= 10000) {
7169		netdev_err(tp->dev, "%s timed out, %s CPU\n",
7170			   __func__, offset == RX_CPU_BASE ? "RX" : "TX");
7171		return -ENODEV;
7172	}
7173
7174	/* Clear firmware's nvram arbitration. */
7175	if (tp->tg3_flags & TG3_FLAG_NVRAM)
7176		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
7177	return 0;
7178}
7179
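/* Describes one firmware image destined for NIC-internal memory:
 * fw_base is the load/start address, fw_len the image length in
 * bytes, and fw_data the image itself as big-endian words.
 */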
7180struct fw_info {
7181	unsigned int fw_base;
7182	unsigned int fw_len;
7183	const __be32 *fw_data;
7184};
7185
7186/* tp->lock is held. */
7187static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
7188				 int cpu_scratch_size, struct fw_info *info)
7189{
7190	int err, lock_err, i;
7191	void (*write_op)(struct tg3 *, u32, u32);
7192
7193	if (cpu_base == TX_CPU_BASE &&
7194	    (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7195		netdev_err(tp->dev,
7196			   "%s: Trying to load TX cpu firmware on a 5705-class chip\n",
7197			   __func__);
7198		return -EINVAL;
7199	}
7200
7201	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7202		write_op = tg3_write_mem;
7203	else
7204		write_op = tg3_write_indirect_reg32;
7205
7206	/* It is possible that bootcode is still loading at this point.
7207	 * Get the nvram lock before halting the cpu.
7208	 */
7209	lock_err = tg3_nvram_lock(tp);
7210	err = tg3_halt_cpu(tp, cpu_base);
7211	if (!lock_err)
7212		tg3_nvram_unlock(tp);
7213	if (err)
7214		goto out;
7215
7216	for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
7217		write_op(tp, cpu_scratch_base + i, 0);
7218	tw32(cpu_base + CPU_STATE, 0xffffffff);
7219	tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
7220	for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
7221		write_op(tp, (cpu_scratch_base +
7222			      (info->fw_base & 0xffff) +
7223			      (i * sizeof(u32))),
7224			      be32_to_cpu(info->fw_data[i]));
7225
7226	err = 0;
7227
7228out:
7229	return err;
7230}
7231
7232/* tp->lock is held. */
7233static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
7234{
7235	struct fw_info info;
7236	const __be32 *fw_data;
7237	int err, i;
7238
7239	fw_data = (void *)tp->fw->data;
7240
7241	/* Firmware blob starts with version numbers, followed by
7242	 * start address and length.  We are setting complete length;
7243	 * length = end_address_of_bss - start_address_of_text.
7244	 * The remainder is the blob to be loaded contiguously
7245	 * from the start address. */
7246
7247	info.fw_base = be32_to_cpu(fw_data[1]);
7248	info.fw_len = tp->fw->size - 12;
7249	info.fw_data = &fw_data[3];
7250
7251	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
7252				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
7253				    &info);
7254	if (err)
7255		return err;
7256
7257	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
7258				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
7259				    &info);
7260	if (err)
7261		return err;
7262
7263	/* Now startup only the RX cpu. */
7264	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7265	tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7266
7267	for (i = 0; i < 5; i++) {
7268		if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
7269			break;
7270		tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7271		tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
7272		tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7273		udelay(1000);
7274	}
7275	if (i >= 5) {
7276		netdev_err(tp->dev, "%s failed to set RX CPU PC, is %08x, "
7277			   "should be %08x\n", __func__,
7278			   tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
7279		return -ENODEV;
7280	}
7281	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7282	tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
7283
7284	return 0;
7285}
7286
7287/* 5705 needs a special version of the TSO firmware.  */
7288
7289/* tp->lock is held. */
7290static int tg3_load_tso_firmware(struct tg3 *tp)
7291{
7292	struct fw_info info;
7293	const __be32 *fw_data;
7294	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
7295	int err, i;
7296
7297	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7298		return 0;
7299
7300	fw_data = (void *)tp->fw->data;
7301
7302	/* Firmware blob starts with version numbers, followed by
7303	 * start address and length.  We are setting complete length;
7304	 * length = end_address_of_bss - start_address_of_text.
7305	 * The remainder is the blob to be loaded contiguously
7306	 * from the start address. */
7307
7308	info.fw_base = be32_to_cpu(fw_data[1]);
7309	cpu_scratch_size = tp->fw_len;
7310	info.fw_len = tp->fw->size - 12;
7311	info.fw_data = &fw_data[3];
7312
7313	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7314		cpu_base = RX_CPU_BASE;
7315		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
7316	} else {
7317		cpu_base = TX_CPU_BASE;
7318		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
7319		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
7320	}
7321
7322	err = tg3_load_firmware_cpu(tp, cpu_base,
7323				    cpu_scratch_base, cpu_scratch_size,
7324				    &info);
7325	if (err)
7326		return err;
7327
7328	/* Now startup the cpu. */
7329	tw32(cpu_base + CPU_STATE, 0xffffffff);
7330	tw32_f(cpu_base + CPU_PC, info.fw_base);
7331
7332	for (i = 0; i < 5; i++) {
7333		if (tr32(cpu_base + CPU_PC) == info.fw_base)
7334			break;
7335		tw32(cpu_base + CPU_STATE, 0xffffffff);
7336		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
7337		tw32_f(cpu_base + CPU_PC, info.fw_base);
7338		udelay(1000);
7339	}
7340	if (i >= 5) {
7341		netdev_err(tp->dev,
7342			   "%s failed to set CPU PC, is %08x, should be %08x\n",
7343			   __func__, tr32(cpu_base + CPU_PC), info.fw_base);
7344		return -ENODEV;
7345	}
7346	tw32(cpu_base + CPU_STATE, 0xffffffff);
7347	tw32_f(cpu_base + CPU_MODE,  0x00000000);
7348	return 0;
7349}
7350
7351
7352static int tg3_set_mac_addr(struct net_device *dev, void *p)
7353{
7354	struct tg3 *tp = netdev_priv(dev);
7355	struct sockaddr *addr = p;
7356	int err = 0, skip_mac_1 = 0;
7357
7358	if (!is_valid_ether_addr(addr->sa_data))
7359		return -EINVAL;
7360
7361	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7362
7363	if (!netif_running(dev))
7364		return 0;
7365
7366	if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7367		u32 addr0_high, addr0_low, addr1_high, addr1_low;
7368
7369		addr0_high = tr32(MAC_ADDR_0_HIGH);
7370		addr0_low = tr32(MAC_ADDR_0_LOW);
7371		addr1_high = tr32(MAC_ADDR_1_HIGH);
7372		addr1_low = tr32(MAC_ADDR_1_LOW);
7373
7374		/* Skip MAC addr 1 if ASF is using it. */
7375		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7376		    !(addr1_high == 0 && addr1_low == 0))
7377			skip_mac_1 = 1;
7378	}
7379	spin_lock_bh(&tp->lock);
7380	__tg3_set_mac_addr(tp, skip_mac_1);
7381	spin_unlock_bh(&tp->lock);
7382
7383	return err;
7384}
7385
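/* A BDINFO block in NIC SRAM describes one ring to the hardware: a
 * 64-bit host DMA address, a maxlen/flags word, and, on pre-5705
 * chips, a NIC-local ring address.
 */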
7386/* tp->lock is held. */
7387static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7388			   dma_addr_t mapping, u32 maxlen_flags,
7389			   u32 nic_addr)
7390{
7391	tg3_write_mem(tp,
7392		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7393		      ((u64) mapping >> 32));
7394	tg3_write_mem(tp,
7395		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7396		      ((u64) mapping & 0xffffffff));
7397	tg3_write_mem(tp,
7398		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7399		       maxlen_flags);
7400
7401	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7402		tg3_write_mem(tp,
7403			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7404			      nic_addr);
7405}
7406
7407static void __tg3_set_rx_mode(struct net_device *);
7408static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7409{
7410	int i;
7411
7412	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) {
7413		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7414		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7415		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7416	} else {
7417		tw32(HOSTCC_TXCOL_TICKS, 0);
7418		tw32(HOSTCC_TXMAX_FRAMES, 0);
7419		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
7420	}
7421
7422	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) {
7423		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7424		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7425		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7426	} else {
7427		tw32(HOSTCC_RXCOL_TICKS, 0);
7428		tw32(HOSTCC_RXMAX_FRAMES, 0);
7429		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
7430	}
7431
7432	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7433		u32 val = ec->stats_block_coalesce_usecs;
7434
7435		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7436		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7437
7438		if (!netif_carrier_ok(tp->dev))
7439			val = 0;
7440
7441		tw32(HOSTCC_STAT_COAL_TICKS, val);
7442	}
7443
7444	for (i = 0; i < tp->irq_cnt - 1; i++) {
7445		u32 reg;
7446
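		/* Per-vector coalescing registers sit in blocks of 0x18
		 * bytes starting at the VEC1 offsets.
		 */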
7447		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
7448		tw32(reg, ec->rx_coalesce_usecs);
7449		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
7450		tw32(reg, ec->rx_max_coalesced_frames);
7451		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
7452		tw32(reg, ec->rx_max_coalesced_frames_irq);
7453
7454		if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) {
7455			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
7456			tw32(reg, ec->tx_coalesce_usecs);
7457			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
7458			tw32(reg, ec->tx_max_coalesced_frames);
7459			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
7460			tw32(reg, ec->tx_max_coalesced_frames_irq);
7461		}
7462	}
7463
7464	for (; i < tp->irq_max - 1; i++) {
7465		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
7466		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
7467		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7468
7469		if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) {
7470			tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
7471			tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
7472			tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7473		}
7474	}
7475}
7476
7477/* tp->lock is held. */
7478static void tg3_rings_reset(struct tg3 *tp)
7479{
7480	int i;
7481	u32 stblk, txrcb, rxrcb, limit;
7482	struct tg3_napi *tnapi = &tp->napi[0];
7483
7484	/* Disable all transmit rings but the first. */
7485	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7486		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
7487	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7488		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
7489	else
7490		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7491
7492	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7493	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
7494		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
7495			      BDINFO_FLAGS_DISABLED);
7496
7497
7498	/* Disable all receive return rings but the first. */
7499	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
7500	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
7501		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
7502	else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7503		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
7504	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7505		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7506		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
7507	else
7508		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7509
7510	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7511	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
7512		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
7513			      BDINFO_FLAGS_DISABLED);
7514
7515	/* Disable interrupts */
7516	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
7517
7518	/* Zero mailbox registers. */
7519	if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) {
7520		for (i = 1; i < TG3_IRQ_MAX_VECS; i++) {
7521			tp->napi[i].tx_prod = 0;
7522			tp->napi[i].tx_cons = 0;
7523			if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
7524				tw32_mailbox(tp->napi[i].prodmbox, 0);
7525			tw32_rx_mbox(tp->napi[i].consmbox, 0);
7526			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
7527		}
7528		if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS))
7529			tw32_mailbox(tp->napi[0].prodmbox, 0);
7530	} else {
7531		tp->napi[0].tx_prod = 0;
7532		tp->napi[0].tx_cons = 0;
7533		tw32_mailbox(tp->napi[0].prodmbox, 0);
7534		tw32_rx_mbox(tp->napi[0].consmbox, 0);
7535	}
7536
7537	/* Make sure the NIC-based send BD rings are disabled. */
7538	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7539		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
7540		for (i = 0; i < 16; i++)
7541			tw32_tx_mbox(mbox + i * 8, 0);
7542	}
7543
7544	txrcb = NIC_SRAM_SEND_RCB;
7545	rxrcb = NIC_SRAM_RCV_RET_RCB;
7546
7547	/* Clear status block in ram. */
7548	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7549
7550	/* Set status block DMA address */
7551	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7552	     ((u64) tnapi->status_mapping >> 32));
7553	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7554	     ((u64) tnapi->status_mapping & 0xffffffff));
7555
7556	if (tnapi->tx_ring) {
7557		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7558			       (TG3_TX_RING_SIZE <<
7559				BDINFO_FLAGS_MAXLEN_SHIFT),
7560			       NIC_SRAM_TX_BUFFER_DESC);
7561		txrcb += TG3_BDINFO_SIZE;
7562	}
7563
7564	if (tnapi->rx_rcb) {
7565		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7566			       (TG3_RX_RCB_RING_SIZE(tp) <<
7567				BDINFO_FLAGS_MAXLEN_SHIFT), 0);
7568		rxrcb += TG3_BDINFO_SIZE;
7569	}
7570
7571	stblk = HOSTCC_STATBLCK_RING1;
7572
7573	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
7574		u64 mapping = (u64)tnapi->status_mapping;
7575		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
7576		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
7577
7578		/* Clear status block in ram. */
7579		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7580
7581		if (tnapi->tx_ring) {
7582			tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7583				       (TG3_TX_RING_SIZE <<
7584					BDINFO_FLAGS_MAXLEN_SHIFT),
7585				       NIC_SRAM_TX_BUFFER_DESC);
7586			txrcb += TG3_BDINFO_SIZE;
7587		}
7588
7589		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7590			       (TG3_RX_RCB_RING_SIZE(tp) <<
7591				BDINFO_FLAGS_MAXLEN_SHIFT), 0);
7592
7593		stblk += 8;
7594		rxrcb += TG3_BDINFO_SIZE;
7595	}
7596}
7597
7598/* tp->lock is held. */
7599static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7600{
7601	u32 val, rdmac_mode;
7602	int i, err, limit;
7603	struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
7604
7605	tg3_disable_ints(tp);
7606
7607	tg3_stop_fw(tp);
7608
7609	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7610
7611	if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
7612		tg3_abort_hw(tp, 1);
7613
7614	if (reset_phy)
7615		tg3_phy_reset(tp);
7616
7617	err = tg3_chip_reset(tp);
7618	if (err)
7619		return err;
7620
7621	tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7622
7623	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
7624		val = tr32(TG3_CPMU_CTRL);
7625		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7626		tw32(TG3_CPMU_CTRL, val);
7627
7628		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7629		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7630		val |= CPMU_LSPD_10MB_MACCLK_6_25;
7631		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7632
7633		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7634		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7635		val |= CPMU_LNK_AWARE_MACCLK_6_25;
7636		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7637
7638		val = tr32(TG3_CPMU_HST_ACC);
7639		val &= ~CPMU_HST_ACC_MACCLK_MASK;
7640		val |= CPMU_HST_ACC_MACCLK_6_25;
7641		tw32(TG3_CPMU_HST_ACC, val);
7642	}
7643
7644	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7645		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
7646		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
7647		       PCIE_PWR_MGMT_L1_THRESH_4MS;
7648		tw32(PCIE_PWR_MGMT_THRESH, val);
7649
7650		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
7651		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
7652
7653		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
7654
7655		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7656		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7657	}
7658
7659	if (tp->tg3_flags3 & TG3_FLG3_L1PLLPD_EN) {
7660		u32 grc_mode = tr32(GRC_MODE);
7661
7662		/* Access the lower 1K of PL PCIE block registers. */
7663		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7664		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
7665
7666		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
7667		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
7668		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
7669
7670		tw32(GRC_MODE, grc_mode);
7671	}
7672
7673	if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
7674		u32 grc_mode = tr32(GRC_MODE);
7675
7676		/* Access the lower 1K of PL PCIE block registers. */
7677		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7678		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
7679
7680		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5);
7681		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
7682		     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
7683
7684		tw32(GRC_MODE, grc_mode);
7685
7686		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7687		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7688		val |= CPMU_LSPD_10MB_MACCLK_6_25;
7689		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7690	}
7691
7692	/* This works around an issue with Athlon chipsets on
7693	 * B3 tigon3 silicon.  This bit has no effect on any
7694	 * other revision.  But do not set this on PCI Express
7695	 * chips and don't even touch the clocks if the CPMU is present.
7696	 */
7697	if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
7698		if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7699			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
7700		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7701	}
7702
7703	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7704	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
7705		val = tr32(TG3PCI_PCISTATE);
7706		val |= PCISTATE_RETRY_SAME_DMA;
7707		tw32(TG3PCI_PCISTATE, val);
7708	}
7709
7710	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
7711		/* Allow reads and writes to the
7712		 * APE register and memory space.
7713		 */
7714		val = tr32(TG3PCI_PCISTATE);
7715		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7716		       PCISTATE_ALLOW_APE_SHMEM_WR |
7717		       PCISTATE_ALLOW_APE_PSPACE_WR;
7718		tw32(TG3PCI_PCISTATE, val);
7719	}
7720
7721	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
7722		/* Enable some hw fixes.  */
7723		val = tr32(TG3PCI_MSI_DATA);
7724		val |= (1 << 26) | (1 << 28) | (1 << 29);
7725		tw32(TG3PCI_MSI_DATA, val);
7726	}
7727
7728	/* Descriptor ring init may make accesses to the
7729	 * NIC SRAM area to set up the TX descriptors, so we
7730	 * can only do this after the hardware has been
7731	 * successfully reset.
7732	 */
7733	err = tg3_init_rings(tp);
7734	if (err)
7735		return err;
7736
7737	if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
7738		val = tr32(TG3PCI_DMA_RW_CTRL) &
7739		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
7740		if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
7741			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
7742		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
7743	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
7744		   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
7745		/* This value is determined during the probe time DMA
7746		 * engine test, tg3_test_dma.
7747		 */
7748		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
7749	}
7750
7751	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
7752			  GRC_MODE_4X_NIC_SEND_RINGS |
7753			  GRC_MODE_NO_TX_PHDR_CSUM |
7754			  GRC_MODE_NO_RX_PHDR_CSUM);
7755	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
7756
7757	/* Pseudo-header checksum is done by hardware logic and not
7758	 * the offload processors, so make the chip do the pseudo-
7759	 * header checksums on receive.  For transmit it is more
7760	 * convenient to do the pseudo-header checksum in software
7761	 * as Linux does that on transmit for us in all cases.
7762	 */
7763	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
7764
7765	tw32(GRC_MODE,
7766	     tp->grc_mode |
7767	     (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
7768
7769	/* Set up the timer prescaler register.  Clock is always 66 MHz. */
7770	val = tr32(GRC_MISC_CFG);
7771	val &= ~0xff;
7772	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
7773	tw32(GRC_MISC_CFG, val);
7774
7775	/* Initialize MBUF/DESC pool. */
7776	if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7777		/* Do nothing.  */
7778	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
7779		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
7780		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
7781			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
7782		else
7783			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
7784		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
7785		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
7786	} else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7787		int fw_len;
7788
7789		fw_len = tp->fw_len;
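		/* Align fw_len up to the next 128-byte (0x80) boundary:
		 * add (align - 1), then mask off the low bits.
		 */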
7790		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
7791		tw32(BUFMGR_MB_POOL_ADDR,
7792		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
7793		tw32(BUFMGR_MB_POOL_SIZE,
7794		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
7795	}
7796
7797	if (tp->dev->mtu <= ETH_DATA_LEN) {
7798		tw32(BUFMGR_MB_RDMA_LOW_WATER,
7799		     tp->bufmgr_config.mbuf_read_dma_low_water);
7800		tw32(BUFMGR_MB_MACRX_LOW_WATER,
7801		     tp->bufmgr_config.mbuf_mac_rx_low_water);
7802		tw32(BUFMGR_MB_HIGH_WATER,
7803		     tp->bufmgr_config.mbuf_high_water);
7804	} else {
7805		tw32(BUFMGR_MB_RDMA_LOW_WATER,
7806		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
7807		tw32(BUFMGR_MB_MACRX_LOW_WATER,
7808		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
7809		tw32(BUFMGR_MB_HIGH_WATER,
7810		     tp->bufmgr_config.mbuf_high_water_jumbo);
7811	}
7812	tw32(BUFMGR_DMA_LOW_WATER,
7813	     tp->bufmgr_config.dma_low_water);
7814	tw32(BUFMGR_DMA_HIGH_WATER,
7815	     tp->bufmgr_config.dma_high_water);
7816
7817	tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
7818	for (i = 0; i < 2000; i++) {
7819		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
7820			break;
7821		udelay(10);
7822	}
7823	if (i >= 2000) {
7824		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
7825		return -ENODEV;
7826	}
7827
7828	/* Setup replenish threshold. */
7829	val = tp->rx_pending / 8;
7830	if (val == 0)
7831		val = 1;
7832	else if (val > tp->rx_std_max_post)
7833		val = tp->rx_std_max_post;
7834	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7835		if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
7836			tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
7837
7838		if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
7839			val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
7840	}
7841
7842	tw32(RCVBDI_STD_THRESH, val);
7843
7844	/* Initialize TG3_BDINFO's at:
7845	 *  RCVDBDI_STD_BD:	standard eth size rx ring
7846	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
7847	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
7848	 *
7849	 * like so:
7850	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
7851	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
7852	 *                              ring attribute flags
7853	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
7854	 *
7855	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
7856	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
7857	 *
7858	 * The size of each ring is fixed in the firmware, but the location is
7859	 * configurable.
7860	 */
7861	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7862	     ((u64) tpr->rx_std_mapping >> 32));
7863	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7864	     ((u64) tpr->rx_std_mapping & 0xffffffff));
7865	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
7866	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
7867		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
7868		     NIC_SRAM_RX_BUFFER_DESC);
7869
7870	/* Disable the mini ring */
7871	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7872		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
7873		     BDINFO_FLAGS_DISABLED);
7874
7875	/* Program the jumbo buffer descriptor ring control
7876	 * blocks on those devices that have them.
7877	 */
7878	if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
7879	    !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
7880		/* Setup replenish threshold. */
7881		tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
7882
7883		if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
7884			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7885			     ((u64) tpr->rx_jmb_mapping >> 32));
7886			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7887			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
7888			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7889			     (RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) |
7890			     BDINFO_FLAGS_USE_EXT_RECV);
7891			if (!(tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) ||
7892			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7893				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
7894				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
7895		} else {
7896			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7897			     BDINFO_FLAGS_DISABLED);
7898		}
7899
7900		if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
7901			val = (RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT) |
7902			      (TG3_RX_STD_DMA_SZ << 2);
7903		else
7904			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
7905	} else
7906		val = RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT;
7907
7908	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
7909
7910	tpr->rx_std_prod_idx = tp->rx_pending;
7911	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
7912
7913	tpr->rx_jmb_prod_idx = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
7914			  tp->rx_jumbo_pending : 0;
7915	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
7916
7917	if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
7918		tw32(STD_REPLENISH_LWM, 32);
7919		tw32(JMB_REPLENISH_LWM, 16);
7920	}
7921
7922	tg3_rings_reset(tp);
7923
7924	/* Initialize MAC address and backoff seed. */
7925	__tg3_set_mac_addr(tp, 0);
7926
7927	/* MTU + ethernet header + FCS + optional VLAN tag */
7928	tw32(MAC_RX_MTU_SIZE,
7929	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
7930
7931	/* The slot time is changed by tg3_setup_phy if we
7932	 * run at gigabit with half duplex.
7933	 */
7934	tw32(MAC_TX_LENGTHS,
7935	     (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
7936	     (6 << TX_LENGTHS_IPG_SHIFT) |
7937	     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
7938
7939	/* Receive rules. */
7940	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
7941	tw32(RCVLPC_CONFIG, 0x0181);
7942
7943	/* Calculate RDMAC_MODE setting early, we need it to determine
7944	 * the RCVLPC_STATE_ENABLE mask.
7945	 */
7946	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
7947		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
7948		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
7949		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
7950		      RDMAC_MODE_LNGREAD_ENAB);
7951
7952	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
7953	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
7954		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
7955
7956	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7957	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
7958	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
7959		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
7960			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
7961			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
7962
7963	/* If statement applies to 5705 and 5750 PCI devices only */
7964	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7965	     tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7966	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
7967		if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
7968		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7969			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
7970		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7971			   !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
7972			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7973		}
7974	}
7975
7976	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
7977		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7978
7979	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7980		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
7981
7982	if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
7983	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
7984	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
7985		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
7986
7987	/* Receive/send statistics. */
7988	if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7989		val = tr32(RCVLPC_STATS_ENABLE);
7990		val &= ~RCVLPC_STATSENAB_DACK_FIX;
7991		tw32(RCVLPC_STATS_ENABLE, val);
7992	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
7993		   (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7994		val = tr32(RCVLPC_STATS_ENABLE);
7995		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
7996		tw32(RCVLPC_STATS_ENABLE, val);
7997	} else {
7998		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
7999	}
8000	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8001	tw32(SNDDATAI_STATSENAB, 0xffffff);
8002	tw32(SNDDATAI_STATSCTRL,
8003	     (SNDDATAI_SCTRL_ENABLE |
8004	      SNDDATAI_SCTRL_FASTUPD));
8005
8006	/* Setup host coalescing engine. */
8007	tw32(HOSTCC_MODE, 0);
8008	for (i = 0; i < 2000; i++) {
8009		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8010			break;
8011		udelay(10);
8012	}
8013
8014	__tg3_set_coalesce(tp, &tp->coal);
8015
8016	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8017		/* Status/statistics block address.  See tg3_timer,
8018		 * the tg3_periodic_fetch_stats call there, and
8019		 * tg3_get_stats to see how this works for 5705/5750 chips.
8020		 */
8021		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8022		     ((u64) tp->stats_mapping >> 32));
8023		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8024		     ((u64) tp->stats_mapping & 0xffffffff));
8025		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8026
8027		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8028
8029		/* Clear statistics and status block memory areas */
8030		for (i = NIC_SRAM_STATS_BLK;
8031		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8032		     i += sizeof(u32)) {
8033			tg3_write_mem(tp, i, 0);
8034			udelay(40);
8035		}
8036	}
8037
8038	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8039
8040	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8041	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8042	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8043		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8044
8045	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8046		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8047		/* reset to prevent losing 1st rx packet intermittently */
8048		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8049		udelay(10);
8050	}
8051
8052	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
8053		tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8054	else
8055		tp->mac_mode = 0;
8056	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8057		MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
8058	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
8059	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8060	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8061		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8062	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8063	udelay(40);
8064
8065	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8066	 * If TG3_FLG2_IS_NIC is zero, we should read the
8067	 * register to preserve the GPIO settings for LOMs. The GPIOs,
8068	 * whether used as inputs or outputs, are set by boot code after
8069	 * reset.
8070	 */
8071	if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
8072		u32 gpio_mask;
8073
8074		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8075			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8076			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8077
8078		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8079			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8080				     GRC_LCLCTRL_GPIO_OUTPUT3;
8081
8082		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8083			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8084
8085		tp->grc_local_ctrl &= ~gpio_mask;
8086		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8087
8088		/* GPIO1 must be driven high for eeprom write protect */
8089		if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
8090			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8091					       GRC_LCLCTRL_GPIO_OUTPUT1);
8092	}
8093	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8094	udelay(100);
8095
8096	if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) {
8097		val = tr32(MSGINT_MODE);
8098		val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8099		tw32(MSGINT_MODE, val);
8100	}
8101
8102	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8103		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8104		udelay(40);
8105	}
8106
8107	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8108	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8109	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8110	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8111	       WDMAC_MODE_LNGREAD_ENAB);
8112
8113	/* If statement applies to 5705 and 5750 PCI devices only */
8114	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8115	     tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
8116	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
8117		if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
8118		    (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8119		     tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8120			/* nothing */
8121		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8122			   !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
8123			   !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
8124			val |= WDMAC_MODE_RX_ACCEL;
8125		}
8126	}
8127
8128	/* Enable host coalescing bug fix */
8129	if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
8130		val |= WDMAC_MODE_STATUS_TAG_FIX;
8131
8132	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8133		val |= WDMAC_MODE_BURST_ALL_DATA;
8134
8135	tw32_f(WDMAC_MODE, val);
8136	udelay(40);
8137
8138	if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
8139		u16 pcix_cmd;
8140
8141		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8142				     &pcix_cmd);
8143		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8144			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8145			pcix_cmd |= PCI_X_CMD_READ_2K;
8146		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8147			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8148			pcix_cmd |= PCI_X_CMD_READ_2K;
8149		}
8150		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8151				      pcix_cmd);
8152	}
8153
8154	tw32_f(RDMAC_MODE, rdmac_mode);
8155	udelay(40);
8156
8157	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8158	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8159		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8160
8161	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8162		tw32(SNDDATAC_MODE,
8163		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8164	else
8165		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8166
8167	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8168	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8169	tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
8170	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8171	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
8172		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8173	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8174	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
8175		val |= SNDBDI_MODE_MULTI_TXQ_EN;
8176	tw32(SNDBDI_MODE, val);
8177	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8178
8179	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8180		err = tg3_load_5701_a0_firmware_fix(tp);
8181		if (err)
8182			return err;
8183	}
8184
8185	if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
8186		err = tg3_load_tso_firmware(tp);
8187		if (err)
8188			return err;
8189	}
8190
8191	tp->tx_mode = TX_MODE_ENABLE;
8192	if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
8193	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8194		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8195	tw32_f(MAC_TX_MODE, tp->tx_mode);
8196	udelay(100);
8197
8198	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) {
8199		u32 reg = MAC_RSS_INDIR_TBL_0;
8200		u8 *ent = (u8 *)&val;
8201
8202		/* Setup the indirection table */
8203		for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8204			int idx = i % sizeof(val);
8205
8206			ent[idx] = i % (tp->irq_cnt - 1);
8207			if (idx == sizeof(val) - 1) {
8208				tw32(reg, val);
8209				reg += 4;
8210			}
8211		}
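		/* The loop above packs the 128 one-byte indirection
		 * entries four at a time into 32-bit registers, cycling
		 * the entries round-robin over the irq_cnt - 1 rx rings
		 * (vector 0 is reserved for link and other non-ring
		 * events).
		 */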
8212
8213		/* Setup the "secret" hash key. */
8214		tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8215		tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8216		tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8217		tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8218		tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8219		tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8220		tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8221		tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8222		tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8223		tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
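		/* Ten 32-bit registers make a 40-byte secret key, the
		 * size conventionally used for Toeplitz-style RSS
		 * hashing (an observation; this file doesn't spell out
		 * the hash algorithm).
		 */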
8224	}
8225
8226	tp->rx_mode = RX_MODE_ENABLE;
8227	if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
8228		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8229
8230	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)
8231		tp->rx_mode |= RX_MODE_RSS_ENABLE |
8232			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
8233			       RX_MODE_RSS_IPV6_HASH_EN |
8234			       RX_MODE_RSS_TCP_IPV6_HASH_EN |
8235			       RX_MODE_RSS_IPV4_HASH_EN |
8236			       RX_MODE_RSS_TCP_IPV4_HASH_EN;
8237
8238	tw32_f(MAC_RX_MODE, tp->rx_mode);
8239	udelay(10);
8240
8241	tw32(MAC_LED_CTRL, tp->led_ctrl);
8242
8243	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8244	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8245		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8246		udelay(10);
8247	}
8248	tw32_f(MAC_RX_MODE, tp->rx_mode);
8249	udelay(10);
8250
8251	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8252		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8253			!(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
8254			/* Set drive transmission level to 1.2V  */
8255			/* only if the signal pre-emphasis bit is not set  */
8256			val = tr32(MAC_SERDES_CFG);
8257			val &= 0xfffff000;
8258			val |= 0x880;
8259			tw32(MAC_SERDES_CFG, val);
8260		}
8261		if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8262			tw32(MAC_SERDES_CFG, 0x616000);
8263	}
8264
8265	/* Prevent chip from dropping frames when flow control
8266	 * is enabled.
8267	 */
8268	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8269		val = 1;
8270	else
8271		val = 2;
8272	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
8273
8274	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8275	    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
8276		/* Use hardware link auto-negotiation */
8277		tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
8278	}
8279
8280	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8281	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
8282		u32 tmp;
8283
8284		tmp = tr32(SERDES_RX_CTRL);
8285		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
8286		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
8287		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
8288		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8289	}
8290
8291	if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
8292		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
8293			tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
8294			tp->link_config.speed = tp->link_config.orig_speed;
8295			tp->link_config.duplex = tp->link_config.orig_duplex;
8296			tp->link_config.autoneg = tp->link_config.orig_autoneg;
8297		}
8298
8299		err = tg3_setup_phy(tp, 0);
8300		if (err)
8301			return err;
8302
8303		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8304		    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8305			u32 tmp;
8306
8307			/* Clear CRC stats. */
8308			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
8309				tg3_writephy(tp, MII_TG3_TEST1,
8310					     tmp | MII_TG3_TEST1_CRC_EN);
8311				tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
8312			}
8313		}
8314	}
8315
8316	__tg3_set_rx_mode(tp->dev);
8317
8318	/* Initialize receive rules. */
8319	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
8320	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
8321	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
8322	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
8323
8324	if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
8325	    !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
8326		limit = 8;
8327	else
8328		limit = 16;
8329	if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
8330		limit -= 4;
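	/* The switch below falls through deliberately: starting at
	 * `limit', each case zeroes one rule/value pair and drops into
	 * the next lower case, so rules limit - 1 down through 4 are
	 * all cleared (the rule 3/2 writes are intentionally commented
	 * out).
	 */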
8331	switch (limit) {
8332	case 16:
8333		tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
8334	case 15:
8335		tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
8336	case 14:
8337		tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
8338	case 13:
8339		tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
8340	case 12:
8341		tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
8342	case 11:
8343		tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
8344	case 10:
8345		tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
8346	case 9:
8347		tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
8348	case 8:
8349		tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
8350	case 7:
8351		tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
8352	case 6:
8353		tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
8354	case 5:
8355		tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
8356	case 4:
8357		/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
8358	case 3:
8359		/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
8360	case 2:
8361	case 1:
8362
8363	default:
8364		break;
8365	}
8366
8367	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
8368		/* Write our heartbeat update interval to APE. */
8369		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
8370				APE_HOST_HEARTBEAT_INT_DISABLE);
8371
8372	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
8373
8374	return 0;
8375}
8376
8377/* Called at device open time to get the chip ready for
8378 * packet processing.  Invoked with tp->lock held.
8379 */
8380static int tg3_init_hw(struct tg3 *tp, int reset_phy)
8381{
8382	tg3_switch_clocks(tp);
8383
8384	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
8385
8386	return tg3_reset_hw(tp, reset_phy);
8387}
8388
8389#define TG3_STAT_ADD32(PSTAT, REG) \
8390do {	u32 __val = tr32(REG); \
8391	(PSTAT)->low += __val; \
8392	if ((PSTAT)->low < __val) \
8393		(PSTAT)->high += 1; \
8394} while (0)
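
/* TG3_STAT_ADD32 folds the 32-bit hardware counter read from REG into
 * a 64-bit { low, high } software counter: if the addition leaves
 * `low' smaller than the value just added, the 32-bit sum wrapped, so
 * carry one into `high'.  A minimal sketch of the same idea as a
 * function (illustration only, not part of the driver; tg3_stat64_t
 * comes from tg3.h):
 */
#if 0
static inline void stat64_add32(tg3_stat64_t *pstat, u32 val)
{
	pstat->low += val;
	if (pstat->low < val)	/* 32-bit wraparound => carry */
		pstat->high += 1;
}
#endif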
8395
8396static void tg3_periodic_fetch_stats(struct tg3 *tp)
8397{
8398	struct tg3_hw_stats *sp = tp->hw_stats;
8399
8400	if (!netif_carrier_ok(tp->dev))
8401		return;
8402
8403	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
8404	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
8405	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
8406	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
8407	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
8408	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
8409	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
8410	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
8411	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
8412	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
8413	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
8414	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
8415	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
8416
8417	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
8418	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
8419	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
8420	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
8421	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
8422	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
8423	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
8424	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
8425	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
8426	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
8427	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
8428	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
8429	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
8430	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
8431
8432	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
8433	TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
8434	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
8435}
8436
8437static void tg3_timer(unsigned long __opaque)
8438{
8439	struct tg3 *tp = (struct tg3 *) __opaque;
8440
8441	if (tp->irq_sync)
8442		goto restart_timer;
8443
8444	spin_lock(&tp->lock);
8445
8446	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
8447		/* All of this garbage is because, when using non-tagged
8448		 * IRQ status, the mailbox/status_block protocol the chip
8449		 * uses with the CPU is race prone.
8450		 */
8451		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
8452			tw32(GRC_LOCAL_CTRL,
8453			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
8454		} else {
8455			tw32(HOSTCC_MODE, tp->coalesce_mode |
8456			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
8457		}
8458
8459		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
8460			tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
8461			spin_unlock(&tp->lock);
8462			schedule_work(&tp->reset_task);
8463			return;
8464		}
8465	}
8466
8467	/* This part only runs once per second. */
8468	if (!--tp->timer_counter) {
8469		if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8470			tg3_periodic_fetch_stats(tp);
8471
8472		if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
8473			u32 mac_stat;
8474			int phy_event;
8475
8476			mac_stat = tr32(MAC_STATUS);
8477
8478			phy_event = 0;
8479			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
8480				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
8481					phy_event = 1;
8482			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
8483				phy_event = 1;
8484
8485			if (phy_event)
8486				tg3_setup_phy(tp, 0);
8487		} else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
8488			u32 mac_stat = tr32(MAC_STATUS);
8489			int need_setup = 0;
8490
8491			if (netif_carrier_ok(tp->dev) &&
8492			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
8493				need_setup = 1;
8494			}
8495			if (!netif_carrier_ok(tp->dev) &&
8496			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
8497					 MAC_STATUS_SIGNAL_DET))) {
8498				need_setup = 1;
8499			}
8500			if (need_setup) {
8501				if (!tp->serdes_counter) {
8502					tw32_f(MAC_MODE,
8503					     (tp->mac_mode &
8504					      ~MAC_MODE_PORT_MODE_MASK));
8505					udelay(40);
8506					tw32_f(MAC_MODE, tp->mac_mode);
8507					udelay(40);
8508				}
8509				tg3_setup_phy(tp, 0);
8510			}
8511		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8512			   (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
8513			tg3_serdes_parallel_detect(tp);
8514		}
8515
8516		tp->timer_counter = tp->timer_multiplier;
8517	}
8518
8519	/* Heartbeat is only sent once every 2 seconds.
8520	 *
8521	 * The heartbeat is to tell the ASF firmware that the host
8522	 * driver is still alive.  In the event that the OS crashes,
8523	 * ASF needs to reset the hardware to free up the FIFO space
8524	 * that may be filled with rx packets destined for the host.
8525	 * If the FIFO is full, ASF will no longer function properly.
8526	 *
8527	 * Unintended resets have been reported on real-time kernels
8528	 * where the timer doesn't run on time.  Netpoll will also have
8529	 * the same problem.
8530	 *
8531	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
8532	 * to check the ring condition when the heartbeat is expiring
8533	 * before doing the reset.  This will prevent most unintended
8534	 * resets.
8535	 */
8536	if (!--tp->asf_counter) {
8537		if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
8538		    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
8539			tg3_wait_for_event_ack(tp);
8540
8541			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
8542				      FWCMD_NICDRV_ALIVE3);
8543			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
8544			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
8545				      TG3_FW_UPDATE_TIMEOUT_SEC);
8546
8547			tg3_generate_fw_event(tp);
8548		}
8549		tp->asf_counter = tp->asf_multiplier;
8550	}
8551
8552	spin_unlock(&tp->lock);
8553
8554restart_timer:
8555	tp->timer.expires = jiffies + tp->timer_offset;
8556	add_timer(&tp->timer);
8557}
8558
8559static int tg3_request_irq(struct tg3 *tp, int irq_num)
8560{
8561	irq_handler_t fn;
8562	unsigned long flags;
8563	char *name;
8564	struct tg3_napi *tnapi = &tp->napi[irq_num];
8565
8566	if (tp->irq_cnt == 1)
8567		name = tp->dev->name;
8568	else {
8569		name = &tnapi->irq_lbl[0];
8570		snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
8571		name[IFNAMSIZ-1] = 0;
8572	}
8573
8574	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
8575		fn = tg3_msi;
8576		if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
8577			fn = tg3_msi_1shot;
8578		flags = IRQF_SAMPLE_RANDOM;
8579	} else {
8580		fn = tg3_interrupt;
8581		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
8582			fn = tg3_interrupt_tagged;
8583		flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
8584	}
8585
8586	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
8587}
8588
8589static int tg3_test_interrupt(struct tg3 *tp)
8590{
8591	struct tg3_napi *tnapi = &tp->napi[0];
8592	struct net_device *dev = tp->dev;
8593	int err, i, intr_ok = 0;
8594	u32 val;
8595
8596	if (!netif_running(dev))
8597		return -ENODEV;
8598
8599	tg3_disable_ints(tp);
8600
8601	free_irq(tnapi->irq_vec, tnapi);
8602
8603	/*
8604	 * Turn off MSI one-shot mode.  Otherwise this test has no
8605	 * way to observe whether the interrupt was delivered.
8606	 */
8607	if ((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
8608	    (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
8609		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
8610		tw32(MSGINT_MODE, val);
8611	}
8612
8613	err = request_irq(tnapi->irq_vec, tg3_test_isr,
8614			  IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
8615	if (err)
8616		return err;
8617
8618	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
8619	tg3_enable_ints(tp);
8620
8621	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8622	       tnapi->coal_now);
8623
8624	for (i = 0; i < 5; i++) {
8625		u32 int_mbox, misc_host_ctrl;
8626
8627		int_mbox = tr32_mailbox(tnapi->int_mbox);
8628		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
8629
8630		if ((int_mbox != 0) ||
8631		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
8632			intr_ok = 1;
8633			break;
8634		}
8635
8636		msleep(10);
8637	}
8638
8639	tg3_disable_ints(tp);
8640
8641	free_irq(tnapi->irq_vec, tnapi);
8642
8643	err = tg3_request_irq(tp, 0);
8644
8645	if (err)
8646		return err;
8647
8648	if (intr_ok) {
8649		/* Reenable MSI one shot mode. */
8650		if ((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
8651		    (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
8652			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
8653			tw32(MSGINT_MODE, val);
8654		}
8655		return 0;
8656	}
8657
8658	return -EIO;
8659}
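
/* In short, the test above swaps in a bare test ISR, forces the
 * coalescing engine to raise an interrupt immediately (coal_now),
 * then polls for up to ~50 ms (5 x 10 ms) for either a non-zero
 * interrupt mailbox or the MISC_HOST_CTRL mask-PCI-int bit -- both
 * signs that the interrupt actually fired.
 */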
8660
8661/* Returns 0 if the MSI test succeeds, or if it fails but INTx mode
8662 * is successfully restored.
8663 */
8664static int tg3_test_msi(struct tg3 *tp)
8665{
8666	int err;
8667	u16 pci_cmd;
8668
8669	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
8670		return 0;
8671
8672	/* Turn off SERR reporting in case MSI terminates with Master
8673	 * Abort.
8674	 */
8675	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
8676	pci_write_config_word(tp->pdev, PCI_COMMAND,
8677			      pci_cmd & ~PCI_COMMAND_SERR);
8678
8679	err = tg3_test_interrupt(tp);
8680
8681	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
8682
8683	if (!err)
8684		return 0;
8685
8686	/* other failures */
8687	if (err != -EIO)
8688		return err;
8689
8690	/* MSI test failed, go back to INTx mode */
8691	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
8692		    "to INTx mode. Please report this failure to the PCI "
8693		    "maintainer and include system chipset information\n");
8694
8695	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
8696
8697	pci_disable_msi(tp->pdev);
8698
8699	tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8700	tp->napi[0].irq_vec = tp->pdev->irq;
8701
8702	err = tg3_request_irq(tp, 0);
8703	if (err)
8704		return err;
8705
8706	/* Need to reset the chip because the MSI cycle may have terminated
8707	 * with Master Abort.
8708	 */
8709	tg3_full_lock(tp, 1);
8710
8711	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8712	err = tg3_init_hw(tp, 1);
8713
8714	tg3_full_unlock(tp);
8715
8716	if (err)
8717		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
8718
8719	return err;
8720}
8721
8722static int tg3_request_firmware(struct tg3 *tp)
8723{
8724	const __be32 *fw_data;
8725
8726	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
8727		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
8728			   tp->fw_needed);
8729		return -ENOENT;
8730	}
8731
8732	fw_data = (void *)tp->fw->data;
8733
8734	/* Firmware blob starts with version numbers, followed by
8735	 * start address and _full_ length including BSS sections
8736	 * (which must be longer than the actual data, of course).
8737	 */
8738
8739	tp->fw_len = be32_to_cpu(fw_data[2]);	/* includes bss */
8740	if (tp->fw_len < (tp->fw->size - 12)) {
8741		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
8742			   tp->fw_len, tp->fw_needed);
8743		release_firmware(tp->fw);
8744		tp->fw = NULL;
8745		return -EINVAL;
8746	}
8747
8748	/* We no longer need firmware; we have it. */
8749	tp->fw_needed = NULL;
8750	return 0;
8751}
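
/* For reference, the header parsed above can be pictured like this
 * (a sketch of the blob layout, not a struct the driver defines):
 *
 *	fw_data[0]	version
 *	fw_data[1]	start address in NIC memory
 *	fw_data[2]	full image length, including BSS
 *	fw_data[3]...	text/data payload (tp->fw->size - 12 bytes)
 */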
8752
8753static bool tg3_enable_msix(struct tg3 *tp)
8754{
8755	int i, rc, cpus = num_online_cpus();
8756	struct msix_entry msix_ent[tp->irq_max];
8757
8758	if (cpus == 1)
8759		/* Just fall back to the simpler MSI mode. */
8760		return false;
8761
8762	/*
8763	 * We want as many rx rings enabled as there are cpus.
8764	 * The first MSI-X vector only deals with link interrupts, etc.,
8765	 * so we add one to the number of vectors we are requesting.
8766	 */
8767	tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
8768
8769	for (i = 0; i < tp->irq_max; i++) {
8770		msix_ent[i].entry  = i;
8771		msix_ent[i].vector = 0;
8772	}
8773
8774	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
8775	if (rc < 0) {
8776		return false;
8777	} else if (rc != 0) {
8778		if (pci_enable_msix(tp->pdev, msix_ent, rc))
8779			return false;
8780		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
8781			      tp->irq_cnt, rc);
8782		tp->irq_cnt = rc;
8783	}
8784
8785	for (i = 0; i < tp->irq_max; i++)
8786		tp->napi[i].irq_vec = msix_ent[i].vector;
8787
8788	tp->dev->real_num_tx_queues = 1;
8789	if (tp->irq_cnt > 1) {
8790		tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS;
8791
8792		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8793		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
8794			tp->tg3_flags3 |= TG3_FLG3_ENABLE_TSS;
8795			tp->dev->real_num_tx_queues = tp->irq_cnt - 1;
8796		}
8797	}
8798
8799	return true;
8800}
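
/* Net effect of tg3_enable_msix(): vector 0 handles link and other
 * non-ring events, vectors 1..irq_cnt-1 each own an rx return ring
 * (RSS), and on 5717/5719 parts those vectors also carry per-queue
 * tx completions (TSS).
 */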
8801
8802static void tg3_ints_init(struct tg3 *tp)
8803{
8804	if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI_OR_MSIX) &&
8805	    !(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
8806		/* All MSI-supporting chips should support tagged
8807		 * status.  Assert that this is the case.
8808		 */
8809		netdev_warn(tp->dev,
8810			    "MSI without TAGGED_STATUS? Not using MSI\n");
8811		goto defcfg;
8812	}
8813
8814	if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) && tg3_enable_msix(tp))
8815		tp->tg3_flags2 |= TG3_FLG2_USING_MSIX;
8816	else if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) &&
8817		 pci_enable_msi(tp->pdev) == 0)
8818		tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
8819
8820	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
8821		u32 msi_mode = tr32(MSGINT_MODE);
8822		if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
8823			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
8824		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
8825	}
8826defcfg:
8827	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) {
8828		tp->irq_cnt = 1;
8829		tp->napi[0].irq_vec = tp->pdev->irq;
8830		tp->dev->real_num_tx_queues = 1;
8831	}
8832}
8833
8834static void tg3_ints_fini(struct tg3 *tp)
8835{
8836	if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
8837		pci_disable_msix(tp->pdev);
8838	else if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
8839		pci_disable_msi(tp->pdev);
8840	tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI_OR_MSIX;
8841	tp->tg3_flags3 &= ~(TG3_FLG3_ENABLE_RSS | TG3_FLG3_ENABLE_TSS);
8842}
8843
8844static int tg3_open(struct net_device *dev)
8845{
8846	struct tg3 *tp = netdev_priv(dev);
8847	int i, err;
8848
8849	if (tp->fw_needed) {
8850		err = tg3_request_firmware(tp);
8851		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8852			if (err)
8853				return err;
8854		} else if (err) {
8855			netdev_warn(tp->dev, "TSO capability disabled\n");
8856			tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
8857		} else if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
8858			netdev_notice(tp->dev, "TSO capability restored\n");
8859			tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
8860		}
8861	}
8862
8863	netif_carrier_off(tp->dev);
8864
8865	err = tg3_set_power_state(tp, PCI_D0);
8866	if (err)
8867		return err;
8868
8869	tg3_full_lock(tp, 0);
8870
8871	tg3_disable_ints(tp);
8872	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
8873
8874	tg3_full_unlock(tp);
8875
8876	/*
8877	 * Set up interrupts first so we know how
8878	 * many NAPI resources to allocate
8879	 */
8880	tg3_ints_init(tp);
8881
8882	/* The placement of this call is tied
8883	 * to the setup and use of Host TX descriptors.
8884	 */
8885	err = tg3_alloc_consistent(tp);
8886	if (err)
8887		goto err_out1;
8888
8889	tg3_napi_enable(tp);
8890
8891	for (i = 0; i < tp->irq_cnt; i++) {
8892		struct tg3_napi *tnapi = &tp->napi[i];
8893		err = tg3_request_irq(tp, i);
8894		if (err) {
8895			for (i--; i >= 0; i--)
8896				free_irq(tp->napi[i].irq_vec, &tp->napi[i]);
8897			break;
8898		}
8899	}
8900
8901	if (err)
8902		goto err_out2;
8903
8904	tg3_full_lock(tp, 0);
8905
8906	err = tg3_init_hw(tp, 1);
8907	if (err) {
8908		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8909		tg3_free_rings(tp);
8910	} else {
8911		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
8912			tp->timer_offset = HZ;
8913		else
8914			tp->timer_offset = HZ / 10;
8915
8916		BUG_ON(tp->timer_offset > HZ);
8917		tp->timer_counter = tp->timer_multiplier =
8918			(HZ / tp->timer_offset);
8919		tp->asf_counter = tp->asf_multiplier =
8920			((HZ / tp->timer_offset) * 2);
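		/* Cadence math: the timer fires every timer_offset
		 * jiffies, so timer_counter = HZ / timer_offset makes
		 * the once-per-second block in tg3_timer() run at 1 Hz,
		 * and the doubled asf_multiplier gives the 2-second ASF
		 * heartbeat described there.
		 */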
8921
8922		init_timer(&tp->timer);
8923		tp->timer.expires = jiffies + tp->timer_offset;
8924		tp->timer.data = (unsigned long) tp;
8925		tp->timer.function = tg3_timer;
8926	}
8927
8928	tg3_full_unlock(tp);
8929
8930	if (err)
8931		goto err_out3;
8932
8933	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8934		err = tg3_test_msi(tp);
8935
8936		if (err) {
8937			tg3_full_lock(tp, 0);
8938			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8939			tg3_free_rings(tp);
8940			tg3_full_unlock(tp);
8941
8942			goto err_out2;
8943		}
8944
8945		if (!(tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
8946		    (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
8947			u32 val = tr32(PCIE_TRANSACTION_CFG);
8948
8949			tw32(PCIE_TRANSACTION_CFG,
8950			     val | PCIE_TRANS_CFG_1SHOT_MSI);
8951		}
8952	}
8953
8954	tg3_phy_start(tp);
8955
8956	tg3_full_lock(tp, 0);
8957
8958	add_timer(&tp->timer);
8959	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8960	tg3_enable_ints(tp);
8961
8962	tg3_full_unlock(tp);
8963
8964	netif_tx_start_all_queues(dev);
8965
8966	return 0;
8967
8968err_out3:
8969	for (i = tp->irq_cnt - 1; i >= 0; i--) {
8970		struct tg3_napi *tnapi = &tp->napi[i];
8971		free_irq(tnapi->irq_vec, tnapi);
8972	}
8973
8974err_out2:
8975	tg3_napi_disable(tp);
8976	tg3_free_consistent(tp);
8977
8978err_out1:
8979	tg3_ints_fini(tp);
8980	return err;
8981}
8982
8983static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
8984						 struct rtnl_link_stats64 *);
8985static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
8986
8987static int tg3_close(struct net_device *dev)
8988{
8989	int i;
8990	struct tg3 *tp = netdev_priv(dev);
8991
8992	tg3_napi_disable(tp);
8993	cancel_work_sync(&tp->reset_task);
8994
8995	netif_tx_stop_all_queues(dev);
8996
8997	del_timer_sync(&tp->timer);
8998
8999	tg3_phy_stop(tp);
9000
9001	tg3_full_lock(tp, 1);
9002
9003	tg3_disable_ints(tp);
9004
9005	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9006	tg3_free_rings(tp);
9007	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
9008
9009	tg3_full_unlock(tp);
9010
9011	for (i = tp->irq_cnt - 1; i >= 0; i--) {
9012		struct tg3_napi *tnapi = &tp->napi[i];
9013		free_irq(tnapi->irq_vec, tnapi);
9014	}
9015
9016	tg3_ints_fini(tp);
9017
9018	tg3_get_stats64(tp->dev, &tp->net_stats_prev);
9019
9020	memcpy(&tp->estats_prev, tg3_get_estats(tp),
9021	       sizeof(tp->estats_prev));
9022
9023	tg3_free_consistent(tp);
9024
9025	tg3_set_power_state(tp, PCI_D3hot);
9026
9027	netif_carrier_off(tp->dev);
9028
9029	return 0;
9030}
9031
9032static inline u64 get_stat64(tg3_stat64_t *val)
9033{
9034	return ((u64)val->high << 32) | ((u64)val->low);
9035}
9036
9037static u64 calc_crc_errors(struct tg3 *tp)
9038{
9039	struct tg3_hw_stats *hw_stats = tp->hw_stats;
9040
9041	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9042	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9043	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9044		u32 val;
9045
9046		spin_lock_bh(&tp->lock);
9047		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9048			tg3_writephy(tp, MII_TG3_TEST1,
9049				     val | MII_TG3_TEST1_CRC_EN);
9050			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
9051		} else
9052			val = 0;
9053		spin_unlock_bh(&tp->lock);
9054
9055		tp->phy_crc_errors += val;
9056
9057		return tp->phy_crc_errors;
9058	}
9059
9060	return get_stat64(&hw_stats->rx_fcs_errors);
9061}
9062
9063#define ESTAT_ADD(member) \
9064	estats->member =	old_estats->member + \
9065				get_stat64(&hw_stats->member)
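
/* ESTAT_ADD() reports <total snapshotted at the last close> plus the
 * chip's live counter: tg3_close() saves the result into
 * tp->estats_prev, so the ethtool statistics appear continuous across
 * down/up cycles even though the hardware counters reset with the
 * chip.
 */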
9066
9067static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
9068{
9069	struct tg3_ethtool_stats *estats = &tp->estats;
9070	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9071	struct tg3_hw_stats *hw_stats = tp->hw_stats;
9072
9073	if (!hw_stats)
9074		return old_estats;
9075
9076	ESTAT_ADD(rx_octets);
9077	ESTAT_ADD(rx_fragments);
9078	ESTAT_ADD(rx_ucast_packets);
9079	ESTAT_ADD(rx_mcast_packets);
9080	ESTAT_ADD(rx_bcast_packets);
9081	ESTAT_ADD(rx_fcs_errors);
9082	ESTAT_ADD(rx_align_errors);
9083	ESTAT_ADD(rx_xon_pause_rcvd);
9084	ESTAT_ADD(rx_xoff_pause_rcvd);
9085	ESTAT_ADD(rx_mac_ctrl_rcvd);
9086	ESTAT_ADD(rx_xoff_entered);
9087	ESTAT_ADD(rx_frame_too_long_errors);
9088	ESTAT_ADD(rx_jabbers);
9089	ESTAT_ADD(rx_undersize_packets);
9090	ESTAT_ADD(rx_in_length_errors);
9091	ESTAT_ADD(rx_out_length_errors);
9092	ESTAT_ADD(rx_64_or_less_octet_packets);
9093	ESTAT_ADD(rx_65_to_127_octet_packets);
9094	ESTAT_ADD(rx_128_to_255_octet_packets);
9095	ESTAT_ADD(rx_256_to_511_octet_packets);
9096	ESTAT_ADD(rx_512_to_1023_octet_packets);
9097	ESTAT_ADD(rx_1024_to_1522_octet_packets);
9098	ESTAT_ADD(rx_1523_to_2047_octet_packets);
9099	ESTAT_ADD(rx_2048_to_4095_octet_packets);
9100	ESTAT_ADD(rx_4096_to_8191_octet_packets);
9101	ESTAT_ADD(rx_8192_to_9022_octet_packets);
9102
9103	ESTAT_ADD(tx_octets);
9104	ESTAT_ADD(tx_collisions);
9105	ESTAT_ADD(tx_xon_sent);
9106	ESTAT_ADD(tx_xoff_sent);
9107	ESTAT_ADD(tx_flow_control);
9108	ESTAT_ADD(tx_mac_errors);
9109	ESTAT_ADD(tx_single_collisions);
9110	ESTAT_ADD(tx_mult_collisions);
9111	ESTAT_ADD(tx_deferred);
9112	ESTAT_ADD(tx_excessive_collisions);
9113	ESTAT_ADD(tx_late_collisions);
9114	ESTAT_ADD(tx_collide_2times);
9115	ESTAT_ADD(tx_collide_3times);
9116	ESTAT_ADD(tx_collide_4times);
9117	ESTAT_ADD(tx_collide_5times);
9118	ESTAT_ADD(tx_collide_6times);
9119	ESTAT_ADD(tx_collide_7times);
9120	ESTAT_ADD(tx_collide_8times);
9121	ESTAT_ADD(tx_collide_9times);
9122	ESTAT_ADD(tx_collide_10times);
9123	ESTAT_ADD(tx_collide_11times);
9124	ESTAT_ADD(tx_collide_12times);
9125	ESTAT_ADD(tx_collide_13times);
9126	ESTAT_ADD(tx_collide_14times);
9127	ESTAT_ADD(tx_collide_15times);
9128	ESTAT_ADD(tx_ucast_packets);
9129	ESTAT_ADD(tx_mcast_packets);
9130	ESTAT_ADD(tx_bcast_packets);
9131	ESTAT_ADD(tx_carrier_sense_errors);
9132	ESTAT_ADD(tx_discards);
9133	ESTAT_ADD(tx_errors);
9134
9135	ESTAT_ADD(dma_writeq_full);
9136	ESTAT_ADD(dma_write_prioq_full);
9137	ESTAT_ADD(rxbds_empty);
9138	ESTAT_ADD(rx_discards);
9139	ESTAT_ADD(rx_errors);
9140	ESTAT_ADD(rx_threshold_hit);
9141
9142	ESTAT_ADD(dma_readq_full);
9143	ESTAT_ADD(dma_read_prioq_full);
9144	ESTAT_ADD(tx_comp_queue_full);
9145
9146	ESTAT_ADD(ring_set_send_prod_index);
9147	ESTAT_ADD(ring_status_update);
9148	ESTAT_ADD(nic_irqs);
9149	ESTAT_ADD(nic_avoided_irqs);
9150	ESTAT_ADD(nic_tx_threshold_hit);
9151
9152	return estats;
9153}
9154
9155static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
9156						 struct rtnl_link_stats64 *stats)
9157{
9158	struct tg3 *tp = netdev_priv(dev);
9159	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
9160	struct tg3_hw_stats *hw_stats = tp->hw_stats;
9161
9162	if (!hw_stats)
9163		return old_stats;
9164
9165	stats->rx_packets = old_stats->rx_packets +
9166		get_stat64(&hw_stats->rx_ucast_packets) +
9167		get_stat64(&hw_stats->rx_mcast_packets) +
9168		get_stat64(&hw_stats->rx_bcast_packets);
9169
9170	stats->tx_packets = old_stats->tx_packets +
9171		get_stat64(&hw_stats->tx_ucast_packets) +
9172		get_stat64(&hw_stats->tx_mcast_packets) +
9173		get_stat64(&hw_stats->tx_bcast_packets);
9174
9175	stats->rx_bytes = old_stats->rx_bytes +
9176		get_stat64(&hw_stats->rx_octets);
9177	stats->tx_bytes = old_stats->tx_bytes +
9178		get_stat64(&hw_stats->tx_octets);
9179
9180	stats->rx_errors = old_stats->rx_errors +
9181		get_stat64(&hw_stats->rx_errors);
9182	stats->tx_errors = old_stats->tx_errors +
9183		get_stat64(&hw_stats->tx_errors) +
9184		get_stat64(&hw_stats->tx_mac_errors) +
9185		get_stat64(&hw_stats->tx_carrier_sense_errors) +
9186		get_stat64(&hw_stats->tx_discards);
9187
9188	stats->multicast = old_stats->multicast +
9189		get_stat64(&hw_stats->rx_mcast_packets);
9190	stats->collisions = old_stats->collisions +
9191		get_stat64(&hw_stats->tx_collisions);
9192
9193	stats->rx_length_errors = old_stats->rx_length_errors +
9194		get_stat64(&hw_stats->rx_frame_too_long_errors) +
9195		get_stat64(&hw_stats->rx_undersize_packets);
9196
9197	stats->rx_over_errors = old_stats->rx_over_errors +
9198		get_stat64(&hw_stats->rxbds_empty);
9199	stats->rx_frame_errors = old_stats->rx_frame_errors +
9200		get_stat64(&hw_stats->rx_align_errors);
9201	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
9202		get_stat64(&hw_stats->tx_discards);
9203	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
9204		get_stat64(&hw_stats->tx_carrier_sense_errors);
9205
9206	stats->rx_crc_errors = old_stats->rx_crc_errors +
9207		calc_crc_errors(tp);
9208
9209	stats->rx_missed_errors = old_stats->rx_missed_errors +
9210		get_stat64(&hw_stats->rx_discards);
9211
9212	stats->rx_dropped = tp->rx_dropped;
9213
9214	return stats;
9215}
9216
9217static inline u32 calc_crc(unsigned char *buf, int len)
9218{
9219	u32 reg;
9220	u32 tmp;
9221	int j, k;
9222
9223	reg = 0xffffffff;
9224
9225	for (j = 0; j < len; j++) {
9226		reg ^= buf[j];
9227
9228		for (k = 0; k < 8; k++) {
9229			tmp = reg & 0x01;
9230
9231			reg >>= 1;
9232
9233			if (tmp)
9234				reg ^= 0xedb88320;
9235		}
9236	}
9237
9238	return ~reg;
9239}
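/*
 * The helper above is the standard bit-reflected CRC-32 (IEEE 802.3
 * polynomial 0xedb88320, init and final XOR of 0xffffffff), the same
 * algorithm as zlib's crc32().  A minimal self-check sketch follows,
 * illustrative only and not part of the driver: the well-known check
 * value for the ASCII string "123456789" is 0xcbf43926.
 */
#if 0	/* illustrative example only */
static void calc_crc_selftest(void)
{
	static const unsigned char check[] = "123456789";

	WARN_ON(calc_crc((unsigned char *)check, 9) != 0xcbf43926);
}
#endif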
9240
9241static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9242{
9243	/* accept or reject all multicast frames */
9244	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9245	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9246	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9247	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9248}
9249
9250static void __tg3_set_rx_mode(struct net_device *dev)
9251{
9252	struct tg3 *tp = netdev_priv(dev);
9253	u32 rx_mode;
9254
9255	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9256				  RX_MODE_KEEP_VLAN_TAG);
9257
9258	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9259	 * flag clear.
9260	 */
9261#if TG3_VLAN_TAG_USED
9262	if (!tp->vlgrp &&
9263	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
9264		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9265#else
9266	/* By definition, VLAN is always disabled in this
9267	 * case.
9268	 */
9269	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
9270		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9271#endif
9272
9273	if (dev->flags & IFF_PROMISC) {
9274		/* Promiscuous mode. */
9275		rx_mode |= RX_MODE_PROMISC;
9276	} else if (dev->flags & IFF_ALLMULTI) {
9277		/* Accept all multicast. */
9278		tg3_set_multi(tp, 1);
9279	} else if (netdev_mc_empty(dev)) {
9280		/* Reject all multicast. */
9281		tg3_set_multi(tp, 0);
9282	} else {
9283		/* Accept one or more multicast(s). */
9284		struct netdev_hw_addr *ha;
9285		u32 mc_filter[4] = { 0, };
9286		u32 regidx;
9287		u32 bit;
9288		u32 crc;
9289
9290		netdev_for_each_mc_addr(ha, dev) {
9291			crc = calc_crc(ha->addr, ETH_ALEN);
9292			bit = ~crc & 0x7f;
9293			regidx = (bit & 0x60) >> 5;
9294			bit &= 0x1f;
9295			mc_filter[regidx] |= (1 << bit);
9296		}
9297
9298		tw32(MAC_HASH_REG_0, mc_filter[0]);
9299		tw32(MAC_HASH_REG_1, mc_filter[1]);
9300		tw32(MAC_HASH_REG_2, mc_filter[2]);
9301		tw32(MAC_HASH_REG_3, mc_filter[3]);
9302	}
9303
9304	if (rx_mode != tp->rx_mode) {
9305		tp->rx_mode = rx_mode;
9306		tw32_f(MAC_RX_MODE, rx_mode);
9307		udelay(10);
9308	}
9309}
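/*
 * Sketch of the multicast hash mapping used above, illustrative only
 * and not part of the driver: the low 7 bits of the complemented CRC
 * select one of 128 filter bits; bits 6:5 pick the MAC_HASH_REG_n
 * register and bits 4:0 the bit position within it.
 */
#if 0	/* illustrative example only */
static void mc_hash_example(u8 *mcaddr)
{
	u32 crc = calc_crc((unsigned char *)mcaddr, ETH_ALEN);
	u32 bit = ~crc & 0x7f;			/* filter bit index, 0..127 */
	u32 regidx = (bit & 0x60) >> 5;		/* which MAC_HASH_REG_n, 0..3 */

	bit &= 0x1f;				/* bit within that register */
	pr_info("multicast addr hashes to bit %u of MAC_HASH_REG_%u\n",
		bit, regidx);
}
#endif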
9310
9311static void tg3_set_rx_mode(struct net_device *dev)
9312{
9313	struct tg3 *tp = netdev_priv(dev);
9314
9315	if (!netif_running(dev))
9316		return;
9317
9318	tg3_full_lock(tp, 0);
9319	__tg3_set_rx_mode(dev);
9320	tg3_full_unlock(tp);
9321}
9322
9323#define TG3_REGDUMP_LEN		(32 * 1024)
9324
9325static int tg3_get_regs_len(struct net_device *dev)
9326{
9327	return TG3_REGDUMP_LEN;
9328}
9329
9330static void tg3_get_regs(struct net_device *dev,
9331		struct ethtool_regs *regs, void *_p)
9332{
9333	u32 *p = _p;
9334	struct tg3 *tp = netdev_priv(dev);
9335	u8 *orig_p = _p;
9336	int i;
9337
9338	regs->version = 0;
9339
9340	memset(p, 0, TG3_REGDUMP_LEN);
9341
9342	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9343		return;
9344
9345	tg3_full_lock(tp, 0);
9346
9347#define __GET_REG32(reg)	(*(p)++ = tr32(reg))
9348#define GET_REG32_LOOP(base, len)		\
9349do {	p = (u32 *)(orig_p + (base));		\
9350	for (i = 0; i < len; i += 4)		\
9351		__GET_REG32((base) + i);	\
9352} while (0)
9353#define GET_REG32_1(reg)			\
9354do {	p = (u32 *)(orig_p + (reg));		\
9355	__GET_REG32((reg));			\
9356} while (0)
9357
9358	GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
9359	GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
9360	GET_REG32_LOOP(MAC_MODE, 0x4f0);
9361	GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
9362	GET_REG32_1(SNDDATAC_MODE);
9363	GET_REG32_LOOP(SNDBDS_MODE, 0x80);
9364	GET_REG32_LOOP(SNDBDI_MODE, 0x48);
9365	GET_REG32_1(SNDBDC_MODE);
9366	GET_REG32_LOOP(RCVLPC_MODE, 0x20);
9367	GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
9368	GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
9369	GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
9370	GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
9371	GET_REG32_1(RCVDCC_MODE);
9372	GET_REG32_LOOP(RCVBDI_MODE, 0x20);
9373	GET_REG32_LOOP(RCVCC_MODE, 0x14);
9374	GET_REG32_LOOP(RCVLSC_MODE, 0x08);
9375	GET_REG32_1(MBFREE_MODE);
9376	GET_REG32_LOOP(HOSTCC_MODE, 0x100);
9377	GET_REG32_LOOP(MEMARB_MODE, 0x10);
9378	GET_REG32_LOOP(BUFMGR_MODE, 0x58);
9379	GET_REG32_LOOP(RDMAC_MODE, 0x08);
9380	GET_REG32_LOOP(WDMAC_MODE, 0x08);
9381	GET_REG32_1(RX_CPU_MODE);
9382	GET_REG32_1(RX_CPU_STATE);
9383	GET_REG32_1(RX_CPU_PGMCTR);
9384	GET_REG32_1(RX_CPU_HWBKPT);
9385	GET_REG32_1(TX_CPU_MODE);
9386	GET_REG32_1(TX_CPU_STATE);
9387	GET_REG32_1(TX_CPU_PGMCTR);
9388	GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
9389	GET_REG32_LOOP(FTQ_RESET, 0x120);
9390	GET_REG32_LOOP(MSGINT_MODE, 0x0c);
9391	GET_REG32_1(DMAC_MODE);
9392	GET_REG32_LOOP(GRC_MODE, 0x4c);
9393	if (tp->tg3_flags & TG3_FLAG_NVRAM)
9394		GET_REG32_LOOP(NVRAM_CMD, 0x24);
9395
9396#undef __GET_REG32
9397#undef GET_REG32_LOOP
9398#undef GET_REG32_1
9399
9400	tg3_full_unlock(tp);
9401}
9402
9403static int tg3_get_eeprom_len(struct net_device *dev)
9404{
9405	struct tg3 *tp = netdev_priv(dev);
9406
9407	return tp->nvram_size;
9408}
9409
9410static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9411{
9412	struct tg3 *tp = netdev_priv(dev);
9413	int ret;
9414	u8  *pd;
9415	u32 i, offset, len, b_offset, b_count;
9416	__be32 val;
9417
9418	if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
9419		return -EINVAL;
9420
9421	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9422		return -EAGAIN;
9423
9424	offset = eeprom->offset;
9425	len = eeprom->len;
9426	eeprom->len = 0;
9427
9428	eeprom->magic = TG3_EEPROM_MAGIC;
9429
9430	if (offset & 3) {
9431		/* adjust to start on the required 4-byte boundary */
9432		b_offset = offset & 3;
9433		b_count = 4 - b_offset;
9434		if (b_count > len) {
9435			/* i.e. offset=1 len=2 */
9436			b_count = len;
9437		}
9438		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
9439		if (ret)
9440			return ret;
9441		memcpy(data, ((char *)&val) + b_offset, b_count);
9442		len -= b_count;
9443		offset += b_count;
9444		eeprom->len += b_count;
9445	}
9446
9447	/* read bytes up to the last 4-byte boundary */
9448	pd = &data[eeprom->len];
9449	for (i = 0; i < (len - (len & 3)); i += 4) {
9450		ret = tg3_nvram_read_be32(tp, offset + i, &val);
9451		if (ret) {
9452			eeprom->len += i;
9453			return ret;
9454		}
9455		memcpy(pd + i, &val, 4);
9456	}
9457	eeprom->len += i;
9458
9459	if (len & 3) {
9460		/* read the trailing bytes that do not end on a 4-byte boundary */
9461		pd = &data[eeprom->len];
9462		b_count = len & 3;
9463		b_offset = offset + len - b_count;
9464		ret = tg3_nvram_read_be32(tp, b_offset, &val);
9465		if (ret)
9466			return ret;
9467		memcpy(pd, &val, b_count);
9468		eeprom->len += b_count;
9469	}
9470	return 0;
9471}
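/*
 * Worked example of the alignment handling above (illustrative): a
 * request with offset=6, len=7 is served in three steps --
 *   head: read the word at offset 4, copy its bytes 2..3 (b_offset=2,
 *         b_count=2),
 *   body: read the aligned word at offset 8 in full,
 *   tail: read the word at offset 12, copy its first byte (len & 3 == 1),
 * returning bytes 6..12 while every NVRAM access stays word-aligned.
 */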
9472
9473static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
9474
9475static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9476{
9477	struct tg3 *tp = netdev_priv(dev);
9478	int ret;
9479	u32 offset, len, b_offset, odd_len;
9480	u8 *buf;
9481	__be32 start, end;
9482
9483	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9484		return -EAGAIN;
9485
9486	if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
9487	    eeprom->magic != TG3_EEPROM_MAGIC)
9488		return -EINVAL;
9489
9490	offset = eeprom->offset;
9491	len = eeprom->len;
9492
9493	if ((b_offset = (offset & 3))) {
9494		/* adjust to start on the required 4-byte boundary */
9495		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
9496		if (ret)
9497			return ret;
9498		len += b_offset;
9499		offset &= ~3;
9500		if (len < 4)
9501			len = 4;
9502	}
9503
9504	odd_len = 0;
9505	if (len & 3) {
9506		/* adjust to end on the required 4-byte boundary */
9507		odd_len = 1;
9508		len = (len + 3) & ~3;
9509		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
9510		if (ret)
9511			return ret;
9512	}
9513
9514	buf = data;
9515	if (b_offset || odd_len) {
9516		buf = kmalloc(len, GFP_KERNEL);
9517		if (!buf)
9518			return -ENOMEM;
9519		if (b_offset)
9520			memcpy(buf, &start, 4);
9521		if (odd_len)
9522			memcpy(buf+len-4, &end, 4);
9523		memcpy(buf + b_offset, data, eeprom->len);
9524	}
9525
9526	ret = tg3_nvram_write_block(tp, offset, len, buf);
9527
9528	if (buf != data)
9529		kfree(buf);
9530
9531	return ret;
9532}
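/*
 * Worked example of the read-modify-write widening above (illustrative):
 * a write with offset=5, len=6 becomes one aligned 8-byte write at
 * offset 4.  Byte 4 is preserved from the old word at offset 4, bytes
 * 5..10 carry the caller's data, and byte 11 is preserved from the old
 * word at offset 8.
 */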
9533
9534static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9535{
9536	struct tg3 *tp = netdev_priv(dev);
9537
9538	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9539		struct phy_device *phydev;
9540		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
9541			return -EAGAIN;
9542		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9543		return phy_ethtool_gset(phydev, cmd);
9544	}
9545
9546	cmd->supported = (SUPPORTED_Autoneg);
9547
9548	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
9549		cmd->supported |= (SUPPORTED_1000baseT_Half |
9550				   SUPPORTED_1000baseT_Full);
9551
9552	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
9553		cmd->supported |= (SUPPORTED_100baseT_Half |
9554				  SUPPORTED_100baseT_Full |
9555				  SUPPORTED_10baseT_Half |
9556				  SUPPORTED_10baseT_Full |
9557				  SUPPORTED_TP);
9558		cmd->port = PORT_TP;
9559	} else {
9560		cmd->supported |= SUPPORTED_FIBRE;
9561		cmd->port = PORT_FIBRE;
9562	}
9563
9564	cmd->advertising = tp->link_config.advertising;
9565	if (netif_running(dev)) {
9566		cmd->speed = tp->link_config.active_speed;
9567		cmd->duplex = tp->link_config.active_duplex;
9568	}
9569	cmd->phy_address = tp->phy_addr;
9570	cmd->transceiver = XCVR_INTERNAL;
9571	cmd->autoneg = tp->link_config.autoneg;
9572	cmd->maxtxpkt = 0;
9573	cmd->maxrxpkt = 0;
9574	return 0;
9575}
9576
9577static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9578{
9579	struct tg3 *tp = netdev_priv(dev);
9580
9581	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9582		struct phy_device *phydev;
9583		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
9584			return -EAGAIN;
9585		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9586		return phy_ethtool_sset(phydev, cmd);
9587	}
9588
9589	if (cmd->autoneg != AUTONEG_ENABLE &&
9590	    cmd->autoneg != AUTONEG_DISABLE)
9591		return -EINVAL;
9592
9593	if (cmd->autoneg == AUTONEG_DISABLE &&
9594	    cmd->duplex != DUPLEX_FULL &&
9595	    cmd->duplex != DUPLEX_HALF)
9596		return -EINVAL;
9597
9598	if (cmd->autoneg == AUTONEG_ENABLE) {
9599		u32 mask = ADVERTISED_Autoneg |
9600			   ADVERTISED_Pause |
9601			   ADVERTISED_Asym_Pause;
9602
9603		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
9604			mask |= ADVERTISED_1000baseT_Half |
9605				ADVERTISED_1000baseT_Full;
9606
9607		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
9608			mask |= ADVERTISED_100baseT_Half |
9609				ADVERTISED_100baseT_Full |
9610				ADVERTISED_10baseT_Half |
9611				ADVERTISED_10baseT_Full |
9612				ADVERTISED_TP;
9613		else
9614			mask |= ADVERTISED_FIBRE;
9615
9616		if (cmd->advertising & ~mask)
9617			return -EINVAL;
9618
9619		mask &= (ADVERTISED_1000baseT_Half |
9620			 ADVERTISED_1000baseT_Full |
9621			 ADVERTISED_100baseT_Half |
9622			 ADVERTISED_100baseT_Full |
9623			 ADVERTISED_10baseT_Half |
9624			 ADVERTISED_10baseT_Full);
9625
9626		cmd->advertising &= mask;
9627	} else {
9628		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
9629			if (cmd->speed != SPEED_1000)
9630				return -EINVAL;
9631
9632			if (cmd->duplex != DUPLEX_FULL)
9633				return -EINVAL;
9634		} else {
9635			if (cmd->speed != SPEED_100 &&
9636			    cmd->speed != SPEED_10)
9637				return -EINVAL;
9638		}
9639	}
9640
9641	tg3_full_lock(tp, 0);
9642
9643	tp->link_config.autoneg = cmd->autoneg;
9644	if (cmd->autoneg == AUTONEG_ENABLE) {
9645		tp->link_config.advertising = (cmd->advertising |
9646					      ADVERTISED_Autoneg);
9647		tp->link_config.speed = SPEED_INVALID;
9648		tp->link_config.duplex = DUPLEX_INVALID;
9649	} else {
9650		tp->link_config.advertising = 0;
9651		tp->link_config.speed = cmd->speed;
9652		tp->link_config.duplex = cmd->duplex;
9653	}
9654
9655	tp->link_config.orig_speed = tp->link_config.speed;
9656	tp->link_config.orig_duplex = tp->link_config.duplex;
9657	tp->link_config.orig_autoneg = tp->link_config.autoneg;
9658
9659	if (netif_running(dev))
9660		tg3_setup_phy(tp, 1);
9661
9662	tg3_full_unlock(tp);
9663
9664	return 0;
9665}
9666
9667static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
9668{
9669	struct tg3 *tp = netdev_priv(dev);
9670
9671	strcpy(info->driver, DRV_MODULE_NAME);
9672	strcpy(info->version, DRV_MODULE_VERSION);
9673	strcpy(info->fw_version, tp->fw_ver);
9674	strcpy(info->bus_info, pci_name(tp->pdev));
9675}
9676
9677static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9678{
9679	struct tg3 *tp = netdev_priv(dev);
9680
9681	if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
9682	    device_can_wakeup(&tp->pdev->dev))
9683		wol->supported = WAKE_MAGIC;
9684	else
9685		wol->supported = 0;
9686	wol->wolopts = 0;
9687	if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
9688	    device_can_wakeup(&tp->pdev->dev))
9689		wol->wolopts = WAKE_MAGIC;
9690	memset(&wol->sopass, 0, sizeof(wol->sopass));
9691}
9692
9693static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9694{
9695	struct tg3 *tp = netdev_priv(dev);
9696	struct device *dp = &tp->pdev->dev;
9697
9698	if (wol->wolopts & ~WAKE_MAGIC)
9699		return -EINVAL;
9700	if ((wol->wolopts & WAKE_MAGIC) &&
9701	    !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
9702		return -EINVAL;
9703
9704	spin_lock_bh(&tp->lock);
9705	if (wol->wolopts & WAKE_MAGIC) {
9706		tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
9707		device_set_wakeup_enable(dp, true);
9708	} else {
9709		tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
9710		device_set_wakeup_enable(dp, false);
9711	}
9712	spin_unlock_bh(&tp->lock);
9713
9714	return 0;
9715}
9716
9717static u32 tg3_get_msglevel(struct net_device *dev)
9718{
9719	struct tg3 *tp = netdev_priv(dev);
9720	return tp->msg_enable;
9721}
9722
9723static void tg3_set_msglevel(struct net_device *dev, u32 value)
9724{
9725	struct tg3 *tp = netdev_priv(dev);
9726	tp->msg_enable = value;
9727}
9728
9729static int tg3_set_tso(struct net_device *dev, u32 value)
9730{
9731	struct tg3 *tp = netdev_priv(dev);
9732
9733	if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
9734		if (value)
9735			return -EINVAL;
9736		return 0;
9737	}
9738	if ((dev->features & NETIF_F_IPV6_CSUM) &&
9739	    ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) ||
9740	     (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3))) {
9741		if (value) {
9742			dev->features |= NETIF_F_TSO6;
9743			if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
9744			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9745			    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
9746			     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
9747			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9748			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9749				dev->features |= NETIF_F_TSO_ECN;
9750		} else
9751			dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
9752	}
9753	return ethtool_op_set_tso(dev, value);
9754}
9755
9756static int tg3_nway_reset(struct net_device *dev)
9757{
9758	struct tg3 *tp = netdev_priv(dev);
9759	int r;
9760
9761	if (!netif_running(dev))
9762		return -EAGAIN;
9763
9764	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
9765		return -EINVAL;
9766
9767	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9768		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
9769			return -EAGAIN;
9770		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
9771	} else {
9772		u32 bmcr;
9773
9774		spin_lock_bh(&tp->lock);
9775		r = -EINVAL;
9776		tg3_readphy(tp, MII_BMCR, &bmcr);
9777		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
9778		    ((bmcr & BMCR_ANENABLE) ||
9779		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
9780			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
9781						   BMCR_ANENABLE);
9782			r = 0;
9783		}
9784		spin_unlock_bh(&tp->lock);
9785	}
9786
9787	return r;
9788}
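/*
 * This is the handler behind "ethtool -r <dev>": setting BMCR_ANRESTART
 * together with BMCR_ANENABLE kicks off a new autonegotiation cycle.
 * The request is refused with -EINVAL when autoneg is disabled and
 * parallel detection is not active.
 */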
9789
9790static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9791{
9792	struct tg3 *tp = netdev_priv(dev);
9793
9794	ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
9795	ering->rx_mini_max_pending = 0;
9796	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9797		ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
9798	else
9799		ering->rx_jumbo_max_pending = 0;
9800
9801	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
9802
9803	ering->rx_pending = tp->rx_pending;
9804	ering->rx_mini_pending = 0;
9805	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9806		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
9807	else
9808		ering->rx_jumbo_pending = 0;
9809
9810	ering->tx_pending = tp->napi[0].tx_pending;
9811}
9812
9813static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9814{
9815	struct tg3 *tp = netdev_priv(dev);
9816	int i, irq_sync = 0, err = 0;
9817
9818	if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
9819	    (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
9820	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
9821	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
9822	    ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
9823	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
9824		return -EINVAL;
9825
9826	if (netif_running(dev)) {
9827		tg3_phy_stop(tp);
9828		tg3_netif_stop(tp);
9829		irq_sync = 1;
9830	}
9831
9832	tg3_full_lock(tp, irq_sync);
9833
9834	tp->rx_pending = ering->rx_pending;
9835
9836	if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
9837	    tp->rx_pending > 63)
9838		tp->rx_pending = 63;
9839	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
9840
9841	for (i = 0; i < TG3_IRQ_MAX_VECS; i++)
9842		tp->napi[i].tx_pending = ering->tx_pending;
9843
9844	if (netif_running(dev)) {
9845		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9846		err = tg3_restart_hw(tp, 1);
9847		if (!err)
9848			tg3_netif_start(tp);
9849	}
9850
9851	tg3_full_unlock(tp);
9852
9853	if (irq_sync && !err)
9854		tg3_phy_start(tp);
9855
9856	return err;
9857}
9858
9859static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9860{
9861	struct tg3 *tp = netdev_priv(dev);
9862
9863	epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
9864
9865	if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
9866		epause->rx_pause = 1;
9867	else
9868		epause->rx_pause = 0;
9869
9870	if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
9871		epause->tx_pause = 1;
9872	else
9873		epause->tx_pause = 0;
9874}
9875
9876static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9877{
9878	struct tg3 *tp = netdev_priv(dev);
9879	int err = 0;
9880
9881	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9882		u32 newadv;
9883		struct phy_device *phydev;
9884
9885		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9886
9887		if (!(phydev->supported & SUPPORTED_Pause) ||
9888		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
9889		     ((epause->rx_pause && !epause->tx_pause) ||
9890		      (!epause->rx_pause && epause->tx_pause))))
9891			return -EINVAL;
9892
9893		tp->link_config.flowctrl = 0;
9894		if (epause->rx_pause) {
9895			tp->link_config.flowctrl |= FLOW_CTRL_RX;
9896
9897			if (epause->tx_pause) {
9898				tp->link_config.flowctrl |= FLOW_CTRL_TX;
9899				newadv = ADVERTISED_Pause;
9900			} else
9901				newadv = ADVERTISED_Pause |
9902					 ADVERTISED_Asym_Pause;
9903		} else if (epause->tx_pause) {
9904			tp->link_config.flowctrl |= FLOW_CTRL_TX;
9905			newadv = ADVERTISED_Asym_Pause;
9906		} else
9907			newadv = 0;
9908
9909		if (epause->autoneg)
9910			tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
9911		else
9912			tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
9913
9914		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
9915			u32 oldadv = phydev->advertising &
9916				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
9917			if (oldadv != newadv) {
9918				phydev->advertising &=
9919					~(ADVERTISED_Pause |
9920					  ADVERTISED_Asym_Pause);
9921				phydev->advertising |= newadv;
9922				if (phydev->autoneg) {
9923					/*
9924					 * Always renegotiate the link to
9925					 * inform our link partner of our
9926					 * flow control settings, even if the
9927					 * flow control is forced.  Let
9928					 * tg3_adjust_link() do the final
9929					 * flow control setup.
9930					 */
9931					return phy_start_aneg(phydev);
9932				}
9933			}
9934
9935			if (!epause->autoneg)
9936				tg3_setup_flow_control(tp, 0, 0);
9937		} else {
9938			tp->link_config.orig_advertising &=
9939					~(ADVERTISED_Pause |
9940					  ADVERTISED_Asym_Pause);
9941			tp->link_config.orig_advertising |= newadv;
9942		}
9943	} else {
9944		int irq_sync = 0;
9945
9946		if (netif_running(dev)) {
9947			tg3_netif_stop(tp);
9948			irq_sync = 1;
9949		}
9950
9951		tg3_full_lock(tp, irq_sync);
9952
9953		if (epause->autoneg)
9954			tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
9955		else
9956			tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
9957		if (epause->rx_pause)
9958			tp->link_config.flowctrl |= FLOW_CTRL_RX;
9959		else
9960			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
9961		if (epause->tx_pause)
9962			tp->link_config.flowctrl |= FLOW_CTRL_TX;
9963		else
9964			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
9965
9966		if (netif_running(dev)) {
9967			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9968			err = tg3_restart_hw(tp, 1);
9969			if (!err)
9970				tg3_netif_start(tp);
9971		}
9972
9973		tg3_full_unlock(tp);
9974	}
9975
9976	return err;
9977}
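/*
 * Pause advertisement mapping used above, per IEEE 802.3 Annex 28B:
 *   rx=1 tx=1  ->  ADVERTISED_Pause
 *   rx=1 tx=0  ->  ADVERTISED_Pause | ADVERTISED_Asym_Pause
 *   rx=0 tx=1  ->  ADVERTISED_Asym_Pause
 *   rx=0 tx=0  ->  (nothing)
 */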
9978
9979static u32 tg3_get_rx_csum(struct net_device *dev)
9980{
9981	struct tg3 *tp = netdev_priv(dev);
9982	return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
9983}
9984
9985static int tg3_set_rx_csum(struct net_device *dev, u32 data)
9986{
9987	struct tg3 *tp = netdev_priv(dev);
9988
9989	if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9990		if (data != 0)
9991			return -EINVAL;
9992		return 0;
9993	}
9994
9995	spin_lock_bh(&tp->lock);
9996	if (data)
9997		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
9998	else
9999		tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
10000	spin_unlock_bh(&tp->lock);
10001
10002	return 0;
10003}
10004
10005static int tg3_set_tx_csum(struct net_device *dev, u32 data)
10006{
10007	struct tg3 *tp = netdev_priv(dev);
10008
10009	if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
10010		if (data != 0)
10011			return -EINVAL;
10012		return 0;
10013	}
10014
10015	if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
10016		ethtool_op_set_tx_ipv6_csum(dev, data);
10017	else
10018		ethtool_op_set_tx_csum(dev, data);
10019
10020	return 0;
10021}
10022
10023static int tg3_get_sset_count(struct net_device *dev, int sset)
10024{
10025	switch (sset) {
10026	case ETH_SS_TEST:
10027		return TG3_NUM_TEST;
10028	case ETH_SS_STATS:
10029		return TG3_NUM_STATS;
10030	default:
10031		return -EOPNOTSUPP;
10032	}
10033}
10034
10035static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10036{
10037	switch (stringset) {
10038	case ETH_SS_STATS:
10039		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10040		break;
10041	case ETH_SS_TEST:
10042		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10043		break;
10044	default:
10045		WARN_ON(1);	/* we need a WARN() */
10046		break;
10047	}
10048}
10049
10050static int tg3_phys_id(struct net_device *dev, u32 data)
10051{
10052	struct tg3 *tp = netdev_priv(dev);
10053	int i;
10054
10055	if (!netif_running(tp->dev))
10056		return -EAGAIN;
10057
10058	if (data == 0)
10059		data = UINT_MAX / 2;
10060
10061	for (i = 0; i < (data * 2); i++) {
10062		if ((i % 2) == 0)
10063			tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10064					   LED_CTRL_1000MBPS_ON |
10065					   LED_CTRL_100MBPS_ON |
10066					   LED_CTRL_10MBPS_ON |
10067					   LED_CTRL_TRAFFIC_OVERRIDE |
10068					   LED_CTRL_TRAFFIC_BLINK |
10069					   LED_CTRL_TRAFFIC_LED);
10070
10071		else
10072			tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10073					   LED_CTRL_TRAFFIC_OVERRIDE);
10074
10075		if (msleep_interruptible(500))
10076			break;
10077	}
10078	tw32(MAC_LED_CTRL, tp->led_ctrl);
10079	return 0;
10080}
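/*
 * Reached via "ethtool -p <dev> <seconds>": 'data' is the blink
 * duration in seconds.  The loop runs data * 2 half-second intervals,
 * alternating between all speed/traffic LEDs forced on and all LEDs
 * forced off, then restores the saved tp->led_ctrl value.
 */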
10081
10082static void tg3_get_ethtool_stats(struct net_device *dev,
10083				   struct ethtool_stats *estats, u64 *tmp_stats)
10084{
10085	struct tg3 *tp = netdev_priv(dev);
10086	memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
10087}
10088
10089#define NVRAM_TEST_SIZE 0x100
10090#define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
10091#define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
10092#define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
10093#define NVRAM_SELFBOOT_HW_SIZE 0x20
10094#define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10095
10096static int tg3_test_nvram(struct tg3 *tp)
10097{
10098	u32 csum, magic;
10099	__be32 *buf;
10100	int i, j, k, err = 0, size;
10101
10102	if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
10103		return 0;
10104
10105	if (tg3_nvram_read(tp, 0, &magic) != 0)
10106		return -EIO;
10107
10108	if (magic == TG3_EEPROM_MAGIC)
10109		size = NVRAM_TEST_SIZE;
10110	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10111		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10112		    TG3_EEPROM_SB_FORMAT_1) {
10113			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10114			case TG3_EEPROM_SB_REVISION_0:
10115				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10116				break;
10117			case TG3_EEPROM_SB_REVISION_2:
10118				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10119				break;
10120			case TG3_EEPROM_SB_REVISION_3:
10121				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10122				break;
10123			default:
10124				return 0;
10125			}
10126		} else
10127			return 0;
10128	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10129		size = NVRAM_SELFBOOT_HW_SIZE;
10130	else
10131		return -EIO;
10132
10133	buf = kmalloc(size, GFP_KERNEL);
10134	if (buf == NULL)
10135		return -ENOMEM;
10136
10137	err = -EIO;
10138	for (i = 0, j = 0; i < size; i += 4, j++) {
10139		err = tg3_nvram_read_be32(tp, i, &buf[j]);
10140		if (err)
10141			break;
10142	}
10143	if (i < size)
10144		goto out;
10145
10146	/* Selfboot format */
10147	magic = be32_to_cpu(buf[0]);
10148	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10149	    TG3_EEPROM_MAGIC_FW) {
10150		u8 *buf8 = (u8 *) buf, csum8 = 0;
10151
10152		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10153		    TG3_EEPROM_SB_REVISION_2) {
10154			/* For rev 2, the csum doesn't include the MBA. */
10155			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
10156				csum8 += buf8[i];
10157			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
10158				csum8 += buf8[i];
10159		} else {
10160			for (i = 0; i < size; i++)
10161				csum8 += buf8[i];
10162		}
10163
10164		if (csum8 == 0) {
10165			err = 0;
10166			goto out;
10167		}
10168
10169		err = -EIO;
10170		goto out;
10171	}
10172
10173	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10174	    TG3_EEPROM_MAGIC_HW) {
10175		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10176		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10177		u8 *buf8 = (u8 *) buf;
10178
10179		/* Separate the parity bits and the data bytes.  */
10180		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
10181			if ((i == 0) || (i == 8)) {
10182				int l;
10183				u8 msk;
10184
10185				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10186					parity[k++] = buf8[i] & msk;
10187				i++;
10188			} else if (i == 16) {
10189				int l;
10190				u8 msk;
10191
10192				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
10193					parity[k++] = buf8[i] & msk;
10194				i++;
10195
10196				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
10197					parity[k++] = buf8[i] & msk;
10198				i++;
10199			}
10200			data[j++] = buf8[i];
10201		}
10202
10203		err = -EIO;
10204		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
10205			u8 hw8 = hweight8(data[i]);
10206
10207			if ((hw8 & 0x1) && parity[i])
10208				goto out;
10209			else if (!(hw8 & 0x1) && !parity[i])
10210				goto out;
10211		}
10212		err = 0;
10213		goto out;
10214	}
10215
10216	/* Bootstrap checksum at offset 0x10 */
10217	csum = calc_crc((unsigned char *) buf, 0x10);
10218	if (csum != be32_to_cpu(buf[0x10/4]))
10219		goto out;
10220
10221	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10222	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
10223	if (csum != be32_to_cpu(buf[0xfc/4]))
10224		goto out;
10225
10226	err = 0;
10227
10228out:
10229	kfree(buf);
10230	return err;
10231}
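/*
 * Legacy (TG3_EEPROM_MAGIC) NVRAM layout verified above, for reference:
 *   0x00         magic word
 *   0x00 - 0x0f  region covered by the bootstrap CRC stored at 0x10
 *   0x74 - 0xfb  manufacturing block, CRC stored at 0xfc
 * Selfboot images are instead validated by a zero byte-sum (firmware
 * format) or by per-byte odd parity (hardware format).
 */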
10232
10233#define TG3_SERDES_TIMEOUT_SEC	2
10234#define TG3_COPPER_TIMEOUT_SEC	6
10235
10236static int tg3_test_link(struct tg3 *tp)
10237{
10238	int i, max;
10239
10240	if (!netif_running(tp->dev))
10241		return -ENODEV;
10242
10243	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
10244		max = TG3_SERDES_TIMEOUT_SEC;
10245	else
10246		max = TG3_COPPER_TIMEOUT_SEC;
10247
10248	for (i = 0; i < max; i++) {
10249		if (netif_carrier_ok(tp->dev))
10250			return 0;
10251
10252		if (msleep_interruptible(1000))
10253			break;
10254	}
10255
10256	return -EIO;
10257}
10258
10259/* Only test the commonly used registers */
10260static int tg3_test_registers(struct tg3 *tp)
10261{
10262	int i, is_5705, is_5750;
10263	u32 offset, read_mask, write_mask, val, save_val, read_val;
10264	static struct {
10265		u16 offset;
10266		u16 flags;
10267#define TG3_FL_5705	0x1
10268#define TG3_FL_NOT_5705	0x2
10269#define TG3_FL_NOT_5788	0x4
10270#define TG3_FL_NOT_5750	0x8
10271		u32 read_mask;
10272		u32 write_mask;
10273	} reg_tbl[] = {
10274		/* MAC Control Registers */
10275		{ MAC_MODE, TG3_FL_NOT_5705,
10276			0x00000000, 0x00ef6f8c },
10277		{ MAC_MODE, TG3_FL_5705,
10278			0x00000000, 0x01ef6b8c },
10279		{ MAC_STATUS, TG3_FL_NOT_5705,
10280			0x03800107, 0x00000000 },
10281		{ MAC_STATUS, TG3_FL_5705,
10282			0x03800100, 0x00000000 },
10283		{ MAC_ADDR_0_HIGH, 0x0000,
10284			0x00000000, 0x0000ffff },
10285		{ MAC_ADDR_0_LOW, 0x0000,
10286			0x00000000, 0xffffffff },
10287		{ MAC_RX_MTU_SIZE, 0x0000,
10288			0x00000000, 0x0000ffff },
10289		{ MAC_TX_MODE, 0x0000,
10290			0x00000000, 0x00000070 },
10291		{ MAC_TX_LENGTHS, 0x0000,
10292			0x00000000, 0x00003fff },
10293		{ MAC_RX_MODE, TG3_FL_NOT_5705,
10294			0x00000000, 0x000007fc },
10295		{ MAC_RX_MODE, TG3_FL_5705,
10296			0x00000000, 0x000007dc },
10297		{ MAC_HASH_REG_0, 0x0000,
10298			0x00000000, 0xffffffff },
10299		{ MAC_HASH_REG_1, 0x0000,
10300			0x00000000, 0xffffffff },
10301		{ MAC_HASH_REG_2, 0x0000,
10302			0x00000000, 0xffffffff },
10303		{ MAC_HASH_REG_3, 0x0000,
10304			0x00000000, 0xffffffff },
10305
10306		/* Receive Data and Receive BD Initiator Control Registers. */
10307		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
10308			0x00000000, 0xffffffff },
10309		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
10310			0x00000000, 0xffffffff },
10311		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
10312			0x00000000, 0x00000003 },
10313		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
10314			0x00000000, 0xffffffff },
10315		{ RCVDBDI_STD_BD+0, 0x0000,
10316			0x00000000, 0xffffffff },
10317		{ RCVDBDI_STD_BD+4, 0x0000,
10318			0x00000000, 0xffffffff },
10319		{ RCVDBDI_STD_BD+8, 0x0000,
10320			0x00000000, 0xffff0002 },
10321		{ RCVDBDI_STD_BD+0xc, 0x0000,
10322			0x00000000, 0xffffffff },
10323
10324		/* Receive BD Initiator Control Registers. */
10325		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
10326			0x00000000, 0xffffffff },
10327		{ RCVBDI_STD_THRESH, TG3_FL_5705,
10328			0x00000000, 0x000003ff },
10329		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
10330			0x00000000, 0xffffffff },
10331
10332		/* Host Coalescing Control Registers. */
10333		{ HOSTCC_MODE, TG3_FL_NOT_5705,
10334			0x00000000, 0x00000004 },
10335		{ HOSTCC_MODE, TG3_FL_5705,
10336			0x00000000, 0x000000f6 },
10337		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
10338			0x00000000, 0xffffffff },
10339		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
10340			0x00000000, 0x000003ff },
10341		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
10342			0x00000000, 0xffffffff },
10343		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
10344			0x00000000, 0x000003ff },
10345		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
10346			0x00000000, 0xffffffff },
10347		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10348			0x00000000, 0x000000ff },
10349		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
10350			0x00000000, 0xffffffff },
10351		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10352			0x00000000, 0x000000ff },
10353		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
10354			0x00000000, 0xffffffff },
10355		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
10356			0x00000000, 0xffffffff },
10357		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10358			0x00000000, 0xffffffff },
10359		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10360			0x00000000, 0x000000ff },
10361		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10362			0x00000000, 0xffffffff },
10363		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10364			0x00000000, 0x000000ff },
10365		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
10366			0x00000000, 0xffffffff },
10367		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
10368			0x00000000, 0xffffffff },
10369		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
10370			0x00000000, 0xffffffff },
10371		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
10372			0x00000000, 0xffffffff },
10373		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
10374			0x00000000, 0xffffffff },
10375		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
10376			0xffffffff, 0x00000000 },
10377		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
10378			0xffffffff, 0x00000000 },
10379
10380		/* Buffer Manager Control Registers. */
10381		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
10382			0x00000000, 0x007fff80 },
10383		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
10384			0x00000000, 0x007fffff },
10385		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
10386			0x00000000, 0x0000003f },
10387		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
10388			0x00000000, 0x000001ff },
10389		{ BUFMGR_MB_HIGH_WATER, 0x0000,
10390			0x00000000, 0x000001ff },
10391		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
10392			0xffffffff, 0x00000000 },
10393		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
10394			0xffffffff, 0x00000000 },
10395
10396		/* Mailbox Registers */
10397		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
10398			0x00000000, 0x000001ff },
10399		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
10400			0x00000000, 0x000001ff },
10401		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
10402			0x00000000, 0x000007ff },
10403		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
10404			0x00000000, 0x000001ff },
10405
10406		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
10407	};
10408
10409	is_5705 = is_5750 = 0;
10410	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10411		is_5705 = 1;
10412		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10413			is_5750 = 1;
10414	}
10415
10416	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
10417		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
10418			continue;
10419
10420		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
10421			continue;
10422
10423		if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
10424		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
10425			continue;
10426
10427		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
10428			continue;
10429
10430		offset = (u32) reg_tbl[i].offset;
10431		read_mask = reg_tbl[i].read_mask;
10432		write_mask = reg_tbl[i].write_mask;
10433
10434		/* Save the original register content */
10435		save_val = tr32(offset);
10436
10437		/* Determine the read-only value. */
10438		read_val = save_val & read_mask;
10439
10440		/* Write zero to the register, then make sure the read-only bits
10441		 * are not changed and the read/write bits are all zeros.
10442		 */
10443		tw32(offset, 0);
10444
10445		val = tr32(offset);
10446
10447		/* Test the read-only and read/write bits. */
10448		if (((val & read_mask) != read_val) || (val & write_mask))
10449			goto out;
10450
10451		/* Write ones to all the bits defined by RdMask and WrMask, then
10452		 * make sure the read-only bits are not changed and the
10453		 * read/write bits are all ones.
10454		 */
10455		tw32(offset, read_mask | write_mask);
10456
10457		val = tr32(offset);
10458
10459		/* Test the read-only bits. */
10460		if ((val & read_mask) != read_val)
10461			goto out;
10462
10463		/* Test the read/write bits. */
10464		if ((val & write_mask) != write_mask)
10465			goto out;
10466
10467		tw32(offset, save_val);
10468	}
10469
10470	return 0;
10471
10472out:
10473	if (netif_msg_hw(tp))
10474		netdev_err(tp->dev,
10475			   "Register test failed at offset %x\n", offset);
10476	tw32(offset, save_val);
10477	return -EIO;
10478}
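/*
 * The test above uses a two-phase mask scheme per register: write
 * all-zeros and check that read-only bits (read_mask) keep their saved
 * value while writable bits read back as 0, then write
 * read_mask | write_mask and check that read-only bits are still
 * untouched while every writable bit reads back as 1.  The original
 * register value is restored on both the success and failure paths.
 */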
10479
10480static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
10481{
10482	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
10483	int i;
10484	u32 j;
10485
10486	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
10487		for (j = 0; j < len; j += 4) {
10488			u32 val;
10489
10490			tg3_write_mem(tp, offset + j, test_pattern[i]);
10491			tg3_read_mem(tp, offset + j, &val);
10492			if (val != test_pattern[i])
10493				return -EIO;
10494		}
10495	}
10496	return 0;
10497}
10498
10499static int tg3_test_memory(struct tg3 *tp)
10500{
10501	static struct mem_entry {
10502		u32 offset;
10503		u32 len;
10504	} mem_tbl_570x[] = {
10505		{ 0x00000000, 0x00b50},
10506		{ 0x00002000, 0x1c000},
10507		{ 0xffffffff, 0x00000}
10508	}, mem_tbl_5705[] = {
10509		{ 0x00000100, 0x0000c},
10510		{ 0x00000200, 0x00008},
10511		{ 0x00004000, 0x00800},
10512		{ 0x00006000, 0x01000},
10513		{ 0x00008000, 0x02000},
10514		{ 0x00010000, 0x0e000},
10515		{ 0xffffffff, 0x00000}
10516	}, mem_tbl_5755[] = {
10517		{ 0x00000200, 0x00008},
10518		{ 0x00004000, 0x00800},
10519		{ 0x00006000, 0x00800},
10520		{ 0x00008000, 0x02000},
10521		{ 0x00010000, 0x0c000},
10522		{ 0xffffffff, 0x00000}
10523	}, mem_tbl_5906[] = {
10524		{ 0x00000200, 0x00008},
10525		{ 0x00004000, 0x00400},
10526		{ 0x00006000, 0x00400},
10527		{ 0x00008000, 0x01000},
10528		{ 0x00010000, 0x01000},
10529		{ 0xffffffff, 0x00000}
10530	}, mem_tbl_5717[] = {
10531		{ 0x00000200, 0x00008},
10532		{ 0x00010000, 0x0a000},
10533		{ 0x00020000, 0x13c00},
10534		{ 0xffffffff, 0x00000}
10535	}, mem_tbl_57765[] = {
10536		{ 0x00000200, 0x00008},
10537		{ 0x00004000, 0x00800},
10538		{ 0x00006000, 0x09800},
10539		{ 0x00010000, 0x0a000},
10540		{ 0xffffffff, 0x00000}
10541	};
10542	struct mem_entry *mem_tbl;
10543	int err = 0;
10544	int i;
10545
10546	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
10547	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
10548		mem_tbl = mem_tbl_5717;
10549	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
10550		mem_tbl = mem_tbl_57765;
10551	else if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
10552		mem_tbl = mem_tbl_5755;
10553	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10554		mem_tbl = mem_tbl_5906;
10555	else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
10556		mem_tbl = mem_tbl_5705;
10557	else
10558		mem_tbl = mem_tbl_570x;
10559
10560	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
10561		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
10562		if (err)
10563			break;
10564	}
10565
10566	return err;
10567}
10568
10569#define TG3_MAC_LOOPBACK	0
10570#define TG3_PHY_LOOPBACK	1
10571
10572static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10573{
10574	u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
10575	u32 desc_idx, coal_now;
10576	struct sk_buff *skb, *rx_skb;
10577	u8 *tx_data;
10578	dma_addr_t map;
10579	int num_pkts, tx_len, rx_len, i, err;
10580	struct tg3_rx_buffer_desc *desc;
10581	struct tg3_napi *tnapi, *rnapi;
10582	struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
10583
10584	tnapi = &tp->napi[0];
10585	rnapi = &tp->napi[0];
10586	if (tp->irq_cnt > 1) {
10587		rnapi = &tp->napi[1];
10588		if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
10589			tnapi = &tp->napi[1];
10590	}
10591	coal_now = tnapi->coal_now | rnapi->coal_now;
10592
10593	if (loopback_mode == TG3_MAC_LOOPBACK) {
10594		/* HW erratum: MAC loopback fails in some cases on the 5780.
10595		 * Normal traffic and PHY loopback are not affected by
10596		 * this erratum.
10597		 */
10598		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
10599			return 0;
10600
10601		mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
10602			   MAC_MODE_PORT_INT_LPBACK;
10603		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10604			mac_mode |= MAC_MODE_LINK_POLARITY;
10605		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
10606			mac_mode |= MAC_MODE_PORT_MODE_MII;
10607		else
10608			mac_mode |= MAC_MODE_PORT_MODE_GMII;
10609		tw32(MAC_MODE, mac_mode);
10610	} else if (loopback_mode == TG3_PHY_LOOPBACK) {
10611		u32 val;
10612
10613		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
10614			tg3_phy_fet_toggle_apd(tp, false);
10615			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
10616		} else
10617			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
10618
10619		tg3_phy_toggle_automdix(tp, 0);
10620
10621		tg3_writephy(tp, MII_BMCR, val);
10622		udelay(40);
10623
10624		mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
10625		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
10626			tg3_writephy(tp, MII_TG3_FET_PTEST,
10627				     MII_TG3_FET_PTEST_FRC_TX_LINK |
10628				     MII_TG3_FET_PTEST_FRC_TX_LOCK);
10629			/* The write needs to be flushed for the AC131 */
10630			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
10631				tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
10632			mac_mode |= MAC_MODE_PORT_MODE_MII;
10633		} else
10634			mac_mode |= MAC_MODE_PORT_MODE_GMII;
10635
10636		/* reset to avoid intermittently losing the first rx packet */
10637		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10638			tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10639			udelay(10);
10640			tw32_f(MAC_RX_MODE, tp->rx_mode);
10641		}
10642		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
10643			u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
10644			if (masked_phy_id == TG3_PHY_ID_BCM5401)
10645				mac_mode &= ~MAC_MODE_LINK_POLARITY;
10646			else if (masked_phy_id == TG3_PHY_ID_BCM5411)
10647				mac_mode |= MAC_MODE_LINK_POLARITY;
10648			tg3_writephy(tp, MII_TG3_EXT_CTRL,
10649				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
10650		}
10651		tw32(MAC_MODE, mac_mode);
10652	} else {
10653		return -EINVAL;
10654	}
10655
10656	err = -EIO;
10657
10658	tx_len = 1514;
10659	skb = netdev_alloc_skb(tp->dev, tx_len);
10660	if (!skb)
10661		return -ENOMEM;
10662
10663	tx_data = skb_put(skb, tx_len);
10664	memcpy(tx_data, tp->dev->dev_addr, 6);
10665	memset(tx_data + 6, 0x0, 8);
10666
10667	tw32(MAC_RX_MTU_SIZE, tx_len + 4);
10668
10669	for (i = 14; i < tx_len; i++)
10670		tx_data[i] = (u8) (i & 0xff);
10671
10672	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
10673	if (pci_dma_mapping_error(tp->pdev, map)) {
10674		dev_kfree_skb(skb);
10675		return -EIO;
10676	}
10677
10678	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10679	       rnapi->coal_now);
10680
10681	udelay(10);
10682
10683	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
10684
10685	num_pkts = 0;
10686
10687	tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len, 0, 1);
10688
10689	tnapi->tx_prod++;
10690	num_pkts++;
10691
10692	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
10693	tr32_mailbox(tnapi->prodmbox);
10694
10695	udelay(10);
10696
10697	/* 350 usec to allow enough time on some 10/100 Mbps devices.  */
10698	for (i = 0; i < 35; i++) {
10699		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10700		       coal_now);
10701
10702		udelay(10);
10703
10704		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
10705		rx_idx = rnapi->hw_status->idx[0].rx_producer;
10706		if ((tx_idx == tnapi->tx_prod) &&
10707		    (rx_idx == (rx_start_idx + num_pkts)))
10708			break;
10709	}
10710
10711	pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
10712	dev_kfree_skb(skb);
10713
10714	if (tx_idx != tnapi->tx_prod)
10715		goto out;
10716
10717	if (rx_idx != rx_start_idx + num_pkts)
10718		goto out;
10719
10720	desc = &rnapi->rx_rcb[rx_start_idx];
10721	desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
10722	opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
10723	if (opaque_key != RXD_OPAQUE_RING_STD)
10724		goto out;
10725
10726	if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
10727	    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
10728		goto out;
10729
10730	rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
10731	if (rx_len != tx_len)
10732		goto out;
10733
10734	rx_skb = tpr->rx_std_buffers[desc_idx].skb;
10735
10736	map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx], mapping);
10737	pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
10738
10739	for (i = 14; i < tx_len; i++) {
10740		if (*(rx_skb->data + i) != (u8) (i & 0xff))
10741			goto out;
10742	}
10743	err = 0;
10744
10745	/* tg3_free_rings will unmap and free the rx_skb */
10746out:
10747	return err;
10748}
10749
10750#define TG3_MAC_LOOPBACK_FAILED		1
10751#define TG3_PHY_LOOPBACK_FAILED		2
10752#define TG3_LOOPBACK_FAILED		(TG3_MAC_LOOPBACK_FAILED |	\
10753					 TG3_PHY_LOOPBACK_FAILED)
10754
10755static int tg3_test_loopback(struct tg3 *tp)
10756{
10757	int err = 0;
10758	u32 cpmuctrl = 0;
10759
10760	if (!netif_running(tp->dev))
10761		return TG3_LOOPBACK_FAILED;
10762
10763	err = tg3_reset_hw(tp, 1);
10764	if (err)
10765		return TG3_LOOPBACK_FAILED;
10766
10767	/* Turn off gphy autopowerdown. */
10768	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
10769		tg3_phy_toggle_apd(tp, false);
10770
10771	if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
10772		int i;
10773		u32 status;
10774
10775		tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
10776
10777		/* Wait for up to 40 microseconds to acquire lock. */
10778		for (i = 0; i < 4; i++) {
10779			status = tr32(TG3_CPMU_MUTEX_GNT);
10780			if (status == CPMU_MUTEX_GNT_DRIVER)
10781				break;
10782			udelay(10);
10783		}
10784
10785		if (status != CPMU_MUTEX_GNT_DRIVER)
10786			return TG3_LOOPBACK_FAILED;
10787
10788		/* Turn off link-based power management. */
10789		cpmuctrl = tr32(TG3_CPMU_CTRL);
10790		tw32(TG3_CPMU_CTRL,
10791		     cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
10792				  CPMU_CTRL_LINK_AWARE_MODE));
10793	}
10794
10795	if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
10796		err |= TG3_MAC_LOOPBACK_FAILED;
10797
10798	if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
10799		tw32(TG3_CPMU_CTRL, cpmuctrl);
10800
10801		/* Release the mutex */
10802		tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
10803	}
10804
10805	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10806	    !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
10807		if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
10808			err |= TG3_PHY_LOOPBACK_FAILED;
10809	}
10810
10811	/* Re-enable gphy autopowerdown. */
10812	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
10813		tg3_phy_toggle_apd(tp, true);
10814
10815	return err;
10816}
10817
10818static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
10819			  u64 *data)
10820{
10821	struct tg3 *tp = netdev_priv(dev);
10822
10823	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10824		tg3_set_power_state(tp, PCI_D0);
10825
10826	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
10827
10828	if (tg3_test_nvram(tp) != 0) {
10829		etest->flags |= ETH_TEST_FL_FAILED;
10830		data[0] = 1;
10831	}
10832	if (tg3_test_link(tp) != 0) {
10833		etest->flags |= ETH_TEST_FL_FAILED;
10834		data[1] = 1;
10835	}
10836	if (etest->flags & ETH_TEST_FL_OFFLINE) {
10837		int err, err2 = 0, irq_sync = 0;
10838
10839		if (netif_running(dev)) {
10840			tg3_phy_stop(tp);
10841			tg3_netif_stop(tp);
10842			irq_sync = 1;
10843		}
10844
10845		tg3_full_lock(tp, irq_sync);
10846
10847		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
10848		err = tg3_nvram_lock(tp);
10849		tg3_halt_cpu(tp, RX_CPU_BASE);
10850		if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10851			tg3_halt_cpu(tp, TX_CPU_BASE);
10852		if (!err)
10853			tg3_nvram_unlock(tp);
10854
10855		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
10856			tg3_phy_reset(tp);
10857
10858		if (tg3_test_registers(tp) != 0) {
10859			etest->flags |= ETH_TEST_FL_FAILED;
10860			data[2] = 1;
10861		}
10862		if (tg3_test_memory(tp) != 0) {
10863			etest->flags |= ETH_TEST_FL_FAILED;
10864			data[3] = 1;
10865		}
10866		if ((data[4] = tg3_test_loopback(tp)) != 0)
10867			etest->flags |= ETH_TEST_FL_FAILED;
10868
10869		tg3_full_unlock(tp);
10870
10871		if (tg3_test_interrupt(tp) != 0) {
10872			etest->flags |= ETH_TEST_FL_FAILED;
10873			data[5] = 1;
10874		}
10875
10876		tg3_full_lock(tp, 0);
10877
10878		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10879		if (netif_running(dev)) {
10880			tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
10881			err2 = tg3_restart_hw(tp, 1);
10882			if (!err2)
10883				tg3_netif_start(tp);
10884		}
10885
10886		tg3_full_unlock(tp);
10887
10888		if (irq_sync && !err2)
10889			tg3_phy_start(tp);
10890	}
10891	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10892		tg3_set_power_state(tp, PCI_D3hot);
10893
10894}
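/*
 * Result slots the self-test above reports back to ethtool (the order
 * matches ethtool_test_keys):
 *   data[0] nvram      data[1] link      data[2] registers
 *   data[3] memory     data[4] loopback  data[5] interrupt
 */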
10895
10896static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10897{
10898	struct mii_ioctl_data *data = if_mii(ifr);
10899	struct tg3 *tp = netdev_priv(dev);
10900	int err;
10901
10902	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
10903		struct phy_device *phydev;
10904		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10905			return -EAGAIN;
10906		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10907		return phy_mii_ioctl(phydev, ifr, cmd);
10908	}
10909
10910	switch (cmd) {
10911	case SIOCGMIIPHY:
10912		data->phy_id = tp->phy_addr;
10913
10914		/* fallthru */
10915	case SIOCGMIIREG: {
10916		u32 mii_regval;
10917
10918		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10919			break;			/* We have no PHY */
10920
10921		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10922			return -EAGAIN;
10923
10924		spin_lock_bh(&tp->lock);
10925		err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
10926		spin_unlock_bh(&tp->lock);
10927
10928		data->val_out = mii_regval;
10929
10930		return err;
10931	}
10932
10933	case SIOCSMIIREG:
10934		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10935			break;			/* We have no PHY */
10936
10937		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10938			return -EAGAIN;
10939
10940		spin_lock_bh(&tp->lock);
10941		err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
10942		spin_unlock_bh(&tp->lock);
10943
10944		return err;
10945
10946	default:
10947		/* do nothing */
10948		break;
10949	}
10950	return -EOPNOTSUPP;
10951}
10952
10953#if TG3_VLAN_TAG_USED
10954static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
10955{
10956	struct tg3 *tp = netdev_priv(dev);
10957
10958	if (!netif_running(dev)) {
10959		tp->vlgrp = grp;
10960		return;
10961	}
10962
10963	tg3_netif_stop(tp);
10964
10965	tg3_full_lock(tp, 0);
10966
10967	tp->vlgrp = grp;
10968
10969	/* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
10970	__tg3_set_rx_mode(dev);
10971
10972	tg3_netif_start(tp);
10973
10974	tg3_full_unlock(tp);
10975}
10976#endif
10977
10978static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10979{
10980	struct tg3 *tp = netdev_priv(dev);
10981
10982	memcpy(ec, &tp->coal, sizeof(*ec));
10983	return 0;
10984}
10985
10986static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10987{
10988	struct tg3 *tp = netdev_priv(dev);
10989	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
10990	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
10991
10992	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
10993		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
10994		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
10995		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
10996		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
10997	}
10998
10999	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11000	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11001	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11002	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11003	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11004	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11005	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11006	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11007	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11008	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11009		return -EINVAL;
11010
11011	/* No rx interrupts will be generated if both are zero */
11012	if ((ec->rx_coalesce_usecs == 0) &&
11013	    (ec->rx_max_coalesced_frames == 0))
11014		return -EINVAL;
11015
11016	/* No tx interrupts will be generated if both are zero */
11017	if ((ec->tx_coalesce_usecs == 0) &&
11018	    (ec->tx_max_coalesced_frames == 0))
11019		return -EINVAL;
11020
11021	/* Only copy relevant parameters, ignore all others. */
11022	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11023	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11024	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11025	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11026	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11027	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11028	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11029	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11030	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11031
11032	if (netif_running(dev)) {
11033		tg3_full_lock(tp, 0);
11034		__tg3_set_coalesce(tp, &tp->coal);
11035		tg3_full_unlock(tp);
11036	}
11037	return 0;
11038}
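/*
 * Userspace reaches this handler through the ETHTOOL_SCOALESCE ioctl,
 * e.g. "ethtool -C eth0 rx-usecs 20 rx-frames 5".  Note the cross-field
 * rule enforced above: the usecs and max-frames limits for a direction
 * may not both be zero, since that would leave no interrupt trigger for
 * that direction at all.
 */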
11039
11040static const struct ethtool_ops tg3_ethtool_ops = {
11041	.get_settings		= tg3_get_settings,
11042	.set_settings		= tg3_set_settings,
11043	.get_drvinfo		= tg3_get_drvinfo,
11044	.get_regs_len		= tg3_get_regs_len,
11045	.get_regs		= tg3_get_regs,
11046	.get_wol		= tg3_get_wol,
11047	.set_wol		= tg3_set_wol,
11048	.get_msglevel		= tg3_get_msglevel,
11049	.set_msglevel		= tg3_set_msglevel,
11050	.nway_reset		= tg3_nway_reset,
11051	.get_link		= ethtool_op_get_link,
11052	.get_eeprom_len		= tg3_get_eeprom_len,
11053	.get_eeprom		= tg3_get_eeprom,
11054	.set_eeprom		= tg3_set_eeprom,
11055	.get_ringparam		= tg3_get_ringparam,
11056	.set_ringparam		= tg3_set_ringparam,
11057	.get_pauseparam		= tg3_get_pauseparam,
11058	.set_pauseparam		= tg3_set_pauseparam,
11059	.get_rx_csum		= tg3_get_rx_csum,
11060	.set_rx_csum		= tg3_set_rx_csum,
11061	.set_tx_csum		= tg3_set_tx_csum,
11062	.set_sg			= ethtool_op_set_sg,
11063	.set_tso		= tg3_set_tso,
11064	.self_test		= tg3_self_test,
11065	.get_strings		= tg3_get_strings,
11066	.phys_id		= tg3_phys_id,
11067	.get_ethtool_stats	= tg3_get_ethtool_stats,
11068	.get_coalesce		= tg3_get_coalesce,
11069	.set_coalesce		= tg3_set_coalesce,
11070	.get_sset_count		= tg3_get_sset_count,
11071};
11072
11073static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11074{
11075	u32 cursize, val, magic;
11076
11077	tp->nvram_size = EEPROM_CHIP_SIZE;
11078
11079	if (tg3_nvram_read(tp, 0, &magic) != 0)
11080		return;
11081
11082	if ((magic != TG3_EEPROM_MAGIC) &&
11083	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11084	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
11085		return;
11086
11087	/*
11088	 * Size the chip by reading offsets at increasing powers of two.
11089	 * When we encounter our validation signature, we know the addressing
11090	 * has wrapped around, and thus have our chip size.
11091	 */
11092	cursize = 0x10;
11093
11094	while (cursize < tp->nvram_size) {
11095		if (tg3_nvram_read(tp, cursize, &val) != 0)
11096			return;
11097
11098		if (val == magic)
11099			break;
11100
11101		cursize <<= 1;
11102	}
11103
11104	tp->nvram_size = cursize;
11105}
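/*
 * Example of the wraparound probe above (illustrative): on a 512-byte
 * part, the reads at 0x10, 0x20, ... return ordinary data until the
 * read at cursize = 0x200 wraps back to offset 0 and returns the magic
 * word, leaving nvram_size = 0x200.
 */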
11106
11107static void __devinit tg3_get_nvram_size(struct tg3 *tp)
11108{
11109	u32 val;
11110
11111	if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
11112	    tg3_nvram_read(tp, 0, &val) != 0)
11113		return;
11114
11115	/* Selfboot format */
11116	if (val != TG3_EEPROM_MAGIC) {
11117		tg3_get_eeprom_size(tp);
11118		return;
11119	}
11120
11121	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
11122		if (val != 0) {
11123			/* This is confusing.  We want to operate on the
11124			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
11125			 * call will read from NVRAM and byteswap the data
11126			 * according to the byteswapping settings for all
11127			 * other register accesses.  This ensures the data we
11128			 * want will always reside in the lower 16-bits.
11129			 * However, the data in NVRAM is in LE format, which
11130			 * means the data from the NVRAM read will always be
11131			 * opposite the endianness of the CPU.  The 16-bit
11132			 * byteswap then brings the data to CPU endianness.
11133			 */
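			/* Worked example with hypothetical contents: a
			 * 512 KB part stores 512 at 0xf2 as LE bytes
			 * 00 02, so on a little-endian CPU the low 16
			 * bits read back as 0x0002; swab16() restores
			 * 0x0200 (512), giving 512 * 1024 bytes below.
			 */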
11134			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
11135			return;
11136		}
11137	}
11138	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11139}
11140
11141static void __devinit tg3_get_nvram_info(struct tg3 *tp)
11142{
11143	u32 nvcfg1;
11144
11145	nvcfg1 = tr32(NVRAM_CFG1);
11146	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
11147		tp->tg3_flags2 |= TG3_FLG2_FLASH;
11148	} else {
11149		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11150		tw32(NVRAM_CFG1, nvcfg1);
11151	}
11152
11153	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
11154	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11155		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
11156		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
11157			tp->nvram_jedecnum = JEDEC_ATMEL;
11158			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11159			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11160			break;
11161		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
11162			tp->nvram_jedecnum = JEDEC_ATMEL;
11163			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
11164			break;
11165		case FLASH_VENDOR_ATMEL_EEPROM:
11166			tp->nvram_jedecnum = JEDEC_ATMEL;
11167			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11168			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11169			break;
11170		case FLASH_VENDOR_ST:
11171			tp->nvram_jedecnum = JEDEC_ST;
11172			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
11173			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11174			break;
11175		case FLASH_VENDOR_SAIFUN:
11176			tp->nvram_jedecnum = JEDEC_SAIFUN;
11177			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
11178			break;
11179		case FLASH_VENDOR_SST_SMALL:
11180		case FLASH_VENDOR_SST_LARGE:
11181			tp->nvram_jedecnum = JEDEC_SST;
11182			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
11183			break;
11184		}
11185	} else {
11186		tp->nvram_jedecnum = JEDEC_ATMEL;
11187		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11188		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11189	}
11190}
11191
11192static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
11193{
11194	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
11195	case FLASH_5752PAGE_SIZE_256:
11196		tp->nvram_pagesize = 256;
11197		break;
11198	case FLASH_5752PAGE_SIZE_512:
11199		tp->nvram_pagesize = 512;
11200		break;
11201	case FLASH_5752PAGE_SIZE_1K:
11202		tp->nvram_pagesize = 1024;
11203		break;
11204	case FLASH_5752PAGE_SIZE_2K:
11205		tp->nvram_pagesize = 2048;
11206		break;
11207	case FLASH_5752PAGE_SIZE_4K:
11208		tp->nvram_pagesize = 4096;
11209		break;
11210	case FLASH_5752PAGE_SIZE_264:
11211		tp->nvram_pagesize = 264;
11212		break;
11213	case FLASH_5752PAGE_SIZE_528:
11214		tp->nvram_pagesize = 528;
11215		break;
11216	}
11217}
11218
11219static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
11220{
11221	u32 nvcfg1;
11222
11223	nvcfg1 = tr32(NVRAM_CFG1);
11224
11225	/* NVRAM protection for TPM */
11226	if (nvcfg1 & (1 << 27))
11227		tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
11228
11229	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11230	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
11231	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
11232		tp->nvram_jedecnum = JEDEC_ATMEL;
11233		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11234		break;
11235	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11236		tp->nvram_jedecnum = JEDEC_ATMEL;
11237		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11238		tp->tg3_flags2 |= TG3_FLG2_FLASH;
11239		break;
11240	case FLASH_5752VENDOR_ST_M45PE10:
11241	case FLASH_5752VENDOR_ST_M45PE20:
11242	case FLASH_5752VENDOR_ST_M45PE40:
11243		tp->nvram_jedecnum = JEDEC_ST;
11244		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11245		tp->tg3_flags2 |= TG3_FLG2_FLASH;
11246		break;
11247	}
11248
11249	if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
11250		tg3_nvram_get_pagesize(tp, nvcfg1);
11251	} else {
11252		/* For eeprom, set pagesize to maximum eeprom size */
11253		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11254
11255		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11256		tw32(NVRAM_CFG1, nvcfg1);
11257	}
11258}
11259
11260static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
11261{
11262	u32 nvcfg1, protect = 0;
11263
11264	nvcfg1 = tr32(NVRAM_CFG1);
11265
11266	/* NVRAM protection for TPM */
11267	if (nvcfg1 & (1 << 27)) {
11268		tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
11269		protect = 1;
11270	}
11271
11272	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11273	switch (nvcfg1) {
11274	case FLASH_5755VENDOR_ATMEL_FLASH_1:
11275	case FLASH_5755VENDOR_ATMEL_FLASH_2:
11276	case FLASH_5755VENDOR_ATMEL_FLASH_3:
11277	case FLASH_5755VENDOR_ATMEL_FLASH_5:
11278		tp->nvram_jedecnum = JEDEC_ATMEL;
11279		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11280		tp->tg3_flags2 |= TG3_FLG2_FLASH;
11281		tp->nvram_pagesize = 264;
11282		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
11283		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
11284			tp->nvram_size = (protect ? 0x3e200 :
11285					  TG3_NVRAM_SIZE_512KB);
11286		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
11287			tp->nvram_size = (protect ? 0x1f200 :
11288					  TG3_NVRAM_SIZE_256KB);
11289		else
11290			tp->nvram_size = (protect ? 0x1f200 :
11291					  TG3_NVRAM_SIZE_128KB);
11292		break;
11293	case FLASH_5752VENDOR_ST_M45PE10:
11294	case FLASH_5752VENDOR_ST_M45PE20:
11295	case FLASH_5752VENDOR_ST_M45PE40:
11296		tp->nvram_jedecnum = JEDEC_ST;
11297		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11298		tp->tg3_flags2 |= TG3_FLG2_FLASH;
11299		tp->nvram_pagesize = 256;
11300		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
11301			tp->nvram_size = (protect ?
11302					  TG3_NVRAM_SIZE_64KB :
11303					  TG3_NVRAM_SIZE_128KB);
11304		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
11305			tp->nvram_size = (protect ?
11306					  TG3_NVRAM_SIZE_64KB :
11307					  TG3_NVRAM_SIZE_256KB);
11308		else
11309			tp->nvram_size = (protect ?
11310					  TG3_NVRAM_SIZE_128KB :
11311					  TG3_NVRAM_SIZE_512KB);
11312		break;
11313	}
11314}
11315
11316static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
11317{
11318	u32 nvcfg1;
11319
11320	nvcfg1 = tr32(NVRAM_CFG1);
11321
11322	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11323	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
11324	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11325	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
11326	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11327		tp->nvram_jedecnum = JEDEC_ATMEL;
11328		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11329		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11330
11331		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11332		tw32(NVRAM_CFG1, nvcfg1);
11333		break;
11334	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11335	case FLASH_5755VENDOR_ATMEL_FLASH_1:
11336	case FLASH_5755VENDOR_ATMEL_FLASH_2:
11337	case FLASH_5755VENDOR_ATMEL_FLASH_3:
11338		tp->nvram_jedecnum = JEDEC_ATMEL;
11339		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11340		tp->tg3_flags2 |= TG3_FLG2_FLASH;
11341		tp->nvram_pagesize = 264;
11342		break;
11343	case FLASH_5752VENDOR_ST_M45PE10:
11344	case FLASH_5752VENDOR_ST_M45PE20:
11345	case FLASH_5752VENDOR_ST_M45PE40:
11346		tp->nvram_jedecnum = JEDEC_ST;
11347		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11348		tp->tg3_flags2 |= TG3_FLG2_FLASH;
11349		tp->nvram_pagesize = 256;
11350		break;
11351	}
11352}
11353
11354static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
11355{
11356	u32 nvcfg1, protect = 0;
11357
11358	nvcfg1 = tr32(NVRAM_CFG1);
11359
11360	/* NVRAM protection for TPM */
11361	if (nvcfg1 & (1 << 27)) {
11362		tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
11363		protect = 1;
11364	}
11365
11366	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11367	switch (nvcfg1) {
11368	case FLASH_5761VENDOR_ATMEL_ADB021D:
11369	case FLASH_5761VENDOR_ATMEL_ADB041D:
11370	case FLASH_5761VENDOR_ATMEL_ADB081D:
11371	case FLASH_5761VENDOR_ATMEL_ADB161D:
11372	case FLASH_5761VENDOR_ATMEL_MDB021D:
11373	case FLASH_5761VENDOR_ATMEL_MDB041D:
11374	case FLASH_5761VENDOR_ATMEL_MDB081D:
11375	case FLASH_5761VENDOR_ATMEL_MDB161D:
11376		tp->nvram_jedecnum = JEDEC_ATMEL;
11377		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11378		tp->tg3_flags2 |= TG3_FLG2_FLASH;
11379		tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
11380		tp->nvram_pagesize = 256;
11381		break;
11382	case FLASH_5761VENDOR_ST_A_M45PE20:
11383	case FLASH_5761VENDOR_ST_A_M45PE40:
11384	case FLASH_5761VENDOR_ST_A_M45PE80:
11385	case FLASH_5761VENDOR_ST_A_M45PE16:
11386	case FLASH_5761VENDOR_ST_M_M45PE20:
11387	case FLASH_5761VENDOR_ST_M_M45PE40:
11388	case FLASH_5761VENDOR_ST_M_M45PE80:
11389	case FLASH_5761VENDOR_ST_M_M45PE16:
11390		tp->nvram_jedecnum = JEDEC_ST;
11391		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11392		tp->tg3_flags2 |= TG3_FLG2_FLASH;
11393		tp->nvram_pagesize = 256;
11394		break;
11395	}
11396
11397	if (protect) {
11398		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
11399	} else {
11400		switch (nvcfg1) {
11401		case FLASH_5761VENDOR_ATMEL_ADB161D:
11402		case FLASH_5761VENDOR_ATMEL_MDB161D:
11403		case FLASH_5761VENDOR_ST_A_M45PE16:
11404		case FLASH_5761VENDOR_ST_M_M45PE16:
11405			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
11406			break;
11407		case FLASH_5761VENDOR_ATMEL_ADB081D:
11408		case FLASH_5761VENDOR_ATMEL_MDB081D:
11409		case FLASH_5761VENDOR_ST_A_M45PE80:
11410		case FLASH_5761VENDOR_ST_M_M45PE80:
11411			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
11412			break;
11413		case FLASH_5761VENDOR_ATMEL_ADB041D:
11414		case FLASH_5761VENDOR_ATMEL_MDB041D:
11415		case FLASH_5761VENDOR_ST_A_M45PE40:
11416		case FLASH_5761VENDOR_ST_M_M45PE40:
11417			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11418			break;
11419		case FLASH_5761VENDOR_ATMEL_ADB021D:
11420		case FLASH_5761VENDOR_ATMEL_MDB021D:
11421		case FLASH_5761VENDOR_ST_A_M45PE20:
11422		case FLASH_5761VENDOR_ST_M_M45PE20:
11423			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11424			break;
11425		}
11426	}
11427}
11428
11429static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
11430{
11431	tp->nvram_jedecnum = JEDEC_ATMEL;
11432	tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11433	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11434}
11435
11436static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
11437{
11438	u32 nvcfg1;
11439
11440	nvcfg1 = tr32(NVRAM_CFG1);
11441
11442	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11443	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11444	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11445		tp->nvram_jedecnum = JEDEC_ATMEL;
11446		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11447		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11448
11449		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11450		tw32(NVRAM_CFG1, nvcfg1);
11451		return;
11452	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11453	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
11454	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
11455	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
11456	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
11457	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
11458	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
11459		tp->nvram_jedecnum = JEDEC_ATMEL;
11460		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11461		tp->tg3_flags2 |= TG3_FLG2_FLASH;
11462
11463		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11464		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11465		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
11466		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
11467			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11468			break;
11469		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
11470		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
11471			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11472			break;
11473		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
11474		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
11475			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11476			break;
11477		}
11478		break;
11479	case FLASH_5752VENDOR_ST_M45PE10:
11480	case FLASH_5752VENDOR_ST_M45PE20:
11481	case FLASH_5752VENDOR_ST_M45PE40:
11482		tp->nvram_jedecnum = JEDEC_ST;
11483		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11484		tp->tg3_flags2 |= TG3_FLG2_FLASH;
11485
11486		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11487		case FLASH_5752VENDOR_ST_M45PE10:
11488			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11489			break;
11490		case FLASH_5752VENDOR_ST_M45PE20:
11491			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11492			break;
11493		case FLASH_5752VENDOR_ST_M45PE40:
11494			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11495			break;
11496		}
11497		break;
11498	default:
11499		tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM;
11500		return;
11501	}
11502
11503	tg3_nvram_get_pagesize(tp, nvcfg1);
11504	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
11505		tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
11506}
11507
11508
11509static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
11510{
11511	u32 nvcfg1;
11512
11513	nvcfg1 = tr32(NVRAM_CFG1);
11514
11515	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11516	case FLASH_5717VENDOR_ATMEL_EEPROM:
11517	case FLASH_5717VENDOR_MICRO_EEPROM:
11518		tp->nvram_jedecnum = JEDEC_ATMEL;
11519		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11520		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11521
11522		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11523		tw32(NVRAM_CFG1, nvcfg1);
11524		return;
11525	case FLASH_5717VENDOR_ATMEL_MDB011D:
11526	case FLASH_5717VENDOR_ATMEL_ADB011B:
11527	case FLASH_5717VENDOR_ATMEL_ADB011D:
11528	case FLASH_5717VENDOR_ATMEL_MDB021D:
11529	case FLASH_5717VENDOR_ATMEL_ADB021B:
11530	case FLASH_5717VENDOR_ATMEL_ADB021D:
11531	case FLASH_5717VENDOR_ATMEL_45USPT:
11532		tp->nvram_jedecnum = JEDEC_ATMEL;
11533		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11534		tp->tg3_flags2 |= TG3_FLG2_FLASH;
11535
11536		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11537		case FLASH_5717VENDOR_ATMEL_MDB021D:
11538		case FLASH_5717VENDOR_ATMEL_ADB021B:
11539		case FLASH_5717VENDOR_ATMEL_ADB021D:
11540			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11541			break;
11542		default:
11543			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11544			break;
11545		}
11546		break;
11547	case FLASH_5717VENDOR_ST_M_M25PE10:
11548	case FLASH_5717VENDOR_ST_A_M25PE10:
11549	case FLASH_5717VENDOR_ST_M_M45PE10:
11550	case FLASH_5717VENDOR_ST_A_M45PE10:
11551	case FLASH_5717VENDOR_ST_M_M25PE20:
11552	case FLASH_5717VENDOR_ST_A_M25PE20:
11553	case FLASH_5717VENDOR_ST_M_M45PE20:
11554	case FLASH_5717VENDOR_ST_A_M45PE20:
11555	case FLASH_5717VENDOR_ST_25USPT:
11556	case FLASH_5717VENDOR_ST_45USPT:
11557		tp->nvram_jedecnum = JEDEC_ST;
11558		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11559		tp->tg3_flags2 |= TG3_FLG2_FLASH;
11560
11561		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11562		case FLASH_5717VENDOR_ST_M_M25PE20:
11563		case FLASH_5717VENDOR_ST_A_M25PE20:
11564		case FLASH_5717VENDOR_ST_M_M45PE20:
11565		case FLASH_5717VENDOR_ST_A_M45PE20:
11566			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11567			break;
11568		default:
11569			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11570			break;
11571		}
11572		break;
11573	default:
11574		tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM;
11575		return;
11576	}
11577
11578	tg3_nvram_get_pagesize(tp, nvcfg1);
11579	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
11580		tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
11581}
11582
11583/* Chips other than 5700/5701 use the NVRAM for fetching info. */
11584static void __devinit tg3_nvram_init(struct tg3 *tp)
11585{
11586	tw32_f(GRC_EEPROM_ADDR,
11587	     (EEPROM_ADDR_FSM_RESET |
11588	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
11589	       EEPROM_ADDR_CLKPERD_SHIFT)));
11590
11591	msleep(1);
11592
11593	/* Enable seeprom accesses. */
11594	tw32_f(GRC_LOCAL_CTRL,
11595	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
11596	udelay(100);
11597
11598	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11599	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
11600		tp->tg3_flags |= TG3_FLAG_NVRAM;
11601
11602		if (tg3_nvram_lock(tp)) {
11603			netdev_warn(tp->dev,
11604				    "Cannot get nvram lock, %s failed\n",
11605				    __func__);
11606			return;
11607		}
11608		tg3_enable_nvram_access(tp);
11609
11610		tp->nvram_size = 0;
11611
11612		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
11613			tg3_get_5752_nvram_info(tp);
11614		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11615			tg3_get_5755_nvram_info(tp);
11616		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11617			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11618			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11619			tg3_get_5787_nvram_info(tp);
11620		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
11621			tg3_get_5761_nvram_info(tp);
11622		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11623			tg3_get_5906_nvram_info(tp);
11624		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
11625			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
11626			tg3_get_57780_nvram_info(tp);
11627		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
11628			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
11629			tg3_get_5717_nvram_info(tp);
11630		else
11631			tg3_get_nvram_info(tp);
11632
11633		if (tp->nvram_size == 0)
11634			tg3_get_nvram_size(tp);
11635
11636		tg3_disable_nvram_access(tp);
11637		tg3_nvram_unlock(tp);
11638
11639	} else {
11640		tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
11641
11642		tg3_get_eeprom_size(tp);
11643	}
11644}
11645
11646static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
11647				    u32 offset, u32 len, u8 *buf)
11648{
11649	int i, j, rc = 0;
11650	u32 val;
11651
11652	for (i = 0; i < len; i += 4) {
11653		u32 addr;
11654		__be32 data;
11655
11656		addr = offset + i;
11657
11658		memcpy(&data, buf + i, 4);
11659
11660		/*
11661		 * The SEEPROM interface expects the data to always be opposite
11662		 * the native endian format.  We accomplish this by reversing
11663		 * all the operations that would have been performed on the
11664		 * data from a call to tg3_nvram_read_be32().
11665		 */
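		/* For instance, hypothetical buffer bytes de ad be ef
		 * (the big-endian value 0xdeadbeef) are written to the
		 * data register fully byte-reversed, as 0xefbeadde.
		 */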
11666		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
11667
11668		val = tr32(GRC_EEPROM_ADDR);
11669		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
11670
11671		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
11672			EEPROM_ADDR_READ);
11673		tw32(GRC_EEPROM_ADDR, val |
11674			(0 << EEPROM_ADDR_DEVID_SHIFT) |
11675			(addr & EEPROM_ADDR_ADDR_MASK) |
11676			EEPROM_ADDR_START |
11677			EEPROM_ADDR_WRITE);
11678
11679		for (j = 0; j < 1000; j++) {
11680			val = tr32(GRC_EEPROM_ADDR);
11681
11682			if (val & EEPROM_ADDR_COMPLETE)
11683				break;
11684			msleep(1);
11685		}
11686		if (!(val & EEPROM_ADDR_COMPLETE)) {
11687			rc = -EBUSY;
11688			break;
11689		}
11690	}
11691
11692	return rc;
11693}
11694
11695/* offset and length are dword aligned */
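/* Unbuffered flash parts cannot be modified in place: each affected page
 * is read into a scratch buffer, merged with the caller's data, erased,
 * and then rewritten word by word.  As an example, with a 256-byte page
 * an offset of 0x1234 yields phy_addr = 0x1200 (the page base) and
 * page_off = 0x34 (the position within that page).
 */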
11696static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
11697		u8 *buf)
11698{
11699	int ret = 0;
11700	u32 pagesize = tp->nvram_pagesize;
11701	u32 pagemask = pagesize - 1;
11702	u32 nvram_cmd;
11703	u8 *tmp;
11704
11705	tmp = kmalloc(pagesize, GFP_KERNEL);
11706	if (tmp == NULL)
11707		return -ENOMEM;
11708
11709	while (len) {
11710		int j;
11711		u32 phy_addr, page_off, size;
11712
11713		phy_addr = offset & ~pagemask;
11714
11715		for (j = 0; j < pagesize; j += 4) {
11716			ret = tg3_nvram_read_be32(tp, phy_addr + j,
11717						  (__be32 *) (tmp + j));
11718			if (ret)
11719				break;
11720		}
11721		if (ret)
11722			break;
11723
11724		page_off = offset & pagemask;
11725		size = pagesize;
11726		if (len < size)
11727			size = len;
11728
11729		len -= size;
11730
11731		memcpy(tmp + page_off, buf, size);
11732
11733		offset = offset + (pagesize - page_off);
11734
11735		tg3_enable_nvram_access(tp);
11736
11737		/*
11738		 * Before we can erase the flash page, we need
11739		 * to issue a special "write enable" command.
11740		 */
11741		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11742
11743		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11744			break;
11745
11746		/* Erase the target page */
11747		tw32(NVRAM_ADDR, phy_addr);
11748
11749		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
11750			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
11751
11752		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11753			break;
11754
11755		/* Issue another write enable to start the write. */
11756		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11757
11758		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11759			break;
11760
11761		for (j = 0; j < pagesize; j += 4) {
11762			__be32 data;
11763
11764			data = *((__be32 *) (tmp + j));
11765
11766			tw32(NVRAM_WRDATA, be32_to_cpu(data));
11767
11768			tw32(NVRAM_ADDR, phy_addr + j);
11769
11770			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
11771				NVRAM_CMD_WR;
11772
11773			if (j == 0)
11774				nvram_cmd |= NVRAM_CMD_FIRST;
11775			else if (j == (pagesize - 4))
11776				nvram_cmd |= NVRAM_CMD_LAST;
11777
11778			if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11779				break;
11780		}
11781		if (ret)
11782			break;
11783	}
11784
11785	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11786	tg3_nvram_exec_cmd(tp, nvram_cmd);
11787
11788	kfree(tmp);
11789
11790	return ret;
11791}
11792
11793/* offset and length are dword aligned */
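/* Buffered parts accept streaming writes, so no erase pass is needed:
 * each dword is written individually, with NVRAM_CMD_FIRST marking the
 * start of a page (or of the transfer) and NVRAM_CMD_LAST marking the
 * end of a page or of the data.
 */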
11794static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
11795		u8 *buf)
11796{
11797	int i, ret = 0;
11798
11799	for (i = 0; i < len; i += 4, offset += 4) {
11800		u32 page_off, phy_addr, nvram_cmd;
11801		__be32 data;
11802
11803		memcpy(&data, buf + i, 4);
11804		tw32(NVRAM_WRDATA, be32_to_cpu(data));
11805
11806		page_off = offset % tp->nvram_pagesize;
11807
11808		phy_addr = tg3_nvram_phys_addr(tp, offset);
11809
11810		tw32(NVRAM_ADDR, phy_addr);
11811
11812		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
11813
11814		if (page_off == 0 || i == 0)
11815			nvram_cmd |= NVRAM_CMD_FIRST;
11816		if (page_off == (tp->nvram_pagesize - 4))
11817			nvram_cmd |= NVRAM_CMD_LAST;
11818
11819		if (i == (len - 4))
11820			nvram_cmd |= NVRAM_CMD_LAST;
11821
11822		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
11823		    !(tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
11824		    (tp->nvram_jedecnum == JEDEC_ST) &&
11825		    (nvram_cmd & NVRAM_CMD_FIRST)) {
11826
11827			if ((ret = tg3_nvram_exec_cmd(tp,
11828				NVRAM_CMD_WREN | NVRAM_CMD_GO |
11829				NVRAM_CMD_DONE)))
11830
11831				break;
11832		}
11833		if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11834			/* We always do complete word writes to eeprom. */
11835			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
11836		}
11837
11838		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11839			break;
11840	}
11841	return ret;
11842}
11843
11844/* offset and length are dword aligned */
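/* Top-level write dispatcher: it temporarily drops the write-protect
 * GPIO where the board wires one up, then routes the request either to
 * the legacy SEEPROM path or, with the NVRAM lock held and write mode
 * enabled in GRC_MODE, to the buffered or unbuffered flash
 * implementation above.
 */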
11845static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
11846{
11847	int ret;
11848
11849	if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
11850		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
11851		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
11852		udelay(40);
11853	}
11854
11855	if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
11856		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
11857	} else {
11858		u32 grc_mode;
11859
11860		ret = tg3_nvram_lock(tp);
11861		if (ret)
11862			return ret;
11863
11864		tg3_enable_nvram_access(tp);
11865		if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
11866		    !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM))
11867			tw32(NVRAM_WRITE1, 0x406);
11868
11869		grc_mode = tr32(GRC_MODE);
11870		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
11871
11872		if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
11873			!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11874
11875			ret = tg3_nvram_write_block_buffered(tp, offset, len,
11876				buf);
11877		} else {
11878			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
11879				buf);
11880		}
11881
11882		grc_mode = tr32(GRC_MODE);
11883		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
11884
11885		tg3_disable_nvram_access(tp);
11886		tg3_nvram_unlock(tp);
11887	}
11888
11889	if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
11890		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
11891		udelay(40);
11892	}
11893
11894	return ret;
11895}
11896
11897struct subsys_tbl_ent {
11898	u16 subsys_vendor, subsys_devid;
11899	u32 phy_id;
11900};
11901
11902static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
11903	/* Broadcom boards. */
11904	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
11905	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
11906	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
11907	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
11908	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
11909	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
11910	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
11911	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
11912	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
11913	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
11914	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
11915	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
11916	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
11917	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
11918	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
11919	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
11920	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
11921	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
11922	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
11923	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
11924	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
11925	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
11926
11927	/* 3com boards. */
11928	{ TG3PCI_SUBVENDOR_ID_3COM,
11929	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
11930	{ TG3PCI_SUBVENDOR_ID_3COM,
11931	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
11932	{ TG3PCI_SUBVENDOR_ID_3COM,
11933	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
11934	{ TG3PCI_SUBVENDOR_ID_3COM,
11935	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
11936	{ TG3PCI_SUBVENDOR_ID_3COM,
11937	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
11938
11939	/* DELL boards. */
11940	{ TG3PCI_SUBVENDOR_ID_DELL,
11941	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
11942	{ TG3PCI_SUBVENDOR_ID_DELL,
11943	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
11944	{ TG3PCI_SUBVENDOR_ID_DELL,
11945	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
11946	{ TG3PCI_SUBVENDOR_ID_DELL,
11947	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
11948
11949	/* Compaq boards. */
11950	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
11951	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
11952	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
11953	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
11954	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
11955	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
11956	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
11957	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
11958	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
11959	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
11960
11961	/* IBM boards. */
11962	{ TG3PCI_SUBVENDOR_ID_IBM,
11963	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
11964};
11965
11966static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
11967{
11968	int i;
11969
11970	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
11971		if ((subsys_id_to_phy_id[i].subsys_vendor ==
11972		     tp->pdev->subsystem_vendor) &&
11973		    (subsys_id_to_phy_id[i].subsys_devid ==
11974		     tp->pdev->subsystem_device))
11975			return &subsys_id_to_phy_id[i];
11976	}
11977	return NULL;
11978}
11979
11980static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
11981{
11982	u32 val;
11983	u16 pmcsr;
11984
11985	/* On some early chips the SRAM cannot be accessed in D3hot state,
11986	 * so we need to make sure we're in D0.
11987	 */
11988	pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
11989	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
11990	pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
11991	msleep(1);
11992
11993	/* Make sure register accesses (indirect or otherwise)
11994	 * will function correctly.
11995	 */
11996	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11997			       tp->misc_host_ctrl);
11998
11999	/* The memory arbiter has to be enabled in order for SRAM accesses
12000	 * to succeed.  Normally on powerup the tg3 chip firmware will make
12001	 * sure it is enabled, but other entities such as system netboot
12002	 * code might disable it.
12003	 */
12004	val = tr32(MEMARB_MODE);
12005	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
12006
12007	tp->phy_id = TG3_PHY_ID_INVALID;
12008	tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12009
12010	/* Assume an onboard, WOL-capable device by default.  */
12011	tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
12012
12013	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12014		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
12015			tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
12016			tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
12017		}
12018		val = tr32(VCPU_CFGSHDW);
12019		if (val & VCPU_CFGSHDW_ASPM_DBNC)
12020			tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
12021		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
12022		    (val & VCPU_CFGSHDW_WOL_MAGPKT))
12023			tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
12024		goto done;
12025	}
12026
12027	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
12028	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
12029		u32 nic_cfg, led_cfg;
12030		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
12031		int eeprom_phy_serdes = 0;
12032
12033		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
12034		tp->nic_sram_data_cfg = nic_cfg;
12035
12036		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
12037		ver >>= NIC_SRAM_DATA_VER_SHIFT;
12038		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
12039		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
12040		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
12041		    (ver > 0) && (ver < 0x100))
12042			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
12043
12044		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12045			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
12046
12047		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
12048		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
12049			eeprom_phy_serdes = 1;
12050
12051		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
12052		if (nic_phy_id != 0) {
12053			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
12054			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
12055
12056			eeprom_phy_id  = (id1 >> 16) << 10;
12057			eeprom_phy_id |= (id2 & 0xfc00) << 16;
12058			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
12059		} else
12060			eeprom_phy_id = 0;
12061
12062		tp->phy_id = eeprom_phy_id;
12063		if (eeprom_phy_serdes) {
12064			if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
12065				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12066			else
12067				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
12068		}
12069
12070		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
12071			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
12072				    SHASTA_EXT_LED_MODE_MASK);
12073		else
12074			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
12075
12076		switch (led_cfg) {
12077		default:
12078		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
12079			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12080			break;
12081
12082		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
12083			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12084			break;
12085
12086		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
12087			tp->led_ctrl = LED_CTRL_MODE_MAC;
12088
12089			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
12090			 * read on some older 5700/5701 bootcode.
12091			 */
12092			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12093			    ASIC_REV_5700 ||
12094			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
12095			    ASIC_REV_5701)
12096				tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12097
12098			break;
12099
12100		case SHASTA_EXT_LED_SHARED:
12101			tp->led_ctrl = LED_CTRL_MODE_SHARED;
12102			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
12103			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
12104				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12105						 LED_CTRL_MODE_PHY_2);
12106			break;
12107
12108		case SHASTA_EXT_LED_MAC:
12109			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
12110			break;
12111
12112		case SHASTA_EXT_LED_COMBO:
12113			tp->led_ctrl = LED_CTRL_MODE_COMBO;
12114			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
12115				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12116						 LED_CTRL_MODE_PHY_2);
12117			break;
12118
12119		}
12120
12121		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12122		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
12123		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
12124			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12125
12126		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
12127			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12128
12129		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
12130			tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
12131			if ((tp->pdev->subsystem_vendor ==
12132			     PCI_VENDOR_ID_ARIMA) &&
12133			    (tp->pdev->subsystem_device == 0x205a ||
12134			     tp->pdev->subsystem_device == 0x2063))
12135				tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
12136		} else {
12137			tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
12138			tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
12139		}
12140
12141		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
12142			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
12143			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
12144				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
12145		}
12146
12147		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
12148			(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
12149			tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
12150
12151		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
12152		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
12153			tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
12154
12155		if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
12156		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE))
12157			tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
12158
12159		if (cfg2 & (1 << 17))
12160			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
12161
12162		/* serdes signal pre-emphasis in register 0x590 is set by
12163		 * the bootcode if bit 18 is set */
12164		if (cfg2 & (1 << 18))
12165			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
12166
12167		if (((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12168		      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
12169		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
12170			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
12171
12172		if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
12173		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
12174		    !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) {
12175			u32 cfg3;
12176
12177			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
12178			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
12179				tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
12180		}
12181
12182		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
12183			tp->tg3_flags3 |= TG3_FLG3_RGMII_INBAND_DISABLE;
12184		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
12185			tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
12186		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
12187			tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
12188	}
12189done:
12190	device_init_wakeup(&tp->pdev->dev, tp->tg3_flags & TG3_FLAG_WOL_CAP);
12191	device_set_wakeup_enable(&tp->pdev->dev,
12192				 tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
12193}
12194
12195static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
12196{
12197	int i;
12198	u32 val;
12199
12200	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
12201	tw32(OTP_CTRL, cmd);
12202
12203	/* Wait for up to 1 ms for command to execute. */
12204	for (i = 0; i < 100; i++) {
12205		val = tr32(OTP_STATUS);
12206		if (val & OTP_STATUS_CMD_DONE)
12207			break;
12208		udelay(10);
12209	}
12210
12211	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
12212}
12213
12214/* Read the gphy configuration from the OTP region of the chip.  The gphy
12215 * configuration is a 32-bit value that straddles the alignment boundary.
12216 * We do two 32-bit reads and then shift and merge the results.
12217 */
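/* For example, hypothetical reads of thalf_otp = 0x1111aaaa and
 * bhalf_otp = 0xbbbb2222 merge into the 32-bit gphy configuration
 * 0xaaaabbbb.
 */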
12218static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
12219{
12220	u32 bhalf_otp, thalf_otp;
12221
12222	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
12223
12224	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
12225		return 0;
12226
12227	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
12228
12229	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12230		return 0;
12231
12232	thalf_otp = tr32(OTP_READ_DATA);
12233
12234	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
12235
12236	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12237		return 0;
12238
12239	bhalf_otp = tr32(OTP_READ_DATA);
12240
12241	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
12242}
12243
12244static int __devinit tg3_phy_probe(struct tg3 *tp)
12245{
12246	u32 hw_phy_id_1, hw_phy_id_2;
12247	u32 hw_phy_id, hw_phy_id_masked;
12248	int err;
12249
12250	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
12251		return tg3_phy_init(tp);
12252
12253	/* Reading the PHY ID register can conflict with ASF
12254	 * firmware access to the PHY hardware.
12255	 */
12256	err = 0;
12257	if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
12258	    (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
12259		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
12260	} else {
12261		/* Now read the physical PHY_ID from the chip and verify
12262		 * that it is sane.  If it doesn't look good, we fall back
12263		 * first to the PHY ID found in the eeprom area, and
12264		 * failing that to the hard-coded subsys device table.
12265		 */
12266		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
12267		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
12268
12269		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
12270		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
12271		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
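		/* The packed format places PHYSID1[15:0] in bits 25:10,
		 * PHYSID2[15:10] in bits 31:26, and PHYSID2[9:0] in
		 * bits 9:0 of the driver's internal phy_id.
		 */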
12272
12273		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
12274	}
12275
12276	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
12277		tp->phy_id = hw_phy_id;
12278		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
12279			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12280		else
12281			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
12282	} else {
12283		if (tp->phy_id != TG3_PHY_ID_INVALID) {
12284			/* Do nothing, phy ID already set up in
12285			 * tg3_get_eeprom_hw_cfg().
12286			 */
12287		} else {
12288			struct subsys_tbl_ent *p;
12289
12290			/* No eeprom signature?  Try the hardcoded
12291			 * subsys device table.
12292			 */
12293			p = tg3_lookup_by_subsys(tp);
12294			if (!p)
12295				return -ENODEV;
12296
12297			tp->phy_id = p->phy_id;
12298			if (!tp->phy_id ||
12299			    tp->phy_id == TG3_PHY_ID_BCM8002)
12300				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
12301		}
12302	}
12303
12304	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
12305	    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
12306	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
12307		u32 bmsr, adv_reg, tg3_ctrl, mask;
12308
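		/* BMSR link status is latched low, so read it twice:
		 * the first read clears any stale latched value.
		 */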
12309		tg3_readphy(tp, MII_BMSR, &bmsr);
12310		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
12311		    (bmsr & BMSR_LSTATUS))
12312			goto skip_phy_reset;
12313
12314		err = tg3_phy_reset(tp);
12315		if (err)
12316			return err;
12317
12318		adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
12319			   ADVERTISE_100HALF | ADVERTISE_100FULL |
12320			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
12321		tg3_ctrl = 0;
12322		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
12323			tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
12324				    MII_TG3_CTRL_ADV_1000_FULL);
12325			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
12326			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
12327				tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
12328					     MII_TG3_CTRL_ENABLE_AS_MASTER);
12329		}
12330
12331		mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
12332			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
12333			ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
12334		if (!tg3_copper_is_advertising_all(tp, mask)) {
12335			tg3_writephy(tp, MII_ADVERTISE, adv_reg);
12336
12337			if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12338				tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
12339
12340			tg3_writephy(tp, MII_BMCR,
12341				     BMCR_ANENABLE | BMCR_ANRESTART);
12342		}
12343		tg3_phy_set_wirespeed(tp);
12344
12345		tg3_writephy(tp, MII_ADVERTISE, adv_reg);
12346		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12347			tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
12348	}
12349
12350skip_phy_reset:
12351	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
12352		err = tg3_init_5401phy_dsp(tp);
12353		if (err)
12354			return err;
12355
12356		err = tg3_init_5401phy_dsp(tp);
12357	}
12358
12359	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12360		tp->link_config.advertising =
12361			(ADVERTISED_1000baseT_Half |
12362			 ADVERTISED_1000baseT_Full |
12363			 ADVERTISED_Autoneg |
12364			 ADVERTISED_FIBRE);
12365	if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
12366		tp->link_config.advertising &=
12367			~(ADVERTISED_1000baseT_Half |
12368			  ADVERTISED_1000baseT_Full);
12369
12370	return err;
12371}
12372
12373static void __devinit tg3_read_vpd(struct tg3 *tp)
12374{
12375	u8 vpd_data[TG3_NVM_VPD_LEN];
12376	unsigned int block_end, rosize, len;
12377	int j, i = 0;
12378	u32 magic;
12379
12380	if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
12381	    tg3_nvram_read(tp, 0x0, &magic))
12382		goto out_not_found;
12383
12384	if (magic == TG3_EEPROM_MAGIC) {
12385		for (i = 0; i < TG3_NVM_VPD_LEN; i += 4) {
12386			u32 tmp;
12387
12388			/* The data is in little-endian format in NVRAM.
12389			 * Use the big-endian read routines to preserve
12390			 * the byte order as it exists in NVRAM.
12391			 */
12392			if (tg3_nvram_read_be32(tp, TG3_NVM_VPD_OFF + i, &tmp))
12393				goto out_not_found;
12394
12395			memcpy(&vpd_data[i], &tmp, sizeof(tmp));
12396		}
12397	} else {
12398		ssize_t cnt;
12399		unsigned int pos = 0;
12400
12401		for (; pos < TG3_NVM_VPD_LEN && i < 3; i++, pos += cnt) {
12402			cnt = pci_read_vpd(tp->pdev, pos,
12403					   TG3_NVM_VPD_LEN - pos,
12404					   &vpd_data[pos]);
12405		if (cnt == -ETIMEDOUT || cnt == -EINTR)
12406				cnt = 0;
12407			else if (cnt < 0)
12408				goto out_not_found;
12409		}
12410		if (pos != TG3_NVM_VPD_LEN)
12411			goto out_not_found;
12412	}
12413
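	/* The VPD image is a sequence of tagged resources.  Locate the
	 * read-only (RO) large-resource section first, then search it
	 * for the individual keyword fields below.
	 */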
12414	i = pci_vpd_find_tag(vpd_data, 0, TG3_NVM_VPD_LEN,
12415			     PCI_VPD_LRDT_RO_DATA);
12416	if (i < 0)
12417		goto out_not_found;
12418
12419	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
12420	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
12421	i += PCI_VPD_LRDT_TAG_SIZE;
12422
12423	if (block_end > TG3_NVM_VPD_LEN)
12424		goto out_not_found;
12425
12426	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
12427				      PCI_VPD_RO_KEYWORD_MFR_ID);
12428	if (j > 0) {
12429		len = pci_vpd_info_field_size(&vpd_data[j]);
12430
12431		j += PCI_VPD_INFO_FLD_HDR_SIZE;
12432		if (j + len > block_end || len != 4 ||
12433		    memcmp(&vpd_data[j], "1028", 4))
12434			goto partno;
12435
12436		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
12437					      PCI_VPD_RO_KEYWORD_VENDOR0);
12438		if (j < 0)
12439			goto partno;
12440
12441		len = pci_vpd_info_field_size(&vpd_data[j]);
12442
12443		j += PCI_VPD_INFO_FLD_HDR_SIZE;
12444		if (j + len > block_end || len >= TG3_VER_SIZE)
12445			goto partno;
12446
12447		memcpy(tp->fw_ver, &vpd_data[j], len);
12448		strncat(tp->fw_ver, " bc ", TG3_VER_SIZE - len - 1);
12449	}
12450
12451partno:
12452	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
12453				      PCI_VPD_RO_KEYWORD_PARTNO);
12454	if (i < 0)
12455		goto out_not_found;
12456
12457	len = pci_vpd_info_field_size(&vpd_data[i]);
12458
12459	i += PCI_VPD_INFO_FLD_HDR_SIZE;
12460	if (len > TG3_BPN_SIZE ||
12461	    (len + i) > TG3_NVM_VPD_LEN)
12462		goto out_not_found;
12463
12464	memcpy(tp->board_part_number, &vpd_data[i], len);
12465
12466	return;
12467
12468out_not_found:
12469	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12470		strcpy(tp->board_part_number, "BCM95906");
12471	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
12472		 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
12473		strcpy(tp->board_part_number, "BCM57780");
12474	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
12475		 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
12476		strcpy(tp->board_part_number, "BCM57760");
12477	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
12478		 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
12479		strcpy(tp->board_part_number, "BCM57790");
12480	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
12481		 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
12482		strcpy(tp->board_part_number, "BCM57788");
12483	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12484		 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
12485		strcpy(tp->board_part_number, "BCM57761");
12486	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12487		 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
12488		strcpy(tp->board_part_number, "BCM57765");
12489	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12490		 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
12491		strcpy(tp->board_part_number, "BCM57781");
12492	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12493		 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
12494		strcpy(tp->board_part_number, "BCM57785");
12495	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12496		 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
12497		strcpy(tp->board_part_number, "BCM57791");
12498	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12499		 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
12500		strcpy(tp->board_part_number, "BCM57795");
12501	else
12502		strcpy(tp->board_part_number, "none");
12503}
12504
12505static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
12506{
12507	u32 val;
12508
12509	if (tg3_nvram_read(tp, offset, &val) ||
12510	    (val & 0xfc000000) != 0x0c000000 ||
12511	    tg3_nvram_read(tp, offset + 4, &val) ||
12512	    val != 0)
12513		return 0;
12514
12515	return 1;
12516}
12517
12518static void __devinit tg3_read_bc_ver(struct tg3 *tp)
12519{
12520	u32 val, offset, start, ver_offset;
12521	int i, dst_off;
12522	bool newver = false;
12523
12524	if (tg3_nvram_read(tp, 0xc, &offset) ||
12525	    tg3_nvram_read(tp, 0x4, &start))
12526		return;
12527
12528	offset = tg3_nvram_logical_addr(tp, offset);
12529
12530	if (tg3_nvram_read(tp, offset, &val))
12531		return;
12532
12533	if ((val & 0xfc000000) == 0x0c000000) {
12534		if (tg3_nvram_read(tp, offset + 4, &val))
12535			return;
12536
12537		if (val == 0)
12538			newver = true;
12539	}
12540
12541	dst_off = strlen(tp->fw_ver);
12542
12543	if (newver) {
12544		if (TG3_VER_SIZE - dst_off < 16 ||
12545		    tg3_nvram_read(tp, offset + 8, &ver_offset))
12546			return;
12547
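		/* ver_offset is relative to the image load address held
		 * in 'start'; rebase it onto the image's NVRAM offset.
		 */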
12548		offset = offset + ver_offset - start;
12549		for (i = 0; i < 16; i += 4) {
12550			__be32 v;
12551			if (tg3_nvram_read_be32(tp, offset + i, &v))
12552				return;
12553
12554			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
12555		}
12556	} else {
12557		u32 major, minor;
12558
12559		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
12560			return;
12561
12562		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
12563			TG3_NVM_BCVER_MAJSFT;
12564		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
12565		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
12566			 "v%d.%02d", major, minor);
12567	}
12568}
12569
12570static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
12571{
12572	u32 val, major, minor;
12573
12574	/* Use native endian representation */
12575	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
12576		return;
12577
12578	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
12579		TG3_NVM_HWSB_CFG1_MAJSFT;
12580	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
12581		TG3_NVM_HWSB_CFG1_MINSFT;
12582
12583	snprintf(&tp->fw_ver[0], TG3_VER_SIZE, "sb v%d.%02d", major, minor);
12584}
12585
12586static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
12587{
12588	u32 offset, major, minor, build;
12589
12590	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
12591
12592	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
12593		return;
12594
12595	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
12596	case TG3_EEPROM_SB_REVISION_0:
12597		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
12598		break;
12599	case TG3_EEPROM_SB_REVISION_2:
12600		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
12601		break;
12602	case TG3_EEPROM_SB_REVISION_3:
12603		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
12604		break;
12605	case TG3_EEPROM_SB_REVISION_4:
12606		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
12607		break;
12608	case TG3_EEPROM_SB_REVISION_5:
12609		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
12610		break;
12611	default:
12612		return;
12613	}
12614
12615	if (tg3_nvram_read(tp, offset, &val))
12616		return;
12617
12618	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
12619		TG3_EEPROM_SB_EDH_BLD_SHFT;
12620	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
12621		TG3_EEPROM_SB_EDH_MAJ_SHFT;
12622	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
12623
12624	if (minor > 99 || build > 26)
12625		return;
12626
12627	offset = strlen(tp->fw_ver);
12628	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
12629		 " v%d.%02d", major, minor);
12630
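	/* A nonzero build number is encoded as a trailing letter, build 1
	 * appending 'a' and build 26 'z'; e.g. a hypothetical v1.02 build 3
	 * becomes "v1.02c".
	 */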
12631	if (build > 0) {
12632		offset = strlen(tp->fw_ver);
12633		if (offset < TG3_VER_SIZE - 1)
12634			tp->fw_ver[offset] = 'a' + build - 1;
12635	}
12636}
12637
12638static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
12639{
12640	u32 val, offset, start;
12641	int i, vlen;
12642
12643	for (offset = TG3_NVM_DIR_START;
12644	     offset < TG3_NVM_DIR_END;
12645	     offset += TG3_NVM_DIRENT_SIZE) {
12646		if (tg3_nvram_read(tp, offset, &val))
12647			return;
12648
12649		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
12650			break;
12651	}
12652
12653	if (offset == TG3_NVM_DIR_END)
12654		return;
12655
12656	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
12657		start = 0x08000000;
12658	else if (tg3_nvram_read(tp, offset - 4, &start))
12659		return;
12660
12661	if (tg3_nvram_read(tp, offset + 4, &offset) ||
12662	    !tg3_fw_img_is_valid(tp, offset) ||
12663	    tg3_nvram_read(tp, offset + 8, &val))
12664		return;
12665
12666	offset += val - start;
12667
12668	vlen = strlen(tp->fw_ver);
12669
12670	tp->fw_ver[vlen++] = ',';
12671	tp->fw_ver[vlen++] = ' ';
12672
12673	for (i = 0; i < 4; i++) {
12674		__be32 v;
12675		if (tg3_nvram_read_be32(tp, offset, &v))
12676			return;
12677
12678		offset += sizeof(v);
12679
12680		if (vlen > TG3_VER_SIZE - sizeof(v)) {
12681			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
12682			break;
12683		}
12684
12685		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
12686		vlen += sizeof(v);
12687	}
12688}
12689
12690static void __devinit tg3_read_dash_ver(struct tg3 *tp)
12691{
12692	int vlen;
12693	u32 apedata;
12694	char *fwtype;
12695
12696	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) ||
12697	    !(tp->tg3_flags  & TG3_FLAG_ENABLE_ASF))
12698		return;
12699
12700	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
12701	if (apedata != APE_SEG_SIG_MAGIC)
12702		return;
12703
12704	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
12705	if (!(apedata & APE_FW_STATUS_READY))
12706		return;
12707
12708	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
12709
12710	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
12711		fwtype = "NCSI";
12712	else
12713		fwtype = "DASH";
12714
12715	vlen = strlen(tp->fw_ver);
12716
12717	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
12718		 fwtype,
12719		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
12720		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
12721		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
12722		 (apedata & APE_FW_VERSION_BLDMSK));
12723}
12724
12725static void __devinit tg3_read_fw_ver(struct tg3 *tp)
12726{
12727	u32 val;
12728	bool vpd_vers = false;
12729
12730	if (tp->fw_ver[0] != 0)
12731		vpd_vers = true;
12732
12733	if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) {
12734		strcat(tp->fw_ver, "sb");
12735		return;
12736	}
12737
12738	if (tg3_nvram_read(tp, 0, &val))
12739		return;
12740
12741	if (val == TG3_EEPROM_MAGIC)
12742		tg3_read_bc_ver(tp);
12743	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
12744		tg3_read_sb_ver(tp, val);
12745	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12746		tg3_read_hwsb_ver(tp);
12747	else
12748		return;
12749
12750	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
12751	     (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) || vpd_vers)
12752		goto done;
12753
12754	tg3_read_mgmtfw_ver(tp);
12755
12756done:
12757	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
12758}
12759
12760static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
12761
12762	static inline void vlan_features_add(struct net_device *dev, unsigned long flags)
12763{
12764#if TG3_VLAN_TAG_USED
12765	dev->vlan_features |= flags;
12766#endif
12767}
12768
12769static int __devinit tg3_get_invariants(struct tg3 *tp)
12770{
12771	static struct pci_device_id write_reorder_chipsets[] = {
12772		{ PCI_DEVICE(PCI_VENDOR_ID_AMD,
12773			     PCI_DEVICE_ID_AMD_FE_GATE_700C) },
12774		{ PCI_DEVICE(PCI_VENDOR_ID_AMD,
12775			     PCI_DEVICE_ID_AMD_8131_BRIDGE) },
12776		{ PCI_DEVICE(PCI_VENDOR_ID_VIA,
12777			     PCI_DEVICE_ID_VIA_8385_0) },
12778		{ },
12779	};
12780	u32 misc_ctrl_reg;
12781	u32 pci_state_reg, grc_misc_cfg;
12782	u32 val;
12783	u16 pci_cmd;
12784	int err;
12785
12786	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12787	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
12788	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12789
12790	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12791			      &misc_ctrl_reg);
12792
12793	tp->pci_chip_rev_id = (misc_ctrl_reg >>
12794			       MISC_HOST_CTRL_CHIPREV_SHIFT);
12795	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
12796		u32 prod_id_asic_rev;
12797
12798		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
12799		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
12800		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5724 ||
12801		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719)
12802			pci_read_config_dword(tp->pdev,
12803					      TG3PCI_GEN2_PRODID_ASICREV,
12804					      &prod_id_asic_rev);
12805		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
12806			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
12807			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
12808			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
12809			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
12810			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
12811			pci_read_config_dword(tp->pdev,
12812					      TG3PCI_GEN15_PRODID_ASICREV,
12813					      &prod_id_asic_rev);
12814		else
12815			pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
12816					      &prod_id_asic_rev);
12817
12818		tp->pci_chip_rev_id = prod_id_asic_rev;
12819	}
12820
12821	/* Wrong chip ID in 5752 A0. This code can be removed later
12822	 * as A0 is not in production.
12823	 */
12824	if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
12825		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
12826
12827	if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
12828	    (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
12829		static struct tg3_dev_id {
12830			u32	vendor;
12831			u32	device;
12832			u32	rev;
12833		} ich_chipsets[] = {
12834			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
12835			  PCI_ANY_ID },
12836			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
12837			  PCI_ANY_ID },
12838			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
12839			  0xa },
12840			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
12841			  PCI_ANY_ID },
12842			{ },
12843		};
12844		struct tg3_dev_id *pci_id = &ich_chipsets[0];
12845		struct pci_dev *bridge = NULL;
12846
12847		while (pci_id->vendor != 0) {
12848			bridge = pci_get_device(pci_id->vendor, pci_id->device,
12849						bridge);
12850			if (!bridge) {
12851				pci_id++;
12852				continue;
12853			}
12854			if (pci_id->rev != PCI_ANY_ID) {
12855				if (bridge->revision > pci_id->rev)
12856					continue;
12857			}
12858			if (bridge->subordinate &&
12859			    (bridge->subordinate->number ==
12860			     tp->pdev->bus->number)) {
12861
12862				tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
12863				pci_dev_put(bridge);
12864				break;
12865			}
12866		}
12867	}
12868
12869	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
12870		static struct tg3_dev_id {
12871			u32	vendor;
12872			u32	device;
12873		} bridge_chipsets[] = {
12874			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
12875			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
12876			{ },
12877		};
12878		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
12879		struct pci_dev *bridge = NULL;
12880
12881		while (pci_id->vendor != 0) {
12882			bridge = pci_get_device(pci_id->vendor,
12883						pci_id->device,
12884						bridge);
12885			if (!bridge) {
12886				pci_id++;
12887				continue;
12888			}
12889			if (bridge->subordinate &&
12890			    (bridge->subordinate->number <=
12891			     tp->pdev->bus->number) &&
12892			    (bridge->subordinate->subordinate >=
12893			     tp->pdev->bus->number)) {
12894				tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
12895				pci_dev_put(bridge);
12896				break;
12897			}
12898		}
12899	}
12900
12901	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
12902	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12903		tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
12904		tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
12905		tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
12906	} else {
12907		struct pci_dev *bridge = NULL;
12908
12909		do {
12910			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
12911						PCI_DEVICE_ID_SERVERWORKS_EPB,
12912						bridge);
12913			if (bridge && bridge->subordinate &&
12914			    (bridge->subordinate->number <=
12915			     tp->pdev->bus->number) &&
12916			    (bridge->subordinate->subordinate >=
12917			     tp->pdev->bus->number)) {
12918				tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
12919				pci_dev_put(bridge);
12920				break;
12921			}
12922		} while (bridge);
12923	}
12924
12925	/* Initialize misc host control in PCI block. */
12926	tp->misc_host_ctrl |= (misc_ctrl_reg &
12927			       MISC_HOST_CTRL_CHIPREV);
12928	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12929			       tp->misc_host_ctrl);
12930
12931	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
12932	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
12933	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
12934		tp->pdev_peer = tg3_find_peer(tp);
12935
12936	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12937	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
12938	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
12939		tp->tg3_flags3 |= TG3_FLG3_5717_PLUS;
12940
12941	/* Intentionally exclude ASIC_REV_5906 */
12942	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12943	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12944	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12945	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12946	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12947	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12948	    (tp->tg3_flags3 & TG3_FLG3_5717_PLUS))
12949		tp->tg3_flags3 |= TG3_FLG3_5755_PLUS;
12950
12951	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12952	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12953	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
12954	    (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
12955	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12956		tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
12957
12958	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
12959	    (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
12960		tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
12961
12962	/* 5700 B0 chips do not support checksumming correctly due
12963	 * to hardware bugs.
12964	 */
12965	if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
12966		tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
12967	else {
12968		unsigned long features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_GRO;
12969
12970		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
12971		if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
12972			features |= NETIF_F_IPV6_CSUM;
12973		tp->dev->features |= features;
12974		vlan_features_add(tp->dev, features);
12975	}
12976
12977	/* Determine TSO capabilities */
12978	if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
12979		tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3;
12980	else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
12981		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12982		tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
12983	else if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
12984		tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
12985		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
12986		    tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
12987			tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
12988	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12989		   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12990		   tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
12991		tp->tg3_flags2 |= TG3_FLG2_TSO_BUG;
12992		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
12993			tp->fw_needed = FIRMWARE_TG3TSO5;
12994		else
12995			tp->fw_needed = FIRMWARE_TG3TSO;
12996	}
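	/* Net effect of the chain above: 5717-class chips get HW_TSO_3,
	 * 5755-class parts and the 5906 get HW_TSO_2, the remaining
	 * 5750-class chips get HW_TSO_1 plus a TSO bug flag (cleared
	 * only on 5750 rev C2 and later), and everything else except
	 * the 5700, 5701 and 5705 A0 falls back to firmware TSO.
	 */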
12997
12998	tp->irq_max = 1;
12999
13000	if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
13001		tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
13002		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
13003		    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
13004		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
13005		     tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
13006		     tp->pdev_peer == tp->pdev))
13007			tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
13008
13009		if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
13010		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13011			tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
13012		}
13013
13014		if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
13015			tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX;
13016			tp->irq_max = TG3_IRQ_MAX_VECS;
13017		}
13018	}
13019
13020	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13021	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13022	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13023		tp->tg3_flags3 |= TG3_FLG3_SHORT_DMA_BUG;
13024	else if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) {
13025		tp->tg3_flags3 |= TG3_FLG3_4G_DMA_BNDRY_BUG;
13026		tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG;
13027	}
13028
13029	if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
13030		tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG;
13031
13032	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
13033	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
13034	    (tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG))
13035		tp->tg3_flags |= TG3_FLAG_JUMBO_CAPABLE;
13036
13037	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13038			      &pci_state_reg);
13039
13040	tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
13041	if (tp->pcie_cap != 0) {
13042		u16 lnkctl;
13043
13044		tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
13045
13046		pcie_set_readrq(tp->pdev, 4096);
13047
13048		pci_read_config_word(tp->pdev,
13049				     tp->pcie_cap + PCI_EXP_LNKCTL,
13050				     &lnkctl);
13051		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
13052			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13053				tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
13054			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13055			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13056			    tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
13057			    tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
13058				tp->tg3_flags3 |= TG3_FLG3_CLKREQ_BUG;
13059		} else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
13060			tp->tg3_flags3 |= TG3_FLG3_L1PLLPD_EN;
13061		}
13062	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
13063		tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
13064	} else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
13065		   (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
13066		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
13067		if (!tp->pcix_cap) {
13068			dev_err(&tp->pdev->dev,
13069				"Cannot find PCI-X capability, aborting\n");
13070			return -EIO;
13071		}
13072
13073		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
13074			tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
13075	}
13076
13077	/* If we have an AMD 762 or VIA K8T800 chipset, write
13078	 * reordering to the mailbox registers done by the host
13079	 * controller can cause major trouble.  We read back after
13080	 * every mailbox register write to force the writes to be
13081	 * posted to the chip in order.
13082	 */
13083	if (pci_dev_present(write_reorder_chipsets) &&
13084	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
13085		tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
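	/* When this flag is set, mailbox writes go through a flushing
	 * accessor such as tg3_write_flush_reg32() used below.  A
	 * minimal sketch of that read-back pattern, assuming the usual
	 * shape of such a helper:
	 *
	 *	writel(val, tp->regs + off);
	 *	readl(tp->regs + off);	read back to force the posted
	 *				write out to the chip
	 */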
13086
13087	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
13088			     &tp->pci_cacheline_sz);
13089	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13090			     &tp->pci_lat_timer);
13091	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
13092	    tp->pci_lat_timer < 64) {
13093		tp->pci_lat_timer = 64;
13094		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13095				      tp->pci_lat_timer);
13096	}
13097
13098	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
13099		tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
13100
13101		if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
13102			u32 pm_reg;
13103
13104			tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
13105
13106			/* The chip can have its power management PCI config
13107			 * space registers clobbered due to this bug.
13108			 * So explicitly force the chip into D0 here.
13109			 */
13110			pci_read_config_dword(tp->pdev,
13111					      tp->pm_cap + PCI_PM_CTRL,
13112					      &pm_reg);
13113			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
13114			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
13115			pci_write_config_dword(tp->pdev,
13116					       tp->pm_cap + PCI_PM_CTRL,
13117					       pm_reg);
13118
13119			/* Also, force SERR#/PERR# in PCI command. */
13120			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13121			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
13122			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13123		}
13124	}
13125
13126	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
13127		tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
13128	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
13129		tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
13130
13131	/* Chip-specific fixup from Broadcom driver */
13132	if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
13133	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
13134		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
13135		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
13136	}
13137
13138	/* Default fast path register access methods */
13139	tp->read32 = tg3_read32;
13140	tp->write32 = tg3_write32;
13141	tp->read32_mbox = tg3_read32;
13142	tp->write32_mbox = tg3_write32;
13143	tp->write32_tx_mbox = tg3_write32;
13144	tp->write32_rx_mbox = tg3_write32;
13145
13146	if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
13147		tp->write32 = tg3_write_indirect_reg32;
13148	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13149		 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
13150		  tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
13151		tp->write32 = tg3_write_flush_reg32;
13152	}
13153
13154	if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
13155	    (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
13156		tp->write32_tx_mbox = tg3_write32_tx_mbox;
13157		if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
13158			tp->write32_rx_mbox = tg3_write_flush_reg32;
13159	}
13160
13161	if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
13162		tp->read32 = tg3_read_indirect_reg32;
13163		tp->write32 = tg3_write_indirect_reg32;
13164		tp->read32_mbox = tg3_read_indirect_mbox;
13165		tp->write32_mbox = tg3_write_indirect_mbox;
13166		tp->write32_tx_mbox = tg3_write_indirect_mbox;
13167		tp->write32_rx_mbox = tg3_write_indirect_mbox;
13168
13169		iounmap(tp->regs);
13170		tp->regs = NULL;
13171
13172		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13173		pci_cmd &= ~PCI_COMMAND_MEMORY;
13174		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13175	}
13176	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13177		tp->read32_mbox = tg3_read32_mbox_5906;
13178		tp->write32_mbox = tg3_write32_mbox_5906;
13179		tp->write32_tx_mbox = tg3_write32_mbox_5906;
13180		tp->write32_rx_mbox = tg3_write32_mbox_5906;
13181	}
13182
13183	if (tp->write32 == tg3_write_indirect_reg32 ||
13184	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
13185	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13186	      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
13187		tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
13188
13189	/* Get eeprom hw config before calling tg3_set_power_state().
13190	 * In particular, the TG3_FLG2_IS_NIC flag must be
13191	 * determined before calling tg3_set_power_state() so that
13192	 * we know whether or not to switch out of Vaux power.
13193	 * When the flag is set, it means that GPIO1 is used for eeprom
13194	 * write protect and also implies that it is a LOM where GPIOs
13195	 * are not used to switch power.
13196	 */
13197	tg3_get_eeprom_hw_cfg(tp);
13198
13199	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
13200		/* Allow reads and writes to the
13201		 * APE register and memory space.
13202		 */
13203		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
13204				 PCISTATE_ALLOW_APE_SHMEM_WR |
13205				 PCISTATE_ALLOW_APE_PSPACE_WR;
13206		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
13207				       pci_state_reg);
13208	}
13209
13210	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13211	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13212	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13213	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13214	    (tp->tg3_flags3 & TG3_FLG3_5717_PLUS))
13215		tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
13216
13217	/* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
13218	 * GPIO1 driven high will bring 5700's external PHY out of reset.
13219	 * It is also used as eeprom write protect on LOMs.
13220	 */
13221	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
13222	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
13223	    (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
13224		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
13225				       GRC_LCLCTRL_GPIO_OUTPUT1);
13226	/* Unused GPIO3 must be driven as output on 5752 because there
13227	 * are no pull-up resistors on unused GPIO pins.
13228	 */
13229	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13230		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
13231
13232	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13233	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13234	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13235		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
13236
13237	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
13238	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
13239		/* Turn off the debug UART. */
13240		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
13241		if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
13242			/* Keep VMain power. */
13243			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
13244					      GRC_LCLCTRL_GPIO_OUTPUT0;
13245	}
13246
13247	/* Force the chip into D0. */
13248	err = tg3_set_power_state(tp, PCI_D0);
13249	if (err) {
13250		dev_err(&tp->pdev->dev, "Transition to D0 failed\n");
13251		return err;
13252	}
13253
13254	/* Derive the initial jumbo mode from the MTU assigned in
13255	 * ether_setup() via the alloc_etherdev() call
13256	 */
13257	if (tp->dev->mtu > ETH_DATA_LEN &&
13258	    !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
13259		tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
13260
13261	/* Determine WakeOnLan speed to use. */
13262	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13263	    tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
13264	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
13265	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
13266		tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
13267	} else {
13268		tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
13269	}
13270
13271	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13272		tp->phy_flags |= TG3_PHYFLG_IS_FET;
13273
13274	/* A few boards don't want the Ethernet@WireSpeed PHY feature */
13275	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
13276	    ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
13277	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
13278	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
13279	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
13280	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13281		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
13282
13283	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
13284	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
13285		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
13286	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
13287		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
13288
13289	if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
13290	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
13291	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13292	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
13293	    !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) {
13294		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13295		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13296		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13297		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
13298			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
13299			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
13300				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
13301			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
13302				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
13303		} else
13304			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
13305	}
13306
13307	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13308	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
13309		tp->phy_otp = tg3_read_otp_phycfg(tp);
13310		if (tp->phy_otp == 0)
13311			tp->phy_otp = TG3_OTP_DEFAULT;
13312	}
13313
13314	if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
13315		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
13316	else
13317		tp->mi_mode = MAC_MI_MODE_BASE;
13318
13319	tp->coalesce_mode = 0;
13320	if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
13321	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
13322		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
13323
13324	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13325	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
13326		tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
13327
13328	err = tg3_mdio_init(tp);
13329	if (err)
13330		return err;
13331
13332	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
13333	    tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
13334		return -ENOTSUPP;
13335
13336	/* Initialize data/descriptor byte/word swapping. */
13337	val = tr32(GRC_MODE);
13338	val &= GRC_MODE_HOST_STACKUP;
13339	tw32(GRC_MODE, val | tp->grc_mode);
13340
13341	tg3_switch_clocks(tp);
13342
13343	/* Clear this out for sanity. */
13344	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
13345
13346	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13347			      &pci_state_reg);
13348	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
13349	    (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
13350		u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
13351
13352		if (chiprevid == CHIPREV_ID_5701_A0 ||
13353		    chiprevid == CHIPREV_ID_5701_B0 ||
13354		    chiprevid == CHIPREV_ID_5701_B2 ||
13355		    chiprevid == CHIPREV_ID_5701_B5) {
13356			void __iomem *sram_base;
13357
13358			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
13359
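			/* Scribble on the second word of the stats block;
			 * if the first word changes when it is read back,
			 * memory-space writes are being corrupted and the
			 * PCI-X target workaround is needed after all.
			 */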
13360			writel(0x00000000, sram_base);
13361			writel(0x00000000, sram_base + 4);
13362			writel(0xffffffff, sram_base + 4);
13363			if (readl(sram_base) != 0x00000000)
13364				tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
13365		}
13366	}
13367
13368	udelay(50);
13369	tg3_nvram_init(tp);
13370
13371	grc_misc_cfg = tr32(GRC_MISC_CFG);
13372	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
13373
13374	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
13375	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
13376	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
13377		tp->tg3_flags2 |= TG3_FLG2_IS_5788;
13378
13379	if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
13380	    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
13381		tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
13382	if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
13383		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
13384				      HOSTCC_MODE_CLRTICK_TXBD);
13385
13386		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
13387		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13388				       tp->misc_host_ctrl);
13389	}
13390
13391	/* Preserve the APE MAC_MODE bits */
13392	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
13393		tp->mac_mode = tr32(MAC_MODE) |
13394			       MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
13395	else
13396		tp->mac_mode = TG3_DEF_MAC_MODE;
13397
13398	/* these are limited to 10/100 only */
13399	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
13400	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
13401	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
13402	     tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
13403	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
13404	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
13405	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
13406	    (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
13407	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
13408	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
13409	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
13410	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
13411	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13412	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
13413	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
13414		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
13415
13416	err = tg3_phy_probe(tp);
13417	if (err) {
13418		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
13419		/* ... but do not return immediately ... */
13420		tg3_mdio_fini(tp);
13421	}
13422
13423	tg3_read_vpd(tp);
13424	tg3_read_fw_ver(tp);
13425
13426	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
13427		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
13428	} else {
13429		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
13430			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
13431		else
13432			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
13433	}
13434
13435	/* 5700 {AX,BX} chips have a broken status block link
13436	 * change bit implementation, so we must use the
13437	 * status register in those cases.
13438	 */
13439	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
13440		tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
13441	else
13442		tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
13443
13444	/* The led_ctrl is set during tg3_phy_probe; here we might
13445	 * have to force the link status polling mechanism based
13446	 * upon subsystem IDs.
13447	 */
13448	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
13449	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
13450	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
13451		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
13452		tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
13453	}
13454
13455	/* For all SERDES we poll the MAC status register. */
13456	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13457		tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
13458	else
13459		tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
13460
13461	tp->rx_offset = NET_IP_ALIGN + TG3_RX_HEADROOM;
13462	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
13463	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
13464	    (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
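		/* The 5701 in PCI-X mode does not DMA reliably to buffers
		 * offset by the 2-byte IP-alignment pad, so drop it; on
		 * architectures without efficient unaligned access, max
		 * out the copy threshold so every received frame is
		 * copied into a properly aligned skb instead.
		 */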
13465		tp->rx_offset -= NET_IP_ALIGN;
13466#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
13467		tp->rx_copy_thresh = ~(u16)0;
13468#endif
13469	}
13470
13471	tp->rx_std_max_post = TG3_RX_RING_SIZE;
13472
13473	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13474	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13475	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
13476		tp->rx_std_max_post = 8;
13477
13478	if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
13479		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
13480				     PCIE_PWR_MGMT_L1_THRESH_MSK;
13481
13482	return err;
13483}
13484
13485#ifdef CONFIG_SPARC
13486static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
13487{
13488	struct net_device *dev = tp->dev;
13489	struct pci_dev *pdev = tp->pdev;
13490	struct device_node *dp = pci_device_to_OF_node(pdev);
13491	const unsigned char *addr;
13492	int len;
13493
13494	addr = of_get_property(dp, "local-mac-address", &len);
13495	if (addr && len == 6) {
13496		memcpy(dev->dev_addr, addr, 6);
13497		memcpy(dev->perm_addr, dev->dev_addr, 6);
13498		return 0;
13499	}
13500	return -ENODEV;
13501}
13502
13503static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
13504{
13505	struct net_device *dev = tp->dev;
13506
13507	memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
13508	memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
13509	return 0;
13510}
13511#endif
13512
13513static int __devinit tg3_get_device_address(struct tg3 *tp)
13514{
13515	struct net_device *dev = tp->dev;
13516	u32 hi, lo, mac_offset;
13517	int addr_ok = 0;
13518
13519#ifdef CONFIG_SPARC
13520	if (!tg3_get_macaddr_sparc(tp))
13521		return 0;
13522#endif
13523
13524	mac_offset = 0x7c;
13525	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
13526	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
13527		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
13528			mac_offset = 0xcc;
13529		if (tg3_nvram_lock(tp))
13530			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
13531		else
13532			tg3_nvram_unlock(tp);
13533	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13534		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
13535		if (PCI_FUNC(tp->pdev->devfn) & 1)
13536			mac_offset = 0xcc;
13537		if (PCI_FUNC(tp->pdev->devfn) > 1)
13538			mac_offset += 0x18c;
13539	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13540		mac_offset = 0x10;
13541
13542	/* First try to get it from the MAC address mailbox. */
13543	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
13544	if ((hi >> 16) == 0x484b) {
13545		dev->dev_addr[0] = (hi >>  8) & 0xff;
13546		dev->dev_addr[1] = (hi >>  0) & 0xff;
13547
13548		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
13549		dev->dev_addr[2] = (lo >> 24) & 0xff;
13550		dev->dev_addr[3] = (lo >> 16) & 0xff;
13551		dev->dev_addr[4] = (lo >>  8) & 0xff;
13552		dev->dev_addr[5] = (lo >>  0) & 0xff;
13553
13554		/* Some old bootcode may report a 0 MAC address in SRAM */
13555		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
13556	}
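	/* Worked example of the unpacking above: hi = 0x484b0a0b and
	 * lo = 0x0c0d0e0f yield the MAC address 0a:0b:0c:0d:0e:0f,
	 * with the 0x484b signature in the top 16 bits of hi.
	 */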
13557	if (!addr_ok) {
13558		/* Next, try NVRAM. */
13559		if (!(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) &&
13560		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
13561		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
13562			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
13563			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
13564		}
13565		/* Finally just fetch it out of the MAC control regs. */
13566		else {
13567			hi = tr32(MAC_ADDR_0_HIGH);
13568			lo = tr32(MAC_ADDR_0_LOW);
13569
13570			dev->dev_addr[5] = lo & 0xff;
13571			dev->dev_addr[4] = (lo >> 8) & 0xff;
13572			dev->dev_addr[3] = (lo >> 16) & 0xff;
13573			dev->dev_addr[2] = (lo >> 24) & 0xff;
13574			dev->dev_addr[1] = hi & 0xff;
13575			dev->dev_addr[0] = (hi >> 8) & 0xff;
13576		}
13577	}
13578
13579	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
13580#ifdef CONFIG_SPARC
13581		if (!tg3_get_default_macaddr_sparc(tp))
13582			return 0;
13583#endif
13584		return -EINVAL;
13585	}
13586	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
13587	return 0;
13588}
13589
13590#define BOUNDARY_SINGLE_CACHELINE	1
13591#define BOUNDARY_MULTI_CACHELINE	2
13592
13593static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
13594{
13595	int cacheline_size;
13596	u8 byte;
13597	int goal;
13598
13599	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
13600	if (byte == 0)
13601		cacheline_size = 1024;
13602	else
13603		cacheline_size = (int) byte * 4;
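	/* PCI_CACHE_LINE_SIZE is in units of 32-bit words, so a register
	 * value of 0x10 means 16 words, i.e. a 64-byte cache line; a
	 * value of 0 is treated as the 1024-byte worst case above.
	 */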
13604
13605	/* On 5703 and later chips, the boundary bits have no
13606	 * effect.
13607	 */
13608	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13609	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13610	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
13611		goto out;
13612
13613#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
13614	goal = BOUNDARY_MULTI_CACHELINE;
13615#else
13616#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
13617	goal = BOUNDARY_SINGLE_CACHELINE;
13618#else
13619	goal = 0;
13620#endif
13621#endif
13622
13623	if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
13624		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
13625		goto out;
13626	}
13627
13628	if (!goal)
13629		goto out;
13630
13631	/* PCI controllers on most RISC systems tend to disconnect
13632	 * when a device tries to burst across a cache-line boundary.
13633	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
13634	 *
13635	 * Unfortunately, for PCI-E there are only limited
13636	 * write-side controls for this, and thus for reads
13637	 * we will still get the disconnects.  We'll also waste
13638	 * these PCI cycles for both read and write for chips
13639	 * other than 5700 and 5701, which do not implement the
13640	 * boundary bits.
13641	 */
13642	if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
13643	    !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
13644		switch (cacheline_size) {
13645		case 16:
13646		case 32:
13647		case 64:
13648		case 128:
13649			if (goal == BOUNDARY_SINGLE_CACHELINE) {
13650				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
13651					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
13652			} else {
13653				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
13654					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
13655			}
13656			break;
13657
13658		case 256:
13659			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
13660				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
13661			break;
13662
13663		default:
13664			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
13665				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
13666			break;
13667		}
13668	} else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13669		switch (cacheline_size) {
13670		case 16:
13671		case 32:
13672		case 64:
13673			if (goal == BOUNDARY_SINGLE_CACHELINE) {
13674				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
13675				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
13676				break;
13677			}
13678			/* fallthrough */
13679		case 128:
13680		default:
13681			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
13682			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
13683			break;
13684		}
13685	} else {
13686		switch (cacheline_size) {
13687		case 16:
13688			if (goal == BOUNDARY_SINGLE_CACHELINE) {
13689				val |= (DMA_RWCTRL_READ_BNDRY_16 |
13690					DMA_RWCTRL_WRITE_BNDRY_16);
13691				break;
13692			}
13693			/* fallthrough */
13694		case 32:
13695			if (goal == BOUNDARY_SINGLE_CACHELINE) {
13696				val |= (DMA_RWCTRL_READ_BNDRY_32 |
13697					DMA_RWCTRL_WRITE_BNDRY_32);
13698				break;
13699			}
13700			/* fallthrough */
13701		case 64:
13702			if (goal == BOUNDARY_SINGLE_CACHELINE) {
13703				val |= (DMA_RWCTRL_READ_BNDRY_64 |
13704					DMA_RWCTRL_WRITE_BNDRY_64);
13705				break;
13706			}
13707			/* fallthrough */
13708		case 128:
13709			if (goal == BOUNDARY_SINGLE_CACHELINE) {
13710				val |= (DMA_RWCTRL_READ_BNDRY_128 |
13711					DMA_RWCTRL_WRITE_BNDRY_128);
13712				break;
13713			}
13714			/* fallthrough */
13715		case 256:
13716			val |= (DMA_RWCTRL_READ_BNDRY_256 |
13717				DMA_RWCTRL_WRITE_BNDRY_256);
13718			break;
13719		case 512:
13720			val |= (DMA_RWCTRL_READ_BNDRY_512 |
13721				DMA_RWCTRL_WRITE_BNDRY_512);
13722			break;
13723		case 1024:
13724		default:
13725			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
13726				DMA_RWCTRL_WRITE_BNDRY_1024);
13727			break;
13728		}
13729	}
13730
13731out:
13732	return val;
13733}
13734
13735static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
13736{
13737	struct tg3_internal_buffer_desc test_desc;
13738	u32 sram_dma_descs;
13739	int i, ret;
13740
13741	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
13742
13743	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
13744	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
13745	tw32(RDMAC_STATUS, 0);
13746	tw32(WDMAC_STATUS, 0);
13747
13748	tw32(BUFMGR_MODE, 0);
13749	tw32(FTQ_RESET, 0);
13750
13751	test_desc.addr_hi = ((u64) buf_dma) >> 32;
13752	test_desc.addr_lo = buf_dma & 0xffffffff;
13753	test_desc.nic_mbuf = 0x00002100;
13754	test_desc.len = size;
13755
13756	/*
13757	 * HP ZX1 systems were seeing test failures for 5701 cards running
13758	 * at 33MHz the *second* time the tg3 driver was loaded after an
13759	 * initial scan.
13760	 *
13761	 * Broadcom tells me:
13762	 *   ...the DMA engine is connected to the GRC block and a DMA
13763	 *   reset may affect the GRC block in some unpredictable way...
13764	 *   The behavior of resets to individual blocks has not been tested.
13765	 *
13766	 * Broadcom noted the GRC reset will also reset all sub-components.
13767	 */
13768	if (to_device) {
13769		test_desc.cqid_sqid = (13 << 8) | 2;
13770
13771		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
13772		udelay(40);
13773	} else {
13774		test_desc.cqid_sqid = (16 << 8) | 7;
13775
13776		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
13777		udelay(40);
13778	}
13779	test_desc.flags = 0x00000005;
13780
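	/* Copy the descriptor into NIC SRAM one 32-bit word at a time:
	 * each pass points the PCI memory window at the next SRAM
	 * address, then pushes one word through the window data register.
	 */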
13781	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
13782		u32 val;
13783
13784		val = *(((u32 *)&test_desc) + i);
13785		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
13786				       sram_dma_descs + (i * sizeof(u32)));
13787		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
13788	}
13789	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
13790
13791	if (to_device)
13792		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
13793	else
13794		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
13795
13796	ret = -ENODEV;
13797	for (i = 0; i < 40; i++) {
13798		u32 val;
13799
13800		if (to_device)
13801			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
13802		else
13803			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
13804		if ((val & 0xffff) == sram_dma_descs) {
13805			ret = 0;
13806			break;
13807		}
13808
13809		udelay(100);
13810	}
13811
13812	return ret;
13813}
13814
13815#define TEST_BUFFER_SIZE	0x2000
13816
13817static int __devinit tg3_test_dma(struct tg3 *tp)
13818{
13819	dma_addr_t buf_dma;
13820	u32 *buf, saved_dma_rwctrl;
13821	int ret = 0;
13822
13823	buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
13824	if (!buf) {
13825		ret = -ENOMEM;
13826		goto out_nofree;
13827	}
13828
13829	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
13830			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
13831
13832	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
13833
13834	if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
13835		goto out;
13836
13837	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13838		/* DMA read watermark not used on PCIE */
13839		tp->dma_rwctrl |= 0x00180000;
13840	} else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
13841		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13842		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
13843			tp->dma_rwctrl |= 0x003f0000;
13844		else
13845			tp->dma_rwctrl |= 0x003f000f;
13846	} else {
13847		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
13848		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
13849			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
13850			u32 read_water = 0x7;
13851
13852			if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
13853			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
13854				tp->dma_rwctrl |= 0x8000;
13855			else if (ccval == 0x6 || ccval == 0x7)
13856				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
13857
13858			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
13859				read_water = 4;
13860			/* Set bit 23 to enable PCIX hw bug fix */
13861			tp->dma_rwctrl |=
13862				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
13863				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
13864				(1 << 23);
13865		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
13866			/* 5780 always in PCIX mode */
13867			tp->dma_rwctrl |= 0x00144000;
13868		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13869			/* 5714 always in PCIX mode */
13870			tp->dma_rwctrl |= 0x00148000;
13871		} else {
13872			tp->dma_rwctrl |= 0x001b000f;
13873		}
13874	}
13875
13876	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
13877	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
13878		tp->dma_rwctrl &= 0xfffffff0;
13879
13880	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13881	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13882		/* Remove this if it causes problems for some boards. */
13883		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
13884
13885		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
13886	}
13887
13888	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13889
13890
13891	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13892	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
13893		goto out;
13894
13895	/* It is best to perform the DMA test with maximum write burst size
13896	 * to expose the 5700/5701 write DMA bug.
13897	 */
13898	saved_dma_rwctrl = tp->dma_rwctrl;
13899	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13900	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13901
13902	while (1) {
13903		u32 *p = buf, i;
13904
13905		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
13906			p[i] = i;
13907
13908		/* Send the buffer to the chip. */
13909		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
13910		if (ret) {
13911			dev_err(&tp->pdev->dev,
13912				"%s: Buffer write failed. err = %d\n",
13913				__func__, ret);
13914			break;
13915		}
13916
13917		/* Now read it back. */
13918		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
13919		if (ret) {
13920			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
13921				"err = %d\n", __func__, ret);
13922			break;
13923		}
13924
13925		/* Verify it. */
13926		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
13927			if (p[i] == i)
13928				continue;
13929
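			/* First mismatch: retry once with the most
			 * conservative 16-byte write boundary before
			 * declaring the DMA engine broken.
			 */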
13930			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
13931			    DMA_RWCTRL_WRITE_BNDRY_16) {
13932				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13933				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
13934				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13935				break;
13936			} else {
13937				dev_err(&tp->pdev->dev,
13938					"%s: Buffer corrupted on read back! "
13939					"(%d != %d)\n", __func__, p[i], i);
13940				ret = -ENODEV;
13941				goto out;
13942			}
13943		}
13944
13945		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
13946			/* Success. */
13947			ret = 0;
13948			break;
13949		}
13950	}
13951	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
13952	    DMA_RWCTRL_WRITE_BNDRY_16) {
13953		static struct pci_device_id dma_wait_state_chipsets[] = {
13954			{ PCI_DEVICE(PCI_VENDOR_ID_APPLE,
13955				     PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
13956			{ },
13957		};
13958
13959		/* DMA test passed without adjusting the DMA boundary;
13960		 * now look for chipsets that are known to expose the
13961		 * DMA bug without failing the test.
13962		 */
13963		if (pci_dev_present(dma_wait_state_chipsets)) {
13964			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13965			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
13966		} else {
13967			/* Safe to use the calculated DMA boundary. */
13968			tp->dma_rwctrl = saved_dma_rwctrl;
13969		}
13970
13971		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13972	}
13973
13974out:
13975	pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
13976out_nofree:
13977	return ret;
13978}
13979
13980static void __devinit tg3_init_link_config(struct tg3 *tp)
13981{
13982	tp->link_config.advertising =
13983		(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13984		 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13985		 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
13986		 ADVERTISED_Autoneg | ADVERTISED_MII);
13987	tp->link_config.speed = SPEED_INVALID;
13988	tp->link_config.duplex = DUPLEX_INVALID;
13989	tp->link_config.autoneg = AUTONEG_ENABLE;
13990	tp->link_config.active_speed = SPEED_INVALID;
13991	tp->link_config.active_duplex = DUPLEX_INVALID;
13992	tp->link_config.orig_speed = SPEED_INVALID;
13993	tp->link_config.orig_duplex = DUPLEX_INVALID;
13994	tp->link_config.orig_autoneg = AUTONEG_INVALID;
13995}
13996
13997static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
13998{
13999	if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
14000		tp->bufmgr_config.mbuf_read_dma_low_water =
14001			DEFAULT_MB_RDMA_LOW_WATER_5705;
14002		tp->bufmgr_config.mbuf_mac_rx_low_water =
14003			DEFAULT_MB_MACRX_LOW_WATER_57765;
14004		tp->bufmgr_config.mbuf_high_water =
14005			DEFAULT_MB_HIGH_WATER_57765;
14006
14007		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14008			DEFAULT_MB_RDMA_LOW_WATER_5705;
14009		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14010			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
14011		tp->bufmgr_config.mbuf_high_water_jumbo =
14012			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
14013	} else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
14014		tp->bufmgr_config.mbuf_read_dma_low_water =
14015			DEFAULT_MB_RDMA_LOW_WATER_5705;
14016		tp->bufmgr_config.mbuf_mac_rx_low_water =
14017			DEFAULT_MB_MACRX_LOW_WATER_5705;
14018		tp->bufmgr_config.mbuf_high_water =
14019			DEFAULT_MB_HIGH_WATER_5705;
14020		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14021			tp->bufmgr_config.mbuf_mac_rx_low_water =
14022				DEFAULT_MB_MACRX_LOW_WATER_5906;
14023			tp->bufmgr_config.mbuf_high_water =
14024				DEFAULT_MB_HIGH_WATER_5906;
14025		}
14026
14027		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14028			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
14029		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14030			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
14031		tp->bufmgr_config.mbuf_high_water_jumbo =
14032			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
14033	} else {
14034		tp->bufmgr_config.mbuf_read_dma_low_water =
14035			DEFAULT_MB_RDMA_LOW_WATER;
14036		tp->bufmgr_config.mbuf_mac_rx_low_water =
14037			DEFAULT_MB_MACRX_LOW_WATER;
14038		tp->bufmgr_config.mbuf_high_water =
14039			DEFAULT_MB_HIGH_WATER;
14040
14041		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14042			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
14043		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14044			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
14045		tp->bufmgr_config.mbuf_high_water_jumbo =
14046			DEFAULT_MB_HIGH_WATER_JUMBO;
14047	}
14048
14049	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
14050	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
14051}
14052
14053static char * __devinit tg3_phy_string(struct tg3 *tp)
14054{
14055	switch (tp->phy_id & TG3_PHY_ID_MASK) {
14056	case TG3_PHY_ID_BCM5400:	return "5400";
14057	case TG3_PHY_ID_BCM5401:	return "5401";
14058	case TG3_PHY_ID_BCM5411:	return "5411";
14059	case TG3_PHY_ID_BCM5701:	return "5701";
14060	case TG3_PHY_ID_BCM5703:	return "5703";
14061	case TG3_PHY_ID_BCM5704:	return "5704";
14062	case TG3_PHY_ID_BCM5705:	return "5705";
14063	case TG3_PHY_ID_BCM5750:	return "5750";
14064	case TG3_PHY_ID_BCM5752:	return "5752";
14065	case TG3_PHY_ID_BCM5714:	return "5714";
14066	case TG3_PHY_ID_BCM5780:	return "5780";
14067	case TG3_PHY_ID_BCM5755:	return "5755";
14068	case TG3_PHY_ID_BCM5787:	return "5787";
14069	case TG3_PHY_ID_BCM5784:	return "5784";
14070	case TG3_PHY_ID_BCM5756:	return "5722/5756";
14071	case TG3_PHY_ID_BCM5906:	return "5906";
14072	case TG3_PHY_ID_BCM5761:	return "5761";
14073	case TG3_PHY_ID_BCM5718C:	return "5718C";
14074	case TG3_PHY_ID_BCM5718S:	return "5718S";
14075	case TG3_PHY_ID_BCM57765:	return "57765";
14076	case TG3_PHY_ID_BCM5719C:	return "5719C";
14077	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
14078	case 0:			return "serdes";
14079	default:		return "unknown";
14080	}
14081}
14082
14083static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
14084{
14085	if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
14086		strcpy(str, "PCI Express");
14087		return str;
14088	} else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
14089		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
14090
14091		strcpy(str, "PCIX:");
14092
14093		if ((clock_ctrl == 7) ||
14094		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
14095		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
14096			strcat(str, "133MHz");
14097		else if (clock_ctrl == 0)
14098			strcat(str, "33MHz");
14099		else if (clock_ctrl == 2)
14100			strcat(str, "50MHz");
14101		else if (clock_ctrl == 4)
14102			strcat(str, "66MHz");
14103		else if (clock_ctrl == 6)
14104			strcat(str, "100MHz");
14105	} else {
14106		strcpy(str, "PCI:");
14107		if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
14108			strcat(str, "66MHz");
14109		else
14110			strcat(str, "33MHz");
14111	}
14112	if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
14113		strcat(str, ":32-bit");
14114	else
14115		strcat(str, ":64-bit");
14116	return str;
14117}
14118
14119static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
14120{
14121	struct pci_dev *peer;
14122	unsigned int func, devnr = tp->pdev->devfn & ~7;
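	/* devfn encodes (slot << 3) | function, so masking off the low
	 * three bits gives function 0 of this slot; the loop then probes
	 * all eight possible functions for a sibling device.
	 */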
14123
14124	for (func = 0; func < 8; func++) {
14125		peer = pci_get_slot(tp->pdev->bus, devnr | func);
14126		if (peer && peer != tp->pdev)
14127			break;
14128		pci_dev_put(peer);
14129	}
14130	/* 5704 can be configured in single-port mode; set peer to
14131	 * tp->pdev in that case.
14132	 */
14133	if (!peer) {
14134		peer = tp->pdev;
14135		return peer;
14136	}
14137
14138	/*
14139	 * We don't need to keep the refcount elevated; there's no way
14140	 * to remove one half of this device without removing the other.
14141	 */
14142	pci_dev_put(peer);
14143
14144	return peer;
14145}
14146
14147static void __devinit tg3_init_coal(struct tg3 *tp)
14148{
14149	struct ethtool_coalesce *ec = &tp->coal;
14150
14151	memset(ec, 0, sizeof(*ec));
14152	ec->cmd = ETHTOOL_GCOALESCE;
14153	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
14154	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
14155	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
14156	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
14157	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
14158	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
14159	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
14160	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
14161	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
14162
14163	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
14164				 HOSTCC_MODE_CLRTICK_TXBD)) {
14165		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
14166		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
14167		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
14168		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
14169	}
14170
14171	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
14172		ec->rx_coalesce_usecs_irq = 0;
14173		ec->tx_coalesce_usecs_irq = 0;
14174		ec->stats_block_coalesce_usecs = 0;
14175	}
14176}
14177
14178static const struct net_device_ops tg3_netdev_ops = {
14179	.ndo_open		= tg3_open,
14180	.ndo_stop		= tg3_close,
14181	.ndo_start_xmit		= tg3_start_xmit,
14182	.ndo_get_stats64	= tg3_get_stats64,
14183	.ndo_validate_addr	= eth_validate_addr,
14184	.ndo_set_multicast_list	= tg3_set_rx_mode,
14185	.ndo_set_mac_address	= tg3_set_mac_addr,
14186	.ndo_do_ioctl		= tg3_ioctl,
14187	.ndo_tx_timeout		= tg3_tx_timeout,
14188	.ndo_change_mtu		= tg3_change_mtu,
14189#if TG3_VLAN_TAG_USED
14190	.ndo_vlan_rx_register	= tg3_vlan_rx_register,
14191#endif
14192#ifdef CONFIG_NET_POLL_CONTROLLER
14193	.ndo_poll_controller	= tg3_poll_controller,
14194#endif
14195};
14196
14197static const struct net_device_ops tg3_netdev_ops_dma_bug = {
14198	.ndo_open		= tg3_open,
14199	.ndo_stop		= tg3_close,
14200	.ndo_start_xmit		= tg3_start_xmit_dma_bug,
14201	.ndo_get_stats64	= tg3_get_stats64,
14202	.ndo_validate_addr	= eth_validate_addr,
14203	.ndo_set_multicast_list	= tg3_set_rx_mode,
14204	.ndo_set_mac_address	= tg3_set_mac_addr,
14205	.ndo_do_ioctl		= tg3_ioctl,
14206	.ndo_tx_timeout		= tg3_tx_timeout,
14207	.ndo_change_mtu		= tg3_change_mtu,
14208#if TG3_VLAN_TAG_USED
14209	.ndo_vlan_rx_register	= tg3_vlan_rx_register,
14210#endif
14211#ifdef CONFIG_NET_POLL_CONTROLLER
14212	.ndo_poll_controller	= tg3_poll_controller,
14213#endif
14214};
14215
14216static int __devinit tg3_init_one(struct pci_dev *pdev,
14217				  const struct pci_device_id *ent)
14218{
14219	struct net_device *dev;
14220	struct tg3 *tp;
14221	int i, err, pm_cap;
14222	u32 sndmbx, rcvmbx, intmbx;
14223	char str[40];
14224	u64 dma_mask, persist_dma_mask;
14225
14226	printk_once(KERN_INFO "%s\n", version);
14227
14228	err = pci_enable_device(pdev);
14229	if (err) {
14230		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
14231		return err;
14232	}
14233
14234	err = pci_request_regions(pdev, DRV_MODULE_NAME);
14235	if (err) {
14236		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
14237		goto err_out_disable_pdev;
14238	}
14239
14240	pci_set_master(pdev);
14241
14242	/* Find power-management capability. */
14243	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
14244	if (pm_cap == 0) {
14245		dev_err(&pdev->dev,
14246			"Cannot find Power Management capability, aborting\n");
14247		err = -EIO;
14248		goto err_out_free_res;
14249	}
14250
14251	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
14252	if (!dev) {
14253		dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
14254		err = -ENOMEM;
14255		goto err_out_free_res;
14256	}
14257
14258	SET_NETDEV_DEV(dev, &pdev->dev);
14259
14260#if TG3_VLAN_TAG_USED
14261	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
14262#endif
14263
14264	tp = netdev_priv(dev);
14265	tp->pdev = pdev;
14266	tp->dev = dev;
14267	tp->pm_cap = pm_cap;
14268	tp->rx_mode = TG3_DEF_RX_MODE;
14269	tp->tx_mode = TG3_DEF_TX_MODE;
14270
14271	if (tg3_debug > 0)
14272		tp->msg_enable = tg3_debug;
14273	else
14274		tp->msg_enable = TG3_DEF_MSG_ENABLE;
14275
14276	/* The word/byte swap controls here control register access byte
14277	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
14278	 * setting below.
14279	 */
14280	tp->misc_host_ctrl =
14281		MISC_HOST_CTRL_MASK_PCI_INT |
14282		MISC_HOST_CTRL_WORD_SWAP |
14283		MISC_HOST_CTRL_INDIR_ACCESS |
14284		MISC_HOST_CTRL_PCISTATE_RW;
14285
14286	/* The NONFRM (non-frame) byte/word swap controls take effect
14287	 * on descriptor entries, anything which isn't packet data.
14288	 *
14289	 * The StrongARM chips on the board (one for tx, one for rx)
14290	 * are running in big-endian mode.
14291	 */
14292	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
14293			GRC_MODE_WSWAP_NONFRM_DATA);
14294#ifdef __BIG_ENDIAN
14295	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
14296#endif
14297	spin_lock_init(&tp->lock);
14298	spin_lock_init(&tp->indirect_lock);
14299	INIT_WORK(&tp->reset_task, tg3_reset_task);
14300
14301	tp->regs = pci_ioremap_bar(pdev, BAR_0);
14302	if (!tp->regs) {
14303		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
14304		err = -ENOMEM;
14305		goto err_out_free_dev;
14306	}
14307
14308	tg3_init_link_config(tp);
14309
14310	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
14311	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
14312
14313	dev->ethtool_ops = &tg3_ethtool_ops;
14314	dev->watchdog_timeo = TG3_TX_TIMEOUT;
14315	dev->irq = pdev->irq;
14316
14317	err = tg3_get_invariants(tp);
14318	if (err) {
14319		dev_err(&pdev->dev,
14320			"Problem fetching invariants of chip, aborting\n");
14321		goto err_out_iounmap;
14322	}
14323
14324	if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
14325	    tp->pci_chip_rev_id != CHIPREV_ID_5717_A0 &&
14326	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
14327		dev->netdev_ops = &tg3_netdev_ops;
14328	else
14329		dev->netdev_ops = &tg3_netdev_ops_dma_bug;
14330
14331
14332	/* The EPB bridge inside 5714, 5715, and 5780 and any
14333	 * device behind the EPB cannot support DMA addresses > 40-bit.
14334	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
14335	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
14336	 * do DMA address check in tg3_start_xmit().
14337	 */
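	/* For reference, DMA_BIT_MASK(n) is ((1ULL << n) - 1) for n < 64
	 * (and all ones for n == 64), so DMA_BIT_MASK(40) caps bus
	 * addresses at 0xffffffffff.
	 */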
14338	if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
14339		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
14340	else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
14341		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
14342#ifdef CONFIG_HIGHMEM
14343		dma_mask = DMA_BIT_MASK(64);
14344#endif
14345	} else
14346		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
14347
14348	/* Configure DMA attributes. */
14349	if (dma_mask > DMA_BIT_MASK(32)) {
14350		err = pci_set_dma_mask(pdev, dma_mask);
14351		if (!err) {
14352			dev->features |= NETIF_F_HIGHDMA;
14353			err = pci_set_consistent_dma_mask(pdev,
14354							  persist_dma_mask);
14355			if (err < 0) {
14356				dev_err(&pdev->dev, "Unable to obtain 64 bit "
14357					"DMA for consistent allocations\n");
14358				goto err_out_iounmap;
14359			}
14360		}
14361	}
14362	if (err || dma_mask == DMA_BIT_MASK(32)) {
14363		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
14364		if (err) {
14365			dev_err(&pdev->dev,
14366				"No usable DMA configuration, aborting\n");
14367			goto err_out_iounmap;
14368		}
14369	}
14370
14371	tg3_init_bufmgr_config(tp);
14372
14373	/* Selectively allow TSO based on operating conditions */
14374	if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
14375	    (tp->fw_needed && !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)))
14376		tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
14377	else {
14378		tp->tg3_flags2 &= ~(TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG);
14379		tp->fw_needed = NULL;
14380	}
14381
14382	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14383		tp->fw_needed = FIRMWARE_TG3;
14384
14385	/* TSO is on by default on chips that support hardware TSO.
14386	 * Firmware TSO on older chips gives lower performance, so it
14387	 * is off by default, but can be enabled using ethtool.
14388	 */
14389	if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) &&
14390	    (dev->features & NETIF_F_IP_CSUM)) {
14391		dev->features |= NETIF_F_TSO;
14392		vlan_features_add(dev, NETIF_F_TSO);
14393	}
14394	if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) ||
14395	    (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3)) {
14396		if (dev->features & NETIF_F_IPV6_CSUM) {
14397			dev->features |= NETIF_F_TSO6;
14398			vlan_features_add(dev, NETIF_F_TSO6);
14399		}
14400		if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
14401		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14402		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14403		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
14404			GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14405		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
14406			dev->features |= NETIF_F_TSO_ECN;
14407			vlan_features_add(dev, NETIF_F_TSO_ECN);
14408		}
14409	}
14410
14411	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
14412	    !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
14413	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
14414		tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
14415		tp->rx_pending = 63;
14416	}
14417
14418	err = tg3_get_device_address(tp);
14419	if (err) {
14420		dev_err(&pdev->dev,
14421			"Could not obtain valid ethernet address, aborting\n");
14422		goto err_out_iounmap;
14423	}
14424
14425	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
14426		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
14427		if (!tp->aperegs) {
14428			dev_err(&pdev->dev,
14429				"Cannot map APE registers, aborting\n");
14430			err = -ENOMEM;
14431			goto err_out_iounmap;
14432		}
14433
14434		tg3_ape_lock_init(tp);
14435
14436		if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
14437			tg3_read_dash_ver(tp);
14438	}
14439
14440	/*
14441	 * Reset the chip in case a UNDI or EFI driver did not shut it
14442	 * down cleanly.  The DMA self test will enable WDMAC and we'll
14443	 * see (spurious) pending DMA on the PCI bus at that point.
14444	 */
14445	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
14446	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
14447		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
14448		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14449	}
14450
14451	err = tg3_test_dma(tp);
14452	if (err) {
14453		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
14454		goto err_out_apeunmap;
14455	}
14456
14457	/* flow control autonegotiation is default behavior */
14458	tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
14459	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
14460
14461	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
14462	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
14463	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
14464	for (i = 0; i < TG3_IRQ_MAX_VECS; i++) {
14465		struct tg3_napi *tnapi = &tp->napi[i];
14466
14467		tnapi->tp = tp;
14468		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
14469
14470		tnapi->int_mbox = intmbx;
14471		if (i < 4)
14472			intmbx += 0x8;
14473		else
14474			intmbx += 0x4;
14475
14476		tnapi->consmbox = rcvmbx;
14477		tnapi->prodmbox = sndmbx;
14478
14479		if (i) {
14480			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
14481			netif_napi_add(dev, &tnapi->napi, tg3_poll_msix, 64);
14482		} else {
14483			tnapi->coal_now = HOSTCC_MODE_NOW;
14484			netif_napi_add(dev, &tnapi->napi, tg3_poll, 64);
14485		}
14486
14487		if (!(tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX))
14488			break;
14489
14490		/*
14491		 * If we support MSI-X, we'll be using RSS.  With RSS, the
14492		 * first vector handles only link interrupts and the
14493		 * remaining vectors handle rx and tx interrupts.  Reuse the
14494		 * mailbox values for the next iteration; the values we set
14495		 * up above are still correct for single-vector mode.
14496		 */
14497		if (!i)
14498			continue;
14499
14500		rcvmbx += 0x8;
14501
14502		if (sndmbx & 0x4)
14503			sndmbx -= 0x4;
14504		else
14505			sndmbx += 0xc;
14506	}
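
	/*
	 * Illustration of the mailbox layout the loop above produces,
	 * assuming the offsets defined in tg3.h (MAILBOX_INTERRUPT_0
	 * 0x0200, MAILBOX_RCVRET_CON_IDX_0 0x0280,
	 * MAILBOX_SNDHOST_PROD_IDX_0 0x0300, TG3_64BIT_REG_LOW 0x4) and
	 * five vectors:
	 *
	 *	vec   int_mbox   consmbox   prodmbox
	 *	 0     0x0204     0x0284     0x0304
	 *	 1     0x020c     0x0284     0x0304
	 *	 2     0x0214     0x028c     0x0300
	 *	 3     0x021c     0x0294     0x030c
	 *	 4     0x0224     0x029c     0x0308
	 *
	 * Vector 1 reuses vector 0's rx/tx mailboxes because vector 0
	 * handles only link interrupts under MSI-X; the +0xc/-0x4 zigzag
	 * packs two 32-bit producer mailboxes into each 64-bit slot.
	 */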
14507
14508	tg3_init_coal(tp);
14509
14510	pci_set_drvdata(pdev, dev);
14511
14512	err = register_netdev(dev);
14513	if (err) {
14514		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
14515		goto err_out_apeunmap;
14516	}
14517
14518	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
14519		    tp->board_part_number,
14520		    tp->pci_chip_rev_id,
14521		    tg3_bus_string(tp, str),
14522		    dev->dev_addr);
14523
14524	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
14525		struct phy_device *phydev;
14526		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
14527		netdev_info(dev,
14528			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
14529			    phydev->drv->name, dev_name(&phydev->dev));
14530	} else {
14531		char *ethtype;
14532
14533		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
14534			ethtype = "10/100Base-TX";
14535		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
14536			ethtype = "1000Base-SX";
14537		else
14538			ethtype = "10/100/1000Base-T";
14539
14540		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
14541			    "(WireSpeed[%d])\n", tg3_phy_string(tp), ethtype,
14542			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0);
14543	}
14544
14545	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
14546		    (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
14547		    (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
14548		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
14549		    (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
14550		    (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
14551	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
14552		    tp->dma_rwctrl,
14553		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
14554		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
14555
14556	return 0;
14557
14558err_out_apeunmap:
14559	if (tp->aperegs) {
14560		iounmap(tp->aperegs);
14561		tp->aperegs = NULL;
14562	}
14563
14564err_out_iounmap:
14565	if (tp->regs) {
14566		iounmap(tp->regs);
14567		tp->regs = NULL;
14568	}
14569
14570err_out_free_dev:
14571	free_netdev(dev);
14572
14573err_out_free_res:
14574	pci_release_regions(pdev);
14575
14576err_out_disable_pdev:
14577	pci_disable_device(pdev);
14578	pci_set_drvdata(pdev, NULL);
14579	return err;
14580}
14581
14582static void __devexit tg3_remove_one(struct pci_dev *pdev)
14583{
14584	struct net_device *dev = pci_get_drvdata(pdev);
14585
14586	if (dev) {
14587		struct tg3 *tp = netdev_priv(dev);
14588
14589		if (tp->fw)
14590			release_firmware(tp->fw);
14591
14592		flush_scheduled_work();
14593
14594		if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
14595			tg3_phy_fini(tp);
14596			tg3_mdio_fini(tp);
14597		}
14598
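		/*
		 * Unregister the netdev before unmapping the register
		 * windows: the device methods dereference tp->regs and
		 * must be fully quiesced first.
		 */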
14599		unregister_netdev(dev);
14600		if (tp->aperegs) {
14601			iounmap(tp->aperegs);
14602			tp->aperegs = NULL;
14603		}
14604		if (tp->regs) {
14605			iounmap(tp->regs);
14606			tp->regs = NULL;
14607		}
14608		free_netdev(dev);
14609		pci_release_regions(pdev);
14610		pci_disable_device(pdev);
14611		pci_set_drvdata(pdev, NULL);
14612	}
14613}
14614
14615static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
14616{
14617	struct net_device *dev = pci_get_drvdata(pdev);
14618	struct tg3 *tp = netdev_priv(dev);
14619	pci_power_t target_state;
14620	int err;
14621
14622	/* PCI register 4 needs to be saved whether netif_running() or not.
14623	 * MSI address and data need to be saved if using MSI and
14624	 * netif_running().
14625	 */
14626	pci_save_state(pdev);
14627
14628	if (!netif_running(dev))
14629		return 0;
14630
14631	flush_scheduled_work();
14632	tg3_phy_stop(tp);
14633	tg3_netif_stop(tp);
14634
14635	del_timer_sync(&tp->timer);
14636
14637	tg3_full_lock(tp, 1);
14638	tg3_disable_ints(tp);
14639	tg3_full_unlock(tp);
14640
14641	netif_device_detach(dev);
14642
14643	tg3_full_lock(tp, 0);
14644	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14645	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
14646	tg3_full_unlock(tp);
14647
14648	target_state = pdev->pm_cap ? pci_target_state(pdev) : PCI_D3hot;
14649
14650	err = tg3_set_power_state(tp, target_state);
14651	if (err) {
14652		int err2;
14653
14654		tg3_full_lock(tp, 0);
14655
14656		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
14657		err2 = tg3_restart_hw(tp, 1);
14658		if (err2)
14659			goto out;
14660
14661		tp->timer.expires = jiffies + tp->timer_offset;
14662		add_timer(&tp->timer);
14663
14664		netif_device_attach(dev);
14665		tg3_netif_start(tp);
14666
14667out:
14668		tg3_full_unlock(tp);
14669
14670		if (!err2)
14671			tg3_phy_start(tp);
14672	}
14673
14674	return err;
14675}
14676
14677static int tg3_resume(struct pci_dev *pdev)
14678{
14679	struct net_device *dev = pci_get_drvdata(pdev);
14680	struct tg3 *tp = netdev_priv(dev);
14681	int err;
14682
14683	pci_restore_state(tp->pdev);
14684
14685	if (!netif_running(dev))
14686		return 0;
14687
14688	err = tg3_set_power_state(tp, PCI_D0);
14689	if (err)
14690		return err;
14691
14692	netif_device_attach(dev);
14693
14694	tg3_full_lock(tp, 0);
14695
14696	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
14697	err = tg3_restart_hw(tp, 1);
14698	if (err)
14699		goto out;
14700
14701	tp->timer.expires = jiffies + tp->timer_offset;
14702	add_timer(&tp->timer);
14703
14704	tg3_netif_start(tp);
14705
14706out:
14707	tg3_full_unlock(tp);
14708
14709	if (!err)
14710		tg3_phy_start(tp);
14711
14712	return err;
14713}
14714
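/*
 * Legacy PCI power-management hooks: suspend receives a pm_message_t and
 * resume takes no state argument.  Newer kernels express these through
 * struct dev_pm_ops, but this driver uses the older pci_driver fields.
 */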
14715static struct pci_driver tg3_driver = {
14716	.name		= DRV_MODULE_NAME,
14717	.id_table	= tg3_pci_tbl,
14718	.probe		= tg3_init_one,
14719	.remove		= __devexit_p(tg3_remove_one),
14720	.suspend	= tg3_suspend,
14721	.resume		= tg3_resume
14722};
14723
14724static int __init tg3_init(void)
14725{
14726	return pci_register_driver(&tg3_driver);
14727}
14728
14729static void __exit tg3_cleanup(void)
14730{
14731	pci_unregister_driver(&tg3_driver);
14732}
14733
14734module_init(tg3_init);
14735module_exit(tg3_cleanup);
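
/*
 * Module load/unload from userspace maps onto the functions above
 * (commands shown for illustration):
 *
 *	modprobe tg3		# tg3_init()    -> pci_register_driver()
 *	rmmod tg3		# tg3_cleanup() -> pci_unregister_driver()
 */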
14736