1/******************************************************************************/
2/*                                                                            */
3/* Broadcom BCM5700 Linux Network Driver, Copyright (c) 2000 - 2005 Broadcom  */
4/* Corporation.                                                               */
5/* All rights reserved.                                                       */
6/*                                                                            */
7/* This program is free software; you can redistribute it and/or modify       */
8/* it under the terms of the GNU General Public License as published by       */
9/* the Free Software Foundation, located in the file LICENSE.                 */
10/*                                                                            */
11/******************************************************************************/
12
13
/* Driver identification strings reported in logs and module info. */
char bcm5700_driver[] = "bcm5700";
char bcm5700_version[] = "8.3.14";
char bcm5700_date[] = "(11/2/05)";
17
18#define B57UM
19#include "mm.h"
20
21#include "typedefs.h"
22#include "osl.h"
23#include "bcmdefs.h"
24#include "bcmdevs.h"
25#include "bcmutils.h"
26#include "sbconfig.h"
27#include "sbutils.h"
28#include "hndgige.h"
29#include "bcmrobo.h"
30
/* Extra skb headroom is needed for good and stable performance */
#define EXTRA_HDR BCMEXTRAHDROOM

/* A few user-configurable values. */

/* Maximum number of adapters handled by one load of this driver. */
#define MAX_UNITS 16
/* Used to pass the full-duplex flag, etc. */
/* One entry per unit; overridable via the module parameters declared below. */
static int line_speed[MAX_UNITS] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};	/* 0 = auto-negotiated */
static int auto_speed[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};	/* 1 = autoneg enabled */
static int full_duplex[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
static int rx_flow_control[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
static int tx_flow_control[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
static int auto_flow_control[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
#if T3_JUMBO_RCV_RCB_ENTRY_COUNT
static int mtu[MAX_UNITS] = {1500,1500,1500,1500,1500,1500,1500,1500,1500,1500,1500,1500,1500,1500,1500,1500};	/* Jumbo MTU for interfaces. */
#endif
static int tx_checksum[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
static int rx_checksum[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
static int scatter_gather[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
50
/* Per-unit descriptor ring sizes, overridable via module parameters. */
#define TX_DESC_CNT DEFAULT_TX_PACKET_DESC_COUNT
static unsigned int tx_pkt_desc_cnt[MAX_UNITS] =
	{TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,
	TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,
	TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,TX_DESC_CNT,
	TX_DESC_CNT};

#define RX_DESC_CNT DEFAULT_STD_RCV_DESC_COUNT
static unsigned int rx_std_desc_cnt[MAX_UNITS] =
	{RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,
	RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,
	RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,RX_DESC_CNT,
	RX_DESC_CNT };

/* The jumbo receive ring exists only on chips that support it. */
#if T3_JUMBO_RCV_RCB_ENTRY_COUNT
#define JBO_DESC_CNT DEFAULT_JUMBO_RCV_DESC_COUNT
static unsigned int rx_jumbo_desc_cnt[MAX_UNITS] =
	{JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,
	JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,
	JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,JBO_DESC_CNT,
	JBO_DESC_CNT };
#endif
73
/* Interrupt-coalescing tunables, one entry per unit. */
#ifdef BCM_INT_COAL
#ifdef BCM_NAPI_RXPOLL
/* Adaptive coalescing defaults off when NAPI rx polling is compiled in. */
static unsigned int adaptive_coalesce[MAX_UNITS] =
	{0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
#else
static unsigned int adaptive_coalesce[MAX_UNITS] =
	{1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
#endif

#define RX_COAL_TK DEFAULT_RX_COALESCING_TICKS
static unsigned int rx_coalesce_ticks[MAX_UNITS] =
	{RX_COAL_TK,RX_COAL_TK,RX_COAL_TK,RX_COAL_TK,RX_COAL_TK,
	RX_COAL_TK, RX_COAL_TK,RX_COAL_TK,RX_COAL_TK,RX_COAL_TK,
	RX_COAL_TK,RX_COAL_TK, RX_COAL_TK,RX_COAL_TK,RX_COAL_TK,
	RX_COAL_TK};

#define RX_COAL_FM DEFAULT_RX_MAX_COALESCED_FRAMES
static unsigned int rx_max_coalesce_frames[MAX_UNITS] =
	{RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,
	RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,
	RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,RX_COAL_FM,
	RX_COAL_FM};

#define TX_COAL_TK DEFAULT_TX_COALESCING_TICKS
static unsigned int tx_coalesce_ticks[MAX_UNITS] =
	{TX_COAL_TK,TX_COAL_TK,TX_COAL_TK,TX_COAL_TK,TX_COAL_TK,
	TX_COAL_TK, TX_COAL_TK,TX_COAL_TK,TX_COAL_TK,TX_COAL_TK,
	TX_COAL_TK,TX_COAL_TK, TX_COAL_TK,TX_COAL_TK,TX_COAL_TK,
	TX_COAL_TK};

#define TX_COAL_FM DEFAULT_TX_MAX_COALESCED_FRAMES
static unsigned int tx_max_coalesce_frames[MAX_UNITS] =
	{TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,
	TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,
	TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,TX_COAL_FM,
	TX_COAL_FM};

#define ST_COAL_TK DEFAULT_STATS_COALESCING_TICKS
static unsigned int stats_coalesce_ticks[MAX_UNITS] =
	{ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,
	ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,
	ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,ST_COAL_TK,
	ST_COAL_TK,};

#endif
#ifdef BCM_WOL
static int enable_wol[MAX_UNITS] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};	/* Wake-on-LAN off by default */
#endif
#ifdef BCM_TSO
static int enable_tso[MAX_UNITS] = {1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};	/* TCP segmentation offload on */
#endif
#ifdef BCM_NIC_SEND_BD
static int nic_tx_bd[MAX_UNITS] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
#endif
#ifdef BCM_ASF
static int vlan_tag_mode[MAX_UNITS] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
#endif
static int delay_link[MAX_UNITS] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
static int disable_d3hot[MAX_UNITS] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};

#if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
static int disable_msi[MAX_UNITS] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
/* Nonzero when the host chipset is known to break MSI --
 * NOTE(review): set elsewhere in the driver; confirm at probe. */
static int bcm_msi_chipset_bug = 0;
#endif
138
/* Microseconds per timer tick at the kernel's HZ. */
#define BCM_TIMER_GRANULARITY  (1000000 / HZ)

/* Hack to hook the data path to the BCM WL driver */
#ifdef BCM_WL_EMULATOR
#include "bcmnvram.h"
#include "wl_bcm57emu.h"
#ifdef SKB_MANAGER
int skb_old_alloc = 0;
#endif
#endif /* BCM_WL_EMULATOR */

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (2*HZ)

/* pci_resource_start() appeared in 2.3.13; emulate it on older kernels. */
#if (LINUX_VERSION_CODE < 0x02030d)
#define pci_resource_start(dev, bar)	(dev->base_address[bar] & PCI_BASE_ADDRESS_MEM_MASK)
#elif (LINUX_VERSION_CODE < 0x02032b)
#define pci_resource_start(dev, bar)	(dev->resource[bar] & PCI_BASE_ADDRESS_MEM_MASK)
#endif
159
160#if (LINUX_VERSION_CODE < 0x02032b)
161#define dev_kfree_skb_irq(skb)  dev_kfree_skb(skb)
162#define netif_wake_queue(dev)	clear_bit(0, &dev->tbusy); mark_bh(NET_BH)
163#define netif_stop_queue(dev)	set_bit(0, &dev->tbusy)
164
165static inline void netif_start_queue(struct net_device *dev)
166{
167	dev->tbusy = 0;
168	dev->interrupt = 0;
169	dev->start = 1;
170}
171
172#define netif_queue_stopped(dev)	dev->tbusy
173#define netif_running(dev)		dev->start
174
175static inline void tasklet_schedule(struct tasklet_struct *tasklet)
176{
177	queue_task(tasklet, &tq_immediate);
178	mark_bh(IMMEDIATE_BH);
179}
180
181static inline void tasklet_init(struct tasklet_struct *tasklet,
182				void (*func)(unsigned long),
183				unsigned long data)
184{
185		tasklet->next = NULL;
186		tasklet->sync = 0;
187		tasklet->routine = (void (*)(void *))func;
188		tasklet->data = (void *)data;
189}
190
191#define tasklet_kill(tasklet)
192
193#endif
194
#if (LINUX_VERSION_CODE < 0x020300)
/*
 * Minimal stand-ins for the 2.3+ PCI driver model so the probe code
 * below compiles unchanged on 2.2 kernels.
 */
struct pci_device_id {
	unsigned int vendor, device;		/* Vendor and device ID or PCI_ANY_ID */
	unsigned int subvendor, subdevice;	/* Subsystem ID's or PCI_ANY_ID */
	unsigned int class, class_mask;		/* (class,subclass,prog-if) triplet */
	unsigned long driver_data;		/* Data private to the driver */
};

#define PCI_ANY_ID		0

/* No per-device driver data slot on 2.2: make these no-ops. */
#define pci_set_drvdata(pdev, dev)
#define pci_get_drvdata(pdev) 0

#define pci_enable_device(pdev) 0

#define __devinit		__init
#define __devinitdata		__initdata
#define __devexit

#define SET_MODULE_OWNER(dev)
#define MODULE_DEVICE_TABLE(pci, pci_tbl)

#endif
218
/* __devexit_p() was added in 2.4.17; older kernels take the raw pointer. */
#if (LINUX_VERSION_CODE < 0x020411)
#ifndef __devexit_p
#define __devexit_p(x)	x
#endif
#endif

/* Stub out MODULE_LICENSE on kernels that predate it. */
#ifndef MODULE_LICENSE
#define MODULE_LICENSE(license)
#endif

/* Old kernels: interrupt handlers return void and IRQ_RETVAL is a no-op. */
#ifndef IRQ_RETVAL
typedef void irqreturn_t;
#define IRQ_RETVAL(x)
#endif
233
234#if (LINUX_VERSION_CODE < 0x02032a)
235static inline void *pci_alloc_consistent(struct pci_dev *pdev, size_t size,
236					 dma_addr_t *dma_handle)
237{
238	void *virt_ptr;
239
240	/* Maximum in slab.c */
241	if (size > 131072)
242		return 0;
243
244	virt_ptr = kmalloc(size, GFP_KERNEL);
245	*dma_handle = virt_to_bus(virt_ptr);
246	return virt_ptr;
247}
248#define pci_free_consistent(dev, size, ptr, dma_ptr)	kfree(ptr)
249
250#endif /*#if (LINUX_VERSION_CODE < 0x02032a) */
251
252
/*
 * DMA mask constants.  Kernels before 2.4.13 (other than some Red Hat
 * 2.4.9+ trees) pass the mask as dma_addr_t, which may be only 32 bits
 * wide, so the "64-bit" mask is capped at 32 bits there.
 */
#if (LINUX_VERSION_CODE < 0x02040d)

#if (LINUX_VERSION_CODE >= 0x020409) && defined(RED_HAT_LINUX_KERNEL)

#define BCM_32BIT_DMA_MASK ((u64) 0x00000000ffffffffULL)
#define BCM_64BIT_DMA_MASK ((u64) 0xffffffffffffffffULL)

#else
/* pci_set_dma_mask is using dma_addr_t */

#define BCM_32BIT_DMA_MASK ((dma_addr_t) 0xffffffff)
#define BCM_64BIT_DMA_MASK ((dma_addr_t) 0xffffffff)

#endif

#else /* (LINUX_VERSION_CODE < 0x02040d) */

#define BCM_32BIT_DMA_MASK ((u64) 0x00000000ffffffffULL)
#define BCM_64BIT_DMA_MASK ((u64) 0xffffffffffffffffULL)
#endif
273
274#if (LINUX_VERSION_CODE < 0x020329)
275#define pci_set_dma_mask(pdev, mask) (0)
276#else
277#if (LINUX_VERSION_CODE < 0x020403)
278int
279pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask)
280{
281    if(! pci_dma_supported(dev, mask))
282        return -EIO;
283
284    dev->dma_mask = mask;
285
286    return 0;
287}
288#endif
289#endif
290
/* pci_set_consistent_dma_mask() first appeared in 2.5.71; pretend
 * success on older kernels. */
#if (LINUX_VERSION_CODE < 0x020547)
#define pci_set_consistent_dma_mask(pdev, mask) (0)
#endif

/* Region reservation did not exist before 2.4.2; pretend success. */
#if (LINUX_VERSION_CODE < 0x020402)
#define pci_request_regions(pdev, name) (0)
#define pci_release_regions(pdev)
#endif

/* Fallback for kernels that do not provide spin_is_locked(). */
#if !defined(spin_is_locked)
#define spin_is_locked(lock)    (test_bit(0,(lock)))
#endif
303
/*
 * Acquire/release the per-device global lock, but only when the device's
 * do_global_lock flag is set.  Wrapped in do { } while (0) so each macro
 * expands to a single statement and cannot misbind in an unbraced
 * if/else (the original bare `if` form had a dangling-else hazard).
 */
#define BCM5700_LOCK(pUmDevice, flags)					\
	do {								\
		if ((pUmDevice)->do_global_lock) {			\
			spin_lock_irqsave(&(pUmDevice)->global_lock, flags); \
		}							\
	} while (0)

#define BCM5700_UNLOCK(pUmDevice, flags)				\
	do {								\
		if ((pUmDevice)->do_global_lock) {			\
			spin_unlock_irqrestore(&(pUmDevice)->global_lock, flags); \
		}							\
	} while (0)
313
314inline void
315bcm5700_intr_lock(PUM_DEVICE_BLOCK pUmDevice)
316{
317	if (pUmDevice->do_global_lock) {
318		spin_lock(&pUmDevice->global_lock);
319	}
320}
321
322inline void
323bcm5700_intr_unlock(PUM_DEVICE_BLOCK pUmDevice)
324{
325	if (pUmDevice->do_global_lock) {
326		spin_unlock(&pUmDevice->global_lock);
327	}
328}
329
/*
 * Mask device interrupts and wait for any in-flight handler to finish.
 * intr_sem counts nested disables; bcm5700_intr_on() re-enables only
 * when the count drops back to zero.  The second LM_DisableInterrupt()
 * presumably closes the window where an already-running ISR re-enabled
 * interrupts before synchronize_irq() returned -- NOTE(review): confirm
 * against the ISR.
 */
void
bcm5700_intr_off(PUM_DEVICE_BLOCK pUmDevice)
{
	atomic_inc(&pUmDevice->intr_sem);
	LM_DisableInterrupt(&pUmDevice->lm_dev);
#if (LINUX_VERSION_CODE >= 0x2051c)
	synchronize_irq(pUmDevice->dev->irq);
#else
	synchronize_irq();
#endif
	LM_DisableInterrupt(&pUmDevice->lm_dev);
}
342
343void
344bcm5700_intr_on(PUM_DEVICE_BLOCK pUmDevice)
345{
346	if (atomic_dec_and_test(&pUmDevice->intr_sem)) {
347		LM_EnableInterrupt(&pUmDevice->lm_dev);
348	}
349}
350
351/*
352 * Broadcom NIC Extension support
353 * -ffan
354 */
#ifdef NICE_SUPPORT
#include "nicext.h"

/* VLAN tag layout used by the NICE ioctl extension interface. */
typedef struct {
	ushort  tag;
	ushort  signature;
} vlan_tag_t;

#endif /* NICE_SUPPORT */

/* Size of the per-packet wrapper; non-static so it is visible to the
 * shared driver core -- NOTE(review): confirm consumer in mm.h. */
int MM_Packet_Desc_Size = sizeof(UM_PACKET);
366
367#if defined(MODULE)
368MODULE_AUTHOR("Michael Chan <mchan at broadcom dot com> and Gary Zambrano <zambrano at broadcom dot com>");
369MODULE_DESCRIPTION("BCM5700 Driver");
370MODULE_LICENSE("GPL");
371
372#if (LINUX_VERSION_CODE < 0x020605)
373
374MODULE_PARM(debug, "i");
375MODULE_PARM(msglevel, "i");
376MODULE_PARM(line_speed, "1-" __MODULE_STRING(MAX_UNITS) "i");
377MODULE_PARM(auto_speed, "1-" __MODULE_STRING(MAX_UNITS) "i");
378MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
379MODULE_PARM(rx_flow_control, "1-" __MODULE_STRING(MAX_UNITS) "i");
380MODULE_PARM(tx_flow_control, "1-" __MODULE_STRING(MAX_UNITS) "i");
381MODULE_PARM(auto_flow_control, "1-" __MODULE_STRING(MAX_UNITS) "i");
382#if T3_JUMBO_RCV_RCB_ENTRY_COUNT
383MODULE_PARM(mtu, "1-" __MODULE_STRING(MAX_UNITS) "i");
384#endif
385MODULE_PARM(tx_checksum, "1-" __MODULE_STRING(MAX_UNITS) "i");
386MODULE_PARM(rx_checksum, "1-" __MODULE_STRING(MAX_UNITS) "i");
387MODULE_PARM(scatter_gather, "1-" __MODULE_STRING(MAX_UNITS) "i");
388MODULE_PARM(tx_pkt_desc_cnt, "1-" __MODULE_STRING(MAX_UNITS) "i");
389MODULE_PARM(rx_std_desc_cnt, "1-" __MODULE_STRING(MAX_UNITS) "i");
390#if T3_JUMBO_RCV_RCB_ENTRY_COUNT
391MODULE_PARM(rx_jumbo_desc_cnt, "1-" __MODULE_STRING(MAX_UNITS) "i");
392#endif
393#ifdef BCM_INT_COAL
394MODULE_PARM(adaptive_coalesce, "1-" __MODULE_STRING(MAX_UNITS) "i");
395MODULE_PARM(rx_coalesce_ticks, "1-" __MODULE_STRING(MAX_UNITS) "i");
396MODULE_PARM(rx_max_coalesce_frames, "1-" __MODULE_STRING(MAX_UNITS) "i");
397MODULE_PARM(tx_coalesce_ticks, "1-" __MODULE_STRING(MAX_UNITS) "i");
398MODULE_PARM(tx_max_coalesce_frames, "1-" __MODULE_STRING(MAX_UNITS) "i");
399MODULE_PARM(stats_coalesce_ticks, "1-" __MODULE_STRING(MAX_UNITS) "i");
400#endif
401#ifdef BCM_WOL
402MODULE_PARM(enable_wol, "1-" __MODULE_STRING(MAX_UNITS) "i");
403#endif
404#ifdef BCM_TSO
405MODULE_PARM(enable_tso, "1-" __MODULE_STRING(MAX_UNITS) "i");
406#endif
407#ifdef BCM_NIC_SEND_BD
408MODULE_PARM(nic_tx_bd, "1-" __MODULE_STRING(MAX_UNITS) "i");
409#endif
410#ifdef BCM_ASF
411MODULE_PARM(vlan_tag_mode, "1-" __MODULE_STRING(MAX_UNITS) "i");
412#endif
413MODULE_PARM(delay_link, "1-" __MODULE_STRING(MAX_UNITS) "i");
414MODULE_PARM(disable_d3hot, "1-" __MODULE_STRING(MAX_UNITS) "i");
415
416#if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
417MODULE_PARM(disable_msi, "1-" __MODULE_STRING(MAX_UNITS) "i");
418#endif
419
#else /* params */
421
/* module_param_array()'s third argument changed across 2.6: kernels
 * 2.6.5-2.6.9 want an int variable to receive the element count, while
 * 2.6.10+ accept NULL when the count is not needed.  "numvar" papers
 * over the difference. */
#if (LINUX_VERSION_CODE >= 0x020605) && (LINUX_VERSION_CODE < 0x02060a)

static int var;

#define numvar var

#endif

#if (LINUX_VERSION_CODE >= 0x2060a)

#define numvar NULL

#endif
435
436module_param_array(line_speed, int, numvar, 0);
437module_param_array(auto_speed, int, numvar, 0);
438module_param_array(full_duplex, int, numvar, 0);
439module_param_array(rx_flow_control, int, numvar, 0);
440module_param_array(tx_flow_control, int, numvar, 0);
441module_param_array(auto_flow_control, int, numvar, 0);
442#if T3_JUMBO_RCV_RCB_ENTRY_COUNT
443module_param_array(mtu, int, numvar, 0);
444#endif
445module_param_array(tx_checksum, int, numvar, 0);
446module_param_array(rx_checksum, int, numvar, 0);
447module_param_array(scatter_gather, int, numvar, 0);
448module_param_array(tx_pkt_desc_cnt, int, numvar, 0);
449module_param_array(rx_std_desc_cnt, int, numvar, 0);
450#if T3_JUMBO_RCV_RCB_ENTRY_COUNT
451module_param_array(rx_jumbo_desc_cnt, int, numvar, 0);
452#endif
453#ifdef BCM_INT_COAL
454module_param_array(adaptive_coalesce, int, numvar, 0);
455module_param_array(rx_coalesce_ticks, int, numvar, 0);
456module_param_array(rx_max_coalesce_frames, int, numvar, 0);
457module_param_array(tx_coalesce_ticks, int, numvar, 0);
458module_param_array(tx_max_coalesce_frames, int, numvar, 0);
459module_param_array(stats_coalesce_ticks, int, numvar, 0);
460#endif
461#ifdef BCM_WOL
462module_param_array(enable_wol, int, numvar, 0);
463#endif
464#ifdef BCM_TSO
465module_param_array(enable_tso, int, numvar, 0);
466#endif
467#ifdef BCM_NIC_SEND_BD
468module_param_array(nic_tx_bd, int, numvar, 0);
469#endif
470#ifdef BCM_ASF
471module_param_array(vlan_tag_mode, int, numvar, 0);
472#endif
473module_param_array(delay_link, int, numvar, 0);
474module_param_array(disable_d3hot, int, numvar, 0);
475
476#if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
477module_param_array(disable_msi, int, numvar, 0);
478#endif
479
480
481#endif /* params */
482
483
484#endif
485
/* Absolute jiffies value 'x' ticks from now (for timer expiry). */
#define RUN_AT(x) (jiffies + (x))

/* Kernel version string this module was built against. */
char kernel_version[] = UTS_RELEASE;

#define PCI_SUPPORT_VER2

/* Kernels without capabilities: fall back to the old suser() check. */
#if !defined(CAP_NET_ADMIN)
#define capable(CAP_XXX) (suser())
#endif
495
/* After macro expansion the variable below is literally named "debug",
 * matching the MODULE_PARM(debug, "i") declaration above. */
#define tigon3_debug debug
#if TIGON3_DEBUG
static int tigon3_debug = TIGON3_DEBUG;
#else
static int tigon3_debug = 0;
#endif
/* 0xdeadbeef is presumably a "not set by the user" sentinel --
 * NOTE(review): verify where msglevel is consumed. */
static int msglevel = 0xdeadbeef;
int b57_msg_level;
504
505int bcm5700_open(struct net_device *dev);
506STATIC void bcm5700_timer(unsigned long data);
507STATIC void bcm5700_stats_timer(unsigned long data);
508STATIC void bcm5700_reset(struct net_device *dev);
509STATIC int bcm5700_start_xmit(struct sk_buff *skb, struct net_device *dev);
510STATIC irqreturn_t bcm5700_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
511#ifdef BCM_TASKLET
512STATIC void bcm5700_tasklet(unsigned long data);
513#endif
514STATIC int bcm5700_close(struct net_device *dev);
515STATIC struct net_device_stats *bcm5700_get_stats(struct net_device *dev);
516STATIC int bcm5700_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
517STATIC void bcm5700_do_rx_mode(struct net_device *dev);
518STATIC void bcm5700_set_rx_mode(struct net_device *dev);
519STATIC int bcm5700_set_mac_addr(struct net_device *dev, void *p);
520#if T3_JUMBO_RCV_RCB_ENTRY_COUNT
521STATIC int bcm5700_change_mtu(struct net_device *dev, int new_mtu);
522#endif
523#ifdef BCM_NAPI_RXPOLL
524STATIC int bcm5700_poll(struct net_device *dev, int *budget);
525#endif
526STATIC int replenish_rx_buffers(PUM_DEVICE_BLOCK pUmDevice, int max);
527STATIC int bcm5700_freemem(struct net_device *dev);
528#ifdef NICE_SUPPORT
529STATIC int bcm5700_freemem2(UM_DEVICE_BLOCK *pUmDevice, int index);
530#endif
531#ifdef BCM_INT_COAL
532#ifndef BCM_NAPI_RXPOLL
533STATIC int bcm5700_adapt_coalesce(PUM_DEVICE_BLOCK pUmDevice);
534#endif
535#endif
536STATIC void bcm5700_set_vlan_mode(UM_DEVICE_BLOCK *pUmDevice);
537STATIC int bcm5700_init_counters(PUM_DEVICE_BLOCK pUmDevice);
538#ifdef BCM_VLAN
539STATIC void bcm5700_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp);
540STATIC void bcm5700_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid);
541#endif
542void bcm5700_shutdown(UM_DEVICE_BLOCK *pUmDevice);
543void bcm5700_free_remaining_rx_bufs(UM_DEVICE_BLOCK *pUmDevice);
544void bcm5700_validate_param_range(UM_DEVICE_BLOCK *pUmDevice, int *param,
545	char *param_name, int min, int max, int deflt);
546
547#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
548STATIC void poll_bcm5700(struct net_device *dev);
549#endif
550
551/* A list of all installed bcm5700 devices. */
552static struct net_device *root_tigon3_dev = NULL;
553
554#if defined(CONFIG_SPARC64) || defined(CONFIG_X86_64) ||defined(CONFIG_PPC64)
555
556#ifdef NICE_SUPPORT
557#if (LINUX_VERSION_CODE < 0x20500)
558extern int register_ioctl32_conversion(unsigned int cmd,
559	int (*handler)(unsigned int, unsigned int, unsigned long,
560	struct file *));
561int unregister_ioctl32_conversion(unsigned int cmd);
562#else
563#include <linux/ioctl32.h>
564#endif
565
566#define BCM_IOCTL32 1
567
568atomic_t bcm5700_load_count = ATOMIC_INIT(0);
569
570static int
571bcm5700_ioctl32(unsigned int fd, unsigned int cmd, unsigned long arg,
572	struct file *filep)
573{
574	struct ifreq rq;
575	struct net_device *tmp_dev = root_tigon3_dev;
576	int ret;
577	struct nice_req* nrq;
578	struct ifreq_nice32 {
579		char ifnr_name[16];
580		__u32 cmd;
581		__u32 nrq1;
582		__u32 nrq2;
583		__u32 nrq3;
584	} nrq32;
585
586	if (!capable(CAP_NET_ADMIN))
587		return -EPERM;
588
589	if (mm_copy_from_user(&nrq32, (char *) arg, 32))
590		return -EFAULT;
591
592	memcpy(rq.ifr_name, nrq32.ifnr_name, 16);
593
594	nrq = (struct nice_req*) &rq.ifr_ifru;
595	nrq->cmd = nrq32.cmd;
596	if (nrq->cmd == NICE_CMD_GET_STATS_BLOCK) {
597		nrq->nrq_stats_useraddr = (void *) ((__u64) nrq32.nrq1);
598		nrq->nrq_stats_size = nrq32.nrq2;
599	}
600	else {
601		memcpy(&nrq->nrq_speed, &nrq32.nrq1, 12);
602	}
603	while (tmp_dev) {
604		if (strcmp(rq.ifr_name, tmp_dev->name) == 0) {
605			ret = bcm5700_ioctl(tmp_dev, &rq, cmd);
606			if (ret == 0) {
607				if (nrq->cmd == NICE_CMD_GET_STATS_BLOCK)
608					return ret;
609
610				memcpy(&nrq32.nrq1, &nrq->nrq_speed, 12);
611				if (mm_copy_to_user((char *) arg, &nrq32, 32))
612					return -EFAULT;
613			}
614			return ret;
615		}
616		tmp_dev = ((UM_DEVICE_BLOCK *)(tmp_dev->priv))->next_module;
617	}
618	return -ENODEV;
619}
620#endif /* NICE_SUPPORT */
621#endif
622
/*
 * Board identifiers.  Used as the driver_data field of
 * bcm5700_pci_tbl[] and as the index into board_info[] below, so the
 * three must stay in the same order.
 */
typedef enum {
	BCM5700A6 = 0,
	BCM5700T6,
	BCM5700A9,
	BCM5700T9,
	BCM5700,
	BCM5701A5,
	BCM5701T1,
	BCM5701T8,
	BCM5701A7,
	BCM5701A10,
	BCM5701A12,
	BCM5701,
	BCM5702,
	BCM5703,
	BCM5703A31,
	BCM5703ARBUCKLE,
	TC996T,
	TC996ST,
	TC996SSX,
	TC996SX,
	TC996BT,
	TC997T,
	TC997SX,
	TC1000T,
	TC1000BT,
	TC940BR01,
	TC942BR01,
	TC998T,
	TC998SX,
	TC999T,
	NC6770,
	NC1020,
	NC150T,
	NC7760,
	NC7761,
	NC7770,
	NC7771,
	NC7780,
	NC7781,
	NC7772,
	NC7782,
	NC7783,
	NC320T,
	NC320I,
	NC325I,
	NC324I,
	NC326I,
	BCM5704CIOBE,
	BCM5704,
	BCM5704S,
	BCM5705,
	BCM5705M,
	BCM5705F,
	BCM5901,
	BCM5782,
	BCM5788,
	BCM5789,
	BCM5750,
	BCM5750M,
	BCM5720,
	BCM5751,
	BCM5751M,
	BCM5751F,
	BCM5721,
	BCM5753,
	BCM5753M,
	BCM5753F,
	BCM5781,
	BCM5752,
	BCM5752M,
	BCM5714,
	BCM5780,
	BCM5780S,
	BCM5715,
	BCM4785,
	BCM5903M,
	UNK5788
} board_t;
702
703
/* indexed by board_t, above -- entry order must match the enum exactly */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom BCM5700 1000Base-T" },
	{ "Broadcom BCM5700 1000Base-SX" },
	{ "Broadcom BCM5700 1000Base-SX" },
	{ "Broadcom BCM5700 1000Base-T" },
	{ "Broadcom BCM5700" },
	{ "Broadcom BCM5701 1000Base-T" },
	{ "Broadcom BCM5701 1000Base-T" },
	{ "Broadcom BCM5701 1000Base-T" },
	{ "Broadcom BCM5701 1000Base-SX" },
	{ "Broadcom BCM5701 1000Base-T" },
	{ "Broadcom BCM5701 1000Base-T" },
	{ "Broadcom BCM5701" },
	{ "Broadcom BCM5702 1000Base-T" },
	{ "Broadcom BCM5703 1000Base-T" },
	{ "Broadcom BCM5703 1000Base-SX" },
	{ "Broadcom B5703 1000Base-SX" },
	{ "3Com 3C996 10/100/1000 Server NIC" },
	{ "3Com 3C996 10/100/1000 Server NIC" },
	{ "3Com 3C996 Gigabit Fiber-SX Server NIC" },
	{ "3Com 3C996 Gigabit Fiber-SX Server NIC" },
	{ "3Com 3C996B Gigabit Server NIC" },
	{ "3Com 3C997 Gigabit Server NIC" },
	{ "3Com 3C997 Gigabit Fiber-SX Server NIC" },
	{ "3Com 3C1000 Gigabit NIC" },
	{ "3Com 3C1000B-T 10/100/1000 PCI" },
	{ "3Com 3C940 Gigabit LOM (21X21)" },
	{ "3Com 3C942 Gigabit LOM (31X31)" },
	{ "3Com 3C998-T Dual Port 10/100/1000 PCI-X Server NIC" },
	{ "3Com 3C998-SX Dual Port 1000-SX PCI-X Server NIC" },
	{ "3Com 3C999-T Quad Port 10/100/1000 PCI-X Server NIC" },
	{ "HP NC6770 Gigabit Server Adapter" },
	{ "NC1020 HP ProLiant Gigabit Server Adapter 32 PCI" },
	{ "HP ProLiant NC 150T PCI 4-port Gigabit Combo Switch Adapter" },
	{ "HP NC7760 Gigabit Server Adapter" },
	{ "HP NC7761 Gigabit Server Adapter" },
	{ "HP NC7770 Gigabit Server Adapter" },
	{ "HP NC7771 Gigabit Server Adapter" },
	{ "HP NC7780 Gigabit Server Adapter" },
	{ "HP NC7781 Gigabit Server Adapter" },
	{ "HP NC7772 Gigabit Server Adapter" },
	{ "HP NC7782 Gigabit Server Adapter" },
	{ "HP NC7783 Gigabit Server Adapter" },
	{ "HP ProLiant NC 320T PCI Express Gigabit Server Adapter" },
	{ "HP ProLiant NC 320i PCI Express Gigabit Server Adapter" },
	{ "HP NC325i Integrated Dual Port PCI Express Gigabit Server Adapter" },
	{ "HP NC324i Integrated Dual Port PCI Express Gigabit Server Adapter" },
	{ "HP NC326i Integrated Dual Port PCI Express Gigabit Server Adapter" },
	{ "Broadcom BCM5704 CIOB-E 1000Base-T" },
	{ "Broadcom BCM5704 1000Base-T" },
	{ "Broadcom BCM5704 1000Base-SX" },
	{ "Broadcom BCM5705 1000Base-T" },
	{ "Broadcom BCM5705M 1000Base-T" },
	{ "Broadcom 570x 10/100 Integrated Controller" },
	{ "Broadcom BCM5901 100Base-TX" },
	{ "Broadcom NetXtreme Gigabit Ethernet for hp" },
	{ "Broadcom BCM5788 NetLink 1000Base-T" },
	{ "Broadcom BCM5789 NetLink 1000Base-T PCI Express" },
	{ "Broadcom BCM5750 1000Base-T PCI" },
	{ "Broadcom BCM5750M 1000Base-T PCI" },
	{ "Broadcom BCM5720 1000Base-T PCI" },
	{ "Broadcom BCM5751 1000Base-T PCI Express" },
	{ "Broadcom BCM5751M 1000Base-T PCI Express" },
	{ "Broadcom BCM5751F 100Base-TX PCI Express" },
	{ "Broadcom BCM5721 1000Base-T PCI Express" },
	{ "Broadcom BCM5753 1000Base-T PCI Express" },
	{ "Broadcom BCM5753M 1000Base-T PCI Express" },
	{ "Broadcom BCM5753F 100Base-TX PCI Express" },
	{ "Broadcom BCM5781 NetLink 1000Base-T PCI Express" },
	{ "Broadcom BCM5752 1000Base-T PCI Express" },
	{ "Broadcom BCM5752M 1000Base-T PCI Express" },
	{ "Broadcom BCM5714 1000Base-T " },
	{ "Broadcom BCM5780 1000Base-T" },
	{ "Broadcom BCM5780S 1000Base-SX" },
	{ "Broadcom BCM5715 1000Base-T " },
	{ "Broadcom BCM4785 10/100/1000 Integrated Controller" },
	{ "Broadcom BCM5903M Gigabit Ethernet " },
	{ "Unknown BCM5788 Gigabit Ethernet " },
	{ 0 }
	};
787
/*
 * PCI match table: {vendor, device, subvendor, subdevice, class,
 * class_mask, driver_data(board_t)}.  Ordering matters: rows with
 * specific subsystem IDs must precede the PCI_ANY_ID catch-all row for
 * the same device ID, since matching stops at the first hit.
 */
static struct pci_device_id bcm5700_pci_tbl[] __devinitdata = {
	{0x14e4, 0x1644, 0x14e4, 0x1644, 0, 0, BCM5700A6 },
	{0x14e4, 0x1644, 0x14e4, 0x2, 0, 0, BCM5700T6 },
	{0x14e4, 0x1644, 0x14e4, 0x3, 0, 0, BCM5700A9 },
	{0x14e4, 0x1644, 0x14e4, 0x4, 0, 0, BCM5700T9 },
	{0x14e4, 0x1644, 0x1028, 0xd1, 0, 0, BCM5700 },
	{0x14e4, 0x1644, 0x1028, 0x0106, 0, 0, BCM5700 },
	{0x14e4, 0x1644, 0x1028, 0x0109, 0, 0, BCM5700 },
	{0x14e4, 0x1644, 0x1028, 0x010a, 0, 0, BCM5700 },
	{0x14e4, 0x1644, 0x10b7, 0x1000, 0, 0, TC996T },
	{0x14e4, 0x1644, 0x10b7, 0x1001, 0, 0, TC996ST },
	{0x14e4, 0x1644, 0x10b7, 0x1002, 0, 0, TC996SSX },
	{0x14e4, 0x1644, 0x10b7, 0x1003, 0, 0, TC997T },
	{0x14e4, 0x1644, 0x10b7, 0x1005, 0, 0, TC997SX },
	{0x14e4, 0x1644, 0x10b7, 0x1008, 0, 0, TC942BR01 },
	{0x14e4, 0x1644, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5700 },
	{0x14e4, 0x1645, 0x14e4, 1, 0, 0, BCM5701A5 },
	{0x14e4, 0x1645, 0x14e4, 5, 0, 0, BCM5701T1 },
	{0x14e4, 0x1645, 0x14e4, 6, 0, 0, BCM5701T8 },
	{0x14e4, 0x1645, 0x14e4, 7, 0, 0, BCM5701A7 },
	{0x14e4, 0x1645, 0x14e4, 8, 0, 0, BCM5701A10 },
	{0x14e4, 0x1645, 0x14e4, 0x8008, 0, 0, BCM5701A12 },
	{0x14e4, 0x1645, 0x0e11, 0xc1, 0, 0, NC6770 },
	{0x14e4, 0x1645, 0x0e11, 0x7c, 0, 0, NC7770 },
	{0x14e4, 0x1645, 0x0e11, 0x85, 0, 0, NC7780 },
	{0x14e4, 0x1645, 0x1028, 0x0121, 0, 0, BCM5701 },
	{0x14e4, 0x1645, 0x10b7, 0x1004, 0, 0, TC996SX },
	{0x14e4, 0x1645, 0x10b7, 0x1006, 0, 0, TC996BT },
	{0x14e4, 0x1645, 0x10b7, 0x1007, 0, 0, TC1000T },
	{0x14e4, 0x1645, 0x10b7, 0x1008, 0, 0, TC940BR01 },
	{0x14e4, 0x1645, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5701 },
	{0x14e4, 0x1646, 0x14e4, 0x8009, 0, 0, BCM5702 },
	{0x14e4, 0x1646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5702 },
	{0x14e4, 0x16a6, 0x14e4, 0x8009, 0, 0, BCM5702 },
	{0x14e4, 0x16a6, 0x14e4, 0x000c, 0, 0, BCM5702 },
	{0x14e4, 0x16a6, 0x0e11, 0xbb, 0, 0, NC7760 },
	{0x14e4, 0x16a6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5702 },
	{0x14e4, 0x16c6, 0x10b7, 0x1100, 0, 0, TC1000BT },
	{0x14e4, 0x16c6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5702 },
	{0x14e4, 0x1647, 0x14e4, 0x0009, 0, 0, BCM5703 },
	{0x14e4, 0x1647, 0x14e4, 0x000a, 0, 0, BCM5703A31 },
	{0x14e4, 0x1647, 0x14e4, 0x000b, 0, 0, BCM5703 },
	{0x14e4, 0x1647, 0x14e4, 0x800a, 0, 0, BCM5703 },
	{0x14e4, 0x1647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5703 },
	{0x14e4, 0x16a7, 0x14e4, 0x0009, 0, 0, BCM5703 },
	{0x14e4, 0x16a7, 0x14e4, 0x000a, 0, 0, BCM5703A31 },
	{0x14e4, 0x16a7, 0x14e4, 0x000b, 0, 0, BCM5703 },
	{0x14e4, 0x16a7, 0x14e4, 0x800a, 0, 0, BCM5703 },
	{0x14e4, 0x16a7, 0x0e11, 0xca, 0, 0, NC7771 },
	{0x14e4, 0x16a7, 0x0e11, 0xcb, 0, 0, NC7781 },
	{0x14e4, 0x16a7, 0x1014, 0x0281, 0, 0, BCM5703ARBUCKLE },
	{0x14e4, 0x16a7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5703 },
	{0x14e4, 0x16c7, 0x14e4, 0x000a, 0, 0, BCM5703A31 },
	{0x14e4, 0x16c7, 0x0e11, 0xca, 0, 0, NC7771 },
	{0x14e4, 0x16c7, 0x0e11, 0xcb, 0, 0, NC7781 },
	{0x14e4, 0x16c7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5703 },
	{0x14e4, 0x1648, 0x0e11, 0xcf, 0, 0, NC7772 },
	{0x14e4, 0x1648, 0x0e11, 0xd0, 0, 0, NC7782 },
	{0x14e4, 0x1648, 0x0e11, 0xd1, 0, 0, NC7783 },
	{0x14e4, 0x1648, 0x10b7, 0x2000, 0, 0, TC998T },
	{0x14e4, 0x1648, 0x10b7, 0x3000, 0, 0, TC999T },
	{0x14e4, 0x1648, 0x1166, 0x1648, 0, 0, BCM5704CIOBE },
	{0x14e4, 0x1648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5704 },
	{0x14e4, 0x1649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5704S },
	{0x14e4, 0x16a8, 0x14e4, 0x16a8, 0, 0, BCM5704S },
	{0x14e4, 0x16a8, 0x10b7, 0x2001, 0, 0, TC998SX },
	{0x14e4, 0x16a8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5704S },
	{0x14e4, 0x1653, 0x0e11, 0x00e3, 0, 0, NC7761 },
	{0x14e4, 0x1653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5705 },
	{0x14e4, 0x1654, 0x0e11, 0x00e3, 0, 0, NC7761 },
	{0x14e4, 0x1654, 0x103c, 0x3100, 0, 0, NC1020 },
	{0x14e4, 0x1654, 0x103c, 0x3226, 0, 0, NC150T },
	{0x14e4, 0x1654, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5705 },
	{0x14e4, 0x165d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5705M },
	{0x14e4, 0x165e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5705M },
	{0x14e4, 0x166e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5705F },
	{0x14e4, 0x1696, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5782 },
	{0x14e4, 0x169c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5788 },
	{0x14e4, 0x169d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5789 },
	{0x14e4, 0x170d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5901 },
	{0x14e4, 0x170e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5901 },
	{0x14e4, 0x1676, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5750 },
	{0x14e4, 0x167c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5750M },
	{0x14e4, 0x1677, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5751 },
	{0x14e4, 0x167d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5751M },
	{0x14e4, 0x167e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5751F },
	{0x14e4, 0x1658, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5720 },
	{0x14e4, 0x1659, 0x103c, 0x7031, 0, 0, NC320T },
	{0x14e4, 0x1659, 0x103c, 0x7032, 0, 0, NC320T },
	{0x14e4, 0x166a, 0x103c, 0x7035, 0, 0, NC325I },
	{0x14e4, 0x166b, 0x103c, 0x7036, 0, 0, NC325I },
	{0x14e4, 0x1668, 0x103c, 0x7039, 0, 0, NC324I },
	{0x14e4, 0x1669, 0x103c, 0x703a, 0, 0, NC324I },
	{0x14e4, 0x1678, 0x103c, 0x703e, 0, 0, NC326I },
	{0x14e4, 0x1679, 0x103c, 0x703c, 0, 0, NC326I },
	{0x14e4, 0x1659, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5721 },
	{0x14e4, 0x16f7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5753 },
	{0x14e4, 0x16fd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5753M },
	{0x14e4, 0x16fe, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5753F },
	{0x14e4, 0x16dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5781 },
	{0x14e4, 0x1600, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5752 },
	{0x14e4, 0x1601, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5752M },
	{0x14e4, 0x1668, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5714 },
	{0x14e4, 0x166a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5780 },
	{0x14e4, 0x166b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5780S },
	{0x14e4, 0x1678, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5715 },
	{0x14e4, 0x471f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM4785 },
	{0x14e4, 0x16ff, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5903M },
	{0x173b, 0x03ed, PCI_ANY_ID, PCI_ANY_ID, 0, 0, UNK5788 },
	{0,}
	};
899
900MODULE_DEVICE_TABLE(pci, bcm5700_pci_tbl);
901
902#ifdef BCM_PROC_FS
903extern int bcm5700_proc_create(void);
904extern int bcm5700_proc_create_dev(struct net_device *dev);
905extern int bcm5700_proc_remove_dev(struct net_device *dev);
906extern int bcm5700_proc_remove_notifier(void);
907#endif
908
#if (LINUX_VERSION_CODE >= 0x2060a)
	/* AMD-762 northbridge ID table, used with pci_dev_present() to detect
	 * the chipset whose posted-write reordering requires the flush
	 * workaround applied in bcm5700_init_board(). */
	static struct pci_device_id pci_AMD762id[]={
		{ PCI_DEVICE(PCI_VENDOR_ID_AMD,
			PCI_DEVICE_ID_AMD_FE_GATE_700C) },
		{ }
	};
#endif
916
/* Index of the most recently attached SiliconBackplane GigE core
 * (BCM4785 case); -1 until the first core is initialized. */
static int sbgige = -1;
918
919/*******************************************************************************
920 *******************************************************************************
921*/
922
923int get_csum_flag(LM_UINT32 ChipRevId)
924{
925        return NETIF_F_IP_CSUM;
926}
927
928/*******************************************************************************
929 *******************************************************************************
930
931   This function returns true if the device passed to it is attached to an
932   ICH-ICH4. If the chip is not attached to an ICH, or is attached to an ICH5
933   or newer, it returns false.
934
   This function determines which bridge it is attached to by scanning the pci
   bus looking for bridge chips (hdr_type=1). When a bridge chip is detected,
   the bridge's subordinate's secondary bus number is compared with this
   device's bus number. If they match, then the device is attached to this
939   bridge. The bridge's device id is compared to a list of known device ids for
940   ICH-ICH4. Since many older ICH's (ICH2-ICH7) share the same device id, the
941   chip revision must also be checked to determine if the chip is older than an
942   ICH5.
943
   To scan the bus, one of two functions is used depending on the kernel
   version. For 2.4 kernels, the pci_find_device function is used. This
   function has been deprecated in the 2.6 kernel and replaced with the
   function pci_get_device. The macro walk_pci_bus determines which function to
   use when the driver is built.
949*/
950
/* Bus-walk helpers: 2.6.10+ kernels use the refcounted pci_get_device()
 * (so an early exit from the walk must drop the held reference with
 * pci_dev_put()); older kernels use the non-refcounted pci_find_device()
 * and need no cleanup. */
#if (LINUX_VERSION_CODE >= 0x2060a)
#define walk_pci_bus(d)		while ((d = pci_get_device( \
					PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)

#define unwalk_pci_bus(d)	pci_dev_put(d)

#else
#define walk_pci_bus(d)		while ((d = pci_find_device( \
					PCI_ANY_ID, PCI_ANY_ID, d)) != NULL)
#define unwalk_pci_bus(d)

#endif

/* PCI revision IDs below this value identify an ICH older than ICH5. */
#define ICH5_CHIP_VERSION	0xc0
965
/* Intel ICH-ICH4 I/O hub IDs; a device behind one of these bridges (with a
 * pre-ICH5 revision, see ICH5_CHIP_VERSION) gets the UNDI_FIX_FLAG
 * workaround.  Remaining pci_device_id fields are implicitly zero. */
static struct pci_device_id pci_ICHtable[] = {
	{0x8086, 0x2418}, /* PCI_DEVICE_ID_INTEL_82801AA_8  */
	{0x8086, 0x2428}, /* PCI_DEVICE_ID_INTEL_82801AB_8  */
	{0x8086, 0x244e}, /* PCI_DEVICE_ID_INTEL_82801BA_6  */
	{0x8086, 0x2448}, /* PCI_DEVICE_ID_INTEL_82801BA_11 */
	{0, 0}
};
973
/*
 * Return 1 if @pdev sits behind an Intel ICH-ICH4 bridge, 0 otherwise
 * (see the block comment above for the full rationale).  Walks every PCI
 * device looking for a bridge (hdr_type == 1) whose secondary bus number
 * matches @pdev's bus, then matches that bridge against pci_ICHtable.
 */
int attached_to_ICH4_or_older( struct pci_dev *pdev)
{
	struct pci_dev *tmp_pdev = NULL;
	struct pci_device_id *ich_table;
	u8 chip_rev;

	walk_pci_bus (tmp_pdev) {
		/* A bridge whose secondary bus equals our bus is our parent. */
		if ((tmp_pdev->hdr_type == 1) &&
		   (tmp_pdev->subordinate != NULL) &&
		   (tmp_pdev->subordinate->secondary == pdev->bus->number)) {

			ich_table = pci_ICHtable;

			while (ich_table->vendor) {
				if ((ich_table->vendor == tmp_pdev->vendor) &&
				    (ich_table->device == tmp_pdev->device)) {

					pci_read_config_byte( tmp_pdev,
						PCI_REVISION_ID, &chip_rev);

					/* IDs are shared by ICH2-ICH7; only
					 * pre-ICH5 revisions qualify. */
					if (chip_rev < ICH5_CHIP_VERSION) {
						/* drop the walk's device ref
						 * (no-op on 2.4 kernels) */
						unwalk_pci_bus( tmp_pdev);
						return 1;
					}
				}
				ich_table++;
			}
		}
	}
	return 0;
}
1005
/*
 * bcm5700_init_board() - allocate a net_device and do the PCI/board-level
 * initialization for one adapter.
 *
 * Allocates the net_device (with the UM device block as priv), enables the
 * PCI device, sets up DMA masks, links the device into the driver's global
 * list, applies chipset/platform workarounds (SB core on BCM4785, ICH UNDI
 * fix, AMD-762 posted writes), reads adapter info, and optionally attaches
 * the robo switch.  On success *dev_out receives the new device and 0 is
 * returned; on failure a negative errno is returned and everything
 * allocated here is torn down.
 */
static int
__devinit bcm5700_init_board(struct pci_dev *pdev, struct net_device **dev_out, int board_idx)
{
	struct net_device *dev;
	PUM_DEVICE_BLOCK pUmDevice;
	PLM_DEVICE_BLOCK pDevice;
	bool rgmii = FALSE;
	sb_t *sbh = NULL;
	int rc;

	*dev_out = NULL;

	/* dev zeroed in init_etherdev */
#if (LINUX_VERSION_CODE >= 0x20600)
	dev = alloc_etherdev(sizeof(*pUmDevice));
#else
	dev = init_etherdev(NULL, sizeof(*pUmDevice));
#endif
	if (dev == NULL) {
		printk(KERN_ERR "%s: unable to alloc new ethernet\n", bcm5700_driver);
		return -ENOMEM;
	}
	SET_MODULE_OWNER(dev);
#if (LINUX_VERSION_CODE >= 0x20600)
	SET_NETDEV_DEV(dev, &pdev->dev);
#endif
	pUmDevice = (PUM_DEVICE_BLOCK) dev->priv;

	/* enable device (incl. PCI PM wakeup), and bus-mastering */
	rc = pci_enable_device(pdev);
	if (rc)
		goto err_out;

	/* init core specific stuff */
	if (pdev->device == T3_PCI_DEVICE_ID(T3_PCI_ID_BCM471F)) {
		/* GbE core on the SiliconBackplane (BCM4785 SoC) */
		sbh = sb_kattach(SB_OSH);
		sb_gige_init(sbh, ++sbgige, &rgmii);
	}

	rc = pci_request_regions(pdev, bcm5700_driver);
	if (rc) {
		/* On the SB core the regions may legitimately be held by the
		 * SMBus driver; continue anyway in that case. */
		if (!sbh)
			goto err_out;
		printk(KERN_INFO "bcm5700_init_board: pci_request_regions returned error %d\n"
				 "This may be because the region is already requested by"
				 " the SMBus driver. Ignore the PCI error messages.\n", rc);
	}

	pci_set_master(pdev);

	/* Prefer 64-bit DMA; fall back to 32-bit, else fail. */
	if (pci_set_dma_mask(pdev, BCM_64BIT_DMA_MASK) == 0) {
		pUmDevice->using_dac = 1;
		if (pci_set_consistent_dma_mask(pdev, BCM_64BIT_DMA_MASK) != 0) {
			printk(KERN_ERR "pci_set_consistent_dma_mask failed\n");
			pci_release_regions(pdev);
			goto err_out;
		}
	} else if (pci_set_dma_mask(pdev, BCM_32BIT_DMA_MASK) == 0) {
		pUmDevice->using_dac = 0;
	} else {
		printk(KERN_ERR "System does not support DMA\n");
		pci_release_regions(pdev);
		goto err_out;
	}

	/* Push this device onto the driver's global singly-linked list. */
	pUmDevice->dev = dev;
	pUmDevice->pdev = pdev;
	pUmDevice->mem_list_num = 0;
	pUmDevice->next_module = root_tigon3_dev;
	pUmDevice->index = board_idx;
	pUmDevice->sbh = (void *)sbh;
	root_tigon3_dev = dev;

	spin_lock_init(&pUmDevice->global_lock);

	spin_lock_init(&pUmDevice->undi_lock);

	spin_lock_init(&pUmDevice->phy_lock);

	pDevice = &pUmDevice->lm_dev;
	pDevice->Flags = 0;
	pDevice->FunctNum = PCI_FUNC(pUmDevice->pdev->devfn);
	pUmDevice->boardflags = getintvar(NULL, "boardflags");
	if (sbh) {
		/* SB-core specific flags from NVRAM board configuration. */
		if (pUmDevice->boardflags & BFL_ENETROBO)
			pDevice->Flags |= ROBO_SWITCH_FLAG;
		pDevice->Flags |= rgmii ? RGMII_MODE_FLAG : 0;
		if (sb_chip(sbh) == BCM4785_CHIP_ID && sb_chiprev(sbh) < 2)
			pDevice->Flags |= ONE_DMA_AT_ONCE_FLAG;
		pDevice->Flags |= SB_CORE_FLAG;
		if (sb_chip(sbh) == BCM4785_CHIP_ID)
			pDevice->Flags |= FLUSH_POSTED_WRITE_FLAG;
	}

#if T3_JUMBO_RCV_RCB_ENTRY_COUNT
	if (board_idx < MAX_UNITS) {
		/* Clamp the module-parameter MTU into [1500, 9000]. */
		bcm5700_validate_param_range(pUmDevice, &mtu[board_idx], "mtu", 1500, 9000, 1500);
		dev->mtu = mtu[board_idx];
	}
#endif

	if (attached_to_ICH4_or_older(pdev)) {
		pDevice->Flags |= UNDI_FIX_FLAG;
	}

#if (LINUX_VERSION_CODE >= 0x2060a)
	if (pci_dev_present(pci_AMD762id)) {
		pDevice->Flags |= FLUSH_POSTED_WRITE_FLAG;
		pDevice->Flags &= ~NIC_SEND_BD_FLAG;
	}
#else
	if (pci_find_device(0x1022, 0x700c, NULL)) {
		/* AMD762 writes I/O out of order */
		/* Setting bit 1 in 762's register 0x4C still doesn't work */
		/* in all cases */
		pDevice->Flags |= FLUSH_POSTED_WRITE_FLAG;
		pDevice->Flags &= ~NIC_SEND_BD_FLAG;
	}
#endif
	if (LM_GetAdapterInfo(pDevice) != LM_STATUS_SUCCESS) {
		rc = -ENODEV;
		goto err_out_unmap;
	}

	if (pDevice->Flags & ROBO_SWITCH_FLAG) {
		robo_info_t	*robo;

		/* NOTE: earlier failures jump forward to the robo_fail label
		 * inside the last if-body below; all paths detach the robo
		 * and bail out with -ENODEV. */
		if ((robo = bcm_robo_attach(sbh, pDevice, NULL,
		                            robo_miird, robo_miiwr)) == NULL) {
			B57_ERR(("robo_setup: failed to attach robo switch \n"));
			goto robo_fail;
		}

		if (bcm_robo_enable_device(robo)) {
			B57_ERR(("robo_setup: failed to enable robo switch \n"));
			goto robo_fail;
		}

		/* Configure the switch to do VLAN */
		if ((pUmDevice->boardflags & BFL_ENETVLAN) && bcm_robo_config_vlan(robo)) {
			B57_ERR(("robo_setup: robo_config_vlan failed\n"));
			goto robo_fail;
		}

		/* Enable the switch */
		if (bcm_robo_enable_switch(robo)) {
			B57_ERR(("robo_setup: robo_enable_switch failed\n"));
robo_fail:
			bcm_robo_detach(robo);
			rc = -ENODEV;
			goto err_out_unmap;
		}
		pUmDevice->robo = (void *)robo;
	}

	/*
	 * We are lucky there is nothing special being keyed on PCI
	 * device ID BCM5750 (0x1676) inside LM_GetAdapterInfo(),
	 * otherwise we would have to modify the function to do the
	 * same thing for BCM471F (BCM5750 as a SB core in BCM4785).
	 *
	 * Apply MAC address.
	 */
	if (sbh) {
		/* MAC address comes from NVRAM variable et<N>macaddr. */
		char etXmacaddr[] = "etXXXXmacaddr";
		sprintf(etXmacaddr, "et%umacaddr", sbgige);
		bcm_ether_atoe(getvar(NULL, etXmacaddr),
		               (struct ether_addr *)pDevice->NodeAddress);
		LM_SetMacAddress(pDevice, pDevice->NodeAddress);
	}

	if ((pDevice->Flags & JUMBO_CAPABLE_FLAG) == 0) {
		if (dev->mtu > 1500) {
			dev->mtu = 1500;
			printk(KERN_WARNING
			       "%s-%d: Jumbo mtu sizes not supported, using mtu=1500\n",
			       bcm5700_driver, pUmDevice->index);
		}
	}

	pUmDevice->do_global_lock = 0;
	if (T3_ASIC_REV(pUmDevice->lm_dev.ChipRevId) == T3_ASIC_REV_5700) {
		/* The 5700 chip works best without interleaved register */
		/* accesses on certain machines. */
		pUmDevice->do_global_lock = 1;
	}

	/* 5701 on a real PCI-X bus can take unaligned rx buffers. */
	if ((T3_ASIC_REV(pUmDevice->lm_dev.ChipRevId) == T3_ASIC_REV_5701) &&
		((pDevice->PciState & T3_PCI_STATE_NOT_PCI_X_BUS) == 0)) {

		pUmDevice->rx_buf_align = 0;
	} else {
		pUmDevice->rx_buf_align = 2;
	}
	dev->mem_start = pci_resource_start(pdev, 0);
	dev->mem_end = dev->mem_start + sizeof(T3_STD_MEM_MAP);
	dev->irq = pdev->irq;

	*dev_out = dev;
	return 0;

err_out_unmap:
	pci_release_regions(pdev);
	bcm5700_freemem(dev);

err_out:
#if (LINUX_VERSION_CODE < 0x020600)
	unregister_netdev(dev);
	kfree(dev);
#else
	free_netdev(dev);
#endif
	return rc;
}
1220
/*
 * Print the one-time driver banner: name, optional NICE-support notice,
 * version and release date.  The unprefixed printk calls continue the
 * KERN_INFO line started by the first call.  Always returns 0.
 */
static int __devinit
bcm5700_print_ver(void)
{
	printk(KERN_INFO "Broadcom Gigabit Ethernet Driver %s ",
		bcm5700_driver);
#ifdef NICE_SUPPORT
	printk("with Broadcom NIC Extension (NICE) ");
#endif
	printk("ver. %s %s\n", bcm5700_version, bcm5700_date);
	return 0;
}
1232
1233static int __devinit
1234bcm5700_init_one(struct pci_dev *pdev,
1235				       const struct pci_device_id *ent)
1236{
1237	struct net_device *dev = NULL;
1238	PUM_DEVICE_BLOCK pUmDevice;
1239	PLM_DEVICE_BLOCK pDevice;
1240	int i;
1241	static int board_idx = -1;
1242	static int printed_version = 0;
1243	struct pci_dev *pci_dev;
1244
1245	board_idx++;
1246
1247	if (!printed_version) {
1248		bcm5700_print_ver();
1249#ifdef BCM_PROC_FS
1250		bcm5700_proc_create();
1251#endif
1252		printed_version = 1;
1253	}
1254
1255	i = bcm5700_init_board(pdev, &dev, board_idx);
1256	if (i < 0) {
1257		return i;
1258	}
1259
1260	if (dev == NULL)
1261		return -ENOMEM;
1262
1263#ifdef BCM_IOCTL32
1264	if (atomic_read(&bcm5700_load_count) == 0) {
1265		register_ioctl32_conversion(SIOCNICE, bcm5700_ioctl32);
1266	}
1267	atomic_inc(&bcm5700_load_count);
1268#endif
1269	dev->open = bcm5700_open;
1270	dev->hard_start_xmit = bcm5700_start_xmit;
1271	dev->stop = bcm5700_close;
1272	dev->get_stats = bcm5700_get_stats;
1273	dev->set_multicast_list = bcm5700_set_rx_mode;
1274	dev->do_ioctl = bcm5700_ioctl;
1275	dev->set_mac_address = &bcm5700_set_mac_addr;
1276#if T3_JUMBO_RCV_RCB_ENTRY_COUNT
1277	dev->change_mtu = &bcm5700_change_mtu;
1278#endif
1279#if (LINUX_VERSION_CODE >= 0x20400)
1280	dev->tx_timeout = bcm5700_reset;
1281	dev->watchdog_timeo = TX_TIMEOUT;
1282#endif
1283#ifdef BCM_VLAN
1284	dev->vlan_rx_register = &bcm5700_vlan_rx_register;
1285	dev->vlan_rx_kill_vid = &bcm5700_vlan_rx_kill_vid;
1286#endif
1287#ifdef BCM_NAPI_RXPOLL
1288	dev->poll = bcm5700_poll;
1289	dev->weight = 64;
1290#endif
1291
1292	pUmDevice = (PUM_DEVICE_BLOCK) dev->priv;
1293	pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
1294
1295	dev->base_addr = pci_resource_start(pdev, 0);
1296	dev->irq = pdev->irq;
1297#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
1298	dev->poll_controller = poll_bcm5700;
1299#endif
1300
1301#if (LINUX_VERSION_CODE >= 0x20600)
1302	if ((i = register_netdev(dev))) {
1303		printk(KERN_ERR "%s: Cannot register net device\n",
1304			bcm5700_driver);
1305		if (pUmDevice->lm_dev.pMappedMemBase)
1306			iounmap(pUmDevice->lm_dev.pMappedMemBase);
1307		pci_release_regions(pdev);
1308		bcm5700_freemem(dev);
1309		free_netdev(dev);
1310		return i;
1311	}
1312#endif
1313
1314
1315	pci_set_drvdata(pdev, dev);
1316
1317	memcpy(dev->dev_addr, pDevice->NodeAddress, 6);
1318	pUmDevice->name = board_info[ent->driver_data].name,
1319	printk(KERN_INFO "%s: %s found at mem %lx, IRQ %d, ",
1320		dev->name, pUmDevice->name, dev->base_addr,
1321		dev->irq);
1322	printk("node addr ");
1323	for (i = 0; i < 6; i++) {
1324		printk("%2.2x", dev->dev_addr[i]);
1325	}
1326	printk("\n");
1327
1328	printk(KERN_INFO "%s: ", dev->name);
1329	if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5400_PHY_ID)
1330		printk("Broadcom BCM5400 Copper ");
1331	else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5401_PHY_ID)
1332		printk("Broadcom BCM5401 Copper ");
1333	else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5411_PHY_ID)
1334		printk("Broadcom BCM5411 Copper ");
1335	else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5461_PHY_ID)
1336		printk("Broadcom BCM5461 Copper ");
1337	else if (((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5701_PHY_ID) &&
1338		!(pDevice->TbiFlags & ENABLE_TBI_FLAG)) {
1339		printk("Broadcom BCM5701 Integrated Copper ");
1340	}
1341	else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5703_PHY_ID) {
1342		printk("Broadcom BCM5703 Integrated ");
1343		if (pDevice->TbiFlags & ENABLE_TBI_FLAG)
1344			printk("SerDes ");
1345		else
1346			printk("Copper ");
1347	}
1348	else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5704_PHY_ID) {
1349		printk("Broadcom BCM5704 Integrated ");
1350		if (pDevice->TbiFlags & ENABLE_TBI_FLAG)
1351			printk("SerDes ");
1352		else
1353			printk("Copper ");
1354	}
1355        else if (pDevice->PhyFlags & PHY_IS_FIBER){
1356            if(( pDevice->PhyId & PHY_ID_MASK ) == PHY_BCM5780_PHY_ID)
1357                printk("Broadcom BCM5780S Integrated Serdes ");
1358
1359        }
1360	else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5705_PHY_ID)
1361		printk("Broadcom BCM5705 Integrated Copper ");
1362	else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5750_PHY_ID)
1363		printk("Broadcom BCM5750 Integrated Copper ");
1364
1365        else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5714_PHY_ID)
1366                printk("Broadcom BCM5714 Integrated Copper ");
1367        else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5780_PHY_ID)
1368                printk("Broadcom BCM5780 Integrated Copper ");
1369
1370	else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM5752_PHY_ID)
1371		printk("Broadcom BCM5752 Integrated Copper ");
1372	else if ((pDevice->PhyId & PHY_ID_MASK) == PHY_BCM8002_PHY_ID)
1373		printk("Broadcom BCM8002 SerDes ");
1374	else if (pDevice->TbiFlags & ENABLE_TBI_FLAG) {
1375		if (T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5703) {
1376			printk("Broadcom BCM5703 Integrated SerDes ");
1377		}
1378		else if (T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5704) {
1379			printk("Broadcom BCM5704 Integrated SerDes ");
1380		}
1381		else {
1382			printk("Agilent HDMP-1636 SerDes ");
1383		}
1384	}
1385	else {
1386		printk("Unknown ");
1387	}
1388	printk("transceiver found\n");
1389
1390#if (LINUX_VERSION_CODE >= 0x20400)
1391	if (scatter_gather[board_idx]) {
1392		dev->features |= NETIF_F_SG;
1393		if (pUmDevice->using_dac && !(pDevice->Flags & BCM5788_FLAG))
1394			dev->features |= NETIF_F_HIGHDMA;
1395	}
1396	if ((pDevice->TaskOffloadCap & LM_TASK_OFFLOAD_TX_TCP_CHECKSUM) &&
1397		tx_checksum[board_idx]) {
1398
1399		dev->features |= get_csum_flag( pDevice->ChipRevId);
1400	}
1401#ifdef BCM_VLAN
1402	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1403#endif
1404#ifdef BCM_TSO
1405	/* On 5714/15/80 chips, Jumbo Frames and TSO cannot both be enabled at
1406	   the same time. Since only one of these features can be enable at a
1407           time, we'll enable only Jumbo Frames and disable TSO when the user
1408	   tries to enable both.
1409	*/
1410	dev->features &= ~NETIF_F_TSO;
1411
1412	if ((pDevice->TaskToOffload & LM_TASK_OFFLOAD_TCP_SEGMENTATION) &&
1413	    (enable_tso[board_idx])) {
1414		if (T3_ASIC_5714_FAMILY(pDevice->ChipRevId) &&
1415		   (dev->mtu > 1500)) {
1416			printk(KERN_ALERT "%s: Jumbo Frames and TSO cannot simultaneously be enabled. Jumbo Frames enabled. TSO disabled.\n", dev->name);
1417		} else {
1418			dev->features |= NETIF_F_TSO;
1419		}
1420	}
1421#endif
1422	printk(KERN_INFO "%s: Scatter-gather %s, 64-bit DMA %s, Tx Checksum %s, ",
1423			dev->name,
1424			(char *) ((dev->features & NETIF_F_SG) ? "ON" : "OFF"),
1425			(char *) ((dev->features & NETIF_F_HIGHDMA) ? "ON" : "OFF"),
1426			(char *) ((dev->features & get_csum_flag( pDevice->ChipRevId)) ? "ON" : "OFF"));
1427#endif
1428	if ((pDevice->ChipRevId != T3_CHIP_ID_5700_B0) &&
1429		rx_checksum[board_idx])
1430		printk("Rx Checksum ON");
1431	else
1432		printk("Rx Checksum OFF");
1433#ifdef BCM_VLAN
1434	printk(", 802.1Q VLAN ON");
1435#endif
1436#ifdef BCM_TSO
1437	if (dev->features & NETIF_F_TSO) {
1438		printk(", TSO ON");
1439	}
1440	else
1441#endif
1442#ifdef BCM_NAPI_RXPOLL
1443	printk(", NAPI ON");
1444#endif
1445	printk("\n");
1446
1447#ifdef BCM_PROC_FS
1448	bcm5700_proc_create_dev(dev);
1449#endif
1450#ifdef BCM_TASKLET
1451	tasklet_init(&pUmDevice->tasklet, bcm5700_tasklet,
1452		(unsigned long) pUmDevice);
1453#endif
1454	if (T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5704) {
1455		if ((REG_RD(pDevice, PciCfg.DualMacCtrl) &
1456			T3_DUAL_MAC_CH_CTRL_MASK) == 3) {
1457
1458printk(KERN_WARNING "%s: Device is configured for Hardware Based Teaming which is not supported with this operating system. Please consult the user diagnostic guide to disable Turbo Teaming.\n", dev->name);
1459		}
1460	}
1461
1462#if (LINUX_VERSION_CODE > 0x20605)
1463
1464	if ((pci_dev = pci_get_device(0x1022, 0x700c, NULL))) {
1465#else
1466	if ((pci_dev = pci_find_device(0x1022, 0x700c, NULL))) {
1467#endif
1468		u32 val;
1469
1470		/* Found AMD 762 North bridge */
1471		pci_read_config_dword(pci_dev, 0x4c, &val);
1472		if ((val & 0x02) == 0) {
1473			pci_write_config_dword(pci_dev, 0x4c, val | 0x02);
1474			printk(KERN_INFO "%s: Setting AMD762 Northbridge to enable PCI ordering compliance\n", bcm5700_driver);
1475		}
1476	}
1477
1478#if (LINUX_VERSION_CODE > 0x20605)
1479
1480	pci_dev_put(pci_dev);
1481
1482#if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
1483
1484	if ((pci_dev = pci_get_device(0x1066, 0x0017, NULL))) {
1485		bcm_msi_chipset_bug = 1;
1486	}
1487	pci_dev_put(pci_dev);
1488#endif
1489#endif
1490
1491	return 0;
1492}
1493
1494
/*
 * bcm5700_remove_one() - PCI removal callback; tear down one adapter.
 * Reverses bcm5700_init_one(): proc entry, ioctl32 hook, netdev
 * registration, register mapping, PCI regions, then the net_device itself.
 */
static void __devexit
bcm5700_remove_one (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata (pdev);
	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;

#ifdef BCM_PROC_FS
	bcm5700_proc_remove_dev(dev);
#endif
#ifdef BCM_IOCTL32
	/* Drop the ioctl32 translation when the last unit goes away. */
	atomic_dec(&bcm5700_load_count);
	if (atomic_read(&bcm5700_load_count) == 0)
		unregister_ioctl32_conversion(SIOCNICE);
#endif
	unregister_netdev(dev);

	if (pUmDevice->lm_dev.pMappedMemBase)
		iounmap(pUmDevice->lm_dev.pMappedMemBase);

	pci_release_regions(pdev);

#if (LINUX_VERSION_CODE < 0x020600)
	kfree(dev);
#else
	free_netdev(dev);
#endif

	pci_set_drvdata(pdev, NULL);

}
1525
1526int b57_test_intr(UM_DEVICE_BLOCK *pUmDevice);
1527
1528#ifdef BCM_WL_EMULATOR
1529/* new transmit callback  */
1530static int bcm5700emu_start_xmit(struct sk_buff *skb, struct net_device *dev);
1531/* keep track of the 2 gige devices */
1532static PLM_DEVICE_BLOCK pDev1;
1533static PLM_DEVICE_BLOCK pDev2;
1534
/*
 * bcm5700emu_open() - per-device setup for the wl emulator.
 *
 * Called from bcm5700_open().  On the first call only, reads the NVRAM
 * variables "wlemu_if" (which interface emulates) and "wlemu_mode"
 * ("rx", "tx" or "rx_tx"); the parsed results persist in function-static
 * variables for subsequent devices.  If this device's name matches
 * wlemu_if, an emulator context is created and the rx/tx emulation flags
 * (and possibly the transmit callback) are redirected accordingly.
 */
static void
bcm5700emu_open(struct net_device *dev)
{
	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
	PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
	static int instance = 0;	/* how many devices opened so far */
	static char *wlemu_if = NULL;	/* NVRAM-selected emulator interface */
	char *wlemu_mode = NULL;
	//int wlemu_idx = 0;
	static int rx_enable = 0;
	static int tx_enable = 0;

	/* which interface is the emulator ? */
	if(instance == 0) {
		wlemu_if = nvram_get("wlemu_if");
		/* do we emulate rx, tx or both  */
		wlemu_mode = nvram_get("wlemu_mode");
		if(wlemu_mode) {
			if (!strcmp(wlemu_mode,"rx"))
			{
				rx_enable = 1;
			}
			else if (!strcmp(wlemu_mode,"tx"))
			{

				tx_enable = 1;

			}
			else if (!strcmp(wlemu_mode,"rx_tx"))
			{

				rx_enable = 1;
				tx_enable = 1;
			}
		}
	}

	instance++;

	/* The context is used for accessing the OSL for emulating devices */
	pDevice->wlc = NULL;

	/* determines if this device is an emulator */
	pDevice->wl_emulate_rx = 0;
	pDevice->wl_emulate_tx = 0;

	if(wlemu_if && !strcmp(dev->name,wlemu_if))
	{
		/* create an emulator context. */
		pDevice->wlc = (void *)wlcemu_wlccreate((void *)dev);
		B57_INFO(("Using %s for wl emulation \n", dev->name));
		if(rx_enable)
		{
			B57_INFO(("Enabling wl RX emulation \n"));
			pDevice->wl_emulate_rx = 1;
		}
		/* re-direct transmit callback to emulator */
		if(tx_enable)
		{
			pDevice->wl_emulate_tx = 1;
			dev->hard_start_xmit = bcm5700emu_start_xmit;
			B57_INFO(("Enabling wl TX emulation \n"));
		}
	}
	/* for debug access to configured devices only */
	if(instance == 1)
		pDev1 = pDevice;
	else if (instance == 2)
		pDev2 = pDevice;
}
1605
1606/* Public API to get current emulation info */
1607int bcm5700emu_get_info(char *buf)
1608{
1609	int len = 0;
1610	PLM_DEVICE_BLOCK p;
1611
1612	/* look for an emulating device */
1613	if(pDev1->wlc) {
1614		p = pDev1;
1615		len += sprintf(buf+len,"emulation device : eth0\n");
1616	}
1617	else if (pDev2->wlc) {
1618		p = pDev2;
1619		len += sprintf(buf+len,"emulation device : eth1\n");
1620	}
1621	else {
1622		len += sprintf(buf+len,"emulation not activated\n");
1623		return len;
1624	}
1625	if(p->wl_emulate_rx)
1626		len += sprintf(buf+len,"RX emulation enabled\n");
1627	else
1628		len += sprintf(buf+len,"RX emulation disabled\n");
1629	if(p->wl_emulate_tx)
1630		len += sprintf(buf+len,"TX emulation enabled\n");
1631	else
1632		len += sprintf(buf+len,"TX emulation disabled\n");
1633	return len;
1634
1635}
1636
1637
1638/* Public API to access the bcm5700_start_xmit callback */
1639
/* Public entry point: hand a frame straight to the driver's real
 * transmit routine (used by the emulator to forward traffic). */
int
bcm5700emu_forward_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int rc;

	rc = bcm5700_start_xmit(skb, dev);
	return rc;
}
1645
1646
1647/* hook to kernel txmit callback */
1648STATIC int
1649bcm5700emu_start_xmit(struct sk_buff *skb, struct net_device *dev)
1650{
1651
1652  PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
1653  PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
1654  return wlcemu_start_xmit(skb,pDevice->wlc);
1655}
1656
1657#endif /* BCM_WL_EMULATOR */
1658
1659int
1660bcm5700_open(struct net_device *dev)
1661{
1662	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
1663	PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
1664	int rc;
1665
1666	if (pUmDevice->suspended){
1667            return -EAGAIN;
1668        }
1669
1670#ifdef BCM_WL_EMULATOR
1671	bcm5700emu_open(dev);
1672#endif
1673
1674	/* delay for 6 seconds */
1675	pUmDevice->delayed_link_ind = (6 * HZ) / pUmDevice->timer_interval;
1676
1677#ifdef BCM_INT_COAL
1678#ifndef BCM_NAPI_RXPOLL
1679	pUmDevice->adaptive_expiry = HZ / pUmDevice->timer_interval;
1680#endif
1681#endif
1682
1683#ifdef INCLUDE_TBI_SUPPORT
1684	if ((pDevice->TbiFlags & ENABLE_TBI_FLAG) &&
1685		(pDevice->TbiFlags & TBI_POLLING_FLAGS)) {
1686		pUmDevice->poll_tbi_interval = HZ / pUmDevice->timer_interval;
1687		if (T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5703) {
1688			pUmDevice->poll_tbi_interval /= 4;
1689		}
1690		pUmDevice->poll_tbi_expiry = pUmDevice->poll_tbi_interval;
1691	}
1692#endif
1693	/* set this timer for 2 seconds */
1694	pUmDevice->asf_heartbeat = (2 * HZ) / pUmDevice->timer_interval;
1695
1696#if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
1697
1698
1699	if ( (  (T3_ASIC_IS_575X_PLUS(pDevice->ChipRevId) ) &&
1700		(T3_ASIC_REV(pDevice->ChipRevId) != T3_ASIC_REV_5714_A0 ) &&
1701		(T3_CHIP_REV(pDevice->ChipRevId) != T3_CHIP_REV_5750_AX ) &&
1702		(T3_CHIP_REV(pDevice->ChipRevId) != T3_CHIP_REV_5750_BX ) ) &&
1703		!bcm_msi_chipset_bug	){
1704
1705		if (disable_msi[pUmDevice->index]==1){
1706			/* do nothing-it's not turned on */
1707		}else{
1708			pDevice->Flags |= USING_MSI_FLAG;
1709
1710                        REG_WR(pDevice, Msi.Mode,  2 );
1711
1712			rc = pci_enable_msi(pUmDevice->pdev);
1713
1714			if(rc!=0){
1715				pDevice->Flags &= ~ USING_MSI_FLAG;
1716                        	REG_WR(pDevice, Msi.Mode,  1 );
1717			}
1718		}
1719	}
1720
1721
1722#endif
1723
1724	if ((rc= request_irq(pUmDevice->pdev->irq, &bcm5700_interrupt, SA_SHIRQ, dev->name, dev)))
1725	{
1726
1727#if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
1728
1729		if(pDevice->Flags & USING_MSI_FLAG)  {
1730
1731			pci_disable_msi(pUmDevice->pdev);
1732			pDevice->Flags &= ~USING_MSI_FLAG;
1733                       	REG_WR(pDevice, Msi.Mode,  1 );
1734
1735		}
1736#endif
1737		return rc;
1738	}
1739
1740	pUmDevice->opened = 1;
1741	if (LM_InitializeAdapter(pDevice) != LM_STATUS_SUCCESS) {
1742		pUmDevice->opened = 0;
1743		free_irq(dev->irq, dev);
1744		bcm5700_freemem(dev);
1745		return -EAGAIN;
1746	}
1747
1748	bcm5700_set_vlan_mode(pUmDevice);
1749	bcm5700_init_counters(pUmDevice);
1750
1751	if (pDevice->Flags & UNDI_FIX_FLAG) {
1752		printk(KERN_INFO "%s: Using indirect register access\n", dev->name);
1753	}
1754
1755	if (memcmp(dev->dev_addr, pDevice->NodeAddress, 6))
1756	{
1757		/* Do not use invalid eth addrs: any multicast & all zeros */
1758		if( is_valid_ether_addr(dev->dev_addr) ){
1759			LM_SetMacAddress(pDevice, dev->dev_addr);
1760		}
1761		else
1762		{
1763			printk(KERN_INFO "%s: Invalid administered node address\n",dev->name);
1764			memcpy(dev->dev_addr, pDevice->NodeAddress, 6);
1765		}
1766	}
1767
1768	if (tigon3_debug > 1)
1769		printk(KERN_DEBUG "%s: tigon3_open() irq %d.\n", dev->name, dev->irq);
1770
1771	QQ_InitQueue(&pUmDevice->rx_out_of_buf_q.Container,
1772        MAX_RX_PACKET_DESC_COUNT);
1773
1774
1775#if (LINUX_VERSION_CODE < 0x020300)
1776	MOD_INC_USE_COUNT;
1777#endif
1778
1779	atomic_set(&pUmDevice->intr_sem, 0);
1780
1781	LM_EnableInterrupt(pDevice);
1782
1783#if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
1784
1785	if (pDevice->Flags & USING_MSI_FLAG){
1786
1787		/* int test to check support on older machines */
1788		if (b57_test_intr(pUmDevice) != 1) {
1789
1790			LM_DisableInterrupt(pDevice);
1791			free_irq(pUmDevice->pdev->irq, dev);
1792			pci_disable_msi(pUmDevice->pdev);
1793                        REG_WR(pDevice, Msi.Mode,  1 );
1794			pDevice->Flags &= ~USING_MSI_FLAG;
1795
1796			rc = LM_ResetAdapter(pDevice);
1797printk(KERN_ALERT " The MSI support in this system is not functional.\n");
1798
1799			if (rc == LM_STATUS_SUCCESS)
1800				rc = 0;
1801			else
1802				rc = -ENODEV;
1803
1804			if(rc == 0){
1805				rc = request_irq(pUmDevice->pdev->irq, &bcm5700_interrupt,
1806					    SA_SHIRQ, dev->name, dev);
1807			}
1808
1809			if(rc){
1810				LM_Halt(pDevice);
1811				bcm5700_freemem(dev);
1812				pUmDevice->opened = 0;
1813				return rc;
1814			}
1815
1816
1817			pDevice->InitDone = TRUE;
1818			atomic_set(&pUmDevice->intr_sem, 0);
1819			LM_EnableInterrupt(pDevice);
1820		}
1821	}
1822#endif
1823
1824	init_timer(&pUmDevice->timer);
1825	pUmDevice->timer.expires = RUN_AT(pUmDevice->timer_interval);
1826	pUmDevice->timer.data = (unsigned long)dev;
1827	pUmDevice->timer.function = &bcm5700_timer;
1828	add_timer(&pUmDevice->timer);
1829
1830	if (T3_ASIC_IS_5705_BEYOND(pDevice->ChipRevId)) {
1831		init_timer(&pUmDevice->statstimer);
1832		pUmDevice->statstimer.expires = RUN_AT(pUmDevice->statstimer_interval);
1833		pUmDevice->statstimer.data = (unsigned long)dev;
1834		pUmDevice->statstimer.function = &bcm5700_stats_timer;
1835		add_timer(&pUmDevice->statstimer);
1836	}
1837
1838	if(pDevice->Flags & USING_MSI_FLAG)
1839		printk(KERN_INFO "%s: Using Message Signaled Interrupt (MSI)  \n", dev->name);
1840	else
1841		printk(KERN_INFO "%s: Using PCI INTX interrupt \n", dev->name);
1842
1843	netif_start_queue(dev);
1844
1845	return 0;
1846}
1847
1848
1849STATIC void
1850bcm5700_stats_timer(unsigned long data)
1851{
1852	struct net_device *dev = (struct net_device *)data;
1853	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
1854	PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
1855	unsigned long flags = 0;
1856
1857	if (!pUmDevice->opened)
1858		return;
1859
1860	if (!atomic_read(&pUmDevice->intr_sem) &&
1861	    !pUmDevice->suspended              &&
1862	   (pDevice->LinkStatus == LM_STATUS_LINK_ACTIVE)) {
1863		BCM5700_LOCK(pUmDevice, flags);
1864		LM_GetStats(pDevice);
1865		BCM5700_UNLOCK(pUmDevice, flags);
1866	}
1867
1868	pUmDevice->statstimer.expires = RUN_AT(pUmDevice->statstimer_interval);
1869
1870	add_timer(&pUmDevice->statstimer);
1871}
1872
1873
1874STATIC void
1875bcm5700_timer(unsigned long data)
1876{
1877	struct net_device *dev = (struct net_device *)data;
1878	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
1879	PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
1880	unsigned long flags = 0;
1881	LM_UINT32 value32;
1882
1883	if (!pUmDevice->opened)
1884		return;
1885
1886	/* BCM4785: Flush posted writes from GbE to host memory. */
1887	if (pDevice->Flags & FLUSH_POSTED_WRITE_FLAG)
1888		REG_RD(pDevice, HostCoalesce.Mode);
1889
1890	if (atomic_read(&pUmDevice->intr_sem) || pUmDevice->suspended) {
1891		pUmDevice->timer.expires = RUN_AT(pUmDevice->timer_interval);
1892		add_timer(&pUmDevice->timer);
1893		return;
1894	}
1895
1896#ifdef INCLUDE_TBI_SUPPORT
1897	if ((pDevice->TbiFlags & TBI_POLLING_FLAGS) &&
1898		(--pUmDevice->poll_tbi_expiry <= 0)) {
1899
1900		BCM5700_PHY_LOCK(pUmDevice, flags);
1901		value32 = REG_RD(pDevice, MacCtrl.Status);
1902		if (((pDevice->LinkStatus == LM_STATUS_LINK_ACTIVE) &&
1903			((value32 & (MAC_STATUS_LINK_STATE_CHANGED |
1904				MAC_STATUS_CFG_CHANGED)) ||
1905			!(value32 & MAC_STATUS_PCS_SYNCED)))
1906			||
1907			((pDevice->LinkStatus != LM_STATUS_LINK_ACTIVE) &&
1908			(value32 & (MAC_STATUS_PCS_SYNCED |
1909				MAC_STATUS_SIGNAL_DETECTED))))
1910		{
1911			LM_SetupPhy(pDevice);
1912		}
1913		BCM5700_PHY_UNLOCK(pUmDevice, flags);
1914		pUmDevice->poll_tbi_expiry = pUmDevice->poll_tbi_interval;
1915
1916        }
1917#endif
1918
1919	if (pUmDevice->delayed_link_ind > 0) {
1920		if (pUmDevice->delayed_link_ind == 1)
1921			MM_IndicateStatus(pDevice, pDevice->LinkStatus);
1922		else
1923			pUmDevice->delayed_link_ind--;
1924	}
1925
1926	if (pUmDevice->crc_counter_expiry > 0)
1927		pUmDevice->crc_counter_expiry--;
1928
1929	if (!pUmDevice->interrupt) {
1930		if (!(pDevice->Flags & USE_TAGGED_STATUS_FLAG)) {
1931			BCM5700_LOCK(pUmDevice, flags);
1932			if (pDevice->pStatusBlkVirt->Status & STATUS_BLOCK_UPDATED) {
1933				/* This will generate an interrupt */
1934				REG_WR(pDevice, Grc.LocalCtrl,
1935					pDevice->GrcLocalCtrl |
1936					GRC_MISC_LOCAL_CTRL_SET_INT);
1937			}
1938			else {
1939				REG_WR(pDevice, HostCoalesce.Mode,
1940					pDevice->CoalesceMode |
1941					HOST_COALESCE_ENABLE |
1942					HOST_COALESCE_NOW);
1943			}
1944			if (!(REG_RD(pDevice, DmaWrite.Mode) &
1945				DMA_WRITE_MODE_ENABLE)) {
1946				BCM5700_UNLOCK(pUmDevice, flags);
1947				bcm5700_reset(dev);
1948			}
1949			else {
1950				BCM5700_UNLOCK(pUmDevice, flags);
1951			}
1952			if (pUmDevice->tx_queued) {
1953				pUmDevice->tx_queued = 0;
1954				netif_wake_queue(dev);
1955			}
1956		}
1957#if (LINUX_VERSION_CODE < 0x02032b)
1958		if ((QQ_GetEntryCnt(&pDevice->TxPacketFreeQ.Container) !=
1959			pDevice->TxPacketDescCnt) &&
1960			((jiffies - dev->trans_start) > TX_TIMEOUT)) {
1961
1962			printk(KERN_WARNING "%s: Tx hung\n", dev->name);
1963			bcm5700_reset(dev);
1964		}
1965#endif
1966	}
1967#ifdef BCM_INT_COAL
1968#ifndef BCM_NAPI_RXPOLL
1969	if (pUmDevice->adaptive_coalesce) {
1970		pUmDevice->adaptive_expiry--;
1971		if (pUmDevice->adaptive_expiry == 0) {
1972			pUmDevice->adaptive_expiry = HZ /
1973				pUmDevice->timer_interval;
1974			bcm5700_adapt_coalesce(pUmDevice);
1975		}
1976	}
1977#endif
1978#endif
1979	if (QQ_GetEntryCnt(&pUmDevice->rx_out_of_buf_q.Container) >
1980		(unsigned int) pUmDevice->rx_buf_repl_panic_thresh) {
1981		/* Generate interrupt and let isr allocate buffers */
1982		REG_WR(pDevice, HostCoalesce.Mode, pDevice->CoalesceMode |
1983			HOST_COALESCE_ENABLE | HOST_COALESCE_NOW);
1984	}
1985
1986#ifdef BCM_ASF
1987	if (pDevice->AsfFlags & ASF_ENABLED) {
1988		pUmDevice->asf_heartbeat--;
1989		if (pUmDevice->asf_heartbeat == 0) {
1990			if( (pDevice->Flags & UNDI_FIX_FLAG) ||
1991			    (pDevice->Flags & ENABLE_PCIX_FIX_FLAG)) {
1992				MEM_WR_OFFSET(pDevice, T3_CMD_MAILBOX,
1993					T3_CMD_NICDRV_ALIVE2);
1994				MEM_WR_OFFSET(pDevice, T3_CMD_LENGTH_MAILBOX,
1995					4);
1996				MEM_WR_OFFSET(pDevice, T3_CMD_DATA_MAILBOX, 5);
1997			} else {
1998				LM_RegWr(pDevice,
1999					 (T3_NIC_MBUF_POOL_ADDR +
2000					  T3_CMD_MAILBOX),
2001					 T3_CMD_NICDRV_ALIVE2, 1);
2002				LM_RegWr(pDevice,
2003					 (T3_NIC_MBUF_POOL_ADDR +
2004					  T3_CMD_LENGTH_MAILBOX),4,1);
2005				LM_RegWr(pDevice,
2006					 (T3_NIC_MBUF_POOL_ADDR +
2007					  T3_CMD_DATA_MAILBOX),5,1);
2008 			}
2009
2010			value32 = REG_RD(pDevice, Grc.RxCpuEvent);
2011			REG_WR(pDevice, Grc.RxCpuEvent, value32 | BIT_14);
2012			pUmDevice->asf_heartbeat = (2 * HZ) /
2013				pUmDevice->timer_interval;
2014		}
2015	}
2016#endif
2017
2018	if (pDevice->PhyFlags & PHY_IS_FIBER){
2019		BCM5700_PHY_LOCK(pUmDevice, flags);
2020		LM_5714_FamFiberCheckLink(pDevice);
2021		BCM5700_PHY_UNLOCK(pUmDevice, flags);
2022	}
2023
2024	pUmDevice->timer.expires = RUN_AT(pUmDevice->timer_interval);
2025	add_timer(&pUmDevice->timer);
2026}
2027
2028STATIC int
2029bcm5700_init_counters(PUM_DEVICE_BLOCK pUmDevice)
2030{
2031#ifdef BCM_INT_COAL
2032#ifndef BCM_NAPI_RXPOLL
2033	LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
2034
2035	pUmDevice->rx_curr_coalesce_frames = pDevice->RxMaxCoalescedFrames;
2036	pUmDevice->rx_curr_coalesce_ticks = pDevice->RxCoalescingTicks;
2037	pUmDevice->tx_curr_coalesce_frames = pDevice->TxMaxCoalescedFrames;
2038	pUmDevice->rx_last_cnt = 0;
2039	pUmDevice->tx_last_cnt = 0;
2040#endif
2041#endif
2042	pUmDevice->phy_crc_count = 0;
2043#if TIGON3_DEBUG
2044	pUmDevice->tx_zc_count = 0;
2045	pUmDevice->tx_chksum_count = 0;
2046	pUmDevice->tx_himem_count = 0;
2047	pUmDevice->rx_good_chksum_count = 0;
2048	pUmDevice->rx_bad_chksum_count = 0;
2049#endif
2050#ifdef BCM_TSO
2051	pUmDevice->tso_pkt_count = 0;
2052#endif
2053	return 0;
2054}
2055
2056#ifdef BCM_INT_COAL
2057#ifndef BCM_NAPI_RXPOLL
STATIC int
bcm5700_do_adapt_coalesce(PUM_DEVICE_BLOCK pUmDevice,
	int rx_frames, int rx_ticks, int tx_frames, int rx_frames_intr)
{
	/* Program a new set of host-coalescing parameters into the chip
	 * and mirror them in the software state used by
	 * bcm5700_adapt_coalesce().  Always returns 0. */
	unsigned long flags = 0;
	LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;

	if (pUmDevice->do_global_lock) {
		/* Best effort: skip this tuning pass when the global lock is
		 * already held; the next timer tick will retry.
		 * NOTE(review): spin_is_locked() followed by
		 * spin_lock_irqsave() is not atomic -- the lock may be taken
		 * in between, in which case we simply block here briefly. */
		if (spin_is_locked(&pUmDevice->global_lock))
			return 0;
		spin_lock_irqsave(&pUmDevice->global_lock, flags);
	}
	/* Mirror the new values in software state first... */
	pUmDevice->rx_curr_coalesce_frames = rx_frames;
	pUmDevice->rx_curr_coalesce_ticks = rx_ticks;
	pUmDevice->tx_curr_coalesce_frames = tx_frames;
	pUmDevice->rx_curr_coalesce_frames_intr = rx_frames_intr;
	/* ...then push them to the host-coalescing registers. */
	REG_WR(pDevice, HostCoalesce.RxMaxCoalescedFrames, rx_frames);

	REG_WR(pDevice, HostCoalesce.RxCoalescingTicks, rx_ticks);

	REG_WR(pDevice, HostCoalesce.TxMaxCoalescedFrames, tx_frames);

	REG_WR(pDevice, HostCoalesce.RxMaxCoalescedFramesDuringInt,
		rx_frames_intr);

	/* Assumes BCM5700_UNLOCK is a no-op when do_global_lock is clear --
	 * verify against the macro definition. */
	BCM5700_UNLOCK(pUmDevice, flags);
	return 0;
}
2086
2087STATIC int
2088bcm5700_adapt_coalesce(PUM_DEVICE_BLOCK pUmDevice)
2089{
2090	PLM_DEVICE_BLOCK pDevice = &pUmDevice->lm_dev;
2091	uint rx_curr_cnt, tx_curr_cnt, rx_delta, tx_delta, total_delta;
2092
2093	rx_curr_cnt = pDevice->pStatsBlkVirt->ifHCInUcastPkts.Low;
2094	tx_curr_cnt = pDevice->pStatsBlkVirt->ifHCOutUcastPkts.Low;
2095	if ((rx_curr_cnt <= pUmDevice->rx_last_cnt) ||
2096		(tx_curr_cnt < pUmDevice->tx_last_cnt)) {
2097
2098		/* skip if there is counter rollover */
2099		pUmDevice->rx_last_cnt = rx_curr_cnt;
2100		pUmDevice->tx_last_cnt = tx_curr_cnt;
2101		return 0;
2102	}
2103
2104	rx_delta = rx_curr_cnt - pUmDevice->rx_last_cnt;
2105	tx_delta = tx_curr_cnt - pUmDevice->tx_last_cnt;
2106	total_delta = (((rx_delta + rx_delta) + tx_delta) / 3) << 1;
2107
2108	pUmDevice->rx_last_cnt = rx_curr_cnt;
2109	pUmDevice->tx_last_cnt = tx_curr_cnt;
2110
2111	if (total_delta < ADAPTIVE_LO_PKT_THRESH) {
2112		if (pUmDevice->rx_curr_coalesce_frames !=
2113			ADAPTIVE_LO_RX_MAX_COALESCED_FRAMES) {
2114
2115			bcm5700_do_adapt_coalesce(pUmDevice,
2116				ADAPTIVE_LO_RX_MAX_COALESCED_FRAMES,
2117				ADAPTIVE_LO_RX_COALESCING_TICKS,
2118				ADAPTIVE_LO_TX_MAX_COALESCED_FRAMES,
2119				ADAPTIVE_LO_RX_MAX_COALESCED_FRAMES_DURING_INT);
2120		}
2121	}
2122	else if (total_delta < ADAPTIVE_HI_PKT_THRESH) {
2123		if (pUmDevice->rx_curr_coalesce_frames !=
2124			DEFAULT_RX_MAX_COALESCED_FRAMES) {
2125
2126			bcm5700_do_adapt_coalesce(pUmDevice,
2127				DEFAULT_RX_MAX_COALESCED_FRAMES,
2128				DEFAULT_RX_COALESCING_TICKS,
2129				DEFAULT_TX_MAX_COALESCED_FRAMES,
2130				DEFAULT_RX_MAX_COALESCED_FRAMES_DURING_INT);
2131		}
2132	}
2133	else {
2134		if (pUmDevice->rx_curr_coalesce_frames !=
2135			ADAPTIVE_HI_RX_MAX_COALESCED_FRAMES) {
2136
2137			bcm5700_do_adapt_coalesce(pUmDevice,
2138				ADAPTIVE_HI_RX_MAX_COALESCED_FRAMES,
2139				ADAPTIVE_HI_RX_COALESCING_TICKS,
2140				ADAPTIVE_HI_TX_MAX_COALESCED_FRAMES,
2141				ADAPTIVE_HI_RX_MAX_COALESCED_FRAMES_DURING_INT);
2142		}
2143	}
2144	return 0;
2145}
2146#endif
2147#endif
2148
STATIC void
bcm5700_reset(struct net_device *dev)
{
	/* Fully reinitialize the adapter: stop traffic, reset the chip via
	 * the LM layer, restore rx/VLAN/counter/MAC-address state, then
	 * re-enable interrupts and the transmit queue.  The call sequence
	 * is order-sensitive; do not reorder. */
	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
	PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
	unsigned long flags;

#ifdef BCM_TSO

	/* If the tx ring filled while TSO was active, turn TSO off.
	 * NOTE(review): nothing in this function re-enables it --
	 * presumably restored elsewhere; verify. */
	if( (dev->features & NETIF_F_TSO) &&
		(pUmDevice->tx_full) )	   {

		dev->features &= ~NETIF_F_TSO;
	}
#endif

	netif_stop_queue(dev);
	bcm5700_intr_off(pUmDevice);
	BCM5700_PHY_LOCK(pUmDevice, flags);
	LM_ResetAdapter(pDevice);
	pDevice->InitDone = TRUE;
	bcm5700_do_rx_mode(dev);
	bcm5700_set_vlan_mode(pUmDevice);
	bcm5700_init_counters(pUmDevice);
	/* Reprogram the MAC address if it no longer matches what the chip
	 * holds (e.g. changed while the device was running). */
	if (memcmp(dev->dev_addr, pDevice->NodeAddress, 6)) {
		LM_SetMacAddress(pDevice, dev->dev_addr);
	}
	BCM5700_PHY_UNLOCK(pUmDevice, flags);
	/* intr_sem keeps the timer path idle until interrupts are fully
	 * re-enabled by bcm5700_intr_on(). */
	atomic_set(&pUmDevice->intr_sem, 1);
	bcm5700_intr_on(pUmDevice);
	netif_wake_queue(dev);
}
2181
2182STATIC void
2183bcm5700_set_vlan_mode(UM_DEVICE_BLOCK *pUmDevice)
2184{
2185	LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
2186	LM_UINT32 ReceiveMask = pDevice->ReceiveMask;
2187	int vlan_tag_mode = pUmDevice->vlan_tag_mode;
2188
2189	if (vlan_tag_mode == VLAN_TAG_MODE_AUTO_STRIP) {
2190	        if (pDevice->AsfFlags & ASF_ENABLED) {
2191			vlan_tag_mode = VLAN_TAG_MODE_FORCED_STRIP;
2192		}
2193		else {
2194			vlan_tag_mode = VLAN_TAG_MODE_NORMAL_STRIP;
2195		}
2196	}
2197	if (vlan_tag_mode == VLAN_TAG_MODE_NORMAL_STRIP) {
2198		ReceiveMask |= LM_KEEP_VLAN_TAG;
2199#ifdef BCM_VLAN
2200		if (pUmDevice->vlgrp)
2201			ReceiveMask &= ~LM_KEEP_VLAN_TAG;
2202#endif
2203#ifdef NICE_SUPPORT
2204		if (pUmDevice->nice_rx)
2205			ReceiveMask &= ~LM_KEEP_VLAN_TAG;
2206#endif
2207	}
2208	else if (vlan_tag_mode == VLAN_TAG_MODE_FORCED_STRIP) {
2209		ReceiveMask &= ~LM_KEEP_VLAN_TAG;
2210	}
2211	if (ReceiveMask != pDevice->ReceiveMask)
2212	{
2213		LM_SetReceiveMask(pDevice, ReceiveMask);
2214	}
2215}
2216
static void
bcm5700_poll_wait(UM_DEVICE_BLOCK *pUmDevice)
{
	/* Sleep in 1-jiffy steps until the NAPI rx poll (RxPoll) finishes;
	 * used to quiesce the receive path before changing device state.
	 * No-op when NAPI polling is not compiled in. */
#ifdef BCM_NAPI_RXPOLL
	while (pUmDevice->lm_dev.RxPoll) {
		/* NOTE(review): a plain store to current->state lacks the
		 * memory barrier set_current_state() provides on SMP;
		 * consider switching if all supported kernels have it. */
		current->state = TASK_INTERRUPTIBLE;
		schedule_timeout(1);
	}
#endif
}
2227
2228
2229#ifdef BCM_VLAN
2230STATIC void
2231bcm5700_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
2232{
2233	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) dev->priv;
2234
2235	bcm5700_intr_off(pUmDevice);
2236	bcm5700_poll_wait(pUmDevice);
2237	pUmDevice->vlgrp = vlgrp;
2238	bcm5700_set_vlan_mode(pUmDevice);
2239	bcm5700_intr_on(pUmDevice);
2240}
2241
2242STATIC void
2243bcm5700_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
2244{
2245	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) dev->priv;
2246
2247	bcm5700_intr_off(pUmDevice);
2248	bcm5700_poll_wait(pUmDevice);
2249	if (pUmDevice->vlgrp) {
2250		pUmDevice->vlgrp->vlan_devices[vid] = NULL;
2251	}
2252	bcm5700_intr_on(pUmDevice);
2253}
2254#endif
2255
STATIC int
bcm5700_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* hard_start_xmit entry point: map the skb onto a free LM packet
	 * descriptor, apply checksum/VLAN/TSO flags, and hand it to the LM
	 * layer for DMA.  Returns 0 when the skb was consumed (sent or
	 * dropped), 1 to ask the stack to requeue it. */
	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
	PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
	PLM_PACKET pPacket;
	PUM_PACKET pUmPacket;
	unsigned long flags = 0;
	int frag_no;
#ifdef NICE_SUPPORT
	vlan_tag_t *vlan_tag;
#endif
#ifdef BCM_TSO
	LM_UINT32 mss = 0 ;
	uint16_t ip_tcp_len, tcp_opt_len, tcp_seg_flags;
#endif

	/* Drop (not requeue) when the link is down, the chip is not
	 * initialized, or the device is suspended. */
	if ((pDevice->LinkStatus == LM_STATUS_LINK_DOWN) ||
		!pDevice->InitDone || pUmDevice->suspended)
	{
		dev_kfree_skb(skb);
		return 0;
	}

#if (LINUX_VERSION_CODE < 0x02032b)
	/* Pre-softnet kernels: serialize transmits via the tbusy bit. */
	if (test_and_set_bit(0, &dev->tbusy)) {
	    return 1;
	}
#endif

	/* When a single global lock is used and the ISR is running, defer
	 * this transmit; the ISR wakes the queue via tx_queued when done.
	 * The re-check closes the race with the ISR finishing right after
	 * the stop. */
	if (pUmDevice->do_global_lock && pUmDevice->interrupt) {
		netif_stop_queue(dev);
		pUmDevice->tx_queued = 1;
		if (!pUmDevice->interrupt) {
			netif_wake_queue(dev);
			pUmDevice->tx_queued = 0;
		}
	    return 1;
	}

	/* Grab a free packet descriptor; stop the queue when exhausted.
	 * The re-check closes the race with tx completion freeing one. */
	pPacket = (PLM_PACKET)
		QQ_PopHead(&pDevice->TxPacketFreeQ.Container);
	if (pPacket == 0) {
		netif_stop_queue(dev);
		pUmDevice->tx_full = 1;
		if (QQ_GetEntryCnt(&pDevice->TxPacketFreeQ.Container)) {
			netif_wake_queue(dev);
			pUmDevice->tx_full = 0;
		}
	    return 1;
	}
	pUmPacket = (PUM_PACKET) pPacket;
	pUmPacket->skbuff = skb;

	/* Request hardware TCP/UDP checksum insertion when the stack asked
	 * for checksum offload. */
	if (skb->ip_summed == CHECKSUM_HW) {
		pPacket->Flags = SND_BD_FLAG_TCP_UDP_CKSUM;
#if TIGON3_DEBUG
		pUmDevice->tx_chksum_count++;
#endif
	}
	else {
		pPacket->Flags = 0;
	}
#if MAX_SKB_FRAGS
	frag_no = skb_shinfo(skb)->nr_frags;
#else
	frag_no = 0;
#endif
	/* Each page fragment plus the linear part needs one send BD. */
	if (atomic_read(&pDevice->SendBdLeft) < (frag_no + 1)) {
		netif_stop_queue(dev);
		pUmDevice->tx_full = 1;
		QQ_PushHead(&pDevice->TxPacketFreeQ.Container, pPacket);
		if (atomic_read(&pDevice->SendBdLeft) >= (frag_no + 1)) {
			netif_wake_queue(dev);
			pUmDevice->tx_full = 0;
		}
		return 1;
	}

	pPacket->u.Tx.FragCount = frag_no + 1;
#if TIGON3_DEBUG
	if (pPacket->u.Tx.FragCount > 1)
		pUmDevice->tx_zc_count++;
#endif

#ifdef BCM_VLAN
	/* 802.1q tag supplied by the kernel VLAN layer. */
	if (pUmDevice->vlgrp && vlan_tx_tag_present(skb)) {
		pPacket->VlanTag = vlan_tx_tag_get(skb);
		pPacket->Flags |= SND_BD_FLAG_VLAN_TAG;
	}
#endif
#ifdef NICE_SUPPORT
	/* NICE passes a VLAN tag out-of-band in skb->cb, marked by the
	 * 0x5555 signature; consume and clear it. */
	vlan_tag = (vlan_tag_t *) &skb->cb[0];
	if (vlan_tag->signature == 0x5555) {
		pPacket->VlanTag = vlan_tag->tag;
		pPacket->Flags |= SND_BD_FLAG_VLAN_TAG;
		vlan_tag->signature = 0;
	}
#endif

#ifdef BCM_TSO
	/* TCP segmentation offload path: prepare the IP/TCP headers the
	 * way the chip expects and encode the MSS plus header-length
	 * information in the send BD. */
	if ((mss = (LM_UINT32) skb_shinfo(skb)->tso_size) &&
		(skb->len > pDevice->TxMtu)) {

#if (LINUX_VERSION_CODE >= 0x02060c)

		/* The headers are modified below, so a cloned header block
		 * must be un-shared first; drop the skb if that fails. */
		if (skb_header_cloned(skb) &&
			pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {

			dev_kfree_skb(skb);
			return 0;
		}
#endif
		pUmDevice->tso_pkt_count++;

		pPacket->Flags |= SND_BD_FLAG_CPU_PRE_DMA |
			SND_BD_FLAG_CPU_POST_DMA;

		tcp_opt_len = 0;
		if (skb->h.th->doff > 5) {
			tcp_opt_len = (skb->h.th->doff - 5) << 2;
		}
		ip_tcp_len = (skb->nh.iph->ihl << 2) + sizeof(struct tcphdr);
		skb->nh.iph->check = 0;

		if ( T3_ASIC_IS_575X_PLUS(pDevice->ChipRevId) ){
			/* 575X+ parts compute the checksum entirely in HW. */
			skb->h.th->check = 0;
			pPacket->Flags &= ~SND_BD_FLAG_TCP_UDP_CKSUM;
		}
		else {
			/* Older parts need the TCP pseudo-header checksum
			 * seeded by the driver. */
			skb->h.th->check = ~csum_tcpudp_magic(
				skb->nh.iph->saddr, skb->nh.iph->daddr,
				0, IPPROTO_TCP, 0);
		}

		skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
		tcp_seg_flags = 0;

		/* Encode extra IP/TCP option word counts; the bit position
		 * differs between 5705+ and older chips. */
		if (tcp_opt_len || (skb->nh.iph->ihl > 5)) {
			if ( T3_ASIC_IS_5705_BEYOND(pDevice->ChipRevId) ){
				tcp_seg_flags =
					((skb->nh.iph->ihl - 5) +
					(tcp_opt_len >> 2)) << 11;
			}
			else {
				pPacket->Flags |=
					((skb->nh.iph->ihl - 5) +
					(tcp_opt_len >> 2)) << 12;
			}
		}
		pPacket->u.Tx.MaxSegmentSize = mss | tcp_seg_flags;
	}
	else
	{
		pPacket->u.Tx.MaxSegmentSize = 0;
	}
#endif
	BCM5700_LOCK(pUmDevice, flags);
	LM_SendPacket(pDevice, pPacket);
	BCM5700_UNLOCK(pUmDevice, flags);

#if (LINUX_VERSION_CODE < 0x02032b)
	netif_wake_queue(dev);
#endif
	dev->trans_start = jiffies;


	return 0;
}
2425
2426#ifdef BCM_NAPI_RXPOLL
STATIC int
bcm5700_poll(struct net_device *dev, int *budget)
{
	/* NAPI poll callback: service up to the budgeted number of rx
	 * packets, replenish buffers, and re-enable rx interrupts once the
	 * ring is drained.  Returns 0 when polling is complete, 1 to stay
	 * on the poll list. */
	int orig_budget = *budget;
	int work_done;
	UM_DEVICE_BLOCK *pUmDevice = (UM_DEVICE_BLOCK *) dev->priv;
	LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
	unsigned long flags = 0;
	LM_UINT32 tag;

	if (orig_budget > dev->quota)
		orig_budget = dev->quota;

	BCM5700_LOCK(pUmDevice, flags);
	/* BCM4785: Flush posted writes from GbE to host memory. */
	if (pDevice->Flags & FLUSH_POSTED_WRITE_FLAG)
		REG_RD(pDevice, HostCoalesce.Mode);
	work_done = LM_ServiceRxPoll(pDevice, orig_budget);
	*budget -= work_done;
	dev->quota -= work_done;

	if (QQ_GetEntryCnt(&pUmDevice->rx_out_of_buf_q.Container)) {
		replenish_rx_buffers(pUmDevice, 0);
	}
	BCM5700_UNLOCK(pUmDevice, flags);
	if (work_done) {
		/* Push serviced packets up the stack, then give refilled
		 * buffers back to the chip. */
		MM_IndicateRxPackets(pDevice);
		BCM5700_LOCK(pUmDevice, flags);
		LM_QueueRxPackets(pDevice);
		BCM5700_UNLOCK(pUmDevice, flags);
	}
	/* Done when the budget was not exhausted, or when polling must
	 * stop because the device is being reset or suspended. */
	if ((work_done < orig_budget) || atomic_read(&pUmDevice->intr_sem) ||
		pUmDevice->suspended) {

		netif_rx_complete(dev);
		BCM5700_LOCK(pUmDevice, flags);
		REG_WR(pDevice, Grc.Mode, pDevice->GrcMode);
		pDevice->RxPoll = FALSE;
		/* NOTE(review): dead branch -- RxPoll was set FALSE on the
		 * line above, so this condition can never hold.  The check
		 * was probably intended to precede the assignment. */
		if (pDevice->RxPoll) {
			BCM5700_UNLOCK(pUmDevice, flags);
			return 0;
		}
		/* Take care of possible missed rx interrupts */
		REG_RD_BACK(pDevice, Grc.Mode);	/* flush the register write */
		tag = pDevice->pStatusBlkVirt->StatusTag;
		if ((pDevice->pStatusBlkVirt->Status & STATUS_BLOCK_UPDATED) ||
			(pDevice->pStatusBlkVirt->Idx[0].RcvProdIdx !=
			pDevice->RcvRetConIdx)) {

			REG_WR(pDevice, HostCoalesce.Mode,
				pDevice->CoalesceMode | HOST_COALESCE_ENABLE |
				HOST_COALESCE_NOW);
		}
		/* If a new status block is pending in the WDMA state machine */
		/* before the register write to enable the rx interrupt,      */
		/* the new status block may DMA with no interrupt. In this    */
		/* scenario, the tag read above will be older than the tag in */
		/* the pending status block and writing the older tag will    */
		/* cause interrupt to be generated.                           */
		else if (pDevice->Flags & USE_TAGGED_STATUS_FLAG) {
			MB_REG_WR(pDevice, Mailbox.Interrupt[0].Low,
				tag << 24);
			/* Make sure we service tx in case some tx interrupts */
			/* are cleared */
			if (atomic_read(&pDevice->SendBdLeft) <
				(T3_SEND_RCB_ENTRY_COUNT / 2)) {
				REG_WR(pDevice, HostCoalesce.Mode,
					pDevice->CoalesceMode |
					HOST_COALESCE_ENABLE |
					HOST_COALESCE_NOW);
			}
		}
		BCM5700_UNLOCK(pUmDevice, flags);
		return 0;
	}
	return 1;
}
2504#endif /* BCM_NAPI_RXPOLL */
2505
STATIC irqreturn_t
bcm5700_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
	/* Hard interrupt handler.  Acknowledges the chip, loops servicing
	 * the status block (tagged or untagged acknowledge protocol), then
	 * arranges rx buffer replenishment via tasklet/NAPI or inline. */
	struct net_device *dev = (struct net_device *)dev_instance;
	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
	PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
	LM_UINT32 oldtag, newtag;
	int i, max_intr_loop;
#ifdef BCM_TASKLET
	int repl_buf_count;
#endif
	unsigned int handled = 1;

	if (!pDevice->InitDone) {
		handled = 0;
		return IRQ_RETVAL(handled);
	}

	bcm5700_intr_lock(pUmDevice);
	/* Interrupts are soft-disabled (intr_sem raised during reset):
	 * mask further interrupts and bail out. */
	if (atomic_read(&pUmDevice->intr_sem)) {
		MB_REG_WR(pDevice, Mailbox.Interrupt[0].Low, 1);
		bcm5700_intr_unlock(pUmDevice);
		handled = 0;
		return IRQ_RETVAL(handled);
	}

	/* Guard against re-entry of the handler itself. */
	if (test_and_set_bit(0, (void*)&pUmDevice->interrupt)) {
		printk(KERN_ERR "%s: Duplicate entry of the interrupt handler\n",
			dev->name);
		bcm5700_intr_unlock(pUmDevice);
		handled = 0;
		return IRQ_RETVAL(handled);
	}

	/* BCM4785: Flush posted writes from GbE to host memory. */
	if (pDevice->Flags & FLUSH_POSTED_WRITE_FLAG)
		REG_RD(pDevice, HostCoalesce.Mode);

	/* MSI is never shared so it is always ours; otherwise check the
	 * status block and the INTA line before claiming the interrupt. */
	if ((pDevice->Flags & USING_MSI_FLAG) ||
		(pDevice->pStatusBlkVirt->Status & STATUS_BLOCK_UPDATED) ||
		!(REG_RD(pDevice,PciCfg.PciState) & T3_PCI_STATE_INTERRUPT_NOT_ACTIVE) )
	{

		/* Interrupt self-test support (see b57_test_intr()). */
		if (pUmDevice->intr_test) {
			if (!(REG_RD(pDevice, PciCfg.PciState) &
					T3_PCI_STATE_INTERRUPT_NOT_ACTIVE) ||
						pDevice->Flags & USING_MSI_FLAG ) {
				pUmDevice->intr_test_result = 1;
			}
			pUmDevice->intr_test = 0;
		}

#ifdef BCM_NAPI_RXPOLL
		max_intr_loop = 1;
#else
		max_intr_loop = 50;
#endif
		if (pDevice->Flags & USE_TAGGED_STATUS_FLAG) {
			/* Tagged mode: service until the status tag stops
			 * changing (or the loop cap hits), then acknowledge
			 * the last observed tag. */
			MB_REG_WR(pDevice, Mailbox.Interrupt[0].Low, 1);
			oldtag = pDevice->pStatusBlkVirt->StatusTag;

			for (i = 0; ; i++) {
				pDevice->pStatusBlkVirt->Status &= ~STATUS_BLOCK_UPDATED;

				LM_ServiceInterrupts(pDevice);
				/* BCM4785: Flush GbE posted writes to host memory. */
				if (pDevice->Flags & FLUSH_POSTED_WRITE_FLAG)
					MB_REG_RD(pDevice, Mailbox.Interrupt[0].Low);
				newtag = pDevice->pStatusBlkVirt->StatusTag;
				if ((newtag == oldtag) || (i > max_intr_loop)) {
					MB_REG_WR(pDevice, Mailbox.Interrupt[0].Low, oldtag << 24);
					pDevice->LastTag = oldtag;
					if (pDevice->Flags & UNDI_FIX_FLAG) {
						REG_WR(pDevice, Grc.LocalCtrl,
						pDevice->GrcLocalCtrl | 0x2);
					}
					break;
				}
				oldtag = newtag;
			}
		}
		else
		{
			/* Untagged mode: mask, service, unmask; repeat while
			 * the status block keeps getting updated. */
			i = 0;
			do {
				uint dummy;

				MB_REG_WR(pDevice, Mailbox.Interrupt[0].Low, 1);
				pDevice->pStatusBlkVirt->Status &= ~STATUS_BLOCK_UPDATED;
				LM_ServiceInterrupts(pDevice);
				MB_REG_WR(pDevice, Mailbox.Interrupt[0].Low, 0);
				/* Read back to flush the unmask write. */
				dummy = MB_REG_RD(pDevice, Mailbox.Interrupt[0].Low);
				i++;
			}
			while ((pDevice->pStatusBlkVirt->Status & STATUS_BLOCK_UPDATED) &&
				(i < max_intr_loop));

			if (pDevice->Flags & UNDI_FIX_FLAG) {
				REG_WR(pDevice, Grc.LocalCtrl,
				pDevice->GrcLocalCtrl | 0x2);
			}
		}
	}
	else
	{
		/* not my interrupt */
		handled = 0;
	}

#ifdef BCM_TASKLET
	/* Replenish rx buffers inline when the pool is critically low (or
	 * the LM layer asked to requeue); otherwise punt to the tasklet. */
	repl_buf_count = QQ_GetEntryCnt(&pUmDevice->rx_out_of_buf_q.Container);
	if (((repl_buf_count > pUmDevice->rx_buf_repl_panic_thresh) ||
		pDevice->QueueAgain) &&
		(!test_and_set_bit(0, &pUmDevice->tasklet_busy))) {

		replenish_rx_buffers(pUmDevice, pUmDevice->rx_buf_repl_isr_limit);
		clear_bit(0, (void*)&pUmDevice->tasklet_busy);
	}
	else if ((repl_buf_count > pUmDevice->rx_buf_repl_thresh) &&
		!pUmDevice->tasklet_pending) {

		pUmDevice->tasklet_pending = 1;
		tasklet_schedule(&pUmDevice->tasklet);
	}
#else
#ifdef BCM_NAPI_RXPOLL
	/* Hand buffer replenishment to the NAPI poll routine. */
	if (!pDevice->RxPoll &&
		QQ_GetEntryCnt(&pUmDevice->rx_out_of_buf_q.Container)) {
		pDevice->RxPoll = 1;
		MM_ScheduleRxPoll(pDevice);
	}
#else
	if (QQ_GetEntryCnt(&pUmDevice->rx_out_of_buf_q.Container)) {
		replenish_rx_buffers(pUmDevice, 0);
	}

	if (QQ_GetEntryCnt(&pDevice->RxPacketFreeQ.Container) ||
		pDevice->QueueAgain) {

		LM_QueueRxPackets(pDevice);
	}
#endif
#endif

	clear_bit(0, (void*)&pUmDevice->interrupt);
	bcm5700_intr_unlock(pUmDevice);
	/* Restart a transmit deferred while the ISR was running
	 * (see bcm5700_start_xmit()). */
	if (pUmDevice->tx_queued) {
		pUmDevice->tx_queued = 0;
		netif_wake_queue(dev);
	}
	return IRQ_RETVAL(handled);
}
2658
2659
2660#ifdef BCM_TASKLET
2661STATIC void
2662bcm5700_tasklet(unsigned long data)
2663{
2664	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)data;
2665	unsigned long flags = 0;
2666
2667	/* RH 7.2 Beta 3 tasklets are reentrant */
2668	if (test_and_set_bit(0, &pUmDevice->tasklet_busy)) {
2669		pUmDevice->tasklet_pending = 0;
2670		return;
2671	}
2672
2673	pUmDevice->tasklet_pending = 0;
2674	if (pUmDevice->opened && !pUmDevice->suspended) {
2675		BCM5700_LOCK(pUmDevice, flags);
2676		replenish_rx_buffers(pUmDevice, 0);
2677		BCM5700_UNLOCK(pUmDevice, flags);
2678	}
2679
2680	clear_bit(0, &pUmDevice->tasklet_busy);
2681}
2682#endif
2683
STATIC int
bcm5700_close(struct net_device *dev)
{
	/* net_device stop entry point: quiesce the interface, shut the
	 * chip down, release irq/MSI resources, drop to D3 (unless on an
	 * SB core), and free the DMA memory. */

	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
	PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;

#if (LINUX_VERSION_CODE < 0x02032b)
	dev->start = 0;
#endif
	netif_stop_queue(dev);
	pUmDevice->opened = 0;

	/* Only report link-down when neither ASF nor WOL keeps the link
	 * alive after close (note the stacked conditional ifdefs: the
	 * message is guarded by whichever checks are compiled in). */
#ifdef BCM_ASF
	if( !(pDevice->AsfFlags & ASF_ENABLED) )
#endif
#ifdef BCM_WOL
		if( enable_wol[pUmDevice->index] == 0 )
#endif
			B57_INFO(("%s: %s NIC Link is DOWN\n", bcm5700_driver, dev->name));

	if (tigon3_debug > 1)
		printk(KERN_DEBUG "%s: Shutting down Tigon3\n",
			   dev->name);

	LM_MulticastClear(pDevice);
	bcm5700_shutdown(pUmDevice);

	/* The statistics timer is only armed on 5705+ parts. */
	if (T3_ASIC_IS_5705_BEYOND(pDevice->ChipRevId)) {
		del_timer_sync(&pUmDevice->statstimer);
	}

	del_timer_sync(&pUmDevice->timer);

	free_irq(pUmDevice->pdev->irq, dev);

#if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)

	if(pDevice->Flags & USING_MSI_FLAG) {
		pci_disable_msi(pUmDevice->pdev);
                REG_WR(pDevice, Msi.Mode,  1 );
		pDevice->Flags &= ~USING_MSI_FLAG;
	}

#endif


#if (LINUX_VERSION_CODE < 0x020300)
	MOD_DEC_USE_COUNT;
#endif
	/* (The braces below introduce a redundant scope; harmless.) */
	{
	/* BCM4785: Don't go to low-power state because it will power down the smbus block. */
	if (!(pDevice->Flags & SB_CORE_FLAG))
		LM_SetPowerState(pDevice, LM_POWER_STATE_D3);
	}

	bcm5700_freemem(dev);

	/* Leave the rx free queue initialized for the next open. */
	QQ_InitQueue(&pDevice->RxPacketFreeQ.Container,
        		MAX_RX_PACKET_DESC_COUNT);

	return 0;
}
2747
2748STATIC int
2749bcm5700_freemem(struct net_device *dev)
2750{
2751	int i;
2752	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
2753	LM_DEVICE_BLOCK	 *pDevice = &pUmDevice->lm_dev;
2754
2755	for (i = 0; i < pUmDevice->mem_list_num; i++) {
2756		if (pUmDevice->mem_size_list[i] == 0) {
2757			kfree(pUmDevice->mem_list[i]);
2758		}
2759		else {
2760			pci_free_consistent(pUmDevice->pdev,
2761				(size_t) pUmDevice->mem_size_list[i],
2762				pUmDevice->mem_list[i],
2763				pUmDevice->dma_list[i]);
2764		}
2765	}
2766
2767	pDevice->pStatusBlkVirt = 0;
2768	pDevice->pStatsBlkVirt  = 0;
2769	pUmDevice->mem_list_num = 0;
2770
2771#ifdef NICE_SUPPORT
2772	if (!pUmDevice->opened) {
2773		for (i = 0; i < MAX_MEM2; i++) {
2774			if (pUmDevice->mem_size_list2[i]) {
2775				bcm5700_freemem2(pUmDevice, i);
2776			}
2777		}
2778	}
2779#endif
2780	return 0;
2781}
2782
2783#ifdef NICE_SUPPORT
2784/* Frees consistent memory allocated through ioctl */
2785/* The memory to be freed is in mem_list2[index] */
2786STATIC int
2787bcm5700_freemem2(UM_DEVICE_BLOCK *pUmDevice, int index)
2788{
2789#if (LINUX_VERSION_CODE >= 0x020400)
2790	void *ptr;
2791	struct page *pg, *last_pg;
2792
2793	/* Probably won't work on some architectures */
2794	ptr = pUmDevice->mem_list2[index],
2795	pg = virt_to_page(ptr);
2796	last_pg = virt_to_page(ptr + pUmDevice->mem_size_list2[index] - 1);
2797	for (; ; pg++) {
2798#if (LINUX_VERSION_CODE > 0x020500)
2799		ClearPageReserved(pg);
2800#else
2801		mem_map_unreserve(pg);
2802#endif
2803		if (pg == last_pg)
2804			break;
2805	}
2806	pci_free_consistent(pUmDevice->pdev,
2807		(size_t) pUmDevice->mem_size_list2[index],
2808		pUmDevice->mem_list2[index],
2809		pUmDevice->dma_list2[index]);
2810	pUmDevice->mem_size_list2[index] = 0;
2811#endif
2812	return 0;
2813}
2814#endif
2815
uint64_t
bcm5700_crc_count(PUM_DEVICE_BLOCK pUmDevice)
{
	/* Return the cumulative rx CRC error count.  On 5700/5701 copper
	 * (non-TBI) parts the count is read from a PHY register and
	 * accumulated in software; all other configurations report
	 * dot3StatsFCSErrors from the statistics block. */
	PLM_DEVICE_BLOCK pDevice = &pUmDevice->lm_dev;
	LM_UINT32 Value32;
	PT3_STATS_BLOCK pStats = (PT3_STATS_BLOCK) pDevice->pStatsBlkVirt;
	unsigned long flags;

	if ((T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5700 ||
		T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5701) &&
		!(pDevice->TbiFlags & ENABLE_TBI_FLAG)) {

		/* The PHY is not accessible unless the device is up. */
		if (!pUmDevice->opened || !pDevice->InitDone)
		{

			return 0;
		}

		/* regulate MDIO access during run time */
		if (pUmDevice->crc_counter_expiry > 0)
			return pUmDevice->phy_crc_count;

		/* Re-read the PHY counter at most once every 5 seconds. */
		pUmDevice->crc_counter_expiry = (5 * HZ) /
			pUmDevice->timer_interval;

		BCM5700_PHY_LOCK(pUmDevice, flags);
		/* Registers 0x1e/0x14 are presumed to be the PHY's test
		 * control and CRC counter registers (bit 0x8000 in 0x1e
		 * enables counting) -- verify against the PHY datasheet. */
		LM_ReadPhy(pDevice, 0x1e, &Value32);
		if ((Value32 & 0x8000) == 0)
			LM_WritePhy(pDevice, 0x1e, Value32 | 0x8000);
		LM_ReadPhy(pDevice, 0x14, &Value32);
		BCM5700_PHY_UNLOCK(pUmDevice, flags);
		/* Sometimes data on the MDIO bus can be corrupted */
		if (Value32 != 0xffff)
			pUmDevice->phy_crc_count += Value32;
		return pUmDevice->phy_crc_count;
	}
	else if (pStats == 0) {
		return 0;
	}
	else {
		return (MM_GETSTATS64(pStats->dot3StatsFCSErrors));
	}
}
2859
2860uint64_t
2861bcm5700_rx_err_count(UM_DEVICE_BLOCK *pUmDevice)
2862{
2863	LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
2864	T3_STATS_BLOCK *pStats = (T3_STATS_BLOCK *) pDevice->pStatsBlkVirt;
2865
2866	if (pStats == 0)
2867		return 0;
2868	return (bcm5700_crc_count(pUmDevice) +
2869		MM_GETSTATS64(pStats->dot3StatsAlignmentErrors) +
2870		MM_GETSTATS64(pStats->etherStatsUndersizePkts) +
2871		MM_GETSTATS64(pStats->etherStatsFragments) +
2872		MM_GETSTATS64(pStats->dot3StatsFramesTooLong) +
2873		MM_GETSTATS64(pStats->etherStatsJabbers));
2874}
2875
2876STATIC struct net_device_stats *
2877bcm5700_get_stats(struct net_device *dev)
2878{
2879	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
2880	PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
2881	PT3_STATS_BLOCK pStats = (PT3_STATS_BLOCK) pDevice->pStatsBlkVirt;
2882	struct net_device_stats *p_netstats = &pUmDevice->stats;
2883
2884	if (pStats == 0)
2885		return p_netstats;
2886
2887	/* Get stats from LM */
2888	p_netstats->rx_packets =
2889		MM_GETSTATS(pStats->ifHCInUcastPkts) +
2890		MM_GETSTATS(pStats->ifHCInMulticastPkts) +
2891		MM_GETSTATS(pStats->ifHCInBroadcastPkts);
2892	p_netstats->tx_packets =
2893		MM_GETSTATS(pStats->ifHCOutUcastPkts) +
2894		MM_GETSTATS(pStats->ifHCOutMulticastPkts) +
2895		MM_GETSTATS(pStats->ifHCOutBroadcastPkts);
2896	p_netstats->rx_bytes = MM_GETSTATS(pStats->ifHCInOctets);
2897	p_netstats->tx_bytes = MM_GETSTATS(pStats->ifHCOutOctets);
2898	p_netstats->tx_errors =
2899		MM_GETSTATS(pStats->dot3StatsInternalMacTransmitErrors) +
2900		MM_GETSTATS(pStats->dot3StatsCarrierSenseErrors) +
2901		MM_GETSTATS(pStats->ifOutDiscards) +
2902		MM_GETSTATS(pStats->ifOutErrors);
2903	p_netstats->multicast = MM_GETSTATS(pStats->ifHCInMulticastPkts);
2904	p_netstats->collisions = MM_GETSTATS(pStats->etherStatsCollisions);
2905	p_netstats->rx_length_errors =
2906		MM_GETSTATS(pStats->dot3StatsFramesTooLong) +
2907		MM_GETSTATS(pStats->etherStatsUndersizePkts);
2908	p_netstats->rx_over_errors = MM_GETSTATS(pStats->nicNoMoreRxBDs);
2909	p_netstats->rx_frame_errors =
2910		MM_GETSTATS(pStats->dot3StatsAlignmentErrors);
2911	p_netstats->rx_crc_errors = (unsigned long)
2912		bcm5700_crc_count(pUmDevice);
2913	p_netstats->rx_errors = (unsigned long)
2914		bcm5700_rx_err_count(pUmDevice);
2915
2916	p_netstats->tx_aborted_errors = MM_GETSTATS(pStats->ifOutDiscards);
2917	p_netstats->tx_carrier_errors =
2918		MM_GETSTATS(pStats->dot3StatsCarrierSenseErrors);
2919
2920	return p_netstats;
2921}
2922
2923void
2924b57_suspend_chip(UM_DEVICE_BLOCK *pUmDevice)
2925{
2926	LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
2927
2928	if (pUmDevice->opened) {
2929		bcm5700_intr_off(pUmDevice);
2930		netif_carrier_off(pUmDevice->dev);
2931		netif_stop_queue(pUmDevice->dev);
2932#ifdef BCM_TASKLET
2933		tasklet_kill(&pUmDevice->tasklet);
2934#endif
2935		bcm5700_poll_wait(pUmDevice);
2936	}
2937	pUmDevice->suspended = 1;
2938	LM_ShutdownChip(pDevice, LM_SUSPEND_RESET);
2939}
2940
2941void
2942b57_resume_chip(UM_DEVICE_BLOCK *pUmDevice)
2943{
2944	LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
2945
2946	if (pUmDevice->suspended) {
2947		pUmDevice->suspended = 0;
2948		if (pUmDevice->opened) {
2949			bcm5700_reset(pUmDevice->dev);
2950		}
2951		else {
2952			LM_ShutdownChip(pDevice, LM_SHUTDOWN_RESET);
2953		}
2954	}
2955}
2956
2957/* Returns 0 on failure, 1 on success */
2958int
2959b57_test_intr(UM_DEVICE_BLOCK *pUmDevice)
2960{
2961	LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
2962	int j;
2963
2964	if (!pUmDevice->opened)
2965		return 0;
2966	pUmDevice->intr_test_result = 0;
2967	pUmDevice->intr_test = 1;
2968
2969	REG_WR(pDevice, HostCoalesce.Mode,
2970		pDevice->CoalesceMode | HOST_COALESCE_ENABLE |
2971		HOST_COALESCE_NOW);
2972
2973	for (j = 0; j < 10; j++) {
2974		if (pUmDevice->intr_test_result){
2975			break;
2976		}
2977
2978		REG_WR(pDevice, HostCoalesce.Mode,
2979		pDevice->CoalesceMode | HOST_COALESCE_ENABLE |
2980		HOST_COALESCE_NOW);
2981
2982		MM_Sleep(pDevice, 1);
2983	}
2984
2985	return pUmDevice->intr_test_result;
2986
2987}
2988
2989#ifdef SIOCETHTOOL
2990
2991#ifdef ETHTOOL_GSTRINGS
2992
/* Number of statistics exported through the ethtool ETH_SS_STATS set. */
#define ETH_NUM_STATS 30
/* Indices of the two entries that are computed in software rather than
 * read from the statistics block (their offsets are 0 in
 * bcm5700_stats_offset_arr below). */
#define RX_CRC_IDX 5
#define RX_MAC_ERR_IDX 14

/* ethtool statistics name table; the order must stay in lock-step with
 * bcm5700_stats_offset_arr. */
struct {
	char string[ETH_GSTRING_LEN];
} bcm5700_stats_str_arr[ETH_NUM_STATS] = {
	{ "rx_unicast_packets" },
	{ "rx_multicast_packets" },
	{ "rx_broadcast_packets" },
	{ "rx_bytes" },
	{ "rx_fragments" },
	{ "rx_crc_errors" },	/* this needs to be calculated */
	{ "rx_align_errors" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "rx_long_frames" },
	{ "rx_short_frames" },
	{ "rx_jabber" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_mac_errors" },	/* this needs to be calculated */
	{ "tx_unicast_packets" },
	{ "tx_multicast_packets" },
	{ "tx_broadcast_packets" },
	{ "tx_bytes" },
	{ "tx_deferred" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_total_collisions" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "tx_internal_mac_errors" },
	{ "tx_carrier_errors" },
	{ "tx_errors" },
};
3031
/* Index (in 64-bit words) of a named counter within T3_STATS_BLOCK; used
 * to build bcm5700_stats_offset_arr below. */
#define STATS_OFFSET(offset_name) ((OFFSETOF(T3_STATS_BLOCK, offset_name)) / sizeof(uint64_t))

#ifdef __BIG_ENDIAN
#define SWAP_DWORD_64(x) (x)
#else
/* Swap the upper and lower 32-bit halves of a 64-bit statistics counter on
 * little-endian hosts. Fix: the macro argument is now fully parenthesized
 * (and widened to uint64_t) so that expressions such as
 * SWAP_DWORD_64(a + b) expand correctly; the old body expanded the raw
 * argument into the shifts. */
#define SWAP_DWORD_64(x) ((((uint64_t)(x)) << 32) | (((uint64_t)(x)) >> 32))
#endif
3039
/* 64-bit-word offsets into the hardware statistics block, one per entry of
 * bcm5700_stats_str_arr (same order). A zero offset marks a counter that is
 * not read from the statistics block: indices RX_CRC_IDX (5) and
 * RX_MAC_ERR_IDX (14) are computed in the ETHTOOL_GSTATS handler instead. */
unsigned long bcm5700_stats_offset_arr[ETH_NUM_STATS] = {
	STATS_OFFSET(ifHCInUcastPkts),
	STATS_OFFSET(ifHCInMulticastPkts),
	STATS_OFFSET(ifHCInBroadcastPkts),
	STATS_OFFSET(ifHCInOctets),
	STATS_OFFSET(etherStatsFragments),
	0,			/* rx_crc_errors: calculated separately */
	STATS_OFFSET(dot3StatsAlignmentErrors),
	STATS_OFFSET(xonPauseFramesReceived),
	STATS_OFFSET(xoffPauseFramesReceived),
	STATS_OFFSET(dot3StatsFramesTooLong),
	STATS_OFFSET(etherStatsUndersizePkts),
	STATS_OFFSET(etherStatsJabbers),
	STATS_OFFSET(ifInDiscards),
	STATS_OFFSET(ifInErrors),
	0,			/* rx_mac_errors: calculated separately */
	STATS_OFFSET(ifHCOutUcastPkts),
	STATS_OFFSET(ifHCOutMulticastPkts),
	STATS_OFFSET(ifHCOutBroadcastPkts),
	STATS_OFFSET(ifHCOutOctets),
	STATS_OFFSET(dot3StatsDeferredTransmissions),
	STATS_OFFSET(dot3StatsSingleCollisionFrames),
	STATS_OFFSET(dot3StatsMultipleCollisionFrames),
	STATS_OFFSET(etherStatsCollisions),
	STATS_OFFSET(dot3StatsExcessiveCollisions),
	STATS_OFFSET(dot3StatsLateCollisions),
	STATS_OFFSET(outXonSent),
	STATS_OFFSET(outXoffSent),
	STATS_OFFSET(dot3StatsInternalMacTransmitErrors),
	STATS_OFFSET(dot3StatsCarrierSenseErrors),
	STATS_OFFSET(ifOutErrors),
};
3072
3073#endif /* ETHTOOL_GSTRINGS */
3074
3075#ifdef ETHTOOL_TEST
/* Number of self-tests reported through ETHTOOL_TEST / ETH_SS_TEST. */
#define ETH_NUM_TESTS 6
/* Display names for ETHTOOL_GSTRINGS(ETH_SS_TEST); order must match the
 * tests[] result array filled in by the ETHTOOL_TEST handler ("offline"
 * tests require the interface to be quiesced first). */
struct {
	char string[ETH_GSTRING_LEN];
} bcm5700_tests_str_arr[ETH_NUM_TESTS] = {
	{ "register test (offline)" },
	{ "memory test (offline)" },
	{ "loopback test (offline)" },
	{ "nvram test (online)" },
	{ "interrupt test (online)" },
	{ "link test (online)" },
};
3087
3088extern LM_STATUS b57_test_registers(UM_DEVICE_BLOCK *pUmDevice);
3089extern LM_STATUS b57_test_memory(UM_DEVICE_BLOCK *pUmDevice);
3090extern LM_STATUS b57_test_nvram(UM_DEVICE_BLOCK *pUmDevice);
3091extern LM_STATUS b57_test_link(UM_DEVICE_BLOCK *pUmDevice);
3092extern LM_STATUS b57_test_loopback(UM_DEVICE_BLOCK *pUmDevice, int looptype, int linespeed);
3093#endif
3094
3095#ifdef ETHTOOL_GREGS
3096#if (LINUX_VERSION_CODE >= 0x02040f)
3097static void
3098bcm5700_get_reg_blk(UM_DEVICE_BLOCK *pUmDevice, u32 **buf, u32 start, u32 end,
3099		int reserved)
3100{
3101	u32 offset;
3102	LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
3103
3104	if (reserved) {
3105		memset(*buf, 0, end - start);
3106		*buf = *buf + (end - start)/4;
3107		return;
3108	}
3109	for (offset = start; offset < end; offset+=4, *buf = *buf + 1) {
3110		if (T3_ASIC_IS_5705_BEYOND(pDevice->ChipRevId)){
3111			if (((offset >= 0x3400) && (offset < 0x3c00)) ||
3112				((offset >= 0x5400) && (offset < 0x5800)) ||
3113				((offset >= 0x6400) && (offset < 0x6800))) {
3114				**buf = 0;
3115				continue;
3116			}
3117		}
3118		**buf = REG_RD_OFFSET(pDevice, offset);
3119	}
3120}
3121#endif
3122#endif
3123
3124static int netdev_ethtool_ioctl(struct net_device *dev, void *useraddr)
3125{
3126	struct ethtool_cmd ethcmd;
3127	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
3128	PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
3129
3130	if (mm_copy_from_user(&ethcmd, useraddr, sizeof(ethcmd)))
3131		return -EFAULT;
3132
3133        switch (ethcmd.cmd) {
3134#ifdef ETHTOOL_GDRVINFO
3135        case ETHTOOL_GDRVINFO: {
3136		struct ethtool_drvinfo info = {ETHTOOL_GDRVINFO};
3137
3138		strcpy(info.driver,  bcm5700_driver);
3139#ifdef INCLUDE_5701_AX_FIX
3140		if(pDevice->ChipRevId == T3_CHIP_ID_5701_A0) {
3141			extern int t3FwReleaseMajor;
3142			extern int t3FwReleaseMinor;
3143			extern int t3FwReleaseFix;
3144
3145			sprintf(info.fw_version, "%i.%i.%i",
3146				t3FwReleaseMajor, t3FwReleaseMinor,
3147				t3FwReleaseFix);
3148		}
3149#endif
3150		strcpy(info.fw_version, pDevice->BootCodeVer);
3151		strcpy(info.version, bcm5700_version);
3152#if (LINUX_VERSION_CODE <= 0x020422)
3153		strcpy(info.bus_info, pUmDevice->pdev->slot_name);
3154#else
3155		strcpy(info.bus_info, pci_name(pUmDevice->pdev));
3156#endif
3157
3158
3159
3160#ifdef ETHTOOL_GEEPROM
3161		BCM_EEDUMP_LEN(&info, pDevice->NvramSize);
3162#endif
3163#ifdef ETHTOOL_GREGS
3164		/* dump everything, including holes in the register space */
3165		info.regdump_len = 0x6c00;
3166#endif
3167#ifdef ETHTOOL_GSTATS
3168		info.n_stats = ETH_NUM_STATS;
3169#endif
3170#ifdef ETHTOOL_TEST
3171		info.testinfo_len = ETH_NUM_TESTS;
3172#endif
3173		if (mm_copy_to_user(useraddr, &info, sizeof(info)))
3174			return -EFAULT;
3175		return 0;
3176	}
3177#endif
3178        case ETHTOOL_GSET: {
3179		if ((pDevice->TbiFlags & ENABLE_TBI_FLAG)||
3180			(pDevice->PhyFlags & PHY_IS_FIBER)) {
3181			ethcmd.supported =
3182				(SUPPORTED_1000baseT_Full |
3183				SUPPORTED_Autoneg);
3184			ethcmd.supported |= SUPPORTED_FIBRE;
3185			ethcmd.port = PORT_FIBRE;
3186		}
3187		else {
3188			ethcmd.supported =
3189				(SUPPORTED_10baseT_Half |
3190				SUPPORTED_10baseT_Full |
3191				SUPPORTED_100baseT_Half |
3192				SUPPORTED_100baseT_Full |
3193				SUPPORTED_1000baseT_Half |
3194				SUPPORTED_1000baseT_Full |
3195				SUPPORTED_Autoneg);
3196			ethcmd.supported |= SUPPORTED_TP;
3197			ethcmd.port = PORT_TP;
3198		}
3199
3200		ethcmd.transceiver = XCVR_INTERNAL;
3201		ethcmd.phy_address = 0;
3202
3203		if (pDevice->LineSpeed == LM_LINE_SPEED_1000MBPS)
3204			ethcmd.speed = SPEED_1000;
3205		else if (pDevice->LineSpeed == LM_LINE_SPEED_100MBPS)
3206			ethcmd.speed = SPEED_100;
3207		else if (pDevice->LineSpeed == LM_LINE_SPEED_10MBPS)
3208			ethcmd.speed = SPEED_10;
3209		else
3210			ethcmd.speed = 0;
3211
3212		if (pDevice->DuplexMode == LM_DUPLEX_MODE_FULL)
3213			ethcmd.duplex = DUPLEX_FULL;
3214		else
3215			ethcmd.duplex = DUPLEX_HALF;
3216
3217		if (pDevice->DisableAutoNeg == FALSE) {
3218			ethcmd.autoneg = AUTONEG_ENABLE;
3219			ethcmd.advertising = ADVERTISED_Autoneg;
3220			if ((pDevice->TbiFlags & ENABLE_TBI_FLAG) ||
3221				(pDevice->PhyFlags & PHY_IS_FIBER)) {
3222				ethcmd.advertising |=
3223					ADVERTISED_1000baseT_Full |
3224					ADVERTISED_FIBRE;
3225			}
3226			else {
3227				ethcmd.advertising |=
3228					ADVERTISED_TP;
3229				if (pDevice->advertising &
3230					PHY_AN_AD_10BASET_HALF) {
3231
3232					ethcmd.advertising |=
3233						ADVERTISED_10baseT_Half;
3234				}
3235				if (pDevice->advertising &
3236					PHY_AN_AD_10BASET_FULL) {
3237
3238					ethcmd.advertising |=
3239						ADVERTISED_10baseT_Full;
3240				}
3241				if (pDevice->advertising &
3242					PHY_AN_AD_100BASETX_HALF) {
3243
3244					ethcmd.advertising |=
3245						ADVERTISED_100baseT_Half;
3246				}
3247				if (pDevice->advertising &
3248					PHY_AN_AD_100BASETX_FULL) {
3249
3250					ethcmd.advertising |=
3251						ADVERTISED_100baseT_Full;
3252				}
3253				if (pDevice->advertising1000 &
3254					BCM540X_AN_AD_1000BASET_HALF) {
3255
3256					ethcmd.advertising |=
3257						ADVERTISED_1000baseT_Half;
3258				}
3259				if (pDevice->advertising1000 &
3260					BCM540X_AN_AD_1000BASET_FULL) {
3261
3262					ethcmd.advertising |=
3263						ADVERTISED_1000baseT_Full;
3264				}
3265			}
3266		}
3267		else {
3268			ethcmd.autoneg = AUTONEG_DISABLE;
3269			ethcmd.advertising = 0;
3270		}
3271
3272		ethcmd.maxtxpkt = pDevice->TxMaxCoalescedFrames;
3273		ethcmd.maxrxpkt = pDevice->RxMaxCoalescedFrames;
3274
3275		if(mm_copy_to_user(useraddr, &ethcmd, sizeof(ethcmd)))
3276			return -EFAULT;
3277		return 0;
3278	}
3279	case ETHTOOL_SSET: {
3280		unsigned long flags;
3281
3282		if(!capable(CAP_NET_ADMIN))
3283			return -EPERM;
3284		if (ethcmd.autoneg == AUTONEG_ENABLE) {
3285			pDevice->RequestedLineSpeed = LM_LINE_SPEED_AUTO;
3286			pDevice->RequestedDuplexMode = LM_DUPLEX_MODE_UNKNOWN;
3287			pDevice->DisableAutoNeg = FALSE;
3288		}
3289		else {
3290			if (ethcmd.speed == SPEED_1000 &&
3291				pDevice->PhyFlags & PHY_NO_GIGABIT)
3292					return -EINVAL;
3293
3294			if (ethcmd.speed == SPEED_1000 &&
3295			    (pDevice->TbiFlags & ENABLE_TBI_FLAG ||
3296			     pDevice->PhyFlags & PHY_IS_FIBER ) ) {
3297
3298				pDevice->RequestedLineSpeed =
3299					LM_LINE_SPEED_1000MBPS;
3300
3301				pDevice->RequestedDuplexMode =
3302					LM_DUPLEX_MODE_FULL;
3303			}
3304			else if (ethcmd.speed == SPEED_100 &&
3305			        !(pDevice->TbiFlags & ENABLE_TBI_FLAG) &&
3306			     	!(pDevice->PhyFlags & PHY_IS_FIBER)) {
3307
3308				pDevice->RequestedLineSpeed =
3309					LM_LINE_SPEED_100MBPS;
3310			}
3311			else if (ethcmd.speed == SPEED_10  &&
3312			        !(pDevice->TbiFlags & ENABLE_TBI_FLAG) &&
3313			     	!(pDevice->PhyFlags & PHY_IS_FIBER)) {
3314
3315                                pDevice->RequestedLineSpeed =
3316					LM_LINE_SPEED_10MBPS;
3317			}
3318			else {
3319				return -EINVAL;
3320			}
3321
3322			pDevice->DisableAutoNeg = TRUE;
3323			if (ethcmd.duplex == DUPLEX_FULL) {
3324				pDevice->RequestedDuplexMode =
3325					LM_DUPLEX_MODE_FULL;
3326			}
3327			else {
3328				if (!(pDevice->TbiFlags & ENABLE_TBI_FLAG) &&
3329			     	    !(pDevice->PhyFlags & PHY_IS_FIBER)  ) {
3330
3331					pDevice->RequestedDuplexMode =
3332							LM_DUPLEX_MODE_HALF;
3333				}
3334			}
3335		}
3336		if (netif_running(dev)) {
3337			BCM5700_PHY_LOCK(pUmDevice, flags);
3338			LM_SetupPhy(pDevice);
3339			BCM5700_PHY_UNLOCK(pUmDevice, flags);
3340		}
3341		return 0;
3342	}
3343#ifdef ETHTOOL_GWOL
3344#ifdef BCM_WOL
3345	case ETHTOOL_GWOL: {
3346		struct ethtool_wolinfo wol = {ETHTOOL_GWOL};
3347
3348		if (((pDevice->TbiFlags & ENABLE_TBI_FLAG) &&
3349			!(pDevice->Flags & FIBER_WOL_CAPABLE_FLAG)) ||
3350			(pDevice->Flags & DISABLE_D3HOT_FLAG)) {
3351			wol.supported = 0;
3352			wol.wolopts = 0;
3353		}
3354		else {
3355			wol.supported = WAKE_MAGIC;
3356			if (pDevice->WakeUpMode == LM_WAKE_UP_MODE_MAGIC_PACKET)
3357			{
3358				wol.wolopts = WAKE_MAGIC;
3359			}
3360			else {
3361				wol.wolopts = 0;
3362			}
3363		}
3364		if (mm_copy_to_user(useraddr, &wol, sizeof(wol)))
3365			return -EFAULT;
3366		return 0;
3367	}
3368	case ETHTOOL_SWOL: {
3369		struct ethtool_wolinfo wol;
3370
3371		if(!capable(CAP_NET_ADMIN))
3372			return -EPERM;
3373		if (mm_copy_from_user(&wol, useraddr, sizeof(wol)))
3374			return -EFAULT;
3375		if ((((pDevice->TbiFlags & ENABLE_TBI_FLAG) &&
3376			!(pDevice->Flags & FIBER_WOL_CAPABLE_FLAG)) ||
3377			(pDevice->Flags & DISABLE_D3HOT_FLAG)) &&
3378			wol.wolopts) {
3379			return -EINVAL;
3380		}
3381
3382		if ((wol.wolopts & ~WAKE_MAGIC) != 0) {
3383			return -EINVAL;
3384		}
3385		if (wol.wolopts & WAKE_MAGIC) {
3386			pDevice->WakeUpModeCap = LM_WAKE_UP_MODE_MAGIC_PACKET;
3387			pDevice->WakeUpMode = LM_WAKE_UP_MODE_MAGIC_PACKET;
3388		}
3389		else {
3390			pDevice->WakeUpModeCap = LM_WAKE_UP_MODE_NONE;
3391			pDevice->WakeUpMode = LM_WAKE_UP_MODE_NONE;
3392		}
3393		return 0;
3394        }
3395#endif
3396#endif
3397#ifdef ETHTOOL_GLINK
3398	case ETHTOOL_GLINK: {
3399		struct ethtool_value edata = {ETHTOOL_GLINK};
3400
3401		/* ifup only waits for 5 seconds for link up */
3402		/* NIC may take more than 5 seconds to establish link */
3403		if ((pUmDevice->delayed_link_ind > 0) &&
3404			delay_link[pUmDevice->index])
3405			return -EOPNOTSUPP;
3406
3407		if (pDevice->LinkStatus == LM_STATUS_LINK_ACTIVE) {
3408			edata.data =  1;
3409		}
3410		else {
3411			edata.data =  0;
3412		}
3413		if (mm_copy_to_user(useraddr, &edata, sizeof(edata)))
3414			return -EFAULT;
3415		return 0;
3416	}
3417#endif
3418#ifdef ETHTOOL_NWAY_RST
3419	case ETHTOOL_NWAY_RST: {
3420		LM_UINT32 phyctrl;
3421		unsigned long flags;
3422
3423		if(!capable(CAP_NET_ADMIN))
3424			return -EPERM;
3425		if (pDevice->DisableAutoNeg) {
3426			return -EINVAL;
3427		}
3428		if (!netif_running(dev))
3429			return -EAGAIN;
3430		BCM5700_PHY_LOCK(pUmDevice, flags);
3431		if (pDevice->TbiFlags & ENABLE_TBI_FLAG) {
3432			pDevice->RequestedLineSpeed = LM_LINE_SPEED_1000MBPS;
3433			pDevice->DisableAutoNeg = TRUE;
3434			LM_SetupPhy(pDevice);
3435
3436			pDevice->RequestedLineSpeed = LM_LINE_SPEED_AUTO;
3437			pDevice->DisableAutoNeg = FALSE;
3438			LM_SetupPhy(pDevice);
3439		}
3440		else {
3441			if ((T3_ASIC_REV(pDevice->ChipRevId) ==
3442					T3_ASIC_REV_5703) ||
3443				(T3_ASIC_REV(pDevice->ChipRevId) ==
3444					T3_ASIC_REV_5704) ||
3445				(T3_ASIC_REV(pDevice->ChipRevId) ==
3446					T3_ASIC_REV_5705))
3447			{
3448				LM_ResetPhy(pDevice);
3449				LM_SetupPhy(pDevice);
3450			}
3451			pDevice->PhyFlags &= ~PHY_FIBER_FALLBACK;
3452			LM_ReadPhy(pDevice, PHY_CTRL_REG, &phyctrl);
3453			LM_WritePhy(pDevice, PHY_CTRL_REG, phyctrl |
3454				PHY_CTRL_AUTO_NEG_ENABLE |
3455				PHY_CTRL_RESTART_AUTO_NEG);
3456		}
3457		BCM5700_PHY_UNLOCK(pUmDevice, flags);
3458		return 0;
3459	}
3460#endif
3461#ifdef ETHTOOL_GEEPROM
3462	case ETHTOOL_GEEPROM: {
3463		struct ethtool_eeprom eeprom;
3464		LM_UINT32 *buf = 0;
3465		LM_UINT32 buf1[64/4];
3466		int i, j, offset, len;
3467
3468		if (mm_copy_from_user(&eeprom, useraddr, sizeof(eeprom)))
3469			return -EFAULT;
3470
3471		if (eeprom.offset >= pDevice->NvramSize)
3472			return -EFAULT;
3473
3474		/* maximum data limited */
3475		/* to read more, call again with a different offset */
3476		if (eeprom.len > 0x800) {
3477			eeprom.len = 0x800;
3478			if (mm_copy_to_user(useraddr, &eeprom, sizeof(eeprom)))
3479				return -EFAULT;
3480		}
3481
3482		if (eeprom.len > 64) {
3483			buf = kmalloc(eeprom.len, GFP_KERNEL);
3484			if (!buf)
3485				return -ENOMEM;
3486		}
3487		else {
3488			buf = buf1;
3489		}
3490		useraddr += offsetof(struct ethtool_eeprom, data);
3491
3492		offset = eeprom.offset;
3493		len = eeprom.len;
3494		if (offset & 3) {
3495			offset &= 0xfffffffc;
3496			len += (offset & 3);
3497		}
3498		len = (len + 3) & 0xfffffffc;
3499		for (i = 0, j = 0; j < len; i++, j += 4) {
3500			if (LM_NvramRead(pDevice, offset + j, buf + i) !=
3501				LM_STATUS_SUCCESS) {
3502				break;
3503			}
3504		}
3505		if (j >= len) {
3506			buf += (eeprom.offset & 3);
3507			i = mm_copy_to_user(useraddr, buf, eeprom.len);
3508		}
3509		if (eeprom.len > 64) {
3510			kfree(buf);
3511		}
3512		if ((j < len) || i)
3513			return -EFAULT;
3514		return 0;
3515	}
3516	case ETHTOOL_SEEPROM: {
3517		struct ethtool_eeprom eeprom;
3518		LM_UINT32 buf[64/4];
3519		int i, offset, len;
3520
3521		if(!capable(CAP_NET_ADMIN))
3522			return -EPERM;
3523		if (mm_copy_from_user(&eeprom, useraddr, sizeof(eeprom)))
3524			return -EFAULT;
3525
3526		if ((eeprom.offset & 3) || (eeprom.len & 3) ||
3527			(eeprom.offset >= pDevice->NvramSize)) {
3528			return -EFAULT;
3529		}
3530
3531		if ((eeprom.offset + eeprom.len) >= pDevice->NvramSize) {
3532			eeprom.len = pDevice->NvramSize - eeprom.offset;
3533		}
3534
3535		useraddr += offsetof(struct ethtool_eeprom, data);
3536
3537		len = eeprom.len;
3538		offset = eeprom.offset;
3539		for (; len > 0; ) {
3540			if (len < 64)
3541				i = len;
3542			else
3543				i = 64;
3544			if (mm_copy_from_user(&buf, useraddr, i))
3545				return -EFAULT;
3546
3547			bcm5700_intr_off(pUmDevice);
3548			/* Prevent race condition on Grc.Mode register */
3549			bcm5700_poll_wait(pUmDevice);
3550
3551			if (LM_NvramWriteBlock(pDevice, offset, buf, i/4) !=
3552				LM_STATUS_SUCCESS) {
3553				bcm5700_intr_on(pUmDevice);
3554				return -EFAULT;
3555			}
3556			bcm5700_intr_on(pUmDevice);
3557			len -= i;
3558			offset += i;
3559			useraddr += i;
3560		}
3561		return 0;
3562	}
3563#endif
3564#ifdef ETHTOOL_GREGS
3565#if (LINUX_VERSION_CODE >= 0x02040f)
3566	case ETHTOOL_GREGS: {
3567		struct ethtool_regs eregs;
3568		LM_UINT32 *buf, *buf1;
3569		unsigned int i;
3570
3571		if(!capable(CAP_NET_ADMIN))
3572			return -EPERM;
3573		if (pDevice->Flags & UNDI_FIX_FLAG)
3574			return -EOPNOTSUPP;
3575		if (mm_copy_from_user(&eregs, useraddr, sizeof(eregs)))
3576			return -EFAULT;
3577		if (eregs.len > 0x6c00)
3578			eregs.len = 0x6c00;
3579		eregs.version = 0x0;
3580		if (mm_copy_to_user(useraddr, &eregs, sizeof(eregs)))
3581			return -EFAULT;
3582		buf = buf1 = kmalloc(eregs.len, GFP_KERNEL);
3583		if (!buf)
3584			return -ENOMEM;
3585		bcm5700_get_reg_blk(pUmDevice, &buf, 0,      0xb0,   0);
3586		bcm5700_get_reg_blk(pUmDevice, &buf, 0xb0,   0x200,  1);
3587		bcm5700_get_reg_blk(pUmDevice, &buf, 0x200,  0x8f0,  0);
3588		bcm5700_get_reg_blk(pUmDevice, &buf, 0x8f0,  0xc00,  1);
3589		bcm5700_get_reg_blk(pUmDevice, &buf, 0xc00,  0xce0,  0);
3590		bcm5700_get_reg_blk(pUmDevice, &buf, 0xce0,  0x1000, 1);
3591		bcm5700_get_reg_blk(pUmDevice, &buf, 0x1000, 0x1004, 0);
3592		bcm5700_get_reg_blk(pUmDevice, &buf, 0x1004, 0x1400, 1);
3593		bcm5700_get_reg_blk(pUmDevice, &buf, 0x1400, 0x1480, 0);
3594		bcm5700_get_reg_blk(pUmDevice, &buf, 0x1480, 0x1800, 1);
3595		bcm5700_get_reg_blk(pUmDevice, &buf, 0x1800, 0x1848, 0);
3596		bcm5700_get_reg_blk(pUmDevice, &buf, 0x1848, 0x1c00, 1);
3597		bcm5700_get_reg_blk(pUmDevice, &buf, 0x1c00, 0x1c04, 0);
3598		bcm5700_get_reg_blk(pUmDevice, &buf, 0x1c04, 0x2000, 1);
3599		bcm5700_get_reg_blk(pUmDevice, &buf, 0x2000, 0x225c, 0);
3600		bcm5700_get_reg_blk(pUmDevice, &buf, 0x225c, 0x2400, 1);
3601		bcm5700_get_reg_blk(pUmDevice, &buf, 0x2400, 0x24c4, 0);
3602		bcm5700_get_reg_blk(pUmDevice, &buf, 0x24c4, 0x2800, 1);
3603		bcm5700_get_reg_blk(pUmDevice, &buf, 0x2800, 0x2804, 0);
3604		bcm5700_get_reg_blk(pUmDevice, &buf, 0x2804, 0x2c00, 1);
3605		bcm5700_get_reg_blk(pUmDevice, &buf, 0x2c00, 0x2c20, 0);
3606		bcm5700_get_reg_blk(pUmDevice, &buf, 0x2c20, 0x3000, 1);
3607		bcm5700_get_reg_blk(pUmDevice, &buf, 0x3000, 0x3014, 0);
3608		bcm5700_get_reg_blk(pUmDevice, &buf, 0x3014, 0x3400, 1);
3609		bcm5700_get_reg_blk(pUmDevice, &buf, 0x3400, 0x3408, 0);
3610		bcm5700_get_reg_blk(pUmDevice, &buf, 0x3408, 0x3800, 1);
3611		bcm5700_get_reg_blk(pUmDevice, &buf, 0x3800, 0x3808, 0);
3612		bcm5700_get_reg_blk(pUmDevice, &buf, 0x3808, 0x3c00, 1);
3613		bcm5700_get_reg_blk(pUmDevice, &buf, 0x3c00, 0x3d00, 0);
3614		bcm5700_get_reg_blk(pUmDevice, &buf, 0x3d00, 0x4000, 1);
3615		bcm5700_get_reg_blk(pUmDevice, &buf, 0x4000, 0x4010, 0);
3616		bcm5700_get_reg_blk(pUmDevice, &buf, 0x4010, 0x4400, 1);
3617		bcm5700_get_reg_blk(pUmDevice, &buf, 0x4400, 0x4458, 0);
3618		bcm5700_get_reg_blk(pUmDevice, &buf, 0x4458, 0x4800, 1);
3619		bcm5700_get_reg_blk(pUmDevice, &buf, 0x4800, 0x4808, 0);
3620		bcm5700_get_reg_blk(pUmDevice, &buf, 0x4808, 0x4c00, 1);
3621		bcm5700_get_reg_blk(pUmDevice, &buf, 0x4c00, 0x4c08, 0);
3622		bcm5700_get_reg_blk(pUmDevice, &buf, 0x4c08, 0x5000, 1);
3623		bcm5700_get_reg_blk(pUmDevice, &buf, 0x5000, 0x5050, 0);
3624		bcm5700_get_reg_blk(pUmDevice, &buf, 0x5050, 0x5400, 1);
3625		bcm5700_get_reg_blk(pUmDevice, &buf, 0x5400, 0x5450, 0);
3626		bcm5700_get_reg_blk(pUmDevice, &buf, 0x5450, 0x5800, 1);
3627		bcm5700_get_reg_blk(pUmDevice, &buf, 0x5800, 0x5a10, 0);
3628		bcm5700_get_reg_blk(pUmDevice, &buf, 0x5a10, 0x6000, 1);
3629		bcm5700_get_reg_blk(pUmDevice, &buf, 0x6000, 0x600c, 0);
3630		bcm5700_get_reg_blk(pUmDevice, &buf, 0x600c, 0x6400, 1);
3631		bcm5700_get_reg_blk(pUmDevice, &buf, 0x6400, 0x6404, 0);
3632		bcm5700_get_reg_blk(pUmDevice, &buf, 0x6404, 0x6800, 1);
3633		bcm5700_get_reg_blk(pUmDevice, &buf, 0x6800, 0x6848, 0);
3634		bcm5700_get_reg_blk(pUmDevice, &buf, 0x6848, 0x6c00, 1);
3635
3636		i = mm_copy_to_user(useraddr + sizeof(eregs), buf1, eregs.len);
3637		kfree(buf1);
3638		if (i)
3639			return -EFAULT;
3640		return 0;
3641	}
3642#endif
3643#endif
3644#ifdef ETHTOOL_GPAUSEPARAM
3645	case ETHTOOL_GPAUSEPARAM: {
3646		struct ethtool_pauseparam epause = { ETHTOOL_GPAUSEPARAM };
3647
3648		if (!pDevice->DisableAutoNeg) {
3649			epause.autoneg = (pDevice->FlowControlCap &
3650				LM_FLOW_CONTROL_AUTO_PAUSE) != 0;
3651		}
3652		else {
3653			epause.autoneg = 0;
3654		}
3655		epause.rx_pause =
3656			(pDevice->FlowControl &
3657			LM_FLOW_CONTROL_RECEIVE_PAUSE) != 0;
3658		epause.tx_pause =
3659			(pDevice->FlowControl &
3660			LM_FLOW_CONTROL_TRANSMIT_PAUSE) != 0;
3661		if (mm_copy_to_user(useraddr, &epause, sizeof(epause)))
3662			return -EFAULT;
3663
3664		return 0;
3665	}
3666	case ETHTOOL_SPAUSEPARAM: {
3667		struct ethtool_pauseparam epause;
3668		unsigned long flags;
3669
3670		if(!capable(CAP_NET_ADMIN))
3671			return -EPERM;
3672		if (mm_copy_from_user(&epause, useraddr, sizeof(epause)))
3673			return -EFAULT;
3674		pDevice->FlowControlCap = 0;
3675		if (epause.autoneg && !pDevice->DisableAutoNeg) {
3676			pDevice->FlowControlCap |= LM_FLOW_CONTROL_AUTO_PAUSE;
3677		}
3678		if (epause.rx_pause)  {
3679			pDevice->FlowControlCap |=
3680				LM_FLOW_CONTROL_RECEIVE_PAUSE;
3681		}
3682		if (epause.tx_pause)  {
3683			pDevice->FlowControlCap |=
3684				LM_FLOW_CONTROL_TRANSMIT_PAUSE;
3685		}
3686		if (netif_running(dev)) {
3687			BCM5700_PHY_LOCK(pUmDevice, flags);
3688			LM_SetupPhy(pDevice);
3689			BCM5700_PHY_UNLOCK(pUmDevice, flags);
3690		}
3691
3692		return 0;
3693	}
3694#endif
3695#ifdef ETHTOOL_GRXCSUM
3696	case ETHTOOL_GRXCSUM: {
3697		struct ethtool_value edata = { ETHTOOL_GRXCSUM };
3698
3699		edata.data =
3700			(pDevice->TaskToOffload &
3701			LM_TASK_OFFLOAD_RX_TCP_CHECKSUM) != 0;
3702		if (mm_copy_to_user(useraddr, &edata, sizeof(edata)))
3703			return -EFAULT;
3704
3705		return 0;
3706	}
3707	case ETHTOOL_SRXCSUM: {
3708		struct ethtool_value edata;
3709
3710		if(!capable(CAP_NET_ADMIN))
3711			return -EPERM;
3712		if (mm_copy_from_user(&edata, useraddr, sizeof(edata)))
3713			return -EFAULT;
3714		if (edata.data) {
3715			if (!(pDevice->TaskOffloadCap &
3716				LM_TASK_OFFLOAD_TX_TCP_CHECKSUM)) {
3717
3718				return -EINVAL;
3719			}
3720			pDevice->TaskToOffload |=
3721				LM_TASK_OFFLOAD_RX_TCP_CHECKSUM |
3722				LM_TASK_OFFLOAD_RX_UDP_CHECKSUM;
3723		}
3724		else {
3725			pDevice->TaskToOffload &=
3726				~(LM_TASK_OFFLOAD_RX_TCP_CHECKSUM |
3727				LM_TASK_OFFLOAD_RX_UDP_CHECKSUM);
3728		}
3729		return 0;
3730	}
3731	case ETHTOOL_GTXCSUM: {
3732		struct ethtool_value edata = { ETHTOOL_GTXCSUM };
3733
3734		edata.data =
3735			(dev->features & get_csum_flag( pDevice->ChipRevId)) != 0;
3736		if (mm_copy_to_user(useraddr, &edata, sizeof(edata)))
3737			return -EFAULT;
3738
3739		return 0;
3740	}
3741	case ETHTOOL_STXCSUM: {
3742		struct ethtool_value edata;
3743
3744		if(!capable(CAP_NET_ADMIN))
3745			return -EPERM;
3746		if (mm_copy_from_user(&edata, useraddr, sizeof(edata)))
3747			return -EFAULT;
3748		if (edata.data) {
3749			if (!(pDevice->TaskOffloadCap &
3750				LM_TASK_OFFLOAD_TX_TCP_CHECKSUM)) {
3751
3752				return -EINVAL;
3753			}
3754			dev->features |= get_csum_flag( pDevice->ChipRevId);
3755			pDevice->TaskToOffload |=
3756				LM_TASK_OFFLOAD_TX_TCP_CHECKSUM |
3757				LM_TASK_OFFLOAD_TX_UDP_CHECKSUM;
3758		}
3759		else {
3760			dev->features &= ~get_csum_flag( pDevice->ChipRevId);
3761			pDevice->TaskToOffload &=
3762				~(LM_TASK_OFFLOAD_TX_TCP_CHECKSUM |
3763				LM_TASK_OFFLOAD_TX_UDP_CHECKSUM);
3764		}
3765		return 0;
3766	}
3767	case ETHTOOL_GSG: {
3768		struct ethtool_value edata = { ETHTOOL_GSG };
3769
3770		edata.data =
3771			(dev->features & NETIF_F_SG) != 0;
3772		if (mm_copy_to_user(useraddr, &edata, sizeof(edata)))
3773			return -EFAULT;
3774		return 0;
3775	}
3776	case ETHTOOL_SSG: {
3777		struct ethtool_value edata;
3778
3779		if(!capable(CAP_NET_ADMIN))
3780			return -EPERM;
3781		if (mm_copy_from_user(&edata, useraddr, sizeof(edata)))
3782			return -EFAULT;
3783		if (edata.data) {
3784			dev->features |= NETIF_F_SG;
3785		}
3786		else {
3787			dev->features &= ~NETIF_F_SG;
3788		}
3789		return 0;
3790	}
3791#endif
3792#ifdef ETHTOOL_GRINGPARAM
3793	case ETHTOOL_GRINGPARAM: {
3794		struct ethtool_ringparam ering = { ETHTOOL_GRINGPARAM };
3795
3796		ering.rx_max_pending = T3_STD_RCV_RCB_ENTRY_COUNT - 1;
3797		ering.rx_pending = pDevice->RxStdDescCnt;
3798		ering.rx_mini_max_pending = 0;
3799		ering.rx_mini_pending = 0;
3800#if T3_JUMBO_RCV_RCB_ENTRY_COUNT
3801		ering.rx_jumbo_max_pending = T3_JUMBO_RCV_RCB_ENTRY_COUNT - 1;
3802		ering.rx_jumbo_pending = pDevice->RxJumboDescCnt;
3803#else
3804		ering.rx_jumbo_max_pending = 0;
3805		ering.rx_jumbo_pending = 0;
3806#endif
3807		ering.tx_max_pending = MAX_TX_PACKET_DESC_COUNT - 1;
3808		ering.tx_pending = pDevice->TxPacketDescCnt;
3809		if (mm_copy_to_user(useraddr, &ering, sizeof(ering)))
3810			return -EFAULT;
3811		return 0;
3812	}
3813#endif
3814#ifdef ETHTOOL_PHYS_ID
3815	case ETHTOOL_PHYS_ID: {
3816		struct ethtool_value edata;
3817
3818		if(!capable(CAP_NET_ADMIN))
3819			return -EPERM;
3820		if (mm_copy_from_user(&edata, useraddr, sizeof(edata)))
3821			return -EFAULT;
3822		if (LM_BlinkLED(pDevice, edata.data) == LM_STATUS_SUCCESS)
3823			return 0;
3824		return -EINTR;
3825	}
3826#endif
3827#ifdef ETHTOOL_GSTRINGS
3828	case ETHTOOL_GSTRINGS: {
3829		struct ethtool_gstrings egstr = { ETHTOOL_GSTRINGS };
3830
3831		if (mm_copy_from_user(&egstr, useraddr, sizeof(egstr)))
3832			return -EFAULT;
3833		switch(egstr.string_set) {
3834#ifdef ETHTOOL_GSTATS
3835		case ETH_SS_STATS:
3836			egstr.len = ETH_NUM_STATS;
3837			if (mm_copy_to_user(useraddr, &egstr, sizeof(egstr)))
3838				return -EFAULT;
3839			if (mm_copy_to_user(useraddr + sizeof(egstr),
3840				bcm5700_stats_str_arr,
3841				sizeof(bcm5700_stats_str_arr)))
3842				return -EFAULT;
3843			return 0;
3844#endif
3845#ifdef ETHTOOL_TEST
3846		case ETH_SS_TEST:
3847			egstr.len = ETH_NUM_TESTS;
3848			if (mm_copy_to_user(useraddr, &egstr, sizeof(egstr)))
3849				return -EFAULT;
3850			if (mm_copy_to_user(useraddr + sizeof(egstr),
3851				bcm5700_tests_str_arr,
3852				sizeof(bcm5700_tests_str_arr)))
3853				return -EFAULT;
3854			return 0;
3855#endif
3856		default:
3857			return -EOPNOTSUPP;
3858		}
3859		}
3860#endif
3861#ifdef ETHTOOL_GSTATS
3862	case ETHTOOL_GSTATS: {
3863		struct ethtool_stats estats = { ETHTOOL_GSTATS };
3864		uint64_t stats[ETH_NUM_STATS];
3865		int i;
3866		uint64_t *pStats =
3867			(uint64_t *) pDevice->pStatsBlkVirt;
3868
3869		estats.n_stats = ETH_NUM_STATS;
3870		if (pStats == 0) {
3871			memset(stats, 0, sizeof(stats));
3872		}
3873		else {
3874
3875			for (i = 0; i < ETH_NUM_STATS; i++) {
3876				if (bcm5700_stats_offset_arr[i] != 0) {
3877					stats[i] = SWAP_DWORD_64(*(pStats +
3878						bcm5700_stats_offset_arr[i]));
3879				}
3880				else if (i == RX_CRC_IDX) {
3881					stats[i] =
3882						bcm5700_crc_count(pUmDevice);
3883				}
3884				else if (i == RX_MAC_ERR_IDX) {
3885					stats[i] =
3886						bcm5700_rx_err_count(pUmDevice);
3887				}
3888			}
3889		}
3890		if (mm_copy_to_user(useraddr, &estats, sizeof(estats))) {
3891			return -EFAULT;
3892		}
3893		if (mm_copy_to_user(useraddr + sizeof(estats), &stats,
3894			sizeof(stats))) {
3895			return -EFAULT;
3896		}
3897		return 0;
3898	}
3899#endif
3900#ifdef ETHTOOL_TEST
3901	case ETHTOOL_TEST: {
3902		struct ethtool_test etest;
3903		uint64_t tests[ETH_NUM_TESTS] = {0, 0, 0, 0, 0, 0};
3904		LM_POWER_STATE old_power_level;
3905
3906		printk( KERN_ALERT "Performing ethtool test.\n"
3907		                   "This test will take a few seconds to complete.\n" );
3908
3909		if (mm_copy_from_user(&etest, useraddr, sizeof(etest)))
3910			return -EFAULT;
3911
3912		etest.len = ETH_NUM_TESTS;
3913		old_power_level = pDevice->PowerLevel;
3914		if (old_power_level != LM_POWER_STATE_D0) {
3915			LM_SetPowerState(pDevice, LM_POWER_STATE_D0);
3916			LM_SwitchClocks(pDevice);
3917		}
3918		MM_Sleep(pDevice, 1000);
3919		if (etest.flags & ETH_TEST_FL_OFFLINE) {
3920			b57_suspend_chip(pUmDevice);
3921			MM_Sleep(pDevice, 1000);
3922			LM_HaltCpu(pDevice,T3_RX_CPU_ID | T3_TX_CPU_ID);
3923			MM_Sleep(pDevice, 1000);
3924			if (b57_test_registers(pUmDevice) == 0) {
3925				etest.flags |= ETH_TEST_FL_FAILED;
3926				tests[0] = 1;
3927			}
3928			MM_Sleep(pDevice, 1000);
3929			if (b57_test_memory(pUmDevice) == 0) {
3930				etest.flags |= ETH_TEST_FL_FAILED;
3931				tests[1] = 1;
3932			}
3933			MM_Sleep(pDevice, 1000);
3934			if (b57_test_loopback(pUmDevice, NICE_LOOPBACK_TESTTYPE_MAC, 0) == 0) {
3935				etest.flags |= ETH_TEST_FL_FAILED;
3936				tests[2] = 1;
3937			}
3938			MM_Sleep(pDevice, 1000);
3939			b57_resume_chip(pUmDevice);
3940			/* wait for link to come up for the link test */
3941			MM_Sleep(pDevice, 4000);
3942			if ((pDevice->LinkStatus != LM_STATUS_LINK_ACTIVE) &&
3943				!(pDevice->TbiFlags & ENABLE_TBI_FLAG)) {
3944
3945				/* wait a little longer for linkup on copper */
3946				MM_Sleep(pDevice, 3000);
3947			}
3948		}
3949		if (b57_test_nvram(pUmDevice) == 0) {
3950			etest.flags |= ETH_TEST_FL_FAILED;
3951			tests[3] = 1;
3952		}
3953		MM_Sleep(pDevice, 1000);
3954		if (b57_test_intr(pUmDevice) == 0) {
3955			etest.flags |= ETH_TEST_FL_FAILED;
3956			tests[4] = 1;
3957		}
3958		MM_Sleep(pDevice, 1000);
3959		if (b57_test_link(pUmDevice) == 0) {
3960			etest.flags |= ETH_TEST_FL_FAILED;
3961			tests[5] = 1;
3962		}
3963		MM_Sleep(pDevice, 1000);
3964		if (old_power_level != LM_POWER_STATE_D0) {
3965			LM_SetPowerState(pDevice, old_power_level);
3966		}
3967		if (mm_copy_to_user(useraddr, &etest, sizeof(etest))) {
3968			return -EFAULT;
3969		}
3970		if (mm_copy_to_user(useraddr + sizeof(etest), tests,
3971			sizeof(tests))) {
3972			return -EFAULT;
3973		}
3974		return 0;
3975	}
3976#endif
3977#ifdef ETHTOOL_GTSO
3978	case ETHTOOL_GTSO: {
3979		struct ethtool_value edata = { ETHTOOL_GTSO };
3980
3981#ifdef BCM_TSO
3982		edata.data =
3983			(dev->features & NETIF_F_TSO) != 0;
3984#else
3985		edata.data = 0;
3986#endif
3987		if (mm_copy_to_user(useraddr, &edata, sizeof(edata)))
3988			return -EFAULT;
3989		return 0;
3990	}
3991#endif
3992#ifdef ETHTOOL_STSO
3993	case ETHTOOL_STSO: {
3994#ifdef BCM_TSO
3995		struct ethtool_value edata;
3996
3997		if (!capable(CAP_NET_ADMIN))
3998			return -EPERM;
3999
4000		if (mm_copy_from_user(&edata, useraddr, sizeof(edata)))
4001			return -EFAULT;
4002
4003		if (!(pDevice->TaskToOffload &
4004			LM_TASK_OFFLOAD_TCP_SEGMENTATION)) {
4005			return -EINVAL;
4006		}
4007
4008		dev->features &= ~NETIF_F_TSO;
4009
4010		if (edata.data) {
4011			if (T3_ASIC_5714_FAMILY(pDevice->ChipRevId) &&
4012			   (dev->mtu > 1500)) {
4013				printk(KERN_ALERT "%s: Jumbo Frames and TSO cannot simultaneously be enabled. Jumbo Frames enabled. TSO disabled.\n", dev->name);
4014			return -EINVAL;
4015			} else {
4016				dev->features |= NETIF_F_TSO;
4017			}
4018	        }
4019		return 0;
4020#else
4021		return -EINVAL;
4022#endif
4023	}
4024#endif
4025	}
4026
4027	return -EOPNOTSUPP;
4028}
4029#endif /* #ifdef SIOCETHTOOL */
4030
4031#if (LINUX_VERSION_CODE >= 0x20400) && (LINUX_VERSION_CODE < 0x20600)
4032#include <linux/iobuf.h>
4033#endif
4034
4035/* Provide ioctl() calls to examine the MII xcvr state. */
4036STATIC int bcm5700_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
4037{
4038	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
4039	PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
4040	u16 *data = (u16 *)&rq->ifr_data;
4041	u32 value;
4042	unsigned long flags;
4043
4044	switch(cmd) {
4045#ifdef SIOCGMIIPHY
4046	case SIOCGMIIPHY:
4047#endif
4048	case SIOCDEVPRIVATE:			/* Get the address of the PHY in use. */
4049		data[0] = pDevice->PhyAddr;
4050		return 0;
4051
4052#ifdef SIOCGMIIREG
4053	case SIOCGMIIREG:
4054#endif
4055	case SIOCDEVPRIVATE + 1:		/* Read the specified MII register. */
4056	{
4057		uint32 savephyaddr = 0;
4058
4059		if (pDevice->TbiFlags & ENABLE_TBI_FLAG)
4060			return -EOPNOTSUPP;
4061
4062		/* ifup only waits for 5 seconds for link up */
4063		/* NIC may take more than 5 seconds to establish link */
4064		if ((pUmDevice->delayed_link_ind > 0) &&
4065			delay_link[pUmDevice->index]) {
4066			return -EOPNOTSUPP;
4067		}
4068
4069		BCM5700_PHY_LOCK(pUmDevice, flags);
4070		if (data[0] != 0xffff) {
4071			savephyaddr = pDevice->PhyAddr;
4072			pDevice->PhyAddr = data[0];
4073		}
4074		LM_ReadPhy(pDevice, data[1] & 0x1f, (LM_UINT32 *)&value);
4075		if (data[0] != 0xffff)
4076			pDevice->PhyAddr = savephyaddr;
4077		BCM5700_PHY_UNLOCK(pUmDevice, flags);
4078		data[3] = value & 0xffff;
4079		return 0;
4080	}
4081
4082#ifdef SIOCSMIIREG
4083	case SIOCSMIIREG:
4084#endif
4085	case SIOCDEVPRIVATE + 2:		/* Write the specified MII register */
4086	{
4087		uint32 savephyaddr = 0;
4088
4089		if (!capable(CAP_NET_ADMIN))
4090			return -EPERM;
4091
4092		if (pDevice->TbiFlags & ENABLE_TBI_FLAG)
4093			return -EOPNOTSUPP;
4094
4095		BCM5700_PHY_LOCK(pUmDevice, flags);
4096		if (data[0] != 0xffff) {
4097			savephyaddr = pDevice->PhyAddr;
4098			pDevice->PhyAddr = data[0];
4099		}
4100		LM_WritePhy(pDevice, data[1] & 0x1f, data[2]);
4101		if (data[0] != 0xffff)
4102			pDevice->PhyAddr = savephyaddr;
4103		BCM5700_PHY_UNLOCK(pUmDevice, flags);
4104		data[3] = 0;
4105		return 0;
4106	}
4107
4108	case SIOCDEVPRIVATE + 3:		/* Read the specified ROBO register. */
4109	{
4110		robo_info_t *robo = (robo_info_t *)pUmDevice->robo;
4111
4112		if (((pDevice->Flags & ROBO_SWITCH_FLAG) == 0) || (robo == NULL))
4113			return -ENXIO;
4114
4115		if (robo->ops->read_reg(robo, data[0] & 0xff, data[1] & 0xff, &value, 2))
4116			return -EIO;
4117
4118		data[3] = value & 0xffff;
4119		return 0;
4120	}
4121
4122	case SIOCDEVPRIVATE + 4:		/* Write the specified ROBO register. */
4123	{
4124		robo_info_t *robo = (robo_info_t *)pUmDevice->robo;
4125
4126		if (!capable(CAP_NET_ADMIN))
4127			return -EPERM;
4128
4129		if (((pDevice->Flags & ROBO_SWITCH_FLAG) == 0) || (robo == NULL))
4130			return -ENXIO;
4131
4132		value = data[2];
4133		if (robo->ops->write_reg(robo, data[0] & 0xff, data[1] & 0xff, &value, 2))
4134			return -EIO;
4135
4136		data[3] = 0;
4137		return 0;
4138	}
4139
4140#ifdef NICE_SUPPORT
4141	case SIOCNICE:
4142	{
4143		struct nice_req* nrq;
4144
4145		if (!capable(CAP_NET_ADMIN))
4146			return -EPERM;
4147
4148		nrq = (struct nice_req*)&rq->ifr_ifru;
4149		if( nrq->cmd == NICE_CMD_QUERY_SUPPORT ) {
4150			nrq->nrq_magic = NICE_DEVICE_MAGIC;
4151			nrq->nrq_support_rx = 1;
4152			nrq->nrq_support_vlan = 1;
4153			nrq->nrq_support_get_speed = 1;
4154#ifdef BCM_NAPI_RXPOLL
4155			nrq->nrq_support_rx_napi = 1;
4156#endif
4157			return 0;
4158		}
4159#ifdef BCM_NAPI_RXPOLL
4160		else if( nrq->cmd == NICE_CMD_SET_RX_NAPI )
4161#else
4162		else if( nrq->cmd == NICE_CMD_SET_RX )
4163#endif
4164		{
4165			pUmDevice->nice_rx = nrq->nrq_rx;
4166			pUmDevice->nice_ctx = nrq->nrq_ctx;
4167			bcm5700_set_vlan_mode(pUmDevice);
4168			return 0;
4169		}
4170#ifdef BCM_NAPI_RXPOLL
4171		else if( nrq->cmd == NICE_CMD_GET_RX_NAPI )
4172#else
4173		else if( nrq->cmd == NICE_CMD_GET_RX )
4174#endif
4175		{
4176			nrq->nrq_rx = pUmDevice->nice_rx;
4177			nrq->nrq_ctx = pUmDevice->nice_ctx;
4178			return 0;
4179		}
4180		else if( nrq->cmd == NICE_CMD_GET_SPEED ) {
4181			if(pDevice->LinkStatus != LM_STATUS_LINK_ACTIVE){
4182				nrq->nrq_speed = 0;
4183			}
4184			else if (pDevice->LineSpeed == LM_LINE_SPEED_1000MBPS) {
4185				nrq->nrq_speed = SPEED_1000;
4186			} else if (pDevice->LineSpeed == LM_LINE_SPEED_100MBPS) {
4187				nrq->nrq_speed = SPEED_100;
4188			} else if (pDevice->LineSpeed == LM_LINE_SPEED_10MBPS) {
4189				nrq->nrq_speed = SPEED_100;
4190			} else {
4191				nrq->nrq_speed = 0;
4192			}
4193			return 0;
4194		}
4195		else {
4196			if (!pUmDevice->opened)
4197				return -EINVAL;
4198
4199			switch (nrq->cmd) {
4200			case NICE_CMD_BLINK_LED:
4201				if (LM_BlinkLED(pDevice, nrq->nrq_blink_time) ==
4202					LM_STATUS_SUCCESS) {
4203					return 0;
4204				}
4205				return -EINTR;
4206
4207			case NICE_CMD_DIAG_SUSPEND:
4208				b57_suspend_chip(pUmDevice);
4209				return 0;
4210
4211			case NICE_CMD_DIAG_RESUME:
4212				b57_resume_chip(pUmDevice);
4213				return 0;
4214
4215			case NICE_CMD_REG_READ:
4216				if (nrq->nrq_offset >= 0x10000) {
4217					nrq->nrq_data = LM_RegRdInd(pDevice,
4218						nrq->nrq_offset);
4219				}
4220				else {
4221					nrq->nrq_data = LM_RegRd(pDevice,
4222						nrq->nrq_offset);
4223				}
4224				return 0;
4225
4226			case NICE_CMD_REG_WRITE:
4227				if (nrq->nrq_offset >= 0x10000) {
4228					LM_RegWrInd(pDevice, nrq->nrq_offset,
4229						nrq->nrq_data);
4230				}
4231				else {
4232					LM_RegWr(pDevice, nrq->nrq_offset,
4233						nrq->nrq_data, FALSE);
4234				}
4235				return 0;
4236
4237			case NICE_CMD_REG_READ_DIRECT:
4238			case NICE_CMD_REG_WRITE_DIRECT:
4239				if ((nrq->nrq_offset >= 0x10000) ||
4240					(pDevice->Flags & UNDI_FIX_FLAG)) {
4241					return -EINVAL;
4242				}
4243
4244				if (nrq->cmd == NICE_CMD_REG_READ_DIRECT) {
4245					nrq->nrq_data = REG_RD_OFFSET(pDevice,
4246						nrq->nrq_offset);
4247				}
4248				else {
4249					REG_WR_OFFSET(pDevice, nrq->nrq_offset,
4250							nrq->nrq_data);
4251				}
4252				return 0;
4253
4254			case NICE_CMD_MEM_READ:
4255				nrq->nrq_data = LM_MemRdInd(pDevice,
4256					nrq->nrq_offset);
4257				return 0;
4258
4259			case NICE_CMD_MEM_WRITE:
4260				LM_MemWrInd(pDevice, nrq->nrq_offset,
4261					nrq->nrq_data);
4262				return 0;
4263
4264			case NICE_CMD_CFG_READ32:
4265				pci_read_config_dword(pUmDevice->pdev,
4266					nrq->nrq_offset, (u32 *)&nrq->nrq_data);
4267				return 0;
4268
4269			case NICE_CMD_CFG_READ16:
4270				pci_read_config_word(pUmDevice->pdev,
4271					nrq->nrq_offset, (u16 *)&nrq->nrq_data);
4272				return 0;
4273
4274			case NICE_CMD_CFG_READ8:
4275				pci_read_config_byte(pUmDevice->pdev,
4276					nrq->nrq_offset, (u8 *)&nrq->nrq_data);
4277				return 0;
4278
4279			case NICE_CMD_CFG_WRITE32:
4280				pci_write_config_dword(pUmDevice->pdev,
4281					nrq->nrq_offset, (u32)nrq->nrq_data);
4282				return 0;
4283
4284			case NICE_CMD_CFG_WRITE16:
4285				pci_write_config_word(pUmDevice->pdev,
4286					nrq->nrq_offset, (u16)nrq->nrq_data);
4287				return 0;
4288
4289			case NICE_CMD_CFG_WRITE8:
4290				pci_write_config_byte(pUmDevice->pdev,
4291					nrq->nrq_offset, (u8)nrq->nrq_data);
4292				return 0;
4293
4294			case NICE_CMD_RESET:
4295				bcm5700_reset(dev);
4296				return 0;
4297
4298			case NICE_CMD_ENABLE_MAC_LOOPBACK:
4299				if (pDevice->LoopBackMode != 0) {
4300					return -EINVAL;
4301				}
4302
4303				BCM5700_PHY_LOCK(pUmDevice, flags);
4304				LM_EnableMacLoopBack(pDevice);
4305				BCM5700_PHY_UNLOCK(pUmDevice, flags);
4306				return 0;
4307
4308			case NICE_CMD_DISABLE_MAC_LOOPBACK:
4309				if (pDevice->LoopBackMode !=
4310					LM_MAC_LOOP_BACK_MODE) {
4311					return -EINVAL;
4312				}
4313
4314				BCM5700_PHY_LOCK(pUmDevice, flags);
4315				LM_DisableMacLoopBack(pDevice);
4316				BCM5700_PHY_UNLOCK(pUmDevice, flags);
4317				return 0;
4318
4319			case NICE_CMD_ENABLE_PHY_LOOPBACK:
4320				if (pDevice->LoopBackMode != 0) {
4321					return -EINVAL;
4322				}
4323
4324				BCM5700_PHY_LOCK(pUmDevice, flags);
4325				LM_EnablePhyLoopBack(pDevice);
4326				BCM5700_PHY_UNLOCK(pUmDevice, flags);
4327				return 0;
4328
4329			case NICE_CMD_DISABLE_PHY_LOOPBACK:
4330				if (pDevice->LoopBackMode !=
4331					LM_PHY_LOOP_BACK_MODE) {
4332					return -EINVAL;
4333				}
4334
4335				BCM5700_PHY_LOCK(pUmDevice, flags);
4336				LM_DisablePhyLoopBack(pDevice);
4337				BCM5700_PHY_UNLOCK(pUmDevice, flags);
4338				return 0;
4339
4340			case NICE_CMD_ENABLE_EXT_LOOPBACK:
4341				if (pDevice->LoopBackMode != 0) {
4342					return -EINVAL;
4343				}
4344
4345				if (pDevice->TbiFlags & ENABLE_TBI_FLAG) {
4346					if (nrq->nrq_speed != 1000)
4347						return -EINVAL;
4348				}
4349				else {
4350					if ((nrq->nrq_speed != 1000) &&
4351						(nrq->nrq_speed != 100) &&
4352						(nrq->nrq_speed != 10)) {
4353						return -EINVAL;
4354					}
4355				}
4356				BCM5700_PHY_LOCK(pUmDevice, flags);
4357				LM_EnableExtLoopBack(pDevice, nrq->nrq_speed);
4358				BCM5700_PHY_UNLOCK(pUmDevice, flags);
4359				return 0;
4360
4361			case NICE_CMD_DISABLE_EXT_LOOPBACK:
4362				if (pDevice->LoopBackMode !=
4363					LM_EXT_LOOP_BACK_MODE) {
4364					return -EINVAL;
4365				}
4366
4367				BCM5700_PHY_LOCK(pUmDevice, flags);
4368				LM_DisableExtLoopBack(pDevice);
4369				BCM5700_PHY_UNLOCK(pUmDevice, flags);
4370				return 0;
4371
4372			case NICE_CMD_INTERRUPT_TEST:
4373				nrq->nrq_intr_test_result =
4374					b57_test_intr(pUmDevice);
4375				return 0;
4376
4377			case NICE_CMD_LOOPBACK_TEST:
4378				value = 0;
4379				switch (nrq->nrq_looptype) {
4380					case NICE_LOOPBACK_TESTTYPE_EXT:
4381						if ((nrq->nrq_loopspeed & ~NICE_LOOPBACK_TEST_SPEEDMASK) ||
4382						   !(nrq->nrq_loopspeed & NICE_LOOPBACK_TEST_SPEEDMASK))
4383							break;
4384						switch (nrq->nrq_loopspeed) {
4385							case NICE_LOOPBACK_TEST_10MBPS:
4386								value = LM_LINE_SPEED_10MBPS;
4387								break;
4388							case NICE_LOOPBACK_TEST_100MBPS:
4389								value = LM_LINE_SPEED_100MBPS;
4390								break;
4391							case NICE_LOOPBACK_TEST_1000MBPS:
4392								value = LM_LINE_SPEED_1000MBPS;
4393								break;
4394						}
4395						/* Fall through */
4396
4397					case NICE_LOOPBACK_TESTTYPE_MAC:
4398					case NICE_LOOPBACK_TESTTYPE_PHY:
4399						b57_suspend_chip(pUmDevice);
4400						value = b57_test_loopback(pUmDevice,
4401						                          nrq->nrq_looptype, value);
4402						b57_resume_chip(pUmDevice);
4403						break;
4404				}
4405
4406				if (value == 1) {
4407					/* A '1' indicates success */
4408					value = 0;
4409				} else {
4410					value = -EINTR;
4411				}
4412
4413				return value;
4414
4415			case NICE_CMD_KMALLOC_PHYS: {
4416#if (LINUX_VERSION_CODE >= 0x020400)
4417                                dma_addr_t mapping;
4418                                __u64 cpu_pa;
4419                                void *ptr;
4420                                int i;
4421                                struct page *pg, *last_pg;
4422
4423                                for (i = 0; i < MAX_MEM2; i++) {
4424                                        if (pUmDevice->mem_size_list2[i] == 0)
4425                                                break;
4426                                }
4427                                if (i >= MAX_MEM2)
4428                                        return -EFAULT;
4429                                ptr = pci_alloc_consistent(pUmDevice->pdev,
4430                                        nrq->nrq_size, &mapping);
4431                                if (!ptr) {
4432                                        return -EFAULT;
4433                                }
4434                                pUmDevice->mem_size_list2[i] = nrq->nrq_size;
4435                                pUmDevice->mem_list2[i] = ptr;
4436                                pUmDevice->dma_list2[i] = mapping;
4437
4438                                /* put pci mapping at the beginning of buffer */
4439                                *((__u64 *) ptr) = (__u64) mapping;
4440
4441                                /* Probably won't work on some architectures */
4442                                /* get CPU mapping */
4443                                cpu_pa = (__u64) virt_to_phys(ptr);
4444                                pUmDevice->cpu_pa_list2[i] = cpu_pa;
4445                                nrq->nrq_phys_addr_lo = (__u32) cpu_pa;
4446                                nrq->nrq_phys_addr_hi = (__u32) (cpu_pa >> 32);
4447
4448                                pg = virt_to_page(ptr);
4449                                last_pg = virt_to_page(ptr + nrq->nrq_size - 1);
4450                                for (; ; pg++) {
4451#if (LINUX_VERSION_CODE > 0x020500)
4452                                        SetPageReserved(pg);
4453#else
4454                                        mem_map_reserve(pg);
4455#endif
4456                                        if (pg == last_pg)
4457                                                break;
4458                                }
4459                                return 0;
4460#else
4461                                return -EOPNOTSUPP;
4462#endif
4463			}
4464
4465			case NICE_CMD_KFREE_PHYS: {
4466                                int i;
4467                                __u64 cpu_pa;
4468
4469                                cpu_pa = (__u64) nrq->nrq_phys_addr_lo +
4470                                        ((__u64) nrq->nrq_phys_addr_hi << 32);
4471                                for (i = 0; i < MAX_MEM2; i++) {
4472                                        if (pUmDevice->cpu_pa_list2[i] ==
4473                                                cpu_pa)
4474                                        {
4475                                                break;
4476                                        }
4477                                }
4478                                if (i >= MAX_MEM2)
4479                                        return -EFAULT;
4480
4481                                bcm5700_freemem2(pUmDevice, i);
4482                                return 0;
4483			}
4484
4485			case NICE_CMD_SET_WRITE_PROTECT:
4486				if (nrq->nrq_write_protect)
4487					pDevice->Flags |= EEPROM_WP_FLAG;
4488				else
4489					pDevice->Flags &= ~EEPROM_WP_FLAG;
4490				return 0;
4491			case NICE_CMD_GET_STATS_BLOCK: {
4492				PT3_STATS_BLOCK pStats =
4493					(PT3_STATS_BLOCK)pDevice->pStatsBlkVirt;
4494				if (mm_copy_to_user(nrq->nrq_stats_useraddr,
4495					pStats, nrq->nrq_stats_size)) {
4496					return -EFAULT;
4497				}
4498				return 0;
4499			}
4500			case NICE_CMD_CLR_STATS_BLOCK: {
4501				int j;
4502				PT3_STATS_BLOCK pStats =
4503					(PT3_STATS_BLOCK)pDevice->pStatsBlkVirt;
4504
4505				memset(pStats, 0, sizeof(T3_STATS_BLOCK));
4506				if (T3_ASIC_REV(pDevice->ChipRevId) ==
4507					T3_ASIC_REV_5705) {
4508					return 0;
4509				}
4510				for(j = 0x0300; j < 0x0b00; j = j + 4) {
4511					MEM_WR_OFFSET(pDevice, j, 0);
4512				}
4513
4514				return 0;
4515			}
4516
4517			}
4518		}
4519		return -EOPNOTSUPP;
4520	}
4521#endif /* NICE_SUPPORT */
4522#ifdef SIOCETHTOOL
4523	case SIOCETHTOOL:
4524		return netdev_ethtool_ioctl(dev, (void *) rq->ifr_data);
4525#endif
4526	default:
4527		return -EOPNOTSUPP;
4528	}
4529	return -EOPNOTSUPP;
4530}
4531
4532STATIC void bcm5700_do_rx_mode(struct net_device *dev)
4533{
4534	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
4535	PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
4536	int i;
4537	struct dev_mc_list *mclist;
4538
4539	LM_MulticastClear(pDevice);
4540	for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
4541			 i++, mclist = mclist->next) {
4542		LM_MulticastAdd(pDevice, (PLM_UINT8) &mclist->dmi_addr);
4543	}
4544	if (dev->flags & IFF_ALLMULTI) {
4545		if (!(pDevice->ReceiveMask & LM_ACCEPT_ALL_MULTICAST)) {
4546			LM_SetReceiveMask(pDevice,
4547				pDevice->ReceiveMask | LM_ACCEPT_ALL_MULTICAST);
4548		}
4549	}
4550	else if (pDevice->ReceiveMask & LM_ACCEPT_ALL_MULTICAST) {
4551		LM_SetReceiveMask(pDevice,
4552			pDevice->ReceiveMask & ~LM_ACCEPT_ALL_MULTICAST);
4553	}
4554	if (dev->flags & IFF_PROMISC) {
4555		if (!(pDevice->ReceiveMask & LM_PROMISCUOUS_MODE)) {
4556			LM_SetReceiveMask(pDevice,
4557				pDevice->ReceiveMask | LM_PROMISCUOUS_MODE);
4558		}
4559	}
4560	else if (pDevice->ReceiveMask & LM_PROMISCUOUS_MODE) {
4561		LM_SetReceiveMask(pDevice,
4562			pDevice->ReceiveMask & ~LM_PROMISCUOUS_MODE);
4563	}
4564
4565}
4566
4567STATIC void bcm5700_set_rx_mode(struct net_device *dev)
4568{
4569	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
4570	PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
4571	int i;
4572	struct dev_mc_list *mclist;
4573	unsigned long flags;
4574
4575	BCM5700_PHY_LOCK(pUmDevice, flags);
4576
4577	LM_MulticastClear(pDevice);
4578	for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
4579			 i++, mclist = mclist->next) {
4580		LM_MulticastAdd(pDevice, (PLM_UINT8) &mclist->dmi_addr);
4581	}
4582	if (dev->flags & IFF_ALLMULTI) {
4583		if (!(pDevice->ReceiveMask & LM_ACCEPT_ALL_MULTICAST)) {
4584			LM_SetReceiveMask(pDevice,
4585				pDevice->ReceiveMask | LM_ACCEPT_ALL_MULTICAST);
4586		}
4587	}
4588	else if (pDevice->ReceiveMask & LM_ACCEPT_ALL_MULTICAST) {
4589		LM_SetReceiveMask(pDevice,
4590			pDevice->ReceiveMask & ~LM_ACCEPT_ALL_MULTICAST);
4591	}
4592	if (dev->flags & IFF_PROMISC) {
4593		if (!(pDevice->ReceiveMask & LM_PROMISCUOUS_MODE)) {
4594			LM_SetReceiveMask(pDevice,
4595				pDevice->ReceiveMask | LM_PROMISCUOUS_MODE);
4596		}
4597	}
4598	else if (pDevice->ReceiveMask & LM_PROMISCUOUS_MODE) {
4599		LM_SetReceiveMask(pDevice,
4600			pDevice->ReceiveMask & ~LM_PROMISCUOUS_MODE);
4601	}
4602
4603	BCM5700_PHY_UNLOCK(pUmDevice, flags);
4604}
4605
4606/*
4607 * Set the hardware MAC address.
4608 */
4609STATIC int bcm5700_set_mac_addr(struct net_device *dev, void *p)
4610{
4611	struct sockaddr *addr=p;
4612	PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) dev->priv;
4613	UM_DEVICE_BLOCK *pUmDevice = (UM_DEVICE_BLOCK *) pDevice;
4614
4615	if(is_valid_ether_addr(addr->sa_data)){
4616
4617	    memcpy(dev->dev_addr, addr->sa_data,dev->addr_len);
4618	    if (pUmDevice->opened)
4619	        LM_SetMacAddress(pDevice, dev->dev_addr);
4620            return 0;
4621        }
4622	return -EINVAL;
4623}
4624
4625#if T3_JUMBO_RCV_RCB_ENTRY_COUNT
/*
 * net_device change_mtu entry point (only compiled when jumbo receive
 * RCBs are supported).
 *
 * Validates the requested MTU against the chip's capabilities and, when
 * the interface is up on a jumbo-capable NIC, tears the adapter down and
 * reinitializes it with the new buffer sizes. Returns 0, -EINVAL for an
 * out-of-range MTU, or -EAGAIN while the device is suspended.
 */
STATIC int bcm5700_change_mtu(struct net_device *dev, int new_mtu)
{
	int pkt_size = new_mtu + ETHERNET_PACKET_HEADER_SIZE;
	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK)dev->priv;
	PLM_DEVICE_BLOCK pDevice = &pUmDevice->lm_dev;
	unsigned long flags;
	int reinit = 0;

	/* Absolute frame-size limits first... */
	if ((pkt_size < MIN_ETHERNET_PACKET_SIZE_NO_CRC) ||
		(pkt_size > MAX_ETHERNET_JUMBO_PACKET_SIZE_NO_CRC)) {

		return -EINVAL;
	}
	/* ...then restrict non-jumbo-capable chips to standard frames. */
	if ( !(pDevice->Flags & JUMBO_CAPABLE_FLAG)    &&
		(pkt_size > MAX_ETHERNET_PACKET_SIZE_NO_CRC) ) {

		return -EINVAL;
	}
	if (pUmDevice->suspended)
		return -EAGAIN;

	/* A running jumbo-capable NIC needs a full shutdown/re-init to
	 * resize the receive buffer rings. */
	if (pUmDevice->opened && (new_mtu != dev->mtu) &&
		(pDevice->Flags & JUMBO_CAPABLE_FLAG)) {
		reinit = 1;
	}

	BCM5700_PHY_LOCK(pUmDevice, flags);
	if (reinit) {
		netif_stop_queue(dev);
		bcm5700_shutdown(pUmDevice);
		bcm5700_freemem(dev);
	}

	dev->mtu = new_mtu;
	/* Hardware MTU never drops below the standard frame size. */
	if (pkt_size < MAX_ETHERNET_PACKET_SIZE_NO_CRC) {
		pDevice->RxMtu = pDevice->TxMtu =
			MAX_ETHERNET_PACKET_SIZE_NO_CRC;
	}
	else {
		pDevice->RxMtu = pDevice->TxMtu = pkt_size;
	}

	/* Jumbo descriptors are only populated for MTUs above 1514. */
	if (dev->mtu <= 1514)  {
		pDevice->RxJumboDescCnt = 0;
	}
	else if (pDevice->Flags & JUMBO_CAPABLE_FLAG){
		pDevice->RxJumboDescCnt =
			rx_jumbo_desc_cnt[pUmDevice->index];
	}
	pDevice->RxPacketDescCnt = pDevice->RxJumboDescCnt +
		pDevice->RxStdDescCnt;

	/* Round the jumbo buffer up to a cache-line multiple. */
	pDevice->RxJumboBufferSize = (pDevice->RxMtu + 8 /* CRC + VLAN */ +
		COMMON_CACHE_LINE_SIZE-1) & ~COMMON_CACHE_LINE_MASK;

#ifdef BCM_TSO
	/* 5714-family hardware cannot do TSO with jumbo frames. */
	if (T3_ASIC_5714_FAMILY(pDevice->ChipRevId) &&
	   (dev->mtu > 1514) ) {
		if (dev->features & NETIF_F_TSO) {
			dev->features &= ~NETIF_F_TSO;
			printk(KERN_ALERT "%s: TSO previously enabled. Jumbo Frames and TSO cannot simultaneously be enabled. Jumbo Frames enabled. TSO disabled.\n", dev->name);
		}
	}
#endif

	if (reinit) {
		LM_InitializeAdapter(pDevice);
		bcm5700_do_rx_mode(dev);
		bcm5700_set_vlan_mode(pUmDevice);
		bcm5700_init_counters(pUmDevice);
		/* Re-program a user-set MAC address lost by the reset. */
		if (memcmp(dev->dev_addr, pDevice->NodeAddress, 6)) {
			LM_SetMacAddress(pDevice, dev->dev_addr);
		}
		netif_start_queue(dev);
		bcm5700_intr_on(pUmDevice);
	}
	BCM5700_PHY_UNLOCK(pUmDevice, flags);

	return 0;
}
4706#endif
4707
4708
4709#if (LINUX_VERSION_CODE < 0x020300)
4710int
4711bcm5700_probe(struct net_device *dev)
4712{
4713	int cards_found = 0;
4714	struct pci_dev *pdev = NULL;
4715	struct pci_device_id *pci_tbl;
4716	u16 ssvid, ssid;
4717
4718	if ( ! pci_present())
4719		return -ENODEV;
4720
4721	pci_tbl = bcm5700_pci_tbl;
4722	while ((pdev = pci_find_class(PCI_CLASS_NETWORK_ETHERNET << 8, pdev))) {
4723		int idx;
4724
4725		pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &ssvid);
4726		pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &ssid);
4727		for (idx = 0; pci_tbl[idx].vendor; idx++) {
4728			if ((pci_tbl[idx].vendor == PCI_ANY_ID ||
4729				pci_tbl[idx].vendor == pdev->vendor) &&
4730				(pci_tbl[idx].device == PCI_ANY_ID ||
4731				pci_tbl[idx].device == pdev->device) &&
4732				(pci_tbl[idx].subvendor == PCI_ANY_ID ||
4733				pci_tbl[idx].subvendor == ssvid) &&
4734				(pci_tbl[idx].subdevice == PCI_ANY_ID ||
4735				pci_tbl[idx].subdevice == ssid))
4736			{
4737
4738				break;
4739			}
4740		}
4741		if (pci_tbl[idx].vendor == 0)
4742			continue;
4743
4744
4745		if (bcm5700_init_one(pdev, &pci_tbl[idx]) == 0)
4746			cards_found++;
4747	}
4748
4749	return cards_found ? 0 : -ENODEV;
4750}
4751
4752#ifdef MODULE
/* Legacy module entry point: probe all supported NICs. */
int init_module(void)
{
	return bcm5700_probe(NULL);
}
4757
/*
 * Legacy module exit: walk the driver's singly-linked device list
 * (root_tigon3_dev / pUmDevice->next_module), unregistering and
 * freeing each device in turn.
 */
void cleanup_module(void)
{
	struct net_device *next_dev;
	PUM_DEVICE_BLOCK pUmDevice;

#ifdef BCM_PROC_FS
	bcm5700_proc_remove_notifier();
#endif
	/* No need to check MOD_IN_USE, as sys_delete_module() checks. */
	while (root_tigon3_dev) {
		pUmDevice = (PUM_DEVICE_BLOCK)root_tigon3_dev->priv;
#ifdef BCM_PROC_FS
		bcm5700_proc_remove_dev(root_tigon3_dev);
#endif
		/* Save the link before the current node is freed. */
		next_dev = pUmDevice->next_module;
		unregister_netdev(root_tigon3_dev);
		if (pUmDevice->lm_dev.pMappedMemBase)
			iounmap(pUmDevice->lm_dev.pMappedMemBase);
		/* 2.6 kernels require free_netdev(); older ones use kfree(). */
#if (LINUX_VERSION_CODE < 0x020600)
		kfree(root_tigon3_dev);
#else
		free_netdev(root_tigon3_dev);
#endif
		root_tigon3_dev = next_dev;
	}
#ifdef BCM_IOCTL32
	unregister_ioctl32_conversion(SIOCNICE);
#endif
}
4787
4788#endif  /* MODULE */
4789#else	/* LINUX_VERSION_CODE < 0x020300 */
4790
/*
 * PCI suspend hook. Detaches the interface, stops the chip, and drops
 * it into D3. The return type changed to int in kernel 2.4.6, hence
 * the ifdef-ed signatures and returns.
 */
#if (LINUX_VERSION_CODE >= 0x020406)
static int bcm5700_suspend (struct pci_dev *pdev, u32 state)
#else
static void bcm5700_suspend (struct pci_dev *pdev)
#endif
{
	struct net_device *dev = (struct net_device *) pci_get_drvdata(pdev);
	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) dev->priv;
	PLM_DEVICE_BLOCK pDevice = &pUmDevice->lm_dev;

	/* Nothing to do for an interface that is down. */
	if (!netif_running(dev))
#if (LINUX_VERSION_CODE >= 0x020406)
		return 0;
#else
		return;
#endif

	netif_device_detach (dev);
	bcm5700_shutdown(pUmDevice);

	LM_SetPowerState(pDevice, LM_POWER_STATE_D3);

/*	pci_power_off(pdev, -1);*/
#if (LINUX_VERSION_CODE >= 0x020406)
	return 0;
#endif
}
4818
4819
/*
 * PCI resume hook: mirror image of bcm5700_suspend. Restores D0 power,
 * replenishes receive buffers and resets the chip before traffic
 * resumes.
 */
#if (LINUX_VERSION_CODE >= 0x020406)
static int bcm5700_resume(struct pci_dev *pdev)
#else
static void bcm5700_resume(struct pci_dev *pdev)
#endif
{
	struct net_device *dev = (struct net_device *) pci_get_drvdata(pdev);
	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) dev->priv;
	PLM_DEVICE_BLOCK pDevice = &pUmDevice->lm_dev;

	if (!netif_running(dev))
#if (LINUX_VERSION_CODE >= 0x020406)
		return 0;
#else
		return;
#endif
/*	pci_power_on(pdev);*/
	netif_device_attach(dev);
	LM_SetPowerState(pDevice, LM_POWER_STATE_D0);
	MM_InitializeUmPackets(pDevice);
	bcm5700_reset(dev);
#if (LINUX_VERSION_CODE >= 0x020406)
	return 0;
#endif
}
4845
4846
/* PCI driver registration table (GNU-style designated initializers,
 * as used by 2.4-era kernels). */
static struct pci_driver bcm5700_pci_driver = {
	name:		bcm5700_driver,
	id_table:	bcm5700_pci_tbl,
	probe:		bcm5700_init_one,
	remove:		__devexit_p(bcm5700_remove_one),
	suspend:	bcm5700_suspend,
	resume:		bcm5700_resume,
};
4855
4856
4857static int __init bcm5700_init_module (void)
4858{
4859	if (msglevel != 0xdeadbeef) {
4860		b57_msg_level = msglevel;
4861		printf("%s: msglevel set to 0x%x\n", __FUNCTION__, b57_msg_level);
4862	} else
4863		b57_msg_level = B57_ERR_VAL;
4864
4865	return pci_module_init(&bcm5700_pci_driver);
4866}
4867
4868
/* Module exit: drop the /proc notifier (if built) and unregister the
 * PCI driver, which tears down each bound device via remove(). */
static void __exit bcm5700_cleanup_module (void)
{
#ifdef BCM_PROC_FS
	bcm5700_proc_remove_notifier();
#endif
	pci_unregister_driver(&bcm5700_pci_driver);
}
4876
4877
/* Register the module entry/exit points with the loader. */
module_init(bcm5700_init_module);
module_exit(bcm5700_cleanup_module);
#endif
4881
4882/*
4883 * Middle Module
4884 *
4885 */
4886
4887
4888#ifdef BCM_NAPI_RXPOLL
4889LM_STATUS
4890MM_ScheduleRxPoll(LM_DEVICE_BLOCK *pDevice)
4891{
4892	struct net_device *dev = ((UM_DEVICE_BLOCK *) pDevice)->dev;
4893
4894	if (netif_rx_schedule_prep(dev)) {
4895		__netif_rx_schedule(dev);
4896		return LM_STATUS_SUCCESS;
4897	}
4898	return LM_STATUS_FAILURE;
4899}
4900#endif
4901
4902LM_STATUS
4903MM_ReadConfig16(PLM_DEVICE_BLOCK pDevice, LM_UINT32 Offset,
4904	LM_UINT16 *pValue16)
4905{
4906	UM_DEVICE_BLOCK *pUmDevice;
4907
4908	pUmDevice = (UM_DEVICE_BLOCK *) pDevice;
4909	pci_read_config_word(pUmDevice->pdev, Offset, (u16 *) pValue16);
4910	return LM_STATUS_SUCCESS;
4911}
4912
4913LM_STATUS
4914MM_ReadConfig32(PLM_DEVICE_BLOCK pDevice, LM_UINT32 Offset,
4915	LM_UINT32 *pValue32)
4916{
4917	UM_DEVICE_BLOCK *pUmDevice;
4918
4919	pUmDevice = (UM_DEVICE_BLOCK *) pDevice;
4920	pci_read_config_dword(pUmDevice->pdev, Offset, (u32 *) pValue32);
4921	return LM_STATUS_SUCCESS;
4922}
4923
4924LM_STATUS
4925MM_WriteConfig16(PLM_DEVICE_BLOCK pDevice, LM_UINT32 Offset,
4926	LM_UINT16 Value16)
4927{
4928	UM_DEVICE_BLOCK *pUmDevice;
4929
4930	pUmDevice = (UM_DEVICE_BLOCK *) pDevice;
4931	pci_write_config_word(pUmDevice->pdev, Offset, Value16);
4932	return LM_STATUS_SUCCESS;
4933}
4934
4935LM_STATUS
4936MM_WriteConfig32(PLM_DEVICE_BLOCK pDevice, LM_UINT32 Offset,
4937	LM_UINT32 Value32)
4938{
4939	UM_DEVICE_BLOCK *pUmDevice;
4940
4941	pUmDevice = (UM_DEVICE_BLOCK *) pDevice;
4942	pci_write_config_dword(pUmDevice->pdev, Offset, Value32);
4943	return LM_STATUS_SUCCESS;
4944}
4945
4946LM_STATUS
4947MM_AllocateSharedMemory(PLM_DEVICE_BLOCK pDevice, LM_UINT32 BlockSize,
4948	PLM_VOID *pMemoryBlockVirt, PLM_PHYSICAL_ADDRESS pMemoryBlockPhy,
4949	LM_BOOL Cached)
4950{
4951	PLM_VOID pvirt;
4952	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
4953	dma_addr_t mapping;
4954
4955	pvirt = pci_alloc_consistent(pUmDevice->pdev, BlockSize,
4956					       &mapping);
4957	if (!pvirt) {
4958		return LM_STATUS_FAILURE;
4959	}
4960	pUmDevice->mem_list[pUmDevice->mem_list_num] = pvirt;
4961	pUmDevice->dma_list[pUmDevice->mem_list_num] = mapping;
4962	pUmDevice->mem_size_list[pUmDevice->mem_list_num++] = BlockSize;
4963	memset(pvirt, 0, BlockSize);
4964	*pMemoryBlockVirt = (PLM_VOID) pvirt;
4965	MM_SetAddr(pMemoryBlockPhy, mapping);
4966	return LM_STATUS_SUCCESS;
4967}
4968
4969LM_STATUS
4970MM_AllocateMemory(PLM_DEVICE_BLOCK pDevice, LM_UINT32 BlockSize,
4971	PLM_VOID *pMemoryBlockVirt)
4972{
4973	PLM_VOID pvirt;
4974	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
4975
4976
4977	/* Maximum in slab.c */
4978	if (BlockSize > 131072) {
4979		goto MM_Alloc_error;
4980	}
4981
4982	pvirt = kmalloc(BlockSize,GFP_ATOMIC);
4983	if (!pvirt) {
4984		goto MM_Alloc_error;
4985	}
4986	pUmDevice->mem_list[pUmDevice->mem_list_num] = pvirt;
4987	pUmDevice->dma_list[pUmDevice->mem_list_num] = 0;
4988	pUmDevice->mem_size_list[pUmDevice->mem_list_num++] = 0;
4989	/* mem_size_list[i] == 0 indicates that the memory should be freed */
4990	/* using kfree */
4991	memset(pvirt, 0, BlockSize);
4992	*pMemoryBlockVirt = pvirt;
4993	return LM_STATUS_SUCCESS;
4994
4995MM_Alloc_error:
4996	printk(KERN_WARNING "%s: Memory allocation failed - buffer parameters may be set too high\n", pUmDevice->dev->name);
4997	return LM_STATUS_FAILURE;
4998}
4999
5000LM_STATUS
5001MM_MapMemBase(PLM_DEVICE_BLOCK pDevice)
5002{
5003	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
5004
5005	pDevice->pMappedMemBase = ioremap_nocache(
5006		pci_resource_start(pUmDevice->pdev, 0), sizeof(T3_STD_MEM_MAP));
5007	if (pDevice->pMappedMemBase == 0)
5008		return LM_STATUS_FAILURE;
5009
5010	return LM_STATUS_SUCCESS;
5011}
5012
/*
 * Attach an skb to every receive packet descriptor that lacks one and
 * compute the buffer-replenish thresholds used by the ISR. Descriptors
 * that cannot get a buffer are parked on rx_out_of_buf_q instead of the
 * hardware free queue.
 */
LM_STATUS
MM_InitializeUmPackets(PLM_DEVICE_BLOCK pDevice)
{
	unsigned int i;
	struct sk_buff *skb;
	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
	PUM_PACKET pUmPacket;
	PLM_PACKET pPacket;

	for (i = 0; i < pDevice->RxPacketDescCnt; i++) {
		pPacket = QQ_PopHead(&pDevice->RxPacketFreeQ.Container);
		pUmPacket = (PUM_PACKET) pPacket;
		/* NOTE(review): pPacket == 0 is only logged here; the
		 * dereference below would still oops on a truly empty queue. */
		if (pPacket == 0) {
			printk(KERN_DEBUG "Bad RxPacketFreeQ\n");
		}
		if (pUmPacket->skbuff == 0) {
#ifdef BCM_WL_EMULATOR
			skb = (struct sk_buff *)wlcemu_pktget(pDevice->wlc,pPacket->u.Rx.RxBufferSize + 2);
#else
			/* +2 aligns the IP header; EXTRA_HDR leaves headroom. */
			skb = dev_alloc_skb(pPacket->u.Rx.RxBufferSize + 2 + EXTRA_HDR);
#endif
			if (skb == 0) {
				/* Out of memory: park the descriptor for
				 * later replenishment by the ISR. */
				pUmPacket->skbuff = 0;
				QQ_PushTail(
					&pUmDevice->rx_out_of_buf_q.Container,
					pPacket);
				continue;
			}
			pUmPacket->skbuff = skb;
			skb->dev = pUmDevice->dev;
#ifndef BCM_WL_EMULATOR
			skb_reserve(skb, EXTRA_HDR - pUmDevice->rx_buf_align);
#endif
		}
		QQ_PushTail(&pDevice->RxPacketFreeQ.Container, pPacket);
	}
	if (T3_ASIC_REV(pUmDevice->lm_dev.ChipRevId) == T3_ASIC_REV_5700) {
		/* reallocate buffers in the ISR */
		pUmDevice->rx_buf_repl_thresh = 0;
		pUmDevice->rx_buf_repl_panic_thresh = 0;
		pUmDevice->rx_buf_repl_isr_limit = 0;
	}
	else {
		/* Replenish after 1/8 of the ring drains; panic at 7/8. */
		pUmDevice->rx_buf_repl_thresh = pDevice->RxPacketDescCnt / 8;
		pUmDevice->rx_buf_repl_panic_thresh =
			pDevice->RxPacketDescCnt  * 7 / 8;

		/* This limits the time spent in the ISR when the receiver */
		/* is in a steady state of being overrun. */
		pUmDevice->rx_buf_repl_isr_limit = pDevice->RxPacketDescCnt / 8;

#if T3_JUMBO_RCV_RCB_ENTRY_COUNT
		/* Clamp thresholds so neither the jumbo ring nor the
		 * standard ring can be fully drained by one replenish. */
		if (pDevice->RxJumboDescCnt != 0) {
			if (pUmDevice->rx_buf_repl_thresh >=
				pDevice->RxJumboDescCnt) {

				pUmDevice->rx_buf_repl_thresh =
				pUmDevice->rx_buf_repl_panic_thresh =
					pDevice->RxJumboDescCnt - 1;
			}
			if (pUmDevice->rx_buf_repl_thresh >=
				pDevice->RxStdDescCnt) {

				pUmDevice->rx_buf_repl_thresh =
				pUmDevice->rx_buf_repl_panic_thresh =
					pDevice->RxStdDescCnt - 1;
			}
		}
#endif
	}
	return LM_STATUS_SUCCESS;
}
5085
5086LM_STATUS
5087MM_GetConfig(PLM_DEVICE_BLOCK pDevice)
5088{
5089	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
5090	int index = pUmDevice->index;
5091	struct net_device *dev = pUmDevice->dev;
5092
5093	if (index >= MAX_UNITS)
5094		return LM_STATUS_SUCCESS;
5095
5096#if LINUX_KERNEL_VERSION < 0x0020609
5097
5098	bcm5700_validate_param_range(pUmDevice, &auto_speed[index], "auto_speed",
5099		0, 1, 1);
5100	if (auto_speed[index] == 0)
5101		pDevice->DisableAutoNeg = TRUE;
5102	else
5103		pDevice->DisableAutoNeg = FALSE;
5104
5105	if (line_speed[index] == 0) {
5106		pDevice->RequestedLineSpeed = LM_LINE_SPEED_AUTO;
5107		pDevice->DisableAutoNeg = FALSE;
5108	}
5109	else {
5110		bcm5700_validate_param_range(pUmDevice, &full_duplex[index],
5111			"full_duplex", 0, 1, 1);
5112		if (full_duplex[index]) {
5113			pDevice->RequestedDuplexMode = LM_DUPLEX_MODE_FULL;
5114		}
5115		else {
5116			pDevice->RequestedDuplexMode = LM_DUPLEX_MODE_HALF;
5117		}
5118
5119		if (line_speed[index] == 1000) {
5120			pDevice->RequestedLineSpeed = LM_LINE_SPEED_1000MBPS;
5121			if (pDevice->PhyFlags & PHY_NO_GIGABIT) {
5122				pDevice->RequestedLineSpeed =
5123					LM_LINE_SPEED_100MBPS;
5124				printk(KERN_WARNING "%s-%d: Invalid line_speed parameter (1000), using 100\n", bcm5700_driver, index);
5125			}
5126			else {
5127				if ((pDevice->TbiFlags & ENABLE_TBI_FLAG) &&
5128					!full_duplex[index]) {
5129					printk(KERN_WARNING "%s-%d: Invalid full_duplex parameter (0) for fiber, using 1\n", bcm5700_driver, index);
5130					pDevice->RequestedDuplexMode =
5131						LM_DUPLEX_MODE_FULL;
5132				}
5133
5134				if (!(pDevice->TbiFlags & ENABLE_TBI_FLAG) &&
5135					!auto_speed[index] && !(pDevice->PhyFlags & PHY_IS_FIBER) ) {
5136					printk(KERN_WARNING "%s-%d: Invalid auto_speed parameter (0) for copper, using 1\n", bcm5700_driver, index);
5137					pDevice->DisableAutoNeg = FALSE;
5138				}
5139			}
5140		}
5141		else if ((pDevice->TbiFlags & ENABLE_TBI_FLAG) ||
5142                         (pDevice->PhyFlags & PHY_IS_FIBER)){
5143			 pDevice->RequestedLineSpeed = LM_LINE_SPEED_AUTO;
5144			 pDevice->RequestedDuplexMode = LM_DUPLEX_MODE_FULL;
5145			 pDevice->DisableAutoNeg = FALSE;
5146			 printk(KERN_WARNING "%s-%d: Invalid line_speed parameter (%d), using auto\n", bcm5700_driver, index, line_speed[index]);
5147		}
5148		else if (line_speed[index] == 100) {
5149
5150                        pDevice->RequestedLineSpeed = LM_LINE_SPEED_100MBPS;
5151		}
5152		else if (line_speed[index] == 10) {
5153
5154			pDevice->RequestedLineSpeed = LM_LINE_SPEED_10MBPS;
5155		}
5156		else {
5157			pDevice->RequestedLineSpeed = LM_LINE_SPEED_AUTO;
5158			pDevice->DisableAutoNeg = FALSE;
5159			printk(KERN_WARNING "%s-%d: Invalid line_speed parameter (%d), using 0\n", bcm5700_driver, index, line_speed[index]);
5160		}
5161
5162	}
5163
5164#endif /* LINUX_KERNEL_VERSION */
5165
5166	/* This is an unmanageable switch nic and will have link problems if
5167	   not set to auto
5168	*/
5169	if(pDevice->SubsystemVendorId==0x103c && pDevice->SubsystemId==0x3226)
5170	{
5171	    if(pDevice->RequestedLineSpeed != LM_LINE_SPEED_AUTO)
5172	    {
5173		printk(KERN_WARNING "%s-%d: Invalid line_speed parameter (%d), using 0\n",
5174			bcm5700_driver, index, line_speed[index]);
5175	    }
5176	    pDevice->RequestedLineSpeed = LM_LINE_SPEED_AUTO;
5177	    pDevice->DisableAutoNeg = FALSE;
5178	}
5179
5180#if LINUX_KERNEL_VERSION < 0x0020609
5181
5182	pDevice->FlowControlCap = 0;
5183	bcm5700_validate_param_range(pUmDevice, &rx_flow_control[index],
5184		"rx_flow_control", 0, 1, 0);
5185	if (rx_flow_control[index] != 0) {
5186		pDevice->FlowControlCap |= LM_FLOW_CONTROL_RECEIVE_PAUSE;
5187	}
5188	bcm5700_validate_param_range(pUmDevice, &tx_flow_control[index],
5189		"tx_flow_control", 0, 1, 0);
5190	if (tx_flow_control[index] != 0) {
5191		pDevice->FlowControlCap |= LM_FLOW_CONTROL_TRANSMIT_PAUSE;
5192	}
5193	bcm5700_validate_param_range(pUmDevice, &auto_flow_control[index],
5194		"auto_flow_control", 0, 1, 0);
5195	if (auto_flow_control[index] != 0) {
5196		if (pDevice->DisableAutoNeg == FALSE) {
5197
5198			pDevice->FlowControlCap |= LM_FLOW_CONTROL_AUTO_PAUSE;
5199			if ((tx_flow_control[index] == 0) &&
5200				(rx_flow_control[index] == 0)) {
5201
5202				pDevice->FlowControlCap |=
5203					LM_FLOW_CONTROL_TRANSMIT_PAUSE |
5204					LM_FLOW_CONTROL_RECEIVE_PAUSE;
5205			}
5206		}
5207	}
5208
5209	if (dev->mtu > 1500) {
5210#ifdef BCM_TSO
5211		if (T3_ASIC_5714_FAMILY(pDevice->ChipRevId) &&
5212		   (dev->features & NETIF_F_TSO)) {
5213				dev->features &= ~NETIF_F_TSO;
5214				printk(KERN_ALERT "%s: TSO previously enabled. Jumbo Frames and TSO cannot simultaneously be enabled. Jumbo Frames enabled. TSO disabled.\n", dev->name);
5215		}
5216#endif
5217		pDevice->RxMtu = dev->mtu + 14;
5218	}
5219
5220	if ((T3_ASIC_REV(pDevice->ChipRevId) != T3_ASIC_REV_5700) &&
5221		!(pDevice->Flags & BCM5788_FLAG)) {
5222		pDevice->Flags |= USE_TAGGED_STATUS_FLAG;
5223		pUmDevice->timer_interval = HZ;
5224		if ((T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5703) &&
5225			(pDevice->TbiFlags & ENABLE_TBI_FLAG)) {
5226			pUmDevice->timer_interval = HZ/4;
5227		}
5228	}
5229	else {
5230		pUmDevice->timer_interval = HZ/10;
5231	}
5232
5233	bcm5700_validate_param_range(pUmDevice, &tx_pkt_desc_cnt[index],
5234		"tx_pkt_desc_cnt", 1, MAX_TX_PACKET_DESC_COUNT-1, TX_DESC_CNT);
5235	pDevice->TxPacketDescCnt = tx_pkt_desc_cnt[index];
5236	bcm5700_validate_param_range(pUmDevice, &rx_std_desc_cnt[index],
5237		"rx_std_desc_cnt", 1, T3_STD_RCV_RCB_ENTRY_COUNT-1,
5238		RX_DESC_CNT);
5239	pDevice->RxStdDescCnt = rx_std_desc_cnt[index];
5240
5241#if T3_JUMBO_RCV_RCB_ENTRY_COUNT
5242	bcm5700_validate_param_range(pUmDevice, &rx_jumbo_desc_cnt[index],
5243		"rx_jumbo_desc_cnt", 1, T3_JUMBO_RCV_RCB_ENTRY_COUNT-1,
5244		JBO_DESC_CNT);
5245
5246	if (mtu[index] <= 1514)
5247		pDevice->RxJumboDescCnt = 0;
5248	else if(!T3_ASIC_IS_5705_BEYOND(pDevice->ChipRevId)){
5249		pDevice->RxJumboDescCnt = rx_jumbo_desc_cnt[index];
5250        }
5251#endif
5252
5253#ifdef BCM_INT_COAL
5254	bcm5700_validate_param_range(pUmDevice, &adaptive_coalesce[index],
5255		"adaptive_coalesce", 0, 1, 1);
5256#ifdef BCM_NAPI_RXPOLL
5257	if (adaptive_coalesce[index]) {
5258		printk(KERN_WARNING "%s-%d: adaptive_coalesce not used in NAPI mode\n", bcm5700_driver, index);
5259		adaptive_coalesce[index] = 0;
5260
5261	}
5262#endif
5263	pUmDevice->adaptive_coalesce = adaptive_coalesce[index];
5264	if (!pUmDevice->adaptive_coalesce) {
5265		bcm5700_validate_param_range(pUmDevice,
5266			&rx_coalesce_ticks[index], "rx_coalesce_ticks", 0,
5267			MAX_RX_COALESCING_TICKS, RX_COAL_TK);
5268		if ((rx_coalesce_ticks[index] == 0) &&
5269			(rx_max_coalesce_frames[index] == 0)) {
5270
5271			printk(KERN_WARNING "%s-%d: Conflicting rx_coalesce_ticks (0) and rx_max_coalesce_frames (0) parameters, using %d and %d respectively\n",
5272				bcm5700_driver, index, RX_COAL_TK, RX_COAL_FM);
5273
5274			rx_coalesce_ticks[index] = RX_COAL_TK;
5275			rx_max_coalesce_frames[index] = RX_COAL_FM;
5276		}
5277		pDevice->RxCoalescingTicks = pUmDevice->rx_curr_coalesce_ticks =
5278			rx_coalesce_ticks[index];
5279#ifdef BCM_NAPI_RXPOLL
5280		pDevice->RxCoalescingTicksDuringInt = rx_coalesce_ticks[index];
5281#endif
5282
5283		bcm5700_validate_param_range(pUmDevice,
5284			&rx_max_coalesce_frames[index],
5285			"rx_max_coalesce_frames", 0,
5286			MAX_RX_MAX_COALESCED_FRAMES, RX_COAL_FM);
5287
5288		pDevice->RxMaxCoalescedFrames =
5289			pUmDevice->rx_curr_coalesce_frames =
5290			rx_max_coalesce_frames[index];
5291#ifdef BCM_NAPI_RXPOLL
5292		pDevice->RxMaxCoalescedFramesDuringInt =
5293			rx_max_coalesce_frames[index];
5294#endif
5295
5296		bcm5700_validate_param_range(pUmDevice,
5297			&tx_coalesce_ticks[index], "tx_coalesce_ticks", 0,
5298			MAX_TX_COALESCING_TICKS, TX_COAL_TK);
5299		if ((tx_coalesce_ticks[index] == 0) &&
5300			(tx_max_coalesce_frames[index] == 0)) {
5301
5302			printk(KERN_WARNING "%s-%d: Conflicting tx_coalesce_ticks (0) and tx_max_coalesce_frames (0) parameters, using %d and %d respectively\n",
5303				bcm5700_driver, index, TX_COAL_TK, TX_COAL_FM);
5304
5305			tx_coalesce_ticks[index] = TX_COAL_TK;
5306			tx_max_coalesce_frames[index] = TX_COAL_FM;
5307		}
5308		pDevice->TxCoalescingTicks = tx_coalesce_ticks[index];
5309		bcm5700_validate_param_range(pUmDevice,
5310			&tx_max_coalesce_frames[index],
5311			"tx_max_coalesce_frames", 0,
5312			MAX_TX_MAX_COALESCED_FRAMES, TX_COAL_FM);
5313		pDevice->TxMaxCoalescedFrames = tx_max_coalesce_frames[index];
5314		pUmDevice->tx_curr_coalesce_frames =
5315			pDevice->TxMaxCoalescedFrames;
5316
5317		bcm5700_validate_param_range(pUmDevice,
5318			&stats_coalesce_ticks[index], "stats_coalesce_ticks",
5319			0, MAX_STATS_COALESCING_TICKS, ST_COAL_TK);
5320		if (adaptive_coalesce[index]) {
5321			printk(KERN_WARNING "%s-%d: Invalid stats_coalesce_ticks parameter set with with adaptive_coalesce parameter. Using adaptive_coalesce.\n", bcm5700_driver, index);
5322		}else{
5323			if ((stats_coalesce_ticks[index] > 0) &&
5324				(stats_coalesce_ticks[index] < 100)) {
5325				printk(KERN_WARNING "%s-%d: Invalid stats_coalesce_ticks parameter (%u), using 100\n", bcm5700_driver, index, (unsigned int) stats_coalesce_ticks[index]);
5326				stats_coalesce_ticks[index] = 100;
5327				pDevice->StatsCoalescingTicks = stats_coalesce_ticks[index];
5328				pDevice->StatsCoalescingTicks = stats_coalesce_ticks[index];
5329			}
5330		}
5331	}
5332	else {
5333		pUmDevice->rx_curr_coalesce_frames = RX_COAL_FM;
5334		pUmDevice->rx_curr_coalesce_ticks = RX_COAL_TK;
5335		pUmDevice->tx_curr_coalesce_frames = TX_COAL_FM;
5336	}
5337#endif
5338
5339	if (T3_ASIC_IS_5705_BEYOND(pDevice->ChipRevId)) {
5340		unsigned int tmpvar;
5341
5342		tmpvar = pDevice->StatsCoalescingTicks / BCM_TIMER_GRANULARITY;
5343
5344		/*
5345		 * If the result is zero, the request is too demanding.
5346		 */
5347		if (tmpvar == 0) {
5348			tmpvar = 1;
5349		}
5350
5351		pDevice->StatsCoalescingTicks = tmpvar * BCM_TIMER_GRANULARITY;
5352
5353		pUmDevice->statstimer_interval = tmpvar;
5354	}
5355
5356#ifdef BCM_WOL
5357	bcm5700_validate_param_range(pUmDevice, &enable_wol[index],
5358		"enable_wol", 0, 1, 0);
5359	if (enable_wol[index]) {
5360		pDevice->WakeUpModeCap = LM_WAKE_UP_MODE_MAGIC_PACKET;
5361		pDevice->WakeUpMode = LM_WAKE_UP_MODE_MAGIC_PACKET;
5362	}
5363#endif
5364#ifdef INCLUDE_TBI_SUPPORT
5365	if (pDevice->TbiFlags & ENABLE_TBI_FLAG) {
5366		if ((T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5704) ||
5367			(T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5703)) {
5368			/* just poll since we have hardware autoneg. in 5704 */
5369			pDevice->TbiFlags |= TBI_PURE_POLLING_FLAG;
5370		}
5371		else {
5372			pDevice->TbiFlags |= TBI_POLLING_INTR_FLAG;
5373		}
5374	}
5375#endif
5376	bcm5700_validate_param_range(pUmDevice, &scatter_gather[index],
5377		"scatter_gather", 0, 1, 1);
5378	bcm5700_validate_param_range(pUmDevice, &tx_checksum[index],
5379		"tx_checksum", 0, 1, 1);
5380	bcm5700_validate_param_range(pUmDevice, &rx_checksum[index],
5381		"rx_checksum", 0, 1, 1);
5382	if (!(pDevice->TaskOffloadCap & LM_TASK_OFFLOAD_TX_TCP_CHECKSUM)) {
5383		if (tx_checksum[index] || rx_checksum[index]) {
5384
5385			pDevice->TaskToOffload = LM_TASK_OFFLOAD_NONE;
5386			printk(KERN_WARNING "%s-%d: Checksum offload not available on this NIC\n", bcm5700_driver, index);
5387		}
5388	}
5389	else {
5390		if (rx_checksum[index]) {
5391			pDevice->TaskToOffload |=
5392				LM_TASK_OFFLOAD_RX_TCP_CHECKSUM |
5393				LM_TASK_OFFLOAD_RX_UDP_CHECKSUM;
5394		}
5395		if (tx_checksum[index]) {
5396			pDevice->TaskToOffload |=
5397				LM_TASK_OFFLOAD_TX_TCP_CHECKSUM |
5398				LM_TASK_OFFLOAD_TX_UDP_CHECKSUM;
5399			pDevice->Flags |= NO_TX_PSEUDO_HDR_CSUM_FLAG;
5400		}
5401	}
5402#ifdef BCM_TSO
5403	bcm5700_validate_param_range(pUmDevice, &enable_tso[index],
5404		"enable_tso", 0, 1, 1);
5405
5406	/* Always enable TSO firmware if supported */
5407	/* This way we can turn it on or off on the fly */
5408	if (pDevice->TaskOffloadCap & LM_TASK_OFFLOAD_TCP_SEGMENTATION)
5409	{
5410		pDevice->TaskToOffload |=
5411			LM_TASK_OFFLOAD_TCP_SEGMENTATION;
5412	}
5413	if (enable_tso[index] &&
5414		!(pDevice->TaskToOffload & LM_TASK_OFFLOAD_TCP_SEGMENTATION))
5415	{
5416		printk(KERN_WARNING "%s-%d: TSO not available on this NIC\n", bcm5700_driver, index);
5417	}
5418#endif
5419#ifdef BCM_ASF
5420	bcm5700_validate_param_range(pUmDevice, &vlan_tag_mode[index],
5421		"vlan_strip_mode", 0, 2, 0);
5422	pUmDevice->vlan_tag_mode = vlan_tag_mode[index];
5423#else
5424	pUmDevice->vlan_tag_mode = VLAN_TAG_MODE_NORMAL_STRIP;
5425#endif
5426
5427#endif /* LINUX_KERNEL_VERSION */
5428
5429#ifdef BCM_NIC_SEND_BD
5430	bcm5700_validate_param_range(pUmDevice, &nic_tx_bd[index], "nic_tx_bd",
5431		0, 1, 0);
5432	if (nic_tx_bd[index])
5433		pDevice->Flags |= NIC_SEND_BD_FLAG;
5434	if ((pDevice->Flags & ENABLE_PCIX_FIX_FLAG) ||
5435		(T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5705)) {
5436		if (pDevice->Flags & NIC_SEND_BD_FLAG) {
5437			pDevice->Flags &= ~NIC_SEND_BD_FLAG;
5438			printk(KERN_WARNING "%s-%d: Nic Send BDs not available on this NIC or not possible on this system\n", bcm5700_driver, index);
5439		}
5440	}
5441#endif
5442#if defined(CONFIG_PCI_MSI) || defined(CONFIG_PCI_USE_VECTOR)
5443	bcm5700_validate_param_range(pUmDevice, &disable_msi[pUmDevice->index],
5444		"disable_msi", 0, 1, 0);
5445#endif
5446
5447	bcm5700_validate_param_range(pUmDevice, &delay_link[index],
5448		"delay_link", 0, 1, 0);
5449
5450	bcm5700_validate_param_range(pUmDevice, &disable_d3hot[index],
5451		"disable_d3hot", 0, 1, 0);
5452	if (disable_d3hot[index]) {
5453
5454#ifdef BCM_WOL
5455		if (enable_wol[index]) {
5456			pDevice->WakeUpModeCap = LM_WAKE_UP_MODE_NONE;
5457			pDevice->WakeUpMode = LM_WAKE_UP_MODE_NONE;
5458			printk(KERN_WARNING "%s-%d: Wake-On-Lan disabled because D3Hot is disabled\n", bcm5700_driver, index);
5459		}
5460#endif
5461		pDevice->Flags |= DISABLE_D3HOT_FLAG;
5462	}
5463
5464    return LM_STATUS_SUCCESS;
5465}
5466
/*
 * MM_IndicateRxPackets - deliver received packets to the Linux stack.
 *
 * Drains pDevice->RxPacketReceivedQ.  For each packet: unmaps its DMA
 * buffer, drops errored/oversized frames, hands good skbs up the stack
 * (via NICE hook, VLAN acceleration, NAPI or plain netif_rx depending on
 * build options), then allocates a replacement skb so the descriptor can
 * be recycled onto RxPacketFreeQ.  Runs in interrupt/softirq context
 * (uses dev_kfree_skb_irq).  Always returns LM_STATUS_SUCCESS.
 */
LM_STATUS
MM_IndicateRxPackets(PLM_DEVICE_BLOCK pDevice)
{
	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
	PLM_PACKET pPacket;
	PUM_PACKET pUmPacket;
	struct sk_buff *skb;
	int size;
	int vlan_tag_size = 0;	/* extra bytes allowed beyond RxMtu */

	/* If the hardware keeps the VLAN tag in the frame, frames may be
	 * 4 bytes longer than the MTU-derived limit. */
	if (pDevice->ReceiveMask & LM_KEEP_VLAN_TAG)
		vlan_tag_size = 4;

	while (1) {
		pPacket = (PLM_PACKET)
			QQ_PopHead(&pDevice->RxPacketReceivedQ.Container);
		if (pPacket == 0)
			break;
		pUmPacket = (PUM_PACKET) pPacket;
#if !defined(NO_PCI_UNMAP)
		/* CPU now owns the buffer; undo the streaming DMA mapping */
		pci_unmap_single(pUmDevice->pdev,
				pci_unmap_addr(pUmPacket, map[0]),
				pPacket->u.Rx.RxBufferSize,
				PCI_DMA_FROMDEVICE);
#endif
		if ((pPacket->PacketStatus != LM_STATUS_SUCCESS) ||
			((size = pPacket->PacketSize) >
			(pDevice->RxMtu + vlan_tag_size))) {

			/* reuse skb */
#ifdef BCM_TASKLET
			QQ_PushTail(&pUmDevice->rx_out_of_buf_q.Container, pPacket);
#else
			QQ_PushTail(&pDevice->RxPacketFreeQ.Container, pPacket);
#endif
			pUmDevice->rx_misc_errors++;
			continue;
		}
		skb = pUmPacket->skbuff;
		skb_put(skb, size);
		skb->pkt_type = 0;
		skb->protocol = eth_type_trans(skb, skb->dev);
		/* An over-MTU frame is only acceptable if the extra bytes are
		 * a VLAN tag (protocol 0x8100).  NOTE(review): htons() is
		 * used where ntohs() would be conventional; the comparison
		 * result is the same since the operation is symmetric. */
		if (size > pDevice->RxMtu) {
			/* Make sure we have a valid VLAN tag */
			if (htons(skb->protocol) != 0x8100) {
				dev_kfree_skb_irq(skb);
				pUmDevice->rx_misc_errors++;
				goto drop_rx;
			}
		}
		/* Propagate hardware checksum result when rx csum offload is
		 * active; 0xffff means the TCP/UDP checksum verified OK. */
		if ((pPacket->Flags & RCV_BD_FLAG_TCP_UDP_CHKSUM_FIELD) &&
			(pDevice->TaskToOffload &
				LM_TASK_OFFLOAD_RX_TCP_CHECKSUM)) {
			if (pPacket->u.Rx.TcpUdpChecksum == 0xffff) {

				skb->ip_summed = CHECKSUM_UNNECESSARY;
#if TIGON3_DEBUG
				pUmDevice->rx_good_chksum_count++;
#endif
			}
			else {
				skb->ip_summed = CHECKSUM_NONE;
				pUmDevice->rx_bad_chksum_count++;
			}
		}
		else {
			skb->ip_summed = CHECKSUM_NONE;
		}
#ifdef NICE_SUPPORT
		/* NICE intercept hook: pass VLAN info via skb->cb and hand
		 * the frame to the registered receiver instead of the stack */
		if( pUmDevice->nice_rx ) {
			vlan_tag_t *vlan_tag;

			vlan_tag = (vlan_tag_t *) &skb->cb[0];
			if (pPacket->Flags & RCV_BD_FLAG_VLAN_TAG) {
				vlan_tag->signature = 0x7777;
				vlan_tag->tag = pPacket->VlanTag;
			}
			else {
				vlan_tag->signature = 0;
			}
			pUmDevice->nice_rx(skb, pUmDevice->nice_ctx);
		}
		else
#endif
		{
#ifdef BCM_VLAN
			/* Hardware-accelerated VLAN receive path */
			if (pUmDevice->vlgrp &&
				(pPacket->Flags & RCV_BD_FLAG_VLAN_TAG)) {

#ifdef BCM_NAPI_RXPOLL
				vlan_hwaccel_receive_skb(skb, pUmDevice->vlgrp,
					pPacket->VlanTag);
#else
				vlan_hwaccel_rx(skb, pUmDevice->vlgrp,
					pPacket->VlanTag);
#endif
			}
			else
#endif
			{
#ifdef BCM_WL_EMULATOR
				if(pDevice->wl_emulate_rx) {
					/* bcmstats("emu recv %d %d"); */
					wlcemu_receive_skb(pDevice->wlc, skb);
					/* bcmstats("emu recv end %d %d"); */
				}
				else
#endif /* BCM_WL_EMULATOR  */
				{
#ifdef BCM_NAPI_RXPOLL
				netif_receive_skb(skb);
#else
				netif_rx(skb);
#endif
				}
			}
		}
		pUmDevice->dev->last_rx = jiffies;

		/* The skb has been consumed (or freed); attach a fresh one to
		 * the descriptor before recycling it. */
drop_rx:
#ifdef BCM_TASKLET
		/* Tasklet build: replenishment happens later in the tasklet */
		pUmPacket->skbuff = 0;
		QQ_PushTail(&pUmDevice->rx_out_of_buf_q.Container, pPacket);
#else
#ifdef BCM_WL_EMULATOR
		skb = (struct sk_buff *)wlcemu_pktget(pDevice->wlc,pPacket->u.Rx.RxBufferSize + 2);
#else
		skb = dev_alloc_skb(pPacket->u.Rx.RxBufferSize + 2 + EXTRA_HDR);
#endif /* BCM_WL_EMULATOR  */
		if (skb == 0) {
			/* Out of memory: park the descriptor for later refill */
			pUmPacket->skbuff = 0;
			QQ_PushTail(&pUmDevice->rx_out_of_buf_q.Container, pPacket);
		}
		else {
			pUmPacket->skbuff = skb;
			skb->dev = pUmDevice->dev;
#ifndef BCM_WL_EMULATOR
			/* Reserve headroom, keeping the DMA alignment offset */
			skb_reserve(skb, EXTRA_HDR - pUmDevice->rx_buf_align);
#endif
			QQ_PushTail(&pDevice->RxPacketFreeQ.Container, pPacket);
		}
#endif
	}
	return LM_STATUS_SUCCESS;
}
5612
5613LM_STATUS
5614MM_CoalesceTxBuffer(PLM_DEVICE_BLOCK pDevice, PLM_PACKET pPacket)
5615{
5616	PUM_PACKET pUmPacket = (PUM_PACKET) pPacket;
5617	struct sk_buff *skb = pUmPacket->skbuff;
5618	struct sk_buff *nskb;
5619#if !defined(NO_PCI_UNMAP)
5620	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
5621
5622	pci_unmap_single(pUmDevice->pdev,
5623			pci_unmap_addr(pUmPacket, map[0]),
5624			pci_unmap_len(pUmPacket, map_len[0]),
5625			PCI_DMA_TODEVICE);
5626#if MAX_SKB_FRAGS
5627	{
5628		int i;
5629
5630		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5631			pci_unmap_page(pUmDevice->pdev,
5632				pci_unmap_addr(pUmPacket, map[i + 1]),
5633				pci_unmap_len(pUmPacket, map_len[i + 1]),
5634				PCI_DMA_TODEVICE);
5635		}
5636	}
5637#endif
5638#endif
5639	if ((nskb = skb_copy(skb, GFP_ATOMIC))) {
5640		pUmPacket->lm_packet.u.Tx.FragCount = 1;
5641		dev_kfree_skb(skb);
5642		pUmPacket->skbuff = nskb;
5643		return LM_STATUS_SUCCESS;
5644	}
5645	dev_kfree_skb(skb);
5646	pUmPacket->skbuff = 0;
5647	return LM_STATUS_FAILURE;
5648}
5649
5650/* Returns 1 if not all buffers are allocated */
5651STATIC int
5652replenish_rx_buffers(PUM_DEVICE_BLOCK pUmDevice, int max)
5653{
5654	PLM_PACKET pPacket;
5655	PUM_PACKET pUmPacket;
5656	PLM_DEVICE_BLOCK pDevice = (PLM_DEVICE_BLOCK) pUmDevice;
5657	struct sk_buff *skb;
5658	int queue_rx = 0;
5659	int alloc_cnt = 0;
5660	int ret = 0;
5661
5662	while ((pUmPacket = (PUM_PACKET)
5663		QQ_PopHead(&pUmDevice->rx_out_of_buf_q.Container)) != 0) {
5664		pPacket = (PLM_PACKET) pUmPacket;
5665		if (pUmPacket->skbuff) {
5666			/* reuse an old skb */
5667			QQ_PushTail(&pDevice->RxPacketFreeQ.Container, pPacket);
5668			queue_rx = 1;
5669			continue;
5670		}
5671#ifdef BCM_WL_EMULATOR
5672		if ((skb = (struct sk_buff *)wlcemu_pktget(pDevice->wlc,pPacket->u.Rx.RxBufferSize + 2)) == 0)
5673#else
5674	       if ((skb = dev_alloc_skb(pPacket->u.Rx.RxBufferSize + 2 + EXTRA_HDR)) == 0)
5675#endif /* BCM_WL_EMULATOR  */
5676	       {
5677		       QQ_PushHead(&pUmDevice->rx_out_of_buf_q.Container,
5678		                   pPacket);
5679		       ret = 1;
5680		       break;
5681	       }
5682		pUmPacket->skbuff = skb;
5683		skb->dev = pUmDevice->dev;
5684#ifndef BCM_WL_EMULATOR
5685			skb_reserve(skb, EXTRA_HDR - pUmDevice->rx_buf_align);
5686#endif
5687		QQ_PushTail(&pDevice->RxPacketFreeQ.Container, pPacket);
5688		queue_rx = 1;
5689		if (max > 0) {
5690			alloc_cnt++;
5691			if (alloc_cnt >= max)
5692				break;
5693		}
5694	}
5695	if (queue_rx || pDevice->QueueAgain) {
5696		LM_QueueRxPackets(pDevice);
5697	}
5698	return ret;
5699}
5700
5701LM_STATUS
5702MM_IndicateTxPackets(PLM_DEVICE_BLOCK pDevice)
5703{
5704	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
5705	PLM_PACKET pPacket;
5706	PUM_PACKET pUmPacket;
5707	struct sk_buff *skb;
5708#if !defined(NO_PCI_UNMAP) && MAX_SKB_FRAGS
5709	int i;
5710#endif
5711
5712	while (1) {
5713		pPacket = (PLM_PACKET)
5714			QQ_PopHead(&pDevice->TxPacketXmittedQ.Container);
5715		if (pPacket == 0)
5716			break;
5717		pUmPacket = (PUM_PACKET) pPacket;
5718		skb = pUmPacket->skbuff;
5719#if !defined(NO_PCI_UNMAP)
5720		pci_unmap_single(pUmDevice->pdev,
5721				pci_unmap_addr(pUmPacket, map[0]),
5722				pci_unmap_len(pUmPacket, map_len[0]),
5723				PCI_DMA_TODEVICE);
5724#if MAX_SKB_FRAGS
5725		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5726			pci_unmap_page(pUmDevice->pdev,
5727				pci_unmap_addr(pUmPacket, map[i + 1]),
5728				pci_unmap_len(pUmPacket, map_len[i + 1]),
5729				PCI_DMA_TODEVICE);
5730		}
5731#endif
5732#endif
5733		dev_kfree_skb_irq(skb);
5734		pUmPacket->skbuff = 0;
5735		QQ_PushTail(&pDevice->TxPacketFreeQ.Container, pPacket);
5736	}
5737	if (pUmDevice->tx_full) {
5738		if (QQ_GetEntryCnt(&pDevice->TxPacketFreeQ.Container) >=
5739			(pDevice->TxPacketDescCnt >> 1)) {
5740
5741			pUmDevice->tx_full = 0;
5742			netif_wake_queue(pUmDevice->dev);
5743		}
5744	}
5745	return LM_STATUS_SUCCESS;
5746}
5747
/*
 * MM_IndicateStatus - report a link state change to the stack and the log.
 *
 * Updates netif carrier state (unless suspended) and prints a one-line
 * link message: speed, duplex and negotiated flow control on link-up,
 * a simple notice on link-down.  No-op before the device is opened.
 * NOTE: the delayed-indication branch deliberately prints "DOWN"/"UP"
 * while the immediate branch prints "Down"/"Up".
 * Always returns LM_STATUS_SUCCESS.
 */
LM_STATUS
MM_IndicateStatus(PLM_DEVICE_BLOCK pDevice, LM_STATUS Status)
{
	PUM_DEVICE_BLOCK pUmDevice = (PUM_DEVICE_BLOCK) pDevice;
	struct net_device *dev = pUmDevice->dev;
	LM_FLOW_CONTROL flow_control;
	int speed = 0;

	/* Ignore link events before the interface is up */
	if (!pUmDevice->opened)
		return LM_STATUS_SUCCESS;

	/* Don't touch carrier state while suspended */
	if (!pUmDevice->suspended) {
		if (Status == LM_STATUS_LINK_DOWN) {
			netif_carrier_off(dev);
		}
		else if (Status == LM_STATUS_LINK_ACTIVE) {
			netif_carrier_on(dev);
		}
	}

	if (pUmDevice->delayed_link_ind > 0) {
		/* Indication deferred by the delay_link option */
		pUmDevice->delayed_link_ind = 0;
		if (Status == LM_STATUS_LINK_DOWN) {
			B57_INFO(("%s: %s NIC Link is DOWN\n", bcm5700_driver, dev->name));
		}
		else if (Status == LM_STATUS_LINK_ACTIVE) {
			B57_INFO(("%s: %s NIC Link is UP, ", bcm5700_driver, dev->name));
		}
	}
	else {
		if (Status == LM_STATUS_LINK_DOWN) {
			B57_INFO(("%s: %s NIC Link is Down\n", bcm5700_driver, dev->name));
		}
		else if (Status == LM_STATUS_LINK_ACTIVE) {
			B57_INFO(("%s: %s NIC Link is Up, ", bcm5700_driver, dev->name));
		}
	}

	/* On link-up, finish the log line with speed/duplex/flow control */
	if (Status == LM_STATUS_LINK_ACTIVE) {
		if (pDevice->LineSpeed == LM_LINE_SPEED_1000MBPS)
			speed = 1000;
		else if (pDevice->LineSpeed == LM_LINE_SPEED_100MBPS)
			speed = 100;
		else if (pDevice->LineSpeed == LM_LINE_SPEED_10MBPS)
			speed = 10;

		B57_INFO(("%d Mbps ", speed));

		if (pDevice->DuplexMode == LM_DUPLEX_MODE_FULL)
			B57_INFO(("full duplex"));
		else
			B57_INFO(("half duplex"));

		flow_control = pDevice->FlowControl &
			(LM_FLOW_CONTROL_RECEIVE_PAUSE |
			LM_FLOW_CONTROL_TRANSMIT_PAUSE);
		if (flow_control) {
			if (flow_control & LM_FLOW_CONTROL_RECEIVE_PAUSE) {
				B57_INFO((", receive "));
				if (flow_control & LM_FLOW_CONTROL_TRANSMIT_PAUSE)
					B57_INFO(("& transmit "));
			}
			else {
				B57_INFO((", transmit "));
			}
			B57_INFO(("flow control ON"));
		}
		B57_INFO(("\n"));
	}
	return LM_STATUS_SUCCESS;
}
5819
5820void
5821MM_UnmapRxDma(LM_DEVICE_BLOCK *pDevice, LM_PACKET *pPacket)
5822{
5823#if !defined(NO_PCI_UNMAP)
5824	UM_DEVICE_BLOCK *pUmDevice = (UM_DEVICE_BLOCK *) pDevice;
5825	UM_PACKET *pUmPacket = (UM_PACKET *) pPacket;
5826
5827	if (!pUmPacket->skbuff)
5828		return;
5829
5830	pci_unmap_single(pUmDevice->pdev,
5831			pci_unmap_addr(pUmPacket, map[0]),
5832			pPacket->u.Rx.RxBufferSize,
5833			PCI_DMA_FROMDEVICE);
5834#endif
5835}
5836
5837LM_STATUS
5838MM_FreeRxBuffer(PLM_DEVICE_BLOCK pDevice, PLM_PACKET pPacket)
5839{
5840	PUM_PACKET pUmPacket;
5841	struct sk_buff *skb;
5842
5843	if (pPacket == 0)
5844		return LM_STATUS_SUCCESS;
5845	pUmPacket = (PUM_PACKET) pPacket;
5846	if ((skb = pUmPacket->skbuff)) {
5847		/* DMA address already unmapped */
5848		dev_kfree_skb(skb);
5849	}
5850	pUmPacket->skbuff = 0;
5851	return LM_STATUS_SUCCESS;
5852}
5853
5854LM_STATUS
5855MM_Sleep(LM_DEVICE_BLOCK *pDevice, LM_UINT32 msec)
5856{
5857	current->state = TASK_INTERRUPTIBLE;
5858	if (schedule_timeout(HZ * msec / 1000) != 0) {
5859		return LM_STATUS_FAILURE;
5860	}
5861	if (signal_pending(current))
5862		return LM_STATUS_FAILURE;
5863
5864	return LM_STATUS_SUCCESS;
5865}
5866
/*
 * bcm5700_shutdown - quiesce the device for close/suspend.
 *
 * Ordering matters: interrupts are masked first, then the carrier is
 * dropped, any pending tasklet is killed, and we wait for in-flight
 * interrupt/poll handlers to finish before halting the hardware and
 * releasing leftover receive buffers.
 */
void
bcm5700_shutdown(UM_DEVICE_BLOCK *pUmDevice)
{
	LM_DEVICE_BLOCK *pDevice = (LM_DEVICE_BLOCK *) pUmDevice;

	bcm5700_intr_off(pUmDevice);
	netif_carrier_off(pUmDevice->dev);
#ifdef BCM_TASKLET
	tasklet_kill(&pUmDevice->tasklet);
#endif
	/* wait for any handler still running to complete */
	bcm5700_poll_wait(pUmDevice);

	LM_Halt(pDevice);

	pDevice->InitDone = 0;
	bcm5700_free_remaining_rx_bufs(pUmDevice);
}
5884
5885void
5886bcm5700_free_remaining_rx_bufs(UM_DEVICE_BLOCK *pUmDevice)
5887{
5888	LM_DEVICE_BLOCK *pDevice = &pUmDevice->lm_dev;
5889	UM_PACKET *pUmPacket;
5890	int cnt, i;
5891
5892	cnt = QQ_GetEntryCnt(&pUmDevice->rx_out_of_buf_q.Container);
5893	for (i = 0; i < cnt; i++) {
5894		if ((pUmPacket =
5895			QQ_PopHead(&pUmDevice->rx_out_of_buf_q.Container))
5896			!= 0) {
5897
5898			MM_UnmapRxDma(pDevice, (LM_PACKET *) pUmPacket);
5899			MM_FreeRxBuffer(pDevice, &pUmPacket->lm_packet);
5900			QQ_PushTail(&pDevice->RxPacketFreeQ.Container,
5901				pUmPacket);
5902		}
5903	}
5904}
5905
5906void
5907bcm5700_validate_param_range(UM_DEVICE_BLOCK *pUmDevice, int *param,
5908	char *param_name, int min, int max, int deflt)
5909{
5910	if (((unsigned int) *param < (unsigned int) min) ||
5911		((unsigned int) *param > (unsigned int) max)) {
5912
5913		printk(KERN_WARNING "%s-%d: Invalid %s parameter (%u), using %u\n", bcm5700_driver, pUmDevice->index, param_name, (unsigned int) *param, (unsigned int) deflt);
5914		*param = deflt;
5915	}
5916}
5917
5918struct net_device *
5919bcm5700_find_peer(struct net_device *dev)
5920{
5921	struct net_device *tmp_dev;
5922	UM_DEVICE_BLOCK *pUmDevice, *pUmTmp;
5923	LM_DEVICE_BLOCK *pDevice;
5924
5925	tmp_dev = 0;
5926	pUmDevice = (UM_DEVICE_BLOCK *) dev->priv;
5927	pDevice = &pUmDevice->lm_dev;
5928	if (T3_ASIC_REV(pDevice->ChipRevId) == T3_ASIC_REV_5704) {
5929		tmp_dev = root_tigon3_dev;
5930		while (tmp_dev) {
5931			pUmTmp = (PUM_DEVICE_BLOCK) tmp_dev->priv;
5932			if ((tmp_dev != dev) &&
5933				(pUmDevice->pdev->bus->number ==
5934				pUmTmp->pdev->bus->number) &&
5935				PCI_SLOT(pUmDevice->pdev->devfn) ==
5936				PCI_SLOT(pUmTmp->pdev->devfn)) {
5937
5938				break;
5939			}
5940			tmp_dev = pUmTmp->next_module;
5941		}
5942	}
5943	return tmp_dev;
5944}
5945
5946LM_DEVICE_BLOCK *
5947MM_FindPeerDev(LM_DEVICE_BLOCK *pDevice)
5948{
5949	UM_DEVICE_BLOCK *pUmDevice = (UM_DEVICE_BLOCK *) pDevice;
5950	struct net_device *dev = pUmDevice->dev;
5951	struct net_device *peer_dev;
5952
5953	peer_dev = bcm5700_find_peer(dev);
5954	if (!peer_dev)
5955		return 0;
5956	return ((LM_DEVICE_BLOCK *) peer_dev->priv);
5957}
5958
5959int MM_FindCapability(LM_DEVICE_BLOCK *pDevice, int capability)
5960{
5961	UM_DEVICE_BLOCK *pUmDevice = (UM_DEVICE_BLOCK *) pDevice;
5962	return (pci_find_capability(pUmDevice->pdev, capability));
5963}
5964
5965#if defined(HAVE_POLL_CONTROLLER)||defined(CONFIG_NET_POLL_CONTROLLER)
/*
 * poll_bcm5700 - netpoll/netconsole entry point.
 *
 * Invokes the interrupt handler with the device IRQ disabled so the stack
 * can make progress when normal interrupt delivery is unavailable.  On
 * old Red Hat netdump kernels the handler is called directly (IRQs are
 * already off in netdump mode) and any pending NAPI poll is drained.
 */
STATIC void
poll_bcm5700(struct net_device *dev)
{
	UM_DEVICE_BLOCK *pUmDevice = dev->priv;

#if defined(RED_HAT_LINUX_KERNEL) && (LINUX_VERSION_CODE < 0x020605)
	if (netdump_mode) {
		bcm5700_interrupt(pUmDevice->pdev->irq, dev, NULL);
#ifdef BCM_NAPI_RXPOLL
		/* drain a scheduled NAPI poll so rx progresses in netdump */
		if (dev->poll_list.prev) {
			int budget = 64;

			bcm5700_poll(dev, &budget);
		}
#endif
	}
	else
#endif
	{
		/* mask our IRQ line to avoid re-entering the handler */
		disable_irq(pUmDevice->pdev->irq);
		bcm5700_interrupt(pUmDevice->pdev->irq, dev, NULL);
		enable_irq(pUmDevice->pdev->irq);
	}
}
5990#endif
5991