• Home
  • History
  • Annotate
  • Line#
  • Navigate
  • Raw
  • Download
  • only in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/net/chelsio/
1/*****************************************************************************
2 *                                                                           *
3 * File: cxgb2.c                                                             *
4 * $Revision: 1.25 $                                                         *
5 * $Date: 2005/06/22 00:43:25 $                                              *
6 * Description:                                                              *
7 *  Chelsio 10Gb Ethernet Driver.                                            *
8 *                                                                           *
9 * This program is free software; you can redistribute it and/or modify      *
10 * it under the terms of the GNU General Public License, version 2, as       *
11 * published by the Free Software Foundation.                                *
12 *                                                                           *
13 * You should have received a copy of the GNU General Public License along   *
14 * with this program; if not, write to the Free Software Foundation, Inc.,   *
15 * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
16 *                                                                           *
17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
18 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
20 *                                                                           *
21 * http://www.chelsio.com                                                    *
22 *                                                                           *
23 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
24 * All rights reserved.                                                      *
25 *                                                                           *
26 * Maintainers: maintainers@chelsio.com                                      *
27 *                                                                           *
28 * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
29 *          Tina Yang               <tainay@chelsio.com>                     *
30 *          Felix Marti             <felix@chelsio.com>                      *
31 *          Scott Bardone           <sbardone@chelsio.com>                   *
32 *          Kurt Ottaway            <kottaway@chelsio.com>                   *
33 *          Frank DiMambro          <frank@chelsio.com>                      *
34 *                                                                           *
35 * History:                                                                  *
36 *                                                                           *
37 ****************************************************************************/
38
39#include "common.h"
40#include <linux/module.h>
41#include <linux/init.h>
42#include <linux/pci.h>
43#include <linux/netdevice.h>
44#include <linux/etherdevice.h>
45#include <linux/if_vlan.h>
46#include <linux/mii.h>
47#include <linux/sockios.h>
48#include <linux/dma-mapping.h>
49#include <asm/uaccess.h>
50
51#include "cpl5_cmd.h"
52#include "regs.h"
53#include "gmac.h"
54#include "cphy.h"
55#include "sge.h"
56#include "tp.h"
57#include "espi.h"
58#include "elmer0.h"
59
60#include <linux/workqueue.h>
61
/* Arm the periodic MAC statistics refresh to run again in @secs seconds. */
static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
{
	schedule_delayed_work(&ap->stats_update_task, secs * HZ);
}
66
/* Cancel a pending (not yet executing) MAC statistics refresh. */
static inline void cancel_mac_stats_update(struct adapter *ap)
{
	cancel_delayed_work(&ap->stats_update_task);
}
71
/* Upper/lower bounds (in descriptor entries) for the SGE rings, enforced
 * by set_sge_param(). */
#define MAX_CMDQ_ENTRIES	16384
#define MAX_CMDQ1_ENTRIES	1024
#define MAX_RX_BUFFERS		16384
#define MAX_RX_JUMBO_BUFFERS	16384
#define MAX_TX_BUFFERS_HIGH	16384U
#define MAX_TX_BUFFERS_LOW	1536U
#define MAX_TX_BUFFERS		1460U
#define MIN_FL_ENTRIES		32

/* Default netif_msg verbosity bitmap for new interfaces. */
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

/*
 * The EEPROM is actually bigger but only the first few bytes are used so we
 * only report those.
 */
#define EEPROM_SIZE 32
90
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("GPL");

/* Default message-enable bitmap; overridable at module load time. */
static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 default message enable bitmap");

/* Clock-mode arguments for t1_clock() — presumably high vs. low core
 * clock; confirm against the t1_clock() implementation. */
#define HCLOCK 0x0
#define LCLOCK 0x1

/* T1 cards powersave mode */
static int t1_clock(struct adapter *adapter, int mode);
static int t1powersave = 1;	/* HW default is powersave mode. */

module_param(t1powersave, int, 0);
MODULE_PARM_DESC(t1powersave, "Enable/Disable T1 powersaving mode");

/* Non-zero forces legacy INTx interrupts instead of MSI (see cxgb_up()). */
static int disable_msi = 0;
module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");

/* Printable PCI bus clock strings (MHz). */
static const char pci_speed[][4] = {
	"33", "66", "100", "133"
};
117
/*
 * Setup MAC to receive the types of packets we want.
 */
static void t1_set_rxmode(struct net_device *dev)
{
	struct adapter *adapter = dev->ml_priv;
	struct cmac *mac = adapter->port[dev->if_port].mac;
	struct t1_rx_mode rm;

	/* The MAC op pulls promisc/allmulti/mc-list state from rm.dev. */
	rm.dev = dev;
	mac->ops->set_rx_mode(mac, &rm);
}
130
131static void link_report(struct port_info *p)
132{
133	if (!netif_carrier_ok(p->dev))
134		printk(KERN_INFO "%s: link down\n", p->dev->name);
135	else {
136		const char *s = "10Mbps";
137
138		switch (p->link_config.speed) {
139			case SPEED_10000: s = "10Gbps"; break;
140			case SPEED_1000:  s = "1000Mbps"; break;
141			case SPEED_100:   s = "100Mbps"; break;
142		}
143
144		printk(KERN_INFO "%s: link up, %s, %s-duplex\n",
145		       p->dev->name, s,
146		       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
147	}
148}
149
150void t1_link_negotiated(struct adapter *adapter, int port_id, int link_stat,
151			int speed, int duplex, int pause)
152{
153	struct port_info *p = &adapter->port[port_id];
154
155	if (link_stat != netif_carrier_ok(p->dev)) {
156		if (link_stat)
157			netif_carrier_on(p->dev);
158		else
159			netif_carrier_off(p->dev);
160		link_report(p);
161
162		/* multi-ports: inform toe */
163		if ((speed > 0) && (adapter->params.nports > 1)) {
164			unsigned int sched_speed = 10;
165			switch (speed) {
166			case SPEED_1000:
167				sched_speed = 1000;
168				break;
169			case SPEED_100:
170				sched_speed = 100;
171				break;
172			case SPEED_10:
173				sched_speed = 10;
174				break;
175			}
176			t1_sched_update_parms(adapter->sge, port_id, 0, sched_speed);
177		}
178	}
179}
180
/*
 * Bring up one port's MAC: reset it, program the station address,
 * restore the RX filter mode, start PHY link bring-up and finally
 * enable both traffic directions.
 */
static void link_start(struct port_info *p)
{
	struct cmac *mac = p->mac;

	mac->ops->reset(mac);
	if (mac->ops->macaddress_set)
		mac->ops->macaddress_set(mac, p->dev->dev_addr);
	t1_set_rxmode(p->dev);
	t1_link_start(p->phy, mac, &p->link_config);
	mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}
192
/* Turn on the TP checksum-offload engines the hardware supports.
 * TCP checksumming is always enabled; IP and UDP only when the board
 * advertises the corresponding capability flags. */
static void enable_hw_csum(struct adapter *adapter)
{
	if (adapter->flags & TSO_CAPABLE)
		t1_tp_set_ip_checksum_offload(adapter->tp, 1);	/* for TSO only */
	if (adapter->flags & UDP_CSUM_CAPABLE)
		t1_tp_set_udp_checksum_offload(adapter->tp, 1);
	t1_tp_set_tcp_checksum_offload(adapter->tp, 1);
}
201
/*
 * Things to do upon first use of a card.
 * This must run with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adapter)
{
	int err = 0;

	/* One-time hardware initialization, done on the first open only. */
	if (!(adapter->flags & FULL_INIT_DONE)) {
		err = t1_init_hw_modules(adapter);
		if (err)
			goto out_err;

		enable_hw_csum(adapter);
		adapter->flags |= FULL_INIT_DONE;
	}

	t1_interrupts_clear(adapter);

	/* Prefer MSI unless disabled by module parameter or unavailable;
	 * legacy INTx must be requested as a shared IRQ. */
	adapter->params.has_msi = !disable_msi && !pci_enable_msi(adapter->pdev);
	err = request_irq(adapter->pdev->irq, t1_interrupt,
			  adapter->params.has_msi ? 0 : IRQF_SHARED,
			  adapter->name, adapter);
	if (err) {
		if (adapter->params.has_msi)
			pci_disable_msi(adapter->pdev);

		goto out_err;
	}

	t1_sge_start(adapter->sge);
	t1_interrupts_enable(adapter);
out_err:
	return err;
}
237
/*
 * Release resources when all the ports have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
	/* Quiesce the SGE and mask interrupts before freeing the IRQ. */
	t1_sge_stop(adapter->sge);
	t1_interrupts_disable(adapter);
	free_irq(adapter->pdev->irq, adapter);
	if (adapter->params.has_msi)
		pci_disable_msi(adapter->pdev);
}
249
/* ndo_open: bring up one port, initializing the adapter on first open
 * and starting the periodic MAC stats task when needed. */
static int cxgb_open(struct net_device *dev)
{
	int err;
	struct adapter *adapter = dev->ml_priv;
	/* Sampled before we add ourselves, so it tells us whether another
	 * port was already up (and thus the stats task already running). */
	int other_ports = adapter->open_device_map & PORT_MASK;

	napi_enable(&adapter->napi);
	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) {
		napi_disable(&adapter->napi);
		return err;
	}

	__set_bit(dev->if_port, &adapter->open_device_map);
	link_start(&adapter->port[dev->if_port]);
	netif_start_queue(dev);
	if (!other_ports && adapter->params.stats_update_period)
		schedule_mac_stats_update(adapter,
					  adapter->params.stats_update_period);
	return 0;
}
270
/* ndo_stop: quiesce one port; on the last close also cancel the stats
 * task and tear down adapter-wide resources. */
static int cxgb_close(struct net_device *dev)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct cmac *mac = p->mac;

	netif_stop_queue(dev);
	napi_disable(&adapter->napi);
	mac->ops->disable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
	netif_carrier_off(dev);

	clear_bit(dev->if_port, &adapter->open_device_map);
	if (adapter->params.stats_update_period &&
	    !(adapter->open_device_map & PORT_MASK)) {
		/* Stop statistics accumulation. */
		smp_mb__after_clear_bit();
		/* Empty lock/unlock pair: waits out a mac_stats_task run
		 * that may be holding work_lock and about to reschedule. */
		spin_lock(&adapter->work_lock);   /* sync with update task */
		spin_unlock(&adapter->work_lock);
		cancel_mac_stats_update(adapter);
	}

	if (!adapter->open_device_map)
		cxgb_down(adapter);
	return 0;
}
296
/* ndo_get_stats: fold the MAC's hardware counters into the per-port
 * struct net_device_stats and return it. */
static struct net_device_stats *t1_get_stats(struct net_device *dev)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct net_device_stats *ns = &p->netstats;
	const struct cmac_statistics *pstats;

	/* Do a full update of the MAC stats */
	pstats = p->mac->ops->statistics_update(p->mac,
						MAC_STATS_UPDATE_FULL);

	ns->tx_packets = pstats->TxUnicastFramesOK +
		pstats->TxMulticastFramesOK + pstats->TxBroadcastFramesOK;

	ns->rx_packets = pstats->RxUnicastFramesOK +
		pstats->RxMulticastFramesOK + pstats->RxBroadcastFramesOK;

	ns->tx_bytes = pstats->TxOctetsOK;
	ns->rx_bytes = pstats->RxOctetsOK;

	ns->tx_errors = pstats->TxLateCollisions + pstats->TxLengthErrors +
		pstats->TxUnderrun + pstats->TxFramesAbortedDueToXSCollisions;
	ns->rx_errors = pstats->RxDataErrors + pstats->RxJabberErrors +
		pstats->RxFCSErrors + pstats->RxAlignErrors +
		pstats->RxSequenceErrors + pstats->RxFrameTooLongErrors +
		pstats->RxSymbolErrors + pstats->RxRuntErrors;

	ns->multicast  = pstats->RxMulticastFramesOK;
	ns->collisions = pstats->TxTotalCollisions;

	/* detailed rx_errors */
	ns->rx_length_errors = pstats->RxFrameTooLongErrors +
		pstats->RxJabberErrors;
	ns->rx_over_errors   = 0;
	ns->rx_crc_errors    = pstats->RxFCSErrors;
	ns->rx_frame_errors  = pstats->RxAlignErrors;
	ns->rx_fifo_errors   = 0;
	ns->rx_missed_errors = 0;

	/* detailed tx_errors */
	ns->tx_aborted_errors   = pstats->TxFramesAbortedDueToXSCollisions;
	ns->tx_carrier_errors   = 0;
	ns->tx_fifo_errors      = pstats->TxUnderrun;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors    = pstats->TxLateCollisions;
	return ns;
}
344
345static u32 get_msglevel(struct net_device *dev)
346{
347	struct adapter *adapter = dev->ml_priv;
348
349	return adapter->msg_enable;
350}
351
352static void set_msglevel(struct net_device *dev, u32 val)
353{
354	struct adapter *adapter = dev->ml_priv;
355
356	adapter->msg_enable = val;
357}
358
/* Names reported for ETH_SS_STATS.  The order here MUST match, slot for
 * slot, the order in which get_stats() writes values into its buffer. */
static char stats_strings[][ETH_GSTRING_LEN] = {
	"TxOctetsOK",
	"TxOctetsBad",
	"TxUnicastFramesOK",
	"TxMulticastFramesOK",
	"TxBroadcastFramesOK",
	"TxPauseFrames",
	"TxFramesWithDeferredXmissions",
	"TxLateCollisions",
	"TxTotalCollisions",
	"TxFramesAbortedDueToXSCollisions",
	"TxUnderrun",
	"TxLengthErrors",
	"TxInternalMACXmitError",
	"TxFramesWithExcessiveDeferral",
	"TxFCSErrors",
	"TxJumboFramesOk",
	"TxJumboOctetsOk",

	"RxOctetsOK",
	"RxOctetsBad",
	"RxUnicastFramesOK",
	"RxMulticastFramesOK",
	"RxBroadcastFramesOK",
	"RxPauseFrames",
	"RxFCSErrors",
	"RxAlignErrors",
	"RxSymbolErrors",
	"RxDataErrors",
	"RxSequenceErrors",
	"RxRuntErrors",
	"RxJabberErrors",
	"RxInternalMACRcvError",
	"RxInRangeLengthErrors",
	"RxOutOfRangeLengthField",
	"RxFrameTooLongErrors",
	"RxJumboFramesOk",
	"RxJumboOctetsOk",

	/* Port stats */
	"RxCsumGood",
	"TxCsumOffload",
	"TxTso",
	"RxVlan",
	"TxVlan",
	"TxNeedHeadroom",

	/* Interrupt stats */
	"rx drops",
	"pure_rsps",
	"unhandled irqs",
	"respQ_empty",
	"respQ_overflow",
	"freelistQ_empty",
	"pkt_too_big",
	"pkt_mismatch",
	"cmdQ_full0",
	"cmdQ_full1",

	"espi_DIP2ParityErr",
	"espi_DIP4Err",
	"espi_RxDrops",
	"espi_TxDrops",
	"espi_RxOvfl",
	"espi_ParityErr"
};
425
/* Size of the register dump produced by get_regs(). */
#define T2_REGMAP_SIZE (3 * 1024)

/* ethtool: length of the buffer get_regs() expects. */
static int get_regs_len(struct net_device *dev)
{
	return T2_REGMAP_SIZE;
}
432
433static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
434{
435	struct adapter *adapter = dev->ml_priv;
436
437	strcpy(info->driver, DRV_NAME);
438	strcpy(info->version, DRV_VERSION);
439	strcpy(info->fw_version, "N/A");
440	strcpy(info->bus_info, pci_name(adapter->pdev));
441}
442
443static int get_sset_count(struct net_device *dev, int sset)
444{
445	switch (sset) {
446	case ETH_SS_STATS:
447		return ARRAY_SIZE(stats_strings);
448	default:
449		return -EOPNOTSUPP;
450	}
451}
452
453static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
454{
455	if (stringset == ETH_SS_STATS)
456		memcpy(data, stats_strings, sizeof(stats_strings));
457}
458
/* ethtool get_ethtool_stats: fill @data in the exact slot order declared
 * by stats_strings[] — MAC counters, SGE port stats, interrupt counters,
 * then (if present) ESPI counters. */
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct adapter *adapter = dev->ml_priv;
	struct cmac *mac = adapter->port[dev->if_port].mac;
	const struct cmac_statistics *s;
	const struct sge_intr_counts *t;
	struct sge_port_stats ss;

	s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL);
	t = t1_sge_get_intr_counts(adapter->sge);
	t1_sge_get_port_stats(adapter->sge, dev->if_port, &ss);

	*data++ = s->TxOctetsOK;
	*data++ = s->TxOctetsBad;
	*data++ = s->TxUnicastFramesOK;
	*data++ = s->TxMulticastFramesOK;
	*data++ = s->TxBroadcastFramesOK;
	*data++ = s->TxPauseFrames;
	*data++ = s->TxFramesWithDeferredXmissions;
	*data++ = s->TxLateCollisions;
	*data++ = s->TxTotalCollisions;
	*data++ = s->TxFramesAbortedDueToXSCollisions;
	*data++ = s->TxUnderrun;
	*data++ = s->TxLengthErrors;
	*data++ = s->TxInternalMACXmitError;
	*data++ = s->TxFramesWithExcessiveDeferral;
	*data++ = s->TxFCSErrors;
	*data++ = s->TxJumboFramesOK;
	*data++ = s->TxJumboOctetsOK;

	*data++ = s->RxOctetsOK;
	*data++ = s->RxOctetsBad;
	*data++ = s->RxUnicastFramesOK;
	*data++ = s->RxMulticastFramesOK;
	*data++ = s->RxBroadcastFramesOK;
	*data++ = s->RxPauseFrames;
	*data++ = s->RxFCSErrors;
	*data++ = s->RxAlignErrors;
	*data++ = s->RxSymbolErrors;
	*data++ = s->RxDataErrors;
	*data++ = s->RxSequenceErrors;
	*data++ = s->RxRuntErrors;
	*data++ = s->RxJabberErrors;
	*data++ = s->RxInternalMACRcvError;
	*data++ = s->RxInRangeLengthErrors;
	*data++ = s->RxOutOfRangeLengthField;
	*data++ = s->RxFrameTooLongErrors;
	*data++ = s->RxJumboFramesOK;
	*data++ = s->RxJumboOctetsOK;

	*data++ = ss.rx_cso_good;
	*data++ = ss.tx_cso;
	*data++ = ss.tx_tso;
	*data++ = ss.vlan_xtract;
	*data++ = ss.vlan_insert;
	*data++ = ss.tx_need_hdrroom;

	*data++ = t->rx_drops;
	*data++ = t->pure_rsps;
	*data++ = t->unhandled_irqs;
	*data++ = t->respQ_empty;
	*data++ = t->respQ_overflow;
	*data++ = t->freelistQ_empty;
	*data++ = t->pkt_too_big;
	*data++ = t->pkt_mismatch;
	*data++ = t->cmdQ_full[0];
	*data++ = t->cmdQ_full[1];

	/* NOTE(review): if adapter->espi is NULL the final six slots are
	 * left unwritten even though get_sset_count() still counts them,
	 * so ethtool would show uninitialized values.  Confirm whether all
	 * supported boards have an ESPI block. */
	if (adapter->espi) {
		const struct espi_intr_counts *e;

		e = t1_espi_get_intr_counts(adapter->espi);
		*data++ = e->DIP2_parity_err;
		*data++ = e->DIP4_err;
		*data++ = e->rx_drops;
		*data++ = e->tx_drops;
		*data++ = e->rx_ovflw;
		*data++ = e->parity_err;
	}
}
540
541static inline void reg_block_dump(struct adapter *ap, void *buf,
542				  unsigned int start, unsigned int end)
543{
544	u32 *p = buf + start;
545
546	for ( ; start <= end; start += sizeof(u32))
547		*p++ = readl(ap->regs + start);
548}
549
/* ethtool get_regs: dump the interesting register blocks of each HW
 * module into a zero-filled T2_REGMAP_SIZE buffer. */
static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	struct adapter *ap = dev->ml_priv;

	/*
	 * Version scheme: bits 0..9: chip version, bits 10..15: chip revision
	 */
	regs->version = 2;

	/* Gaps between the blocks below stay zero from this memset. */
	memset(buf, 0, T2_REGMAP_SIZE);
	reg_block_dump(ap, buf, 0, A_SG_RESPACCUTIMER);
	reg_block_dump(ap, buf, A_MC3_CFG, A_MC4_INT_CAUSE);
	reg_block_dump(ap, buf, A_TPI_ADDR, A_TPI_PAR);
	reg_block_dump(ap, buf, A_TP_IN_CONFIG, A_TP_TX_DROP_COUNT);
	reg_block_dump(ap, buf, A_RAT_ROUTE_CONTROL, A_RAT_INTR_CAUSE);
	reg_block_dump(ap, buf, A_CSPI_RX_AE_WM, A_CSPI_INTR_ENABLE);
	reg_block_dump(ap, buf, A_ESPI_SCH_TOKEN0, A_ESPI_GOSTAT);
	reg_block_dump(ap, buf, A_ULP_ULIMIT, A_ULP_PIO_CTRL);
	reg_block_dump(ap, buf, A_PL_ENABLE, A_PL_CAUSE);
	reg_block_dump(ap, buf, A_MC5_CONFIG, A_MC5_MASK_WRITE_CMD);
}
572
/* ethtool get_settings: report link capabilities and current state. */
static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];

	cmd->supported = p->link_config.supported;
	cmd->advertising = p->link_config.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = p->link_config.speed;
		cmd->duplex = p->link_config.duplex;
	} else {
		/* No link: speed/duplex are unknown (-1). */
		cmd->speed = -1;
		cmd->duplex = -1;
	}

	cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
	cmd->phy_address = p->phy->mdio.prtad;
	cmd->transceiver = XCVR_EXTERNAL;
	cmd->autoneg = p->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
597
598static int speed_duplex_to_caps(int speed, int duplex)
599{
600	int cap = 0;
601
602	switch (speed) {
603	case SPEED_10:
604		if (duplex == DUPLEX_FULL)
605			cap = SUPPORTED_10baseT_Full;
606		else
607			cap = SUPPORTED_10baseT_Half;
608		break;
609	case SPEED_100:
610		if (duplex == DUPLEX_FULL)
611			cap = SUPPORTED_100baseT_Full;
612		else
613			cap = SUPPORTED_100baseT_Half;
614		break;
615	case SPEED_1000:
616		if (duplex == DUPLEX_FULL)
617			cap = SUPPORTED_1000baseT_Full;
618		else
619			cap = SUPPORTED_1000baseT_Half;
620		break;
621	case SPEED_10000:
622		if (duplex == DUPLEX_FULL)
623			cap = SUPPORTED_10000baseT_Full;
624	}
625	return cap;
626}
627
/* Every link-mode advertisement bit this driver understands; anything
 * else requested via ethtool is silently masked off. */
#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
		      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
		      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
		      ADVERTISED_10000baseT_Full)
632
/* ethtool set_settings: change speed/duplex/autoneg.  Only ports whose
 * link supports autonegotiation can be reconfigured at all. */
static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct link_config *lc = &p->link_config;

	if (!(lc->supported & SUPPORTED_Autoneg))
		return -EOPNOTSUPP;             /* can't change speed/duplex */

	if (cmd->autoneg == AUTONEG_DISABLE) {
		int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);

		/* Forced 1Gbps is rejected — presumably because
		 * 1000BASE-T mandates autonegotiation; confirm. */
		if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
			return -EINVAL;
		lc->requested_speed = cmd->speed;
		lc->requested_duplex = cmd->duplex;
		lc->advertising = 0;
	} else {
		cmd->advertising &= ADVERTISED_MASK;
		/* x & (x - 1) is non-zero iff more than one bit is set:
		 * with multiple modes requested, advertise all we support. */
		if (cmd->advertising & (cmd->advertising - 1))
			cmd->advertising = lc->supported;
		cmd->advertising &= lc->supported;
		if (!cmd->advertising)
			return -EINVAL;
		lc->requested_speed = SPEED_INVALID;
		lc->requested_duplex = DUPLEX_INVALID;
		lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
	}
	lc->autoneg = cmd->autoneg;
	if (netif_running(dev))
		t1_link_start(p->phy, p->mac, lc);
	return 0;
}
666
667static void get_pauseparam(struct net_device *dev,
668			   struct ethtool_pauseparam *epause)
669{
670	struct adapter *adapter = dev->ml_priv;
671	struct port_info *p = &adapter->port[dev->if_port];
672
673	epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
674	epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
675	epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
676}
677
/* ethtool: set flow-control parameters.  With autoneg enabled the new
 * settings are renegotiated; otherwise they are forced on the MAC. */
static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct link_config *lc = &p->link_config;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & SUPPORTED_Autoneg)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		return -EINVAL;		/* autoneg requested but unsupported */

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (lc->autoneg == AUTONEG_ENABLE) {
		if (netif_running(dev))
			t1_link_start(p->phy, p->mac, lc);
	} else {
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
		/* -1,-1 keeps the current speed/duplex, changing fc only. */
		if (netif_running(dev))
			p->mac->ops->set_speed_duplex_fc(p->mac, -1, -1,
							 lc->fc);
	}
	return 0;
}
707
708static u32 get_rx_csum(struct net_device *dev)
709{
710	struct adapter *adapter = dev->ml_priv;
711
712	return (adapter->flags & RX_CSUM_ENABLED) != 0;
713}
714
715static int set_rx_csum(struct net_device *dev, u32 data)
716{
717	struct adapter *adapter = dev->ml_priv;
718
719	if (data)
720		adapter->flags |= RX_CSUM_ENABLED;
721	else
722		adapter->flags &= ~RX_CSUM_ENABLED;
723	return 0;
724}
725
726static int set_tso(struct net_device *dev, u32 value)
727{
728	struct adapter *adapter = dev->ml_priv;
729
730	if (!(adapter->flags & TSO_CAPABLE))
731		return value ? -EOPNOTSUPP : 0;
732	return ethtool_op_set_tso(dev, value);
733}
734
/* ethtool get_ringparam: report SGE free-list and command-queue sizes.
 * T1B boards use free list 1 for jumbo buffers, other boards list 0. */
static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct adapter *adapter = dev->ml_priv;
	int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;

	e->rx_max_pending = MAX_RX_BUFFERS;
	e->rx_mini_max_pending = 0;
	e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
	e->tx_max_pending = MAX_CMDQ_ENTRIES;

	e->rx_pending = adapter->params.sge.freelQ_size[!jumbo_fl];
	e->rx_mini_pending = 0;
	e->rx_jumbo_pending = adapter->params.sge.freelQ_size[jumbo_fl];
	e->tx_pending = adapter->params.sge.cmdQ_size[0];
}
750
/* ethtool set_ringparam: validate and store new SGE ring sizes.  Only
 * allowed before the hardware has been fully initialized. */
static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct adapter *adapter = dev->ml_priv;
	int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;

	/* TX ring must hold at least one max-fragmented packet per port
	 * (plus one), or the send path could deadlock. */
	if (e->rx_pending > MAX_RX_BUFFERS || e->rx_mini_pending ||
	    e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
	    e->tx_pending > MAX_CMDQ_ENTRIES ||
	    e->rx_pending < MIN_FL_ENTRIES ||
	    e->rx_jumbo_pending < MIN_FL_ENTRIES ||
	    e->tx_pending < (adapter->params.nports + 1) * (MAX_SKB_FRAGS + 1))
		return -EINVAL;

	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;

	adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending;
	adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending;
	adapter->params.sge.cmdQ_size[0] = e->tx_pending;
	/* Command queue 1 tracks queue 0 but is capped lower. */
	adapter->params.sge.cmdQ_size[1] = e->tx_pending > MAX_CMDQ1_ENTRIES ?
		MAX_CMDQ1_ENTRIES : e->tx_pending;
	return 0;
}
774
/* ethtool: store new interrupt-coalescing parameters and push them to
 * the SGE immediately. */
static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct adapter *adapter = dev->ml_priv;

	adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs;
	adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce;
	adapter->params.sge.sample_interval_usecs = c->rate_sample_interval;
	t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge);
	return 0;
}
785
/* ethtool: report the current interrupt-coalescing parameters. */
static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct adapter *adapter = dev->ml_priv;

	c->rx_coalesce_usecs = adapter->params.sge.rx_coalesce_usecs;
	c->rate_sample_interval = adapter->params.sge.sample_interval_usecs;
	c->use_adaptive_rx_coalesce = adapter->params.sge.coalesce_enable;
	return 0;
}
795
796static int get_eeprom_len(struct net_device *dev)
797{
798	struct adapter *adapter = dev->ml_priv;
799
800	return t1_is_asic(adapter) ? EEPROM_SIZE : 0;
801}
802
/* Magic cookie identifying this device's EEPROM contents to ethtool. */
#define EEPROM_MAGIC(ap) \
	(PCI_VENDOR_ID_CHELSIO | ((ap)->params.chip_version << 16))

/* ethtool get_eeprom: read the requested byte range, widening it to
 * whole 32-bit words for the serial EEPROM interface.
 * NOTE(review): relies on the ethtool core clamping e->offset/e->len to
 * get_eeprom_len() before calling us — otherwise the stack buffer and
 * the memcpy below could overflow; confirm for this kernel version. */
static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 *data)
{
	int i;
	u8 buf[EEPROM_SIZE] __attribute__((aligned(4)));
	struct adapter *adapter = dev->ml_priv;

	e->magic = EEPROM_MAGIC(adapter);
	for (i = e->offset & ~3; i < e->offset + e->len; i += sizeof(u32))
		t1_seeprom_read(adapter, i, (__le32 *)&buf[i]);
	memcpy(data, buf + e->offset, e->len);
	return 0;
}
819
/* ethtool entry points for T1/T2 adapters. */
static const struct ethtool_ops t1_ethtool_ops = {
	.get_settings      = get_settings,
	.set_settings      = set_settings,
	.get_drvinfo       = get_drvinfo,
	.get_msglevel      = get_msglevel,
	.set_msglevel      = set_msglevel,
	.get_ringparam     = get_sge_param,
	.set_ringparam     = set_sge_param,
	.get_coalesce      = get_coalesce,
	.set_coalesce      = set_coalesce,
	.get_eeprom_len    = get_eeprom_len,
	.get_eeprom        = get_eeprom,
	.get_pauseparam    = get_pauseparam,
	.set_pauseparam    = set_pauseparam,
	.get_rx_csum       = get_rx_csum,
	.set_rx_csum       = set_rx_csum,
	.set_tx_csum       = ethtool_op_set_tx_csum,
	.set_sg            = ethtool_op_set_sg,
	.get_link          = ethtool_op_get_link,
	.get_strings       = get_strings,
	.get_sset_count	   = get_sset_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len      = get_regs_len,
	.get_regs          = get_regs,
	.set_tso           = set_tso,
};
846
847static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
848{
849	struct adapter *adapter = dev->ml_priv;
850	struct mdio_if_info *mdio = &adapter->port[dev->if_port].phy->mdio;
851
852	return mdio_mii_ioctl(mdio, if_mii(req), cmd);
853}
854
855static int t1_change_mtu(struct net_device *dev, int new_mtu)
856{
857	int ret;
858	struct adapter *adapter = dev->ml_priv;
859	struct cmac *mac = adapter->port[dev->if_port].mac;
860
861	if (!mac->ops->set_mtu)
862		return -EOPNOTSUPP;
863	if (new_mtu < 68)
864		return -EINVAL;
865	if ((ret = mac->ops->set_mtu(mac, new_mtu)))
866		return ret;
867	dev->mtu = new_mtu;
868	return 0;
869}
870
871static int t1_set_mac_addr(struct net_device *dev, void *p)
872{
873	struct adapter *adapter = dev->ml_priv;
874	struct cmac *mac = adapter->port[dev->if_port].mac;
875	struct sockaddr *addr = p;
876
877	if (!mac->ops->macaddress_set)
878		return -EOPNOTSUPP;
879
880	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
881	mac->ops->macaddress_set(mac, dev->dev_addr);
882	return 0;
883}
884
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
/* Record the VLAN group and toggle HW VLAN acceleration accordingly.
 * async_lock is taken with IRQs off to serialize against the interrupt
 * path that reads vlan_grp. */
static void t1_vlan_rx_register(struct net_device *dev,
				   struct vlan_group *grp)
{
	struct adapter *adapter = dev->ml_priv;

	spin_lock_irq(&adapter->async_lock);
	adapter->vlan_grp = grp;
	t1_set_vlan_accel(adapter, grp != NULL);
	spin_unlock_irq(&adapter->async_lock);
}
#endif
897
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: invoke the interrupt handler directly with local IRQs
 * disabled, so netconsole etc. can make progress without interrupts. */
static void t1_netpoll(struct net_device *dev)
{
	unsigned long flags;
	struct adapter *adapter = dev->ml_priv;

	local_irq_save(flags);
	t1_interrupt(adapter->pdev->irq, adapter);
	local_irq_restore(flags);
}
#endif
909
/*
 * Periodic accumulation of MAC statistics.  This is used only if the MAC
 * does not have any other way to prevent stats counter overflow.
 */
static void mac_stats_task(struct work_struct *work)
{
	int i;
	struct adapter *adapter =
		container_of(work, struct adapter, stats_update_task.work);

	/* Fast (incremental) update for every running port. */
	for_each_port(adapter, i) {
		struct port_info *p = &adapter->port[i];

		if (netif_running(p->dev))
			p->mac->ops->statistics_update(p->mac,
						       MAC_STATS_UPDATE_FAST);
	}

	/* Schedule the next statistics update if any port is active. */
	/* work_lock pairs with the lock/unlock handshake in cxgb_close()
	 * so we never rearm after the last port has been closed. */
	spin_lock(&adapter->work_lock);
	if (adapter->open_device_map & PORT_MASK)
		schedule_mac_stats_update(adapter,
					  adapter->params.stats_update_period);
	spin_unlock(&adapter->work_lock);
}
935
/*
 * Processes elmer0 external interrupts in process context.
 */
static void ext_intr_task(struct work_struct *work)
{
	struct adapter *adapter =
		container_of(work, struct adapter, ext_intr_handler_task);

	t1_elmer0_ext_intr_handler(adapter);

	/* Now reenable external interrupts */
	spin_lock_irq(&adapter->async_lock);
	adapter->slow_intr_mask |= F_PL_INTR_EXT;
	/* Ack the EXT cause bit, then restore the enable mask (EXT was
	 * masked off by t1_elmer0_ext_intr() when the work was queued). */
	writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE);
	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
		   adapter->regs + A_PL_ENABLE);
	spin_unlock_irq(&adapter->async_lock);
}
954
/*
 * Interrupt-context handler for elmer0 external interrupts.
 */
void t1_elmer0_ext_intr(struct adapter *adapter)
{
	/*
	 * Schedule a task to handle external interrupts as we require
	 * a process context.  We disable EXT interrupts in the interim
	 * and let the task reenable them when it's done.
	 */
	adapter->slow_intr_mask &= ~F_PL_INTR_EXT;
	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
		   adapter->regs + A_PL_ENABLE);
	schedule_work(&adapter->ext_intr_handler_task);
}
970
/* Fatal-error path: quiesce the SGE and mask all interrupts (if the HW
 * was ever brought up), then log.  The adapter stays suspended. */
void t1_fatal_err(struct adapter *adapter)
{
	if (adapter->flags & FULL_INIT_DONE) {
		t1_sge_stop(adapter->sge);
		t1_interrupts_disable(adapter);
	}
	pr_alert("%s: encountered fatal error, operation suspended\n",
		 adapter->name);
}
980
/* net_device entry points for each T1/T2 port. */
static const struct net_device_ops cxgb_netdev_ops = {
	.ndo_open		= cxgb_open,
	.ndo_stop		= cxgb_close,
	.ndo_start_xmit		= t1_start_xmit,
	.ndo_get_stats		= t1_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= t1_set_rxmode,
	.ndo_do_ioctl		= t1_ioctl,
	.ndo_change_mtu		= t1_change_mtu,
	.ndo_set_mac_address	= t1_set_mac_addr,
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
	.ndo_vlan_rx_register	= t1_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= t1_netpoll,
#endif
};
998
/*
 * PCI probe routine: bring up one Chelsio T1 adapter.
 *
 * Enables the PCI device, sets up DMA masks, maps BAR0, allocates one
 * net_device per port, initializes the software modules and registers
 * as many ports as possible.  The struct adapter itself lives in the
 * private area of the first port's net_device, so freeing port 0's
 * netdev also frees the adapter.
 *
 * Returns 0 if at least one port registered, negative errno otherwise.
 */
static int __devinit init_one(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	static int version_printed;	/* print driver banner only once */

	int i, err, pci_using_dac = 0;
	unsigned long mmio_start, mmio_len;
	const struct board_info *bi;
	struct adapter *adapter = NULL;
	struct port_info *pi;

	if (!version_printed) {
		printk(KERN_INFO "%s - version %s\n", DRV_DESCRIPTION,
		       DRV_VERSION);
		++version_printed;
	}

	err = pci_enable_device(pdev);
	if (err)
		return err;

	/* BAR0 must be a memory BAR: it holds the register map. */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		pr_err("%s: cannot find PCI device memory base address\n",
		       pci_name(pdev));
		err = -ENODEV;
		goto out_disable_pdev;
	}

	/* Prefer 64-bit DMA; fall back to a 32-bit mask if unavailable. */
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;

		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
			pr_err("%s: unable to obtain 64-bit DMA for "
			       "consistent allocations\n", pci_name(pdev));
			err = -ENODEV;
			goto out_disable_pdev;
		}

	} else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
		pr_err("%s: no usable DMA configuration\n", pci_name(pdev));
		goto out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		pr_err("%s: cannot obtain PCI resources\n", pci_name(pdev));
		goto out_disable_pdev;
	}

	pci_set_master(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	bi = t1_get_board_info(ent->driver_data);

	/*
	 * One net_device per port.  The adapter structure is allocated
	 * inside the first netdev's private area (sizeof(*adapter) only
	 * on the first iteration); later ports get a zero-sized priv.
	 */
	for (i = 0; i < bi->port_number; ++i) {
		struct net_device *netdev;

		netdev = alloc_etherdev(adapter ? 0 : sizeof(*adapter));
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		/* One-time adapter setup, done while creating port 0. */
		if (!adapter) {
			adapter = netdev_priv(netdev);
			adapter->pdev = pdev;
			adapter->port[0].dev = netdev;  /* so we don't leak it */

			adapter->regs = ioremap(mmio_start, mmio_len);
			if (!adapter->regs) {
				pr_err("%s: cannot map device registers\n",
				       pci_name(pdev));
				err = -ENOMEM;
				goto out_free_dev;
			}

			if (t1_get_board_rev(adapter, bi, &adapter->params)) {
				err = -ENODEV;	  /* Can't handle this chip rev */
				goto out_free_dev;
			}

			adapter->name = pci_name(pdev);
			adapter->msg_enable = dflt_msg_enable;
			adapter->mmio_len = mmio_len;

			spin_lock_init(&adapter->tpi_lock);
			spin_lock_init(&adapter->work_lock);
			spin_lock_init(&adapter->async_lock);
			spin_lock_init(&adapter->mac_lock);

			INIT_WORK(&adapter->ext_intr_handler_task,
				  ext_intr_task);
			INIT_DELAYED_WORK(&adapter->stats_update_task,
					  mac_stats_task);

			pci_set_drvdata(pdev, netdev);
		}

		/* Per-port wiring: link the netdev to this port slot. */
		pi = &adapter->port[i];
		pi->dev = netdev;
		netif_carrier_off(netdev);
		netdev->irq = pdev->irq;
		netdev->if_port = i;
		netdev->mem_start = mmio_start;
		netdev->mem_end = mmio_start + mmio_len - 1;
		netdev->ml_priv = adapter;
		netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
		netdev->features |= NETIF_F_LLTX;

		adapter->flags |= RX_CSUM_ENABLED | TCP_CSUM_CAPABLE;
		if (pci_using_dac)
			netdev->features |= NETIF_F_HIGHDMA;
		if (vlan_tso_capable(adapter)) {
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
			adapter->flags |= VLAN_ACCEL_CAPABLE;
			netdev->features |=
				NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif

			/* T204: disable TSO */
			if (!(is_T2(adapter)) || bi->port_number != 4) {
				adapter->flags |= TSO_CAPABLE;
				netdev->features |= NETIF_F_TSO;
			}
		}

		netdev->netdev_ops = &cxgb_netdev_ops;
		/* Reserve headroom for the CPL header prepended on xmit. */
		netdev->hard_header_len += (adapter->flags & TSO_CAPABLE) ?
			sizeof(struct cpl_tx_pkt_lso) : sizeof(struct cpl_tx_pkt);

		netif_napi_add(netdev, &adapter->napi, t1_poll, 64);

		SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops);
	}

	if (t1_init_sw_modules(adapter, bi) < 0) {
		err = -ENODEV;
		goto out_free_dev;
	}

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for (i = 0; i < bi->port_number; ++i) {
		err = register_netdev(adapter->port[i].dev);
		if (err)
			pr_warning("%s: cannot register net device %s, skipping\n",
				   pci_name(pdev), adapter->port[i].dev->name);
		else {
			/*
			 * Change the name we use for messages to the name of
			 * the first successfully registered interface.
			 */
			if (!adapter->registered_device_map)
				adapter->name = adapter->port[i].dev->name;

			__set_bit(i, &adapter->registered_device_map);
		}
	}
	/* err still holds the last register_netdev() failure here. */
	if (!adapter->registered_device_map) {
		pr_err("%s: could not register any net devices\n",
		       pci_name(pdev));
		goto out_release_adapter_res;
	}

	printk(KERN_INFO "%s: %s (rev %d), %s %dMHz/%d-bit\n", adapter->name,
	       bi->desc, adapter->params.chip_revision,
	       adapter->params.pci.is_pcix ? "PCIX" : "PCI",
	       adapter->params.pci.speed, adapter->params.pci.width);

	/*
	 * Set the T1B ASIC and memory clocks.
	 */
	if (t1powersave)
		adapter->t1powersave = LCLOCK;	/* HW default is powersave mode. */
	else
		adapter->t1powersave = HCLOCK;
	if (t1_is_T1B(adapter))
		t1_clock(adapter, t1powersave);

	return 0;

out_release_adapter_res:
	t1_free_sw_modules(adapter);
out_free_dev:
	/* Free in reverse so port 0 (which embeds the adapter) goes last. */
	if (adapter) {
		if (adapter->regs)
			iounmap(adapter->regs);
		for (i = bi->port_number - 1; i >= 0; --i)
			if (adapter->port[i].dev)
				free_netdev(adapter->port[i].dev);
	}
	pci_release_regions(pdev);
out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
1203
1204static void bit_bang(struct adapter *adapter, int bitdata, int nbits)
1205{
1206	int data;
1207	int i;
1208	u32 val;
1209
1210	enum {
1211		S_CLOCK = 1 << 3,
1212		S_DATA = 1 << 4
1213	};
1214
1215	for (i = (nbits - 1); i > -1; i--) {
1216
1217		udelay(50);
1218
1219		data = ((bitdata >> i) & 0x1);
1220		__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1221
1222		if (data)
1223			val |= S_DATA;
1224		else
1225			val &= ~S_DATA;
1226
1227		udelay(50);
1228
1229		/* Set SCLOCK low */
1230		val &= ~S_CLOCK;
1231		__t1_tpi_write(adapter, A_ELMER0_GPO, val);
1232
1233		udelay(50);
1234
1235		/* Write SCLOCK high */
1236		val |= S_CLOCK;
1237		__t1_tpi_write(adapter, A_ELMER0_GPO, val);
1238
1239	}
1240}
1241
1242static int t1_clock(struct adapter *adapter, int mode)
1243{
1244	u32 val;
1245	int M_CORE_VAL;
1246	int M_MEM_VAL;
1247
1248	enum {
1249		M_CORE_BITS	= 9,
1250		T_CORE_VAL	= 0,
1251		T_CORE_BITS	= 2,
1252		N_CORE_VAL	= 0,
1253		N_CORE_BITS	= 2,
1254		M_MEM_BITS	= 9,
1255		T_MEM_VAL	= 0,
1256		T_MEM_BITS	= 2,
1257		N_MEM_VAL	= 0,
1258		N_MEM_BITS	= 2,
1259		NP_LOAD		= 1 << 17,
1260		S_LOAD_MEM	= 1 << 5,
1261		S_LOAD_CORE	= 1 << 6,
1262		S_CLOCK		= 1 << 3
1263	};
1264
1265	if (!t1_is_T1B(adapter))
1266		return -ENODEV;	/* Can't re-clock this chip. */
1267
1268	if (mode & 2)
1269		return 0;	/* show current mode. */
1270
1271	if ((adapter->t1powersave & 1) == (mode & 1))
1272		return -EALREADY;	/* ASIC already running in mode. */
1273
1274	if ((mode & 1) == HCLOCK) {
1275		M_CORE_VAL = 0x14;
1276		M_MEM_VAL = 0x18;
1277		adapter->t1powersave = HCLOCK;	/* overclock */
1278	} else {
1279		M_CORE_VAL = 0xe;
1280		M_MEM_VAL = 0x10;
1281		adapter->t1powersave = LCLOCK;	/* underclock */
1282	}
1283
1284	/* Don't interrupt this serial stream! */
1285	spin_lock(&adapter->tpi_lock);
1286
1287	/* Initialize for ASIC core */
1288	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1289	val |= NP_LOAD;
1290	udelay(50);
1291	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
1292	udelay(50);
1293	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1294	val &= ~S_LOAD_CORE;
1295	val &= ~S_CLOCK;
1296	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
1297	udelay(50);
1298
1299	/* Serial program the ASIC clock synthesizer */
1300	bit_bang(adapter, T_CORE_VAL, T_CORE_BITS);
1301	bit_bang(adapter, N_CORE_VAL, N_CORE_BITS);
1302	bit_bang(adapter, M_CORE_VAL, M_CORE_BITS);
1303	udelay(50);
1304
1305	/* Finish ASIC core */
1306	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1307	val |= S_LOAD_CORE;
1308	udelay(50);
1309	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
1310	udelay(50);
1311	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1312	val &= ~S_LOAD_CORE;
1313	udelay(50);
1314	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
1315	udelay(50);
1316
1317	/* Initialize for memory */
1318	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1319	val |= NP_LOAD;
1320	udelay(50);
1321	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
1322	udelay(50);
1323	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1324	val &= ~S_LOAD_MEM;
1325	val &= ~S_CLOCK;
1326	udelay(50);
1327	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
1328	udelay(50);
1329
1330	/* Serial program the memory clock synthesizer */
1331	bit_bang(adapter, T_MEM_VAL, T_MEM_BITS);
1332	bit_bang(adapter, N_MEM_VAL, N_MEM_BITS);
1333	bit_bang(adapter, M_MEM_VAL, M_MEM_BITS);
1334	udelay(50);
1335
1336	/* Finish memory */
1337	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1338	val |= S_LOAD_MEM;
1339	udelay(50);
1340	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
1341	udelay(50);
1342	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1343	val &= ~S_LOAD_MEM;
1344	udelay(50);
1345	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
1346
1347	spin_unlock(&adapter->tpi_lock);
1348
1349	return 0;
1350}
1351
1352static inline void t1_sw_reset(struct pci_dev *pdev)
1353{
1354	pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 3);
1355	pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 0);
1356}
1357
/*
 * PCI remove: tear down the adapter set up by init_one().
 *
 * Unregisters every port that actually registered, releases the
 * software modules and the register mapping, frees the netdevs, and
 * finally resets the chip.  Port 0's netdev embeds the struct adapter
 * (see init_one), so it must be freed after all other adapter use.
 */
static void __devexit remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct adapter *adapter = dev->ml_priv;
	int i;

	for_each_port(adapter, i) {
		if (test_bit(i, &adapter->registered_device_map))
			unregister_netdev(adapter->port[i].dev);
	}

	t1_free_sw_modules(adapter);
	iounmap(adapter->regs);

	/* NOTE(review): relies on for_each_port leaving i equal to the
	 * port count so the loop below frees every netdev in reverse
	 * (port 0, holding the adapter, last) — confirm macro def. */
	while (--i >= 0) {
		if (adapter->port[i].dev)
			free_netdev(adapter->port[i].dev);
	}

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	t1_sw_reset(pdev);
}
1382
/* PCI driver glue: binds init_one()/remove_one() to the T1 ID table. */
static struct pci_driver driver = {
	.name     = DRV_NAME,
	.id_table = t1_pci_tbl,
	.probe    = init_one,
	.remove   = __devexit_p(remove_one),
};
1389
1390static int __init t1_init_module(void)
1391{
1392	return pci_register_driver(&driver);
1393}
1394
/*
 * Module exit point: unregister the PCI driver, which causes the core
 * to invoke remove_one() for every bound device.
 */
static void __exit t1_cleanup_module(void)
{
	pci_unregister_driver(&driver);
}
1399
/* Standard module entry/exit hook registration. */
module_init(t1_init_module);
module_exit(t1_cleanup_module);
1402