1/*****************************************************************************
2 *                                                                           *
3 * File: cxgb2.c                                                             *
4 * $Revision: 1.1.1.1 $                                                         *
5 * $Date: 2007/08/03 18:52:46 $                                              *
6 * Description:                                                              *
7 *  Chelsio 10Gb Ethernet Driver.                                            *
8 *                                                                           *
9 * This program is free software; you can redistribute it and/or modify      *
10 * it under the terms of the GNU General Public License, version 2, as       *
11 * published by the Free Software Foundation.                                *
12 *                                                                           *
13 * You should have received a copy of the GNU General Public License along   *
14 * with this program; if not, write to the Free Software Foundation, Inc.,   *
15 * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
16 *                                                                           *
17 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
18 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
19 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
20 *                                                                           *
21 * http://www.chelsio.com                                                    *
22 *                                                                           *
23 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
24 * All rights reserved.                                                      *
25 *                                                                           *
26 * Maintainers: maintainers@chelsio.com                                      *
27 *                                                                           *
28 * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
29 *          Tina Yang               <tainay@chelsio.com>                     *
30 *          Felix Marti             <felix@chelsio.com>                      *
31 *          Scott Bardone           <sbardone@chelsio.com>                   *
32 *          Kurt Ottaway            <kottaway@chelsio.com>                   *
33 *          Frank DiMambro          <frank@chelsio.com>                      *
34 *                                                                           *
35 * History:                                                                  *
36 *                                                                           *
37 ****************************************************************************/
38
#include "common.h"

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mii.h>
#include <linux/sockios.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <asm/uaccess.h>

#include "cpl5_cmd.h"
#include "regs.h"
#include "gmac.h"
#include "cphy.h"
#include "sge.h"
#include "tp.h"
#include "espi.h"
#include "elmer0.h"
61
62static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
63{
64	schedule_delayed_work(&ap->stats_update_task, secs * HZ);
65}
66
/*
 * Cancel a pending MAC statistics update.  Does not wait for an update
 * that is already executing; callers that need that must synchronize
 * separately (see the work_lock handshake in cxgb_close()).
 */
static inline void cancel_mac_stats_update(struct adapter *ap)
{
	cancel_delayed_work(&ap->stats_update_task);
}
71
72#define MAX_CMDQ_ENTRIES	16384
73#define MAX_CMDQ1_ENTRIES	1024
74#define MAX_RX_BUFFERS		16384
75#define MAX_RX_JUMBO_BUFFERS	16384
76#define MAX_TX_BUFFERS_HIGH	16384U
77#define MAX_TX_BUFFERS_LOW	1536U
78#define MAX_TX_BUFFERS		1460U
79#define MIN_FL_ENTRIES		32
80
81#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
82			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
83			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
84
85/*
86 * The EEPROM is actually bigger but only the first few bytes are used so we
87 * only report those.
88 */
89#define EEPROM_SIZE 32
90
91MODULE_DESCRIPTION(DRV_DESCRIPTION);
92MODULE_AUTHOR("Chelsio Communications");
93MODULE_LICENSE("GPL");
94
95static int dflt_msg_enable = DFLT_MSG_ENABLE;
96
97module_param(dflt_msg_enable, int, 0);
98MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 default message enable bitmap");
99
100#define HCLOCK 0x0
101#define LCLOCK 0x1
102
103/* T1 cards powersave mode */
104static int t1_clock(struct adapter *adapter, int mode);
105static int t1powersave = 1;	/* HW default is powersave mode. */
106
107module_param(t1powersave, int, 0);
108MODULE_PARM_DESC(t1powersave, "Enable/Disable T1 powersaving mode");
109
110static int disable_msi = 0;
111module_param(disable_msi, int, 0);
112MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
113
114static const char pci_speed[][4] = {
115	"33", "66", "100", "133"
116};
117
118/*
119 * Setup MAC to receive the types of packets we want.
120 */
121static void t1_set_rxmode(struct net_device *dev)
122{
123	struct adapter *adapter = dev->priv;
124	struct cmac *mac = adapter->port[dev->if_port].mac;
125	struct t1_rx_mode rm;
126
127	rm.dev = dev;
128	rm.idx = 0;
129	rm.list = dev->mc_list;
130	mac->ops->set_rx_mode(mac, &rm);
131}
132
133static void link_report(struct port_info *p)
134{
135	if (!netif_carrier_ok(p->dev))
136		printk(KERN_INFO "%s: link down\n", p->dev->name);
137	else {
138		const char *s = "10Mbps";
139
140		switch (p->link_config.speed) {
141			case SPEED_10000: s = "10Gbps"; break;
142			case SPEED_1000:  s = "1000Mbps"; break;
143			case SPEED_100:   s = "100Mbps"; break;
144		}
145
146		printk(KERN_INFO "%s: link up, %s, %s-duplex\n",
147		       p->dev->name, s,
148		       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
149	}
150}
151
152void t1_link_negotiated(struct adapter *adapter, int port_id, int link_stat,
153			int speed, int duplex, int pause)
154{
155	struct port_info *p = &adapter->port[port_id];
156
157	if (link_stat != netif_carrier_ok(p->dev)) {
158		if (link_stat)
159			netif_carrier_on(p->dev);
160		else
161			netif_carrier_off(p->dev);
162		link_report(p);
163
164		/* multi-ports: inform toe */
165		if ((speed > 0) && (adapter->params.nports > 1)) {
166			unsigned int sched_speed = 10;
167			switch (speed) {
168			case SPEED_1000:
169				sched_speed = 1000;
170				break;
171			case SPEED_100:
172				sched_speed = 100;
173				break;
174			case SPEED_10:
175				sched_speed = 10;
176				break;
177			}
178			t1_sched_update_parms(adapter->sge, port_id, 0, sched_speed);
179		}
180	}
181}
182
/*
 * Bring up one port's link: reset the MAC, program its station address,
 * restore the receive mode, start PHY negotiation, and finally enable
 * the MAC in both directions.  The order mirrors the hardware's
 * bring-up requirements; do not reorder casually.
 */
static void link_start(struct port_info *p)
{
	struct cmac *mac = p->mac;

	mac->ops->reset(mac);
	if (mac->ops->macaddress_set)
		mac->ops->macaddress_set(mac, p->dev->dev_addr);
	t1_set_rxmode(p->dev);
	t1_link_start(p->phy, mac, &p->link_config);
	mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}
194
/*
 * Turn on the checksum offloads this adapter supports in the TP module.
 * TCP checksum offload is always enabled; IP and UDP offloads only when
 * the corresponding capability flag is set.
 */
static void enable_hw_csum(struct adapter *adapter)
{
	if (adapter->flags & TSO_CAPABLE)
		t1_tp_set_ip_checksum_offload(adapter->tp, 1);	/* for TSO only */
	if (adapter->flags & UDP_CSUM_CAPABLE)
		t1_tp_set_udp_checksum_offload(adapter->tp, 1);
	t1_tp_set_tcp_checksum_offload(adapter->tp, 1);
}
203
204/*
205 * Things to do upon first use of a card.
206 * This must run with the rtnl lock held.
207 */
static int cxgb_up(struct adapter *adapter)
{
	int err = 0;

	/* One-time hardware initialization, done on the first open only. */
	if (!(adapter->flags & FULL_INIT_DONE)) {
		err = t1_init_hw_modules(adapter);
		if (err)
			goto out_err;

		enable_hw_csum(adapter);
		adapter->flags |= FULL_INIT_DONE;
	}

	t1_interrupts_clear(adapter);

	/* Prefer MSI unless disabled by module parameter; fall back to a
	 * shared legacy interrupt when MSI cannot be enabled.
	 */
	adapter->params.has_msi = !disable_msi && !pci_enable_msi(adapter->pdev);
	err = request_irq(adapter->pdev->irq, t1_interrupt,
			  adapter->params.has_msi ? 0 : IRQF_SHARED,
			  adapter->name, adapter);
	if (err) {
		if (adapter->params.has_msi)
			pci_disable_msi(adapter->pdev);

		goto out_err;
	}

	t1_sge_start(adapter->sge);
	t1_interrupts_enable(adapter);
out_err:
	return err;
}
239
240/*
241 * Release resources when all the ports have been stopped.
242 */
static void cxgb_down(struct adapter *adapter)
{
	/* Quiesce DMA and mask interrupts before releasing the IRQ so no
	 * handler can run against a half-torn-down adapter.
	 */
	t1_sge_stop(adapter->sge);
	t1_interrupts_disable(adapter);
	free_irq(adapter->pdev->irq, adapter);
	if (adapter->params.has_msi)
		pci_disable_msi(adapter->pdev);
}
251
/* net_device open handler: bring up one port (and the adapter if needed). */
static int cxgb_open(struct net_device *dev)
{
	int err;
	struct adapter *adapter = dev->priv;
	int other_ports = adapter->open_device_map & PORT_MASK;

	/* First port opened brings the whole adapter up. */
	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
		return err;

	__set_bit(dev->if_port, &adapter->open_device_map);
	link_start(&adapter->port[dev->if_port]);
	netif_start_queue(dev);
	/* Start the periodic MAC statistics accumulator with the first
	 * active port; subsequent opens leave the running task alone.
	 */
	if (!other_ports && adapter->params.stats_update_period)
		schedule_mac_stats_update(adapter,
					  adapter->params.stats_update_period);
	return 0;
}
269
/* net_device stop handler: quiesce one port (and the adapter when last). */
static int cxgb_close(struct net_device *dev)
{
	struct adapter *adapter = dev->priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct cmac *mac = p->mac;

	netif_stop_queue(dev);
	mac->ops->disable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
	netif_carrier_off(dev);

	clear_bit(dev->if_port, &adapter->open_device_map);
	if (adapter->params.stats_update_period &&
	    !(adapter->open_device_map & PORT_MASK)) {
		/* Stop statistics accumulation. */
		smp_mb__after_clear_bit();
		/* Taking and dropping work_lock lets a mac_stats_task that
		 * is mid-flight finish its reschedule decision (it checks
		 * open_device_map under this lock) before we cancel, so the
		 * delayed work cannot be re-armed behind our back.
		 */
		spin_lock(&adapter->work_lock);   /* sync with update task */
		spin_unlock(&adapter->work_lock);
		cancel_mac_stats_update(adapter);
	}

	if (!adapter->open_device_map)
		cxgb_down(adapter);
	return 0;
}
294
/*
 * net_device get_stats handler: refresh the MAC statistics and map them
 * onto the generic net_device_stats layout for this port.
 */
static struct net_device_stats *t1_get_stats(struct net_device *dev)
{
	struct adapter *adapter = dev->priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct net_device_stats *ns = &p->netstats;
	const struct cmac_statistics *pstats;

	/* Do a full update of the MAC stats */
	pstats = p->mac->ops->statistics_update(p->mac,
						MAC_STATS_UPDATE_FULL);

	ns->tx_packets = pstats->TxUnicastFramesOK +
		pstats->TxMulticastFramesOK + pstats->TxBroadcastFramesOK;

	ns->rx_packets = pstats->RxUnicastFramesOK +
		pstats->RxMulticastFramesOK + pstats->RxBroadcastFramesOK;

	ns->tx_bytes = pstats->TxOctetsOK;
	ns->rx_bytes = pstats->RxOctetsOK;

	ns->tx_errors = pstats->TxLateCollisions + pstats->TxLengthErrors +
		pstats->TxUnderrun + pstats->TxFramesAbortedDueToXSCollisions;
	ns->rx_errors = pstats->RxDataErrors + pstats->RxJabberErrors +
		pstats->RxFCSErrors + pstats->RxAlignErrors +
		pstats->RxSequenceErrors + pstats->RxFrameTooLongErrors +
		pstats->RxSymbolErrors + pstats->RxRuntErrors;

	/* Only successfully received multicast frames are counted here. */
	ns->multicast  = pstats->RxMulticastFramesOK;
	ns->collisions = pstats->TxTotalCollisions;

	/* detailed rx_errors */
	ns->rx_length_errors = pstats->RxFrameTooLongErrors +
		pstats->RxJabberErrors;
	ns->rx_over_errors   = 0;
	ns->rx_crc_errors    = pstats->RxFCSErrors;
	ns->rx_frame_errors  = pstats->RxAlignErrors;
	ns->rx_fifo_errors   = 0;
	ns->rx_missed_errors = 0;

	/* detailed tx_errors */
	ns->tx_aborted_errors   = pstats->TxFramesAbortedDueToXSCollisions;
	ns->tx_carrier_errors   = 0;
	ns->tx_fifo_errors      = pstats->TxUnderrun;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors    = pstats->TxLateCollisions;
	return ns;
}
342
343static u32 get_msglevel(struct net_device *dev)
344{
345	struct adapter *adapter = dev->priv;
346
347	return adapter->msg_enable;
348}
349
350static void set_msglevel(struct net_device *dev, u32 val)
351{
352	struct adapter *adapter = dev->priv;
353
354	adapter->msg_enable = val;
355}
356
/*
 * Names exported through ETH_SS_STATS.  The order here must match exactly
 * the order in which get_stats() emits values, and the array length must
 * equal get_stats_count().
 */
static char stats_strings[][ETH_GSTRING_LEN] = {
	"TxOctetsOK",
	"TxOctetsBad",
	"TxUnicastFramesOK",
	"TxMulticastFramesOK",
	"TxBroadcastFramesOK",
	"TxPauseFrames",
	"TxFramesWithDeferredXmissions",
	"TxLateCollisions",
	"TxTotalCollisions",
	"TxFramesAbortedDueToXSCollisions",
	"TxUnderrun",
	"TxLengthErrors",
	"TxInternalMACXmitError",
	"TxFramesWithExcessiveDeferral",
	"TxFCSErrors",

	"RxOctetsOK",
	"RxOctetsBad",
	"RxUnicastFramesOK",
	"RxMulticastFramesOK",
	"RxBroadcastFramesOK",
	"RxPauseFrames",
	"RxFCSErrors",
	"RxAlignErrors",
	"RxSymbolErrors",
	"RxDataErrors",
	"RxSequenceErrors",
	"RxRuntErrors",
	"RxJabberErrors",
	"RxInternalMACRcvError",
	"RxInRangeLengthErrors",
	"RxOutOfRangeLengthField",
	"RxFrameTooLongErrors",

	/* Port stats */
	"RxPackets",
	"RxCsumGood",
	"TxPackets",
	"TxCsumOffload",
	"TxTso",
	"RxVlan",
	"TxVlan",

	/* Interrupt stats */
	"rx drops",
	"pure_rsps",
	"unhandled irqs",
	"respQ_empty",
	"respQ_overflow",
	"freelistQ_empty",
	"pkt_too_big",
	"pkt_mismatch",
	"cmdQ_full0",
	"cmdQ_full1",

	/* ESPI stats - only filled in by get_stats() when adapter->espi is set */
	"espi_DIP2ParityErr",
	"espi_DIP4Err",
	"espi_RxDrops",
	"espi_TxDrops",
	"espi_RxOvfl",
	"espi_ParityErr"
};
420
421#define T2_REGMAP_SIZE (3 * 1024)
422
/* ethtool: size in bytes of the register dump produced by get_regs(). */
static int get_regs_len(struct net_device *dev)
{
	return T2_REGMAP_SIZE;
}
427
428static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
429{
430	struct adapter *adapter = dev->priv;
431
432	strcpy(info->driver, DRV_NAME);
433	strcpy(info->version, DRV_VERSION);
434	strcpy(info->fw_version, "N/A");
435	strcpy(info->bus_info, pci_name(adapter->pdev));
436}
437
/* ethtool: number of u64 statistics exported (one per stats_strings entry). */
static int get_stats_count(struct net_device *dev)
{
	return ARRAY_SIZE(stats_strings);
}
442
443static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
444{
445	if (stringset == ETH_SS_STATS)
446		memcpy(data, stats_strings, sizeof(stats_strings));
447}
448
449static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
450		      u64 *data)
451{
452	struct adapter *adapter = dev->priv;
453	struct cmac *mac = adapter->port[dev->if_port].mac;
454	const struct cmac_statistics *s;
455	const struct sge_intr_counts *t;
456	struct sge_port_stats ss;
457	unsigned int len;
458
459	s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL);
460
461	len = sizeof(u64)*(&s->TxFCSErrors + 1 - &s->TxOctetsOK);
462	memcpy(data, &s->TxOctetsOK, len);
463	data += len;
464
465	len = sizeof(u64)*(&s->RxFrameTooLongErrors + 1 - &s->RxOctetsOK);
466	memcpy(data, &s->RxOctetsOK, len);
467	data += len;
468
469	t1_sge_get_port_stats(adapter->sge, dev->if_port, &ss);
470	memcpy(data, &ss, sizeof(ss));
471	data += sizeof(ss);
472
473	t = t1_sge_get_intr_counts(adapter->sge);
474	*data++ = t->rx_drops;
475	*data++ = t->pure_rsps;
476	*data++ = t->unhandled_irqs;
477	*data++ = t->respQ_empty;
478	*data++ = t->respQ_overflow;
479	*data++ = t->freelistQ_empty;
480	*data++ = t->pkt_too_big;
481	*data++ = t->pkt_mismatch;
482	*data++ = t->cmdQ_full[0];
483	*data++ = t->cmdQ_full[1];
484
485	if (adapter->espi) {
486		const struct espi_intr_counts *e;
487
488		e = t1_espi_get_intr_counts(adapter->espi);
489		*data++ = e->DIP2_parity_err;
490		*data++ = e->DIP4_err;
491		*data++ = e->rx_drops;
492		*data++ = e->tx_drops;
493		*data++ = e->rx_ovflw;
494		*data++ = e->parity_err;
495	}
496}
497
/*
 * Copy the adapter registers in [start, end] (byte offsets, inclusive,
 * 32-bit aligned) into the dump buffer at the same byte offsets.
 */
static inline void reg_block_dump(struct adapter *ap, void *buf,
				  unsigned int start, unsigned int end)
{
	u32 *p = buf + start;	/* void* arithmetic (GCC): advance by bytes */

	for ( ; start <= end; start += sizeof(u32))
		*p++ = readl(ap->regs + start);
}
506
/* ethtool: dump the register blocks of the individual hardware modules. */
static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	struct adapter *ap = dev->priv;

	/*
	 * Version scheme: bits 0..9: chip version, bits 10..15: chip revision
	 */
	regs->version = 2;

	/* Zero the buffer first so the gaps between blocks read as 0. */
	memset(buf, 0, T2_REGMAP_SIZE);
	reg_block_dump(ap, buf, 0, A_SG_RESPACCUTIMER);
	reg_block_dump(ap, buf, A_MC3_CFG, A_MC4_INT_CAUSE);
	reg_block_dump(ap, buf, A_TPI_ADDR, A_TPI_PAR);
	reg_block_dump(ap, buf, A_TP_IN_CONFIG, A_TP_TX_DROP_COUNT);
	reg_block_dump(ap, buf, A_RAT_ROUTE_CONTROL, A_RAT_INTR_CAUSE);
	reg_block_dump(ap, buf, A_CSPI_RX_AE_WM, A_CSPI_INTR_ENABLE);
	reg_block_dump(ap, buf, A_ESPI_SCH_TOKEN0, A_ESPI_GOSTAT);
	reg_block_dump(ap, buf, A_ULP_ULIMIT, A_ULP_PIO_CTRL);
	reg_block_dump(ap, buf, A_PL_ENABLE, A_PL_CAUSE);
	reg_block_dump(ap, buf, A_MC5_CONFIG, A_MC5_MASK_WRITE_CMD);
}
529
/* ethtool: report the current link configuration of this port. */
static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct adapter *adapter = dev->priv;
	struct port_info *p = &adapter->port[dev->if_port];

	cmd->supported = p->link_config.supported;
	cmd->advertising = p->link_config.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = p->link_config.speed;
		cmd->duplex = p->link_config.duplex;
	} else {
		/* Link down: -1 signals "unknown" to ethtool userspace. */
		cmd->speed = -1;
		cmd->duplex = -1;
	}

	cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
	cmd->phy_address = p->phy->addr;
	cmd->transceiver = XCVR_EXTERNAL;
	cmd->autoneg = p->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
554
555static int speed_duplex_to_caps(int speed, int duplex)
556{
557	int cap = 0;
558
559	switch (speed) {
560	case SPEED_10:
561		if (duplex == DUPLEX_FULL)
562			cap = SUPPORTED_10baseT_Full;
563		else
564			cap = SUPPORTED_10baseT_Half;
565		break;
566	case SPEED_100:
567		if (duplex == DUPLEX_FULL)
568			cap = SUPPORTED_100baseT_Full;
569		else
570			cap = SUPPORTED_100baseT_Half;
571		break;
572	case SPEED_1000:
573		if (duplex == DUPLEX_FULL)
574			cap = SUPPORTED_1000baseT_Full;
575		else
576			cap = SUPPORTED_1000baseT_Half;
577		break;
578	case SPEED_10000:
579		if (duplex == DUPLEX_FULL)
580			cap = SUPPORTED_10000baseT_Full;
581	}
582	return cap;
583}
584
585#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
586		      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
587		      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
588		      ADVERTISED_10000baseT_Full)
589
/*
 * ethtool: change the link configuration.  With autoneg disabled a single
 * exact speed/duplex is forced; with it enabled the requested advertising
 * mask is sanitized against what the PHY supports.
 */
static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct adapter *adapter = dev->priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct link_config *lc = &p->link_config;

	if (!(lc->supported & SUPPORTED_Autoneg))
		return -EOPNOTSUPP;             /* can't change speed/duplex */

	if (cmd->autoneg == AUTONEG_DISABLE) {
		int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);

		/* Forcing 1000Mbps is explicitly rejected here. */
		if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
			return -EINVAL;
		lc->requested_speed = cmd->speed;
		lc->requested_duplex = cmd->duplex;
		lc->advertising = 0;
	} else {
		cmd->advertising &= ADVERTISED_MASK;
		/* x & (x - 1) != 0 means more than one mode bit is set; in
		 * that case advertise everything we support instead of an
		 * arbitrary subset.
		 */
		if (cmd->advertising & (cmd->advertising - 1))
			cmd->advertising = lc->supported;
		cmd->advertising &= lc->supported;
		if (!cmd->advertising)
			return -EINVAL;
		lc->requested_speed = SPEED_INVALID;
		lc->requested_duplex = DUPLEX_INVALID;
		lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
	}
	lc->autoneg = cmd->autoneg;
	/* Apply immediately when the interface is up; otherwise the new
	 * settings take effect on the next link_start().
	 */
	if (netif_running(dev))
		t1_link_start(p->phy, p->mac, lc);
	return 0;
}
623
624static void get_pauseparam(struct net_device *dev,
625			   struct ethtool_pauseparam *epause)
626{
627	struct adapter *adapter = dev->priv;
628	struct port_info *p = &adapter->port[dev->if_port];
629
630	epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
631	epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
632	epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
633}
634
/*
 * ethtool: change pause-frame (flow control) configuration.  When pause
 * autoneg is requested the PHY must support autonegotiation; otherwise
 * the requested RX/TX pause settings are applied directly to the MAC.
 */
static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct adapter *adapter = dev->priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct link_config *lc = &p->link_config;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & SUPPORTED_Autoneg)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		return -EINVAL;

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (lc->autoneg == AUTONEG_ENABLE) {
		/* Renegotiate so the link partner learns the new setting. */
		if (netif_running(dev))
			t1_link_start(p->phy, p->mac, lc);
	} else {
		/* No autoneg: program the MAC directly, keeping speed and
		 * duplex unchanged (-1 means "leave as is").
		 */
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
		if (netif_running(dev))
			p->mac->ops->set_speed_duplex_fc(p->mac, -1, -1,
							 lc->fc);
	}
	return 0;
}
664
665static u32 get_rx_csum(struct net_device *dev)
666{
667	struct adapter *adapter = dev->priv;
668
669	return (adapter->flags & RX_CSUM_ENABLED) != 0;
670}
671
/* ethtool: enable/disable RX checksum offload (a software flag checked
 * by the receive path; no hardware reprogramming is needed here).
 */
static int set_rx_csum(struct net_device *dev, u32 data)
{
	struct adapter *adapter = dev->priv;

	if (data)
		adapter->flags |= RX_CSUM_ENABLED;
	else
		adapter->flags &= ~RX_CSUM_ENABLED;
	return 0;
}
682
683static int set_tso(struct net_device *dev, u32 value)
684{
685	struct adapter *adapter = dev->priv;
686
687	if (!(adapter->flags & TSO_CAPABLE))
688		return value ? -EOPNOTSUPP : 0;
689	return ethtool_op_set_tso(dev, value);
690}
691
/*
 * ethtool: report SGE ring sizes.  Free list 'jumbo_fl' holds jumbo
 * buffers; on T1B that is list 1, otherwise list 0.
 */
static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct adapter *adapter = dev->priv;
	int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;

	e->rx_max_pending = MAX_RX_BUFFERS;
	e->rx_mini_max_pending = 0;
	e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
	e->tx_max_pending = MAX_CMDQ_ENTRIES;

	e->rx_pending = adapter->params.sge.freelQ_size[!jumbo_fl];
	e->rx_mini_pending = 0;
	e->rx_jumbo_pending = adapter->params.sge.freelQ_size[jumbo_fl];
	e->tx_pending = adapter->params.sge.cmdQ_size[0];
}
707
/*
 * ethtool: change SGE ring sizes.  Only allowed before the hardware is
 * fully initialized; afterwards the rings are live and cannot be resized.
 */
static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct adapter *adapter = dev->priv;
	int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;

	/* Reject out-of-range sizes; the TX ring must also be able to hold
	 * one maximally-fragmented packet per port plus one.
	 */
	if (e->rx_pending > MAX_RX_BUFFERS || e->rx_mini_pending ||
	    e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
	    e->tx_pending > MAX_CMDQ_ENTRIES ||
	    e->rx_pending < MIN_FL_ENTRIES ||
	    e->rx_jumbo_pending < MIN_FL_ENTRIES ||
	    e->tx_pending < (adapter->params.nports + 1) * (MAX_SKB_FRAGS + 1))
		return -EINVAL;

	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;

	adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending;
	adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending;
	adapter->params.sge.cmdQ_size[0] = e->tx_pending;
	/* Command queue 1 is capped at a smaller maximum. */
	adapter->params.sge.cmdQ_size[1] = e->tx_pending > MAX_CMDQ1_ENTRIES ?
		MAX_CMDQ1_ENTRIES : e->tx_pending;
	return 0;
}
731
732static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
733{
734	struct adapter *adapter = dev->priv;
735
736	adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs;
737	adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce;
738	adapter->params.sge.sample_interval_usecs = c->rate_sample_interval;
739	t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge);
740	return 0;
741}
742
743static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
744{
745	struct adapter *adapter = dev->priv;
746
747	c->rx_coalesce_usecs = adapter->params.sge.rx_coalesce_usecs;
748	c->rate_sample_interval = adapter->params.sge.sample_interval_usecs;
749	c->use_adaptive_rx_coalesce = adapter->params.sge.coalesce_enable;
750	return 0;
751}
752
/* ethtool: EEPROM size in bytes; non-ASIC (FPGA) cards report none. */
static int get_eeprom_len(struct net_device *dev)
{
	struct adapter *adapter = dev->priv;

	return t1_is_asic(adapter) ? EEPROM_SIZE : 0;
}
759
760#define EEPROM_MAGIC(ap) \
761	(PCI_VENDOR_ID_CHELSIO | ((ap)->params.chip_version << 16))
762
/*
 * ethtool: read the serial EEPROM.  Whole 32-bit words covering the
 * requested byte range are read into an aligned bounce buffer, then
 * exactly the requested bytes are copied out.
 * NOTE(review): relies on the caller bounding e->offset/e->len against
 * get_eeprom_len() so 'buf' cannot overflow - confirm.
 */
static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 *data)
{
	int i;
	u8 buf[EEPROM_SIZE] __attribute__((aligned(4)));
	struct adapter *adapter = dev->priv;

	e->magic = EEPROM_MAGIC(adapter);
	for (i = e->offset & ~3; i < e->offset + e->len; i += sizeof(u32))
		t1_seeprom_read(adapter, i, (u32 *)&buf[i]);
	memcpy(data, buf + e->offset, e->len);
	return 0;
}
776
/* ethtool method table registered for every port net_device. */
static const struct ethtool_ops t1_ethtool_ops = {
	.get_settings      = get_settings,
	.set_settings      = set_settings,
	.get_drvinfo       = get_drvinfo,
	.get_msglevel      = get_msglevel,
	.set_msglevel      = set_msglevel,
	.get_ringparam     = get_sge_param,
	.set_ringparam     = set_sge_param,
	.get_coalesce      = get_coalesce,
	.set_coalesce      = set_coalesce,
	.get_eeprom_len    = get_eeprom_len,
	.get_eeprom        = get_eeprom,
	.get_pauseparam    = get_pauseparam,
	.set_pauseparam    = set_pauseparam,
	.get_rx_csum       = get_rx_csum,
	.set_rx_csum       = set_rx_csum,
	.get_tx_csum       = ethtool_op_get_tx_csum,
	.set_tx_csum       = ethtool_op_set_tx_csum,
	.get_sg            = ethtool_op_get_sg,
	.set_sg            = ethtool_op_set_sg,
	.get_link          = ethtool_op_get_link,
	.get_strings       = get_strings,
	.get_stats_count   = get_stats_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len      = get_regs_len,
	.get_regs          = get_regs,
	.get_tso           = ethtool_op_get_tso,
	.set_tso           = set_tso,
};
806
/*
 * SIOCxMII ioctls: PHY register access through the adapter's MDIO
 * routines.  Only the low 5 bits of the register number are used
 * (clause-22 style addressing).
 */
static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	struct adapter *adapter = dev->priv;
	struct mii_ioctl_data *data = if_mii(req);

	switch (cmd) {
	case SIOCGMIIPHY:
		/* Report the PHY address, then fall through to read reg 0. */
		data->phy_id = adapter->port[dev->if_port].phy->addr;
		/* FALLTHRU */
	case SIOCGMIIREG: {
		struct cphy *phy = adapter->port[dev->if_port].phy;
		u32 val;

		if (!phy->mdio_read)
			return -EOPNOTSUPP;
		phy->mdio_read(adapter, data->phy_id, 0, data->reg_num & 0x1f,
			       &val);
		data->val_out = val;
		break;
	}
	case SIOCSMIIREG: {
		struct cphy *phy = adapter->port[dev->if_port].phy;

		/* Writing PHY registers is a privileged operation. */
		if (!capable(CAP_NET_ADMIN))
		    return -EPERM;
		if (!phy->mdio_write)
			return -EOPNOTSUPP;
		phy->mdio_write(adapter, data->phy_id, 0, data->reg_num & 0x1f,
			        data->val_in);
		break;
	}

	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
844
845static int t1_change_mtu(struct net_device *dev, int new_mtu)
846{
847	int ret;
848	struct adapter *adapter = dev->priv;
849	struct cmac *mac = adapter->port[dev->if_port].mac;
850
851	if (!mac->ops->set_mtu)
852		return -EOPNOTSUPP;
853	if (new_mtu < 68)
854		return -EINVAL;
855	if ((ret = mac->ops->set_mtu(mac, new_mtu)))
856		return ret;
857	dev->mtu = new_mtu;
858	return 0;
859}
860
861static int t1_set_mac_addr(struct net_device *dev, void *p)
862{
863	struct adapter *adapter = dev->priv;
864	struct cmac *mac = adapter->port[dev->if_port].mac;
865	struct sockaddr *addr = p;
866
867	if (!mac->ops->macaddress_set)
868		return -EOPNOTSUPP;
869
870	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
871	mac->ops->macaddress_set(mac, dev->dev_addr);
872	return 0;
873}
874
875#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
/*
 * Install (or, with grp == NULL, remove) the VLAN group and toggle
 * hardware VLAN acceleration accordingly.  async_lock is taken with
 * IRQs disabled; presumably it guards vlan_grp readers in the
 * interrupt path - confirm against the RX handlers.
 */
static void vlan_rx_register(struct net_device *dev,
				   struct vlan_group *grp)
{
	struct adapter *adapter = dev->priv;

	spin_lock_irq(&adapter->async_lock);
	adapter->vlan_grp = grp;
	t1_set_vlan_accel(adapter, grp != NULL);
	spin_unlock_irq(&adapter->async_lock);
}
886#endif
887
888#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * netpoll hook (netconsole etc.): invoke the interrupt handler directly
 * with local interrupts disabled, as netpoll may call us from contexts
 * where the real IRQ cannot fire.
 */
static void t1_netpoll(struct net_device *dev)
{
	unsigned long flags;
	struct adapter *adapter = dev->priv;

	local_irq_save(flags);
	t1_interrupt(adapter->pdev->irq, adapter);
	local_irq_restore(flags);
}
898#endif
899
900/*
901 * Periodic accumulation of MAC statistics.  This is used only if the MAC
902 * does not have any other way to prevent stats counter overflow.
903 */
static void mac_stats_task(struct work_struct *work)
{
	int i;
	struct adapter *adapter =
		container_of(work, struct adapter, stats_update_task.work);

	/* Fast (incremental) update for every port that is up. */
	for_each_port(adapter, i) {
		struct port_info *p = &adapter->port[i];

		if (netif_running(p->dev))
			p->mac->ops->statistics_update(p->mac,
						       MAC_STATS_UPDATE_FAST);
	}

	/* Schedule the next statistics update if any port is active. */
	/* work_lock makes the open_device_map check and the reschedule
	 * atomic with respect to the cancel in cxgb_close().
	 */
	spin_lock(&adapter->work_lock);
	if (adapter->open_device_map & PORT_MASK)
		schedule_mac_stats_update(adapter,
					  adapter->params.stats_update_period);
	spin_unlock(&adapter->work_lock);
}
925
926/*
927 * Processes elmer0 external interrupts in process context.
928 */
static void ext_intr_task(struct work_struct *work)
{
	struct adapter *adapter =
		container_of(work, struct adapter, ext_intr_handler_task);

	t1_elmer0_ext_intr_handler(adapter);

	/* Now reenable external interrupts */
	spin_lock_irq(&adapter->async_lock);
	adapter->slow_intr_mask |= F_PL_INTR_EXT;
	/* Ack the latched EXT cause before unmasking it again. */
	writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE);
	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
		   adapter->regs + A_PL_ENABLE);
	spin_unlock_irq(&adapter->async_lock);
}
944
945/*
946 * Interrupt-context handler for elmer0 external interrupts.
947 */
void t1_elmer0_ext_intr(struct adapter *adapter)
{
	/*
	 * Schedule a task to handle external interrupts as we require
	 * a process context.  We disable EXT interrupts in the interim
	 * and let the task reenable them when it's done.
	 */
	adapter->slow_intr_mask &= ~F_PL_INTR_EXT;
	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
		   adapter->regs + A_PL_ENABLE);
	schedule_work(&adapter->ext_intr_handler_task);
}
960
/*
 * Handle a fatal hardware error: quiesce the SGE and mask all interrupts
 * (only meaningful once the hardware has been initialized), then log the
 * condition.  The adapter stays suspended until re-initialized.
 */
void t1_fatal_err(struct adapter *adapter)
{
	if (adapter->flags & FULL_INIT_DONE) {
		t1_sge_stop(adapter->sge);
		t1_interrupts_disable(adapter);
	}
	CH_ALERT("%s: encountered fatal error, operation suspended\n",
		 adapter->name);
}
970
/*
 * PCI probe routine, called once per Chelsio T1 board found.
 *
 * Allocates one net_device per physical port; the adapter private
 * structure is carved out of the first net_device's private area
 * (alloc_etherdev with sizeof(*adapter)).  Maps BAR 0 registers,
 * configures DMA masks, wires up net_device ops and feature flags,
 * initializes the software modules, and registers the net devices.
 * Registration failures of individual ports are tolerated as long as
 * at least one port registers successfully.
 *
 * Returns 0 on success or a negative errno; on failure all acquired
 * resources are released via the goto unwind ladder at the bottom.
 */
static int __devinit init_one(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	static int version_printed;	/* print the driver banner only once */

	int i, err, pci_using_dac = 0;
	unsigned long mmio_start, mmio_len;
	const struct board_info *bi;
	struct adapter *adapter = NULL;
	struct port_info *pi;

	if (!version_printed) {
		printk(KERN_INFO "%s - version %s\n", DRV_DESCRIPTION,
		       DRV_VERSION);
		++version_printed;
	}

	err = pci_enable_device(pdev);
	if (err)
		return err;

	/* BAR 0 must be a memory-mapped register window. */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		CH_ERR("%s: cannot find PCI device memory base address\n",
		       pci_name(pdev));
		err = -ENODEV;
		goto out_disable_pdev;
	}

	/* Prefer 64-bit DMA; fall back to a 32-bit streaming mask. */
	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		pci_using_dac = 1;

		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
			CH_ERR("%s: unable to obtain 64-bit DMA for"
			       "consistent allocations\n", pci_name(pdev));
			err = -ENODEV;
			goto out_disable_pdev;
		}

	} else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
		CH_ERR("%s: no usable DMA configuration\n", pci_name(pdev));
		goto out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		CH_ERR("%s: cannot obtain PCI resources\n", pci_name(pdev));
		goto out_disable_pdev;
	}

	pci_set_master(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	bi = t1_get_board_info(ent->driver_data);

	/* Allocate a net_device per port; adapter rides in the first one. */
	for (i = 0; i < bi->port_number; ++i) {
		struct net_device *netdev;

		/* Only the first allocation carries adapter private space. */
		netdev = alloc_etherdev(adapter ? 0 : sizeof(*adapter));
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_MODULE_OWNER(netdev);
		SET_NETDEV_DEV(netdev, &pdev->dev);

		/* First iteration: one-time adapter-wide initialization. */
		if (!adapter) {
			adapter = netdev->priv;
			adapter->pdev = pdev;
			adapter->port[0].dev = netdev;  /* so we don't leak it */

			adapter->regs = ioremap(mmio_start, mmio_len);
			if (!adapter->regs) {
				CH_ERR("%s: cannot map device registers\n",
				       pci_name(pdev));
				err = -ENOMEM;
				goto out_free_dev;
			}

			if (t1_get_board_rev(adapter, bi, &adapter->params)) {
				err = -ENODEV;	  /* Can't handle this chip rev */
				goto out_free_dev;
			}

			/* Use the PCI name until a netdev registers below. */
			adapter->name = pci_name(pdev);
			adapter->msg_enable = dflt_msg_enable;
			adapter->mmio_len = mmio_len;

			spin_lock_init(&adapter->tpi_lock);
			spin_lock_init(&adapter->work_lock);
			spin_lock_init(&adapter->async_lock);
			spin_lock_init(&adapter->mac_lock);

			INIT_WORK(&adapter->ext_intr_handler_task,
				  ext_intr_task);
			INIT_DELAYED_WORK(&adapter->stats_update_task,
					  mac_stats_task);

			pci_set_drvdata(pdev, netdev);
		}

		/* Per-port setup (runs for port 0 too). */
		pi = &adapter->port[i];
		pi->dev = netdev;
		netif_carrier_off(netdev);
		netdev->irq = pdev->irq;
		netdev->if_port = i;
		netdev->mem_start = mmio_start;
		netdev->mem_end = mmio_start + mmio_len - 1;
		/* Every port's priv points at the shared adapter (re-assigns
		 * the same value for port 0). */
		netdev->priv = adapter;
		netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
		netdev->features |= NETIF_F_LLTX;

		adapter->flags |= RX_CSUM_ENABLED | TCP_CSUM_CAPABLE;
		if (pci_using_dac)
			netdev->features |= NETIF_F_HIGHDMA;
		if (vlan_tso_capable(adapter)) {
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
			adapter->flags |= VLAN_ACCEL_CAPABLE;
			netdev->features |=
				NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
			netdev->vlan_rx_register = vlan_rx_register;
#endif

			/* T204: disable TSO */
			if (!(is_T2(adapter)) || bi->port_number != 4) {
				adapter->flags |= TSO_CAPABLE;
				netdev->features |= NETIF_F_TSO;
			}
		}

		netdev->open = cxgb_open;
		netdev->stop = cxgb_close;
		netdev->hard_start_xmit = t1_start_xmit;
		/* Reserve headroom for the CPL header prepended on transmit. */
		netdev->hard_header_len += (adapter->flags & TSO_CAPABLE) ?
			sizeof(struct cpl_tx_pkt_lso) : sizeof(struct cpl_tx_pkt);
		netdev->get_stats = t1_get_stats;
		netdev->set_multicast_list = t1_set_rxmode;
		netdev->do_ioctl = t1_ioctl;
		netdev->change_mtu = t1_change_mtu;
		netdev->set_mac_address = t1_set_mac_addr;
#ifdef CONFIG_NET_POLL_CONTROLLER
		netdev->poll_controller = t1_netpoll;
#endif
#ifdef CONFIG_CHELSIO_T1_NAPI
		netdev->weight = 64;
		netdev->poll = t1_poll;
#endif

		SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops);
	}

	if (t1_init_sw_modules(adapter, bi) < 0) {
		err = -ENODEV;
		goto out_free_dev;
	}

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for (i = 0; i < bi->port_number; ++i) {
		err = register_netdev(adapter->port[i].dev);
		if (err)
			CH_WARN("%s: cannot register net device %s, skipping\n",
				pci_name(pdev), adapter->port[i].dev->name);
		else {
			/*
			 * Change the name we use for messages to the name of
			 * the first successfully registered interface.
			 */
			if (!adapter->registered_device_map)
				adapter->name = adapter->port[i].dev->name;

			__set_bit(i, &adapter->registered_device_map);
		}
	}
	/* If this is zero, every register_netdev failed and err is nonzero. */
	if (!adapter->registered_device_map) {
		CH_ERR("%s: could not register any net devices\n",
		       pci_name(pdev));
		goto out_release_adapter_res;
	}

	printk(KERN_INFO "%s: %s (rev %d), %s %dMHz/%d-bit\n", adapter->name,
	       bi->desc, adapter->params.chip_revision,
	       adapter->params.pci.is_pcix ? "PCIX" : "PCI",
	       adapter->params.pci.speed, adapter->params.pci.width);

	/*
	 * Set the T1B ASIC and memory clocks.
	 */
	if (t1powersave)
		adapter->t1powersave = LCLOCK;	/* HW default is powersave mode. */
	else
		adapter->t1powersave = HCLOCK;
	if (t1_is_T1B(adapter))
		t1_clock(adapter, t1powersave);

	return 0;

out_release_adapter_res:
	t1_free_sw_modules(adapter);
out_free_dev:
	/* bi is always set before any jump here; free ports in reverse so
	 * port 0 (which embeds the adapter) goes last. */
	if (adapter) {
		if (adapter->regs)
			iounmap(adapter->regs);
		for (i = bi->port_number - 1; i >= 0; --i)
			if (adapter->port[i].dev)
				free_netdev(adapter->port[i].dev);
	}
	pci_release_regions(pdev);
out_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
1189
1190static void bit_bang(struct adapter *adapter, int bitdata, int nbits)
1191{
1192	int data;
1193	int i;
1194	u32 val;
1195
1196	enum {
1197		S_CLOCK = 1 << 3,
1198		S_DATA = 1 << 4
1199	};
1200
1201	for (i = (nbits - 1); i > -1; i--) {
1202
1203		udelay(50);
1204
1205		data = ((bitdata >> i) & 0x1);
1206		__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
1207
1208		if (data)
1209			val |= S_DATA;
1210		else
1211			val &= ~S_DATA;
1212
1213		udelay(50);
1214
1215		/* Set SCLOCK low */
1216		val &= ~S_CLOCK;
1217		__t1_tpi_write(adapter, A_ELMER0_GPO, val);
1218
1219		udelay(50);
1220
1221		/* Write SCLOCK high */
1222		val |= S_CLOCK;
1223		__t1_tpi_write(adapter, A_ELMER0_GPO, val);
1224
1225	}
1226}
1227
/*
 * Re-program the T1B ASIC core and memory clock synthesizers by
 * bit-banging T/N/M divider values over ELMER0 GPIO lines.
 *
 * mode bit 0 selects the target speed (HCLOCK = full speed, LCLOCK =
 * powersave); mode bit 1 means "query only, change nothing".
 *
 * Returns 0 on success, -ENODEV for chips other than T1B (which cannot
 * be re-clocked), or -EALREADY if the ASIC is already running in the
 * requested mode.  The exact sequence of register accesses and udelays
 * below follows the synthesizer's serial-load protocol; do not reorder.
 */
static int t1_clock(struct adapter *adapter, int mode)
{
	u32 val;
	int M_CORE_VAL;		/* core M divider, chosen from mode below */
	int M_MEM_VAL;		/* memory M divider, chosen from mode below */

	enum {
		M_CORE_BITS	= 9,
		T_CORE_VAL	= 0,
		T_CORE_BITS	= 2,
		N_CORE_VAL	= 0,
		N_CORE_BITS	= 2,
		M_MEM_BITS	= 9,
		T_MEM_VAL	= 0,
		T_MEM_BITS	= 2,
		N_MEM_VAL	= 0,
		N_MEM_BITS	= 2,
		NP_LOAD		= 1 << 17,
		S_LOAD_MEM	= 1 << 5,
		S_LOAD_CORE	= 1 << 6,
		S_CLOCK		= 1 << 3
	};

	if (!t1_is_T1B(adapter))
		return -ENODEV;	/* Can't re-clock this chip. */

	if (mode & 2)
		return 0;	/* show current mode. */

	if ((adapter->t1powersave & 1) == (mode & 1))
		return -EALREADY;	/* ASIC already running in mode. */

	if ((mode & 1) == HCLOCK) {
		M_CORE_VAL = 0x14;
		M_MEM_VAL = 0x18;
		adapter->t1powersave = HCLOCK;	/* overclock */
	} else {
		M_CORE_VAL = 0xe;
		M_MEM_VAL = 0x10;
		adapter->t1powersave = LCLOCK;	/* underclock */
	}

	/* Don't interrupt this serial stream! */
	spin_lock(&adapter->tpi_lock);

	/* Initialize for ASIC core */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= NP_LOAD;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_CORE;
	val &= ~S_CLOCK;
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);

	/* Serial program the ASIC clock synthesizer */
	bit_bang(adapter, T_CORE_VAL, T_CORE_BITS);
	bit_bang(adapter, N_CORE_VAL, N_CORE_BITS);
	bit_bang(adapter, M_CORE_VAL, M_CORE_BITS);
	udelay(50);

	/* Finish ASIC core: pulse S_LOAD_CORE to latch the new dividers. */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= S_LOAD_CORE;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_CORE;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);

	/* Initialize for memory */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= NP_LOAD;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_MEM;
	val &= ~S_CLOCK;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);

	/* Serial program the memory clock synthesizer */
	bit_bang(adapter, T_MEM_VAL, T_MEM_BITS);
	bit_bang(adapter, N_MEM_VAL, N_MEM_BITS);
	bit_bang(adapter, M_MEM_VAL, M_MEM_BITS);
	udelay(50);

	/* Finish memory: pulse S_LOAD_MEM to latch the new dividers. */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= S_LOAD_MEM;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_MEM;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);

	spin_unlock(&adapter->tpi_lock);

	return 0;
}
1337
1338static inline void t1_sw_reset(struct pci_dev *pdev)
1339{
1340	pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 3);
1341	pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 0);
1342}
1343
/*
 * PCI remove routine: tear down in roughly the reverse order of
 * init_one.  Unregisters every successfully registered net device,
 * frees the software modules and the register mapping, frees the
 * per-port net devices, releases PCI resources, and finally resets
 * the chip.
 */
static void __devexit remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct adapter *adapter = dev->priv;
	int i;

	for_each_port(adapter, i) {
		if (test_bit(i, &adapter->registered_device_map))
			unregister_netdev(adapter->port[i].dev);
	}

	t1_free_sw_modules(adapter);
	iounmap(adapter->regs);

	/* i carries its final value out of the for_each_port loop above
	 * (presumably the port count - verify against the macro), so this
	 * walks every port in reverse; port 0's net_device, which embeds
	 * the adapter structure, is freed last. */
	while (--i >= 0) {
		if (adapter->port[i].dev)
			free_netdev(adapter->port[i].dev);
	}

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	/* Reset the chip last; only pdev is touched from here on. */
	t1_sw_reset(pdev);
}
1368
/* PCI driver glue: binds init_one/remove_one to the device IDs in
 * t1_pci_tbl. */
static struct pci_driver driver = {
	.name     = DRV_NAME,
	.id_table = t1_pci_tbl,
	.probe    = init_one,
	.remove   = __devexit_p(remove_one),
};
1375
/* Module entry point: register the PCI driver (probe runs per device). */
static int __init t1_init_module(void)
{
	return pci_register_driver(&driver);
}
1380
/* Module exit point: unregister the driver, invoking remove_one for
 * each bound device. */
static void __exit t1_cleanup_module(void)
{
	pci_unregister_driver(&driver);
}
1385
/* Hook the module entry/exit points into the kernel module loader. */
module_init(t1_init_module);
module_exit(t1_cleanup_module);
1388