/*****************************************************************************
 *                                                                           *
 * File: cxgb2.c                                                             *
 * $Revision: 1.25 $                                                         *
 * $Date: 2005/06/22 00:43:25 $                                              *
 * Description:                                                              *
 *  Chelsio 10Gb Ethernet Driver.                                            *
 *                                                                           *
 * This program is free software; you can redistribute it and/or modify      *
 * it under the terms of the GNU General Public License, version 2, as       *
 * published by the Free Software Foundation.                                *
 *                                                                           *
 * You should have received a copy of the GNU General Public License along   *
 * with this program; if not, see <http://www.gnu.org/licenses/>.            *
 *                                                                           *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
 *                                                                           *
 * http://www.chelsio.com                                                    *
 *                                                                           *
 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
 * All rights reserved.                                                      *
 *                                                                           *
 * Maintainers: maintainers@chelsio.com                                      *
 *                                                                           *
 * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
 *          Tina Yang               <tainay@chelsio.com>                     *
 *          Felix Marti             <felix@chelsio.com>                      *
 *          Scott Bardone           <sbardone@chelsio.com>                   *
 *          Kurt Ottaway            <kottaway@chelsio.com>                   *
 *          Frank DiMambro          <frank@chelsio.com>                      *
 *                                                                           *
 * History:                                                                  *
 *                                                                           *
 ****************************************************************************/

#include "common.h"
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mii.h>
#include <linux/sockios.h>
#include <linux/dma-mapping.h>
#include <linux/uaccess.h>

#include "cpl5_cmd.h"
#include "regs.h"
#include "gmac.h"
#include "cphy.h"
#include "sge.h"
#include "tp.h"
#include "espi.h"
#include "elmer0.h"

#include <linux/workqueue.h>

static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
{
	schedule_delayed_work(&ap->stats_update_task, secs * HZ);
}

static inline void cancel_mac_stats_update(struct adapter *ap)
{
	cancel_delayed_work(&ap->stats_update_task);
}

#define MAX_CMDQ_ENTRIES	16384
#define MAX_CMDQ1_ENTRIES	1024
#define MAX_RX_BUFFERS		16384
#define MAX_RX_JUMBO_BUFFERS	16384
#define MAX_TX_BUFFERS_HIGH	16384U
#define MAX_TX_BUFFERS_LOW	1536U
#define MAX_TX_BUFFERS		1460U
#define MIN_FL_ENTRIES		32

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

/*
 * The EEPROM is actually bigger but only the first few bytes are used so we
 * only report those.
 */
#define EEPROM_SIZE 32

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("GPL");

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 default message enable bitmap");

#define HCLOCK 0x0
#define LCLOCK 0x1

/* T1 cards powersave mode */
static int t1_clock(struct adapter *adapter, int mode);
static int t1powersave = 1;	/* HW default is powersave mode. */

module_param(t1powersave, int, 0);
MODULE_PARM_DESC(t1powersave, "Enable/Disable T1 powersaving mode");

static int disable_msi = 0;
module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");

/*
 * Set up the MAC to receive the types of packets we want.
 */
static void t1_set_rxmode(struct net_device *dev)
{
	struct adapter *adapter = dev->ml_priv;
	struct cmac *mac = adapter->port[dev->if_port].mac;
	struct t1_rx_mode rm;

	rm.dev = dev;
	mac->ops->set_rx_mode(mac, &rm);
}

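/*
 * Report the carrier state of a port, including speed and duplex when the
 * link is up.
 */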
static void link_report(struct port_info *p)
{
	if (!netif_carrier_ok(p->dev))
		netdev_info(p->dev, "link down\n");
	else {
		const char *s = "10Mbps";

		switch (p->link_config.speed) {
			case SPEED_10000: s = "10Gbps"; break;
			case SPEED_1000:  s = "1000Mbps"; break;
			case SPEED_100:   s = "100Mbps"; break;
		}

		netdev_info(p->dev, "link up, %s, %s-duplex\n",
			    s, p->link_config.duplex == DUPLEX_FULL
			    ? "full" : "half");
	}
}

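/*
 * Update a port's carrier state to match the negotiated link status, log the
 * change, and on multi-port adapters pass the negotiated speed on to the SGE
 * Tx scheduler.
 */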
void t1_link_negotiated(struct adapter *adapter, int port_id, int link_stat,
			int speed, int duplex, int pause)
{
	struct port_info *p = &adapter->port[port_id];

	if (link_stat != netif_carrier_ok(p->dev)) {
		if (link_stat)
			netif_carrier_on(p->dev);
		else
			netif_carrier_off(p->dev);
		link_report(p);

		/* multi-ports: inform toe */
		if ((speed > 0) && (adapter->params.nports > 1)) {
			unsigned int sched_speed = 10;
			switch (speed) {
			case SPEED_1000:
				sched_speed = 1000;
				break;
			case SPEED_100:
				sched_speed = 100;
				break;
			case SPEED_10:
				sched_speed = 10;
				break;
			}
			t1_sched_update_parms(adapter->sge, port_id, 0, sched_speed);
		}
	}
}

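/*
 * Bring up the link on a port: reset the MAC, reprogram its address and Rx
 * mode, apply the requested link settings, and enable the MAC in both
 * directions.
 */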
static void link_start(struct port_info *p)
{
	struct cmac *mac = p->mac;

	mac->ops->reset(mac);
	if (mac->ops->macaddress_set)
		mac->ops->macaddress_set(mac, p->dev->dev_addr);
	t1_set_rxmode(p->dev);
	t1_link_start(p->phy, mac, &p->link_config);
	mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

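/*
 * Enable checksum offload in the TP: TCP checksums always, IP header
 * checksums only when TSO is available.
 */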
static void enable_hw_csum(struct adapter *adapter)
{
	if (adapter->port[0].dev->hw_features & NETIF_F_TSO)
		t1_tp_set_ip_checksum_offload(adapter->tp, 1);	/* for TSO only */
	t1_tp_set_tcp_checksum_offload(adapter->tp, 1);
}

/*
 * Things to do upon first use of a card.
 * This must run with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adapter)
{
	int err = 0;

	if (!(adapter->flags & FULL_INIT_DONE)) {
		err = t1_init_hw_modules(adapter);
		if (err)
			goto out_err;

		enable_hw_csum(adapter);
		adapter->flags |= FULL_INIT_DONE;
	}

	t1_interrupts_clear(adapter);

	adapter->params.has_msi = !disable_msi && !pci_enable_msi(adapter->pdev);
	err = request_threaded_irq(adapter->pdev->irq, t1_interrupt,
				   t1_interrupt_thread,
				   adapter->params.has_msi ? 0 : IRQF_SHARED,
				   adapter->name, adapter);
	if (err) {
		if (adapter->params.has_msi)
			pci_disable_msi(adapter->pdev);

		goto out_err;
	}

	t1_sge_start(adapter->sge);
	t1_interrupts_enable(adapter);
out_err:
	return err;
}

/*
 * Release resources when all the ports have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
	t1_sge_stop(adapter->sge);
	t1_interrupts_disable(adapter);
	free_irq(adapter->pdev->irq, adapter);
	if (adapter->params.has_msi)
		pci_disable_msi(adapter->pdev);
}

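/*
 * net_device open handler.  Brings the adapter up on the first open, enables
 * NAPI, starts the port's link and Tx queue, and schedules the periodic MAC
 * statistics update when one is configured.
 */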
static int cxgb_open(struct net_device *dev)
{
	int err;
	struct adapter *adapter = dev->ml_priv;
	int other_ports = adapter->open_device_map & PORT_MASK;

	napi_enable(&adapter->napi);
	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) {
		napi_disable(&adapter->napi);
		return err;
	}

	__set_bit(dev->if_port, &adapter->open_device_map);
	link_start(&adapter->port[dev->if_port]);
	netif_start_queue(dev);
	if (!other_ports && adapter->params.stats_update_period)
		schedule_mac_stats_update(adapter,
					  adapter->params.stats_update_period);

	t1_vlan_mode(adapter, dev->features);
	return 0;
}

static int cxgb_close(struct net_device *dev)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct cmac *mac = p->mac;

	netif_stop_queue(dev);
	napi_disable(&adapter->napi);
	mac->ops->disable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
	netif_carrier_off(dev);

	clear_bit(dev->if_port, &adapter->open_device_map);
	if (adapter->params.stats_update_period &&
	    !(adapter->open_device_map & PORT_MASK)) {
		/* Stop statistics accumulation. */
		smp_mb__after_atomic();
		spin_lock(&adapter->work_lock);   /* sync with update task */
		spin_unlock(&adapter->work_lock);
		cancel_mac_stats_update(adapter);
	}

	if (!adapter->open_device_map)
		cxgb_down(adapter);
	return 0;
}

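/*
 * Return netdev statistics for a port, derived from a full update of the
 * MAC's hardware counters.
 */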
static struct net_device_stats *t1_get_stats(struct net_device *dev)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct net_device_stats *ns = &dev->stats;
	const struct cmac_statistics *pstats;

	/* Do a full update of the MAC stats */
	pstats = p->mac->ops->statistics_update(p->mac,
						MAC_STATS_UPDATE_FULL);

	ns->tx_packets = pstats->TxUnicastFramesOK +
		pstats->TxMulticastFramesOK + pstats->TxBroadcastFramesOK;

	ns->rx_packets = pstats->RxUnicastFramesOK +
		pstats->RxMulticastFramesOK + pstats->RxBroadcastFramesOK;

	ns->tx_bytes = pstats->TxOctetsOK;
	ns->rx_bytes = pstats->RxOctetsOK;

	ns->tx_errors = pstats->TxLateCollisions + pstats->TxLengthErrors +
		pstats->TxUnderrun + pstats->TxFramesAbortedDueToXSCollisions;
	ns->rx_errors = pstats->RxDataErrors + pstats->RxJabberErrors +
		pstats->RxFCSErrors + pstats->RxAlignErrors +
		pstats->RxSequenceErrors + pstats->RxFrameTooLongErrors +
		pstats->RxSymbolErrors + pstats->RxRuntErrors;

	ns->multicast  = pstats->RxMulticastFramesOK;
	ns->collisions = pstats->TxTotalCollisions;

	/* detailed rx_errors */
	ns->rx_length_errors = pstats->RxFrameTooLongErrors +
		pstats->RxJabberErrors;
	ns->rx_over_errors   = 0;
	ns->rx_crc_errors    = pstats->RxFCSErrors;
	ns->rx_frame_errors  = pstats->RxAlignErrors;
	ns->rx_fifo_errors   = 0;
	ns->rx_missed_errors = 0;

	/* detailed tx_errors */
	ns->tx_aborted_errors   = pstats->TxFramesAbortedDueToXSCollisions;
	ns->tx_carrier_errors   = 0;
	ns->tx_fifo_errors      = pstats->TxUnderrun;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors    = pstats->TxLateCollisions;
	return ns;
}

static u32 get_msglevel(struct net_device *dev)
{
	struct adapter *adapter = dev->ml_priv;

	return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	struct adapter *adapter = dev->ml_priv;

	adapter->msg_enable = val;
}

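/*
 * Names of the ethtool statistics.  The order must match the order in which
 * get_stats() fills in the values.
 */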
static const char stats_strings[][ETH_GSTRING_LEN] = {
	"TxOctetsOK",
	"TxOctetsBad",
	"TxUnicastFramesOK",
	"TxMulticastFramesOK",
	"TxBroadcastFramesOK",
	"TxPauseFrames",
	"TxFramesWithDeferredXmissions",
	"TxLateCollisions",
	"TxTotalCollisions",
	"TxFramesAbortedDueToXSCollisions",
	"TxUnderrun",
	"TxLengthErrors",
	"TxInternalMACXmitError",
	"TxFramesWithExcessiveDeferral",
	"TxFCSErrors",
	"TxJumboFramesOk",
	"TxJumboOctetsOk",

	"RxOctetsOK",
	"RxOctetsBad",
	"RxUnicastFramesOK",
	"RxMulticastFramesOK",
	"RxBroadcastFramesOK",
	"RxPauseFrames",
	"RxFCSErrors",
	"RxAlignErrors",
	"RxSymbolErrors",
	"RxDataErrors",
	"RxSequenceErrors",
	"RxRuntErrors",
	"RxJabberErrors",
	"RxInternalMACRcvError",
	"RxInRangeLengthErrors",
	"RxOutOfRangeLengthField",
	"RxFrameTooLongErrors",
	"RxJumboFramesOk",
	"RxJumboOctetsOk",

	/* Port stats */
	"RxCsumGood",
	"TxCsumOffload",
	"TxTso",
	"RxVlan",
	"TxVlan",
	"TxNeedHeadroom",

	/* Interrupt stats */
	"rx drops",
	"pure_rsps",
	"unhandled irqs",
	"respQ_empty",
	"respQ_overflow",
	"freelistQ_empty",
	"pkt_too_big",
	"pkt_mismatch",
	"cmdQ_full0",
	"cmdQ_full1",

	"espi_DIP2ParityErr",
	"espi_DIP4Err",
	"espi_RxDrops",
	"espi_TxDrops",
	"espi_RxOvfl",
	"espi_ParityErr"
};

#define T2_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
	return T2_REGMAP_SIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct adapter *adapter = dev->ml_priv;

	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
	strscpy(info->bus_info, pci_name(adapter->pdev),
		sizeof(info->bus_info));
}

static int get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(stats_strings);
	default:
		return -EOPNOTSUPP;
	}
}

static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, stats_strings, sizeof(stats_strings));
}

static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct adapter *adapter = dev->ml_priv;
	struct cmac *mac = adapter->port[dev->if_port].mac;
	const struct cmac_statistics *s;
	const struct sge_intr_counts *t;
	struct sge_port_stats ss;

	s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL);
	t = t1_sge_get_intr_counts(adapter->sge);
	t1_sge_get_port_stats(adapter->sge, dev->if_port, &ss);

	*data++ = s->TxOctetsOK;
	*data++ = s->TxOctetsBad;
	*data++ = s->TxUnicastFramesOK;
	*data++ = s->TxMulticastFramesOK;
	*data++ = s->TxBroadcastFramesOK;
	*data++ = s->TxPauseFrames;
	*data++ = s->TxFramesWithDeferredXmissions;
	*data++ = s->TxLateCollisions;
	*data++ = s->TxTotalCollisions;
	*data++ = s->TxFramesAbortedDueToXSCollisions;
	*data++ = s->TxUnderrun;
	*data++ = s->TxLengthErrors;
	*data++ = s->TxInternalMACXmitError;
	*data++ = s->TxFramesWithExcessiveDeferral;
	*data++ = s->TxFCSErrors;
	*data++ = s->TxJumboFramesOK;
	*data++ = s->TxJumboOctetsOK;

	*data++ = s->RxOctetsOK;
	*data++ = s->RxOctetsBad;
	*data++ = s->RxUnicastFramesOK;
	*data++ = s->RxMulticastFramesOK;
	*data++ = s->RxBroadcastFramesOK;
	*data++ = s->RxPauseFrames;
	*data++ = s->RxFCSErrors;
	*data++ = s->RxAlignErrors;
	*data++ = s->RxSymbolErrors;
	*data++ = s->RxDataErrors;
	*data++ = s->RxSequenceErrors;
	*data++ = s->RxRuntErrors;
	*data++ = s->RxJabberErrors;
	*data++ = s->RxInternalMACRcvError;
	*data++ = s->RxInRangeLengthErrors;
	*data++ = s->RxOutOfRangeLengthField;
	*data++ = s->RxFrameTooLongErrors;
	*data++ = s->RxJumboFramesOK;
	*data++ = s->RxJumboOctetsOK;

	*data++ = ss.rx_cso_good;
	*data++ = ss.tx_cso;
	*data++ = ss.tx_tso;
	*data++ = ss.vlan_xtract;
	*data++ = ss.vlan_insert;
	*data++ = ss.tx_need_hdrroom;

	*data++ = t->rx_drops;
	*data++ = t->pure_rsps;
	*data++ = t->unhandled_irqs;
	*data++ = t->respQ_empty;
	*data++ = t->respQ_overflow;
	*data++ = t->freelistQ_empty;
	*data++ = t->pkt_too_big;
	*data++ = t->pkt_mismatch;
	*data++ = t->cmdQ_full[0];
	*data++ = t->cmdQ_full[1];

	if (adapter->espi) {
		const struct espi_intr_counts *e;

		e = t1_espi_get_intr_counts(adapter->espi);
		*data++ = e->DIP2_parity_err;
		*data++ = e->DIP4_err;
		*data++ = e->rx_drops;
		*data++ = e->tx_drops;
		*data++ = e->rx_ovflw;
		*data++ = e->parity_err;
	}
}

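/*
 * Copy a contiguous range of adapter registers into the ethtool register
 * dump buffer at the offset of the first register in the range.
 */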
static inline void reg_block_dump(struct adapter *ap, void *buf,
				  unsigned int start, unsigned int end)
{
	u32 *p = buf + start;

	for ( ; start <= end; start += sizeof(u32))
		*p++ = readl(ap->regs + start);
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	struct adapter *ap = dev->ml_priv;

	/*
	 * Version scheme: bits 0..9: chip version, bits 10..15: chip revision
	 */
	regs->version = 2;

	memset(buf, 0, T2_REGMAP_SIZE);
	reg_block_dump(ap, buf, 0, A_SG_RESPACCUTIMER);
	reg_block_dump(ap, buf, A_MC3_CFG, A_MC4_INT_CAUSE);
	reg_block_dump(ap, buf, A_TPI_ADDR, A_TPI_PAR);
	reg_block_dump(ap, buf, A_TP_IN_CONFIG, A_TP_TX_DROP_COUNT);
	reg_block_dump(ap, buf, A_RAT_ROUTE_CONTROL, A_RAT_INTR_CAUSE);
	reg_block_dump(ap, buf, A_CSPI_RX_AE_WM, A_CSPI_INTR_ENABLE);
	reg_block_dump(ap, buf, A_ESPI_SCH_TOKEN0, A_ESPI_GOSTAT);
	reg_block_dump(ap, buf, A_ULP_ULIMIT, A_ULP_PIO_CTRL);
	reg_block_dump(ap, buf, A_PL_ENABLE, A_PL_CAUSE);
	reg_block_dump(ap, buf, A_MC5_CONFIG, A_MC5_MASK_WRITE_CMD);
}

static int get_link_ksettings(struct net_device *dev,
			      struct ethtool_link_ksettings *cmd)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];
	u32 supported, advertising;

	supported = p->link_config.supported;
	advertising = p->link_config.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->base.speed = p->link_config.speed;
		cmd->base.duplex = p->link_config.duplex;
	} else {
		cmd->base.speed = SPEED_UNKNOWN;
		cmd->base.duplex = DUPLEX_UNKNOWN;
	}

	cmd->base.port = (supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
	cmd->base.phy_address = p->phy->mdio.prtad;
	cmd->base.autoneg = p->link_config.autoneg;

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}

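/*
 * Map a speed/duplex pair to the corresponding SUPPORTED_* capability bit,
 * or 0 if the combination is not supported.
 */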
static int speed_duplex_to_caps(int speed, int duplex)
{
	int cap = 0;

	switch (speed) {
	case SPEED_10:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10baseT_Full;
		else
			cap = SUPPORTED_10baseT_Half;
		break;
	case SPEED_100:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_100baseT_Full;
		else
			cap = SUPPORTED_100baseT_Half;
		break;
	case SPEED_1000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_1000baseT_Full;
		else
			cap = SUPPORTED_1000baseT_Half;
		break;
	case SPEED_10000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10000baseT_Full;
	}
	return cap;
}

#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
		      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
		      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
		      ADVERTISED_10000baseT_Full)

static int set_link_ksettings(struct net_device *dev,
			      const struct ethtool_link_ksettings *cmd)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct link_config *lc = &p->link_config;
	u32 advertising;

	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);

	if (!(lc->supported & SUPPORTED_Autoneg))
		return -EOPNOTSUPP;             /* can't change speed/duplex */

	if (cmd->base.autoneg == AUTONEG_DISABLE) {
		u32 speed = cmd->base.speed;
		int cap = speed_duplex_to_caps(speed, cmd->base.duplex);

		if (!(lc->supported & cap) || (speed == SPEED_1000))
			return -EINVAL;
		lc->requested_speed = speed;
		lc->requested_duplex = cmd->base.duplex;
		lc->advertising = 0;
	} else {
		advertising &= ADVERTISED_MASK;
		if (advertising & (advertising - 1))
			advertising = lc->supported;
		advertising &= lc->supported;
		if (!advertising)
			return -EINVAL;
		lc->requested_speed = SPEED_INVALID;
		lc->requested_duplex = DUPLEX_INVALID;
		lc->advertising = advertising | ADVERTISED_Autoneg;
	}
	lc->autoneg = cmd->base.autoneg;
	if (netif_running(dev))
		t1_link_start(p->phy, p->mac, lc);
	return 0;
}

static void get_pauseparam(struct net_device *dev,
			   struct ethtool_pauseparam *epause)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];

	epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
	epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
	epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
}

static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct link_config *lc = &p->link_config;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & SUPPORTED_Autoneg)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		return -EINVAL;

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (lc->autoneg == AUTONEG_ENABLE) {
		if (netif_running(dev))
			t1_link_start(p->phy, p->mac, lc);
	} else {
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
		if (netif_running(dev))
			p->mac->ops->set_speed_duplex_fc(p->mac, -1, -1,
							 lc->fc);
	}
	return 0;
}

static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e,
			  struct kernel_ethtool_ringparam *kernel_e,
			  struct netlink_ext_ack *extack)
{
	struct adapter *adapter = dev->ml_priv;
	int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;

	e->rx_max_pending = MAX_RX_BUFFERS;
	e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
	e->tx_max_pending = MAX_CMDQ_ENTRIES;

	e->rx_pending = adapter->params.sge.freelQ_size[!jumbo_fl];
	e->rx_jumbo_pending = adapter->params.sge.freelQ_size[jumbo_fl];
	e->tx_pending = adapter->params.sge.cmdQ_size[0];
}

static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e,
			 struct kernel_ethtool_ringparam *kernel_e,
			 struct netlink_ext_ack *extack)
{
	struct adapter *adapter = dev->ml_priv;
	int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;

	if (e->rx_pending > MAX_RX_BUFFERS || e->rx_mini_pending ||
	    e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
	    e->tx_pending > MAX_CMDQ_ENTRIES ||
	    e->rx_pending < MIN_FL_ENTRIES ||
	    e->rx_jumbo_pending < MIN_FL_ENTRIES ||
	    e->tx_pending < (adapter->params.nports + 1) * (MAX_SKB_FRAGS + 1))
		return -EINVAL;

	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;

	adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending;
	adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending;
	adapter->params.sge.cmdQ_size[0] = e->tx_pending;
	adapter->params.sge.cmdQ_size[1] = e->tx_pending > MAX_CMDQ1_ENTRIES ?
		MAX_CMDQ1_ENTRIES : e->tx_pending;
	return 0;
}

static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c,
			struct kernel_ethtool_coalesce *kernel_coal,
			struct netlink_ext_ack *extack)
{
	struct adapter *adapter = dev->ml_priv;

	adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs;
	adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce;
	adapter->params.sge.sample_interval_usecs = c->rate_sample_interval;
	t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge);
	return 0;
}

static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c,
			struct kernel_ethtool_coalesce *kernel_coal,
			struct netlink_ext_ack *extack)
{
	struct adapter *adapter = dev->ml_priv;

	c->rx_coalesce_usecs = adapter->params.sge.rx_coalesce_usecs;
	c->rate_sample_interval = adapter->params.sge.sample_interval_usecs;
	c->use_adaptive_rx_coalesce = adapter->params.sge.coalesce_enable;
	return 0;
}

static int get_eeprom_len(struct net_device *dev)
{
	struct adapter *adapter = dev->ml_priv;

	return t1_is_asic(adapter) ? EEPROM_SIZE : 0;
}

#define EEPROM_MAGIC(ap) \
	(PCI_VENDOR_ID_CHELSIO | ((ap)->params.chip_version << 16))

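/*
 * Read the serial EEPROM one 32-bit word at a time and copy the requested
 * byte range into the caller's buffer.
 */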
static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 *data)
{
	int i;
	u8 buf[EEPROM_SIZE] __attribute__((aligned(4)));
	struct adapter *adapter = dev->ml_priv;

	e->magic = EEPROM_MAGIC(adapter);
	for (i = e->offset & ~3; i < e->offset + e->len; i += sizeof(u32))
		t1_seeprom_read(adapter, i, (__le32 *)&buf[i]);
	memcpy(data, buf + e->offset, e->len);
	return 0;
}

static const struct ethtool_ops t1_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX |
				     ETHTOOL_COALESCE_RATE_SAMPLE_INTERVAL,
	.get_drvinfo       = get_drvinfo,
	.get_msglevel      = get_msglevel,
	.set_msglevel      = set_msglevel,
	.get_ringparam     = get_sge_param,
	.set_ringparam     = set_sge_param,
	.get_coalesce      = get_coalesce,
	.set_coalesce      = set_coalesce,
	.get_eeprom_len    = get_eeprom_len,
	.get_eeprom        = get_eeprom,
	.get_pauseparam    = get_pauseparam,
	.set_pauseparam    = set_pauseparam,
	.get_link          = ethtool_op_get_link,
	.get_strings       = get_strings,
	.get_sset_count	   = get_sset_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len      = get_regs_len,
	.get_regs          = get_regs,
	.get_link_ksettings = get_link_ksettings,
	.set_link_ksettings = set_link_ksettings,
};

static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	struct adapter *adapter = dev->ml_priv;
	struct mdio_if_info *mdio = &adapter->port[dev->if_port].phy->mdio;

	return mdio_mii_ioctl(mdio, if_mii(req), cmd);
}

static int t1_change_mtu(struct net_device *dev, int new_mtu)
{
	int ret;
	struct adapter *adapter = dev->ml_priv;
	struct cmac *mac = adapter->port[dev->if_port].mac;

	if (!mac->ops->set_mtu)
		return -EOPNOTSUPP;
	if ((ret = mac->ops->set_mtu(mac, new_mtu)))
		return ret;
	dev->mtu = new_mtu;
	return 0;
}

static int t1_set_mac_addr(struct net_device *dev, void *p)
{
	struct adapter *adapter = dev->ml_priv;
	struct cmac *mac = adapter->port[dev->if_port].mac;
	struct sockaddr *addr = p;

	if (!mac->ops->macaddress_set)
		return -EOPNOTSUPP;

	eth_hw_addr_set(dev, addr->sa_data);
	mac->ops->macaddress_set(mac, dev->dev_addr);
	return 0;
}

static netdev_features_t t1_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	/*
	 * Since there is no support for separate rx/tx vlan accel
	 * enable/disable, make sure the tx flag is always in the same state
	 * as the rx flag.
	 */
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		features |= NETIF_F_HW_VLAN_CTAG_TX;
	else
		features &= ~NETIF_F_HW_VLAN_CTAG_TX;

	return features;
}

static int t1_set_features(struct net_device *dev, netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;
	struct adapter *adapter = dev->ml_priv;

	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
		t1_vlan_mode(adapter, features);

	return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void t1_netpoll(struct net_device *dev)
{
	unsigned long flags;
	struct adapter *adapter = dev->ml_priv;

	local_irq_save(flags);
	t1_interrupt(adapter->pdev->irq, adapter);
	local_irq_restore(flags);
}
#endif

/*
 * Periodic accumulation of MAC statistics.  This is used only if the MAC
 * does not have any other way to prevent stats counter overflow.
 */
static void mac_stats_task(struct work_struct *work)
{
	int i;
	struct adapter *adapter =
		container_of(work, struct adapter, stats_update_task.work);

	for_each_port(adapter, i) {
		struct port_info *p = &adapter->port[i];

		if (netif_running(p->dev))
			p->mac->ops->statistics_update(p->mac,
						       MAC_STATS_UPDATE_FAST);
	}

	/* Schedule the next statistics update if any port is active. */
	spin_lock(&adapter->work_lock);
	if (adapter->open_device_map & PORT_MASK)
		schedule_mac_stats_update(adapter,
					  adapter->params.stats_update_period);
	spin_unlock(&adapter->work_lock);
}

static const struct net_device_ops cxgb_netdev_ops = {
	.ndo_open		= cxgb_open,
	.ndo_stop		= cxgb_close,
	.ndo_start_xmit		= t1_start_xmit,
	.ndo_get_stats		= t1_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= t1_set_rxmode,
	.ndo_eth_ioctl		= t1_ioctl,
	.ndo_change_mtu		= t1_change_mtu,
	.ndo_set_mac_address	= t1_set_mac_addr,
	.ndo_fix_features	= t1_fix_features,
	.ndo_set_features	= t1_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= t1_netpoll,
#endif
};

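/*
 * PCI probe handler.  Enables the device, maps its registers, allocates one
 * net_device per port, initializes the software state, and registers the
 * ports; the probe succeeds as long as at least one port registers.
 */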
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	unsigned long mmio_start, mmio_len;
	const struct board_info *bi;
	struct adapter *adapter = NULL;
	struct port_info *pi;
	int i, err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		pr_err("%s: cannot find PCI device memory base address\n",
		       pci_name(pdev));
		err = -ENODEV;
		goto out_disable_pdev;
	}

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		pr_err("%s: no usable DMA configuration\n", pci_name(pdev));
		goto out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		pr_err("%s: cannot obtain PCI resources\n", pci_name(pdev));
		goto out_disable_pdev;
	}

	pci_set_master(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	bi = t1_get_board_info(ent->driver_data);

	for (i = 0; i < bi->port_number; ++i) {
		struct net_device *netdev;

		netdev = alloc_etherdev(adapter ? 0 : sizeof(*adapter));
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		if (!adapter) {
			adapter = netdev_priv(netdev);
			adapter->pdev = pdev;
			adapter->port[0].dev = netdev;  /* so we don't leak it */

			adapter->regs = ioremap(mmio_start, mmio_len);
			if (!adapter->regs) {
				pr_err("%s: cannot map device registers\n",
				       pci_name(pdev));
				err = -ENOMEM;
				goto out_free_dev;
			}

			if (t1_get_board_rev(adapter, bi, &adapter->params)) {
				err = -ENODEV;	  /* Can't handle this chip rev */
				goto out_free_dev;
			}

			adapter->name = pci_name(pdev);
			adapter->msg_enable = dflt_msg_enable;
			adapter->mmio_len = mmio_len;

			spin_lock_init(&adapter->tpi_lock);
			spin_lock_init(&adapter->work_lock);
			spin_lock_init(&adapter->async_lock);
			spin_lock_init(&adapter->mac_lock);

			INIT_DELAYED_WORK(&adapter->stats_update_task,
					  mac_stats_task);

			pci_set_drvdata(pdev, netdev);
		}

		pi = &adapter->port[i];
		pi->dev = netdev;
		netif_carrier_off(netdev);
		netdev->irq = pdev->irq;
		netdev->if_port = i;
		netdev->mem_start = mmio_start;
		netdev->mem_end = mmio_start + mmio_len - 1;
		netdev->ml_priv = adapter;
		netdev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
			NETIF_F_RXCSUM;
		netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM |
			NETIF_F_RXCSUM | NETIF_F_LLTX | NETIF_F_HIGHDMA;

		if (vlan_tso_capable(adapter)) {
			netdev->features |=
				NETIF_F_HW_VLAN_CTAG_TX |
				NETIF_F_HW_VLAN_CTAG_RX;
			netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;

			/* T204: disable TSO */
			if (!(is_T2(adapter)) || bi->port_number != 4) {
				netdev->hw_features |= NETIF_F_TSO;
				netdev->features |= NETIF_F_TSO;
			}
		}

		netdev->netdev_ops = &cxgb_netdev_ops;
		netdev->hard_header_len += (netdev->hw_features & NETIF_F_TSO) ?
			sizeof(struct cpl_tx_pkt_lso) : sizeof(struct cpl_tx_pkt);

		netif_napi_add(netdev, &adapter->napi, t1_poll);

		netdev->ethtool_ops = &t1_ethtool_ops;

		switch (bi->board) {
		case CHBT_BOARD_CHT110:
		case CHBT_BOARD_N110:
		case CHBT_BOARD_N210:
		case CHBT_BOARD_CHT210:
			netdev->max_mtu = PM3393_MAX_FRAME_SIZE -
					  (ETH_HLEN + ETH_FCS_LEN);
			break;
		case CHBT_BOARD_CHN204:
			netdev->max_mtu = VSC7326_MAX_MTU;
			break;
		default:
			netdev->max_mtu = ETH_DATA_LEN;
			break;
		}
	}

	if (t1_init_sw_modules(adapter, bi) < 0) {
		err = -ENODEV;
		goto out_free_dev;
	}

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for (i = 0; i < bi->port_number; ++i) {
		err = register_netdev(adapter->port[i].dev);
		if (err)
			pr_warn("%s: cannot register net device %s, skipping\n",
				pci_name(pdev), adapter->port[i].dev->name);
		else {
			/*
			 * Change the name we use for messages to the name of
			 * the first successfully registered interface.
			 */
			if (!adapter->registered_device_map)
				adapter->name = adapter->port[i].dev->name;

			__set_bit(i, &adapter->registered_device_map);
		}
	}
	if (!adapter->registered_device_map) {
		pr_err("%s: could not register any net devices\n",
		       pci_name(pdev));
		err = -EINVAL;
		goto out_release_adapter_res;
	}

	pr_info("%s: %s (rev %d), %s %dMHz/%d-bit\n",
		adapter->name, bi->desc, adapter->params.chip_revision,
		adapter->params.pci.is_pcix ? "PCIX" : "PCI",
		adapter->params.pci.speed, adapter->params.pci.width);

	/*
	 * Set the T1B ASIC and memory clocks.
	 */
	if (t1powersave)
		adapter->t1powersave = LCLOCK;	/* HW default is powersave mode. */
	else
		adapter->t1powersave = HCLOCK;
	if (t1_is_T1B(adapter))
		t1_clock(adapter, t1powersave);

	return 0;

out_release_adapter_res:
	t1_free_sw_modules(adapter);
out_free_dev:
	if (adapter) {
		if (adapter->regs)
			iounmap(adapter->regs);
		for (i = bi->port_number - 1; i >= 0; --i)
			if (adapter->port[i].dev)
				free_netdev(adapter->port[i].dev);
	}
	pci_release_regions(pdev);
out_disable_pdev:
	pci_disable_device(pdev);
	return err;
}

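/*
 * Shift a value out MSB first on the clock synthesizer's serial interface by
 * toggling the data and clock bits in the ELMER0 GPO register.
 */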
static void bit_bang(struct adapter *adapter, int bitdata, int nbits)
{
	int data;
	int i;
	u32 val;

	enum {
		S_CLOCK = 1 << 3,
		S_DATA = 1 << 4
	};

	for (i = (nbits - 1); i > -1; i--) {

		udelay(50);

		data = ((bitdata >> i) & 0x1);
		__t1_tpi_read(adapter, A_ELMER0_GPO, &val);

		if (data)
			val |= S_DATA;
		else
			val &= ~S_DATA;

		udelay(50);

		/* Set SCLOCK low */
		val &= ~S_CLOCK;
		__t1_tpi_write(adapter, A_ELMER0_GPO, val);

		udelay(50);

		/* Write SCLOCK high */
		val |= S_CLOCK;
		__t1_tpi_write(adapter, A_ELMER0_GPO, val);

	}
}

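/*
 * Switch a T1B ASIC between its full-speed and power-saving core/memory
 * clock settings by serially reprogramming the clock synthesizers.
 */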
static int t1_clock(struct adapter *adapter, int mode)
{
	u32 val;
	int M_CORE_VAL;
	int M_MEM_VAL;

	enum {
		M_CORE_BITS	= 9,
		T_CORE_VAL	= 0,
		T_CORE_BITS	= 2,
		N_CORE_VAL	= 0,
		N_CORE_BITS	= 2,
		M_MEM_BITS	= 9,
		T_MEM_VAL	= 0,
		T_MEM_BITS	= 2,
		N_MEM_VAL	= 0,
		N_MEM_BITS	= 2,
		NP_LOAD		= 1 << 17,
		S_LOAD_MEM	= 1 << 5,
		S_LOAD_CORE	= 1 << 6,
		S_CLOCK		= 1 << 3
	};

	if (!t1_is_T1B(adapter))
		return -ENODEV;	/* Can't re-clock this chip. */

	if (mode & 2)
		return 0;	/* show current mode. */

	if ((adapter->t1powersave & 1) == (mode & 1))
		return -EALREADY;	/* ASIC already running in mode. */

	if ((mode & 1) == HCLOCK) {
		M_CORE_VAL = 0x14;
		M_MEM_VAL = 0x18;
		adapter->t1powersave = HCLOCK;	/* overclock */
	} else {
		M_CORE_VAL = 0xe;
		M_MEM_VAL = 0x10;
		adapter->t1powersave = LCLOCK;	/* underclock */
	}

	/* Don't interrupt this serial stream! */
	spin_lock(&adapter->tpi_lock);

	/* Initialize for ASIC core */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= NP_LOAD;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_CORE;
	val &= ~S_CLOCK;
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);

	/* Serial program the ASIC clock synthesizer */
	bit_bang(adapter, T_CORE_VAL, T_CORE_BITS);
	bit_bang(adapter, N_CORE_VAL, N_CORE_BITS);
	bit_bang(adapter, M_CORE_VAL, M_CORE_BITS);
	udelay(50);

	/* Finish ASIC core */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= S_LOAD_CORE;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_CORE;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);

	/* Initialize for memory */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= NP_LOAD;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_MEM;
	val &= ~S_CLOCK;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);

	/* Serial program the memory clock synthesizer */
	bit_bang(adapter, T_MEM_VAL, T_MEM_BITS);
	bit_bang(adapter, N_MEM_VAL, N_MEM_BITS);
	bit_bang(adapter, M_MEM_VAL, M_MEM_BITS);
	udelay(50);

	/* Finish memory */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= S_LOAD_MEM;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_MEM;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);

	spin_unlock(&adapter->tpi_lock);

	return 0;
}

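/*
 * Reset the chip by cycling it through power state D3hot and back to D0 via
 * its PCI power-management control/status register.
 */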
static inline void t1_sw_reset(struct pci_dev *pdev)
{
	pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 3);
	pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 0);
}

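/*
 * PCI remove handler.  Unregisters the ports, frees the software state and
 * per-port net_devices, releases PCI resources, and resets the chip.
 */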
static void remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct adapter *adapter = dev->ml_priv;
	int i;

	for_each_port(adapter, i) {
		if (test_bit(i, &adapter->registered_device_map))
			unregister_netdev(adapter->port[i].dev);
	}

	t1_free_sw_modules(adapter);
	iounmap(adapter->regs);

	while (--i >= 0) {
		if (adapter->port[i].dev)
			free_netdev(adapter->port[i].dev);
	}

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	t1_sw_reset(pdev);
}

static struct pci_driver cxgb_pci_driver = {
	.name     = DRV_NAME,
	.id_table = t1_pci_tbl,
	.probe    = init_one,
	.remove   = remove_one,
};

module_pci_driver(cxgb_pci_driver);