1/*
2 * This code is derived from the VIA reference driver (copyright message
3 * below) provided to Red Hat by VIA Networking Technologies, Inc. for
4 * addition to the Linux kernel.
5 *
6 * The code has been merged into one source file, cleaned up to follow
7 * Linux coding style,  ported to the Linux 2.6 kernel tree and cleaned
8 * for 64bit hardware platforms.
9 *
10 * TODO
11 *	rx_copybreak/alignment
12 *	More testing
13 *
14 * The changes are (c) Copyright 2004, Red Hat Inc. <alan@lxorguk.ukuu.org.uk>
15 * Additional fixes and clean up: Francois Romieu
16 *
17 * This source has not been verified for use in safety critical systems.
18 *
19 * Please direct queries about the revamped driver to the linux-kernel
20 * list, not VIA.
21 *
22 * Original code:
23 *
24 * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
25 * All rights reserved.
26 *
27 * This software may be redistributed and/or modified under
28 * the terms of the GNU General Public License as published by the Free
29 * Software Foundation; either version 2 of the License, or
30 * any later version.
31 *
32 * This program is distributed in the hope that it will be useful, but
33 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
34 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
35 * for more details.
36 *
37 * Author: Chuang Liang-Shing, AJ Jiang
38 *
39 * Date: Jan 24, 2003
40 *
41 * MODULE_LICENSE("GPL");
42 *
43 */
44
45
46#include <linux/module.h>
47#include <linux/types.h>
48#include <linux/init.h>
49#include <linux/mm.h>
50#include <linux/errno.h>
51#include <linux/ioport.h>
52#include <linux/pci.h>
53#include <linux/kernel.h>
54#include <linux/netdevice.h>
55#include <linux/etherdevice.h>
56#include <linux/skbuff.h>
57#include <linux/delay.h>
58#include <linux/timer.h>
59#include <linux/slab.h>
60#include <linux/interrupt.h>
61#include <linux/string.h>
62#include <linux/wait.h>
63#include <linux/io.h>
64#include <linux/if.h>
65#include <linux/uaccess.h>
66#include <linux/proc_fs.h>
67#include <linux/inetdevice.h>
68#include <linux/reboot.h>
69#include <linux/ethtool.h>
70#include <linux/mii.h>
71#include <linux/in.h>
72#include <linux/if_arp.h>
73#include <linux/if_vlan.h>
74#include <linux/ip.h>
75#include <linux/tcp.h>
76#include <linux/udp.h>
77#include <linux/crc-ccitt.h>
78#include <linux/crc32.h>
79
80#include "via-velocity.h"
81
82
83static int velocity_nics;
84static int msglevel = MSG_LEVEL_INFO;
85
86/**
87 *	mac_get_cam_mask	-	Read a CAM mask
88 *	@regs: register block for this velocity
89 *	@mask: buffer to store mask
90 *
91 *	Fetch the mask bits of the selected CAM and store them into the
92 *	provided mask buffer.
93 */
94static void mac_get_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
95{
96	int i;
97
98	/* Select CAM mask */
99	BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
100
101	writeb(0, &regs->CAMADDR);
102
103	/* read mask */
104	for (i = 0; i < 8; i++)
105		*mask++ = readb(&(regs->MARCAM[i]));
106
107	/* disable CAMEN */
108	writeb(0, &regs->CAMADDR);
109
110	/* Select mar */
111	BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
112}
113
114
115/**
116 *	mac_set_cam_mask	-	Set a CAM mask
117 *	@regs: register block for this velocity
118 *	@mask: CAM mask to load
119 *
120 *	Store a new mask into a CAM
121 */
122static void mac_set_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
123{
124	int i;
125	/* Select CAM mask */
126	BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
127
128	writeb(CAMADDR_CAMEN, &regs->CAMADDR);
129
130	for (i = 0; i < 8; i++)
131		writeb(*mask++, &(regs->MARCAM[i]));
132
133	/* disable CAMEN */
134	writeb(0, &regs->CAMADDR);
135
136	/* Select mar */
137	BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
138}
139
140static void mac_set_vlan_cam_mask(struct mac_regs __iomem *regs, u8 *mask)
141{
142	int i;
143	/* Select CAM mask */
144	BYTE_REG_BITS_SET(CAMCR_PS_CAM_MASK, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
145
146	writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL, &regs->CAMADDR);
147
148	for (i = 0; i < 8; i++)
149		writeb(*mask++, &(regs->MARCAM[i]));
150
151	/* disable CAMEN */
152	writeb(0, &regs->CAMADDR);
153
154	/* Select mar */
155	BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
156}
157
158/**
159 *	mac_set_cam	-	set CAM data
160 *	@regs: register block of this velocity
161 *	@idx: Cam index
162 *	@addr: 2 or 6 bytes of CAM data
163 *
164 *	Load an address or vlan tag into a CAM
165 */
166static void mac_set_cam(struct mac_regs __iomem *regs, int idx, const u8 *addr)
167{
168	int i;
169
170	/* Select CAM mask */
171	BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
172
173	idx &= (64 - 1);
174
175	writeb(CAMADDR_CAMEN | idx, &regs->CAMADDR);
176
177	for (i = 0; i < 6; i++)
178		writeb(*addr++, &(regs->MARCAM[i]));
179
180	BYTE_REG_BITS_ON(CAMCR_CAMWR, &regs->CAMCR);
181
182	udelay(10);
183
184	writeb(0, &regs->CAMADDR);
185
186	/* Select mar */
187	BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
188}
189
190static void mac_set_vlan_cam(struct mac_regs __iomem *regs, int idx,
191			     const u8 *addr)
192{
193
194	/* Select CAM mask */
195	BYTE_REG_BITS_SET(CAMCR_PS_CAM_DATA, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
196
197	idx &= (64 - 1);
198
199	writeb(CAMADDR_CAMEN | CAMADDR_VCAMSL | idx, &regs->CAMADDR);
200	writew(*((u16 *) addr), &regs->MARCAM[0]);
201
202	BYTE_REG_BITS_ON(CAMCR_CAMWR, &regs->CAMCR);
203
204	udelay(10);
205
206	writeb(0, &regs->CAMADDR);
207
208	/* Select mar */
209	BYTE_REG_BITS_SET(CAMCR_PS_MAR, CAMCR_PS1 | CAMCR_PS0, &regs->CAMCR);
210}
211
212
213/**
214 *	mac_wol_reset	-	reset WOL after exiting low power
215 *	@regs: register block of this velocity
216 *
217 *	Called after we drop out of Wake-on-LAN mode in order to
218 *	reset the Wake-on-LAN features. This function doesn't restore
219 *	the rest of the logic affected by the sleep/wakeup transition.
220 */
221static void mac_wol_reset(struct mac_regs __iomem *regs)
222{
223
224	/* Turn off SWPTAG right after leaving power mode */
225	BYTE_REG_BITS_OFF(STICKHW_SWPTAG, &regs->STICKHW);
226	/* clear sticky bits */
227	BYTE_REG_BITS_OFF((STICKHW_DS1 | STICKHW_DS0), &regs->STICKHW);
228
229	BYTE_REG_BITS_OFF(CHIPGCR_FCGMII, &regs->CHIPGCR);
230	BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
231	/* disable force PME-enable */
232	writeb(WOLCFG_PMEOVR, &regs->WOLCFGClr);
233	/* disable power-event config bit */
234	writew(0xFFFF, &regs->WOLCRClr);
235	/* clear power status */
236	writew(0xFFFF, &regs->WOLSRClr);
237}
238
239static const struct ethtool_ops velocity_ethtool_ops;
240
241/*
242    Define module options
243*/
244
245MODULE_AUTHOR("VIA Networking Technologies, Inc.");
246MODULE_LICENSE("GPL");
247MODULE_DESCRIPTION("VIA Networking Velocity Family Gigabit Ethernet Adapter Driver");
248
249#define VELOCITY_PARAM(N, D) \
250	static int N[MAX_UNITS] = OPTION_DEFAULT;\
251	module_param_array(N, int, NULL, 0); \
252	MODULE_PARM_DESC(N, D);
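/*
 * For example, VELOCITY_PARAM(RxDescriptors, "Number of receive descriptors")
 * expands (whitespace aside) to:
 *
 *	static int RxDescriptors[MAX_UNITS] = OPTION_DEFAULT;
 *	module_param_array(RxDescriptors, int, NULL, 0);
 *	MODULE_PARM_DESC(RxDescriptors, "Number of receive descriptors");
 *
 * so every option below is a per-adapter array settable at module load time.
 */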
253
254#define RX_DESC_MIN     64
255#define RX_DESC_MAX     255
256#define RX_DESC_DEF     64
257VELOCITY_PARAM(RxDescriptors, "Number of receive descriptors");
258
259#define TX_DESC_MIN     16
260#define TX_DESC_MAX     256
261#define TX_DESC_DEF     64
262VELOCITY_PARAM(TxDescriptors, "Number of transmit descriptors");
263
264#define RX_THRESH_MIN   0
265#define RX_THRESH_MAX   3
266#define RX_THRESH_DEF   0
267/* rx_thresh[] is used for controlling the receive fifo threshold.
268   0: indicate the rxfifo threshold is 128 bytes.
269   1: indicate the rxfifo threshold is 512 bytes.
270   2: indicate the rxfifo threshold is 1024 bytes.
271   3: indicate the rxfifo threshold is store & forward.
272*/
273VELOCITY_PARAM(rx_thresh, "Receive fifo threshold");
274
275#define DMA_LENGTH_MIN  0
276#define DMA_LENGTH_MAX  7
277#define DMA_LENGTH_DEF  6
278
279/* DMA_length[] is used for controlling the DMA length
280   0: 8 DWORDs
281   1: 16 DWORDs
282   2: 32 DWORDs
283   3: 64 DWORDs
284   4: 128 DWORDs
285   5: 256 DWORDs
286   6: SF(flush till empty)
287   7: SF(flush till empty)
288*/
289VELOCITY_PARAM(DMA_length, "DMA length");
290
291#define IP_ALIG_DEF     0
292/* IP_byte_align[] controls whether the IP header is DWORD aligned.
293   0: the IP header will not be DWORD aligned. (Default)
294   1: the IP header will be DWORD aligned.
295      In some environments the IP header must be DWORD aligned,
296      or the packet will be dropped when we receive it. (e.g. IPVS)
297*/
298VELOCITY_PARAM(IP_byte_align, "Enable IP header dword alignment");
299
300#define FLOW_CNTL_DEF   1
301#define FLOW_CNTL_MIN   1
302#define FLOW_CNTL_MAX   5
303
304/* flow_control[] is used for setting the flow control ability of NIC.
305   1: hardware default - AUTO (default). Use hardware default value in ANAR.
306   2: enable TX flow control.
307   3: enable RX flow control.
308   4: enable RX/TX flow control.
309   5: disable
310*/
311VELOCITY_PARAM(flow_control, "Enable flow control ability");
312
313#define MED_LNK_DEF 0
314#define MED_LNK_MIN 0
315#define MED_LNK_MAX 4
316/* speed_duplex[] is used for setting the speed and duplex mode of NIC.
317   0: indicate autonegotiation for both speed and duplex mode
318   1: indicate 100Mbps half duplex mode
319   2: indicate 100Mbps full duplex mode
320   3: indicate 10Mbps half duplex mode
321   4: indicate 10Mbps full duplex mode
322
323   Note:
324   If the EEPROM has been set to a forced mode, this option is ignored
325   by the driver.
326*/
327VELOCITY_PARAM(speed_duplex, "Setting the speed and duplex mode");
328
329#define VAL_PKT_LEN_DEF     0
330/* ValPktLen[] controls whether frames with an invalid layer 2 length are dropped.
331   0: Receive frames with an invalid layer 2 length (Default)
332   1: Drop frames with an invalid layer 2 length
333*/
334VELOCITY_PARAM(ValPktLen, "Receive or drop frames with an invalid 802.3 length");
335
336#define WOL_OPT_DEF     0
337#define WOL_OPT_MIN     0
338#define WOL_OPT_MAX     7
339/* wol_opts[] is used for controlling Wake-on-LAN behavior.
340   0: Wake up if a magic packet is received. (Default)
341   1: Wake up if the link status changes.
342   2: Wake up if an ARP packet is received.
343   4: Wake up if any unicast packet is received.
344   These values can be summed to enable more than one option.
345*/
346VELOCITY_PARAM(wol_opts, "Wake On Lan options");
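/*
 * For instance, wol_opts=6 (i.e. 2 + 4) requests wake on either an ARP
 * packet or any unicast packet, per the bit values listed above.
 */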
347
348static int rx_copybreak = 200;
349module_param(rx_copybreak, int, 0644);
350MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
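/*
 * Illustrative load-time usage (hypothetical values, assuming two adapters):
 *
 *	modprobe via-velocity DMA_length=6,6 speed_duplex=0,2 rx_copybreak=256
 *
 * selects store-and-forward DMA on both NICs, autonegotiation on the first,
 * forced 100M full duplex on the second, and a 256 byte copybreak.
 */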
351
352/*
353 *	Internal board variants. At the moment we have only one
354 */
355static struct velocity_info_tbl chip_info_table[] = {
356	{CHIP_TYPE_VT6110, "VIA Networking Velocity Family Gigabit Ethernet Adapter", 1, 0x00FFFFFFUL},
357	{ }
358};
359
360/*
361 *	Describe the PCI device identifiers that we support in this
362 *	device driver. Used for hotplug autoloading.
363 */
364static DEFINE_PCI_DEVICE_TABLE(velocity_id_table) = {
365	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_612X) },
366	{ }
367};
368
369MODULE_DEVICE_TABLE(pci, velocity_id_table);
370
371/**
372 *	get_chip_name	- 	identifier to name
373 *	@chip_id: chip identifier
374 *
375 *	Given a chip identifier return a suitable description. Returns
376 *	a pointer to a static string valid while the driver is loaded.
377 */
378static const char __devinit *get_chip_name(enum chip_type chip_id)
379{
380	int i;
381	for (i = 0; chip_info_table[i].name != NULL; i++)
382		if (chip_info_table[i].chip_id == chip_id)
383			break;
384	return chip_info_table[i].name;
385}
386
387/**
388 *	velocity_remove1	-	device unplug
389 *	@pdev: PCI device being removed
390 *
391 *	Device unload callback. Called on an unplug or on module
392 *	unload for each active device that is present. Disconnects
393 *	the device from the network layer and frees all the resources
394 */
395static void __devexit velocity_remove1(struct pci_dev *pdev)
396{
397	struct net_device *dev = pci_get_drvdata(pdev);
398	struct velocity_info *vptr = netdev_priv(dev);
399
400	unregister_netdev(dev);
401	iounmap(vptr->mac_regs);
402	pci_release_regions(pdev);
403	pci_disable_device(pdev);
404	pci_set_drvdata(pdev, NULL);
405	free_netdev(dev);
406
407	velocity_nics--;
408}
409
410/**
411 *	velocity_set_int_opt	-	parser for integer options
412 *	@opt: pointer to option value
413 *	@val: value the user requested (or -1 for default)
414 *	@min: lowest value allowed
415 *	@max: highest value allowed
416 *	@def: default value
417 *	@name: property name
418 *	@devname: device name
419 *
420 *	Set an integer property in the module options. This function does
421 *	all the verification and checking as well as reporting so that
422 *	we don't duplicate code for each option.
423 */
424static void __devinit velocity_set_int_opt(int *opt, int val, int min, int max, int def, char *name, const char *devname)
425{
426	if (val == -1)
427		*opt = def;
428	else if (val < min || val > max) {
429		VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: the value of parameter %s is invalid, the valid range is (%d-%d)\n",
430					devname, name, min, max);
431		*opt = def;
432	} else {
433		VELOCITY_PRT(MSG_LEVEL_INFO, KERN_INFO "%s: set value of parameter %s to %d\n",
434					devname, name, val);
435		*opt = val;
436	}
437}
438
439/**
440 *	velocity_set_bool_opt	-	parser for boolean options
441 *	@opt: pointer to option value
442 *	@val: value the user requested (or -1 for default)
443 *	@def: default value (yes/no)
444 *	@flag: numeric value to set for true.
445 *	@name: property name
446 *	@devname: device name
447 *
448 *	Set a boolean property in the module options. This function does
449 *	all the verification and checking as well as reporting so that
450 *	we don't duplicate code for each option.
451 */
452static void __devinit velocity_set_bool_opt(u32 *opt, int val, int def, u32 flag, char *name, const char *devname)
453{
454	(*opt) &= (~flag);
455	if (val == -1)
456		*opt |= (def ? flag : 0);
457	else if (val < 0 || val > 1) {
458		printk(KERN_NOTICE "%s: the value of parameter %s is invalid, the valid range is (0-1)\n",
459			devname, name);
460		*opt |= (def ? flag : 0);
461	} else {
462		printk(KERN_INFO "%s: set parameter %s to %s\n",
463			devname, name, val ? "TRUE" : "FALSE");
464		*opt |= (val ? flag : 0);
465	}
466}
467
468/**
469 *	velocity_get_options	-	set options on device
470 *	@opts: option structure for the device
471 *	@index: index of option to use in module options array
472 *	@devname: device name
473 *
474 *	Turn the module and command options into a single structure
475 *	for the current device
476 */
477static void __devinit velocity_get_options(struct velocity_opt *opts, int index, const char *devname)
478{
479
480	velocity_set_int_opt(&opts->rx_thresh, rx_thresh[index], RX_THRESH_MIN, RX_THRESH_MAX, RX_THRESH_DEF, "rx_thresh", devname);
481	velocity_set_int_opt(&opts->DMA_length, DMA_length[index], DMA_LENGTH_MIN, DMA_LENGTH_MAX, DMA_LENGTH_DEF, "DMA_length", devname);
482	velocity_set_int_opt(&opts->numrx, RxDescriptors[index], RX_DESC_MIN, RX_DESC_MAX, RX_DESC_DEF, "RxDescriptors", devname);
483	velocity_set_int_opt(&opts->numtx, TxDescriptors[index], TX_DESC_MIN, TX_DESC_MAX, TX_DESC_DEF, "TxDescriptors", devname);
484
485	velocity_set_int_opt(&opts->flow_cntl, flow_control[index], FLOW_CNTL_MIN, FLOW_CNTL_MAX, FLOW_CNTL_DEF, "flow_control", devname);
486	velocity_set_bool_opt(&opts->flags, IP_byte_align[index], IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN, "IP_byte_align", devname);
487	velocity_set_bool_opt(&opts->flags, ValPktLen[index], VAL_PKT_LEN_DEF, VELOCITY_FLAGS_VAL_PKT_LEN, "ValPktLen", devname);
488	velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index], MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF, "Media link mode", devname);
489	velocity_set_int_opt((int *) &opts->wol_opts, wol_opts[index], WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, "Wake On Lan options", devname);
490	opts->numrx = (opts->numrx & ~3);
491}
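/*
 * Note: the final "numrx & ~3" above rounds the receive descriptor count
 * down to a multiple of four (e.g. a request for 70 descriptors becomes 68),
 * matching the 4-descriptor granularity the hardware expects; see
 * velocity_give_many_rx_descs() below.
 */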
492
493/**
494 *	velocity_init_cam_filter	-	initialise CAM
495 *	@vptr: velocity to program
496 *
497 *	Initialize the content addressable memory used for filters. Load
498 *	appropriately according to the presence of VLAN
499 */
500static void velocity_init_cam_filter(struct velocity_info *vptr)
501{
502	struct mac_regs __iomem *regs = vptr->mac_regs;
503
504	/* Turn on MCFG_PQEN, turn off MCFG_RTGOPT */
505	WORD_REG_BITS_SET(MCFG_PQEN, MCFG_RTGOPT, &regs->MCFG);
506	WORD_REG_BITS_ON(MCFG_VIDFR, &regs->MCFG);
507
508	/* Disable all CAMs */
509	memset(vptr->vCAMmask, 0, sizeof(u8) * 8);
510	memset(vptr->mCAMmask, 0, sizeof(u8) * 8);
511	mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
512	mac_set_cam_mask(regs, vptr->mCAMmask);
513
514	/* Enable VCAMs */
515	if (vptr->vlgrp) {
516		unsigned int vid, i = 0;
517
518		if (!vlan_group_get_device(vptr->vlgrp, 0))
519			WORD_REG_BITS_ON(MCFG_RTGOPT, &regs->MCFG);
520
521		for (vid = 1; (vid < VLAN_VID_MASK); vid++) {
522			if (vlan_group_get_device(vptr->vlgrp, vid)) {
523				mac_set_vlan_cam(regs, i, (u8 *) &vid);
524				vptr->vCAMmask[i / 8] |= 0x1 << (i % 8);
525				if (++i >= VCAM_SIZE)
526					break;
527			}
528		}
529		mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
530	}
531}
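/*
 * Worked example of the mask arithmetic above (illustrative): enabling
 * VCAM entry i = 10 sets bit (10 % 8) = 2 of vCAMmask[10 / 8], i.e.
 * vCAMmask[1] |= 0x04, which mac_set_vlan_cam_mask() then writes out to
 * the eight MARCAM mask bytes.
 */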
532
533static void velocity_vlan_rx_register(struct net_device *dev,
534				      struct vlan_group *grp)
535{
536	struct velocity_info *vptr = netdev_priv(dev);
537
538	vptr->vlgrp = grp;
539}
540
541static void velocity_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
542{
543	struct velocity_info *vptr = netdev_priv(dev);
544
545	spin_lock_irq(&vptr->lock);
546	velocity_init_cam_filter(vptr);
547	spin_unlock_irq(&vptr->lock);
548}
549
550static void velocity_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
551{
552	struct velocity_info *vptr = netdev_priv(dev);
553
554	spin_lock_irq(&vptr->lock);
555	vlan_group_set_device(vptr->vlgrp, vid, NULL);
556	velocity_init_cam_filter(vptr);
557	spin_unlock_irq(&vptr->lock);
558}
559
560static void velocity_init_rx_ring_indexes(struct velocity_info *vptr)
561{
562	vptr->rx.dirty = vptr->rx.filled = vptr->rx.curr = 0;
563}
564
565/**
566 *	velocity_rx_reset	-	handle a receive reset
567 *	@vptr: velocity we are resetting
568 *
569 *	Reset the ownership and status for the receive ring side.
570 *	Hand all the receive queue to the NIC.
571 */
572static void velocity_rx_reset(struct velocity_info *vptr)
573{
574
575	struct mac_regs __iomem *regs = vptr->mac_regs;
576	int i;
577
578	velocity_init_rx_ring_indexes(vptr);
579
580	/*
581	 *	Init state, all RD entries belong to the NIC
582	 */
583	for (i = 0; i < vptr->options.numrx; ++i)
584		vptr->rx.ring[i].rdesc0.len |= OWNED_BY_NIC;
585
586	writew(vptr->options.numrx, &regs->RBRDU);
587	writel(vptr->rx.pool_dma, &regs->RDBaseLo);
588	writew(0, &regs->RDIdx);
589	writew(vptr->options.numrx - 1, &regs->RDCSize);
590}
591
592/**
593 *	velocity_get_opt_media_mode	-	get media selection
594 *	@vptr: velocity adapter
595 *
596 *	Get the media mode stored in EEPROM or module options and load
597 *	mii_status accordingly. The requested link state information
598 *	is also returned.
599 */
600static u32 velocity_get_opt_media_mode(struct velocity_info *vptr)
601{
602	u32 status = 0;
603
604	switch (vptr->options.spd_dpx) {
605	case SPD_DPX_AUTO:
606		status = VELOCITY_AUTONEG_ENABLE;
607		break;
608	case SPD_DPX_100_FULL:
609		status = VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL;
610		break;
611	case SPD_DPX_10_FULL:
612		status = VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL;
613		break;
614	case SPD_DPX_100_HALF:
615		status = VELOCITY_SPEED_100;
616		break;
617	case SPD_DPX_10_HALF:
618		status = VELOCITY_SPEED_10;
619		break;
620	}
621	vptr->mii_status = status;
622	return status;
623}
624
625/**
626 *	safe_disable_mii_autopoll	-	autopoll off
627 *	@regs: velocity registers
628 *
629 *	Turn off the autopoll and wait for it to disable on the chip
630 */
631static void safe_disable_mii_autopoll(struct mac_regs __iomem *regs)
632{
633	u16 ww;
634
635	/*  turn off MAUTO */
636	writeb(0, &regs->MIICR);
637	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
638		udelay(1);
639		if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
640			break;
641	}
642}
643
644/**
645 *	enable_mii_autopoll	-	turn on autopolling
646 *	@regs: velocity registers
647 *
648 *	Enable the MII link status autopoll feature on the Velocity
649 *	hardware. Wait for it to enable.
650 */
651static void enable_mii_autopoll(struct mac_regs __iomem *regs)
652{
653	int ii;
654
655	writeb(0, &(regs->MIICR));
656	writeb(MIIADR_SWMPL, &regs->MIIADR);
657
658	for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
659		udelay(1);
660		if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
661			break;
662	}
663
664	writeb(MIICR_MAUTO, &regs->MIICR);
665
666	for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
667		udelay(1);
668		if (!BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
669			break;
670	}
671
672}
673
674/**
675 *	velocity_mii_read	-	read MII data
676 *	@regs: velocity registers
677 *	@index: MII register index
678 *	@data: buffer for received data
679 *
680 *	Perform a single read of an MII 16bit register. Returns zero
681 *	on success or -ETIMEDOUT if the PHY did not respond.
682 */
683static int velocity_mii_read(struct mac_regs __iomem *regs, u8 index, u16 *data)
684{
685	u16 ww;
686
687	/*
688	 *	Disable MIICR_MAUTO, so that mii addr can be set normally
689	 */
690	safe_disable_mii_autopoll(regs);
691
692	writeb(index, &regs->MIIADR);
693
694	BYTE_REG_BITS_ON(MIICR_RCMD, &regs->MIICR);
695
696	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
697		if (!(readb(&regs->MIICR) & MIICR_RCMD))
698			break;
699	}
700
701	*data = readw(&regs->MIIDATA);
702
703	enable_mii_autopoll(regs);
704	if (ww == W_MAX_TIMEOUT)
705		return -ETIMEDOUT;
706	return 0;
707}
708
709
710/**
711 *	mii_check_media_mode	-	check media state
712 *	@regs: velocity registers
713 *
714 *	Check the current MII status and determine the link status
715 *	accordingly
716 */
717static u32 mii_check_media_mode(struct mac_regs __iomem *regs)
718{
719	u32 status = 0;
720	u16 ANAR;
721
722	if (!MII_REG_BITS_IS_ON(BMSR_LSTATUS, MII_BMSR, regs))
723		status |= VELOCITY_LINK_FAIL;
724
725	if (MII_REG_BITS_IS_ON(ADVERTISE_1000FULL, MII_CTRL1000, regs))
726		status |= VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL;
727	else if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF, MII_CTRL1000, regs))
728		status |= (VELOCITY_SPEED_1000);
729	else {
730		velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
731		if (ANAR & ADVERTISE_100FULL)
732			status |= (VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL);
733		else if (ANAR & ADVERTISE_100HALF)
734			status |= VELOCITY_SPEED_100;
735		else if (ANAR & ADVERTISE_10FULL)
736			status |= (VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL);
737		else
738			status |= (VELOCITY_SPEED_10);
739	}
740
741	if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) {
742		velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
743		if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF))
744		    == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) {
745			if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs))
746				status |= VELOCITY_AUTONEG_ENABLE;
747		}
748	}
749
750	return status;
751}
752
753/**
754 *	velocity_mii_write	-	write MII data
755 *	@regs: velocity registers
756 *	@mii_addr: MII register index
757 *	@data: 16bit data for the MII register
758 *
759 *	Perform a single write to an MII 16bit register. Returns zero
760 *	on success or -ETIMEDOUT if the PHY did not respond.
761 */
762static int velocity_mii_write(struct mac_regs __iomem *regs, u8 mii_addr, u16 data)
763{
764	u16 ww;
765
766	/*
767	 *	Disable MIICR_MAUTO, so that mii addr can be set normally
768	 */
769	safe_disable_mii_autopoll(regs);
770
771	/* MII reg offset */
772	writeb(mii_addr, &regs->MIIADR);
773	/* set MII data */
774	writew(data, &regs->MIIDATA);
775
776	/* turn on MIICR_WCMD */
777	BYTE_REG_BITS_ON(MIICR_WCMD, &regs->MIICR);
778
779	/* W_MAX_TIMEOUT is the timeout period */
780	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
781		udelay(5);
782		if (!(readb(&regs->MIICR) & MIICR_WCMD))
783			break;
784	}
785	enable_mii_autopoll(regs);
786
787	if (ww == W_MAX_TIMEOUT)
788		return -ETIMEDOUT;
789	return 0;
790}
791
792/**
793 *	set_mii_flow_control	-	flow control setup
794 *	@vptr: velocity interface
795 *
796 *	Set up the flow control on this interface according to
797 *	the supplied user/eeprom options.
798 */
799static void set_mii_flow_control(struct velocity_info *vptr)
800{
801	/*Enable or Disable PAUSE in ANAR */
802	switch (vptr->options.flow_cntl) {
803	case FLOW_CNTL_TX:
804		MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
805		MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
806		break;
807
808	case FLOW_CNTL_RX:
809		MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
810		MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
811		break;
812
813	case FLOW_CNTL_TX_RX:
814		MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
815		MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
816		break;
817
818	case FLOW_CNTL_DISABLE:
819		MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
820		MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
821		break;
822	default:
823		break;
824	}
825}
826
827/**
828 *	mii_set_auto_on		-	autonegotiate on
829 *	@vptr: velocity
830 *
831 *	Enable autonegotiation on this interface.
832 */
833static void mii_set_auto_on(struct velocity_info *vptr)
834{
835	if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs))
836		MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
837	else
838		MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs);
839}
840
841static u32 check_connection_type(struct mac_regs __iomem *regs)
842{
843	u32 status = 0;
844	u8 PHYSR0;
845	u16 ANAR;
846	PHYSR0 = readb(&regs->PHYSR0);
847
848	/*
849	   if (!(PHYSR0 & PHYSR0_LINKGD))
850	   status|=VELOCITY_LINK_FAIL;
851	 */
852
853	if (PHYSR0 & PHYSR0_FDPX)
854		status |= VELOCITY_DUPLEX_FULL;
855
856	if (PHYSR0 & PHYSR0_SPDG)
857		status |= VELOCITY_SPEED_1000;
858	else if (PHYSR0 & PHYSR0_SPD10)
859		status |= VELOCITY_SPEED_10;
860	else
861		status |= VELOCITY_SPEED_100;
862
863	if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) {
864		velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
865		if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF))
866		    == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) {
867			if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs))
868				status |= VELOCITY_AUTONEG_ENABLE;
869		}
870	}
871
872	return status;
873}
874
875
876
877/**
878 *	velocity_set_media_mode		-	set media mode
879 *	@mii_status: old MII link state
880 *
881 *	Check the media link state and configure the flow control
882 *	on the PHY and the velocity hardware accordingly. In particular
883 *	we need to set up CD polling and frame bursting.
884 */
885static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
886{
887	u32 curr_status;
888	struct mac_regs __iomem *regs = vptr->mac_regs;
889
890	vptr->mii_status = mii_check_media_mode(vptr->mac_regs);
891	curr_status = vptr->mii_status & (~VELOCITY_LINK_FAIL);
892
893	/* Set mii link status */
894	set_mii_flow_control(vptr);
895
896	/*
897	   Check if new status is consistent with current status
898	   if (((mii_status & curr_status) & VELOCITY_AUTONEG_ENABLE) ||
899	       (mii_status==curr_status)) {
900	   vptr->mii_status=mii_check_media_mode(vptr->mac_regs);
901	   vptr->mii_status=check_connection_type(vptr->mac_regs);
902	   VELOCITY_PRT(MSG_LEVEL_INFO, "Velocity link no change\n");
903	   return 0;
904	   }
905	 */
906
907	if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
908		MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
909
910	/*
911	 *	If connection type is AUTO
912	 */
913	if (mii_status & VELOCITY_AUTONEG_ENABLE) {
914		VELOCITY_PRT(MSG_LEVEL_INFO, "Velocity is AUTO mode\n");
915		/* clear force MAC mode bit */
916		BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
917		/* set duplex mode of MAC according to duplex mode of MII */
918		MII_REG_BITS_ON(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF, MII_ADVERTISE, vptr->mac_regs);
919		MII_REG_BITS_ON(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
920		MII_REG_BITS_ON(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs);
921
922		/* enable AUTO-NEGO mode */
923		mii_set_auto_on(vptr);
924	} else {
925		u16 ANAR;
926		u8 CHIPGCR;
927
928		/*
929		 * 1. if it's 3119, disable frame bursting in halfduplex mode
930		 *    and enable it in fullduplex mode
931		 * 2. set correct MII/GMII and half/full duplex mode in CHIPGCR
932		 * 3. only enable CD heart beat counter in 10HD mode
933		 */
934
935		/* set force MAC mode bit */
936		BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);
937
938		CHIPGCR = readb(&regs->CHIPGCR);
939		CHIPGCR &= ~CHIPGCR_FCGMII;
940
941		if (mii_status & VELOCITY_DUPLEX_FULL) {
942			CHIPGCR |= CHIPGCR_FCFDX;
943			writeb(CHIPGCR, &regs->CHIPGCR);
944			VELOCITY_PRT(MSG_LEVEL_INFO, "set Velocity to forced full mode\n");
945			if (vptr->rev_id < REV_ID_VT3216_A0)
946				BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
947		} else {
948			CHIPGCR &= ~CHIPGCR_FCFDX;
949			VELOCITY_PRT(MSG_LEVEL_INFO, "set Velocity to forced half mode\n");
950			writeb(CHIPGCR, &regs->CHIPGCR);
951			if (vptr->rev_id < REV_ID_VT3216_A0)
952				BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
953		}
954
955		MII_REG_BITS_OFF(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
956
957		if (!(mii_status & VELOCITY_DUPLEX_FULL) && (mii_status & VELOCITY_SPEED_10))
958			BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
959		else
960			BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);
961
962		/* MII_REG_BITS_OFF(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs); */
963		velocity_mii_read(vptr->mac_regs, MII_ADVERTISE, &ANAR);
964		ANAR &= (~(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF));
965		if (mii_status & VELOCITY_SPEED_100) {
966			if (mii_status & VELOCITY_DUPLEX_FULL)
967				ANAR |= ADVERTISE_100FULL;
968			else
969				ANAR |= ADVERTISE_100HALF;
970		} else {
971			if (mii_status & VELOCITY_DUPLEX_FULL)
972				ANAR |= ADVERTISE_10FULL;
973			else
974				ANAR |= ADVERTISE_10HALF;
975		}
976		velocity_mii_write(vptr->mac_regs, MII_ADVERTISE, ANAR);
977		/* enable AUTO-NEGO mode */
978		mii_set_auto_on(vptr);
979		/* MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs); */
980	}
981	/* vptr->mii_status=mii_check_media_mode(vptr->mac_regs); */
982	/* vptr->mii_status=check_connection_type(vptr->mac_regs); */
983	return VELOCITY_LINK_CHANGE;
984}
985
986/**
987 *	velocity_print_link_status	-	link status reporting
988 *	@vptr: velocity to report on
989 *
990 *	Turn the link status of the velocity card into a kernel log
991 *	description of the new link state, detailing speed and duplex
992 *	status
993 */
994static void velocity_print_link_status(struct velocity_info *vptr)
995{
996
997	if (vptr->mii_status & VELOCITY_LINK_FAIL) {
998		VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: failed to detect cable link\n", vptr->dev->name);
999	} else if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
1000		VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link auto-negotiation", vptr->dev->name);
1001
1002		if (vptr->mii_status & VELOCITY_SPEED_1000)
1003			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps");
1004		else if (vptr->mii_status & VELOCITY_SPEED_100)
1005			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps");
1006		else
1007			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps");
1008
1009		if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1010			VELOCITY_PRT(MSG_LEVEL_INFO, " full duplex\n");
1011		else
1012			VELOCITY_PRT(MSG_LEVEL_INFO, " half duplex\n");
1013	} else {
1014		VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link forced", vptr->dev->name);
1015		switch (vptr->options.spd_dpx) {
1016		case SPD_DPX_100_HALF:
1017			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps half duplex\n");
1018			break;
1019		case SPD_DPX_100_FULL:
1020			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 100M bps full duplex\n");
1021			break;
1022		case SPD_DPX_10_HALF:
1023			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps half duplex\n");
1024			break;
1025		case SPD_DPX_10_FULL:
1026			VELOCITY_PRT(MSG_LEVEL_INFO, " speed 10M bps full duplex\n");
1027			break;
1028		default:
1029			break;
1030		}
1031	}
1032}
1033
1034/**
1035 *	enable_flow_control_ability	-	flow control
1036 *	@vptr: velocity to configure
1037 *
1038 *	Set up flow control according to the flow control options
1039 *	determined by the eeprom/configuration.
1040 */
1041static void enable_flow_control_ability(struct velocity_info *vptr)
1042{
1043
1044	struct mac_regs __iomem *regs = vptr->mac_regs;
1045
1046	switch (vptr->options.flow_cntl) {
1047
1048	case FLOW_CNTL_DEFAULT:
1049		if (BYTE_REG_BITS_IS_ON(PHYSR0_RXFLC, &regs->PHYSR0))
1050			writel(CR0_FDXRFCEN, &regs->CR0Set);
1051		else
1052			writel(CR0_FDXRFCEN, &regs->CR0Clr);
1053
1054		if (BYTE_REG_BITS_IS_ON(PHYSR0_TXFLC, &regs->PHYSR0))
1055			writel(CR0_FDXTFCEN, &regs->CR0Set);
1056		else
1057			writel(CR0_FDXTFCEN, &regs->CR0Clr);
1058		break;
1059
1060	case FLOW_CNTL_TX:
1061		writel(CR0_FDXTFCEN, &regs->CR0Set);
1062		writel(CR0_FDXRFCEN, &regs->CR0Clr);
1063		break;
1064
1065	case FLOW_CNTL_RX:
1066		writel(CR0_FDXRFCEN, &regs->CR0Set);
1067		writel(CR0_FDXTFCEN, &regs->CR0Clr);
1068		break;
1069
1070	case FLOW_CNTL_TX_RX:
1071		writel(CR0_FDXTFCEN, &regs->CR0Set);
1072		writel(CR0_FDXRFCEN, &regs->CR0Set);
1073		break;
1074
1075	case FLOW_CNTL_DISABLE:
1076		writel(CR0_FDXRFCEN, &regs->CR0Clr);
1077		writel(CR0_FDXTFCEN, &regs->CR0Clr);
1078		break;
1079
1080	default:
1081		break;
1082	}
1083
1084}
1085
1086/**
1087 *	velocity_soft_reset	-	soft reset
1088 *	@vptr: velocity to reset
1089 *
1090 *	Kick off a soft reset of the velocity adapter and then poll
1091 *	until the reset sequence has completed before returning.
1092 */
1093static int velocity_soft_reset(struct velocity_info *vptr)
1094{
1095	struct mac_regs __iomem *regs = vptr->mac_regs;
1096	int i = 0;
1097
1098	writel(CR0_SFRST, &regs->CR0Set);
1099
1100	for (i = 0; i < W_MAX_TIMEOUT; i++) {
1101		udelay(5);
1102		if (!DWORD_REG_BITS_IS_ON(CR0_SFRST, &regs->CR0Set))
1103			break;
1104	}
1105
1106	if (i == W_MAX_TIMEOUT) {
1107		writel(CR0_FORSRST, &regs->CR0Set);
1108		/* delay 2ms */
1109		mdelay(2);
1110	}
1111	return 0;
1112}
1113
1114/**
1115 *	velocity_set_multi	-	filter list change callback
1116 *	@dev: network device
1117 *
1118 *	Called by the network layer when the filter lists need to change
1119 *	for a velocity adapter. Reload the CAMs with the new address
1120 *	filter ruleset.
1121 */
1122static void velocity_set_multi(struct net_device *dev)
1123{
1124	struct velocity_info *vptr = netdev_priv(dev);
1125	struct mac_regs __iomem *regs = vptr->mac_regs;
1126	u8 rx_mode;
1127	int i;
1128	struct netdev_hw_addr *ha;
1129
1130	if (dev->flags & IFF_PROMISC) {	/* Set promiscuous. */
1131		writel(0xffffffff, &regs->MARCAM[0]);
1132		writel(0xffffffff, &regs->MARCAM[4]);
1133		rx_mode = (RCR_AM | RCR_AB | RCR_PROM);
1134	} else if ((netdev_mc_count(dev) > vptr->multicast_limit) ||
1135		   (dev->flags & IFF_ALLMULTI)) {
1136		writel(0xffffffff, &regs->MARCAM[0]);
1137		writel(0xffffffff, &regs->MARCAM[4]);
1138		rx_mode = (RCR_AM | RCR_AB);
1139	} else {
1140		int offset = MCAM_SIZE - vptr->multicast_limit;
1141		mac_get_cam_mask(regs, vptr->mCAMmask);
1142
1143		i = 0;
1144		netdev_for_each_mc_addr(ha, dev) {
1145			mac_set_cam(regs, i + offset, ha->addr);
1146			vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7);
1147			i++;
1148		}
1149
1150		mac_set_cam_mask(regs, vptr->mCAMmask);
1151		rx_mode = RCR_AM | RCR_AB | RCR_AP;
1152	}
1153	if (dev->mtu > 1500)
1154		rx_mode |= RCR_AL;
1155
1156	BYTE_REG_BITS_ON(rx_mode, &regs->RCR);
1157
1158}
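/*
 * Worked example for the multicast CAM fill above (illustrative, assuming
 * MCAM_SIZE is 64 and multicast_limit is 32): the first multicast address
 * lands in CAM entry 32, setting bit 0 of mCAMmask[4]; the second goes to
 * entry 33 (bit 1 of mCAMmask[4]), and so on.
 */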
1159
1160/*
1161 * MII access and media link mode setting functions
1162 */
1163
1164/**
1165 *	mii_init	-	set up MII
1166 *	@vptr: velocity adapter
1167 *	@mii_status: link status
1168 *
1169 *	Set up the PHY for the current link state.
1170 */
1171static void mii_init(struct velocity_info *vptr, u32 mii_status)
1172{
1173	u16 BMCR;
1174
1175	switch (PHYID_GET_PHY_ID(vptr->phy_id)) {
1176	case PHYID_CICADA_CS8201:
1177		/*
1178		 *	Reset to hardware default
1179		 */
1180		MII_REG_BITS_OFF((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
1181		/*
1182		 *	Turn on the ECHODIS bit in NWay-forced full mode and turn
1183		 *	it off in NWay-forced half mode, to work around the
1184		 *	NWay-forced vs. legacy-forced issue.
1185		 */
1186		if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1187			MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
1188		else
1189			MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
1190		/*
1191		 *	Turn on Link/Activity LED enable bit for CIS8201
1192		 */
1193		MII_REG_BITS_ON(PLED_LALBE, MII_TPISTATUS, vptr->mac_regs);
1194		break;
1195	case PHYID_VT3216_32BIT:
1196	case PHYID_VT3216_64BIT:
1197		/*
1198		 *	Reset to hardware default
1199		 */
1200		MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
1201		/*
1202		 *	Turn on the ECHODIS bit in NWay-forced full mode and turn
1203		 *	it off in NWay-forced half mode, to work around the
1204		 *	NWay-forced vs. legacy-forced issue.
1205		 */
1206		if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1207			MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
1208		else
1209			MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
1210		break;
1211
1212	case PHYID_MARVELL_1000:
1213	case PHYID_MARVELL_1000S:
1214		/*
1215		 *	Assert CRS on Transmit
1216		 */
1217		MII_REG_BITS_ON(PSCR_ACRSTX, MII_REG_PSCR, vptr->mac_regs);
1218		/*
1219		 *	Reset to hardware default
1220		 */
1221		MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
1222		break;
1223	default:
1224		;
1225	}
1226	velocity_mii_read(vptr->mac_regs, MII_BMCR, &BMCR);
1227	if (BMCR & BMCR_ISOLATE) {
1228		BMCR &= ~BMCR_ISOLATE;
1229		velocity_mii_write(vptr->mac_regs, MII_BMCR, BMCR);
1230	}
1231}
1232
1233/**
1234 * setup_queue_timers	-	Setup interrupt timers
1235 * @vptr: velocity adapter
1236 * Setup interrupt frequency during suppression (timeout if the frame
1237 * count isn't filled).
1238 */
1239static void setup_queue_timers(struct velocity_info *vptr)
1240{
1241	/* Only for newer revisions */
1242	if (vptr->rev_id >= REV_ID_VT3216_A0) {
1243		u8 txqueue_timer = 0;
1244		u8 rxqueue_timer = 0;
1245
1246		if (vptr->mii_status & (VELOCITY_SPEED_1000 |
1247				VELOCITY_SPEED_100)) {
1248			txqueue_timer = vptr->options.txqueue_timer;
1249			rxqueue_timer = vptr->options.rxqueue_timer;
1250		}
1251
1252		writeb(txqueue_timer, &vptr->mac_regs->TQETMR);
1253		writeb(rxqueue_timer, &vptr->mac_regs->RQETMR);
1254	}
1255}
1256/**
1257 * setup_adaptive_interrupts  -  Setup interrupt suppression
1258 *
1259 * @vptr: velocity adapter
1260 *
1261 * The velocity is able to suppress interrupt during high interrupt load.
1262 * This function turns on that feature.
1263 */
1264static void setup_adaptive_interrupts(struct velocity_info *vptr)
1265{
1266	struct mac_regs __iomem *regs = vptr->mac_regs;
1267	u16 tx_intsup = vptr->options.tx_intsup;
1268	u16 rx_intsup = vptr->options.rx_intsup;
1269
1270	/* Setup default interrupt mask (will be changed below) */
1271	vptr->int_mask = INT_MASK_DEF;
1272
1273	/* Set Tx Interrupt Suppression Threshold */
1274	writeb(CAMCR_PS0, &regs->CAMCR);
1275	if (tx_intsup != 0) {
1276		vptr->int_mask &= ~(ISR_PTXI | ISR_PTX0I | ISR_PTX1I |
1277				ISR_PTX2I | ISR_PTX3I);
1278		writew(tx_intsup, &regs->ISRCTL);
1279	} else
1280		writew(ISRCTL_TSUPDIS, &regs->ISRCTL);
1281
1282	/* Set Rx Interrupt Suppression Threshold */
1283	writeb(CAMCR_PS1, &regs->CAMCR);
1284	if (rx_intsup != 0) {
1285		vptr->int_mask &= ~ISR_PRXI;
1286		writew(rx_intsup, &regs->ISRCTL);
1287	} else
1288		writew(ISRCTL_RSUPDIS, &regs->ISRCTL);
1289
1290	/* Select page to interrupt hold timer */
1291	writeb(0, &regs->CAMCR);
1292}
1293
1294/**
1295 *	velocity_init_registers	-	initialise MAC registers
1296 *	@vptr: velocity to init
1297 *	@type: type of initialisation (hot or cold)
1298 *
1299 *	Initialise the MAC on a reset or on first set up on the
1300 *	hardware.
1301 */
1302static void velocity_init_registers(struct velocity_info *vptr,
1303				    enum velocity_init_type type)
1304{
1305	struct mac_regs __iomem *regs = vptr->mac_regs;
1306	int i, mii_status;
1307
1308	mac_wol_reset(regs);
1309
1310	switch (type) {
1311	case VELOCITY_INIT_RESET:
1312	case VELOCITY_INIT_WOL:
1313
1314		netif_stop_queue(vptr->dev);
1315
1316		/*
1317		 *	Reset RX so the RX pointer stays on a 4-descriptor boundary
1318		 */
1319		velocity_rx_reset(vptr);
1320		mac_rx_queue_run(regs);
1321		mac_rx_queue_wake(regs);
1322
1323		mii_status = velocity_get_opt_media_mode(vptr);
1324		if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
1325			velocity_print_link_status(vptr);
1326			if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
1327				netif_wake_queue(vptr->dev);
1328		}
1329
1330		enable_flow_control_ability(vptr);
1331
1332		mac_clear_isr(regs);
1333		writel(CR0_STOP, &regs->CR0Clr);
1334		writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT),
1335							&regs->CR0Set);
1336
1337		break;
1338
1339	case VELOCITY_INIT_COLD:
1340	default:
1341		/*
1342		 *	Do reset
1343		 */
1344		velocity_soft_reset(vptr);
1345		mdelay(5);
1346
1347		mac_eeprom_reload(regs);
1348		for (i = 0; i < 6; i++)
1349			writeb(vptr->dev->dev_addr[i], &(regs->PAR[i]));
1350
1351		/*
1352		 *	clear Pre_ACPI bit.
1353		 */
1354		BYTE_REG_BITS_OFF(CFGA_PACPI, &(regs->CFGA));
1355		mac_set_rx_thresh(regs, vptr->options.rx_thresh);
1356		mac_set_dma_length(regs, vptr->options.DMA_length);
1357
1358		writeb(WOLCFG_SAM | WOLCFG_SAB, &regs->WOLCFGSet);
1359		/*
1360		 *	Back off algorithm use original IEEE standard
1361		 */
1362		BYTE_REG_BITS_SET(CFGB_OFSET, (CFGB_CRANDOM | CFGB_CAP | CFGB_MBA | CFGB_BAKOPT), &regs->CFGB);
1363
1364		/*
1365		 *	Init CAM filter
1366		 */
1367		velocity_init_cam_filter(vptr);
1368
1369		/*
1370		 *	Set packet filter: Receive directed and broadcast address
1371		 */
1372		velocity_set_multi(vptr->dev);
1373
1374		/*
1375		 *	Enable MII auto-polling
1376		 */
1377		enable_mii_autopoll(regs);
1378
1379		setup_adaptive_interrupts(vptr);
1380
1381		writel(vptr->rx.pool_dma, &regs->RDBaseLo);
1382		writew(vptr->options.numrx - 1, &regs->RDCSize);
1383		mac_rx_queue_run(regs);
1384		mac_rx_queue_wake(regs);
1385
1386		writew(vptr->options.numtx - 1, &regs->TDCSize);
1387
1388		for (i = 0; i < vptr->tx.numq; i++) {
1389			writel(vptr->tx.pool_dma[i], &regs->TDBaseLo[i]);
1390			mac_tx_queue_run(regs, i);
1391		}
1392
1393		init_flow_control_register(vptr);
1394
1395		writel(CR0_STOP, &regs->CR0Clr);
1396		writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT), &regs->CR0Set);
1397
1398		mii_status = velocity_get_opt_media_mode(vptr);
1399		netif_stop_queue(vptr->dev);
1400
1401		mii_init(vptr, mii_status);
1402
1403		if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) {
1404			velocity_print_link_status(vptr);
1405			if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
1406				netif_wake_queue(vptr->dev);
1407		}
1408
1409		enable_flow_control_ability(vptr);
1410		mac_hw_mibs_init(regs);
1411		mac_write_int_mask(vptr->int_mask, regs);
1412		mac_clear_isr(regs);
1413
1414	}
1415}
1416
1417static void velocity_give_many_rx_descs(struct velocity_info *vptr)
1418{
1419	struct mac_regs __iomem *regs = vptr->mac_regs;
1420	int avail, dirty, unusable;
1421
1422	/*
1423	 * The RD count handed back must be a multiple of 4 per the hardware spec
1424	 * (programming guide rev 1.20, p.13)
1425	 */
1426	if (vptr->rx.filled < 4)
1427		return;
1428
1429	wmb();
1430
1431	unusable = vptr->rx.filled & 0x0003;
1432	dirty = vptr->rx.dirty - unusable;
1433	for (avail = vptr->rx.filled & 0xfffc; avail; avail--) {
1434		dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1;
1435		vptr->rx.ring[dirty].rdesc0.len |= OWNED_BY_NIC;
1436	}
1437
1438	writew(vptr->rx.filled & 0xfffc, &regs->RBRDU);
1439	vptr->rx.filled = unusable;
1440}
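/*
 * Worked example for the handoff above (illustrative numbers): with
 * rx.filled = 10, "unusable" is 10 & 3 = 2, so 8 descriptors are flipped
 * back to NIC ownership and written to RBRDU, while the remaining 2 stay
 * counted in rx.filled until enough accumulate to form another group of 4.
 */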
1441
1442/**
1443 *	velocity_init_dma_rings	-	set up DMA rings
1444 *	@vptr: Velocity to set up
1445 *
1446 *	Allocate PCI mapped DMA rings for the receive and transmit layer
1447 *	to use.
1448 */
1449static int velocity_init_dma_rings(struct velocity_info *vptr)
1450{
1451	struct velocity_opt *opt = &vptr->options;
1452	const unsigned int rx_ring_size = opt->numrx * sizeof(struct rx_desc);
1453	const unsigned int tx_ring_size = opt->numtx * sizeof(struct tx_desc);
1454	struct pci_dev *pdev = vptr->pdev;
1455	dma_addr_t pool_dma;
1456	void *pool;
1457	unsigned int i;
1458
1459	/*
1460	 * Allocate all RD/TD rings a single pool.
1461	 *
1462	 * pci_alloc_consistent() fulfills the requirement for 64 bytes
1463	 * alignment
1464	 */
1465	pool = pci_alloc_consistent(pdev, tx_ring_size * vptr->tx.numq +
1466				    rx_ring_size, &pool_dma);
1467	if (!pool) {
1468		dev_err(&pdev->dev, "%s : DMA memory allocation failed.\n",
1469			vptr->dev->name);
1470		return -ENOMEM;
1471	}
1472
1473	vptr->rx.ring = pool;
1474	vptr->rx.pool_dma = pool_dma;
1475
1476	pool += rx_ring_size;
1477	pool_dma += rx_ring_size;
1478
1479	for (i = 0; i < vptr->tx.numq; i++) {
1480		vptr->tx.rings[i] = pool;
1481		vptr->tx.pool_dma[i] = pool_dma;
1482		pool += tx_ring_size;
1483		pool_dma += tx_ring_size;
1484	}
1485
1486	return 0;
1487}
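/*
 * Resulting pool layout (one pci_alloc_consistent() allocation):
 *
 *	[ RX ring: numrx * sizeof(struct rx_desc) ]
 *	[ TX ring 0: numtx * sizeof(struct tx_desc) ]
 *	...
 *	[ TX ring (tx.numq - 1) ]
 *
 * rx.pool_dma and tx.pool_dma[i] record the matching bus addresses.
 */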
1488
1489static void velocity_set_rxbufsize(struct velocity_info *vptr, int mtu)
1490{
1491	vptr->rx.buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32;
1492}
1493
1494/**
1495 *	velocity_alloc_rx_buf	-	allocate aligned receive buffer
1496 *	@vptr: velocity
1497 *	@idx: ring index
1498 *
1499 *	Allocate a new full sized buffer for the reception of a frame and
1500 *	map it into PCI space for the hardware to use. The hardware
1501 *	requires *64* byte alignment of the buffer which makes life
1502 *	less fun than would be ideal.
1503 */
1504static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
1505{
1506	struct rx_desc *rd = &(vptr->rx.ring[idx]);
1507	struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
1508
1509	rd_info->skb = dev_alloc_skb(vptr->rx.buf_sz + 64);
1510	if (rd_info->skb == NULL)
1511		return -ENOMEM;
1512
1513	/*
1514	 *	Do the gymnastics to get the buffer head for data at
1515	 *	64byte alignment.
1516	 */
1517	skb_reserve(rd_info->skb,
1518			64 - ((unsigned long) rd_info->skb->data & 63));
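	/*
	 * Alignment example for the skb_reserve() above (illustrative address):
	 * if skb->data happened to end in 0x28, then (data & 63) = 40 and we
	 * reserve 64 - 40 = 24 bytes, leaving the buffer start 64-byte aligned.
	 */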
1519	rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data,
1520					vptr->rx.buf_sz, PCI_DMA_FROMDEVICE);
1521
1522	/*
1523	 *	Fill in the descriptor to match
1524	 */
1525
1526	*((u32 *) & (rd->rdesc0)) = 0;
1527	rd->size = cpu_to_le16(vptr->rx.buf_sz) | RX_INTEN;
1528	rd->pa_low = cpu_to_le32(rd_info->skb_dma);
1529	rd->pa_high = 0;
1530	return 0;
1531}
1532
1533
1534static int velocity_rx_refill(struct velocity_info *vptr)
1535{
1536	int dirty = vptr->rx.dirty, done = 0;
1537
1538	do {
1539		struct rx_desc *rd = vptr->rx.ring + dirty;
1540
1541		/* Fine for an all zero Rx desc at init time as well */
1542		if (rd->rdesc0.len & OWNED_BY_NIC)
1543			break;
1544
1545		if (!vptr->rx.info[dirty].skb) {
1546			if (velocity_alloc_rx_buf(vptr, dirty) < 0)
1547				break;
1548		}
1549		done++;
1550		dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0;
1551	} while (dirty != vptr->rx.curr);
1552
1553	if (done) {
1554		vptr->rx.dirty = dirty;
1555		vptr->rx.filled += done;
1556	}
1557
1558	return done;
1559}
1560
1561/**
1562 *	velocity_free_rd_ring	-	free receive ring
1563 *	@vptr: velocity to clean up
1564 *
1565 *	Free the receive buffers for each ring slot and any
1566 *	attached socket buffers that need to go away.
1567 */
1568static void velocity_free_rd_ring(struct velocity_info *vptr)
1569{
1570	int i;
1571
1572	if (vptr->rx.info == NULL)
1573		return;
1574
1575	for (i = 0; i < vptr->options.numrx; i++) {
1576		struct velocity_rd_info *rd_info = &(vptr->rx.info[i]);
1577		struct rx_desc *rd = vptr->rx.ring + i;
1578
1579		memset(rd, 0, sizeof(*rd));
1580
1581		if (!rd_info->skb)
1582			continue;
1583		pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz,
1584				 PCI_DMA_FROMDEVICE);
1585		rd_info->skb_dma = 0;
1586
1587		dev_kfree_skb(rd_info->skb);
1588		rd_info->skb = NULL;
1589	}
1590
1591	kfree(vptr->rx.info);
1592	vptr->rx.info = NULL;
1593}
1594
1595
1596
1597/**
1598 *	velocity_init_rd_ring	-	set up receive ring
1599 *	@vptr: velocity to configure
1600 *
1601 *	Allocate and set up the receive buffers for each ring slot and
1602 *	assign them to the network adapter.
1603 */
1604static int velocity_init_rd_ring(struct velocity_info *vptr)
1605{
1606	int ret = -ENOMEM;
1607
1608	vptr->rx.info = kcalloc(vptr->options.numrx,
1609				sizeof(struct velocity_rd_info), GFP_KERNEL);
1610	if (!vptr->rx.info)
1611		goto out;
1612
1613	velocity_init_rx_ring_indexes(vptr);
1614
1615	if (velocity_rx_refill(vptr) != vptr->options.numrx) {
1616		VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR
1617			"%s: failed to allocate RX buffer.\n", vptr->dev->name);
1618		velocity_free_rd_ring(vptr);
1619		goto out;
1620	}
1621
1622	ret = 0;
1623out:
1624	return ret;
1625}
1626
1627/**
1628 *	velocity_init_td_ring	-	set up transmit ring
1629 *	@vptr:	velocity
1630 *
1631 *	Set up the transmit ring and chain the ring pointers together.
1632 *	Returns zero on success or a negative posix errno code for
1633 *	failure.
1634 */
1635static int velocity_init_td_ring(struct velocity_info *vptr)
1636{
1637	int j;
1638
1639	/* Init the TD ring entries */
1640	for (j = 0; j < vptr->tx.numq; j++) {
1641
1642		vptr->tx.infos[j] = kcalloc(vptr->options.numtx,
1643					    sizeof(struct velocity_td_info),
1644					    GFP_KERNEL);
1645		if (!vptr->tx.infos[j])	{
1646			while (--j >= 0)
1647				kfree(vptr->tx.infos[j]);
1648			return -ENOMEM;
1649		}
1650
1651		vptr->tx.tail[j] = vptr->tx.curr[j] = vptr->tx.used[j] = 0;
1652	}
1653	return 0;
1654}
1655
1656/**
1657 *	velocity_free_dma_rings	-	free PCI ring pointers
1658 *	@vptr: Velocity to free from
1659 *
1660 *	Clean up the PCI ring buffers allocated to this velocity.
1661 */
1662static void velocity_free_dma_rings(struct velocity_info *vptr)
1663{
1664	const int size = vptr->options.numrx * sizeof(struct rx_desc) +
1665		vptr->options.numtx * sizeof(struct tx_desc) * vptr->tx.numq;
1666
1667	pci_free_consistent(vptr->pdev, size, vptr->rx.ring, vptr->rx.pool_dma);
1668}
1669
1670
1671static int velocity_init_rings(struct velocity_info *vptr, int mtu)
1672{
1673	int ret;
1674
1675	velocity_set_rxbufsize(vptr, mtu);
1676
1677	ret = velocity_init_dma_rings(vptr);
1678	if (ret < 0)
1679		goto out;
1680
1681	ret = velocity_init_rd_ring(vptr);
1682	if (ret < 0)
1683		goto err_free_dma_rings_0;
1684
1685	ret = velocity_init_td_ring(vptr);
1686	if (ret < 0)
1687		goto err_free_rd_ring_1;
1688out:
1689	return ret;
1690
1691err_free_rd_ring_1:
1692	velocity_free_rd_ring(vptr);
1693err_free_dma_rings_0:
1694	velocity_free_dma_rings(vptr);
1695	goto out;
1696}
1697
1698/**
1699 *	velocity_free_tx_buf	-	free transmit buffer
1700 *	@vptr: velocity
1701 *	@tdinfo: buffer
1702 *
1703 *	Release a transmit buffer. If the buffer was preallocated then
1704 *	recycle it, if not then unmap the buffer.
1705 */
1706static void velocity_free_tx_buf(struct velocity_info *vptr,
1707		struct velocity_td_info *tdinfo, struct tx_desc *td)
1708{
1709	struct sk_buff *skb = tdinfo->skb;
1710
1711	/*
1712	 *	Don't unmap the pre-allocated tx_bufs
1713	 */
1714	if (tdinfo->skb_dma) {
1715		int i;
1716
1717		for (i = 0; i < tdinfo->nskb_dma; i++) {
1718			size_t pktlen = max_t(size_t, skb->len, ETH_ZLEN);
1719
1720			/* For scatter-gather */
1721			if (skb_shinfo(skb)->nr_frags > 0)
1722				pktlen = max_t(size_t, pktlen,
1723						td->td_buf[i].size & ~TD_QUEUE);
1724
1725			pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i],
1726					le16_to_cpu(pktlen), PCI_DMA_TODEVICE);
1727		}
1728	}
1729	dev_kfree_skb_irq(skb);
1730	tdinfo->skb = NULL;
1731}
1732
1733
1734static void velocity_free_td_ring_entry(struct velocity_info *vptr,
1735							 int q, int n)
1736{
1737	struct velocity_td_info *td_info = &(vptr->tx.infos[q][n]);
1738	int i;
1739
1740	if (td_info == NULL)
1741		return;
1742
1743	if (td_info->skb) {
1744		for (i = 0; i < td_info->nskb_dma; i++) {
1745			if (td_info->skb_dma[i]) {
1746				pci_unmap_single(vptr->pdev, td_info->skb_dma[i],
1747					td_info->skb->len, PCI_DMA_TODEVICE);
1748				td_info->skb_dma[i] = 0;
1749			}
1750		}
1751		dev_kfree_skb(td_info->skb);
1752		td_info->skb = NULL;
1753	}
1754}
1755
1756/**
1757 *	velocity_free_td_ring	-	free td ring
1758 *	@vptr: velocity
1759 *
1760 *	Free up the transmit ring for this particular velocity adapter.
1761 *	We free the ring contents but not the ring itself.
1762 */
1763static void velocity_free_td_ring(struct velocity_info *vptr)
1764{
1765	int i, j;
1766
1767	for (j = 0; j < vptr->tx.numq; j++) {
1768		if (vptr->tx.infos[j] == NULL)
1769			continue;
1770		for (i = 0; i < vptr->options.numtx; i++)
1771			velocity_free_td_ring_entry(vptr, j, i);
1772
1773		kfree(vptr->tx.infos[j]);
1774		vptr->tx.infos[j] = NULL;
1775	}
1776}
1777
1778
1779static void velocity_free_rings(struct velocity_info *vptr)
1780{
1781	velocity_free_td_ring(vptr);
1782	velocity_free_rd_ring(vptr);
1783	velocity_free_dma_rings(vptr);
1784}
1785
1786/**
1787 *	velocity_error	-	handle error from controller
1788 *	@vptr: velocity
1789 *	@status: card status
1790 *
1791 *	Process an error report from the hardware and attempt to recover
1792 *	the card itself. At the moment we cannot recover from some
1793 *	theoretically impossible errors but this could be fixed using
1794 *	the pci_device_failed logic to bounce the hardware
1795 *
1796 */
1797static void velocity_error(struct velocity_info *vptr, int status)
1798{
1799
1800	if (status & ISR_TXSTLI) {
1801		struct mac_regs __iomem *regs = vptr->mac_regs;
1802
1803		printk(KERN_ERR "TD structure error TDindex=%hx\n", readw(&regs->TDIdx[0]));
1804		BYTE_REG_BITS_ON(TXESR_TDSTR, &regs->TXESR);
1805		writew(TRDCSR_RUN, &regs->TDCSRClr);
1806		netif_stop_queue(vptr->dev);
1807
1808	}
1809
1810	if (status & ISR_SRCI) {
1811		struct mac_regs __iomem *regs = vptr->mac_regs;
1812		int linked;
1813
1814		if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
1815			vptr->mii_status = check_connection_type(regs);
1816
1817			/*
1818			 *	If it is a 3119, disable frame bursting in
1819			 *	halfduplex mode and enable it in fullduplex
1820			 *	 mode
1821			 */
1822			if (vptr->rev_id < REV_ID_VT3216_A0) {
1823				if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1824					BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
1825				else
1826					BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
1827			}
1828			/*
1829			 *	Only enable CD heart beat counter in 10HD mode
1830			 */
1831			if (!(vptr->mii_status & VELOCITY_DUPLEX_FULL) && (vptr->mii_status & VELOCITY_SPEED_10))
1832				BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
1833			else
1834				BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);
1835
1836			setup_queue_timers(vptr);
1837		}
1838		/*
1839		 *	Get link status from PHYSR0
1840		 */
1841		linked = readb(&regs->PHYSR0) & PHYSR0_LINKGD;
1842
1843		if (linked) {
1844			vptr->mii_status &= ~VELOCITY_LINK_FAIL;
1845			netif_carrier_on(vptr->dev);
1846		} else {
1847			vptr->mii_status |= VELOCITY_LINK_FAIL;
1848			netif_carrier_off(vptr->dev);
1849		}
1850
1851		velocity_print_link_status(vptr);
1852		enable_flow_control_ability(vptr);
1853
1854		/*
1855		 *	Re-enable auto-polling because SRCI will disable
1856		 *	auto-polling
1857		 */
1858
1859		enable_mii_autopoll(regs);
1860
1861		if (vptr->mii_status & VELOCITY_LINK_FAIL)
1862			netif_stop_queue(vptr->dev);
1863		else
1864			netif_wake_queue(vptr->dev);
1865
1866	}
1867	if (status & ISR_MIBFI)
1868		velocity_update_hw_mibs(vptr);
1869	if (status & ISR_LSTEI)
1870		mac_rx_queue_wake(vptr->mac_regs);
1871}
1872
1873/**
1874 *	velocity_tx_srv		-	transmit interrupt service
1875 *	@vptr: velocity adapter
1876 *
1877 *	Scan the queues looking for transmitted packets that
1878 *	we can complete and clean up. Update any statistics as
1879 *	necessary.
1880 */
1881static int velocity_tx_srv(struct velocity_info *vptr)
1882{
1883	struct tx_desc *td;
1884	int qnum;
1885	int full = 0;
1886	int idx;
1887	int works = 0;
1888	struct velocity_td_info *tdinfo;
1889	struct net_device_stats *stats = &vptr->dev->stats;
1890
1891	for (qnum = 0; qnum < vptr->tx.numq; qnum++) {
1892		for (idx = vptr->tx.tail[qnum]; vptr->tx.used[qnum] > 0;
1893			idx = (idx + 1) % vptr->options.numtx) {
1894
1895			/*
1896			 *	Get Tx Descriptor
1897			 */
1898			td = &(vptr->tx.rings[qnum][idx]);
1899			tdinfo = &(vptr->tx.infos[qnum][idx]);
1900
1901			if (td->tdesc0.len & OWNED_BY_NIC)
1902				break;
1903
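			/*
			 * Cap the work done in one pass; anything still
			 * outstanding is reaped on a later service call.
			 */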
1904			if (works++ > 15)
1905				break;
1906
1907			if (td->tdesc0.TSR & TSR0_TERR) {
1908				stats->tx_errors++;
1909				stats->tx_dropped++;
1910				if (td->tdesc0.TSR & TSR0_CDH)
1911					stats->tx_heartbeat_errors++;
1912				if (td->tdesc0.TSR & TSR0_CRS)
1913					stats->tx_carrier_errors++;
1914				if (td->tdesc0.TSR & TSR0_ABT)
1915					stats->tx_aborted_errors++;
1916				if (td->tdesc0.TSR & TSR0_OWC)
1917					stats->tx_window_errors++;
1918			} else {
1919				stats->tx_packets++;
1920				stats->tx_bytes += tdinfo->skb->len;
1921			}
1922			velocity_free_tx_buf(vptr, tdinfo, td);
1923			vptr->tx.used[qnum]--;
1924		}
1925		vptr->tx.tail[qnum] = idx;
1926
1927		if (AVAIL_TD(vptr, qnum) < 1)
1928			full = 1;
1929	}
1930	/*
1931	 *	Look to see if we should kick the transmit network
1932	 *	layer for more work.
1933	 */
1934	if (netif_queue_stopped(vptr->dev) && (full == 0) &&
1935	    (!(vptr->mii_status & VELOCITY_LINK_FAIL))) {
1936		netif_wake_queue(vptr->dev);
1937	}
1938	return works;
1939}
1940
1941/**
1942 *	velocity_rx_csum	-	checksum process
1943 *	@rd: receive packet descriptor
1944 *	@skb: network layer packet buffer
1945 *
1946 *	Process the status bits for the received packet and determine
1947 *	if the checksum was computed and verified by the hardware
1948 */
1949static inline void velocity_rx_csum(struct rx_desc *rd, struct sk_buff *skb)
1950{
1951	skb->ip_summed = CHECKSUM_NONE;
1952
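	/*
	 * The descriptor CSM bits report what the hardware verified:
	 * only claim CHECKSUM_UNNECESSARY when the IP header checked
	 * out and, for TCP/UDP frames, the transport checksum did too.
	 */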
1953	if (rd->rdesc1.CSM & CSM_IPKT) {
1954		if (rd->rdesc1.CSM & CSM_IPOK) {
1955			if ((rd->rdesc1.CSM & CSM_TCPKT) ||
1956					(rd->rdesc1.CSM & CSM_UDPKT)) {
1957				if (!(rd->rdesc1.CSM & CSM_TUPOK))
1958					return;
1959			}
1960			skb->ip_summed = CHECKSUM_UNNECESSARY;
1961		}
1962	}
1963}
1964
1965/**
1966 *	velocity_rx_copy	-	in-place Rx copy for small packets
1967 *	@rx_skb: network layer packet buffer candidate
1968 *	@pkt_size: received data size
1969 *	@vptr: velocity adapter
1971 *
1972 *	Replace the current skb that is scheduled for Rx processing by a
1973 *	shorter, immediately allocated skb, if the received packet is small
1974 *	enough. This function returns a negative value if the received
1975 *	packet is too big or if memory is exhausted.
1976 */
1977static int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
1978			    struct velocity_info *vptr)
1979{
1980	int ret = -1;
1981	if (pkt_size < rx_copybreak) {
1982		struct sk_buff *new_skb;
1983
1984		new_skb = netdev_alloc_skb_ip_align(vptr->dev, pkt_size);
1985		if (new_skb) {
1986			new_skb->ip_summed = rx_skb[0]->ip_summed;
1987			skb_copy_from_linear_data(*rx_skb, new_skb->data, pkt_size);
1988			*rx_skb = new_skb;
1989			ret = 0;
1990		}
1991
1992	}
1993	return ret;
1994}
1995
1996/**
1997 *	velocity_iph_realign	-	IP header alignment
1998 *	@vptr: velocity we are handling
1999 *	@skb: network layer packet buffer
2000 *	@pkt_size: received data size
2001 *
2002 *	Align the IP header on a 2-byte boundary. This behavior can be
2003 *	configured by the user.
2004 */
2005static inline void velocity_iph_realign(struct velocity_info *vptr,
2006					struct sk_buff *skb, int pkt_size)
2007{
2008	if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) {
2009		memmove(skb->data + 2, skb->data, pkt_size);
2010		skb_reserve(skb, 2);
2011	}
2012}
2013
2014
2015/**
2016 *	velocity_receive_frame	-	received packet processor
2017 *	@vptr: velocity we are handling
2018 *	@idx: ring index
2019 *
2020 *	A packet has arrived. We process the packet and if appropriate
2021 *	pass the frame up the network stack
2022 */
2023static int velocity_receive_frame(struct velocity_info *vptr, int idx)
2024{
2025	void (*pci_action)(struct pci_dev *, dma_addr_t, size_t, int);
2026	struct net_device_stats *stats = &vptr->dev->stats;
2027	struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
2028	struct rx_desc *rd = &(vptr->rx.ring[idx]);
2029	int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff;
2030	struct sk_buff *skb;
2031
2032	if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) {
2033		VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame spans multiple RDs.\n", vptr->dev->name);
2034		stats->rx_length_errors++;
2035		return -EINVAL;
2036	}
2037
2038	if (rd->rdesc0.RSR & RSR_MAR)
2039		stats->multicast++;
2040
2041	skb = rd_info->skb;
2042
2043	pci_dma_sync_single_for_cpu(vptr->pdev, rd_info->skb_dma,
2044				    vptr->rx.buf_sz, PCI_DMA_FROMDEVICE);
2045
2046	/*
2047	 *	Drop frames not meeting IEEE 802.3
2048	 */
2049
2050	if (vptr->flags & VELOCITY_FLAGS_VAL_PKT_LEN) {
2051		if (rd->rdesc0.RSR & RSR_RL) {
2052			stats->rx_length_errors++;
2053			return -EINVAL;
2054		}
2055	}
2056
2057	pci_action = pci_dma_sync_single_for_device;
2058
2059	velocity_rx_csum(rd, skb);
2060
2061	if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) {
2062		velocity_iph_realign(vptr, skb, pkt_len);
2063		pci_action = pci_unmap_single;
2064		rd_info->skb = NULL;
2065	}
2066
2067	pci_action(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz,
2068		   PCI_DMA_FROMDEVICE);
2069
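	/* The descriptor length includes the trailing 4-byte frame checksum, which is not passed up the stack. */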
2070	skb_put(skb, pkt_len - 4);
2071	skb->protocol = eth_type_trans(skb, vptr->dev);
2072
2073	if (vptr->vlgrp && (rd->rdesc0.RSR & RSR_DETAG)) {
2074		vlan_hwaccel_rx(skb, vptr->vlgrp,
2075				swab16(le16_to_cpu(rd->rdesc1.PQTAG)));
2076	} else
2077		netif_rx(skb);
2078
2079	stats->rx_bytes += pkt_len;
2080
2081	return 0;
2082}
2083
2084
2085/**
2086 *	velocity_rx_srv		-	service RX interrupt
2087 *	@vptr: velocity
 *	@budget_left: remaining NAPI budget (maximum packets to service)
2088 *
2089 *	Walk the receive ring of the velocity adapter and remove
2090 *	any received packets from the receive queue. Hand the ring
2091 *	slots back to the adapter for reuse.
2092 */
2093static int velocity_rx_srv(struct velocity_info *vptr, int budget_left)
2094{
2095	struct net_device_stats *stats = &vptr->dev->stats;
2096	int rd_curr = vptr->rx.curr;
2097	int works = 0;
2098
2099	while (works < budget_left) {
2100		struct rx_desc *rd = vptr->rx.ring + rd_curr;
2101
2102		if (!vptr->rx.info[rd_curr].skb)
2103			break;
2104
2105		if (rd->rdesc0.len & OWNED_BY_NIC)
2106			break;
2107
2108		rmb();
2109
2110		/*
2111		 *	Don't drop CE or RL error frames even though RXOK is off
2112		 */
2113		if (rd->rdesc0.RSR & (RSR_RXOK | RSR_CE | RSR_RL)) {
2114			if (velocity_receive_frame(vptr, rd_curr) < 0)
2115				stats->rx_dropped++;
2116		} else {
2117			if (rd->rdesc0.RSR & RSR_CRC)
2118				stats->rx_crc_errors++;
2119			if (rd->rdesc0.RSR & RSR_FAE)
2120				stats->rx_frame_errors++;
2121
2122			stats->rx_dropped++;
2123		}
2124
2125		rd->size |= RX_INTEN;
2126
2127		rd_curr++;
2128		if (rd_curr >= vptr->options.numrx)
2129			rd_curr = 0;
2130		works++;
2131	}
2132
2133	vptr->rx.curr = rd_curr;
2134
2135	if ((works > 0) && (velocity_rx_refill(vptr) > 0))
2136		velocity_give_many_rx_descs(vptr);
2137
2138	VAR_USED(stats);
2139	return works;
2140}
2141
2142static int velocity_poll(struct napi_struct *napi, int budget)
2143{
2144	struct velocity_info *vptr = container_of(napi,
2145			struct velocity_info, napi);
2146	unsigned int rx_done;
2147	unsigned long flags;
2148
2149	spin_lock_irqsave(&vptr->lock, flags);
2150	/*
2151	 * Do rx and tx twice for performance (taken from the VIA
2152	 * out-of-tree driver).
2153	 */
2154	rx_done = velocity_rx_srv(vptr, budget / 2);
2155	velocity_tx_srv(vptr);
2156	rx_done += velocity_rx_srv(vptr, budget - rx_done);
2157	velocity_tx_srv(vptr);
2158
2159	/* If budget not fully consumed, exit the polling mode */
2160	if (rx_done < budget) {
2161		napi_complete(napi);
2162		mac_enable_int(vptr->mac_regs);
2163	}
2164	spin_unlock_irqrestore(&vptr->lock, flags);
2165
2166	return rx_done;
2167}
2168
2169/**
2170 *	velocity_intr		-	interrupt callback
2171 *	@irq: interrupt number
2172 *	@dev_instance: interrupting device
2173 *
2174 *	Called whenever an interrupt is generated by the velocity
2175 *	adapter IRQ line. We may not be the source of the interrupt
2176 *	and need to check first whether we are, and if not exit as
2177 *	efficiently as possible.
2178 */
2179static irqreturn_t velocity_intr(int irq, void *dev_instance)
2180{
2181	struct net_device *dev = dev_instance;
2182	struct velocity_info *vptr = netdev_priv(dev);
2183	u32 isr_status;
2184
2185	spin_lock(&vptr->lock);
2186	isr_status = mac_read_isr(vptr->mac_regs);
2187
2188	/* Not us ? */
2189	if (isr_status == 0) {
2190		spin_unlock(&vptr->lock);
2191		return IRQ_NONE;
2192	}
2193
2194	/* Ack the interrupt */
2195	mac_write_isr(vptr->mac_regs, isr_status);
2196
2197	if (likely(napi_schedule_prep(&vptr->napi))) {
2198		mac_disable_int(vptr->mac_regs);
2199		__napi_schedule(&vptr->napi);
2200	}
2201
2202	if (isr_status & (~(ISR_PRXI | ISR_PPRXI | ISR_PTXI | ISR_PPTXI)))
2203		velocity_error(vptr, isr_status);
2204
2205	spin_unlock(&vptr->lock);
2206
2207	return IRQ_HANDLED;
2208}
2209
2210/**
2211 *	velocity_open		-	interface activation callback
2212 *	@dev: network layer device to open
2213 *
2214 *	Called when the network layer brings the interface up. Returns
2215 *	a negative posix error code on failure, or zero on success.
2216 *
2217 *	All the ring allocation and set up is done on open for this
2218 *	adapter to minimise memory usage when inactive
2219 */
2220static int velocity_open(struct net_device *dev)
2221{
2222	struct velocity_info *vptr = netdev_priv(dev);
2223	int ret;
2224
2225	ret = velocity_init_rings(vptr, dev->mtu);
2226	if (ret < 0)
2227		goto out;
2228
2229	/* Ensure chip is running */
2230	pci_set_power_state(vptr->pdev, PCI_D0);
2231
2232	velocity_init_registers(vptr, VELOCITY_INIT_COLD);
2233
2234	ret = request_irq(vptr->pdev->irq, velocity_intr, IRQF_SHARED,
2235			  dev->name, dev);
2236	if (ret < 0) {
2237		/* Power down the chip */
2238		pci_set_power_state(vptr->pdev, PCI_D3hot);
2239		velocity_free_rings(vptr);
2240		goto out;
2241	}
2242
2243	velocity_give_many_rx_descs(vptr);
2244
2245	mac_enable_int(vptr->mac_regs);
2246	netif_start_queue(dev);
2247	napi_enable(&vptr->napi);
2248	vptr->flags |= VELOCITY_FLAGS_OPENED;
2249out:
2250	return ret;
2251}
2252
2253/**
2254 *	velocity_shutdown	-	shut down the chip
2255 *	@vptr: velocity to deactivate
2256 *
2257 *	Shuts down the internal operations of the velocity and
2258 *	disables interrupts, autopolling, transmit and receive
2259 */
2260static void velocity_shutdown(struct velocity_info *vptr)
2261{
2262	struct mac_regs __iomem *regs = vptr->mac_regs;
2263	mac_disable_int(regs);
2264	writel(CR0_STOP, &regs->CR0Set);
2265	writew(0xFFFF, &regs->TDCSRClr);
2266	writeb(0xFF, &regs->RDCSRClr);
2267	safe_disable_mii_autopoll(regs);
2268	mac_clear_isr(regs);
2269}
2270
2271/**
2272 *	velocity_change_mtu	-	MTU change callback
2273 *	@dev: network device
2274 *	@new_mtu: desired MTU
2275 *
2276 *	Handle requests from the networking layer for MTU change on
2277 *	this interface. It gets called on a change by the network layer.
2278 *	Return zero for success or negative posix error code.
2279 */
2280static int velocity_change_mtu(struct net_device *dev, int new_mtu)
2281{
2282	struct velocity_info *vptr = netdev_priv(dev);
2283	int ret = 0;
2284
2285	if ((new_mtu < VELOCITY_MIN_MTU) || (new_mtu > VELOCITY_MAX_MTU)) {
2286		VELOCITY_PRT(MSG_LEVEL_ERR, KERN_NOTICE "%s: Invalid MTU.\n",
2287				vptr->dev->name);
2288		ret = -EINVAL;
2289		goto out_0;
2290	}
2291
2292	if (!netif_running(dev)) {
2293		dev->mtu = new_mtu;
2294		goto out_0;
2295	}
2296
2297	if (dev->mtu != new_mtu) {
2298		struct velocity_info *tmp_vptr;
2299		unsigned long flags;
2300		struct rx_info rx;
2301		struct tx_info tx;
2302
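		/*
		 * Build a complete set of rings for the new MTU in a
		 * temporary velocity_info, swap them in under the lock,
		 * and free the old rings once the device is running again.
		 */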
2303		tmp_vptr = kzalloc(sizeof(*tmp_vptr), GFP_KERNEL);
2304		if (!tmp_vptr) {
2305			ret = -ENOMEM;
2306			goto out_0;
2307		}
2308
2309		tmp_vptr->dev = dev;
2310		tmp_vptr->pdev = vptr->pdev;
2311		tmp_vptr->options = vptr->options;
2312		tmp_vptr->tx.numq = vptr->tx.numq;
2313
2314		ret = velocity_init_rings(tmp_vptr, new_mtu);
2315		if (ret < 0)
2316			goto out_free_tmp_vptr_1;
2317
2318		spin_lock_irqsave(&vptr->lock, flags);
2319
2320		netif_stop_queue(dev);
2321		velocity_shutdown(vptr);
2322
2323		rx = vptr->rx;
2324		tx = vptr->tx;
2325
2326		vptr->rx = tmp_vptr->rx;
2327		vptr->tx = tmp_vptr->tx;
2328
2329		tmp_vptr->rx = rx;
2330		tmp_vptr->tx = tx;
2331
2332		dev->mtu = new_mtu;
2333
2334		velocity_init_registers(vptr, VELOCITY_INIT_COLD);
2335
2336		velocity_give_many_rx_descs(vptr);
2337
2338		mac_enable_int(vptr->mac_regs);
2339		netif_start_queue(dev);
2340
2341		spin_unlock_irqrestore(&vptr->lock, flags);
2342
2343		velocity_free_rings(tmp_vptr);
2344
2345out_free_tmp_vptr_1:
2346		kfree(tmp_vptr);
2347	}
2348out_0:
2349	return ret;
2350}
2351
2352/**
2353 *	velocity_mii_ioctl		-	MII ioctl handler
2354 *	@dev: network device
2355 *	@ifr: the ifreq block for the ioctl
2356 *	@cmd: the command
2357 *
2358 *	Process MII requests made via ioctl from the network layer. These
2359 *	are used by tools like kudzu to interrogate the link state of the
2360 *	hardware
2361 */
2362static int velocity_mii_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2363{
2364	struct velocity_info *vptr = netdev_priv(dev);
2365	struct mac_regs __iomem *regs = vptr->mac_regs;
2366	unsigned long flags;
2367	struct mii_ioctl_data *miidata = if_mii(ifr);
2368	int err;
2369
2370	switch (cmd) {
2371	case SIOCGMIIPHY:
2372		miidata->phy_id = readb(&regs->MIIADR) & 0x1f;
2373		break;
2374	case SIOCGMIIREG:
2375		if (velocity_mii_read(vptr->mac_regs, miidata->reg_num & 0x1f, &(miidata->val_out)) < 0)
2376			return -ETIMEDOUT;
2377		break;
2378	case SIOCSMIIREG:
2379		spin_lock_irqsave(&vptr->lock, flags);
2380		err = velocity_mii_write(vptr->mac_regs, miidata->reg_num & 0x1f, miidata->val_in);
2381		spin_unlock_irqrestore(&vptr->lock, flags);
2382		check_connection_type(vptr->mac_regs);
2383		if (err)
2384			return err;
2385		break;
2386	default:
2387		return -EOPNOTSUPP;
2388	}
2389	return 0;
2390}
2391
2392
2393/**
2394 *	velocity_ioctl		-	ioctl entry point
2395 *	@dev: network device
2396 *	@rq: interface request ioctl
2397 *	@cmd: command code
2398 *
2399 *	Called when the user issues an ioctl request to the network
2400 *	device in question. The velocity interface supports MII.
2401 */
2402static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2403{
2404	struct velocity_info *vptr = netdev_priv(dev);
2405	int ret;
2406
2407	/* If we are asked for information and the device is power
2408	   saving then we need to bring the device back up to talk to it */
2409
2410	if (!netif_running(dev))
2411		pci_set_power_state(vptr->pdev, PCI_D0);
2412
2413	switch (cmd) {
2414	case SIOCGMIIPHY:	/* Get address of MII PHY in use. */
2415	case SIOCGMIIREG:	/* Read MII PHY register. */
2416	case SIOCSMIIREG:	/* Write to MII PHY register. */
2417		ret = velocity_mii_ioctl(dev, rq, cmd);
2418		break;
2419
2420	default:
2421		ret = -EOPNOTSUPP;
2422	}
2423	if (!netif_running(dev))
2424		pci_set_power_state(vptr->pdev, PCI_D3hot);
2425
2426
2427	return ret;
2428}
2429
2430/**
2431 *	velocity_get_stats	-	statistics callback
2432 *	@dev: network device
2433 *
2434 *	Callback from the network layer to allow driver statistics
2435 *	to be resynchronized with hardware collected state. In the
2436 *	case of the velocity we need to pull the MIB counters from
2437 *	hardware into the software counters before letting the network
2438 *	layer display them.
2439 */
2440static struct net_device_stats *velocity_get_stats(struct net_device *dev)
2441{
2442	struct velocity_info *vptr = netdev_priv(dev);
2443
2444	/* If the hardware is down, don't touch the hardware counters */
2445	if (!netif_running(dev))
2446		return &dev->stats;
2447
2448	spin_lock_irq(&vptr->lock);
2449	velocity_update_hw_mibs(vptr);
2450	spin_unlock_irq(&vptr->lock);
2451
2452	dev->stats.rx_packets = vptr->mib_counter[HW_MIB_ifRxAllPkts];
2453	dev->stats.rx_errors = vptr->mib_counter[HW_MIB_ifRxErrorPkts];
2454	dev->stats.rx_length_errors = vptr->mib_counter[HW_MIB_ifInRangeLengthErrors];
2455
2456//  unsigned long   rx_dropped;     /* no space in linux buffers    */
2457	dev->stats.collisions = vptr->mib_counter[HW_MIB_ifTxEtherCollisions];
2458	/* detailed rx_errors: */
2459//  unsigned long   rx_length_errors;
2460//  unsigned long   rx_over_errors;     /* receiver ring buff overflow  */
2461	dev->stats.rx_crc_errors = vptr->mib_counter[HW_MIB_ifRxPktCRCE];
2462//  unsigned long   rx_frame_errors;    /* recv'd frame alignment error */
2463//  unsigned long   rx_fifo_errors;     /* recv'r fifo overrun      */
2464//  unsigned long   rx_missed_errors;   /* receiver missed packet   */
2465
2466	/* detailed tx_errors */
2467//  unsigned long   tx_fifo_errors;
2468
2469	return &dev->stats;
2470}
2471
2472/**
2473 *	velocity_close		-	close adapter callback
2474 *	@dev: network device
2475 *
2476 *	Callback from the network layer when the velocity is being
2477 *	deactivated by the network layer
2478 */
2479static int velocity_close(struct net_device *dev)
2480{
2481	struct velocity_info *vptr = netdev_priv(dev);
2482
2483	napi_disable(&vptr->napi);
2484	netif_stop_queue(dev);
2485	velocity_shutdown(vptr);
2486
2487	if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED)
2488		velocity_get_ip(vptr);
2489	if (dev->irq != 0)
2490		free_irq(dev->irq, dev);
2491
2492	/* Power down the chip */
2493	pci_set_power_state(vptr->pdev, PCI_D3hot);
2494
2495	velocity_free_rings(vptr);
2496
2497	vptr->flags &= (~VELOCITY_FLAGS_OPENED);
2498	return 0;
2499}
2500
2501/**
2502 *	velocity_xmit		-	transmit packet callback
2503 *	@skb: buffer to transmit
2504 *	@dev: network device
2505 *
2506 *	Called by the network layer to request a packet is queued to
2507 *	the velocity. Returns zero on success.
2508 */
2509static netdev_tx_t velocity_xmit(struct sk_buff *skb,
2510				 struct net_device *dev)
2511{
2512	struct velocity_info *vptr = netdev_priv(dev);
2513	int qnum = 0;
2514	struct tx_desc *td_ptr;
2515	struct velocity_td_info *tdinfo;
2516	unsigned long flags;
2517	int pktlen;
2518	int index, prev;
2519	int i = 0;
2520
2521	if (skb_padto(skb, ETH_ZLEN))
2522		goto out;
2523
2524	/* The hardware can handle at most 7 memory segments, so merge
2525	 * the skb if there are more */
2526	if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {
2527		kfree_skb(skb);
2528		return NETDEV_TX_OK;
2529	}
2530
2531	pktlen = skb_shinfo(skb)->nr_frags == 0 ?
2532			max_t(unsigned int, skb->len, ETH_ZLEN) :
2533				skb_headlen(skb);
2534
2535	spin_lock_irqsave(&vptr->lock, flags);
2536
2537	index = vptr->tx.curr[qnum];
2538	td_ptr = &(vptr->tx.rings[qnum][index]);
2539	tdinfo = &(vptr->tx.infos[qnum][index]);
2540
2541	td_ptr->tdesc1.TCR = TCR0_TIC;
2542	td_ptr->td_buf[0].size &= ~TD_QUEUE;
2543
2544	/*
2545	 *	Map the linear network buffer into PCI space and
2546	 *	add it to the transmit ring.
2547	 */
2548	tdinfo->skb = skb;
2549	tdinfo->skb_dma[0] = pci_map_single(vptr->pdev, skb->data, pktlen, PCI_DMA_TODEVICE);
2550	td_ptr->tdesc0.len = cpu_to_le16(pktlen);
2551	td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
2552	td_ptr->td_buf[0].pa_high = 0;
2553	td_ptr->td_buf[0].size = cpu_to_le16(pktlen);
2554
2555	/* Handle fragments */
2556	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2557		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2558
2559		tdinfo->skb_dma[i + 1] = pci_map_page(vptr->pdev, frag->page,
2560				frag->page_offset, frag->size,
2561				PCI_DMA_TODEVICE);
2562
2563		td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]);
2564		td_ptr->td_buf[i + 1].pa_high = 0;
2565		td_ptr->td_buf[i + 1].size = cpu_to_le16(frag->size);
2566	}
2567	tdinfo->nskb_dma = i + 1;
2568
2569	td_ptr->tdesc1.cmd = TCPLS_NORMAL + (tdinfo->nskb_dma + 1) * 16;
2570
2571	if (vptr->vlgrp && vlan_tx_tag_present(skb)) {
2572		td_ptr->tdesc1.vlan = cpu_to_le16(vlan_tx_tag_get(skb));
2573		td_ptr->tdesc1.TCR |= TCR0_VETAG;
2574	}
2575
2576	/*
2577	 *	Handle hardware checksum
2578	 */
2579	if ((dev->features & NETIF_F_IP_CSUM) &&
2580	    (skb->ip_summed == CHECKSUM_PARTIAL)) {
2581		const struct iphdr *ip = ip_hdr(skb);
2582		if (ip->protocol == IPPROTO_TCP)
2583			td_ptr->tdesc1.TCR |= TCR0_TCPCK;
2584		else if (ip->protocol == IPPROTO_UDP)
2585			td_ptr->tdesc1.TCR |= (TCR0_UDPCK);
2586		td_ptr->tdesc1.TCR |= TCR0_IPCK;
2587	}
2588
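	/*
	 * Hand the descriptor to the NIC, then mark the previous
	 * descriptor with TD_QUEUE so the hardware continues on into
	 * the one we have just filled.
	 */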
2589	prev = index - 1;
2590	if (prev < 0)
2591		prev = vptr->options.numtx - 1;
2592	td_ptr->tdesc0.len |= OWNED_BY_NIC;
2593	vptr->tx.used[qnum]++;
2594	vptr->tx.curr[qnum] = (index + 1) % vptr->options.numtx;
2595
2596	if (AVAIL_TD(vptr, qnum) < 1)
2597		netif_stop_queue(dev);
2598
2599	td_ptr = &(vptr->tx.rings[qnum][prev]);
2600	td_ptr->td_buf[0].size |= TD_QUEUE;
2601	mac_tx_queue_wake(vptr->mac_regs, qnum);
2602
2603	spin_unlock_irqrestore(&vptr->lock, flags);
2604out:
2605	return NETDEV_TX_OK;
2606}
2607
2608
2609static const struct net_device_ops velocity_netdev_ops = {
2610	.ndo_open		= velocity_open,
2611	.ndo_stop		= velocity_close,
2612	.ndo_start_xmit		= velocity_xmit,
2613	.ndo_get_stats		= velocity_get_stats,
2614	.ndo_validate_addr	= eth_validate_addr,
2615	.ndo_set_mac_address 	= eth_mac_addr,
2616	.ndo_set_multicast_list	= velocity_set_multi,
2617	.ndo_change_mtu		= velocity_change_mtu,
2618	.ndo_do_ioctl		= velocity_ioctl,
2619	.ndo_vlan_rx_add_vid	= velocity_vlan_rx_add_vid,
2620	.ndo_vlan_rx_kill_vid	= velocity_vlan_rx_kill_vid,
2621	.ndo_vlan_rx_register	= velocity_vlan_rx_register,
2622};
2623
2624/**
2625 *	velocity_init_info	-	init private data
2626 *	@pdev: PCI device
2627 *	@vptr: Velocity info
2628 *	@info: Board type
2629 *
2630 *	Set up the initial velocity_info struct for the device that has been
2631 *	discovered.
2632 */
2633static void __devinit velocity_init_info(struct pci_dev *pdev,
2634					 struct velocity_info *vptr,
2635					 const struct velocity_info_tbl *info)
2636{
2637	memset(vptr, 0, sizeof(struct velocity_info));
2638
2639	vptr->pdev = pdev;
2640	vptr->chip_id = info->chip_id;
2641	vptr->tx.numq = info->txqueue;
2642	vptr->multicast_limit = MCAM_SIZE;
2643	spin_lock_init(&vptr->lock);
2644}
2645
2646/**
2647 *	velocity_get_pci_info	-	retrieve PCI info for device
2648 *	@vptr: velocity device
2649 *	@pdev: PCI device it matches
2650 *
2651 *	Retrieve the PCI configuration space data that interests us from
2652 *	the kernel PCI layer
2653 */
2654static int __devinit velocity_get_pci_info(struct velocity_info *vptr, struct pci_dev *pdev)
2655{
2656	vptr->rev_id = pdev->revision;
2657
2658	pci_set_master(pdev);
2659
2660	vptr->ioaddr = pci_resource_start(pdev, 0);
2661	vptr->memaddr = pci_resource_start(pdev, 1);
2662
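	/*
	 * BAR 0 must be an I/O resource and BAR 1 a memory resource
	 * large enough to map the whole register block.
	 */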
2663	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
2664		dev_err(&pdev->dev,
2665			   "region #0 is not an I/O resource, aborting.\n");
2666		return -EINVAL;
2667	}
2668
2669	if ((pci_resource_flags(pdev, 1) & IORESOURCE_IO)) {
2670		dev_err(&pdev->dev,
2671			   "region #1 is an I/O resource, aborting.\n");
2672		return -EINVAL;
2673	}
2674
2675	if (pci_resource_len(pdev, 1) < VELOCITY_IO_SIZE) {
2676		dev_err(&pdev->dev, "region #1 is too small.\n");
2677		return -EINVAL;
2678	}
2679	vptr->pdev = pdev;
2680
2681	return 0;
2682}
2683
2684/**
2685 *	velocity_print_info	-	per driver data
2686 *	@vptr: velocity
2687 *
2688 *	Print per driver data as the kernel driver finds Velocity
2689 *	hardware
2690 */
2691static void __devinit velocity_print_info(struct velocity_info *vptr)
2692{
2693	struct net_device *dev = vptr->dev;
2694
2695	printk(KERN_INFO "%s: %s\n", dev->name, get_chip_name(vptr->chip_id));
2696	printk(KERN_INFO "%s: Ethernet Address: %pM\n",
2697		dev->name, dev->dev_addr);
2698}
2699
2700static u32 velocity_get_link(struct net_device *dev)
2701{
2702	struct velocity_info *vptr = netdev_priv(dev);
2703	struct mac_regs __iomem *regs = vptr->mac_regs;
2704	return BYTE_REG_BITS_IS_ON(PHYSR0_LINKGD, &regs->PHYSR0) ? 1 : 0;
2705}
2706
2707
2708/**
2709 *	velocity_found1		-	set up discovered velocity card
2710 *	@pdev: PCI device
2711 *	@ent: PCI device table entry that matched
2712 *
2713 *	Configure a discovered adapter from scratch. Return a negative
2714 *	errno error code on failure paths.
2715 */
2716static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_device_id *ent)
2717{
2718	static int first = 1;
2719	struct net_device *dev;
2720	int i;
2721	const char *drv_string;
2722	const struct velocity_info_tbl *info = &chip_info_table[ent->driver_data];
2723	struct velocity_info *vptr;
2724	struct mac_regs __iomem *regs;
2725	int ret = -ENOMEM;
2726
2727	if (velocity_nics >= MAX_UNITS) {
2728		dev_notice(&pdev->dev, "already found %d NICs.\n",
2729			   velocity_nics);
2730		return -ENODEV;
2731	}
2732
2733	dev = alloc_etherdev(sizeof(struct velocity_info));
2734	if (!dev) {
2735		dev_err(&pdev->dev, "allocate net device failed.\n");
2736		goto out;
2737	}
2738
2739	/* Chain it all together */
2740
2741	SET_NETDEV_DEV(dev, &pdev->dev);
2742	vptr = netdev_priv(dev);
2743
2744
2745	if (first) {
2746		printk(KERN_INFO "%s Ver. %s\n",
2747			VELOCITY_FULL_DRV_NAM, VELOCITY_VERSION);
2748		printk(KERN_INFO "Copyright (c) 2002, 2003 VIA Networking Technologies, Inc.\n");
2749		printk(KERN_INFO "Copyright (c) 2004 Red Hat Inc.\n");
2750		first = 0;
2751	}
2752
2753	velocity_init_info(pdev, vptr, info);
2754
2755	vptr->dev = dev;
2756
2757	ret = pci_enable_device(pdev);
2758	if (ret < 0)
2759		goto err_free_dev;
2760
2761	dev->irq = pdev->irq;
2762
2763	ret = velocity_get_pci_info(vptr, pdev);
2764	if (ret < 0) {
2765		/* error message already printed */
2766		goto err_disable;
2767	}
2768
2769	ret = pci_request_regions(pdev, VELOCITY_NAME);
2770	if (ret < 0) {
2771		dev_err(&pdev->dev, "No PCI resources.\n");
2772		goto err_disable;
2773	}
2774
2775	regs = ioremap(vptr->memaddr, VELOCITY_IO_SIZE);
2776	if (regs == NULL) {
2777		ret = -EIO;
2778		goto err_release_res;
2779	}
2780
2781	vptr->mac_regs = regs;
2782
2783	mac_wol_reset(regs);
2784
2785	dev->base_addr = vptr->ioaddr;
2786
2787	for (i = 0; i < 6; i++)
2788		dev->dev_addr[i] = readb(&regs->PAR[i]);
2789
2790
2791	drv_string = dev_driver_string(&pdev->dev);
2792
2793	velocity_get_options(&vptr->options, velocity_nics, drv_string);
2794
2795	/*
2796	 *	Mask out the options that cannot be set on this chip
2797	 */
2798
2799	vptr->options.flags &= info->flags;
2800
2801	/*
2802	 *	Enable the chip-specific capabilities
2803	 */
2804
2805	vptr->flags = vptr->options.flags | (info->flags & 0xFF000000UL);
2806
2807	vptr->wol_opts = vptr->options.wol_opts;
2808	vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
2809
2810	vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs);
2811
2812	dev->irq = pdev->irq;
2813	dev->netdev_ops = &velocity_netdev_ops;
2814	dev->ethtool_ops = &velocity_ethtool_ops;
2815	netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT);
2816
2817	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
2818		NETIF_F_HW_VLAN_RX | NETIF_F_IP_CSUM;
2819
2820	ret = register_netdev(dev);
2821	if (ret < 0)
2822		goto err_iounmap;
2823
2824	if (!velocity_get_link(dev)) {
2825		netif_carrier_off(dev);
2826		vptr->mii_status |= VELOCITY_LINK_FAIL;
2827	}
2828
2829	velocity_print_info(vptr);
2830	pci_set_drvdata(pdev, dev);
2831
2832	/* and leave the chip powered down */
2833
2834	pci_set_power_state(pdev, PCI_D3hot);
2835	velocity_nics++;
2836out:
2837	return ret;
2838
2839err_iounmap:
2840	iounmap(regs);
2841err_release_res:
2842	pci_release_regions(pdev);
2843err_disable:
2844	pci_disable_device(pdev);
2845err_free_dev:
2846	free_netdev(dev);
2847	goto out;
2848}
2849
2850
2851#ifdef CONFIG_PM
2852/**
2853 *	wol_calc_crc		-	WOL CRC
 *	@size: number of mask bytes; each mask byte covers 8 pattern bytes
2854 *	@pattern: data pattern
2855 *	@mask_pattern: mask
2856 *
2857 *	Compute the Wake-on-LAN CRC hash for the packet header
2858 *	we are interested in.
2859 */
2860static u16 wol_calc_crc(int size, u8 *pattern, u8 *mask_pattern)
2861{
2862	u16 crc = 0xFFFF;
2863	u8 mask;
2864	int i, j;
2865
2866	for (i = 0; i < size; i++) {
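	/*
	 * Each mask byte selects which of the next eight pattern bytes
	 * are fed to the CRC; bytes with a clear mask bit are skipped
	 * and do not advance the CRC at all.
	 */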
2867		mask = mask_pattern[i];
2868
2869		/* Skip this loop if the mask equals to zero */
2870		if (mask == 0x00)
2871			continue;
2872
2873		for (j = 0; j < 8; j++) {
2874			if ((mask & 0x01) == 0) {
2875				mask >>= 1;
2876				continue;
2877			}
2878			mask >>= 1;
2879			crc = crc_ccitt(crc, &(pattern[i * 8 + j]), 1);
2880		}
2881	}
2882	/*	Finally, invert the result once to get the correct data */
2883	crc = ~crc;
2884	return bitrev32(crc) >> 16;
2885}
2886
2887static int velocity_set_wol(struct velocity_info *vptr)
2888{
2889	struct mac_regs __iomem *regs = vptr->mac_regs;
2890	static u8 buf[256];
2891	int i;
2892
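	/*
	 * Byte masks for the wake-up pattern matcher. Only the ARP
	 * mask is programmed into the ByteMask registers below; Magic
	 * Packet wake-up uses its dedicated WOLCR enable bit instead.
	 */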
2893	static u32 mask_pattern[2][4] = {
2894		{0x00203000, 0x000003C0, 0x00000000, 0x0000000}, /* ARP */
2895		{0xfffff000, 0xffffffff, 0xffffffff, 0x000ffff}	 /* Magic Packet */
2896	};
2897
2898	writew(0xFFFF, &regs->WOLCRClr);
2899	writeb(WOLCFG_SAB | WOLCFG_SAM, &regs->WOLCFGSet);
2900	writew(WOLCR_MAGIC_EN, &regs->WOLCRSet);
2901
2902	/*
2903	   if (vptr->wol_opts & VELOCITY_WOL_PHY)
2904	   writew((WOLCR_LINKON_EN|WOLCR_LINKOFF_EN), &regs->WOLCRSet);
2905	 */
2906
2907	if (vptr->wol_opts & VELOCITY_WOL_UCAST)
2908		writew(WOLCR_UNICAST_EN, &regs->WOLCRSet);
2909
2910	if (vptr->wol_opts & VELOCITY_WOL_ARP) {
2911		struct arp_packet *arp = (struct arp_packet *) buf;
2912		u16 crc;
2913		memset(buf, 0, sizeof(struct arp_packet) + 7);
2914
2915		for (i = 0; i < 4; i++)
2916			writel(mask_pattern[0][i], &regs->ByteMask[0][i]);
2917
2918		arp->type = htons(ETH_P_ARP);
2919		arp->ar_op = htons(1);
2920
2921		memcpy(arp->ar_tip, vptr->ip_addr, 4);
2922
2923		crc = wol_calc_crc((sizeof(struct arp_packet) + 7) / 8, buf,
2924				(u8 *)&mask_pattern[0][0]);
2925
2926		writew(crc, &regs->PatternCRC[0]);
2927		writew(WOLCR_ARP_EN, &regs->WOLCRSet);
2928	}
2929
2930	BYTE_REG_BITS_ON(PWCFG_WOLTYPE, &regs->PWCFGSet);
2931	BYTE_REG_BITS_ON(PWCFG_LEGACY_WOLEN, &regs->PWCFGSet);
2932
2933	writew(0x0FFF, &regs->WOLSRClr);
2934
2935	if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) {
2936		if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
2937			MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
2938
2939		MII_REG_BITS_OFF(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
2940	}
2941
2942	if (vptr->mii_status & VELOCITY_SPEED_1000)
2943		MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
2944
2945	BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);
2946
2947	{
2948		u8 GCR;
2949		GCR = readb(&regs->CHIPGCR);
2950		GCR = (GCR & ~CHIPGCR_FCGMII) | CHIPGCR_FCFDX;
2951		writeb(GCR, &regs->CHIPGCR);
2952	}
2953
2954	BYTE_REG_BITS_OFF(ISR_PWEI, &regs->ISR);
2955	/* Turn on SWPTAG just before entering power mode */
2956	BYTE_REG_BITS_ON(STICKHW_SWPTAG, &regs->STICKHW);
2957	/* Go to bed ..... */
2958	BYTE_REG_BITS_ON((STICKHW_DS1 | STICKHW_DS0), &regs->STICKHW);
2959
2960	return 0;
2961}
2962
2963/**
2964 *	velocity_save_context	-	save registers
2965 *	@vptr: velocity
2966 *	@context: buffer for stored context
2967 *
2968 *	Retrieve the current configuration from the velocity hardware
2969 *	and stash it in the context structure, for use by the context
2970 *	restore functions. This allows us to save things we need across
2971 *	power down states
2972 */
2973static void velocity_save_context(struct velocity_info *vptr, struct velocity_context *context)
2974{
2975	struct mac_regs __iomem *regs = vptr->mac_regs;
2976	u16 i;
2977	u8 __iomem *ptr = (u8 __iomem *)regs;
2978
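	/*
	 * Snapshot three contiguous register windows (PAR..CR0_CLR,
	 * MAR..TDCSR_CLR and RDBASE_LO..FIFO_TEST0) as raw 32-bit
	 * words for velocity_restore_context() to replay.
	 */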
2979	for (i = MAC_REG_PAR; i < MAC_REG_CR0_CLR; i += 4)
2980		*((u32 *) (context->mac_reg + i)) = readl(ptr + i);
2981
2982	for (i = MAC_REG_MAR; i < MAC_REG_TDCSR_CLR; i += 4)
2983		*((u32 *) (context->mac_reg + i)) = readl(ptr + i);
2984
2985	for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4)
2986		*((u32 *) (context->mac_reg + i)) = readl(ptr + i);
2987
2988}
2989
2990static int velocity_suspend(struct pci_dev *pdev, pm_message_t state)
2991{
2992	struct net_device *dev = pci_get_drvdata(pdev);
2993	struct velocity_info *vptr = netdev_priv(dev);
2994	unsigned long flags;
2995
2996	if (!netif_running(vptr->dev))
2997		return 0;
2998
2999	netif_device_detach(vptr->dev);
3000
3001	spin_lock_irqsave(&vptr->lock, flags);
3002	pci_save_state(pdev);
3003#ifdef ETHTOOL_GWOL
3004	if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) {
3005		velocity_get_ip(vptr);
3006		velocity_save_context(vptr, &vptr->context);
3007		velocity_shutdown(vptr);
3008		velocity_set_wol(vptr);
3009		pci_enable_wake(pdev, PCI_D3hot, 1);
3010		pci_set_power_state(pdev, PCI_D3hot);
3011	} else {
3012		velocity_save_context(vptr, &vptr->context);
3013		velocity_shutdown(vptr);
3014		pci_disable_device(pdev);
3015		pci_set_power_state(pdev, pci_choose_state(pdev, state));
3016	}
3017#else
3018	pci_set_power_state(pdev, pci_choose_state(pdev, state));
3019#endif
3020	spin_unlock_irqrestore(&vptr->lock, flags);
3021	return 0;
3022}
3023
3024/**
3025 *	velocity_restore_context	-	restore registers
3026 *	@vptr: velocity
3027 *	@context: buffer for stored context
3028 *
3029 *	Reload the register configuration from the velocity context
3030 *	created by velocity_save_context.
3031 */
3032static void velocity_restore_context(struct velocity_info *vptr, struct velocity_context *context)
3033{
3034	struct mac_regs __iomem *regs = vptr->mac_regs;
3035	int i;
3036	u8 __iomem *ptr = (u8 __iomem *)regs;
3037
3038	for (i = MAC_REG_PAR; i < MAC_REG_CR0_SET; i += 4)
3039		writel(*((u32 *) (context->mac_reg + i)), ptr + i);
3040
3041	/* Just skip cr0 */
3042	for (i = MAC_REG_CR1_SET; i < MAC_REG_CR0_CLR; i++) {
3043		/* Clear */
3044		writeb(~(*((u8 *) (context->mac_reg + i))), ptr + i + 4);
3045		/* Set */
3046		writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
3047	}
3048
3049	for (i = MAC_REG_MAR; i < MAC_REG_IMR; i += 4)
3050		writel(*((u32 *) (context->mac_reg + i)), ptr + i);
3051
3052	for (i = MAC_REG_RDBASE_LO; i < MAC_REG_FIFO_TEST0; i += 4)
3053		writel(*((u32 *) (context->mac_reg + i)), ptr + i);
3054
3055	for (i = MAC_REG_TDCSR_SET; i <= MAC_REG_RDCSR_SET; i++)
3056		writeb(*((u8 *) (context->mac_reg + i)), ptr + i);
3057}
3058
3059static int velocity_resume(struct pci_dev *pdev)
3060{
3061	struct net_device *dev = pci_get_drvdata(pdev);
3062	struct velocity_info *vptr = netdev_priv(dev);
3063	unsigned long flags;
3064	int i;
3065
3066	if (!netif_running(vptr->dev))
3067		return 0;
3068
3069	pci_set_power_state(pdev, PCI_D0);
3070	pci_enable_wake(pdev, 0, 0);
3071	pci_restore_state(pdev);
3072
3073	mac_wol_reset(vptr->mac_regs);
3074
3075	spin_lock_irqsave(&vptr->lock, flags);
3076	velocity_restore_context(vptr, &vptr->context);
3077	velocity_init_registers(vptr, VELOCITY_INIT_WOL);
3078	mac_disable_int(vptr->mac_regs);
3079
3080	velocity_tx_srv(vptr);
3081
3082	for (i = 0; i < vptr->tx.numq; i++) {
3083		if (vptr->tx.used[i])
3084			mac_tx_queue_wake(vptr->mac_regs, i);
3085	}
3086
3087	mac_enable_int(vptr->mac_regs);
3088	spin_unlock_irqrestore(&vptr->lock, flags);
3089	netif_device_attach(vptr->dev);
3090
3091	return 0;
3092}
3093#endif
3094
3095/*
3096 *	Definition for our device driver. The PCI layer interface
3097 *	uses this to handle all our card discovery and plugging
3098 */
3099static struct pci_driver velocity_driver = {
3100      .name	= VELOCITY_NAME,
3101      .id_table	= velocity_id_table,
3102      .probe	= velocity_found1,
3103      .remove	= __devexit_p(velocity_remove1),
3104#ifdef CONFIG_PM
3105      .suspend	= velocity_suspend,
3106      .resume	= velocity_resume,
3107#endif
3108};
3109
3110
3111/**
3112 *	velocity_ethtool_up	-	pre hook for ethtool
3113 *	@dev: network device
3114 *
3115 *	Called before an ethtool operation. We need to make sure the
3116 *	chip is out of D3 state before we poke at it.
3117 */
3118static int velocity_ethtool_up(struct net_device *dev)
3119{
3120	struct velocity_info *vptr = netdev_priv(dev);
3121	if (!netif_running(dev))
3122		pci_set_power_state(vptr->pdev, PCI_D0);
3123	return 0;
3124}
3125
3126/**
3127 *	velocity_ethtool_down	-	post hook for ethtool
3128 *	@dev: network device
3129 *
3130 *	Called after an ethtool operation. Restore the chip back to D3
3131 *	state if it isn't running.
3132 */
3133static void velocity_ethtool_down(struct net_device *dev)
3134{
3135	struct velocity_info *vptr = netdev_priv(dev);
3136	if (!netif_running(dev))
3137		pci_set_power_state(vptr->pdev, PCI_D3hot);
3138}
3139
3140static int velocity_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
3141{
3142	struct velocity_info *vptr = netdev_priv(dev);
3143	struct mac_regs __iomem *regs = vptr->mac_regs;
3144	u32 status;
3145	status = check_connection_type(vptr->mac_regs);
3146
3147	cmd->supported = SUPPORTED_TP |
3148			SUPPORTED_Autoneg |
3149			SUPPORTED_10baseT_Half |
3150			SUPPORTED_10baseT_Full |
3151			SUPPORTED_100baseT_Half |
3152			SUPPORTED_100baseT_Full |
3153			SUPPORTED_1000baseT_Half |
3154			SUPPORTED_1000baseT_Full;
3155	if (status & VELOCITY_SPEED_1000)
3156		cmd->speed = SPEED_1000;
3157	else if (status & VELOCITY_SPEED_100)
3158		cmd->speed = SPEED_100;
3159	else
3160		cmd->speed = SPEED_10;
3161	cmd->autoneg = (status & VELOCITY_AUTONEG_ENABLE) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
3162	cmd->port = PORT_TP;
3163	cmd->transceiver = XCVR_INTERNAL;
3164	cmd->phy_address = readb(&regs->MIIADR) & 0x1F;
3165
3166	if (status & VELOCITY_DUPLEX_FULL)
3167		cmd->duplex = DUPLEX_FULL;
3168	else
3169		cmd->duplex = DUPLEX_HALF;
3170
3171	return 0;
3172}
3173
3174static int velocity_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
3175{
3176	struct velocity_info *vptr = netdev_priv(dev);
3177	u32 curr_status;
3178	u32 new_status = 0;
3179	int ret = 0;
3180
3181	curr_status = check_connection_type(vptr->mac_regs);
3182	curr_status &= (~VELOCITY_LINK_FAIL);
3183
3184	new_status |= ((cmd->autoneg) ? VELOCITY_AUTONEG_ENABLE : 0);
3185	new_status |= ((cmd->speed == SPEED_100) ? VELOCITY_SPEED_100 : 0);
3186	new_status |= ((cmd->speed == SPEED_10) ? VELOCITY_SPEED_10 : 0);
3187	new_status |= ((cmd->duplex == DUPLEX_FULL) ? VELOCITY_DUPLEX_FULL : 0);
3188
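	/*
	 * Enabling autoneg is only accepted when the requested speed
	 * and duplex match what the link currently reports; anything
	 * else must be requested as a forced mode.
	 */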
3189	if ((new_status & VELOCITY_AUTONEG_ENABLE) && (new_status != (curr_status | VELOCITY_AUTONEG_ENABLE)))
3190		ret = -EINVAL;
3191	else
3192		velocity_set_media_mode(vptr, new_status);
3193
3194	return ret;
3195}
3196
3197static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
3198{
3199	struct velocity_info *vptr = netdev_priv(dev);
3200	strcpy(info->driver, VELOCITY_NAME);
3201	strcpy(info->version, VELOCITY_VERSION);
3202	strcpy(info->bus_info, pci_name(vptr->pdev));
3203}
3204
3205static void velocity_ethtool_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3206{
3207	struct velocity_info *vptr = netdev_priv(dev);
3208	wol->supported = WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP;
3209	wol->wolopts |= WAKE_MAGIC;
3210	/*
3211	   if (vptr->wol_opts & VELOCITY_WOL_PHY)
3212		   wol.wolopts|=WAKE_PHY;
3213			 */
3214	if (vptr->wol_opts & VELOCITY_WOL_UCAST)
3215		wol->wolopts |= WAKE_UCAST;
3216	if (vptr->wol_opts & VELOCITY_WOL_ARP)
3217		wol->wolopts |= WAKE_ARP;
3218	memcpy(&wol->sopass, vptr->wol_passwd, 6);
3219}
3220
3221static int velocity_ethtool_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3222{
3223	struct velocity_info *vptr = netdev_priv(dev);
3224
3225	if (!(wol->wolopts & (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_ARP)))
3226		return -EFAULT;
3227	vptr->wol_opts = VELOCITY_WOL_MAGIC;
3228
3229	/*
3230	   if (wol.wolopts & WAKE_PHY) {
3231	   vptr->wol_opts|=VELOCITY_WOL_PHY;
3232	   vptr->flags |=VELOCITY_FLAGS_WOL_ENABLED;
3233	   }
3234	 */
3235
3236	if (wol->wolopts & WAKE_MAGIC) {
3237		vptr->wol_opts |= VELOCITY_WOL_MAGIC;
3238		vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3239	}
3240	if (wol->wolopts & WAKE_UCAST) {
3241		vptr->wol_opts |= VELOCITY_WOL_UCAST;
3242		vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3243	}
3244	if (wol->wolopts & WAKE_ARP) {
3245		vptr->wol_opts |= VELOCITY_WOL_ARP;
3246		vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;
3247	}
3248	memcpy(vptr->wol_passwd, wol->sopass, 6);
3249	return 0;
3250}
3251
3252static u32 velocity_get_msglevel(struct net_device *dev)
3253{
3254	return msglevel;
3255}
3256
3257static void velocity_set_msglevel(struct net_device *dev, u32 value)
3258{
3259	 msglevel = value;
3260}
3261
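/*
 * The chip's Rx/Tx queue pending timers are encoded as a 2-bit
 * multiplier in bits 7:6 (x1, x4, x16 or x64) and a 6-bit count in
 * bits 5:0. For example, a register value of 0x8a decodes as
 * 0x0a * 16 = 160. The helpers below convert between this encoding
 * and the flat microsecond counts used by the ethtool coalesce API.
 */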
3262static int get_pending_timer_val(int val)
3263{
3264	int mult_bits = val >> 6;
3265	int mult = 1;
3266
3267	switch (mult_bits) {
3269	case 1:
3270		mult = 4; break;
3271	case 2:
3272		mult = 16; break;
3273	case 3:
3274		mult = 64; break;
3275	case 0:
3276	default:
3277		break;
3278	}
3279
3280	return (val & 0x3f) * mult;
3281}
3282
3283static void set_pending_timer_val(int *val, u32 us)
3284{
3285	u8 mult = 0;
3286	u8 shift = 0;
3287
3288	if (us >= 0x3f) {
3289		mult = 1; /* mult with 4 */
3290		shift = 2;
3291	}
3292	if (us >= 0x3f * 4) {
3293		mult = 2; /* mult with 16 */
3294		shift = 4;
3295	}
3296	if (us >= 0x3f * 16) {
3297		mult = 3; /* mult with 64 */
3298		shift = 6;
3299	}
3300
3301	*val = (mult << 6) | ((us >> shift) & 0x3f);
3302}
3303
3304
3305static int velocity_get_coalesce(struct net_device *dev,
3306		struct ethtool_coalesce *ecmd)
3307{
3308	struct velocity_info *vptr = netdev_priv(dev);
3309
3310	ecmd->tx_max_coalesced_frames = vptr->options.tx_intsup;
3311	ecmd->rx_max_coalesced_frames = vptr->options.rx_intsup;
3312
3313	ecmd->rx_coalesce_usecs = get_pending_timer_val(vptr->options.rxqueue_timer);
3314	ecmd->tx_coalesce_usecs = get_pending_timer_val(vptr->options.txqueue_timer);
3315
3316	return 0;
3317}
3318
3319static int velocity_set_coalesce(struct net_device *dev,
3320		struct ethtool_coalesce *ecmd)
3321{
3322	struct velocity_info *vptr = netdev_priv(dev);
3323	int max_us = 0x3f * 64;
3324	unsigned long flags;
3325
3326	/* The hardware timers hold a 6-bit count with a multiplier of at most 64 */
3327	if (ecmd->tx_coalesce_usecs > max_us)
3328		return -EINVAL;
3329	if (ecmd->rx_coalesce_usecs > max_us)
3330		return -EINVAL;
3331
3332	if (ecmd->tx_max_coalesced_frames > 0xff)
3333		return -EINVAL;
3334	if (ecmd->rx_max_coalesced_frames > 0xff)
3335		return -EINVAL;
3336
3337	vptr->options.rx_intsup = ecmd->rx_max_coalesced_frames;
3338	vptr->options.tx_intsup = ecmd->tx_max_coalesced_frames;
3339
3340	set_pending_timer_val(&vptr->options.rxqueue_timer,
3341			ecmd->rx_coalesce_usecs);
3342	set_pending_timer_val(&vptr->options.txqueue_timer,
3343			ecmd->tx_coalesce_usecs);
3344
3345	/* Setup the interrupt suppression and queue timers */
3346	spin_lock_irqsave(&vptr->lock, flags);
3347	mac_disable_int(vptr->mac_regs);
3348	setup_adaptive_interrupts(vptr);
3349	setup_queue_timers(vptr);
3350
3351	mac_write_int_mask(vptr->int_mask, vptr->mac_regs);
3352	mac_clear_isr(vptr->mac_regs);
3353	mac_enable_int(vptr->mac_regs);
3354	spin_unlock_irqrestore(&vptr->lock, flags);
3355
3356	return 0;
3357}
3358
3359static const struct ethtool_ops velocity_ethtool_ops = {
3360	.get_settings	=	velocity_get_settings,
3361	.set_settings	=	velocity_set_settings,
3362	.get_drvinfo	=	velocity_get_drvinfo,
3363	.set_tx_csum	=	ethtool_op_set_tx_csum,
3364	.get_tx_csum	=	ethtool_op_get_tx_csum,
3365	.get_wol	=	velocity_ethtool_get_wol,
3366	.set_wol	=	velocity_ethtool_set_wol,
3367	.get_msglevel	=	velocity_get_msglevel,
3368	.set_msglevel	=	velocity_set_msglevel,
3369	.set_sg 	=	ethtool_op_set_sg,
3370	.get_link	=	velocity_get_link,
3371	.get_coalesce	=	velocity_get_coalesce,
3372	.set_coalesce	=	velocity_set_coalesce,
3373	.begin		=	velocity_ethtool_up,
3374	.complete	=	velocity_ethtool_down
3375};
3376
3377#ifdef CONFIG_PM
3378#ifdef CONFIG_INET
3379static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr)
3380{
3381	struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
3382	struct net_device *dev = ifa->ifa_dev->dev;
3383
3384	if (dev_net(dev) == &init_net &&
3385	    dev->netdev_ops == &velocity_netdev_ops)
3386		velocity_get_ip(netdev_priv(dev));
3387
3388	return NOTIFY_DONE;
3389}
3390#endif	/* CONFIG_INET */
3391#endif	/* CONFIG_PM */
3392
3393#if defined(CONFIG_PM) && defined(CONFIG_INET)
3394static struct notifier_block velocity_inetaddr_notifier = {
3395      .notifier_call	= velocity_netdev_event,
3396};
3397
3398static void velocity_register_notifier(void)
3399{
3400	register_inetaddr_notifier(&velocity_inetaddr_notifier);
3401}
3402
3403static void velocity_unregister_notifier(void)
3404{
3405	unregister_inetaddr_notifier(&velocity_inetaddr_notifier);
3406}
3407
3408#else
3409
3410#define velocity_register_notifier()	do {} while (0)
3411#define velocity_unregister_notifier()	do {} while (0)
3412
3413#endif	/* defined(CONFIG_PM) && defined(CONFIG_INET) */
3414
3415/**
3416 *	velocity_init_module	-	load time function
3417 *
3418 *	Called when the velocity module is loaded. The PCI driver
3419 *	is registered with the PCI layer, and in turn will call
3420 *	the probe functions for each velocity adapter installed
3421 *	in the system.
3422 */
3423static int __init velocity_init_module(void)
3424{
3425	int ret;
3426
3427	velocity_register_notifier();
3428	ret = pci_register_driver(&velocity_driver);
3429	if (ret < 0)
3430		velocity_unregister_notifier();
3431	return ret;
3432}
3433
3434/**
3435 *	velocity_cleanup	-	module unload
3436 *
3437 *	When the velocity hardware is unloaded this function is called.
3438 *	It will clean up the notifiers and then unregister the PCI
3439 *	driver interface for this hardware. This in turn cleans up
3440 *	all discovered interfaces before returning from the function
3441 */
3442static void __exit velocity_cleanup_module(void)
3443{
3444	velocity_unregister_notifier();
3445	pci_unregister_driver(&velocity_driver);
3446}
3447
3448module_init(velocity_init_module);
3449module_exit(velocity_cleanup_module);
3450