1/*
2 * Network device driver for Cell Processor-Based Blade and Celleb platform
3 *
4 * (C) Copyright IBM Corp. 2005
5 * (C) Copyright 2006 TOSHIBA CORPORATION
6 *
7 * Authors : Utz Bacher <utz.bacher@de.ibm.com>
8 *           Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2, or (at your option)
13 * any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
23 */
24
25#include <linux/compiler.h>
26#include <linux/crc32.h>
27#include <linux/delay.h>
28#include <linux/etherdevice.h>
29#include <linux/ethtool.h>
30#include <linux/firmware.h>
31#include <linux/if_vlan.h>
32#include <linux/in.h>
33#include <linux/init.h>
34#include <linux/ioport.h>
35#include <linux/ip.h>
36#include <linux/kernel.h>
37#include <linux/mii.h>
38#include <linux/module.h>
39#include <linux/netdevice.h>
40#include <linux/device.h>
41#include <linux/pci.h>
42#include <linux/skbuff.h>
43#include <linux/slab.h>
44#include <linux/tcp.h>
45#include <linux/types.h>
46#include <linux/vmalloc.h>
47#include <linux/wait.h>
48#include <linux/workqueue.h>
49#include <asm/bitops.h>
50#include <asm/pci-bridge.h>
51#include <net/checksum.h>
52
53#include "spider_net.h"
54
55MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com> and Jens Osterkamp " \
56	      "<Jens.Osterkamp@de.ibm.com>");
57MODULE_DESCRIPTION("Spider Southbridge Gigabit Ethernet driver");
58MODULE_LICENSE("GPL");
59MODULE_VERSION(VERSION);
60
61static int rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_DEFAULT;
62static int tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_DEFAULT;
63
64module_param(rx_descriptors, int, 0444);
65module_param(tx_descriptors, int, 0444);
66
67MODULE_PARM_DESC(rx_descriptors, "number of descriptors used " \
68		 "in rx chains");
69MODULE_PARM_DESC(tx_descriptors, "number of descriptors used " \
70		 "in tx chain");
71
72char spider_net_driver_name[] = "spidernet";
73
74static struct pci_device_id spider_net_pci_tbl[] = {
75	{ PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SPIDER_NET,
76	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
77	{ 0, }
78};
79
80MODULE_DEVICE_TABLE(pci, spider_net_pci_tbl);
81
82/**
83 * spider_net_read_reg - reads an SMMIO register of a card
84 * @card: device structure
85 * @reg: register to read from
86 *
87 * returns the content of the specified SMMIO register.
88 */
89static inline u32
90spider_net_read_reg(struct spider_net_card *card, u32 reg)
91{
92	/* We use the powerpc specific variants instead of readl_be() because
93	 * we know spidernet is not a real PCI device and we can thus avoid the
94	 * performance hit caused by the PCI workarounds.
95	 */
96	return in_be32(card->regs + reg);
97}
98
99/**
100 * spider_net_write_reg - writes to an SMMIO register of a card
101 * @card: device structure
102 * @reg: register to write to
103 * @value: value to write into the specified SMMIO register
104 */
105static inline void
106spider_net_write_reg(struct spider_net_card *card, u32 reg, u32 value)
107{
108	/* We use the powerpc specific variants instead of writel_be() because
109	 * we know spidernet is not a real PCI device and we can thus avoid the
110	 * performance hit caused by the PCI workarounds.
111	 */
112	out_be32(card->regs + reg, value);
113}
114
/**
 * spider_net_write_phy - write to phy register
 * @netdev: adapter to be written to
 * @mii_id: id of MII
 * @reg: PHY register
 * @val: value to be written to phy register
 *
 * spider_net_write_phy writes to an arbitrary PHY
 * register via the spider GPCWOPCMD register. We assume the queue does
 * not run full (not more than 15 commands outstanding).
 */
125static void
126spider_net_write_phy(struct net_device *netdev, int mii_id,
127		     int reg, int val)
128{
129	struct spider_net_card *card = netdev_priv(netdev);
130	u32 writevalue;
131
132	writevalue = ((u32)mii_id << 21) |
133		((u32)reg << 16) | ((u32)val);
134
135	spider_net_write_reg(card, SPIDER_NET_GPCWOPCMD, writevalue);
136}
137
/**
 * spider_net_read_phy - read from phy register
 * @netdev: network device to be read from
 * @mii_id: id of MII
 * @reg: PHY register
 *
 * Returns value read from PHY register
 *
 * spider_net_read_phy reads from an arbitrary PHY
 * register via the spider GPCROPCMD register.
 */
148static int
149spider_net_read_phy(struct net_device *netdev, int mii_id, int reg)
150{
151	struct spider_net_card *card = netdev_priv(netdev);
152	u32 readvalue;
153
154	readvalue = ((u32)mii_id << 21) | ((u32)reg << 16);
155	spider_net_write_reg(card, SPIDER_NET_GPCROPCMD, readvalue);
156
157	/* we don't use semaphores to wait for an SPIDER_NET_GPROPCMPINT
158	 * interrupt, as we poll for the completion of the read operation
159	 * in spider_net_read_phy. Should take about 50 us */
160	do {
161		readvalue = spider_net_read_reg(card, SPIDER_NET_GPCROPCMD);
162	} while (readvalue & SPIDER_NET_GPREXEC);
163
164	readvalue &= SPIDER_NET_GPRDAT_MASK;
165
166	return readvalue;
167}
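
/*
 * Illustrative sketch, not part of the driver: a hypothetical
 * read-modify-write helper built from the two accessors above. The
 * helper name and the BMCR example are assumptions for illustration
 * and are kept out of the build.
 */
#if 0
static void
spider_net_example_phy_rmw(struct net_device *netdev, int mii_id,
			   int reg, u16 clear, u16 set)
{
	int val;

	/* poll-based read via the GPCROPCMD register */
	val = spider_net_read_phy(netdev, mii_id, reg);
	val &= ~clear;
	val |= set;
	/* fire-and-forget write via the GPCWOPCMD register */
	spider_net_write_phy(netdev, mii_id, reg, val);
}

/* e.g. restarting aneg would set BMCR_ANENABLE | BMCR_ANRESTART in MII_BMCR */
#endif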
168
169/**
170 * spider_net_setup_aneg - initial auto-negotiation setup
171 * @card: device structure
172 **/
173static void
174spider_net_setup_aneg(struct spider_net_card *card)
175{
176	struct mii_phy *phy = &card->phy;
177	u32 advertise = 0;
178	u16 bmsr, estat;
179
180	bmsr  = spider_net_read_phy(card->netdev, phy->mii_id, MII_BMSR);
181	estat = spider_net_read_phy(card->netdev, phy->mii_id, MII_ESTATUS);
182
183	if (bmsr & BMSR_10HALF)
184		advertise |= ADVERTISED_10baseT_Half;
185	if (bmsr & BMSR_10FULL)
186		advertise |= ADVERTISED_10baseT_Full;
187	if (bmsr & BMSR_100HALF)
188		advertise |= ADVERTISED_100baseT_Half;
189	if (bmsr & BMSR_100FULL)
190		advertise |= ADVERTISED_100baseT_Full;
191
	if ((bmsr & BMSR_ESTATEN) && (estat & ESTATUS_1000_TFULL))
		advertise |= ADVERTISED_1000baseT_Full;
	if ((bmsr & BMSR_ESTATEN) && (estat & ESTATUS_1000_THALF))
		advertise |= ADVERTISED_1000baseT_Half;
196
197	mii_phy_probe(phy, phy->mii_id);
	phy->def->ops->setup_aneg(phy, advertise);
}
201
202/**
203 * spider_net_rx_irq_off - switch off rx irq on this spider card
204 * @card: device structure
205 *
206 * switches off rx irq by masking them out in the GHIINTnMSK register
207 */
208static void
209spider_net_rx_irq_off(struct spider_net_card *card)
210{
211	u32 regvalue;
212
213	regvalue = SPIDER_NET_INT0_MASK_VALUE & (~SPIDER_NET_RXINT);
214	spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, regvalue);
215}
216
217/**
218 * spider_net_rx_irq_on - switch on rx irq on this spider card
219 * @card: device structure
220 *
221 * switches on rx irq by enabling them in the GHIINTnMSK register
222 */
223static void
224spider_net_rx_irq_on(struct spider_net_card *card)
225{
226	u32 regvalue;
227
228	regvalue = SPIDER_NET_INT0_MASK_VALUE | SPIDER_NET_RXINT;
229	spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, regvalue);
230}
231
232/**
233 * spider_net_set_promisc - sets the unicast address or the promiscuous mode
234 * @card: card structure
235 *
236 * spider_net_set_promisc sets the unicast destination address filter and
237 * thus either allows for non-promisc mode or promisc mode
238 */
239static void
240spider_net_set_promisc(struct spider_net_card *card)
241{
242	u32 macu, macl;
243	struct net_device *netdev = card->netdev;
244
245	if (netdev->flags & IFF_PROMISC) {
246		/* clear destination entry 0 */
247		spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR, 0);
248		spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR + 0x04, 0);
249		spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R,
250				     SPIDER_NET_PROMISC_VALUE);
251	} else {
252		macu = netdev->dev_addr[0];
253		macu <<= 8;
254		macu |= netdev->dev_addr[1];
255		memcpy(&macl, &netdev->dev_addr[2], sizeof(macl));
256
257		macu |= SPIDER_NET_UA_DESCR_VALUE;
258		spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR, macu);
259		spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR + 0x04, macl);
260		spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R,
261				     SPIDER_NET_NONPROMISC_VALUE);
262	}
263}
264
265/**
266 * spider_net_get_mac_address - read mac address from spider card
 * @netdev: interface device structure
268 *
269 * reads MAC address from GMACUNIMACU and GMACUNIMACL registers
270 */
271static int
272spider_net_get_mac_address(struct net_device *netdev)
273{
274	struct spider_net_card *card = netdev_priv(netdev);
275	u32 macl, macu;
276
277	macl = spider_net_read_reg(card, SPIDER_NET_GMACUNIMACL);
278	macu = spider_net_read_reg(card, SPIDER_NET_GMACUNIMACU);
279
280	netdev->dev_addr[0] = (macu >> 24) & 0xff;
281	netdev->dev_addr[1] = (macu >> 16) & 0xff;
282	netdev->dev_addr[2] = (macu >> 8) & 0xff;
283	netdev->dev_addr[3] = macu & 0xff;
284	netdev->dev_addr[4] = (macl >> 8) & 0xff;
285	netdev->dev_addr[5] = macl & 0xff;
286
287	if (!is_valid_ether_addr(&netdev->dev_addr[0]))
288		return -EINVAL;
289
290	return 0;
291}
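
/*
 * Worked example of the register layout decoded above (illustration
 * only): for the address 00:11:22:33:44:55 the card would report
 * GMACUNIMACU = 0x00112233 and the low 16 bits of GMACUNIMACL would
 * read 0x4455.
 */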
292
293/**
 * spider_net_get_descr_status - returns the status of a descriptor
 * @hwdescr: hardware descriptor to look at
296 *
297 * returns the status as in the dmac_cmd_status field of the descriptor
298 */
299static inline int
300spider_net_get_descr_status(struct spider_net_hw_descr *hwdescr)
301{
302	return hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_IND_PROC_MASK;
303}
304
305/**
306 * spider_net_free_chain - free descriptor chain
307 * @card: card structure
308 * @chain: address of chain
309 *
310 */
311static void
312spider_net_free_chain(struct spider_net_card *card,
313		      struct spider_net_descr_chain *chain)
314{
315	struct spider_net_descr *descr;
316
317	descr = chain->ring;
318	do {
319		descr->bus_addr = 0;
320		descr->hwdescr->next_descr_addr = 0;
321		descr = descr->next;
322	} while (descr != chain->ring);
323
	dma_free_coherent(&card->pdev->dev,
	    chain->num_desc * sizeof(struct spider_net_hw_descr),
	    chain->hwring, chain->dma_addr);
326}
327
328/**
329 * spider_net_init_chain - alloc and link descriptor chain
330 * @card: card structure
331 * @chain: address of chain
332 *
333 * We manage a circular list that mirrors the hardware structure,
334 * except that the hardware uses bus addresses.
335 *
336 * Returns 0 on success, <0 on failure
337 */
338static int
339spider_net_init_chain(struct spider_net_card *card,
340		       struct spider_net_descr_chain *chain)
341{
342	int i;
343	struct spider_net_descr *descr;
344	struct spider_net_hw_descr *hwdescr;
345	dma_addr_t buf;
346	size_t alloc_size;
347
348	alloc_size = chain->num_desc * sizeof(struct spider_net_hw_descr);
349
350	chain->hwring = dma_alloc_coherent(&card->pdev->dev, alloc_size,
351		&chain->dma_addr, GFP_KERNEL);
352
353	if (!chain->hwring)
354		return -ENOMEM;
355
356	memset(chain->ring, 0, chain->num_desc * sizeof(struct spider_net_descr));
357
358	/* Set up the hardware pointers in each descriptor */
359	descr = chain->ring;
360	hwdescr = chain->hwring;
361	buf = chain->dma_addr;
362	for (i=0; i < chain->num_desc; i++, descr++, hwdescr++) {
363		hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
364		hwdescr->next_descr_addr = 0;
365
366		descr->hwdescr = hwdescr;
367		descr->bus_addr = buf;
368		descr->next = descr + 1;
369		descr->prev = descr - 1;
370
371		buf += sizeof(struct spider_net_hw_descr);
372	}
373	/* do actual circular list */
374	(descr-1)->next = chain->ring;
375	chain->ring->prev = descr-1;
376
377	spin_lock_init(&chain->lock);
378	chain->head = chain->ring;
379	chain->tail = chain->ring;
380	return 0;
381}
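
/*
 * Illustrative sketch of a caller (hypothetical, not the driver's real
 * setup path): the shadow rings are carved out of card->descr and sized
 * from the module parameters before spider_net_init_chain() allocates
 * the DMA-coherent hardware rings.
 */
#if 0
static int spider_net_example_setup_chains(struct spider_net_card *card)
{
	card->tx_chain.num_desc = tx_descriptors;
	card->rx_chain.num_desc = rx_descriptors;

	/* software (shadow) descriptors for both chains */
	card->tx_chain.ring = card->descr;
	card->rx_chain.ring = card->descr + tx_descriptors;

	if (spider_net_init_chain(card, &card->tx_chain))
		return -ENOMEM;
	if (spider_net_init_chain(card, &card->rx_chain)) {
		spider_net_free_chain(card, &card->tx_chain);
		return -ENOMEM;
	}
	return 0;
}
#endif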
382
383/**
384 * spider_net_free_rx_chain_contents - frees descr contents in rx chain
385 * @card: card structure
386 *
 * Unmaps and frees the skbs attached to the rx chain descriptors.
388 */
389static void
390spider_net_free_rx_chain_contents(struct spider_net_card *card)
391{
392	struct spider_net_descr *descr;
393
394	descr = card->rx_chain.head;
395	do {
396		if (descr->skb) {
397			pci_unmap_single(card->pdev, descr->hwdescr->buf_addr,
398					 SPIDER_NET_MAX_FRAME,
399					 PCI_DMA_BIDIRECTIONAL);
400			dev_kfree_skb(descr->skb);
401			descr->skb = NULL;
402		}
403		descr = descr->next;
404	} while (descr != card->rx_chain.head);
405}
406
407/**
408 * spider_net_prepare_rx_descr - Reinitialize RX descriptor
409 * @card: card structure
410 * @descr: descriptor to re-init
411 *
 * Returns 0 on success, <0 on failure.
413 *
414 * Allocates a new rx skb, iommu-maps it and attaches it to the
415 * descriptor. Mark the descriptor as activated, ready-to-use.
416 */
417static int
418spider_net_prepare_rx_descr(struct spider_net_card *card,
419			    struct spider_net_descr *descr)
420{
421	struct spider_net_hw_descr *hwdescr = descr->hwdescr;
422	dma_addr_t buf;
423	int offset;
424	int bufsize;
425
426	/* we need to round up the buffer size to a multiple of 128 */
427	bufsize = (SPIDER_NET_MAX_FRAME + SPIDER_NET_RXBUF_ALIGN - 1) &
428		(~(SPIDER_NET_RXBUF_ALIGN - 1));
429
	/* we also need the buffer 128 byte aligned, therefore we allocate a
	 * bit more and align skb->data below */
433	descr->skb = netdev_alloc_skb(card->netdev,
434				      bufsize + SPIDER_NET_RXBUF_ALIGN - 1);
435	if (!descr->skb) {
436		if (netif_msg_rx_err(card) && net_ratelimit())
437			pr_err("Not enough memory to allocate rx buffer\n");
438		card->spider_stats.alloc_rx_skb_error++;
439		return -ENOMEM;
440	}
441	hwdescr->buf_size = bufsize;
442	hwdescr->result_size = 0;
443	hwdescr->valid_size = 0;
444	hwdescr->data_status = 0;
445	hwdescr->data_error = 0;
446
447	offset = ((unsigned long)descr->skb->data) &
448		(SPIDER_NET_RXBUF_ALIGN - 1);
449	if (offset)
450		skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset);
451	/* iommu-map the skb */
452	buf = pci_map_single(card->pdev, descr->skb->data,
453			SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);
454	if (pci_dma_mapping_error(buf)) {
455		dev_kfree_skb_any(descr->skb);
456		descr->skb = NULL;
457		if (netif_msg_rx_err(card) && net_ratelimit())
458			pr_err("Could not iommu-map rx buffer\n");
459		card->spider_stats.rx_iommu_map_error++;
460		hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
461	} else {
462		hwdescr->buf_addr = buf;
463		wmb();
464		hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_CARDOWNED |
465					 SPIDER_NET_DMAC_NOINTR_COMPLETE;
466	}
467
468	return 0;
469}
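
/*
 * Worked example of the alignment logic above, with values assumed for
 * illustration (SPIDER_NET_MAX_FRAME = 2312, SPIDER_NET_RXBUF_ALIGN = 128):
 *
 *   bufsize = (2312 + 127) & ~127 = 2432
 *
 * The skb is allocated with bufsize + 127 bytes of room so that,
 * whatever alignment netdev_alloc_skb() returns, skb_reserve() can push
 * skb->data forward to the next 128-byte boundary without shrinking the
 * usable buffer below bufsize.
 */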
470
471/**
472 * spider_net_enable_rxchtails - sets RX dmac chain tail addresses
473 * @card: card structure
474 *
 * spider_net_enable_rxchtails sets the RX DMAC chain tail addresses in the
476 * chip by writing to the appropriate register. DMA is enabled in
477 * spider_net_enable_rxdmac.
478 */
479static inline void
480spider_net_enable_rxchtails(struct spider_net_card *card)
481{
482	/* assume chain is aligned correctly */
	spider_net_write_reg(card, SPIDER_NET_GDADCHA,
484			     card->rx_chain.tail->bus_addr);
485}
486
487/**
488 * spider_net_enable_rxdmac - enables a receive DMA controller
489 * @card: card structure
490 *
491 * spider_net_enable_rxdmac enables the DMA controller by setting RX_DMA_EN
492 * in the GDADMACCNTR register
493 */
494static inline void
495spider_net_enable_rxdmac(struct spider_net_card *card)
496{
497	wmb();
498	spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR,
499			     SPIDER_NET_DMA_RX_VALUE);
500}
501
502/**
503 * spider_net_refill_rx_chain - refills descriptors/skbs in the rx chains
504 * @card: card structure
505 *
506 * refills descriptors in the rx chain: allocates skbs and iommu-maps them.
507 */
508static void
509spider_net_refill_rx_chain(struct spider_net_card *card)
510{
511	struct spider_net_descr_chain *chain = &card->rx_chain;
512	unsigned long flags;
513
	/* one context doing the refill (and a second context seeing that
	 * and skipping it) is ok. If called by NAPI, we'll be called again
516	 * as spider_net_decode_one_descr is called several times. If some
517	 * interrupt calls us, the NAPI is about to clean up anyway. */
518	if (!spin_trylock_irqsave(&chain->lock, flags))
519		return;
520
521	while (spider_net_get_descr_status(chain->head->hwdescr) ==
522			SPIDER_NET_DESCR_NOT_IN_USE) {
523		if (spider_net_prepare_rx_descr(card, chain->head))
524			break;
525		chain->head = chain->head->next;
526	}
527
528	spin_unlock_irqrestore(&chain->lock, flags);
529}
530
531/**
532 * spider_net_alloc_rx_skbs - Allocates rx skbs in rx descriptor chains
533 * @card: card structure
534 *
535 * Returns 0 on success, <0 on failure.
536 */
537static int
538spider_net_alloc_rx_skbs(struct spider_net_card *card)
539{
540	struct spider_net_descr_chain *chain = &card->rx_chain;
541	struct spider_net_descr *start = chain->tail;
542	struct spider_net_descr *descr = start;
543
544	/* Link up the hardware chain pointers */
545	do {
546		descr->prev->hwdescr->next_descr_addr = descr->bus_addr;
547		descr = descr->next;
548	} while (descr != start);
549
	/* Put at least one buffer into the chain. If this fails,
551	 * we've got a problem. If not, spider_net_refill_rx_chain
552	 * will do the rest at the end of this function. */
553	if (spider_net_prepare_rx_descr(card, chain->head))
554		goto error;
555	else
556		chain->head = chain->head->next;
557
	/* This will allocate the rest of the rx buffers;
	 * if it cannot, the chain is simply refilled later on. */
560	spider_net_refill_rx_chain(card);
561	spider_net_enable_rxdmac(card);
562	return 0;
563
564error:
565	spider_net_free_rx_chain_contents(card);
566	return -ENOMEM;
567}
568
569/**
570 * spider_net_get_multicast_hash - generates hash for multicast filter table
 * @netdev: interface device structure
 * @addr: multicast address
572 *
573 * returns the hash value.
574 *
575 * spider_net_get_multicast_hash calculates a hash value for a given multicast
 * address, which is used to set the multicast filter tables
577 */
578static u8
579spider_net_get_multicast_hash(struct net_device *netdev, __u8 *addr)
580{
581	u32 crc;
582	u8 hash;
583	char addr_for_crc[ETH_ALEN] = { 0, };
584	int i, bit;
585
586	for (i = 0; i < ETH_ALEN * 8; i++) {
587		bit = (addr[i / 8] >> (i % 8)) & 1;
588		addr_for_crc[ETH_ALEN - 1 - i / 8] += bit << (7 - (i % 8));
589	}
590
591	crc = crc32_be(~0, addr_for_crc, netdev->addr_len);
592
593	hash = (crc >> 27);
594	hash <<= 3;
595	hash |= crc & 7;
596	hash &= 0xff;
597
598	return hash;
599}
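
/*
 * Illustrative sketch (kept out of the build): the 8-bit hash above is
 * simply an index into the 256-entry multicast filter bitmap, combining
 * the top five and bottom three bits of the big-endian CRC. The helper
 * below is hypothetical and only shows how a lookup would be phrased.
 */
#if 0
static bool
spider_net_example_mc_hit(struct net_device *netdev, __u8 *addr,
			  const unsigned long *bitmask)
{
	u8 hash = spider_net_get_multicast_hash(netdev, addr);

	return test_bit(hash, bitmask);
}
#endif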
600
601/**
602 * spider_net_set_multi - sets multicast addresses and promisc flags
603 * @netdev: interface device structure
604 *
605 * spider_net_set_multi configures multicast addresses as needed for the
606 * netdev interface. It also sets up multicast, allmulti and promisc
607 * flags appropriately
608 */
609static void
610spider_net_set_multi(struct net_device *netdev)
611{
612	struct dev_mc_list *mc;
613	u8 hash;
614	int i;
615	u32 reg;
616	struct spider_net_card *card = netdev_priv(netdev);
617	unsigned long bitmask[SPIDER_NET_MULTICAST_HASHES / BITS_PER_LONG] =
618		{0, };
619
620	spider_net_set_promisc(card);
621
622	if (netdev->flags & IFF_ALLMULTI) {
623		for (i = 0; i < SPIDER_NET_MULTICAST_HASHES; i++) {
624			set_bit(i, bitmask);
625		}
626		goto write_hash;
627	}
628
	/* well, we know what the broadcast hash value is: it's xfd
630	hash = spider_net_get_multicast_hash(netdev, netdev->broadcast); */
631	set_bit(0xfd, bitmask);
632
633	for (mc = netdev->mc_list; mc; mc = mc->next) {
634		hash = spider_net_get_multicast_hash(netdev, mc->dmi_addr);
635		set_bit(hash, bitmask);
636	}
637
638write_hash:
639	for (i = 0; i < SPIDER_NET_MULTICAST_HASHES / 4; i++) {
640		reg = 0;
641		if (test_bit(i * 4, bitmask))
642			reg += 0x08;
643		reg <<= 8;
644		if (test_bit(i * 4 + 1, bitmask))
645			reg += 0x08;
646		reg <<= 8;
647		if (test_bit(i * 4 + 2, bitmask))
648			reg += 0x08;
649		reg <<= 8;
650		if (test_bit(i * 4 + 3, bitmask))
651			reg += 0x08;
652
653		spider_net_write_reg(card, SPIDER_NET_GMRMHFILnR + i * 4, reg);
654	}
655}
656
657/**
658 * spider_net_disable_rxdmac - disables the receive DMA controller
659 * @card: card structure
660 *
661 * spider_net_disable_rxdmac terminates processing on the DMA controller by
 * turning off DMA and issuing a force end
663 */
664static void
665spider_net_disable_rxdmac(struct spider_net_card *card)
666{
667	spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR,
668			     SPIDER_NET_DMA_RX_FEND_VALUE);
669}
670
671/**
672 * spider_net_prepare_tx_descr - fill tx descriptor with skb data
673 * @card: card structure
675 * @skb: packet to use
676 *
677 * returns 0 on success, <0 on failure.
678 *
679 * fills out the descriptor structure with skb data and len. Copies data,
680 * if needed (32bit DMA!)
681 */
682static int
683spider_net_prepare_tx_descr(struct spider_net_card *card,
684			    struct sk_buff *skb)
685{
686	struct spider_net_descr_chain *chain = &card->tx_chain;
687	struct spider_net_descr *descr;
688	struct spider_net_hw_descr *hwdescr;
689	dma_addr_t buf;
690	unsigned long flags;
691
692	buf = pci_map_single(card->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
693	if (pci_dma_mapping_error(buf)) {
694		if (netif_msg_tx_err(card) && net_ratelimit())
695			pr_err("could not iommu-map packet (%p, %i). "
696				  "Dropping packet\n", skb->data, skb->len);
697		card->spider_stats.tx_iommu_map_error++;
698		return -ENOMEM;
699	}
700
701	spin_lock_irqsave(&chain->lock, flags);
702	descr = card->tx_chain.head;
703	if (descr->next == chain->tail->prev) {
704		spin_unlock_irqrestore(&chain->lock, flags);
705		pci_unmap_single(card->pdev, buf, skb->len, PCI_DMA_TODEVICE);
706		return -ENOMEM;
707	}
708	hwdescr = descr->hwdescr;
709	chain->head = descr->next;
710
711	descr->skb = skb;
712	hwdescr->buf_addr = buf;
713	hwdescr->buf_size = skb->len;
714	hwdescr->next_descr_addr = 0;
715	hwdescr->data_status = 0;
716
717	hwdescr->dmac_cmd_status =
718			SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_NOCS;
719	spin_unlock_irqrestore(&chain->lock, flags);
720
721	if (skb->ip_summed == CHECKSUM_PARTIAL)
722		switch (ip_hdr(skb)->protocol) {
723		case IPPROTO_TCP:
724			hwdescr->dmac_cmd_status |= SPIDER_NET_DMAC_TCP;
725			break;
726		case IPPROTO_UDP:
727			hwdescr->dmac_cmd_status |= SPIDER_NET_DMAC_UDP;
728			break;
729		}
730
731	/* Chain the bus address, so that the DMA engine finds this descr. */
732	wmb();
733	descr->prev->hwdescr->next_descr_addr = descr->bus_addr;
734
735	card->netdev->trans_start = jiffies; /* set netdev watchdog timer */
736	return 0;
737}
738
739static int
740spider_net_set_low_watermark(struct spider_net_card *card)
741{
742	struct spider_net_descr *descr = card->tx_chain.tail;
743	struct spider_net_hw_descr *hwdescr;
744	unsigned long flags;
745	int status;
746	int cnt=0;
747	int i;
748
749	/* Measure the length of the queue. Measurement does not
750	 * need to be precise -- does not need a lock. */
751	while (descr != card->tx_chain.head) {
752		status = descr->hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_NOT_IN_USE;
753		if (status == SPIDER_NET_DESCR_NOT_IN_USE)
754			break;
755		descr = descr->next;
756		cnt++;
757	}
758
759	/* If TX queue is short, don't even bother with interrupts */
760	if (cnt < card->tx_chain.num_desc/4)
761		return cnt;
762
763	/* Set low-watermark 3/4th's of the way into the queue. */
764	descr = card->tx_chain.tail;
765	cnt = (cnt*3)/4;
766	for (i=0;i<cnt; i++)
767		descr = descr->next;
768
769	/* Set the new watermark, clear the old watermark */
770	spin_lock_irqsave(&card->tx_chain.lock, flags);
771	descr->hwdescr->dmac_cmd_status |= SPIDER_NET_DESCR_TXDESFLG;
772	if (card->low_watermark && card->low_watermark != descr) {
773		hwdescr = card->low_watermark->hwdescr;
774		hwdescr->dmac_cmd_status =
775		     hwdescr->dmac_cmd_status & ~SPIDER_NET_DESCR_TXDESFLG;
776	}
777	card->low_watermark = descr;
778	spin_unlock_irqrestore(&card->tx_chain.lock, flags);
779	return cnt;
780}
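
/*
 * Worked example for the watermark logic above (numbers assumed for
 * illustration): with tx_chain.num_desc = 256 and 100 descriptors still
 * pending, cnt = 100 >= 256/4, so the interrupt is not skipped; the
 * TXDESFLG is then set on the descriptor (100*3)/4 = 75 entries past the
 * tail, i.e. the completion interrupt fires once roughly three quarters
 * of the currently queued packets have been transmitted.
 */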
781
782/**
783 * spider_net_release_tx_chain - processes sent tx descriptors
784 * @card: adapter structure
785 * @brutal: if set, don't care about whether descriptor seems to be in use
786 *
787 * returns 0 if the tx ring is empty, otherwise 1.
788 *
789 * spider_net_release_tx_chain releases the tx descriptors that spider has
790 * finished with (if non-brutal) or simply release tx descriptors (if brutal).
791 * If some other context is calling this function, we return 1 so that we're
 * scheduled again (if we were scheduled) and will not lose the initiative.
793 */
794static int
795spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
796{
797	struct spider_net_descr_chain *chain = &card->tx_chain;
798	struct spider_net_descr *descr;
799	struct spider_net_hw_descr *hwdescr;
800	struct sk_buff *skb;
801	u32 buf_addr;
802	unsigned long flags;
803	int status;
804
805	while (1) {
806		spin_lock_irqsave(&chain->lock, flags);
807		if (chain->tail == chain->head) {
808			spin_unlock_irqrestore(&chain->lock, flags);
809			return 0;
810		}
811		descr = chain->tail;
812		hwdescr = descr->hwdescr;
813
814		status = spider_net_get_descr_status(hwdescr);
815		switch (status) {
816		case SPIDER_NET_DESCR_COMPLETE:
817			card->netdev_stats.tx_packets++;
818			card->netdev_stats.tx_bytes += descr->skb->len;
819			break;
820
821		case SPIDER_NET_DESCR_CARDOWNED:
822			if (!brutal) {
823				spin_unlock_irqrestore(&chain->lock, flags);
824				return 1;
825			}
826
827			/* fallthrough, if we release the descriptors
828			 * brutally (then we don't care about
829			 * SPIDER_NET_DESCR_CARDOWNED) */
830
831		case SPIDER_NET_DESCR_RESPONSE_ERROR:
832		case SPIDER_NET_DESCR_PROTECTION_ERROR:
833		case SPIDER_NET_DESCR_FORCE_END:
834			if (netif_msg_tx_err(card))
835				pr_err("%s: forcing end of tx descriptor "
836				       "with status x%02x\n",
837				       card->netdev->name, status);
838			card->netdev_stats.tx_errors++;
839			break;
840
841		default:
842			card->netdev_stats.tx_dropped++;
843			if (!brutal) {
844				spin_unlock_irqrestore(&chain->lock, flags);
845				return 1;
846			}
847		}
848
849		chain->tail = descr->next;
850		hwdescr->dmac_cmd_status |= SPIDER_NET_DESCR_NOT_IN_USE;
851		skb = descr->skb;
852		descr->skb = NULL;
853		buf_addr = hwdescr->buf_addr;
854		spin_unlock_irqrestore(&chain->lock, flags);
855
856		/* unmap the skb */
857		if (skb) {
858			pci_unmap_single(card->pdev, buf_addr, skb->len,
859					PCI_DMA_TODEVICE);
860			dev_kfree_skb(skb);
861		}
862	}
863	return 0;
864}
865
866/**
867 * spider_net_kick_tx_dma - enables TX DMA processing
868 * @card: card structure
869 * @descr: descriptor address to enable TX processing at
870 *
871 * This routine will start the transmit DMA running if
 * it is not already running. This routine need only be
873 * called when queueing a new packet to an empty tx queue.
874 * Writes the current tx chain head as start address
875 * of the tx descriptor chain and enables the transmission
876 * DMA engine.
877 */
878static inline void
879spider_net_kick_tx_dma(struct spider_net_card *card)
880{
881	struct spider_net_descr *descr;
882
883	if (spider_net_read_reg(card, SPIDER_NET_GDTDMACCNTR) &
884			SPIDER_NET_TX_DMA_EN)
885		goto out;
886
887	descr = card->tx_chain.tail;
888	for (;;) {
889		if (spider_net_get_descr_status(descr->hwdescr) ==
890				SPIDER_NET_DESCR_CARDOWNED) {
891			spider_net_write_reg(card, SPIDER_NET_GDTDCHA,
892					descr->bus_addr);
893			spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
894					SPIDER_NET_DMA_TX_VALUE);
895			break;
896		}
897		if (descr == card->tx_chain.head)
898			break;
899		descr = descr->next;
900	}
901
902out:
903	mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER);
904}
905
906/**
907 * spider_net_xmit - transmits a frame over the device
908 * @skb: packet to send out
909 * @netdev: interface device structure
910 *
 * returns NETDEV_TX_OK on success, NETDEV_TX_BUSY on failure
912 */
913static int
914spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
915{
916	int cnt;
917	struct spider_net_card *card = netdev_priv(netdev);
918
919	spider_net_release_tx_chain(card, 0);
920
921	if (spider_net_prepare_tx_descr(card, skb) != 0) {
922		card->netdev_stats.tx_dropped++;
923		netif_stop_queue(netdev);
924		return NETDEV_TX_BUSY;
925	}
926
927	cnt = spider_net_set_low_watermark(card);
928	if (cnt < 5)
929		spider_net_kick_tx_dma(card);
930	return NETDEV_TX_OK;
931}
932
933/**
934 * spider_net_cleanup_tx_ring - cleans up the TX ring
935 * @card: card structure
936 *
937 * spider_net_cleanup_tx_ring is called by either the tx_timer
938 * or from the NAPI polling routine.
 * This routine releases resources associated with transmitted
940 * packets, including updating the queue tail pointer.
941 */
942static void
943spider_net_cleanup_tx_ring(struct spider_net_card *card)
944{
945	if ((spider_net_release_tx_chain(card, 0) != 0) &&
946	    (card->netdev->flags & IFF_UP)) {
947		spider_net_kick_tx_dma(card);
948		netif_wake_queue(card->netdev);
949	}
950}
951
952/**
953 * spider_net_do_ioctl - called for device ioctls
954 * @netdev: interface device structure
955 * @ifr: request parameter structure for ioctl
956 * @cmd: command code for ioctl
957 *
958 * returns 0 on success, <0 on failure. Currently, we have no special ioctls.
 * -EOPNOTSUPP is returned if an unknown ioctl is requested
960 */
961static int
962spider_net_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
963{
964	switch (cmd) {
965	default:
966		return -EOPNOTSUPP;
967	}
968}
969
970/**
971 * spider_net_pass_skb_up - takes an skb from a descriptor and passes it on
972 * @descr: descriptor to process
973 * @card: card structure
974 *
975 * Fills out skb structure and passes the data to the stack.
976 * The descriptor state is not changed.
977 */
978static void
979spider_net_pass_skb_up(struct spider_net_descr *descr,
980		       struct spider_net_card *card)
981{
982	struct spider_net_hw_descr *hwdescr= descr->hwdescr;
983	struct sk_buff *skb;
984	struct net_device *netdev;
985	u32 data_status, data_error;
986
987	data_status = hwdescr->data_status;
988	data_error = hwdescr->data_error;
989	netdev = card->netdev;
990
991	skb = descr->skb;
992	skb_put(skb, hwdescr->valid_size);
993
994	/* the card seems to add 2 bytes of junk in front
995	 * of the ethernet frame */
996#define SPIDER_MISALIGN		2
997	skb_pull(skb, SPIDER_MISALIGN);
998	skb->protocol = eth_type_trans(skb, netdev);
999
1000	/* checksum offload */
1001	if (card->options.rx_csum) {
1002		if ( ( (data_status & SPIDER_NET_DATA_STATUS_CKSUM_MASK) ==
1003		       SPIDER_NET_DATA_STATUS_CKSUM_MASK) &&
1004		     !(data_error & SPIDER_NET_DATA_ERR_CKSUM_MASK))
1005			skb->ip_summed = CHECKSUM_UNNECESSARY;
1006		else
1007			skb->ip_summed = CHECKSUM_NONE;
1008	} else
1009		skb->ip_summed = CHECKSUM_NONE;
1010
1011	if (data_status & SPIDER_NET_VLAN_PACKET) {
1012		/* further enhancements: HW-accel VLAN
1013		 * vlan_hwaccel_receive_skb
1014		 */
1015	}
1016
1017	/* update netdevice statistics */
1018	card->netdev_stats.rx_packets++;
1019	card->netdev_stats.rx_bytes += skb->len;
1020
1021	/* pass skb up to stack */
1022	netif_receive_skb(skb);
1023}
1024
1025#ifdef DEBUG
1026static void show_rx_chain(struct spider_net_card *card)
1027{
1028	struct spider_net_descr_chain *chain = &card->rx_chain;
1029	struct spider_net_descr *start= chain->tail;
1030	struct spider_net_descr *descr= start;
1031	int status;
1032
1033	int cnt = 0;
1034	int cstat = spider_net_get_descr_status(descr);
1035	printk(KERN_INFO "RX chain tail at descr=%ld\n",
1036	     (start - card->descr) - card->tx_chain.num_desc);
1037	status = cstat;
1038	do
1039	{
1040		status = spider_net_get_descr_status(descr);
1041		if (cstat != status) {
1042			printk(KERN_INFO "Have %d descrs with stat=x%08x\n", cnt, cstat);
1043			cstat = status;
1044			cnt = 0;
1045		}
1046		cnt ++;
1047		descr = descr->next;
1048	} while (descr != start);
1049	printk(KERN_INFO "Last %d descrs with stat=x%08x\n", cnt, cstat);
1050}
1051#endif
1052
1053/**
 * spider_net_resync_head_ptr - advance head ptr past empty descrs
 * @card: card structure
 *
 * If the driver fails to keep up and empty the queue, then the
 * hardware will run out of room to put incoming packets. This
 * will cause the hardware to skip descrs that are full (instead
 * of halting/retrying). Thus, once the driver runs again, it will need
 * to "catch up" to where the hardware chain pointer is at.
1061 */
1062static void spider_net_resync_head_ptr(struct spider_net_card *card)
1063{
1064	unsigned long flags;
1065	struct spider_net_descr_chain *chain = &card->rx_chain;
1066	struct spider_net_descr *descr;
1067	int i, status;
1068
1069	/* Advance head pointer past any empty descrs */
1070	descr = chain->head;
1071	status = spider_net_get_descr_status(descr->hwdescr);
1072
1073	if (status == SPIDER_NET_DESCR_NOT_IN_USE)
1074		return;
1075
1076	spin_lock_irqsave(&chain->lock, flags);
1077
1078	descr = chain->head;
1079	status = spider_net_get_descr_status(descr->hwdescr);
1080	for (i=0; i<chain->num_desc; i++) {
1081		if (status != SPIDER_NET_DESCR_CARDOWNED) break;
1082		descr = descr->next;
1083		status = spider_net_get_descr_status(descr->hwdescr);
1084	}
1085	chain->head = descr;
1086
1087	spin_unlock_irqrestore(&chain->lock, flags);
1088}
1089
1090static int spider_net_resync_tail_ptr(struct spider_net_card *card)
1091{
1092	struct spider_net_descr_chain *chain = &card->rx_chain;
1093	struct spider_net_descr *descr;
1094	int i, status;
1095
1096	/* Advance tail pointer past any empty and reaped descrs */
1097	descr = chain->tail;
1098	status = spider_net_get_descr_status(descr->hwdescr);
1099
1100	for (i=0; i<chain->num_desc; i++) {
1101		if ((status != SPIDER_NET_DESCR_CARDOWNED) &&
1102		    (status != SPIDER_NET_DESCR_NOT_IN_USE)) break;
1103		descr = descr->next;
1104		status = spider_net_get_descr_status(descr->hwdescr);
1105	}
1106	chain->tail = descr;
1107
1108	if ((i == chain->num_desc) || (i == 0))
1109		return 1;
1110	return 0;
1111}
1112
1113/**
1114 * spider_net_decode_one_descr - processes an RX descriptor
1115 * @card: card structure
1116 *
1117 * Returns 1 if a packet has been sent to the stack, otherwise 0.
1118 *
1119 * Processes an RX descriptor by iommu-unmapping the data buffer
1120 * and passing the packet up to the stack. This function is called
1121 * in softirq context, e.g. either bottom half from interrupt or
1122 * NAPI polling context.
1123 */
1124static int
1125spider_net_decode_one_descr(struct spider_net_card *card)
1126{
1127	struct spider_net_descr_chain *chain = &card->rx_chain;
1128	struct spider_net_descr *descr = chain->tail;
1129	struct spider_net_hw_descr *hwdescr = descr->hwdescr;
1130	int status;
1131
1132	status = spider_net_get_descr_status(hwdescr);
1133
1134	/* Nothing in the descriptor, or ring must be empty */
1135	if ((status == SPIDER_NET_DESCR_CARDOWNED) ||
1136	    (status == SPIDER_NET_DESCR_NOT_IN_USE))
1137		return 0;
1138
1139	/* descriptor definitively used -- move on tail */
1140	chain->tail = descr->next;
1141
1142	/* unmap descriptor */
1143	pci_unmap_single(card->pdev, hwdescr->buf_addr,
1144			SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);
1145
1146	if ( (status == SPIDER_NET_DESCR_RESPONSE_ERROR) ||
1147	     (status == SPIDER_NET_DESCR_PROTECTION_ERROR) ||
1148	     (status == SPIDER_NET_DESCR_FORCE_END) ) {
1149		if (netif_msg_rx_err(card))
1150			pr_err("%s: dropping RX descriptor with state %d\n",
1151			       card->netdev->name, status);
1152		card->netdev_stats.rx_dropped++;
1153		goto bad_desc;
1154	}
1155
1156	if ( (status != SPIDER_NET_DESCR_COMPLETE) &&
1157	     (status != SPIDER_NET_DESCR_FRAME_END) ) {
1158		if (netif_msg_rx_err(card))
1159			pr_err("%s: RX descriptor with unknown state %d\n",
1160			       card->netdev->name, status);
1161		card->spider_stats.rx_desc_unk_state++;
1162		goto bad_desc;
1163	}
1164
1165	/* The cases we'll throw away the packet immediately */
1166	if (hwdescr->data_error & SPIDER_NET_DESTROY_RX_FLAGS) {
1167		if (netif_msg_rx_err(card))
1168			pr_err("%s: error in received descriptor found, "
1169			       "data_status=x%08x, data_error=x%08x\n",
1170			       card->netdev->name,
1171			       hwdescr->data_status, hwdescr->data_error);
1172		goto bad_desc;
1173	}
1174
1175	if (hwdescr->dmac_cmd_status & 0xfcf4) {
1176		pr_err("%s: bad status, cmd_status=x%08x\n",
1177			       card->netdev->name,
1178			       hwdescr->dmac_cmd_status);
1179		pr_err("buf_addr=x%08x\n", hwdescr->buf_addr);
1180		pr_err("buf_size=x%08x\n", hwdescr->buf_size);
1181		pr_err("next_descr_addr=x%08x\n", hwdescr->next_descr_addr);
1182		pr_err("result_size=x%08x\n", hwdescr->result_size);
1183		pr_err("valid_size=x%08x\n", hwdescr->valid_size);
1184		pr_err("data_status=x%08x\n", hwdescr->data_status);
1185		pr_err("data_error=x%08x\n", hwdescr->data_error);
1186		pr_err("which=%ld\n", descr - card->rx_chain.ring);
1187
1188		card->spider_stats.rx_desc_error++;
1189		goto bad_desc;
1190	}
1191
1192	/* Ok, we've got a packet in descr */
1193	spider_net_pass_skb_up(descr, card);
1194	descr->skb = NULL;
1195	hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
1196	return 1;
1197
1198bad_desc:
1199	dev_kfree_skb_irq(descr->skb);
1200	descr->skb = NULL;
1201	hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
1202	return 0;
1203}
1204
1205/**
1206 * spider_net_poll - NAPI poll function called by the stack to return packets
1207 * @netdev: interface device structure
1208 * @budget: number of packets we can pass to the stack at most
1209 *
 * returns 0 if no more packets are available to the driver/stack. Returns 1
 * if the quota is exceeded but the driver still has packets pending.
1212 *
1213 * spider_net_poll returns all packets from the rx descriptors to the stack
1214 * (using netif_receive_skb). If all/enough packets are up, the driver
1215 * reenables interrupts and returns 0. If not, 1 is returned.
1216 */
1217static int
1218spider_net_poll(struct net_device *netdev, int *budget)
1219{
1220	struct spider_net_card *card = netdev_priv(netdev);
1221	int packets_to_do, packets_done = 0;
1222	int no_more_packets = 0;
1223
1224	spider_net_cleanup_tx_ring(card);
1225	packets_to_do = min(*budget, netdev->quota);
1226
1227	while (packets_to_do) {
1228		if (spider_net_decode_one_descr(card)) {
1229			packets_done++;
1230			packets_to_do--;
1231		} else {
1232			/* no more packets for the stack */
1233			no_more_packets = 1;
1234			break;
1235		}
1236	}
1237
1238	if ((packets_done == 0) && (card->num_rx_ints != 0)) {
1239		no_more_packets = spider_net_resync_tail_ptr(card);
1240		spider_net_resync_head_ptr(card);
1241	}
1242	card->num_rx_ints = 0;
1243
1244	netdev->quota -= packets_done;
1245	*budget -= packets_done;
1246	spider_net_refill_rx_chain(card);
1247	spider_net_enable_rxdmac(card);
1248
1249	/* if all packets are in the stack, enable interrupts and return 0 */
1250	/* if not, return 1 */
1251	if (no_more_packets) {
1252		netif_rx_complete(netdev);
1253		spider_net_rx_irq_on(card);
1254		card->ignore_rx_ramfull = 0;
1255		return 0;
1256	}
1257
1258	return 1;
1259}
1260
1261/**
1262 * spider_net_get_stats - get interface statistics
1263 * @netdev: interface device structure
1264 *
1265 * returns the interface statistics residing in the spider_net_card struct
1266 */
1267static struct net_device_stats *
1268spider_net_get_stats(struct net_device *netdev)
1269{
1270	struct spider_net_card *card = netdev_priv(netdev);
1271	struct net_device_stats *stats = &card->netdev_stats;
1272	return stats;
1273}
1274
1275/**
1276 * spider_net_change_mtu - changes the MTU of an interface
1277 * @netdev: interface device structure
1278 * @new_mtu: new MTU value
1279 *
1280 * returns 0 on success, <0 on failure
1281 */
1282static int
1283spider_net_change_mtu(struct net_device *netdev, int new_mtu)
1284{
1285	/* no need to re-alloc skbs or so -- the max mtu is about 2.3k
1286	 * and mtu is outbound only anyway */
1287	if ( (new_mtu < SPIDER_NET_MIN_MTU ) ||
1288		(new_mtu > SPIDER_NET_MAX_MTU) )
1289		return -EINVAL;
1290	netdev->mtu = new_mtu;
1291	return 0;
1292}
1293
1294/**
1295 * spider_net_set_mac - sets the MAC of an interface
1296 * @netdev: interface device structure
1297 * @ptr: pointer to new MAC address
1298 *
 * Returns 0 on success, <0 on failure.
1301 */
1302static int
1303spider_net_set_mac(struct net_device *netdev, void *p)
1304{
1305	struct spider_net_card *card = netdev_priv(netdev);
1306	u32 macl, macu, regvalue;
1307	struct sockaddr *addr = p;
1308
1309	if (!is_valid_ether_addr(addr->sa_data))
1310		return -EADDRNOTAVAIL;
1311
1312	/* switch off GMACTPE and GMACRPE */
1313	regvalue = spider_net_read_reg(card, SPIDER_NET_GMACOPEMD);
1314	regvalue &= ~((1 << 5) | (1 << 6));
1315	spider_net_write_reg(card, SPIDER_NET_GMACOPEMD, regvalue);
1316
1317	/* write mac */
1318	macu = (addr->sa_data[0]<<24) + (addr->sa_data[1]<<16) +
1319		(addr->sa_data[2]<<8) + (addr->sa_data[3]);
1320	macl = (addr->sa_data[4]<<8) + (addr->sa_data[5]);
1321	spider_net_write_reg(card, SPIDER_NET_GMACUNIMACU, macu);
1322	spider_net_write_reg(card, SPIDER_NET_GMACUNIMACL, macl);
1323
1324	/* switch GMACTPE and GMACRPE back on */
1325	regvalue = spider_net_read_reg(card, SPIDER_NET_GMACOPEMD);
1326	regvalue |= ((1 << 5) | (1 << 6));
1327	spider_net_write_reg(card, SPIDER_NET_GMACOPEMD, regvalue);
1328
1329	spider_net_set_promisc(card);
1330
	/* check whether we have been successful */
1332	if (spider_net_get_mac_address(netdev))
1333		return -EADDRNOTAVAIL;
1334	if (memcmp(netdev->dev_addr,addr->sa_data,netdev->addr_len))
1335		return -EADDRNOTAVAIL;
1336
1337	return 0;
1338}
1339
1340/**
1341 * spider_net_link_reset
1342 * @netdev: net device structure
1343 *
1344 * This is called when the PHY_LINK signal is asserted. For the blade this is
1345 * not connected so we should never get here.
1346 *
1347 */
1348static void
1349spider_net_link_reset(struct net_device *netdev)
{
	struct spider_net_card *card = netdev_priv(netdev);
1353
1354	del_timer_sync(&card->aneg_timer);
1355
1356	/* clear interrupt, block further interrupts */
1357	spider_net_write_reg(card, SPIDER_NET_GMACST,
1358			     spider_net_read_reg(card, SPIDER_NET_GMACST));
1359	spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0);
1360
1361	/* reset phy and setup aneg */
1362	spider_net_setup_aneg(card);
	mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
}
1366
1367/**
1368 * spider_net_handle_error_irq - handles errors raised by an interrupt
1369 * @card: card structure
1370 * @status_reg: interrupt status register 0 (GHIINT0STS)
1371 *
1372 * spider_net_handle_error_irq treats or ignores all error conditions
1373 * found when an interrupt is presented
1374 */
1375static void
1376spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
1377{
1378	u32 error_reg1, error_reg2;
1379	u32 i;
1380	int show_error = 1;
1381
1382	error_reg1 = spider_net_read_reg(card, SPIDER_NET_GHIINT1STS);
1383	error_reg2 = spider_net_read_reg(card, SPIDER_NET_GHIINT2STS);
1384
1385	/* check GHIINT0STS ************************************/
1386	if (status_reg)
1387		for (i = 0; i < 32; i++)
1388			if (status_reg & (1<<i))
1389				switch (i)
1390	{
1391	/* let error_reg1 and error_reg2 evaluation decide, what to do
1392	case SPIDER_NET_PHYINT:
1393	case SPIDER_NET_GMAC2INT:
1394	case SPIDER_NET_GMAC1INT:
1395	case SPIDER_NET_GFIFOINT:
1396	case SPIDER_NET_DMACINT:
1397	case SPIDER_NET_GSYSINT:
1398		break; */
1399
1400	case SPIDER_NET_GIPSINT:
1401		show_error = 0;
1402		break;
1403
1404	case SPIDER_NET_GPWOPCMPINT:
1405		/* PHY write operation completed */
1406		show_error = 0;
1407		break;
1408	case SPIDER_NET_GPROPCMPINT:
1409		/* PHY read operation completed */
1410		/* we don't use semaphores, as we poll for the completion
1411		 * of the read operation in spider_net_read_phy. Should take
1412		 * about 50 us */
1413		show_error = 0;
1414		break;
1415	case SPIDER_NET_GPWFFINT:
1416		/* PHY command queue full */
1417		if (netif_msg_intr(card))
1418			pr_err("PHY write queue full\n");
1419		show_error = 0;
1420		break;
1421
1422	/* case SPIDER_NET_GRMDADRINT: not used. print a message */
1423	/* case SPIDER_NET_GRMARPINT: not used. print a message */
1424	/* case SPIDER_NET_GRMMPINT: not used. print a message */
1425
1426	case SPIDER_NET_GDTDEN0INT:
1427		/* someone has set TX_DMA_EN to 0 */
1428		show_error = 0;
1429		break;
1430
1431	case SPIDER_NET_GDDDEN0INT: /* fallthrough */
1432	case SPIDER_NET_GDCDEN0INT: /* fallthrough */
1433	case SPIDER_NET_GDBDEN0INT: /* fallthrough */
1434	case SPIDER_NET_GDADEN0INT:
1435		/* someone has set RX_DMA_EN to 0 */
1436		show_error = 0;
1437		break;
1438
1439	/* RX interrupts */
1440	case SPIDER_NET_GDDFDCINT:
1441	case SPIDER_NET_GDCFDCINT:
1442	case SPIDER_NET_GDBFDCINT:
1443	case SPIDER_NET_GDAFDCINT:
1444	/* case SPIDER_NET_GDNMINT: not used. print a message */
1445	/* case SPIDER_NET_GCNMINT: not used. print a message */
1446	/* case SPIDER_NET_GBNMINT: not used. print a message */
1447	/* case SPIDER_NET_GANMINT: not used. print a message */
1448	/* case SPIDER_NET_GRFNMINT: not used. print a message */
1449		show_error = 0;
1450		break;
1451
1452	/* TX interrupts */
1453	case SPIDER_NET_GDTFDCINT:
1454		show_error = 0;
1455		break;
1456	case SPIDER_NET_GTTEDINT:
1457		show_error = 0;
1458		break;
1459	case SPIDER_NET_GDTDCEINT:
1460		/* chain end. If a descriptor should be sent, kick off
1461		 * tx dma
1462		if (card->tx_chain.tail != card->tx_chain.head)
1463			spider_net_kick_tx_dma(card);
1464		*/
1465		show_error = 0;
1466		break;
1467
1468	/* case SPIDER_NET_G1TMCNTINT: not used. print a message */
1469	/* case SPIDER_NET_GFREECNTINT: not used. print a message */
1470	}
1471
1472	/* check GHIINT1STS ************************************/
1473	if (error_reg1)
1474		for (i = 0; i < 32; i++)
1475			if (error_reg1 & (1<<i))
1476				switch (i)
1477	{
1478	case SPIDER_NET_GTMFLLINT:
		/* TX RAM full may happen during normal operation.
1480		 * Logging is not needed. */
1481		show_error = 0;
1482		break;
1483	case SPIDER_NET_GRFDFLLINT: /* fallthrough */
1484	case SPIDER_NET_GRFCFLLINT: /* fallthrough */
1485	case SPIDER_NET_GRFBFLLINT: /* fallthrough */
1486	case SPIDER_NET_GRFAFLLINT: /* fallthrough */
1487	case SPIDER_NET_GRMFLLINT:
1488		/* Could happen when rx chain is full */
1489		if (card->ignore_rx_ramfull == 0) {
1490			card->ignore_rx_ramfull = 1;
1491			spider_net_resync_head_ptr(card);
1492			spider_net_refill_rx_chain(card);
1493			spider_net_enable_rxdmac(card);
1494			card->num_rx_ints ++;
1495			netif_rx_schedule(card->netdev);
1496		}
1497		show_error = 0;
1498		break;
1499
1500	/* case SPIDER_NET_GTMSHTINT: problem, print a message */
1501	case SPIDER_NET_GDTINVDINT:
1502		/* allrighty. tx from previous descr ok */
1503		show_error = 0;
1504		break;
1505
1506	/* chain end */
1507	case SPIDER_NET_GDDDCEINT: /* fallthrough */
1508	case SPIDER_NET_GDCDCEINT: /* fallthrough */
1509	case SPIDER_NET_GDBDCEINT: /* fallthrough */
1510	case SPIDER_NET_GDADCEINT:
1511		spider_net_resync_head_ptr(card);
1512		spider_net_refill_rx_chain(card);
1513		spider_net_enable_rxdmac(card);
1514		card->num_rx_ints ++;
1515		netif_rx_schedule(card->netdev);
1516		show_error = 0;
1517		break;
1518
1519	/* invalid descriptor */
1520	case SPIDER_NET_GDDINVDINT: /* fallthrough */
1521	case SPIDER_NET_GDCINVDINT: /* fallthrough */
1522	case SPIDER_NET_GDBINVDINT: /* fallthrough */
1523	case SPIDER_NET_GDAINVDINT:
1524		/* Could happen when rx chain is full */
1525		spider_net_resync_head_ptr(card);
1526		spider_net_refill_rx_chain(card);
1527		spider_net_enable_rxdmac(card);
1528		card->num_rx_ints ++;
1529		netif_rx_schedule(card->netdev);
1530		show_error = 0;
1531		break;
1532
1533	/* case SPIDER_NET_GDTRSERINT: problem, print a message */
1534	/* case SPIDER_NET_GDDRSERINT: problem, print a message */
1535	/* case SPIDER_NET_GDCRSERINT: problem, print a message */
1536	/* case SPIDER_NET_GDBRSERINT: problem, print a message */
1537	/* case SPIDER_NET_GDARSERINT: problem, print a message */
1538	/* case SPIDER_NET_GDSERINT: problem, print a message */
1539	/* case SPIDER_NET_GDTPTERINT: problem, print a message */
1540	/* case SPIDER_NET_GDDPTERINT: problem, print a message */
1541	/* case SPIDER_NET_GDCPTERINT: problem, print a message */
1542	/* case SPIDER_NET_GDBPTERINT: problem, print a message */
1543	/* case SPIDER_NET_GDAPTERINT: problem, print a message */
1544	default:
1545		show_error = 1;
1546		break;
1547	}
1548
1549	/* check GHIINT2STS ************************************/
1550	if (error_reg2)
1551		for (i = 0; i < 32; i++)
1552			if (error_reg2 & (1<<i))
1553				switch (i)
1554	{
1555	/* there is nothing we can (want  to) do at this time. Log a
1556	 * message, we can switch on and off the specific values later on
1557	case SPIDER_NET_GPROPERINT:
1558	case SPIDER_NET_GMCTCRSNGINT:
1559	case SPIDER_NET_GMCTLCOLINT:
1560	case SPIDER_NET_GMCTTMOTINT:
1561	case SPIDER_NET_GMCRCAERINT:
1562	case SPIDER_NET_GMCRCALERINT:
1563	case SPIDER_NET_GMCRALNERINT:
1564	case SPIDER_NET_GMCROVRINT:
1565	case SPIDER_NET_GMCRRNTINT:
1566	case SPIDER_NET_GMCRRXERINT:
1567	case SPIDER_NET_GTITCSERINT:
1568	case SPIDER_NET_GTIFMTERINT:
1569	case SPIDER_NET_GTIPKTRVKINT:
1570	case SPIDER_NET_GTISPINGINT:
1571	case SPIDER_NET_GTISADNGINT:
1572	case SPIDER_NET_GTISPDNGINT:
1573	case SPIDER_NET_GRIFMTERINT:
1574	case SPIDER_NET_GRIPKTRVKINT:
1575	case SPIDER_NET_GRISPINGINT:
1576	case SPIDER_NET_GRISADNGINT:
1577	case SPIDER_NET_GRISPDNGINT:
1578		break;
1579	*/
1580		default:
1581			break;
1582	}
1583
1584	if ((show_error) && (netif_msg_intr(card)) && net_ratelimit())
1585		pr_err("Got error interrupt on %s, GHIINT0STS = 0x%08x, "
1586		       "GHIINT1STS = 0x%08x, GHIINT2STS = 0x%08x\n",
1587		       card->netdev->name,
1588		       status_reg, error_reg1, error_reg2);
1589
1590	/* clear interrupt sources */
1591	spider_net_write_reg(card, SPIDER_NET_GHIINT1STS, error_reg1);
1592	spider_net_write_reg(card, SPIDER_NET_GHIINT2STS, error_reg2);
1593}
1594
1595/**
1596 * spider_net_interrupt - interrupt handler for spider_net
 * @irq: interrupt number
 * @ptr: pointer to net_device
1600 *
1601 * returns IRQ_HANDLED, if interrupt was for driver, or IRQ_NONE, if no
1602 * interrupt found raised by card.
1603 *
 * This is the interrupt handler that turns off
1605 * interrupts for this device and makes the stack poll the driver
1606 */
1607static irqreturn_t
1608spider_net_interrupt(int irq, void *ptr)
1609{
1610	struct net_device *netdev = ptr;
1611	struct spider_net_card *card = netdev_priv(netdev);
1612	u32 status_reg;
1613
1614	status_reg = spider_net_read_reg(card, SPIDER_NET_GHIINT0STS);
1615
1616	if (!status_reg)
1617		return IRQ_NONE;
1618
1619	if (status_reg & SPIDER_NET_RXINT ) {
1620		spider_net_rx_irq_off(card);
1621		netif_rx_schedule(netdev);
1622		card->num_rx_ints ++;
1623	}
1624	if (status_reg & SPIDER_NET_TXINT)
1625		netif_rx_schedule(netdev);
1626
1627	if (status_reg & SPIDER_NET_LINKINT)
1628		spider_net_link_reset(netdev);
1629
1630	if (status_reg & SPIDER_NET_ERRINT )
1631		spider_net_handle_error_irq(card, status_reg);
1632
1633	/* clear interrupt sources */
1634	spider_net_write_reg(card, SPIDER_NET_GHIINT0STS, status_reg);
1635
1636	return IRQ_HANDLED;
1637}
1638
1639#ifdef CONFIG_NET_POLL_CONTROLLER
1640/**
1641 * spider_net_poll_controller - artificial interrupt for netconsole etc.
1642 * @netdev: interface device structure
1643 *
1644 * see Documentation/networking/netconsole.txt
1645 */
1646static void
1647spider_net_poll_controller(struct net_device *netdev)
1648{
1649	disable_irq(netdev->irq);
1650	spider_net_interrupt(netdev->irq, netdev);
1651	enable_irq(netdev->irq);
1652}
1653#endif /* CONFIG_NET_POLL_CONTROLLER */
1654
1655/**
1656 * spider_net_init_card - initializes the card
1657 * @card: card structure
1658 *
1659 * spider_net_init_card initializes the card so that other registers can
1660 * be used
1661 */
1662static void
1663spider_net_init_card(struct spider_net_card *card)
1664{
1665	spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
1666			     SPIDER_NET_CKRCTRL_STOP_VALUE);
1667
1668	spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
1669			     SPIDER_NET_CKRCTRL_RUN_VALUE);
1670
1671	/* trigger ETOMOD signal */
1672	spider_net_write_reg(card, SPIDER_NET_GMACOPEMD,
1673		spider_net_read_reg(card, SPIDER_NET_GMACOPEMD) | 0x4);
1674
1675}
1676
1677/**
1678 * spider_net_enable_card - enables the card by setting all kinds of regs
1679 * @card: card structure
1680 *
1681 * spider_net_enable_card sets a lot of SMMIO registers to enable the device
1682 */
1683static void
1684spider_net_enable_card(struct spider_net_card *card)
1685{
1686	int i;
1687	/* the following array consists of (register),(value) pairs
1688	 * that are set in this function. A register of 0 ends the list */
1689	u32 regs[][2] = {
1690		{ SPIDER_NET_GRESUMINTNUM, 0 },
1691		{ SPIDER_NET_GREINTNUM, 0 },
1692
1693		/* set interrupt frame number registers */
1694		/* clear the single DMA engine registers first */
1695		{ SPIDER_NET_GFAFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
1696		{ SPIDER_NET_GFBFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
1697		{ SPIDER_NET_GFCFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
1698		{ SPIDER_NET_GFDFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
1699		/* then set, what we really need */
1700		{ SPIDER_NET_GFFRMNUM, SPIDER_NET_FRAMENUM_VALUE },
1701
1702		/* timer counter registers and stuff */
1703		{ SPIDER_NET_GFREECNNUM, 0 },
1704		{ SPIDER_NET_GONETIMENUM, 0 },
1705		{ SPIDER_NET_GTOUTFRMNUM, 0 },
1706
1707		/* RX mode setting */
1708		{ SPIDER_NET_GRXMDSET, SPIDER_NET_RXMODE_VALUE },
1709		/* TX mode setting */
1710		{ SPIDER_NET_GTXMDSET, SPIDER_NET_TXMODE_VALUE },
1711		/* IPSEC mode setting */
1712		{ SPIDER_NET_GIPSECINIT, SPIDER_NET_IPSECINIT_VALUE },
1713
1714		{ SPIDER_NET_GFTRESTRT, SPIDER_NET_RESTART_VALUE },
1715
1716		{ SPIDER_NET_GMRWOLCTRL, 0 },
1717		{ SPIDER_NET_GTESTMD, 0x10000000 },
1718		{ SPIDER_NET_GTTQMSK, 0x00400040 },
1719
1720		{ SPIDER_NET_GMACINTEN, 0 },
1721
1722		/* flow control stuff */
1723		{ SPIDER_NET_GMACAPAUSE, SPIDER_NET_MACAPAUSE_VALUE },
1724		{ SPIDER_NET_GMACTXPAUSE, SPIDER_NET_TXPAUSE_VALUE },
1725
1726		{ SPIDER_NET_GMACBSTLMT, SPIDER_NET_BURSTLMT_VALUE },
1727		{ 0, 0}
1728	};
1729
1730	i = 0;
1731	while (regs[i][0]) {
1732		spider_net_write_reg(card, regs[i][0], regs[i][1]);
1733		i++;
1734	}
1735
1736	/* clear unicast filter table entries 1 to 14 */
1737	for (i = 1; i <= 14; i++) {
1738		spider_net_write_reg(card,
1739				     SPIDER_NET_GMRUAFILnR + i * 8,
1740				     0x00080000);
1741		spider_net_write_reg(card,
1742				     SPIDER_NET_GMRUAFILnR + i * 8 + 4,
1743				     0x00000000);
1744	}
1745
1746	spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R, 0x08080000);
1747
1748	spider_net_write_reg(card, SPIDER_NET_ECMODE, SPIDER_NET_ECMODE_VALUE);
1749
	/* set chain tail address for RX chains and
1751	 * enable DMA */
1752	spider_net_enable_rxchtails(card);
1753	spider_net_enable_rxdmac(card);
1754
1755	spider_net_write_reg(card, SPIDER_NET_GRXDMAEN, SPIDER_NET_WOL_VALUE);
1756
1757	spider_net_write_reg(card, SPIDER_NET_GMACLENLMT,
1758			     SPIDER_NET_LENLMT_VALUE);
1759	spider_net_write_reg(card, SPIDER_NET_GMACOPEMD,
1760			     SPIDER_NET_OPMODE_VALUE);
1761
1762	/* set interrupt mask registers */
1763	spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK,
1764			     SPIDER_NET_INT0_MASK_VALUE);
1765	spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK,
1766			     SPIDER_NET_INT1_MASK_VALUE);
1767	spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK,
1768			     SPIDER_NET_INT2_MASK_VALUE);
1769
1770	spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
1771			     SPIDER_NET_GDTBSTA);
1772}
1773
1774/**
1775 * spider_net_download_firmware - loads firmware into the adapter
1776 * @card: card structure
1777 * @firmware_ptr: pointer to firmware data
1778 *
1779 * spider_net_download_firmware loads the firmware data into the
 * adapter. It assumes the length etc. to be correct.
 *
 * Returns 0 on success, <0 on failure
 */
1782static int
1783spider_net_download_firmware(struct spider_net_card *card,
1784			     const void *firmware_ptr)
1785{
1786	int sequencer, i;
1787	const u32 *fw_ptr = firmware_ptr;
1788
1789	/* stop sequencers */
1790	spider_net_write_reg(card, SPIDER_NET_GSINIT,
1791			     SPIDER_NET_STOP_SEQ_VALUE);
1792
1793	for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS;
1794	     sequencer++) {
1795		spider_net_write_reg(card,
1796				     SPIDER_NET_GSnPRGADR + sequencer * 8, 0);
1797		for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) {
1798			spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
1799					     sequencer * 8, *fw_ptr);
1800			fw_ptr++;
1801		}
1802	}
1803
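	/* a non-zero GSINIT here presumably means the sequencers never
	 * stopped and the download did not take effect */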
1804	if (spider_net_read_reg(card, SPIDER_NET_GSINIT))
1805		return -EIO;
1806
1807	spider_net_write_reg(card, SPIDER_NET_GSINIT,
1808			     SPIDER_NET_RUN_SEQ_VALUE);
1809
1810	return 0;
1811}
1812
1813/**
1814 * spider_net_init_firmware - reads in firmware parts
1815 * @card: card structure
1816 *
1817 * Returns 0 on success, <0 on failure
1818 *
 * spider_net_init_firmware requests the sequencer firmware, performs a
 * basic size check, downloads it to the adapter and releases the
 * firmware structure again.
1822 *
1823 * Firmware format
1824 * ===============
1825 * spider_fw.bin is expected to be a file containing 6*1024*4 bytes, 4k being
1826 * the program for each sequencer. Use the command
1827 *    tail -q -n +2 Seq_code1_0x088.txt Seq_code2_0x090.txt              \
1828 *         Seq_code3_0x098.txt Seq_code4_0x0A0.txt Seq_code5_0x0A8.txt   \
1829 *         Seq_code6_0x0B0.txt | xxd -r -p -c4 > spider_fw.bin
1830 *
1831 * to generate spider_fw.bin, if you have sequencer programs with something
1832 * like the following contents for each sequencer:
1833 *    <ONE LINE COMMENT>
1834 *    <FIRST 4-BYTES-WORD FOR SEQUENCER>
1835 *    <SECOND 4-BYTES-WORD FOR SEQUENCER>
1836 *     ...
1837 *    <1024th 4-BYTES-WORD FOR SEQUENCER>
1838 */
1839static int
1840spider_net_init_firmware(struct spider_net_card *card)
1841{
1842	struct firmware *firmware = NULL;
1843	struct device_node *dn;
1844	const u8 *fw_prop = NULL;
1845	int err = -ENOENT;
1846	int fw_size;
1847
1848	if (request_firmware((const struct firmware **)&firmware,
1849			     SPIDER_NET_FIRMWARE_NAME, &card->pdev->dev) == 0) {
		if (firmware->size != SPIDER_NET_FIRMWARE_LEN) {
			if (netif_msg_probe(card))
				pr_err("Incorrect size of spidernet firmware in " \
				       "filesystem. Looking in host firmware...\n");
			release_firmware(firmware);
			goto try_host_fw;
		}
1856		err = spider_net_download_firmware(card, firmware->data);
1857
1858		release_firmware(firmware);
1859		if (err)
1860			goto try_host_fw;
1861
1862		goto done;
1863	}
1864
1865try_host_fw:
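	/* fall back to a firmware image provided by the device tree
	 * ("firmware" property of the device's OF node) */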
1866	dn = pci_device_to_OF_node(card->pdev);
1867	if (!dn)
1868		goto out_err;
1869
1870	fw_prop = of_get_property(dn, "firmware", &fw_size);
1871	if (!fw_prop)
1872		goto out_err;
1873
	if (fw_size != SPIDER_NET_FIRMWARE_LEN) {
		if (netif_msg_probe(card))
			pr_err("Incorrect size of spidernet firmware in " \
			       "host firmware\n");
		goto done;
	}
1880
1881	err = spider_net_download_firmware(card, fw_prop);
1882
1883done:
1884	return err;
1885out_err:
1886	if (netif_msg_probe(card))
1887		pr_err("Couldn't find spidernet firmware in filesystem " \
1888		       "or host firmware\n");
1889	return err;
1890}
1891
1892/**
 * spider_net_open - called upon ifconfig up
1894 * @netdev: interface device structure
1895 *
1896 * returns 0 on success, <0 on failure
1897 *
1898 * spider_net_open allocates all the descriptors and memory needed for
1899 * operation, sets up multicast list and enables interrupts
1900 */
1901int
1902spider_net_open(struct net_device *netdev)
1903{
1904	struct spider_net_card *card = netdev_priv(netdev);
1905	int result;
1906
1907	result = spider_net_init_firmware(card);
1908	if (result)
1909		goto init_firmware_failed;
1910
1911	/* start probing with copper */
1912	spider_net_setup_aneg(card);
1913	if (card->phy.def->phy_id)
1914		mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
1915
1916	result = spider_net_init_chain(card, &card->tx_chain);
1917	if (result)
1918		goto alloc_tx_failed;
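	/* no TX low-watermark descriptor yet; the transmit path
	 * establishes one once descriptors are in use */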
1919	card->low_watermark = NULL;
1920
1921	result = spider_net_init_chain(card, &card->rx_chain);
1922	if (result)
1923		goto alloc_rx_failed;
1924
1925	/* Allocate rx skbs */
1926	if (spider_net_alloc_rx_skbs(card))
1927		goto alloc_skbs_failed;
1928
1929	spider_net_set_multi(netdev);
1930
1931	/* further enhancement: setup hw vlan, if needed */
1932
1933	result = -EBUSY;
1934	if (request_irq(netdev->irq, spider_net_interrupt,
1935			     IRQF_SHARED, netdev->name, netdev))
1936		goto register_int_failed;
1937
1938	spider_net_enable_card(card);
1939
1940	netif_start_queue(netdev);
1941	netif_carrier_on(netdev);
1942	netif_poll_enable(netdev);
1943
1944	return 0;
1945
1946register_int_failed:
1947	spider_net_free_rx_chain_contents(card);
1948alloc_skbs_failed:
1949	spider_net_free_chain(card, &card->rx_chain);
1950alloc_rx_failed:
1951	spider_net_free_chain(card, &card->tx_chain);
1952alloc_tx_failed:
1953	del_timer_sync(&card->aneg_timer);
1954init_firmware_failed:
1955	return result;
1956}
1957
1958/**
 * spider_net_link_phy - timer callback that polls the PHY link state
 * @data: pointer to the card structure
 *
 * If the link does not come up within SPIDER_NET_ANEG_TIMEOUT polls, the
 * PHY setup is retried, cycling through copper and fiber media.
1962 */
1963static void spider_net_link_phy(unsigned long data)
1964{
1965	struct spider_net_card *card = (struct spider_net_card *)data;
1966	struct mii_phy *phy = &card->phy;
1967
1968	/* if link didn't come up after SPIDER_NET_ANEG_TIMEOUT tries, setup phy again */
1969	if (card->aneg_count > SPIDER_NET_ANEG_TIMEOUT) {
1970
		pr_info("%s: link is down, trying to bring it up\n", card->netdev->name);
1972
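		/* fall back through the media types: copper, then fiber
		 * with autoneg, then fiber without autoneg, then start
		 * over with copper */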
1973		switch (card->medium) {
1974		case BCM54XX_COPPER:
1975			/* enable fiber with autonegotiation first */
1976			if (phy->def->ops->enable_fiber)
1977				phy->def->ops->enable_fiber(phy, 1);
1978			card->medium = BCM54XX_FIBER;
1979			break;
1980
1981		case BCM54XX_FIBER:
1982			/* fiber didn't come up, try to disable fiber autoneg */
1983			if (phy->def->ops->enable_fiber)
1984				phy->def->ops->enable_fiber(phy, 0);
1985			card->medium = BCM54XX_UNKNOWN;
1986			break;
1987
1988		case BCM54XX_UNKNOWN:
			/* copper and fiber (with and without autoneg)
			 * all failed, retry from the beginning */
1991			spider_net_setup_aneg(card);
1992			card->medium = BCM54XX_COPPER;
1993			break;
1994		}
1995
1996		card->aneg_count = 0;
1997		mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
1998		return;
1999	}
2000
2001	/* link still not up, try again later */
2002	if (!(phy->def->ops->poll_link(phy))) {
2003		card->aneg_count++;
2004		mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
2005		return;
2006	}
2007
2008	/* link came up, get abilities */
2009	phy->def->ops->read_link(phy);
2010
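	/* writing the current status value back to GMACST presumably
	 * acknowledges the latched link-status bits before the MAC
	 * interrupt is re-enabled below */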
2011	spider_net_write_reg(card, SPIDER_NET_GMACST,
2012			     spider_net_read_reg(card, SPIDER_NET_GMACST));
2013	spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0x4);
2014
2015	if (phy->speed == 1000)
2016		spider_net_write_reg(card, SPIDER_NET_GMACMODE, 0x00000001);
2017	else
2018		spider_net_write_reg(card, SPIDER_NET_GMACMODE, 0);
2019
2020	card->aneg_count = 0;
2021
2022	pr_debug("Found %s with %i Mbps, %s-duplex %sautoneg.\n",
2023		phy->def->name, phy->speed, phy->duplex==1 ? "Full" : "Half",
2024		phy->autoneg==1 ? "" : "no ");
2025
2026	return;
2027}
2028
2029/**
2030 * spider_net_setup_phy - setup PHY
2031 * @card: card structure
2032 *
2033 * returns 0 on success, <0 on failure
2034 *
2035 * spider_net_setup_phy is used as part of spider_net_probe.
2036 **/
2037static int
2038spider_net_setup_phy(struct spider_net_card *card)
2039{
2040	struct mii_phy *phy = &card->phy;
2041
2042	spider_net_write_reg(card, SPIDER_NET_GDTDMASEL,
2043			     SPIDER_NET_DMASEL_VALUE);
2044	spider_net_write_reg(card, SPIDER_NET_GPCCTRL,
2045			     SPIDER_NET_PHY_CTRL_VALUE);
2046
2047	phy->dev = card->netdev;
2048	phy->mdio_read = spider_net_read_phy;
2049	phy->mdio_write = spider_net_write_phy;
2050
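	/* scan MII addresses 1..31; a BMSR value of 0x0000 or 0xffff
	 * means no PHY answered at that address */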
2051	for (phy->mii_id = 1; phy->mii_id <= 31; phy->mii_id++) {
2052		unsigned short id;
2053		id = spider_net_read_phy(card->netdev, phy->mii_id, MII_BMSR);
2054		if (id != 0x0000 && id != 0xffff) {
2055			if (!mii_phy_probe(phy, phy->mii_id)) {
2056				pr_info("Found %s.\n", phy->def->name);
2057				break;
2058			}
2059		}
2060	}
2061
2062	return 0;
2063}
2064
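/**
 * spider_net_workaround_rxramfull - work around the RX RAM full condition
 * @card: card structure
 *
 * Takes the card out of reset, clears the program memory of all
 * sequencers, sets the sequencer operation mode and puts the card back
 * into reset.
 */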
2065static void
2066spider_net_workaround_rxramfull(struct spider_net_card *card)
2067{
2068	int i, sequencer = 0;
2069
2070	/* cancel reset */
2071	spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
2072			     SPIDER_NET_CKRCTRL_RUN_VALUE);
2073
2074	/* empty sequencer data */
2075	for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS;
2076	     sequencer++) {
2077		spider_net_write_reg(card, SPIDER_NET_GSnPRGADR +
2078				     sequencer * 8, 0x0);
2079		for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) {
2080			spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
2081					     sequencer * 8, 0x0);
2082		}
2083	}
2084
2085	/* set sequencer operation */
2086	spider_net_write_reg(card, SPIDER_NET_GSINIT, 0x000000fe);
2087
2088	/* reset */
2089	spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
2090			     SPIDER_NET_CKRCTRL_STOP_VALUE);
2091}
2092
2093/**
2094 * spider_net_stop - called upon ifconfig down
2095 * @netdev: interface device structure
2096 *
2097 * always returns 0
2098 */
2099int
2100spider_net_stop(struct net_device *netdev)
2101{
2102	struct spider_net_card *card = netdev_priv(netdev);
2103
2104	netif_poll_disable(netdev);
2105	netif_carrier_off(netdev);
2106	netif_stop_queue(netdev);
2107	del_timer_sync(&card->tx_timer);
2108	del_timer_sync(&card->aneg_timer);
2109
2110	/* disable/mask all interrupts */
2111	spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0);
2112	spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK, 0);
2113	spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, 0);
2114	spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0);
2115
2116	free_irq(netdev->irq, netdev);
2117
2118	spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
2119			     SPIDER_NET_DMA_TX_FEND_VALUE);
2120
2121	/* turn off DMA, force end */
2122	spider_net_disable_rxdmac(card);
2123
2124	/* release chains */
2125	spider_net_release_tx_chain(card, 1);
2126	spider_net_free_rx_chain_contents(card);
2127
2128	spider_net_free_chain(card, &card->tx_chain);
2129	spider_net_free_chain(card, &card->rx_chain);
2130
2131	return 0;
2132}
2133
2134/**
2135 * spider_net_tx_timeout_task - task scheduled by the watchdog timeout
 * function (runs in process context, not in interrupt context)
 * @work: work_struct embedded in the card structure
2138 *
2139 * called as task when tx hangs, resets interface (if interface is up)
2140 */
2141static void
2142spider_net_tx_timeout_task(struct work_struct *work)
2143{
2144	struct spider_net_card *card =
2145		container_of(work, struct spider_net_card, tx_timeout_task);
2146	struct net_device *netdev = card->netdev;
2147
2148	if (!(netdev->flags & IFF_UP))
2149		goto out;
2150
2151	netif_device_detach(netdev);
2152	spider_net_stop(netdev);
2153
2154	spider_net_workaround_rxramfull(card);
2155	spider_net_init_card(card);
2156
2157	if (spider_net_setup_phy(card))
2158		goto out;
2159
2160	spider_net_open(netdev);
2161	spider_net_kick_tx_dma(card);
2162	netif_device_attach(netdev);
2163
2164out:
2165	atomic_dec(&card->tx_timeout_task_counter);
2166}
2167
2168/**
2169 * spider_net_tx_timeout - called when the tx timeout watchdog kicks in.
2170 * @netdev: interface device structure
2171 *
 * called if tx hangs; schedules a task that resets the interface
2173 */
2174static void
2175spider_net_tx_timeout(struct net_device *netdev)
2176{
2177	struct spider_net_card *card;
2178
2179	card = netdev_priv(netdev);
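	/* the counter allows spider_net_remove to wait for outstanding
	 * timeout tasks to finish */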
2180	atomic_inc(&card->tx_timeout_task_counter);
2181	if (netdev->flags & IFF_UP)
2182		schedule_work(&card->tx_timeout_task);
2183	else
2184		atomic_dec(&card->tx_timeout_task_counter);
2185	card->spider_stats.tx_timeouts++;
2186}
2187
2188/**
2189 * spider_net_setup_netdev_ops - initialization of net_device operations
2190 * @netdev: net_device structure
2191 *
2192 * fills out function pointers in the net_device structure
2193 */
2194static void
2195spider_net_setup_netdev_ops(struct net_device *netdev)
2196{
2197	netdev->open = &spider_net_open;
2198	netdev->stop = &spider_net_stop;
2199	netdev->hard_start_xmit = &spider_net_xmit;
2200	netdev->get_stats = &spider_net_get_stats;
2201	netdev->set_multicast_list = &spider_net_set_multi;
2202	netdev->set_mac_address = &spider_net_set_mac;
2203	netdev->change_mtu = &spider_net_change_mtu;
2204	netdev->do_ioctl = &spider_net_do_ioctl;
2205	/* tx watchdog */
2206	netdev->tx_timeout = &spider_net_tx_timeout;
2207	netdev->watchdog_timeo = SPIDER_NET_WATCHDOG_TIMEOUT;
2208	/* NAPI */
2209	netdev->poll = &spider_net_poll;
2210	netdev->weight = SPIDER_NET_NAPI_WEIGHT;
2211	/* HW VLAN */
2212#ifdef CONFIG_NET_POLL_CONTROLLER
2213	/* poll controller */
2214	netdev->poll_controller = &spider_net_poll_controller;
2215#endif /* CONFIG_NET_POLL_CONTROLLER */
2216	/* ethtool ops */
2217	netdev->ethtool_ops = &spider_net_ethtool_ops;
2218}
2219
2220/**
2221 * spider_net_setup_netdev - initialization of net_device
2222 * @card: card structure
2223 *
2224 * Returns 0 on success or <0 on failure
2225 *
2226 * spider_net_setup_netdev initializes the net_device structure
2227 **/
2228static int
2229spider_net_setup_netdev(struct spider_net_card *card)
2230{
2231	int result;
2232	struct net_device *netdev = card->netdev;
2233	struct device_node *dn;
2234	struct sockaddr addr;
2235	const u8 *mac;
2236
2237	SET_MODULE_OWNER(netdev);
2238	SET_NETDEV_DEV(netdev, &card->pdev->dev);
2239
2240	pci_set_drvdata(card->pdev, netdev);
2241
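	/* tx_timer periodically reclaims completed TX descriptors,
	 * aneg_timer polls the autonegotiation/link state */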
2242	init_timer(&card->tx_timer);
2243	card->tx_timer.function =
2244		(void (*)(unsigned long)) spider_net_cleanup_tx_ring;
2245	card->tx_timer.data = (unsigned long) card;
2246	netdev->irq = card->pdev->irq;
2247
2248	card->aneg_count = 0;
2249	init_timer(&card->aneg_timer);
2250	card->aneg_timer.function = spider_net_link_phy;
2251	card->aneg_timer.data = (unsigned long) card;
2252
2253	card->options.rx_csum = SPIDER_NET_RX_CSUM_DEFAULT;
2254
2255	spider_net_setup_netdev_ops(netdev);
2256
2257	netdev->features = NETIF_F_IP_CSUM | NETIF_F_LLTX;
2258	/* some time: NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
2259	 *		NETIF_F_HW_VLAN_FILTER */
2260
2262	card->num_rx_ints = 0;
2263	card->ignore_rx_ramfull = 0;
2264
2265	dn = pci_device_to_OF_node(card->pdev);
2266	if (!dn)
2267		return -EIO;
2268
2269	mac = of_get_property(dn, "local-mac-address", NULL);
2270	if (!mac)
2271		return -EIO;
2272	memcpy(addr.sa_data, mac, ETH_ALEN);
2273
2274	result = spider_net_set_mac(netdev, &addr);
2275	if ((result) && (netif_msg_probe(card)))
2276		pr_err("Failed to set MAC address: %i\n", result);
2277
2278	result = register_netdev(netdev);
2279	if (result) {
2280		if (netif_msg_probe(card))
2281			pr_err("Couldn't register net_device: %i\n",
2282				  result);
2283		return result;
2284	}
2285
2286	if (netif_msg_probe(card))
2287		pr_info("Initialized device %s.\n", netdev->name);
2288
2289	return 0;
2290}
2291
2292/**
2293 * spider_net_alloc_card - allocates net_device and card structure
2294 *
2295 * returns the card structure or NULL in case of errors
2296 *
2297 * the card and net_device structures are linked to each other
2298 */
2299static struct spider_net_card *
2300spider_net_alloc_card(void)
2301{
2302	struct net_device *netdev;
2303	struct spider_net_card *card;
2304	size_t alloc_size;
2305
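	/* the RX and TX descriptor rings are allocated right behind the
	 * card structure in the netdev private area (card->darray) */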
2306	alloc_size = sizeof(struct spider_net_card) +
2307	   (tx_descriptors + rx_descriptors) * sizeof(struct spider_net_descr);
2308	netdev = alloc_etherdev(alloc_size);
2309	if (!netdev)
2310		return NULL;
2311
2312	card = netdev_priv(netdev);
2313	card->netdev = netdev;
2314	card->msg_enable = SPIDER_NET_DEFAULT_MSG;
2315	INIT_WORK(&card->tx_timeout_task, spider_net_tx_timeout_task);
2316	init_waitqueue_head(&card->waitq);
2317	atomic_set(&card->tx_timeout_task_counter, 0);
2318
2319	card->rx_chain.num_desc = rx_descriptors;
2320	card->rx_chain.ring = card->darray;
2321	card->tx_chain.num_desc = tx_descriptors;
2322	card->tx_chain.ring = card->darray + rx_descriptors;
2323
2324	return card;
2325}
2326
2327/**
 * spider_net_undo_pci_setup - releases PCI resources
2329 * @card: card structure
2330 *
2331 * spider_net_undo_pci_setup releases the mapped regions
2332 */
2333static void
2334spider_net_undo_pci_setup(struct spider_net_card *card)
2335{
2336	iounmap(card->regs);
2337	pci_release_regions(card->pdev);
2338}
2339
2340/**
2341 * spider_net_setup_pci_dev - sets up the device in terms of PCI operations
 * @pdev: PCI device
2344 *
2345 * Returns the card structure or NULL if any errors occur
2346 *
2347 * spider_net_setup_pci_dev initializes pdev and together with the
2348 * functions called in spider_net_open configures the device so that
2349 * data can be transferred over it
2350 * The net_device structure is attached to the card structure, if the
2351 * function returns without error.
2352 **/
2353static struct spider_net_card *
2354spider_net_setup_pci_dev(struct pci_dev *pdev)
2355{
2356	struct spider_net_card *card;
2357	unsigned long mmio_start, mmio_len;
2358
2359	if (pci_enable_device(pdev)) {
2360		pr_err("Couldn't enable PCI device\n");
2361		return NULL;
2362	}
2363
2364	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2365		pr_err("Couldn't find proper PCI device base address.\n");
2366		goto out_disable_dev;
2367	}
2368
2369	if (pci_request_regions(pdev, spider_net_driver_name)) {
2370		pr_err("Couldn't obtain PCI resources, aborting.\n");
2371		goto out_disable_dev;
2372	}
2373
2374	pci_set_master(pdev);
2375
2376	card = spider_net_alloc_card();
2377	if (!card) {
2378		pr_err("Couldn't allocate net_device structure, "
2379			  "aborting.\n");
2380		goto out_release_regions;
2381	}
2382	card->pdev = pdev;
2383
2384	/* fetch base address and length of first resource */
2385	mmio_start = pci_resource_start(pdev, 0);
2386	mmio_len = pci_resource_len(pdev, 0);
2387
2388	card->netdev->mem_start = mmio_start;
2389	card->netdev->mem_end = mmio_start + mmio_len;
2390	card->regs = ioremap(mmio_start, mmio_len);
2391
	if (!card->regs) {
		pr_err("Couldn't ioremap the PCI register space, aborting.\n");
		free_netdev(card->netdev);
		goto out_release_regions;
2395	}
2396
2397	return card;
2398
2399out_release_regions:
2400	pci_release_regions(pdev);
2401out_disable_dev:
2402	pci_disable_device(pdev);
2403	pci_set_drvdata(pdev, NULL);
2404	return NULL;
2405}
2406
2407/**
2408 * spider_net_probe - initialization of a device
2409 * @pdev: PCI device
2410 * @ent: entry in the device id list
2411 *
2412 * Returns 0 on success, <0 on failure
2413 *
2414 * spider_net_probe initializes pdev and registers a net_device
2415 * structure for it. After that, the device can be ifconfig'ed up
2416 **/
2417static int __devinit
2418spider_net_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2419{
2420	int err = -EIO;
2421	struct spider_net_card *card;
2422
2423	card = spider_net_setup_pci_dev(pdev);
2424	if (!card)
2425		goto out;
2426
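	/* bring the hardware into a defined state before the net_device
	 * is registered and can be opened */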
2427	spider_net_workaround_rxramfull(card);
2428	spider_net_init_card(card);
2429
2430	err = spider_net_setup_phy(card);
2431	if (err)
2432		goto out_undo_pci;
2433
2434	err = spider_net_setup_netdev(card);
2435	if (err)
2436		goto out_undo_pci;
2437
2438	return 0;
2439
2440out_undo_pci:
2441	spider_net_undo_pci_setup(card);
2442	free_netdev(card->netdev);
2443out:
2444	return err;
2445}
2446
2447/**
2448 * spider_net_remove - removal of a device
2449 * @pdev: PCI device
2450 *
 * spider_net_remove is called to remove the device; it unregisters the
 * net_device, switches the card off and releases the PCI resources
2455 **/
2456static void __devexit
2457spider_net_remove(struct pci_dev *pdev)
2458{
2459	struct net_device *netdev;
2460	struct spider_net_card *card;
2461
2462	netdev = pci_get_drvdata(pdev);
2463	card = netdev_priv(netdev);
2464
2465	wait_event(card->waitq,
2466		   atomic_read(&card->tx_timeout_task_counter) == 0);
2467
2468	unregister_netdev(netdev);
2469
2470	/* switch off card */
2471	spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
2472			     SPIDER_NET_CKRCTRL_STOP_VALUE);
2473	spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
2474			     SPIDER_NET_CKRCTRL_RUN_VALUE);
2475
2476	spider_net_undo_pci_setup(card);
2477	free_netdev(netdev);
2478}
2479
2480static struct pci_driver spider_net_driver = {
2481	.name		= spider_net_driver_name,
2482	.id_table	= spider_net_pci_tbl,
2483	.probe		= spider_net_probe,
2484	.remove		= __devexit_p(spider_net_remove)
2485};
2486
2487/**
2488 * spider_net_init - init function when the driver is loaded
2489 *
2490 * spider_net_init registers the device driver
2491 */
2492static int __init spider_net_init(void)
2493{
2494	printk(KERN_INFO "Spidernet version %s.\n", VERSION);
2495
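	/* clamp the module parameters to the supported ring sizes */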
2496	if (rx_descriptors < SPIDER_NET_RX_DESCRIPTORS_MIN) {
2497		rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_MIN;
2498		pr_info("adjusting rx descriptors to %i.\n", rx_descriptors);
2499	}
2500	if (rx_descriptors > SPIDER_NET_RX_DESCRIPTORS_MAX) {
2501		rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_MAX;
2502		pr_info("adjusting rx descriptors to %i.\n", rx_descriptors);
2503	}
2504	if (tx_descriptors < SPIDER_NET_TX_DESCRIPTORS_MIN) {
2505		tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_MIN;
2506		pr_info("adjusting tx descriptors to %i.\n", tx_descriptors);
2507	}
2508	if (tx_descriptors > SPIDER_NET_TX_DESCRIPTORS_MAX) {
2509		tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_MAX;
2510		pr_info("adjusting tx descriptors to %i.\n", tx_descriptors);
2511	}
2512
2513	return pci_register_driver(&spider_net_driver);
2514}
2515
2516/**
2517 * spider_net_cleanup - exit function when driver is unloaded
2518 *
2519 * spider_net_cleanup unregisters the device driver
2520 */
2521static void __exit spider_net_cleanup(void)
2522{
2523	pci_unregister_driver(&spider_net_driver);
2524}
2525
2526module_init(spider_net_init);
2527module_exit(spider_net_cleanup);
2528