/*
 * Copyright (C) 2006-2007 PA Semi, Inc
 *
 * Driver for the PA Semi PWRficient onchip 1G/10G Ethernet MACs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/in.h>
#include <linux/skbuff.h>

#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/checksum.h>

#include <asm/irq.h>

#include "pasemi_mac.h"


/* TODO list
 *
 * - Get rid of pci_{read,write}_config(), map registers with ioremap
 *   for performance
 * - PHY support
 * - Multicast support
 * - Large MTU support
 * - Other performance improvements
 */


/* Must be a power of two */
#define RX_RING_SIZE 512
#define TX_RING_SIZE 512

#define DEFAULT_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TX_DESC(mac, num)	((mac)->tx->desc[(num) & (TX_RING_SIZE-1)])
#define TX_DESC_INFO(mac, num)	((mac)->tx->desc_info[(num) & (TX_RING_SIZE-1)])
#define RX_DESC(mac, num)	((mac)->rx->desc[(num) & (RX_RING_SIZE-1)])
#define RX_DESC_INFO(mac, num)	((mac)->rx->desc_info[(num) & (RX_RING_SIZE-1)])
#define RX_BUFF(mac, num)	((mac)->rx->buffers[(num) & (RX_RING_SIZE-1)])
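/* The ring indices (next_to_fill, next_to_clean, next_to_use) are
 * free-running counters; the accessors above mask them down to the
 * ring size, which is why the ring sizes must be powers of two.
 */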

#define BUF_SIZE 1646 /* 1500 MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */
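/* That is 1500 + ETH_HLEN (14) + VLAN_HLEN (4) + 2 * 64 = 1646 bytes. */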

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Olof Johansson <olof@lixom.net>");
MODULE_DESCRIPTION("PA Semi PWRficient Ethernet driver");

static int debug = -1;	/* -1 == use DEFAULT_MSG_ENABLE as value */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "PA Semi MAC bitmapped debugging message enable value");

static struct pasdma_status *dma_status;

static int pasemi_get_mac_addr(struct pasemi_mac *mac)
{
	struct pci_dev *pdev = mac->pdev;
	struct device_node *dn = pci_device_to_OF_node(pdev);
	int len;
	const u8 *maddr;
	u8 addr[6];

	if (!dn) {
		dev_dbg(&pdev->dev,
			"No device node for mac, not configuring\n");
		return -ENOENT;
	}

	maddr = of_get_property(dn, "local-mac-address", &len);

	if (maddr && len == 6) {
		memcpy(mac->mac_addr, maddr, 6);
		return 0;
	}

	/* Some old versions of firmware mistakenly use mac-address
	 * (and as a string at that) instead of a byte array in
	 * local-mac-address.
	 */

	if (maddr == NULL)
		maddr = of_get_property(dn, "mac-address", NULL);

	if (maddr == NULL) {
		dev_warn(&pdev->dev,
			 "no mac address in device tree, not configuring\n");
		return -ENOENT;
	}


	if (sscanf(maddr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &addr[0],
		   &addr[1], &addr[2], &addr[3], &addr[4], &addr[5]) != 6) {
		dev_warn(&pdev->dev,
			 "can't parse mac address, not configuring\n");
		return -EINVAL;
	}

	memcpy(mac->mac_addr, addr, 6);

	return 0;
}

static int pasemi_mac_setup_rx_resources(struct net_device *dev)
{
	struct pasemi_mac_rxring *ring;
	struct pasemi_mac *mac = netdev_priv(dev);
	int chan_id = mac->dma_rxch;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);

	if (!ring)
		goto out_ring;

	spin_lock_init(&ring->lock);

	ring->desc_info = kzalloc(sizeof(struct pasemi_mac_buffer) *
				  RX_RING_SIZE, GFP_KERNEL);

	if (!ring->desc_info)
		goto out_desc_info;

	/* Allocate descriptors */
	ring->desc = dma_alloc_coherent(&mac->dma_pdev->dev,
					RX_RING_SIZE *
					sizeof(struct pas_dma_xct_descr),
					&ring->dma, GFP_KERNEL);

	if (!ring->desc)
		goto out_desc;

	memset(ring->desc, 0, RX_RING_SIZE * sizeof(struct pas_dma_xct_descr));

	ring->buffers = dma_alloc_coherent(&mac->dma_pdev->dev,
					   RX_RING_SIZE * sizeof(u64),
					   &ring->buf_dma, GFP_KERNEL);
	if (!ring->buffers)
		goto out_buffers;

	memset(ring->buffers, 0, RX_RING_SIZE * sizeof(u64));

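	/* Two rings are handed to the hardware: the channel registers get
	 * the descriptor ring (ring->dma), while the RXINT registers for
	 * this interface get the buffer-pointer ring (ring->buffers) that
	 * the replenish path fills with XCT_RXB entries.
	 */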
	pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXCHAN_BASEL(chan_id),
			       PAS_DMA_RXCHAN_BASEL_BRBL(ring->dma));

	pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXCHAN_BASEU(chan_id),
			       PAS_DMA_RXCHAN_BASEU_BRBH(ring->dma >> 32) |
			       PAS_DMA_RXCHAN_BASEU_SIZ(RX_RING_SIZE >> 2));

	pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXCHAN_CFG(chan_id),
			       PAS_DMA_RXCHAN_CFG_HBU(1));

	pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXINT_BASEL(mac->dma_if),
			       PAS_DMA_RXINT_BASEL_BRBL(__pa(ring->buffers)));

	pci_write_config_dword(mac->dma_pdev, PAS_DMA_RXINT_BASEU(mac->dma_if),
			       PAS_DMA_RXINT_BASEU_BRBH(__pa(ring->buffers) >> 32) |
			       PAS_DMA_RXINT_BASEU_SIZ(RX_RING_SIZE >> 3));

	ring->next_to_fill = 0;
	ring->next_to_clean = 0;

	snprintf(ring->irq_name, sizeof(ring->irq_name),
		 "%s rx", dev->name);
	mac->rx = ring;

	return 0;

out_buffers:
	dma_free_coherent(&mac->dma_pdev->dev,
			  RX_RING_SIZE * sizeof(struct pas_dma_xct_descr),
			  ring->desc, ring->dma);
out_desc:
	kfree(ring->desc_info);
out_desc_info:
	kfree(ring);
out_ring:
	return -ENOMEM;
}


static int pasemi_mac_setup_tx_resources(struct net_device *dev)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	u32 val;
	int chan_id = mac->dma_txch;
	struct pasemi_mac_txring *ring;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out_ring;

	spin_lock_init(&ring->lock);

	ring->desc_info = kzalloc(sizeof(struct pasemi_mac_buffer) *
				  TX_RING_SIZE, GFP_KERNEL);
	if (!ring->desc_info)
		goto out_desc_info;

	/* Allocate descriptors */
	ring->desc = dma_alloc_coherent(&mac->dma_pdev->dev,
					TX_RING_SIZE *
					sizeof(struct pas_dma_xct_descr),
					&ring->dma, GFP_KERNEL);
	if (!ring->desc)
		goto out_desc;

	memset(ring->desc, 0, TX_RING_SIZE * sizeof(struct pas_dma_xct_descr));

	pci_write_config_dword(mac->dma_pdev, PAS_DMA_TXCHAN_BASEL(chan_id),
			       PAS_DMA_TXCHAN_BASEL_BRBL(ring->dma));
	val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->dma >> 32);
	val |= PAS_DMA_TXCHAN_BASEU_SIZ(TX_RING_SIZE >> 2);

	pci_write_config_dword(mac->dma_pdev, PAS_DMA_TXCHAN_BASEU(chan_id), val);

	pci_write_config_dword(mac->dma_pdev, PAS_DMA_TXCHAN_CFG(chan_id),
			       PAS_DMA_TXCHAN_CFG_TY_IFACE |
			       PAS_DMA_TXCHAN_CFG_TATTR(mac->dma_if) |
			       PAS_DMA_TXCHAN_CFG_UP |
			       PAS_DMA_TXCHAN_CFG_WT(2));

	ring->next_to_use = 0;
	ring->next_to_clean = 0;

	snprintf(ring->irq_name, sizeof(ring->irq_name),
		 "%s tx", dev->name);
	mac->tx = ring;

	return 0;

out_desc:
	kfree(ring->desc_info);
out_desc_info:
	kfree(ring);
out_ring:
	return -ENOMEM;
}

static void pasemi_mac_free_tx_resources(struct net_device *dev)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	unsigned int i;
	struct pasemi_mac_buffer *info;
	struct pas_dma_xct_descr *dp;

	for (i = 0; i < TX_RING_SIZE; i++) {
		info = &TX_DESC_INFO(mac, i);
		dp = &TX_DESC(mac, i);
		if (info->dma) {
			if (info->skb) {
				pci_unmap_single(mac->dma_pdev,
						 info->dma,
						 info->skb->len,
						 PCI_DMA_TODEVICE);
				dev_kfree_skb_any(info->skb);
			}
			info->dma = 0;
			info->skb = NULL;
			dp->mactx = 0;
			dp->ptr = 0;
		}
	}

	dma_free_coherent(&mac->dma_pdev->dev,
			  TX_RING_SIZE * sizeof(struct pas_dma_xct_descr),
			  mac->tx->desc, mac->tx->dma);

	kfree(mac->tx->desc_info);
	kfree(mac->tx);
	mac->tx = NULL;
}

static void pasemi_mac_free_rx_resources(struct net_device *dev)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	unsigned int i;
	struct pasemi_mac_buffer *info;
	struct pas_dma_xct_descr *dp;

	for (i = 0; i < RX_RING_SIZE; i++) {
		info = &RX_DESC_INFO(mac, i);
		dp = &RX_DESC(mac, i);
		if (info->skb) {
			if (info->dma) {
				pci_unmap_single(mac->dma_pdev,
						 info->dma,
						 BUF_SIZE,
						 PCI_DMA_FROMDEVICE);
				dev_kfree_skb_any(info->skb);
			}
			info->dma = 0;
			info->skb = NULL;
			dp->macrx = 0;
			dp->ptr = 0;
		}
	}

	dma_free_coherent(&mac->dma_pdev->dev,
			  RX_RING_SIZE * sizeof(struct pas_dma_xct_descr),
			  mac->rx->desc, mac->rx->dma);

	dma_free_coherent(&mac->dma_pdev->dev, RX_RING_SIZE * sizeof(u64),
			  mac->rx->buffers, mac->rx->buf_dma);

	kfree(mac->rx->desc_info);
	kfree(mac->rx);
	mac->rx = NULL;
}

static void pasemi_mac_replenish_rx_ring(struct net_device *dev)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	unsigned int i;
	int start = mac->rx->next_to_fill;
	unsigned int limit, count;

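	/* Free slots: how far next_to_fill may advance before catching up
	 * with next_to_clean, modulo the ring size.
	 */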
	limit = (mac->rx->next_to_clean + RX_RING_SIZE -
		 mac->rx->next_to_fill) & (RX_RING_SIZE - 1);

	/* Check to see if we're doing first-time setup */
	if (unlikely(mac->rx->next_to_clean == 0 && mac->rx->next_to_fill == 0))
		limit = RX_RING_SIZE;

	if (limit <= 0)
		return;

	i = start;
	for (count = limit; count; count--) {
		struct pasemi_mac_buffer *info = &RX_DESC_INFO(mac, i);
		u64 *buff = &RX_BUFF(mac, i);
		struct sk_buff *skb;
		dma_addr_t dma;

		/* skb might still be in there for recycle on short receives */
		if (info->skb)
			skb = info->skb;
		else
			skb = dev_alloc_skb(BUF_SIZE);

		if (unlikely(!skb))
			break;

		dma = pci_map_single(mac->dma_pdev, skb->data, BUF_SIZE,
				     PCI_DMA_FROMDEVICE);

		if (unlikely(dma_mapping_error(dma))) {
			dev_kfree_skb_irq(skb);
			info->skb = NULL;
			break;
		}

		info->skb = skb;
		info->dma = dma;
		*buff = XCT_RXB_LEN(BUF_SIZE) | XCT_RXB_ADDR(dma);
		i++;
	}

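	/* Order the descriptor and buffer-pointer stores before the INCR
	 * writes below, which tell the DMA engine how many new buffers
	 * are available.
	 */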
	wmb();

	pci_write_config_dword(mac->dma_pdev,
			       PAS_DMA_RXCHAN_INCR(mac->dma_rxch),
			       limit - count);
	pci_write_config_dword(mac->dma_pdev,
			       PAS_DMA_RXINT_INCR(mac->dma_if),
			       limit - count);

	mac->rx->next_to_fill += limit - count;
}

static void pasemi_mac_restart_rx_intr(struct pasemi_mac *mac)
{
	unsigned int reg, pcnt;
	/* Re-enable packet count interrupts: finally
	 * ack the packet count interrupt we got in rx_intr.
	 */

	pcnt = *mac->rx_status & PAS_STATUS_PCNT_M;

	reg = PAS_IOB_DMA_RXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_RXCH_RESET_PINTC;

	pci_write_config_dword(mac->iob_pdev,
			       PAS_IOB_DMA_RXCH_RESET(mac->dma_rxch),
			       reg);
}

static void pasemi_mac_restart_tx_intr(struct pasemi_mac *mac)
{
	unsigned int reg, pcnt;

	/* Re-enable packet count interrupts */
	pcnt = *mac->tx_status & PAS_STATUS_PCNT_M;

	reg = PAS_IOB_DMA_TXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_TXCH_RESET_PINTC;

	pci_write_config_dword(mac->iob_pdev,
			       PAS_IOB_DMA_TXCH_RESET(mac->dma_txch), reg);
}


static int pasemi_mac_clean_rx(struct pasemi_mac *mac, int limit)
{
	unsigned int n;
	int count;
	struct pas_dma_xct_descr *dp;
	struct pasemi_mac_buffer *info;
	struct sk_buff *skb;
	unsigned int i, len;
	u64 macrx;
	dma_addr_t dma;

	spin_lock(&mac->rx->lock);

	n = mac->rx->next_to_clean;

	for (count = limit; count; count--) {

		rmb();

		dp = &RX_DESC(mac, n);
		macrx = dp->macrx;

		if (!(macrx & XCT_MACRX_O))
			break;


		info = NULL;

		/* We have to scan for our skb since there's no way
		 * to back-map them from the descriptor, and if we
		 * have several receive channels then they might not
		 * show up in the same order as they were put on the
		 * interface ring.
		 */

		dma = (dp->ptr & XCT_PTR_ADDR_M);
		for (i = n; i < (n + RX_RING_SIZE); i++) {
			info = &RX_DESC_INFO(mac, i);
			if (info->dma == dma)
				break;
		}

		skb = info->skb;
		info->dma = 0;

		pci_unmap_single(mac->dma_pdev, dma, BUF_SIZE,
				 PCI_DMA_FROMDEVICE);

		len = (macrx & XCT_MACRX_LLEN_M) >> XCT_MACRX_LLEN_S;

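		/* Copybreak: for short frames, copy the data into a small
		 * freshly allocated skb so the full-sized receive buffer
		 * can be recycled by the replenish path.
		 */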
		if (len < 256) {
			struct sk_buff *new_skb =
			    netdev_alloc_skb(mac->netdev, len + NET_IP_ALIGN);
			if (new_skb) {
				skb_reserve(new_skb, NET_IP_ALIGN);
				memcpy(new_skb->data - NET_IP_ALIGN,
					skb->data - NET_IP_ALIGN,
					len + NET_IP_ALIGN);
				/* The original skb stays in info->skb for
				 * reuse by the replenish path.
				 */
				skb = new_skb;
			} else {
				/* Copy failed: hand the original skb up and
				 * let replenish allocate a replacement.
				 */
				info->skb = NULL;
			}
		} else
			info->skb = NULL;

		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, mac->netdev);

		if ((macrx & XCT_MACRX_HTY_M) == XCT_MACRX_HTY_IPV4_OK) {
			skb->ip_summed = CHECKSUM_COMPLETE;
			skb->csum = (macrx & XCT_MACRX_CSUM_M) >>
					   XCT_MACRX_CSUM_S;
		} else
			skb->ip_summed = CHECKSUM_NONE;

		mac->stats.rx_bytes += len;
		mac->stats.rx_packets++;

		netif_receive_skb(skb);

		dp->ptr = 0;
		dp->macrx = 0;

		n++;
	}

	mac->rx->next_to_clean += limit - count;
	pasemi_mac_replenish_rx_ring(mac->netdev);

	spin_unlock(&mac->rx->lock);

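	/* Return the number of packets handled so the poll loop can
	 * charge them against its budget.
	 */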
	return limit - count;
}

static int pasemi_mac_clean_tx(struct pasemi_mac *mac)
{
	int i;
	struct pasemi_mac_buffer *info;
	struct pas_dma_xct_descr *dp;
	int start, count;
	unsigned long flags;

	spin_lock_irqsave(&mac->tx->lock, flags);

	start = mac->tx->next_to_clean;
	count = 0;

	for (i = start; i < mac->tx->next_to_use; i++) {
		dp = &TX_DESC(mac, i);
		if (!dp || (dp->mactx & XCT_MACTX_O))
			break;

		count++;

		info = &TX_DESC_INFO(mac, i);

		pci_unmap_single(mac->dma_pdev, info->dma,
				 info->skb->len, PCI_DMA_TODEVICE);
		dev_kfree_skb_irq(info->skb);

		info->skb = NULL;
		info->dma = 0;
		dp->mactx = 0;
		dp->ptr = 0;
	}
	mac->tx->next_to_clean += count;
	spin_unlock_irqrestore(&mac->tx->lock, flags);

	netif_wake_queue(mac->netdev);

	return count;
}


static irqreturn_t pasemi_mac_rx_intr(int irq, void *data)
{
	struct net_device *dev = data;
	struct pasemi_mac *mac = netdev_priv(dev);
	unsigned int reg;

	if (!(*mac->rx_status & PAS_STATUS_CAUSE_M))
		return IRQ_NONE;

	if (*mac->rx_status & PAS_STATUS_ERROR)
		printk(KERN_ERR "%s: rx_status reported error\n", dev->name);

	/* Don't reset packet count so it won't fire again but clear
	 * all others.
	 */

	pci_read_config_dword(mac->dma_pdev, PAS_DMA_RXINT_RCMDSTA(mac->dma_if), &reg);

	reg = 0;
	if (*mac->rx_status & PAS_STATUS_SOFT)
		reg |= PAS_IOB_DMA_RXCH_RESET_SINTC;
	if (*mac->rx_status & PAS_STATUS_ERROR)
		reg |= PAS_IOB_DMA_RXCH_RESET_DINTC;
	if (*mac->rx_status & PAS_STATUS_TIMER)
		reg |= PAS_IOB_DMA_RXCH_RESET_TINTC;

	netif_rx_schedule(dev);

	pci_write_config_dword(mac->iob_pdev,
			       PAS_IOB_DMA_RXCH_RESET(mac->dma_rxch), reg);


	return IRQ_HANDLED;
}

static irqreturn_t pasemi_mac_tx_intr(int irq, void *data)
{
	struct net_device *dev = data;
	struct pasemi_mac *mac = netdev_priv(dev);
	unsigned int reg, pcnt;

	if (!(*mac->tx_status & PAS_STATUS_CAUSE_M))
		return IRQ_NONE;

	pasemi_mac_clean_tx(mac);

	pcnt = *mac->tx_status & PAS_STATUS_PCNT_M;

	reg = PAS_IOB_DMA_TXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_TXCH_RESET_PINTC;

	if (*mac->tx_status & PAS_STATUS_SOFT)
		reg |= PAS_IOB_DMA_TXCH_RESET_SINTC;
	if (*mac->tx_status & PAS_STATUS_ERROR)
		reg |= PAS_IOB_DMA_TXCH_RESET_DINTC;

	pci_write_config_dword(mac->iob_pdev,
			       PAS_IOB_DMA_TXCH_RESET(mac->dma_txch),
			       reg);

	return IRQ_HANDLED;
}

static void pasemi_adjust_link(struct net_device *dev)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	int msg;
	unsigned int flags;
	unsigned int new_flags;

	if (!mac->phydev->link) {
		/* If no link, MAC speed settings don't matter. Just report
		 * link down and return.
		 */
		if (mac->link && netif_msg_link(mac))
			printk(KERN_INFO "%s: Link is down.\n", dev->name);

		netif_carrier_off(dev);
		mac->link = 0;

		return;
	} else
		netif_carrier_on(dev);

	pci_read_config_dword(mac->pdev, PAS_MAC_CFG_PCFG, &flags);
	new_flags = flags & ~(PAS_MAC_CFG_PCFG_HD | PAS_MAC_CFG_PCFG_SPD_M |
			      PAS_MAC_CFG_PCFG_TSR_M);

	if (!mac->phydev->duplex)
		new_flags |= PAS_MAC_CFG_PCFG_HD;

	switch (mac->phydev->speed) {
	case 1000:
		new_flags |= PAS_MAC_CFG_PCFG_SPD_1G |
			     PAS_MAC_CFG_PCFG_TSR_1G;
		break;
	case 100:
		new_flags |= PAS_MAC_CFG_PCFG_SPD_100M |
			     PAS_MAC_CFG_PCFG_TSR_100M;
		break;
	case 10:
		new_flags |= PAS_MAC_CFG_PCFG_SPD_10M |
			     PAS_MAC_CFG_PCFG_TSR_10M;
		break;
	default:
		printk(KERN_WARNING "%s: Unsupported speed %d\n",
		       dev->name, mac->phydev->speed);
	}

	/* Print on link or speed/duplex change */
	msg = mac->link != mac->phydev->link || flags != new_flags;

	mac->duplex = mac->phydev->duplex;
	mac->speed = mac->phydev->speed;
	mac->link = mac->phydev->link;

	if (new_flags != flags)
		pci_write_config_dword(mac->pdev, PAS_MAC_CFG_PCFG, new_flags);

	if (msg && netif_msg_link(mac))
		printk(KERN_INFO "%s: Link is up at %d Mbps, %s duplex.\n",
		       dev->name, mac->speed, mac->duplex ? "full" : "half");
}

static int pasemi_mac_phy_init(struct net_device *dev)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	struct device_node *dn, *phy_dn;
	struct phy_device *phydev;
	unsigned int phy_id;
	const phandle *ph;
	const unsigned int *prop;
	struct resource r;
	int ret;

	dn = pci_device_to_OF_node(mac->pdev);
	ph = of_get_property(dn, "phy-handle", NULL);
	if (!ph)
		return -ENODEV;
	phy_dn = of_find_node_by_phandle(*ph);

	prop = of_get_property(phy_dn, "reg", NULL);
	ret = of_address_to_resource(phy_dn->parent, 0, &r);
	if (ret)
		goto err;

	phy_id = *prop;
	snprintf(mac->phy_id, BUS_ID_SIZE, PHY_ID_FMT, (int)r.start, phy_id);

	of_node_put(phy_dn);

	mac->link = 0;
	mac->speed = 0;
	mac->duplex = -1;

	phydev = phy_connect(dev, mac->phy_id, &pasemi_adjust_link, 0, PHY_INTERFACE_MODE_SGMII);

	if (IS_ERR(phydev)) {
		printk(KERN_ERR "%s: Could not attach to phy\n", dev->name);
		return PTR_ERR(phydev);
	}

	mac->phydev = phydev;

	return 0;

err:
	of_node_put(phy_dn);
	return -ENODEV;
}


static int pasemi_mac_open(struct net_device *dev)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	int base_irq;
	unsigned int flags;
	int ret;

	/* enable rx section */
	pci_write_config_dword(mac->dma_pdev, PAS_DMA_COM_RXCMD,
			       PAS_DMA_COM_RXCMD_EN);

	/* enable tx section */
	pci_write_config_dword(mac->dma_pdev, PAS_DMA_COM_TXCMD,
			       PAS_DMA_COM_TXCMD_EN);

	flags = PAS_MAC_CFG_TXP_FCE | PAS_MAC_CFG_TXP_FPC(3) |
		PAS_MAC_CFG_TXP_SL(3) | PAS_MAC_CFG_TXP_COB(0xf) |
		PAS_MAC_CFG_TXP_TIFT(8) | PAS_MAC_CFG_TXP_TIFG(12);

	pci_write_config_dword(mac->pdev, PAS_MAC_CFG_TXP, flags);

	flags = PAS_MAC_CFG_PCFG_S1 | PAS_MAC_CFG_PCFG_PE |
		PAS_MAC_CFG_PCFG_PR | PAS_MAC_CFG_PCFG_CE;

	flags |= PAS_MAC_CFG_PCFG_TSR_1G | PAS_MAC_CFG_PCFG_SPD_1G;

	pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_RXCH_CFG(mac->dma_rxch),
			       PAS_IOB_DMA_RXCH_CFG_CNTTH(1));

	pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_TXCH_CFG(mac->dma_txch),
			       PAS_IOB_DMA_TXCH_CFG_CNTTH(32));

	/* Clear out any residual packet count state from firmware */
	pasemi_mac_restart_rx_intr(mac);
	pasemi_mac_restart_tx_intr(mac);

	/* 0xffffff is max value, about 16ms */
	pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_COM_TIMEOUTCFG,
			       PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(0xffffff));

	pci_write_config_dword(mac->pdev, PAS_MAC_CFG_PCFG, flags);

	ret = pasemi_mac_setup_rx_resources(dev);
	if (ret)
		goto out_rx_resources;

	ret = pasemi_mac_setup_tx_resources(dev);
	if (ret)
		goto out_tx_resources;

	pci_write_config_dword(mac->pdev, PAS_MAC_IPC_CHNL,
			       PAS_MAC_IPC_CHNL_DCHNO(mac->dma_rxch) |
			       PAS_MAC_IPC_CHNL_BCH(mac->dma_rxch));

	/* enable rx if */
	pci_write_config_dword(mac->dma_pdev,
			       PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
			       PAS_DMA_RXINT_RCMDSTA_EN);

	/* enable rx channel */
	pci_write_config_dword(mac->dma_pdev,
			       PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch),
			       PAS_DMA_RXCHAN_CCMDSTA_EN |
			       PAS_DMA_RXCHAN_CCMDSTA_DU);

	/* enable tx channel */
	pci_write_config_dword(mac->dma_pdev,
			       PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch),
			       PAS_DMA_TXCHAN_TCMDSTA_EN);

	pasemi_mac_replenish_rx_ring(dev);

	ret = pasemi_mac_phy_init(dev);
	/* Some configs don't have PHYs (XAUI etc), so don't complain about
	 * failed init due to -ENODEV.
	 */
	if (ret && ret != -ENODEV)
		dev_warn(&mac->pdev->dev, "phy init failed: %d\n", ret);

	netif_start_queue(dev);
	netif_poll_enable(dev);

	/* Interrupts are a bit different for our DMA controller: While
	 * it's got a regular PCI device header, the interrupt there is
	 * really the base of the range it's using. Each tx and rx
	 * channel has its own interrupt source.
	 */

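	/* Concretely: tx channel N uses interrupt source base + N, and
	 * rx channel N uses source base + 20 + N.
	 */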
	base_irq = virq_to_hw(mac->dma_pdev->irq);

	mac->tx_irq = irq_create_mapping(NULL, base_irq + mac->dma_txch);
	mac->rx_irq = irq_create_mapping(NULL, base_irq + 20 + mac->dma_rxch);

	ret = request_irq(mac->tx_irq, &pasemi_mac_tx_intr, IRQF_DISABLED,
			  mac->tx->irq_name, dev);
	if (ret) {
		dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
			base_irq + mac->dma_txch, ret);
		goto out_tx_int;
	}

	ret = request_irq(mac->rx_irq, &pasemi_mac_rx_intr, IRQF_DISABLED,
			  mac->rx->irq_name, dev);
	if (ret) {
		dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
			base_irq + 20 + mac->dma_rxch, ret);
		goto out_rx_int;
	}

	if (mac->phydev)
		phy_start(mac->phydev);

	return 0;

out_rx_int:
	free_irq(mac->tx_irq, dev);
out_tx_int:
	netif_poll_disable(dev);
	netif_stop_queue(dev);
	pasemi_mac_free_tx_resources(dev);
out_tx_resources:
	pasemi_mac_free_rx_resources(dev);
out_rx_resources:

	return ret;
}

#define MAX_RETRIES 5000

static int pasemi_mac_close(struct net_device *dev)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	unsigned int stat;
	int retries;

	if (mac->phydev) {
		phy_stop(mac->phydev);
		phy_disconnect(mac->phydev);
	}

	netif_stop_queue(dev);

	/* Clean out any pending buffers */
	pasemi_mac_clean_tx(mac);
	pasemi_mac_clean_rx(mac, RX_RING_SIZE);

	/* Stop the tx channel, rx interface and rx channel */
	pci_write_config_dword(mac->dma_pdev,
			       PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch),
			       PAS_DMA_TXCHAN_TCMDSTA_ST);
	pci_write_config_dword(mac->dma_pdev,
			       PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
			       PAS_DMA_RXINT_RCMDSTA_ST);
	pci_write_config_dword(mac->dma_pdev,
			       PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch),
			       PAS_DMA_RXCHAN_CCMDSTA_ST);

	for (retries = 0; retries < MAX_RETRIES; retries++) {
		pci_read_config_dword(mac->dma_pdev,
				      PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch),
				      &stat);
		if (!(stat & PAS_DMA_TXCHAN_TCMDSTA_ACT))
			break;
		cond_resched();
	}

	if (stat & PAS_DMA_TXCHAN_TCMDSTA_ACT)
		dev_err(&mac->dma_pdev->dev, "Failed to stop tx channel\n");

	for (retries = 0; retries < MAX_RETRIES; retries++) {
		pci_read_config_dword(mac->dma_pdev,
				      PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch),
				      &stat);
		if (!(stat & PAS_DMA_RXCHAN_CCMDSTA_ACT))
			break;
		cond_resched();
	}

	if (stat & PAS_DMA_RXCHAN_CCMDSTA_ACT)
		dev_err(&mac->dma_pdev->dev, "Failed to stop rx channel\n");

	for (retries = 0; retries < MAX_RETRIES; retries++) {
		pci_read_config_dword(mac->dma_pdev,
				      PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
				      &stat);
		if (!(stat & PAS_DMA_RXINT_RCMDSTA_ACT))
			break;
		cond_resched();
	}

	if (stat & PAS_DMA_RXINT_RCMDSTA_ACT)
		dev_err(&mac->dma_pdev->dev, "Failed to stop rx interface\n");

	/* Then, disable the channels. This must be done separately from
	 * stopping, since you can't disable when active.
	 */

	pci_write_config_dword(mac->dma_pdev,
			       PAS_DMA_TXCHAN_TCMDSTA(mac->dma_txch), 0);
	pci_write_config_dword(mac->dma_pdev,
			       PAS_DMA_RXCHAN_CCMDSTA(mac->dma_rxch), 0);
	pci_write_config_dword(mac->dma_pdev,
			       PAS_DMA_RXINT_RCMDSTA(mac->dma_if), 0);

	free_irq(mac->tx_irq, dev);
	free_irq(mac->rx_irq, dev);

	/* Free resources */
	pasemi_mac_free_rx_resources(dev);
	pasemi_mac_free_tx_resources(dev);

	return 0;
}

static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	struct pasemi_mac_txring *txring;
	struct pasemi_mac_buffer *info;
	struct pas_dma_xct_descr *dp;
	u64 dflags;
	dma_addr_t map;
	unsigned long flags;

	dflags = XCT_MACTX_O | XCT_MACTX_ST | XCT_MACTX_SS | XCT_MACTX_CRC_PAD;

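	/* For checksum offload, tell the MAC which L4 checksum to insert,
	 * where the IP header starts (IPO, in bytes from the start of the
	 * frame) and how long it is (IPH, in 32-bit words).
	 */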
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		const unsigned char *nh = skb_network_header(skb);

		switch (ip_hdr(skb)->protocol) {
		case IPPROTO_TCP:
			dflags |= XCT_MACTX_CSUM_TCP;
			dflags |= XCT_MACTX_IPH(skb_network_header_len(skb) >> 2);
			dflags |= XCT_MACTX_IPO(nh - skb->data);
			break;
		case IPPROTO_UDP:
			dflags |= XCT_MACTX_CSUM_UDP;
			dflags |= XCT_MACTX_IPH(skb_network_header_len(skb) >> 2);
			dflags |= XCT_MACTX_IPO(nh - skb->data);
			break;
		}
	}

	map = pci_map_single(mac->dma_pdev, skb->data, skb->len, PCI_DMA_TODEVICE);

	if (dma_mapping_error(map))
		return NETDEV_TX_BUSY;

	txring = mac->tx;

	spin_lock_irqsave(&txring->lock, flags);

	if (txring->next_to_use - txring->next_to_clean == TX_RING_SIZE) {
		spin_unlock_irqrestore(&txring->lock, flags);
		pasemi_mac_clean_tx(mac);
		pasemi_mac_restart_tx_intr(mac);
		spin_lock_irqsave(&txring->lock, flags);

		if (txring->next_to_use - txring->next_to_clean ==
		    TX_RING_SIZE) {
			/* Still no room -- stop the queue and wait for tx
			 * intr when there's room.
			 */
			netif_stop_queue(dev);
			goto out_err;
		}
	}


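	/* Fill in the two-word descriptor: mactx carries the command flags
	 * and frame length, ptr carries the buffer's DMA address and length.
	 */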
	dp = &TX_DESC(mac, txring->next_to_use);
	info = &TX_DESC_INFO(mac, txring->next_to_use);

	dp->mactx = dflags | XCT_MACTX_LLEN(skb->len);
	dp->ptr   = XCT_PTR_LEN(skb->len) | XCT_PTR_ADDR(map);
	info->dma = map;
	info->skb = skb;

	txring->next_to_use++;
	mac->stats.tx_packets++;
	mac->stats.tx_bytes += skb->len;

	spin_unlock_irqrestore(&txring->lock, flags);

	pci_write_config_dword(mac->dma_pdev,
			       PAS_DMA_TXCHAN_INCR(mac->dma_txch), 1);

	return NETDEV_TX_OK;

out_err:
	spin_unlock_irqrestore(&txring->lock, flags);
	pci_unmap_single(mac->dma_pdev, map, skb->len, PCI_DMA_TODEVICE);
	return NETDEV_TX_BUSY;
}

static struct net_device_stats *pasemi_mac_get_stats(struct net_device *dev)
{
	struct pasemi_mac *mac = netdev_priv(dev);

	return &mac->stats;
}


static void pasemi_mac_set_rx_mode(struct net_device *dev)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	unsigned int flags;

	pci_read_config_dword(mac->pdev, PAS_MAC_CFG_PCFG, &flags);

	/* Set promiscuous */
	if (dev->flags & IFF_PROMISC)
		flags |= PAS_MAC_CFG_PCFG_PR;
	else
		flags &= ~PAS_MAC_CFG_PCFG_PR;

	pci_write_config_dword(mac->pdev, PAS_MAC_CFG_PCFG, flags);
}


static int pasemi_mac_poll(struct net_device *dev, int *budget)
{
	int pkts, limit = min(*budget, dev->quota);
	struct pasemi_mac *mac = netdev_priv(dev);

	pkts = pasemi_mac_clean_rx(mac, limit);

	dev->quota -= pkts;
	*budget -= pkts;

	if (pkts < limit) {
		/* all done, no more packets present */
		netif_rx_complete(dev);

		pasemi_mac_restart_rx_intr(mac);
		return 0;
	} else {
		/* used up our quantum, so reschedule */
		return 1;
	}
}

static int __devinit
pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int index = 0;
	struct net_device *dev;
	struct pasemi_mac *mac;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	dev = alloc_etherdev(sizeof(struct pasemi_mac));
	if (dev == NULL) {
		dev_err(&pdev->dev,
			"pasemi_mac: Could not allocate ethernet device.\n");
		err = -ENOMEM;
		goto out_disable_device;
	}

	SET_MODULE_OWNER(dev);
	pci_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	mac = netdev_priv(dev);

	mac->pdev = pdev;
	mac->netdev = dev;
	mac->dma_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa007, NULL);

	if (!mac->dma_pdev) {
		dev_err(&pdev->dev, "Can't find DMA Controller\n");
		err = -ENODEV;
		goto out_free_netdev;
	}

	mac->iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL);

	if (!mac->iob_pdev) {
		dev_err(&pdev->dev, "Can't find I/O Bridge\n");
		err = -ENODEV;
		goto out_put_dma_pdev;
	}

	/* These should come out of the device tree eventually */
	mac->dma_txch = index;
	mac->dma_rxch = index;

	/* We probe GMAC before XAUI, but the DMA interfaces are
	 * in XAUI, GMAC order.
	 */
	if (index < 4)
		mac->dma_if = index + 2;
	else
		mac->dma_if = index - 4;
	index++;

	switch (pdev->device) {
	case 0xa005:
		mac->type = MAC_TYPE_GMAC;
		break;
	case 0xa006:
		mac->type = MAC_TYPE_XAUI;
		break;
	default:
		err = -ENODEV;
		goto out;
	}

	/* get mac addr from device tree */
	if (pasemi_get_mac_addr(mac) || !is_valid_ether_addr(mac->mac_addr)) {
		err = -ENODEV;
		goto out;
	}
	memcpy(dev->dev_addr, mac->mac_addr, sizeof(mac->mac_addr));

	dev->open = pasemi_mac_open;
	dev->stop = pasemi_mac_close;
	dev->hard_start_xmit = pasemi_mac_start_tx;
	dev->get_stats = pasemi_mac_get_stats;
	dev->set_multicast_list = pasemi_mac_set_rx_mode;
	dev->weight = 64;
	dev->poll = pasemi_mac_poll;
	dev->features = NETIF_F_HW_CSUM;

	/* The dma status structure is located in the I/O bridge, and
	 * is cache coherent.
	 */
	if (!dma_status)
		/* XXXOJN This should come from the device tree */
		dma_status = __ioremap(0xfd800000, 0x1000, 0);

	mac->rx_status = &dma_status->rx_sta[mac->dma_rxch];
	mac->tx_status = &dma_status->tx_sta[mac->dma_txch];

	mac->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

	err = register_netdev(dev);

	if (err) {
		dev_err(&mac->pdev->dev, "register_netdev failed with error %d\n",
			err);
		goto out;
	} else
		printk(KERN_INFO "%s: PA Semi %s: intf %d, txch %d, rxch %d, "
		       "hw addr %02x:%02x:%02x:%02x:%02x:%02x\n",
		       dev->name, mac->type == MAC_TYPE_GMAC ? "GMAC" : "XAUI",
		       mac->dma_if, mac->dma_txch, mac->dma_rxch,
		       dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
		       dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);

	return err;

out:
	pci_dev_put(mac->iob_pdev);
out_put_dma_pdev:
	pci_dev_put(mac->dma_pdev);
out_free_netdev:
	free_netdev(dev);
out_disable_device:
	pci_disable_device(pdev);
	return err;

}

static void __devexit pasemi_mac_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pasemi_mac *mac;

	if (!netdev)
		return;

	mac = netdev_priv(netdev);

	unregister_netdev(netdev);

	pci_disable_device(pdev);
	pci_dev_put(mac->dma_pdev);
	pci_dev_put(mac->iob_pdev);

	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
}

static struct pci_device_id pasemi_mac_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa005) },
	{ PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa006) },
	{ },
};

MODULE_DEVICE_TABLE(pci, pasemi_mac_pci_tbl);

static struct pci_driver pasemi_mac_driver = {
	.name		= "pasemi_mac",
	.id_table	= pasemi_mac_pci_tbl,
	.probe		= pasemi_mac_probe,
	.remove		= __devexit_p(pasemi_mac_remove),
};

static void __exit pasemi_mac_cleanup_module(void)
{
	pci_unregister_driver(&pasemi_mac_driver);
	if (dma_status) {
		__iounmap(dma_status);
		dma_status = NULL;
	}
}
static int __init pasemi_mac_init_module(void)
{
	return pci_register_driver(&pasemi_mac_driver);
}

module_init(pasemi_mac_init_module);
module_exit(pasemi_mac_cleanup_module);