1/*
2 * Copyright (C) 2001,2002,2003,2004 Broadcom Corporation
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 2
7 * of the License, or (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
17 *
18 *
19 * This driver is designed for the Broadcom SiByte SOC built-in
20 * Ethernet controllers. Written by Mitch Lichtenberg at Broadcom Corp.
21 */
22#include <linux/module.h>
23#include <linux/kernel.h>
24#include <linux/string.h>
25#include <linux/timer.h>
26#include <linux/errno.h>
27#include <linux/ioport.h>
28#include <linux/slab.h>
29#include <linux/interrupt.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
33#include <linux/init.h>
34#include <linux/bitops.h>
35#include <asm/processor.h>		/* Processor type for cache alignment. */
36#include <asm/io.h>
37#include <asm/cache.h>
38
/* This is only here until the firmware is ready.  Once it is,
   the firmware will leave the ethernet address in the register for us. */
41#ifdef CONFIG_SIBYTE_STANDALONE
42#define SBMAC_ETH0_HWADDR "40:00:00:00:01:00"
43#define SBMAC_ETH1_HWADDR "40:00:00:00:01:01"
44#define SBMAC_ETH2_HWADDR "40:00:00:00:01:02"
45#define SBMAC_ETH3_HWADDR "40:00:00:00:01:03"
46#endif
47
48
49/* These identify the driver base version and may not be removed. */
50
51
52/* Operational parameters that usually are not changed. */
53
54#define CONFIG_SBMAC_COALESCE
55
56#define MAX_UNITS 4		/* More are supported, limit only on options */
57
58/* Time in jiffies before concluding the transmitter is hung. */
59#define TX_TIMEOUT  (2*HZ)
60
61
62MODULE_AUTHOR("Mitch Lichtenberg (Broadcom Corp.)");
63MODULE_DESCRIPTION("Broadcom SiByte SOC GB Ethernet driver");
64
65/* A few user-configurable values which may be modified when a driver
66   module is loaded. */
67
68/* 1 normal messages, 0 quiet .. 7 verbose. */
69static int debug = 1;
70module_param(debug, int, S_IRUGO);
71MODULE_PARM_DESC(debug, "Debug messages");
72
73/* mii status msgs */
74static int noisy_mii = 1;
75module_param(noisy_mii, int, S_IRUGO);
76MODULE_PARM_DESC(noisy_mii, "MII status messages");
77
78/* Used to pass the media type, etc.
79   Both 'options[]' and 'full_duplex[]' should exist for driver
80   interoperability.
81   The media type is usually passed in 'options[]'.
82*/
83#ifdef MODULE
84static int options[MAX_UNITS] = {-1, -1, -1, -1};
85module_param_array(options, int, NULL, S_IRUGO);
86MODULE_PARM_DESC(options, "1-" __MODULE_STRING(MAX_UNITS));
87
88static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1};
89module_param_array(full_duplex, int, NULL, S_IRUGO);
90MODULE_PARM_DESC(full_duplex, "1-" __MODULE_STRING(MAX_UNITS));
91#endif
92
93#ifdef CONFIG_SBMAC_COALESCE
94static int int_pktcnt_tx = 255;
95module_param(int_pktcnt_tx, int, S_IRUGO);
96MODULE_PARM_DESC(int_pktcnt_tx, "TX packet count");
97
98static int int_timeout_tx = 255;
99module_param(int_timeout_tx, int, S_IRUGO);
100MODULE_PARM_DESC(int_timeout_tx, "TX timeout value");
101
102static int int_pktcnt_rx = 64;
103module_param(int_pktcnt_rx, int, S_IRUGO);
104MODULE_PARM_DESC(int_pktcnt_rx, "RX packet count");
105
106static int int_timeout_rx = 64;
107module_param(int_timeout_rx, int, S_IRUGO);
108MODULE_PARM_DESC(int_timeout_rx, "RX timeout value");
109#endif
110
111#include <asm/sibyte/sb1250.h>
112#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
113#include <asm/sibyte/bcm1480_regs.h>
114#include <asm/sibyte/bcm1480_int.h>
115#define R_MAC_DMA_OODPKTLOST_RX	R_MAC_DMA_OODPKTLOST
116#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
117#include <asm/sibyte/sb1250_regs.h>
118#include <asm/sibyte/sb1250_int.h>
119#else
#error invalid SiByte MAC configuration
121#endif
122#include <asm/sibyte/sb1250_scd.h>
123#include <asm/sibyte/sb1250_mac.h>
124#include <asm/sibyte/sb1250_dma.h>
125
126#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
127#define UNIT_INT(n)		(K_BCM1480_INT_MAC_0 + ((n) * 2))
128#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
129#define UNIT_INT(n)		(K_INT_MAC_0 + (n))
130#else
#error invalid SiByte MAC configuration
132#endif
133
134/**********************************************************************
135 *  Simple types
136 ********************************************************************* */
137
138
139typedef enum { sbmac_speed_auto, sbmac_speed_10,
140	       sbmac_speed_100, sbmac_speed_1000 } sbmac_speed_t;
141
142typedef enum { sbmac_duplex_auto, sbmac_duplex_half,
143	       sbmac_duplex_full } sbmac_duplex_t;
144
145typedef enum { sbmac_fc_auto, sbmac_fc_disabled, sbmac_fc_frame,
146	       sbmac_fc_collision, sbmac_fc_carrier } sbmac_fc_t;
147
148typedef enum { sbmac_state_uninit, sbmac_state_off, sbmac_state_on,
149	       sbmac_state_broken } sbmac_state_t;
150
151
152/**********************************************************************
153 *  Macros
154 ********************************************************************* */
155
156
157#define SBDMA_NEXTBUF(d,f) ((((d)->f+1) == (d)->sbdma_dscrtable_end) ? \
158			  (d)->sbdma_dscrtable : (d)->f+1)
159
160
161#define NUMCACHEBLKS(x) (((x)+SMP_CACHE_BYTES-1)/SMP_CACHE_BYTES)
162
163#define SBMAC_MAX_TXDESCR	256
164#define SBMAC_MAX_RXDESCR	256
165
166#define ETHER_ALIGN	2
167#define ETHER_ADDR_LEN	6
168#define ENET_PACKET_SIZE	1518
169/*#define ENET_PACKET_SIZE	9216 */
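
/*
 * Worked example for NUMCACHEBLKS above (assuming a 32-byte cache
 * line, i.e. SMP_CACHE_BYTES == 32): a default receive buffer of
 * ENET_PACKET_SIZE + ETHER_ALIGN = 1520 bytes rounds up to
 * (1520 + 31) / 32 = 48 cache blocks, which is the A_SIZE value
 * programmed into the receive descriptors further down.
 */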
170
171/**********************************************************************
172 *  DMA Descriptor structure
173 ********************************************************************* */
174
175typedef struct sbdmadscr_s {
176	uint64_t  dscr_a;
177	uint64_t  dscr_b;
178} sbdmadscr_t;
179
180typedef unsigned long paddr_t;
181
182/**********************************************************************
183 *  DMA Controller structure
184 ********************************************************************* */
185
186typedef struct sbmacdma_s {
187
188	/*
189	 * This stuff is used to identify the channel and the registers
190	 * associated with it.
191	 */
192
193	struct sbmac_softc *sbdma_eth;	    /* back pointer to associated MAC */
194	int              sbdma_channel;	    /* channel number */
195	int		 sbdma_txdir;       /* direction (1=transmit) */
196	int		 sbdma_maxdescr;    /* total # of descriptors in ring */
197#ifdef CONFIG_SBMAC_COALESCE
198	int		 sbdma_int_pktcnt;  /* # descriptors rx/tx before interrupt*/
199	int		 sbdma_int_timeout; /* # usec rx/tx interrupt */
200#endif
201
202	volatile void __iomem *sbdma_config0;	/* DMA config register 0 */
203	volatile void __iomem *sbdma_config1;	/* DMA config register 1 */
204	volatile void __iomem *sbdma_dscrbase;	/* Descriptor base address */
205	volatile void __iomem *sbdma_dscrcnt;   /* Descriptor count register */
206	volatile void __iomem *sbdma_curdscr;	/* current descriptor address */
207	volatile void __iomem *sbdma_oodpktlost;/* pkt drop (rx only) */
208
209
210	/*
211	 * This stuff is for maintenance of the ring
212	 */
213
214	sbdmadscr_t     *sbdma_dscrtable_unaligned;
215	sbdmadscr_t     *sbdma_dscrtable;	/* base of descriptor table */
216	sbdmadscr_t     *sbdma_dscrtable_end; /* end of descriptor table */
217
218	struct sk_buff **sbdma_ctxtable;    /* context table, one per descr */
219
220	paddr_t          sbdma_dscrtable_phys; /* and also the phys addr */
221	sbdmadscr_t     *sbdma_addptr;	/* next dscr for sw to add */
222	sbdmadscr_t     *sbdma_remptr;	/* next dscr for sw to remove */
223} sbmacdma_t;
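
/*
 * A minimal sketch (not compiled in) of how the add/remove pointers
 * and SBDMA_NEXTBUF cooperate: both pointers walk sbdma_dscrtable and
 * wrap at sbdma_dscrtable_end, and the ring counts as full when
 * advancing sbdma_addptr would land on sbdma_remptr, so one
 * descriptor always stays unused.  The helper name below is
 * hypothetical and exists only for illustration.
 */
#if 0
static int sbdma_ring_is_full(sbmacdma_t *d)
{
	/* mirrors the check at the top of sbdma_add_rcvbuffer/_txbuffer */
	return SBDMA_NEXTBUF(d, sbdma_addptr) == d->sbdma_remptr;
}
#endif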
224
225
226/**********************************************************************
227 *  Ethernet softc structure
228 ********************************************************************* */
229
230struct sbmac_softc {
231
232	/*
233	 * Linux-specific things
234	 */
235
236	struct net_device *sbm_dev;		/* pointer to linux device */
237	spinlock_t sbm_lock;		/* spin lock */
238	struct timer_list sbm_timer;     	/* for monitoring MII */
239	struct net_device_stats sbm_stats;
240	int sbm_devflags;			/* current device flags */
241
242	int	     sbm_phy_oldbmsr;
243	int	     sbm_phy_oldanlpar;
244	int	     sbm_phy_oldk1stsr;
245	int	     sbm_phy_oldlinkstat;
246	int sbm_buffersize;
247
248	unsigned char sbm_phys[2];
249
250	/*
251	 * Controller-specific things
252	 */
253
254	void __iomem		*sbm_base;          /* MAC's base address */
255	sbmac_state_t    sbm_state;         /* current state */
256
257	volatile void __iomem	*sbm_macenable;	/* MAC Enable Register */
258	volatile void __iomem	*sbm_maccfg;	/* MAC Configuration Register */
259	volatile void __iomem	*sbm_fifocfg;	/* FIFO configuration register */
260	volatile void __iomem	*sbm_framecfg;	/* Frame configuration register */
261	volatile void __iomem	*sbm_rxfilter;	/* receive filter register */
262	volatile void __iomem	*sbm_isr;	/* Interrupt status register */
263	volatile void __iomem	*sbm_imr;	/* Interrupt mask register */
264	volatile void __iomem	*sbm_mdio;	/* MDIO register */
265
266	sbmac_speed_t    sbm_speed;		/* current speed */
267	sbmac_duplex_t   sbm_duplex;	/* current duplex */
268	sbmac_fc_t       sbm_fc;		/* current flow control setting */
269
270	unsigned char    sbm_hwaddr[ETHER_ADDR_LEN];
271
272	sbmacdma_t       sbm_txdma;		/* for now, only use channel 0 */
273	sbmacdma_t       sbm_rxdma;
274	int              rx_hw_checksum;
275	int 		 sbe_idx;
276};
277
278
279/**********************************************************************
280 *  Externs
281 ********************************************************************* */
282
283/**********************************************************************
284 *  Prototypes
285 ********************************************************************* */
286
287static void sbdma_initctx(sbmacdma_t *d,
288			  struct sbmac_softc *s,
289			  int chan,
290			  int txrx,
291			  int maxdescr);
292static void sbdma_channel_start(sbmacdma_t *d, int rxtx);
293static int sbdma_add_rcvbuffer(sbmacdma_t *d,struct sk_buff *m);
294static int sbdma_add_txbuffer(sbmacdma_t *d,struct sk_buff *m);
295static void sbdma_emptyring(sbmacdma_t *d);
296static void sbdma_fillring(sbmacdma_t *d);
297static int sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d, int work_to_do, int poll);
298static void sbdma_tx_process(struct sbmac_softc *sc,sbmacdma_t *d, int poll);
299static int sbmac_initctx(struct sbmac_softc *s);
300static void sbmac_channel_start(struct sbmac_softc *s);
301static void sbmac_channel_stop(struct sbmac_softc *s);
302static sbmac_state_t sbmac_set_channel_state(struct sbmac_softc *,sbmac_state_t);
303static void sbmac_promiscuous_mode(struct sbmac_softc *sc,int onoff);
304static uint64_t sbmac_addr2reg(unsigned char *ptr);
305static irqreturn_t sbmac_intr(int irq,void *dev_instance);
306static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev);
307static void sbmac_setmulti(struct sbmac_softc *sc);
308static int sbmac_init(struct net_device *dev, int idx);
309static int sbmac_set_speed(struct sbmac_softc *s,sbmac_speed_t speed);
310static int sbmac_set_duplex(struct sbmac_softc *s,sbmac_duplex_t duplex,sbmac_fc_t fc);
311
312static int sbmac_open(struct net_device *dev);
313static void sbmac_timer(unsigned long data);
314static void sbmac_tx_timeout (struct net_device *dev);
315static struct net_device_stats *sbmac_get_stats(struct net_device *dev);
316static void sbmac_set_rx_mode(struct net_device *dev);
317static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
318static int sbmac_close(struct net_device *dev);
319static int sbmac_poll(struct net_device *poll_dev, int *budget);
320
321static int sbmac_mii_poll(struct sbmac_softc *s,int noisy);
322static int sbmac_mii_probe(struct net_device *dev);
323
324static void sbmac_mii_sync(struct sbmac_softc *s);
325static void sbmac_mii_senddata(struct sbmac_softc *s,unsigned int data, int bitcnt);
326static unsigned int sbmac_mii_read(struct sbmac_softc *s,int phyaddr,int regidx);
327static void sbmac_mii_write(struct sbmac_softc *s,int phyaddr,int regidx,
328			    unsigned int regval);
329
330
331/**********************************************************************
332 *  Globals
333 ********************************************************************* */
334
335static uint64_t sbmac_orig_hwaddr[MAX_UNITS];
336
337
338/**********************************************************************
339 *  MDIO constants
340 ********************************************************************* */
341
342#define	MII_COMMAND_START	0x01
343#define	MII_COMMAND_READ	0x02
344#define	MII_COMMAND_WRITE	0x01
345#define	MII_COMMAND_ACK		0x02
346
347#define BMCR_RESET     0x8000
348#define BMCR_LOOPBACK  0x4000
349#define BMCR_SPEED0    0x2000
350#define BMCR_ANENABLE  0x1000
351#define BMCR_POWERDOWN 0x0800
352#define BMCR_ISOLATE   0x0400
353#define BMCR_RESTARTAN 0x0200
354#define BMCR_DUPLEX    0x0100
355#define BMCR_COLTEST   0x0080
356#define BMCR_SPEED1    0x0040
357#define BMCR_SPEED1000	BMCR_SPEED1
358#define BMCR_SPEED100	BMCR_SPEED0
359#define BMCR_SPEED10 	0
360
361#define BMSR_100BT4	0x8000
362#define BMSR_100BT_FDX	0x4000
363#define BMSR_100BT_HDX  0x2000
364#define BMSR_10BT_FDX   0x1000
365#define BMSR_10BT_HDX   0x0800
366#define BMSR_100BT2_FDX 0x0400
367#define BMSR_100BT2_HDX 0x0200
368#define BMSR_1000BT_XSR	0x0100
369#define BMSR_PRESUP	0x0040
370#define BMSR_ANCOMPLT	0x0020
371#define BMSR_REMFAULT	0x0010
372#define BMSR_AUTONEG	0x0008
373#define BMSR_LINKSTAT	0x0004
374#define BMSR_JABDETECT	0x0002
375#define BMSR_EXTCAPAB	0x0001
376
377#define PHYIDR1 	0x2000
378#define PHYIDR2		0x5C60
379
380#define ANAR_NP		0x8000
381#define ANAR_RF		0x2000
382#define ANAR_ASYPAUSE	0x0800
383#define ANAR_PAUSE	0x0400
384#define ANAR_T4		0x0200
385#define ANAR_TXFD	0x0100
386#define ANAR_TXHD	0x0080
387#define ANAR_10FD	0x0040
388#define ANAR_10HD	0x0020
389#define ANAR_PSB	0x0001
390
391#define ANLPAR_NP	0x8000
392#define ANLPAR_ACK	0x4000
393#define ANLPAR_RF	0x2000
394#define ANLPAR_ASYPAUSE	0x0800
395#define ANLPAR_PAUSE	0x0400
396#define ANLPAR_T4	0x0200
397#define ANLPAR_TXFD	0x0100
398#define ANLPAR_TXHD	0x0080
399#define ANLPAR_10FD	0x0040
400#define ANLPAR_10HD	0x0020
401#define ANLPAR_PSB	0x0001	/* 802.3 */
402
403#define ANER_PDF	0x0010
404#define ANER_LPNPABLE	0x0008
405#define ANER_NPABLE	0x0004
406#define ANER_PAGERX	0x0002
407#define ANER_LPANABLE	0x0001
408
409#define ANNPTR_NP	0x8000
410#define ANNPTR_MP	0x2000
411#define ANNPTR_ACK2	0x1000
412#define ANNPTR_TOGTX	0x0800
413#define ANNPTR_CODE	0x0008
414
415#define ANNPRR_NP	0x8000
416#define ANNPRR_MP	0x2000
417#define ANNPRR_ACK3	0x1000
418#define ANNPRR_TOGTX	0x0800
419#define ANNPRR_CODE	0x0008
420
421#define K1TCR_TESTMODE	0x0000
422#define K1TCR_MSMCE	0x1000
423#define K1TCR_MSCV	0x0800
424#define K1TCR_RPTR	0x0400
425#define K1TCR_1000BT_FDX 0x200
426#define K1TCR_1000BT_HDX 0x100
427
428#define K1STSR_MSMCFLT	0x8000
429#define K1STSR_MSCFGRES	0x4000
430#define K1STSR_LRSTAT	0x2000
431#define K1STSR_RRSTAT	0x1000
432#define K1STSR_LP1KFD	0x0800
433#define K1STSR_LP1KHD   0x0400
434#define K1STSR_LPASMDIR	0x0200
435
436#define K1SCR_1KX_FDX	0x8000
437#define K1SCR_1KX_HDX	0x4000
438#define K1SCR_1KT_FDX	0x2000
439#define K1SCR_1KT_HDX	0x1000
440
441#define STRAP_PHY1	0x0800
442#define STRAP_NCMODE	0x0400
443#define STRAP_MANMSCFG	0x0200
444#define STRAP_ANENABLE	0x0100
445#define STRAP_MSVAL	0x0080
446#define STRAP_1KHDXADV	0x0010
447#define STRAP_1KFDXADV	0x0008
448#define STRAP_100ADV	0x0004
449#define STRAP_SPEEDSEL	0x0000
450#define STRAP_SPEED100	0x0001
451
452#define PHYSUP_SPEED1000 0x10
453#define PHYSUP_SPEED100  0x08
454#define PHYSUP_SPEED10   0x00
455#define PHYSUP_LINKUP	 0x04
456#define PHYSUP_FDX       0x02
457
458#define	MII_BMCR	0x00 	/* Basic mode control register (rw) */
459#define	MII_BMSR	0x01	/* Basic mode status register (ro) */
460#define	MII_PHYIDR1	0x02
461#define	MII_PHYIDR2	0x03
462
463#define MII_K1STSR	0x0A	/* 1K Status Register (ro) */
464#define	MII_ANLPAR	0x05	/* Autonegotiation lnk partner abilities (rw) */
465
466
467#define M_MAC_MDIO_DIR_OUTPUT	0		/* for clarity */
468
469#define ENABLE 		1
470#define DISABLE		0
471
472/**********************************************************************
473 *  SBMAC_MII_SYNC(s)
474 *
475 *  Synchronize with the MII - send a pattern of bits to the MII
476 *  that will guarantee that it is ready to accept a command.
477 *
478 *  Input parameters:
479 *  	   s - sbmac structure
480 *
481 *  Return value:
482 *  	   nothing
483 ********************************************************************* */
484
485static void sbmac_mii_sync(struct sbmac_softc *s)
486{
487	int cnt;
488	uint64_t bits;
489	int mac_mdio_genc;
490
491	mac_mdio_genc = __raw_readq(s->sbm_mdio) & M_MAC_GENC;
492
493	bits = M_MAC_MDIO_DIR_OUTPUT | M_MAC_MDIO_OUT;
494
495	__raw_writeq(bits | mac_mdio_genc, s->sbm_mdio);
496
497	for (cnt = 0; cnt < 32; cnt++) {
498		__raw_writeq(bits | M_MAC_MDC | mac_mdio_genc, s->sbm_mdio);
499		__raw_writeq(bits | mac_mdio_genc, s->sbm_mdio);
500	}
501}
502
503/**********************************************************************
504 *  SBMAC_MII_SENDDATA(s,data,bitcnt)
505 *
506 *  Send some bits to the MII.  The bits to be sent are right-
507 *  justified in the 'data' parameter.
508 *
509 *  Input parameters:
510 *  	   s - sbmac structure
511 *  	   data - data to send
512 *  	   bitcnt - number of bits to send
513 ********************************************************************* */
514
515static void sbmac_mii_senddata(struct sbmac_softc *s,unsigned int data, int bitcnt)
516{
517	int i;
518	uint64_t bits;
519	unsigned int curmask;
520	int mac_mdio_genc;
521
522	mac_mdio_genc = __raw_readq(s->sbm_mdio) & M_MAC_GENC;
523
524	bits = M_MAC_MDIO_DIR_OUTPUT;
525	__raw_writeq(bits | mac_mdio_genc, s->sbm_mdio);
526
527	curmask = 1 << (bitcnt - 1);
528
529	for (i = 0; i < bitcnt; i++) {
530		if (data & curmask)
531			bits |= M_MAC_MDIO_OUT;
532		else bits &= ~M_MAC_MDIO_OUT;
533		__raw_writeq(bits | mac_mdio_genc, s->sbm_mdio);
534		__raw_writeq(bits | M_MAC_MDC | mac_mdio_genc, s->sbm_mdio);
535		__raw_writeq(bits | mac_mdio_genc, s->sbm_mdio);
536		curmask >>= 1;
537	}
538}
539
540
541
542/**********************************************************************
543 *  SBMAC_MII_READ(s,phyaddr,regidx)
544 *
545 *  Read a PHY register.
546 *
547 *  Input parameters:
548 *  	   s - sbmac structure
549 *  	   phyaddr - PHY's address
550 *  	   regidx = index of register to read
551 *
552 *  Return value:
553 *  	   value read, or 0 if an error occurred.
554 ********************************************************************* */
555
556static unsigned int sbmac_mii_read(struct sbmac_softc *s,int phyaddr,int regidx)
557{
558	int idx;
559	int error;
560	int regval;
561	int mac_mdio_genc;
562
563	/*
564	 * Synchronize ourselves so that the PHY knows the next
565	 * thing coming down is a command
566	 */
567
568	sbmac_mii_sync(s);
569
570	/*
571	 * Send the data to the PHY.  The sequence is
572	 * a "start" command (2 bits)
573	 * a "read" command (2 bits)
574	 * the PHY addr (5 bits)
575	 * the register index (5 bits)
576	 */
577
578	sbmac_mii_senddata(s,MII_COMMAND_START, 2);
579	sbmac_mii_senddata(s,MII_COMMAND_READ, 2);
580	sbmac_mii_senddata(s,phyaddr, 5);
581	sbmac_mii_senddata(s,regidx, 5);
582
583	mac_mdio_genc = __raw_readq(s->sbm_mdio) & M_MAC_GENC;
584
585	/*
586	 * Switch the port around without a clock transition.
587	 */
588	__raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, s->sbm_mdio);
589
590	/*
591	 * Send out a clock pulse to signal we want the status
592	 */
593
594	__raw_writeq(M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc, s->sbm_mdio);
595	__raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, s->sbm_mdio);
596
597	/*
598	 * If an error occurred, the PHY will signal '1' back
599	 */
600	error = __raw_readq(s->sbm_mdio) & M_MAC_MDIO_IN;
601
602	/*
603	 * Issue an 'idle' clock pulse, but keep the direction
604	 * the same.
605	 */
606	__raw_writeq(M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc, s->sbm_mdio);
607	__raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, s->sbm_mdio);
608
609	regval = 0;
610
611	for (idx = 0; idx < 16; idx++) {
612		regval <<= 1;
613
614		if (error == 0) {
615			if (__raw_readq(s->sbm_mdio) & M_MAC_MDIO_IN)
616				regval |= 1;
617		}
618
619		__raw_writeq(M_MAC_MDIO_DIR_INPUT|M_MAC_MDC | mac_mdio_genc, s->sbm_mdio);
620		__raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, s->sbm_mdio);
621	}
622
623	/* Switch back to output */
624	__raw_writeq(M_MAC_MDIO_DIR_OUTPUT | mac_mdio_genc, s->sbm_mdio);
625
626	if (error == 0)
627		return regval;
628	return 0;
629}
630
631
632/**********************************************************************
633 *  SBMAC_MII_WRITE(s,phyaddr,regidx,regval)
634 *
635 *  Write a value to a PHY register.
636 *
637 *  Input parameters:
638 *  	   s - sbmac structure
639 *  	   phyaddr - PHY to use
640 *  	   regidx - register within the PHY
641 *  	   regval - data to write to register
642 *
643 *  Return value:
644 *  	   nothing
645 ********************************************************************* */
646
647static void sbmac_mii_write(struct sbmac_softc *s,int phyaddr,int regidx,
648			    unsigned int regval)
649{
650	int mac_mdio_genc;
651
652	sbmac_mii_sync(s);
653
654	sbmac_mii_senddata(s,MII_COMMAND_START,2);
655	sbmac_mii_senddata(s,MII_COMMAND_WRITE,2);
656	sbmac_mii_senddata(s,phyaddr, 5);
657	sbmac_mii_senddata(s,regidx, 5);
658	sbmac_mii_senddata(s,MII_COMMAND_ACK,2);
659	sbmac_mii_senddata(s,regval,16);
660
661	mac_mdio_genc = __raw_readq(s->sbm_mdio) & M_MAC_GENC;
662
663	__raw_writeq(M_MAC_MDIO_DIR_OUTPUT | mac_mdio_genc, s->sbm_mdio);
664}
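
/*
 * A minimal usage sketch (hypothetical PHY address) of the two MDIO
 * helpers above.  Together with sbmac_mii_sync() they bit-bang a
 * standard clause-22 frame: 32 preamble bits, a start pattern (01),
 * an opcode (10 = read, 01 = write), the 5-bit PHY address, the
 * 5-bit register index, a turnaround, then 16 data bits.
 */
#if 0
static void sbmac_mii_example(struct sbmac_softc *s)
{
	/* read the basic mode status register of the PHY at address 1 */
	unsigned int bmsr = sbmac_mii_read(s, 1, MII_BMSR);

	/* if the link is down, restart autonegotiation on that PHY */
	if (!(bmsr & BMSR_LINKSTAT))
		sbmac_mii_write(s, 1, MII_BMCR,
				BMCR_ANENABLE | BMCR_RESTARTAN);
}
#endif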
665
666
667
668/**********************************************************************
669 *  SBDMA_INITCTX(d,s,chan,txrx,maxdescr)
670 *
671 *  Initialize a DMA channel context.  Since there are potentially
672 *  eight DMA channels per MAC, it's nice to do this in a standard
673 *  way.
674 *
675 *  Input parameters:
676 *  	   d - sbmacdma_t structure (DMA channel context)
677 *  	   s - sbmac_softc structure (pointer to a MAC)
678 *  	   chan - channel number (0..1 right now)
679 *  	   txrx - Identifies DMA_TX or DMA_RX for channel direction
680 *      maxdescr - number of descriptors
681 *
682 *  Return value:
683 *  	   nothing
684 ********************************************************************* */
685
686static void sbdma_initctx(sbmacdma_t *d,
687			  struct sbmac_softc *s,
688			  int chan,
689			  int txrx,
690			  int maxdescr)
691{
692#ifdef CONFIG_SBMAC_COALESCE
693	int int_pktcnt, int_timeout;
694#endif
695
696	/*
697	 * Save away interesting stuff in the structure
698	 */
699
700	d->sbdma_eth       = s;
701	d->sbdma_channel   = chan;
702	d->sbdma_txdir     = txrx;
703
704
705	__raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_BYTES)));
706	__raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_COLLISIONS)));
707	__raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_LATE_COL)));
708	__raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_EX_COL)));
709	__raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_FCS_ERROR)));
710	__raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_ABORT)));
711	__raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_BAD)));
712	__raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_GOOD)));
713	__raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_RUNT)));
714	__raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_TX_OVERSIZE)));
715	__raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_BYTES)));
716	__raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_MCAST)));
717	__raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_BCAST)));
718	__raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_BAD)));
719	__raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_GOOD)));
720	__raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_RUNT)));
721	__raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_OVERSIZE)));
722	__raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_FCS_ERROR)));
723	__raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_LENGTH_ERROR)));
724	__raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_CODE_ERROR)));
725	__raw_writeq(0, IOADDR(A_MAC_REGISTER(s->sbe_idx, R_MAC_RMON_RX_ALIGN_ERROR)));
726
727	/*
728	 * initialize register pointers
729	 */
730
731	d->sbdma_config0 =
732		s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CONFIG0);
733	d->sbdma_config1 =
734		s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CONFIG1);
735	d->sbdma_dscrbase =
736		s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_DSCR_BASE);
737	d->sbdma_dscrcnt =
738		s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_DSCR_CNT);
739	d->sbdma_curdscr =
740		s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CUR_DSCRADDR);
741	if (d->sbdma_txdir)
742		d->sbdma_oodpktlost = NULL;
743	else
744		d->sbdma_oodpktlost =
745			s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_OODPKTLOST_RX);
746
747	/*
748	 * Allocate memory for the ring
749	 */
750
751	d->sbdma_maxdescr = maxdescr;
752
753	d->sbdma_dscrtable_unaligned =
754	d->sbdma_dscrtable = (sbdmadscr_t *)
755		kmalloc((d->sbdma_maxdescr+1)*sizeof(sbdmadscr_t), GFP_KERNEL);
756
757	/*
758	 * The descriptor table must be aligned to at least 16 bytes or the
759	 * MAC will corrupt it.
760	 */
761	d->sbdma_dscrtable = (sbdmadscr_t *)
762		ALIGN((unsigned long)d->sbdma_dscrtable, sizeof(sbdmadscr_t));
763
764	memset(d->sbdma_dscrtable,0,d->sbdma_maxdescr*sizeof(sbdmadscr_t));
765
766	d->sbdma_dscrtable_end = d->sbdma_dscrtable + d->sbdma_maxdescr;
767
768	d->sbdma_dscrtable_phys = virt_to_phys(d->sbdma_dscrtable);
769
770	/*
771	 * And context table
772	 */
773
774	d->sbdma_ctxtable = (struct sk_buff **)
775		kmalloc(d->sbdma_maxdescr*sizeof(struct sk_buff *), GFP_KERNEL);
776
777	memset(d->sbdma_ctxtable,0,d->sbdma_maxdescr*sizeof(struct sk_buff *));
778
779#ifdef CONFIG_SBMAC_COALESCE
780	/*
781	 * Setup Rx/Tx DMA coalescing defaults
782	 */
783
784	int_pktcnt = (txrx == DMA_TX) ? int_pktcnt_tx : int_pktcnt_rx;
785	if ( int_pktcnt ) {
786		d->sbdma_int_pktcnt = int_pktcnt;
787	} else {
788		d->sbdma_int_pktcnt = 1;
789	}
790
791	int_timeout = (txrx == DMA_TX) ? int_timeout_tx : int_timeout_rx;
792	if ( int_timeout ) {
793		d->sbdma_int_timeout = int_timeout;
794	} else {
795		d->sbdma_int_timeout = 0;
796	}
797#endif
798
799}
800
801/**********************************************************************
802 *  SBDMA_CHANNEL_START(d)
803 *
804 *  Initialize the hardware registers for a DMA channel.
805 *
806 *  Input parameters:
 *  	   d - DMA channel to init (context must be previously init'd)
 *  	   rxtx - DMA_RX or DMA_TX depending on what type of channel
809 *
810 *  Return value:
811 *  	   nothing
812 ********************************************************************* */
813
814static void sbdma_channel_start(sbmacdma_t *d, int rxtx )
815{
816	/*
817	 * Turn on the DMA channel
818	 */
819
820#ifdef CONFIG_SBMAC_COALESCE
821	__raw_writeq(V_DMA_INT_TIMEOUT(d->sbdma_int_timeout) |
822		       0, d->sbdma_config1);
823	__raw_writeq(M_DMA_EOP_INT_EN |
824		       V_DMA_RINGSZ(d->sbdma_maxdescr) |
825		       V_DMA_INT_PKTCNT(d->sbdma_int_pktcnt) |
826		       0, d->sbdma_config0);
827#else
828	__raw_writeq(0, d->sbdma_config1);
829	__raw_writeq(V_DMA_RINGSZ(d->sbdma_maxdescr) |
830		       0, d->sbdma_config0);
831#endif
832
833	__raw_writeq(d->sbdma_dscrtable_phys, d->sbdma_dscrbase);
834
835	/*
836	 * Initialize ring pointers
837	 */
838
839	d->sbdma_addptr = d->sbdma_dscrtable;
840	d->sbdma_remptr = d->sbdma_dscrtable;
841}
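
/*
 * With the module defaults above this gives the receive channel a
 * packet-count threshold of 64 and a timeout of 64 (microseconds,
 * per the field comments in sbmacdma_t), so it interrupts after 64
 * completed packets or when the timeout expires, whichever comes
 * first; the transmit channel uses the larger 255/255 defaults,
 * presumably because TX completion is less latency-sensitive.
 */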
842
843/**********************************************************************
844 *  SBDMA_CHANNEL_STOP(d)
845 *
 *  Shut down the hardware registers for a DMA channel.
847 *
848 *  Input parameters:
 *  	   d - DMA channel to stop (context must be previously init'd)
850 *
851 *  Return value:
852 *  	   nothing
853 ********************************************************************* */
854
855static void sbdma_channel_stop(sbmacdma_t *d)
856{
857	/*
858	 * Turn off the DMA channel
859	 */
860
861	__raw_writeq(0, d->sbdma_config1);
862
863	__raw_writeq(0, d->sbdma_dscrbase);
864
865	__raw_writeq(0, d->sbdma_config0);
866
867	/*
868	 * Zero ring pointers
869	 */
870
871	d->sbdma_addptr = NULL;
872	d->sbdma_remptr = NULL;
873}
874
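/**********************************************************************
 *  SBDMA_ALIGN_SKB(skb,power2,offset)
 *
 *  Reserve headroom in an sk_buff so that its data pointer ends up
 *  aligned to a 'power2' boundary plus 'offset' bytes.
 *
 *  Input parameters:
 *  	   skb - sk_buff to adjust
 *  	   power2 - alignment (must be a power of two)
 *  	   offset - extra bytes to reserve past the aligned address
 *
 *  Return value:
 *  	   nothing
 ********************************************************************* */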
875static void sbdma_align_skb(struct sk_buff *skb,int power2,int offset)
876{
877	unsigned long addr;
878	unsigned long newaddr;
879
880	addr = (unsigned long) skb->data;
881
882	newaddr = (addr + power2 - 1) & ~(power2 - 1);
883
884	skb_reserve(skb,newaddr-addr+offset);
885}
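
/*
 * Worked example (hypothetical addresses, assuming 32-byte cache
 * lines): if skb->data sits at ...0x1234 and we call
 * sbdma_align_skb(skb, SMP_CACHE_BYTES, ETHER_ALIGN), newaddr rounds
 * up to ...0x1240 and skb_reserve() advances data by
 * (0x1240 - 0x1234) + 2 = 14 bytes.  The payload then starts two
 * bytes past a cache-line boundary, so the 14-byte Ethernet header
 * leaves the IP header naturally aligned, matching the requirements
 * spelled out in sbdma_add_rcvbuffer() below.
 */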
886
887
888/**********************************************************************
889 *  SBDMA_ADD_RCVBUFFER(d,sb)
890 *
891 *  Add a buffer to the specified DMA channel.   For receive channels,
892 *  this queues a buffer for inbound packets.
893 *
894 *  Input parameters:
895 *  	   d - DMA channel descriptor
896 * 	   sb - sk_buff to add, or NULL if we should allocate one
897 *
898 *  Return value:
 *  	   0 if buffer added successfully
 *  	   -ENOSPC if the ring is full, -ENOBUFS if allocation failed
901 ********************************************************************* */
902
903
904static int sbdma_add_rcvbuffer(sbmacdma_t *d,struct sk_buff *sb)
905{
906	sbdmadscr_t *dsc;
907	sbdmadscr_t *nextdsc;
908	struct sk_buff *sb_new = NULL;
909	int pktsize = ENET_PACKET_SIZE;
910
911	/* get pointer to our current place in the ring */
912
913	dsc = d->sbdma_addptr;
914	nextdsc = SBDMA_NEXTBUF(d,sbdma_addptr);
915
916	/*
917	 * figure out if the ring is full - if the next descriptor
918	 * is the same as the one that we're going to remove from
919	 * the ring, the ring is full
920	 */
921
922	if (nextdsc == d->sbdma_remptr) {
923		return -ENOSPC;
924	}
925
926	/*
927	 * Allocate a sk_buff if we don't already have one.
928	 * If we do have an sk_buff, reset it so that it's empty.
929	 *
930	 * Note: sk_buffs don't seem to be guaranteed to have any sort
931	 * of alignment when they are allocated.  Therefore, allocate enough
932	 * extra space to make sure that:
933	 *
934	 *    1. the data does not start in the middle of a cache line.
935	 *    2. The data does not end in the middle of a cache line
936	 *    3. The buffer can be aligned such that the IP addresses are
937	 *       naturally aligned.
938	 *
	 *  Remember, the SOC's MAC writes whole cache lines at a time,
940	 *  without reading the old contents first.  So, if the sk_buff's
941	 *  data portion starts in the middle of a cache line, the SOC
942	 *  DMA will trash the beginning (and ending) portions.
943	 */
944
945	if (sb == NULL) {
946		sb_new = dev_alloc_skb(ENET_PACKET_SIZE + SMP_CACHE_BYTES * 2 + ETHER_ALIGN);
947		if (sb_new == NULL) {
948			printk(KERN_INFO "%s: sk_buff allocation failed\n",
949			       d->sbdma_eth->sbm_dev->name);
950			return -ENOBUFS;
951		}
952
953		sbdma_align_skb(sb_new, SMP_CACHE_BYTES, ETHER_ALIGN);
954	}
955	else {
956		sb_new = sb;
957		/*
958		 * nothing special to reinit buffer, it's already aligned
959		 * and sb->data already points to a good place.
960		 */
961	}
962
963	/*
964	 * fill in the descriptor
965	 */
966
967#ifdef CONFIG_SBMAC_COALESCE
968	/*
969	 * Do not interrupt per DMA transfer.
970	 */
971	dsc->dscr_a = virt_to_phys(sb_new->data) |
972		V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize+ETHER_ALIGN)) | 0;
973#else
974	dsc->dscr_a = virt_to_phys(sb_new->data) |
975		V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize+ETHER_ALIGN)) |
976		M_DMA_DSCRA_INTERRUPT;
977#endif
978
979	/* receiving: no options */
980	dsc->dscr_b = 0;
981
982	/*
983	 * fill in the context
984	 */
985
986	d->sbdma_ctxtable[dsc-d->sbdma_dscrtable] = sb_new;
987
988	/*
989	 * point at next packet
990	 */
991
992	d->sbdma_addptr = nextdsc;
993
994	/*
995	 * Give the buffer to the DMA engine.
996	 */
997
998	__raw_writeq(1, d->sbdma_dscrcnt);
999
1000	return 0;					/* we did it */
1001}
1002
1003/**********************************************************************
1004 *  SBDMA_ADD_TXBUFFER(d,sb)
1005 *
1006 *  Add a transmit buffer to the specified DMA channel, causing a
1007 *  transmit to start.
1008 *
1009 *  Input parameters:
1010 *  	   d - DMA channel descriptor
1011 * 	   sb - sk_buff to add
1012 *
1013 *  Return value:
1014 *  	   0 transmit queued successfully
1015 *  	   otherwise error code
1016 ********************************************************************* */
1017
1018
1019static int sbdma_add_txbuffer(sbmacdma_t *d,struct sk_buff *sb)
1020{
1021	sbdmadscr_t *dsc;
1022	sbdmadscr_t *nextdsc;
1023	uint64_t phys;
1024	uint64_t ncb;
1025	int length;
1026
1027	/* get pointer to our current place in the ring */
1028
1029	dsc = d->sbdma_addptr;
1030	nextdsc = SBDMA_NEXTBUF(d,sbdma_addptr);
1031
1032	/*
1033	 * figure out if the ring is full - if the next descriptor
1034	 * is the same as the one that we're going to remove from
1035	 * the ring, the ring is full
1036	 */
1037
1038	if (nextdsc == d->sbdma_remptr) {
1039		return -ENOSPC;
1040	}
1041
1042	/*
1043	 * Under Linux, it's not necessary to copy/coalesce buffers
1044	 * like it is on NetBSD.  We think they're all contiguous,
1045	 * but that may not be true for GBE.
1046	 */
1047
1048	length = sb->len;
1049
1050	/*
1051	 * fill in the descriptor.  Note that the number of cache
1052	 * blocks in the descriptor is the number of blocks
1053	 * *spanned*, so we need to add in the offset (if any)
1054	 * while doing the calculation.
1055	 */
1056
1057	phys = virt_to_phys(sb->data);
1058	ncb = NUMCACHEBLKS(length+(phys & (SMP_CACHE_BYTES - 1)));
1059
1060	dsc->dscr_a = phys |
1061		V_DMA_DSCRA_A_SIZE(ncb) |
1062#ifndef CONFIG_SBMAC_COALESCE
1063		M_DMA_DSCRA_INTERRUPT |
1064#endif
1065		M_DMA_ETHTX_SOP;
1066
1067	/* transmitting: set outbound options and length */
1068
1069	dsc->dscr_b = V_DMA_DSCRB_OPTIONS(K_DMA_ETHTX_APPENDCRC_APPENDPAD) |
1070		V_DMA_DSCRB_PKT_SIZE(length);
1071
1072	/*
1073	 * fill in the context
1074	 */
1075
1076	d->sbdma_ctxtable[dsc-d->sbdma_dscrtable] = sb;
1077
1078	/*
1079	 * point at next packet
1080	 */
1081
1082	d->sbdma_addptr = nextdsc;
1083
1084	/*
1085	 * Give the buffer to the DMA engine.
1086	 */
1087
1088	__raw_writeq(1, d->sbdma_dscrcnt);
1089
1090	return 0;					/* we did it */
1091}
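
/*
 * Worked example for the "blocks spanned" calculation above (assuming
 * 32-byte cache lines): a 60-byte frame whose data starts 20 bytes
 * into a cache line touches bytes 20..79 of the region, i.e. three
 * cache lines, and NUMCACHEBLKS(60 + 20) = (80 + 31) / 32 = 3 agrees;
 * ignoring the offset would have undercounted it as
 * NUMCACHEBLKS(60) = 2.
 */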
1092
1093
1094
1095
1096/**********************************************************************
1097 *  SBDMA_EMPTYRING(d)
1098 *
1099 *  Free all allocated sk_buffs on the specified DMA channel;
1100 *
1101 *  Input parameters:
1102 *  	   d  - DMA channel
1103 *
1104 *  Return value:
1105 *  	   nothing
1106 ********************************************************************* */
1107
1108static void sbdma_emptyring(sbmacdma_t *d)
1109{
1110	int idx;
1111	struct sk_buff *sb;
1112
1113	for (idx = 0; idx < d->sbdma_maxdescr; idx++) {
1114		sb = d->sbdma_ctxtable[idx];
1115		if (sb) {
1116			dev_kfree_skb(sb);
1117			d->sbdma_ctxtable[idx] = NULL;
1118		}
1119	}
1120}
1121
1122
1123/**********************************************************************
1124 *  SBDMA_FILLRING(d)
1125 *
1126 *  Fill the specified DMA channel (must be receive channel)
1127 *  with sk_buffs
1128 *
1129 *  Input parameters:
1130 *  	   d - DMA channel
1131 *
1132 *  Return value:
1133 *  	   nothing
1134 ********************************************************************* */
1135
1136static void sbdma_fillring(sbmacdma_t *d)
1137{
1138	int idx;
1139
1140	for (idx = 0; idx < SBMAC_MAX_RXDESCR-1; idx++) {
1141		if (sbdma_add_rcvbuffer(d,NULL) != 0)
1142			break;
1143	}
1144}
1145
1146#ifdef CONFIG_NET_POLL_CONTROLLER
1147static void sbmac_netpoll(struct net_device *netdev)
1148{
1149	struct sbmac_softc *sc = netdev_priv(netdev);
1150	int irq = sc->sbm_dev->irq;
1151
1152	__raw_writeq(0, sc->sbm_imr);
1153
1154	sbmac_intr(irq, netdev);
1155
1156#ifdef CONFIG_SBMAC_COALESCE
1157	__raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) |
1158	((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_RX_CH0),
1159	sc->sbm_imr);
1160#else
1161	__raw_writeq((M_MAC_INT_CHANNEL << S_MAC_TX_CH0) |
1162	(M_MAC_INT_CHANNEL << S_MAC_RX_CH0), sc->sbm_imr);
1163#endif
1164}
1165#endif
1166
1167/**********************************************************************
1168 *  SBDMA_RX_PROCESS(sc,d,work_to_do,poll)
1169 *
1170 *  Process "completed" receive buffers on the specified DMA channel.
1171 *
1172 *  Input parameters:
1173 *            sc - softc structure
1174 *  	       d - DMA channel context
1175 *    work_to_do - no. of packets to process before enabling interrupt
1176 *                 again (for NAPI)
1177 *          poll - 1: using polling (for NAPI)
1178 *
1179 *  Return value:
 *  	   number of packets processed (work done)
1181 ********************************************************************* */
1182
1183static int sbdma_rx_process(struct sbmac_softc *sc,sbmacdma_t *d,
1184                             int work_to_do, int poll)
1185{
1186	int curidx;
1187	int hwidx;
1188	sbdmadscr_t *dsc;
1189	struct sk_buff *sb;
1190	int len;
1191	int work_done = 0;
1192	int dropped = 0;
1193
1194	prefetch(d);
1195
1196again:
1197	/* Check if the HW dropped any frames */
1198	sc->sbm_stats.rx_fifo_errors
1199	    += __raw_readq(sc->sbm_rxdma.sbdma_oodpktlost) & 0xffff;
1200	__raw_writeq(0, sc->sbm_rxdma.sbdma_oodpktlost);
1201
1202	while (work_to_do-- > 0) {
1203		/*
1204		 * figure out where we are (as an index) and where
1205		 * the hardware is (also as an index)
1206		 *
1207		 * This could be done faster if (for example) the
1208		 * descriptor table was page-aligned and contiguous in
1209		 * both virtual and physical memory -- you could then
1210		 * just compare the low-order bits of the virtual address
1211		 * (sbdma_remptr) and the physical address (sbdma_curdscr CSR)
1212		 */
1213
1214		dsc = d->sbdma_remptr;
1215		curidx = dsc - d->sbdma_dscrtable;
1216
1217		prefetch(dsc);
1218		prefetch(&d->sbdma_ctxtable[curidx]);
1219
1220		hwidx = (int) (((__raw_readq(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) -
1221				d->sbdma_dscrtable_phys) / sizeof(sbdmadscr_t));
1222
1223		/*
1224		 * If they're the same, that means we've processed all
1225		 * of the descriptors up to (but not including) the one that
1226		 * the hardware is working on right now.
1227		 */
1228
1229		if (curidx == hwidx)
1230			goto done;
1231
1232		/*
1233		 * Otherwise, get the packet's sk_buff ptr back
1234		 */
1235
1236		sb = d->sbdma_ctxtable[curidx];
1237		d->sbdma_ctxtable[curidx] = NULL;
1238
1239		len = (int)G_DMA_DSCRB_PKT_SIZE(dsc->dscr_b) - 4;
1240
1241		/*
1242		 * Check packet status.  If good, process it.
1243		 * If not, silently drop it and put it back on the
1244		 * receive ring.
1245		 */
1246
1247		if (likely (!(dsc->dscr_a & M_DMA_ETHRX_BAD))) {
1248
1249			/*
1250			 * Add a new buffer to replace the old one.  If we fail
1251			 * to allocate a buffer, we're going to drop this
1252			 * packet and put it right back on the receive ring.
1253			 */
1254
1255			if (unlikely (sbdma_add_rcvbuffer(d,NULL) ==
1256				      -ENOBUFS)) {
1257 				sc->sbm_stats.rx_dropped++;
1258				sbdma_add_rcvbuffer(d,sb); /* re-add old buffer */
1259				/* No point in continuing at the moment */
1260				printk(KERN_ERR "dropped packet (1)\n");
1261				d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr);
1262				goto done;
1263			} else {
1264				/*
1265				 * Set length into the packet
1266				 */
1267				skb_put(sb,len);
1268
1269				/*
1270				 * Buffer has been replaced on the
1271				 * receive ring.  Pass the buffer to
1272				 * the kernel
1273				 */
1274				sb->protocol = eth_type_trans(sb,d->sbdma_eth->sbm_dev);
1275				/* Check hw IPv4/TCP checksum if supported */
1276				if (sc->rx_hw_checksum == ENABLE) {
1277					if (!((dsc->dscr_a) & M_DMA_ETHRX_BADIP4CS) &&
1278					    !((dsc->dscr_a) & M_DMA_ETHRX_BADTCPCS)) {
1279						sb->ip_summed = CHECKSUM_UNNECESSARY;
1280						/* don't need to set sb->csum */
1281					} else {
1282						sb->ip_summed = CHECKSUM_NONE;
1283					}
1284				}
1285				prefetch(sb->data);
1286				prefetch((const void *)(((char *)sb->data)+32));
1287				if (poll)
1288					dropped = netif_receive_skb(sb);
1289				else
1290					dropped = netif_rx(sb);
1291
1292				if (dropped == NET_RX_DROP) {
1293					sc->sbm_stats.rx_dropped++;
1294					d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr);
1295					goto done;
1296				}
1297				else {
1298					sc->sbm_stats.rx_bytes += len;
1299					sc->sbm_stats.rx_packets++;
1300				}
1301			}
1302		} else {
1303			/*
1304			 * Packet was mangled somehow.  Just drop it and
1305			 * put it back on the receive ring.
1306			 */
1307			sc->sbm_stats.rx_errors++;
1308			sbdma_add_rcvbuffer(d,sb);
1309		}
1310
1311
1312		/*
1313		 * .. and advance to the next buffer.
1314		 */
1315
1316		d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr);
1317		work_done++;
1318	}
1319	if (!poll) {
1320		work_to_do = 32;
1321		goto again; /* collect fifo drop statistics again */
1322	}
1323done:
1324	return work_done;
1325}
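
/*
 * Worked example for the index comparison above: if the descriptor
 * table starts at physical address P, each sbdmadscr_t is 16 bytes,
 * and the sbdma_curdscr CSR reads back P + 80, then hwidx is 5 --
 * the hardware is working on descriptor 5 -- and software keeps
 * consuming completed descriptors until curidx catches up to 5.
 */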
1326
1327/**********************************************************************
1328 *  SBDMA_TX_PROCESS(sc,d)
1329 *
1330 *  Process "completed" transmit buffers on the specified DMA channel.
1331 *  This is normally called within the interrupt service routine.
1332 *  Note that this isn't really ideal for priority channels, since
1333 *  it processes all of the packets on a given channel before
1334 *  returning.
1335 *
1336 *  Input parameters:
1337 *      sc - softc structure
1338 *  	 d - DMA channel context
1339 *    poll - 1: using polling (for NAPI)
1340 *
1341 *  Return value:
1342 *  	   nothing
1343 ********************************************************************* */
1344
1345static void sbdma_tx_process(struct sbmac_softc *sc,sbmacdma_t *d, int poll)
1346{
1347	int curidx;
1348	int hwidx;
1349	sbdmadscr_t *dsc;
1350	struct sk_buff *sb;
1351	unsigned long flags;
1352	int packets_handled = 0;
1353
1354	spin_lock_irqsave(&(sc->sbm_lock), flags);
1355
1356	if (d->sbdma_remptr == d->sbdma_addptr)
1357	  goto end_unlock;
1358
1359	hwidx = (int) (((__raw_readq(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) -
1360			d->sbdma_dscrtable_phys) / sizeof(sbdmadscr_t));
1361
1362	for (;;) {
1363		/*
1364		 * figure out where we are (as an index) and where
1365		 * the hardware is (also as an index)
1366		 *
1367		 * This could be done faster if (for example) the
1368		 * descriptor table was page-aligned and contiguous in
1369		 * both virtual and physical memory -- you could then
1370		 * just compare the low-order bits of the virtual address
1371		 * (sbdma_remptr) and the physical address (sbdma_curdscr CSR)
1372		 */
1373
1374		curidx = d->sbdma_remptr - d->sbdma_dscrtable;
1375
1376		/*
1377		 * If they're the same, that means we've processed all
1378		 * of the descriptors up to (but not including) the one that
1379		 * the hardware is working on right now.
1380		 */
1381
1382		if (curidx == hwidx)
1383			break;
1384
1385		/*
1386		 * Otherwise, get the packet's sk_buff ptr back
1387		 */
1388
1389		dsc = &(d->sbdma_dscrtable[curidx]);
1390		sb = d->sbdma_ctxtable[curidx];
1391		d->sbdma_ctxtable[curidx] = NULL;
1392
1393		/*
1394		 * Stats
1395		 */
1396
1397		sc->sbm_stats.tx_bytes += sb->len;
1398		sc->sbm_stats.tx_packets++;
1399
1400		/*
1401		 * for transmits, we just free buffers.
1402		 */
1403
1404		dev_kfree_skb_irq(sb);
1405
1406		/*
1407		 * .. and advance to the next buffer.
1408		 */
1409
1410		d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr);
1411
1412		packets_handled++;
1413
1414	}
1415
1416	/*
1417	 * Decide if we should wake up the protocol or not.
1418	 * Other drivers seem to do this when we reach a low
1419	 * watermark on the transmit queue.
1420	 */
1421
1422	if (packets_handled)
1423		netif_wake_queue(d->sbdma_eth->sbm_dev);
1424
1425end_unlock:
1426	spin_unlock_irqrestore(&(sc->sbm_lock), flags);
1427
1428}
1429
1430
1431
1432/**********************************************************************
1433 *  SBMAC_INITCTX(s)
1434 *
1435 *  Initialize an Ethernet context structure - this is called
1436 *  once per MAC on the 1250.  Memory is allocated here, so don't
1437 *  call it again from inside the ioctl routines that bring the
1438 *  interface up/down
1439 *
1440 *  Input parameters:
1441 *  	   s - sbmac context structure
1442 *
1443 *  Return value:
1444 *  	   0
1445 ********************************************************************* */
1446
1447static int sbmac_initctx(struct sbmac_softc *s)
1448{
1449
1450	/*
1451	 * figure out the addresses of some ports
1452	 */
1453
1454	s->sbm_macenable = s->sbm_base + R_MAC_ENABLE;
1455	s->sbm_maccfg    = s->sbm_base + R_MAC_CFG;
1456	s->sbm_fifocfg   = s->sbm_base + R_MAC_THRSH_CFG;
1457	s->sbm_framecfg  = s->sbm_base + R_MAC_FRAMECFG;
1458	s->sbm_rxfilter  = s->sbm_base + R_MAC_ADFILTER_CFG;
1459	s->sbm_isr       = s->sbm_base + R_MAC_STATUS;
1460	s->sbm_imr       = s->sbm_base + R_MAC_INT_MASK;
1461	s->sbm_mdio      = s->sbm_base + R_MAC_MDIO;
1462
1463	s->sbm_phys[0]   = 1;
1464	s->sbm_phys[1]   = 0;
1465
1466	s->sbm_phy_oldbmsr = 0;
1467	s->sbm_phy_oldanlpar = 0;
1468	s->sbm_phy_oldk1stsr = 0;
1469	s->sbm_phy_oldlinkstat = 0;
1470
1471	/*
1472	 * Initialize the DMA channels.  Right now, only one per MAC is used
1473	 * Note: Only do this _once_, as it allocates memory from the kernel!
1474	 */
1475
1476	sbdma_initctx(&(s->sbm_txdma),s,0,DMA_TX,SBMAC_MAX_TXDESCR);
1477	sbdma_initctx(&(s->sbm_rxdma),s,0,DMA_RX,SBMAC_MAX_RXDESCR);
1478
1479	/*
1480	 * initial state is OFF
1481	 */
1482
1483	s->sbm_state = sbmac_state_off;
1484
1485
1486	s->sbm_speed = sbmac_speed_10;
1487	s->sbm_duplex = sbmac_duplex_half;
1488	s->sbm_fc = sbmac_fc_disabled;
1489
1490	return 0;
1491}
1492
1493
1494static void sbdma_uninitctx(struct sbmacdma_s *d)
1495{
1496	if (d->sbdma_dscrtable_unaligned) {
1497		kfree(d->sbdma_dscrtable_unaligned);
1498		d->sbdma_dscrtable_unaligned = d->sbdma_dscrtable = NULL;
1499	}
1500
1501	if (d->sbdma_ctxtable) {
1502		kfree(d->sbdma_ctxtable);
1503		d->sbdma_ctxtable = NULL;
1504	}
1505}
1506
1507
1508static void sbmac_uninitctx(struct sbmac_softc *sc)
1509{
1510	sbdma_uninitctx(&(sc->sbm_txdma));
1511	sbdma_uninitctx(&(sc->sbm_rxdma));
1512}
1513
1514
1515/**********************************************************************
1516 *  SBMAC_CHANNEL_START(s)
1517 *
1518 *  Start packet processing on this MAC.
1519 *
1520 *  Input parameters:
1521 *  	   s - sbmac structure
1522 *
1523 *  Return value:
1524 *  	   nothing
1525 ********************************************************************* */
1526
1527static void sbmac_channel_start(struct sbmac_softc *s)
1528{
1529	uint64_t reg;
1530	volatile void __iomem *port;
1531	uint64_t cfg,fifo,framecfg;
1532	int idx, th_value;
1533
1534	/*
1535	 * Don't do this if running
1536	 */
1537
1538	if (s->sbm_state == sbmac_state_on)
1539		return;
1540
1541	/*
1542	 * Bring the controller out of reset, but leave it off.
1543	 */
1544
1545	__raw_writeq(0, s->sbm_macenable);
1546
1547	/*
1548	 * Ignore all received packets
1549	 */
1550
1551	__raw_writeq(0, s->sbm_rxfilter);
1552
1553	/*
1554	 * Calculate values for various control registers.
1555	 */
1556
1557	cfg = M_MAC_RETRY_EN |
1558		M_MAC_TX_HOLD_SOP_EN |
1559		V_MAC_TX_PAUSE_CNT_16K |
1560		M_MAC_AP_STAT_EN |
1561		M_MAC_FAST_SYNC |
1562		M_MAC_SS_EN |
1563		0;
1564
1565	/*
	 * Be sure that RD_THRSH+WR_THRSH <= 32 for pass1 parts,
	 * and make sure that RD_THRSH + WR_THRSH <= 128 for pass2 and above.
1568	 * Use a larger RD_THRSH for gigabit
1569	 */
1570	if (soc_type == K_SYS_SOC_TYPE_BCM1250 && periph_rev < 2)
1571		th_value = 28;
1572	else
1573		th_value = 64;
1574
1575	fifo = V_MAC_TX_WR_THRSH(4) |	/* Must be '4' or '8' */
1576		((s->sbm_speed == sbmac_speed_1000)
1577		 ? V_MAC_TX_RD_THRSH(th_value) : V_MAC_TX_RD_THRSH(4)) |
1578		V_MAC_TX_RL_THRSH(4) |
1579		V_MAC_RX_PL_THRSH(4) |
1580		V_MAC_RX_RD_THRSH(4) |	/* Must be '4' */
1581		V_MAC_RX_PL_THRSH(4) |
1582		V_MAC_RX_RL_THRSH(8) |
1583		0;
1584
1585	framecfg = V_MAC_MIN_FRAMESZ_DEFAULT |
1586		V_MAC_MAX_FRAMESZ_DEFAULT |
1587		V_MAC_BACKOFF_SEL(1);
1588
1589	/*
1590	 * Clear out the hash address map
1591	 */
1592
1593	port = s->sbm_base + R_MAC_HASH_BASE;
1594	for (idx = 0; idx < MAC_HASH_COUNT; idx++) {
1595		__raw_writeq(0, port);
1596		port += sizeof(uint64_t);
1597	}
1598
1599	/*
1600	 * Clear out the exact-match table
1601	 */
1602
1603	port = s->sbm_base + R_MAC_ADDR_BASE;
1604	for (idx = 0; idx < MAC_ADDR_COUNT; idx++) {
1605		__raw_writeq(0, port);
1606		port += sizeof(uint64_t);
1607	}
1608
1609	/*
1610	 * Clear out the DMA Channel mapping table registers
1611	 */
1612
1613	port = s->sbm_base + R_MAC_CHUP0_BASE;
1614	for (idx = 0; idx < MAC_CHMAP_COUNT; idx++) {
1615		__raw_writeq(0, port);
1616		port += sizeof(uint64_t);
1617	}
1618
1619
1620	port = s->sbm_base + R_MAC_CHLO0_BASE;
1621	for (idx = 0; idx < MAC_CHMAP_COUNT; idx++) {
1622		__raw_writeq(0, port);
1623		port += sizeof(uint64_t);
1624	}
1625
1626	/*
1627	 * Program the hardware address.  It goes into the hardware-address
1628	 * register as well as the first filter register.
1629	 */
1630
1631	reg = sbmac_addr2reg(s->sbm_hwaddr);
1632
1633	port = s->sbm_base + R_MAC_ADDR_BASE;
1634	__raw_writeq(reg, port);
1635	port = s->sbm_base + R_MAC_ETHERNET_ADDR;
1636
1637#ifdef CONFIG_SB1_PASS_1_WORKAROUNDS
1638	/*
1639	 * Pass1 SOCs do not receive packets addressed to the
1640	 * destination address in the R_MAC_ETHERNET_ADDR register.
1641	 * Set the value to zero.
1642	 */
1643	__raw_writeq(0, port);
1644#else
1645	__raw_writeq(reg, port);
1646#endif
1647
1648	/*
1649	 * Set the receive filter for no packets, and write values
1650	 * to the various config registers
1651	 */
1652
1653	__raw_writeq(0, s->sbm_rxfilter);
1654	__raw_writeq(0, s->sbm_imr);
1655	__raw_writeq(framecfg, s->sbm_framecfg);
1656	__raw_writeq(fifo, s->sbm_fifocfg);
1657	__raw_writeq(cfg, s->sbm_maccfg);
1658
1659	/*
1660	 * Initialize DMA channels (rings should be ok now)
1661	 */
1662
1663	sbdma_channel_start(&(s->sbm_rxdma), DMA_RX);
1664	sbdma_channel_start(&(s->sbm_txdma), DMA_TX);
1665
1666	/*
1667	 * Configure the speed, duplex, and flow control
1668	 */
1669
1670	sbmac_set_speed(s,s->sbm_speed);
1671	sbmac_set_duplex(s,s->sbm_duplex,s->sbm_fc);
1672
1673	/*
1674	 * Fill the receive ring
1675	 */
1676
1677	sbdma_fillring(&(s->sbm_rxdma));
1678
1679	/*
1680	 * Turn on the rest of the bits in the enable register
1681	 */
1682
1683#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
1684	__raw_writeq(M_MAC_RXDMA_EN0 |
1685		       M_MAC_TXDMA_EN0, s->sbm_macenable);
1686#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
1687	__raw_writeq(M_MAC_RXDMA_EN0 |
1688		       M_MAC_TXDMA_EN0 |
1689		       M_MAC_RX_ENABLE |
1690		       M_MAC_TX_ENABLE, s->sbm_macenable);
1691#else
#error invalid SiByte MAC configuration
1693#endif
1694
1695#ifdef CONFIG_SBMAC_COALESCE
1696	__raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) |
1697		       ((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_RX_CH0), s->sbm_imr);
1698#else
1699	__raw_writeq((M_MAC_INT_CHANNEL << S_MAC_TX_CH0) |
1700		       (M_MAC_INT_CHANNEL << S_MAC_RX_CH0), s->sbm_imr);
1701#endif
1702
1703	/*
1704	 * Enable receiving unicasts and broadcasts
1705	 */
1706
1707	__raw_writeq(M_MAC_UCAST_EN | M_MAC_BCAST_EN, s->sbm_rxfilter);
1708
1709	/*
1710	 * we're running now.
1711	 */
1712
1713	s->sbm_state = sbmac_state_on;
1714
1715	/*
1716	 * Program multicast addresses
1717	 */
1718
1719	sbmac_setmulti(s);
1720
1721	/*
1722	 * If channel was in promiscuous mode before, turn that on
1723	 */
1724
1725	if (s->sbm_devflags & IFF_PROMISC) {
1726		sbmac_promiscuous_mode(s,1);
1727	}
1728
1729}
1730
1731
1732/**********************************************************************
1733 *  SBMAC_CHANNEL_STOP(s)
1734 *
1735 *  Stop packet processing on this MAC.
1736 *
1737 *  Input parameters:
1738 *  	   s - sbmac structure
1739 *
1740 *  Return value:
1741 *  	   nothing
1742 ********************************************************************* */
1743
1744static void sbmac_channel_stop(struct sbmac_softc *s)
1745{
1746	/* don't do this if already stopped */
1747
1748	if (s->sbm_state == sbmac_state_off)
1749		return;
1750
1751	/* don't accept any packets, disable all interrupts */
1752
1753	__raw_writeq(0, s->sbm_rxfilter);
1754	__raw_writeq(0, s->sbm_imr);
1755
1756	/* Turn off ticker */
1757
1758
1759	/* turn off receiver and transmitter */
1760
1761	__raw_writeq(0, s->sbm_macenable);
1762
1763	/* We're stopped now. */
1764
1765	s->sbm_state = sbmac_state_off;
1766
1767	/*
1768	 * Stop DMA channels (rings should be ok now)
1769	 */
1770
1771	sbdma_channel_stop(&(s->sbm_rxdma));
1772	sbdma_channel_stop(&(s->sbm_txdma));
1773
1774	/* Empty the receive and transmit rings */
1775
1776	sbdma_emptyring(&(s->sbm_rxdma));
1777	sbdma_emptyring(&(s->sbm_txdma));
1778
1779}
1780
1781/**********************************************************************
1782 *  SBMAC_SET_CHANNEL_STATE(state)
1783 *
1784 *  Set the channel's state ON or OFF
1785 *
1786 *  Input parameters:
1787 *  	   state - new state
1788 *
1789 *  Return value:
1790 *  	   old state
1791 ********************************************************************* */
1792static sbmac_state_t sbmac_set_channel_state(struct sbmac_softc *sc,
1793					     sbmac_state_t state)
1794{
1795	sbmac_state_t oldstate = sc->sbm_state;
1796
1797	/*
1798	 * If same as previous state, return
1799	 */
1800
1801	if (state == oldstate) {
1802		return oldstate;
1803	}
1804
1805	/*
1806	 * If new state is ON, turn channel on
1807	 */
1808
1809	if (state == sbmac_state_on) {
1810		sbmac_channel_start(sc);
1811	}
1812	else {
1813		sbmac_channel_stop(sc);
1814	}
1815
1816	/*
1817	 * Return previous state
1818	 */
1819
1820	return oldstate;
1821}
1822
1823
1824/**********************************************************************
1825 *  SBMAC_PROMISCUOUS_MODE(sc,onoff)
1826 *
1827 *  Turn on or off promiscuous mode
1828 *
1829 *  Input parameters:
1830 *  	   sc - softc
1831 *      onoff - 1 to turn on, 0 to turn off
1832 *
1833 *  Return value:
1834 *  	   nothing
1835 ********************************************************************* */
1836
1837static void sbmac_promiscuous_mode(struct sbmac_softc *sc,int onoff)
1838{
1839	uint64_t reg;
1840
1841	if (sc->sbm_state != sbmac_state_on)
1842		return;
1843
1844	if (onoff) {
1845		reg = __raw_readq(sc->sbm_rxfilter);
1846		reg |= M_MAC_ALLPKT_EN;
1847		__raw_writeq(reg, sc->sbm_rxfilter);
1848	}
1849	else {
1850		reg = __raw_readq(sc->sbm_rxfilter);
1851		reg &= ~M_MAC_ALLPKT_EN;
1852		__raw_writeq(reg, sc->sbm_rxfilter);
1853	}
1854}
1855
1856/**********************************************************************
 *  SBMAC_SET_IPHDR_OFFSET(sc)
 *
 *  Set the IP header offset to 15, assuming Ethernet encapsulation
1860 *
1861 *  Input parameters:
1862 *  	   sc - softc
1863 *
1864 *  Return value:
1865 *  	   nothing
1866 ********************************************************************* */
1867
1868static void sbmac_set_iphdr_offset(struct sbmac_softc *sc)
1869{
1870	uint64_t reg;
1871
	/* Hard code the offset to 15 for now */
1873	reg = __raw_readq(sc->sbm_rxfilter);
1874	reg &= ~M_MAC_IPHDR_OFFSET | V_MAC_IPHDR_OFFSET(15);
1875	__raw_writeq(reg, sc->sbm_rxfilter);
1876
1877	/* BCM1250 pass1 didn't have hardware checksum.  Everything
1878	   later does.  */
1879	if (soc_type == K_SYS_SOC_TYPE_BCM1250 && periph_rev < 2) {
1880		sc->rx_hw_checksum = DISABLE;
1881	} else {
1882		sc->rx_hw_checksum = ENABLE;
1883	}
1884}
1885
1886
1887/**********************************************************************
1888 *  SBMAC_ADDR2REG(ptr)
1889 *
1890 *  Convert six bytes into the 64-bit register value that
1891 *  we typically write into the SBMAC's address/mcast registers
1892 *
1893 *  Input parameters:
1894 *  	   ptr - pointer to 6 bytes
1895 *
1896 *  Return value:
1897 *  	   register value
1898 ********************************************************************* */
1899
1900static uint64_t sbmac_addr2reg(unsigned char *ptr)
1901{
1902	uint64_t reg = 0;
1903
1904	ptr += 6;
1905
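	/*
	 * Pack low-order byte first so that ptr[0] ends up in the least
	 * significant byte of the register value; e.g. a (hypothetical)
	 * address 02:11:22:33:44:55 becomes 0x0000554433221102.
	 */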
1906	reg |= (uint64_t) *(--ptr);
1907	reg <<= 8;
1908	reg |= (uint64_t) *(--ptr);
1909	reg <<= 8;
1910	reg |= (uint64_t) *(--ptr);
1911	reg <<= 8;
1912	reg |= (uint64_t) *(--ptr);
1913	reg <<= 8;
1914	reg |= (uint64_t) *(--ptr);
1915	reg <<= 8;
1916	reg |= (uint64_t) *(--ptr);
1917
1918	return reg;
1919}
1920
1921
1922/**********************************************************************
1923 *  SBMAC_SET_SPEED(s,speed)
1924 *
1925 *  Configure LAN speed for the specified MAC.
1926 *  Warning: must be called when MAC is off!
1927 *
1928 *  Input parameters:
1929 *  	   s - sbmac structure
1930 *  	   speed - speed to set MAC to (see sbmac_speed_t enum)
1931 *
1932 *  Return value:
1933 *  	   1 if successful
1934 *  	   0 indicates invalid parameters
1935 ********************************************************************* */
1936
1937static int sbmac_set_speed(struct sbmac_softc *s,sbmac_speed_t speed)
1938{
1939	uint64_t cfg;
1940	uint64_t framecfg;
1941
1942	/*
1943	 * Save new current values
1944	 */
1945
1946	s->sbm_speed = speed;
1947
1948	if (s->sbm_state == sbmac_state_on)
1949		return 0;	/* save for next restart */
1950
1951	/*
1952	 * Read current register values
1953	 */
1954
1955	cfg = __raw_readq(s->sbm_maccfg);
1956	framecfg = __raw_readq(s->sbm_framecfg);
1957
1958	/*
1959	 * Mask out the stuff we want to change
1960	 */
1961
1962	cfg &= ~(M_MAC_BURST_EN | M_MAC_SPEED_SEL);
1963	framecfg &= ~(M_MAC_IFG_RX | M_MAC_IFG_TX | M_MAC_IFG_THRSH |
1964		      M_MAC_SLOT_SIZE);
1965
1966	/*
1967	 * Now add in the new bits
1968	 */
1969
1970	switch (speed) {
1971	case sbmac_speed_10:
1972		framecfg |= V_MAC_IFG_RX_10 |
1973			V_MAC_IFG_TX_10 |
1974			K_MAC_IFG_THRSH_10 |
1975			V_MAC_SLOT_SIZE_10;
1976		cfg |= V_MAC_SPEED_SEL_10MBPS;
1977		break;
1978
1979	case sbmac_speed_100:
1980		framecfg |= V_MAC_IFG_RX_100 |
1981			V_MAC_IFG_TX_100 |
1982			V_MAC_IFG_THRSH_100 |
1983			V_MAC_SLOT_SIZE_100;
1984		cfg |= V_MAC_SPEED_SEL_100MBPS ;
1985		break;
1986
1987	case sbmac_speed_1000:
1988		framecfg |= V_MAC_IFG_RX_1000 |
1989			V_MAC_IFG_TX_1000 |
1990			V_MAC_IFG_THRSH_1000 |
1991			V_MAC_SLOT_SIZE_1000;
1992		cfg |= V_MAC_SPEED_SEL_1000MBPS | M_MAC_BURST_EN;
1993		break;
1994
1995	case sbmac_speed_auto:
1996		/* fall through */
1997	default:
1998		return 0;
1999	}
2000
2001	/*
2002	 * Send the bits back to the hardware
2003	 */
2004
2005	__raw_writeq(framecfg, s->sbm_framecfg);
2006	__raw_writeq(cfg, s->sbm_maccfg);
2007
2008	return 1;
2009}
2010
2011/**********************************************************************
2012 *  SBMAC_SET_DUPLEX(s,duplex,fc)
2013 *
2014 *  Set Ethernet duplex and flow control options for this MAC
2015 *  Warning: must be called when MAC is off!
2016 *
2017 *  Input parameters:
2018 *  	   s - sbmac structure
2019 *  	   duplex - duplex setting (see sbmac_duplex_t)
2020 *  	   fc - flow control setting (see sbmac_fc_t)
2021 *
2022 *  Return value:
2023 *  	   1 if ok
2024 *  	   0 if an invalid parameter combination was specified
2025 ********************************************************************* */
2026
2027static int sbmac_set_duplex(struct sbmac_softc *s,sbmac_duplex_t duplex,sbmac_fc_t fc)
2028{
2029	uint64_t cfg;
2030
2031	/*
2032	 * Save new current values
2033	 */
2034
2035	s->sbm_duplex = duplex;
2036	s->sbm_fc = fc;
2037
2038	if (s->sbm_state == sbmac_state_on)
2039		return 0;	/* save for next restart */
2040
2041	/*
2042	 * Read current register values
2043	 */
2044
2045	cfg = __raw_readq(s->sbm_maccfg);
2046
2047	/*
2048	 * Mask off the stuff we're about to change
2049	 */
2050
2051	cfg &= ~(M_MAC_FC_SEL | M_MAC_FC_CMD | M_MAC_HDX_EN);
2052
2053
2054	switch (duplex) {
2055	case sbmac_duplex_half:
2056		switch (fc) {
2057		case sbmac_fc_disabled:
2058			cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_DISABLED;
2059			break;
2060
2061		case sbmac_fc_collision:
2062			cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_ENABLED;
2063			break;
2064
2065		case sbmac_fc_carrier:
2066			cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_ENAB_FALSECARR;
2067			break;
2068
2069		case sbmac_fc_auto:
2070			/* fall through */
2071		case sbmac_fc_frame:		/* not valid in half duplex */
2072		default:			/* invalid selection */
2073			return 0;
2074		}
2075		break;
2076
2077	case sbmac_duplex_full:
2078		switch (fc) {
2079		case sbmac_fc_disabled:
2080			cfg |= V_MAC_FC_CMD_DISABLED;
2081			break;
2082
2083		case sbmac_fc_frame:
2084			cfg |= V_MAC_FC_CMD_ENABLED;
2085			break;
2086
2087		case sbmac_fc_collision:	/* not valid in full duplex */
2088		case sbmac_fc_carrier:		/* not valid in full duplex */
2089		case sbmac_fc_auto:
2090			/* fall through */
2091		default:
2092			return 0;
2093		}
2094		break;
2095	case sbmac_duplex_auto:
2096		break;
2097	}
2098
2099	/*
2100	 * Send the bits back to the hardware
2101	 */
2102
2103	__raw_writeq(cfg, s->sbm_maccfg);
2104
2105	return 1;
2106}
2107
2108
2109
2110
2111/**********************************************************************
2112 *  SBMAC_INTR()
2113 *
2114 *  Interrupt handler for MAC interrupts
2115 *
2116 *  Input parameters:
2117 *  	   irq - interrupt number
 *  	   dev_instance - net_device structure
2118 *
2119 *  Return value:
2120 *  	   IRQ_HANDLED if this MAC raised the interrupt, IRQ_NONE otherwise
2121 ********************************************************************* */
2122static irqreturn_t sbmac_intr(int irq,void *dev_instance)
2123{
2124	struct net_device *dev = (struct net_device *) dev_instance;
2125	struct sbmac_softc *sc = netdev_priv(dev);
2126	uint64_t isr;
2127	int handled = 0;
2128
2129	/*
2130	 * Read the ISR (this clears the bits in the real
2131	 * register, except for counter addr)
2132	 */
2133
2134	isr = __raw_readq(sc->sbm_isr) & ~M_MAC_COUNTER_ADDR;
2135
2136	if (isr == 0)
2137		return IRQ_RETVAL(0);
2138	handled = 1;
2139
2140	/*
2141	 * Transmits on channel 0
2142	 */
2143
2144	if (isr & (M_MAC_INT_CHANNEL << S_MAC_TX_CH0)) {
2145		sbdma_tx_process(sc,&(sc->sbm_txdma), 0);
2146#ifdef CONFIG_NETPOLL_TRAP
2147		if (netpoll_trap()) {
2148			if (test_and_clear_bit(__LINK_STATE_XOFF, &dev->state))
2149				__netif_schedule(dev);
2150		}
2151#endif
2152	}
2153
2154	if (isr & (M_MAC_INT_CHANNEL << S_MAC_RX_CH0)) {
2155		if (netif_rx_schedule_prep(dev)) {
2156			__raw_writeq(0, sc->sbm_imr);
2157			__netif_rx_schedule(dev);
2158			/* Depend on the exit from poll to reenable intr */
2159		}
2160		else {
2161			/* may leave some packets behind */
2162			sbdma_rx_process(sc,&(sc->sbm_rxdma),
2163					 SBMAC_MAX_RXDESCR * 2, 0);
2164		}
2165	}
2166	return IRQ_RETVAL(handled);
2167}
2168
2169/**********************************************************************
2170 *  SBMAC_START_TX(skb,dev)
2171 *
2172 *  Start output on the specified interface.  Basically, we
2173 *  queue as many buffers as we can until the ring fills up, or
2174 *  we run off the end of the queue, whichever comes first.
2175 *
2176 *  Input parameters:
2177 *  	   skb - sk_buff to transmit
2178 *  	   dev - net_device structure
 *
2179 *  Return value:
2180 *  	   0 if queued, 1 if the ring was full and the queue was stopped
2181 ********************************************************************* */
2182static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev)
2183{
2184	struct sbmac_softc *sc = netdev_priv(dev);
2185
2186	/* lock eth irq */
2187	spin_lock_irq (&sc->sbm_lock);
2188
2189	/*
2190	 * Put the buffer on the transmit ring.  If we
2191	 * don't have room, stop the queue.
2192	 */
2193
2194	if (sbdma_add_txbuffer(&(sc->sbm_txdma),skb)) {
2195		netif_stop_queue(dev);
2196		spin_unlock_irq(&sc->sbm_lock);
2197
2198		return 1;
2199	}
2200
2201	dev->trans_start = jiffies;
2202
2203	spin_unlock_irq (&sc->sbm_lock);
2204
2205	return 0;
2206}
2207
2208/**********************************************************************
2209 *  SBMAC_SETMULTI(sc)
2210 *
2211 *  Reprogram the multicast table into the hardware, given
2212 *  the list of multicasts associated with the interface
2213 *  structure.
2214 *
2215 *  Input parameters:
2216 *  	   sc - softc
2217 *
2218 *  Return value:
2219 *  	   nothing
2220 ********************************************************************* */
2221
2222static void sbmac_setmulti(struct sbmac_softc *sc)
2223{
2224	uint64_t reg;
2225	volatile void __iomem *port;
2226	int idx;
2227	struct dev_mc_list *mclist;
2228	struct net_device *dev = sc->sbm_dev;
2229
2230	/*
2231	 * Clear out entire multicast table.  We do this by nuking
2232	 * the entire hash table and all the direct matches except
2233	 * the first one, which is used for our station address
2234	 */
2235
2236	for (idx = 1; idx < MAC_ADDR_COUNT; idx++) {
2237		port = sc->sbm_base + R_MAC_ADDR_BASE+(idx*sizeof(uint64_t));
2238		__raw_writeq(0, port);
2239	}
2240
2241	for (idx = 0; idx < MAC_HASH_COUNT; idx++) {
2242		port = sc->sbm_base + R_MAC_HASH_BASE+(idx*sizeof(uint64_t));
2243		__raw_writeq(0, port);
2244	}
2245
2246	/*
2247	 * Clear the filter to say we don't want any multicasts.
2248	 */
2249
2250	reg = __raw_readq(sc->sbm_rxfilter);
2251	reg &= ~(M_MAC_MCAST_INV | M_MAC_MCAST_EN);
2252	__raw_writeq(reg, sc->sbm_rxfilter);
2253
2254	if (dev->flags & IFF_ALLMULTI) {
2255		/*
2256		 * Enable ALL multicasts.  Do this by inverting the
2257		 * multicast enable bit.
2258		 */
2259		reg = __raw_readq(sc->sbm_rxfilter);
2260		reg |= (M_MAC_MCAST_INV | M_MAC_MCAST_EN);
2261		__raw_writeq(reg, sc->sbm_rxfilter);
2262		return;
2263	}
2264
2265
2266	/*
2267	 * Program new multicast entries.  For now, only use the
2268	 * perfect filter.  In the future we'll need to use the
2269	 * hash filter if the perfect filter overflows
2270	 */
2271
2272
2273	idx = 1;		/* skip station address */
2274	mclist = dev->mc_list;
2275	while (mclist && (idx < MAC_ADDR_COUNT)) {
2276		reg = sbmac_addr2reg(mclist->dmi_addr);
2277		port = sc->sbm_base + R_MAC_ADDR_BASE+(idx * sizeof(uint64_t));
2278		__raw_writeq(reg, port);
2279		idx++;
2280		mclist = mclist->next;
2281	}
2282
2283	/*
2284	 * Enable the "accept multicast bits" if we programmed at least one
2285	 * multicast.
2286	 */
2287
2288	if (idx > 1) {
2289		reg = __raw_readq(sc->sbm_rxfilter);
2290		reg |= M_MAC_MCAST_EN;
2291		__raw_writeq(reg, sc->sbm_rxfilter);
2292	}
2293}
2294
2295#if defined(SBMAC_ETH0_HWADDR) || defined(SBMAC_ETH1_HWADDR) || \
2296	defined(SBMAC_ETH2_HWADDR) || defined(SBMAC_ETH3_HWADDR)
2297/**********************************************************************
2298 *  SBMAC_PARSE_XDIGIT(str)
2299 *
2300 *  Parse a hex digit, returning its value
2301 *
2302 *  Input parameters:
2303 *  	   str - character
2304 *
2305 *  Return value:
2306 *  	   hex value, or -1 if invalid
2307 ********************************************************************* */
2308
2309static int sbmac_parse_xdigit(char str)
2310{
2311	int digit;
2312
2313	if ((str >= '0') && (str <= '9'))
2314		digit = str - '0';
2315	else if ((str >= 'a') && (str <= 'f'))
2316		digit = str - 'a' + 10;
2317	else if ((str >= 'A') && (str <= 'F'))
2318		digit = str - 'A' + 10;
2319	else
2320		return -1;
2321
2322	return digit;
2323}
2324
2325/**********************************************************************
2326 *  SBMAC_PARSE_HWADDR(str,hwaddr)
2327 *
2328 *  Convert a string in the form xx:xx:xx:xx:xx:xx into a 6-byte
2329 *  Ethernet address.
2330 *
2331 *  Input parameters:
2332 *  	   str - string
2333 *  	   hwaddr - pointer to hardware address
2334 *
2335 *  Return value:
2336 *  	   0 if ok, else -1
2337 ********************************************************************* */
2338
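/*
 * Accepts "xx:xx:xx:xx:xx:xx" or "xx-xx-xx-xx-xx-xx"; a group containing a
 * single hex digit (e.g. "0:1:2:3:4:5") is also handled.
 */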
2339static int sbmac_parse_hwaddr(char *str, unsigned char *hwaddr)
2340{
2341	int digit1,digit2;
2342	int idx = 6;
2343
2344	while (*str && (idx > 0)) {
2345		digit1 = sbmac_parse_xdigit(*str);
2346		if (digit1 < 0)
2347			return -1;
2348		str++;
2349		if (!*str)
2350			return -1;
2351
2352		if ((*str == ':') || (*str == '-')) {
2353			digit2 = digit1;
2354			digit1 = 0;
2355		}
2356		else {
2357			digit2 = sbmac_parse_xdigit(*str);
2358			if (digit2 < 0)
2359				return -1;
2360			str++;
2361		}
2362
2363		*hwaddr++ = (digit1 << 4) | digit2;
2364		idx--;
2365
2366		if (*str == '-')
2367			str++;
2368		if (*str == ':')
2369			str++;
2370	}
2371	return 0;
2372}
2373#endif
2374
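/*
 * The receive buffers are sized from ENET_PACKET_SIZE, so an MTU larger
 * than that cannot be honored without reallocating the receive ring;
 * reject it here.
 */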
2375static int sb1250_change_mtu(struct net_device *_dev, int new_mtu)
2376{
2377	if (new_mtu > ENET_PACKET_SIZE)
2378		return -EINVAL;
2379	_dev->mtu = new_mtu;
2380	printk(KERN_INFO "%s: changing the MTU to %d\n", _dev->name, new_mtu);
2381	return 0;
2382}
2383
2384/**********************************************************************
2385 *  SBMAC_INIT(dev)
2386 *
2387 *  Attach routine - init hardware and hook ourselves into linux
2388 *
2389 *  Input parameters:
2390 *  	   dev - net_device structure
2391 *
2392 *  Return value:
2393 *  	   status
2394 ********************************************************************* */
2395
2396static int sbmac_init(struct net_device *dev, int idx)
2397{
2398	struct sbmac_softc *sc;
2399	unsigned char *eaddr;
2400	uint64_t ea_reg;
2401	int i;
2402	int err;
2403
2404	sc = netdev_priv(dev);
2405
2406	/* Determine controller base address */
2407
2408	sc->sbm_base = IOADDR(dev->base_addr);
2409	sc->sbm_dev = dev;
2410	sc->sbe_idx = idx;
2411
2412	eaddr = sc->sbm_hwaddr;
2413
2414	/*
2415	 * Read the Ethernet address.  The firmware left this programmed
2416	 * for us in the ethernet address register for each mac.
2417	 */
2418
2419	ea_reg = __raw_readq(sc->sbm_base + R_MAC_ETHERNET_ADDR);
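	/*
	 * Clearing the address register appears to mark this MAC as
	 * claimed; the probe loop in sbmac_init_module() skips any MAC
	 * whose address register reads back as zero.
	 */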
2420	__raw_writeq(0, sc->sbm_base + R_MAC_ETHERNET_ADDR);
2421	for (i = 0; i < 6; i++) {
2422		eaddr[i] = (uint8_t) (ea_reg & 0xFF);
2423		ea_reg >>= 8;
2424	}
2425
2426	for (i = 0; i < 6; i++) {
2427		dev->dev_addr[i] = eaddr[i];
2428	}
2429
2430
2431	/*
2432	 * Init packet size
2433	 */
2434
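	/*
	 * A full packet plus two cache lines of slack (which appears to
	 * allow cache-line alignment of receive data) plus ETHER_ALIGN so
	 * the IP header stays word-aligned.
	 */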
2435	sc->sbm_buffersize = ENET_PACKET_SIZE + SMP_CACHE_BYTES * 2 + ETHER_ALIGN;
2436
2437	/*
2438	 * Initialize context (get pointers to registers and stuff), then
2439	 * allocate the memory for the descriptor tables.
2440	 */
2441
2442	sbmac_initctx(sc);
2443
2444	/*
2445	 * Set up Linux device callbacks
2446	 */
2447
2448	spin_lock_init(&(sc->sbm_lock));
2449
2450	dev->open               = sbmac_open;
2451	dev->hard_start_xmit    = sbmac_start_tx;
2452	dev->stop               = sbmac_close;
2453	dev->get_stats          = sbmac_get_stats;
2454	dev->set_multicast_list = sbmac_set_rx_mode;
2455	dev->do_ioctl           = sbmac_mii_ioctl;
2456	dev->tx_timeout         = sbmac_tx_timeout;
2457	dev->watchdog_timeo     = TX_TIMEOUT;
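	/* Old-style NAPI hookup: poll callback plus a per-pass budget of 16 */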
2458	dev->poll               = sbmac_poll;
2459	dev->weight             = 16;
2460
2461	dev->change_mtu         = sb1250_change_mtu;
2462#ifdef CONFIG_NET_POLL_CONTROLLER
2463	dev->poll_controller = sbmac_netpoll;
2464#endif
2465
2466	/* Needed on pass-2 and later parts for the Rx H/W checksum feature */
2467	sbmac_set_iphdr_offset(sc);
2468
2469	err = register_netdev(dev);
2470	if (err)
2471		goto out_uninit;
2472
2473	if (sc->rx_hw_checksum == ENABLE) {
2474		printk(KERN_INFO "%s: enabling TCP rcv checksum\n",
2475			sc->sbm_dev->name);
2476	}
2477
2478	/*
2479	 * Display Ethernet address (this is called during the config
2480	 * process so we need to finish off the config message that
2481	 * was being displayed)
2482	 */
2483	printk(KERN_INFO
2484	       "%s: SiByte Ethernet at 0x%08lX, address: %02X:%02X:%02X:%02X:%02X:%02X\n",
2485	       dev->name, dev->base_addr,
2486	       eaddr[0],eaddr[1],eaddr[2],eaddr[3],eaddr[4],eaddr[5]);
2487
2488
2489	return 0;
2490
2491out_uninit:
2492	sbmac_uninitctx(sc);
2493
2494	return err;
2495}
2496
2497
2498static int sbmac_open(struct net_device *dev)
2499{
2500	struct sbmac_softc *sc = netdev_priv(dev);
2501
2502	if (debug > 1) {
2503		printk(KERN_DEBUG "%s: sbmac_open() irq %d.\n", dev->name, dev->irq);
2504	}
2505
2506	/*
2507	 * map/route interrupt (clear status first, in case something
2508	 * weird is pending; we haven't initialized the mac registers
2509	 * yet)
2510	 */
2511
2512	__raw_readq(sc->sbm_isr);
2513	if (request_irq(dev->irq, &sbmac_intr, IRQF_SHARED, dev->name, dev))
2514		return -EBUSY;
2515
2516	/*
2517	 * Probe phy address
2518	 */
2519
2520	if (sbmac_mii_probe(dev) == -1) {
2521		printk(KERN_ERR "%s: failed to probe PHY.\n", dev->name);
		free_irq(dev->irq, dev);
2522		return -EINVAL;
2523	}
2524
2525	/*
2526	 * Configure default speed
2527	 */
2528
2529	sbmac_mii_poll(sc,noisy_mii);
2530
2531	/*
2532	 * Turn on the channel
2533	 */
2534
2535	sbmac_set_channel_state(sc,sbmac_state_on);
2536
2540
2541	netif_start_queue(dev);
2542
2543	sbmac_set_rx_mode(dev);
2544
2545	/* Set the timer to check for link beat. */
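	/* First poll fires in roughly 20 ms; sbmac_timer() then re-arms itself once per second. */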
2546	init_timer(&sc->sbm_timer);
2547	sc->sbm_timer.expires = jiffies + 2 * HZ/100;
2548	sc->sbm_timer.data = (unsigned long)dev;
2549	sc->sbm_timer.function = &sbmac_timer;
2550	add_timer(&sc->sbm_timer);
2551
2552	return 0;
2553}
2554
2555static int sbmac_mii_probe(struct net_device *dev)
2556{
2557	int i;
2558	struct sbmac_softc *s = netdev_priv(dev);
2559	u16 bmsr, id1, id2;
2560	u32 vendor, device;
2561
2562	for (i = 1; i < 31; i++) {
2563		bmsr = sbmac_mii_read(s, i, MII_BMSR);
2564		if (bmsr != 0) {
2565			s->sbm_phys[0] = i;
2566			id1 = sbmac_mii_read(s, i, MII_PHYIDR1);
2567			id2 = sbmac_mii_read(s, i, MII_PHYIDR2);
2568			vendor = ((u32)id1 << 6) | ((id2 >> 10) & 0x3f);
2569			device = (id2 >> 4) & 0x3f;
2570
2571			printk(KERN_INFO "%s: found phy %d, vendor %06x part %02x\n",
2572				dev->name, i, vendor, device);
2573			return i;
2574		}
2575	}
2576	return -1;
2577}
2578
2579
2580static int sbmac_mii_poll(struct sbmac_softc *s,int noisy)
2581{
2582    int bmsr,bmcr,k1stsr,anlpar;
2583    int chg;
2584    char buffer[100];
2585    char *p = buffer;
2586
2587    /* Read the mode status and mode control registers. */
2588    bmsr = sbmac_mii_read(s,s->sbm_phys[0],MII_BMSR);
2589    bmcr = sbmac_mii_read(s,s->sbm_phys[0],MII_BMCR);
2590
2591    /* get the link partner status */
2592    anlpar = sbmac_mii_read(s,s->sbm_phys[0],MII_ANLPAR);
2593
2594    /* if supported, read the 1000baseT register */
2595    if (bmsr & BMSR_1000BT_XSR) {
2596	k1stsr = sbmac_mii_read(s,s->sbm_phys[0],MII_K1STSR);
2597	}
2598    else {
2599	k1stsr = 0;
2600	}
2601
2602    chg = 0;
2603
2604    if ((bmsr & BMSR_LINKSTAT) == 0) {
2605	/*
2606	 * If link status is down, clear out old info so that when
2607	 * it comes back up it will force us to reconfigure speed
2608	 */
2609	s->sbm_phy_oldbmsr = 0;
2610	s->sbm_phy_oldanlpar = 0;
2611	s->sbm_phy_oldk1stsr = 0;
2612	return 0;
2613	}
2614
2615    if ((s->sbm_phy_oldbmsr != bmsr) ||
2616	(s->sbm_phy_oldanlpar != anlpar) ||
2617	(s->sbm_phy_oldk1stsr != k1stsr)) {
2618	if (debug > 1) {
2619	    printk(KERN_DEBUG "%s: bmsr:%x/%x anlpar:%x/%x  k1stsr:%x/%x\n",
2620	       s->sbm_dev->name,
2621	       s->sbm_phy_oldbmsr,bmsr,
2622	       s->sbm_phy_oldanlpar,anlpar,
2623	       s->sbm_phy_oldk1stsr,k1stsr);
2624	    }
2625	s->sbm_phy_oldbmsr = bmsr;
2626	s->sbm_phy_oldanlpar = anlpar;
2627	s->sbm_phy_oldk1stsr = k1stsr;
2628	chg = 1;
2629	}
2630
2631    if (chg == 0)
2632	    return 0;
2633
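    /*
     * Pick the best speed/duplex the link partner advertises, checking
     * 1000BaseT first, then 100BaseT, then 10BaseT.
     */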
2634    p += sprintf(p,"Link speed: ");
2635
2636    if (k1stsr & K1STSR_LP1KFD) {
2637	s->sbm_speed = sbmac_speed_1000;
2638	s->sbm_duplex = sbmac_duplex_full;
2639	s->sbm_fc = sbmac_fc_frame;
2640	p += sprintf(p,"1000BaseT FDX");
2641	}
2642    else if (k1stsr & K1STSR_LP1KHD) {
2643	s->sbm_speed = sbmac_speed_1000;
2644	s->sbm_duplex = sbmac_duplex_half;
2645	s->sbm_fc = sbmac_fc_disabled;
2646	p += sprintf(p,"1000BaseT HDX");
2647	}
2648    else if (anlpar & ANLPAR_TXFD) {
2649	s->sbm_speed = sbmac_speed_100;
2650	s->sbm_duplex = sbmac_duplex_full;
2651	s->sbm_fc = (anlpar & ANLPAR_PAUSE) ? sbmac_fc_frame : sbmac_fc_disabled;
2652	p += sprintf(p,"100BaseT FDX");
2653	}
2654    else if (anlpar & ANLPAR_TXHD) {
2655	s->sbm_speed = sbmac_speed_100;
2656	s->sbm_duplex = sbmac_duplex_half;
2657	s->sbm_fc = sbmac_fc_disabled;
2658	p += sprintf(p,"100BaseT HDX");
2659	}
2660    else if (anlpar & ANLPAR_10FD) {
2661	s->sbm_speed = sbmac_speed_10;
2662	s->sbm_duplex = sbmac_duplex_full;
2663	s->sbm_fc = sbmac_fc_frame;
2664	p += sprintf(p,"10BaseT FDX");
2665	}
2666    else if (anlpar & ANLPAR_10HD) {
2667	s->sbm_speed = sbmac_speed_10;
2668	s->sbm_duplex = sbmac_duplex_half;
2669	s->sbm_fc = sbmac_fc_collision;
2670	p += sprintf(p,"10BaseT HDX");
2671	}
2672    else {
2673	p += sprintf(p,"Unknown");
2674	}
2675
2676    if (noisy) {
2677	    printk(KERN_INFO "%s: %s\n",s->sbm_dev->name,buffer);
2678	    }
2679
2680    return 1;
2681}
2682
2683
2684static void sbmac_timer(unsigned long data)
2685{
2686	struct net_device *dev = (struct net_device *)data;
2687	struct sbmac_softc *sc = netdev_priv(dev);
2688	int next_tick = HZ;
2689	int mii_status;
2690
2691	spin_lock_irq (&sc->sbm_lock);
2692
2693	/* make IFF_RUNNING follow the MII status bit "Link established" */
2694	mii_status = sbmac_mii_read(sc, sc->sbm_phys[0], MII_BMSR);
2695
2696	if ((mii_status & BMSR_LINKSTAT) != sc->sbm_phy_oldlinkstat) {
2697		sc->sbm_phy_oldlinkstat = mii_status & BMSR_LINKSTAT;
2698		if (mii_status & BMSR_LINKSTAT) {
2699			netif_carrier_on(dev);
2700		}
2701		else {
2702			netif_carrier_off(dev);
2703		}
2704	}
2705
2706	/*
2707	 * Poll the PHY to see what speed we should be running at
2708	 */
2709
2710	if (sbmac_mii_poll(sc,noisy_mii)) {
2711		if (sc->sbm_state != sbmac_state_off) {
2712			/*
2713			 * something changed, restart the channel
2714			 */
2715			if (debug > 1) {
2716				printk(KERN_DEBUG "%s: restarting channel because speed changed\n",
2717				       sc->sbm_dev->name);
2718			}
2719			sbmac_channel_stop(sc);
2720			sbmac_channel_start(sc);
2721		}
2722	}
2723
2724	spin_unlock_irq (&sc->sbm_lock);
2725
2726	sc->sbm_timer.expires = jiffies + next_tick;
2727	add_timer(&sc->sbm_timer);
2728}
2729
2730
2731static void sbmac_tx_timeout (struct net_device *dev)
2732{
2733	struct sbmac_softc *sc = netdev_priv(dev);
2734
2735	spin_lock_irq (&sc->sbm_lock);
2736
2737
2738	dev->trans_start = jiffies;
2739	sc->sbm_stats.tx_errors++;
2740
2741	spin_unlock_irq (&sc->sbm_lock);
2742
2743	printk (KERN_WARNING "%s: Transmit timed out\n",dev->name);
2744}
2745
2746
2747
2748
2749static struct net_device_stats *sbmac_get_stats(struct net_device *dev)
2750{
2751	struct sbmac_softc *sc = netdev_priv(dev);
2752	unsigned long flags;
2753
2754	spin_lock_irqsave(&sc->sbm_lock, flags);
2755
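	/* The counters are updated in the rx/tx processing paths, so there
	   is nothing extra to fold in here. */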
2756
2757	spin_unlock_irqrestore(&sc->sbm_lock, flags);
2758
2759	return &sc->sbm_stats;
2760}
2761
2762
2763
2764static void sbmac_set_rx_mode(struct net_device *dev)
2765{
2766	unsigned long flags;
2767	struct sbmac_softc *sc = netdev_priv(dev);
2768
2769	spin_lock_irqsave(&sc->sbm_lock, flags);
2770	if ((dev->flags ^ sc->sbm_devflags) & IFF_PROMISC) {
2771		/*
2772		 * Promiscuous changed.
2773		 */
2774
2775		if (dev->flags & IFF_PROMISC) {
2776			sbmac_promiscuous_mode(sc,1);
2777		}
2778		else {
2779			sbmac_promiscuous_mode(sc,0);
2780		}
2781	}
2782	spin_unlock_irqrestore(&sc->sbm_lock, flags);
2783
2784	/*
2785	 * Program the multicasts.  Do this every time.
2786	 */
2787
2788	sbmac_setmulti(sc);
2789
2790}
2791
2792static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2793{
2794	struct sbmac_softc *sc = netdev_priv(dev);
2795	u16 *data = (u16 *)&rq->ifr_ifru;
2796	unsigned long flags;
2797	int retval;
2798
2799	spin_lock_irqsave(&sc->sbm_lock, flags);
2800	retval = 0;
2801
2802	switch(cmd) {
2803	case SIOCDEVPRIVATE:		/* Get the address of the PHY in use. */
2804		data[0] = sc->sbm_phys[0] & 0x1f;
2805		/* Fall Through */
2806	case SIOCDEVPRIVATE+1:		/* Read the specified MII register. */
2807		data[3] = sbmac_mii_read(sc, data[0] & 0x1f, data[1] & 0x1f);
2808		break;
2809	case SIOCDEVPRIVATE+2:		/* Write the specified MII register */
2810		if (!capable(CAP_NET_ADMIN)) {
2811			retval = -EPERM;
2812			break;
2813		}
2814		if (debug > 1) {
2815			printk(KERN_DEBUG "%s: sbmac_mii_ioctl: write %02X %02X %02X\n",
2816			       dev->name, data[0], data[1], data[2]);
2817		}
2818		sbmac_mii_write(sc, data[0] & 0x1f, data[1] & 0x1f, data[2]);
2819		break;
2820	default:
2821		retval = -EOPNOTSUPP;
2822	}
2823
2824	spin_unlock_irqrestore(&sc->sbm_lock, flags);
2825	return retval;
2826}
2827
2828static int sbmac_close(struct net_device *dev)
2829{
2830	struct sbmac_softc *sc = netdev_priv(dev);
2831	unsigned long flags;
2832	int irq;
2833
2834	sbmac_set_channel_state(sc,sbmac_state_off);
2835
2836	del_timer_sync(&sc->sbm_timer);
2837
2838	spin_lock_irqsave(&sc->sbm_lock, flags);
2839
2840	netif_stop_queue(dev);
2841
2842	if (debug > 1) {
2843		printk(KERN_DEBUG "%s: Shutting down ethercard\n",dev->name);
2844	}
2845
2846	spin_unlock_irqrestore(&sc->sbm_lock, flags);
2847
2848	irq = dev->irq;
2849	synchronize_irq(irq);
2850	free_irq(irq, dev);
2851
2852	sbdma_emptyring(&(sc->sbm_txdma));
2853	sbdma_emptyring(&(sc->sbm_rxdma));
2854
2855	return 0;
2856}
2857
2858static int sbmac_poll(struct net_device *dev, int *budget)
2859{
2860	int work_to_do;
2861	int work_done;
2862	struct sbmac_softc *sc = netdev_priv(dev);
2863
2864	work_to_do = min(*budget, dev->quota);
2865	work_done = sbdma_rx_process(sc, &(sc->sbm_rxdma), work_to_do, 1);
2866
2867	if (work_done > work_to_do)
2868		printk(KERN_ERR "%s exceeded work_to_do budget=%d quota=%d work-done=%d\n",
2869		       sc->sbm_dev->name, *budget, dev->quota, work_done);
2870
2871	sbdma_tx_process(sc, &(sc->sbm_txdma), 1);
2872
2873	*budget -= work_done;
2874	dev->quota -= work_done;
2875
2876	if (work_done < work_to_do) {
2877		netif_rx_complete(dev);
2878
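		/*
		 * Re-enable the MAC interrupts that sbmac_intr() masked off
		 * before scheduling this poll; with coalescing enabled only
		 * the EOP count/timer interrupts are requested.
		 */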
2879#ifdef CONFIG_SBMAC_COALESCE
2880		__raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) |
2881			     ((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_RX_CH0),
2882			     sc->sbm_imr);
2883#else
2884		__raw_writeq((M_MAC_INT_CHANNEL << S_MAC_TX_CH0) |
2885			     (M_MAC_INT_CHANNEL << S_MAC_RX_CH0), sc->sbm_imr);
2886#endif
2887	}
2888
2889	return (work_done >= work_to_do);
2890}
2891
2892#if defined(SBMAC_ETH0_HWADDR) || defined(SBMAC_ETH1_HWADDR) || \
2893	defined(SBMAC_ETH2_HWADDR) || defined(SBMAC_ETH3_HWADDR)
2894static void
2895sbmac_setup_hwaddr(int chan,char *addr)
2896{
2897	uint8_t eaddr[6];
2898	uint64_t val;
2899	unsigned long port;
2900
2901	port = A_MAC_CHANNEL_BASE(chan);
2902	sbmac_parse_hwaddr(addr,eaddr);
2903	val = sbmac_addr2reg(eaddr);
2904	__raw_writeq(val, IOADDR(port+R_MAC_ETHERNET_ADDR));
2905	val = __raw_readq(IOADDR(port+R_MAC_ETHERNET_ADDR));
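	/* Read back, presumably to flush the posted write; the value itself is unused. */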
2906}
2907#endif
2908
2909static struct net_device *dev_sbmac[MAX_UNITS];
2910
2911static int __init
2912sbmac_init_module(void)
2913{
2914	int idx;
2915	struct net_device *dev;
2916	unsigned long port;
2917	int chip_max_units;
2918
2919	/* Set the number of available units based on the SOC type.  */
2920	switch (soc_type) {
2921	case K_SYS_SOC_TYPE_BCM1250:
2922	case K_SYS_SOC_TYPE_BCM1250_ALT:
2923		chip_max_units = 3;
2924		break;
2925	case K_SYS_SOC_TYPE_BCM1120:
2926	case K_SYS_SOC_TYPE_BCM1125:
2927	case K_SYS_SOC_TYPE_BCM1125H:
2928	case K_SYS_SOC_TYPE_BCM1250_ALT2: /* Hybrid */
2929		chip_max_units = 2;
2930		break;
2931	case K_SYS_SOC_TYPE_BCM1x55:
2932	case K_SYS_SOC_TYPE_BCM1x80:
2933		chip_max_units = 4;
2934		break;
2935	default:
2936		chip_max_units = 0;
2937		break;
2938	}
2939	if (chip_max_units > MAX_UNITS)
2940		chip_max_units = MAX_UNITS;
2941
2942	/*
2943	 * For bringup when not using the firmware, we can pre-fill
2944	 * the MAC addresses using the hard-coded values specified
2945	 * in this file (or eventually from a config file).
2946	 */
2947#ifdef SBMAC_ETH0_HWADDR
2948	if (chip_max_units > 0)
2949	  sbmac_setup_hwaddr(0,SBMAC_ETH0_HWADDR);
2950#endif
2951#ifdef SBMAC_ETH1_HWADDR
2952	if (chip_max_units > 1)
2953	  sbmac_setup_hwaddr(1,SBMAC_ETH1_HWADDR);
2954#endif
2955#ifdef SBMAC_ETH2_HWADDR
2956	if (chip_max_units > 2)
2957	  sbmac_setup_hwaddr(2,SBMAC_ETH2_HWADDR);
2958#endif
2959#ifdef SBMAC_ETH3_HWADDR
2960	if (chip_max_units > 3)
2961	  sbmac_setup_hwaddr(3,SBMAC_ETH3_HWADDR);
2962#endif
2963
2964	/*
2965	 * Walk through the Ethernet controllers and find
2966	 * those who have their MAC addresses set.
2967	 */
2968	for (idx = 0; idx < chip_max_units; idx++) {
2969
2970		/*
2971		 * This is the base address of the MAC.
2972		 */
2973
2974		port = A_MAC_CHANNEL_BASE(idx);
2975
2976		/*
2977		 * The R_MAC_ETHERNET_ADDR register will be set to some nonzero
2978		 * value for us by the firmware if we are going to use this MAC.
2979		 * If we find a zero, skip this MAC.
2980		 */
2981
2982		sbmac_orig_hwaddr[idx] = __raw_readq(IOADDR(port+R_MAC_ETHERNET_ADDR));
2983		if (sbmac_orig_hwaddr[idx] == 0) {
2984			printk(KERN_DEBUG "sbmac: not configuring MAC at "
2985			       "%lx\n", port);
2986			continue;
2987		}
2988
2989		/*
2990		 * Okay, cool.  Initialize this MAC.
2991		 */
2992
2993		dev = alloc_etherdev(sizeof(struct sbmac_softc));
2994		if (!dev)
2995			return -ENOMEM;
2996
2997		printk(KERN_DEBUG "sbmac: configuring MAC at %lx\n", port);
2998
2999		dev->irq = UNIT_INT(idx);
3000		dev->base_addr = port;
3001		dev->mem_end = 0;
3002		if (sbmac_init(dev, idx)) {
3003			port = A_MAC_CHANNEL_BASE(idx);
3004			__raw_writeq(sbmac_orig_hwaddr[idx], IOADDR(port+R_MAC_ETHERNET_ADDR));
3005			free_netdev(dev);
3006			continue;
3007		}
3008		dev_sbmac[idx] = dev;
3009	}
3010	return 0;
3011}
3012
3013
3014static void __exit
3015sbmac_cleanup_module(void)
3016{
3017	struct net_device *dev;
3018	int idx;
3019
3020	for (idx = 0; idx < MAX_UNITS; idx++) {
3021		struct sbmac_softc *sc;
3022		dev = dev_sbmac[idx];
3023		if (!dev)
3024			continue;
3025
3026		sc = netdev_priv(dev);
3027		unregister_netdev(dev);
3028		sbmac_uninitctx(sc);
3029		free_netdev(dev);
3030	}
3031}
3032
3033module_init(sbmac_init_module);
3034module_exit(sbmac_cleanup_module);
3035