1/*  *********************************************************************
2    *  Broadcom Common Firmware Environment (CFE)
3    *
    *  BCM5700/Tigon3 (10/100/1000 Ethernet MAC) driver	File: dev_bcm5700.c
5    *
6    *  Author:  Ed Satterthwaite
7    *
8    *********************************************************************
9    *
10    *  Copyright 2000,2001,2002,2003
11    *  Broadcom Corporation. All rights reserved.
12    *
13    *  This software is furnished under license and may be used and
14    *  copied only in accordance with the following terms and
15    *  conditions.  Subject to these conditions, you may download,
16    *  copy, install, use, modify and distribute modified or unmodified
17    *  copies of this software in source and/or binary form.  No title
18    *  or ownership is transferred hereby.
19    *
20    *  1) Any source code used, modified or distributed must reproduce
21    *     and retain this copyright notice and list of conditions
22    *     as they appear in the source file.
23    *
24    *  2) No right is granted to use any trade name, trademark, or
25    *     logo of Broadcom Corporation.  The "Broadcom Corporation"
26    *     name may not be used to endorse or promote products derived
27    *     from this software without the prior written permission of
28    *     Broadcom Corporation.
29    *
30    *  3) THIS SOFTWARE IS PROVIDED "AS-IS" AND ANY EXPRESS OR
31    *     IMPLIED WARRANTIES, INCLUDING BUT NOT LIMITED TO, ANY IMPLIED
32    *     WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
33    *     PURPOSE, OR NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT
34    *     SHALL BROADCOM BE LIABLE FOR ANY DAMAGES WHATSOEVER, AND IN
35    *     PARTICULAR, BROADCOM SHALL NOT BE LIABLE FOR DIRECT, INDIRECT,
36    *     INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
37    *     (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
38    *     GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
39    *     BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
40    *     OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
41    *     TORT (INCLUDING NEGLIGENCE OR OTHERWISE), EVEN IF ADVISED OF
42    *     THE POSSIBILITY OF SUCH DAMAGE.
43    ********************************************************************* */
44
45#include "sbmips.h"
46
47#ifndef _SB_MAKE64
48#define _SB_MAKE64(x) ((uint64_t)(x))
49#endif
50#ifndef _SB_MAKEMASK1
51#define _SB_MAKEMASK1(n) (_SB_MAKE64(1) << _SB_MAKE64(n))
52#endif
53
54#include "lib_types.h"
55#include "lib_physio.h"
56#include "lib_malloc.h"
57#include "lib_string.h"
58#include "lib_printf.h"
59#include "lib_queue.h"
60
61#include "cfe_iocb.h"
62#include "cfe_device.h"
63#include "cfe_ioctl.h"
64#include "cfe_timer.h"
65#include "cfe_error.h"
66#include "cfe_irq.h"
67
68#include "pcivar.h"
69#include "pcireg.h"
70
71#include "bcm5700.h"
72#include "mii.h"
73
74#include "bsp_config.h"
75
76#include "proto/ethernet.h"
77#include "bcmdevs.h"
78#include "bcmutils.h"
79#include "bcmnvram.h"
80#include "sbconfig.h"
81#include "sbutils.h"
82#include "hndgige.h"
83#include "bcmrobo.h"
84
85static int sbgige = -1;
86
87/* This is a driver for the Broadcom 570x ("Tigon 3") 10/100/1000 MAC.
88   Currently, the 5700, 5701, 5703C, 5704C and 5705 have been tested.
89   Only 10/100/1000 BASE-T PHYs are supported; variants with SerDes
90   PHYs are not supported.
91
92   Reference:
93     Host Programmer Interface Specification for the BCM570X Family
94       of Highly-Integrated Media Access Controllers, 570X-PG106-R.
95     Broadcom Corp., 16215 Alton Parkway, Irvine CA, 09/27/02
96
97   This driver takes advantage of DMA coherence in systems that
98   support it (e.g., SB1250).  For systems without coherent DMA (e.g.,
99   BCM47xx SOCs), descriptor and packet buffer memory is explicitly
100   flushed.
101
102   The driver prefers "preserve bit lanes" mode for big-endian
103   systems that provide the option, but it can use "preserve byte
104   lanes" as well.
105
106   Note that the 5705 does not fully map all address ranges.  Per
107   the manual, reads and writes of the unmapped regions are permitted
108   and do not fault; however, it apparently has some poisoned registers,
109   at least in early revs, that should not be touched.  See the
110   conditionals in the code. */
111
112/* PIOSWAP controls whether word-swapping takes place for transactions
113   in which the 570x is the target device.  In theory, either value
114   should work (with access macros adjusted as below) and it should be
115   set to be consistent with the settings for 570x as initiator.
116   Empirically, however, some combinations only work with no swap.
117   For big-endian systems:
118
119                          SWAP=0    SWAP=1
120   5700     32 PCI          OK        OK
121   5700     64 Sturgeon     OK        OK
122   5701-32  32 PCI          OK        OK
123   5701-32  64 Sturgeon     OK        OK
124   5701-32  64 Golem        OK        OK
125   5701-64  64 Sturgeon     OK        OK
126   5701-64  64 Golem        OK       FAIL
127   5705     32 PCI          OK        OK
128   5705     64 Sturgeon    (OK)*     FAIL
129   5705     64 Golem        OK        OK
130
131   For little-endian systems, only SWAP=1 appears to work.
132
133   * PCI status/interrupt ordering problem under load.  */
134
135#if	__MIPSEL
136#define	PIOSWAP	1
137#else
138#define PIOSWAP 0
139#endif
140
141#ifndef T3_DEBUG
142#define T3_DEBUG 0
143#endif
144
145#ifndef T3_BRINGUP
146#define T3_BRINGUP 0
147#endif
148
149
150/* Broadcom recommends using PHY interrupts instead of autopolling,
151   but I haven't made it work yet. */
152#define T3_AUTOPOLL 1
153
154/* Set IPOLL to drive processing through the interrupt dispatcher.
155   Set XPOLL to drive processing by an external polling agent.  One
156   must be set; setting both is ok. */
157
158#ifndef IPOLL
159#define IPOLL 0
160#endif
161#ifndef XPOLL
162#define XPOLL 1
163#endif
164
165#define ENET_ADDR_LEN	6		/* size of an ethernet address */
166#define MIN_ETHER_PACK  64              /* min size of a packet */
167#define MAX_ETHER_PACK  1518		/* max size of a packet */
168#define VLAN_TAG_LEN    4               /* VLAN type plus tag */
169#define CRC_SIZE	4		/* size of CRC field */
170
171/* Packet buffers.  For the Tigon 3, packet buffer alignment is
172   arbitrary and can be to any byte boundary.  We would like it
173   aligned to a cache line boundary for performance, although there is
174   a trade-off with IP/TCP header alignment.  Jumbo frames are not
175   currently supported.  */
176
177#define ETH_PKTBUF_LEN      (((MAX_ETHER_PACK+31)/32)*32)
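/* For the default MAX_ETHER_PACK of 1518 bytes, this rounds up to 1536
   bytes (48 cache lines of 32 bytes). */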
178
179#if __long64
180typedef struct eth_pkt_s {
181    queue_t next;			/* 16 */
182    uint8_t *buffer;			/*  8 */
183    uint32_t flags;			/*  4 */
184    int32_t length;			/*  4 */
185    uint8_t data[ETH_PKTBUF_LEN];
186} eth_pkt_t;
187#else
188typedef struct eth_pkt_s {
189    queue_t next;			/*  8 */
190    uint8_t *buffer;			/*  4 */
191    uint32_t flags;			/*  4 */
192    int32_t length;			/*  4 */
193    uint32_t unused[3];			/* 12 */
194    uint8_t data[ETH_PKTBUF_LEN];
195} eth_pkt_t;
196#endif
197
198#define CACHE_ALIGN       32
199#define ETH_PKTBUF_LINES  ((sizeof(eth_pkt_t) + (CACHE_ALIGN-1))/CACHE_ALIGN)
200#define ETH_PKTBUF_SIZE   (ETH_PKTBUF_LINES*CACHE_ALIGN)
201#define ETH_PKTBUF_OFFSET (offsetof(eth_pkt_t, data))
202
203#define ETH_PKT_BASE(data) ((eth_pkt_t *)((data) - ETH_PKTBUF_OFFSET))
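/* Illustrative sketch only (this is what t3_procrxring()/t3_proctxring()
   below actually do): the chip is handed pkt->buffer, which points at
   pkt->data, so the owning packet structure can be recovered from the
   buffer address found in a completed descriptor:

       eth_pkt_t *pkt = ETH_PKT_BASE(PCI_TO_PTR(bd->bufptr_lo));

   where "bd" stands for a send or receive buffer descriptor. */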
204
205static void
206show_packet(char c, eth_pkt_t *pkt)
207{
208    int i;
209    int n = (pkt->length < 32 ? pkt->length : 32);
210
211    xprintf("%c[%4d]:", c, pkt->length);
212    for (i = 0; i < n; i++) {
213	if (i % 4 == 0)
214	    xprintf(" ");
215	xprintf("%02x", pkt->buffer[i]);
216	}
217    xprintf("\n");
218}
219
220
221static void t3_ether_probe(cfe_driver_t *drv,
222			   unsigned long probe_a, unsigned long probe_b,
223			   void *probe_ptr);
224
225
226
227/* Chip documentation numbers the rings with 1-origin.  */
228
229#define RI(n)                 ((n)-1)
230
231/* BCM570x Ring Sizes (no external memory).  Pages 97-98 */
232
233#define TXP_MAX_RINGS         16
234#define TXP_INTERNAL_RINGS    4
235#define TXP_RING_ENTRIES      512
236
237#define RXP_STD_ENTRIES       512
238
239#define RXR_MAX_RINGS         16
240#define RXR_RING_ENTRIES      1024
241
242#define RXR_MAX_RINGS_05      1
243#define RXR_RING_ENTRIES_05   512
244
245
246/* BCM570x Send Buffer Descriptors as a struct.  Pages 100-101 */
247
248typedef struct t3_snd_bd_s {
249    uint32_t  bufptr_hi;
250    uint32_t  bufptr_lo;
251#ifdef __MIPSEB
252    uint16_t  length;
253    uint16_t  flags;
254    uint16_t  pad;
255    uint16_t  vlan_tag;
256#elif __MIPSEL
257    uint16_t  flags;
258    uint16_t  length;
259    uint16_t  vlan_tag;
260    uint16_t  pad;
261#else
262#error "bcm5700: endian not set"
263#endif
264} t3_snd_bd_t;
265
266#define SND_BD_SIZE           16
267
268#define TX_FLAG_TCP_CKSUM     0x0001
269#define TX_FLAG_IP_CKSUM      0x0002
270#define TX_FLAG_PACKET_END    0x0004
271#define TX_FLAG_IP_FRAG       0x0008
272#define TX_FLAG_IP_FRAG_END   0x0010
273#define TX_FLAG_VLAN_TAG      0x0040
274#define TX_FLAG_COAL_NOW      0x0080
275#define TX_FLAG_CPU_PRE_DMA   0x0100
276#define TX_FLAG_CPU_POST_DMA  0x0200
277#define TX_FLAG_ADD_SRC       0x1000
278#define TX_FLAG_SRC_ADDR_SEL  0x6000
279#define TX_FLAG_NO_CRC        0x8000
280
281/* BCM570x Receive Buffer Descriptors as a struct.  Pages 105-107 */
282
283typedef struct t3_rcv_bd_s {
284    uint32_t  bufptr_hi;
285    uint32_t  bufptr_lo;
286#ifdef __MIPSEB
287    uint16_t  index;
288    uint16_t  length;
289    uint16_t  type;
290    uint16_t  flags;
291    uint16_t  ip_cksum;
292    uint16_t  tcp_cksum;
293    uint16_t  error_flag;
294    uint16_t  vlan_tag;
295#elif __MIPSEL
296    uint16_t  length;
297    uint16_t  index;
298    uint16_t  flags;
299    uint16_t  type;
300    uint16_t  tcp_cksum;
301    uint16_t  ip_cksum;
302    uint16_t  vlan_tag;
303    uint16_t  error_flag;
304#else
305#error "bcm5700: endian not set"
306#endif
307    uint32_t  pad;
308    uint32_t  opaque;
309} t3_rcv_bd_t;
310
311#define RCV_BD_SIZE           32
312
313#define RX_FLAG_PACKET_END    0x0004
314#define RX_FLAG_JUMBO_RING    0x0020
315#define RX_FLAG_VLAN_TAG      0x0040
316#define RX_FLAG_ERROR         0x0400
317#define RX_FLAG_MINI_RING     0x0800
318#define RX_FLAG_IP_CKSUM      0x1000
319#define RX_FLAG_TCP_CKSUM     0x2000
320#define RX_FLAG_IS_TCP        0x4000
321
322#define RX_ERR_BAD_CRC        0x0001
323#define RX_ERR_COLL_DETECT    0x0002
324#define RX_ERR_LINK_LOST      0x0004
325#define RX_ERR_PHY_DECODE     0x0008
326#define RX_ERR_DRIBBLE        0x0010
327#define RX_ERR_MAC_ABORT      0x0020
328#define RX_ERR_SHORT_PKT      0x0040
329#define RX_ERR_TRUNC_NO_RES   0x0080
330#define RX_ERR_GIANT_PKT      0x0100
331
332/* BCM570x Status Block format as a struct (not BCM5705).  Pages 110-111. */
333
334typedef struct t3_status_s {
335    uint32_t status;
336    uint32_t tag;
337#ifdef __MIPSEB
338    uint16_t rxc_std_index;
339    uint16_t rxc_jumbo_index;
340    uint16_t reserved2;
341    uint16_t rxc_mini_index;
342    struct {
343	uint16_t send_c;
344	uint16_t return_p;
345    } index [16];
346#elif __MIPSEL
347    uint16_t rxc_jumbo_index;
348    uint16_t rxc_std_index;
349    uint16_t rxc_mini_index;
350    uint16_t reserved2;
351    struct {
352	uint16_t return_p;
353	uint16_t send_c;
354    } index [16];
355#else
356#error "bcm5700: endian not set"
357#endif
358} t3_status_t;
359
360#define M_STATUS_UPDATED        0x00000001
361#define M_STATUS_LINKCHNG       0x00000002
362#define M_STATUS_ERROR          0x00000004
363
364/* BCM570x Statistics Block format as a struct.  Pages 112-120 */
365
366typedef struct t3_stats_s {
367    uint64_t stats[L_MAC_STATS/sizeof(uint64_t)];
368} t3_stats_t;
369
/* Encoded status transfer block size (32, 64 or 80 bytes).  Page 412 */
371
372#define STATUS_BLOCK_SIZE(rings) \
373         ((rings) <= 4  ? K_HCM_SBSIZE_32 : \
374          (rings) <= 12 ? K_HCM_SBSIZE_64 : \
375          K_HCM_SBSIZE_80)
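/* For example, with only one send/return ring pair in use (rings == 1),
   the encoding selects the 32-byte status block, K_HCM_SBSIZE_32. */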
376
377/* End of 570X defined data structures */
378
/* The maximum supported BD ring index (QOS) for transmit or receive. */
380
381#define MAX_RI                 1
382
383
384typedef enum {
385    eth_state_uninit,
386    eth_state_off,
387    eth_state_on,
388} eth_state_t;
389
390typedef struct t3_ether_s {
391    /* status block */
392    volatile t3_status_t *status;  /* should be cache-aligned */
393
394    /* PCI access information */
395    uint32_t  regbase;
396    uint32_t  membase;
397    uint8_t   irq;
398    pcitag_t  tag;		   /* tag for configuration registers */
399
400    uint8_t   hwaddr[6];
401    uint16_t  device;              /* chip device code */
402    uint8_t   revision;            /* chip revision */
403    uint16_t  asic_revision;       /* mask revision */
404
405    eth_state_t state;             /* current state */
406    uint32_t intmask;              /* interrupt mask */
407
408    int linkspeed;		   /* encodings from cfe_ioctl */
409
410    /* packet lists */
411    queue_t freelist;
412    uint8_t *pktpool;
413    queue_t rxqueue;
414
415    /* rings */
416    /* For now, support only the standard Rx Producer Ring */
417    t3_rcv_bd_t *rxp_std;          /* Standard Rx Producer Ring */
418    uint32_t  rxp_std_index;
419    uint32_t  prev_rxp_std_index;
420
421   /* For now, support only 1 priority */
422    uint32_t  rxr_entries;
423    t3_rcv_bd_t *rxr_1;            /* Rx Return Ring 1 */
424    uint32_t  rxr_1_index;
425    t3_snd_bd_t *txp_1;            /* Send Ring 1 */
426    uint32_t  txp_1_index;
427    uint32_t  txc_1_index;
428
429    cfe_devctx_t *devctx;
430
431    /* PHY access */
432    int      phy_addr;
433    uint16_t phy_status;
434    uint16_t phy_ability;
435    uint16_t phy_xability;
436    uint32_t phy_vendor;
437    uint16_t phy_device;
438
439    /* MII polling control */
440    int      phy_change;
441    int      mii_polling;
442
443    /* statistics block */
444    volatile t3_stats_t *stats;    /* should be cache-aligned */
445
446    /* additional driver statistics */
447    uint32_t rx_interrupts;
448    uint32_t tx_interrupts;
449    uint32_t bogus_interrupts;
450
451    /* SB specific fields */
452    sb_t     *sbh;
453    uint32_t sbidx;
454    uint32_t flags;
455#define T3_RGMII_MODE 	0x1
456#define T3_SB_CORE	0x2
457#define T3_NO_PHY	0x4
458} t3_ether_t;
459
460
461/* Address mapping macros */
462
463#define PCI_TO_PTR(a)  (PHYS_TO_K1(a))
464#define PTR_TO_PCI(x)  (K1_TO_PHYS((uint32_t)x))
465
466
467/* Chip access macros */
468
469/* These macros attempt to be compatible with match-bits mode,
470   which may put the data and byte masks into the wrong 32-bit word
471   for 64-bit accesses.  See the comment above on PIOSWAP.
472   Externally mastered DMA (control and data) uses match-bits and does
473   specify word-swaps when operating big endian.  */
474
475/* Most registers are 32 bits wide and are accessed by 32-bit
476   transactions.  The mailbox registers and on-chip RAM are 64-bits
477   wide but are generally accessed by 32-bit transactions.
478   Furthermore, the documentation is ambiguous about which 32-bits of
479   the mailbox is significant.  To localize the potential confusions,
480   we define macros for the 3 different cases.  */
481
482#define READCSR(sc,csr)       phys_read32((sc)->regbase + (csr))
483#define WRITECSR(sc,csr,val)  phys_write32((sc)->regbase + (csr), (val))
484
485#if PIOSWAP
486#define READMBOX(sc,csr)      phys_read32((sc)->regbase+((csr)^4))
487#define WRITEMBOX(sc,csr,val) phys_write32((sc)->regbase+((csr)^4), (val))
488
489#define READMEM(sc,csr)       phys_read32((sc)->membase+(csr))
490#define WRITEMEM(sc,csr,val)  phys_write32((sc)->membase+(csr), (val))
491
492#else
493#define READMBOX(sc,csr)      phys_read32((sc)->regbase+(csr))
494#define WRITEMBOX(sc,csr,val) phys_write32((sc)->regbase+(csr), (val))
495
496#define READMEM(sc,csr)       phys_read32((sc)->membase+((csr) ^ 4))
497#define WRITEMEM(sc,csr,val)  phys_write32((sc)->membase+((csr) ^ 4), (val))
498
499#endif
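/* Worked example of the "^ 4" above: a 64-bit mailbox or on-chip RAM word
   occupies two consecutive 32-bit offsets, csr and csr+4.  XOR-ing the
   offset with 4 selects the other half of the pair, which is where the
   significant 32 bits land when word-swapping is in effect for these
   target accesses. */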
500
501
502/* Entry to and exit from critical sections (currently relative to
503   interrupts only, not SMP) */
504
505#if CFG_INTERRUPTS
506#define CS_ENTER(sc) cfe_disable_irq(sc->irq)
507#define CS_EXIT(sc)  cfe_enable_irq(sc->irq)
508#else
509#define CS_ENTER(sc) ((void)0)
510#define CS_EXIT(sc)  ((void)0)
511#endif
512
513
514static void
515dumpseq(t3_ether_t *sc, int start, int next)
516{
517    int offset, i, j;
518    int columns = 4;
519    int lines = (((next - start)/4 + 1) + 3)/columns;
520    int step = lines*4;
521
522    offset = start;
523    for (i = 0; i < lines; i++) {
524	xprintf("\nCSR");
525	for (j = 0; j < columns; j++) {
526	    if (offset + j*step < next)
527		xprintf(" %04X: %08X ",
528			offset+j*step, READCSR(sc, offset+j*step));
529	    }
530	offset += 4;
531	}
532    xprintf("\n");
533}
534
535static void
536dumpcsrs(t3_ether_t *sc, const char *legend)
537{
538    xprintf("%s:\n", legend);
539
540    /* Some device-specific PCI configuration registers */
541    xprintf("-----PCI-----");
542    dumpseq(sc, 0x68, 0x78);
543
544    /* Some general control registers */
545    xprintf("---General---");
546    dumpseq(sc, 0x6800, 0x6810);
547
548    xprintf("-------------\n");
549}
550
551
552/* Memory allocation */
553
static void *
kmalloc_uncached(unsigned int size, unsigned int align)
{
    void *ptr;

    if ((ptr = KMALLOC(size, align)) == NULL)
	return NULL;

    cfe_flushcache(CFE_CACHE_FLUSH_D);

    return (void *)UNCADDR(PHYSADDR((uint32_t)ptr));
}

static void
kfree_uncached(void *ptr)
{
    KFREE((void *)KERNADDR(PHYSADDR((uint32_t)ptr)));
}
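/* A minimal usage sketch (assumed, for illustration only): blocks that the
   chip writes by DMA, such as the status block, can be allocated through
   these helpers so that host reads bypass the data cache:

       sc->status = (volatile t3_status_t *)
	   kmalloc_uncached(sizeof(t3_status_t), CACHE_ALIGN);
       ...
       kfree_uncached((void *) sc->status);
*/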
572
573
574/* Packet management */
575
576#define ETH_PKTPOOL_SIZE  64
577#define MIN_RXP_STD_BDS   32
578
579
580static eth_pkt_t *
581eth_alloc_pkt(t3_ether_t *sc)
582{
583    eth_pkt_t *pkt;
584
585    CS_ENTER(sc);
586    pkt = (eth_pkt_t *) q_deqnext(&sc->freelist);
587    CS_EXIT(sc);
588    if (!pkt) return NULL;
589
590    pkt->buffer = pkt->data;
591    pkt->length = ETH_PKTBUF_LEN;
592    pkt->flags = 0;
593
594    return pkt;
595}
596
597
598static void
599eth_free_pkt(t3_ether_t *sc, eth_pkt_t *pkt)
600{
601    CS_ENTER(sc);
602    q_enqueue(&sc->freelist, &pkt->next);
603    CS_EXIT(sc);
604}
605
606static void
607eth_initfreelist(t3_ether_t *sc)
608{
609    int idx;
610    uint8_t *ptr;
611    eth_pkt_t *pkt;
612
613    q_init(&sc->freelist);
614
615    ptr = sc->pktpool;
616    for (idx = 0; idx < ETH_PKTPOOL_SIZE; idx++) {
617	pkt = (eth_pkt_t *) ptr;
618	eth_free_pkt(sc, pkt);
619	ptr += ETH_PKTBUF_SIZE;
620	}
621}
622
623
624/* Utilities */
625
626static const char *
627t3_devname(t3_ether_t *sc)
628{
629    return (sc->devctx != NULL ? cfe_device_name(sc->devctx) : "eth?");
630}
631
632
633/* CRCs */
634
635#define IEEE_CRC32_POLY    0xEDB88320UL    /* CRC-32 Poly -- either endian */
636
637uint32_t eth_crc32(const uint8_t *databuf, unsigned int datalen);
638/*static*/ uint32_t
639eth_crc32(const uint8_t *databuf, unsigned int datalen)
640{
641    unsigned int idx, bit, data;
642    uint32_t crc;
643
644    crc = 0xFFFFFFFFUL;
645    for (idx = 0; idx < datalen; idx++)
646	for (data = *databuf++, bit = 0; bit < 8; bit++, data >>= 1)
647	    crc = (crc >> 1) ^ (((crc ^ data) & 1) ? IEEE_CRC32_POLY : 0);
648    return crc;
649}
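/* Hypothetical usage sketch (eth_crc32() is not called in the code shown
   here): multicast filters are conventionally derived from the CRC of the
   destination address, e.g.

       uint32_t crc = eth_crc32(mcaddr, ENET_ADDR_LEN);
       int      bit = crc & 0x7F;      pick 1 of 128 hash-filter bits

   The exact hash width and bit selection are device-specific. */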
650
651
652/* Descriptor ring management */
653
654static int
655t3_add_rcvbuf(t3_ether_t *sc, eth_pkt_t *pkt)
656{
657    t3_rcv_bd_t *rxp;
658
659    rxp = &(sc->rxp_std[sc->rxp_std_index]);
660    rxp->bufptr_lo = PTR_TO_PCI(pkt->buffer);
661    rxp->length = ETH_PKTBUF_LEN;
662    sc->rxp_std_index++;
663    if (sc->rxp_std_index == RXP_STD_ENTRIES)
664	sc->rxp_std_index = 0;
665    return 0;
666}
667
668static void
669t3_fillrxring(t3_ether_t *sc)
670{
671    eth_pkt_t *pkt;
672    unsigned rxp_ci, rxp_onring;
673
674    rxp_ci = sc->status->rxc_std_index;  /* Get a snapshot */
675
676    if (sc->rxp_std_index >= rxp_ci)
677	rxp_onring = sc->rxp_std_index - rxp_ci;
678    else
679	rxp_onring = (sc->rxp_std_index + RXP_STD_ENTRIES) - rxp_ci;
680
681    while (rxp_onring < MIN_RXP_STD_BDS) {
682	pkt = eth_alloc_pkt(sc);
683	if (pkt == NULL) {
684	    /* could not allocate a buffer */
685	    break;
686	    }
687
688	/*
689	 * Ensure that the packet memory is flushed out of the data cache
690	 * before posting it to receive an incoming packet.
691	 */
692	cfe_flushcache(CFE_CACHE_FLUSH_D);
693
694	if (t3_add_rcvbuf(sc, pkt) != 0) {
695	    /* could not add buffer to ring */
696	    eth_free_pkt(sc, pkt);
697	    break;
698	    }
699	rxp_onring++;
700	}
701}
702
703static void
704t3_rx_callback(t3_ether_t *sc, eth_pkt_t *pkt)
705{
706    if (T3_DEBUG) show_packet('>', pkt);   /* debug */
707
708    CS_ENTER(sc);
709    q_enqueue(&sc->rxqueue, &pkt->next);
710    CS_EXIT(sc);
711}
712
713static void
714t3_procrxring(t3_ether_t *sc)
715{
716    eth_pkt_t   *pkt;
717    t3_rcv_bd_t *rxc;
718    volatile t3_status_t *status = sc->status;
719
720    rxc = &(sc->rxr_1[sc->rxr_1_index]);
721    do {
722	pkt = ETH_PKT_BASE(PCI_TO_PTR(rxc->bufptr_lo));
723	pkt->length = rxc->length;
724	if ((rxc->flags & RX_FLAG_ERROR) == 0)
725	    t3_rx_callback(sc, pkt);
726	else {
727#if T3_BRINGUP
728	    xprintf("%s: rx error %04X\n", t3_devname(sc), rxc->error_flag);
729#endif
730	    eth_free_pkt(sc, pkt);   /* Could optimize */
731	    }
732	sc->rxr_1_index++;
733	rxc++;
734	if (sc->rxr_1_index == sc->rxr_entries) {
735	    sc->rxr_1_index = 0;
736	    rxc = &(sc->rxr_1[0]);
737	    }
738	} while (status->index[RI(1)].return_p != sc->rxr_1_index);
739
740    /* Update the return ring */
741    WRITEMBOX(sc, R_RCV_BD_RTN_CI(1), sc->rxr_1_index);
742
743    /* Refill the producer ring */
744    t3_fillrxring(sc);
745}
746
747
748static int
749t3_transmit(t3_ether_t *sc, eth_pkt_t *pkt)
750{
751    t3_snd_bd_t *txp;
752
753    if (T3_DEBUG) show_packet('<', pkt);   /* debug */
754
755
756    txp = &(sc->txp_1[sc->txp_1_index]);
757    txp->bufptr_hi = 0;
758    txp->bufptr_lo = PTR_TO_PCI(pkt->buffer);
759    txp->length = pkt->length;
760    txp->flags = TX_FLAG_PACKET_END;
761
762    sc->txp_1_index++;
763    if (sc->txp_1_index == TXP_RING_ENTRIES)
764	sc->txp_1_index = 0;
765
766    WRITEMBOX(sc, R_SND_BD_PI(1), sc->txp_1_index);
767
768    return 0;
769}
770
771
772static void
773t3_proctxring(t3_ether_t *sc)
774{
775    eth_pkt_t   *pkt;
776    t3_snd_bd_t *txc;
777    volatile t3_status_t *status = sc->status;
778
779    txc = &(sc->txp_1[sc->txc_1_index]);
780    do {
781	pkt = ETH_PKT_BASE(PCI_TO_PTR(txc->bufptr_lo));
782	eth_free_pkt(sc, pkt);
783	sc->txc_1_index++;
784	txc++;
785	if (sc->txc_1_index == TXP_RING_ENTRIES) {
786	    sc->txc_1_index = 0;
787	    txc = &(sc->txp_1[0]);
788	    }
789	} while (status->index[RI(1)].send_c != sc->txc_1_index);
790}
791
792
793static void
794t3_initrings(t3_ether_t *sc)
795{
796    int  i;
797    t3_rcv_bd_t *rxp;
798    volatile t3_status_t *status = sc->status;
799
800    /* Clear all Producer BDs */
801    rxp = &(sc->rxp_std[0]);
802    for (i = 0; i < RXP_STD_ENTRIES; i++) {
803        rxp->bufptr_hi = rxp->bufptr_lo = 0;
804	rxp->length = 0;
805	rxp->index = i;
806	rxp->flags = 0;
807	rxp->type = 0;
808	rxp->ip_cksum = rxp->tcp_cksum = 0;
809	rxp++;
810	}
811
812    /* Init the ring pointers */
813
814    sc->rxp_std_index = 0;  status->rxc_std_index = 0;
815    sc->rxr_1_index = 0;    status->index[RI(1)].return_p = 0;
816    sc->txp_1_index = 0;    status->index[RI(1)].send_c = 0;
817
818    /* Allocate some initial buffers for the Producer BD ring */
819    sc->prev_rxp_std_index = 0;
820    t3_fillrxring(sc);
821
822    /* Nothing consumed yet */
823    sc->txc_1_index = 0;
824}
825
826static void
827t3_init(t3_ether_t *sc)
828{
829    /* Allocate buffer pool */
830    sc->pktpool = KMALLOC(ETH_PKTPOOL_SIZE*ETH_PKTBUF_SIZE, CACHE_ALIGN);
831    eth_initfreelist(sc);
832    q_init(&sc->rxqueue);
833    t3_initrings(sc);
834}
835
836static void
837t3_reinit(t3_ether_t *sc)
838{
839    eth_initfreelist(sc);
840    q_init(&sc->rxqueue);
841
842    t3_initrings(sc);
843}
844
845
846#ifdef __MIPSEB
847/* Byte swap utilities. */
848
849#define SWAP4(x) \
850    ((((x) & 0x00FF) << 24) | \
851     (((x) & 0xFF00) << 8)  | \
852     (((x) >> 8) & 0xFF00)  | \
853     (((x) >> 24) & 0x00FF))
854
855static uint32_t
856swap4(uint32_t x)
857{
858    uint32_t t;
859
860    t = ((x & 0xFF00FF00) >> 8) | ((x & 0x00FF00FF) << 8);
861    return (t >> 16) | ((t & 0xFFFF) << 16);
862}
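/* For example, swap4(0x12345678) == 0x78563412; SWAP4() yields the same
   result but as a constant expression usable in initializers. */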
863#endif /* __MIPSEB */
864
865
866/* EEPROM access functions (BCM5700 and BCM5701 version) */
867
/* The 570x chips support multiple access methods.  We use "Auto Access",
   which requires that
     Miscellaneous_Local_Control.Auto_SEEPROM_Access be set, and
     Serial_EEprom.Address.HalfClock be programmed for <= 400 Hz
   (both done by initialization code). */
873
874#define EP_MAX_RETRIES  500
875#define EP_DEVICE_ID    0x00           /* default ATMEL device ID */
876
877static void
878eeprom_access_init(t3_ether_t *sc)
879{
880  uint32_t mlctl;
881
882  WRITECSR(sc, R_EEPROM_ADDR, M_EPADDR_RESET | V_EPADDR_HPERIOD(0x60));
883
884  mlctl = READCSR(sc, R_MISC_LOCAL_CTRL);
885  mlctl |= M_MLCTL_EPAUTOACCESS;
886  WRITECSR(sc, R_MISC_LOCAL_CTRL, mlctl);
887}
888
889
890static uint32_t
891eeprom_read_word(t3_ether_t *sc, unsigned int offset)
892{
893    /* Assumes that SEEPROM is already set up for auto access. */
894    uint32_t epaddr, epdata;
895    volatile uint32_t temp;
896    int i;
897
898    epaddr = READCSR(sc, R_EEPROM_ADDR);
899    epaddr &= M_EPADDR_HPERIOD;
900    epaddr |= (V_EPADDR_ADDR(offset) | V_EPADDR_DEVID(EP_DEVICE_ID)
901	       | M_EPADDR_RW | M_EPADDR_START | M_EPADDR_COMPLETE);
902    WRITECSR(sc, R_EEPROM_ADDR, epaddr);
903    temp = READCSR(sc, R_EEPROM_ADDR);   /* push */
904
905    for (i = 0; i < EP_MAX_RETRIES; i++) {
906        temp = READCSR(sc, R_EEPROM_ADDR);
907	if ((temp & M_EPADDR_COMPLETE) != 0)
908	    break;
909	cfe_usleep(10);
910    }
911    if (i == EP_MAX_RETRIES)
912	xprintf("%s: eeprom_read_word: no SEEPROM response @ %x\n",
913		t3_devname(sc), offset);
914
915    epdata = READCSR(sc, R_EEPROM_DATA);   /* little endian */
916#ifdef __MIPSEB
917    return swap4(epdata);
918#else
919    return epdata;
920#endif
921}
922
923static int
924eeprom_read_range(t3_ether_t *sc, unsigned int offset, unsigned int len,
925		  uint32_t buf[])
926{
927    int index;
928
929    offset &= ~3;  len &= ~3;     /* 4-byte words only */
930    index = 0;
931
932    while (len > 0) {
933	buf[index++] = eeprom_read_word(sc, offset);
934	offset += 4;  len -= 4;
935	}
936
937    return index;
938}
939
940static void
941eeprom_dump_range(const char *label,
942		  uint32_t buf[], unsigned int offset, unsigned int len)
943{
944    int index;
945
946    xprintf("EEPROM: %s", label);
947
948    offset &= ~3;  len &= ~3;     /* 4-byte words only */
949    index = 0;
950
951    for (index = 0; len > 0; index++) {
952	if (index % 8 == 0)
953	    xprintf("\n %04x: ", offset);
954	xprintf(" %08x", buf[offset/4]);
955	offset += 4;  len -= 4;
956	}
957    xprintf("\n");
958}
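/* Usage sketch (illustration only): read and display the first 32 bytes of
   the SEEPROM.  Note that eeprom_dump_range() indexes buf[] by absolute
   word offset, so it assumes a buffer that was filled starting at offset 0:

       uint32_t buf[8];
       eeprom_read_range(sc, 0, sizeof(buf), buf);
       eeprom_dump_range("SEEPROM header", buf, 0, sizeof(buf));
*/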
959
960
961/* MII access functions.  */
962
963/* BCM5401 device specific registers */
964
965#define MII_ISR         0x1A    /* Interrupt Status Register */
966#define MII_IMR         0x1B    /* Interrupt Mask Register */
967
968#define M_INT_LINKCHNG  0x0002
969
970
971/* The 570x chips support multiple access methods.  We use "Auto
972   Access", which requires that MDI_Control_Register.MDI_Select be
973   clear (done by initialization code) */
974
975#define MII_MAX_RETRIES 5000
976
977static void
978mii_access_init(t3_ether_t *sc)
979{
980    WRITECSR(sc, R_MDI_CTRL, 0);                    /* here for now */
981#if !T3_AUTOPOLL
982    WRITECSR(sc, R_MI_MODE, V_MIMODE_CLKCNT(0x1F));  /* max divider */
983#endif
984}
985
986
987static uint16_t
988mii_read_register(t3_ether_t *sc, int phy, int index)
989{
990    uint32_t mode;
991    uint32_t comm, val;
992    int   i;
993
994    mode = READCSR(sc, R_MI_MODE);
995
996    comm = (V_MICOMM_CMD_RD | V_MICOMM_PHY(phy) | V_MICOMM_REG(index)
997	    | M_MICOMM_BUSY);
998    WRITECSR(sc, R_MI_COMM, comm);
999
1000    for (i = 0; i < MII_MAX_RETRIES; i++) {
1001	val = READCSR(sc, R_MI_COMM);
1002	if ((val & M_MICOMM_BUSY) == 0)
1003	    break;
1004	}
1005    if (i == MII_MAX_RETRIES)
1006	xprintf("%s: mii_read_register: MII always busy\n", t3_devname(sc));
1007
1008
1009    return G_MICOMM_DATA(val);
1010}
1011
1012/* Register reads occasionally return spurious 0's.  Verify a zero by
1013   doing a second read, or spinning when a zero is "impossible".  */
1014static uint16_t
1015mii_read_register_v(t3_ether_t *sc, int phy, int index, int spin)
1016{
1017    uint32_t val;
1018
1019    val = mii_read_register(sc, phy, index);
1020    if (val == 0) {
1021	do {
1022	    val = mii_read_register(sc, phy, index);
1023	    } while (spin && val == 0);
1024	}
1025    return val;
1026}
1027
1028static void
1029mii_write_register(t3_ether_t *sc, int phy, int index, uint16_t value)
1030{
1031    uint32_t mode;
1032    uint32_t comm, val;
1033    int   i;
1034
1035    mode = READCSR(sc, R_MI_MODE);
1036
1037    comm = (V_MICOMM_CMD_WR | V_MICOMM_PHY(phy) | V_MICOMM_REG(index)
1038	    | V_MICOMM_DATA(value) | M_MICOMM_BUSY);
1039    WRITECSR(sc, R_MI_COMM, comm);
1040
1041    for (i = 0; i < MII_MAX_RETRIES; i++) {
1042	val = READCSR(sc, R_MI_COMM);
1043	if ((val & M_MICOMM_BUSY) == 0)
1044	    break;
1045	}
1046    if (i == MII_MAX_RETRIES)
1047	xprintf("%s: mii_write_register: MII always busy\n", t3_devname(sc));
1048
1049}
1050
1051static int
1052mii_probe(t3_ether_t *sc)
1053{
1054#if T3_AUTOPOLL           /* With autopolling, the code below is not reliable.  */
1055    return 1;     /* Guaranteed for integrated PHYs */
1056#else
1057    int i;
1058    uint16_t id1, id2;
1059
1060    for (i = 0; i < 32; i++) {
1061        id1 = mii_read_register(sc, i, MII_PHYIDR1);
1062	id2 = mii_read_register(sc, i, MII_PHYIDR2);
1063	if ((id1 != 0x0000 && id1 != 0xFFFF) ||
1064	    (id2 != 0x0000 && id2 != 0xFFFF)) {
1065	    if (id1 != id2) return i;
1066	    }
1067	}
1068    return -1;
1069#endif
1070}
1071
1072static uint16_t
1073mii_read_shadow_register(t3_ether_t *sc, int index, int shadow_addr)
1074{
1075        uint16_t val;
1076
1077#if T3_DEBUG
1078	xprintf("\nmii_read_shadow_register: reg=0x%X shadow=0x%X\n", index, shadow_addr);
1079#endif
1080
1081	/* write to the shadow register first with the correct shadow address and write disabled */
1082	mii_write_register(sc, sc->phy_addr, index, (shadow_addr & ~SHDW_WR_EN) );
1083
1084	/* read from the shadow register */
1085	val = mii_read_register(sc, sc->phy_addr, index);
1086
1087#if T3_DEBUG
1088	xprintf("mii_read_shadow_register: reg=0x%X shadow=0x%X value=0x%X\n", index, shadow_addr, val);
1089#endif
1090
1091	return(val);
1092}
1093
1094static void
1095mii_write_shadow_register(t3_ether_t *sc, int index, int shadow_val)
1096{
1097        uint16_t val;
1098
1099#if T3_DEBUG
1100	xprintf("\nmii_write_shadow_register: reg=0x%X shadow=0x%X\n", index, (shadow_val | SHDW_WR_EN) );
1101#endif
1102
1103	/* write to the shadow register first with the correct shadow address and write enabled */
1104	mii_write_register(sc, sc->phy_addr, index, (shadow_val | SHDW_WR_EN));
1105
	/* read back the shadow register to verify the write */
	val = mii_read_shadow_register(sc, index, shadow_val);
1108
1109#if T3_DEBUG
1110	xprintf("mii_write_shadow_register: reg=0x%X shadow=0x%X val=0x%X\n", index, shadow_val, val);
1111#endif
1112}
1113
1114#if T3_DEBUG
1115#define OUI_BCM     0x001018
1116#define IDR_BCM     0x000818
1117/* 5400: 4, 5401: 5, 5411: 6, 5421: e, 5701: 11 */
1118
1119static void
1120mii_dump(t3_ether_t *sc, const char *label)
1121{
1122    int i;
1123    uint16_t  r;
1124    uint32_t  idr, part;
1125
1126    xprintf("%s, MII:\n", label);
1127    idr = part = 0;
1128
1129    /* Required registers */
1130    for (i = 0x0; i <= 0x6; ++i) {
1131	r = mii_read_register(sc, sc->phy_addr, i);
1132	xprintf(" REG%02X: %04X", i, r);
1133	if (i == 3 || i == 6)
1134	    xprintf("\n");
1135	if (i == MII_PHYIDR1) {
1136	    idr |= r << 6;
1137	    }
1138	else if (i == MII_PHYIDR2) {
1139	    idr |= (r >> 10) & 0x3F;
1140	    part = (r >> 4) & 0x3F;
1141	    }
1142	}
1143
1144    /* GMII extensions */
1145    for (i = 0x9; i <= 0xA; ++i) {
1146	r = mii_read_register(sc, sc->phy_addr, i);
1147	xprintf(" REG%02X: %04X", i, r);
1148	}
1149    r = mii_read_register(sc, sc->phy_addr, 0xF);
1150    xprintf(" REG%02X: %04X\n", 0xF, r);
1151
1152    /* Broadcom extensions (54xx family) */
1153    if (idr == IDR_BCM) {
1154	for (i = 0x10; i <= 0x14; i++) {
1155	    r = mii_read_register(sc, sc->phy_addr, i);
1156	    xprintf(" REG%02X: %04X", i, r);
1157	    }
1158	xprintf("\n");
1159	for (i = 0x18; i <= 0x1A; i++) {
1160	    r = mii_read_register(sc, sc->phy_addr, i);
1161	    xprintf(" REG%02X: %04X", i, r);
1162	    }
1163	xprintf("\n");
1164	}
1165}
1166#else
1167#define mii_dump(sc,label)
1168#endif
1169
1170static void
1171mii_enable_interrupts(t3_ether_t *sc)
1172{
1173  mii_write_register(sc, sc->phy_addr, MII_IMR, ~M_INT_LINKCHNG);
1174}
1175
1176
1177/* For 5700/5701, LINKCHNG is read-only in the status register and
1178   cleared by writing to CFGCHNG | SYNCCHNG.  For the 5705
1179   (empirically), LINKCHNG is cleared by writing a one, while CFGCHNG
1180   and SYNCCHNG are unimplemented.  Thus we can safely clear the
1181   interrupt by writing ones to all the above bits.  */
1182
1183#define M_LINKCHNG_CLR \
1184    (M_EVT_LINKCHNG | M_MACSTAT_CFGCHNG | M_MACSTAT_SYNCCHNG)
1185
1186static int
1187mii_poll(t3_ether_t *sc)
1188{
1189    uint32_t  macstat;
1190    uint16_t  status, ability, xability;
1191    uint16_t isr;
1192
1193    macstat = READCSR(sc, R_MAC_STATUS);
1194    if ((macstat & (M_EVT_LINKCHNG | M_EVT_MIINT)) != 0)
1195	WRITECSR(sc, R_MAC_STATUS, M_LINKCHNG_CLR);
1196
1197    /* BMSR has read-to-clear bits; read twice.  */
1198
1199    status = mii_read_register(sc, sc->phy_addr, MII_BMSR);
1200    status = mii_read_register_v(sc, sc->phy_addr, MII_BMSR, 1);
1201    ability = mii_read_register_v(sc, sc->phy_addr, MII_ANLPAR, 0);
1202    if (status & BMSR_1000BT_XSR)
1203	xability = mii_read_register_v(sc, sc->phy_addr, MII_K1STSR, 0);
1204    else
1205	xability = 0;
1206    isr = mii_read_register(sc, sc->phy_addr, MII_ISR);
1207
1208    if (status != sc->phy_status
1209	|| ability != sc->phy_ability || xability != sc->phy_xability) {
1210#if T3_DEBUG
1211	xprintf("[%04x]", isr);
1212	xprintf((macstat & (M_EVT_LINKCHNG | M_EVT_MIINT)) != 0 ? "+" : "-");
1213
1214	if (status != sc->phy_status)
1215	    xprintf(" ST: %04x %04x", sc->phy_status, status);
1216	if (ability != sc->phy_ability)
1217	    xprintf(" AB: %04x %04x", sc->phy_ability, ability);
1218	if (xability != sc->phy_xability)
1219	    xprintf(" XA: %04x %04x", sc->phy_xability, xability);
1220	xprintf("\n");
1221#endif
1222        sc->phy_status = status;
1223	sc->phy_ability = ability;
1224	sc->phy_xability = xability;
1225	return 1;
1226	}
1227    else if ((macstat & (M_EVT_LINKCHNG | M_EVT_MIINT)) != 0) {
1228	isr = mii_read_register(sc, sc->phy_addr, MII_ISR);
1229	}
1230    return 0;
1231}
1232
1233static void
1234mii_set_speed(t3_ether_t *sc, int speed)
1235{
1236    uint16_t  control;
1237
1238    control = mii_read_register(sc, sc->phy_addr, MII_BMCR);
1239
1240    control &= ~(BMCR_ANENABLE | BMCR_RESTARTAN);
1241    mii_write_register(sc, sc->phy_addr, MII_BMCR, control);
1242    control &= ~(BMCR_SPEED0 | BMCR_SPEED1 | BMCR_DUPLEX);
1243
1244    switch (speed) {
1245	case ETHER_SPEED_10HDX:
1246	default:
1247	    break;
1248	case ETHER_SPEED_10FDX:
1249	    control |= BMCR_DUPLEX;
1250	    break;
1251	case ETHER_SPEED_100HDX:
1252	    control |= BMCR_SPEED100;
1253	    break;
1254	case ETHER_SPEED_100FDX:
1255	    control |= BMCR_SPEED100 | BMCR_DUPLEX ;
1256	    break;
1257	}
1258
1259    mii_write_register(sc, sc->phy_addr, MII_BMCR, control);
1260}
1261
1262static void
1263mii_autonegotiate(t3_ether_t *sc)
1264{
1265    uint16_t  control, status, remote, xremote;
1266    unsigned int  timeout;
1267    int linkspeed;
1268    uint32_t mode, ledCtrl;
1269
1270    linkspeed = ETHER_SPEED_UNKNOWN;
1271
1272    /* Read twice to clear latching bits */
1273    status = mii_read_register(sc, sc->phy_addr, MII_BMSR);
1274    status = mii_read_register_v(sc, sc->phy_addr, MII_BMSR, 1);
1275    mii_dump(sc, "query PHY");
1276
1277    if ((status & (BMSR_AUTONEG | BMSR_LINKSTAT)) ==
1278        (BMSR_AUTONEG | BMSR_LINKSTAT))
1279	control = mii_read_register(sc, sc->phy_addr, MII_BMCR);
1280    else {
1281	for (timeout = 4*CFE_HZ; timeout > 0; timeout -= CFE_HZ/2) {
1282	    status = mii_read_register(sc, sc->phy_addr, MII_BMSR);
1283	    if ((status & BMSR_ANCOMPLETE) != 0)
1284		break;
1285	    cfe_sleep(CFE_HZ/2);
1286	    }
1287	}
1288
1289    remote = mii_read_register_v(sc, sc->phy_addr, MII_ANLPAR, 0);
1290
1291    mode = READCSR(sc, R_MAC_MODE);
1292
1293    xprintf("%s: Link speed: ", t3_devname(sc));
1294    if ((status & BMSR_ANCOMPLETE) != 0) {
	/* A link partner was negotiated... */
1296
1297	if (status & BMSR_1000BT_XSR)
1298	    xremote = mii_read_register_v(sc, sc->phy_addr, MII_K1STSR, 0);
1299	else
1300	    xremote = 0;
1301
1302	mode &= ~(M_MACM_PORTMODE | M_MACM_HALFDUPLEX);
1303
1304	if ((xremote & K1STSR_LP1KFD) != 0) {
1305	    xprintf("1000BaseT FDX\n");
1306	    linkspeed = ETHER_SPEED_1000FDX;
1307	    mode |= V_MACM_PORTMODE(K_MACM_PORTMODE_GMII);
1308	    }
1309	else if ((xremote & K1STSR_LP1KHD) != 0) {
1310	    xprintf("1000BaseT HDX\n");
1311	    linkspeed = ETHER_SPEED_1000HDX;
1312	    mode |= V_MACM_PORTMODE(K_MACM_PORTMODE_GMII) | M_MACM_HALFDUPLEX;
1313	    }
1314	else if ((remote & ANLPAR_TXFD) != 0) {
1315	    xprintf("100BaseT FDX\n");
1316	    linkspeed = ETHER_SPEED_100FDX;
1317	    mode |= V_MACM_PORTMODE(K_MACM_PORTMODE_MII);
1318	    }
1319	else if ((remote & ANLPAR_TXHD) != 0) {
1320	    xprintf("100BaseT HDX\n");
1321	    linkspeed = ETHER_SPEED_100HDX;
1322	    mode |= V_MACM_PORTMODE(K_MACM_PORTMODE_MII) | M_MACM_HALFDUPLEX;
1323	    }
1324	else if ((remote & ANLPAR_10FD) != 0) {
1325	    xprintf("10BaseT FDX\n");
1326	    linkspeed = ETHER_SPEED_10FDX;
1327	    mode |= V_MACM_PORTMODE(K_MACM_PORTMODE_MII);
1328	    }
1329	else if ((remote & ANLPAR_10HD) != 0) {
1330	    xprintf("10BaseT HDX\n");
1331	    linkspeed = ETHER_SPEED_10HDX;
1332	    mode |= V_MACM_PORTMODE(K_MACM_PORTMODE_MII) | M_MACM_HALFDUPLEX;
1333	    }
1334
	/* In order for the 5750 core in the BCM4785 chip to work properly
	 * in RGMII mode, the LED Control Register must be set up.
	 */
1338	if (sc->flags & T3_RGMII_MODE)
1339	{
1340		ledCtrl = READCSR(sc, R_MAC_LED_CTRL);
1341		ledCtrl &= ~(M_LEDCTRL_1000MBPS | M_LEDCTRL_100MBPS);
1342
1343		if((linkspeed == ETHER_SPEED_10FDX) || (linkspeed == ETHER_SPEED_10HDX))
1344			ledCtrl |= M_LEDCTRL_OVERRIDE;
1345		else if ((linkspeed == ETHER_SPEED_100FDX) || (linkspeed == ETHER_SPEED_100HDX))
1346			ledCtrl |= (M_LEDCTRL_OVERRIDE | M_LEDCTRL_100MBPS);
1347		else /* 1000MBPS */
1348			ledCtrl |= (M_LEDCTRL_OVERRIDE | M_LEDCTRL_1000MBPS);
1349
1350		WRITECSR(sc, R_MAC_LED_CTRL, ledCtrl);
1351
		cfe_usleep(40);
1353	}
1354
1355	WRITECSR(sc, R_MAC_MODE, mode);
1356    }
1357    else {
1358	/* no link partner convergence */
1359	xprintf("Unknown\n");
1360	linkspeed = ETHER_SPEED_UNKNOWN;
1361	remote = xremote = 0;
1362	if (G_MACM_PORTMODE(mode) == K_MACM_PORTMODE_NONE) {
	    /* Keep any previous port mode, since it is the most likely to
	       reappear; otherwise default to MII (10/100), the more likely case. */
1365	    mode |= V_MACM_PORTMODE(K_MACM_PORTMODE_MII);
1366
	    /* If the 5750 core is in RGMII mode, set the LED speed indication to 10 Mbps */
1368	    if (sc->flags & T3_RGMII_MODE)
1369	    {
1370		    ledCtrl = READCSR(sc, R_MAC_LED_CTRL);
1371		    ledCtrl &= ~(M_LEDCTRL_1000MBPS | M_LEDCTRL_100MBPS);
1372
1373		    ledCtrl |= M_LEDCTRL_OVERRIDE;
1374
1375		    WRITECSR(sc, R_MAC_LED_CTRL, ledCtrl);
1376
1377		    cfe_usleep(40);
1378	    }
1379
1380	    WRITECSR(sc, R_MAC_MODE, mode);
1381	}
1382    }
1383    sc->linkspeed = linkspeed;
1384
1385    status = mii_read_register_v(sc, sc->phy_addr, MII_BMSR, 1);
1386    (void)mii_read_register(sc, sc->phy_addr, MII_ISR);
1387
1388    sc->phy_status = status;
1389    sc->phy_ability = remote;
1390    sc->phy_xability = xremote;
1391
1392    mii_dump(sc, "final PHY");
1393}
1394
1395static void
1396t3_force_speed(t3_ether_t *sc, int linkspeed)
1397{
1398    uint32_t mode, ledCtrl;
1399
1400
1401    mode = READCSR(sc, R_MAC_MODE);
1402	mode &= ~(M_MACM_PORTMODE | M_MACM_HALFDUPLEX);
1403
1404    xprintf("%s: Link speed: ", t3_devname(sc));
1405
1406	switch (linkspeed)
1407	{
1408		case ETHER_SPEED_1000FDX:
1409			xprintf("1000BaseT FDX\n");
1410			mode |= V_MACM_PORTMODE(K_MACM_PORTMODE_GMII);
1411			break;
1412		case ETHER_SPEED_1000HDX:
1413			xprintf("1000BaseT HDX\n");
1414			mode |= V_MACM_PORTMODE(K_MACM_PORTMODE_GMII) | M_MACM_HALFDUPLEX;
1415			break;
1416		case ETHER_SPEED_100FDX:
1417			xprintf("100BaseT FDX\n");
1418			mode |= V_MACM_PORTMODE(K_MACM_PORTMODE_MII);
1419			break;
1420		case ETHER_SPEED_100HDX:
1421			xprintf("100BaseT HDX\n");
1422			mode |= V_MACM_PORTMODE(K_MACM_PORTMODE_MII) | M_MACM_HALFDUPLEX;
1423			break;
1424		case ETHER_SPEED_10FDX:
1425			xprintf("10BaseT FDX\n");
1426			mode |= V_MACM_PORTMODE(K_MACM_PORTMODE_MII);
1427			break;
1428		case ETHER_SPEED_10HDX:
1429			xprintf("10BaseT HDX\n");
1430			mode |= V_MACM_PORTMODE(K_MACM_PORTMODE_MII) | M_MACM_HALFDUPLEX;
1431			break;
1432		default:
1433			xprintf("Unknown\n");
1434			break;
1435	}
1436
	/* In order for the 5750 core in the BCM4785 chip to work properly
	 * in RGMII mode, the LED Control Register must be set up.
	 */
1440	if (sc->flags & T3_RGMII_MODE)
1441	{
1442		ledCtrl = READCSR(sc, R_MAC_LED_CTRL);
1443		ledCtrl &= ~(M_LEDCTRL_1000MBPS | M_LEDCTRL_100MBPS);
1444
1445		if((linkspeed == ETHER_SPEED_10FDX) || (linkspeed == ETHER_SPEED_10HDX))
1446			ledCtrl |= M_LEDCTRL_OVERRIDE;
1447		else if ((linkspeed == ETHER_SPEED_100FDX) || (linkspeed == ETHER_SPEED_100HDX))
1448			ledCtrl |= (M_LEDCTRL_OVERRIDE | M_LEDCTRL_100MBPS);
1449		else /* 1000MBPS */
1450			ledCtrl |= (M_LEDCTRL_OVERRIDE | M_LEDCTRL_1000MBPS);
1451
1452		WRITECSR(sc, R_MAC_LED_CTRL, ledCtrl);
1453
1454		cfe_usleep(40);
1455	}
1456
1457	WRITECSR(sc, R_MAC_MODE, mode);
1458
1459    sc->linkspeed = linkspeed;
1460    sc->phy_status = 0;
1461    sc->phy_ability = 0;
1462    sc->phy_xability = 0;
1463}
1464
1465static void
1466t3_clear(t3_ether_t *sc, unsigned reg, uint32_t mask)
1467{
1468    uint32_t val;
1469    int timeout;
1470
1471    val = READCSR(sc, reg);
1472    val &= ~mask;
1473    WRITECSR(sc, reg, val);
1474    val = READCSR(sc, reg);
1475
1476    for (timeout = 4000; (val & mask) != 0 && timeout > 0; timeout -= 100) {
1477	cfe_usleep(100);
1478	val = READCSR(sc, reg);
1479	}
1480    if (timeout <= 0)
1481	xprintf("%s: cannot clear %04X/%08X\n", t3_devname(sc), reg, mask);
1482}
1483
1484
1485/* The following functions collectively implement the recommended
1486   BCM5700 Initialization Procedure (Section 8: Device Control) */
1487
1488static int
1489t3_coldreset(t3_ether_t *sc)
1490{
1491    pcireg_t cmd;
1492    pcireg_t bhlc, subsysid;
1493    pcireg_t bar0, bar1;
1494    pcireg_t cmdx;
1495    uint32_t mhc, mcr, mcfg;
1496    uint32_t mode;
1497    int timeout;
1498    uint32_t magic;
1499
1500    /* Steps 1-18 */
1501    /* Enable memory, also clear R/WC status bits (1) */
1502    cmd = pci_conf_read(sc->tag, PCI_COMMAND_STATUS_REG);
1503    cmd |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE;
1504    cmd &= ~PCI_COMMAND_PARITY_ENABLE;
1505    cmd &= ~PCI_COMMAND_SERR_ENABLE;
1506    pci_conf_write(sc->tag, PCI_COMMAND_STATUS_REG, cmd);
1507
1508    /* Clear and disable INTA output. (2) */
1509    mhc = READCSR(sc, R_MISC_HOST_CTRL);
1510    mhc |= M_MHC_MASKPCIINT | M_MHC_CLEARINTA;
1511    WRITECSR(sc, R_MISC_HOST_CTRL, mhc);
1512
1513    /* Save some config registers modified by core clock reset (3). */
1514    bhlc = pci_conf_read(sc->tag, PCI_BHLC_REG);
1515    subsysid = pci_conf_read(sc->tag, PCI_SUBSYS_ID_REG);
1516    /* Empirically, these are clobbered too. */
1517    bar0 = pci_conf_read(sc->tag, PCI_MAPREG(0));
1518    bar1 = pci_conf_read(sc->tag, PCI_MAPREG(1));
1519
1520    /* Reset the core clocks (4, 5). */
1521    mcfg = READCSR(sc, R_MISC_CFG);
1522    mcfg |= M_MCFG_CORERESET;
1523    WRITECSR(sc, R_MISC_CFG, mcfg);
1524    cfe_usleep(100);    /* 100 usec delay */
1525
1526    /* NB: Until the BARs are restored and reenabled, only PCI
1527       configuration reads and writes will succeed.  */
1528
1529    /* Reenable MAC memory (7) */
1530    pci_conf_write(sc->tag, PCI_MAPREG(0), bar0);
1531    pci_conf_write(sc->tag, PCI_MAPREG(1), bar1);
1532    (void)pci_conf_read(sc->tag, PCI_MAPREG(1));  /* push */
1533    pci_conf_write(sc->tag, PCI_COMMAND_STATUS_REG, cmd);
1534    (void)pci_conf_read(sc->tag, PCI_COMMAND_STATUS_REG);  /* push */
1535
1536    /* Undo some of the resets (6) */
1537    mhc = READCSR(sc, R_MISC_HOST_CTRL);
1538    mhc |= M_MHC_MASKPCIINT;
1539    WRITECSR(sc, R_MISC_HOST_CTRL, mhc);
1540
    /* Verify that the core clock reset completed and autocleared. */
    mcfg = READCSR(sc, R_MISC_CFG);
    if ((mcfg & M_MCFG_CORERESET) != 0)
	xprintf("bcm5700: core clocks stuck in reset\n");
1547
1548    /* Configure PCI-X (8) */
1549	if (!(sc->device == K_PCI_ID_BCM5705 ||
1550	      sc->device == K_PCI_ID_BCM5750))
1551	{
1552	cmdx = pci_conf_read(sc->tag, PCI_PCIX_CMD_REG);
1553	cmdx &= ~PCIX_CMD_RLXORDER_ENABLE;
1554	pci_conf_write(sc->tag, PCI_PCIX_CMD_REG, cmdx);
1555	}
1556
1557    /* Enable memory arbiter (9)  */
1558    mode = READCSR(sc, R_MEM_MODE);
1559    mode |= M_MAM_ENABLE;    /* enable memory arbiter */
1560    WRITECSR(sc, R_MEM_MODE, mode);
1561
1562    /* Assume no external SRAM for now (10) */
1563
1564    /* Set up MHC for endianness and write enables (11-15) */
1565    mhc = READCSR(sc, R_MISC_HOST_CTRL);
1566    /* Since we use match-bits for Direct PCI access, don't swap bytes. */
1567    mhc &= ~M_MHC_ENBYTESWAP;
1568#ifdef __MIPSEL
1569    mhc |= M_MHC_ENWORDSWAP;
1570#endif
1571#ifdef __MIPSEB
1572#if PIOSWAP
1573    mhc |= M_MHC_ENWORDSWAP;
1574#endif
1575#endif
1576    mhc |= M_MHC_ENINDIRECT | M_MHC_ENPCISTATERW | M_MHC_ENCLKCTRLRW;
1577    WRITECSR(sc, R_MISC_HOST_CTRL, mhc);
1578
1579    /* Set byte swapping (16, 17) */
1580    mcr = READCSR(sc, R_MODE_CTRL);
1581#ifdef __MIPSEL
1582    mcr |= M_MCTL_BSWAPDATA | M_MCTL_WSWAPDATA;
1583    mcr |= M_MCTL_WSWAPCTRL;
1584#endif
1585#ifdef __MIPSEB
1586#if MATCH_BYTES
1587    mcr |= M_MCTL_BSWAPDATA | M_MCTL_WSWAPDATA;
1588    mcr |= M_MCTL_BSWAPCTRL | M_MCTL_WSWAPCTRL;
1589#else
1590    mcr &= ~(M_MCTL_BSWAPCTRL | M_MCTL_BSWAPDATA);
1591    mcr |= M_MCTL_WSWAPCTRL | M_MCTL_WSWAPDATA;
1592#endif
1593#endif
1594    WRITECSR(sc, R_MODE_CTRL, mcr);
1595
1596    /* Disable PXE restart, wait for firmware (18, 19) */
1597    for (timeout = 2 * CFE_HZ; timeout > 0; timeout--)
1598    {
1599        WRITECSR(sc, R_MEMWIN_BASE_ADDR, A_PXE_MAILBOX);
1600        magic = READCSR(sc, R_MEMWIN_DATA);
1601
1602        if (magic == ~T3_MAGIC_NUMBER)
1603            break;
1604
1605        cfe_sleep(1);
1606    }
1607    if (timeout == 0)
        xprintf("bcm5700: no firmware rendezvous\n");
1609
1610    WRITECSR(sc, R_MEMWIN_BASE_ADDR, 0);	/* restore default memory window */
1611
1612
1613    /* Clear Ethernet MAC Mode (20) */
1614    WRITECSR(sc, R_MAC_MODE, 0x00000000);
1615    (void)READCSR(sc, R_MAC_MODE);
1616    cfe_usleep(40);
1617
1618    /* Restore remaining config registers (21) */
1619    pci_conf_write(sc->tag, PCI_BHLC_REG, bhlc);
1620    pci_conf_write(sc->tag, PCI_SUBSYS_ID_REG, subsysid);
1621
1622	return(0);
1623}
1624
1625static int
1626t3_warmreset(t3_ether_t *sc)
1627{
1628    uint32_t mode;
1629
1630    /* Enable memory arbiter (9)  */
1631    mode = READCSR(sc, R_MEM_MODE);
1632    mode |= M_MAM_ENABLE;    /* enable memory arbiter */
1633    WRITECSR(sc, R_MEM_MODE, mode);
1634
1635    /* Clear Ethernet MAC Mode (20) */
1636    WRITECSR(sc, R_MAC_MODE, 0x00000000);
1637
1638    return 0;
1639}
1640
1641
1642static int
1643t3_init_registers(t3_ether_t *sc)
1644{
1645    unsigned offset;
1646    uint32_t dmac, mcr, mcfg;
1647
1648    /* Steps 22-29 */
1649
1650    /* Clear MAC statistics block (22) */
1651    if(!(sc->device == K_PCI_ID_BCM5705 ||
1652         sc->device == K_PCI_ID_BCM5750)) {
1653        for (offset = A_MAC_STATS; offset < A_MAC_STATS+L_MAC_STATS; offset += 4) {
1654	        WRITEMEM(sc, offset, 0);
1655	    }
1656	}
1657
1658    /* Clear driver status memory region (23) */
1659    /* ASSERT (sizeof(t3_status_t) == L_MAC_STATUS) */
1660    memset((uint8_t *)sc->status, 0, sizeof(t3_status_t));
1661
1662    /* Set up PCI DMA control (24) */
1663    dmac = READCSR(sc, R_DMA_RW_CTRL);
1664    dmac &= ~(M_DMAC_RDCMD | M_DMAC_WRCMD | M_DMAC_MINDMA);
1665    dmac |= V_DMAC_RDCMD(K_PCI_MEMRD) | V_DMAC_WRCMD(K_PCI_MEMWR);
1666    switch (sc->device) {
1667	case K_PCI_ID_BCM5700:
1668	case K_PCI_ID_BCM5701:
1669	case K_PCI_ID_BCM5702:
1670	    dmac |= V_DMAC_MINDMA(0xF);    /* "Recommended" */
1671	    break;
1672	default:
1673	    dmac |= V_DMAC_MINDMA(0x0);
1674	    break;
1675	}
1676    if (sc->flags & T3_SB_CORE) {
1677	if (sb_chip(sc->sbh) == BCM4785_CHIP_ID && sb_chiprev(sc->sbh) < 2)
1678	    dmac |= V_DMAC_ONEDMA(1);
1679    }
1680    WRITECSR(sc, R_DMA_RW_CTRL, dmac);
1681
1682    mcr = READCSR(sc, R_MODE_CTRL);
1683#ifdef __MIPSEL
1684    mcr |= M_MCTL_BSWAPDATA | M_MCTL_WSWAPDATA;
1685    mcr |= M_MCTL_WSWAPCTRL;
1686#endif
1687#ifdef __MIPSEB
1688#if MATCH_BYTES
1689    mcr |= M_MCTL_BSWAPDATA | M_MCTL_WSWAPDATA;
1690    mcr |= M_MCTL_BSWAPCTRL | M_MCTL_WSWAPCTRL;
1691#else
1692    mcr &= ~(M_MCTL_BSWAPCTRL | M_MCTL_BSWAPDATA);
1693    mcr |= M_MCTL_WSWAPCTRL | M_MCTL_WSWAPDATA;
1694#endif
1695#endif
1696    WRITECSR(sc, R_MODE_CTRL, mcr);
1697
1698    /* Configure host rings (26) */
1699    mcr |= M_MCTL_HOSTBDS;
1700    WRITECSR(sc, R_MODE_CTRL, mcr);
1701
1702    /* Indicate driver ready, disable checksums (27, 28) */
1703    mcr |= M_MCTL_HOSTUP;
1704    mcr |= (M_MCTL_NOTXPHSUM | M_MCTL_NORXPHSUM);
1705    WRITECSR(sc, R_MODE_CTRL, mcr);
1706
1707    /* Configure timer (29) */
1708    mcfg = READCSR(sc, R_MISC_CFG);
1709    mcfg &= ~M_MCFG_PRESCALER;
1710    mcfg |= V_MCFG_PRESCALER(66-1);    /* 66 MHz */
1711    WRITECSR(sc, R_MISC_CFG, mcfg);
1712
1713    return 0;
1714}
1715
1716static int
1717t3_init_pools(t3_ether_t *sc)
1718{
1719    uint32_t mode;
1720    int timeout;
1721
1722    /* Steps 30-36.  These use "recommended" settings (p 150) */
1723
1724    /* Configure the MAC memory pool (30) */
1725    if(!(sc->device == K_PCI_ID_BCM5705 ||
1726         sc->device == K_PCI_ID_BCM5750))
1727    {
1728        WRITECSR(sc, R_BMGR_MBUF_BASE, A_BUFFER_POOL);
1729        WRITECSR(sc, R_BMGR_MBUF_LEN, L_BUFFER_POOL);
1730    }
1731    else
1732    {
1733        /* Note: manual appears to recommend not even writing these (?) */
1734        /* WRITECSR(sc, R_BMGR_MBUF_BASE, A_RXMBUF); */
1735        /* WRITECSR(sc, R_BMGR_MBUF_LEN, 0x8000); */
1736    }
1737
1738    /* Configure the MAC DMA resource pool (31) */
1739    WRITECSR(sc, R_BMGR_DMA_BASE, A_DMA_DESCS);
1740    WRITECSR(sc, R_BMGR_DMA_LEN,  L_DMA_DESCS);
1741
1742    /* Configure the MAC memory watermarks (32) */
1743    if(sc->device == K_PCI_ID_BCM5705 ||
1744       sc->device == K_PCI_ID_BCM5750)
1745    {
1746		WRITECSR(sc, R_BMGR_MBUF_DMA_LOW, 0x0);
1747		WRITECSR(sc, R_BMGR_MBUF_RX_LOW,  0x10);
1748		WRITECSR(sc, R_BMGR_MBUF_HIGH,    0x60);
1749    }
1750    else
1751    {
1752		WRITECSR(sc, R_BMGR_MBUF_DMA_LOW, 0x50);
1753		WRITECSR(sc, R_BMGR_MBUF_RX_LOW,  0x20);
1754		WRITECSR(sc, R_BMGR_MBUF_HIGH,    0x60);
1755    }
1756
1757    /* Configure the DMA resource watermarks (33) */
1758    WRITECSR(sc, R_BMGR_DMA_LOW,   5);
1759    WRITECSR(sc, R_BMGR_DMA_HIGH, 10);
1760
1761    /* Enable the buffer manager (34, 35) */
1762    mode = READCSR(sc, R_BMGR_MODE);
1763    mode |= (M_BMODE_ENABLE | M_BMODE_MBUFLOWATTN);
1764    WRITECSR(sc, R_BMGR_MODE, mode);
1765    for (timeout = CFE_HZ/2; timeout > 0; timeout -= CFE_HZ/10) {
1766	mode = READCSR(sc, R_BMGR_MODE);
1767	if ((mode & M_BMODE_ENABLE) != 0)
1768	    break;
1769	cfe_sleep(CFE_HZ/10);
1770	}
1771    if ((mode & M_BMODE_ENABLE) == 0)
1772	xprintf("bcm5700: buffer manager not enabled\n");
1773
1774    /* Enable internal queues (36) */
1775    WRITECSR(sc, R_FTQ_RESET, 0xFFFFFFFF);
1776    cfe_sleep(1);
1777    WRITECSR(sc, R_FTQ_RESET, 0x00000000);
1778
1779	return(0);
1780}
1781
1782static int
1783t3_init_rings(t3_ether_t *sc)
1784{
1785    unsigned rcbp;
1786    int i;
1787
1788    /* Steps 37-46 */
1789
1790    /* Initialize RCBs for Standard Receive Buffer Ring (37) */
1791    WRITECSR(sc, R_STD_RCV_BD_RCB+RCB_HOST_ADDR_HIGH, 0);
1792    WRITECSR(sc, R_STD_RCV_BD_RCB+RCB_HOST_ADDR_LOW, PTR_TO_PCI(sc->rxp_std));
1793    WRITECSR(sc, R_STD_RCV_BD_RCB+RCB_NIC_ADDR, A_STD_RCV_RINGS);
1794    if(sc->device == K_PCI_ID_BCM5705 ||
1795       sc->device == K_PCI_ID_BCM5750)
1796    {
1797        WRITECSR(sc, R_STD_RCV_BD_RCB+RCB_CTRL, V_RCB_MAXLEN(512));
1798    }
1799    else
1800    {
1801        WRITECSR(sc, R_STD_RCV_BD_RCB+RCB_CTRL, V_RCB_MAXLEN(ETH_PKTBUF_LEN));
1802    }
1803
1804    /* Disable RCBs for Jumbo and Mini Receive Buffer Rings (38,39) */
1805    if(!(sc->device == K_PCI_ID_BCM5705 ||
1806         sc->device == K_PCI_ID_BCM5750))
1807	{
1808	    WRITECSR(sc, R_JUMBO_RCV_BD_RCB+RCB_CTRL,
1809				RCB_FLAG_USE_EXT_RCV_BD | RCB_FLAG_RING_DISABLED);
1810		WRITECSR(sc, R_MINI_RCV_BD_RCB+RCB_CTRL, RCB_FLAG_RING_DISABLED);
1811	}
1812
1813    /* Set BD ring replenish thresholds (40) */
1814    WRITECSR(sc, R_MINI_RCV_BD_THRESH, 128);
1815#if T3_BRINGUP
1816    WRITECSR(sc, R_STD_RCV_BD_THRESH, 1);
1817#else
1818    WRITECSR(sc, R_STD_RCV_BD_THRESH, 25);
1819#endif
1820    WRITECSR(sc, R_JUMBO_RCV_BD_THRESH, 16);
1821
1822    /* Disable all send producer rings (41) */
1823    if(!(sc->device == K_PCI_ID_BCM5705 ||
1824         sc->device == K_PCI_ID_BCM5750))
1825    {
1826	    for (rcbp = A_SND_RCB(1); rcbp <= A_SND_RCB(16); rcbp += RCB_SIZE)
1827		    WRITEMEM(sc, rcbp+RCB_CTRL, RCB_FLAG_RING_DISABLED);
1828    }
1829
1830    /* Initialize send producer index registers (42) */
1831    for (i = 1; i <= TXP_MAX_RINGS; i++) {
1832	WRITEMBOX(sc, R_SND_BD_PI(i), 0);
1833	WRITEMBOX(sc, R_SND_BD_NIC_PI(i), 0);
1834	}
1835
1836    /* Initialize send producer ring 1 (43) */
1837    WRITEMEM(sc, A_SND_RCB(1)+RCB_HOST_ADDR_HIGH, 0);
1838    WRITEMEM(sc, A_SND_RCB(1)+RCB_HOST_ADDR_LOW, PTR_TO_PCI(sc->txp_1));
1839    WRITEMEM(sc, A_SND_RCB(1)+RCB_CTRL, V_RCB_MAXLEN(TXP_RING_ENTRIES));
1840    if (!(sc->device == K_PCI_ID_BCM5705 ||
1841          sc->device == K_PCI_ID_BCM5750))
1842		WRITEMEM(sc, A_SND_RCB(1)+RCB_NIC_ADDR, A_SND_RINGS);
1843
1844    /* Disable unused receive return rings (44) */
1845    for (rcbp = A_RTN_RCB(1); rcbp <= A_RTN_RCB(16); rcbp += RCB_SIZE)
1846	WRITEMEM(sc, rcbp+RCB_CTRL, RCB_FLAG_RING_DISABLED);
1847
1848    /* Initialize receive return ring 1 (45) */
1849    WRITEMEM(sc, A_RTN_RCB(1)+RCB_HOST_ADDR_HIGH, 0);
1850    WRITEMEM(sc, A_RTN_RCB(1)+RCB_HOST_ADDR_LOW, PTR_TO_PCI(sc->rxr_1));
1851    WRITEMEM(sc, A_RTN_RCB(1)+RCB_CTRL, V_RCB_MAXLEN(sc->rxr_entries));
1852    WRITEMEM(sc, A_RTN_RCB(1)+RCB_NIC_ADDR, 0x0000);
1853
1854    /* Initialize receive producer ring mailboxes (46) */
1855    WRITEMBOX(sc, R_RCV_BD_STD_PI, 0);
1856    WRITEMBOX(sc, R_RCV_BD_JUMBO_PI, 0);
1857    WRITEMBOX(sc, R_RCV_BD_MINI_PI, 0);
1858
1859	return(0);
1860}
1861
1862static int
1863t3_configure_mac(t3_ether_t *sc)
1864{
1865    uint32_t low, high;
1866    uint32_t seed;
1867    int i;
1868
1869    /* Steps 47-52 */
1870
1871    /* Configure the MAC unicast address (47) */
1872    high = (sc->hwaddr[0] << 8) | (sc->hwaddr[1]);
1873    low = ((sc->hwaddr[2] << 24) | (sc->hwaddr[3] << 16)
1874	   | (sc->hwaddr[4] << 8) | sc->hwaddr[5]);
1875    /* For now, use a single MAC address */
1876    WRITECSR(sc, R_MAC_ADDR1_HIGH, high);  WRITECSR(sc, R_MAC_ADDR1_LOW, low);
1877    WRITECSR(sc, R_MAC_ADDR2_HIGH, high);  WRITECSR(sc, R_MAC_ADDR2_LOW, low);
1878    WRITECSR(sc, R_MAC_ADDR3_HIGH, high);  WRITECSR(sc, R_MAC_ADDR3_LOW, low);
1879    WRITECSR(sc, R_MAC_ADDR4_HIGH, high);  WRITECSR(sc, R_MAC_ADDR4_LOW, low);
1880
1881    /* Configure the random backoff seed (48) */
1882    seed = 0;
1883    for (i = 0; i < 6; i++)
1884      seed += sc->hwaddr[i];
1885    seed &= 0x3FF;
1886    WRITECSR(sc, R_TX_BACKOFF, seed);
1887
1888    /* Configure the MTU (49) */
1889    WRITECSR(sc, R_RX_MTU, MAX_ETHER_PACK+VLAN_TAG_LEN);
1890
1891    /* Configure the tx IPG (50) */
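    /* Slot time 0x20, IPG 0x6 and IPG CRS 0x2 are the customary Tigon 3
       transmit timing values. */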
1892    WRITECSR(sc, R_TX_LENS,
1893	     V_TXLEN_SLOT(0x20) | V_TXLEN_IPG(0x6) | V_TXLEN_IPGCRS(0x2));
1894
1895    /* Configure the default rx return ring 1 (51) */
1896    WRITECSR(sc, R_RX_RULES_CFG, V_RULESCFG_DEFAULT(1));
1897
1898    /* Configure the receive lists and enable statistics (52) */
1899    WRITECSR(sc, R_RCV_LIST_CFG,
1900	     V_LISTCFG_GROUP(1) | V_LISTCFG_ACTIVE(1) | V_LISTCFG_BAD(1));
1901    /* was V_LISTCFG_DEFAULT(1) | V_LISTCFG_ACTIVE(16) | V_LISTCFG_BAD(1) */
1902
1903    return 0;
1904}
1905
1906static int
1907t3_enable_stats(t3_ether_t *sc)
1908{
1909    uint32_t ctrl;
1910
1911    /* Steps 53-56 */
1912
1913    /* Enable rx stats (53,54) */
1914    WRITECSR(sc, R_RCV_LIST_STATS_ENB, 0xFFFFFF);
1915    ctrl = READCSR(sc, R_RCV_LIST_STATS_CTRL);
1916    ctrl |= M_STATS_ENABLE;
1917    WRITECSR(sc, R_RCV_LIST_STATS_CTRL, ctrl);
1918
1919    /* Enable tx stats (55,56) */
1920    WRITECSR(sc, R_SND_DATA_STATS_ENB, 0xFFFFFF);
1921    ctrl = READCSR(sc, R_SND_DATA_STATS_CTRL);
1922    ctrl |= (M_STATS_ENABLE | M_STATS_FASTUPDATE);
1923    WRITECSR(sc, R_SND_DATA_STATS_CTRL, ctrl);
1924
1925    return 0;
1926}
1927
1928static int
1929t3_init_coalescing(t3_ether_t *sc)
1930{
1931    uint32_t mode = 0;
1932    int timeout;
1933
1934    /* Steps 57-68 */
1935
1936    /* Disable the host coalescing engine (57, 58) */
1937    WRITECSR(sc, R_HOST_COAL_MODE, 0);
1938    for (timeout = CFE_HZ/2; timeout > 0; timeout -= CFE_HZ/10) {
1939	mode = READCSR(sc, R_HOST_COAL_MODE);
1940	if (mode == 0)
1941	    break;
1942	cfe_sleep(CFE_HZ/10);
1943	}
1944    if (mode != 0)
1945	xprintf("bcm5700: coalescing engine not disabled\n");
1946
1947    /* Set coalescing parameters (59-62) */
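    /* Tick values are nominally in microseconds; the engine updates the
       host when either the tick timer or the frame count is reached. */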
1948#if T3_BRINGUP
1949    WRITECSR(sc, R_RCV_COAL_TICKS, 0);
1950    WRITECSR(sc, R_RCV_COAL_MAX_CNT, 1);
1951#else
1952    WRITECSR(sc, R_RCV_COAL_TICKS, 150);
1953    WRITECSR(sc, R_RCV_COAL_MAX_CNT, 10);
1954#endif
1955    if(!(sc->device == K_PCI_ID_BCM5705 ||
1956         sc->device == K_PCI_ID_BCM5750))
1957	    WRITECSR(sc, R_RCV_COAL_INT_TICKS, 0);
1958    WRITECSR(sc, R_RCV_COAL_INT_CNT, 0);
1959#if T3_BRINGUP
1960    WRITECSR(sc, R_SND_COAL_TICKS, 0);
1961    WRITECSR(sc, R_SND_COAL_MAX_CNT, 1);
1962#else
1963    WRITECSR(sc, R_SND_COAL_TICKS, 150);
1964    WRITECSR(sc, R_SND_COAL_MAX_CNT, 10);
1965#endif
1966    if(!(sc->device == K_PCI_ID_BCM5705 ||
1967         sc->device == K_PCI_ID_BCM5750))
1968    	WRITECSR(sc, R_SND_COAL_INT_TICKS, 0);
1969    WRITECSR(sc, R_SND_COAL_INT_CNT, 0);
1970
1971    /* Initialize host status block address (63) */
1972    WRITECSR(sc, R_STATUS_HOST_ADDR, 0);
1973    WRITECSR(sc, R_STATUS_HOST_ADDR+4, PTR_TO_PCI(sc->status));
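    /* The coalescing engine DMAs the status block to this uncached host
       buffer before signalling an interrupt. */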
1974
1975    if(!(sc->device == K_PCI_ID_BCM5705 ||
1976         sc->device == K_PCI_ID_BCM5750))
1977    {
1978	    /* Initialize host statistics block address (64) */
1979	    WRITECSR(sc, R_STATS_HOST_ADDR, 0);
1980	    WRITECSR(sc, R_STATS_HOST_ADDR+4, PTR_TO_PCI(sc->stats));
1981
1982	    /* Set statistics block NIC address and tick count (65, 66) */
1983	    WRITECSR(sc, R_STATS_TICKS, 1000000);
1984	    WRITECSR(sc, R_STATS_BASE_ADDR, A_MAC_STATS);
1985
1986	    /* Set status block NIC address (67) */
1987	    WRITECSR(sc, R_STATUS_BASE_ADDR, A_MAC_STATUS);
1988    }
1989
1990    /* Select the status block transfer size. */
1991    if (sc->device == K_PCI_ID_BCM5700)
1992	    mode = 0;          /* Truncated transfers not supported */
1993    else
1994	    mode = V_HCM_SBSIZE(STATUS_BLOCK_SIZE(MAX_RI));
1995
1996    /* Enable the host coalescing engine (68) */
1997    mode |= M_HCM_ENABLE;
1998    WRITECSR(sc, R_HOST_COAL_MODE, mode);
1999
2000    return(0);
2001}
2002
2003static int
2004t3_init_dma(t3_ether_t *sc)
2005{
2006    uint32_t mode;
2007
2008    /* Steps 69-87 */
2009
2010    /* Enable receive BD completion, placement, and selector blocks (69-71) */
2011    WRITECSR(sc, R_RCV_BD_COMP_MODE, M_MODE_ENABLE | M_MODE_ATTNENABLE);
2012    WRITECSR(sc, R_RCV_LIST_MODE, M_MODE_ENABLE);
2013    if(!(sc->device == K_PCI_ID_BCM5705 ||
2014         sc->device == K_PCI_ID_BCM5750))
2015    {
2016        WRITECSR(sc, R_RCV_LIST_SEL_MODE, M_MODE_ENABLE | M_MODE_ATTNENABLE);
2017    }
2018
2019    /* Enable DMA engines, enable and clear statistics (72, 73) */
2020    mode = READCSR(sc, R_MAC_MODE);
2021    mode |= (M_MACM_FHDEENB | M_MACM_RDEENB | M_MACM_TDEENB |
2022	     M_MACM_RXSTATSENB | M_MACM_RXSTATSCLR |
2023	     M_MACM_TXSTATSENB | M_MACM_TXSTATSCLR);
2024
2025    if(!(sc->flags & T3_NO_PHY))
2026    {
2027#if T3_AUTOPOLL
2028    mode |= V_MACM_PORTMODE(K_MACM_PORTMODE_MII);
2029#endif
2030    }
2031
    WRITECSR(sc, R_MAC_MODE, mode | M_MACM_RXSTATSCLR | M_MACM_TXSTATSCLR);
2033
2034    if(!(sc->flags & T3_NO_PHY))
2035    {
2036#if T3_AUTOPOLL
2037    WRITECSR(sc, R_MISC_LOCAL_CTRL, M_MLCTL_INTATTN);
2038#endif
2039    }
2040
2041    /* Configure GPIOs (74) - skipped */
2042
2043    /* Clear interrupt mailbox (75) */
2044    WRITEMBOX(sc, R_INT_MBOX(0), 0);
2045
2046    /* Enable DMA completion block (76) */
2047    if(!(sc->device == K_PCI_ID_BCM5705 ||
2048         sc->device == K_PCI_ID_BCM5750))
2049    {
2050        WRITECSR(sc, R_DMA_COMP_MODE, M_MODE_ENABLE);
2051    }
2052
2053    /* Configure write and read DMA modes (77, 78) */
2054    WRITECSR(sc, R_WR_DMA_MODE, M_MODE_ENABLE | M_ATTN_ALL);
2055    WRITECSR(sc, R_RD_DMA_MODE, M_MODE_ENABLE | M_ATTN_ALL);
2056
    return(0);
2058}
2059
2060static int
2061t3_init_enable(t3_ether_t *sc)
2062{
2063    uint32_t mhc;
2064    uint32_t pmcs;
2065#if T3_AUTOPOLL
2066    uint32_t mode, mask;
2067#endif
2068    int  i;
2069
2070    /* Steps 79-97 */
2071
2072    /* Enable completion functional blocks (79-82) */
2073    WRITECSR(sc, R_RCV_COMP_MODE, M_MODE_ENABLE | M_MODE_ATTNENABLE);
2074    if(!(sc->device == K_PCI_ID_BCM5705 ||
2075         sc->device == K_PCI_ID_BCM5750))
2076    {
2077        WRITECSR(sc, R_MBUF_FREE_MODE, M_MODE_ENABLE);
2078    }
2079    WRITECSR(sc, R_SND_DATA_COMP_MODE, M_MODE_ENABLE);
2080    WRITECSR(sc, R_SND_BD_COMP_MODE, M_MODE_ENABLE | M_MODE_ATTNENABLE);
2081
2082    /* Enable initiator functional blocks (83-86) */
2083    WRITECSR(sc, R_RCV_BD_INIT_MODE, M_MODE_ENABLE | M_MODE_ATTNENABLE);
2084    WRITECSR(sc, R_RCV_DATA_INIT_MODE, M_MODE_ENABLE | M_RCVINITMODE_RTNSIZE);
2085    WRITECSR(sc, R_SND_DATA_MODE, M_MODE_ENABLE);
2086    WRITECSR(sc, R_SND_BD_INIT_MODE, M_MODE_ENABLE | M_MODE_ATTNENABLE);
2087
2088    /* Enable the send BD selector (87) */
2089    WRITECSR(sc, R_SND_BD_SEL_MODE, M_MODE_ENABLE | M_MODE_ATTNENABLE);
2090
2091    /* Download firmware (88) - skipped */
2092
2093    /* Enable the MAC (89,90) */
2094    WRITECSR(sc, R_TX_MODE, M_MODE_ENABLE);   /* optional flow control */
2095    WRITECSR(sc, R_RX_MODE, M_MODE_ENABLE);   /* other options */
2096
2097    /* Disable auto-polling (91) */
2098    mii_access_init(sc);
2099
2100    /* Configure power state (92) */
2101    pmcs = READCSR(sc, PCI_PMCSR_REG);
2102    pmcs &= ~PCI_PMCSR_STATE_MASK;
2103    pmcs |= PCI_PMCSR_STATE_D0;
2104    WRITECSR(sc, PCI_PMCSR_REG, pmcs);
2105
2106    /* Some chips require a little time to power up */
2107    cfe_sleep(1);
2108
2109    if(!(sc->flags & T3_NO_PHY))
2110    {
2111#if T3_AUTOPOLL
2112	    /* Program hardware LED control (93) */
2113	    WRITECSR(sc, R_MAC_LED_CTRL, 0x00);   /* LEDs at PHY layer */
2114#endif
2115
2116#if T3_AUTOPOLL
2117	    /* Ack/clear link change events */
2118	    WRITECSR(sc, R_MAC_STATUS, M_LINKCHNG_CLR);
2119	    WRITECSR(sc, R_MI_STATUS, 0);
2120
2121	    /* Enable autopolling */
2122	    mode = READCSR(sc, R_MI_MODE);
2123	    mode |= M_MIMODE_POLLING | 0x000c000;
2124	    WRITECSR(sc, R_MI_MODE, mode);
2125
2126	    /* Enable link state attentions */
2127	    mask = READCSR(sc, R_MAC_EVENT_ENB);
2128	    mask |= M_EVT_LINKCHNG;
2129	    WRITECSR(sc, R_MAC_EVENT_ENB, mask);
2130#else
2131	    /* Initialize link (94) */
2132	    WRITECSR(sc, R_MI_STATUS, M_MISTAT_LINKED);
2133
2134	    /* Start autonegotiation (95) - see t3_initlink below */
2135
2136	    /* Setup multicast filters (96) */
2137	    for (i = 0; i < 4; i++)
2138		    WRITECSR(sc, R_MAC_HASH(i), 0);
2139#endif /* T3_AUTOPOLL */
2140    }
2141    else
2142    {
2143	    /* Initialize link (94) */
2144	    WRITECSR(sc, R_MI_STATUS, M_MISTAT_LINKED);
2145
2146	    /* Start autonegotiation (95) - see t3_initlink below */
2147
2148	    /* Setup multicast filters (96) */
2149	    for (i = 0; i < 4; i++)
2150		    WRITECSR(sc, R_MAC_HASH(i), 0);
2151    }
2152
2153    /* Enable interrupts (97) */
2154    mhc = READCSR(sc, R_MISC_HOST_CTRL);
2155    mhc &= ~M_MHC_MASKPCIINT;
2156    WRITECSR(sc, R_MISC_HOST_CTRL, mhc);
2157
2158    if ((sc->flags & T3_NO_PHY))
2159	    cfe_sleep(1);
2160
2161    return(0);
2162}
2163
2164
2165static void
2166t3_initlink(t3_ether_t *sc)
2167{
2168    uint32_t mcr;
2169
2170    if (!(sc->flags & T3_NO_PHY))
2171    {
2172	    sc->phy_addr = mii_probe(sc);
2173	    if (sc->phy_addr < 0)
2174	    {
2175		xprintf("%s: no PHY found\n", t3_devname(sc));
2176		return;
2177	    }
2178#if T3_DEBUG
2179	    xprintf("%s: PHY addr %d\n", t3_devname(sc), sc->phy_addr);
2180#endif
2181
2182	    if (1)
2183		mii_autonegotiate(sc);
2184	    else
2185		mii_set_speed(sc, ETHER_SPEED_10HDX);
2186
	    /*
	    ** Change the 5461 PHY's INTR/ENERGYDET LED pin to function as an
	    ** energy-detect output by writing shadow register 0x1C with the
	    ** spare-control selector (00100) and the energy-detect mask bits.
	    */
2191	    mii_write_shadow_register(sc, MII_SHADOW, (SHDW_SPR_CTRL | SHDW_NRG_DET) );
2192
2193	    mii_enable_interrupts(sc);
2194
2195	    mcr = READCSR(sc, R_MODE_CTRL);
2196	    mcr |= M_MCTL_MACINT;
2197	    WRITECSR(sc, R_MODE_CTRL, mcr);
2198    }
2199    else
2200    {
2201	    /* T3_NO_PHY means there is a ROBO switch, configure it */
2202	    robo_info_t *robo;
2203
2204	    robo = bcm_robo_attach(sc->sbh, sc, NULL,
2205	                           (miird_f)mii_read_register, (miiwr_f)mii_write_register);
2206	    if (robo == NULL) {
2207		    xprintf("robo_setup: failed to attach robo switch \n");
2208		    goto robo_fail;
2209	    }
2210
2211	    if (bcm_robo_enable_device(robo)) {
2212		    xprintf("robo_setup: failed to enable robo switch \n");
2213		    goto robo_fail;
2214	    }
2215
2216	    /* Configure the switch to do VLAN */
2217	    if (bcm_robo_config_vlan(robo)) {
2218		    xprintf("robo_setup: robo_config_vlan failed\n");
2219		    goto robo_fail;
2220	    }
2221
2222	    /* Enable the switch */
2223	    if (bcm_robo_enable_switch(robo)) {
2224		    xprintf("robo_setup: robo_enable_switch failed\n");
2225	    robo_fail:
2226		    bcm_robo_detach(robo);
2227	    }
2228
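	    /* With a ROBO switch attached there is no PHY to negotiate with,
	       so force a fixed 1000FDX link to the switch. */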
2229	    t3_force_speed(sc, ETHER_SPEED_1000FDX);
2230    }
2231
2232    if (sc->flags & T3_SB_CORE) {
2233	    char etXmacaddr[] = "etXXXXmacaddr";
2234	    uint32_t low, high;
2235
2236	    sprintf(etXmacaddr, "et%umacaddr", sc->sbidx);
2237	    bcm_ether_atoe(getvar(NULL, etXmacaddr),
2238	                   (struct ether_addr *)sc->hwaddr);
2239	    high = (sc->hwaddr[0] << 8) | (sc->hwaddr[1]);
2240	    low = ((sc->hwaddr[2] << 24) | (sc->hwaddr[3] << 16)
2241	           | (sc->hwaddr[4] << 8) | sc->hwaddr[5]);
2242	    /* For now, use a single MAC address */
2243	    WRITECSR(sc, R_MAC_ADDR1_HIGH, high);  WRITECSR(sc, R_MAC_ADDR1_LOW, low);
2244	    WRITECSR(sc, R_MAC_ADDR2_HIGH, high);  WRITECSR(sc, R_MAC_ADDR2_LOW, low);
2245	    WRITECSR(sc, R_MAC_ADDR3_HIGH, high);  WRITECSR(sc, R_MAC_ADDR3_LOW, low);
2246	    WRITECSR(sc, R_MAC_ADDR4_HIGH, high);  WRITECSR(sc, R_MAC_ADDR4_LOW, low);
2247    }
2248
2249    sc->mii_polling = 0;
2250    sc->phy_change = 0;
2251}
2252
2253static void
2254t3_shutdownlink(t3_ether_t *sc)
2255{
2256    uint32_t mcr;
2257
2258    mcr = READCSR(sc, R_MODE_CTRL);
2259    mcr &= ~M_MCTL_MACINT;
2260    WRITECSR(sc, R_MODE_CTRL, mcr);
2261
2262    WRITECSR(sc, R_MAC_EVENT_ENB, 0);
2263
2264    /* The manual is fuzzy about what to do with the PHY at this
2265       point.  Empirically, resetting the 5705 PHY (but not others)
2266       will cause it to get stuck in 10/100 MII mode.  */
2267    if (!(sc->flags & T3_NO_PHY))
2268    {
2269    	if (sc->device != K_PCI_ID_BCM5705)
2270		mii_write_register(sc, sc->phy_addr, MII_BMCR, BMCR_RESET);
2271
2272	    sc->mii_polling = 0;
2273	    sc->phy_change = 0;
2274    }
2275}
2276
2277
2278static void
2279t3_hwinit(t3_ether_t *sc)
2280{
2281    if (sc->state != eth_state_on) {
2282
2283	if (sc->state == eth_state_uninit) {
2284	    t3_coldreset(sc);
2285	    }
2286	else
2287	    t3_warmreset(sc);
2288
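	/* The numbered steps in the helpers below follow the chip's
	   documented initialization sequence. */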
2289	t3_init_registers(sc);
2290	t3_init_pools(sc);
2291	t3_init_rings(sc);
2292	t3_configure_mac(sc);
2293	t3_enable_stats(sc);
2294	t3_init_coalescing(sc);
2295	t3_init_dma(sc);
2296	t3_init_enable(sc);
2297#if T3_DEBUG
2298	dumpcsrs(sc, "end init");
2299#else
2300	(void)dumpcsrs;
2301#endif
2302
2303	eeprom_access_init(sc);
2304#if T3_DEBUG
2305	{
2306	    uint32_t eeprom[0x100/4];
2307	    int i;
2308
2309	    cfe_sleep(1);
2310	    for (i = 0; i < 4; i++) {
2311		eeprom_read_range(sc, 0, 4, eeprom);
2312		}
2313
2314	    eeprom_read_range(sc, 0, sizeof(eeprom), eeprom);
2315	    eeprom_dump_range("Boot Strap", eeprom, 0x00, 20);
2316	    eeprom_dump_range("Manufacturing Info", eeprom, 0x74, 140);
2317	}
2318#else
2319	(void)eeprom_read_range;
2320	(void)eeprom_dump_range;
2321#endif
2322
2323	t3_initlink(sc);
2324
2325	sc->state = eth_state_off;
2326	}
2327
2328}
2329
2330static void
2331t3_hwshutdown(t3_ether_t *sc)
2332{
2333    /* Receive path shutdown */
2334    t3_clear(sc, R_RX_MODE, M_MODE_ENABLE);
2335    t3_clear(sc, R_RCV_BD_INIT_MODE, M_MODE_ENABLE);
2336    t3_clear(sc, R_RCV_LIST_MODE, M_MODE_ENABLE);
2337    if(!(sc->device == K_PCI_ID_BCM5705 ||
2338         sc->device == K_PCI_ID_BCM5750))
2339    {
2340        t3_clear(sc, R_RCV_LIST_SEL_MODE, M_MODE_ENABLE);
2341    }
2342    t3_clear(sc, R_RCV_DATA_INIT_MODE, M_MODE_ENABLE);
2343    t3_clear(sc, R_RCV_COMP_MODE, M_MODE_ENABLE);
2344    t3_clear(sc, R_RCV_BD_COMP_MODE, M_MODE_ENABLE);
2345
2346    /* Transmit path shutdown */
2347    t3_clear(sc, R_SND_BD_SEL_MODE, M_MODE_ENABLE);
2348    t3_clear(sc, R_SND_BD_INIT_MODE, M_MODE_ENABLE);
2349    t3_clear(sc, R_SND_DATA_MODE, M_MODE_ENABLE);
2350    t3_clear(sc, R_RD_DMA_MODE, M_MODE_ENABLE);
2351    t3_clear(sc, R_SND_DATA_COMP_MODE, M_MODE_ENABLE);
2352    if(!(sc->device == K_PCI_ID_BCM5705 ||
2353         sc->device == K_PCI_ID_BCM5750))
2354    {
2355        t3_clear(sc, R_DMA_COMP_MODE, M_MODE_ENABLE);
2356    }
2357    t3_clear(sc, R_SND_BD_COMP_MODE, M_MODE_ENABLE);
2358    t3_clear(sc, R_TX_MODE, M_MODE_ENABLE);
2359
2360    /* Memory shutdown */
2361    t3_clear(sc, R_HOST_COAL_MODE, M_HCM_ENABLE);
2362    t3_clear(sc, R_WR_DMA_MODE, M_MODE_ENABLE);
2363    if(!(sc->device == K_PCI_ID_BCM5705 ||
2364         sc->device == K_PCI_ID_BCM5750))
2365    {
2366        t3_clear(sc, R_MBUF_FREE_MODE, M_MODE_ENABLE);
2367    }
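    /* Pulse the flow-through queue (FTQ) reset to flush any queued work. */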
2368    WRITECSR(sc, R_FTQ_RESET, 0xFFFFFFFF);
2369    cfe_sleep(1);
2370    WRITECSR(sc, R_FTQ_RESET, 0x00000000);
2371    t3_clear(sc, R_BMGR_MODE, M_BMODE_ENABLE);
2372    t3_clear(sc, R_MEM_MODE, M_MAM_ENABLE);
2373
2374    t3_shutdownlink(sc);
2375
2376    t3_coldreset(sc);
2377
2378    sc->state = eth_state_uninit;
2379}
2380
2381
2382static void
2383t3_isr(void *arg)
2384{
2385    t3_ether_t *sc = (t3_ether_t *)arg;
2386    volatile t3_status_t *status = sc->status;
2387    uint32_t mac_status;
2388    int handled;
2389
2390    do {
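	/* Service events repeatedly until the status block stops being
	   updated underneath us. */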
2391	WRITEMBOX(sc, R_INT_MBOX(0), 1);
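	/* A nonzero write to the interrupt mailbox masks further interrupts
	   from the chip until the zero write below re-arms them. */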
2392
2393	handled = 0;
2394	mac_status = READCSR(sc, R_MAC_STATUS);  /* force ordering */
2395	status->status &= ~M_STATUS_UPDATED;
2396
2397	if (status->index[RI(1)].return_p != sc->rxr_1_index) {
2398	    handled = 1;
2399	    if (IPOLL) sc->rx_interrupts++;
2400	    t3_procrxring(sc);
2401	    }
2402
2403	if (status->index[RI(1)].send_c != sc->txc_1_index) {
2404	    handled = 1;
2405	    if (IPOLL) sc->tx_interrupts++;
2406	    t3_proctxring(sc);
2407	    }
2408
2409	if ((status->status & M_STATUS_LINKCHNG) != 0) {
2410	    handled = 1;
2411
2412	    if (!(sc->flags & T3_NO_PHY))
2413	    {
2414#if T3_AUTOPOLL
2415	    	WRITECSR(sc, R_MAC_STATUS, M_LINKCHNG_CLR);
2416#endif
2417	    }
2418
2419	    WRITECSR(sc, R_MAC_STATUS, M_EVT_MICOMPLETE);
2420
2421	    status->status &= ~M_STATUS_LINKCHNG;
2422	    sc->phy_change = 1;
2423	 }
2424
2425	WRITEMBOX(sc, R_INT_MBOX(0), 0);
2426	(void)READMBOX(sc, R_INT_MBOX(0));  /* push */
2427
2428#if !XPOLL
2429	if (!handled)
2430	    sc->bogus_interrupts++;
2431#endif
2432
2433	} while ((status->status & M_STATUS_UPDATED) != 0);
2434
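    /* If the receive producer index advanced while we were handling events,
       tell the chip about the newly posted standard buffers. */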
2435    if (sc->rxp_std_index != sc->prev_rxp_std_index) {
2436	sc->prev_rxp_std_index = sc->rxp_std_index;
2437	WRITEMBOX(sc, R_RCV_BD_STD_PI, sc->rxp_std_index);
2438	}
2439}
2440
2441
2442static void
2443t3_clear_stats(t3_ether_t *sc)
2444{
2445    t3_stats_t zeros;
2446
2447    if (sc->device == K_PCI_ID_BCM5705 ||
2448        sc->device == K_PCI_ID_BCM5750)
2449    	return;
2450
2451    memset(&zeros, 0, sizeof(t3_stats_t));
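    /* Pointing the statistics-reload mailbox at a zeroed block makes the
       chip reload, and thereby clear, its counters. */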
2452    WRITEMBOX(sc, R_RELOAD_STATS_MBOX + 4, 0);
2453    WRITEMBOX(sc, R_RELOAD_STATS_MBOX, PTR_TO_PCI(&zeros));
2454}
2455
2456
2457static void
2458t3_start(t3_ether_t *sc)
2459{
2460    t3_hwinit(sc);
2461
2462    sc->intmask = 0;
2463
2464#if IPOLL
2465    cfe_request_irq(sc->irq, t3_isr, sc, CFE_IRQ_FLAGS_SHARED, 0);
2466
2467    if (!(sc->flags & T3_NO_PHY))
2468    {
2469#if T3_AUTOPOLL
2470    	sc->intmask |= M_EVT_LINKCHNG;
2471#else
2472    	sc->intmask |= M_EVT_LINKCHNG | M_EVT_MIINT;
2473#endif
2474    }
2475    else
2476    {
2477    	sc->intmask |= M_EVT_LINKCHNG | M_EVT_MIINT;
2478    }
2479    WRITECSR(sc, R_MAC_EVENT_ENB, sc->intmask);
2480#endif
2481
2482    /* Post some Rcv Producer buffers */
2483    sc->prev_rxp_std_index = sc->rxp_std_index;
2484    WRITEMBOX(sc, R_RCV_BD_STD_PI, sc->rxp_std_index);
2485
2486    sc->state = eth_state_on;
2487}
2488
2489static void
2490t3_stop(t3_ether_t *sc)
2491{
2492    WRITECSR(sc, R_MAC_EVENT_ENB, 0);
2493    sc->intmask = 0;
2494#if IPOLL
2495    cfe_free_irq(sc->irq, 0);
2496#endif
2497
2498    if (sc->state == eth_state_on) {
2499	sc->state = eth_state_off;
2500	t3_hwshutdown(sc);
2501	t3_reinit(sc);
2502	}
2503}
2504
2505
2506static int t3_ether_open(cfe_devctx_t *ctx);
2507static int t3_ether_read(cfe_devctx_t *ctx,iocb_buffer_t *buffer);
2508static int t3_ether_inpstat(cfe_devctx_t *ctx,iocb_inpstat_t *inpstat);
2509static int t3_ether_write(cfe_devctx_t *ctx,iocb_buffer_t *buffer);
2510static int t3_ether_ioctl(cfe_devctx_t *ctx,iocb_buffer_t *buffer);
2511static int t3_ether_close(cfe_devctx_t *ctx);
2512static void t3_ether_poll(cfe_devctx_t *ctx, int64_t ticks);
2513static void t3_ether_reset(void *softc);
2514
2515const static cfe_devdisp_t t3_ether_dispatch = {
2516    t3_ether_open,
2517    t3_ether_read,
2518    t3_ether_inpstat,
2519    t3_ether_write,
2520    t3_ether_ioctl,
2521    t3_ether_close,
2522    t3_ether_poll,
2523    t3_ether_reset
2524};
2525
2526cfe_driver_t bcm5700drv = {
2527    "BCM570x Ethernet",
2528    "eth",
2529    CFE_DEV_NETWORK,
2530    &t3_ether_dispatch,
2531    t3_ether_probe
2532};
2533
2534
2535static void
2536t3_delete_sc(t3_ether_t *sc)
2537{
2538    xprintf("BCM570x attach: No memory to complete probe\n");
2539    if (sc != NULL) {
2540	if (sc->txp_1 != NULL)
2541	    kfree_uncached(sc->txp_1);
2542	if (sc->rxr_1 != NULL)
2543	    kfree_uncached(sc->rxr_1);
2544	if (sc->rxp_std != NULL)
2545	    kfree_uncached(sc->rxp_std);
2546	if (sc->stats != NULL)
2547	    kfree_uncached((t3_stats_t *)sc->stats);
2548	if (sc->status != NULL)
	    kfree_uncached((t3_status_t *)sc->status);
2550	KFREE(sc);
2551	}
2552}
2553
2554static int
2555t3_ether_attach(cfe_driver_t *drv, pcitag_t tag, int index)
2556{
2557    t3_ether_t *sc;
2558    char descr[80];
2559    phys_addr_t pa;
2560    uint32_t base;
2561    uint32_t pcictrl;
2562    uint32_t addr;
2563    uint32_t mcfg;
2564    pcireg_t device, class;
2565    const char *devname;
2566    bool rgmii = FALSE;
2567    sb_t *sbh = NULL;
2568    int i;
2569
2570    device = pci_conf_read(tag, PCI_ID_REG);
2571    class = pci_conf_read(tag, PCI_CLASS_REG);
2572
2573    if (PCI_PRODUCT(device) == K_PCI_ID_BCM471F) {
2574	sbh = sb_kattach(SB_OSH);
2575	sb_gige_init(sbh, ++sbgige, &rgmii);
2576    }
2577
2578    pci_map_mem(tag, PCI_MAPREG(0), PCI_MATCH_BITS, &pa);
2579    base = (uint32_t)pa;
2580
2581    sc = (t3_ether_t *) KMALLOC(sizeof(t3_ether_t), 0);
2582    if (sc == NULL) {
2583	t3_delete_sc(sc);
2584	return 0;
2585	}
2586
2587    memset(sc, 0, sizeof(*sc));
2588
2589    sc->status = NULL;
2590    sc->stats = NULL;
2591
2592    sc->tag = tag;
2593    sc->device = PCI_PRODUCT(device);
2594    sc->revision = PCI_REVISION(class);
2595    /* (Some?) 5700s report the 5701 device code */
2596    sc->asic_revision = G_MHC_ASICREV(pci_conf_read(tag, R_MISC_HOST_CTRL));
2597    if (sc->device == K_PCI_ID_BCM5701
2598	&& (sc->asic_revision & 0xF000) == 0x7000)
2599	sc->device = K_PCI_ID_BCM5700;
    /* From now on, treat SB-core devices as a BCM5750 */
2601    if (sbh) {
2602	    sc->flags |= rgmii ? T3_RGMII_MODE : 0;
2603	    sc->flags |= T3_SB_CORE;
2604	    if (getintvar(NULL, "boardflags") & BFL_ENETROBO)
2605		    sc->flags |= T3_NO_PHY;
2606	    sc->device = K_PCI_ID_BCM5750;
2607	    sc->sbh = sbh;
2608	    sc->sbidx = sbgige;
2609    }
2610
2611    sc->status = (t3_status_t *) kmalloc_uncached(sizeof(t3_status_t), CACHE_ALIGN);
2612    if (sc->status == NULL) {
2613	t3_delete_sc(sc);
2614	return 0;
2615	}
2616
2617    sc->stats = (t3_stats_t *) kmalloc_uncached(sizeof(t3_stats_t), CACHE_ALIGN);
2618    if (sc->stats == NULL) {
2619	t3_delete_sc(sc);
2620	return 0;
2621	}
2622
2623    if (sc->device == K_PCI_ID_BCM5705 ||
2624        sc->device == K_PCI_ID_BCM5750)
2625	sc->rxr_entries = RXR_RING_ENTRIES_05;
2626    else
2627	sc->rxr_entries = RXR_RING_ENTRIES;
2628
2629    sc->rxp_std =
2630        (t3_rcv_bd_t *) kmalloc_uncached(RXP_STD_ENTRIES*RCV_BD_SIZE, CACHE_ALIGN);
2631    sc->rxr_1 =
2632        (t3_rcv_bd_t *) kmalloc_uncached(sc->rxr_entries*RCV_BD_SIZE, CACHE_ALIGN);
2633    sc->txp_1 =
2634        (t3_snd_bd_t *) kmalloc_uncached(TXP_RING_ENTRIES*SND_BD_SIZE, CACHE_ALIGN);
2635    if (sc->rxp_std == NULL || sc->rxr_1 == NULL || sc->txp_1 == NULL) {
2636	t3_delete_sc(sc);
2637	return 0;
2638	}
2639
2640    sc->regbase = base;
2641
2642    /* NB: the relative base of memory depends on the access model */
2643    pcictrl = pci_conf_read(tag, R_PCI_STATE);
2644    sc->membase = base + 0x8000;       /* Normal mode: 32K window */
2645    sc->irq = pci_conf_read(tag, PCI_BPARAM_INTERRUPT_REG) & 0xFF;
2646
2647    sc->devctx = NULL;
2648
2649
2650    /*
2651     * Wait until the GbE indicates it is out of reset before continuing.
2652     * This is done to mitigate any issues with slow loading of the
2653     * GbE firmware.  Wait a bit longer as extra insurance in case there
2654     * is more initialization in the core after it is loaded.
2655     *
2656     * This should be removed if at all possible.  It was added during
2657     * the 4785 A1 silicon bringup to mitigate the slow loading of
2658     * the GbE firmware from EEPROM.
2659     */
2660    do {
2661    	mcfg = READCSR(sc, R_MISC_CFG);
2662    	xprintf("  mcfg = %08x\n", mcfg);
2663    	cfe_sleep(CFE_HZ / 10);
2664    }
2665    while ((mcfg & M_MCFG_CORERESET) != 0);
2666    cfe_sleep(2 * CFE_HZ);
2667
2668    /* Assume on-chip firmware has initialized the MAC address. */
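    /* MAC_ADDR1_HIGH holds address bytes 0-1 and MAC_ADDR1_LOW bytes 2-5,
       most-significant byte first. */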
2669    addr = READCSR(sc, R_MAC_ADDR1_HIGH);
2670    for (i = 0; i < 2; i++)
2671	sc->hwaddr[i] = (addr >> (8*(1-i))) & 0xff;
2672    addr = READCSR(sc, R_MAC_ADDR1_LOW);
2673    for (i = 0; i < 4; i++)
2674	sc->hwaddr[2+i] = (addr >> (8*(3-i))) & 0xff;
2675
2676    t3_init(sc);
2677
2678    sc->state = eth_state_uninit;
2679
2680    /* print device info */
2681    switch (sc->device) {
2682    case K_PCI_ID_BCM5700:
2683	devname = "BCM5700"; break;
2684    case K_PCI_ID_BCM5701:
2685	devname = "BCM5701"; break;
2686    case K_PCI_ID_BCM5702:
2687	devname = "BCM5702"; break;
2688    case K_PCI_ID_BCM5703:
2689	devname = "BCM5703"; break;
2690    case K_PCI_ID_BCM5705:
2691	devname = "BCM5705"; break;
2692    case K_PCI_ID_BCM5750:
2693	devname = "BCM5750"; break;
2694    default:
2695	devname = "BCM570x"; break;
2696	}
2697    xsprintf(descr, "%s Ethernet at 0x%X", devname, sc->regbase);
2698    printf("ge%d: %s\n", index, descr);
2699
2700    cfe_attach(drv, sc, NULL, descr);
2701    return 1;
2702}
2703
2704static void
2705t3_ether_probe(cfe_driver_t *drv,
2706	       unsigned long probe_a, unsigned long probe_b,
2707	       void *probe_ptr)
2708{
2709    int index;
2710    int n;
2711
2712    n = 0;
2713    index = 0;
2714    for (;;) {
2715	pcitag_t tag;
2716	pcireg_t device;
2717
2718	if (pci_find_class(PCI_CLASS_NETWORK, index, &tag) != 0)
2719	   break;
2720
2721	index++;
2722
2723	device = pci_conf_read(tag, PCI_ID_REG);
2724	if (PCI_VENDOR(device) == K_PCI_VENDOR_BROADCOM) {
2725	    switch (PCI_PRODUCT(device)) {
2726		case K_PCI_ID_BCM5700:
2727		case K_PCI_ID_BCM5701:
2728		case K_PCI_ID_BCM5702:
2729		case K_PCI_ID_BCM5703:
2730		case K_PCI_ID_BCM5703a:
2731		case K_PCI_ID_BCM5703b:
2732		case K_PCI_ID_BCM5704C:
2733		case K_PCI_ID_BCM5705:
2734		case K_PCI_ID_BCM5750:
2735		case K_PCI_ID_BCM471F:
2736		    t3_ether_attach(drv, tag, n);
2737		    n++;
2738		    break;
2739		default:
2740		    break;
2741		}
2742	    }
2743	}
2744}
2745
2746
2747/* The functions below are called via the dispatch vector for the Tigon 3 */
2748
2749static int
2750t3_ether_open(cfe_devctx_t *ctx)
2751{
2752    t3_ether_t *sc = ctx->dev_softc;
2753    volatile t3_stats_t *stats = sc->stats;
2754    int i;
2755
2756    if (sc->state == eth_state_on)
2757	t3_stop(sc);
2758
2759    sc->devctx = ctx;
2760
2761    for (i = 0; i < L_MAC_STATS/sizeof(uint64_t); i++)
2762	{
2763	stats->stats[i] = 0;
2764	}
2765
2766    t3_start(sc);
2767
2768    sc->rx_interrupts = sc->tx_interrupts = sc->bogus_interrupts = 0;
2769    t3_clear_stats(sc);
2770
2771    if (XPOLL)
2772	    t3_isr(sc);
2773
2774    return(0);
2775}
2776
2777static int
2778t3_ether_read(cfe_devctx_t *ctx, iocb_buffer_t *buffer)
2779{
2780    t3_ether_t *sc = ctx->dev_softc;
2781    eth_pkt_t *pkt;
2782    int blen;
2783
2784    if (XPOLL) t3_isr(sc);
2785
2786    if (sc->state != eth_state_on) return -1;
2787
2788    CS_ENTER(sc);
2789    pkt = (eth_pkt_t *) q_deqnext(&(sc->rxqueue));
2790    CS_EXIT(sc);
2791
2792    if (pkt == NULL) {
2793	buffer->buf_retlen = 0;
2794	return 0;
2795	}
2796
2797    blen = buffer->buf_length;
2798    if (blen > pkt->length) blen = pkt->length;
2799
2800    memcpy(buffer->buf_ptr, pkt->buffer, blen);
2801    buffer->buf_retlen = blen;
2802
2803    eth_free_pkt(sc, pkt);
2804
2805    if (XPOLL) t3_isr(sc);
2806    return 0;
2807}
2808
2809static int
2810t3_ether_inpstat(cfe_devctx_t *ctx, iocb_inpstat_t *inpstat)
2811{
2812    t3_ether_t *sc = ctx->dev_softc;
2813
2814    if (XPOLL) t3_isr(sc);
2815
2816    if (sc->state != eth_state_on) return -1;
2817
2818    /* We avoid an interlock here because the result is a hint and an
2819       interrupt cannot turn a non-empty queue into an empty one. */
2820    inpstat->inp_status = (q_isempty(&(sc->rxqueue))) ? 0 : 1;
2821
2822    return 0;
2823}
2824
2825static int
2826t3_ether_write(cfe_devctx_t *ctx, iocb_buffer_t *buffer)
2827{
2828    t3_ether_t *sc = ctx->dev_softc;
2829    eth_pkt_t *pkt;
2830    int blen;
2831
2832    if (XPOLL) t3_isr(sc);
2833
2834    if (sc->state != eth_state_on) return -1;
2835
2836    pkt = eth_alloc_pkt(sc);
2837    if (!pkt) return CFE_ERR_NOMEM;
2838
2839    blen = buffer->buf_length;
2840    if (blen > pkt->length) blen = pkt->length;
2841
2842    memcpy(pkt->buffer, buffer->buf_ptr, blen);
2843    pkt->length = blen;
2844
    /*
     * Ensure that the packet memory is flushed out of the data cache
     * before posting it for transmission.
     */
2849    cfe_flushcache(CFE_CACHE_FLUSH_D);
2850
2851    if (t3_transmit(sc, pkt) != 0) {
2852	eth_free_pkt(sc,pkt);
2853	return CFE_ERR_IOERR;
2854	}
2855
2856    if (XPOLL) t3_isr(sc);
2857    return 0;
2858}
2859
2860static int
2861t3_ether_ioctl(cfe_devctx_t *ctx, iocb_buffer_t *buffer)
2862{
2863    t3_ether_t *sc = ctx->dev_softc;
2864
2865    switch ((int)buffer->buf_ioctlcmd) {
2866	case IOCTL_ETHER_GETHWADDR:
2867	    memcpy(buffer->buf_ptr, sc->hwaddr, sizeof(sc->hwaddr));
2868	    return 0;
2869
2870	default:
2871	    return -1;
2872	}
2873}
2874
2875static int
2876t3_ether_close(cfe_devctx_t *ctx)
2877{
2878    t3_ether_t *sc = ctx->dev_softc;
2879    volatile t3_stats_t *stats = sc->stats;
2880    uint32_t inpkts, outpkts, interrupts;
2881    int i;
2882
2883    t3_stop(sc);
2884
2885#if T3_BRINGUP
2886    for (i = 0; i < L_MAC_STATS/sizeof(uint64_t); i++) {
2887	if (stats->stats[i] != 0)
2888	    xprintf(" stats[%d] = %8lld\n", i, stats->stats[i]);
2889	}
2890#else
2891    (void) i;
2892#endif
2893
2894    inpkts = stats->stats[ifHCInUcastPkts]
2895	      + stats->stats[ifHCInMulticastPkts]
2896	      + stats->stats[ifHCInBroadcastPkts];
2897    outpkts = stats->stats[ifHCOutUcastPkts]
2898	      + stats->stats[ifHCOutMulticastPkts]
2899	      + stats->stats[ifHCOutBroadcastPkts];
2900    interrupts = stats->stats[nicInterrupts];
2901
2902    /* Empirically, counters on the 5705 are always zero.  */
2903    if (!(sc->device == K_PCI_ID_BCM5705 ||
2904          sc->device == K_PCI_ID_BCM5750)) {
2905	xprintf("%s: %d sent, %d received, %d interrupts\n",
2906		t3_devname(sc), outpkts, inpkts, interrupts);
2907	if (IPOLL) {
2908	    xprintf("  %d rx interrupts, %d tx interrupts",
2909		    sc->rx_interrupts, sc->tx_interrupts);
2910	    if (sc->bogus_interrupts != 0)
2911	        xprintf(", %d bogus interrupts", sc->bogus_interrupts);
2912	    xprintf("\n");
2913	    }
2914	}
2915
2916    sc->devctx = NULL;
2917    return 0;
2918}
2919
2920static void
2921t3_ether_poll(cfe_devctx_t *ctx, int64_t ticks)
2922{
2923    t3_ether_t *sc = ctx->dev_softc;
2924    int changed;
2925
    if (!(sc->flags & T3_NO_PHY)) {
        if (sc->phy_change && sc->state != eth_state_uninit && !sc->mii_polling) {
            uint32_t mask;

            sc->mii_polling++;
            mask = READCSR(sc, R_MAC_EVENT_ENB);
            WRITECSR(sc, R_MAC_EVENT_ENB, 0);

            changed = mii_poll(sc);
            if (changed) {
                mii_autonegotiate(sc);
                }
            sc->phy_change = 0;
            sc->mii_polling--;

            WRITECSR(sc, R_MAC_EVENT_ENB, mask);
            }
        }
2944}
2945
2946static void
2947t3_ether_reset(void *softc)
2948{
2949    t3_ether_t *sc = (t3_ether_t *)softc;
2950
2951    /* Turn off the Ethernet interface. */
2952
2953    if (sc->state == eth_state_on)
2954	t3_stop(sc);
2955
2956    sc->state = eth_state_uninit;
2957}
2958