1/*  *********************************************************************
2    *  Broadcom Common Firmware Environment (CFE)
3    *
4    *  Adaptec AIC-6915 (10/100 EthernetMAC) driver	File: dev_aic6915.c
5    *
6    *  Author:  Ed Satterthwaite
7    *
8    *********************************************************************
9    *
10    *  Copyright 2000,2001,2002,2003
11    *  Broadcom Corporation. All rights reserved.
12    *
13    *  This software is furnished under license and may be used and
14    *  copied only in accordance with the following terms and
15    *  conditions.  Subject to these conditions, you may download,
16    *  copy, install, use, modify and distribute modified or unmodified
17    *  copies of this software in source and/or binary form.  No title
18    *  or ownership is transferred hereby.
19    *
20    *  1) Any source code used, modified or distributed must reproduce
21    *     and retain this copyright notice and list of conditions
22    *     as they appear in the source file.
23    *
24    *  2) No right is granted to use any trade name, trademark, or
25    *     logo of Broadcom Corporation.  The "Broadcom Corporation"
26    *     name may not be used to endorse or promote products derived
27    *     from this software without the prior written permission of
28    *     Broadcom Corporation.
29    *
30    *  3) THIS SOFTWARE IS PROVIDED "AS-IS" AND ANY EXPRESS OR
31    *     IMPLIED WARRANTIES, INCLUDING BUT NOT LIMITED TO, ANY IMPLIED
32    *     WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
33    *     PURPOSE, OR NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT
34    *     SHALL BROADCOM BE LIABLE FOR ANY DAMAGES WHATSOEVER, AND IN
35    *     PARTICULAR, BROADCOM SHALL NOT BE LIABLE FOR DIRECT, INDIRECT,
36    *     INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
37    *     (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
38    *     GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
39    *     BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
40    *     OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
41    *     TORT (INCLUDING NEGLIGENCE OR OTHERWISE), EVEN IF ADVISED OF
42    *     THE POSSIBILITY OF SUCH DAMAGE.
43    ********************************************************************* */
44
45
46#include "cfe.h"
47#include "lib_physio.h"
48
49#include "cfe_irq.h"
50
51#include "pcivar.h"
52#include "pcireg.h"
53
54#include "aic6915.h"
55#include "mii.h"
56
57/* This is a driver for the Adaptec AIC6915 ("Starfire") 10/100 MAC.
58
59   Reference:
60     AIC-6915 Ethernet LAN Controller, Programmer's Manual
61     Adaptec, Inc., 691 South Milpitas Boulevard, Milpitas CA, 1998
62
63   The current version has been developed for the Adaptec 62011/TX
64   single-port NIC with SEEQ/LSI 80220 PHY.  The card is strapped to
65   preload information from a serial EEPROM at power up.
66
67   This is a simple version for understanding the chip.  Optimizations
68   can come later.
69
70   This BCM1250 version takes advantage of DMA coherence and uses
71   "preserve bit lanes" addresses for all accesses that cross the
72   ZBbus-PCI bridge. */
73
74#ifndef AIC_DEBUG
75#define AIC_DEBUG 0
76#endif
77
78/* Set IPOLL to drive processing through the pseudo-interrupt
79   dispatcher.  Set XPOLL to drive processing by an external polling
80   agent.  Setting both is ok. */
81
82#ifndef IPOLL
83#define IPOLL 1
84#endif
85#ifndef XPOLL
86#define XPOLL 0
87#endif
88
89#define ENET_ADDR_LEN	6		/* size of an ethernet address */
90#define MIN_ETHER_PACK  64              /* min size of a packet */
91#define MAX_ETHER_PACK  1518		/* max size of a packet */
92#define VLAN_TAG_LEN    4               /* VLAN type plus tag */
93#define CRC_SIZE	4		/* size of CRC field */
94
95/* Packet buffers.  For the AIC-6915, a receive packet buffer must be
96   aligned to a 32-bit word boundary.  We would like it aligned to a
97   cache line boundary for performance.  Note that the IP/TCP header
98   will be 16-bit, but not 32-bit, aligned with this constraint. */
99
100#define ETH_PKTBUF_LEN      (((MAX_ETHER_PACK+31)/32)*32)
101
102#if __long64
103typedef struct eth_pkt_s {
104    queue_t next;			/* 16 */
105    uint8_t *buffer;			/*  8 */
106    uint32_t flags;			/*  4 */
107    int32_t length;			/*  4 */
108    uint8_t data[ETH_PKTBUF_LEN];
109} eth_pkt_t;
110#else
111typedef struct eth_pkt_s {
112    queue_t next;			/*  8 */
113    uint8_t *buffer;			/*  4 */
114    uint32_t flags;			/*  4 */
115    int32_t length;			/*  4 */
116    uint32_t unused[3];			/* 12 */
117    uint8_t data[ETH_PKTBUF_LEN];
118} eth_pkt_t;
119#endif
120
121#define CACHE_ALIGN       32
122#define ETH_PKTBUF_LINES  ((sizeof(eth_pkt_t) + (CACHE_ALIGN-1))/CACHE_ALIGN)
123#define ETH_PKTBUF_SIZE   (ETH_PKTBUF_LINES*CACHE_ALIGN)
124#define ETH_PKTBUF_OFFSET (offsetof(eth_pkt_t, data))
125
126#define ETH_PKT_BASE(data) ((eth_pkt_t *)((data) - ETH_PKTBUF_OFFSET))
127
128static void
129show_packet(char c, eth_pkt_t *pkt)
130{
131    int i;
132    int n = (pkt->length < 32 ? pkt->length : 32);
133
134    xprintf("%c[%4d]:", c, pkt->length);
135    for (i = 0; i < n; i++) {
136	if (i % 4 == 0)
137	    xprintf(" ");
138	xprintf("%02x", pkt->buffer[i]);
139	}
140    xprintf("\n");
141}
142
143
144/* How often to check MII status */
145#define MII_POLL_INTERVAL (1*CFE_HZ)
146
147/* How often to accumulate PCI usage statistics */
148#define PCI_POLL_INTERVAL (CFE_HZ/8)
149
150static void aic_ether_probe(cfe_driver_t *drv,
151			    unsigned long probe_a, unsigned long probe_b,
152			    void *probe_ptr);
153
154
155/* AIC-6915 Hardware Data Structures
156   XXX These work for 1250 big endian, 32-bit physical addressing.
157   XXX Should move to a header file? */
158
159/* AIC-6915 Ring Sizes */
160
161#define RX_RING_ENTRIES_256    256
162#define RX_RING_ENTRIES_2K     2048
163
164#define TX_RING_MAXSIZE        16384
165
166#define COMP_RING_ENTRIES      1024
167
168/* AIC-6915 Receive Descriptors (producer ring).  Page 2-4.  There are
169   2 types, only 32-bit host addressing is supported for now.  */
170
171typedef struct aic_rx_dscr0_s {
172    uint32_t  bufptr;
173} aic_rx_dscr0_t;
174
175#define M_RX_DSCR_ADDR         0xfffffffc
176#define M_RX_DSCR_END          0x00000002
177#define M_RX_DSCR_VALID        0x00000001
178
179/* AIC-6915 Transmit Descriptors (producer ring).  Pages 3-5 to 3-6.
180   There are 5 types, only buffer descriptors and 32-bit addressing
181   are supported for now. */
182
183typedef struct aic_tx_dscr1_s {
184    uint16_t flags;
185    uint16_t length;
186    uint32_t bufptr;
187} aic_tx_dscr1_t;
188
189#define M_FLAG_ID                 0xf000
190#define V_FLAG_ID_TX              0xb000
191
192#define M_FLAG_INTR               0x0800
193#define M_FLAG_END                0x0400
194#define M_FLAG_CALTCP             0x0200
195#define M_FLAG_CRCEN              0x0100
196#define S_FLAG_NBUFFERS           0
197#define M_FLAG_NBUFFERS           0x00ff
198
199/* AIC-6915 Completion Descriptors.  RX and TX can be configured for
200   shared or disjoint rings.  Leading bits of word 0 distinguish.
201   When intermixed, types must be chosen to give identical sizes.  */
202
203#define M_COMP_TAG                0xc0000000
204#define V_COMP_TAG_NONE           0x00000000
205#define V_COMP_TAG_RX             0x40000000
206#define V_COMP_TAG_TX             0x80000000
207
208/* AIC-6915 Receive Completion Descriptors.  Pages 2-5 to 2-9.
209   There are 4 types, selected globally and not self-identifying. */
210
211typedef struct aic_rx_comp_dscr0_s {
212    uint16_t status1;
213    uint16_t length;
214}  aic_rx_comp_dscr0_t;
215
216typedef struct aic_rx_comp_dscr1_s {
217    uint16_t status1;
218    uint16_t length;
219    uint16_t status2;
220    uint16_t vlan;
221}  aic_rx_comp_dscr1_t;
222
223typedef struct aic_rx_comp_dscr2_s {
224    uint16_t status1;
225    uint16_t length;
226    uint16_t status2;
227    uint16_t tcp_cksum;
228}  aic_rx_comp_dscr2_t;
229
230typedef struct aic_rx_comp_dscr3_s {
231    uint16_t status1;
232    uint16_t length;
233    uint16_t status2;
234    uint16_t status3;
235    uint16_t tcp_cksum;
236    uint16_t vlan;
237    uint32_t timestamp;
238}  aic_rx_comp_dscr3_t;
239
240/* Fields of Status1 */
241#define M_STATUS1_TAG             0xc000
242#define V_STATUS1_TAG_RX          0x4000
243#define M_STATUS1_OK              0x2000
244#define M_STATUS1_FIFOFULL        0x1000
245#define M_STATUS1_Q               0x0800
246#define M_END_INDEX               0x07ff  /* in entries */
247
248/* Fields of Status2 */
249#define M_STATUS2_PERFECT         0x8000
250#define M_STATUS2_HASH            0x4000
251#define M_STATUS2_CRCERR          0x2000
252#define M_STATUS2_ISL_CRCERR      0x1000
253#define M_STATUS2_DRIBBLE         0x0800
254#define M_STATUS2_CODEERR         0x0400
255#define M_STATUS2_VLAN            0x0200
256#define M_STATUS2_CKSUM_OK        0x0100
257#define M_STATUS2_CKSUM_BAD       0x0080
258#define M_STATUS2_PART_CKSUM_OK   0x0040
259#define M_STATUS2_FRAG            0x0020
260#define M_STATUS2_TCP             0x0010
261#define M_STATUS2_UDP             0x0008
262#define M_STATUS2_TYPE            0x0007
263#define K_STATUS2_TYPE_UNKNOWN    0x0
264#define K_STATUS2_TYPE_IPV4       0x1
265#define K_STATUS2_TYPE_IPV6       0x2
266#define K_STATUS2_TYPE_IPX        0x3
267#define K_STATUS2_TYPE_ICMP       0x4
268#define K_STATUS2_TYPE_UNSUPP     0x5
269
270/* Fields of Status3 */
271#define M_STATUS3_ISL             0x8000
272#define M_STATUS3_PAUSE           0x4000
273#define M_STATUS3_CONTROL         0x2000
274#define M_STATUS3_HEADER          0x1000
275#define M_STATUS3_TRAILER         0x0800
276#define M_START_INDEX             0x07ff  /* in entries */
277
278/* AIC-6915 Transmit Completion Descriptors (return ring).  Pages 3-10 to 3-11.
279   There are 2 types */
280
281typedef struct aic_tx_comp_dscr0_s {
282    uint16_t stamp;
283    uint16_t qref;
284}  aic_tx_comp_dscr0_t;
285
286typedef struct aic_tx_comp_dscr1_s {
287    uint16_t status;
288    uint16_t qref;
289}  aic_tx_comp_dscr1_t;
290
/* NOTE(review): M_COMP_TYPE (0x02000000) does not cover the
   V_COMP_TYPE_TX value (0x20000000), so masking with it can never
   yield V_COMP_TYPE_TX.  Given the tag field occupies bits 31:30
   (M_COMP_TAG above), bit 29 (0x20000000) is the plausible type bit
   and M_COMP_TYPE looks mistyped -- confirm against the AIC-6915
   manual (pages 3-10 to 3-11).  These constants are unused in the
   visible code, so the discrepancy is currently harmless. */
#define M_COMP_TYPE               0x02000000
#define V_COMP_TYPE_DMA           0x00000000
#define V_COMP_TYPE_TX            0x20000000
294
295/* DMA Complete Entries */
296#define M_STAMP_TAG               0xc000
297#define M_STAMP_TYPE              0x2000
298#define M_STAMP_TIME              0x1fff
299
300/* Transmit Complete Entries */
301#define M_STATUS_TAG              0xc000
302#define M_STATUS_TYPE             0x2000
303#define M_STATUS_PAUSED           0x1000
304#define M_STATUS_PAUSE            0x0800
305#define M_STATUS_CONTROL          0x0400
306#define M_STATUS_UNDERRUN         0x0200
307#define M_STATUS_OVERSIZE         0x0100
308#define M_STATUS_LATECOLL         0x0080
309#define M_STATUS_EXSCOLL          0x0040
310#define M_STATUS_EXSDEFER         0x0020
311#define M_STATUS_DEFERRED         0x0010
312#define M_STATUS_SUCCESS          0x0008
313#define M_STATUS_FIELDRANGE       0x0004
314#define M_STATUS_FIELDLENGTH      0x0002
315#define M_STATUS_CRCERR           0x0001
316
317/* Common */
318#define M_QREF_Q                  0x8000
319#define M_QREF_OFFSET             0x7FFF    /* in bytes */
320
321/* End of AIC-6915 defined data structures */
322
323
324typedef enum {
325    eth_state_uninit,
326    eth_state_off,
327    eth_state_on,
328} eth_state_t;
329
330/* All rings must be aligned to a 256-byte boundary.  Since all sizes
331   are multiples of 256 bytes, they are allocated contiguously and
332   aligned once.  For now, we use single rings (Q1) for rx and rx
333   complete. */
334
335#define RX_RING_ENTRIES           RX_RING_ENTRIES_256
336#define TX_RING_ENTRIES           256
337
338typedef struct aic_rings_s {
339    aic_rx_dscr0_t       rx_dscr[RX_RING_ENTRIES];
340    aic_rx_comp_dscr3_t  rx_done[COMP_RING_ENTRIES];
341    aic_tx_dscr1_t       tx_dscr[TX_RING_ENTRIES];      /* explicit end bit */
342    aic_tx_comp_dscr1_t  tx_done[COMP_RING_ENTRIES];
343} aic_rings_t;
344
345typedef struct aic_ether_s {
346    uint32_t  regbase;
347    uint8_t irq;
348    pcitag_t tag;		/* tag for configuration registers */
349
350    uint8_t   hwaddr[6];
351    uint16_t  device;           /* chip device code */
352    uint8_t revision;           /* chip revision */
353
354    /* current state */
355    eth_state_t state;
356    uint32_t intmask;
357
358    /* packet lists */
359    queue_t freelist;
360    uint8_t *pktpool;
361    queue_t rxqueue;
362
363    /* rings */
364    aic_rx_dscr0_t      *rx_dscr;
365    aic_rx_comp_dscr3_t *rx_done;
366    aic_tx_dscr1_t      *tx_dscr;
367    aic_tx_comp_dscr1_t *tx_done;
368    aic_rings_t         *rings;        /* must be 256-byte aligned */
369
370    cfe_devctx_t *devctx;
371
372    /* PHY access */
373    int      phy_addr;         /* normally 1 */
374    uint16_t phy_status;
375    uint16_t phy_ability;
376
377    /* MII polling control */
378    int      mii_polling;
379    uint64_t mii_polltime;
380
381    /* driver statistics */
382    uint32_t inpkts;
383    uint32_t outpkts;
384    uint32_t interrupts;
385    uint32_t rx_interrupts;
386    uint32_t tx_interrupts;
387
388    /* cumulative bus usage statistics (Tables 7-30 and 7-31) */
389    uint64_t pci_polltime;
390    uint32_t pci_latency;
391    uint32_t int_latency;
392    uint64_t pci_slave;
393    uint64_t pci_master;
394    uint64_t pci_data;
395} aic_ether_t;
396
397
398/* Address mapping macros */
399
400#define PTR_TO_PHYS(x) (K0_TO_PHYS((uintptr_t)(x)))
401#define PHYS_TO_PTR(a) ((uint8_t *)PHYS_TO_K0(a))
402
403#define PCI_TO_PTR(a)  (PHYS_TO_PTR(PCI_TO_PHYS(a)))
404#define PTR_TO_PCI(x)  (PHYS_TO_PCI(PTR_TO_PHYS(x)))
405
406
407/* Chip access macros */
408
409#define READCSR(sc,csr)      (phys_read32((sc)->regbase+(csr)))
410#define WRITECSR(sc,csr,val) (phys_write32((sc)->regbase+(csr), (val)))
411
412
413/* Entry to and exit from critical sections (currently relative to
414   interrupts only, not SMP) */
415
416#if CFG_INTERRUPTS
417#define CS_ENTER(sc) cfe_disable_irq(sc->irq)
418#define CS_EXIT(sc)  cfe_enable_irq(sc->irq)
419#else
420#define CS_ENTER(sc) ((void)0)
421#define CS_EXIT(sc)  ((void)0)
422#endif
423
424
/* Dump the CSRs in [start, next) (byte offsets, 32-bit registers) in
   'columns' columns.  Registers are listed down each column: row i,
   column j shows offset start + 4*i + j*step, where step is the
   per-column span in bytes.  Offsets at or beyond 'next' are
   suppressed, so the last column may be short. */
static void
dumpseq(aic_ether_t *sc, int start, int next)
{
    int offset, i, j;
    int columns = 4;
    /* Rows needed: ceil((register count + 1) / columns).  The +1 is a
       conservative round-up carried from the original layout math. */
    int lines = (((next - start)/4 + 1) + 3)/columns;
    int step = lines*4;

    offset = start;
    for (i = 0; i < lines; i++) {
	xprintf("\nCSR");
	for (j = 0; j < columns; j++) {
	    if (offset + j*step < next)
		xprintf(" %04X: %08X ",
			offset+j*step, READCSR(sc, offset+j*step));
	    }
	offset += 4;
	}
    xprintf("\n");
}
445
446static void
447dumpcsrs(aic_ether_t *sc, const char *legend)
448{
449    xprintf("%s:\n", legend);
450    xprintf("-----Ctrl----");
451    /* PCI control registers */
452    dumpseq(sc, 0x40, 0x50);
453    xprintf("---General---");
454    /* Ethernet functional registers */
455    dumpseq(sc, 0x70, 0xF8);
456    xprintf("-----MAC-----");
457    /* MAC registers */
458    dumpseq(sc, 0x5000, 0x5018);
459    xprintf("-------------\n");
460}
461
462
463/* Packet management */
464
465#define ETH_PKTPOOL_SIZE  64
466#define MIN_RX_DSCRS      32
467
468static eth_pkt_t *
469eth_alloc_pkt(aic_ether_t *sc)
470{
471    eth_pkt_t *pkt;
472
473    CS_ENTER(sc);
474    pkt = (eth_pkt_t *) q_deqnext(&sc->freelist);
475    CS_EXIT(sc);
476    if (!pkt) return NULL;
477
478    pkt->buffer = pkt->data;
479    pkt->length = ETH_PKTBUF_LEN;
480    pkt->flags = 0;
481
482    return pkt;
483}
484
485static void
486eth_free_pkt(aic_ether_t *sc, eth_pkt_t *pkt)
487{
488    CS_ENTER(sc);
489    q_enqueue(&sc->freelist, &pkt->next);
490    CS_EXIT(sc);
491}
492
493static void
494eth_initfreelist(aic_ether_t *sc)
495{
496    int idx;
497    uint8_t *ptr;
498    eth_pkt_t *pkt;
499
500    q_init(&sc->freelist);
501
502    ptr = sc->pktpool;
503    for (idx = 0; idx < ETH_PKTPOOL_SIZE; idx++) {
504	pkt = (eth_pkt_t *) ptr;
505	eth_free_pkt(sc, pkt);
506	ptr += ETH_PKTBUF_SIZE;
507	}
508}
509
510
511/* Utilities */
512
513static const char *
514aic_devname(aic_ether_t * sc)
515{
516    return (sc->devctx != NULL ? cfe_device_name(sc->devctx) : "eth?");
517}
518
519/* The delay loop uses uncached PCI reads, each of which requires at
520   least 3 PCI bus clocks (90 ns at 33 MHz) to complete.  The actual
521   delay will be longer.  */
522#define PCI_MIN_DELAY  90
523
/* Delay for roughly 'usec' microseconds during chip reset sequences.
   NOTE(review): the PCI-read busy-wait is compiled out, so the live
   code ignores 'usec' entirely and sleeps a fixed 2 ticks.  That is
   (much) longer than the 2 usec callers request, which is safe for
   the reset delays in this driver but makes this unsuitable as a
   general-purpose delay -- confirm intent before reuse. */
static void
aic_spin(aic_ether_t *sc, long usec)
{
#if 0  /* XXX Can't use this to time PCI resets */
    long  delay;
    volatile uint32_t t;

    for (delay = 1000*usec; delay > 0; delay -= PCI_MIN_DELAY)
	t = pci_conf_read(sc->tag, PCI_ID_REG);
#else
    cfe_sleep(2);
#endif
}
537
538
539/* Descriptor ring management */
540
/* Replenish the Q1 receive descriptor ring.  Reads the chip's
   producer/consumer indices, tops the ring up to MIN_RX_DSCRS
   outstanding buffers from the packet pool, and publishes the new
   producer index back to the chip. */
static void
aic_fillrxring(aic_ether_t *sc)
{
    uint32_t ptrs;
    unsigned rx_dscr_ci, rx_dscr_pi;
    unsigned rx_on_ring;
    eth_pkt_t *pkt;

    ptrs = READCSR(sc, R_RxDescQueue1Ptrs);
    rx_dscr_ci = (ptrs & M_RxDescConsumer) >> S_RxDescConsumer;
    rx_dscr_pi = (ptrs & M_RxDescProducer) >> S_RxDescProducer;
    /* Outstanding descriptors, accounting for ring wrap. */
    if (rx_dscr_pi >= rx_dscr_ci)
	rx_on_ring = rx_dscr_pi - rx_dscr_ci;
    else
	rx_on_ring = (rx_dscr_pi + RX_RING_ENTRIES) - rx_dscr_ci;

    /* Note: fills only up to MIN_RX_DSCRS (32) of the RX_RING_ENTRIES
       (256) slots, keeping most of the packet pool free. */
    while (rx_on_ring < MIN_RX_DSCRS) {       /* XXX Check this */
	pkt = eth_alloc_pkt(sc);
	if (pkt == NULL) {
	    /* could not allocate a buffer */
	    break;
	    }
	/* Type-0 descriptor: just the (word-aligned) PCI buffer address. */
	sc->rx_dscr[rx_dscr_pi].bufptr =
	  PTR_TO_PCI(pkt->buffer) & M_RX_DSCR_ADDR;
	rx_dscr_pi = (rx_dscr_pi + 1) % RX_RING_ENTRIES;
	rx_on_ring++;
	}

    /* Hand the new descriptors to the chip. */
    ptrs &=~ M_RxDescProducer;
    ptrs |= rx_dscr_pi << S_RxDescProducer;
    WRITECSR(sc, R_RxDescQueue1Ptrs, ptrs);
}
573
574static void
575aic_rx_callback(aic_ether_t *sc, eth_pkt_t *pkt)
576{
577    if (AIC_DEBUG) show_packet('>', pkt);
578
579    CS_ENTER(sc);
580    q_enqueue(&sc->rxqueue, &pkt->next);
581    CS_EXIT(sc);
582    sc->inpkts++;
583}
584
/* Drain the Q1 receive completion ring.  Good frames are passed to
   aic_rx_callback; errored frames are logged and their buffers freed.
   The consumer index is written back only if anything was consumed,
   and the descriptor ring is refilled afterwards.  Returns the number
   of completions processed. */
static int
aic_procrxring(aic_ether_t *sc)
{
    uint32_t ptrs;
    unsigned rx_comp_ci, rx_comp_pi;
    int consumed;
    aic_rx_comp_dscr3_t *rxc;
    aic_rx_dscr0_t *rxd;
    eth_pkt_t   *pkt;

    /* Assumes that only Q1 is being used */
    ptrs = READCSR(sc, R_CompletionQueueProducerIndex);
    rx_comp_pi = G_RxCompletionQ1ProducerIndex(ptrs);
    ptrs = READCSR(sc, R_CompletionQueueConsumerIndex);
    rx_comp_ci = G_RxCompletionQ1ConsumerIndex(ptrs);
    consumed = 0;

    while (rx_comp_ci != rx_comp_pi) {
	rxc = &(sc->rx_done[rx_comp_ci]);
	/* status1 carries the index of the rx descriptor this
	   completion refers to (M_END_INDEX field). */
	rxd = &(sc->rx_dscr[rxc->status1 & M_END_INDEX]);

	/* Recover the eth_pkt_t that owns the DMA buffer. */
	pkt = ETH_PKT_BASE(PCI_TO_PTR(rxd->bufptr & M_RX_DSCR_ADDR));
	pkt->length = rxc->length;
	if ((rxc->status1 & M_STATUS1_OK) != 0)
	    aic_rx_callback(sc, pkt);
	else {
#if 1
	    xprintf("%s: rx error %04X\n", aic_devname(sc), rxc->status2);
#endif
	    eth_free_pkt(sc, pkt);
	}

	consumed++;
        rx_comp_ci = (rx_comp_ci + 1) % COMP_RING_ENTRIES;
	}

    /* Update the completion ring pointers */
    if (consumed != 0) {
	ptrs &= ~M_RxCompletionQ1ConsumerIndex;
	ptrs |= V_RxCompletionQ1ConsumerIndex(rx_comp_ci);
	WRITECSR(sc, R_CompletionQueueConsumerIndex, ptrs);
	}

    /* Refill the descriptor ring */
    aic_fillrxring(sc);

    return consumed;
}
633
634
/* Queue one packet for transmission on the low-priority tx ring.
   Returns 0 on success, -1 if the ring is full (caller keeps
   ownership of the packet in that case; on success the buffer is
   reclaimed later by aic_proctxring). */
static int
aic_transmit(aic_ether_t *sc, eth_pkt_t *pkt)
{
    uint32_t ptrs;
    unsigned tx_dscr_ci, tx_dscr_pi, next_pi;
    aic_tx_dscr1_t *txd;

    if (AIC_DEBUG) show_packet('<', pkt);

    ptrs = READCSR(sc, R_TxDescQueueConsumerIndex);
    tx_dscr_ci = (ptrs & M_LoPrTxConsumerIndex) >> S_LoPrTxConsumerIndex;
    ptrs = READCSR(sc, R_TxDescQueueProducerIndex);
    tx_dscr_pi = (ptrs & M_LoPrTxProducerIndex) >> S_LoPrTxProducerIndex;

    /* Pointers are in units of 8 bytes */
    /* (sizeof(aic_tx_dscr1_t) is 8, so one step per descriptor.) */
    next_pi = (tx_dscr_pi + sizeof(aic_tx_dscr1_t)/8) % TX_RING_ENTRIES;
    if (next_pi == tx_dscr_ci)   /* Ring full */
	return -1;

    /* Convert the 8-byte-unit index back to a descriptor index.
       flags were preset in aic_init (ID, INTR, CRCEN, 1 buffer). */
    txd = &(sc->tx_dscr[(tx_dscr_pi << 3)/sizeof(aic_tx_dscr1_t)]);
    txd->bufptr = PTR_TO_PCI(pkt->buffer);
    txd->length = pkt->length;

    /* Publish the new producer index; this starts the DMA. */
    ptrs &= ~M_LoPrTxProducerIndex;
    ptrs |= next_pi << S_LoPrTxProducerIndex;
    WRITECSR(sc, R_TxDescQueueProducerIndex, ptrs);

    sc->outpkts++;
    return 0;
}
665
666
/* Drain the transmit completion ring, freeing the packet buffer
   associated with each completed descriptor and advancing the
   consumer index.  Returns the number of completions processed. */
static int
aic_proctxring(aic_ether_t *sc)
{
    uint32_t ptrs;
    unsigned tx_comp_ci, tx_comp_pi;
    int consumed;
    aic_tx_comp_dscr1_t *txc;
    aic_tx_dscr1_t *txd;
    unsigned index;
    eth_pkt_t   *pkt;

    ptrs = READCSR(sc, R_CompletionQueueProducerIndex);
    tx_comp_pi = G_TxCompletionProducerIndex(ptrs);
    ptrs = READCSR(sc, R_CompletionQueueConsumerIndex);
    tx_comp_ci = G_TxCompletionConsumerIndex(ptrs);
    consumed = 0;

    while (tx_comp_ci != tx_comp_pi) {
	txc = &(sc->tx_done[tx_comp_ci]);
	/* qref holds the byte offset of the completed tx descriptor. */
	index = (txc->qref & M_QREF_OFFSET) / sizeof(aic_tx_dscr1_t);
	txd = &(sc->tx_dscr[index]);

	/* Recover and release the eth_pkt_t that owned the buffer. */
	pkt = ETH_PKT_BASE(PCI_TO_PTR(txd->bufptr));
	eth_free_pkt(sc, pkt);

	consumed++;
        tx_comp_ci = (tx_comp_ci + 1) % COMP_RING_ENTRIES;
	}

    /* Update the completion ring pointers */
    if (consumed != 0) {
	ptrs &= ~M_TxCompletionConsumerIndex;
	ptrs |= V_TxCompletionConsumerIndex(tx_comp_ci);
	WRITECSR(sc, R_CompletionQueueConsumerIndex, ptrs);
	}

    return consumed;
}
705
706
/* One-time software initialization: allocate the packet pool, build
   the free list and rx queue, and pre-fill the static fields of the
   tx descriptor ring.
   NOTE(review): the KMALLOC result is not checked; a NULL pool would
   make eth_initfreelist fault.  Presumably allocation cannot fail
   this early in CFE -- confirm. */
static void
aic_init(aic_ether_t *sc)
{
    int i;

    /* Allocate buffer pool */
    sc->pktpool = KMALLOC(ETH_PKTPOOL_SIZE*ETH_PKTBUF_SIZE, CACHE_ALIGN);
    eth_initfreelist(sc);
    q_init(&sc->rxqueue);

    /* The tx_dscr ring uses an explicit END bit */
    /* Each descriptor: tx-buffer ID, interrupt on completion, chip
       appends CRC, one buffer per frame. */
    for (i = 0; i < TX_RING_ENTRIES; i++) {
	sc->tx_dscr[i].flags = (V_FLAG_ID_TX
				| M_FLAG_INTR
				| M_FLAG_CRCEN
				| (1 << S_FLAG_NBUFFERS));
	}
    sc->tx_dscr[TX_RING_ENTRIES-1].flags |= M_FLAG_END;
}
726
727
728/* MII access functions.  */
729
730#define MII_MAX_RETRIES 10000
731
/* Read a PHY register over the chip's memory-mapped MII port.  The
   CSR read itself triggers the MII transaction; we first wait for the
   bus to be free, then poll the access port until DataValid is set.
   On timeout a warning is printed and the last value read (with
   whatever data bits it held) is returned. */
static uint16_t
mii_read_register(aic_ether_t *sc, int phy, int index)
{
    uint32_t csr;
    uint32_t val;
    int   i;

    /* Wait for any transaction in progress to finish. */
    for (i = 0; i < MII_MAX_RETRIES; i++) {
	val = READCSR(sc, R_MIIStatus);
	if ((val & M_MIIStatus_MiiBusy) == 0)
	    break;
	}
    if (i == MII_MAX_RETRIES)
	xprintf("%s: mii_read_register: MII always busy\n", aic_devname(sc));

    /* Each PHY has a bank of PHY_REGISTERS 32-bit slots. */
    csr = R_MIIRegistersAccessPort + ((PHY_REGISTERS*phy + index) << 2);
    for (i = 0; i < MII_MAX_RETRIES; i++) {
	val = READCSR(sc, csr);
	if ((val & M_MiiDataValid) != 0)
	    break;
	}
    if (i == MII_MAX_RETRIES)
	xprintf("%s: mii_read_register: data never valid\n", aic_devname(sc));

    return (val & M_MiiRegDataPort) >> S_MiiRegDataPort;
}
758
/* Write a PHY register over the MII port: wait for the MII bus to go
   idle, then write the value to the per-PHY/per-register slot (the
   CSR write launches the MII transaction; completion is not awaited
   here -- the next access's busy-wait covers it). */
static void
mii_write_register(aic_ether_t *sc, int phy, int index, uint16_t value)
{
    uint32_t csr;
    uint32_t val;
    int   i;

    for (i = 0; i < MII_MAX_RETRIES; i++) {
	val = READCSR(sc, R_MIIStatus);
	if ((val & M_MIIStatus_MiiBusy) == 0)
	    break;
	}
    if (i == MII_MAX_RETRIES)
	xprintf("%s: mii_write_register: always busy\n", aic_devname(sc));

    csr = R_MIIRegistersAccessPort + ((PHY_REGISTERS*phy + index) << 2);
    WRITECSR(sc, csr, value);
}
777
778static int
779mii_probe(aic_ether_t *sc)
780{
781    int i;
782    uint16_t id1, id2;
783
784    for (i = 0; i < 32; i++) {
785        id1 = mii_read_register(sc, i, MII_PHYIDR1);
786	id2 = mii_read_register(sc, i, MII_PHYIDR2);
787	if ((id1 != 0x0000 && id1 != 0xFFFF) ||
788	    (id2 != 0x0000 && id2 != 0xFFFF)) {
789	    if (id1 != id2) return i;
790	    }
791	}
792    return -1;
793}
794
/* Debug-only dump of the PHY register file; compiled out by default.
   When enabled, prints the five standard MII registers and, for the
   SEEQ/LSI 80220 part, its vendor-specific registers 0x10-0x14. */
#if 0
#define OUI_SEEQ    0x00A07D
#define IDR_SEEQ    0x0005BE     /* OUI bit-reversed within each byte */
#define PART_80220  0x03

static void
mii_dump(aic_ether_t *sc, const char *label)
{
    int i;
    uint16_t  r;
    uint32_t  idr, part;

    xprintf("%s\n", label);
    idr = part = 0;

    /* Required registers */
    for (i = 0; i <= 5; ++i) {
	r = mii_read_register(sc, sc->phy_addr, i);
	xprintf("MII_REG%02X: %04X\n", i, r);
	/* Reassemble the 22-bit OUI fragment and 6-bit part number
	   from PHYIDR1/PHYIDR2 as we go. */
	if (i == MII_PHYIDR1) {
	    idr |= r << 6;
	    }
	else if (i == MII_PHYIDR2) {
	    idr |= (r >> 10) & 0x3F;
	    part = (r >> 4) & 0x3F;
	    }
	}

    /* Extensions */
    if (idr == IDR_SEEQ && part == PART_80220) {
	for (i = 0x10; i <= 0x14; ++i) {
	    r = mii_read_register(sc, sc->phy_addr, i);
	    xprintf("MII_REG%02X: %04X\n", i, r);
	    }
	}
}
#else
#define mii_dump(sc,label)
#endif
834
835static int
836mii_poll(aic_ether_t *sc)
837{
838    uint16_t  status, ability;
839
840    /* BMSR has read-to-clear bits; read twice.  */
841
842    status = mii_read_register(sc, sc->phy_addr, MII_BMSR);
843    status = mii_read_register(sc, sc->phy_addr, MII_BMSR);
844    ability = mii_read_register(sc, sc->phy_addr, MII_ANLPAR);
845
846    if (status != sc->phy_status || ability != sc->phy_ability) {
847        sc->phy_status = status;
848	sc->phy_ability = ability;
849	return 1;
850	}
851    return 0;
852}
853
/* Force the PHY to a fixed speed/duplex: first disable
   autonegotiation (written separately so the PHY latches it), then
   program the speed and duplex bits.  Unknown 'speed' values fall
   back to 10BaseT half duplex. */
static void
mii_set_speed(aic_ether_t *sc, int speed)
{
    uint16_t  control;

    control = mii_read_register(sc, sc->phy_addr, MII_BMCR);

    control &=~ (BMCR_ANENABLE | BMCR_RESTARTAN);
    mii_write_register(sc, sc->phy_addr, MII_BMCR, control);
    control &=~ (BMCR_SPEED0 | BMCR_SPEED1 | BMCR_DUPLEX);

    switch (speed) {
	case ETHER_SPEED_10HDX:
	default:
	    break;
	case ETHER_SPEED_10FDX:
	    control |= BMCR_DUPLEX;
	    break;
	case ETHER_SPEED_100HDX:
	    control |= BMCR_SPEED100;
	    break;
	case ETHER_SPEED_100FDX:
	    control |= BMCR_SPEED100 | BMCR_DUPLEX ;
	    break;
	}

    mii_write_register(sc, sc->phy_addr, MII_BMCR, control);
}
882
/* Run (or wait out) PHY autonegotiation and program the MAC's duplex
   and back-to-back inter-packet gap to match the negotiated link.
   Reports the result on the console and caches the final BMSR/ANLPAR
   values in the softc for later change detection by mii_poll. */
static void
mii_autonegotiate(aic_ether_t *sc)
{
    uint16_t  control, status, remote;
    unsigned int  timeout;
    int linkspeed;

    linkspeed = ETHER_SPEED_UNKNOWN;

    /* Read twice to clear latching bits */
    status = mii_read_register(sc, sc->phy_addr, MII_BMSR);
    status = mii_read_register(sc, sc->phy_addr, MII_BMSR);
    mii_dump(sc, "query PHY");

    /* If autonegotiation already completed with link up, skip the
       wait.  NOTE(review): 'control' is read here but never used. */
    if ((status & (BMSR_AUTONEG | BMSR_LINKSTAT)) ==
        (BMSR_AUTONEG | BMSR_LINKSTAT))
	control = mii_read_register(sc, sc->phy_addr, MII_BMCR);
    else {
	/* Poll up to ~3 seconds for negotiation to complete.
	   NOTE(review): 'timeout' is unsigned, so 'timeout <= 0' only
	   fires when it reaches exactly 0; this is fine as long as
	   CFE_HZ/2 divides 3*CFE_HZ evenly -- confirm for odd CFE_HZ. */
	timeout = 3*CFE_HZ;
	for (;;) {
	    status = mii_read_register(sc, sc->phy_addr, MII_BMSR);
	    if ((status & BMSR_ANCOMPLETE) != 0 || timeout <= 0)
		break;
	    cfe_sleep(CFE_HZ/2);
	    timeout -= CFE_HZ/2;
	    }
	}

    remote = mii_read_register(sc, sc->phy_addr, MII_ANLPAR);

    xprintf("%s: Link speed: ", aic_devname(sc));
    if ((status & BMSR_ANCOMPLETE) != 0) {
	/* A link partner was negotiated... */
        uint32_t config1;
	uint32_t ipgt;

	config1 = READCSR(sc, R_MacConfig1);
	ipgt = READCSR(sc, R_BkToBkIPG);
	ipgt &=~ M_IPGT;

	/* Pick the best mode both ends advertise, highest first. */
	if ((remote & ANLPAR_TXFD) != 0) {
	    xprintf("100BaseT FDX\n");
	    config1 |= M_FullDuplex;
	    ipgt |= (K_IPGT_FDX << S_IPGT);
	    linkspeed = ETHER_SPEED_100FDX;
	    }
	else if ((remote & ANLPAR_TXHD) != 0) {
	    xprintf("100BaseT HDX\n");
	    config1 &= ~M_FullDuplex;
	    ipgt |= (K_IPGT_HDX << S_IPGT);
	    linkspeed = ETHER_SPEED_100HDX;
	    }
	else if ((remote & ANLPAR_10FD) != 0) {
	    xprintf("10BaseT FDX\n");
	    config1 |= M_FullDuplex;
	    ipgt |= (K_IPGT_FDX << S_IPGT);
	    linkspeed = ETHER_SPEED_10FDX;
	    }
	else if ((remote & ANLPAR_10HD) != 0) {
	    xprintf("10BaseT HDX\n");
	    config1 &= ~M_FullDuplex;
	    ipgt |= (K_IPGT_HDX << S_IPGT);
	    linkspeed = ETHER_SPEED_10HDX;
	    }

	/* Can this be done with the DMA enabled? */
	/* Apply the IPG, then pulse MAC soft reset around the new
	   duplex configuration. */
	WRITECSR(sc, R_BkToBkIPG, ipgt);
	WRITECSR(sc, R_MacConfig1, config1);
	WRITECSR(sc, R_MacConfig1, config1 | M_SoftRst);
	cfe_sleep(1);
	WRITECSR(sc, R_MacConfig1, config1);
	}
    else {
	/* no link partner negotiation */
	xprintf("Unknown\n");
	linkspeed = ETHER_SPEED_UNKNOWN;
	remote = 0;
	}

    /* clear latching bits */
    status = mii_read_register(sc, sc->phy_addr, MII_BMSR);
    sc->phy_status = status;
    sc->phy_ability = remote;

    mii_dump(sc, "final PHY");

}
970
971
972/* Bus utilization statistics */
973
974static void
975aic_initstats(aic_ether_t *sc)
976{
977    sc->pci_latency = sc->int_latency = 0;
978    sc->pci_slave = sc->pci_master = 0;
979    sc->pci_data = 0;
980
981    WRITECSR(sc, R_PCIMonitor1, 0);
982    WRITECSR(sc, R_PCIMonitor2, 0);
983
984    sc->pci_polltime = (uint64_t)cfe_ticks + PCI_POLL_INTERVAL;
985}
986
/* Sample and reset the chip's PCI monitor counters, folding the
   results into the driver's running statistics: peak bus/interrupt
   latencies and cumulative slave/master/data-transfer counts.
   Counts occurring between the reads and the clearing writes are
   lost, which is acceptable for statistics. */
static void
aic_pcistats(aic_ether_t *sc)
{
    uint32_t monitor1, monitor2;
    uint32_t t;

    /* The following actions are not atomic but should be close enough
       for collecting stats.  */
    monitor1 = READCSR(sc, R_PCIMonitor1);
    monitor2 = READCSR(sc, R_PCIMonitor2);
    WRITECSR(sc, R_PCIMonitor1, 0);
    WRITECSR(sc, R_PCIMonitor2, 0);

    /* Track high-water marks for the two latency fields. */
    t = (monitor1 & M_PCIBusMaxLatency) >> S_PCIBusMaxLatency;
    if (t > sc->pci_latency)
	sc->pci_latency = t;
    t = (monitor1 & M_PCIIntMaxLatency) >> S_PCIIntMaxLatency;
    if (t > sc->int_latency)
	sc->int_latency = t;

    /* Accumulate utilization counters in 64 bits to avoid overflow. */
    t = (monitor1 & M_PCISlaveBusUtilization) >> S_PCISlaveBusUtilization;
    sc->pci_slave += (uint64_t) t;
    t = (monitor2 & M_PCIMasterBusUtilization) >> S_PCIMasterBusUtilization;
    sc->pci_master += (uint64_t) t;
    t = (monitor2 & M_ActiveTransferCount) >> S_ActiveTransferCount;
    sc->pci_data += (uint64_t) t;
}
1014
1015
1016/* The following functions collectively implement the recommended
1017   AIC-6915 Initialization Procedure (Section 8) */
1018
/* Warm-reset the chip per the AIC-6915 Initialization Procedure
   (Section 8), steps 3-7: disable DMA/MAC, pulse software reset while
   preserving the PCI device config, clear PCI status, and set the MAC
   and byte-lane configuration defaults.  Always returns 0.  The
   ordering and delays follow the manual and must not be reordered. */
static int
aic_reset(aic_ether_t *sc)
{
    uint32_t pci_config;
    uint32_t status;
    uint32_t mac_config;
    uint32_t bac_ctrl;

    /* Reset PHY (3) - skipped, see aic_initlink */

    /* Disable DMA, MAC (4) */
    WRITECSR(sc, R_GeneralEthernetCtrl, 0);

    /* Software reset (5): save config, assert reset, restore. */
    pci_config = READCSR(sc, R_PCIDeviceConfig);
    WRITECSR(sc, R_PCIDeviceConfig, 0);
    aic_spin(sc, 2);      /* at least 2 usec */
    WRITECSR(sc, R_PCIDeviceConfig, M_SoftReset);
    aic_spin(sc, 2);
    WRITECSR(sc, R_PCIDeviceConfig, pci_config);
    aic_spin(sc, 2);

    /* Clear PCI status (6): status bits are write-one-to-clear, so
       writing back what we read clears them. */
    status = pci_conf_read(sc->tag, PCI_COMMAND_STATUS_REG);
    pci_conf_write(sc->tag, PCI_COMMAND_STATUS_REG, status);

    /* Configure MAC (7): enable padding, default to full duplex, then
       pulse the MAC soft-reset bit. */
    mac_config = READCSR(sc, R_MacConfig1);
    mac_config |= M_PadEn;
    mac_config |= M_FullDuplex;   /* Default, updated by autonegotiation. */
    WRITECSR(sc, R_MacConfig1, mac_config);
    mac_config |= M_SoftRst;
    WRITECSR(sc, R_MacConfig1, mac_config);
    mac_config = READCSR(sc, R_MacConfig1);
    mac_config &= ~M_SoftRst;
    WRITECSR(sc, R_MacConfig1, mac_config);

    /* Select big-endian data swap mode for the ZBbus-PCI bridge. */
    bac_ctrl = READCSR(sc, R_BacControl);
    bac_ctrl &= ~M_DataSwapMode;
    bac_ctrl |= V_DataSwapMode_BE;
    WRITECSR(sc, R_BacControl, bac_ctrl);

    return 0;
}
1063
1064static int
1065aic_coldreset(aic_ether_t *sc)
1066{
1067    pcireg_t cmd;
1068
1069    /* only cold reset needs steps 1 and 2 */
1070
1071    /* Enable memory, also clear R/WC status bits (1) */
1072    cmd = pci_conf_read(sc->tag, PCI_COMMAND_STATUS_REG);
1073    cmd |= PCI_COMMAND_MEM_ENABLE | PCI_COMMAND_MASTER_ENABLE;
1074    pci_conf_write(sc->tag, PCI_COMMAND_STATUS_REG, cmd);
1075
1076    aic_reset(sc);       /* continue with warm reset */
1077    return 0;
1078}
1079
1080static int
1081aic_rxinit(aic_ether_t *sc)
1082{
1083    uint32_t ctrl;
1084    unsigned offset;
1085
1086    /* Initialize the Rx Completion rings.  Q1 only and 32-bit
1087       addressing for now.  */
1088    WRITECSR(sc, R_RxCompletionQueue1Ctrl,
1089	     (PTR_TO_PCI(sc->rx_done) & M_RxCompletionBaseAddress)
1090	     | (3 << S_RxCompletionType)
1091	     | (7 << S_RxCompletionThreshold));    /* XXX check */
1092
1093    /* Initialize ring indices */
1094    WRITECSR(sc, R_CompletionQueueConsumerIndex, 0);
1095    WRITECSR(sc, R_CompletionQueueProducerIndex, 0);
1096
1097    /* Increase the maximum burst size. */
1098    ctrl = READCSR(sc, R_RxDmaCtrl);
1099    ctrl &= ~M_RxBurstSize;
1100    ctrl |= (16 << S_RxBurstSize);
1101
1102    /* Initialize the Rx Descriptor rings.  Q1 only and 32-bit
1103       addressing for now. */
1104    WRITECSR(sc, R_RxDescQueue1Ctrl,
1105	     (ETH_PKTBUF_LEN << S_RxBufferLength)
1106	     | (4 << S_RxMinDescriptorsThreshold));   /* XXX check */
1107    WRITECSR(sc, R_RxDescQueueHighAddress, 0);
1108    WRITECSR(sc, R_RxDescQueue1LowAddress, PTR_TO_PCI(sc->rx_dscr));
1109    /* Initialize pointers */
1110    WRITECSR(sc, R_RxDescQueue1Ptrs, 0);
1111
1112    /* Set up address filters (see Table 7-108 for format) */
1113    for (offset = 0; offset < PERFECT_ADDRESS_ENTRIES*4; offset++)
1114        WRITECSR(sc, R_PerfectAddressBase + 4*offset, 0);
1115    for (offset = 0; offset < 3; offset++) {
1116	uint32_t bytes;
1117
1118	bytes = (sc->hwaddr[4-2*offset] << 8) | (sc->hwaddr[5-2*offset]);
1119	WRITECSR(sc, R_PerfectAddressBase + 4*offset, bytes);
1120	}
1121    WRITECSR(sc, R_RxAddressFilteringCtrl,
1122	     (K_PerfectFiltering_16 << S_PerfectFilteringMode)
1123	     | (K_HashFiltering_Off << S_HashFilteringMode)
1124	     | M_PassBroadcast);
1125
1126    return 0;
1127}
1128
/* Initialize the transmit side: Tx descriptor ring (low-priority
   queue only), frame control, and the Tx completion ring.
   Always returns 0. */
static int
aic_txinit(aic_ether_t *sc)
{
    uint32_t ctrl;

    /* Initialize the Tx Descriptor rings.  32-bit addressing for now.  */
    WRITECSR(sc, R_TxDescQueueCtrl,
	     (1 << S_TxDescType)
	     | (0 << S_SkipLength)
	     | (8 << S_TxDmaBurstSize)
	     | (2 << S_TxHighPriorityFifoThreshold));  /* XXX check */
    /* Use the low priority ring */
    WRITECSR(sc, R_TxDescQueueHighAddr, 0);
    WRITECSR(sc, R_LoPrTxDescQueueBaseAddr, PTR_TO_PCI(sc->tx_dscr));

    /* Empty rings */
    WRITECSR(sc, R_TxDescQueueProducerIndex, 0);
    WRITECSR(sc, R_TxDescQueueConsumerIndex, 0);

    /* Configure for interrupt on DMA Complete (don't wait for the
       frame to also complete on the wire). */
    ctrl = READCSR(sc, R_TxFrameCtrl);
    ctrl &= ~M_DmaCompletionAfterTransmitComplete;
    WRITECSR(sc, R_TxFrameCtrl, ctrl);

    /* Initialize the Tx Completion rings. */
    WRITECSR(sc, R_CompletionQueueHighAddr, 0);
    WRITECSR(sc, R_TxCompletionQueueCtrl,
	     (PTR_TO_PCI(sc->tx_done) & M_TxCompletionBaseAddress)
	     | (7 << S_TxCompletionQueueThreshold));   /* XXX check */

    /* Initialize ring indices (again) */
    WRITECSR(sc, R_CompletionQueueConsumerIndex, 0);
    WRITECSR(sc, R_CompletionQueueProducerIndex, 0);

    return 0;
}
1165
1166
1167static int
1168aic_statsinit(aic_ether_t *sc)
1169{
1170    int i;
1171
1172    for (i = 0; i < STATISTICS_COUNT; i++)
1173	WRITECSR(sc, R_StatisticsBase + 4*i, 0);
1174
1175    return 0;
1176}
1177
1178
/* Probe for a PHY and bring the link up.  Only autonegotiation is
   supported for now; the mii_set_speed branch is intentionally dead
   code kept as a placeholder. */
static void
aic_initlink(aic_ether_t *sc)
{
    sc->phy_addr = mii_probe(sc);
    if (sc->phy_addr < 0) {
	xprintf("%s: no PHY found\n", aic_devname(sc));
	return;
	}
    if (1)   /* XXX Support only autonegotiation for now */
	mii_autonegotiate(sc);
    else
	mii_set_speed(sc, ETHER_SPEED_10HDX);
    /* Schedule the first periodic MII poll. */
    sc->mii_polltime = (uint64_t)cfe_ticks + MII_POLL_INTERVAL;

    sc->mii_polling = 0;
}
1195
1196
/* Turn the interface on: prime the Rx ring, unmask the interrupt
   sources we service, and enable the DMA engines and MAC. */
static void
aic_enable(aic_ether_t *sc)
{
    uint32_t pci_config;

    /* Prime the Rx ring.  This must be done before enabling interrupts. */
    aic_fillrxring(sc);

    /* Configure and enable interrupts */
    sc->intmask = M_RxQ1DoneInt | M_TxFrameCompleteInt | M_TxDMADoneInt;
    (void) READCSR(sc, R_InterruptStatus);   /* Clear read-to-clear bits */
    WRITECSR(sc, R_InterruptEn, sc->intmask);
    pci_config = READCSR(sc, R_PCIDeviceConfig);
    pci_config |= M_IntEnable;
    WRITECSR(sc, R_PCIDeviceConfig, pci_config);

    /* Enable DMA and MAC */
    WRITECSR(sc, R_GeneralEthernetCtrl,
	     M_RxDmaEn | M_ReceiveEn | M_TxDmaEn | M_TransmitEn);
}
1217
1218static void
1219aic_disable(aic_ether_t *sc)
1220{
1221    uint32_t ctrl;
1222
1223    ctrl = READCSR(sc, R_GeneralEthernetCtrl);
1224    ctrl &= ~(M_RxDmaEn | M_ReceiveEn | M_TxDmaEn | M_TransmitEn);
1225    WRITECSR(sc, R_GeneralEthernetCtrl, ctrl);
1226
1227    sc->intmask = 0;
1228    WRITECSR(sc, R_InterruptEn, sc->intmask);
1229}
1230
1231
1232static void
1233aic_hwinit(aic_ether_t *sc)
1234{
1235    if (sc->state == eth_state_uninit) {
1236	aic_coldreset(sc);
1237	aic_rxinit(sc);
1238	aic_txinit(sc);
1239	aic_statsinit(sc);
1240
1241	aic_initlink(sc);
1242
1243	sc->state = eth_state_off;
1244#if AIC_DEBUG
1245	dumpcsrs(sc, "end init");
1246#else
1247	(void)dumpcsrs;
1248#endif
1249	}
1250}
1251
1252
/* Interrupt service routine; also invoked directly when polling
   (XPOLL).  Loops draining the Rx and Tx completion rings until no
   enabled interrupt cause remains asserted.
   NOTE(review): the received/sent totals are accumulated here but
   never read. */
static void
aic_isr(void *arg)
{
    aic_ether_t *sc = (aic_ether_t *)arg;
    int received, sent;
    uint32_t status;

    if (IPOLL) sc->interrupts++;

    received = sent = 0;

    for (;;) {
	/* Reading InterruptStatus clears its read-to-clear bits. */
	status = READCSR(sc, R_InterruptStatus);

	if ((status & sc->intmask) == 0)
	    break;

	if (status & M_RxQ1DoneInt) {
	    if (IPOLL) sc->rx_interrupts++;
	    received += aic_procrxring(sc);
	    }

	/* XXX choose only one tx completion interrupt. */
	if (status & (M_TxFrameCompleteInt | M_TxDMADoneInt)) {
	    if (IPOLL) sc->tx_interrupts++;
	    sent += aic_proctxring(sc);
	    }
	}
}
1282
1283
/* Bring the interface fully up: one-time hardware init, IRQ hookup
   (interrupt-driven builds only), then enable DMA/MAC. */
static void
aic_start(aic_ether_t *sc)
{
    aic_hwinit(sc);

    /* Mask everything until aic_enable installs the real mask. */
    sc->intmask = 0;
#if IPOLL
    cfe_request_irq(sc->irq, aic_isr, sc, CFE_IRQ_FLAGS_SHARED, 0);
#endif

    aic_enable(sc);
    sc->state = eth_state_on;
}
1297
/* Quiesce the interface: disable DMA/MAC and interrupts, then
   release the IRQ if interrupt-driven. */
static void
aic_stop(aic_ether_t *sc)
{
    aic_disable(sc);

#if IPOLL
    cfe_free_irq(sc->irq, 0);
#endif
}
1307
1308
/* Forward declarations for the CFE device dispatch table below. */
static int aic_ether_open(cfe_devctx_t *ctx);
static int aic_ether_read(cfe_devctx_t *ctx,iocb_buffer_t *buffer);
static int aic_ether_inpstat(cfe_devctx_t *ctx,iocb_inpstat_t *inpstat);
static int aic_ether_write(cfe_devctx_t *ctx,iocb_buffer_t *buffer);
static int aic_ether_ioctl(cfe_devctx_t *ctx,iocb_buffer_t *buffer);
static int aic_ether_close(cfe_devctx_t *ctx);
static void aic_ether_poll(cfe_devctx_t *ctx, int64_t ticks);
static void aic_ether_reset(void *softc);
1317
1318const static cfe_devdisp_t aic_ether_dispatch = {
1319    aic_ether_open,
1320    aic_ether_read,
1321    aic_ether_inpstat,
1322    aic_ether_write,
1323    aic_ether_ioctl,
1324    aic_ether_close,
1325    aic_ether_poll,
1326    aic_ether_reset
1327};
1328
/* Public driver descriptor; aic_ether_probe attaches each AIC-6915
   found on the PCI bus. */
cfe_driver_t aic6915drv = {
    "AIC-6915 Ethernet",
    "eth",
    CFE_DEV_NETWORK,
    &aic_ether_dispatch,
    aic_ether_probe
};
1336
1337
/* Allocate and initialize the per-device softc, map the chip's
   registers, read the station address from the MAC address registers,
   and register the device with CFE.
   Returns 1 on success, 0 on allocation failure. */
static int
aic_ether_attach(cfe_driver_t *drv, pcitag_t tag, int index)
{
    aic_ether_t *sc;
    char descr[80];
    phys_addr_t pa;
    uint32_t base;
    uint32_t addr;
    pcireg_t device, class;
    int i;

    /* Map BAR 0 and derive the CSR base address. */
    pci_map_mem(tag, PCI_MAPREG(0), PCI_MATCH_BITS, &pa);
    base = (uint32_t) pa;

    sc = (aic_ether_t *) KMALLOC(sizeof(aic_ether_t), 0);
    if (sc == NULL) {
	xprintf("AIC-6915: No memory to complete probe\n");
	return 0;
	}
    memset(sc, 0, sizeof(aic_ether_t));

    /* All descriptor rings must be aligned to a 256-byte boundary.  */
    sc->rings = (aic_rings_t *) KMALLOC(sizeof(aic_rings_t), 256);
    if (sc->rings == NULL) {
	xprintf("AIC-6915: No memory for descriptor rings\n");
	KFREE(sc);
	return 0;
	}
    memset(sc->rings, 0, sizeof(aic_rings_t));
    sc->rx_dscr = sc->rings->rx_dscr;
    sc->rx_done = sc->rings->rx_done;
    sc->tx_dscr = sc->rings->tx_dscr;
    sc->tx_done = sc->rings->tx_done;

    sc->regbase = base + K_AIC_REG_OFFSET;

    sc->irq = pci_conf_read(tag, PCI_BPARAM_INTERRUPT_REG) & 0xFF;

    device = pci_conf_read(tag, PCI_ID_REG);
    class = pci_conf_read(tag, PCI_CLASS_REG);

    sc->tag = tag;
    sc->device = PCI_PRODUCT(device);
    sc->revision = PCI_REVISION(class);
    sc->devctx = NULL;

    /* Assume on-chip firmware has initialized the MAC address.
       Empirically, attempts to read the EEPROM directly give bus
       errors. */
    /* hwaddr[0..1] come from MacAddr2, hwaddr[2..5] from MacAddr1,
       most-significant byte first within each register. */
    addr = READCSR(sc, R_MacAddr2);
    for (i = 0; i < 2; i++)
	sc->hwaddr[i] = (addr >> (8*(1-i))) & 0xff;
    addr = READCSR(sc, R_MacAddr1);
    for (i = 0; i < 4; i++)
	sc->hwaddr[2+i] = (addr >> (8*(3-i))) & 0xff;

    aic_init(sc);

    sc->state = eth_state_uninit;

    xsprintf(descr, "%s at 0x%X (%a)",
	     drv->drv_description, base, sc->hwaddr);

    cfe_attach(drv, sc, NULL, descr);
    return 1;
}
1404
1405static void
1406aic_ether_probe(cfe_driver_t *drv,
1407		unsigned long probe_a, unsigned long probe_b,
1408		void *probe_ptr)
1409{
1410    int n;
1411
1412    n = 0;
1413    for (;;) {
1414	pcitag_t tag;
1415
1416	if (pci_find_device(K_PCI_VENDOR_ADAPTEC, K_PCI_ID_AIC6915, n, &tag)
1417	    != 0)
1418	   break;
1419	aic_ether_attach(drv, tag, n);
1420	n++;
1421	}
1422}
1423
1424
1425/* The functions below are called via the dispatch vector for the AIC-6915 */
1426
/* cfe_devdisp open: (re)start the hardware and zero the per-open
   packet/interrupt counters.  Returns 0. */
static int
aic_ether_open(cfe_devctx_t *ctx)
{
    aic_ether_t *sc = ctx->dev_softc;

    /* If already running, stop first so aic_start gets a quiet chip. */
    if (sc->state == eth_state_on)
	aic_stop(sc);

    sc->devctx = ctx;

    sc->inpkts = sc->outpkts = 0;
    sc->interrupts = 0;
    sc->rx_interrupts = sc->tx_interrupts = 0;

    aic_start(sc);
    aic_initstats(sc);

    if (XPOLL) aic_isr(sc);
    return 0;
}
1447
/* cfe_devdisp read: dequeue one received packet and copy up to
   buf_length bytes to the caller (excess is silently truncated).
   Returns 0 with buf_retlen == 0 when no packet is pending, and -1
   when the interface is not up. */
static int
aic_ether_read(cfe_devctx_t *ctx, iocb_buffer_t *buffer)
{
    aic_ether_t *sc = ctx->dev_softc;
    eth_pkt_t *pkt;
    int blen;

    if (XPOLL) aic_isr(sc);

    if (sc->state != eth_state_on) return -1;

    /* Interlocked dequeue; rxqueue is presumably filled from the Rx
       completion path (aic_procrxring) — confirm against that code. */
    CS_ENTER(sc);
    pkt = (eth_pkt_t *) q_deqnext(&sc->rxqueue);
    CS_EXIT(sc);

    if (pkt == NULL) {
	buffer->buf_retlen = 0;
	return 0;
	}

    blen = buffer->buf_length;
    if (blen > pkt->length) blen = pkt->length;

    hs_memcpy_to_hs(buffer->buf_ptr, pkt->buffer, blen);
    buffer->buf_retlen = blen;

    eth_free_pkt(sc, pkt);

    if (XPOLL) aic_isr(sc);
    return 0;
}
1479
1480static int
1481aic_ether_inpstat(cfe_devctx_t *ctx, iocb_inpstat_t *inpstat)
1482{
1483    aic_ether_t *sc = ctx->dev_softc;
1484
1485    if (XPOLL) aic_isr(sc);
1486
1487    if (sc->state != eth_state_on) return -1;
1488
1489    /* We avoid an interlock here because the result is a hint and an
1490       interrupt cannot turn a non-empty queue into an empty one. */
1491    inpstat->inp_status = (q_isempty(&(sc->rxqueue))) ? 0 : 1;
1492
1493    return 0;
1494}
1495
/* cfe_devdisp write: copy the caller's frame into a driver packet
   buffer (truncated to the buffer size) and hand it to the transmit
   path.  Returns 0 on success, -1 if the interface is not up,
   CFE_ERR_NOMEM if no packet buffer is available, CFE_ERR_IOERR if
   the transmit queue rejects the packet. */
static int
aic_ether_write(cfe_devctx_t *ctx, iocb_buffer_t *buffer)
{
    aic_ether_t *sc = ctx->dev_softc;
    eth_pkt_t *pkt;
    int blen;

    if (XPOLL) aic_isr(sc);

    if (sc->state != eth_state_on) return -1;

    pkt = eth_alloc_pkt(sc);
    if (!pkt) return CFE_ERR_NOMEM;

    blen = buffer->buf_length;
    if (blen > pkt->length) blen = pkt->length;

    hs_memcpy_from_hs(pkt->buffer, buffer->buf_ptr, blen);
    pkt->length = blen;

    /* On success aic_transmit owns pkt; on failure we free it here. */
    if (aic_transmit(sc, pkt) != 0) {
	eth_free_pkt(sc,pkt);
	return CFE_ERR_IOERR;
	}

    if (XPOLL) aic_isr(sc);
    return 0;
}
1524
1525static int
1526aic_ether_ioctl(cfe_devctx_t *ctx, iocb_buffer_t *buffer)
1527{
1528    aic_ether_t *sc = ctx->dev_softc;
1529
1530    switch ((int)buffer->buf_ioctlcmd) {
1531	case IOCTL_ETHER_GETHWADDR:
1532	    hs_memcpy_to_hs(buffer->buf_ptr, sc->hwaddr, sizeof(sc->hwaddr));
1533	    return 0;
1534
1535	default:
1536	    return -1;
1537	}
1538}
1539
/* cfe_devdisp close: stop the hardware and dump accumulated latency,
   PCI-monitor and packet statistics.  Returns 0. */
static int
aic_ether_close(cfe_devctx_t *ctx)
{
    aic_ether_t *sc = ctx->dev_softc;

    sc->state = eth_state_off;
    aic_stop(sc);

    xprintf("%s: max latency: pci %d, int %d\n",
	    aic_devname(sc), sc->pci_latency, sc->int_latency);
    /* NOTE(review): pci_slave is printed scaled by /64 (rounded) while
       the other counters are raw — confirm the intended units. */
    xprintf("  %lld active in %lld master, %lld slave\n",
	    sc->pci_data, sc->pci_master, (sc->pci_slave + 32)/64);

    xprintf("%s: %d sent, %d received, %d interrupts\n",
	    aic_devname(sc), sc->outpkts, sc->inpkts, sc->interrupts);
    xprintf("  %d rx interrupts, %d tx interrupts\n",
	    sc->rx_interrupts, sc->tx_interrupts);

    sc->devctx = NULL;
    return 0;
}
1561
1562static void
1563aic_ether_poll(cfe_devctx_t *ctx, int64_t ticks)
1564{
1565    aic_ether_t *sc = ctx->dev_softc;
1566    int changed;
1567
1568    if (sc->state != eth_state_uninit) {
1569	uint64_t now = cfe_ticks;
1570
1571	if (now >= sc->pci_polltime) {
1572	    aic_pcistats(sc);
1573	    sc->pci_polltime = now + PCI_POLL_INTERVAL;
1574	    }
1575
1576	if (now >= sc->mii_polltime) {
1577	    changed = mii_poll(sc);
1578	    if (changed) {
1579		mii_autonegotiate(sc);
1580		}
1581	    sc->mii_polltime = (uint64_t)cfe_ticks + MII_POLL_INTERVAL;
1582	    }
1583	}
1584}
1585
1586static void
1587aic_ether_reset(void *softc)
1588{
1589    aic_ether_t *sc = (aic_ether_t *)softc;
1590
1591    /* Turn off the Ethernet interface. */
1592
1593    if (sc->state == eth_state_on)
1594	aic_stop(sc);
1595
1596    sc->state = eth_state_uninit;
1597}
1598