Deleted Added
full compact
if_tx.c (33095) if_tx.c (33181)
1/*-
2 * Copyright (c) 1997 Semen Ustimenko (semen@iclub.nsu.ru)
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 * version: stable-166
27 *
28 */
29
30/*
31 * EtherPower II 10/100 Fast Ethernet (tx0)
32 * (aka SMC9432TX based on SMC83c170 EPIC chip)
33 *
34 * Written by Semen Ustimenko.
35 *
36 * TODO:
37 * Fix TX_FRAG_LIST option
38 * Rewrite autonegotiation to remove DELAY(300000)
39 *
40 * stable-140:
41 * first stable version
42 *
43 * stable-160:
44 * added BPF support
45 * fixed several bugs
46 *
47 * stable-161:
48 * fixed BPF support
49 * fixed several bugs
50 *
51 * stable-162:
52 * fixed IFF_PROMISC mode support
53 * added speed info displayed at startup (MII info)
54 *
55 * stable-163:
56 * added media control code
57 *
58 * stable-164:
59 * fixed some bugs
60 *
61 * stable-165:
62 * fixed media control code
63 *
64 * stable-166:
65 * fixed RX_TO_MBUF option and set as default
66 * fixed bug caused ``tx0: device timeout 1 packets'' in 100Mbps mode
67 * implemented fragment list transmit method (TX_FRAG_LIST) (BUGGY)
68 * applyed patch to autoneg fullduplex modes ( Thank to Steve Bauer )
69 * added more coments, removed some debug printfs
70 */
71
72#include "pci.h"
73#if NPCI > 0
74
75#include <sys/param.h>
76#include <sys/systm.h>
77#include <sys/mbuf.h>
78#include <sys/socket.h>
79#include <sys/malloc.h>
80#include <sys/kernel.h>
81#include <sys/sockio.h>
82#include <net/if.h>
83#include <net/if_mib.h>
84#include <netinet/in.h>
85#include <netinet/if_ether.h>
86#include <vm/vm.h>
87#include <vm/pmap.h>
88#include <machine/clock.h>
89
90#include <pci/pcivar.h>
91#include <pci/smc83c170.h>
92
93#include "bpfilter.h"
94#if NBPFILTER > 0
95#include <net/bpf.h>
96#endif
97
98/*
99 * Global variables
100 */
101static u_long epic_pci_count;
102static epic_softc_t * epics[EPIC_MAX_DEVICES];
1/*-
2 * Copyright (c) 1997 Semen Ustimenko (semen@iclub.nsu.ru)
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 * version: stable-166
27 *
28 */
29
30/*
31 * EtherPower II 10/100 Fast Ethernet (tx0)
32 * (aka SMC9432TX based on SMC83c170 EPIC chip)
33 *
34 * Written by Semen Ustimenko.
35 *
36 * TODO:
37 * Fix TX_FRAG_LIST option
38 * Rewrite autonegotiation to remove DELAY(300000)
39 *
40 * stable-140:
41 * first stable version
42 *
43 * stable-160:
44 * added BPF support
45 * fixed several bugs
46 *
47 * stable-161:
48 * fixed BPF support
49 * fixed several bugs
50 *
51 * stable-162:
52 * fixed IFF_PROMISC mode support
53 * added speed info displayed at startup (MII info)
54 *
55 * stable-163:
56 * added media control code
57 *
58 * stable-164:
59 * fixed some bugs
60 *
61 * stable-165:
62 * fixed media control code
63 *
64 * stable-166:
65 * fixed RX_TO_MBUF option and set as default
66 * fixed bug caused ``tx0: device timeout 1 packets'' in 100Mbps mode
67 * implemented fragment list transmit method (TX_FRAG_LIST) (BUGGY)
68 * applyed patch to autoneg fullduplex modes ( Thank to Steve Bauer )
69 * added more coments, removed some debug printfs
70 */
71
72#include "pci.h"
73#if NPCI > 0
74
75#include <sys/param.h>
76#include <sys/systm.h>
77#include <sys/mbuf.h>
78#include <sys/socket.h>
79#include <sys/malloc.h>
80#include <sys/kernel.h>
81#include <sys/sockio.h>
82#include <net/if.h>
83#include <net/if_mib.h>
84#include <netinet/in.h>
85#include <netinet/if_ether.h>
86#include <vm/vm.h>
87#include <vm/pmap.h>
88#include <machine/clock.h>
89
90#include <pci/pcivar.h>
91#include <pci/smc83c170.h>
92
93#include "bpfilter.h"
94#if NBPFILTER > 0
95#include <net/bpf.h>
96#endif
97
98/*
99 * Global variables
100 */
101static u_long epic_pci_count;
102static epic_softc_t * epics[EPIC_MAX_DEVICES];
103struct pci_device txdevice = {
103static struct pci_device txdevice = {
104 "tx",
105 epic_pci_probe,
106 epic_pci_attach,
107 &epic_pci_count,
108 NULL };
109
110/*
111 * Append this driver to pci drivers list
112 */
113DATA_SET ( pcidevice_set, txdevice );
114
/*
 * ifioctl function
 *
 * Handles interface ioctls: address assignment, up/down transitions,
 * multicast list changes and MTU updates.  Hardware state is kept
 * consistent with the interrupt path by raising splimp() for the
 * duration of the call.
 *
 * splimp() invoked here
 */
static int
epic_ifioctl(register struct ifnet * ifp, int command, caddr_t data){
	epic_softc_t *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int x, error = 0;

	x = splimp();

	switch (command) {

	case SIOCSIFADDR:
	case SIOCGIFADDR:
		/* Generic Ethernet address handling */
		ether_ioctl(ifp, command, data);
		break;

	case SIOCSIFFLAGS:
		/*
		 * If the interface is marked up and stopped, then start it.
		 * If it is marked down and running, then stop it.
		 */
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_flags & IFF_RUNNING) == 0) {
				epic_init(sc);
				break;
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING) {
				epic_stop(sc);
				ifp->if_flags &= ~IFF_RUNNING;
				break;
			}
		}

		/* Handle IFF_PROMISC flag (reprogram RXCON) */
		epic_set_rx_mode(sc);

		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:

		/* Update our multicast list */
#if defined(__FreeBSD__) && __FreeBSD__ >= 3
		epic_set_mc_table(sc);
		error = 0;
#else
		error = (command == SIOCADDMULTI) ?
			ether_addmulti(ifr, &sc->epic_ac) :
			ether_delmulti(ifr, &sc->epic_ac);

		if (error == ENETRESET) {
			epic_set_mc_table(sc);
			error = 0;
		}
#endif
		break;

	case SIOCSIFMTU:
		/*
		 * Set the interface MTU; anything above the standard
		 * Ethernet MTU is rejected.
		 */
		if (ifr->ifr_mtu > ETHERMTU) {
			error = EINVAL;
		} else {
			ifp->if_mtu = ifr->ifr_mtu;
		}
		break;

	default:
		error = EINVAL;
	}
	splx(x);

	return error;
}
195
/*
 * ifstart function
 *
 * Dequeues packets from the interface send queue and loads them into
 * free TX descriptors until either the ring is full or the queue is
 * empty.  Two transmit strategies exist at compile time: TX_FRAG_LIST
 * hands the mbuf chain to the chip as a fragment list (zero-copy,
 * marked BUGGY in the file header), otherwise each chain is copied
 * into the descriptor's static buffer.
 *
 * splimp() assumed to be done
 */
static void
epic_ifstart(struct ifnet * const ifp){
	epic_softc_t *sc = ifp->if_softc;

	while( sc->pending_txs < TX_RING_SIZE ){
		int entry = sc->cur_tx % TX_RING_SIZE;
		struct epic_tx_buffer * buf = sc->tx_buffer + entry;
		struct mbuf *m,*m0;
		int len;

		/* If descriptor is busy, set IFF_OACTIVE and exit */
		if( buf->desc.status & 0x8000 ) break;

		/* Get next packet to send */
		IF_DEQUEUE( &(sc->epic_if.if_snd), m );

		/* If no more mbuf's to send, return */
		if( NULL == m ) return;
		/* Save mbuf header */
		m0 = m;

#if defined(TX_FRAG_LIST)
		/* Zero-copy path: point descriptor fragments at the chain */
		if( buf->mbuf ) m_freem( buf->mbuf );
		buf->mbuf = m;
		buf->flist.numfrags = 0;

		/* At most 63 fragments per descriptor */
		for(len=0;(m0!=0)&&(buf->flist.numfrags<63);m0=m0->m_next) {
			buf->flist.frag[buf->flist.numfrags].fraglen =
				m0->m_len;
			buf->flist.frag[buf->flist.numfrags].fragaddr =
				vtophys( mtod(m0, caddr_t) );
			len += m0->m_len;
			buf->flist.numfrags++;
		}

		/* Does not generate TXC unless ring is more than half full */
		buf->desc.control =
			(sc->pending_txs>TX_RING_SIZE/2)?0x05:0x01;
#else
		/* Copy path: flatten the mbuf chain into the static buffer */
		for (len = 0; m0 != 0; m0 = m0->m_next) {
			bcopy(mtod(m0, caddr_t), buf->data + len, m0->m_len);
			len += m0->m_len;
		}

		/* Does not generate TXC unless ring is more than half full */
		buf->desc.control =
			(sc->pending_txs>TX_RING_SIZE/2)?0x14:0x10;
#endif

		/* Packet should be at least ETHER_MIN_LEN */
		buf->desc.txlength = max(len,ETHER_MIN_LEN-ETHER_CRC_LEN);

		/* Pass ownership to the chip */
		buf->desc.status = 0x8000;

		/* Set watchdog timer */
		ifp->if_timer = 2;

#if NBPFILTER > 0
		if( ifp->if_bpf ) bpf_mtap( ifp, m );
#endif

#if !defined(TX_FRAG_LIST)
		/* Chain was copied out; we don't need the mbuf anymore */
		m_freem( m );
#endif
		/* Trigger an immediate transmit demand. */
		outl( sc->iobase + COMMAND, COMMAND_TXQUEUED );

		/* Packet queued successfully */
		sc->pending_txs++;

		/* Switch to next descriptor */
		sc->cur_tx = ( sc->cur_tx + 1 ) % TX_RING_SIZE;
	}

	/* Ring full: mark the interface busy until tx_done frees slots */
	sc->epic_if.if_flags |= IFF_OACTIVE;

	return;

}
282
283/*
284 * IFWATCHDOG function
285 *
286 * splimp() invoked here
287 */
288static void
289epic_ifwatchdog(
290 struct ifnet *ifp)
291{
292 epic_softc_t *sc = ifp->if_softc;
293 int x;
294 int i;
295
296 x = splimp();
297
298 printf("tx%d: device timeout %d packets\n",
299 sc->unit,sc->pending_txs);
300
301 ifp->if_oerrors+=sc->pending_txs;
302
303 epic_stop(sc);
304 epic_init(sc);
305
306 epic_ifstart(&sc->epic_if);
307
308 splx(x);
309}
310
/*
 * Interrupt function
 *
 * Reads INTSTAT once and services each pending source, acknowledging
 * only the bits it handled by writing them back to INTSTAT.  On a PCI
 * fatal error the chip is fully reset and the routine returns early.
 *
 * splimp() assumed to be done
 */
static void
epic_intr_normal(
	void *arg)
{
	epic_softc_t * sc = (epic_softc_t *) arg;
	int iobase = sc->iobase;
	int status;

	status = inl(iobase + INTSTAT);

	/* Receive events: drain the RX ring, then ack */
	if( status & (INTSTAT_RQE|INTSTAT_HCC|INTSTAT_RCC) ) {
		epic_rx_done( sc );
		outl( iobase + INTSTAT,
			status & (INTSTAT_RQE|INTSTAT_HCC|INTSTAT_RCC) );
	}

	/* Transmit completions: reclaim descriptors, then ack */
	if( status & (INTSTAT_TXC|INTSTAT_TCC) ) {
		epic_tx_done( sc );
		outl( iobase + INTSTAT,
			status & (INTSTAT_TXC|INTSTAT_TCC) );
	}

	/* TX queue empty and interface not marked busy: refill the ring */
	if( (status & INTSTAT_TQE) && !(sc->epic_if.if_flags & IFF_OACTIVE) ) {
		epic_ifstart( &sc->epic_if );
		outl( iobase + INTSTAT, INTSTAT_TQE );
	}

#if 0
	/* GP2 (PHY) interrupt handling -- disabled */
	if( status & INTSTAT_GP2 ){
		printf("tx%d: GP2 int occured\n",sc->unit);
		epic_read_phy_register(sc->iobase,DP83840_BMSR);
		epic_read_phy_register(sc->iobase,DP83840_BMCR);
		outl( iobase + INTSTAT, INTSTAT_GP2 );
	}
#endif

	/* PCI fatal errors: report, optionally dump the TX ring, reset */
	if( status & (INTSTAT_FATAL|INTSTAT_PMA|INTSTAT_PTA|INTSTAT_APE|INTSTAT_DPE) ){
		int j;
		struct epic_tx_buffer * buf;

		printf("tx%d: PCI fatal error occured (%s%s%s%s)\n",
			sc->unit,
			(status&INTSTAT_PMA)?"PMA":"",
			(status&INTSTAT_PTA)?" PTA":"",
			(status&INTSTAT_APE)?" APE":"",
			(status&INTSTAT_DPE)?" DPE":"");

#if defined(EPIC_DEBUG)
		printf("tx%d: dumping descriptors\n",sc->unit);
		for(j=0;j<TX_RING_SIZE;j++){
			buf = sc->tx_buffer + j;
			printf("desc%d: %d %04x, %08x, %04x %d, %08x\n",
				j,
				buf->desc.txlength,buf->desc.status,
				buf->desc.bufaddr,
				buf->desc.control,buf->desc.buflength,
				buf->desc.next
				);
		}
#endif
		epic_stop(sc);
		epic_init(sc);

		return;
	}

	/* UPDATE statistics */
	if (status & (INTSTAT_CNT | INTSTAT_TXU | INTSTAT_OVW | INTSTAT_RXE)) {

		/* update dot3 Rx statistics from the hardware counters */
		sc->dot3stats.dot3StatsMissedFrames += inb(iobase + MPCNT);
		sc->dot3stats.dot3StatsFrameTooLongs += inb(iobase + ALICNT);
		sc->dot3stats.dot3StatsFCSErrors += inb(iobase + CRCCNT);

		/* Update if Rx statistics */
		if (status & (INTSTAT_OVW | INTSTAT_RXE))
			sc->epic_if.if_ierrors++;

		/* Tx FIFO underflow. */
		if (status & INTSTAT_TXU) {
			/* Inc. counters */
			sc->dot3stats.dot3StatsInternalMacTransmitErrors++;
			sc->epic_if.if_oerrors++;

			/* Restart the transmit process. */
			outl(iobase + COMMAND, COMMAND_TXUGO);
		}

		/* Clear all error sources. */
		outl(iobase + INTSTAT,
			status&(INTSTAT_CNT|INTSTAT_TXU|INTSTAT_OVW|INTSTAT_RXE));

	}

	/* If no packets are pending, no watchdog timeout is needed */
	if( sc->pending_txs == 0 )
		sc->epic_if.if_timer = 0;

	return;
}
416
417/*
418 *
419 * splimp() invoked before epic_intr_normal()
420 */
421void
422epic_rx_done(
423 epic_softc_t *sc )
424{
425 int i = 0;
426 u_int16_t len;
427 struct epic_rx_buffer * buf;
428 struct mbuf *m;
429#if defined(RX_TO_MBUF)
430 struct mbuf *m0;
431#endif
432 struct ether_header *eh;
433 int stt;
434
435
436 while( !(sc->rx_buffer[sc->cur_rx].desc.status & 0x8000) && \
437 i++ < RX_RING_SIZE ){
438
439 buf = sc->rx_buffer + sc->cur_rx;
440
441 stt = buf->desc.status;
442
443 /* Check for errors */
444 if( !(buf->desc.status&1) ) {
445 sc->epic_if.if_ierrors++;
446 goto rxerror;
447 }
448
449 /* This is received frame actual length */
450 len = buf->desc.rxlength - ETHER_CRC_LEN;
451
452#if !defined(RX_TO_MBUF)
453 /* Allocate mbuf to pass to OS */
454 MGETHDR(m, M_DONTWAIT, MT_DATA);
455 if( NULL == m ){
456 printf("tx%d: cannot allocate mbuf header\n",sc->unit);
457 sc->epic_if.if_ierrors++;
458 goto rxerror;
459 }
460 if( len > MHLEN ){
461 MCLGET(m,M_DONTWAIT);
462 if( NULL == (m->m_flags & M_EXT) ){
463 printf("tx%d: cannot allocate mbuf cluster\n",
464 sc->unit);
465 m_freem( m );
466 sc->epic_if.if_ierrors++;
467 goto rxerror;
468 }
469 }
470
471 /* Copy packet to new allocated mbuf */
472 memcpy( mtod(m,void*), buf->data, len );
473
474#else /* RX_TO_MBUF */
475
476 /* Try to allocate mbuf cluster */
477 MGETHDR(m0,M_DONTWAIT,MT_DATA);
478 if( NULL == m0 ) {
479 printf("tx%d: cannot allocate mbuf header/n",sc->unit);
480 sc->epic_if.if_ierrors++;
481 goto rxerror;
482 }
483 MCLGET(m0,M_DONTWAIT);
484 if( NULL == (m0->m_flags & M_EXT) ){
485 printf("tx%d: cannot allocate mbuf cluster/n",sc->unit);
486 m_freem(m0);
487 sc->epic_if.if_ierrors++;
488 goto rxerror;
489 }
490
491 /* Swap new allocated mbuf with mbuf, containing packet */
492 m = buf->mbuf;
493 buf->mbuf = m0;
494
495 /* Insert new allocated mbuf into device queue */
496 buf->data = mtod( buf->mbuf, caddr_t );
497 buf->desc.bufaddr = vtophys( buf->data );
498
499#endif
500
501 /* First mbuf in packet holds the ethernet and packet headers */
502 eh = mtod( m, struct ether_header * );
503 m->m_pkthdr.rcvif = &(sc->epic_if);
504 m->m_pkthdr.len = len;
505 m->m_len = len;
506
507#if NBPFILTER > 0
508 /* Give mbuf to BPFILTER */
509 if( sc->epic_if.if_bpf ) bpf_mtap( &sc->epic_if, m );
510
511 /* Accept only our packets, broadcasts and multicasts */
512 if( (eh->ether_dhost[0] & 1) == 0 &&
513 bcmp(eh->ether_dhost,sc->epic_ac.ac_enaddr,ETHER_ADDR_LEN)){
514 m_freem(m);
515 goto rxerror;
516 }
517#endif
518
519 /* Second mbuf holds packet ifself */
520 m->m_pkthdr.len = len - sizeof(struct ether_header);
521 m->m_len = len - sizeof( struct ether_header );
522 m->m_data += sizeof( struct ether_header );
523
524 /* Give mbuf to OS */
525 ether_input(&sc->epic_if, eh, m);
526
527 /* Successfuly received frame */
528 sc->epic_if.if_ipackets++;
529
530rxerror:
531 /* Mark current descriptor as free */
532 buf->desc.rxlength = 0;
533 buf->desc.status = 0x8000;
534
535 /* Switch to next descriptor */
536 sc->cur_rx = (sc->cur_rx+1) % RX_RING_SIZE;
537 }
538
539 return;
540}
541
542/*
543 *
544 * splimp() invoked before epic_intr_normal()
545 */
546void
547epic_tx_done( epic_softc_t *sc ){
548 int i = 0;
549 u_int32_t if_flags=~0;
550 int coll;
551 u_int16_t stt;
552
553 while( i++ < TX_RING_SIZE ){
554 struct epic_tx_buffer *buf = sc->tx_buffer + sc->dirty_tx;
555 u_int16_t len = buf->desc.txlength;
556 stt = buf->desc.status;
557
558 if( stt & 0x8000 )
559 break; /* following packets are not Txed yet */
560
561 if( stt == 0 ){
562 if_flags = ~IFF_OACTIVE;
563 break;
564 }
565
566 sc->pending_txs--; /* packet is finished */
567 sc->dirty_tx = (sc->dirty_tx + 1) % TX_RING_SIZE;
568
569 coll = (stt >> 8) & 0xF; /* number of collisions*/
570
571 if( stt & 0x0001 ){
572 sc->epic_if.if_opackets++;
573 } else {
574 if(stt & 0x0008)
575 sc->dot3stats.dot3StatsCarrierSenseErrors++;
576
577 if(stt & 0x1050)
578 sc->dot3stats.dot3StatsInternalMacTransmitErrors++;
579
580 if(stt & 0x1000) coll = 16;
581
582 sc->epic_if.if_oerrors++;
583 }
584
585 if(stt & 0x0002) /* What does it mean? */
586 sc->dot3stats.dot3StatsDeferredTransmissions++;
587
588 sc->epic_if.if_collisions += coll;
589
590 switch( coll ){
591 case 0:
592 break;
593 case 16:
594 sc->dot3stats.dot3StatsExcessiveCollisions++;
595 sc->dot3stats.dot3StatsCollFrequencies[15]++;
596 break;
597 case 1:
598 sc->dot3stats.dot3StatsSingleCollisionFrames++;
599 sc->dot3stats.dot3StatsCollFrequencies[0]++;
600 break;
601 default:
602 sc->dot3stats.dot3StatsMultipleCollisionFrames++;
603 sc->dot3stats.dot3StatsCollFrequencies[coll-1]++;
604 break;
605 }
606
607 buf->desc.status = 0;
608 buf->desc.txlength = 0;
609
610#if defined(TX_FRAG_LIST)
611 buf->flist.numfrags = 0;
612 m_freem( buf->mbuf );
613 buf->mbuf = NULL;
614#endif
615
616 if_flags = ~IFF_OACTIVE;
617 }
618
619 sc->epic_if.if_flags &= if_flags;
620
621 if( !(sc->epic_if.if_flags & IFF_OACTIVE) )
622 epic_ifstart( &sc->epic_if );
623
624}
625
626/*
627 * Probe function
628 */
629static char*
630epic_pci_probe(
631 pcici_t config_id,
632 pcidi_t device_id)
633{
634 if( PCI_VENDORID(device_id) != SMC_VENDORID )
635 return NULL;
636
637 if( PCI_CHIPID(device_id) == CHIPID_83C170 )
638 return "SMC 83c170";
639
640 return NULL;
641}
642
/*
 * PCI_Attach function
 *
 * Brings the chip out of low-power mode, reads the MAC address from
 * the LAN0 registers, hooks the interrupt, fills in and attaches the
 * ifnet structure, and reports the PHY's current media settings.
 *
 * splimp() invoked here
 */
static void
epic_pci_attach(
	pcici_t config_id,
	int unit)
{
	struct ifnet * ifp;
	epic_softc_t *sc;
	u_int32_t iobase;
	u_int32_t irq;
	u_int32_t phyid;
	int i,s;
	int phy, phy_idx;

	/*
	 * Get iobase and irq level
	 */
	irq = PCI_CONF_READ(PCI_CFIT) & (0xFF);
	if (!pci_map_port(config_id, PCI_CBIO,(u_short *) &iobase))
		return;

	/*
	 * Allocate and preinitialize softc structure
	 */
	sc = (epic_softc_t *) malloc(sizeof(epic_softc_t), M_DEVBUF, M_NOWAIT);
	if (sc == NULL)	return;
	epics[ unit ] = sc;

	/*
	 * Zero softc structure
	 */
	bzero(sc, sizeof(epic_softc_t));

	/*
	 * Initialize softc
	 */
	sc->unit = unit;
	sc->iobase = iobase;
	sc->irq = irq;

	/* Bring the chip out of low-power mode. */
	outl( iobase + GENCTL, 0x0000 );

	/* Magic?! If we don't set this bit the MII interface won't work. */
	outl( iobase + TEST1, 0x0008 );

	/* Read mac address (may be better to read it from EEPROM?) */
	for (i = 0; i < ETHER_ADDR_LEN / sizeof( u_int16_t); i++)
		((u_int16_t *)sc->epic_macaddr)[i] = inw(iobase + LAN0 + i*4);

	/* Display some info */
	printf("tx%d: address %02x:%02x:%02x:%02x:%02x:%02x,",sc->unit,
		sc->epic_macaddr[0],sc->epic_macaddr[1],sc->epic_macaddr[2],
		sc->epic_macaddr[3],sc->epic_macaddr[4],sc->epic_macaddr[5]);


	s = splimp();

	/* Map interrupt; on failure undo the softc registration */
	if( !pci_map_int(config_id, epic_intr_normal, (void*)sc, &net_imask) ) {
		printf("tx%d: couldn't map interrupt\n",unit);
		epics[ unit ] = NULL;
		free(sc, M_DEVBUF);
		return;
	}

	/* Fill ifnet structure */
	ifp = &sc->epic_if;

	ifp->if_unit = unit;
	ifp->if_name = "tx";
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST|IFF_SIMPLEX|IFF_MULTICAST|IFF_ALLMULTI;
	ifp->if_ioctl = epic_ifioctl;
	ifp->if_start = epic_ifstart;
	ifp->if_watchdog = epic_ifwatchdog;
	ifp->if_init = (if_init_f_t*)epic_init;
	ifp->if_timer = 0;
	ifp->if_output = ether_output;
	ifp->if_linkmib = &sc->dot3stats;
	ifp->if_linkmiblen = sizeof(struct ifmib_iso_8802_3);

	/* Identify the chipset for the dot3 MIB */
	sc->dot3stats.dot3StatsEtherChipSet =
		DOT3CHIPSET(dot3VendorSMC,
			dot3ChipSetSMC83c170);

	sc->dot3stats.dot3Compliance = DOT3COMPLIANCE_COLLS;

	printf(" type SMC9432TX");

	/* Report the media configuration currently in the PHY's BMCR */
	i = epic_read_phy_register(iobase, DP83840_BMCR);

	if( i & BMCR_AUTONEGOTIATION ){
		printf(" [Auto-Neg.");

		if( i & BMCR_100MBPS ) printf(" 100Mbps");
		else printf(" 10Mbps");

		if( i & BMCR_FULL_DUPLEX ) printf(" FD");

		printf("]\n");

		if( i & BMCR_FULL_DUPLEX )
			printf("tx%d: WARNING! FD autonegotiated, not supported\n",sc->unit);

	} else {
		/* Manual mode: encode speed/duplex into IFF_LINK* flags */
		ifp->if_flags |= IFF_LINK0;
		if( i & BMCR_100MBPS ) {
			printf(" [100Mbps");
			ifp->if_flags |= IFF_LINK2;
		} else printf(" [10Mbps");

		if( i & BMCR_FULL_DUPLEX ) {
			printf(" FD");
			ifp->if_flags |= IFF_LINK1;
		}
		printf("]\n");
	}
#if defined(EPIC_DEBUG)
	/* Dump the PHY OUI / model / revision */
	printf("tx%d: PHY id: (",sc->unit);
	i=epic_read_phy_register(iobase,DP83840_PHYIDR1);
	printf("%04x:",i);
	phyid=i<<6;
	i=epic_read_phy_register(iobase,DP83840_PHYIDR2);
	printf("%04x)",i);
	phyid|=((i>>10)&0x3F);
	printf(" %08x, rev %x, mod %x\n",phyid,(i)&0xF, (i>>4)&0x3f);
#endif

	/* BMSR link status is latched: read repeatedly to get current state */
	epic_read_phy_register(iobase,DP83840_BMSR);
	epic_read_phy_register(iobase,DP83840_BMSR);
	epic_read_phy_register(iobase,DP83840_BMSR);
	i=epic_read_phy_register(iobase,DP83840_BMSR);

	if( !(i & BMSR_LINK_STATUS) )
		printf("tx%d: WARNING! no link estabilished\n",sc->unit);

	/*
	 * Attach to if manager
	 */
	if_attach(ifp);
	ether_ifattach(ifp);

#if NBPFILTER > 0
	bpfattach(ifp,DLT_EN10MB, sizeof(struct ether_header));
#endif

	splx(s);

	return;
}
798
/*
 * IFINIT function
 *
 * Soft-resets the chip, rebuilds the RX/TX rings, programs the MAC
 * address, receive mode, media speed and multicast filter, unmasks
 * interrupts and starts the receiver.  Returns 0 on success, -1 if
 * ring initialization fails.
 *
 * splimp() invoked here
 */
static int
epic_init(
	epic_softc_t * sc)
{
	struct ifnet *ifp = &sc->epic_if;
	int iobase = sc->iobase;
	int i,s;

	s = splimp();

	/* Soft reset the chip. */
	outl(iobase + GENCTL, GENCTL_SOFT_RESET );

	/* Reset takes 15 ticks; busy-wait it out */
	for(i=0;i<0x100;i++);

	/* Wake up */
	outl( iobase + GENCTL, 0 );

	/* Magic bit needed for the MII interface (see attach) */
	outl( iobase + TEST1, 0x0008);

	/* Initialize rings */
	if( -1 == epic_init_rings( sc ) ) {
		printf("tx%d: failed to initialize rings\n",sc->unit);
		epic_free_rings( sc );
		splx(s);
		return -1;
	}

	/* Put node address to EPIC */
	outl( iobase + LAN0 + 0x0, ((u_int16_t *)sc->epic_macaddr)[0] );
	outl( iobase + LAN0 + 0x4, ((u_int16_t *)sc->epic_macaddr)[1] );
	outl( iobase + LAN0 + 0x8, ((u_int16_t *)sc->epic_macaddr)[2] );

	/* Enable interrupts, set for PCI read multiple and etc */
	outl( iobase + GENCTL,
		GENCTL_ENABLE_INTERRUPT | GENCTL_MEMORY_READ_MULTIPLE |
		GENCTL_ONECOPY | GENCTL_RECEIVE_FIFO_THRESHOLD64 );

	/* Set transmit threshold */
	outl( iobase + ETXTHR, 0x40 );

	/* Compute and set RXCON. */
	epic_set_rx_mode( sc );

	/* Set MII speed mode */
	epic_set_media_speed( sc );

	/* Set multicast table */
	epic_set_mc_table( sc );

	/* Enable interrupts by setting the interrupt mask. */
	outl( iobase + INTMASK,
		INTSTAT_RCC | INTSTAT_RQE | INTSTAT_OVW | INTSTAT_RXE |
		INTSTAT_TXC | INTSTAT_TCC | INTSTAT_TQE | INTSTAT_TXU |
		INTSTAT_CNT | /*INTSTAT_GP2 |*/ INTSTAT_FATAL |
		INTSTAT_PTA | INTSTAT_PMA | INTSTAT_APE | INTSTAT_DPE );

	/* Start rx process */
	outl( iobase + COMMAND, COMMAND_RXQUEUED | COMMAND_START_RX );

	/* Mark interface running ... */
	if( ifp->if_flags & IFF_UP ) ifp->if_flags |= IFF_RUNNING;
	else ifp->if_flags &= ~IFF_RUNNING;

	/* ... and free */
	ifp->if_flags &= ~IFF_OACTIVE;

	splx(s);
	return 0;
}
876
877/*
878 * This function should set EPIC's registers according IFF_* flags
879 */
880static void
881epic_set_rx_mode(
882 epic_softc_t * sc)
883{
884 struct ifnet *ifp = &sc->epic_if;
885 u_int16_t rxcon = 0;
886
887#if NBPFILTER > 0
888 if( sc->epic_if.if_flags & IFF_PROMISC )
889 rxcon |= RXCON_PROMISCUOUS_MODE;
890#endif
891
892 if( sc->epic_if.if_flags & IFF_BROADCAST )
893 rxcon |= RXCON_RECEIVE_BROADCAST_FRAMES;
894
895 if( sc->epic_if.if_flags & IFF_MULTICAST )
896 rxcon |= RXCON_RECEIVE_MULTICAST_FRAMES;
897
898 outl( sc->iobase + RXCON, rxcon );
899
900 return;
901}
902
/*
 * This function sets the MII to the mode specified by IFF_LINK* flags.
 *
 * IFF_LINK0 selects manual mode (IFF_LINK2 = 100Mbps, IFF_LINK1 =
 * full duplex); otherwise autonegotiation is run and TXCON is set to
 * full-duplex loopback mode if the link partner negotiated it.
 */
static void
epic_set_media_speed(
	epic_softc_t * sc)
{
	struct ifnet *ifp = &sc->epic_if;
	u_int16_t media;
	u_int32_t i;

	/* Set media speed */
	if( ifp->if_flags & IFF_LINK0 ){
		/* Advertise all modes; only manual full-duplex is allowed */
		media = epic_read_phy_register( sc->iobase, DP83840_ANAR );
		media |= ANAR_100|ANAR_10|ANAR_100_FD|ANAR_10_FD;
		epic_write_phy_register( sc->iobase, DP83840_ANAR, media );

		/* Set mode from the IFF_LINK1/IFF_LINK2 flags */
		media = (ifp->if_flags&IFF_LINK2)?BMCR_100MBPS:0;
		media |= (ifp->if_flags&IFF_LINK1)?BMCR_FULL_DUPLEX:0;
		epic_write_phy_register( sc->iobase, DP83840_BMCR, media );

		ifp->if_baudrate =
			(ifp->if_flags&IFF_LINK2)?100000000:10000000;

		outl( sc->iobase + TXCON,(ifp->if_flags&IFF_LINK1)?TXCON_LOOPBACK_MODE_FULL_DUPLEX|TXCON_DEFAULT:TXCON_DEFAULT );

	} else {
		/* If autoneg is set, IFF_LINK flags are meaningless */
		ifp->if_flags &= ~(IFF_LINK0|IFF_LINK1|IFF_LINK2);
		ifp->if_baudrate = 100000000;

		outl( sc->iobase + TXCON, TXCON_DEFAULT );

		/* Did it autoneg full duplex? */
		if (epic_autoneg(sc) == EPIC_FULL_DUPLEX)
			outl( sc->iobase + TXCON,
				TXCON_LOOPBACK_MODE_FULL_DUPLEX|TXCON_DEFAULT);
	}

	return;
}
946
947/*
948 * This functions controls the autoneg processes of the phy
949 * It implements the workaround that is described in section 7.2 & 7.3 of the
950 * DP83840A data sheet
951 * http://www.national.com/ds/DP/DP83840A.pdf
952 */
953static int
954epic_autoneg(
955 epic_softc_t * sc)
956{
957 struct ifnet *ifp = &sc->epic_if;
958 u_int16_t media;
959 u_int16_t i;
960
961 media = epic_read_phy_register( sc->iobase, DP83840_ANAR );
962 media |= ANAR_100|ANAR_100_FD|ANAR_10|ANAR_10_FD;
963 epic_write_phy_register( sc->iobase, DP83840_ANAR, media );
964
965 /* Set and restart autoneg */
966 epic_write_phy_register( sc->iobase, DP83840_BMCR,
967 BMCR_AUTONEGOTIATION | BMCR_RESTART_AUTONEG );
968
969 /* Wait 3 seconds for the autoneg to finish
970 * This is the recommended time from the DP83840A data sheet
971 * Section 7.1
972 */
973 DELAY(3000000);
974
975 epic_read_phy_register( sc->iobase, DP83840_BMSR);
976
977 /* BMSR must be read twice to update the link status bit/
978 * since that bit is a latch bit
979 */
980 i = epic_read_phy_register( sc->iobase, DP83840_BMSR);
981
982 if ((i & BMSR_LINK_STATUS) && ( i & BMSR_AUTONEG_COMPLETE)){
983 i = epic_read_phy_register( sc->iobase, DP83840_PAR);
984
985 if ( i & PAR_FULL_DUPLEX )
986 return EPIC_FULL_DUPLEX;
987 else
988 return EPIC_HALF_DUPLEX;
989 }
990 else { /*Auto-negotiation or link status is not 1
991 Thus the auto-negotiation failed and one
992 must take other means to fix it.
993 */
994
995 /* ANER must be read twice to get the correct reading for the
996 * Multiple link fault bit -- it is a latched bit
997 */
998 epic_read_phy_register (sc->iobase, DP83840_ANER);
999
1000 i = epic_read_phy_register (sc->iobase, DP83840_ANER);
1001
1002 if ( i & ANER_MULTIPLE_LINK_FAULT ) {
1003 /* it can be forced to 100Mb/s Half-Duplex */
1004 media = epic_read_phy_register(sc->iobase,DP83840_BMCR);
1005 media &= ~(BMCR_AUTONEGOTIATION | BMCR_FULL_DUPLEX);
1006 media |= BMCR_100MBPS;
1007 epic_write_phy_register(sc->iobase,DP83840_BMCR,media);
1008
1009 /* read BMSR again to determine link status */
1010 epic_read_phy_register(sc->iobase, DP83840_BMSR);
1011 i=epic_read_phy_register( sc->iobase, DP83840_BMSR);
1012
1013 if (i & BMSR_LINK_STATUS){
1014 /* port is linked to the non Auto-Negotiation
1015 * 100Mbs partner.
1016 */
1017 return EPIC_HALF_DUPLEX;
1018 }
1019 else {
1020 media = epic_read_phy_register (sc->iobase, DP83840_BMCR);
1021 media &= !(BMCR_AUTONEGOTIATION | BMCR_FULL_DUPLEX | BMCR_100MBPS);
1022 epic_write_phy_register(sc->iobase, DP83840_BMCR, media);
1023 epic_read_phy_register(sc->iobase, DP83840_BMSR);
1024 i=epic_read_phy_register( sc->iobase, DP83840_BMSR);
1025
1026 if (i & BMSR_LINK_STATUS) {
1027 /*port is linked to the non
1028 * Auto-Negotiation10Mbs partner
1029 */
1030 return EPIC_HALF_DUPLEX;
1031 }
1032 }
1033 }
1034 /* If we get here we are most likely not connected
1035 * so lets default it to half duplex
1036 */
1037 return EPIC_HALF_DUPLEX;
1038 }
1039
1040}
1041
1042/*
1043 * This function sets EPIC multicast table
1044 */
1045static void
1046epic_set_mc_table(
1047 epic_softc_t * sc)
1048{
1049 struct ifnet *ifp = &sc->epic_if;
1050
1051 if( ifp->if_flags & IFF_MULTICAST ){
1052 outl( sc->iobase + MC0, 0xFFFF );
1053 outl( sc->iobase + MC1, 0xFFFF );
1054 outl( sc->iobase + MC2, 0xFFFF );
1055 outl( sc->iobase + MC3, 0xFFFF );
1056 }
1057
1058 return;
1059}
1060
/*
 * This function should completely stop rx and tx processes.
 *
 * Masks interrupts, commands the RX/TX DMA engines to stop, waits
 * (bounded busy-wait) for them to go idle, soft-resets the chip and
 * frees the descriptor rings.
 *
 * splimp() invoked here
 */
static void
epic_stop(
	epic_softc_t * sc)
{
	int iobase = sc->iobase;
	int i,s;

	s = splimp();
	sc->epic_if.if_timer = 0;

	/* Disable interrupts, stop processes */
	outl( iobase + INTMASK, 0 );
	outl( iobase + GENCTL, 0 );
	outl( iobase + COMMAND,
		COMMAND_STOP_RX | COMMAND_STOP_RDMA | COMMAND_STOP_TDMA );

	/* Wait for RX and TX DMA to stop (bounded spin) */
	for(i=0;i<0x100000;i++){
		if( (inl(iobase+INTSTAT)&(INTSTAT_RXIDLE|INTSTAT_TXIDLE)) ==
			(INTSTAT_RXIDLE|INTSTAT_TXIDLE) ) break;
	}

	if( !(inl(iobase+INTSTAT)&INTSTAT_RXIDLE) )
		printf("tx%d: can't stop RX DMA\n",sc->unit);

	if( !(inl(iobase+INTSTAT)&INTSTAT_TXIDLE) )
		printf("tx%d: can't stop TX DMA\n",sc->unit);

	/* Reset chip */
	outl( iobase + GENCTL, GENCTL_SOFT_RESET );
	for(i=0;i<0x100;i++);

	/* Free memory allocated for rings */
	epic_free_rings( sc );

	splx(s);

}
1104
1105/*
1106 * This function should free all allocated for rings memory.
1107 * NB: The DMA processes must be stopped.
1108 *
1109 * splimp() assumed to be done
1110 */
1111static void
1112epic_free_rings(epic_softc_t * sc){
1113 int i;
1114
1115 for(i=0;i<RX_RING_SIZE;i++){
1116 struct epic_rx_buffer *buf = sc->rx_buffer + i;
1117
1118 buf->desc.status = 0;
1119 buf->desc.buflength = 0;
1120 buf->desc.bufaddr = 0;
1121 buf->data = NULL;
1122
1123#if defined(RX_TO_MBUF)
1124 if( buf->mbuf ) m_freem( buf->mbuf );
1125 buf->mbuf = NULL;
1126#else
1127 if( buf->data ) free( buf->data, M_DEVBUF );
1128 buf->data = NULL;
1129#endif
1130 }
1131
1132 for(i=0;i<TX_RING_SIZE;i++){
1133 struct epic_tx_buffer *buf = sc->tx_buffer + i;
1134
1135 buf->desc.status = 0;
1136 buf->desc.buflength = 0;
1137 buf->desc.bufaddr = 0;
1138
1139#if defined(TX_FRAG_LIST)
1140 if( buf->mbuf ) m_freem( buf->mbuf );
1141 buf->mbuf = NULL;
1142#else
1143 if( buf->data ) free( buf->data, M_DEVBUF );
1144 buf->data = NULL;
1145#endif
1146 }
1147}
1148
1149/*
1150 * Initialize Rx and Tx rings and give them to EPIC
1151 *
1152 * If RX_TO_MBUF option is enabled, mbuf cluster is allocated instead of
1153 * static buffer for RX ringi element.
1154 * If TX_FRAG_LIST option is enabled, nothig is done, except chaining
1155 * descriptors to ring and point them to static fraglists.
1156 *
1157 * splimp() assumed to be done
1158 */
1159static int
1160epic_init_rings(epic_softc_t * sc){
1161 int i;
1162 struct mbuf *m;
1163
1164 sc->cur_rx = sc->cur_tx = sc->dirty_tx = sc->pending_txs = 0;
1165
1166 for (i = 0; i < RX_RING_SIZE; i++) {
1167 struct epic_rx_buffer *buf = sc->rx_buffer + i;
1168
1169 buf->desc.status = 0; /* Owned by driver */
1170 buf->desc.next =
1171 vtophys(&(sc->rx_buffer[(i+1)%RX_RING_SIZE].desc) );
1172
1173#if defined(RX_TO_MBUF)
1174 MGETHDR(buf->mbuf,M_DONTWAIT,MT_DATA);
1175 if( NULL == buf->mbuf ) return -1;
1176 MCLGET(buf->mbuf,M_DONTWAIT);
1177 if( NULL == (buf->mbuf->m_flags & M_EXT) ) return -1;
1178
1179 buf->data = mtod( buf->mbuf, caddr_t );
1180#else
1181 buf->data = malloc(ETHER_MAX_FRAME_LEN, M_DEVBUF, M_NOWAIT);
1182 if( buf->data == NULL ) return -1;
1183#endif
1184
1185 buf->desc.bufaddr = vtophys( buf->data );
1186 buf->desc.buflength = ETHER_MAX_FRAME_LEN;
1187 buf->desc.status = 0x8000; /* Give to EPIC */
1188
1189 }
1190
1191 for (i = 0; i < TX_RING_SIZE; i++) {
1192 struct epic_tx_buffer *buf = sc->tx_buffer + i;
1193
1194 buf->desc.status = 0;
1195 buf->desc.next =
1196 vtophys(&(sc->tx_buffer[(i+1)%TX_RING_SIZE].desc) );
1197
1198#if defined(TX_FRAG_LIST)
1199 buf->mbuf = NULL;
1200 buf->desc.bufaddr = vtophys( &(buf->flist) );
1201#else
1202 /* Allocate buffer */
1203 buf->data = malloc(ETHER_MAX_FRAME_LEN, M_DEVBUF, M_NOWAIT);
1204
1205 if( buf->data == NULL ) return -1;
1206
1207 buf->desc.bufaddr = vtophys( buf->data );
1208 buf->desc.buflength = ETHER_MAX_FRAME_LEN;
1209#endif
1210 }
1211
1212 /* Give rings to EPIC */
1213 outl( sc->iobase + PRCDAR, vtophys(&(sc->rx_buffer[0].desc)) );
1214 outl( sc->iobase + PTCDAR, vtophys(&(sc->tx_buffer[0].desc)) );
1215
1216 return 0;
1217}
1218
1219/*
1220 * EEPROM operation functions
1221 */
/*
 * Write a value to an EEPROM control register, then poll until the
 * chip clears the busy bit (0x20) or the bounded poll expires.
 */
static void epic_write_eepromreg(u_int16_t regaddr, u_int8_t val){
	u_int16_t i;

	outb( regaddr, val );

	/* Busy-wait for completion; gives up silently after 0xFF polls */
	for( i=0;i<0xFF; i++)
		if( !(inb( regaddr ) & 0x20) ) break;

	return;
}
1232
/* Read an EEPROM control register (thin wrapper around inb) */
static u_int8_t epic_read_eepromreg(u_int16_t regaddr){
	return inb( regaddr );
}
1236
/*
 * Clock one bit into the serial EEPROM: drive the data/control value,
 * raise the clock bit (0x4), drop it again, and read back EECTL so
 * the caller can sample the data-out bit.
 */
static u_int8_t epic_eeprom_clock( u_int16_t ioaddr, u_int8_t val ){

	epic_write_eepromreg( ioaddr + EECTL, val );
	epic_write_eepromreg( ioaddr + EECTL, (val | 0x4) );
	epic_write_eepromreg( ioaddr + EECTL, val );

	return epic_read_eepromreg( ioaddr + EECTL );
}
1245
/*
 * Shift a 16-bit word out to the EEPROM, MSB first.
 * 0x0B clocks a '1' bit, 0x3 clocks a '0' bit (chip-select + data lines).
 */
static void epic_output_eepromw(u_int16_t ioaddr, u_int16_t val){
	int i;
	for( i = 0xF; i >= 0; i--){
		if( (val & (1 << i)) ) epic_eeprom_clock( ioaddr, 0x0B );
		else epic_eeprom_clock( ioaddr, 3);
	}
}
1253
/*
 * Shift a 16-bit word in from the EEPROM, MSB first.
 * Each clock with 0x3 keeps chip-select asserted; bit 0x10 of the
 * returned EECTL value is the data-out line.
 */
static u_int16_t epic_input_eepromw(u_int16_t ioaddr){
	int i;
	int tmp;
	u_int16_t retval = 0;

	for( i = 0xF; i >= 0; i--) {
		tmp = epic_eeprom_clock( ioaddr, 0x3 );
		if( tmp & 0x10 ){
			retval |= (1 << i);
		}
	}
	return retval;
}
1267
1268static int epic_read_eeprom(u_int16_t ioaddr, u_int16_t loc){
1269 int i;
1270 u_int16_t dataval;
1271 u_int16_t read_cmd;
1272
1273 epic_write_eepromreg(ioaddr + EECTL , 3);
1274
1275 if( epic_read_eepromreg(ioaddr + EECTL) & 0x40 )
1276 read_cmd = ( loc & 0x3F ) | 0x180;
1277 else
1278 read_cmd = ( loc & 0xFF ) | 0x600;
1279
1280 epic_output_eepromw( ioaddr, read_cmd );
1281
1282 dataval = epic_input_eepromw( ioaddr );
1283
1284 epic_write_eepromreg( ioaddr + EECTL, 1 );
1285
1286 return dataval;
1287}
1288
/*
 * Read a PHY register via the EPIC MII interface.  The register number
 * goes in bits 4..7 of MIICTL; 0x0601 encodes PHY address 1 plus the
 * read-start bit.  Polls the busy bit (bit 0) with a bounded loop,
 * then returns whatever landed in MIIDATA (no timeout error reported).
 */
static int epic_read_phy_register(u_int16_t iobase, u_int16_t loc){
	int i;

	outl( iobase + MIICTL, ((loc << 4) | 0x0601) );

	for( i=0;i<0x1000;i++) if( !(inl( iobase + MIICTL )&1) ) break;

	return inl( iobase + MIIDATA );
}
1298
/*
 * Write a PHY register via the EPIC MII interface.  Data is staged in
 * MIIDATA first, then the write command (0x0602: PHY address 1 plus the
 * write-start bit) is issued; polls the write-busy bit (bit 1) with a
 * bounded loop and gives up silently on timeout.
 */
static void epic_write_phy_register(u_int16_t iobase, u_int16_t loc,u_int16_t val){
	int i;

	outl( iobase + MIIDATA, val );
	outl( iobase + MIICTL, ((loc << 4) | 0x0602) );

	for( i=0;i<0x1000;i++) if( !(inl( iobase + MIICTL )&2) ) break;

	return;
}
1309
1310#endif /* NPCI > 0 */
104 "tx",
105 epic_pci_probe,
106 epic_pci_attach,
107 &epic_pci_count,
108 NULL };
109
110/*
111 * Append this driver to pci drivers list
112 */
113DATA_SET ( pcidevice_set, txdevice );
114
115/*
116 * ifioctl function
117 *
118 * splimp() invoked here
119 */
static int
epic_ifioctl(register struct ifnet * ifp, int command, caddr_t data){
	epic_softc_t *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int x, error = 0;

	/* All register access happens at splimp to exclude the interrupt path */
	x = splimp();

	switch (command) {

	case SIOCSIFADDR:
	case SIOCGIFADDR:
		/* Address handling is delegated entirely to the generic layer */
		ether_ioctl(ifp, command, data);
		break;

	case SIOCSIFFLAGS:
		/*
		 * If the interface is marked up and stopped, then start it.
		 * If it is marked down and running, then stop it.
		 */
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_flags & IFF_RUNNING) == 0) {
				epic_init(sc);
				break;
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING) {
				epic_stop(sc);
				ifp->if_flags &= ~IFF_RUNNING;
				break;
			}
		}

		/* Handle IFF_PROMISC flag */
		epic_set_rx_mode(sc);

		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:

		/* Update out multicast list */
#if defined(__FreeBSD__) && __FreeBSD__ >= 3
		/* FreeBSD 3+: kernel maintains the list; just reload hw table */
		epic_set_mc_table(sc);
		error = 0;
#else
		error = (command == SIOCADDMULTI) ?
			ether_addmulti(ifr, &sc->epic_ac) :
			ether_delmulti(ifr, &sc->epic_ac);

		/* ENETRESET means the hw filter must be recomputed */
		if (error == ENETRESET) {
			epic_set_mc_table(sc);
			error = 0;
		}
#endif
		break;

	case SIOCSIFMTU:
		/*
		 * Set the interface MTU.
		 */
		if (ifr->ifr_mtu > ETHERMTU) {
			error = EINVAL;
		} else {
			ifp->if_mtu = ifr->ifr_mtu;
		}
		break;

	default:
		error = EINVAL;
	}
	splx(x);

	return error;
}
195
196/*
197 * ifstart function
198 *
199 * splimp() assumed to be done
200 */
static void
epic_ifstart(struct ifnet * const ifp){
	epic_softc_t *sc = ifp->if_softc;

	/* Fill Tx descriptors until the ring is full or the queue is empty */
	while( sc->pending_txs < TX_RING_SIZE ){
		int entry = sc->cur_tx % TX_RING_SIZE;
		struct epic_tx_buffer * buf = sc->tx_buffer + entry;
		struct mbuf *m,*m0;
		int len;

		/* If descriptor is busy, set IFF_OACTIVE and exit */
		if( buf->desc.status & 0x8000 ) break;

		/* Get next packet to send */
		IF_DEQUEUE( &(sc->epic_if.if_snd), m );

		/* If no more mbuf's to send, return
		 * (early return: IFF_OACTIVE is NOT set in this case) */
		if( NULL == m ) return;
		/* Save mbuf header */
		m0 = m;

#if defined(TX_FRAG_LIST)
		/* Keep the chain; descriptor points at a fragment list
		 * (at most 63 fragments are described, the rest is dropped) */
		if( buf->mbuf ) m_freem( buf->mbuf );
		buf->mbuf = m;
		buf->flist.numfrags = 0;

		for(len=0;(m0!=0)&&(buf->flist.numfrags<63);m0=m0->m_next) {
			buf->flist.frag[buf->flist.numfrags].fraglen =
				m0->m_len;
			buf->flist.frag[buf->flist.numfrags].fragaddr =
				vtophys( mtod(m0, caddr_t) );
			len += m0->m_len;
			buf->flist.numfrags++;
		}

		/* Does not generate TXC unless ring is full more then a half */
		buf->desc.control =
			(sc->pending_txs>TX_RING_SIZE/2)?0x05:0x01;
#else
		/* Copy-out path: flatten the chain into the static buffer */
		for (len = 0; m0 != 0; m0 = m0->m_next) {
			bcopy(mtod(m0, caddr_t), buf->data + len, m0->m_len);
			len += m0->m_len;
		}

		/* Does not generate TXC unless ring is full more then a half */
		buf->desc.control =
			(sc->pending_txs>TX_RING_SIZE/2)?0x14:0x10;
#endif

		/* Packet should be at least ETHER_MIN_LEN */
		buf->desc.txlength = max(len,ETHER_MIN_LEN-ETHER_CRC_LEN);

		/* Pass ownership to the chip */
		buf->desc.status = 0x8000;

		/* Set watchdog timer */
		ifp->if_timer = 2;

#if NBPFILTER > 0
		if( ifp->if_bpf ) bpf_mtap( ifp, m );
#endif

#if !defined(TX_FRAG_LIST)
		/* We don't need mbuf anyway */
		m_freem( m );
#endif
		/* Trigger an immediate transmit demand. */
		outl( sc->iobase + COMMAND, COMMAND_TXQUEUED );

		/* Packet queued successful */
		sc->pending_txs++;

		/* Switch to next descriptor */
		sc->cur_tx = ( sc->cur_tx + 1 ) % TX_RING_SIZE;
	}

	/* Reached only when the ring filled up: flag "no more room";
	 * epic_tx_done clears it again as descriptors complete */
	sc->epic_if.if_flags |= IFF_OACTIVE;

	return;

}
282
283/*
284 * IFWATCHDOG function
285 *
286 * splimp() invoked here
287 */
288static void
289epic_ifwatchdog(
290 struct ifnet *ifp)
291{
292 epic_softc_t *sc = ifp->if_softc;
293 int x;
294 int i;
295
296 x = splimp();
297
298 printf("tx%d: device timeout %d packets\n",
299 sc->unit,sc->pending_txs);
300
301 ifp->if_oerrors+=sc->pending_txs;
302
303 epic_stop(sc);
304 epic_init(sc);
305
306 epic_ifstart(&sc->epic_if);
307
308 splx(x);
309}
310
311/*
312 * Interrupt function
313 *
314 * splimp() assumed to be done
315 */
static void
epic_intr_normal(
	void *arg)
{
	epic_softc_t * sc = (epic_softc_t *) arg;
	int iobase = sc->iobase;
	int status;

	status = inl(iobase + INTSTAT);

	/* Rx completion / Rx queue empty: drain received frames first,
	 * then acknowledge only the bits we handled */
	if( status & (INTSTAT_RQE|INTSTAT_HCC|INTSTAT_RCC) ) {
		epic_rx_done( sc );
		outl( iobase + INTSTAT,
			status & (INTSTAT_RQE|INTSTAT_HCC|INTSTAT_RCC) );
	}

	/* Tx completion: reclaim finished descriptors */
	if( status & (INTSTAT_TXC|INTSTAT_TCC) ) {
		epic_tx_done( sc );
		outl( iobase + INTSTAT,
			status & (INTSTAT_TXC|INTSTAT_TCC) );
	}

	/* Tx queue empty and we are not already flagged busy: refill */
	if( (status & INTSTAT_TQE) && !(sc->epic_if.if_flags & IFF_OACTIVE) ) {
		epic_ifstart( &sc->epic_if );
		outl( iobase + INTSTAT, INTSTAT_TQE );
	}

#if 0
	if( status & INTSTAT_GP2 ){
		printf("tx%d: GP2 int occured\n",sc->unit);
		epic_read_phy_register(sc->iobase,DP83840_BMSR);
		epic_read_phy_register(sc->iobase,DP83840_BMCR);
		outl( iobase + INTSTAT, INTSTAT_GP2 );
	}
#endif

	/* PCI fatal errors: dump state and fully restart the chip */
	if( status & (INTSTAT_FATAL|INTSTAT_PMA|INTSTAT_PTA|INTSTAT_APE|INTSTAT_DPE) ){
		int j;
		struct epic_tx_buffer * buf;

		printf("tx%d: PCI fatal error occured (%s%s%s%s)\n",
			sc->unit,
			(status&INTSTAT_PMA)?"PMA":"",
			(status&INTSTAT_PTA)?" PTA":"",
			(status&INTSTAT_APE)?" APE":"",
			(status&INTSTAT_DPE)?" DPE":"");

#if defined(EPIC_DEBUG)
		printf("tx%d: dumping descriptors\n",sc->unit);
		for(j=0;j<TX_RING_SIZE;j++){
			buf = sc->tx_buffer + j;
			printf("desc%d: %d %04x, %08x, %04x %d, %08x\n",
				j,
				buf->desc.txlength,buf->desc.status,
				buf->desc.bufaddr,
				buf->desc.control,buf->desc.buflength,
				buf->desc.next
			);
		}
#endif
		epic_stop(sc);
		epic_init(sc);

		return;
	}

	/* UPDATE statistics */
	if (status & (INTSTAT_CNT | INTSTAT_TXU | INTSTAT_OVW | INTSTAT_RXE)) {

		/* update dot3 Rx statistics
		 * (counters are clear-on-read 8-bit registers) */
		sc->dot3stats.dot3StatsMissedFrames += inb(iobase + MPCNT);
		sc->dot3stats.dot3StatsFrameTooLongs += inb(iobase + ALICNT);
		sc->dot3stats.dot3StatsFCSErrors += inb(iobase + CRCCNT);

		/* Update if Rx statistics */
		if (status & (INTSTAT_OVW | INTSTAT_RXE))
			sc->epic_if.if_ierrors++;

		/* Tx FIFO underflow. */
		if (status & INTSTAT_TXU) {
			/* Inc. counters */
			sc->dot3stats.dot3StatsInternalMacTransmitErrors++;
			sc->epic_if.if_oerrors++;

			/* Restart the transmit process. */
			outl(iobase + COMMAND, COMMAND_TXUGO);
		}

		/* Clear all error sources. */
		outl(iobase + INTSTAT,
			status&(INTSTAT_CNT|INTSTAT_TXU|INTSTAT_OVW|INTSTAT_RXE));

	}

	/* If no packets are pending, thus no timeouts */
	if( sc->pending_txs == 0 )
		sc->epic_if.if_timer = 0;

	return;
}
416
417/*
418 *
419 * splimp() invoked before epic_intr_normal()
420 */
421void
422epic_rx_done(
423 epic_softc_t *sc )
424{
425 int i = 0;
426 u_int16_t len;
427 struct epic_rx_buffer * buf;
428 struct mbuf *m;
429#if defined(RX_TO_MBUF)
430 struct mbuf *m0;
431#endif
432 struct ether_header *eh;
433 int stt;
434
435
436 while( !(sc->rx_buffer[sc->cur_rx].desc.status & 0x8000) && \
437 i++ < RX_RING_SIZE ){
438
439 buf = sc->rx_buffer + sc->cur_rx;
440
441 stt = buf->desc.status;
442
443 /* Check for errors */
444 if( !(buf->desc.status&1) ) {
445 sc->epic_if.if_ierrors++;
446 goto rxerror;
447 }
448
449 /* This is received frame actual length */
450 len = buf->desc.rxlength - ETHER_CRC_LEN;
451
452#if !defined(RX_TO_MBUF)
453 /* Allocate mbuf to pass to OS */
454 MGETHDR(m, M_DONTWAIT, MT_DATA);
455 if( NULL == m ){
456 printf("tx%d: cannot allocate mbuf header\n",sc->unit);
457 sc->epic_if.if_ierrors++;
458 goto rxerror;
459 }
460 if( len > MHLEN ){
461 MCLGET(m,M_DONTWAIT);
462 if( NULL == (m->m_flags & M_EXT) ){
463 printf("tx%d: cannot allocate mbuf cluster\n",
464 sc->unit);
465 m_freem( m );
466 sc->epic_if.if_ierrors++;
467 goto rxerror;
468 }
469 }
470
471 /* Copy packet to new allocated mbuf */
472 memcpy( mtod(m,void*), buf->data, len );
473
474#else /* RX_TO_MBUF */
475
476 /* Try to allocate mbuf cluster */
477 MGETHDR(m0,M_DONTWAIT,MT_DATA);
478 if( NULL == m0 ) {
479 printf("tx%d: cannot allocate mbuf header/n",sc->unit);
480 sc->epic_if.if_ierrors++;
481 goto rxerror;
482 }
483 MCLGET(m0,M_DONTWAIT);
484 if( NULL == (m0->m_flags & M_EXT) ){
485 printf("tx%d: cannot allocate mbuf cluster/n",sc->unit);
486 m_freem(m0);
487 sc->epic_if.if_ierrors++;
488 goto rxerror;
489 }
490
491 /* Swap new allocated mbuf with mbuf, containing packet */
492 m = buf->mbuf;
493 buf->mbuf = m0;
494
495 /* Insert new allocated mbuf into device queue */
496 buf->data = mtod( buf->mbuf, caddr_t );
497 buf->desc.bufaddr = vtophys( buf->data );
498
499#endif
500
501 /* First mbuf in packet holds the ethernet and packet headers */
502 eh = mtod( m, struct ether_header * );
503 m->m_pkthdr.rcvif = &(sc->epic_if);
504 m->m_pkthdr.len = len;
505 m->m_len = len;
506
507#if NBPFILTER > 0
508 /* Give mbuf to BPFILTER */
509 if( sc->epic_if.if_bpf ) bpf_mtap( &sc->epic_if, m );
510
511 /* Accept only our packets, broadcasts and multicasts */
512 if( (eh->ether_dhost[0] & 1) == 0 &&
513 bcmp(eh->ether_dhost,sc->epic_ac.ac_enaddr,ETHER_ADDR_LEN)){
514 m_freem(m);
515 goto rxerror;
516 }
517#endif
518
519 /* Second mbuf holds packet ifself */
520 m->m_pkthdr.len = len - sizeof(struct ether_header);
521 m->m_len = len - sizeof( struct ether_header );
522 m->m_data += sizeof( struct ether_header );
523
524 /* Give mbuf to OS */
525 ether_input(&sc->epic_if, eh, m);
526
527 /* Successfuly received frame */
528 sc->epic_if.if_ipackets++;
529
530rxerror:
531 /* Mark current descriptor as free */
532 buf->desc.rxlength = 0;
533 buf->desc.status = 0x8000;
534
535 /* Switch to next descriptor */
536 sc->cur_rx = (sc->cur_rx+1) % RX_RING_SIZE;
537 }
538
539 return;
540}
541
542/*
543 *
544 * splimp() invoked before epic_intr_normal()
545 */
546void
547epic_tx_done( epic_softc_t *sc ){
548 int i = 0;
549 u_int32_t if_flags=~0;
550 int coll;
551 u_int16_t stt;
552
553 while( i++ < TX_RING_SIZE ){
554 struct epic_tx_buffer *buf = sc->tx_buffer + sc->dirty_tx;
555 u_int16_t len = buf->desc.txlength;
556 stt = buf->desc.status;
557
558 if( stt & 0x8000 )
559 break; /* following packets are not Txed yet */
560
561 if( stt == 0 ){
562 if_flags = ~IFF_OACTIVE;
563 break;
564 }
565
566 sc->pending_txs--; /* packet is finished */
567 sc->dirty_tx = (sc->dirty_tx + 1) % TX_RING_SIZE;
568
569 coll = (stt >> 8) & 0xF; /* number of collisions*/
570
571 if( stt & 0x0001 ){
572 sc->epic_if.if_opackets++;
573 } else {
574 if(stt & 0x0008)
575 sc->dot3stats.dot3StatsCarrierSenseErrors++;
576
577 if(stt & 0x1050)
578 sc->dot3stats.dot3StatsInternalMacTransmitErrors++;
579
580 if(stt & 0x1000) coll = 16;
581
582 sc->epic_if.if_oerrors++;
583 }
584
585 if(stt & 0x0002) /* What does it mean? */
586 sc->dot3stats.dot3StatsDeferredTransmissions++;
587
588 sc->epic_if.if_collisions += coll;
589
590 switch( coll ){
591 case 0:
592 break;
593 case 16:
594 sc->dot3stats.dot3StatsExcessiveCollisions++;
595 sc->dot3stats.dot3StatsCollFrequencies[15]++;
596 break;
597 case 1:
598 sc->dot3stats.dot3StatsSingleCollisionFrames++;
599 sc->dot3stats.dot3StatsCollFrequencies[0]++;
600 break;
601 default:
602 sc->dot3stats.dot3StatsMultipleCollisionFrames++;
603 sc->dot3stats.dot3StatsCollFrequencies[coll-1]++;
604 break;
605 }
606
607 buf->desc.status = 0;
608 buf->desc.txlength = 0;
609
610#if defined(TX_FRAG_LIST)
611 buf->flist.numfrags = 0;
612 m_freem( buf->mbuf );
613 buf->mbuf = NULL;
614#endif
615
616 if_flags = ~IFF_OACTIVE;
617 }
618
619 sc->epic_if.if_flags &= if_flags;
620
621 if( !(sc->epic_if.if_flags & IFF_OACTIVE) )
622 epic_ifstart( &sc->epic_if );
623
624}
625
626/*
627 * Probe function
628 */
629static char*
630epic_pci_probe(
631 pcici_t config_id,
632 pcidi_t device_id)
633{
634 if( PCI_VENDORID(device_id) != SMC_VENDORID )
635 return NULL;
636
637 if( PCI_CHIPID(device_id) == CHIPID_83C170 )
638 return "SMC 83c170";
639
640 return NULL;
641}
642
643/*
644 * PCI_Attach function
645 *
646 * splimp() invoked here
647 */
648static void
649epic_pci_attach(
650 pcici_t config_id,
651 int unit)
652{
653 struct ifnet * ifp;
654 epic_softc_t *sc;
655 u_int32_t iobase;
656 u_int32_t irq;
657 u_int32_t phyid;
658 int i,s;
659 int phy, phy_idx;
660
661 /*
662 * Get iobase and irq level
663 */
664 irq = PCI_CONF_READ(PCI_CFIT) & (0xFF);
665 if (!pci_map_port(config_id, PCI_CBIO,(u_short *) &iobase))
666 return;
667
668 /*
669 * Allocate and preinitialize softc structure
670 */
671 sc = (epic_softc_t *) malloc(sizeof(epic_softc_t), M_DEVBUF, M_NOWAIT);
672 if (sc == NULL) return;
673 epics[ unit ] = sc;
674
675 /*
676 * Zero softc structure
677 */
678 bzero(sc, sizeof(epic_softc_t));
679
680 /*
681 * Initialize softc
682 */
683 sc->unit = unit;
684 sc->iobase = iobase;
685 sc->irq = irq;
686
687 /* Bring the chip out of low-power mode. */
688 outl( iobase + GENCTL, 0x0000 );
689
690 /* Magic?! If we don't set this bit the MII interface won't work. */
691 outl( iobase + TEST1, 0x0008 );
692
693 /* Read mac address (may be better is read from EEPROM?) */
694 for (i = 0; i < ETHER_ADDR_LEN / sizeof( u_int16_t); i++)
695 ((u_int16_t *)sc->epic_macaddr)[i] = inw(iobase + LAN0 + i*4);
696
697 /* Display some info */
698 printf("tx%d: address %02x:%02x:%02x:%02x:%02x:%02x,",sc->unit,
699 sc->epic_macaddr[0],sc->epic_macaddr[1],sc->epic_macaddr[2],
700 sc->epic_macaddr[3],sc->epic_macaddr[4],sc->epic_macaddr[5]);
701
702
703 s = splimp();
704
705 /* Map interrupt */
706 if( !pci_map_int(config_id, epic_intr_normal, (void*)sc, &net_imask) ) {
707 printf("tx%d: couldn't map interrupt\n",unit);
708 epics[ unit ] = NULL;
709 free(sc, M_DEVBUF);
710 return;
711 }
712
713 /* Fill ifnet structure */
714 ifp = &sc->epic_if;
715
716 ifp->if_unit = unit;
717 ifp->if_name = "tx";
718 ifp->if_softc = sc;
719 ifp->if_flags = IFF_BROADCAST|IFF_SIMPLEX|IFF_MULTICAST|IFF_ALLMULTI;
720 ifp->if_ioctl = epic_ifioctl;
721 ifp->if_start = epic_ifstart;
722 ifp->if_watchdog = epic_ifwatchdog;
723 ifp->if_init = (if_init_f_t*)epic_init;
724 ifp->if_timer = 0;
725 ifp->if_output = ether_output;
726 ifp->if_linkmib = &sc->dot3stats;
727 ifp->if_linkmiblen = sizeof(struct ifmib_iso_8802_3);
728
729 sc->dot3stats.dot3StatsEtherChipSet =
730 DOT3CHIPSET(dot3VendorSMC,
731 dot3ChipSetSMC83c170);
732
733 sc->dot3stats.dot3Compliance = DOT3COMPLIANCE_COLLS;
734
735 printf(" type SMC9432TX");
736
737 i = epic_read_phy_register(iobase, DP83840_BMCR);
738
739 if( i & BMCR_AUTONEGOTIATION ){
740 printf(" [Auto-Neg.");
741
742 if( i & BMCR_100MBPS ) printf(" 100Mbps");
743 else printf(" 10Mbps");
744
745 if( i & BMCR_FULL_DUPLEX ) printf(" FD");
746
747 printf("]\n");
748
749 if( i & BMCR_FULL_DUPLEX )
750 printf("tx%d: WARNING! FD autonegotiated, not supported\n",sc->unit);
751
752 } else {
753 ifp->if_flags |= IFF_LINK0;
754 if( i & BMCR_100MBPS ) {
755 printf(" [100Mbps");
756 ifp->if_flags |= IFF_LINK2;
757 } else printf(" [10Mbps");
758
759 if( i & BMCR_FULL_DUPLEX ) {
760 printf(" FD");
761 ifp->if_flags |= IFF_LINK1;
762 }
763 printf("]\n");
764 }
765#if defined(EPIC_DEBUG)
766 printf("tx%d: PHY id: (",sc->unit);
767 i=epic_read_phy_register(iobase,DP83840_PHYIDR1);
768 printf("%04x:",i);
769 phyid=i<<6;
770 i=epic_read_phy_register(iobase,DP83840_PHYIDR2);
771 printf("%04x)",i);
772 phyid|=((i>>10)&0x3F);
773 printf(" %08x, rev %x, mod %x\n",phyid,(i)&0xF, (i>>4)&0x3f);
774#endif
775
776 epic_read_phy_register(iobase,DP83840_BMSR);
777 epic_read_phy_register(iobase,DP83840_BMSR);
778 epic_read_phy_register(iobase,DP83840_BMSR);
779 i=epic_read_phy_register(iobase,DP83840_BMSR);
780
781 if( !(i & BMSR_LINK_STATUS) )
782 printf("tx%d: WARNING! no link estabilished\n",sc->unit);
783
784 /*
785 * Attach to if manager
786 */
787 if_attach(ifp);
788 ether_ifattach(ifp);
789
790#if NBPFILTER > 0
791 bpfattach(ifp,DLT_EN10MB, sizeof(struct ether_header));
792#endif
793
794 splx(s);
795
796 return;
797}
798
799/*
800 * IFINIT function
801 *
802 * splimp() invoked here
803 */
static int
epic_init(
	epic_softc_t * sc)
{
	struct ifnet *ifp = &sc->epic_if;
	int iobase = sc->iobase;
	int i,s;

	s = splimp();

	/* Soft reset the chip. */
	outl(iobase + GENCTL, GENCTL_SOFT_RESET );

	/* Reset takes 15 ticks
	 * (NOTE(review): empty delay loop; may be optimized away) */
	for(i=0;i<0x100;i++);

	/* Wake up */
	outl( iobase + GENCTL, 0 );

	/* ?????? (same undocumented TEST1 bit set in attach; required
	 * for the MII interface to work) */
	outl( iobase + TEST1, 0x0008);

	/* Initialize rings */
	if( -1 == epic_init_rings( sc ) ) {
		printf("tx%d: failed to initialize rings\n",sc->unit);
		epic_free_rings( sc );
		splx(s);
		return -1;
	}

	/* Put node address to EPIC (LAN0 registers are 4 bytes apart,
	 * each holding 16 bits of the MAC address) */
	outl( iobase + LAN0 + 0x0, ((u_int16_t *)sc->epic_macaddr)[0] );
        outl( iobase + LAN0 + 0x4, ((u_int16_t *)sc->epic_macaddr)[1] );
	outl( iobase + LAN0 + 0x8, ((u_int16_t *)sc->epic_macaddr)[2] );

	/* Enable interrupts, set for PCI read multiple and etc */
	outl( iobase + GENCTL,
		GENCTL_ENABLE_INTERRUPT | GENCTL_MEMORY_READ_MULTIPLE |
		GENCTL_ONECOPY | GENCTL_RECEIVE_FIFO_THRESHOLD64 );

	/* Set transmit threshold */
	outl( iobase + ETXTHR, 0x40 );

	/* Compute and set RXCON. */
	epic_set_rx_mode( sc );

	/* Set MII speed mode */
	epic_set_media_speed( sc );

	/* Set multicast table */
	epic_set_mc_table( sc );

	/* Enable interrupts by setting the interrupt mask. */
	outl( iobase + INTMASK,
		INTSTAT_RCC | INTSTAT_RQE | INTSTAT_OVW | INTSTAT_RXE |
		INTSTAT_TXC | INTSTAT_TCC | INTSTAT_TQE | INTSTAT_TXU |
		INTSTAT_CNT | /*INTSTAT_GP2 |*/ INTSTAT_FATAL |
		INTSTAT_PTA | INTSTAT_PMA | INTSTAT_APE | INTSTAT_DPE );

	/* Start rx process */
	outl( iobase + COMMAND, COMMAND_RXQUEUED | COMMAND_START_RX );

	/* Mark interface running ... */
	if( ifp->if_flags & IFF_UP ) ifp->if_flags |= IFF_RUNNING;
	else ifp->if_flags &= ~IFF_RUNNING;

	/* ... and free */
	ifp->if_flags &= ~IFF_OACTIVE;

	splx(s);
	return 0;
}
876
877/*
878 * This function should set EPIC's registers according IFF_* flags
879 */
880static void
881epic_set_rx_mode(
882 epic_softc_t * sc)
883{
884 struct ifnet *ifp = &sc->epic_if;
885 u_int16_t rxcon = 0;
886
887#if NBPFILTER > 0
888 if( sc->epic_if.if_flags & IFF_PROMISC )
889 rxcon |= RXCON_PROMISCUOUS_MODE;
890#endif
891
892 if( sc->epic_if.if_flags & IFF_BROADCAST )
893 rxcon |= RXCON_RECEIVE_BROADCAST_FRAMES;
894
895 if( sc->epic_if.if_flags & IFF_MULTICAST )
896 rxcon |= RXCON_RECEIVE_MULTICAST_FRAMES;
897
898 outl( sc->iobase + RXCON, rxcon );
899
900 return;
901}
902
903/*
904 * This function should set MII to mode specified by IFF_LINK* flags
905 */
906static void
907epic_set_media_speed(
908 epic_softc_t * sc)
909{
910 struct ifnet *ifp = &sc->epic_if;
911 u_int16_t media;
912 u_int32_t i;
913
914 /* Set media speed */
915 if( ifp->if_flags & IFF_LINK0 ){
916 /* Allow only manual fullduplex modes */
917 media = epic_read_phy_register( sc->iobase, DP83840_ANAR );
918 media |= ANAR_100|ANAR_10|ANAR_100_FD|ANAR_10_FD;
919 epic_write_phy_register( sc->iobase, DP83840_ANAR, media );
920
921 /* Set mode */
922 media = (ifp->if_flags&IFF_LINK2)?BMCR_100MBPS:0;
923 media |= (ifp->if_flags&IFF_LINK1)?BMCR_FULL_DUPLEX:0;
924 epic_write_phy_register( sc->iobase, DP83840_BMCR, media );
925
926 ifp->if_baudrate =
927 (ifp->if_flags&IFF_LINK2)?100000000:10000000;
928
929 outl( sc->iobase + TXCON,(ifp->if_flags&IFF_LINK1)?TXCON_LOOPBACK_MODE_FULL_DUPLEX|TXCON_DEFAULT:TXCON_DEFAULT );
930
931 } else {
932 /* If autoneg is set, IFF_LINK flags are meaningless */
933 ifp->if_flags &= ~(IFF_LINK0|IFF_LINK1|IFF_LINK2);
934 ifp->if_baudrate = 100000000;
935
936 outl( sc->iobase + TXCON, TXCON_DEFAULT );
937
938 /* Did it autoneg full duplex? */
939 if (epic_autoneg(sc) == EPIC_FULL_DUPLEX)
940 outl( sc->iobase + TXCON,
941 TXCON_LOOPBACK_MODE_FULL_DUPLEX|TXCON_DEFAULT);
942 }
943
944 return;
945}
946
947/*
948 * This functions controls the autoneg processes of the phy
949 * It implements the workaround that is described in section 7.2 & 7.3 of the
950 * DP83840A data sheet
951 * http://www.national.com/ds/DP/DP83840A.pdf
952 */
953static int
954epic_autoneg(
955 epic_softc_t * sc)
956{
957 struct ifnet *ifp = &sc->epic_if;
958 u_int16_t media;
959 u_int16_t i;
960
961 media = epic_read_phy_register( sc->iobase, DP83840_ANAR );
962 media |= ANAR_100|ANAR_100_FD|ANAR_10|ANAR_10_FD;
963 epic_write_phy_register( sc->iobase, DP83840_ANAR, media );
964
965 /* Set and restart autoneg */
966 epic_write_phy_register( sc->iobase, DP83840_BMCR,
967 BMCR_AUTONEGOTIATION | BMCR_RESTART_AUTONEG );
968
969 /* Wait 3 seconds for the autoneg to finish
970 * This is the recommended time from the DP83840A data sheet
971 * Section 7.1
972 */
973 DELAY(3000000);
974
975 epic_read_phy_register( sc->iobase, DP83840_BMSR);
976
977 /* BMSR must be read twice to update the link status bit/
978 * since that bit is a latch bit
979 */
980 i = epic_read_phy_register( sc->iobase, DP83840_BMSR);
981
982 if ((i & BMSR_LINK_STATUS) && ( i & BMSR_AUTONEG_COMPLETE)){
983 i = epic_read_phy_register( sc->iobase, DP83840_PAR);
984
985 if ( i & PAR_FULL_DUPLEX )
986 return EPIC_FULL_DUPLEX;
987 else
988 return EPIC_HALF_DUPLEX;
989 }
990 else { /*Auto-negotiation or link status is not 1
991 Thus the auto-negotiation failed and one
992 must take other means to fix it.
993 */
994
995 /* ANER must be read twice to get the correct reading for the
996 * Multiple link fault bit -- it is a latched bit
997 */
998 epic_read_phy_register (sc->iobase, DP83840_ANER);
999
1000 i = epic_read_phy_register (sc->iobase, DP83840_ANER);
1001
1002 if ( i & ANER_MULTIPLE_LINK_FAULT ) {
1003 /* it can be forced to 100Mb/s Half-Duplex */
1004 media = epic_read_phy_register(sc->iobase,DP83840_BMCR);
1005 media &= ~(BMCR_AUTONEGOTIATION | BMCR_FULL_DUPLEX);
1006 media |= BMCR_100MBPS;
1007 epic_write_phy_register(sc->iobase,DP83840_BMCR,media);
1008
1009 /* read BMSR again to determine link status */
1010 epic_read_phy_register(sc->iobase, DP83840_BMSR);
1011 i=epic_read_phy_register( sc->iobase, DP83840_BMSR);
1012
1013 if (i & BMSR_LINK_STATUS){
1014 /* port is linked to the non Auto-Negotiation
1015 * 100Mbs partner.
1016 */
1017 return EPIC_HALF_DUPLEX;
1018 }
1019 else {
1020 media = epic_read_phy_register (sc->iobase, DP83840_BMCR);
1021 media &= !(BMCR_AUTONEGOTIATION | BMCR_FULL_DUPLEX | BMCR_100MBPS);
1022 epic_write_phy_register(sc->iobase, DP83840_BMCR, media);
1023 epic_read_phy_register(sc->iobase, DP83840_BMSR);
1024 i=epic_read_phy_register( sc->iobase, DP83840_BMSR);
1025
1026 if (i & BMSR_LINK_STATUS) {
1027 /*port is linked to the non
1028 * Auto-Negotiation10Mbs partner
1029 */
1030 return EPIC_HALF_DUPLEX;
1031 }
1032 }
1033 }
1034 /* If we get here we are most likely not connected
1035 * so lets default it to half duplex
1036 */
1037 return EPIC_HALF_DUPLEX;
1038 }
1039
1040}
1041
1042/*
1043 * This function sets EPIC multicast table
1044 */
static void
epic_set_mc_table(
	epic_softc_t * sc)
{
	struct ifnet *ifp = &sc->epic_if;

	/*
	 * NOTE(review): the hash filter is simply opened wide (all bits
	 * set in MC0..MC3), i.e. all multicast frames are accepted rather
	 * than filtering on the actual membership list.
	 */
	if( ifp->if_flags & IFF_MULTICAST ){
		outl( sc->iobase + MC0, 0xFFFF );
		outl( sc->iobase + MC1, 0xFFFF );
		outl( sc->iobase + MC2, 0xFFFF );
		outl( sc->iobase + MC3, 0xFFFF );
	}

	return;
}
1060
1061/*
1062 * This function should completely stop rx and tx processes
1063 *
1064 * splimp() invoked here
1065 */
static void
epic_stop(
	epic_softc_t * sc)
{
	int iobase = sc->iobase;
	int i,s;

	s = splimp();
	/* Cancel the watchdog so a timeout can't fire while we tear down */
	sc->epic_if.if_timer = 0;

	/* Disable interrupts, stop processes */
	outl( iobase + INTMASK, 0 );
	outl( iobase + GENCTL, 0 );
	outl( iobase + COMMAND,
		COMMAND_STOP_RX | COMMAND_STOP_RDMA | COMMAND_STOP_TDMA );

	/* Wait RX and TX DMA to stop (bounded busy-wait, no timeout error) */
	for(i=0;i<0x100000;i++){
		if( (inl(iobase+INTSTAT)&(INTSTAT_RXIDLE|INTSTAT_TXIDLE)) ==
			(INTSTAT_RXIDLE|INTSTAT_TXIDLE) ) break;
	}

	/* DMA engines that refuse to idle are only reported, not retried */
	if( !(inl(iobase+INTSTAT)&INTSTAT_RXIDLE) )
		printf("tx%d: can't stop RX DMA\n",sc->unit);

	if( !(inl(iobase+INTSTAT)&INTSTAT_TXIDLE) )
		printf("tx%d: can't stop TX DMA\n",sc->unit);

	/* Reset chip; the empty loop is a crude settle delay
	 * (NOTE(review): may be optimized away by modern compilers) */
	outl( iobase + GENCTL, GENCTL_SOFT_RESET );
	for(i=0;i<0x100;i++);

	/* Free memory allocated for rings */
	epic_free_rings( sc );

	splx(s);

}
1104
1105/*
1106 * This function should free all allocated for rings memory.
1107 * NB: The DMA processes must be stopped.
1108 *
1109 * splimp() assumed to be done
1110 */
1111static void
1112epic_free_rings(epic_softc_t * sc){
1113 int i;
1114
1115 for(i=0;i<RX_RING_SIZE;i++){
1116 struct epic_rx_buffer *buf = sc->rx_buffer + i;
1117
1118 buf->desc.status = 0;
1119 buf->desc.buflength = 0;
1120 buf->desc.bufaddr = 0;
1121 buf->data = NULL;
1122
1123#if defined(RX_TO_MBUF)
1124 if( buf->mbuf ) m_freem( buf->mbuf );
1125 buf->mbuf = NULL;
1126#else
1127 if( buf->data ) free( buf->data, M_DEVBUF );
1128 buf->data = NULL;
1129#endif
1130 }
1131
1132 for(i=0;i<TX_RING_SIZE;i++){
1133 struct epic_tx_buffer *buf = sc->tx_buffer + i;
1134
1135 buf->desc.status = 0;
1136 buf->desc.buflength = 0;
1137 buf->desc.bufaddr = 0;
1138
1139#if defined(TX_FRAG_LIST)
1140 if( buf->mbuf ) m_freem( buf->mbuf );
1141 buf->mbuf = NULL;
1142#else
1143 if( buf->data ) free( buf->data, M_DEVBUF );
1144 buf->data = NULL;
1145#endif
1146 }
1147}
1148
1149/*
1150 * Initialize Rx and Tx rings and give them to EPIC
1151 *
1152 * If RX_TO_MBUF option is enabled, mbuf cluster is allocated instead of
1153 * static buffer for RX ringi element.
1154 * If TX_FRAG_LIST option is enabled, nothig is done, except chaining
1155 * descriptors to ring and point them to static fraglists.
1156 *
1157 * splimp() assumed to be done
1158 */
1159static int
1160epic_init_rings(epic_softc_t * sc){
1161 int i;
1162 struct mbuf *m;
1163
1164 sc->cur_rx = sc->cur_tx = sc->dirty_tx = sc->pending_txs = 0;
1165
1166 for (i = 0; i < RX_RING_SIZE; i++) {
1167 struct epic_rx_buffer *buf = sc->rx_buffer + i;
1168
1169 buf->desc.status = 0; /* Owned by driver */
1170 buf->desc.next =
1171 vtophys(&(sc->rx_buffer[(i+1)%RX_RING_SIZE].desc) );
1172
1173#if defined(RX_TO_MBUF)
1174 MGETHDR(buf->mbuf,M_DONTWAIT,MT_DATA);
1175 if( NULL == buf->mbuf ) return -1;
1176 MCLGET(buf->mbuf,M_DONTWAIT);
1177 if( NULL == (buf->mbuf->m_flags & M_EXT) ) return -1;
1178
1179 buf->data = mtod( buf->mbuf, caddr_t );
1180#else
1181 buf->data = malloc(ETHER_MAX_FRAME_LEN, M_DEVBUF, M_NOWAIT);
1182 if( buf->data == NULL ) return -1;
1183#endif
1184
1185 buf->desc.bufaddr = vtophys( buf->data );
1186 buf->desc.buflength = ETHER_MAX_FRAME_LEN;
1187 buf->desc.status = 0x8000; /* Give to EPIC */
1188
1189 }
1190
1191 for (i = 0; i < TX_RING_SIZE; i++) {
1192 struct epic_tx_buffer *buf = sc->tx_buffer + i;
1193
1194 buf->desc.status = 0;
1195 buf->desc.next =
1196 vtophys(&(sc->tx_buffer[(i+1)%TX_RING_SIZE].desc) );
1197
1198#if defined(TX_FRAG_LIST)
1199 buf->mbuf = NULL;
1200 buf->desc.bufaddr = vtophys( &(buf->flist) );
1201#else
1202 /* Allocate buffer */
1203 buf->data = malloc(ETHER_MAX_FRAME_LEN, M_DEVBUF, M_NOWAIT);
1204
1205 if( buf->data == NULL ) return -1;
1206
1207 buf->desc.bufaddr = vtophys( buf->data );
1208 buf->desc.buflength = ETHER_MAX_FRAME_LEN;
1209#endif
1210 }
1211
1212 /* Give rings to EPIC */
1213 outl( sc->iobase + PRCDAR, vtophys(&(sc->rx_buffer[0].desc)) );
1214 outl( sc->iobase + PTCDAR, vtophys(&(sc->tx_buffer[0].desc)) );
1215
1216 return 0;
1217}
1218
1219/*
1220 * EEPROM operation functions
1221 */
1222static void epic_write_eepromreg(u_int16_t regaddr, u_int8_t val){
1223 u_int16_t i;
1224
1225 outb( regaddr, val );
1226
1227 for( i=0;i<0xFF; i++)
1228 if( !(inb( regaddr ) & 0x20) ) break;
1229
1230 return;
1231}
1232
1233static u_int8_t epic_read_eepromreg(u_int16_t regaddr){
1234 return inb( regaddr );
1235}
1236
1237static u_int8_t epic_eeprom_clock( u_int16_t ioaddr, u_int8_t val ){
1238
1239 epic_write_eepromreg( ioaddr + EECTL, val );
1240 epic_write_eepromreg( ioaddr + EECTL, (val | 0x4) );
1241 epic_write_eepromreg( ioaddr + EECTL, val );
1242
1243 return epic_read_eepromreg( ioaddr + EECTL );
1244}
1245
1246static void epic_output_eepromw(u_int16_t ioaddr, u_int16_t val){
1247 int i;
1248 for( i = 0xF; i >= 0; i--){
1249 if( (val & (1 << i)) ) epic_eeprom_clock( ioaddr, 0x0B );
1250 else epic_eeprom_clock( ioaddr, 3);
1251 }
1252}
1253
1254static u_int16_t epic_input_eepromw(u_int16_t ioaddr){
1255 int i;
1256 int tmp;
1257 u_int16_t retval = 0;
1258
1259 for( i = 0xF; i >= 0; i--) {
1260 tmp = epic_eeprom_clock( ioaddr, 0x3 );
1261 if( tmp & 0x10 ){
1262 retval |= (1 << i);
1263 }
1264 }
1265 return retval;
1266}
1267
1268static int epic_read_eeprom(u_int16_t ioaddr, u_int16_t loc){
1269 int i;
1270 u_int16_t dataval;
1271 u_int16_t read_cmd;
1272
1273 epic_write_eepromreg(ioaddr + EECTL , 3);
1274
1275 if( epic_read_eepromreg(ioaddr + EECTL) & 0x40 )
1276 read_cmd = ( loc & 0x3F ) | 0x180;
1277 else
1278 read_cmd = ( loc & 0xFF ) | 0x600;
1279
1280 epic_output_eepromw( ioaddr, read_cmd );
1281
1282 dataval = epic_input_eepromw( ioaddr );
1283
1284 epic_write_eepromreg( ioaddr + EECTL, 1 );
1285
1286 return dataval;
1287}
1288
1289static int epic_read_phy_register(u_int16_t iobase, u_int16_t loc){
1290 int i;
1291
1292 outl( iobase + MIICTL, ((loc << 4) | 0x0601) );
1293
1294 for( i=0;i<0x1000;i++) if( !(inl( iobase + MIICTL )&1) ) break;
1295
1296 return inl( iobase + MIIDATA );
1297}
1298
1299static void epic_write_phy_register(u_int16_t iobase, u_int16_t loc,u_int16_t val){
1300 int i;
1301
1302 outl( iobase + MIIDATA, val );
1303 outl( iobase + MIICTL, ((loc << 4) | 0x0602) );
1304
1305 for( i=0;i<0x1000;i++) if( !(inl( iobase + MIICTL )&2) ) break;
1306
1307 return;
1308}
1309
1310#endif /* NPCI > 0 */