/*	$NetBSD: if_sq.c,v 1.20 2003/12/30 23:48:07 sekiya Exp $	*/

/*
 * Copyright (c) 2001 Rafal K. Boni
 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Portions of this code are derived from software contributed to The
 * NetBSD Foundation by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_sq.c,v 1.20 2003/12/30 23:48:07 sekiya Exp $");

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/syslog.h>

#include <uvm/uvm_extern.h>

#include <machine/endian.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/ic/seeq8003reg.h>

#include <sgimips/hpc/sqvar.h>
#include <sgimips/hpc/hpcvar.h>
#include <sgimips/hpc/hpcreg.h>

#include <dev/arcbios/arcbios.h>
#include <dev/arcbios/arcbiosvar.h>

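/*
 * XXX: `#define static' compiles the driver's internal functions and
 * data with external linkage, presumably so they stay visible to the
 * debugger and symbol table; it should go away eventually.
 */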
#define static

/*
 * Short TODO list:
 *	(1) Do counters for bad-RX packets.
 *	(2) Allow multi-segment transmits, instead of copying to a single,
 *	    contiguous mbuf.
 *	(3) Verify sq_stop() turns off enough stuff; I was still getting
 *	    seeq interrupts after sq_stop().
 *	(4) Implement EDLC modes: especially packet auto-pad and simplex
 *	    mode.
 *	(5) Should the driver filter out its own transmissions in non-EDLC
 *	    mode?
 *	(6) Multicast support -- multicast filter, address management, ...
 *	(7) Deal with RB0 (recv buffer overflow) on reception.  Will need
 *	    to figure out if RB0 is read-only as stated in one spot in the
 *	    HPC spec or read-write (ie, is the 'write a one to clear it')
 *	    the correct thing?
 */

#if defined(SQ_DEBUG)
int sq_debug = 0;
#define SQ_DPRINTF(x) if (sq_debug) printf x
#else
#define SQ_DPRINTF(x)
#endif
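
/*
 * Note the double parentheses at SQ_DPRINTF call sites, e.g.
 * SQ_DPRINTF(("%s: rxintr\n", sc->sc_dev.dv_xname)); the inner
 * parentheses make the format string and its arguments a single
 * macro argument that is pasted after printf.
 */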

static int	sq_match(struct device *, struct cfdata *, void *);
static void	sq_attach(struct device *, struct device *, void *);
static int	sq_init(struct ifnet *);
static void	sq_start(struct ifnet *);
static void	sq_stop(struct ifnet *, int);
static void	sq_watchdog(struct ifnet *);
static int	sq_ioctl(struct ifnet *, u_long, caddr_t);

static void	sq_set_filter(struct sq_softc *);
static int	sq_intr(void *);
static int	sq_rxintr(struct sq_softc *);
static int	sq_txintr(struct sq_softc *);
static void	sq_reset(struct sq_softc *);
static int	sq_add_rxbuf(struct sq_softc *, int);
static void	sq_dump_buffer(u_int32_t addr, u_int32_t len);

static void	enaddr_aton(const char *, u_int8_t *);

/* Actions */
#define SQ_RESET		1
#define SQ_ADD_TO_DMA		2
#define SQ_START_DMA		3
#define SQ_DONE_DMA		4
#define SQ_RESTART_DMA		5
#define SQ_TXINTR_ENTER		6
#define SQ_TXINTR_EXIT		7
#define SQ_TXINTR_BUSY		8

struct sq_action_trace {
	int action;
	int bufno;
	int status;
	int freebuf;
};

#define SQ_TRACEBUF_SIZE	100
int sq_trace_idx = 0;
struct sq_action_trace sq_trace[SQ_TRACEBUF_SIZE];

void sq_trace_dump(struct sq_softc *sc);

#define SQ_TRACE(act, buf, stat, free) do {				\
	sq_trace[sq_trace_idx].action = (act);				\
	sq_trace[sq_trace_idx].bufno = (buf);				\
	sq_trace[sq_trace_idx].status = (stat);				\
	sq_trace[sq_trace_idx].freebuf = (free);			\
	if (++sq_trace_idx == SQ_TRACEBUF_SIZE) {			\
		memset(&sq_trace, 0, sizeof(sq_trace));			\
		sq_trace_idx = 0;					\
	}								\
} while (0)
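
/*
 * Example: SQ_TRACE(SQ_RESET, 0, 0, sc->sc_nfreetx) records a reset
 * event in the circular trace buffer above; sq_watchdog() prints the
 * accumulated entries via sq_trace_dump() when the device times out.
 */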

CFATTACH_DECL(sq, sizeof(struct sq_softc),
    sq_match, sq_attach, NULL, NULL);

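/*
 * Frames shorter than this are zero-padded before transmit; this is the
 * minimum Ethernet frame length minus the 4-byte FCS, which the chip is
 * expected to append itself (ETHER_MIN_LEN includes the CRC).
 */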
#define ETHER_PAD_LEN	(ETHER_MIN_LEN - ETHER_CRC_LEN)

static int
sq_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct hpc_attach_args *ha = aux;

	if (strcmp(ha->ha_name, cf->cf_name) == 0)
		return (1);

	return (0);
}

static void
sq_attach(struct device *parent, struct device *self, void *aux)
{
	int i, err;
	char *macaddr;
	struct sq_softc *sc = (void *)self;
	struct hpc_attach_args *haa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	sc->sc_hpct = haa->ha_st;
	sc->hpc_regs = haa->hpc_regs;      /* HPC register definitions */

	if ((err = bus_space_subregion(haa->ha_st, haa->ha_sh,
				       haa->ha_dmaoff,
				       sc->hpc_regs->enet_regs_size,
				       &sc->sc_hpch)) != 0) {
		printf(": unable to map HPC DMA registers, error = %d\n", err);
		goto fail_0;
	}

	sc->sc_regt = haa->ha_st;
	if ((err = bus_space_subregion(haa->ha_st, haa->ha_sh,
				       haa->ha_devoff,
				       sc->hpc_regs->enet_devregs_size,
				       &sc->sc_regh)) != 0) {
		printf(": unable to map Seeq registers, error = %d\n", err);
		goto fail_0;
	}

	sc->sc_dmat = haa->ha_dmat;

	if ((err = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct sq_control),
				    PAGE_SIZE, PAGE_SIZE, &sc->sc_cdseg,
				    1, &sc->sc_ncdseg, BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to allocate control data, error = %d\n", err);
		goto fail_0;
	}

	if ((err = bus_dmamem_map(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg,
				  sizeof(struct sq_control),
				  (caddr_t *)&sc->sc_control,
				  BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf(": unable to map control data, error = %d\n", err);
		goto fail_1;
	}

	if ((err = bus_dmamap_create(sc->sc_dmat, sizeof(struct sq_control),
				     1, sizeof(struct sq_control), PAGE_SIZE,
				     BUS_DMA_NOWAIT, &sc->sc_cdmap)) != 0) {
		printf(": unable to create DMA map for control data, error "
			"= %d\n", err);
		goto fail_2;
	}

	if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_cdmap, sc->sc_control,
				   sizeof(struct sq_control),
				   NULL, BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to load DMA map for control data, error "
			"= %d\n", err);
		goto fail_3;
	}

	memset(sc->sc_control, 0, sizeof(struct sq_control));

	/* Create transmit buffer DMA maps */
	for (i = 0; i < SQ_NTXDESC; i++) {
		if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
					     MCLBYTES, 0, BUS_DMA_NOWAIT,
					     &sc->sc_txmap[i])) != 0) {
			printf(": unable to create tx DMA map %d, "
			    "error = %d\n", i, err);
			goto fail_4;
		}
	}

	/* Create receive buffer DMA maps */
	for (i = 0; i < SQ_NRXDESC; i++) {
		if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
					     MCLBYTES, 0, BUS_DMA_NOWAIT,
					     &sc->sc_rxmap[i])) != 0) {
			printf(": unable to create rx DMA map %d, "
			    "error = %d\n", i, err);
			goto fail_5;
		}
	}

	/* Pre-allocate the receive buffers.  */
	for (i = 0; i < SQ_NRXDESC; i++) {
		if ((err = sq_add_rxbuf(sc, i)) != 0) {
			printf(": unable to allocate or map rx buffer %d,"
			       " error = %d\n", i, err);
			goto fail_6;
		}
	}

	if ((macaddr = ARCBIOS->GetEnvironmentVariable("eaddr")) == NULL) {
		printf(": unable to get MAC address!\n");
		goto fail_6;
	}

	evcnt_attach_dynamic(&sc->sq_intrcnt, EVCNT_TYPE_INTR, NULL,
					      self->dv_xname, "intr");

	if ((cpu_intr_establish(haa->ha_irq, IPL_NET, sq_intr, sc)) == NULL) {
		printf(": unable to establish interrupt!\n");
		goto fail_6;
	}

	/* Reset the chip to a known state. */
	sq_reset(sc);

	/*
	 * Determine if we're an 8003 or 80c03 by setting the first
	 * MAC address register to non-zero, and then reading it back.
	 * If it's zero, we have an 80c03, because we will have read
	 * the TxCollLSB register.
	 */
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCOLLS0, 0xa5);
	if (bus_space_read_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCOLLS0) == 0)
		sc->sc_type = SQ_TYPE_80C03;
	else
		sc->sc_type = SQ_TYPE_8003;
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCOLLS0, 0x00);

	printf(": SGI Seeq %s\n",
	    sc->sc_type == SQ_TYPE_80C03 ? "80c03" : "8003");

	enaddr_aton(macaddr, sc->sc_enaddr);

	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
					   ether_sprintf(sc->sc_enaddr));

	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_init = sq_init;
	ifp->if_stop = sq_stop;
	ifp->if_start = sq_start;
	ifp->if_ioctl = sq_ioctl;
	ifp->if_watchdog = sq_watchdog;
	ifp->if_flags = IFF_BROADCAST | IFF_NOTRAILERS | IFF_MULTICAST;
	IFQ_SET_READY(&ifp->if_snd);

	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);

	memset(&sq_trace, 0, sizeof(sq_trace));
	/* Done! */
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
fail_6:
	for (i = 0; i < SQ_NRXDESC; i++) {
		if (sc->sc_rxmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rxmap[i]);
			m_freem(sc->sc_rxmbuf[i]);
		}
	}
fail_5:
	for (i = 0; i < SQ_NRXDESC; i++) {
		if (sc->sc_rxmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxmap[i]);
	}
fail_4:
	for (i = 0; i < SQ_NTXDESC; i++) {
		if (sc->sc_txmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_txmap[i]);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cdmap);
fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdmap);
fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t) sc->sc_control,
				      sizeof(struct sq_control));
fail_1:
	bus_dmamem_free(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg);
fail_0:
	return;
}

/* Set up data to get the interface up and running. */
int
sq_init(struct ifnet *ifp)
{
	int i;
	u_int32_t reg;
	struct sq_softc *sc = ifp->if_softc;

	/* Cancel any in-progress I/O */
	sq_stop(ifp, 0);

	sc->sc_nextrx = 0;

	sc->sc_nfreetx = SQ_NTXDESC;
	sc->sc_nexttx = sc->sc_prevtx = 0;

	SQ_TRACE(SQ_RESET, 0, 0, sc->sc_nfreetx);

	/* Set into 8003 mode, bank 0 to program ethernet address */
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCMD, TXCMD_BANK0);

	/* Now write the address */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		bus_space_write_1(sc->sc_regt, sc->sc_regh, i,
		    sc->sc_enaddr[i]);

	sc->sc_rxcmd = RXCMD_IE_CRC |
		       RXCMD_IE_DRIB |
		       RXCMD_IE_SHORT |
		       RXCMD_IE_END |
		       RXCMD_IE_GOOD;

	/*
	 * Set the receive filter -- this will add some bits to the
	 * prototype RXCMD register.  Do this before setting the
	 * transmit config register, since we might need to switch
	 * banks.
	 */
	sq_set_filter(sc);

	/* Set up Seeq transmit command register */
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCMD,
						    TXCMD_IE_UFLOW |
						    TXCMD_IE_COLL |
						    TXCMD_IE_16COLL |
						    TXCMD_IE_GOOD);

	/* Now write the receive command register. */
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_RXCMD, sc->sc_rxcmd);

	/* Set up HPC ethernet DMA config */
	if (sc->hpc_regs->revision == 3) {
		reg = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
				sc->hpc_regs->enetr_dmacfg);
		bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
				sc->hpc_regs->enetr_dmacfg,
				reg | ENETR_DMACFG_FIX_RXDC |
				ENETR_DMACFG_FIX_INTR |
				ENETR_DMACFG_FIX_EOP);
	}

	/* Pass the start of the receive ring to the HPC */
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, sc->hpc_regs->enetr_ndbp,
						    SQ_CDRXADDR(sc, 0));

	/* And turn on the HPC ethernet receive channel */
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, sc->hpc_regs->enetr_ctl,
				       sc->hpc_regs->enetr_ctl_active);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

static void
sq_set_filter(struct sq_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;

	/*
	 * Check for promiscuous mode.  Also implies
	 * all-multicast.
	 */
	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_rxcmd |= RXCMD_REC_ALL;
		ifp->if_flags |= IFF_ALLMULTI;
		return;
	}

	/*
	 * The 8003 has no hash table.  If we have any multicast
	 * addresses on the list, enable reception of all multicast
	 * frames.
	 *
	 * XXX The 80c03 has a hash table.  We should use it.
	 */

	ETHER_FIRST_MULTI(step, ec, enm);

	if (enm == NULL) {
		sc->sc_rxcmd &= ~RXCMD_REC_MASK;
		sc->sc_rxcmd |= RXCMD_REC_BROAD;

		ifp->if_flags &= ~IFF_ALLMULTI;
		return;
	}

	sc->sc_rxcmd |= RXCMD_REC_MULTI;
	ifp->if_flags |= IFF_ALLMULTI;
}

int
sq_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	int s, error = 0;

	s = splnet();

	error = ether_ioctl(ifp, cmd, data);
	if (error == ENETRESET) {
		/*
		 * Multicast list has changed; set the hardware filter
		 * accordingly.
		 */
		error = sq_init(ifp);
	}

	splx(s);
	return (error);
}

void
sq_start(struct ifnet *ifp)
{
	struct sq_softc *sc = ifp->if_softc;
	u_int32_t status;
	struct mbuf *m0, *m;
	bus_dmamap_t dmamap;
	int err, totlen, nexttx, firsttx, lasttx = -1, ofree, seg;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors and
	 * the first descriptor we'll use.
	 */
	ofree = sc->sc_nfreetx;
	firsttx = sc->sc_nexttx;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (sc->sc_nfreetx != 0) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		dmamap = sc->sc_txmap[sc->sc_nexttx];

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.
		 * Also copy it if we need to pad, so that we are sure there
		 * is room for the pad buffer.
		 * XXX the right way of doing this is to use a static buffer
		 * for padding and adding it to the transmit descriptor (see
		 * sys/dev/pci/if_tl.c for example). We can't do this here yet
		 * because we can't send packets with more than one fragment.
		 */
		if (m0->m_pkthdr.len < ETHER_PAD_LEN ||
		    bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
						      BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->sc_dev.dv_xname);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->sc_dev.dv_xname);
					m_freem(m);
					break;
				}
			}

			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			if (m0->m_pkthdr.len < ETHER_PAD_LEN) {
				memset(mtod(m, char *) + m0->m_pkthdr.len, 0,
				    ETHER_PAD_LEN - m0->m_pkthdr.len);
				m->m_pkthdr.len = m->m_len = ETHER_PAD_LEN;
			} else
				m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;

			if ((err = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
						m, BUS_DMA_NOWAIT)) != 0) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->sc_dev.dv_xname, err);
				break;
			}
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.
		 */
		if (dmamap->dm_nsegs > sc->sc_nfreetx) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed to anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 *
			 * XXX We could allocate an mbuf and copy, but
			 * XXX is it worth it?
			 */
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			if (m != NULL)
				m_freem(m);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);
#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Initialize the transmit descriptors.
		 */
		for (nexttx = sc->sc_nexttx, seg = 0, totlen = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = SQ_NEXTTX(nexttx)) {
			if (sc->hpc_regs->revision == 3) {
				sc->sc_txdesc[nexttx].hpc3_hdd_bufptr =
					    dmamap->dm_segs[seg].ds_addr;
				sc->sc_txdesc[nexttx].hpc3_hdd_ctl =
					    dmamap->dm_segs[seg].ds_len;
			} else {
				sc->sc_txdesc[nexttx].hpc1_hdd_bufptr =
					    dmamap->dm_segs[seg].ds_addr;
				sc->sc_txdesc[nexttx].hpc1_hdd_ctl =
					    dmamap->dm_segs[seg].ds_len;
			}
			sc->sc_txdesc[nexttx].hdd_descptr =
					    SQ_CDTXADDR(sc, SQ_NEXTTX(nexttx));
			lasttx = nexttx;
			totlen += dmamap->dm_segs[seg].ds_len;
		}

		/* Last descriptor gets end-of-packet */
		KASSERT(lasttx != -1);
		if (sc->hpc_regs->revision == 3)
			sc->sc_txdesc[lasttx].hpc3_hdd_ctl |= HDD_CTL_EOPACKET;
		else
			sc->sc_txdesc[lasttx].hpc1_hdd_ctl |=
							HPC1_HDD_CTL_EOPACKET;

		SQ_DPRINTF(("%s: transmit %d-%d, len %d\n", sc->sc_dev.dv_xname,
						       sc->sc_nexttx, lasttx,
						       totlen));

		if (ifp->if_flags & IFF_DEBUG) {
			printf("     transmit chain:\n");
			for (seg = sc->sc_nexttx;; seg = SQ_NEXTTX(seg)) {
				printf("     descriptor %d:\n", seg);
				printf("       hdd_bufptr:      0x%08x\n",
					(sc->hpc_regs->revision == 3) ?
					    sc->sc_txdesc[seg].hpc3_hdd_bufptr :
					    sc->sc_txdesc[seg].hpc1_hdd_bufptr);
				printf("       hdd_ctl: 0x%08x\n",
					(sc->hpc_regs->revision == 3) ?
					    sc->sc_txdesc[seg].hpc3_hdd_ctl :
					    sc->sc_txdesc[seg].hpc1_hdd_ctl);
				printf("       hdd_descptr:      0x%08x\n",
					sc->sc_txdesc[seg].hdd_descptr);

				if (seg == lasttx)
					break;
			}
		}

		/* Sync the descriptors we're using. */
		SQ_CDTXSYNC(sc, sc->sc_nexttx, dmamap->dm_nsegs,
				BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Store a pointer to the packet so we can free it later */
		sc->sc_txmbuf[sc->sc_nexttx] = m0;

		/* Advance the tx pointer. */
		sc->sc_nfreetx -= dmamap->dm_nsegs;
		sc->sc_nexttx = nexttx;
	}

	/* All transmit descriptors used up, let upper layers know */
	if (sc->sc_nfreetx == 0)
		ifp->if_flags |= IFF_OACTIVE;

	if (sc->sc_nfreetx != ofree) {
		SQ_DPRINTF(("%s: %d packets enqueued, first %d, INTR on %d\n",
			    sc->sc_dev.dv_xname, lasttx - firsttx + 1,
			    firsttx, lasttx));

		/*
		 * Cause a transmit interrupt to happen on the
		 * last packet we enqueued, mark it as the last
		 * descriptor.
		 *
		 * HDD_CTL_EOPACKET && HDD_CTL_INTR cause an
		 * interrupt.
		 */
		KASSERT(lasttx != -1);
		if (sc->hpc_regs->revision == 3) {
			sc->sc_txdesc[lasttx].hpc3_hdd_ctl |= HDD_CTL_INTR |
							HDD_CTL_EOCHAIN;
		} else {
			sc->sc_txdesc[lasttx].hpc1_hdd_ctl |= HPC1_HDD_CTL_INTR;
			sc->sc_txdesc[lasttx].hpc1_hdd_bufptr |=
							HPC1_HDD_CTL_EOCHAIN;
		}

		SQ_CDTXSYNC(sc, lasttx, 1,
				BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/*
		 * There is a potential race condition here if the HPC
		 * DMA channel is active and we try and either update
		 * the 'next descriptor' pointer in the HPC PIO space
		 * or the 'next descriptor' pointer in a previous
		 * descriptor.
		 *
		 * To avoid this, if the channel is active, we rely on
		 * the transmit interrupt routine noticing that there
		 * are more packets to send and restarting the HPC DMA
		 * engine, rather than mucking with the DMA state here.
		 */
		status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
				sc->hpc_regs->enetx_ctl);

		if ((status & sc->hpc_regs->enetx_ctl_active) != 0) {
			SQ_TRACE(SQ_ADD_TO_DMA, firsttx, status,
			    sc->sc_nfreetx);

			/* NB: hpc3_hdd_ctl is also hpc1_hdd_bufptr */
			sc->sc_txdesc[SQ_PREVTX(firsttx)].hpc3_hdd_ctl &=
			    ~HDD_CTL_EOCHAIN;

			SQ_CDTXSYNC(sc, SQ_PREVTX(firsttx), 1,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		} else {
			SQ_TRACE(SQ_START_DMA, firsttx, status, sc->sc_nfreetx);

			bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
			    sc->hpc_regs->enetx_ndbp, SQ_CDTXADDR(sc, firsttx));

			if (sc->hpc_regs->revision != 3) {
				bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
				    HPC1_ENETX_CFXBP, SQ_CDTXADDR(sc, firsttx));
				bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
				    HPC1_ENETX_CBP, SQ_CDTXADDR(sc, firsttx));
			}

			/* Kick DMA channel into life */
			bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
					  sc->hpc_regs->enetx_ctl,
					  sc->hpc_regs->enetx_ctl_active);
		}

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

void
sq_stop(struct ifnet *ifp, int disable)
{
	int i;
	struct sq_softc *sc = ifp->if_softc;

	for (i = 0; i < SQ_NTXDESC; i++) {
		if (sc->sc_txmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
			m_freem(sc->sc_txmbuf[i]);
			sc->sc_txmbuf[i] = NULL;
		}
	}

	/* Clear Seeq transmit/receive command registers */
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCMD, 0);
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_RXCMD, 0);

	sq_reset(sc);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}

/* Device timeout/watchdog routine. */
void
sq_watchdog(struct ifnet *ifp)
{
	u_int32_t status;
	struct sq_softc *sc = ifp->if_softc;

	status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
				  sc->hpc_regs->enetx_ctl);
	log(LOG_ERR, "%s: device timeout (prev %d, next %d, free %d, "
		     "status %08x)\n", sc->sc_dev.dv_xname, sc->sc_prevtx,
				       sc->sc_nexttx, sc->sc_nfreetx, status);

	sq_trace_dump(sc);

	memset(&sq_trace, 0, sizeof(sq_trace));
	sq_trace_idx = 0;

	++ifp->if_oerrors;

	sq_init(ifp);
}

void
sq_trace_dump(struct sq_softc *sc)
{
	int i;

	for (i = 0; i < sq_trace_idx; i++) {
		printf("%s: [%d] action %d, buf %d, free %d, status %08x\n",
			sc->sc_dev.dv_xname, i, sq_trace[i].action,
			sq_trace[i].bufno, sq_trace[i].freebuf,
			sq_trace[i].status);
	}
}

static int
sq_intr(void *arg)
{
	struct sq_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int handled = 0;
	u_int32_t stat;

	stat = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
				sc->hpc_regs->enetr_reset);

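	/*
	 * Bit 1 of the HPC reset register apparently doubles as the
	 * interrupt-pending flag; writing it back (stat | 2) below
	 * acknowledges the interrupt.
	 */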
	if ((stat & 2) == 0) {
		printf("%s: Unexpected interrupt!\n", sc->sc_dev.dv_xname);
		return 0;
	}

	bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
			  sc->hpc_regs->enetr_reset, (stat | 2));

	/*
	 * If the interface isn't running, the interrupt couldn't
	 * possibly have come from us.
	 */
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return 0;

	sc->sq_intrcnt.ev_count++;

	/* Always check for received packets */
	if (sq_rxintr(sc) != 0)
		handled++;

	/* Only handle transmit interrupts if we actually sent something */
	if (sc->sc_nfreetx < SQ_NTXDESC) {
		sq_txintr(sc);
		handled++;
	}

#if NRND > 0
	if (handled)
		rnd_add_uint32(&sc->rnd_source, stat);
#endif
	return (handled);
}

static int
sq_rxintr(struct sq_softc *sc)
{
	int count = 0;
	struct mbuf *m;
	int i, framelen;
	u_int8_t pktstat;
	u_int32_t status;
	u_int32_t ctl_reg;
	int new_end, orig_end;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	for (i = sc->sc_nextrx;; i = SQ_NEXTRX(i)) {
		SQ_CDRXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		/*
		 * If this is a CPU-owned buffer, we're at the end of
		 * the list.
		 */
		if (sc->hpc_regs->revision == 3)
			ctl_reg = sc->sc_rxdesc[i].hpc3_hdd_ctl & HDD_CTL_OWN;
		else
			ctl_reg = sc->sc_rxdesc[i].hpc1_hdd_ctl &
							HPC1_HDD_CTL_OWN;

		if (ctl_reg) {
#if defined(SQ_DEBUG)
			u_int32_t reg;

			reg = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
			    sc->hpc_regs->enetr_ctl);
			SQ_DPRINTF(("%s: rxintr: done at %d (ctl %08x)\n",
			    sc->sc_dev.dv_xname, i, reg));
#endif
			break;
		}

		count++;

		m = sc->sc_rxmbuf[i];
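		/*
		 * The descriptor's byte-count field holds the space left
		 * in the buffer; what was consumed appears to be a 2-byte
		 * pad in front of the frame plus a trailing status byte,
		 * hence the extra 3 bytes subtracted here (the status
		 * byte itself is fetched below at m_data + framelen + 2).
		 */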
		framelen = m->m_ext.ext_size - 3;
		if (sc->hpc_regs->revision == 3)
			framelen -=
			    HDD_CTL_BYTECNT(sc->sc_rxdesc[i].hpc3_hdd_ctl);
		else
			framelen -=
			    HPC1_HDD_CTL_BYTECNT(sc->sc_rxdesc[i].hpc1_hdd_ctl);

		/* Now sync the actual packet data */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
		    sc->sc_rxmap[i]->dm_mapsize, BUS_DMASYNC_POSTREAD);

		pktstat = *((u_int8_t *)m->m_data + framelen + 2);

		if ((pktstat & RXSTAT_GOOD) == 0) {
			ifp->if_ierrors++;

			if (pktstat & RXSTAT_OFLOW)
				printf("%s: receive FIFO overflow\n",
				    sc->sc_dev.dv_xname);

			bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
			    sc->sc_rxmap[i]->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
			SQ_INIT_RXDESC(sc, i);
			continue;
		}

		if (sq_add_rxbuf(sc, i) != 0) {
			ifp->if_ierrors++;
			bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
			    sc->sc_rxmap[i]->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
			SQ_INIT_RXDESC(sc, i);
			continue;
		}

		m->m_data += 2;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = framelen;

		ifp->if_ipackets++;

		SQ_DPRINTF(("%s: sq_rxintr: buf %d len %d\n",
			    sc->sc_dev.dv_xname, i, framelen));

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		(*ifp->if_input)(ifp, m);
	}

	/* If anything happened, move ring start/end pointers to new spot */
	if (i != sc->sc_nextrx) {
		/* NB: hpc3_hdd_ctl is also hpc1_hdd_bufptr */

		new_end = SQ_PREVRX(i);
		sc->sc_rxdesc[new_end].hpc3_hdd_ctl |= HDD_CTL_EOCHAIN;
		SQ_CDRXSYNC(sc, new_end, BUS_DMASYNC_PREREAD |
		    BUS_DMASYNC_PREWRITE);

		orig_end = SQ_PREVRX(sc->sc_nextrx);
		sc->sc_rxdesc[orig_end].hpc3_hdd_ctl &= ~HDD_CTL_EOCHAIN;
		SQ_CDRXSYNC(sc, orig_end, BUS_DMASYNC_PREREAD |
		    BUS_DMASYNC_PREWRITE);

		sc->sc_nextrx = i;
	}

	status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
				  sc->hpc_regs->enetr_ctl);

	/* If receive channel is stopped, restart it... */
	if ((status & sc->hpc_regs->enetr_ctl_active) == 0) {
		/* Pass the start of the receive ring to the HPC */
		bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
		    sc->hpc_regs->enetr_ndbp, SQ_CDRXADDR(sc, sc->sc_nextrx));

		/* And turn on the HPC ethernet receive channel */
		bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
		    sc->hpc_regs->enetr_ctl, sc->hpc_regs->enetr_ctl_active);
	}

	return count;
}

static int
sq_txintr(struct sq_softc *sc)
{
	int i;
	int shift = 0;
	u_int32_t status;
	u_int32_t hpc1_ready = 0;
	u_int32_t hpc3_not_ready = 1;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

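	/*
	 * HPC1 presents the transmit status in the upper 16 bits of the
	 * control register; shift it down so the TXSTAT_* bits line up.
	 */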
	if (sc->hpc_regs->revision != 3)
		shift = 16;

	status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
				  sc->hpc_regs->enetx_ctl) >> shift;

	SQ_TRACE(SQ_TXINTR_ENTER, sc->sc_prevtx, status, sc->sc_nfreetx);

	if ((status & ((sc->hpc_regs->enetx_ctl_active >> shift) |
	    TXSTAT_GOOD)) == 0) {
/* XXX */	printf("txstat: %x\n", status);
		if (status & TXSTAT_COLL)
			ifp->if_collisions++;

		if (status & TXSTAT_UFLOW) {
			printf("%s: transmit underflow\n", sc->sc_dev.dv_xname);
			ifp->if_oerrors++;
		}

		if (status & TXSTAT_16COLL) {
			printf("%s: max collisions reached\n",
			    sc->sc_dev.dv_xname);
			ifp->if_oerrors++;
			ifp->if_collisions += 16;
		}
	}

	i = sc->sc_prevtx;
	while (sc->sc_nfreetx < SQ_NTXDESC) {
		/*
		 * Check status first so we don't end up with a case of
		 * the buffer not being finished while the DMA channel
		 * has gone idle.
		 */
		status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
					sc->hpc_regs->enetx_ctl) >> shift;

		SQ_CDTXSYNC(sc, i, sc->sc_txmap[i]->dm_nsegs,
				BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		/*
		 * If not yet transmitted, try and start DMA engine again.
		 * HPC3 tags transmitted descriptors with XMITDONE whereas
		 * HPC1 will not halt before sending through EOCHAIN.
		 */
		if (sc->hpc_regs->revision == 3) {
			hpc3_not_ready =
			    sc->sc_txdesc[i].hpc3_hdd_ctl & HDD_CTL_XMITDONE;
		} else {
			if (hpc1_ready)
				hpc1_ready++;
			else {
				if (sc->sc_txdesc[i].hpc1_hdd_ctl &
							HPC1_HDD_CTL_EOPACKET)
					hpc1_ready = 1;
			}
		}

		if (hpc3_not_ready == 0 || hpc1_ready == 2) {
			/* XXX */
			if ((status &
			    (sc->hpc_regs->enetx_ctl_active >> shift)) == 0) {
				SQ_TRACE(SQ_RESTART_DMA, i, status,
				    sc->sc_nfreetx);

				bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
				    sc->hpc_regs->enetx_ndbp,
				    SQ_CDTXADDR(sc, i));

				if (sc->hpc_regs->revision != 3) {
					bus_space_write_4(sc->sc_hpct,
					    sc->sc_hpch, HPC1_ENETX_CFXBP,
					    SQ_CDTXADDR(sc, i));
					bus_space_write_4(sc->sc_hpct,
					    sc->sc_hpch, HPC1_ENETX_CBP,
					    SQ_CDTXADDR(sc, i));
				}

				/* Kick DMA channel into life */
				bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
					  sc->hpc_regs->enetx_ctl,
					  sc->hpc_regs->enetx_ctl_active);

				/*
				 * Set a watchdog timer in case the chip
				 * flakes out.
				 */
				ifp->if_timer = 5;
			} else {
				SQ_TRACE(SQ_TXINTR_BUSY, i, status,
				    sc->sc_nfreetx);
			}
			break;
		}

		/* Sync the packet data, unload DMA map, free mbuf */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_txmap[i], 0,
				sc->sc_txmap[i]->dm_mapsize,
				BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
		m_freem(sc->sc_txmbuf[i]);
		sc->sc_txmbuf[i] = NULL;

		ifp->if_opackets++;
		sc->sc_nfreetx++;

		SQ_TRACE(SQ_DONE_DMA, i, status, sc->sc_nfreetx);
		i = SQ_NEXTTX(i);
	}

	/* prevtx now points to next xmit packet not yet finished */
	sc->sc_prevtx = i;

	/* If we have buffers free, let upper layers know */
	if (sc->sc_nfreetx > 0)
		ifp->if_flags &= ~IFF_OACTIVE;

	/* If all packets have left the coop, cancel watchdog */
	if (sc->sc_nfreetx == SQ_NTXDESC)
		ifp->if_timer = 0;

	SQ_TRACE(SQ_TXINTR_EXIT, sc->sc_prevtx, status, sc->sc_nfreetx);
	sq_start(ifp);

	return 1;
}

void
sq_reset(struct sq_softc *sc)
{
	/* Stop HPC DMA channels */
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
	    sc->hpc_regs->enetr_ctl, 0);
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
	    sc->hpc_regs->enetx_ctl, 0);

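	/*
	 * The reset register appears to carry a reset bit per channel:
	 * write 3 to hold both the receiver and transmitter in reset,
	 * wait briefly, then release them.
	 */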
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
	    sc->hpc_regs->enetr_reset, 3);
	delay(20);
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
	    sc->hpc_regs->enetr_reset, 0);
}

/* sq_add_rxbuf: Add a receive buffer to the indicated descriptor. */
int
sq_add_rxbuf(struct sq_softc *sc, int idx)
{
	int err;
	struct mbuf *m;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (sc->sc_rxmbuf[idx] != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rxmap[idx]);

	sc->sc_rxmbuf[idx] = m;

	if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_rxmap[idx],
				   m->m_ext.ext_buf, m->m_ext.ext_size,
				   NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, err);
		panic("sq_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[idx], 0,
			sc->sc_rxmap[idx]->dm_mapsize, BUS_DMASYNC_PREREAD);

	SQ_INIT_RXDESC(sc, idx);

	return 0;
}

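/*
 * Hex-dump `len' bytes starting at physical address `addr', read through
 * the uncached KSEG1 window.  Debugging aid only.
 */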
void
sq_dump_buffer(u_int32_t addr, u_int32_t len)
{
	u_int i;
	u_char *physaddr = (u_char *) MIPS_PHYS_TO_KSEG1((caddr_t)addr);

	if (len == 0)
		return;

	printf("%p: ", physaddr);

	for (i = 0; i < len; i++) {
		printf("%02x ", *(physaddr + i) & 0xff);
		if ((i % 16) == 15 && i != len - 1)
			printf("\n%p: ", physaddr + i + 1);
	}

	printf("\n");
}

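/*
 * Convert a colon-separated MAC address string, such as the ARCS
 * firmware's "eaddr" environment variable ("08:00:69:0a:0b:0c"),
 * into its six-byte binary form.
 */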
void
enaddr_aton(const char *str, u_int8_t *eaddr)
{
	int i;
	char c;

	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		if (*str == ':')
			str++;

		c = *str++;
		if (isdigit(c)) {
			eaddr[i] = (c - '0');
		} else if (isxdigit(c)) {
			eaddr[i] = (toupper(c) + 10 - 'A');
		}

		c = *str++;
		if (isdigit(c)) {
			eaddr[i] = (eaddr[i] << 4) | (c - '0');
		} else if (isxdigit(c)) {
			eaddr[i] = (eaddr[i] << 4) | (toupper(c) + 10 - 'A');
		}
	}
}