/*	$NetBSD: if_sq.c,v 1.2 2001/06/07 12:20:42 rafal Exp $	*/

/*
 * Copyright (c) 2001 Rafal K. Boni
 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Portions of this code are derived from software contributed to The
 * NetBSD Foundation by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_inet.h"
#include "opt_ns.h"
#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/syslog.h>

#include <uvm/uvm_extern.h>

#include <machine/endian.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_inarp.h>
#endif

#ifdef NS
#include <netns/ns.h>
#include <netns/ns_if.h>
#endif

/* XXXrkb: cheap hack until parents pass in DMA tags */
#define _SGIMIPS_BUS_DMA_PRIVATE

#include <machine/bus.h>
#include <machine/arcs.h>
#include <machine/intr.h>

#include <dev/ic/seeq8003reg.h>

#include <sgimips/hpc/sqvar.h>
#include <sgimips/hpc/hpcvar.h>
#include <sgimips/hpc/hpcreg.h>

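/*
 * XXX: defining away 'static' (below) is presumably a debugging aid,
 * making every function and variable in this file globally visible
 * (e.g. to the kernel debugger).  It should go away once the driver
 * settles down.
 */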
#define static

/*
 * Short TODO list:
 *	(1) Do counters for bad-RX packets.
 *	(2) Inherit DMA tag via config machinery, don't hard-code it.
 *	(3) Allow multi-segment transmits, instead of copying to a single,
 *	    contiguous mbuf.
 *	(4) Verify sq_stop() turns off enough stuff; I was still getting
 *	    seeq interrupts after sq_stop().
 *	(5) Fix up printfs in driver (most should only fire ifdef SQ_DEBUG
 *	    or something similar).
 *	(6) Implement EDLC modes: especially packet auto-pad and simplex
 *	    mode.
 *	(7) Should the driver filter out its own transmissions in non-EDLC
 *	    mode?
 *	(8) Multicast support -- multicast filter, address management, ...
 *	(9) Deal with RB0 (recv buffer overflow) on reception.  Will need
 *	    to figure out if RB0 is read-only as stated in one spot in the
 *	    HPC spec, or read-write (ie, is 'write a one to clear it' the
 *	    correct thing)?
 */

static int	sq_match(struct device *, struct cfdata *, void *);
static void	sq_attach(struct device *, struct device *, void *);
static int	sq_init(struct ifnet *);
static void	sq_start(struct ifnet *);
static void	sq_stop(struct ifnet *, int);
static void	sq_watchdog(struct ifnet *);
static int	sq_ioctl(struct ifnet *, u_long, caddr_t);

static int	sq_intr(void *);
static int	sq_rxintr(struct sq_softc *);
static int	sq_txintr(struct sq_softc *);
static void	sq_reset(struct sq_softc *);
static int	sq_add_rxbuf(struct sq_softc *, int);
static void	sq_dump_buffer(u_int32_t addr, u_int32_t len);

static void	enaddr_aton(const char*, u_int8_t*);

/* Actions */
#define SQ_RESET		1
#define SQ_ADD_TO_DMA		2
#define SQ_START_DMA		3
#define SQ_DONE_DMA		4
#define SQ_RESTART_DMA		5
#define SQ_TXINTR_ENTER		6
#define SQ_TXINTR_EXIT		7
#define SQ_TXINTR_BUSY		8

struct sq_action_trace {
	int action;
	int bufno;
	int status;
	int freebuf;
};

#define SQ_TRACEBUF_SIZE	100
int sq_trace_idx = 0;
struct sq_action_trace sq_trace[SQ_TRACEBUF_SIZE];

void sq_trace_dump(struct sq_softc* sc);

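/*
 * SQ_TRACE logs driver actions into sq_trace[], a fixed-size ring of
 * (action, buffer number, HPC status, free tx count) tuples that wraps
 * and re-zeroes itself when full.  sq_trace_dump() prints the entries
 * accumulated so far; the watchdog calls it on a device timeout so the
 * recent DMA history is visible post-mortem.
 */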
#define SQ_TRACE(act, buf, stat, free) do {				\
	sq_trace[sq_trace_idx].action = (act);				\
	sq_trace[sq_trace_idx].bufno = (buf);				\
	sq_trace[sq_trace_idx].status = (stat);				\
	sq_trace[sq_trace_idx].freebuf = (free);			\
	if (++sq_trace_idx == SQ_TRACEBUF_SIZE) {			\
		bzero(&sq_trace, sizeof(sq_trace));			\
		sq_trace_idx = 0;					\
	}								\
} while (0)

struct cfattach sq_ca = {
	sizeof(struct sq_softc), sq_match, sq_attach
};

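/*
 * XXX: no real probe logic yet -- sq_match() unconditionally claims
 * the device, so it must only be configured where an on-board Seeq
 * is known to exist.
 */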
static int
sq_match(struct device *parent, struct cfdata *match, void *aux)
{
	/* XXX! */
	return 1;
}
static void
sq_attach(struct device *parent, struct device *self, void *aux)
{
	int i, err;
	char* macaddr;
	struct sq_softc *sc = (void *)self;
	struct hpc_attach_args *haa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	sc->sc_hpct = haa->ha_iot;
	if ((err = bus_space_subregion(haa->ha_iot, haa->ha_ioh,
				       HPC_ENET_REGS,
				       HPC_ENET_REGS_SIZE,
				       &sc->sc_hpch)) != 0) {
		printf(": unable to map HPC DMA registers, error = %d\n", err);
		goto fail_0;
	}

	sc->sc_regt = haa->ha_iot;
	if ((err = bus_space_subregion(haa->ha_iot, haa->ha_ioh,
				       HPC_ENET_DEVREGS,
				       HPC_ENET_DEVREGS_SIZE,
				       &sc->sc_regh)) != 0) {
		printf(": unable to map Seeq registers, error = %d\n", err);
		goto fail_0;
	}

	/* XXXrkb: should be inherited from parent bus, but works for now */
	sc->sc_dmat = &sgimips_default_bus_dma_tag;

	if ((err = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct sq_control),
				    PAGE_SIZE, PAGE_SIZE, &sc->sc_cdseg,
				    1, &sc->sc_ncdseg, BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to allocate control data, error = %d\n", err);
		goto fail_0;
	}

	if ((err = bus_dmamem_map(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg,
				  sizeof(struct sq_control),
				  (caddr_t *)&sc->sc_control,
				  BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf(": unable to map control data, error = %d\n", err);
		goto fail_1;
	}

	if ((err = bus_dmamap_create(sc->sc_dmat, sizeof(struct sq_control),
				     1, sizeof(struct sq_control), PAGE_SIZE,
				     BUS_DMA_NOWAIT, &sc->sc_cdmap)) != 0) {
		printf(": unable to create DMA map for control data, error "
			"= %d\n", err);
		goto fail_2;
	}

	if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_cdmap, sc->sc_control,
				   sizeof(struct sq_control),
				   NULL, BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to load DMA map for control data, error "
			"= %d\n", err);
		goto fail_3;
	}

	bzero(sc->sc_control, sizeof(struct sq_control));

	/* Create transmit buffer DMA maps */
	for (i = 0; i < SQ_NTXDESC; i++) {
	    if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
					 0, BUS_DMA_NOWAIT,
					 &sc->sc_txmap[i])) != 0) {
		    printf(": unable to create tx DMA map %d, error = %d\n",
			   i, err);
		    goto fail_4;
	    }
	}

	/* Create receive buffer DMA maps */
	for (i = 0; i < SQ_NRXDESC; i++) {
	    if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
					 0, BUS_DMA_NOWAIT,
					 &sc->sc_rxmap[i])) != 0) {
		    printf(": unable to create rx DMA map %d, error = %d\n",
			   i, err);
		    goto fail_5;
	    }
	}

	/* Pre-allocate the receive buffers.  */
	for (i = 0; i < SQ_NRXDESC; i++) {
		if ((err = sq_add_rxbuf(sc, i)) != 0) {
			printf(": unable to allocate or map rx buffer %d, "
			       "error = %d\n", i, err);
			goto fail_6;
		}
	}

	if ((macaddr = ARCS->GetEnvironmentVariable("eaddr")) == NULL) {
		printf(": unable to get MAC address!\n");
		goto fail_6;
	}

	if ((cpu_intr_establish(3, IPL_NET, sq_intr, sc)) == NULL) {
		printf(": unable to establish interrupt!\n");
		goto fail_6;
	}

	printf(": SGI Seeq-8003\n");

	enaddr_aton(macaddr, sc->sc_enaddr);

	printf("%s: station address %s\n", sc->sc_dev.dv_xname,
					   ether_sprintf(sc->sc_enaddr));

	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_init = sq_init;
	ifp->if_stop = sq_stop;
	ifp->if_start = sq_start;
	ifp->if_ioctl = sq_ioctl;
	ifp->if_watchdog = sq_watchdog;
	ifp->if_flags = IFF_BROADCAST | IFF_NOTRAILERS;
	IFQ_SET_READY(&ifp->if_snd);

	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);

	bzero(&sq_trace, sizeof(sq_trace));
	/* Done! */
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
fail_6:
	for (i = 0; i < SQ_NRXDESC; i++) {
		if (sc->sc_rxmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rxmap[i]);
			m_freem(sc->sc_rxmbuf[i]);
		}
	}
fail_5:
	for (i = 0; i < SQ_NRXDESC; i++) {
	    if (sc->sc_rxmap[i] != NULL)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxmap[i]);
	}
fail_4:
	for (i = 0; i < SQ_NTXDESC; i++) {
	    if (sc->sc_txmap[i] != NULL)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_txmap[i]);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cdmap);
fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdmap);
fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t) sc->sc_control,
				      sizeof(struct sq_control));
fail_1:
	bus_dmamem_free(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg);
fail_0:
	return;
}

/* Set up data to get the interface up and running. */
int
sq_init(struct ifnet *ifp)
{
	int i;
	u_int32_t reg;
	struct sq_softc *sc = ifp->if_softc;

	/* Cancel any in-progress I/O */
	sq_stop(ifp, 0);

	sc->sc_nextrx = 0;

	sc->sc_nfreetx = SQ_NTXDESC;
	sc->sc_nexttx = sc->sc_prevtx = 0;

	SQ_TRACE(SQ_RESET, 0, 0, sc->sc_nfreetx);

	/* Set into 8003 mode, bank 0 to program ethernet address */
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCMD, TXCMD_BANK0);

	/* Now write the address */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
	    bus_space_write_1(sc->sc_regt, sc->sc_regh, i, sc->sc_enaddr[i]);

	/* Set up Seeq transmit command register */
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCMD,
						    TXCMD_IE_UFLOW |
						    TXCMD_IE_COLL |
						    TXCMD_IE_16COLL |
						    TXCMD_IE_GOOD);

	/* And the receive command register */
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_RXCMD,
						    RXCMD_REC_BROAD |
						    RXCMD_IE_CRC |
						    RXCMD_IE_DRIB |
						    RXCMD_IE_SHORT |
						    RXCMD_IE_END |
						    RXCMD_IE_GOOD);

	/* Set up HPC ethernet DMA config */
	reg = bus_space_read_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_DMACFG);
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_DMACFG,
				reg | ENETR_DMACFG_FIX_RXDC |
				ENETR_DMACFG_FIX_INTR |
				ENETR_DMACFG_FIX_EOP);

	/* Pass the start of the receive ring to the HPC */
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_NDBP,
						    SQ_CDRXADDR(sc, 0));

	/* And turn on the HPC ethernet receive channel */
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_CTL,
						    ENETR_CTL_ACTIVE);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

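/*
 * Interface ioctl handler.  Everything is delegated to ether_ioctl();
 * ENETRESET (multicast list changed) is swallowed since the hardware
 * filter isn't programmed yet (see TODO item 8 above).
 */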
int
sq_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	int s, error = 0;

	s = splnet();

	error = ether_ioctl(ifp, cmd, data);
	if (error == ENETRESET) {
		/*
		 * Multicast list has changed; set the hardware filter
		 * accordingly.
		 */
		error = 0;
	}

	splx(s);
	return (error);
}

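/*
 * if_start handler: drain the send queue into the transmit descriptor
 * ring.  Each packet is DMA-mapped in place where possible; if that
 * fails, it is copied into a freshly allocated mbuf (cluster) and
 * mapped again (see TODO item 3 above regarding multi-segment
 * transmits).
 */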
void
sq_start(struct ifnet *ifp)
{
	struct sq_softc *sc = ifp->if_softc;
	u_int32_t status;
	struct mbuf *m0, *m;
	bus_dmamap_t dmamap;
	int err, totlen, nexttx, firsttx, lasttx, ofree, seg;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors and
	 * the first descriptor we'll use.
	 */
	ofree = sc->sc_nfreetx;
	firsttx = sc->sc_nexttx;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (sc->sc_nfreetx != 0) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		dmamap = sc->sc_txmap[sc->sc_nexttx];

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  In this case, we'll copy
		 * and try again.
		 */
		if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
						      BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->sc_dev.dv_xname);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->sc_dev.dv_xname);
					m_freem(m);
					break;
				}
			}

			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;

			if ((err = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
						m, BUS_DMA_NOWAIT)) != 0) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->sc_dev.dv_xname, err);
				break;
			}
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.
		 */
		if (dmamap->dm_nsegs > sc->sc_nfreetx) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed to anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 *
			 * XXX We could allocate an mbuf and copy, but
			 * XXX is it worth it?
			 */
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			if (m != NULL)
				m_freem(m);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Initialize the transmit descriptors.
		 */
		for (nexttx = sc->sc_nexttx, seg = 0, totlen = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = SQ_NEXTTX(nexttx)) {
			sc->sc_txdesc[nexttx].hdd_bufptr =
					    dmamap->dm_segs[seg].ds_addr;
			sc->sc_txdesc[nexttx].hdd_ctl =
					    dmamap->dm_segs[seg].ds_len;
			sc->sc_txdesc[nexttx].hdd_descptr =
					    SQ_CDTXADDR(sc, SQ_NEXTTX(nexttx));
			lasttx = nexttx;
			totlen += dmamap->dm_segs[seg].ds_len;
		}

		/* Last descriptor gets end-of-packet */
		sc->sc_txdesc[lasttx].hdd_ctl |= HDD_CTL_EOPACKET;

		/* XXXrkb: if not EDLC, pad to min len manually */
		if (totlen < ETHER_MIN_LEN) {
		    sc->sc_txdesc[lasttx].hdd_ctl += (ETHER_MIN_LEN - totlen);
		    totlen = ETHER_MIN_LEN;
		}

#if 0
		printf("%s: transmit %d-%d, len %d\n", sc->sc_dev.dv_xname,
						       sc->sc_nexttx, lasttx,
						       totlen);
#endif

		if (ifp->if_flags & IFF_DEBUG) {
			printf("     transmit chain:\n");
			for (seg = sc->sc_nexttx;; seg = SQ_NEXTTX(seg)) {
				printf("     descriptor %d:\n", seg);
				printf("       hdd_bufptr:      0x%08x\n",
					sc->sc_txdesc[seg].hdd_bufptr);
				printf("       hdd_ctl:         0x%08x\n",
					sc->sc_txdesc[seg].hdd_ctl);
				printf("       hdd_descptr:     0x%08x\n",
					sc->sc_txdesc[seg].hdd_descptr);

				if (seg == lasttx)
					break;
			}
		}

		/* Sync the descriptors we're using. */
		SQ_CDTXSYNC(sc, sc->sc_nexttx, dmamap->dm_nsegs,
				BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Store a pointer to the packet so we can free it later */
		sc->sc_txmbuf[sc->sc_nexttx] = m0;

		/* Advance the tx pointer. */
		sc->sc_nfreetx -= dmamap->dm_nsegs;
		sc->sc_nexttx = nexttx;

#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}

	/* All transmit descriptors used up, let upper layers know */
	if (sc->sc_nfreetx == 0)
		ifp->if_flags |= IFF_OACTIVE;

	if (sc->sc_nfreetx != ofree) {
#if 0
		printf("%s: %d packets enqueued, first %d, INTR on %d\n",
			    sc->sc_dev.dv_xname, lasttx - firsttx + 1,
			    firsttx, lasttx);
#endif

		/*
		 * Cause a transmit interrupt to happen on the
		 * last packet we enqueued, mark it as the last
		 * descriptor.
		 */
		sc->sc_txdesc[lasttx].hdd_ctl |= (HDD_CTL_INTR |
						  HDD_CTL_EOCHAIN);
		SQ_CDTXSYNC(sc, lasttx, 1,
				BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/*
		 * There is a potential race condition here if the HPC
		 * DMA channel is active and we try to either update
		 * the 'next descriptor' pointer in the HPC PIO space
		 * or the 'next descriptor' pointer in a previous
		 * descriptor.
		 *
		 * To avoid this, if the channel is active, we rely on
		 * the transmit interrupt routine noticing that there
		 * are more packets to send and restarting the HPC DMA
		 * engine, rather than mucking with the DMA state here.
		 */
		status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
						       HPC_ENETX_CTL);

		if ((status & ENETX_CTL_ACTIVE) != 0) {
		    SQ_TRACE(SQ_ADD_TO_DMA, firsttx, status, sc->sc_nfreetx);

		    sc->sc_txdesc[SQ_PREVTX(firsttx)].hdd_ctl &=
							~HDD_CTL_EOCHAIN;
		    SQ_CDTXSYNC(sc, SQ_PREVTX(firsttx), 1,
				BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		} else {
		    SQ_TRACE(SQ_START_DMA, firsttx, status, sc->sc_nfreetx);

		    bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
				  HPC_ENETX_NDBP, SQ_CDTXADDR(sc, firsttx));

		    /* Kick DMA channel into life */
		    bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
				      HPC_ENETX_CTL, ENETX_CTL_ACTIVE);
		}

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

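/*
 * if_stop handler: free any pending transmit mbufs, quiesce the Seeq
 * by clearing its tx/rx command registers, and reset the HPC channels.
 * (See TODO item 4 above: it isn't clear this disables everything.)
 */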
void
sq_stop(struct ifnet *ifp, int disable)
{
	int i;
	struct sq_softc *sc = ifp->if_softc;

	for (i = 0; i < SQ_NTXDESC; i++) {
		if (sc->sc_txmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
			m_freem(sc->sc_txmbuf[i]);
			sc->sc_txmbuf[i] = NULL;
		}
	}

	/* Clear Seeq transmit/receive command registers */
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCMD, 0);
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_RXCMD, 0);

	sq_reset(sc);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}

/* Device timeout/watchdog routine. */
void
sq_watchdog(struct ifnet *ifp)
{
	u_int32_t status;
	struct sq_softc *sc = ifp->if_softc;

	status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETX_CTL);
	log(LOG_ERR, "%s: device timeout (prev %d, next %d, free %d, "
		     "status %08x)\n", sc->sc_dev.dv_xname, sc->sc_prevtx,
				       sc->sc_nexttx, sc->sc_nfreetx, status);

	sq_trace_dump(sc);

	bzero(&sq_trace, sizeof(sq_trace));
	sq_trace_idx = 0;

	++ifp->if_oerrors;

	sq_init(ifp);
}

void
sq_trace_dump(struct sq_softc* sc)
{
	int i;

	for (i = 0; i < sq_trace_idx; i++) {
		printf("%s: [%d] action %d, buf %d, free %d, status %08x\n",
			sc->sc_dev.dv_xname, i, sq_trace[i].action,
			sq_trace[i].bufno, sq_trace[i].freebuf,
			sq_trace[i].status);
	}
}

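/*
 * Interrupt handler.  Reading HPC_ENETR_RESET and writing back bit 1
 * evidently acknowledges the interrupt at the HPC; rx work is always
 * checked for, tx completions only when transmits are outstanding.
 */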
static int
sq_intr(void *arg)
{
	struct sq_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int handled = 0;
	u_int32_t stat;

	stat = bus_space_read_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_RESET);

	if ((stat & 2) == 0) {
		printf("%s: Unexpected interrupt!\n", sc->sc_dev.dv_xname);
		return 0;
	}

	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_RESET, 2);

	/*
	 * If the interface isn't running, the interrupt couldn't
	 * possibly have come from us.
	 */
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return 0;

	/* Always check for received packets */
	if (sq_rxintr(sc) != 0)
		handled++;

	/* Only handle transmit interrupts if we actually sent something */
	if (sc->sc_nfreetx < SQ_NTXDESC) {
		sq_txintr(sc);
		handled++;
	}

#if NRND > 0
	if (handled)
		rnd_add_uint32(&sc->sc_rnd_source, stat);
#endif
	return (handled);
}

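/*
 * Receive interrupt: walk the rx ring from sc_nextrx until the first
 * CPU-owned descriptor, handing good frames up the stack and recycling
 * bad ones in place.  The byte count left in hdd_ctl is the unused
 * space in the buffer, and the Seeq apparently deposits two bytes of
 * padding before the frame and a status byte after it -- hence the
 * "- 3" in the length computation and the "+ 2" offsets below.
 * Afterwards the end-of-chain marker is advanced and the HPC receive
 * channel is restarted if it went idle.
 */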
static int
sq_rxintr(struct sq_softc *sc)
{
	int count = 0;
	struct mbuf* m;
	int i, framelen;
	u_int8_t pktstat;
	u_int32_t status;
	int new_end, orig_end;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	for (i = sc->sc_nextrx;; i = SQ_NEXTRX(i)) {
	    SQ_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	    /* If this is a CPU-owned buffer, we're at the end of the list */
	    if (sc->sc_rxdesc[i].hdd_ctl & HDD_CTL_OWN) {
#if 0
		u_int32_t reg;

		reg = bus_space_read_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_CTL);
		printf("%s: rxintr: done at %d (ctl %08x)\n",
				sc->sc_dev.dv_xname, i, reg);
#endif
		break;
	    }

	    count++;

	    m = sc->sc_rxmbuf[i];
	    framelen = m->m_ext.ext_size -
			HDD_CTL_BYTECNT(sc->sc_rxdesc[i].hdd_ctl) - 3;

	    /* Now sync the actual packet data */
	    bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
			    sc->sc_rxmap[i]->dm_mapsize, BUS_DMASYNC_POSTREAD);

	    pktstat = *((u_int8_t*)m->m_data + framelen + 2);

	    if ((pktstat & RXSTAT_GOOD) == 0) {
		ifp->if_ierrors++;

		if (pktstat & RXSTAT_OFLOW)
		    printf("%s: receive FIFO overflow\n", sc->sc_dev.dv_xname);

		SQ_INIT_RXDESC(sc, i);
		continue;
	    }

	    if (sq_add_rxbuf(sc, i) != 0) {
		ifp->if_ierrors++;
		SQ_INIT_RXDESC(sc, i);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
				sc->sc_rxmap[i]->dm_mapsize,
				BUS_DMASYNC_PREREAD);
		continue;
	    }

	    m->m_data += 2;
	    m->m_pkthdr.rcvif = ifp;
	    m->m_pkthdr.len = m->m_len = framelen;

	    ifp->if_ipackets++;

#if 0
	    printf("%s: sq_rxintr: buf %d len %d\n", sc->sc_dev.dv_xname,
						     i, framelen);
#endif

#if NBPFILTER > 0
	    if (ifp->if_bpf)
		    bpf_mtap(ifp->if_bpf, m);
#endif
	    (*ifp->if_input)(ifp, m);
	}

	/* If anything happened, move ring start/end pointers to new spot */
	if (i != sc->sc_nextrx) {
	    new_end = SQ_PREVRX(i);
	    sc->sc_rxdesc[new_end].hdd_ctl |= HDD_CTL_EOCHAIN;
	    SQ_CDRXSYNC(sc, new_end, BUS_DMASYNC_PREREAD |
				     BUS_DMASYNC_PREWRITE);

	    orig_end = SQ_PREVRX(sc->sc_nextrx);
	    sc->sc_rxdesc[orig_end].hdd_ctl &= ~HDD_CTL_EOCHAIN;
	    SQ_CDRXSYNC(sc, orig_end, BUS_DMASYNC_PREREAD |
				      BUS_DMASYNC_PREWRITE);

	    sc->sc_nextrx = i;
	}

	status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
					       HPC_ENETR_CTL);

	/* If receive channel is stopped, restart it... */
	if ((status & ENETR_CTL_ACTIVE) == 0) {
	    /* Pass the start of the receive ring to the HPC */
	    bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
			      HPC_ENETR_NDBP, SQ_CDRXADDR(sc, sc->sc_nextrx));

	    /* And turn on the HPC ethernet receive channel */
	    bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_CTL,
							ENETR_CTL_ACTIVE);
	}

	return count;
}

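/*
 * Transmit interrupt: note any error status from the Seeq, then reclaim
 * descriptors from sc_prevtx forward as the HPC marks them XMITDONE,
 * restarting the DMA engine if it stopped before the ring drained.
 * Finishes by calling sq_start() to queue anything that accumulated
 * while descriptors were scarce.
 */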
static int
sq_txintr(struct sq_softc *sc)
{
	int i;
	u_int32_t status;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETX_CTL);

	SQ_TRACE(SQ_TXINTR_ENTER, sc->sc_prevtx, status, sc->sc_nfreetx);

	if ((status & (ENETX_CTL_ACTIVE | TXSTAT_GOOD)) == 0) {
		if (status & TXSTAT_COLL)
		    ifp->if_collisions++;

		if (status & TXSTAT_UFLOW) {
		    printf("%s: transmit underflow\n", sc->sc_dev.dv_xname);
		    ifp->if_oerrors++;
		}

		if (status & TXSTAT_16COLL) {
		    printf("%s: max collisions reached\n", sc->sc_dev.dv_xname);
		    ifp->if_oerrors++;
		    ifp->if_collisions += 16;
		}
	}

	i = sc->sc_prevtx;
	while (sc->sc_nfreetx < SQ_NTXDESC) {
		/*
		 * Check status first so we don't end up with a case of
		 * the buffer not being finished while the DMA channel
		 * has gone idle.
		 */
		status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
							HPC_ENETX_CTL);

		SQ_CDTXSYNC(sc, i, sc->sc_txmap[i]->dm_nsegs,
				BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		/* If not yet transmitted, try to start DMA engine again */
		if ((sc->sc_txdesc[i].hdd_ctl & HDD_CTL_XMITDONE) == 0) {
		    if ((status & ENETX_CTL_ACTIVE) == 0) {
			SQ_TRACE(SQ_RESTART_DMA, i, status, sc->sc_nfreetx);

			bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
					  HPC_ENETX_NDBP, SQ_CDTXADDR(sc, i));

			/* Kick DMA channel into life */
			bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
					  HPC_ENETX_CTL, ENETX_CTL_ACTIVE);

			/* Set a watchdog timer in case the chip flakes out. */
			ifp->if_timer = 5;
		    } else {
			SQ_TRACE(SQ_TXINTR_BUSY, i, status, sc->sc_nfreetx);
		    }
		    break;
		}

		/* Sync the packet data, unload DMA map, free mbuf */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_txmap[i], 0,
				sc->sc_txmap[i]->dm_mapsize,
				BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
		m_freem(sc->sc_txmbuf[i]);
		sc->sc_txmbuf[i] = NULL;

		ifp->if_opackets++;
		sc->sc_nfreetx++;

		SQ_TRACE(SQ_DONE_DMA, i, status, sc->sc_nfreetx);
		i = SQ_NEXTTX(i);
	}

	/* prevtx now points to next xmit packet not yet finished */
	sc->sc_prevtx = i;

	/* If we have buffers free, let upper layers know */
	if (sc->sc_nfreetx > 0)
	    ifp->if_flags &= ~IFF_OACTIVE;

	/* If all packets have left the coop, cancel watchdog */
	if (sc->sc_nfreetx == SQ_NTXDESC)
	    ifp->if_timer = 0;

	SQ_TRACE(SQ_TXINTR_EXIT, sc->sc_prevtx, status, sc->sc_nfreetx);
	sq_start(ifp);

	return 1;
}

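/*
 * Reset the HPC ethernet interface: stop both DMA channels, then pulse
 * the channel reset register -- assert (write 3), hold for 20us, and
 * release (write 0).
 */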
void
sq_reset(struct sq_softc *sc)
{
	/* Stop HPC DMA channels */
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_CTL, 0);
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETX_CTL, 0);

	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_RESET, 3);
	delay(20);
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_RESET, 0);
}

/* sq_add_rxbuf: Add a receive buffer to the indicated descriptor. */
int
sq_add_rxbuf(struct sq_softc *sc, int idx)
{
	int err;
	struct mbuf *m;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (sc->sc_rxmbuf[idx] != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rxmap[idx]);

	sc->sc_rxmbuf[idx] = m;

	if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_rxmap[idx],
				   m->m_ext.ext_buf, m->m_ext.ext_size,
				   NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, err);
		panic("sq_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[idx], 0,
			sc->sc_rxmap[idx]->dm_mapsize, BUS_DMASYNC_PREREAD);

	SQ_INIT_RXDESC(sc, idx);

	return 0;
}

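/*
 * Debugging helper: hex-dump 'len' bytes starting at physical address
 * 'addr' (mapped through KSEG1), 16 bytes per line.
 */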
void
sq_dump_buffer(u_int32_t addr, u_int32_t len)
{
	int i;
	u_char* physaddr = (u_char *) MIPS_PHYS_TO_KSEG1((caddr_t)addr);

	if (len == 0)
		return;

	printf("%p: ", physaddr);

	for (i = 0; i < len; i++) {
		printf("%02x ", *(physaddr + i) & 0xff);
		if ((i % 16) == 15 && i != len - 1)
		    printf("\n%p: ", physaddr + i + 1);
	}

	printf("\n");
}

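/*
 * Parse a colon-separated ethernet address string (as returned by the
 * ARCS "eaddr" environment variable, e.g. "08:00:69:0a:bc:de") into
 * six bytes.  Note there is no error checking: a malformed string
 * leaves the corresponding bytes indeterminate.
 */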
void
enaddr_aton(const char* str, u_int8_t* eaddr)
{
	int i;
	char c;

	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		if (*str == ':')
			str++;

		c = *str++;
		if (isdigit(c)) {
			eaddr[i] = (c - '0');
		} else if (isxdigit(c)) {
			eaddr[i] = (toupper(c) + 10 - 'A');
		}

		c = *str++;
		if (isdigit(c)) {
			eaddr[i] = (eaddr[i] << 4) | (c - '0');
		} else if (isxdigit(c)) {
			eaddr[i] = (eaddr[i] << 4) | (toupper(c) + 10 - 'A');
		}
	}
}