/*	$NetBSD: if_sq.c,v 1.6 2001/07/08 20:57:34 thorpej Exp $	*/

/*
 * Copyright (c) 2001 Rafal K. Boni
 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Portions of this code are derived from software contributed to The
 * NetBSD Foundation by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/syslog.h>

#include <uvm/uvm_extern.h>

#include <machine/endian.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

/* XXXrkb: cheap hack until parents pass in DMA tags */
#define _SGIMIPS_BUS_DMA_PRIVATE

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/ic/seeq8003reg.h>

#include <sgimips/hpc/sqvar.h>
#include <sgimips/hpc/hpcvar.h>
#include <sgimips/hpc/hpcreg.h>

#include <dev/arcbios/arcbios.h>
#include <dev/arcbios/arcbiosvar.h>

/*
 * XXX Debugging hack: defining `static' away makes the driver's
 * internal functions and data visible to the kernel debugger.
 */
#define static

/*
 * Short TODO list:
 *	(1) Do counters for bad-RX packets.
 *	(2) Inherit DMA tag via config machinery, don't hard-code it.
 *	(3) Allow multi-segment transmits, instead of copying to a single,
 *	    contiguous mbuf.
 *	(4) Verify sq_stop() turns off enough stuff; I was still getting
 *	    seeq interrupts after sq_stop().
 *	(5) Fix up printfs in driver (most should only fire ifdef SQ_DEBUG
 *	    or something similar).
 *	(6) Implement EDLC modes: especially packet auto-pad and simplex
 *	    mode.
 *	(7) Should the driver filter out its own transmissions in non-EDLC
 *	    mode?
 *	(8) Multicast support -- multicast filter, address management, ...
 *	(9) Deal with RB0 (recv buffer overflow) on reception.  Will need
 *	    to figure out if RB0 is read-only as stated in one spot in the
 *	    HPC spec, or read-write (i.e., is 'write a one to clear it'
 *	    the correct thing)?
 */

static int	sq_match(struct device *, struct cfdata *, void *);
static void	sq_attach(struct device *, struct device *, void *);
static int	sq_init(struct ifnet *);
static void	sq_start(struct ifnet *);
static void	sq_stop(struct ifnet *, int);
static void	sq_watchdog(struct ifnet *);
static int	sq_ioctl(struct ifnet *, u_long, caddr_t);

static void	sq_set_filter(struct sq_softc *);
static int	sq_intr(void *);
static int	sq_rxintr(struct sq_softc *);
static int	sq_txintr(struct sq_softc *);
static void	sq_reset(struct sq_softc *);
static int	sq_add_rxbuf(struct sq_softc *, int);
static void	sq_dump_buffer(u_int32_t addr, u_int32_t len);

static void	enaddr_aton(const char*, u_int8_t*);

/* Actions */
#define SQ_RESET		1
#define SQ_ADD_TO_DMA		2
#define SQ_START_DMA		3
#define SQ_DONE_DMA		4
#define SQ_RESTART_DMA		5
#define SQ_TXINTR_ENTER		6
#define SQ_TXINTR_EXIT		7
#define SQ_TXINTR_BUSY		8
struct sq_action_trace {
	int action;
	int bufno;
	int status;
	int freebuf;
};

#define SQ_TRACEBUF_SIZE	100
int sq_trace_idx = 0;
struct sq_action_trace sq_trace[SQ_TRACEBUF_SIZE];

void sq_trace_dump(struct sq_softc* sc);

#define SQ_TRACE(act, buf, stat, free) do {				\
	sq_trace[sq_trace_idx].action = (act);				\
	sq_trace[sq_trace_idx].bufno = (buf);				\
	sq_trace[sq_trace_idx].status = (stat);				\
	sq_trace[sq_trace_idx].freebuf = (free);			\
	if (++sq_trace_idx == SQ_TRACEBUF_SIZE) {			\
		bzero(&sq_trace, sizeof(sq_trace));			\
		sq_trace_idx = 0;					\
	}								\
} while (0)

struct cfattach sq_ca = {
	sizeof(struct sq_softc), sq_match, sq_attach
};

static int
sq_match(struct device *parent, struct cfdata *match, void *aux)
{
	/* XXX! */
	return 1;
}

static void
sq_attach(struct device *parent, struct device *self, void *aux)
{
	int i, err;
	char* macaddr;
	struct sq_softc *sc = (void *)self;
	struct hpc_attach_args *haa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	sc->sc_hpct = haa->ha_iot;
	if ((err = bus_space_subregion(haa->ha_iot, haa->ha_ioh,
				       HPC_ENET_REGS,
				       HPC_ENET_REGS_SIZE,
				       &sc->sc_hpch)) != 0) {
		printf(": unable to map HPC DMA registers, error = %d\n", err);
		goto fail_0;
	}

	sc->sc_regt = haa->ha_iot;
	if ((err = bus_space_subregion(haa->ha_iot, haa->ha_ioh,
				       HPC_ENET_DEVREGS,
				       HPC_ENET_DEVREGS_SIZE,
				       &sc->sc_regh)) != 0) {
		printf(": unable to map Seeq registers, error = %d\n", err);
		goto fail_0;
	}

	/* XXXrkb: should be inherited from parent bus, but works for now */
	sc->sc_dmat = &sgimips_default_bus_dma_tag;

	if ((err = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct sq_control),
				    PAGE_SIZE, PAGE_SIZE, &sc->sc_cdseg,
				    1, &sc->sc_ncdseg, BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to allocate control data, error = %d\n", err);
		goto fail_0;
	}

	if ((err = bus_dmamem_map(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg,
				  sizeof(struct sq_control),
				  (caddr_t *)&sc->sc_control,
				  BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf(": unable to map control data, error = %d\n", err);
		goto fail_1;
	}

	if ((err = bus_dmamap_create(sc->sc_dmat, sizeof(struct sq_control),
				     1, sizeof(struct sq_control), PAGE_SIZE,
				     BUS_DMA_NOWAIT, &sc->sc_cdmap)) != 0) {
		printf(": unable to create DMA map for control data, error "
			"= %d\n", err);
		goto fail_2;
	}

	if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_cdmap, sc->sc_control,
				   sizeof(struct sq_control),
				   NULL, BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to load DMA map for control data, error "
			"= %d\n", err);
		goto fail_3;
	}

	bzero(sc->sc_control, sizeof(struct sq_control));

	/* Create transmit buffer DMA maps */
	for (i = 0; i < SQ_NTXDESC; i++) {
	    if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
					 0, BUS_DMA_NOWAIT,
					 &sc->sc_txmap[i])) != 0) {
		    printf(": unable to create tx DMA map %d, error = %d\n",
			   i, err);
		    goto fail_4;
	    }
	}

	/* Create receive buffer DMA maps */
	for (i = 0; i < SQ_NRXDESC; i++) {
	    if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES,
					 0, BUS_DMA_NOWAIT,
					 &sc->sc_rxmap[i])) != 0) {
		    printf(": unable to create rx DMA map %d, error = %d\n",
			   i, err);
		    goto fail_5;
	    }
	}

	/* Pre-allocate the receive buffers.  */
	for (i = 0; i < SQ_NRXDESC; i++) {
		if ((err = sq_add_rxbuf(sc, i)) != 0) {
			printf(": unable to allocate or map rx buffer %d, "
			       "error = %d\n", i, err);
			goto fail_6;
		}
	}

	if ((macaddr = ARCBIOS->GetEnvironmentVariable("eaddr")) == NULL) {
		printf(": unable to get MAC address!\n");
		goto fail_6;
	}

	if ((cpu_intr_establish(3, IPL_NET, sq_intr, sc)) == NULL) {
		printf(": unable to establish interrupt!\n");
		goto fail_6;
	}

	/* Reset the chip to a known state. */
	sq_reset(sc);

	/*
	 * Determine if we're an 8003 or 80c03 by setting the first
	 * MAC address register to non-zero, and then reading it back.
	 * If it's zero, we have an 80c03, because we will have read
	 * the TxCollLSB register.
	 */
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCOLLS0, 0xa5);
	if (bus_space_read_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCOLLS0) == 0)
		sc->sc_type = SQ_TYPE_80C03;
	else
		sc->sc_type = SQ_TYPE_8003;
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCOLLS0, 0x00);

	printf(": SGI Seeq %s\n",
	    sc->sc_type == SQ_TYPE_80C03 ? "80c03" : "8003");

	enaddr_aton(macaddr, sc->sc_enaddr);

	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
					   ether_sprintf(sc->sc_enaddr));

	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_init = sq_init;
	ifp->if_stop = sq_stop;
	ifp->if_start = sq_start;
	ifp->if_ioctl = sq_ioctl;
	ifp->if_watchdog = sq_watchdog;
	ifp->if_flags = IFF_BROADCAST | IFF_NOTRAILERS | IFF_MULTICAST;
	IFQ_SET_READY(&ifp->if_snd);

	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);

	bzero(&sq_trace, sizeof(sq_trace));
	/* Done! */
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
fail_6:
	for (i = 0; i < SQ_NRXDESC; i++) {
		if (sc->sc_rxmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rxmap[i]);
			m_freem(sc->sc_rxmbuf[i]);
		}
	}
fail_5:
	for (i = 0; i < SQ_NRXDESC; i++) {
	    if (sc->sc_rxmap[i] != NULL)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxmap[i]);
	}
fail_4:
	for (i = 0; i < SQ_NTXDESC; i++) {
	    if (sc->sc_txmap[i] != NULL)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_txmap[i]);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cdmap);
fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdmap);
fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t) sc->sc_control,
				      sizeof(struct sq_control));
fail_1:
	bus_dmamem_free(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg);
fail_0:
	return;
}

/* Set up data to get the interface up and running. */
int
sq_init(struct ifnet *ifp)
{
	int i;
	u_int32_t reg;
	struct sq_softc *sc = ifp->if_softc;

	/* Cancel any in-progress I/O */
	sq_stop(ifp, 0);

	sc->sc_nextrx = 0;

	sc->sc_nfreetx = SQ_NTXDESC;
	sc->sc_nexttx = sc->sc_prevtx = 0;

	SQ_TRACE(SQ_RESET, 0, 0, sc->sc_nfreetx);

	/* Set into 8003 mode, bank 0 to program ethernet address */
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCMD, TXCMD_BANK0);

	/* Now write the address */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		bus_space_write_1(sc->sc_regt, sc->sc_regh, i,
		    sc->sc_enaddr[i]);

	sc->sc_rxcmd = RXCMD_IE_CRC |
		       RXCMD_IE_DRIB |
		       RXCMD_IE_SHORT |
		       RXCMD_IE_END |
		       RXCMD_IE_GOOD;

	/*
	 * Set the receive filter -- this will add some bits to the
	 * prototype RXCMD register.  Do this before setting the
	 * transmit config register, since we might need to switch
	 * banks.
	 */
	sq_set_filter(sc);

	/* Set up Seeq transmit command register */
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCMD,
						    TXCMD_IE_UFLOW |
						    TXCMD_IE_COLL |
						    TXCMD_IE_16COLL |
						    TXCMD_IE_GOOD);

	/* Now write the receive command register. */
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_RXCMD, sc->sc_rxcmd);

	/* Set up HPC ethernet DMA config */
	reg = bus_space_read_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_DMACFG);
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_DMACFG,
				reg | ENETR_DMACFG_FIX_RXDC |
				ENETR_DMACFG_FIX_INTR |
				ENETR_DMACFG_FIX_EOP);

	/* Pass the start of the receive ring to the HPC */
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_NDBP,
						    SQ_CDRXADDR(sc, 0));

	/* And turn on the HPC ethernet receive channel */
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_CTL,
						    ENETR_CTL_ACTIVE);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

static void
sq_set_filter(struct sq_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;

	/*
	 * Check for promiscuous mode.  Also implies
	 * all-multicast.
	 */
	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_rxcmd |= RXCMD_REC_ALL;
		ifp->if_flags |= IFF_ALLMULTI;
		return;
	}

	/*
	 * The 8003 has no hash table.  If we have any multicast
	 * addresses on the list, enable reception of all multicast
	 * frames.
	 *
	 * XXX The 80c03 has a hash table.  We should use it.
	 */

	ETHER_FIRST_MULTI(step, ec, enm);

	if (enm == NULL) {
		sc->sc_rxcmd |= RXCMD_REC_BROAD;
		return;
	}

	sc->sc_rxcmd |= RXCMD_REC_MULTI;
	ifp->if_flags |= IFF_ALLMULTI;
}

int
sq_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	int s, error = 0;

	s = splnet();

	error = ether_ioctl(ifp, cmd, data);
	if (error == ENETRESET) {
		/*
		 * Multicast list has changed; set the hardware filter
		 * accordingly.
		 */
		error = sq_init(ifp);
	}

	splx(s);
	return (error);
}

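/*
 * Start output: map packets from the send queue onto the transmit
 * descriptor ring, then kick the HPC transmit DMA channel if it is
 * not already running.
 */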
void
sq_start(struct ifnet *ifp)
{
	struct sq_softc *sc = ifp->if_softc;
	u_int32_t status;
	struct mbuf *m0, *m;
	bus_dmamap_t dmamap;
	int err, totlen, nexttx, firsttx, lasttx, ofree, seg;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors and
	 * the first descriptor we'll use.
	 */
	ofree = sc->sc_nfreetx;
	firsttx = sc->sc_nexttx;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (sc->sc_nfreetx != 0) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		dmamap = sc->sc_txmap[sc->sc_nexttx];

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.
		 */
		if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
						      BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->sc_dev.dv_xname);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->sc_dev.dv_xname);
					m_freem(m);
					break;
				}
			}

			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;

			if ((err = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
						m, BUS_DMA_NOWAIT)) != 0) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->sc_dev.dv_xname, err);
				break;
			}
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.
		 */
		if (dmamap->dm_nsegs > sc->sc_nfreetx) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed to anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 *
			 * XXX We could allocate an mbuf and copy, but
			 * XXX is it worth it?
			 */
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			if (m != NULL)
				m_freem(m);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Initialize the transmit descriptors.
		 */
		for (nexttx = sc->sc_nexttx, seg = 0, totlen = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = SQ_NEXTTX(nexttx)) {
			sc->sc_txdesc[nexttx].hdd_bufptr =
					    dmamap->dm_segs[seg].ds_addr;
			sc->sc_txdesc[nexttx].hdd_ctl =
					    dmamap->dm_segs[seg].ds_len;
			sc->sc_txdesc[nexttx].hdd_descptr =
					    SQ_CDTXADDR(sc, SQ_NEXTTX(nexttx));
			lasttx = nexttx;
			totlen += dmamap->dm_segs[seg].ds_len;
		}

		/* Last descriptor gets end-of-packet */
		sc->sc_txdesc[lasttx].hdd_ctl |= HDD_CTL_EOPACKET;

		/* XXXrkb: if not EDLC, pad to min len manually */
		if (totlen < ETHER_MIN_LEN) {
		    sc->sc_txdesc[lasttx].hdd_ctl += (ETHER_MIN_LEN - totlen);
		    totlen = ETHER_MIN_LEN;
		}

#if 0
		printf("%s: transmit %d-%d, len %d\n", sc->sc_dev.dv_xname,
						       sc->sc_nexttx, lasttx,
						       totlen);
#endif

		if (ifp->if_flags & IFF_DEBUG) {
			printf("     transmit chain:\n");
			for (seg = sc->sc_nexttx;; seg = SQ_NEXTTX(seg)) {
				printf("     descriptor %d:\n", seg);
				printf("       hdd_bufptr:      0x%08x\n",
					sc->sc_txdesc[seg].hdd_bufptr);
				printf("       hdd_ctl: 0x%08x\n",
					sc->sc_txdesc[seg].hdd_ctl);
				printf("       hdd_descptr:      0x%08x\n",
					sc->sc_txdesc[seg].hdd_descptr);

				if (seg == lasttx)
					break;
			}
		}

		/* Sync the descriptors we're using. */
		SQ_CDTXSYNC(sc, sc->sc_nexttx, dmamap->dm_nsegs,
				BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Store a pointer to the packet so we can free it later */
		sc->sc_txmbuf[sc->sc_nexttx] = m0;

		/* Advance the tx pointer. */
		sc->sc_nfreetx -= dmamap->dm_nsegs;
		sc->sc_nexttx = nexttx;

#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}

	/* All transmit descriptors used up, let upper layers know */
	if (sc->sc_nfreetx == 0)
		ifp->if_flags |= IFF_OACTIVE;

	if (sc->sc_nfreetx != ofree) {
#if 0
		printf("%s: %d packets enqueued, first %d, INTR on %d\n",
			    sc->sc_dev.dv_xname, lasttx - firsttx + 1,
			    firsttx, lasttx);
#endif

		/*
		 * Cause a transmit interrupt to happen on the
		 * last packet we enqueued, and mark it as the last
		 * descriptor.
		 */
		sc->sc_txdesc[lasttx].hdd_ctl |= (HDD_CTL_INTR |
						  HDD_CTL_EOCHAIN);
		SQ_CDTXSYNC(sc, lasttx, 1,
				BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/*
		 * There is a potential race condition here if the HPC
		 * DMA channel is active and we try to either update
		 * the 'next descriptor' pointer in the HPC PIO space
		 * or the 'next descriptor' pointer in a previous
		 * descriptor.
		 *
		 * To avoid this, if the channel is active, we rely on
		 * the transmit interrupt routine noticing that there
		 * are more packets to send and restarting the HPC DMA
		 * engine, rather than mucking with the DMA state here.
		 */
		status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
						       HPC_ENETX_CTL);

		if ((status & ENETX_CTL_ACTIVE) != 0) {
			SQ_TRACE(SQ_ADD_TO_DMA, firsttx, status,
			    sc->sc_nfreetx);
			sc->sc_txdesc[SQ_PREVTX(firsttx)].hdd_ctl &=
			    ~HDD_CTL_EOCHAIN;
			SQ_CDTXSYNC(sc, SQ_PREVTX(firsttx), 1,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		} else {
			SQ_TRACE(SQ_START_DMA, firsttx, status, sc->sc_nfreetx);

			bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
			    HPC_ENETX_NDBP, SQ_CDTXADDR(sc, firsttx));

			/* Kick DMA channel into life */
			bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
			    HPC_ENETX_CTL, ENETX_CTL_ACTIVE);
		}

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

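/*
 * Stop the interface: free any pending transmit mbufs, quiesce the
 * Seeq chip, and reset the HPC DMA channels.
 */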
void
sq_stop(struct ifnet *ifp, int disable)
{
	int i;
	struct sq_softc *sc = ifp->if_softc;

	for (i = 0; i < SQ_NTXDESC; i++) {
		if (sc->sc_txmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
			m_freem(sc->sc_txmbuf[i]);
			sc->sc_txmbuf[i] = NULL;
		}
	}

	/* Clear Seeq transmit/receive command registers */
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCMD, 0);
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_RXCMD, 0);

	sq_reset(sc);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}

/* Device timeout/watchdog routine. */
void
sq_watchdog(struct ifnet *ifp)
{
	u_int32_t status;
	struct sq_softc *sc = ifp->if_softc;

	status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETX_CTL);
	log(LOG_ERR, "%s: device timeout (prev %d, next %d, free %d, "
		     "status %08x)\n", sc->sc_dev.dv_xname, sc->sc_prevtx,
				       sc->sc_nexttx, sc->sc_nfreetx, status);

	sq_trace_dump(sc);

	bzero(&sq_trace, sizeof(sq_trace));
	sq_trace_idx = 0;

	++ifp->if_oerrors;

	sq_init(ifp);
}

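/* Print the contents of the action-trace ring; called on device timeout. */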
void
sq_trace_dump(struct sq_softc* sc)
{
	int i;

	for (i = 0; i < sq_trace_idx; i++) {
		printf("%s: [%d] action %d, buf %d, free %d, status %08x\n",
			sc->sc_dev.dv_xname, i, sq_trace[i].action,
			sq_trace[i].bufno, sq_trace[i].freebuf,
			sq_trace[i].status);
	}
}

static int
sq_intr(void *arg)
{
	struct sq_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int handled = 0;
	u_int32_t stat;

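	/*
	 * XXX Bit 1 of the HPC reset register is assumed to be the
	 * interrupt-pending flag; writing it back below acknowledges
	 * the interrupt.
	 */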
	stat = bus_space_read_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_RESET);

	if ((stat & 2) == 0) {
		printf("%s: Unexpected interrupt!\n", sc->sc_dev.dv_xname);
		return 0;
	}

	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_RESET, 2);

	/*
	 * If the interface isn't running, the interrupt couldn't
	 * possibly have come from us.
	 */
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return 0;

	/* Always check for received packets */
	if (sq_rxintr(sc) != 0)
		handled++;

	/* Only handle transmit interrupts if we actually sent something */
	if (sc->sc_nfreetx < SQ_NTXDESC) {
		sq_txintr(sc);
		handled++;
	}

#if NRND > 0
	if (handled)
		rnd_add_uint32(&sc->rnd_source, stat);
#endif
	return (handled);
}

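/*
 * Receive interrupt: walk the receive ring up to the first CPU-owned
 * descriptor, passing good packets up the stack and recycling bad
 * ones, then restart the receive DMA channel if it has stopped.
 */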
static int
sq_rxintr(struct sq_softc *sc)
{
	int count = 0;
	struct mbuf* m;
	int i, framelen;
	u_int8_t pktstat;
	u_int32_t status;
	int new_end, orig_end;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	for (i = sc->sc_nextrx;; i = SQ_NEXTRX(i)) {
	    SQ_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	    /* If this is a CPU-owned buffer, we're at the end of the list */
	    if (sc->sc_rxdesc[i].hdd_ctl & HDD_CTL_OWN) {
#if 0
		u_int32_t reg;

		reg = bus_space_read_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_CTL);
		printf("%s: rxintr: done at %d (ctl %08x)\n",
				sc->sc_dev.dv_xname, i, reg);
#endif
		break;
	    }

	    count++;

	    m = sc->sc_rxmbuf[i];
	    framelen = m->m_ext.ext_size -
			HDD_CTL_BYTECNT(sc->sc_rxdesc[i].hdd_ctl) - 3;

	    /* Now sync the actual packet data */
	    bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
			    sc->sc_rxmap[i]->dm_mapsize, BUS_DMASYNC_POSTREAD);

	    pktstat = *((u_int8_t*)m->m_data + framelen + 2);

	    if ((pktstat & RXSTAT_GOOD) == 0) {
		ifp->if_ierrors++;

		if (pktstat & RXSTAT_OFLOW)
		    printf("%s: receive FIFO overflow\n", sc->sc_dev.dv_xname);

		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
				sc->sc_rxmap[i]->dm_mapsize,
				BUS_DMASYNC_PREREAD);
		SQ_INIT_RXDESC(sc, i);
		continue;
	    }

	    if (sq_add_rxbuf(sc, i) != 0) {
		ifp->if_ierrors++;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
				sc->sc_rxmap[i]->dm_mapsize,
				BUS_DMASYNC_PREREAD);
		SQ_INIT_RXDESC(sc, i);
		continue;
	    }

	    m->m_data += 2;
	    m->m_pkthdr.rcvif = ifp;
	    m->m_pkthdr.len = m->m_len = framelen;

	    ifp->if_ipackets++;

#if 0
	    printf("%s: sq_rxintr: buf %d len %d\n", sc->sc_dev.dv_xname,
						     i, framelen);
#endif

#if NBPFILTER > 0
	    if (ifp->if_bpf)
		    bpf_mtap(ifp->if_bpf, m);
#endif
	    (*ifp->if_input)(ifp, m);
	}

	/* If anything happened, move ring start/end pointers to new spot */
	if (i != sc->sc_nextrx) {
	    new_end = SQ_PREVRX(i);
	    sc->sc_rxdesc[new_end].hdd_ctl |= HDD_CTL_EOCHAIN;
	    SQ_CDRXSYNC(sc, new_end, BUS_DMASYNC_PREREAD |
				     BUS_DMASYNC_PREWRITE);

	    orig_end = SQ_PREVRX(sc->sc_nextrx);
	    sc->sc_rxdesc[orig_end].hdd_ctl &= ~HDD_CTL_EOCHAIN;
	    SQ_CDRXSYNC(sc, orig_end, BUS_DMASYNC_PREREAD |
				      BUS_DMASYNC_PREWRITE);

	    sc->sc_nextrx = i;
	}

	status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
					       HPC_ENETR_CTL);

	/* If receive channel is stopped, restart it... */
	if ((status & ENETR_CTL_ACTIVE) == 0) {
	    /* Pass the start of the receive ring to the HPC */
	    bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
			      HPC_ENETR_NDBP, SQ_CDRXADDR(sc, sc->sc_nextrx));

	    /* And turn on the HPC ethernet receive channel */
	    bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_CTL,
							ENETR_CTL_ACTIVE);
	}

	return count;
}

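/*
 * Transmit interrupt: record error statistics, reclaim completed
 * transmit descriptors, and restart the transmit DMA engine if it
 * went idle with descriptors still outstanding.
 */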
static int
sq_txintr(struct sq_softc *sc)
{
	int i;
	u_int32_t status;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETX_CTL);

	SQ_TRACE(SQ_TXINTR_ENTER, sc->sc_prevtx, status, sc->sc_nfreetx);

	if ((status & (ENETX_CTL_ACTIVE | TXSTAT_GOOD)) == 0) {
		if (status & TXSTAT_COLL)
		    ifp->if_collisions++;

		if (status & TXSTAT_UFLOW) {
		    printf("%s: transmit underflow\n", sc->sc_dev.dv_xname);
		    ifp->if_oerrors++;
		}

		if (status & TXSTAT_16COLL) {
		    printf("%s: max collisions reached\n", sc->sc_dev.dv_xname);
		    ifp->if_oerrors++;
		    ifp->if_collisions += 16;
		}
	}

	i = sc->sc_prevtx;
	while (sc->sc_nfreetx < SQ_NTXDESC) {
		/*
		 * Check status first so we don't end up with a case of
		 * the buffer not being finished while the DMA channel
		 * has gone idle.
		 */
		status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
							HPC_ENETX_CTL);

		SQ_CDTXSYNC(sc, i, sc->sc_txmap[i]->dm_nsegs,
				BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		/* If not yet transmitted, try and start DMA engine again */
		if ((sc->sc_txdesc[i].hdd_ctl & HDD_CTL_XMITDONE) == 0) {
		    if ((status & ENETX_CTL_ACTIVE) == 0) {
			SQ_TRACE(SQ_RESTART_DMA, i, status, sc->sc_nfreetx);

			bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
					  HPC_ENETX_NDBP, SQ_CDTXADDR(sc, i));

			/* Kick DMA channel into life */
			bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
					  HPC_ENETX_CTL, ENETX_CTL_ACTIVE);

			/* Set a watchdog timer in case the chip flakes out. */
			ifp->if_timer = 5;
		    } else {
			SQ_TRACE(SQ_TXINTR_BUSY, i, status, sc->sc_nfreetx);
		    }
		    break;
		}

		/* Sync the packet data, unload DMA map, free mbuf */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_txmap[i], 0,
				sc->sc_txmap[i]->dm_mapsize,
				BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
		m_freem(sc->sc_txmbuf[i]);
		sc->sc_txmbuf[i] = NULL;

		ifp->if_opackets++;
		sc->sc_nfreetx++;

		SQ_TRACE(SQ_DONE_DMA, i, status, sc->sc_nfreetx);
		i = SQ_NEXTTX(i);
	}

	/* prevtx now points to next xmit packet not yet finished */
	sc->sc_prevtx = i;

	/* If we have buffers free, let upper layers know */
	if (sc->sc_nfreetx > 0)
	    ifp->if_flags &= ~IFF_OACTIVE;

	/* If all packets have left the coop, cancel watchdog */
	if (sc->sc_nfreetx == SQ_NTXDESC)
	    ifp->if_timer = 0;

	SQ_TRACE(SQ_TXINTR_EXIT, sc->sc_prevtx, status, sc->sc_nfreetx);
	sq_start(ifp);

	return 1;
}

void
sq_reset(struct sq_softc *sc)
{
	/* Stop HPC dma channels */
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_CTL, 0);
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETX_CTL, 0);

	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_RESET, 3);
	delay(20);
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_RESET, 0);
}

/* sq_add_rxbuf: Add a receive buffer to the indicated descriptor.  */
int
sq_add_rxbuf(struct sq_softc *sc, int idx)
{
	int err;
	struct mbuf *m;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (sc->sc_rxmbuf[idx] != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rxmap[idx]);

	sc->sc_rxmbuf[idx] = m;

	if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_rxmap[idx],
				   m->m_ext.ext_buf, m->m_ext.ext_size,
				   NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, err);
		panic("sq_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[idx], 0,
			sc->sc_rxmap[idx]->dm_mapsize, BUS_DMASYNC_PREREAD);

	SQ_INIT_RXDESC(sc, idx);

	return 0;
}

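/* Hex-dump `len' bytes starting at physical address `addr' (debug aid). */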
void
sq_dump_buffer(u_int32_t addr, u_int32_t len)
{
	int i;
	u_char* physaddr = (u_char *) MIPS_PHYS_TO_KSEG1((caddr_t)addr);

	if (len == 0)
		return;

	printf("%p: ", physaddr);

	for (i = 0; i < len; i++) {
		printf("%02x ", *(physaddr + i) & 0xff);
		if ((i % 16) == 15 && i != len - 1)
		    printf("\n%p: ", physaddr + i);
	}

	printf("\n");
}

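/*
 * Parse a colon-separated Ethernet address string, e.g. "08:00:69:aa:bb:cc",
 * into a 6-byte binary address.  Malformed input is not detected.
 */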
void
enaddr_aton(const char* str, u_int8_t* eaddr)
{
	int i;
	char c;

	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		if (*str == ':')
			str++;

		c = *str++;
		if (isdigit(c)) {
			eaddr[i] = (c - '0');
		} else if (isxdigit(c)) {
			eaddr[i] = (toupper(c) + 10 - 'A');
		}

		c = *str++;
		if (isdigit(c)) {
			eaddr[i] = (eaddr[i] << 4) | (c - '0');
		} else if (isxdigit(c)) {
			eaddr[i] = (eaddr[i] << 4) | (toupper(c) + 10 - 'A');
		}
	}
}
