1/*	$OpenBSD: qe.c,v 1.43 2022/10/16 01:22:40 jsg Exp $	*/
2/*	$NetBSD: qe.c,v 1.16 2001/03/30 17:30:18 christos Exp $	*/
3
4/*-
5 * Copyright (c) 1999 The NetBSD Foundation, Inc.
6 * All rights reserved.
7 *
8 * This code is derived from software contributed to The NetBSD Foundation
9 * by Paul Kranenburg.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 *    notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 *    notice, this list of conditions and the following disclaimer in the
18 *    documentation and/or other materials provided with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE.
31 */
32
33/*
34 * Copyright (c) 1998 Jason L. Wright.
35 * All rights reserved.
36 *
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
39 * are met:
40 * 1. Redistributions of source code must retain the above copyright
41 *    notice, this list of conditions and the following disclaimer.
42 * 2. Redistributions in binary form must reproduce the above copyright
43 *    notice, this list of conditions and the following disclaimer in the
44 *    documentation and/or other materials provided with the distribution.
45 *
46 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
47 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
48 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
49 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
50 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
51 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
52 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
53 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
54 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
55 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
56 */
57
58/*
59 * Driver for the SBus qec+qe QuadEthernet board.
60 *
61 * This driver was written using the AMD MACE Am79C940 documentation, some
62 * ideas gleaned from the S/Linux driver for this card, Solaris header files,
63 * and a loan of a card from Paul Southworth of the Internet Engineering
64 * Group (www.ieng.com).
65 */
66
67#define QEDEBUG
68
69#include "bpfilter.h"
70
71#include <sys/param.h>
72#include <sys/systm.h>
73#include <sys/kernel.h>
74#include <sys/errno.h>
75#include <sys/ioctl.h>
76#include <sys/mbuf.h>
77#include <sys/socket.h>
78#include <sys/syslog.h>
79#include <sys/device.h>
80#include <sys/malloc.h>
81
82#include <net/if.h>
83#include <net/if_media.h>
84
85#include <netinet/in.h>
86#include <netinet/if_ether.h>
87
88#if NBPFILTER > 0
89#include <net/bpf.h>
90#endif
91
92#include <machine/bus.h>
93#include <machine/intr.h>
94#include <machine/autoconf.h>
95
96#include <dev/sbus/sbusvar.h>
97#include <dev/sbus/qecreg.h>
98#include <dev/sbus/qecvar.h>
99#include <dev/sbus/qereg.h>
100
/*
 * Software state for one qe channel attached below a QEC.
 */
struct qe_softc {
	struct	device	sc_dev;		/* base device */
	bus_space_tag_t	sc_bustag;	/* bus & dma tags */
	bus_dma_tag_t	sc_dmatag;
	bus_dmamap_t	sc_dmamap;	/* map for the ring/buffer memory */
	struct	arpcom sc_arpcom;	/* ethernet common part */
	struct	ifmedia sc_ifmedia;	/* interface media */

	struct	qec_softc *sc_qec;	/* QEC parent */

	bus_space_handle_t	sc_qr;	/* QEC registers */
	bus_space_handle_t	sc_mr;	/* MACE registers */
	bus_space_handle_t	sc_cr;	/* channel registers */

	int	sc_channel;		/* channel number */
	u_int	sc_rev;			/* board revision */

	int	sc_burst;		/* copied from QEC parent */

	struct  qec_ring	sc_rb;	/* Packet Ring Buffer */

#ifdef QEDEBUG
	int	sc_debug;		/* mirrors IFF_DEBUG; see qeioctl() */
#endif
};
126
127int	qematch(struct device *, void *, void *);
128void	qeattach(struct device *, struct device *, void *);
129
130void	qeinit(struct qe_softc *);
131void	qestart(struct ifnet *);
132void	qestop(struct qe_softc *);
133void	qewatchdog(struct ifnet *);
134int	qeioctl(struct ifnet *, u_long, caddr_t);
135void	qereset(struct qe_softc *);
136
137int	qeintr(void *);
138int	qe_eint(struct qe_softc *, u_int32_t);
139int	qe_rint(struct qe_softc *);
140int	qe_tint(struct qe_softc *);
141void	qe_mcreset(struct qe_softc *);
142
143int	qe_put(struct qe_softc *, int, struct mbuf *);
144void	qe_read(struct qe_softc *, int, int);
145struct mbuf	*qe_get(struct qe_softc *, int, int);
146
147/* ifmedia callbacks */
148void	qe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
149int	qe_ifmedia_upd(struct ifnet *);
150
/* Autoconf glue: softc size plus match/attach entry points. */
const struct cfattach qe_ca = {
	sizeof(struct qe_softc), qematch, qeattach
};
154
/* Driver definition: name "qe", network interface device class. */
struct cfdriver qe_cd = {
	NULL, "qe", DV_IFNET
};
158
159int
160qematch(struct device *parent, void *vcf, void *aux)
161{
162	struct cfdata *cf = vcf;
163	struct sbus_attach_args *sa = aux;
164
165	return (strcmp(cf->cf_driver->cd_name, sa->sa_name) == 0);
166}
167
/*
 * Attach one qe channel below its QEC parent: map the channel and MACE
 * register sets, reset the chip, hook up the interrupt, allocate and
 * load the descriptor/buffer DMA memory, and attach the interface.
 */
void
qeattach(struct device *parent, struct device *self, void *aux)
{
	struct sbus_attach_args *sa = aux;
	struct qec_softc *qec = (struct qec_softc *)parent;
	struct qe_softc *sc = (struct qe_softc *)self;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int node = sa->sa_node;
	bus_dma_tag_t dmatag = sa->sa_dmatag;
	bus_dma_segment_t seg;
	bus_size_t size;
	int rseg, error;
	extern void myetheraddr(u_char *);

	/* Pass on the bus tags */
	sc->sc_bustag = sa->sa_bustag;
	sc->sc_dmatag = sa->sa_dmatag;

	/* Two register sets expected: channel (reg[0]) and MACE (reg[1]). */
	if (sa->sa_nreg < 2) {
		printf("%s: only %d register sets\n",
		    self->dv_xname, sa->sa_nreg);
		return;
	}

	if (sbus_bus_map(sa->sa_bustag, sa->sa_reg[0].sbr_slot,
	    (bus_addr_t)sa->sa_reg[0].sbr_offset,
	    (bus_size_t)sa->sa_reg[0].sbr_size, 0, 0, &sc->sc_cr) != 0) {
		printf("%s: cannot map registers\n", self->dv_xname);
		return;
	}

	if (sbus_bus_map(sa->sa_bustag, sa->sa_reg[1].sbr_slot,
	    (bus_addr_t)sa->sa_reg[1].sbr_offset,
	    (bus_size_t)sa->sa_reg[1].sbr_size, 0, 0, &sc->sc_mr) != 0) {
		printf("%s: cannot map registers\n", self->dv_xname);
		return;
	}

	sc->sc_rev = getpropint(node, "mace-version", -1);
	printf(" rev %x", sc->sc_rev);

	sc->sc_qec = qec;
	sc->sc_qr = qec->sc_regs;

	sc->sc_channel = getpropint(node, "channel#", -1);
	sc->sc_burst = qec->sc_burst;

	/* Quiesce the hardware before hooking up the interrupt. */
	qestop(sc);

	/* Note: no interrupt level passed */
	if (bus_intr_establish(sa->sa_bustag, 0, IPL_NET, 0, qeintr, sc,
	    self->dv_xname) == NULL) {
		printf(": no interrupt established\n");
		return;
	}

	myetheraddr(sc->sc_arpcom.ac_enaddr);

	/*
	 * Allocate descriptor ring and buffers.
	 */

	/* for now, allocate as many bufs as there are ring descriptors */
	sc->sc_rb.rb_ntbuf = QEC_XD_RING_MAXSIZE;
	sc->sc_rb.rb_nrbuf = QEC_XD_RING_MAXSIZE;

	/* One contiguous chunk: tx ring + rx ring + tx bufs + rx bufs. */
	size =
	    QEC_XD_RING_MAXSIZE * sizeof(struct qec_xd) +
	    QEC_XD_RING_MAXSIZE * sizeof(struct qec_xd) +
	    sc->sc_rb.rb_ntbuf * QE_PKT_BUF_SZ +
	    sc->sc_rb.rb_nrbuf * QE_PKT_BUF_SZ;

	/* Get a DMA handle */
	if ((error = bus_dmamap_create(dmatag, size, 1, size, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
		printf("%s: DMA map create error %d\n", self->dv_xname, error);
		return;
	}

	/*
	 * NOTE(review): the error paths below never bus_dmamap_destroy()
	 * the map created above -- harmless for an attach-time failure,
	 * but worth confirming against other sbus drivers.
	 */

	/* Allocate DMA buffer */
	if ((error = bus_dmamem_alloc(dmatag, size, 0, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: DMA buffer alloc error %d\n",
			self->dv_xname, error);
		return;
	}

	/* Map DMA buffer in CPU addressable space */
	if ((error = bus_dmamem_map(dmatag, &seg, rseg, size,
	    &sc->sc_rb.rb_membase,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("%s: DMA buffer map error %d\n",
		    self->dv_xname, error);
		bus_dmamem_free(dmatag, &seg, rseg);
		return;
	}

	/* Load the buffer */
	if ((error = bus_dmamap_load(dmatag, sc->sc_dmamap,
	    sc->sc_rb.rb_membase, size, NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: DMA buffer map load error %d\n",
			self->dv_xname, error);
		bus_dmamem_unmap(dmatag, sc->sc_rb.rb_membase, size);
		bus_dmamem_free(dmatag, &seg, rseg);
		return;
	}
	sc->sc_rb.rb_dmabase = sc->sc_dmamap->dm_segs[0].ds_addr;

	/* Initialize media properties */
	ifmedia_init(&sc->sc_ifmedia, 0, qe_ifmedia_upd, qe_ifmedia_sts);
	ifmedia_add(&sc->sc_ifmedia,
	    IFM_MAKEWORD(IFM_ETHER,IFM_10_T,0,0), 0, NULL);
	ifmedia_add(&sc->sc_ifmedia,
	    IFM_MAKEWORD(IFM_ETHER,IFM_10_5,0,0), 0, NULL);
	ifmedia_add(&sc->sc_ifmedia,
	    IFM_MAKEWORD(IFM_ETHER,IFM_AUTO,0,0), 0, NULL);
	ifmedia_set(&sc->sc_ifmedia, IFM_ETHER|IFM_AUTO);

	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_start = qestart;
	ifp->if_ioctl = qeioctl;
	ifp->if_watchdog = qewatchdog;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX |
	    IFF_MULTICAST;

	/* Attach the interface. */
	if_attach(ifp);
	ether_ifattach(ifp);

	printf(" address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));
}
300
301/*
302 * Pull data off an interface.
303 * Len is the length of data, with local net header stripped.
304 * We copy the data into mbufs.  When full cluster sized units are present,
305 * we copy into clusters.
306 */
struct mbuf *
qe_get(struct qe_softc *sc, int idx, int totlen)
{
	struct mbuf *m;
	struct mbuf *top, **mp;
	int len, pad, boff = 0;
	caddr_t bp;

	/* Locate the receive buffer slot this descriptor index maps to. */
	bp = sc->sc_rb.rb_rxbuf + (idx % sc->sc_rb.rb_nrbuf) * QE_PKT_BUF_SZ;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);
	m->m_pkthdr.len = totlen;
	/* Offset the data so the payload after the header is aligned. */
	pad = ALIGN(sizeof(struct ether_header)) - sizeof(struct ether_header);
	m->m_data += pad;
	len = MHLEN - pad;
	top = NULL;
	mp = &top;

	while (totlen > 0) {
		if (top) {
			/* Continuation mbuf (no packet header). */
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				m_freem(top);
				return (NULL);
			}
			len = MLEN;
		}
		/*
		 * Use a cluster when enough data remains; note the first
		 * (header) mbuf never gets one because of the top check.
		 */
		if (top && totlen >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if (m->m_flags & M_EXT)
				len = MCLBYTES;
		}
		m->m_len = len = min(totlen, len);
		bcopy(bp + boff, mtod(m, caddr_t), len);
		boff += len;
		totlen -= len;
		*mp = m;
		mp = &m->m_next;
	}

	return (top);
}
351
352/*
353 * Routine to copy from mbuf chain to transmit buffer in
354 * network buffer memory.
355 */
356__inline__ int
357qe_put(struct qe_softc *sc, int idx, struct mbuf *m)
358{
359	struct mbuf *n;
360	int len, tlen = 0, boff = 0;
361	caddr_t bp;
362
363	bp = sc->sc_rb.rb_txbuf + (idx % sc->sc_rb.rb_ntbuf) * QE_PKT_BUF_SZ;
364
365	for (; m; m = n) {
366		len = m->m_len;
367		if (len == 0) {
368			n = m_free(m);
369			continue;
370		}
371		bcopy(mtod(m, caddr_t), bp+boff, len);
372		boff += len;
373		tlen += len;
374		n = m_free(m);
375	}
376	return (tlen);
377}
378
379/*
380 * Pass a packet to the higher levels.
381 */
382__inline__ void
383qe_read(struct qe_softc *sc, int idx, int len)
384{
385	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
386	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
387	struct mbuf *m;
388
389	if (len <= sizeof(struct ether_header) ||
390	    len > ETHERMTU + sizeof(struct ether_header)) {
391
392		printf("%s: invalid packet size %d; dropping\n",
393		    ifp->if_xname, len);
394
395		ifp->if_ierrors++;
396		return;
397	}
398
399	/*
400	 * Pull packet off interface.
401	 */
402	m = qe_get(sc, idx, len);
403	if (m == NULL) {
404		ifp->if_ierrors++;
405		return;
406	}
407
408	ml_enqueue(&ml, m);
409	if_input(ifp, &ml);
410}
411
412/*
413 * Start output on interface.
414 * We make two assumptions here:
415 *  1) that the current priority is set to splnet _before_ this code
416 *     is called *and* is returned to the appropriate priority after
417 *     return
418 *  2) that the IFF_OACTIVE flag is checked before this code is called
419 *     (i.e. that the output part of the interface is idle)
420 */
void
qestart(struct ifnet *ifp)
{
	struct qe_softc *sc = (struct qe_softc *)ifp->if_softc;
	struct qec_xd *txd = sc->sc_rb.rb_txd;
	struct mbuf *m;
	unsigned int bix, len;
	unsigned int ntbuf = sc->sc_rb.rb_ntbuf;

	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
		return;

	bix = sc->sc_rb.rb_tdhead;

	/*
	 * NOTE(review): nothing in this file ever sets ifp->if_timer,
	 * so the qewatchdog() timeout appears never to arm -- confirm
	 * whether that is intentional.
	 */
	for (;;) {
		m = ifq_dequeue(&ifp->if_snd);
		if (m == NULL)
			break;

#if NBPFILTER > 0
		/*
		 * If BPF is listening on this interface, let it see the
		 * packet before we commit it to the wire.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		/*
		 * Copy the mbuf chain into the transmit buffer.
		 */
		len = qe_put(sc, bix, m);

		/*
		 * Initialize transmit registers and start transmission
		 * (hand the descriptor to the chip, then poke TWAKEUP).
		 */
		txd[bix].xd_flags = QEC_XD_OWN | QEC_XD_SOP | QEC_XD_EOP |
		    (len & QEC_XD_LENGTH);
		bus_space_write_4(sc->sc_bustag, sc->sc_cr, QE_CRI_CTRL,
		    QE_CR_CTRL_TWAKEUP);

		if (++bix == QEC_XD_RING_MAXSIZE)
			bix = 0;

		/* Ring full: stall output until qe_tint() frees a slot. */
		if (++sc->sc_rb.rb_td_nbusy == ntbuf) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}
	}

	sc->sc_rb.rb_tdhead = bix;
}
474
475void
476qestop(struct qe_softc *sc)
477{
478	bus_space_tag_t t = sc->sc_bustag;
479	bus_space_handle_t mr = sc->sc_mr;
480	bus_space_handle_t cr = sc->sc_cr;
481	int n;
482
483	/* Stop the schwurst */
484	bus_space_write_1(t, mr, QE_MRI_BIUCC, QE_MR_BIUCC_SWRST);
485	for (n = 200; n > 0; n--) {
486		if ((bus_space_read_1(t, mr, QE_MRI_BIUCC) &
487		    QE_MR_BIUCC_SWRST) == 0)
488			break;
489		DELAY(20);
490	}
491
492	/* then reset */
493	bus_space_write_4(t, cr, QE_CRI_CTRL, QE_CR_CTRL_RESET);
494	for (n = 200; n > 0; n--) {
495		if ((bus_space_read_4(t, cr, QE_CRI_CTRL) &
496		    QE_CR_CTRL_RESET) == 0)
497			break;
498		DELAY(20);
499	}
500}
501
502/*
503 * Reset interface.
504 */
void
qereset(struct qe_softc *sc)
{
	int s;

	/* Stop and reinitialize the chip, blocking network interrupts. */
	s = splnet();
	qestop(sc);
	qeinit(sc);
	splx(s);
}
515
/*
 * Watchdog timer: transmission appears hung, so log it, count an
 * output error, and reset the chip.
 */
void
qewatchdog(struct ifnet *ifp)
{
	struct qe_softc *sc = ifp->if_softc;

	log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;

	qereset(sc);
}
526
527/*
528 * Interrupt dispatch.
529 */
int
qeintr(void *arg)
{
	struct qe_softc *sc = (struct qe_softc *)arg;
	bus_space_tag_t t = sc->sc_bustag;
	u_int32_t qecstat, qestat;
	int r = 0;

	/* Read QEC status and channel status */
	qecstat = bus_space_read_4(t, sc->sc_qr, QEC_QRI_STAT);
#ifdef QEDEBUG
	if (sc->sc_debug) {
		printf("qe%d: intr: qecstat=%x\n", sc->sc_channel, qecstat);
	}
#endif

	/* Filter out status for this channel: 4 status bits per channel */
	qecstat = qecstat >> (4 * sc->sc_channel);
	if ((qecstat & 0xf) == 0)
		return (r);	/* nothing pending for us; r is still 0 */

	qestat = bus_space_read_4(t, sc->sc_cr, QE_CRI_STAT);

#ifdef QEDEBUG
	if (sc->sc_debug) {
		int i;
		/* NB: these locals shadow the outer t with the same value. */
		bus_space_tag_t t = sc->sc_bustag;
		bus_space_handle_t mr = sc->sc_mr;

		printf("qe%d: intr: qestat=%b\n", sc->sc_channel,
		    qestat, QE_CR_STAT_BITS);

		printf("MACE registers:\n");
		for (i = 0 ; i < 32; i++) {
			printf("  m[%d]=%x,", i, bus_space_read_1(t, mr, i));
			if (((i+1) & 7) == 0)
				printf("\n");
		}
	}
#endif

	if (qestat & QE_CR_STAT_ALLERRORS) {
#ifdef QEDEBUG
		if (sc->sc_debug)
			printf("qe%d: eint: qestat=%b\n", sc->sc_channel,
			    qestat, QE_CR_STAT_BITS);
#endif
		r |= qe_eint(sc, qestat);
		/* qe_eint() returned -1: the chip was reset; stop here. */
		if (r == -1)
			return (1);
	}

	if (qestat & QE_CR_STAT_TXIRQ)
		r |= qe_tint(sc);

	if (qestat & QE_CR_STAT_RXIRQ)
		r |= qe_rint(sc);

	return (1);
}
590
591/*
592 * Transmit interrupt.
593 */
int
qe_tint(struct qe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	unsigned int bix, txflags;

	bix = sc->sc_rb.rb_tdtail;

	/* Reclaim every descriptor the chip has finished with. */
	for (;;) {
		if (sc->sc_rb.rb_td_nbusy <= 0)
			break;

		txflags = sc->sc_rb.rb_txd[bix].xd_flags;

		/* Still owned by the chip: transmission not done yet. */
		if (txflags & QEC_XD_OWN)
			break;

		/* At least one slot has freed up; allow output again. */
		ifq_clr_oactive(&ifp->if_snd);

		if (++bix == QEC_XD_RING_MAXSIZE)
			bix = 0;

		--sc->sc_rb.rb_td_nbusy;
	}

	/* Ring fully drained: cancel the watchdog. */
	if (sc->sc_rb.rb_td_nbusy == 0)
		ifp->if_timer = 0;

	/* If any descriptors were reclaimed, try to restart output. */
	if (sc->sc_rb.rb_tdtail != bix) {
		sc->sc_rb.rb_tdtail = bix;
		if (ifq_is_oactive(&ifp->if_snd)) {
			ifq_clr_oactive(&ifp->if_snd);
			qestart(ifp);
		}
	}

	return (1);
}
632
633/*
634 * Receive interrupt.
635 */
int
qe_rint(struct qe_softc *sc)
{
	struct qec_xd *xd = sc->sc_rb.rb_rxd;
	unsigned int bix, len;
	unsigned int nrbuf = sc->sc_rb.rb_nrbuf;
#ifdef QEDEBUG
	int npackets = 0;
#endif

	bix = sc->sc_rb.rb_rdtail;

	/*
	 * Process all buffers with valid data.
	 */
	for (;;) {
		len = xd[bix].xd_flags;
		if (len & QEC_XD_OWN)
			break;	/* chip still owns this one; we're done */

#ifdef QEDEBUG
		npackets++;
#endif

		len &= QEC_XD_LENGTH;
		/* Strip 4 trailing bytes -- presumably the FCS; confirm. */
		len -= 4;
		qe_read(sc, bix, len);

		/*
		 * Hand a descriptor back to the chip: the slot nrbuf
		 * entries ahead maps the same buffer just consumed
		 * (buffers are indexed modulo nrbuf in qe_get()).
		 */
		xd[(bix+nrbuf) % QEC_XD_RING_MAXSIZE].xd_flags =
		    QEC_XD_OWN | (QE_PKT_BUF_SZ & QEC_XD_LENGTH);

		if (++bix == QEC_XD_RING_MAXSIZE)
			bix = 0;
	}
#ifdef QEDEBUG
	if (npackets == 0 && sc->sc_debug)
		printf("%s: rint: no packets; rb index %d; status 0x%x\n",
		    sc->sc_dev.dv_xname, bix, len);
#endif

	sc->sc_rb.rb_rdtail = bix;

	return (1);
}
681
682/*
683 * Error interrupt.
684 */
/*
 * Decode the error bits in `why', update interface statistics, and
 * print a diagnostic for each condition.  Returns nonzero if any bit
 * was recognized; returns -1 after resetting the chip, in which case
 * the caller must not touch the hardware further.
 */
int
qe_eint(struct qe_softc *sc, u_int32_t why)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int r = 0, rst = 0;

	if (why & QE_CR_STAT_EDEFER) {
		printf("%s: excessive tx defers.\n", sc->sc_dev.dv_xname);
		r |= 1;
		ifp->if_oerrors++;
	}

	/* Carrier loss: counted but not logged. */
	if (why & QE_CR_STAT_CLOSS) {
		ifp->if_oerrors++;
		r |= 1;
	}

	if (why & QE_CR_STAT_ERETRIES) {
		printf("%s: excessive tx retries\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_LCOLL) {
		printf("%s: late tx transmission\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_FUFLOW) {
		printf("%s: tx fifo underflow\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_JERROR) {
		printf("%s: jabber seen\n", sc->sc_dev.dv_xname);
		r |= 1;
	}

	if (why & QE_CR_STAT_BERROR) {
		printf("%s: babble seen\n", sc->sc_dev.dv_xname);
		r |= 1;
	}

	/*
	 * *COFLOW bits are counter overflows: bump the statistics by a
	 * full counter wrap of 256 events each.
	 */
	if (why & QE_CR_STAT_TCCOFLOW) {
		ifp->if_collisions += 256;
		ifp->if_oerrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_TXDERROR) {
		printf("%s: tx descriptor is bad\n", sc->sc_dev.dv_xname);
		rst = 1;
		r |= 1;
	}

	if (why & QE_CR_STAT_TXLERR) {
		printf("%s: tx late error\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		rst = 1;
		r |= 1;
	}

	if (why & QE_CR_STAT_TXPERR) {
		printf("%s: tx dma parity error\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		rst = 1;
		r |= 1;
	}

	if (why & QE_CR_STAT_TXSERR) {
		printf("%s: tx dma sbus error ack\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		rst = 1;
		r |= 1;
	}

	if (why & QE_CR_STAT_RCCOFLOW) {
		ifp->if_collisions += 256;
		ifp->if_ierrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_RUOFLOW) {
		ifp->if_ierrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_MCOFLOW) {
		ifp->if_ierrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_RXFOFLOW) {
		printf("%s: rx fifo overflow\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		r |= 1;
	}

	if (why & QE_CR_STAT_RLCOLL) {
		printf("%s: rx late collision\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		ifp->if_collisions++;
		r |= 1;
	}

	if (why & QE_CR_STAT_FCOFLOW) {
		ifp->if_ierrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_CECOFLOW) {
		ifp->if_ierrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_RXDROP) {
		printf("%s: rx packet dropped\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		r |= 1;
	}

	if (why & QE_CR_STAT_RXSMALL) {
		printf("%s: rx buffer too small\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_RXLERR) {
		printf("%s: rx late error\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_RXPERR) {
		printf("%s: rx dma parity error\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_RXSERR) {
		printf("%s: rx dma sbus error ack\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		r |= 1;
		rst = 1;
	}

	/* None of the known bits matched: report the raw status. */
	if (r == 0)
		printf("%s: unexpected interrupt error: %08x\n",
			sc->sc_dev.dv_xname, why);

	/* Fatal conditions require a full chip reset. */
	if (rst) {
		printf("%s: resetting...\n", sc->sc_dev.dv_xname);
		qereset(sc);
		return (-1);
	}

	return (r);
}
852
/*
 * Process an ioctl request.  Runs at splnet for the duration.
 */
int
qeioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct qe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		/* Setting an address implies bringing the interface up. */
		ifp->if_flags |= IFF_UP;
		qeinit(sc);
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */
			qestop(sc);
			ifp->if_flags &= ~IFF_RUNNING;
		} else if ((ifp->if_flags & IFF_UP) != 0 &&
			   (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			qeinit(sc);
		} else {
			/*
			 * Reset the interface to pick up changes in any other
			 * flags that affect hardware registers.
			 */
			qestop(sc);
			qeinit(sc);
		}
#ifdef QEDEBUG
		/* Mirror IFF_DEBUG into the softc for the debug printfs. */
		sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
#endif
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_ifmedia, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	/* Multicast list changed: reprogram the hash filter if running. */
	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			qe_mcreset(sc);
		error = 0;
	}

	splx(s);
	return (error);
}
915
916
/*
 * Bring the chip up: stop it, reinitialize the descriptor rings,
 * program the QEC channel and MACE registers, set the station address,
 * and mark the interface running.  Takes splnet itself, so it may be
 * called from qereset() or qeioctl() alike.
 */
void
qeinit(struct qe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t cr = sc->sc_cr;
	bus_space_handle_t mr = sc->sc_mr;
	struct qec_softc *qec = sc->sc_qec;
	u_int32_t qecaddr;
	u_int8_t *ea;
	int s;

	s = splnet();

	qestop(sc);

	/*
	 * Allocate descriptor ring and buffers
	 */
	qec_meminit(&sc->sc_rb, QE_PKT_BUF_SZ);

	/* Channel registers: */
	bus_space_write_4(t, cr, QE_CRI_RXDS, (u_int32_t)sc->sc_rb.rb_rxddma);
	bus_space_write_4(t, cr, QE_CRI_TXDS, (u_int32_t)sc->sc_rb.rb_txddma);

	bus_space_write_4(t, cr, QE_CRI_RIMASK, 0);
	bus_space_write_4(t, cr, QE_CRI_TIMASK, 0);
	bus_space_write_4(t, cr, QE_CRI_QMASK, 0);
	bus_space_write_4(t, cr, QE_CRI_MMASK, QE_CR_MMASK_RXCOLL);
	bus_space_write_4(t, cr, QE_CRI_CCNT, 0);
	bus_space_write_4(t, cr, QE_CRI_PIPG, 0);

	/* Carve this channel's slice out of the QEC local buffer memory. */
	qecaddr = sc->sc_channel * qec->sc_msize;
	bus_space_write_4(t, cr, QE_CRI_RXWBUF, qecaddr);
	bus_space_write_4(t, cr, QE_CRI_RXRBUF, qecaddr);
	bus_space_write_4(t, cr, QE_CRI_TXWBUF, qecaddr + qec->sc_rsize);
	bus_space_write_4(t, cr, QE_CRI_TXRBUF, qecaddr + qec->sc_rsize);

	/*
	 * When switching from mace<->qec always guarantee an sbus
	 * turnaround (if last op was read, perform a dummy write, and
	 * vice versa).
	 */
	bus_space_read_4(t, cr, QE_CRI_QMASK);

	/* MACE registers: */
	bus_space_write_1(t, mr, QE_MRI_PHYCC, QE_MR_PHYCC_ASEL);
	bus_space_write_1(t, mr, QE_MRI_XMTFC, QE_MR_XMTFC_APADXMT);
	bus_space_write_1(t, mr, QE_MRI_RCVFC, 0);

	/*
	 * Mask MACE's receive interrupt, since we're being notified
	 * by the QEC after DMA completes.
	 */
	bus_space_write_1(t, mr, QE_MRI_IMR,
	    QE_MR_IMR_CERRM | QE_MR_IMR_RCVINTM);

	bus_space_write_1(t, mr, QE_MRI_BIUCC,
	    QE_MR_BIUCC_BSWAP | QE_MR_BIUCC_64TS);

	bus_space_write_1(t, mr, QE_MRI_FIFOFC,
	    QE_MR_FIFOCC_TXF16 | QE_MR_FIFOCC_RXF32 |
	    QE_MR_FIFOCC_RFWU | QE_MR_FIFOCC_TFWU);

	bus_space_write_1(t, mr, QE_MRI_PLSCC, QE_MR_PLSCC_TP);

	/*
	 * Station address
	 */
	ea = sc->sc_arpcom.ac_enaddr;
	bus_space_write_1(t, mr, QE_MRI_IAC,
	    QE_MR_IAC_ADDRCHG | QE_MR_IAC_PHYADDR);
	bus_space_write_multi_1(t, mr, QE_MRI_PADR, ea, 6);

	/* Apply media settings */
	qe_ifmedia_upd(ifp);

	/*
	 * Clear Logical address filter
	 */
	bus_space_write_1(t, mr, QE_MRI_IAC,
	    QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR);
	bus_space_set_multi_1(t, mr, QE_MRI_LADRF, 0, 8);
	bus_space_write_1(t, mr, QE_MRI_IAC, 0);

	/* Clear missed packet count (register cleared on read) */
	(void)bus_space_read_1(t, mr, QE_MRI_MPC);

#if 0
	/* test register: */
	bus_space_write_1(t, mr, QE_MRI_UTR, 0);
#endif

	/* Reset multicast filter; this also enables tx/rx via MACCC. */
	qe_mcreset(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	splx(s);
}
1017
1018/*
1019 * Reset multicast filter.
1020 */
void
qe_mcreset(struct qe_softc *sc)
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mr = sc->sc_mr;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int32_t crc;
	u_int16_t hash[4];
	u_int8_t octet, maccc, *ladrp = (u_int8_t *)&hash[0];
	int i, j;

	/* We also enable transmitter & receiver here */
	maccc = QE_MR_MACCC_ENXMT | QE_MR_MACCC_ENRCV;

	/* Promiscuous mode: no filter programming needed. */
	if (ifp->if_flags & IFF_PROMISC) {
		maccc |= QE_MR_MACCC_PROM;
		bus_space_write_1(t, mr, QE_MRI_MACCC, maccc);
		return;
	}

	/* Address ranges can't be matched by the hash; accept all. */
	if (ac->ac_multirangecnt > 0)
		ifp->if_flags |= IFF_ALLMULTI;

	if (ifp->if_flags & IFF_ALLMULTI) {
		/* Set every bit in the logical address filter. */
		bus_space_write_1(t, mr, QE_MRI_IAC,
		    QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR);
		bus_space_set_multi_1(t, mr, QE_MRI_LADRF, 0xff, 8);
		bus_space_write_1(t, mr, QE_MRI_IAC, 0);
		bus_space_write_1(t, mr, QE_MRI_MACCC, maccc);
		return;
	}

	hash[3] = hash[2] = hash[1] = hash[0] = 0;

	/*
	 * Fold each multicast address through a bitwise CRC (polynomial
	 * MC_POLY_LE) and use the top 6 bits of the result as an index
	 * into the 64-bit logical address filter.
	 */
	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		crc = 0xffffffff;

		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			octet = enm->enm_addrlo[i];

			for (j = 0; j < 8; j++) {
				if ((crc & 1) ^ (octet & 1)) {
					crc >>= 1;
					crc ^= MC_POLY_LE;
				}
				else
					crc >>= 1;
				octet >>= 1;
			}
		}

		crc >>= 26;
		hash[crc >> 4] |= 1 << (crc & 0xf);
		ETHER_NEXT_MULTI(step, enm);
	}

	/* Program the filter and (re)enable transmitter & receiver. */
	bus_space_write_1(t, mr, QE_MRI_IAC,
	    QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR);
	bus_space_write_multi_1(t, mr, QE_MRI_LADRF, ladrp, 8);
	bus_space_write_1(t, mr, QE_MRI_IAC, 0);
	bus_space_write_1(t, mr, QE_MRI_MACCC, maccc);
}
1087
1088/*
1089 * Get current media settings.
1090 */
1091void
1092qe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1093{
1094	struct qe_softc *sc = ifp->if_softc;
1095	u_int8_t phycc;
1096
1097	ifmr->ifm_active = IFM_ETHER | IFM_10_T;
1098	phycc = bus_space_read_1(sc->sc_bustag, sc->sc_mr, QE_MRI_PHYCC);
1099	if ((phycc & QE_MR_PHYCC_DLNKTST) == 0) {
1100		ifmr->ifm_status |= IFM_AVALID;
1101		if (phycc & QE_MR_PHYCC_LNKFL)
1102			ifmr->ifm_status &= ~IFM_ACTIVE;
1103		else
1104			ifmr->ifm_status |= IFM_ACTIVE;
1105	}
1106}
1107
1108/*
1109 * Set media options.
1110 */
1111int
1112qe_ifmedia_upd(struct ifnet *ifp)
1113{
1114	struct qe_softc *sc = ifp->if_softc;
1115	uint64_t media = sc->sc_ifmedia.ifm_media;
1116
1117	if (IFM_TYPE(media) != IFM_ETHER)
1118		return (EINVAL);
1119
1120	if (IFM_SUBTYPE(media) != IFM_10_T)
1121		return (EINVAL);
1122
1123	return (0);
1124}
1125