/*      $NetBSD: if_qe.c,v 1.70 2010/01/19 22:07:43 pooka Exp $ */
/*
 * Copyright (c) 1999 Ludd, University of Luleå, Sweden. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed at Ludd, University of
 *      Luleå, Sweden and its contributors.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Driver for DEQNA/DELQA ethernet cards.
 * Things that are still to do:
 *	Handle ubaresets. Does not work at all right now.
 *	Fix ALLMULTI reception. But someone must tell me how...
 *	Collect statistics.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_qe.c,v 1.70 2010/01/19 22:07:43 pooka Exp $");

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/systm.h>
#include <sys/sockio.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_dl.h>

#include <netinet/in.h>
#include <netinet/if_inarp.h>

#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <sys/bus.h>

#include <dev/qbus/ubavar.h>
#include <dev/qbus/if_qereg.h>

#include "ioconf.h"

#define RXDESCS	30	/* # of receive descriptors */
#define TXDESCS	60	/* # transmit descs */

/*
 * Structure containing the elements that must be in DMA-safe memory.
 */
struct qe_cdata {
	struct qe_ring	qc_recv[RXDESCS+1];	/* Receive descriptors */
	struct qe_ring	qc_xmit[TXDESCS+1];	/* Transmit descriptors */
	u_int8_t	qc_setup[128];		/* Setup packet layout */
};
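
/*
 * Note on the ring layout (summarizing what qeattach() sets up below):
 * both descriptor arrays carry one extra entry, used as a permanent
 * "chain" descriptor that points back to entry 0, so the hardware sees
 * the RXDESCS/TXDESCS real descriptors as an endless ring.
 */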

struct	qe_softc {
	device_t	sc_dev;		/* Configuration common part	*/
	struct uba_softc *sc_uh;	/* our parent */
	struct evcnt	sc_intrcnt;	/* Interrupt counting		*/
	struct ethercom sc_ec;		/* Ethernet common part		*/
#define sc_if	sc_ec.ec_if		/* network-visible interface	*/
	bus_space_tag_t sc_iot;
	bus_addr_t	sc_ioh;
	bus_dma_tag_t	sc_dmat;
	struct qe_cdata *sc_qedata;	/* Descriptor struct		*/
	struct qe_cdata *sc_pqedata;	/* Unibus address of above	*/
	struct mbuf*	sc_txmbuf[TXDESCS];
	struct mbuf*	sc_rxmbuf[RXDESCS];
	bus_dmamap_t	sc_xmtmap[TXDESCS];
	bus_dmamap_t	sc_rcvmap[RXDESCS];
	bus_dmamap_t	sc_nulldmamap;	/* ethernet padding buffer	*/
	struct ubinfo	sc_ui;
	int		sc_intvec;	/* Interrupt vector		*/
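	/*
	 * Transmit ring bookkeeping, as used by qestart()/qeintr() below:
	 * sc_nexttx is the next free descriptor, sc_lastack the first one
	 * not yet acknowledged by the chip, and sc_inq the number of
	 * descriptors currently handed to the chip.
	 */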
	int		sc_nexttx;
	int		sc_inq;
	int		sc_lastack;
	int		sc_nextrx;
	int		sc_setup;	/* Setup packet in queue	*/
};

static	int	qematch(device_t, cfdata_t, void *);
static	void	qeattach(device_t, device_t, void *);
static	void	qeinit(struct qe_softc *);
static	void	qestart(struct ifnet *);
static	void	qeintr(void *);
static	int	qeioctl(struct ifnet *, u_long, void *);
static	int	qe_add_rxbuf(struct qe_softc *, int);
static	void	qe_setup(struct qe_softc *);
static	void	qetimeout(struct ifnet *);

CFATTACH_DECL_NEW(qe, sizeof(struct qe_softc),
    qematch, qeattach, NULL, NULL);

#define	QE_WCSR(csr, val) \
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, csr, val)
#define	QE_RCSR(csr) \
	bus_space_read_2(sc->sc_iot, sc->sc_ioh, csr)

#define	LOWORD(x)	((int)(x) & 0xffff)
#define	HIWORD(x)	(((int)(x) >> 16) & 0x3f)

#define	ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)
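
/*
 * LOWORD/HIWORD split a Qbus DMA address into the two 16-bit descriptor
 * address words; the 0x3f mask keeps the upper six bits of a 22-bit Q22
 * bus address.  ETHER_PAD_LEN (60 bytes) is the minimum ethernet frame
 * length excluding CRC; shorter packets are padded out of the zeroed
 * buffer mapped by sc_nulldmamap.
 */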

/*
 * Check for a present DEQNA. Done by sending a fake setup packet
 * and waiting for an interrupt.
 */
int
qematch(device_t parent, cfdata_t cf, void *aux)
{
	struct	qe_softc ssc;
	struct	qe_softc *sc = &ssc;
	struct	uba_attach_args *ua = aux;
	struct	uba_softc *uh = device_private(parent);
	struct ubinfo ui;

#define	PROBESIZE	4096
	struct qe_ring *ring;
	struct	qe_ring *rp;
	int error;

	ring = malloc(PROBESIZE, M_TEMP, M_WAITOK|M_ZERO);
	memset(sc, 0, sizeof(*sc));
	sc->sc_iot = ua->ua_iot;
	sc->sc_ioh = ua->ua_ioh;
	sc->sc_dmat = ua->ua_dmat;

	uh->uh_lastiv -= 4;
	QE_WCSR(QE_CSR_CSR, QE_RESET);
	QE_WCSR(QE_CSR_VECTOR, uh->uh_lastiv);

	/*
	 * Map the ring area. Actually this is done only to be able to
	 * send and receive an internal packet; some junk is looped back
	 * so that the DEQNA has a reason to interrupt.
	 */
	ui.ui_size = PROBESIZE;
	ui.ui_vaddr = (void *)&ring[0];
	if ((error = uballoc(uh, &ui, UBA_CANTWAIT))) {
		free(ring, M_TEMP);	/* don't leak the probe buffer */
		return 0;
	}

	/*
	 * Init a simple "fake" receive and transmit descriptor that
	 * points to some unused area. Send a fake setup packet.
	 */
	rp = (void *)ui.ui_baddr;
	ring[0].qe_flag = ring[0].qe_status1 = QE_NOTYET;
	ring[0].qe_addr_lo = LOWORD(&rp[4]);
	ring[0].qe_addr_hi = HIWORD(&rp[4]) | QE_VALID | QE_EOMSG | QE_SETUP;
	ring[0].qe_buf_len = -64;

	ring[2].qe_flag = ring[2].qe_status1 = QE_NOTYET;
	ring[2].qe_addr_lo = LOWORD(&rp[4]);
	ring[2].qe_addr_hi = HIWORD(&rp[4]) | QE_VALID;
	ring[2].qe_buf_len = -(1500/2);
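	/*
	 * Note: qe_buf_len holds the negative of the buffer size in 16-bit
	 * words, so -64 describes the 128-byte setup buffer and -(1500/2)
	 * a 1500-byte receive buffer.  The same convention is used for the
	 * real descriptors in qestart() and qe_setup() below.
	 */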

	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);
	DELAY(1000);

	/*
	 * Start the interface and wait for the packet.
	 */
	QE_WCSR(QE_CSR_CSR, QE_INT_ENABLE|QE_XMIT_INT|QE_RCV_INT);
	QE_WCSR(QE_CSR_RCLL, LOWORD(&rp[2]));
	QE_WCSR(QE_CSR_RCLH, HIWORD(&rp[2]));
	QE_WCSR(QE_CSR_XMTL, LOWORD(rp));
	QE_WCSR(QE_CSR_XMTH, HIWORD(rp));
	DELAY(10000);

	/*
	 * All done with the bus resources.
	 */
	ubfree(uh, &ui);
	free(ring, M_TEMP);
	return 1;
}

/*
 * Interface exists: make it available by filling in the network interface
 * record.  The system will initialize the interface when it is ready
 * to accept packets.
 */
void
qeattach(device_t parent, device_t self, void *aux)
{
	struct uba_attach_args *ua = aux;
	struct qe_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_if;
	struct qe_ring *rp;
	u_int8_t enaddr[ETHER_ADDR_LEN];
	int i, error;
	char *nullbuf;

	sc->sc_dev = self;
	sc->sc_uh = device_private(parent);
	sc->sc_iot = ua->ua_iot;
	sc->sc_ioh = ua->ua_ioh;
	sc->sc_dmat = ua->ua_dmat;

	/*
	 * Allocate DMA safe memory for descriptors and setup memory.
	 */

	sc->sc_ui.ui_size = sizeof(struct qe_cdata) + ETHER_PAD_LEN;
	if ((error = ubmemalloc(sc->sc_uh, &sc->sc_ui, 0))) {
		aprint_error(": unable to ubmemalloc(), error = %d\n", error);
		return;
	}
	sc->sc_pqedata = (struct qe_cdata *)sc->sc_ui.ui_baddr;
	sc->sc_qedata = (struct qe_cdata *)sc->sc_ui.ui_vaddr;

	/*
	 * Zero the newly allocated memory.
	 */
	memset(sc->sc_qedata, 0, sizeof(struct qe_cdata) + ETHER_PAD_LEN);
	nullbuf = ((char*)sc->sc_qedata) + sizeof(struct qe_cdata);
	/*
	 * Create the transmit descriptor DMA maps. We take advantage
	 * of the fact that the Qbus address space is big, and therefore
	 * allocate map registers for all transmit descriptors up front,
	 * so that we do not have to do it each time we send a packet.
	 */
	for (i = 0; i < TXDESCS; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    1, MCLBYTES, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
		    &sc->sc_xmtmap[i]))) {
			aprint_error(
			    ": unable to create tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create receive buffer DMA maps.
	 */
	for (i = 0; i < RXDESCS; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rcvmap[i]))) {
			aprint_error(
			    ": unable to create rx DMA map %d, error = %d\n",
			    i, error);
			goto fail_5;
		}
	}
	/*
	 * Pre-allocate the receive buffers.
	 */
	for (i = 0; i < RXDESCS; i++) {
		if ((error = qe_add_rxbuf(sc, i)) != 0) {
			aprint_error(
			    ": unable to allocate or map rx buffer %d,"
			    " error = %d\n", i, error);
			goto fail_6;
		}
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_PAD_LEN, 1,
	    ETHER_PAD_LEN, 0, BUS_DMA_NOWAIT,&sc->sc_nulldmamap)) != 0) {
		aprint_error(
		    ": unable to create pad buffer DMA map, error = %d\n",
		    error);
		goto fail_6;
	}
	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_nulldmamap,
	    nullbuf, ETHER_PAD_LEN, NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error(
		    ": unable to load pad buffer DMA map, error = %d\n",
		    error);
		goto fail_7;
	}
	bus_dmamap_sync(sc->sc_dmat, sc->sc_nulldmamap, 0, ETHER_PAD_LEN,
	    BUS_DMASYNC_PREWRITE);

	/*
	 * Create ring loops of the buffer chains.
	 * This is only done once.
	 */

	rp = sc->sc_qedata->qc_recv;
	rp[RXDESCS].qe_addr_lo = LOWORD(&sc->sc_pqedata->qc_recv[0]);
	rp[RXDESCS].qe_addr_hi = HIWORD(&sc->sc_pqedata->qc_recv[0]) |
	    QE_VALID | QE_CHAIN;
	rp[RXDESCS].qe_flag = rp[RXDESCS].qe_status1 = QE_NOTYET;

	rp = sc->sc_qedata->qc_xmit;
	rp[TXDESCS].qe_addr_lo = LOWORD(&sc->sc_pqedata->qc_xmit[0]);
	rp[TXDESCS].qe_addr_hi = HIWORD(&sc->sc_pqedata->qc_xmit[0]) |
	    QE_VALID | QE_CHAIN;
	rp[TXDESCS].qe_flag = rp[TXDESCS].qe_status1 = QE_NOTYET;

	/*
	 * Get the vector that was set at match time, and remember it.
	 */
	sc->sc_intvec = sc->sc_uh->uh_lastiv;
	QE_WCSR(QE_CSR_CSR, QE_RESET);
	DELAY(1000);
	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);

	/*
	 * Read out the ethernet address and tell which type of card this is.
	 */
	for (i = 0; i < 6; i++)
		enaddr[i] = QE_RCSR(i * 2) & 0xff;
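	/*
	 * (The station address ROM is read above as the low byte of each
	 * of the first six 16-bit CSR words.  Writing the vector register
	 * with the low bit set and reading it back, as done below, is how
	 * the driver distinguishes a DELQA from a DEQNA.)
	 */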

	QE_WCSR(QE_CSR_VECTOR, sc->sc_intvec | 1);
	aprint_normal(": %s, hardware address %s\n",
		QE_RCSR(QE_CSR_VECTOR) & 1 ? "delqa":"deqna",
		ether_sprintf(enaddr));

	QE_WCSR(QE_CSR_VECTOR, QE_RCSR(QE_CSR_VECTOR) & ~1); /* ??? */

	uba_intr_establish(ua->ua_icookie, ua->ua_cvec, qeintr,
		sc, &sc->sc_intrcnt);
	evcnt_attach_dynamic(&sc->sc_intrcnt, EVCNT_TYPE_INTR, ua->ua_evcnt,
		device_xname(sc->sc_dev), "intr");

	strcpy(ifp->if_xname, device_xname(sc->sc_dev));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = qestart;
	ifp->if_ioctl = qeioctl;
	ifp->if_watchdog = qetimeout;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_7:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_nulldmamap);
 fail_6:
	for (i = 0; i < RXDESCS; i++) {
		if (sc->sc_rxmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);
			m_freem(sc->sc_rxmbuf[i]);
		}
	}
 fail_5:
	for (i = 0; i < RXDESCS; i++) {
		if (sc->sc_rcvmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rcvmap[i]);
	}
 fail_4:
	for (i = 0; i < TXDESCS; i++) {
		if (sc->sc_xmtmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_xmtmap[i]);
	}
}

/*
 * Initialization of interface.
 */
void
qeinit(struct qe_softc *sc)
{
	struct ifnet *ifp = (struct ifnet *)&sc->sc_if;
	struct qe_cdata *qc = sc->sc_qedata;
	int i;

	/*
	 * Reset the interface.
	 */
	QE_WCSR(QE_CSR_CSR, QE_RESET);
	DELAY(1000);
	QE_WCSR(QE_CSR_CSR, QE_RCSR(QE_CSR_CSR) & ~QE_RESET);
	QE_WCSR(QE_CSR_VECTOR, sc->sc_intvec);

	sc->sc_nexttx = sc->sc_inq = sc->sc_lastack = 0;
	/*
	 * Release and init transmit descriptors.
	 */
	for (i = 0; i < TXDESCS; i++) {
		if (sc->sc_txmbuf[i]) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_xmtmap[i]);
			m_freem(sc->sc_txmbuf[i]);
			sc->sc_txmbuf[i] = 0;
		}
		qc->qc_xmit[i].qe_addr_hi = 0; /* Clear valid bit */
		qc->qc_xmit[i].qe_status1 = qc->qc_xmit[i].qe_flag = QE_NOTYET;
	}

	/*
	 * Init receive descriptors.
	 */
	for (i = 0; i < RXDESCS; i++)
		qc->qc_recv[i].qe_status1 = qc->qc_recv[i].qe_flag = QE_NOTYET;
	sc->sc_nextrx = 0;

	/*
	 * Write the descriptor addresses to the device.
	 * Receiving packets will be enabled in the interrupt routine.
	 */
	QE_WCSR(QE_CSR_CSR, QE_INT_ENABLE|QE_XMIT_INT|QE_RCV_INT);
	QE_WCSR(QE_CSR_RCLL, LOWORD(sc->sc_pqedata->qc_recv));
	QE_WCSR(QE_CSR_RCLH, HIWORD(sc->sc_pqedata->qc_recv));

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Send a setup frame.
	 * This will start the transmit machinery as well.
	 */
	qe_setup(sc);
}

/*
 * Start output on interface.
 */
void
qestart(struct ifnet *ifp)
{
	struct qe_softc *sc = ifp->if_softc;
	struct qe_cdata *qc = sc->sc_qedata;
	paddr_t	buffer;
	struct mbuf *m, *m0;
	int idx, len, s, i, totlen, buflen, error;
	short orword, csr;

	if ((QE_RCSR(QE_CSR_CSR) & QE_RCV_ENABLE) == 0)
		return;

	s = splnet();
	while (sc->sc_inq < (TXDESCS - 1)) {

		if (sc->sc_setup) {
			qe_setup(sc);
			continue;
		}
		idx = sc->sc_nexttx;
		IFQ_POLL(&ifp->if_snd, m);
		if (m == 0)
			goto out;
		/*
		 * Count the number of mbufs in the chain.
		 * We always DMA directly from the mbufs, which is why the
		 * transmit ring is made so big.
		 */
		for (m0 = m, i = 0; m0; m0 = m0->m_next)
			if (m0->m_len)
				i++;
		if (m->m_pkthdr.len < ETHER_PAD_LEN) {
			buflen = ETHER_PAD_LEN;
			i++;
		} else
			buflen = m->m_pkthdr.len;
		if (i >= TXDESCS)
			panic("qestart");

		if ((i + sc->sc_inq) >= (TXDESCS - 1)) {
			ifp->if_flags |= IFF_OACTIVE;
			goto out;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);

		bpf_mtap(ifp, m);
		/*
		 * m now points to a mbuf chain that can be loaded.
		 * Loop around and set it.
		 */
		totlen = 0;
		for (m0 = m; ; m0 = m0->m_next) {
			if (m0) {
				if (m0->m_len == 0)
					continue;
				error = bus_dmamap_load(sc->sc_dmat,
				    sc->sc_xmtmap[idx], mtod(m0, void *),
				    m0->m_len, 0, 0);
				buffer = sc->sc_xmtmap[idx]->dm_segs[0].ds_addr;
				len = m0->m_len;
			} else if (totlen < ETHER_PAD_LEN) {
				buffer = sc->sc_nulldmamap->dm_segs[0].ds_addr;
				len = ETHER_PAD_LEN - totlen;
			} else {
				break;
			}

			totlen += len;
			/* Word alignment calc */
			orword = 0;
			if (totlen == buflen) {
				orword |= QE_EOMSG;
				sc->sc_txmbuf[idx] = m;
			}
			if ((buffer & 1) || (len & 1))
				len += 2;
			if (buffer & 1)
				orword |= QE_ODDBEGIN;
			if ((buffer + len) & 1)
				orword |= QE_ODDEND;
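			/*
			 * The chip transfers 16-bit words; QE_ODDBEGIN and
			 * QE_ODDEND mark an odd start or end address so the
			 * extra byte is ignored, and len is bumped by two
			 * above so that the (negative) word count written
			 * below still covers the whole buffer.  (Summary of
			 * the tests above, per the DEQNA descriptor format.)
			 */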
			qc->qc_xmit[idx].qe_buf_len = -(len/2);
			qc->qc_xmit[idx].qe_addr_lo = LOWORD(buffer);
			qc->qc_xmit[idx].qe_addr_hi = HIWORD(buffer);
			qc->qc_xmit[idx].qe_flag =
			    qc->qc_xmit[idx].qe_status1 = QE_NOTYET;
			qc->qc_xmit[idx].qe_addr_hi |= (QE_VALID | orword);
			if (++idx == TXDESCS)
				idx = 0;
			sc->sc_inq++;
			if (m0 == NULL)
				break;
		}
#ifdef DIAGNOSTIC
		if (totlen != buflen)
			panic("qestart: len fault");
#endif

		/*
		 * Kick off the transmit logic, if it is stopped.
		 */
		csr = QE_RCSR(QE_CSR_CSR);
		if (csr & QE_XL_INVALID) {
			QE_WCSR(QE_CSR_XMTL,
			    LOWORD(&sc->sc_pqedata->qc_xmit[sc->sc_nexttx]));
			QE_WCSR(QE_CSR_XMTH,
			    HIWORD(&sc->sc_pqedata->qc_xmit[sc->sc_nexttx]));
		}
		sc->sc_nexttx = idx;
	}
	if (sc->sc_inq == (TXDESCS - 1))
		ifp->if_flags |= IFF_OACTIVE;

out:	if (sc->sc_inq)
		ifp->if_timer = 5; /* If transmit logic dies */
	splx(s);
}

static void
qeintr(void *arg)
{
	struct qe_softc *sc = arg;
	struct qe_cdata *qc = sc->sc_qedata;
	struct ifnet *ifp = &sc->sc_if;
	struct mbuf *m;
	int csr, status1, status2, len;

	csr = QE_RCSR(QE_CSR_CSR);

	QE_WCSR(QE_CSR_CSR, QE_RCV_ENABLE | QE_INT_ENABLE | QE_XMIT_INT |
	    QE_RCV_INT | QE_ILOOP);

	if (csr & QE_RCV_INT)
		while (qc->qc_recv[sc->sc_nextrx].qe_status1 != QE_NOTYET) {
			status1 = qc->qc_recv[sc->sc_nextrx].qe_status1;
			status2 = qc->qc_recv[sc->sc_nextrx].qe_status2;

			m = sc->sc_rxmbuf[sc->sc_nextrx];
			len = ((status1 & QE_RBL_HI) |
			    (status2 & QE_RBL_LO)) + 60;
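			/*
			 * The received byte count is split between the two
			 * status words and, as the +60 above suggests, is
			 * apparently reported relative to the 60-byte
			 * minimum frame size.
			 */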
			qe_add_rxbuf(sc, sc->sc_nextrx);
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = len;
			if (++sc->sc_nextrx == RXDESCS)
				sc->sc_nextrx = 0;
			bpf_mtap(ifp, m);
			if ((status1 & QE_ESETUP) == 0)
				(*ifp->if_input)(ifp, m);
			else
				m_freem(m);
		}

	if (csr & (QE_XMIT_INT|QE_XL_INVALID)) {
		while (qc->qc_xmit[sc->sc_lastack].qe_status1 != QE_NOTYET) {
			int idx = sc->sc_lastack;

			sc->sc_inq--;
			if (++sc->sc_lastack == TXDESCS)
				sc->sc_lastack = 0;

			/* XXX collect statistics */
			qc->qc_xmit[idx].qe_addr_hi &= ~QE_VALID;
			qc->qc_xmit[idx].qe_status1 =
			    qc->qc_xmit[idx].qe_flag = QE_NOTYET;

			if (qc->qc_xmit[idx].qe_addr_hi & QE_SETUP)
				continue;
			if (sc->sc_txmbuf[idx] == NULL ||
			    sc->sc_txmbuf[idx]->m_pkthdr.len < ETHER_PAD_LEN)
				bus_dmamap_unload(sc->sc_dmat,
				    sc->sc_xmtmap[idx]);
			if (sc->sc_txmbuf[idx]) {
				m_freem(sc->sc_txmbuf[idx]);
				sc->sc_txmbuf[idx] = NULL;
			}
		}
		ifp->if_timer = 0;
		ifp->if_flags &= ~IFF_OACTIVE;
		qestart(ifp); /* Put more packets in the queue */
	}
	/*
	 * How can the receive list become invalid?
	 * It has been verified that it happens anyway.
	 */
	if ((qc->qc_recv[sc->sc_nextrx].qe_status1 == QE_NOTYET) &&
	    (QE_RCSR(QE_CSR_CSR) & QE_RL_INVALID)) {
		QE_WCSR(QE_CSR_RCLL,
		    LOWORD(&sc->sc_pqedata->qc_recv[sc->sc_nextrx]));
		QE_WCSR(QE_CSR_RCLH,
		    HIWORD(&sc->sc_pqedata->qc_recv[sc->sc_nextrx]));
	}
}

/*
 * Process an ioctl request.
 */
int
qeioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct qe_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s = splnet(), error = 0;

	switch (cmd) {

	case SIOCINITIFADDR:
		ifp->if_flags |= IFF_UP;
		switch(ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			qeinit(sc);
			arp_ifinit(ifp, ifa);
			break;
#endif
		}
		break;

	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
			break;
		/* XXX re-use ether_ioctl() */
		switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) {
		case IFF_RUNNING:
			/*
			 * If the interface is marked down and it is running,
			 * stop it by disabling the receive mechanism.
			 */
			QE_WCSR(QE_CSR_CSR,
			    QE_RCSR(QE_CSR_CSR) & ~QE_RCV_ENABLE);
			ifp->if_flags &= ~IFF_RUNNING;
			break;
		case IFF_UP:
			/*
			 * If the interface is marked up and it is stopped,
			 * start it.
			 */
			qeinit(sc);
			break;
		case IFF_UP|IFF_RUNNING:
			/*
			 * Send a new setup packet to match any changed flags
			 * (such as IFF_PROMISC).
			 */
			qe_setup(sc);
			break;
		case 0:
			break;
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/*
		 * Update our multicast list.
		 */
		if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			if (ifp->if_flags & IFF_RUNNING)
				qe_setup(sc);
			error = 0;
		}
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
	}
	splx(s);
	return (error);
}

/*
 * Add a receive buffer to the indicated descriptor.
 */
int
qe_add_rxbuf(struct qe_softc *sc, int i)
{
	struct mbuf *m;
	struct qe_ring *rp;
	vaddr_t addr;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (sc->sc_rxmbuf[i] != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_rcvmap[i],
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT);
	if (error)
		panic("%s: can't load rx DMA map %d, error = %d",
		    device_xname(sc->sc_dev), i, error);
	sc->sc_rxmbuf[i] = m;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_rcvmap[i], 0,
	    sc->sc_rcvmap[i]->dm_mapsize, BUS_DMASYNC_PREREAD);

	/*
	 * We know that the mbuf cluster is page aligned. Also, be sure
	 * that the IP header will be longword aligned.
	 */
	m->m_data += 2;
	addr = sc->sc_rcvmap[i]->dm_segs[0].ds_addr + 2;
	rp = &sc->sc_qedata->qc_recv[i];
	rp->qe_flag = rp->qe_status1 = QE_NOTYET;
	rp->qe_addr_lo = LOWORD(addr);
	rp->qe_addr_hi = HIWORD(addr) | QE_VALID;
	rp->qe_buf_len = -(m->m_ext.ext_size - 2)/2;

	return (0);
}

/*
 * Create a setup packet and put in queue for sending.
 */
void
qe_setup(struct qe_softc *sc)
{
	struct ether_multi *enm;
	struct ether_multistep step;
	struct qe_cdata *qc = sc->sc_qedata;
	struct ifnet *ifp = &sc->sc_if;
	u_int8_t enaddr[ETHER_ADDR_LEN];
	int i, j, k, idx, s;

	s = splnet();
	if (sc->sc_inq == (TXDESCS - 1)) {
		sc->sc_setup = 1;
		splx(s);
		return;
	}
	sc->sc_setup = 0;
	/*
	 * Init the setup packet with valid info.
	 */
	memset(qc->qc_setup, 0xff, sizeof(qc->qc_setup)); /* Broadcast */
	memcpy(enaddr, CLLADDR(ifp->if_sadl), sizeof(enaddr));
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		qc->qc_setup[i * 8 + 1] = enaddr[i]; /* Own address */
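	/*
	 * Setup packet layout (as used above and in the loop below): the
	 * 128-byte buffer is two 64-byte halves of eight 8-byte rows, and
	 * each address is stored "vertically", byte i in row i of one
	 * column.  Column 1 of the first half holds our own address, the
	 * multicast loop fills further columns (up to 12 of them), and
	 * slots left at the 0xff fill value match the broadcast address.
	 */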

	/*
	 * Multicast handling. The DEQNA can handle up to 12 direct
	 * ethernet addresses.
	 */
	j = 3; k = 0;
	ifp->if_flags &= ~IFF_ALLMULTI;
	ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 6)) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		for (i = 0; i < ETHER_ADDR_LEN; i++)
			qc->qc_setup[i * 8 + j + k] = enm->enm_addrlo[i];
		j++;
		if (j == 8) {
			j = 1; k += 64;
		}
		if (k > 64) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		ETHER_NEXT_MULTI(step, enm);
	}
	idx = sc->sc_nexttx;
	qc->qc_xmit[idx].qe_buf_len = -64;

	/*
	 * How is the DEQNA turned into ALLMULTI mode?
	 * Until someone tells me, fall back to PROMISC when there are more
	 * than 12 ethernet addresses.
	 */
	if (ifp->if_flags & IFF_ALLMULTI)
		ifp->if_flags |= IFF_PROMISC;
	else if (ifp->if_pcount == 0)
		ifp->if_flags &= ~IFF_PROMISC;
	if (ifp->if_flags & IFF_PROMISC)
		qc->qc_xmit[idx].qe_buf_len = -65;

	qc->qc_xmit[idx].qe_addr_lo = LOWORD(sc->sc_pqedata->qc_setup);
	qc->qc_xmit[idx].qe_addr_hi =
	    HIWORD(sc->sc_pqedata->qc_setup) | QE_SETUP | QE_EOMSG;
	qc->qc_xmit[idx].qe_status1 = qc->qc_xmit[idx].qe_flag = QE_NOTYET;
	qc->qc_xmit[idx].qe_addr_hi |= QE_VALID;

	if (QE_RCSR(QE_CSR_CSR) & QE_XL_INVALID) {
		QE_WCSR(QE_CSR_XMTL,
		    LOWORD(&sc->sc_pqedata->qc_xmit[idx]));
		QE_WCSR(QE_CSR_XMTH,
		    HIWORD(&sc->sc_pqedata->qc_xmit[idx]));
	}

	sc->sc_inq++;
	if (++sc->sc_nexttx == TXDESCS)
		sc->sc_nexttx = 0;
	splx(s);
}

/*
 * Check for dead transmit logic. Not uncommon.
 */
void
qetimeout(struct ifnet *ifp)
{
	struct qe_softc *sc = ifp->if_softc;

	if (sc->sc_inq == 0)
		return;

	aprint_error_dev(sc->sc_dev, "xmit logic died, resetting...\n");
	/*
	 * Do a reset of the interface to get it going again.
	 * Would it work to just restart the transmit logic?
	 */
	qeinit(sc);
}