/*      $NetBSD: sgec.c,v 1.34 2007/10/19 12:00:00 ad Exp $ */
/*
 * Copyright (c) 1999 Ludd, University of Luleå, Sweden. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed at Ludd, University of
 *      Luleå, Sweden and its contributors.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Driver for the SGEC (Second Generation Ethernet Controller), found
 * on, for example, the VAX 4000/300 (KA670).
 *
 * The SGEC looks like a mixture of the DEQNA and the TULIP. Fun toy.
 *
 * Even though the chip is capable of using virtual addresses (reading
 * the System Page Table directly), this driver doesn't do so, and there
 * is currently no benefit to doing so in NetBSD either.
 *
 * Things still to do:
 *	Collect statistics.
 *	Use imperfect filtering when there are many multicast addresses.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sgec.c,v 1.34 2007/10/19 12:00:00 ad Exp $");

#include "opt_inet.h"
#include "bpfilter.h"

#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/systm.h>
#include <sys/sockio.h>

#include <uvm/uvm_extern.h>

#include <net/if.h>
#include <net/if_ether.h>
#include <net/if_dl.h>

#include <netinet/in.h>
#include <netinet/if_inarp.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#include <net/bpfdesc.h>
#endif

#include <sys/bus.h>

#include <dev/ic/sgecreg.h>
#include <dev/ic/sgecvar.h>

static	void	zeinit(struct ze_softc *);
static	void	zestart(struct ifnet *);
static	int	zeioctl(struct ifnet *, u_long, void *);
static	int	ze_add_rxbuf(struct ze_softc *, int);
static	void	ze_setup(struct ze_softc *);
static	void	zetimeout(struct ifnet *);
static	int	zereset(struct ze_softc *);

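/*
 * CSR access helpers.  Note that they expect a local softc pointer
 * named "sc" to be in scope in the calling function.
 */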
#define	ZE_WCSR(csr, val) \
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, csr, val)
#define	ZE_RCSR(csr) \
	bus_space_read_4(sc->sc_iot, sc->sc_ioh, csr)

/*
 * Interface exists: make available by filling in network interface
 * record.  System will initialize the interface when it is ready
 * to accept packets.
 */
void
sgec_attach(sc)
	struct ze_softc *sc;
{
	struct	ifnet *ifp = (struct ifnet *)&sc->sc_if;
	struct	ze_tdes *tp;
	struct	ze_rdes *rp;
	bus_dma_segment_t seg;
	int i, rseg, error;

	/*
	 * Allocate DMA safe memory for descriptors and setup memory.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct ze_cdata), PAGE_SIZE, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to allocate control data, error = %d\n",
		    error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct ze_cdata), (void **)&sc->sc_zedata,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf(": unable to map control data, error = %d\n", error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct ze_cdata), 1,
	    sizeof(struct ze_cdata), 0, BUS_DMA_NOWAIT,
	    &sc->sc_cmap)) != 0) {
		printf(": unable to create control data DMA map, error = %d\n",
		    error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cmap,
	    sc->sc_zedata, sizeof(struct ze_cdata), NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}

	/*
	 * Zero the newly allocated memory.
	 */
	memset(sc->sc_zedata, 0, sizeof(struct ze_cdata));
	/*
	 * Create the transmit descriptor DMA maps.
	 */
	for (i = 0; i < TXDESCS; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    TXDESCS - 1, MCLBYTES, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW,
		    &sc->sc_xmtmap[i]))) {
			printf(": unable to create tx DMA map %d, error = %d\n",
			    i, error);
			goto fail_4;
		}
	}

	/*
	 * Create receive buffer DMA maps.
	 */
	for (i = 0; i < RXDESCS; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rcvmap[i]))) {
			printf(": unable to create rx DMA map %d, error = %d\n",
			    i, error);
			goto fail_5;
		}
	}
	/*
	 * Pre-allocate the receive buffers.
	 */
	for (i = 0; i < RXDESCS; i++) {
		if ((error = ze_add_rxbuf(sc, i)) != 0) {
			printf(": unable to allocate or map rx buffer %d,"
			    " error = %d\n", i, error);
			goto fail_6;
		}
	}

	/* For vmstat -i */
	evcnt_attach_dynamic(&sc->sc_intrcnt, EVCNT_TYPE_INTR, NULL,
	    sc->sc_dev.dv_xname, "intr");
	evcnt_attach_dynamic(&sc->sc_rxintrcnt, EVCNT_TYPE_INTR,
	    &sc->sc_intrcnt, sc->sc_dev.dv_xname, "rx intr");
	evcnt_attach_dynamic(&sc->sc_txintrcnt, EVCNT_TYPE_INTR,
	    &sc->sc_intrcnt, sc->sc_dev.dv_xname, "tx intr");
	evcnt_attach_dynamic(&sc->sc_txdraincnt, EVCNT_TYPE_INTR,
	    &sc->sc_intrcnt, sc->sc_dev.dv_xname, "tx drain");
	evcnt_attach_dynamic(&sc->sc_nobufintrcnt, EVCNT_TYPE_INTR,
	    &sc->sc_intrcnt, sc->sc_dev.dv_xname, "nobuf intr");
	evcnt_attach_dynamic(&sc->sc_nointrcnt, EVCNT_TYPE_INTR,
	    &sc->sc_intrcnt, sc->sc_dev.dv_xname, "no intr");

	/*
	 * Create ring loops of the buffer chains.
	 * This is only done once.
	 */
	sc->sc_pzedata = (struct ze_cdata *)sc->sc_cmap->dm_segs[0].ds_addr;

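	/*
	 * The extra descriptor at index RXDESCS/TXDESCS carries the
	 * CA ("chain address") bit and its buffer address points back at
	 * the start of the array, which is what turns the lists into rings.
	 */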
	rp = sc->sc_zedata->zc_recv;
	rp[RXDESCS].ze_framelen = ZE_FRAMELEN_OW;
	rp[RXDESCS].ze_rdes1 = ZE_RDES1_CA;
	rp[RXDESCS].ze_bufaddr = (char *)sc->sc_pzedata->zc_recv;

	tp = sc->sc_zedata->zc_xmit;
	tp[TXDESCS].ze_tdr = ZE_TDR_OW;
	tp[TXDESCS].ze_tdes1 = ZE_TDES1_CA;
	tp[TXDESCS].ze_bufaddr = (char *)sc->sc_pzedata->zc_xmit;

	if (zereset(sc))
		return;

	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = zestart;
	ifp->if_ioctl = zeioctl;
	ifp->if_watchdog = zetimeout;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);

	printf("\n%s: hardware address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(sc->sc_enaddr));
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_6:
	for (i = 0; i < RXDESCS; i++) {
		if (sc->sc_rxmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);
			m_freem(sc->sc_rxmbuf[i]);
		}
	}
 fail_5:
	for (i = 0; i < RXDESCS; i++) {
		if (sc->sc_rcvmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rcvmap[i]);
	}
 fail_4:
	for (i = 0; i < TXDESCS; i++) {
		if (sc->sc_xmtmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_xmtmap[i]);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cmap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cmap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_zedata,
	    sizeof(struct ze_cdata));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * Initialization of interface.
 */
void
zeinit(sc)
	struct ze_softc *sc;
{
	struct ifnet *ifp = (struct ifnet *)&sc->sc_if;
	struct ze_cdata *zc = sc->sc_zedata;
	int i;

	/*
	 * Reset the interface.
	 */
	if (zereset(sc))
		return;

	sc->sc_nexttx = sc->sc_inq = sc->sc_lastack = sc->sc_txcnt = 0;
	/*
	 * Release and init transmit descriptors.
	 */
	for (i = 0; i < TXDESCS; i++) {
		if (sc->sc_xmtmap[i]->dm_nsegs > 0)
			bus_dmamap_unload(sc->sc_dmat, sc->sc_xmtmap[i]);
		if (sc->sc_txmbuf[i]) {
			m_freem(sc->sc_txmbuf[i]);
			sc->sc_txmbuf[i] = 0;
		}
		zc->zc_xmit[i].ze_tdr = 0; /* Clear valid bit */
	}

	/*
	 * Init receive descriptors.
	 */
	for (i = 0; i < RXDESCS; i++)
		zc->zc_recv[i].ze_framelen = ZE_FRAMELEN_OW;
	sc->sc_nextrx = 0;

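	/*
	 * Enable interrupts and kick off both the receive and transmit
	 * engines (ST/SR); BL_8 selects the DMA burst length.
	 */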
	ZE_WCSR(ZE_CSR6, ZE_NICSR6_IE|ZE_NICSR6_BL_8|ZE_NICSR6_ST|
	    ZE_NICSR6_SR|ZE_NICSR6_DC);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Send a setup frame.
	 * This will start the transmit machinery as well.
	 */
	ze_setup(sc);
}

/*
 * Start output on interface.
 */
void
zestart(ifp)
	struct ifnet *ifp;
{
	struct ze_softc *sc = ifp->if_softc;
	struct ze_cdata *zc = sc->sc_zedata;
	paddr_t	buffer;
	struct mbuf *m;
	int nexttx, starttx;
	int len, i, totlen, error;
	int old_inq = sc->sc_inq;
	uint16_t orword, tdr = 0;
	bus_dmamap_t map;

	while (sc->sc_inq < (TXDESCS - 1)) {

		if (sc->sc_setup) {
			ze_setup(sc);
			continue;
		}
		nexttx = sc->sc_nexttx;
		IFQ_POLL(&sc->sc_if.if_snd, m);
		if (m == 0)
			goto out;
		/*
		 * Count number of mbufs in chain.
		 * Always do DMA directly from mbufs, therefore the transmit
		 * ring is really big.
		 */
		map = sc->sc_xmtmap[nexttx];
		error = bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
		    BUS_DMA_WRITE);
		if (error) {
			printf("zestart: load_mbuf failed: %d\n", error);
			goto out;
		}

		if (map->dm_nsegs >= TXDESCS)
			panic("zestart"); /* XXX */

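		/*
		 * If the packet needs more descriptors than are currently
		 * free, back off: the map is unloaded and the packet is
		 * left on the send queue until transmit completions have
		 * drained the ring.
		 */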
		if ((map->dm_nsegs + sc->sc_inq) >= (TXDESCS - 1)) {
			bus_dmamap_unload(sc->sc_dmat, map);
			ifp->if_flags |= IFF_OACTIVE;
			goto out;
		}

		/*
		 * m now points to a mbuf chain that can be loaded.
		 * Loop around and set it.
		 */
		totlen = 0;
		orword = ZE_TDES1_FS;
		starttx = nexttx;
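		/*
		 * One transmit descriptor per DMA segment: FS marks the
		 * first segment, LS the last, and IC asks for a transmit
		 * interrupt.  Ownership (OW) is withheld from the first
		 * descriptor until the whole chain is built, so the chip
		 * never sees a partially constructed packet.
		 */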
		for (i = 0; i < map->dm_nsegs; i++) {
			buffer = map->dm_segs[i].ds_addr;
			len = map->dm_segs[i].ds_len;

			KASSERT(len > 0);

			totlen += len;
			/* Word alignment calc */
			if (totlen == m->m_pkthdr.len) {
				sc->sc_txcnt += map->dm_nsegs;
				if (sc->sc_txcnt >= TXDESCS * 3 / 4) {
					orword |= ZE_TDES1_IC;
					sc->sc_txcnt = 0;
				}
				orword |= ZE_TDES1_LS;
				sc->sc_txmbuf[nexttx] = m;
			}
			zc->zc_xmit[nexttx].ze_bufsize = len;
			zc->zc_xmit[nexttx].ze_bufaddr = (char *)buffer;
			zc->zc_xmit[nexttx].ze_tdes1 = orword;
			zc->zc_xmit[nexttx].ze_tdr = tdr;

			if (++nexttx == TXDESCS)
				nexttx = 0;
			orword = 0;
			tdr = ZE_TDR_OW;
		}

		sc->sc_inq += map->dm_nsegs;

		IFQ_DEQUEUE(&ifp->if_snd, m);
#ifdef DIAGNOSTIC
		if (totlen != m->m_pkthdr.len)
			panic("zestart: len fault");
#endif
		/*
		 * Turn ownership of the packet over to the device.
		 */
		zc->zc_xmit[starttx].ze_tdr = ZE_TDR_OW;

		/*
		 * Kick off the transmit logic, if it is stopped.
		 */
		if ((ZE_RCSR(ZE_CSR5) & ZE_NICSR5_TS) != ZE_NICSR5_TS_RUN)
			ZE_WCSR(ZE_CSR1, -1);
		sc->sc_nexttx = nexttx;
	}
	if (sc->sc_inq == (TXDESCS - 1))
		ifp->if_flags |= IFF_OACTIVE;

out:	if (old_inq < sc->sc_inq)
		ifp->if_timer = 5; /* If transmit logic dies */
}

int
sgec_intr(sc)
	struct ze_softc *sc;
{
	struct ze_cdata *zc = sc->sc_zedata;
	struct ifnet *ifp = &sc->sc_if;
	struct mbuf *m;
	int csr, len;

	csr = ZE_RCSR(ZE_CSR5);
	if ((csr & ZE_NICSR5_IS) == 0) { /* Wasn't us */
		sc->sc_nointrcnt.ev_count++;
		return 0;
	}
	ZE_WCSR(ZE_CSR5, csr);
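	/* Writing the pending status bits back to CSR5 acknowledges them. */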

	if (csr & ZE_NICSR5_RU)
		sc->sc_nobufintrcnt.ev_count++;

	if (csr & ZE_NICSR5_RI) {
		sc->sc_rxintrcnt.ev_count++;
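		/*
		 * Process every frame whose descriptor the chip has handed
		 * back to us, i.e. whose OW bit is clear again.
		 */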
		while ((zc->zc_recv[sc->sc_nextrx].ze_framelen &
		    ZE_FRAMELEN_OW) == 0) {

			ifp->if_ipackets++;
			m = sc->sc_rxmbuf[sc->sc_nextrx];
			len = zc->zc_recv[sc->sc_nextrx].ze_framelen;
			ze_add_rxbuf(sc, sc->sc_nextrx);
			if (++sc->sc_nextrx == RXDESCS)
				sc->sc_nextrx = 0;
			if (len < ETHER_MIN_LEN) {
				ifp->if_ierrors++;
				m_freem(m);
			} else {
				m->m_pkthdr.rcvif = ifp;
				m->m_pkthdr.len = m->m_len =
				    len - ETHER_CRC_LEN;
#if NBPFILTER > 0
				if (ifp->if_bpf)
					bpf_mtap(ifp->if_bpf, m);
#endif
				(*ifp->if_input)(ifp, m);
			}
		}
	}

	if (csr & ZE_NICSR5_TI)
		sc->sc_txintrcnt.ev_count++;
	if (sc->sc_lastack != sc->sc_nexttx) {
		int lastack;
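		/*
		 * Reclaim transmitted packets: walk from the oldest
		 * unacknowledged descriptor towards sc_nexttx, skip setup
		 * frames, and unload each packet's DMA map and free its
		 * mbuf once the chip has given back all of its descriptors.
		 */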
		for (lastack = sc->sc_lastack; lastack != sc->sc_nexttx; ) {
			bus_dmamap_t map;
			int nlastack;

			if ((zc->zc_xmit[lastack].ze_tdr & ZE_TDR_OW) != 0)
				break;

			if ((zc->zc_xmit[lastack].ze_tdes1 & ZE_TDES1_DT) ==
			    ZE_TDES1_DT_SETUP) {
				if (++lastack == TXDESCS)
					lastack = 0;
				sc->sc_inq--;
				continue;
			}

			KASSERT(zc->zc_xmit[lastack].ze_tdes1 & ZE_TDES1_FS);
			map = sc->sc_xmtmap[lastack];
			KASSERT(map->dm_nsegs > 0);
			nlastack = (lastack + map->dm_nsegs - 1) % TXDESCS;
			if (zc->zc_xmit[nlastack].ze_tdr & ZE_TDR_OW)
				break;
			lastack = nlastack;
			if (sc->sc_txcnt > map->dm_nsegs)
			    sc->sc_txcnt -= map->dm_nsegs;
			else
			    sc->sc_txcnt = 0;
			sc->sc_inq -= map->dm_nsegs;
			KASSERT(zc->zc_xmit[lastack].ze_tdes1 & ZE_TDES1_LS);
			ifp->if_opackets++;
			bus_dmamap_unload(sc->sc_dmat, map);
			KASSERT(sc->sc_txmbuf[lastack]);
#if NBPFILTER > 0
			if (ifp->if_bpf)
				bpf_mtap(ifp->if_bpf, sc->sc_txmbuf[lastack]);
#endif
			m_freem(sc->sc_txmbuf[lastack]);
			sc->sc_txmbuf[lastack] = 0;
			if (++lastack == TXDESCS)
				lastack = 0;
		}
		if (lastack != sc->sc_lastack) {
			sc->sc_txdraincnt.ev_count++;
			sc->sc_lastack = lastack;
			if (sc->sc_inq == 0)
				ifp->if_timer = 0;
			ifp->if_flags &= ~IFF_OACTIVE;
			zestart(ifp); /* Put more in the queue */
		}
	}
	return 1;
}

/*
 * Process an ioctl request.
 */
int
zeioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	void *data;
{
	struct ze_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s = splnet(), error = 0;

	switch (cmd) {

	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			zeinit(sc);
			arp_ifinit(ifp, ifa);
			break;
#endif
		}
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If interface is marked down and it is running,
			 * stop it by disabling the receive mechanism.
			 */
			ZE_WCSR(ZE_CSR6, ZE_RCSR(ZE_CSR6) &
			    ~(ZE_NICSR6_ST|ZE_NICSR6_SR));
			ifp->if_flags &= ~IFF_RUNNING;
		} else if ((ifp->if_flags & IFF_UP) != 0 &&
			   (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If interface is marked up and it is stopped,
			 * start it.
			 */
			zeinit(sc);
		} else if ((ifp->if_flags & IFF_UP) != 0) {
			/*
			 * Send a new setup packet to match any new changes
			 * (like IFF_PROMISC etc).
			 */
			ze_setup(sc);
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/*
		 * Update our multicast list.
		 */
		if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			if (ifp->if_flags & IFF_RUNNING)
				ze_setup(sc);
			error = 0;
		}
		break;

	default:
		error = EINVAL;

	}
	splx(s);
	return (error);
}

/*
 * Add a receive buffer to the indicated descriptor.
 */
int
ze_add_rxbuf(sc, i)
	struct ze_softc *sc;
	int i;
{
	struct mbuf *m;
	struct ze_rdes *rp;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLAIM(m, &sc->sc_ec.ec_rx_mowner);
	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (sc->sc_rxmbuf[i] != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]);

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_rcvmap[i],
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error)
		panic("%s: can't load rx DMA map %d, error = %d",
		    sc->sc_dev.dv_xname, i, error);
	sc->sc_rxmbuf[i] = m;

	bus_dmamap_sync(sc->sc_dmat, sc->sc_rcvmap[i], 0,
	    sc->sc_rcvmap[i]->dm_mapsize, BUS_DMASYNC_PREREAD);

	/*
	 * We know that the mbuf cluster is page aligned. Also, be sure
	 * that the IP header will be longword aligned.
	 */
	m->m_data += 2;
	rp = &sc->sc_zedata->zc_recv[i];
	rp->ze_bufsize = (m->m_ext.ext_size - 2);
	rp->ze_bufaddr = (char *)sc->sc_rcvmap[i]->dm_segs[0].ds_addr + 2;
	rp->ze_framelen = ZE_FRAMELEN_OW;

	return (0);
}

/*
 * Create a setup packet and put in queue for sending.
 */
void
ze_setup(sc)
	struct ze_softc *sc;
{
	struct ether_multi *enm;
	struct ether_multistep step;
	struct ze_cdata *zc = sc->sc_zedata;
	struct ifnet *ifp = &sc->sc_if;
	const u_int8_t *enaddr = CLLADDR(ifp->if_sadl);
	int j, idx, reg;

	if (sc->sc_inq == (TXDESCS - 1)) {
		sc->sc_setup = 1;
		return;
	}
	sc->sc_setup = 0;
	/*
	 * Init the setup packet with valid info.
	 */
	memset(zc->zc_setup, 0xff, sizeof(zc->zc_setup)); /* Broadcast */
	memcpy(zc->zc_setup, enaddr, ETHER_ADDR_LEN);

	/*
	 * Multicast handling. The SGEC can handle up to 16 direct
	 * ethernet addresses.
	 */
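	/*
	 * The 128-byte setup frame holds 16 address slots of 8 bytes each.
	 * Slot 0 was filled with our own address above, and the 0xff fill
	 * leaves unused slots matching the broadcast address; multicast
	 * addresses are copied in from offset 16 onwards for as long as
	 * they fit.
	 */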
	j = 16;
	ifp->if_flags &= ~IFF_ALLMULTI;
	ETHER_FIRST_MULTI(step, &sc->sc_ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 6)) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
		memcpy(&zc->zc_setup[j], enm->enm_addrlo, ETHER_ADDR_LEN);
		j += 8;
		ETHER_NEXT_MULTI(step, enm);
		if ((enm != NULL) && (j == 128)) {
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}
	}

	/*
	 * ALLMULTI implies PROMISC in this driver.
	 */
	if (ifp->if_flags & IFF_ALLMULTI)
		ifp->if_flags |= IFF_PROMISC;
	else if (ifp->if_pcount == 0)
		ifp->if_flags &= ~IFF_PROMISC;

	/*
	 * Fiddle with the receive logic.
	 */
	reg = ZE_RCSR(ZE_CSR6);
	DELAY(10);
	ZE_WCSR(ZE_CSR6, reg & ~ZE_NICSR6_SR); /* Stop rx */
	reg &= ~ZE_NICSR6_AF;
	if (ifp->if_flags & IFF_PROMISC)
		reg |= ZE_NICSR6_AF_PROM;
	else if (ifp->if_flags & IFF_ALLMULTI)
		reg |= ZE_NICSR6_AF_ALLM;
	DELAY(10);
	ZE_WCSR(ZE_CSR6, reg);
	/*
	 * Only send a setup packet if needed.
	 */
	if ((ifp->if_flags & (IFF_PROMISC|IFF_ALLMULTI)) == 0) {
		idx = sc->sc_nexttx;
		zc->zc_xmit[idx].ze_tdes1 = ZE_TDES1_DT_SETUP;
		zc->zc_xmit[idx].ze_bufsize = 128;
		zc->zc_xmit[idx].ze_bufaddr = sc->sc_pzedata->zc_setup;
		zc->zc_xmit[idx].ze_tdr = ZE_TDR_OW;

		if ((ZE_RCSR(ZE_CSR5) & ZE_NICSR5_TS) != ZE_NICSR5_TS_RUN)
			ZE_WCSR(ZE_CSR1, -1);

		sc->sc_inq++;
		if (++sc->sc_nexttx == TXDESCS)
			sc->sc_nexttx = 0;
	}
}

/*
 * Check for dead transmit logic.
 */
void
zetimeout(ifp)
	struct ifnet *ifp;
{
	struct ze_softc *sc = ifp->if_softc;

	if (sc->sc_inq == 0)
		return;

	printf("%s: xmit logic died, resetting...\n", sc->sc_dev.dv_xname);
	/*
	 * Do a reset of the interface to get it going again.
	 * Would it work to just restart the transmit logic?
	 */
	zeinit(sc);
}

/*
 * Reset chip:
 *  Set/reset the reset flag.
 *  Write interrupt vector.
 *  Write ring buffer addresses.
 *  Write SBR.
 */
int
zereset(sc)
	struct ze_softc *sc;
{
	int reg, i;

	ZE_WCSR(ZE_CSR6, ZE_NICSR6_RE);
	DELAY(50000);
	if (ZE_RCSR(ZE_CSR6) & ZE_NICSR5_SF) {
		printf("%s: selftest failed\n", sc->sc_dev.dv_xname);
		return 1;
	}

	/*
	 * Get the vector that was set at match time, and remember it.
	 * WHICH VECTOR TO USE? Take one unused. XXX
	 * Funny way to set the vector, described in the programmer's manual.
	 */
	reg = ZE_NICSR0_IPL14 | sc->sc_intvec | 0x1fff0003; /* SYNC/ASYNC??? */
	i = 10;
	do {
		if (i-- == 0) {
			printf("Failing SGEC CSR0 init\n");
			return 1;
		}
		ZE_WCSR(ZE_CSR0, reg);
	} while (ZE_RCSR(ZE_CSR0) != reg);

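	/*
	 * Hand the chip the physical base addresses of the receive and
	 * transmit descriptor lists.
	 */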
	ZE_WCSR(ZE_CSR3, (vaddr_t)sc->sc_pzedata->zc_recv);
	ZE_WCSR(ZE_CSR4, (vaddr_t)sc->sc_pzedata->zc_xmit);
	return 0;
}