/* dp83932.c revision 1.23 */
1/*	$NetBSD: dp83932.c,v 1.23 2008/04/08 12:07:26 cegger Exp $	*/
2
3/*-
4 * Copyright (c) 2001 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 *    notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 *    notice, this list of conditions and the following disclaimer in the
17 *    documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 *    must display the following acknowledgement:
20 *	This product includes software developed by the NetBSD
21 *	Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 *    contributors may be used to endorse or promote products derived
24 *    from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39/*
40 * Device driver for the National Semiconductor DP83932
41 * Systems-Oriented Network Interface Controller (SONIC).
42 */
43
44#include <sys/cdefs.h>
45__KERNEL_RCSID(0, "$NetBSD: dp83932.c,v 1.23 2008/04/08 12:07:26 cegger Exp $");
46
47#include "bpfilter.h"
48
49#include <sys/param.h>
50#include <sys/systm.h>
51#include <sys/mbuf.h>
52#include <sys/malloc.h>
53#include <sys/kernel.h>
54#include <sys/socket.h>
55#include <sys/ioctl.h>
56#include <sys/errno.h>
57#include <sys/device.h>
58
59#include <uvm/uvm_extern.h>
60
61#include <net/if.h>
62#include <net/if_dl.h>
63#include <net/if_ether.h>
64
65#if NBPFILTER > 0
66#include <net/bpf.h>
67#endif
68
69#include <sys/bus.h>
70#include <sys/intr.h>
71
72#include <dev/ic/dp83932reg.h>
73#include <dev/ic/dp83932var.h>
74
75void	sonic_start(struct ifnet *);
76void	sonic_watchdog(struct ifnet *);
77int	sonic_ioctl(struct ifnet *, u_long, void *);
78int	sonic_init(struct ifnet *);
79void	sonic_stop(struct ifnet *, int);
80
81void	sonic_shutdown(void *);
82
83void	sonic_reset(struct sonic_softc *);
84void	sonic_rxdrain(struct sonic_softc *);
85int	sonic_add_rxbuf(struct sonic_softc *, int);
86void	sonic_set_filter(struct sonic_softc *);
87
88uint16_t sonic_txintr(struct sonic_softc *);
89void	sonic_rxintr(struct sonic_softc *);
90
91int	sonic_copy_small = 0;
92
93#define ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)
94
95/*
96 * sonic_attach:
97 *
98 *	Attach a SONIC interface to the system.
99 */
100void
101sonic_attach(struct sonic_softc *sc, const uint8_t *enaddr)
102{
103	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
104	int i, rseg, error;
105	bus_dma_segment_t seg;
106	size_t cdatasize;
107	char *nullbuf;
108
109	/*
110	 * Allocate the control data structures, and create and load the
111	 * DMA map for it.
112	 */
113	if (sc->sc_32bit)
114		cdatasize = sizeof(struct sonic_control_data32);
115	else
116		cdatasize = sizeof(struct sonic_control_data16);
117
118	if ((error = bus_dmamem_alloc(sc->sc_dmat, cdatasize + ETHER_PAD_LEN,
119	     PAGE_SIZE, (64 * 1024), &seg, 1, &rseg,
120	     BUS_DMA_NOWAIT)) != 0) {
121		aprint_error_dev(&sc->sc_dev, "unable to allocate control data, error = %d\n", error);
122		goto fail_0;
123	}
124
125	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
126	    cdatasize + ETHER_PAD_LEN, (void **) &sc->sc_cdata16,
127	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
128		aprint_error_dev(&sc->sc_dev, "unable to map control data, error = %d\n", error);
129		goto fail_1;
130	}
131	nullbuf = (char *)sc->sc_cdata16 + cdatasize;
132	memset(nullbuf, 0, ETHER_PAD_LEN);
133
134	if ((error = bus_dmamap_create(sc->sc_dmat,
135	     cdatasize, 1, cdatasize, 0, BUS_DMA_NOWAIT,
136	     &sc->sc_cddmamap)) != 0) {
137		aprint_error_dev(&sc->sc_dev, "unable to create control data DMA map, "
138		    "error = %d\n", error);
139		goto fail_2;
140	}
141
142	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
143	     sc->sc_cdata16, cdatasize, NULL, BUS_DMA_NOWAIT)) != 0) {
144		aprint_error_dev(&sc->sc_dev, "unable to load control data DMA map, error = %d\n", error);
145		goto fail_3;
146	}
147
148	/*
149	 * Create the transmit buffer DMA maps.
150	 */
151	for (i = 0; i < SONIC_NTXDESC; i++) {
152		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
153		     SONIC_NTXFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
154		     &sc->sc_txsoft[i].ds_dmamap)) != 0) {
155			aprint_error_dev(&sc->sc_dev, "unable to create tx DMA map %d, "
156			    "error = %d\n", i, error);
157			goto fail_4;
158		}
159	}
160
161	/*
162	 * Create the receive buffer DMA maps.
163	 */
164	for (i = 0; i < SONIC_NRXDESC; i++) {
165		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
166		     MCLBYTES, 0, BUS_DMA_NOWAIT,
167		     &sc->sc_rxsoft[i].ds_dmamap)) != 0) {
168			aprint_error_dev(&sc->sc_dev, "unable to create rx DMA map %d, "
169			    "error = %d\n", i, error);
170			goto fail_5;
171		}
172		sc->sc_rxsoft[i].ds_mbuf = NULL;
173	}
174
175	/*
176	 * create and map the pad buffer
177	 */
178	if ((error = bus_dmamap_create(sc->sc_dmat, ETHER_PAD_LEN, 1,
179	    ETHER_PAD_LEN, 0, BUS_DMA_NOWAIT, &sc->sc_nulldmamap)) != 0) {
180		aprint_error_dev(&sc->sc_dev, "unable to create pad buffer DMA map, "
181		    "error = %d\n", error);
182		goto fail_5;
183	}
184
185	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_nulldmamap,
186	    nullbuf, ETHER_PAD_LEN, NULL, BUS_DMA_NOWAIT)) != 0) {
187		aprint_error_dev(&sc->sc_dev, "unable to load pad buffer DMA map, "
188		    "error = %d\n", error);
189		goto fail_6;
190	}
191	bus_dmamap_sync(sc->sc_dmat, sc->sc_nulldmamap, 0, ETHER_PAD_LEN,
192	    BUS_DMASYNC_PREWRITE);
193
194	/*
195	 * Reset the chip to a known state.
196	 */
197	sonic_reset(sc);
198
199	printf("%s: Ethernet address %s\n", device_xname(&sc->sc_dev),
200	    ether_sprintf(enaddr));
201
202	strlcpy(ifp->if_xname, device_xname(&sc->sc_dev), IFNAMSIZ);
203	ifp->if_softc = sc;
204	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
205	ifp->if_ioctl = sonic_ioctl;
206	ifp->if_start = sonic_start;
207	ifp->if_watchdog = sonic_watchdog;
208	ifp->if_init = sonic_init;
209	ifp->if_stop = sonic_stop;
210	IFQ_SET_READY(&ifp->if_snd);
211
212	/*
213	 * We can suport 802.1Q VLAN-sized frames.
214	 */
215	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
216
217	/*
218	 * Attach the interface.
219	 */
220	if_attach(ifp);
221	ether_ifattach(ifp, enaddr);
222
223	/*
224	 * Make sure the interface is shutdown during reboot.
225	 */
226	sc->sc_sdhook = shutdownhook_establish(sonic_shutdown, sc);
227	if (sc->sc_sdhook == NULL)
228		aprint_error_dev(&sc->sc_dev, "WARNING: unable to establish shutdown hook\n");
229	return;
230
231	/*
232	 * Free any resources we've allocated during the failed attach
233	 * attempt.  Do this in reverse order and fall through.
234	 */
235 fail_6:
236	bus_dmamap_destroy(sc->sc_dmat, sc->sc_nulldmamap);
237 fail_5:
238	for (i = 0; i < SONIC_NRXDESC; i++) {
239		if (sc->sc_rxsoft[i].ds_dmamap != NULL)
240			bus_dmamap_destroy(sc->sc_dmat,
241			    sc->sc_rxsoft[i].ds_dmamap);
242	}
243 fail_4:
244	for (i = 0; i < SONIC_NTXDESC; i++) {
245		if (sc->sc_txsoft[i].ds_dmamap != NULL)
246			bus_dmamap_destroy(sc->sc_dmat,
247			    sc->sc_txsoft[i].ds_dmamap);
248	}
249	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
250 fail_3:
251	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
252 fail_2:
253	bus_dmamem_unmap(sc->sc_dmat, (void *) sc->sc_cdata16, cdatasize);
254 fail_1:
255	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
256 fail_0:
257	return;
258}
259
260/*
261 * sonic_shutdown:
262 *
263 *	Make sure the interface is stopped at reboot.
264 */
265void
266sonic_shutdown(void *arg)
267{
268	struct sonic_softc *sc = arg;
269
270	sonic_stop(&sc->sc_ethercom.ec_if, 1);
271}
272
273/*
274 * sonic_start:		[ifnet interface function]
275 *
276 *	Start packet transmission on the interface.
277 */
/*
 * sonic_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.  Dequeues packets
 *	from the send queue, maps them for DMA, fills in transmit
 *	descriptors (16- or 32-bit flavor, per sc_32bit), and kicks the
 *	transmitter.  Must not be entered unless the interface is
 *	RUNNING and not OACTIVE.
 */
void
sonic_start(struct ifnet *ifp)
{
	struct sonic_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct sonic_tda16 *tda16;
	struct sonic_tda32 *tda32;
	struct sonic_descsoft *ds;
	bus_dmamap_t dmamap;
	int error, olasttx, nexttx, opending, totlen, olseg;
	int seg = 0;	/* XXX: gcc */

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous txpending and the current "last txdesc
	 * used" index.
	 */
	opending = sc->sc_txpending;
	olasttx = sc->sc_txlast;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.  Leave one at the end for sanity's sake.
	 */
	while (sc->sc_txpending < (SONIC_NTXDESC - 1)) {
		/*
		 * Grab a packet off the queue.  Only POLL here; we
		 * DEQUEUE once we are committed to sending it.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		/*
		 * Get the next available transmit descriptor.
		 */
		nexttx = SONIC_NEXTTX(sc->sc_txlast);
		ds = &sc->sc_txsoft[nexttx];
		dmamap = ds->ds_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of frags, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.  We also copy if the packet is short and all
		 * fragment slots are used, since padding needs one more
		 * fragment (the null buffer) below.
		 */
		if ((error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT)) != 0 ||
		    (m0->m_pkthdr.len < ETHER_PAD_LEN &&
		    dmamap->dm_nsegs == SONIC_NTXFRAGS)) {
			if (error == 0)
				bus_dmamap_unload(sc->sc_dmat, dmamap);
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				aprint_error_dev(&sc->sc_dev, "unable to allocate Tx mbuf\n");
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					aprint_error_dev(&sc->sc_dev, "unable to allocate Tx "
					    "cluster\n");
					m_freem(m);
					break;
				}
			}
			/* Flatten the chain into the new single mbuf. */
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (error) {
				aprint_error_dev(&sc->sc_dev, "unable to load Tx buffer, "
				    "error = %d\n", error);
				m_freem(m);
				break;
			}
		}
		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			/* We copied; free the original and send the copy. */
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later.
		 */
		ds->ds_mbuf = m0;

		/*
		 * Initialize the transmit descriptor: one fragment entry
		 * per DMA segment (addresses split into 16-bit halves),
		 * plus an extra zero-filled fragment to pad short frames
		 * up to ETHER_PAD_LEN.
		 */
		totlen = 0;
		if (sc->sc_32bit) {
			tda32 = &sc->sc_tda32[nexttx];
			for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
				tda32->tda_frags[seg].frag_ptr1 =
				    htosonic32(sc,
				    (dmamap->dm_segs[seg].ds_addr >> 16) &
				    0xffff);
				tda32->tda_frags[seg].frag_ptr0 =
				    htosonic32(sc,
				    dmamap->dm_segs[seg].ds_addr & 0xffff);
				tda32->tda_frags[seg].frag_size =
				    htosonic32(sc, dmamap->dm_segs[seg].ds_len);
				totlen += dmamap->dm_segs[seg].ds_len;
			}
			/* Pad short frames with the null buffer. */
			if (totlen < ETHER_PAD_LEN) {
				tda32->tda_frags[seg].frag_ptr1 =
				    htosonic32(sc,
				    (sc->sc_nulldma >> 16) & 0xffff);
				tda32->tda_frags[seg].frag_ptr0 =
				    htosonic32(sc, sc->sc_nulldma & 0xffff);
				tda32->tda_frags[seg].frag_size =
				    htosonic32(sc, ETHER_PAD_LEN - totlen);
				totlen = ETHER_PAD_LEN;
				seg++;
			}

			tda32->tda_status = 0;
			tda32->tda_pktconfig = 0;
			tda32->tda_pktsize = htosonic32(sc, totlen);
			tda32->tda_fragcnt = htosonic32(sc, seg);

			/* Link it up.  (The link word lives after the
			 * last fragment entry.) */
			tda32->tda_frags[seg].frag_ptr0 =
			    htosonic32(sc, SONIC_CDTXADDR32(sc,
			    SONIC_NEXTTX(nexttx)) & 0xffff);

			/* Sync the Tx descriptor. */
			SONIC_CDTXSYNC32(sc, nexttx,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		} else {
			tda16 = &sc->sc_tda16[nexttx];
			for (seg = 0; seg < dmamap->dm_nsegs; seg++) {
				tda16->tda_frags[seg].frag_ptr1 =
				    htosonic16(sc,
				    (dmamap->dm_segs[seg].ds_addr >> 16) &
				    0xffff);
				tda16->tda_frags[seg].frag_ptr0 =
				    htosonic16(sc,
				    dmamap->dm_segs[seg].ds_addr & 0xffff);
				tda16->tda_frags[seg].frag_size =
				    htosonic16(sc, dmamap->dm_segs[seg].ds_len);
				totlen += dmamap->dm_segs[seg].ds_len;
			}
			/* Pad short frames with the null buffer. */
			if (totlen < ETHER_PAD_LEN) {
				tda16->tda_frags[seg].frag_ptr1 =
				    htosonic16(sc,
				    (sc->sc_nulldma >> 16) & 0xffff);
				tda16->tda_frags[seg].frag_ptr0 =
				    htosonic16(sc, sc->sc_nulldma & 0xffff);
				tda16->tda_frags[seg].frag_size =
				    htosonic16(sc, ETHER_PAD_LEN - totlen);
				totlen = ETHER_PAD_LEN;
				seg++;
			}

			tda16->tda_status = 0;
			tda16->tda_pktconfig = 0;
			tda16->tda_pktsize = htosonic16(sc, totlen);
			tda16->tda_fragcnt = htosonic16(sc, seg);

			/* Link it up.  (The link word lives after the
			 * last fragment entry.) */
			tda16->tda_frags[seg].frag_ptr0 =
			    htosonic16(sc, SONIC_CDTXADDR16(sc,
			    SONIC_NEXTTX(nexttx)) & 0xffff);

			/* Sync the Tx descriptor. */
			SONIC_CDTXSYNC16(sc, nexttx,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		}

		/* Advance the Tx pointer. */
		sc->sc_txpending++;
		sc->sc_txlast = nexttx;

#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif
	}

	if (sc->sc_txpending == (SONIC_NTXDESC - 1)) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txpending != opending) {
		/*
		 * We enqueued packets.  If the transmitter was idle,
		 * reset the txdirty pointer.
		 */
		if (opending == 0)
			sc->sc_txdirty = SONIC_NEXTTX(olasttx);

		/*
		 * Stop the SONIC on the last packet we've set up,
		 * and clear end-of-list on the descriptor previous
		 * to our new chain.
		 *
		 * NOTE: our `seg' variable should still be valid!
		 */
		if (sc->sc_32bit) {
			olseg =
			    sonic32toh(sc, sc->sc_tda32[olasttx].tda_fragcnt);
			/* Mark the new chain's tail as end-of-list... */
			sc->sc_tda32[sc->sc_txlast].tda_frags[seg].frag_ptr0 |=
			    htosonic32(sc, TDA_LINK_EOL);
			SONIC_CDTXSYNC32(sc, sc->sc_txlast,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			/* ...and splice it onto the previous tail. */
			sc->sc_tda32[olasttx].tda_frags[olseg].frag_ptr0 &=
			    htosonic32(sc, ~TDA_LINK_EOL);
			SONIC_CDTXSYNC32(sc, olasttx,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		} else {
			olseg =
			    sonic16toh(sc, sc->sc_tda16[olasttx].tda_fragcnt);
			/* Mark the new chain's tail as end-of-list... */
			sc->sc_tda16[sc->sc_txlast].tda_frags[seg].frag_ptr0 |=
			    htosonic16(sc, TDA_LINK_EOL);
			SONIC_CDTXSYNC16(sc, sc->sc_txlast,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			/* ...and splice it onto the previous tail. */
			sc->sc_tda16[olasttx].tda_frags[olseg].frag_ptr0 &=
			    htosonic16(sc, ~TDA_LINK_EOL);
			SONIC_CDTXSYNC16(sc, olasttx,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		}

		/* Start the transmitter. */
		CSR_WRITE(sc, SONIC_CR, CR_TXP);

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}
525
526/*
527 * sonic_watchdog:	[ifnet interface function]
528 *
529 *	Watchdog timer handler.
530 */
531void
532sonic_watchdog(struct ifnet *ifp)
533{
534	struct sonic_softc *sc = ifp->if_softc;
535
536	printf("%s: device timeout\n", device_xname(&sc->sc_dev));
537	ifp->if_oerrors++;
538
539	(void) sonic_init(ifp);
540}
541
542/*
543 * sonic_ioctl:		[ifnet interface function]
544 *
545 *	Handle control requests from the operator.
546 */
547int
548sonic_ioctl(struct ifnet *ifp, u_long cmd, void *data)
549{
550	int s, error;
551
552	s = splnet();
553
554	error = ether_ioctl(ifp, cmd, data);
555	if (error == ENETRESET) {
556		/*
557		 * Multicast list has changed; set the hardware
558		 * filter accordingly.
559		 */
560		if (ifp->if_flags & IFF_RUNNING)
561			(void) sonic_init(ifp);
562		error = 0;
563	}
564
565	splx(s);
566	return (error);
567}
568
569/*
570 * sonic_intr:
571 *
572 *	Interrupt service routine.
573 */
/*
 * sonic_intr:
 *
 *	Interrupt service routine.  Reads and acknowledges pending
 *	interrupt sources in a loop until none remain, dispatching to
 *	the Rx/Tx helpers.  Serious receive-resource errors and Tx FIFO
 *	underruns force a full reinitialization.  Returns nonzero if
 *	the interrupt was ours.
 */
int
sonic_intr(void *arg)
{
	struct sonic_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint16_t isr;
	int handled = 0, wantinit;

	for (wantinit = 0; wantinit == 0;) {
		/* Only consider sources we have enabled (sc_imr). */
		isr = CSR_READ(sc, SONIC_ISR) & sc->sc_imr;
		if (isr == 0)
			break;
		CSR_WRITE(sc, SONIC_ISR, isr);	/* ACK */

		handled = 1;

		/* Packet(s) received. */
		if (isr & IMR_PRX)
			sonic_rxintr(sc);

		/* Packet(s) transmitted, or a transmit error. */
		if (isr & (IMR_PTX|IMR_TXER)) {
			if (sonic_txintr(sc) & TCR_FU) {
				printf("%s: transmit FIFO underrun\n",
				    device_xname(&sc->sc_dev));
				wantinit = 1;
			}
		}

		/* Receive resource failures; all require a reinit. */
		if (isr & (IMR_RFO|IMR_RBA|IMR_RBE|IMR_RDE)) {
#define	PRINTERR(bit, str)						\
			if (isr & (bit))				\
				printf("%s: %s\n", device_xname(&sc->sc_dev), str)
			PRINTERR(IMR_RFO, "receive FIFO overrun");
			PRINTERR(IMR_RBA, "receive buffer exceeded");
			PRINTERR(IMR_RBE, "receive buffers exhausted");
			PRINTERR(IMR_RDE, "receive descriptors exhausted");
			wantinit = 1;
		}
	}

	if (handled) {
		if (wantinit)
			(void) sonic_init(ifp);
		/* Descriptors may have freed up; try to send more. */
		sonic_start(ifp);
	}

	return (handled);
}
621
622/*
623 * sonic_txintr:
624 *
625 *	Helper; handle transmit complete interrupts.
626 */
/*
 * sonic_txintr:
 *
 *	Helper; handle transmit complete interrupts.  Walks the ring
 *	from the oldest in-flight descriptor, reclaiming completed
 *	packets and updating statistics.  Returns the OR of all
 *	completed descriptors' status words so the caller can detect
 *	conditions such as TCR_FU (FIFO underrun).
 */
uint16_t
sonic_txintr(struct sonic_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct sonic_descsoft *ds;
	struct sonic_tda32 *tda32;
	struct sonic_tda16 *tda16;
	uint16_t status, totstat = 0;
	int i;

	/* We are reclaiming descriptors, so the queue is no longer full. */
	ifp->if_flags &= ~IFF_OACTIVE;

	for (i = sc->sc_txdirty; sc->sc_txpending != 0;
	     i = SONIC_NEXTTX(i), sc->sc_txpending--) {
		ds = &sc->sc_txsoft[i];

		/* Sync and fetch the descriptor's status word. */
		if (sc->sc_32bit) {
			SONIC_CDTXSYNC32(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			tda32 = &sc->sc_tda32[i];
			status = sonic32toh(sc, tda32->tda_status);
			SONIC_CDTXSYNC32(sc, i, BUS_DMASYNC_PREREAD);
		} else {
			SONIC_CDTXSYNC16(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			tda16 = &sc->sc_tda16[i];
			status = sonic16toh(sc, tda16->tda_status);
			SONIC_CDTXSYNC16(sc, i, BUS_DMASYNC_PREREAD);
		}

		/*
		 * If no meaningful status bits are set yet, the chip has
		 * not finished with this descriptor; stop here.
		 */
		if ((status & ~(TCR_EXDIS|TCR_CRCI|TCR_POWC|TCR_PINT)) == 0)
			break;

		totstat |= status;

		/* Done with this packet; unload and free it. */
		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
		m_freem(ds->ds_mbuf);
		ds->ds_mbuf = NULL;

		/*
		 * Check for errors and collisions.
		 */
		if (status & TCR_PTX)
			ifp->if_opackets++;
		else
			ifp->if_oerrors++;
		ifp->if_collisions += TDA_STATUS_NCOL(status);
	}

	/* Update the dirty transmit buffer pointer. */
	sc->sc_txdirty = i;

	/*
	 * Cancel the watchdog timer if there are no pending
	 * transmissions.
	 */
	if (sc->sc_txpending == 0)
		ifp->if_timer = 0;

	return (totstat);
}
690
691/*
692 * sonic_rxintr:
693 *
694 *	Helper; handle receive interrupts.
695 */
/*
 * sonic_rxintr:
 *
 *	Helper; handle receive interrupts.  Walks the receive descriptor
 *	ring from sc_rxptr, passing each good packet up the stack.
 *	Depending on chip mode and packet size, the packet is either
 *	copied into a fresh mbuf (reusing the receive buffer in place)
 *	or handed up directly with a replacement buffer allocated.
 */
void
sonic_rxintr(struct sonic_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct sonic_descsoft *ds;
	struct sonic_rda32 *rda32;
	struct sonic_rda16 *rda16;
	struct mbuf *m;
	int i, len;
	uint16_t status, bytecount, ptr0, ptr1, seqno;

	for (i = sc->sc_rxptr;; i = SONIC_NEXTRX(i)) {
		ds = &sc->sc_rxsoft[i];

		/*
		 * Sync the descriptor and read it out.  rda_inuse != 0
		 * means the chip still owns it; stop there.
		 */
		if (sc->sc_32bit) {
			SONIC_CDRXSYNC32(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			rda32 = &sc->sc_rda32[i];
			SONIC_CDRXSYNC32(sc, i, BUS_DMASYNC_PREREAD);
			if (rda32->rda_inuse != 0)
				break;
			status = sonic32toh(sc, rda32->rda_status);
			bytecount = sonic32toh(sc, rda32->rda_bytecount);
			ptr0 = sonic32toh(sc, rda32->rda_pkt_ptr0);
			ptr1 = sonic32toh(sc, rda32->rda_pkt_ptr1);
			seqno = sonic32toh(sc, rda32->rda_seqno);
		} else {
			SONIC_CDRXSYNC16(sc, i,
			    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			rda16 = &sc->sc_rda16[i];
			SONIC_CDRXSYNC16(sc, i, BUS_DMASYNC_PREREAD);
			if (rda16->rda_inuse != 0)
				break;
			status = sonic16toh(sc, rda16->rda_status);
			bytecount = sonic16toh(sc, rda16->rda_bytecount);
			ptr0 = sonic16toh(sc, rda16->rda_pkt_ptr0);
			ptr1 = sonic16toh(sc, rda16->rda_pkt_ptr1);
			seqno = sonic16toh(sc, rda16->rda_seqno);
		}

		/*
		 * Make absolutely sure this is the only packet
		 * in this receive buffer.  Our entire Rx buffer
		 * management scheme depends on this, and if the
		 * SONIC didn't follow our rule, it means we've
		 * misconfigured it.
		 */
		KASSERT(status & RCR_LPKT);

		/*
		 * Make sure the packet arrived OK.  If an error occurred,
		 * update stats and reset the descriptor.  The buffer will
		 * be reused the next time the descriptor comes up in the
		 * ring.
		 */
		if ((status & RCR_PRX) == 0) {
			if (status & RCR_FAER)
				aprint_error_dev(&sc->sc_dev, "Rx frame alignment error\n");
			else if (status & RCR_CRCR)
				aprint_error_dev(&sc->sc_dev, "Rx CRC error\n");
			ifp->if_ierrors++;
			SONIC_INIT_RXDESC(sc, i);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
		    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/*
		 * The SONIC includes the CRC with every packet.
		 */
		len = bytecount - ETHER_CRC_LEN;

		/*
		 * Ok, if the chip is in 32-bit mode, then receive
		 * buffers must be aligned to 32-bit boundaries,
		 * which means the payload is misaligned.  In this
		 * case, we must allocate a new mbuf, and copy the
		 * packet into it, scooted forward 2 bytes to ensure
		 * proper alignment.
		 *
		 * Note, in 16-bit mode, we can configure the SONIC
		 * to do what we want, and we have.
		 */
#ifndef __NO_STRICT_ALIGNMENT
		if (sc->sc_32bit) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL)
				goto dropit;
			if (len > (MHLEN - 2)) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0)
					goto dropit;
			}
			/* Offset by 2 so the IP header lands aligned. */
			m->m_data += 2;
			/*
			 * Note that we use a cluster for incoming frames,
			 * so the buffer is virtually contiguous.
			 */
			memcpy(mtod(m, void *), mtod(ds->ds_mbuf, void *),
			    len);
			/* Recycle the original buffer in place. */
			SONIC_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
		} else
#endif /* ! __NO_STRICT_ALIGNMENT */
		/*
		 * If the packet is small enough to fit in a single
		 * header mbuf, allocate one and copy the data into
		 * it.  This greatly reduces memory consumption when
		 * we receive lots of small packets.
		 */
		if (sonic_copy_small != 0 && len <= (MHLEN - 2)) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL)
				goto dropit;
			m->m_data += 2;
			/*
			 * Note that we use a cluster for incoming frames,
			 * so the buffer is virtually contiguous.
			 */
			memcpy(mtod(m, void *), mtod(ds->ds_mbuf, void *),
			    len);
			SONIC_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
			    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
		} else {
			/*
			 * Hand the buffer up directly and attach a fresh
			 * replacement to the descriptor.  If that fails,
			 * drop the packet and keep the old buffer.
			 */
			m = ds->ds_mbuf;
			if (sonic_add_rxbuf(sc, i) != 0) {
 dropit:
				ifp->if_ierrors++;
				SONIC_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
				    ds->ds_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
				continue;
			}
		}

		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;

#if NBPFILTER > 0
		/*
		 * Pass this up to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;
	CSR_WRITE(sc, SONIC_RWR, SONIC_CDRRADDR(sc, SONIC_PREVRX(i)));
}
855
856/*
857 * sonic_reset:
858 *
859 *	Perform a soft reset on the SONIC.
860 */
/*
 * sonic_reset:
 *
 *	Perform a soft reset on the SONIC: halt Tx/Rx, pulse the RST
 *	bit, mask and acknowledge all interrupts, and leave the chip
 *	idle with the command register cleared.
 */
void
sonic_reset(struct sonic_softc *sc)
{

	/* stop TX, RX and timer, and ensure RST is clear */
	CSR_WRITE(sc, SONIC_CR, CR_STP | CR_RXDIS | CR_HTX);
	delay(1000);

	/* Assert software reset. */
	CSR_WRITE(sc, SONIC_CR, CR_RST);
	delay(1000);

	/* clear all interrupts */
	CSR_WRITE(sc, SONIC_IMR, 0);
	CSR_WRITE(sc, SONIC_ISR, IMR_ALL);

	/* Release reset; the chip is now idle. */
	CSR_WRITE(sc, SONIC_CR, 0);
	delay(1000);
}
879
880/*
881 * sonic_init:		[ifnet interface function]
882 *
883 *	Initialize the interface.  Must be called at splnet().
884 */
/*
 * sonic_init:		[ifnet interface function]
 *
 *	Initialize the interface.  Must be called at splnet().
 *
 *	Stops any pending I/O, resets the chip, programs the DCRs,
 *	initializes the Tx/Rx descriptor rings and hands them to the
 *	SONIC, sets the receive filter and interrupt mask, and starts
 *	the receiver.  Returns 0 on success or an errno on failure
 *	(currently only Rx buffer allocation can fail).
 */
int
sonic_init(struct ifnet *ifp)
{
	struct sonic_softc *sc = ifp->if_softc;
	struct sonic_descsoft *ds;
	int i, error = 0;
	uint16_t reg;

	/*
	 * Cancel any pending I/O.
	 */
	sonic_stop(ifp, 0);

	/*
	 * Reset the SONIC to a known state.
	 */
	sonic_reset(sc);

	/*
	 * Bring the SONIC into reset state, and program the DCR.
	 *
	 * Note: We don't bother optimizing the transmit and receive
	 * thresholds, here. TFT/RFT values should be set in MD attachments.
	 */
	reg = sc->sc_dcr;
	if (sc->sc_32bit)
		reg |= DCR_DW;	/* 32-bit data-path width */
	CSR_WRITE(sc, SONIC_CR, CR_RST);
	CSR_WRITE(sc, SONIC_DCR, reg);
	CSR_WRITE(sc, SONIC_DCR2, sc->sc_dcr2);
	CSR_WRITE(sc, SONIC_CR, 0);

	/*
	 * Initialize the transmit descriptors (zeroed; none in flight).
	 */
	if (sc->sc_32bit) {
		for (i = 0; i < SONIC_NTXDESC; i++) {
			memset(&sc->sc_tda32[i], 0, sizeof(struct sonic_tda32));
			SONIC_CDTXSYNC32(sc, i,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		}
	} else {
		for (i = 0; i < SONIC_NTXDESC; i++) {
			memset(&sc->sc_tda16[i], 0, sizeof(struct sonic_tda16));
			SONIC_CDTXSYNC16(sc, i,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
		}
	}
	sc->sc_txpending = 0;
	sc->sc_txdirty = 0;
	sc->sc_txlast = SONIC_NTXDESC - 1;

	/*
	 * Initialize the receive descriptor ring.
	 */
	for (i = 0; i < SONIC_NRXDESC; i++) {
		ds = &sc->sc_rxsoft[i];
		if (ds->ds_mbuf == NULL) {
			if ((error = sonic_add_rxbuf(sc, i)) != 0) {
				aprint_error_dev(&sc->sc_dev, "unable to allocate or map Rx "
				    "buffer %d, error = %d\n",
				    i, error);
				/*
				 * XXX Should attempt to run with fewer receive
				 * XXX buffers instead of just failing.
				 */
				sonic_rxdrain(sc);
				goto out;
			}
		} else
			SONIC_INIT_RXDESC(sc, i);
	}
	sc->sc_rxptr = 0;

	/* Give the transmit ring to the SONIC. */
	CSR_WRITE(sc, SONIC_UTDAR, (SONIC_CDTXADDR(sc, 0) >> 16) & 0xffff);
	CSR_WRITE(sc, SONIC_CTDAR, SONIC_CDTXADDR(sc, 0) & 0xffff);

	/* Give the receive descriptor ring to the SONIC. */
	CSR_WRITE(sc, SONIC_URDAR, (SONIC_CDRXADDR(sc, 0) >> 16) & 0xffff);
	CSR_WRITE(sc, SONIC_CRDAR, SONIC_CDRXADDR(sc, 0) & 0xffff);

	/* Give the receive buffer ring to the SONIC. */
	CSR_WRITE(sc, SONIC_URRAR, (SONIC_CDRRADDR(sc, 0) >> 16) & 0xffff);
	CSR_WRITE(sc, SONIC_RSAR, SONIC_CDRRADDR(sc, 0) & 0xffff);
	if (sc->sc_32bit)
		CSR_WRITE(sc, SONIC_REAR,
		    (SONIC_CDRRADDR(sc, SONIC_NRXDESC - 1) +
		    sizeof(struct sonic_rra32)) & 0xffff);
	else
		CSR_WRITE(sc, SONIC_REAR,
		    (SONIC_CDRRADDR(sc, SONIC_NRXDESC - 1) +
		    sizeof(struct sonic_rra16)) & 0xffff);
	CSR_WRITE(sc, SONIC_RRR, SONIC_CDRRADDR(sc, 0) & 0xffff);
	CSR_WRITE(sc, SONIC_RWR, SONIC_CDRRADDR(sc, SONIC_NRXDESC - 1));

	/*
	 * Set the End-Of-Buffer counter such that only one packet
	 * will be placed into each buffer we provide.  Note we are
	 * following the recommendation of section 3.4.4 of the manual
	 * here, and have "lengthened" the receive buffers accordingly.
	 */
	if (sc->sc_32bit)
		CSR_WRITE(sc, SONIC_EOBC, (ETHER_MAX_LEN + 2) / 2);
	else
		CSR_WRITE(sc, SONIC_EOBC, (ETHER_MAX_LEN / 2));

	/* Reset the receive sequence counter. */
	CSR_WRITE(sc, SONIC_RSC, 0);

	/* Clear the tally registers. */
	CSR_WRITE(sc, SONIC_CRCETC, 0xffff);
	CSR_WRITE(sc, SONIC_FAET, 0xffff);
	CSR_WRITE(sc, SONIC_MPT, 0xffff);

	/* Set the receive filter. */
	sonic_set_filter(sc);

	/*
	 * Set the interrupt mask register.
	 */
	sc->sc_imr = IMR_RFO | IMR_RBA | IMR_RBE | IMR_RDE |
	    IMR_TXER | IMR_PTX | IMR_PRX;
	CSR_WRITE(sc, SONIC_IMR, sc->sc_imr);

	/*
	 * Start the receive process in motion.  Note, we don't
	 * start the transmit process until we actually try to
	 * transmit packets.
	 */
	CSR_WRITE(sc, SONIC_CR, CR_RXEN | CR_RRRA);

	/*
	 * ...all done!
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

 out:
	if (error)
		printf("%s: interface not running\n", device_xname(&sc->sc_dev));
	return (error);
}
1028
1029/*
1030 * sonic_rxdrain:
1031 *
1032 *	Drain the receive queue.
1033 */
1034void
1035sonic_rxdrain(struct sonic_softc *sc)
1036{
1037	struct sonic_descsoft *ds;
1038	int i;
1039
1040	for (i = 0; i < SONIC_NRXDESC; i++) {
1041		ds = &sc->sc_rxsoft[i];
1042		if (ds->ds_mbuf != NULL) {
1043			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1044			m_freem(ds->ds_mbuf);
1045			ds->ds_mbuf = NULL;
1046		}
1047	}
1048}
1049
1050/*
1051 * sonic_stop:		[ifnet interface function]
1052 *
1053 *	Stop transmission on the interface.
1054 */
/*
 * sonic_stop:		[ifnet interface function]
 *
 *	Stop transmission on the interface.  Masks interrupts, halts
 *	the transmitter/receiver/timer (polling briefly for the chip to
 *	quiesce), frees queued Tx buffers, and marks the interface
 *	down.  If `disable' is nonzero, the Rx buffers are drained too.
 */
void
sonic_stop(struct ifnet *ifp, int disable)
{
	struct sonic_softc *sc = ifp->if_softc;
	struct sonic_descsoft *ds;
	int i;

	/*
	 * Disable interrupts.
	 */
	CSR_WRITE(sc, SONIC_IMR, 0);

	/*
	 * Stop the transmitter, receiver, and timer.
	 */
	CSR_WRITE(sc, SONIC_CR, CR_HTX|CR_RXDIS|CR_STP);
	/* Poll for the busy bits to clear (up to 1000 x 2us). */
	for (i = 0; i < 1000; i++) {
		if ((CSR_READ(sc, SONIC_CR) & (CR_TXP|CR_RXEN|CR_ST)) == 0)
			break;
		delay(2);
	}
	if ((CSR_READ(sc, SONIC_CR) & (CR_TXP|CR_RXEN|CR_ST)) != 0)
		aprint_error_dev(&sc->sc_dev, "SONIC failed to stop\n");

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < SONIC_NTXDESC; i++) {
		ds = &sc->sc_txsoft[i];
		if (ds->ds_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
			m_freem(ds->ds_mbuf);
			ds->ds_mbuf = NULL;
		}
	}

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	if (disable)
		sonic_rxdrain(sc);
}
1100
1101/*
1102 * sonic_add_rxbuf:
1103 *
1104 *	Add a receive buffer to the indicated descriptor.
1105 */
1106int
1107sonic_add_rxbuf(struct sonic_softc *sc, int idx)
1108{
1109	struct sonic_descsoft *ds = &sc->sc_rxsoft[idx];
1110	struct mbuf *m;
1111	int error;
1112
1113	MGETHDR(m, M_DONTWAIT, MT_DATA);
1114	if (m == NULL)
1115		return (ENOBUFS);
1116
1117	MCLGET(m, M_DONTWAIT);
1118	if ((m->m_flags & M_EXT) == 0) {
1119		m_freem(m);
1120		return (ENOBUFS);
1121	}
1122
1123	if (ds->ds_mbuf != NULL)
1124		bus_dmamap_unload(sc->sc_dmat, ds->ds_dmamap);
1125
1126	ds->ds_mbuf = m;
1127
1128	error = bus_dmamap_load(sc->sc_dmat, ds->ds_dmamap,
1129	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
1130	    BUS_DMA_READ|BUS_DMA_NOWAIT);
1131	if (error) {
1132		aprint_error_dev(&sc->sc_dev, "can't load rx DMA map %d, error = %d\n",
1133		    idx, error);
1134		panic("sonic_add_rxbuf");	/* XXX */
1135	}
1136
1137	bus_dmamap_sync(sc->sc_dmat, ds->ds_dmamap, 0,
1138	    ds->ds_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1139
1140	SONIC_INIT_RXDESC(sc, idx);
1141
1142	return (0);
1143}
1144
1145static void
1146sonic_set_camentry(struct sonic_softc *sc, int entry, const uint8_t *enaddr)
1147{
1148
1149	if (sc->sc_32bit) {
1150		struct sonic_cda32 *cda = &sc->sc_cda32[entry];
1151
1152		cda->cda_entry = htosonic32(sc, entry);
1153		cda->cda_addr0 = htosonic32(sc, enaddr[0] | (enaddr[1] << 8));
1154		cda->cda_addr1 = htosonic32(sc, enaddr[2] | (enaddr[3] << 8));
1155		cda->cda_addr2 = htosonic32(sc, enaddr[4] | (enaddr[5] << 8));
1156	} else {
1157		struct sonic_cda16 *cda = &sc->sc_cda16[entry];
1158
1159		cda->cda_entry = htosonic16(sc, entry);
1160		cda->cda_addr0 = htosonic16(sc, enaddr[0] | (enaddr[1] << 8));
1161		cda->cda_addr1 = htosonic16(sc, enaddr[2] | (enaddr[3] << 8));
1162		cda->cda_addr2 = htosonic16(sc, enaddr[4] | (enaddr[5] << 8));
1163	}
1164}
1165
1166/*
1167 * sonic_set_filter:
1168 *
1169 *	Set the SONIC receive filter.
1170 */
1171void
1172sonic_set_filter(struct sonic_softc *sc)
1173{
1174	struct ethercom *ec = &sc->sc_ethercom;
1175	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
1176	struct ether_multi *enm;
1177	struct ether_multistep step;
1178	int i, entry = 0;
1179	uint16_t camvalid = 0;
1180	uint16_t rcr = 0;
1181
1182	if (ifp->if_flags & IFF_BROADCAST)
1183		rcr |= RCR_BRD;
1184
1185	if (ifp->if_flags & IFF_PROMISC) {
1186		rcr |= RCR_PRO;
1187		goto allmulti;
1188	}
1189
1190	/* Put our station address in the first CAM slot. */
1191	sonic_set_camentry(sc, entry, CLLADDR(ifp->if_sadl));
1192	camvalid |= (1U << entry);
1193	entry++;
1194
1195	/* Add the multicast addresses to the CAM. */
1196	ETHER_FIRST_MULTI(step, ec, enm);
1197	while (enm != NULL) {
1198		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
1199			/*
1200			 * We must listen to a range of multicast addresses.
1201			 * The only way to do this on the SONIC is to enable
1202			 * reception of all multicast packets.
1203			 */
1204			goto allmulti;
1205		}
1206
1207		if (entry == 16) {
1208			/*
1209			 * Out of CAM slots.  Have to enable reception
1210			 * of all multicast addresses.
1211			 */
1212			goto allmulti;
1213		}
1214
1215		sonic_set_camentry(sc, entry, enm->enm_addrlo);
1216		camvalid |= (1U << entry);
1217		entry++;
1218
1219		ETHER_NEXT_MULTI(step, enm);
1220	}
1221
1222	ifp->if_flags &= ~IFF_ALLMULTI;
1223	goto setit;
1224
1225 allmulti:
1226	/* Use only the first CAM slot (station address). */
1227	camvalid = 0x0001;
1228	entry = 1;
1229	rcr |= RCR_AMC;
1230
1231 setit:
1232	/* Load the CAM. */
1233	SONIC_CDCAMSYNC(sc, BUS_DMASYNC_PREWRITE);
1234	CSR_WRITE(sc, SONIC_CDP, SONIC_CDCAMADDR(sc) & 0xffff);
1235	CSR_WRITE(sc, SONIC_CDC, entry);
1236	CSR_WRITE(sc, SONIC_CR, CR_LCAM);
1237	for (i = 0; i < 10000; i++) {
1238		if ((CSR_READ(sc, SONIC_CR) & CR_LCAM) == 0)
1239			break;
1240		delay(2);
1241	}
1242	if (CSR_READ(sc, SONIC_CR) & CR_LCAM)
1243		aprint_error_dev(&sc->sc_dev, "CAM load failed\n");
1244	SONIC_CDCAMSYNC(sc, BUS_DMASYNC_POSTWRITE);
1245
1246	/* Set the CAM enable resgiter. */
1247	CSR_WRITE(sc, SONIC_CER, camvalid);
1248
1249	/* Set the receive control register. */
1250	CSR_WRITE(sc, SONIC_RCR, rcr);
1251}
1252