if_fxp.c revision 61820
1/*
2 * Copyright (c) 1995, David Greenman
3 * All rights reserved.
4 *
5 * Modifications to support NetBSD and media selection:
6 * Copyright (c) 1997 Jason R. Thorpe.  All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice unmodified, this list of conditions, and the following
13 *    disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 *    notice, this list of conditions and the following disclaimer in the
16 *    documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 *
30 * $FreeBSD: head/sys/dev/fxp/if_fxp.c 61820 2000-06-19 00:58:34Z dg $
31 */
32
33/*
34 * Intel EtherExpress Pro/100B PCI Fast Ethernet driver
35 */
36
37#include <sys/param.h>
38#include <sys/systm.h>
39#include <sys/mbuf.h>
40#include <sys/malloc.h>
41#include <sys/kernel.h>
42#include <sys/socket.h>
43
44#include <net/if.h>
45#include <net/if_dl.h>
46#include <net/if_media.h>
47
48#ifdef NS
49#include <netns/ns.h>
50#include <netns/ns_if.h>
51#endif
52
53#include <net/bpf.h>
54
55#if defined(__NetBSD__)
56
57#include <sys/ioctl.h>
58#include <sys/errno.h>
59#include <sys/device.h>
60
61#include <net/if_dl.h>
62#include <net/if_ether.h>
63
64#include <netinet/if_inarp.h>
65
66#include <vm/vm.h>
67
68#include <machine/cpu.h>
69#include <machine/bus.h>
70#include <machine/intr.h>
71
72#include <dev/pci/if_fxpreg.h>
73#include <dev/pci/if_fxpvar.h>
74
75#include <dev/pci/pcivar.h>
76#include <dev/pci/pcireg.h>
77#include <dev/pci/pcidevs.h>
78
79
80#else /* __FreeBSD__ */
81
82#include <sys/sockio.h>
83#include <sys/bus.h>
84#include <machine/bus.h>
85#include <sys/rman.h>
86#include <machine/resource.h>
87
88#include <net/ethernet.h>
89#include <net/if_arp.h>
90
91#include <vm/vm.h>		/* for vtophys */
92#include <vm/pmap.h>		/* for vtophys */
93#include <machine/clock.h>	/* for DELAY */
94
95#include <pci/pcivar.h>
96#include <pci/pcireg.h>		/* for PCIM_CMD_xxx */
97#include <pci/if_fxpreg.h>
98#include <pci/if_fxpvar.h>
99
100#endif /* __NetBSD__ */
101
102#ifdef __alpha__		/* XXX */
103/* XXX XXX NEED REAL DMA MAPPING SUPPORT XXX XXX */
104#undef vtophys
105#define	vtophys(va)	alpha_XXX_dmamap((vm_offset_t)(va))
106#endif /* __alpha__ */
107
108/*
109 * NOTE!  On the Alpha, we have an alignment constraint.  The
110 * card DMAs the packet immediately following the RFA.  However,
111 * the first thing in the packet is a 14-byte Ethernet header.
112 * This means that the packet is misaligned.  To compensate,
113 * we actually offset the RFA 2 bytes into the cluster.  This
114 * alignes the packet after the Ethernet header at a 32-bit
115 * boundary.  HOWEVER!  This means that the RFA is misaligned!
116 */
117#define	RFA_ALIGNMENT_FUDGE	2
118
119/*
120 * Inline function to copy a 16-bit aligned 32-bit quantity.
121 */
122static __inline void fxp_lwcopy __P((volatile u_int32_t *,
123	volatile u_int32_t *));
124static __inline void
125fxp_lwcopy(src, dst)
126	volatile u_int32_t *src, *dst;
127{
128#ifdef __i386__
129	*dst = *src;
130#else
131	volatile u_int16_t *a = (volatile u_int16_t *)src;
132	volatile u_int16_t *b = (volatile u_int16_t *)dst;
133
134	b[0] = a[0];
135	b[1] = a[1];
136#endif
137}
138
139/*
140 * Template for default configuration parameters.
141 * See struct fxp_cb_config for the bit definitions.
142 */
143static u_char fxp_cb_config_template[] = {
144	0x0, 0x0,		/* cb_status */
145	0x80, 0x2,		/* cb_command */
146	0xff, 0xff, 0xff, 0xff,	/* link_addr */
147	0x16,	/*  0 */
148	0x8,	/*  1 */
149	0x0,	/*  2 */
150	0x0,	/*  3 */
151	0x0,	/*  4 */
152	0x80,	/*  5 */
153	0xb2,	/*  6 */
154	0x3,	/*  7 */
155	0x1,	/*  8 */
156	0x0,	/*  9 */
157	0x26,	/* 10 */
158	0x0,	/* 11 */
159	0x60,	/* 12 */
160	0x0,	/* 13 */
161	0xf2,	/* 14 */
162	0x48,	/* 15 */
163	0x0,	/* 16 */
164	0x40,	/* 17 */
165	0xf3,	/* 18 */
166	0x0,	/* 19 */
167	0x3f,	/* 20 */
168	0x5	/* 21 */
169};
170
171/* Supported media types. */
172struct fxp_supported_media {
173	const int	fsm_phy;	/* PHY type */
174	const int	*fsm_media;	/* the media array */
175	const int	fsm_nmedia;	/* the number of supported media */
176	const int	fsm_defmedia;	/* default media for this PHY */
177};
178
179static const int fxp_media_standard[] = {
180	IFM_ETHER|IFM_10_T,
181	IFM_ETHER|IFM_10_T|IFM_FDX,
182	IFM_ETHER|IFM_100_TX,
183	IFM_ETHER|IFM_100_TX|IFM_FDX,
184	IFM_ETHER|IFM_AUTO,
185};
186#define	FXP_MEDIA_STANDARD_DEFMEDIA	(IFM_ETHER|IFM_AUTO)
187
188static const int fxp_media_default[] = {
189	IFM_ETHER|IFM_MANUAL,		/* XXX IFM_AUTO ? */
190};
191#define	FXP_MEDIA_DEFAULT_DEFMEDIA	(IFM_ETHER|IFM_MANUAL)
192
193static const struct fxp_supported_media fxp_media[] = {
194	{ FXP_PHY_DP83840, fxp_media_standard,
195	  sizeof(fxp_media_standard) / sizeof(fxp_media_standard[0]),
196	  FXP_MEDIA_STANDARD_DEFMEDIA },
197	{ FXP_PHY_DP83840A, fxp_media_standard,
198	  sizeof(fxp_media_standard) / sizeof(fxp_media_standard[0]),
199	  FXP_MEDIA_STANDARD_DEFMEDIA },
200	{ FXP_PHY_82553A, fxp_media_standard,
201	  sizeof(fxp_media_standard) / sizeof(fxp_media_standard[0]),
202	  FXP_MEDIA_STANDARD_DEFMEDIA },
203	{ FXP_PHY_82553C, fxp_media_standard,
204	  sizeof(fxp_media_standard) / sizeof(fxp_media_standard[0]),
205	  FXP_MEDIA_STANDARD_DEFMEDIA },
206	{ FXP_PHY_82555, fxp_media_standard,
207	  sizeof(fxp_media_standard) / sizeof(fxp_media_standard[0]),
208	  FXP_MEDIA_STANDARD_DEFMEDIA },
209	{ FXP_PHY_82555B, fxp_media_standard,
210	  sizeof(fxp_media_standard) / sizeof(fxp_media_standard[0]),
211	  FXP_MEDIA_STANDARD_DEFMEDIA },
212	{ FXP_PHY_80C24, fxp_media_default,
213	  sizeof(fxp_media_default) / sizeof(fxp_media_default[0]),
214	  FXP_MEDIA_DEFAULT_DEFMEDIA },
215};
216#define	NFXPMEDIA (sizeof(fxp_media) / sizeof(fxp_media[0]))
217
218static int fxp_mediachange	__P((struct ifnet *));
219static void fxp_mediastatus	__P((struct ifnet *, struct ifmediareq *));
220static void fxp_set_media	__P((struct fxp_softc *, int));
221static __inline void fxp_scb_wait __P((struct fxp_softc *));
222static FXP_INTR_TYPE fxp_intr	__P((void *));
223static void fxp_start		__P((struct ifnet *));
224static int fxp_ioctl		__P((struct ifnet *,
225				     FXP_IOCTLCMD_TYPE, caddr_t));
226static void fxp_init		__P((void *));
227static void fxp_stop		__P((struct fxp_softc *));
228static void fxp_watchdog	__P((struct ifnet *));
229static int fxp_add_rfabuf	__P((struct fxp_softc *, struct mbuf *));
230static int fxp_mdi_read		__P((struct fxp_softc *, int, int));
231static void fxp_mdi_write	__P((struct fxp_softc *, int, int, int));
232static void fxp_autosize_eeprom __P((struct fxp_softc *));
233static void fxp_read_eeprom	__P((struct fxp_softc *, u_int16_t *,
234				     int, int));
235static int fxp_attach_common	__P((struct fxp_softc *, u_int8_t *));
236static void fxp_stats_update	__P((void *));
237static void fxp_mc_setup	__P((struct fxp_softc *));
238
239/*
240 * Set initial transmit threshold at 64 (512 bytes). This is
241 * increased by 64 (512 bytes) at a time, to maximum of 192
242 * (1536 bytes), if an underrun occurs.
243 */
244static int tx_threshold = 64;
245
246/*
247 * Number of transmit control blocks. This determines the number
248 * of transmit buffers that can be chained in the CB list.
249 * This must be a power of two.
250 */
251#define FXP_NTXCB	128
252
253/*
254 * Number of completed TX commands at which point an interrupt
255 * will be generated to garbage collect the attached buffers.
256 * Must be at least one less than FXP_NTXCB, and should be
257 * enough less so that the transmitter doesn't becomes idle
258 * during the buffer rundown (which would reduce performance).
259 */
260#define FXP_CXINT_THRESH 120
261
262/*
263 * TxCB list index mask. This is used to do list wrap-around.
264 */
265#define FXP_TXCB_MASK	(FXP_NTXCB - 1)
266
267/*
268 * Number of receive frame area buffers. These are large so chose
269 * wisely.
270 */
271#define FXP_NRFABUFS	64
272
273/*
274 * Maximum number of seconds that the receiver can be idle before we
275 * assume it's dead and attempt to reset it by reprogramming the
276 * multicast filter. This is part of a work-around for a bug in the
277 * NIC. See fxp_stats_update().
278 */
279#define FXP_MAX_RX_IDLE	15
280
281/*
282 * Wait for the previous command to be accepted (but not necessarily
283 * completed).
284 */
285static __inline void
286fxp_scb_wait(sc)
287	struct fxp_softc *sc;
288{
289	int i = 10000;
290
291	while (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) && --i);
292}
293
294/*************************************************************
295 * Operating system-specific autoconfiguration glue
296 *************************************************************/
297
298#if defined(__NetBSD__)
299
300#ifdef __BROKEN_INDIRECT_CONFIG
301static int fxp_match __P((struct device *, void *, void *));
302#else
303static int fxp_match __P((struct device *, struct cfdata *, void *));
304#endif
305static void fxp_attach __P((struct device *, struct device *, void *));
306
307static void	fxp_shutdown __P((void *));
308
309/* Compensate for lack of a generic ether_ioctl() */
310static int	fxp_ether_ioctl __P((struct ifnet *,
311				    FXP_IOCTLCMD_TYPE, caddr_t));
312#define	ether_ioctl	fxp_ether_ioctl
313
314struct cfattach fxp_ca = {
315	sizeof(struct fxp_softc), fxp_match, fxp_attach
316};
317
318struct cfdriver fxp_cd = {
319	NULL, "fxp", DV_IFNET
320};
321
322/*
323 * Check if a device is an 82557.
324 */
325static int
326fxp_match(parent, match, aux)
327	struct device *parent;
328#ifdef __BROKEN_INDIRECT_CONFIG
329	void *match;
330#else
331	struct cfdata *match;
332#endif
333	void *aux;
334{
335	struct pci_attach_args *pa = aux;
336
337	if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_INTEL)
338		return (0);
339
340	switch (PCI_PRODUCT(pa->pa_id)) {
341	case PCI_PRODUCT_INTEL_82557:
342		return (1);
343	}
344
345	return (0);
346}
347
348static void
349fxp_attach(parent, self, aux)
350	struct device *parent, *self;
351	void *aux;
352{
353	struct fxp_softc *sc = (struct fxp_softc *)self;
354	struct pci_attach_args *pa = aux;
355	pci_chipset_tag_t pc = pa->pa_pc;
356	pci_intr_handle_t ih;
357	const char *intrstr = NULL;
358	u_int8_t enaddr[6];
359	struct ifnet *ifp;
360
361	/*
362	 * Map control/status registers.
363	 */
364	if (pci_mapreg_map(pa, FXP_PCI_MMBA, PCI_MAPREG_TYPE_MEM, 0,
365	    &sc->sc_st, &sc->sc_sh, NULL, NULL)) {
366		printf(": can't map registers\n");
367		return;
368	}
369	printf(": Intel EtherExpress Pro 10/100B Ethernet\n");
370
371	/*
372	 * Allocate our interrupt.
373	 */
374	if (pci_intr_map(pc, pa->pa_intrtag, pa->pa_intrpin,
375	    pa->pa_intrline, &ih)) {
376		printf("%s: couldn't map interrupt\n", sc->sc_dev.dv_xname);
377		return;
378	}
379	intrstr = pci_intr_string(pc, ih);
380	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, fxp_intr, sc);
381	if (sc->sc_ih == NULL) {
382		printf("%s: couldn't establish interrupt",
383		    sc->sc_dev.dv_xname);
384		if (intrstr != NULL)
385			printf(" at %s", intrstr);
386		printf("\n");
387		return;
388	}
389	printf("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr);
390
391	/* Do generic parts of attach. */
392	if (fxp_attach_common(sc, enaddr)) {
393		/* Failed! */
394		return;
395	}
396
397	printf("%s: Ethernet address %s%s\n", sc->sc_dev.dv_xname,
398	    ether_sprintf(enaddr), sc->phy_10Mbps_only ? ", 10Mbps" : "");
399
400	ifp = &sc->sc_ethercom.ec_if;
401	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
402	ifp->if_softc = sc;
403	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
404	ifp->if_ioctl = fxp_ioctl;
405	ifp->if_start = fxp_start;
406	ifp->if_watchdog = fxp_watchdog;
407
408	/*
409	 * Attach the interface.
410	 */
411	if_attach(ifp);
412	/*
413	 * Let the system queue as many packets as we have available
414	 * TX descriptors.
415	 */
416	ifp->if_snd.ifq_maxlen = FXP_NTXCB - 1;
417	ether_ifattach(ifp, enaddr);
418	bpfattach(&sc->sc_ethercom.ec_if.if_bpf, ifp, DLT_EN10MB,
419	    sizeof(struct ether_header));
420
421	/*
422	 * Add shutdown hook so that DMA is disabled prior to reboot. Not
423	 * doing do could allow DMA to corrupt kernel memory during the
424	 * reboot before the driver initializes.
425	 */
426	shutdownhook_establish(fxp_shutdown, sc);
427}
428
429/*
430 * Device shutdown routine. Called at system shutdown after sync. The
431 * main purpose of this routine is to shut off receiver DMA so that
432 * kernel memory doesn't get clobbered during warmboot.
433 */
434static void
435fxp_shutdown(sc)
436	void *sc;
437{
438	fxp_stop((struct fxp_softc *) sc);
439}
440
441static int
442fxp_ether_ioctl(ifp, cmd, data)
443	struct ifnet *ifp;
444	FXP_IOCTLCMD_TYPE cmd;
445	caddr_t data;
446{
447	struct ifaddr *ifa = (struct ifaddr *) data;
448	struct fxp_softc *sc = ifp->if_softc;
449
450	switch (cmd) {
451	case SIOCSIFADDR:
452		ifp->if_flags |= IFF_UP;
453
454		switch (ifa->ifa_addr->sa_family) {
455#ifdef INET
456		case AF_INET:
457			fxp_init(sc);
458			arp_ifinit(ifp, ifa);
459			break;
460#endif
461#ifdef NS
462		case AF_NS:
463		    {
464			 register struct ns_addr *ina = &IA_SNS(ifa)->sns_addr;
465
466			 if (ns_nullhost(*ina))
467				ina->x_host = *(union ns_host *)
468				    LLADDR(ifp->if_sadl);
469			 else
470				bcopy(ina->x_host.c_host, LLADDR(ifp->if_sadl),
471				    ifp->if_addrlen);
472			 /* Set new address. */
473			 fxp_init(sc);
474			 break;
475		    }
476#endif
477		default:
478			fxp_init(sc);
479			break;
480		}
481		break;
482
483	default:
484		return (EINVAL);
485	}
486
487	return (0);
488}
489
490#else /* __FreeBSD__ */
491
492/*
493 * Return identification string if this is device is ours.
494 */
495static int
496fxp_probe(device_t dev)
497{
498	if (pci_get_vendor(dev) == FXP_VENDORID_INTEL) {
499		switch (pci_get_device(dev)) {
500
501		case FXP_DEVICEID_i82557:
502			device_set_desc(dev, "Intel Pro 10/100B/100+ Ethernet");
503			return 0;
504		case FXP_DEVICEID_i82559:
505			device_set_desc(dev, "Intel InBusiness 10/100 Ethernet");
506			return 0;
507		case FXP_DEVICEID_i82559ER:
508			device_set_desc(dev, "Intel Embedded 10/100 Ethernet");
509			return 0;
510		default:
511			break;
512		}
513	}
514
515	return ENXIO;
516}
517
518static int
519fxp_attach(device_t dev)
520{
521	int error = 0;
522	struct fxp_softc *sc = device_get_softc(dev);
523	struct ifnet *ifp;
524	int s;
525	u_long val;
526	int rid;
527
528	callout_handle_init(&sc->stat_ch);
529
530	s = splimp();
531
532	/*
533	 * Enable bus mastering.
534	 */
535	val = pci_read_config(dev, PCIR_COMMAND, 2);
536	val |= (PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN);
537	pci_write_config(dev, PCIR_COMMAND, val, 2);
538
539	/*
540	 * Map control/status registers.
541	 */
542	rid = FXP_PCI_MMBA;
543	sc->mem = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
544				     0, ~0, 1, RF_ACTIVE);
545	if (!sc->mem) {
546		device_printf(dev, "could not map memory\n");
547		error = ENXIO;
548		goto fail;
549        }
550
551	sc->sc_st = rman_get_bustag(sc->mem);
552	sc->sc_sh = rman_get_bushandle(sc->mem);
553
554	/*
555	 * Allocate our interrupt.
556	 */
557	rid = 0;
558	sc->irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
559				 RF_SHAREABLE | RF_ACTIVE);
560	if (sc->irq == NULL) {
561		device_printf(dev, "could not map interrupt\n");
562		error = ENXIO;
563		goto fail;
564	}
565
566	error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET,
567			       fxp_intr, sc, &sc->ih);
568	if (error) {
569		device_printf(dev, "could not setup irq\n");
570		goto fail;
571	}
572
573	/* Do generic parts of attach. */
574	if (fxp_attach_common(sc, sc->arpcom.ac_enaddr)) {
575		/* Failed! */
576		bus_teardown_intr(dev, sc->irq, sc->ih);
577		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq);
578		bus_release_resource(dev, SYS_RES_MEMORY, FXP_PCI_MMBA, sc->mem);
579		error = ENXIO;
580		goto fail;
581	}
582
583	device_printf(dev, "Ethernet address %6D%s\n",
584	    sc->arpcom.ac_enaddr, ":", sc->phy_10Mbps_only ? ", 10Mbps" : "");
585
586	ifp = &sc->arpcom.ac_if;
587	ifp->if_unit = device_get_unit(dev);
588	ifp->if_name = "fxp";
589	ifp->if_output = ether_output;
590	ifp->if_baudrate = 100000000;
591	ifp->if_init = fxp_init;
592	ifp->if_softc = sc;
593	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
594	ifp->if_ioctl = fxp_ioctl;
595	ifp->if_start = fxp_start;
596	ifp->if_watchdog = fxp_watchdog;
597
598	/*
599	 * Attach the interface.
600	 */
601	if_attach(ifp);
602	/*
603	 * Let the system queue as many packets as we have available
604	 * TX descriptors.
605	 */
606	ifp->if_snd.ifq_maxlen = FXP_NTXCB - 1;
607	ether_ifattach(ifp);
608	bpfattach(ifp, DLT_EN10MB, sizeof(struct ether_header));
609
610	splx(s);
611	return 0;
612
613 fail:
614	splx(s);
615	return error;
616}
617
618/*
619 * Detach interface.
620 */
621static int
622fxp_detach(device_t dev)
623{
624	struct fxp_softc *sc = device_get_softc(dev);
625	int s;
626
627	s = splimp();
628
629	/*
630	 * Close down routes etc.
631	 */
632	if_detach(&sc->arpcom.ac_if);
633
634	/*
635	 * Stop DMA and drop transmit queue.
636	 */
637	fxp_stop(sc);
638
639	/*
640	 * Deallocate resources.
641	 */
642	bus_teardown_intr(dev, sc->irq, sc->ih);
643	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq);
644	bus_release_resource(dev, SYS_RES_MEMORY, FXP_PCI_MMBA, sc->mem);
645
646	/*
647	 * Free all the receive buffers.
648	 */
649	if (sc->rfa_headm != NULL)
650		m_freem(sc->rfa_headm);
651
652	/*
653	 * Free all media structures.
654	 */
655	ifmedia_removeall(&sc->sc_media);
656
657	/*
658	 * Free anciliary structures.
659	 */
660	free(sc->cbl_base, M_DEVBUF);
661	free(sc->fxp_stats, M_DEVBUF);
662	free(sc->mcsp, M_DEVBUF);
663
664	splx(s);
665
666	return 0;
667}
668
669/*
670 * Device shutdown routine. Called at system shutdown after sync. The
671 * main purpose of this routine is to shut off receiver DMA so that
672 * kernel memory doesn't get clobbered during warmboot.
673 */
674static int
675fxp_shutdown(device_t dev)
676{
677	/*
678	 * Make sure that DMA is disabled prior to reboot. Not doing
679	 * do could allow DMA to corrupt kernel memory during the
680	 * reboot before the driver initializes.
681	 */
682	fxp_stop((struct fxp_softc *) device_get_softc(dev));
683	return 0;
684}
685
686static device_method_t fxp_methods[] = {
687	/* Device interface */
688	DEVMETHOD(device_probe,		fxp_probe),
689	DEVMETHOD(device_attach,	fxp_attach),
690	DEVMETHOD(device_detach,	fxp_detach),
691	DEVMETHOD(device_shutdown,	fxp_shutdown),
692
693	{ 0, 0 }
694};
695
696static driver_t fxp_driver = {
697	"fxp",
698	fxp_methods,
699	sizeof(struct fxp_softc),
700};
701
702static devclass_t fxp_devclass;
703
704DRIVER_MODULE(if_fxp, pci, fxp_driver, fxp_devclass, 0, 0);
705
706#endif /* __NetBSD__ */
707
708/*************************************************************
709 * End of operating system-specific autoconfiguration glue
710 *************************************************************/
711
712/*
713 * Do generic parts of attach.
714 */
715static int
716fxp_attach_common(sc, enaddr)
717	struct fxp_softc *sc;
718	u_int8_t *enaddr;
719{
720	u_int16_t data;
721	int i, nmedia, defmedia;
722	const int *media;
723
724	/*
725	 * Reset to a stable state.
726	 */
727	CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET);
728	DELAY(10);
729
730	sc->cbl_base = malloc(sizeof(struct fxp_cb_tx) * FXP_NTXCB,
731	    M_DEVBUF, M_NOWAIT);
732	if (sc->cbl_base == NULL)
733		goto fail;
734	bzero(sc->cbl_base, sizeof(struct fxp_cb_tx) * FXP_NTXCB);
735
736	sc->fxp_stats = malloc(sizeof(struct fxp_stats), M_DEVBUF, M_NOWAIT);
737	if (sc->fxp_stats == NULL)
738		goto fail;
739	bzero(sc->fxp_stats, sizeof(struct fxp_stats));
740
741	sc->mcsp = malloc(sizeof(struct fxp_cb_mcs), M_DEVBUF, M_NOWAIT);
742	if (sc->mcsp == NULL)
743		goto fail;
744
745	/*
746	 * Pre-allocate our receive buffers.
747	 */
748	for (i = 0; i < FXP_NRFABUFS; i++) {
749		if (fxp_add_rfabuf(sc, NULL) != 0) {
750			goto fail;
751		}
752	}
753
754	/*
755	 * Find out how large of an SEEPROM we have.
756	 */
757	fxp_autosize_eeprom(sc);
758
759	/*
760	 * Get info about the primary PHY
761	 */
762	fxp_read_eeprom(sc, (u_int16_t *)&data, 6, 1);
763	sc->phy_primary_addr = data & 0xff;
764	sc->phy_primary_device = (data >> 8) & 0x3f;
765	sc->phy_10Mbps_only = data >> 15;
766
767	/*
768	 * Read MAC address.
769	 */
770	fxp_read_eeprom(sc, (u_int16_t *)enaddr, 0, 3);
771
772	/*
773	 * Initialize the media structures.
774	 */
775
776	media = fxp_media_default;
777	nmedia = sizeof(fxp_media_default) / sizeof(fxp_media_default[0]);
778	defmedia = FXP_MEDIA_DEFAULT_DEFMEDIA;
779
780	for (i = 0; i < NFXPMEDIA; i++) {
781		if (sc->phy_primary_device == fxp_media[i].fsm_phy) {
782			media = fxp_media[i].fsm_media;
783			nmedia = fxp_media[i].fsm_nmedia;
784			defmedia = fxp_media[i].fsm_defmedia;
785		}
786	}
787
788	ifmedia_init(&sc->sc_media, 0, fxp_mediachange, fxp_mediastatus);
789	for (i = 0; i < nmedia; i++) {
790		if (IFM_SUBTYPE(media[i]) == IFM_100_TX && sc->phy_10Mbps_only)
791			continue;
792		ifmedia_add(&sc->sc_media, media[i], 0, NULL);
793	}
794	ifmedia_set(&sc->sc_media, defmedia);
795
796	return (0);
797
798 fail:
799	printf(FXP_FORMAT ": Failed to malloc memory\n", FXP_ARGS(sc));
800	if (sc->cbl_base)
801		free(sc->cbl_base, M_DEVBUF);
802	if (sc->fxp_stats)
803		free(sc->fxp_stats, M_DEVBUF);
804	if (sc->mcsp)
805		free(sc->mcsp, M_DEVBUF);
806	/* frees entire chain */
807	if (sc->rfa_headm)
808		m_freem(sc->rfa_headm);
809
810	return (ENOMEM);
811}
812
813/*
814 * From NetBSD:
815 *
816 * Figure out EEPROM size.
817 *
818 * 559's can have either 64-word or 256-word EEPROMs, the 558
819 * datasheet only talks about 64-word EEPROMs, and the 557 datasheet
820 * talks about the existance of 16 to 256 word EEPROMs.
821 *
822 * The only known sizes are 64 and 256, where the 256 version is used
823 * by CardBus cards to store CIS information.
824 *
825 * The address is shifted in msb-to-lsb, and after the last
826 * address-bit the EEPROM is supposed to output a `dummy zero' bit,
827 * after which follows the actual data. We try to detect this zero, by
828 * probing the data-out bit in the EEPROM control register just after
829 * having shifted in a bit. If the bit is zero, we assume we've
830 * shifted enough address bits. The data-out should be tri-state,
831 * before this, which should translate to a logical one.
832 *
833 * Other ways to do this would be to try to read a register with known
834 * contents with a varying number of address bits, but no such
835 * register seem to be available. The high bits of register 10 are 01
836 * on the 558 and 559, but apparently not on the 557.
837 *
838 * The Linux driver computes a checksum on the EEPROM data, but the
839 * value of this checksum is not very well documented.
840 */
841static void
842fxp_autosize_eeprom(sc)
843	struct fxp_softc *sc;
844{
845	u_int16_t reg;
846	int x;
847
848	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
849	/*
850	 * Shift in read opcode.
851	 */
852	for (x = 3; x > 0; x--) {
853		if (FXP_EEPROM_OPC_READ & (1 << (x - 1))) {
854			reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
855		} else {
856			reg = FXP_EEPROM_EECS;
857		}
858		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
859		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL,
860		    reg | FXP_EEPROM_EESK);
861		DELAY(1);
862		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
863		DELAY(1);
864	}
865	/*
866	 * Shift in address.
867	 * Wait for the dummy zero following a correct address shift.
868	 */
869	for (x = 1; x <= 8; x++) {
870		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
871		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL,
872			FXP_EEPROM_EECS | FXP_EEPROM_EESK);
873		DELAY(1);
874		if ((CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO) == 0)
875			break;
876		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
877		DELAY(1);
878	}
879	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
880	DELAY(1);
881	sc->eeprom_size = x;
882}
883/*
884 * Read from the serial EEPROM. Basically, you manually shift in
885 * the read opcode (one bit at a time) and then shift in the address,
886 * and then you shift out the data (all of this one bit at a time).
887 * The word size is 16 bits, so you have to provide the address for
888 * every 16 bits of data.
889 */
890static void
891fxp_read_eeprom(sc, data, offset, words)
892	struct fxp_softc *sc;
893	u_short *data;
894	int offset;
895	int words;
896{
897	u_int16_t reg;
898	int i, x;
899
900	for (i = 0; i < words; i++) {
901		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
902		/*
903		 * Shift in read opcode.
904		 */
905		for (x = 3; x > 0; x--) {
906			if (FXP_EEPROM_OPC_READ & (1 << (x - 1))) {
907				reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
908			} else {
909				reg = FXP_EEPROM_EECS;
910			}
911			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
912			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL,
913			    reg | FXP_EEPROM_EESK);
914			DELAY(1);
915			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
916			DELAY(1);
917		}
918		/*
919		 * Shift in address.
920		 */
921		for (x = sc->eeprom_size; x > 0; x--) {
922			if ((i + offset) & (1 << (x - 1))) {
923				reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
924			} else {
925				reg = FXP_EEPROM_EECS;
926			}
927			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
928			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL,
929			    reg | FXP_EEPROM_EESK);
930			DELAY(1);
931			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
932			DELAY(1);
933		}
934		reg = FXP_EEPROM_EECS;
935		data[i] = 0;
936		/*
937		 * Shift out data.
938		 */
939		for (x = 16; x > 0; x--) {
940			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL,
941			    reg | FXP_EEPROM_EESK);
942			DELAY(1);
943			if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) &
944			    FXP_EEPROM_EEDO)
945				data[i] |= (1 << (x - 1));
946			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
947			DELAY(1);
948		}
949		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
950		DELAY(1);
951	}
952}
953
954/*
955 * Start packet transmission on the interface.
956 */
957static void
958fxp_start(ifp)
959	struct ifnet *ifp;
960{
961	struct fxp_softc *sc = ifp->if_softc;
962	struct fxp_cb_tx *txp;
963
964	/*
965	 * See if we need to suspend xmit until the multicast filter
966	 * has been reprogrammed (which can only be done at the head
967	 * of the command chain).
968	 */
969	if (sc->need_mcsetup)
970		return;
971
972	txp = NULL;
973
974	/*
975	 * We're finished if there is nothing more to add to the list or if
976	 * we're all filled up with buffers to transmit.
977	 * NOTE: One TxCB is reserved to guarantee that fxp_mc_setup() can add
978	 *       a NOP command when needed.
979	 */
980	while (ifp->if_snd.ifq_head != NULL && sc->tx_queued < FXP_NTXCB - 1) {
981		struct mbuf *m, *mb_head;
982		int segment;
983
984		/*
985		 * Grab a packet to transmit.
986		 */
987		IF_DEQUEUE(&ifp->if_snd, mb_head);
988
989		/*
990		 * Get pointer to next available tx desc.
991		 */
992		txp = sc->cbl_last->next;
993
994		/*
995		 * Go through each of the mbufs in the chain and initialize
996		 * the transmit buffer descriptors with the physical address
997		 * and size of the mbuf.
998		 */
999tbdinit:
1000		for (m = mb_head, segment = 0; m != NULL; m = m->m_next) {
1001			if (m->m_len != 0) {
1002				if (segment == FXP_NTXSEG)
1003					break;
1004				txp->tbd[segment].tb_addr =
1005				    vtophys(mtod(m, vm_offset_t));
1006				txp->tbd[segment].tb_size = m->m_len;
1007				segment++;
1008			}
1009		}
1010		if (m != NULL) {
1011			struct mbuf *mn;
1012
1013			/*
1014			 * We ran out of segments. We have to recopy this mbuf
1015			 * chain first. Bail out if we can't get the new buffers.
1016			 */
1017			MGETHDR(mn, M_DONTWAIT, MT_DATA);
1018			if (mn == NULL) {
1019				m_freem(mb_head);
1020				break;
1021			}
1022			if (mb_head->m_pkthdr.len > MHLEN) {
1023				MCLGET(mn, M_DONTWAIT);
1024				if ((mn->m_flags & M_EXT) == 0) {
1025					m_freem(mn);
1026					m_freem(mb_head);
1027					break;
1028				}
1029			}
1030			m_copydata(mb_head, 0, mb_head->m_pkthdr.len,
1031			    mtod(mn, caddr_t));
1032			mn->m_pkthdr.len = mn->m_len = mb_head->m_pkthdr.len;
1033			m_freem(mb_head);
1034			mb_head = mn;
1035			goto tbdinit;
1036		}
1037
1038		txp->tbd_number = segment;
1039		txp->mb_head = mb_head;
1040		txp->cb_status = 0;
1041		if (sc->tx_queued != FXP_CXINT_THRESH - 1) {
1042			txp->cb_command =
1043			    FXP_CB_COMMAND_XMIT | FXP_CB_COMMAND_SF | FXP_CB_COMMAND_S;
1044		} else {
1045			txp->cb_command =
1046			    FXP_CB_COMMAND_XMIT | FXP_CB_COMMAND_SF | FXP_CB_COMMAND_S | FXP_CB_COMMAND_I;
1047			/*
1048			 * Set a 5 second timer just in case we don't hear from the
1049			 * card again.
1050			 */
1051			ifp->if_timer = 5;
1052		}
1053		txp->tx_threshold = tx_threshold;
1054
1055		/*
1056		 * Advance the end of list forward.
1057		 */
1058		sc->cbl_last->cb_command &= ~FXP_CB_COMMAND_S;
1059		sc->cbl_last = txp;
1060
1061		/*
1062		 * Advance the beginning of the list forward if there are
1063		 * no other packets queued (when nothing is queued, cbl_first
1064		 * sits on the last TxCB that was sent out).
1065		 */
1066		if (sc->tx_queued == 0)
1067			sc->cbl_first = txp;
1068
1069		sc->tx_queued++;
1070
1071		/*
1072		 * Pass packet to bpf if there is a listener.
1073		 */
1074		if (ifp->if_bpf)
1075			bpf_mtap(FXP_BPFTAP_ARG(ifp), mb_head);
1076	}
1077
1078	/*
1079	 * We're finished. If we added to the list, issue a RESUME to get DMA
1080	 * going again if suspended.
1081	 */
1082	if (txp != NULL) {
1083		fxp_scb_wait(sc);
1084		CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_RESUME);
1085	}
1086}
1087
1088/*
1089 * Process interface interrupts.
1090 */
1091static FXP_INTR_TYPE
1092fxp_intr(arg)
1093	void *arg;
1094{
1095	struct fxp_softc *sc = arg;
1096	struct ifnet *ifp = &sc->sc_if;
1097	u_int8_t statack;
1098#if defined(__NetBSD__)
1099	int claimed = 0;
1100#endif
1101
1102	while ((statack = CSR_READ_1(sc, FXP_CSR_SCB_STATACK)) != 0) {
1103#if defined(__NetBSD__)
1104		claimed = 1;
1105#endif
1106		/*
1107		 * First ACK all the interrupts in this pass.
1108		 */
1109		CSR_WRITE_1(sc, FXP_CSR_SCB_STATACK, statack);
1110
1111		/*
1112		 * Free any finished transmit mbuf chains.
1113		 */
1114		if (statack & FXP_SCB_STATACK_CXTNO) {
1115			struct fxp_cb_tx *txp;
1116
1117			for (txp = sc->cbl_first; sc->tx_queued &&
1118			    (txp->cb_status & FXP_CB_STATUS_C) != 0;
1119			    txp = txp->next) {
1120				if (txp->mb_head != NULL) {
1121					m_freem(txp->mb_head);
1122					txp->mb_head = NULL;
1123				}
1124				sc->tx_queued--;
1125			}
1126			sc->cbl_first = txp;
1127			ifp->if_timer = 0;
1128			if (sc->tx_queued == 0) {
1129				if (sc->need_mcsetup)
1130					fxp_mc_setup(sc);
1131			}
1132			/*
1133			 * Try to start more packets transmitting.
1134			 */
1135			if (ifp->if_snd.ifq_head != NULL)
1136				fxp_start(ifp);
1137		}
1138		/*
1139		 * Process receiver interrupts. If a no-resource (RNR)
1140		 * condition exists, get whatever packets we can and
1141		 * re-start the receiver.
1142		 */
1143		if (statack & (FXP_SCB_STATACK_FR | FXP_SCB_STATACK_RNR)) {
1144			struct mbuf *m;
1145			struct fxp_rfa *rfa;
1146rcvloop:
1147			m = sc->rfa_headm;
1148			rfa = (struct fxp_rfa *)(m->m_ext.ext_buf +
1149			    RFA_ALIGNMENT_FUDGE);
1150
1151			if (rfa->rfa_status & FXP_RFA_STATUS_C) {
1152				/*
1153				 * Remove first packet from the chain.
1154				 */
1155				sc->rfa_headm = m->m_next;
1156				m->m_next = NULL;
1157
1158				/*
1159				 * Add a new buffer to the receive chain.
1160				 * If this fails, the old buffer is recycled
1161				 * instead.
1162				 */
1163				if (fxp_add_rfabuf(sc, m) == 0) {
1164					struct ether_header *eh;
1165					int total_len;
1166
1167					total_len = rfa->actual_size &
1168					    (MCLBYTES - 1);
1169					if (total_len <
1170					    sizeof(struct ether_header)) {
1171						m_freem(m);
1172						goto rcvloop;
1173					}
1174					m->m_pkthdr.rcvif = ifp;
1175					m->m_pkthdr.len = m->m_len = total_len;
1176					eh = mtod(m, struct ether_header *);
1177					m->m_data +=
1178					    sizeof(struct ether_header);
1179					m->m_len -=
1180					    sizeof(struct ether_header);
1181					m->m_pkthdr.len = m->m_len;
1182					ether_input(ifp, eh, m);
1183				}
1184				goto rcvloop;
1185			}
1186			if (statack & FXP_SCB_STATACK_RNR) {
1187				fxp_scb_wait(sc);
1188				CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL,
1189				    vtophys(sc->rfa_headm->m_ext.ext_buf) +
1190					RFA_ALIGNMENT_FUDGE);
1191				CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND,
1192				    FXP_SCB_COMMAND_RU_START);
1193			}
1194		}
1195	}
1196#if defined(__NetBSD__)
1197	return (claimed);
1198#endif
1199}
1200
1201/*
1202 * Update packet in/out/collision statistics. The i82557 doesn't
1203 * allow you to access these counters without doing a fairly
1204 * expensive DMA to get _all_ of the statistics it maintains, so
1205 * we do this operation here only once per second. The statistics
1206 * counters in the kernel are updated from the previous dump-stats
1207 * DMA and then a new dump-stats DMA is started. The on-chip
1208 * counters are zeroed when the DMA completes. If we can't start
1209 * the DMA immediately, we don't wait - we just prepare to read
1210 * them again next time.
1211 */
1212static void
1213fxp_stats_update(arg)
1214	void *arg;
1215{
1216	struct fxp_softc *sc = arg;
1217	struct ifnet *ifp = &sc->sc_if;
1218	struct fxp_stats *sp = sc->fxp_stats;
1219	struct fxp_cb_tx *txp;
1220	int s;
1221
1222	ifp->if_opackets += sp->tx_good;
1223	ifp->if_collisions += sp->tx_total_collisions;
1224	if (sp->rx_good) {
1225		ifp->if_ipackets += sp->rx_good;
1226		sc->rx_idle_secs = 0;
1227	} else {
1228		/*
1229		 * Receiver's been idle for another second.
1230		 */
1231		sc->rx_idle_secs++;
1232	}
1233	ifp->if_ierrors +=
1234	    sp->rx_crc_errors +
1235	    sp->rx_alignment_errors +
1236	    sp->rx_rnr_errors +
1237	    sp->rx_overrun_errors;
1238	/*
1239	 * If any transmit underruns occured, bump up the transmit
1240	 * threshold by another 512 bytes (64 * 8).
1241	 */
1242	if (sp->tx_underruns) {
1243		ifp->if_oerrors += sp->tx_underruns;
1244		if (tx_threshold < 192)
1245			tx_threshold += 64;
1246	}
1247	s = splimp();
1248	/*
1249	 * Release any xmit buffers that have completed DMA. This isn't
1250	 * strictly necessary to do here, but it's advantagous for mbufs
1251	 * with external storage to be released in a timely manner rather
1252	 * than being defered for a potentially long time. This limits
1253	 * the delay to a maximum of one second.
1254	 */
1255	for (txp = sc->cbl_first; sc->tx_queued &&
1256	    (txp->cb_status & FXP_CB_STATUS_C) != 0;
1257	    txp = txp->next) {
1258		if (txp->mb_head != NULL) {
1259			m_freem(txp->mb_head);
1260			txp->mb_head = NULL;
1261		}
1262		sc->tx_queued--;
1263	}
1264	sc->cbl_first = txp;
1265	/*
1266	 * If we haven't received any packets in FXP_MAC_RX_IDLE seconds,
1267	 * then assume the receiver has locked up and attempt to clear
1268	 * the condition by reprogramming the multicast filter. This is
1269	 * a work-around for a bug in the 82557 where the receiver locks
1270	 * up if it gets certain types of garbage in the syncronization
1271	 * bits prior to the packet header. This bug is supposed to only
1272	 * occur in 10Mbps mode, but has been seen to occur in 100Mbps
1273	 * mode as well (perhaps due to a 10/100 speed transition).
1274	 */
1275	if (sc->rx_idle_secs > FXP_MAX_RX_IDLE) {
1276		sc->rx_idle_secs = 0;
1277		fxp_mc_setup(sc);
1278	}
1279	/*
1280	 * If there is no pending command, start another stats
1281	 * dump. Otherwise punt for now.
1282	 */
1283	if (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) == 0) {
1284		/*
1285		 * Start another stats dump.
1286		 */
1287		CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND,
1288		    FXP_SCB_COMMAND_CU_DUMPRESET);
1289	} else {
1290		/*
1291		 * A previous command is still waiting to be accepted.
1292		 * Just zero our copy of the stats and wait for the
1293		 * next timer event to update them.
1294		 */
1295		sp->tx_good = 0;
1296		sp->tx_underruns = 0;
1297		sp->tx_total_collisions = 0;
1298
1299		sp->rx_good = 0;
1300		sp->rx_crc_errors = 0;
1301		sp->rx_alignment_errors = 0;
1302		sp->rx_rnr_errors = 0;
1303		sp->rx_overrun_errors = 0;
1304	}
1305	splx(s);
1306	/*
1307	 * Schedule another timeout one second from now.
1308	 */
1309	sc->stat_ch = timeout(fxp_stats_update, sc, hz);
1310}
1311
1312/*
1313 * Stop the interface. Cancels the statistics updater and resets
1314 * the interface.
1315 */
1316static void
1317fxp_stop(sc)
1318	struct fxp_softc *sc;
1319{
1320	struct ifnet *ifp = &sc->sc_if;
1321	struct fxp_cb_tx *txp;
1322	int i;
1323
1324	/*
1325	 * Cancel stats updater.
1326	 */
1327	untimeout(fxp_stats_update, sc, sc->stat_ch);
1328
1329	/*
1330	 * Issue software reset
1331	 */
1332	CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET);
1333	DELAY(10);
1334
1335	/*
1336	 * Release any xmit buffers.
1337	 */
1338	txp = sc->cbl_base;
1339	if (txp != NULL) {
1340		for (i = 0; i < FXP_NTXCB; i++) {
1341			if (txp[i].mb_head != NULL) {
1342				m_freem(txp[i].mb_head);
1343				txp[i].mb_head = NULL;
1344			}
1345		}
1346	}
1347	sc->tx_queued = 0;
1348
1349	/*
1350	 * Free all the receive buffers then reallocate/reinitialize
1351	 */
1352	if (sc->rfa_headm != NULL)
1353		m_freem(sc->rfa_headm);
1354	sc->rfa_headm = NULL;
1355	sc->rfa_tailm = NULL;
1356	for (i = 0; i < FXP_NRFABUFS; i++) {
1357		if (fxp_add_rfabuf(sc, NULL) != 0) {
1358			/*
1359			 * This "can't happen" - we're at splimp()
1360			 * and we just freed all the buffers we need
1361			 * above.
1362			 */
1363			panic("fxp_stop: no buffers!");
1364		}
1365	}
1366
1367	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
1368	ifp->if_timer = 0;
1369}
1370
1371/*
1372 * Watchdog/transmission transmit timeout handler. Called when a
1373 * transmission is started on the interface, but no interrupt is
1374 * received before the timeout. This usually indicates that the
1375 * card has wedged for some reason.
1376 */
1377static void
1378fxp_watchdog(ifp)
1379	struct ifnet *ifp;
1380{
1381	struct fxp_softc *sc = ifp->if_softc;
1382
1383	printf(FXP_FORMAT ": device timeout\n", FXP_ARGS(sc));
1384	ifp->if_oerrors++;
1385
1386	fxp_init(sc);
1387}
1388
1389static void
1390fxp_init(xsc)
1391	void *xsc;
1392{
1393	struct fxp_softc *sc = xsc;
1394	struct ifnet *ifp = &sc->sc_if;
1395	struct fxp_cb_config *cbp;
1396	struct fxp_cb_ias *cb_ias;
1397	struct fxp_cb_tx *txp;
1398	int i, s, prm;
1399
1400	s = splimp();
1401	/*
1402	 * Cancel any pending I/O
1403	 */
1404	fxp_stop(sc);
1405
1406	prm = (ifp->if_flags & IFF_PROMISC) ? 1 : 0;
1407
1408	/*
1409	 * Initialize base of CBL and RFA memory. Loading with zero
1410	 * sets it up for regular linear addressing.
1411	 */
1412	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, 0);
1413	CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_BASE);
1414
1415	fxp_scb_wait(sc);
1416	CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_RU_BASE);
1417
1418	/*
1419	 * Initialize base of dump-stats buffer.
1420	 */
1421	fxp_scb_wait(sc);
1422	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, vtophys(sc->fxp_stats));
1423	CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_DUMP_ADR);
1424
1425	/*
1426	 * We temporarily use memory that contains the TxCB list to
1427	 * construct the config CB. The TxCB list memory is rebuilt
1428	 * later.
1429	 */
1430	cbp = (struct fxp_cb_config *) sc->cbl_base;
1431
1432	/*
1433	 * This bcopy is kind of disgusting, but there are a bunch of must be
1434	 * zero and must be one bits in this structure and this is the easiest
1435	 * way to initialize them all to proper values.
1436	 */
1437	bcopy(fxp_cb_config_template, (volatile void *)&cbp->cb_status,
1438		sizeof(fxp_cb_config_template));
1439
1440	cbp->cb_status =	0;
1441	cbp->cb_command =	FXP_CB_COMMAND_CONFIG | FXP_CB_COMMAND_EL;
1442	cbp->link_addr =	-1;	/* (no) next command */
1443	cbp->byte_count =	22;	/* (22) bytes to config */
1444	cbp->rx_fifo_limit =	8;	/* rx fifo threshold (32 bytes) */
1445	cbp->tx_fifo_limit =	0;	/* tx fifo threshold (0 bytes) */
1446	cbp->adaptive_ifs =	0;	/* (no) adaptive interframe spacing */
1447	cbp->rx_dma_bytecount =	0;	/* (no) rx DMA max */
1448	cbp->tx_dma_bytecount =	0;	/* (no) tx DMA max */
1449	cbp->dma_bce =		0;	/* (disable) dma max counters */
1450	cbp->late_scb =		0;	/* (don't) defer SCB update */
1451	cbp->tno_int =		0;	/* (disable) tx not okay interrupt */
1452	cbp->ci_int =		1;	/* interrupt on CU idle */
1453	cbp->save_bf =		prm;	/* save bad frames */
1454	cbp->disc_short_rx =	!prm;	/* discard short packets */
1455	cbp->underrun_retry =	1;	/* retry mode (1) on DMA underrun */
1456	cbp->mediatype =	!sc->phy_10Mbps_only; /* interface mode */
1457	cbp->nsai =		1;	/* (don't) disable source addr insert */
1458	cbp->preamble_length =	2;	/* (7 byte) preamble */
1459	cbp->loopback =		0;	/* (don't) loopback */
1460	cbp->linear_priority =	0;	/* (normal CSMA/CD operation) */
1461	cbp->linear_pri_mode =	0;	/* (wait after xmit only) */
1462	cbp->interfrm_spacing =	6;	/* (96 bits of) interframe spacing */
1463	cbp->promiscuous =	prm;	/* promiscuous mode */
1464	cbp->bcast_disable =	0;	/* (don't) disable broadcasts */
1465	cbp->crscdt =		0;	/* (CRS only) */
1466	cbp->stripping =	!prm;	/* truncate rx packet to byte count */
1467	cbp->padding =		1;	/* (do) pad short tx packets */
1468	cbp->rcv_crc_xfer =	0;	/* (don't) xfer CRC to host */
1469	cbp->force_fdx =	0;	/* (don't) force full duplex */
1470	cbp->fdx_pin_en =	1;	/* (enable) FDX# pin */
1471	cbp->multi_ia =		0;	/* (don't) accept multiple IAs */
1472	cbp->mc_all =		sc->all_mcasts;/* accept all multicasts */
1473
1474	/*
1475	 * Start the config command/DMA.
1476	 */
1477	fxp_scb_wait(sc);
1478	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, vtophys(&cbp->cb_status));
1479	CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_START);
1480	/* ...and wait for it to complete. */
1481	while (!(cbp->cb_status & FXP_CB_STATUS_C));
1482
1483	/*
1484	 * Now initialize the station address. Temporarily use the TxCB
1485	 * memory area like we did above for the config CB.
1486	 */
1487	cb_ias = (struct fxp_cb_ias *) sc->cbl_base;
1488	cb_ias->cb_status = 0;
1489	cb_ias->cb_command = FXP_CB_COMMAND_IAS | FXP_CB_COMMAND_EL;
1490	cb_ias->link_addr = -1;
1491#if defined(__NetBSD__)
1492	bcopy(LLADDR(ifp->if_sadl), (void *)cb_ias->macaddr, 6);
1493#else
1494	bcopy(sc->arpcom.ac_enaddr, (volatile void *)cb_ias->macaddr,
1495	    sizeof(sc->arpcom.ac_enaddr));
1496#endif /* __NetBSD__ */
1497
1498	/*
1499	 * Start the IAS (Individual Address Setup) command/DMA.
1500	 */
1501	fxp_scb_wait(sc);
1502	CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_START);
1503	/* ...and wait for it to complete. */
1504	while (!(cb_ias->cb_status & FXP_CB_STATUS_C));
1505
1506	/*
1507	 * Initialize transmit control block (TxCB) list.
1508	 */
1509
1510	txp = sc->cbl_base;
1511	bzero(txp, sizeof(struct fxp_cb_tx) * FXP_NTXCB);
1512	for (i = 0; i < FXP_NTXCB; i++) {
1513		txp[i].cb_status = FXP_CB_STATUS_C | FXP_CB_STATUS_OK;
1514		txp[i].cb_command = FXP_CB_COMMAND_NOP;
1515		txp[i].link_addr = vtophys(&txp[(i + 1) & FXP_TXCB_MASK].cb_status);
1516		txp[i].tbd_array_addr = vtophys(&txp[i].tbd[0]);
1517		txp[i].next = &txp[(i + 1) & FXP_TXCB_MASK];
1518	}
1519	/*
1520	 * Set the suspend flag on the first TxCB and start the control
1521	 * unit. It will execute the NOP and then suspend.
1522	 */
1523	txp->cb_command = FXP_CB_COMMAND_NOP | FXP_CB_COMMAND_S;
1524	sc->cbl_first = sc->cbl_last = txp;
1525	sc->tx_queued = 1;
1526
1527	fxp_scb_wait(sc);
1528	CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_START);
1529
1530	/*
1531	 * Initialize receiver buffer area - RFA.
1532	 */
1533	fxp_scb_wait(sc);
1534	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL,
1535	    vtophys(sc->rfa_headm->m_ext.ext_buf) + RFA_ALIGNMENT_FUDGE);
1536	CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_RU_START);
1537
1538	/*
1539	 * Set current media.
1540	 */
1541	fxp_set_media(sc, sc->sc_media.ifm_cur->ifm_media);
1542
1543	ifp->if_flags |= IFF_RUNNING;
1544	ifp->if_flags &= ~IFF_OACTIVE;
1545	splx(s);
1546
1547	/*
1548	 * Start stats updater.
1549	 */
1550	sc->stat_ch = timeout(fxp_stats_update, sc, hz);
1551}
1552
1553static void
1554fxp_set_media(sc, media)
1555	struct fxp_softc *sc;
1556	int media;
1557{
1558
1559	switch (sc->phy_primary_device) {
1560	case FXP_PHY_DP83840:
1561	case FXP_PHY_DP83840A:
1562		fxp_mdi_write(sc, sc->phy_primary_addr, FXP_DP83840_PCR,
1563		    fxp_mdi_read(sc, sc->phy_primary_addr, FXP_DP83840_PCR) |
1564		    FXP_DP83840_PCR_LED4_MODE |	/* LED4 always indicates duplex */
1565		    FXP_DP83840_PCR_F_CONNECT |	/* force link disconnect bypass */
1566		    FXP_DP83840_PCR_BIT10);	/* XXX I have no idea */
1567		/* fall through */
1568	case FXP_PHY_82553A:
1569	case FXP_PHY_82553C: /* untested */
1570	case FXP_PHY_82555:
1571	case FXP_PHY_82555B:
1572		if (IFM_SUBTYPE(media) != IFM_AUTO) {
1573			int flags;
1574
1575			flags = (IFM_SUBTYPE(media) == IFM_100_TX) ?
1576			    FXP_PHY_BMCR_SPEED_100M : 0;
1577			flags |= (media & IFM_FDX) ?
1578			    FXP_PHY_BMCR_FULLDUPLEX : 0;
1579			fxp_mdi_write(sc, sc->phy_primary_addr,
1580			    FXP_PHY_BMCR,
1581			    (fxp_mdi_read(sc, sc->phy_primary_addr,
1582			    FXP_PHY_BMCR) &
1583			    ~(FXP_PHY_BMCR_AUTOEN | FXP_PHY_BMCR_SPEED_100M |
1584			     FXP_PHY_BMCR_FULLDUPLEX)) | flags);
1585		} else {
1586			fxp_mdi_write(sc, sc->phy_primary_addr,
1587			    FXP_PHY_BMCR,
1588			    (fxp_mdi_read(sc, sc->phy_primary_addr,
1589			    FXP_PHY_BMCR) | FXP_PHY_BMCR_AUTOEN));
1590		}
1591		break;
1592	/*
1593	 * The Seeq 80c24 doesn't have a PHY programming interface, so do
1594	 * nothing.
1595	 */
1596	case FXP_PHY_80C24:
1597		break;
1598	default:
1599		printf(FXP_FORMAT
1600		    ": warning: unsupported PHY, type = %d, addr = %d\n",
1601		     FXP_ARGS(sc), sc->phy_primary_device,
1602		     sc->phy_primary_addr);
1603	}
1604}
1605
1606/*
1607 * Change media according to request.
1608 */
1609int
1610fxp_mediachange(ifp)
1611	struct ifnet *ifp;
1612{
1613	struct fxp_softc *sc = ifp->if_softc;
1614	struct ifmedia *ifm = &sc->sc_media;
1615
1616	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
1617		return (EINVAL);
1618
1619	fxp_set_media(sc, ifm->ifm_media);
1620	return (0);
1621}
1622
1623/*
1624 * Notify the world which media we're using.
1625 */
1626void
1627fxp_mediastatus(ifp, ifmr)
1628	struct ifnet *ifp;
1629	struct ifmediareq *ifmr;
1630{
1631	struct fxp_softc *sc = ifp->if_softc;
1632	int flags, stsflags;
1633
1634	switch (sc->phy_primary_device) {
1635	case FXP_PHY_82555:
1636	case FXP_PHY_82555B:
1637	case FXP_PHY_DP83840:
1638	case FXP_PHY_DP83840A:
1639		ifmr->ifm_status = IFM_AVALID; /* IFM_ACTIVE will be valid */
1640		ifmr->ifm_active = IFM_ETHER;
1641		/*
1642		 * the following is not an error.
1643		 * You need to read this register twice to get current
1644		 * status. This is correct documented behaviour, the
1645		 * first read gets latched values.
1646		 */
1647		stsflags = fxp_mdi_read(sc, sc->phy_primary_addr, FXP_PHY_STS);
1648		stsflags = fxp_mdi_read(sc, sc->phy_primary_addr, FXP_PHY_STS);
1649		if (stsflags & FXP_PHY_STS_LINK_STS)
1650				ifmr->ifm_status |= IFM_ACTIVE;
1651
1652		/*
1653		 * If we are in auto mode, then try report the result.
1654		 */
1655		flags = fxp_mdi_read(sc, sc->phy_primary_addr, FXP_PHY_BMCR);
1656		if (flags & FXP_PHY_BMCR_AUTOEN) {
1657			ifmr->ifm_active |= IFM_AUTO; /* XXX presently 0 */
1658			if (stsflags & FXP_PHY_STS_AUTO_DONE) {
1659				/*
1660				 * Intel and National parts report
1661				 * differently on what they found.
1662				 */
1663				if ((sc->phy_primary_device == FXP_PHY_82555)
1664				|| (sc->phy_primary_device == FXP_PHY_82555B)) {
1665					flags = fxp_mdi_read(sc,
1666						sc->phy_primary_addr,
1667						FXP_PHY_USC);
1668
1669					if (flags & FXP_PHY_USC_SPEED)
1670						ifmr->ifm_active |= IFM_100_TX;
1671					else
1672						ifmr->ifm_active |= IFM_10_T;
1673
1674					if (flags & FXP_PHY_USC_DUPLEX)
1675						ifmr->ifm_active |= IFM_FDX;
1676				} else { /* it's National. only know speed  */
1677					flags = fxp_mdi_read(sc,
1678						sc->phy_primary_addr,
1679						FXP_DP83840_PAR);
1680
1681					if (flags & FXP_DP83840_PAR_SPEED_10)
1682						ifmr->ifm_active |= IFM_10_T;
1683					else
1684						ifmr->ifm_active |= IFM_100_TX;
1685				}
1686			}
1687		} else { /* in manual mode.. just report what we were set to */
1688			if (flags & FXP_PHY_BMCR_SPEED_100M)
1689				ifmr->ifm_active |= IFM_100_TX;
1690			else
1691				ifmr->ifm_active |= IFM_10_T;
1692
1693			if (flags & FXP_PHY_BMCR_FULLDUPLEX)
1694				ifmr->ifm_active |= IFM_FDX;
1695		}
1696		break;
1697
1698	case FXP_PHY_80C24:
1699	default:
1700		ifmr->ifm_active = IFM_ETHER|IFM_MANUAL; /* XXX IFM_AUTO ? */
1701	}
1702}
1703
1704/*
1705 * Add a buffer to the end of the RFA buffer list.
1706 * Return 0 if successful, 1 for failure. A failure results in
1707 * adding the 'oldm' (if non-NULL) on to the end of the list -
1708 * tossing out its old contents and recycling it.
1709 * The RFA struct is stuck at the beginning of mbuf cluster and the
1710 * data pointer is fixed up to point just past it.
1711 */
1712static int
1713fxp_add_rfabuf(sc, oldm)
1714	struct fxp_softc *sc;
1715	struct mbuf *oldm;
1716{
1717	u_int32_t v;
1718	struct mbuf *m;
1719	struct fxp_rfa *rfa, *p_rfa;
1720
1721	MGETHDR(m, M_DONTWAIT, MT_DATA);
1722	if (m != NULL) {
1723		MCLGET(m, M_DONTWAIT);
1724		if ((m->m_flags & M_EXT) == 0) {
1725			m_freem(m);
1726			if (oldm == NULL)
1727				return 1;
1728			m = oldm;
1729			m->m_data = m->m_ext.ext_buf;
1730		}
1731	} else {
1732		if (oldm == NULL)
1733			return 1;
1734		m = oldm;
1735		m->m_data = m->m_ext.ext_buf;
1736	}
1737
1738	/*
1739	 * Move the data pointer up so that the incoming data packet
1740	 * will be 32-bit aligned.
1741	 */
1742	m->m_data += RFA_ALIGNMENT_FUDGE;
1743
1744	/*
1745	 * Get a pointer to the base of the mbuf cluster and move
1746	 * data start past it.
1747	 */
1748	rfa = mtod(m, struct fxp_rfa *);
1749	m->m_data += sizeof(struct fxp_rfa);
1750	rfa->size = (u_int16_t)(MCLBYTES - sizeof(struct fxp_rfa) - RFA_ALIGNMENT_FUDGE);
1751
1752	/*
1753	 * Initialize the rest of the RFA.  Note that since the RFA
1754	 * is misaligned, we cannot store values directly.  Instead,
1755	 * we use an optimized, inline copy.
1756	 */
1757
1758	rfa->rfa_status = 0;
1759	rfa->rfa_control = FXP_RFA_CONTROL_EL;
1760	rfa->actual_size = 0;
1761
1762	v = -1;
1763	fxp_lwcopy(&v, (volatile u_int32_t *) rfa->link_addr);
1764	fxp_lwcopy(&v, (volatile u_int32_t *) rfa->rbd_addr);
1765
1766	/*
1767	 * If there are other buffers already on the list, attach this
1768	 * one to the end by fixing up the tail to point to this one.
1769	 */
1770	if (sc->rfa_headm != NULL) {
1771		p_rfa = (struct fxp_rfa *) (sc->rfa_tailm->m_ext.ext_buf +
1772		    RFA_ALIGNMENT_FUDGE);
1773		sc->rfa_tailm->m_next = m;
1774		v = vtophys(rfa);
1775		fxp_lwcopy(&v, (volatile u_int32_t *) p_rfa->link_addr);
1776		p_rfa->rfa_control = 0;
1777	} else {
1778		sc->rfa_headm = m;
1779	}
1780	sc->rfa_tailm = m;
1781
1782	return (m == oldm);
1783}
1784
1785static volatile int
1786fxp_mdi_read(sc, phy, reg)
1787	struct fxp_softc *sc;
1788	int phy;
1789	int reg;
1790{
1791	int count = 10000;
1792	int value;
1793
1794	CSR_WRITE_4(sc, FXP_CSR_MDICONTROL,
1795	    (FXP_MDI_READ << 26) | (reg << 16) | (phy << 21));
1796
1797	while (((value = CSR_READ_4(sc, FXP_CSR_MDICONTROL)) & 0x10000000) == 0
1798	    && count--)
1799		DELAY(10);
1800
1801	if (count <= 0)
1802		printf(FXP_FORMAT ": fxp_mdi_read: timed out\n",
1803		    FXP_ARGS(sc));
1804
1805	return (value & 0xffff);
1806}
1807
1808static void
1809fxp_mdi_write(sc, phy, reg, value)
1810	struct fxp_softc *sc;
1811	int phy;
1812	int reg;
1813	int value;
1814{
1815	int count = 10000;
1816
1817	CSR_WRITE_4(sc, FXP_CSR_MDICONTROL,
1818	    (FXP_MDI_WRITE << 26) | (reg << 16) | (phy << 21) |
1819	    (value & 0xffff));
1820
1821	while((CSR_READ_4(sc, FXP_CSR_MDICONTROL) & 0x10000000) == 0 &&
1822	    count--)
1823		DELAY(10);
1824
1825	if (count <= 0)
1826		printf(FXP_FORMAT ": fxp_mdi_write: timed out\n",
1827		    FXP_ARGS(sc));
1828}
1829
1830static int
1831fxp_ioctl(ifp, command, data)
1832	struct ifnet *ifp;
1833	FXP_IOCTLCMD_TYPE command;
1834	caddr_t data;
1835{
1836	struct fxp_softc *sc = ifp->if_softc;
1837	struct ifreq *ifr = (struct ifreq *)data;
1838	int s, error = 0;
1839
1840	s = splimp();
1841
1842	switch (command) {
1843
1844	case SIOCSIFADDR:
1845#if !defined(__NetBSD__)
1846	case SIOCGIFADDR:
1847	case SIOCSIFMTU:
1848#endif
1849		error = ether_ioctl(ifp, command, data);
1850		break;
1851
1852	case SIOCSIFFLAGS:
1853		sc->all_mcasts = (ifp->if_flags & IFF_ALLMULTI) ? 1 : 0;
1854
1855		/*
1856		 * If interface is marked up and not running, then start it.
1857		 * If it is marked down and running, stop it.
1858		 * XXX If it's up then re-initialize it. This is so flags
1859		 * such as IFF_PROMISC are handled.
1860		 */
1861		if (ifp->if_flags & IFF_UP) {
1862			fxp_init(sc);
1863		} else {
1864			if (ifp->if_flags & IFF_RUNNING)
1865				fxp_stop(sc);
1866		}
1867		break;
1868
1869	case SIOCADDMULTI:
1870	case SIOCDELMULTI:
1871		sc->all_mcasts = (ifp->if_flags & IFF_ALLMULTI) ? 1 : 0;
1872#if defined(__NetBSD__)
1873		error = (command == SIOCADDMULTI) ?
1874		    ether_addmulti(ifr, &sc->sc_ethercom) :
1875		    ether_delmulti(ifr, &sc->sc_ethercom);
1876
1877		if (error == ENETRESET) {
1878			/*
1879			 * Multicast list has changed; set the hardware
1880			 * filter accordingly.
1881			 */
1882			if (!sc->all_mcasts)
1883				fxp_mc_setup(sc);
1884			/*
1885			 * fxp_mc_setup() can turn on all_mcasts if we run
1886			 * out of space, so check it again rather than else {}.
1887			 */
1888			if (sc->all_mcasts)
1889				fxp_init(sc);
1890			error = 0;
1891		}
1892#else /* __FreeBSD__ */
1893		/*
1894		 * Multicast list has changed; set the hardware filter
1895		 * accordingly.
1896		 */
1897		if (!sc->all_mcasts)
1898			fxp_mc_setup(sc);
1899		/*
1900		 * fxp_mc_setup() can turn on sc->all_mcasts, so check it
1901		 * again rather than else {}.
1902		 */
1903		if (sc->all_mcasts)
1904			fxp_init(sc);
1905		error = 0;
1906#endif /* __NetBSD__ */
1907		break;
1908
1909	case SIOCSIFMEDIA:
1910	case SIOCGIFMEDIA:
1911		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, command);
1912		break;
1913
1914	default:
1915		error = EINVAL;
1916	}
1917	(void) splx(s);
1918	return (error);
1919}
1920
1921/*
1922 * Program the multicast filter.
1923 *
1924 * We have an artificial restriction that the multicast setup command
1925 * must be the first command in the chain, so we take steps to ensure
1926 * this. By requiring this, it allows us to keep up the performance of
1927 * the pre-initialized command ring (esp. link pointers) by not actually
1928 * inserting the mcsetup command in the ring - i.e. its link pointer
1929 * points to the TxCB ring, but the mcsetup descriptor itself is not part
1930 * of it. We then can do 'CU_START' on the mcsetup descriptor and have it
1931 * lead into the regular TxCB ring when it completes.
1932 *
1933 * This function must be called at splimp.
1934 */
1935static void
1936fxp_mc_setup(sc)
1937	struct fxp_softc *sc;
1938{
1939	struct fxp_cb_mcs *mcsp = sc->mcsp;
1940	struct ifnet *ifp = &sc->sc_if;
1941	struct ifmultiaddr *ifma;
1942	int nmcasts;
1943
1944	/*
1945	 * If there are queued commands, we must wait until they are all
1946	 * completed. If we are already waiting, then add a NOP command
1947	 * with interrupt option so that we're notified when all commands
1948	 * have been completed - fxp_start() ensures that no additional
1949	 * TX commands will be added when need_mcsetup is true.
1950	 */
1951	if (sc->tx_queued) {
1952		struct fxp_cb_tx *txp;
1953
1954		/*
1955		 * need_mcsetup will be true if we are already waiting for the
1956		 * NOP command to be completed (see below). In this case, bail.
1957		 */
1958		if (sc->need_mcsetup)
1959			return;
1960		sc->need_mcsetup = 1;
1961
1962		/*
1963		 * Add a NOP command with interrupt so that we are notified when all
1964		 * TX commands have been processed.
1965		 */
1966		txp = sc->cbl_last->next;
1967		txp->mb_head = NULL;
1968		txp->cb_status = 0;
1969		txp->cb_command = FXP_CB_COMMAND_NOP | FXP_CB_COMMAND_S | FXP_CB_COMMAND_I;
1970		/*
1971		 * Advance the end of list forward.
1972		 */
1973		sc->cbl_last->cb_command &= ~FXP_CB_COMMAND_S;
1974		sc->cbl_last = txp;
1975		sc->tx_queued++;
1976		/*
1977		 * Issue a resume in case the CU has just suspended.
1978		 */
1979		fxp_scb_wait(sc);
1980		CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_RESUME);
1981		/*
1982		 * Set a 5 second timer just in case we don't hear from the
1983		 * card again.
1984		 */
1985		ifp->if_timer = 5;
1986
1987		return;
1988	}
1989	sc->need_mcsetup = 0;
1990
1991	/*
1992	 * Initialize multicast setup descriptor.
1993	 */
1994	mcsp->next = sc->cbl_base;
1995	mcsp->mb_head = NULL;
1996	mcsp->cb_status = 0;
1997	mcsp->cb_command = FXP_CB_COMMAND_MCAS | FXP_CB_COMMAND_S | FXP_CB_COMMAND_I;
1998	mcsp->link_addr = vtophys(&sc->cbl_base->cb_status);
1999
2000	nmcasts = 0;
2001	if (!sc->all_mcasts) {
2002		for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL;
2003		    ifma = ifma->ifma_link.le_next) {
2004			if (ifma->ifma_addr->sa_family != AF_LINK)
2005				continue;
2006			if (nmcasts >= MAXMCADDR) {
2007				sc->all_mcasts = 1;
2008				nmcasts = 0;
2009				break;
2010			}
2011			bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
2012			    (volatile void *) &sc->mcsp->mc_addr[nmcasts][0], 6);
2013			nmcasts++;
2014		}
2015	}
2016	mcsp->mc_cnt = nmcasts * 6;
2017	sc->cbl_first = sc->cbl_last = (struct fxp_cb_tx *) mcsp;
2018	sc->tx_queued = 1;
2019
2020	/*
2021	 * Wait until command unit is not active. This should never
2022	 * be the case when nothing is queued, but make sure anyway.
2023	 */
2024	while ((CSR_READ_1(sc, FXP_CSR_SCB_RUSCUS) >> 6) ==
2025	    FXP_SCB_CUS_ACTIVE) ;
2026
2027	/*
2028	 * Start the multicast setup command.
2029	 */
2030	fxp_scb_wait(sc);
2031	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, vtophys(&mcsp->cb_status));
2032	CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_START);
2033
2034	ifp->if_timer = 2;
2035	return;
2036}
2037