if_fxp.c revision 158651
/*-
 * Copyright (c) 1995, David Greenman
 * Copyright (c) 2001 Jonathan Lemon <jlemon@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/fxp/if_fxp.c 158651 2006-05-16 14:37:58Z phk $");

/*
 * Intel EtherExpress Pro/100B PCI Fast Ethernet driver
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/endian.h>
#include <sys/mbuf.h>
		/* #include <sys/mutex.h> */
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <net/bpf.h>
#include <sys/sockio.h>
#include <sys/bus.h>
#include <machine/bus.h>
#include <sys/rman.h>
#include <machine/resource.h>

#include <net/ethernet.h>
#include <net/if_arp.h>


#include <net/if_types.h>
#include <net/if_vlan_var.h>

#ifdef FXP_IP_CSUM_WAR
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <machine/in_cksum.h>
#endif

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>		/* for PCIM_CMD_xxx */

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/fxp/if_fxpreg.h>
#include <dev/fxp/if_fxpvar.h>
#include <dev/fxp/rcvbundl.h>

MODULE_DEPEND(fxp, pci, 1, 1, 1);
MODULE_DEPEND(fxp, ether, 1, 1, 1);
MODULE_DEPEND(fxp, miibus, 1, 1, 1);
#include "miibus_if.h"

/*
 * NOTE!  On the Alpha, we have an alignment constraint.  The
 * card DMAs the packet immediately following the RFA.  However,
 * the first thing in the packet is a 14-byte Ethernet header.
 * This means that the packet is misaligned.  To compensate,
 * we actually offset the RFA 2 bytes into the cluster.  This
 * aligns the packet after the Ethernet header at a 32-bit
 * boundary.  HOWEVER!  This means that the RFA is misaligned!
 */
#define	RFA_ALIGNMENT_FUDGE	2
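
/*
 * For example: the receive cluster itself is 32-bit aligned, so with the
 * RFA pushed RFA_ALIGNMENT_FUDGE (2) bytes into it, the packet data that
 * follows the RFA starts 2 bytes past a 32-bit boundary (provided the RFA
 * size is a multiple of 4).  After the 14-byte Ethernet header,
 * 2 + 14 == 16 bytes have been consumed, so the IP header that follows
 * lands back on a 32-bit boundary.
 */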

/*
 * Set initial transmit threshold at 64 (512 bytes). This is
 * increased by 64 (512 bytes) at a time, to maximum of 192
 * (1536 bytes), if an underrun occurs.
 */
static int tx_threshold = 64;

/*
 * The configuration byte map has several undefined fields which
 * must be one or must be zero.  Set up a template for these bits
 * only, (assuming a 82557 chip) leaving the actual configuration
 * to fxp_init.
 *
 * See struct fxp_cb_config for the bit definitions.
 */
static u_char fxp_cb_config_template[] = {
	0x0, 0x0,		/* cb_status */
	0x0, 0x0,		/* cb_command */
	0x0, 0x0, 0x0, 0x0,	/* link_addr */
	0x0,	/*  0 */
	0x0,	/*  1 */
	0x0,	/*  2 */
	0x0,	/*  3 */
	0x0,	/*  4 */
	0x0,	/*  5 */
	0x32,	/*  6 */
	0x0,	/*  7 */
	0x0,	/*  8 */
	0x0,	/*  9 */
	0x6,	/* 10 */
	0x0,	/* 11 */
	0x0,	/* 12 */
	0x0,	/* 13 */
	0xf2,	/* 14 */
	0x48,	/* 15 */
	0x0,	/* 16 */
	0x40,	/* 17 */
	0xf0,	/* 18 */
	0x0,	/* 19 */
	0x3f,	/* 20 */
	0x5	/* 21 */
};

struct fxp_ident {
	uint16_t	devid;
	int16_t		revid;		/* -1 matches anything */
	char 		*name;
};

/*
 * Claim various Intel PCI device identifiers for this driver.  The
 * sub-vendor and sub-device field are extensively used to identify
 * particular variants, but we don't currently differentiate between
 * them.
 */
static struct fxp_ident fxp_ident_table[] = {
    { 0x1029,	-1,	"Intel 82559 PCI/CardBus Pro/100" },
    { 0x1030,	-1,	"Intel 82559 Pro/100 Ethernet" },
    { 0x1031,	-1,	"Intel 82801CAM (ICH3) Pro/100 VE Ethernet" },
    { 0x1032,	-1,	"Intel 82801CAM (ICH3) Pro/100 VE Ethernet" },
    { 0x1033,	-1,	"Intel 82801CAM (ICH3) Pro/100 VM Ethernet" },
    { 0x1034,	-1,	"Intel 82801CAM (ICH3) Pro/100 VM Ethernet" },
    { 0x1035,	-1,	"Intel 82801CAM (ICH3) Pro/100 Ethernet" },
    { 0x1036,	-1,	"Intel 82801CAM (ICH3) Pro/100 Ethernet" },
    { 0x1037,	-1,	"Intel 82801CAM (ICH3) Pro/100 Ethernet" },
    { 0x1038,	-1,	"Intel 82801CAM (ICH3) Pro/100 VM Ethernet" },
    { 0x1039,	-1,	"Intel 82801DB (ICH4) Pro/100 VE Ethernet" },
    { 0x103A,	-1,	"Intel 82801DB (ICH4) Pro/100 Ethernet" },
    { 0x103B,	-1,	"Intel 82801DB (ICH4) Pro/100 VM Ethernet" },
    { 0x103C,	-1,	"Intel 82801DB (ICH4) Pro/100 Ethernet" },
    { 0x103D,	-1,	"Intel 82801DB (ICH4) Pro/100 VE Ethernet" },
    { 0x103E,	-1,	"Intel 82801DB (ICH4) Pro/100 VM Ethernet" },
    { 0x1050,	-1,	"Intel 82801BA (D865) Pro/100 VE Ethernet" },
    { 0x1051,	-1,	"Intel 82562ET (ICH5/ICH5R) Pro/100 VE Ethernet" },
    { 0x1059,	-1,	"Intel 82551QM Pro/100 M Mobile Connection" },
    { 0x1064,	-1,	"Intel 82562EZ (ICH6)" },
    { 0x1068,	-1,	"Intel 82801FBM (ICH6-M) Pro/100 VE Ethernet" },
    { 0x1069,	-1,	"Intel 82562EM/EX/GX Pro/100 Ethernet" },
    { 0x1092,	-1,	"Intel Pro/100 VE Network Connection" },
    { 0x1209,	-1,	"Intel 82559ER Embedded 10/100 Ethernet" },
    { 0x1229,	0x01,	"Intel 82557 Pro/100 Ethernet" },
    { 0x1229,	0x02,	"Intel 82557 Pro/100 Ethernet" },
    { 0x1229,	0x03,	"Intel 82557 Pro/100 Ethernet" },
    { 0x1229,	0x04,	"Intel 82558 Pro/100 Ethernet" },
    { 0x1229,	0x05,	"Intel 82558 Pro/100 Ethernet" },
    { 0x1229,	0x06,	"Intel 82559 Pro/100 Ethernet" },
    { 0x1229,	0x07,	"Intel 82559 Pro/100 Ethernet" },
    { 0x1229,	0x08,	"Intel 82559 Pro/100 Ethernet" },
    { 0x1229,	0x09,	"Intel 82559ER Pro/100 Ethernet" },
    { 0x1229,	0x0c,	"Intel 82550 Pro/100 Ethernet" },
    { 0x1229,	0x0d,	"Intel 82550 Pro/100 Ethernet" },
    { 0x1229,	0x0e,	"Intel 82550 Pro/100 Ethernet" },
    { 0x1229,	0x0f,	"Intel 82551 Pro/100 Ethernet" },
    { 0x1229,	0x10,	"Intel 82551 Pro/100 Ethernet" },
    { 0x1229,	-1,	"Intel 82557/8/9 Pro/100 Ethernet" },
    { 0x2449,	-1,	"Intel 82801BA/CAM (ICH2/3) Pro/100 Ethernet" },
    { 0x27dc,	-1,	"Intel 82801GB (ICH7) 10/100 Ethernet" },
    { 0,	-1,	NULL },
};

#ifdef FXP_IP_CSUM_WAR
#define FXP_CSUM_FEATURES    (CSUM_IP | CSUM_TCP | CSUM_UDP)
#else
#define FXP_CSUM_FEATURES    (CSUM_TCP | CSUM_UDP)
#endif

static int		fxp_probe(device_t dev);
static int		fxp_attach(device_t dev);
static int		fxp_detach(device_t dev);
static int		fxp_shutdown(device_t dev);
static int		fxp_suspend(device_t dev);
static int		fxp_resume(device_t dev);

static void		fxp_intr(void *xsc);
static void		fxp_intr_body(struct fxp_softc *sc, struct ifnet *ifp,
			    uint8_t statack, int count);
static void 		fxp_init(void *xsc);
static void 		fxp_init_body(struct fxp_softc *sc);
static void 		fxp_tick(void *xsc);
static void 		fxp_start(struct ifnet *ifp);
static void 		fxp_start_body(struct ifnet *ifp);
static int		fxp_encap(struct fxp_softc *sc, struct mbuf *m_head);
static void		fxp_stop(struct fxp_softc *sc);
static void 		fxp_release(struct fxp_softc *sc);
static int		fxp_ioctl(struct ifnet *ifp, u_long command,
			    caddr_t data);
static void 		fxp_watchdog(struct ifnet *ifp);
static int		fxp_add_rfabuf(struct fxp_softc *sc,
    			    struct fxp_rx *rxp);
static int		fxp_mc_addrs(struct fxp_softc *sc);
static void		fxp_mc_setup(struct fxp_softc *sc);
static uint16_t		fxp_eeprom_getword(struct fxp_softc *sc, int offset,
			    int autosize);
static void 		fxp_eeprom_putword(struct fxp_softc *sc, int offset,
			    uint16_t data);
static void		fxp_autosize_eeprom(struct fxp_softc *sc);
static void		fxp_read_eeprom(struct fxp_softc *sc, u_short *data,
			    int offset, int words);
static void		fxp_write_eeprom(struct fxp_softc *sc, u_short *data,
			    int offset, int words);
static int		fxp_ifmedia_upd(struct ifnet *ifp);
static void		fxp_ifmedia_sts(struct ifnet *ifp,
			    struct ifmediareq *ifmr);
static int		fxp_serial_ifmedia_upd(struct ifnet *ifp);
static void		fxp_serial_ifmedia_sts(struct ifnet *ifp,
			    struct ifmediareq *ifmr);
static int		fxp_miibus_readreg(device_t dev, int phy, int reg);
static void		fxp_miibus_writereg(device_t dev, int phy, int reg,
			    int value);
static void		fxp_load_ucode(struct fxp_softc *sc);
static int		sysctl_int_range(SYSCTL_HANDLER_ARGS,
			    int low, int high);
static int		sysctl_hw_fxp_bundle_max(SYSCTL_HANDLER_ARGS);
static int		sysctl_hw_fxp_int_delay(SYSCTL_HANDLER_ARGS);
static void 		fxp_scb_wait(struct fxp_softc *sc);
static void		fxp_scb_cmd(struct fxp_softc *sc, int cmd);
static void		fxp_dma_wait(struct fxp_softc *sc,
    			    volatile uint16_t *status, bus_dma_tag_t dmat,
			    bus_dmamap_t map);

static device_method_t fxp_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		fxp_probe),
	DEVMETHOD(device_attach,	fxp_attach),
	DEVMETHOD(device_detach,	fxp_detach),
	DEVMETHOD(device_shutdown,	fxp_shutdown),
	DEVMETHOD(device_suspend,	fxp_suspend),
	DEVMETHOD(device_resume,	fxp_resume),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	fxp_miibus_readreg),
	DEVMETHOD(miibus_writereg,	fxp_miibus_writereg),

	{ 0, 0 }
};

static driver_t fxp_driver = {
	"fxp",
	fxp_methods,
	sizeof(struct fxp_softc),
};

static devclass_t fxp_devclass;

DRIVER_MODULE(fxp, pci, fxp_driver, fxp_devclass, 0, 0);
DRIVER_MODULE(fxp, cardbus, fxp_driver, fxp_devclass, 0, 0);
DRIVER_MODULE(miibus, fxp, miibus_driver, miibus_devclass, 0, 0);

static struct resource_spec fxp_res_spec_mem[] = {
	{ SYS_RES_MEMORY,	FXP_PCI_MMBA,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE },
	{ -1, 0 }
};

static struct resource_spec fxp_res_spec_io[] = {
	{ SYS_RES_IOPORT,	FXP_PCI_IOBA,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE },
	{ -1, 0 }
};

/*
 * Wait for the previous command to be accepted (but not necessarily
 * completed).
 */
static void
fxp_scb_wait(struct fxp_softc *sc)
{
	union {
		uint16_t w;
		uint8_t b[2];
	} flowctl;
	int i = 10000;

	while (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) && --i)
		DELAY(2);
	if (i == 0) {
		flowctl.b[0] = CSR_READ_1(sc, FXP_CSR_FLOWCONTROL);
		flowctl.b[1] = CSR_READ_1(sc, FXP_CSR_FLOWCONTROL + 1);
		device_printf(sc->dev, "SCB timeout: 0x%x 0x%x 0x%x 0x%x\n",
		    CSR_READ_1(sc, FXP_CSR_SCB_COMMAND),
		    CSR_READ_1(sc, FXP_CSR_SCB_STATACK),
		    CSR_READ_1(sc, FXP_CSR_SCB_RUSCUS), flowctl.w);
	}
}

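/*
 * Submit a command to the SCB command unit.  If this chip has the
 * CU resume bug, issue a NOP and wait for it to be accepted before
 * writing the actual CU_RESUME command.
 */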
static void
fxp_scb_cmd(struct fxp_softc *sc, int cmd)
{

	if (cmd == FXP_SCB_COMMAND_CU_RESUME && sc->cu_resume_bug) {
		CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_CB_COMMAND_NOP);
		fxp_scb_wait(sc);
	}
	CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, cmd);
}

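/*
 * Poll a DMA-visible status word, re-syncing the map before each read,
 * until the chip sets the complete (C) bit or roughly 20ms have passed.
 */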
static void
fxp_dma_wait(struct fxp_softc *sc, volatile uint16_t *status,
    bus_dma_tag_t dmat, bus_dmamap_t map)
{
	int i = 10000;

	bus_dmamap_sync(dmat, map, BUS_DMASYNC_POSTREAD);
	while (!(le16toh(*status) & FXP_CB_STATUS_C) && --i) {
		DELAY(2);
		bus_dmamap_sync(dmat, map, BUS_DMASYNC_POSTREAD);
	}
	if (i == 0)
		device_printf(sc->dev, "DMA timeout\n");
}

/*
 * Return identification string if this device is ours.
 */
static int
fxp_probe(device_t dev)
{
	uint16_t devid;
	uint8_t revid;
	struct fxp_ident *ident;

	if (pci_get_vendor(dev) == FXP_VENDORID_INTEL) {
		devid = pci_get_device(dev);
		revid = pci_get_revid(dev);
		for (ident = fxp_ident_table; ident->name != NULL; ident++) {
			if (ident->devid == devid &&
			    (ident->revid == revid || ident->revid == -1)) {
				device_set_desc(dev, ident->name);
				return (BUS_PROBE_DEFAULT);
			}
		}
	}
	return (ENXIO);
}

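/*
 * Callback for bus_dmamap_load(): record the physical address of the
 * single segment that was mapped.
 */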
static void
fxp_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	uint32_t *addr;

	if (error)
		return;

	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
	addr = arg;
	*addr = segs->ds_addr;
}

static int
fxp_attach(device_t dev)
{
	struct fxp_softc *sc;
	struct fxp_cb_tx *tcbp;
	struct fxp_tx *txp;
	struct fxp_rx *rxp;
	struct ifnet *ifp;
	uint32_t val;
	uint16_t data, myea[ETHER_ADDR_LEN / 2];
	u_char eaddr[ETHER_ADDR_LEN];
	int i, prefer_iomap;
	int error;

	error = 0;
	sc = device_get_softc(dev);
	sc->dev = dev;
	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	callout_init_mtx(&sc->stat_ch, &sc->sc_mtx, 0);
	ifmedia_init(&sc->sc_media, 0, fxp_serial_ifmedia_upd,
	    fxp_serial_ifmedia_sts);

	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto fail;
	}

	/*
	 * Enable bus mastering.
	 */
	pci_enable_busmaster(dev);
	val = pci_read_config(dev, PCIR_COMMAND, 2);

	/*
	 * Figure out which we should try first - memory mapping or i/o mapping?
	 * We default to memory mapping. Then we accept an override from the
	 * command line. Then we check to see which one is enabled.
	 */
	prefer_iomap = 0;
	resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "prefer_iomap", &prefer_iomap);
	if (prefer_iomap)
		sc->fxp_spec = fxp_res_spec_io;
	else
		sc->fxp_spec = fxp_res_spec_mem;

	error = bus_alloc_resources(dev, sc->fxp_spec, sc->fxp_res);
	if (error) {
		if (sc->fxp_spec == fxp_res_spec_mem)
			sc->fxp_spec = fxp_res_spec_io;
		else
			sc->fxp_spec = fxp_res_spec_mem;
		error = bus_alloc_resources(dev, sc->fxp_spec, sc->fxp_res);
	}
	if (error) {
		device_printf(dev, "could not allocate resources\n");
		error = ENXIO;
		goto fail;
	}

	if (bootverbose) {
		device_printf(dev, "using %s space register mapping\n",
		   sc->fxp_spec == fxp_res_spec_mem ? "memory" : "I/O");
	}

	/*
	 * Reset to a stable state.
	 */
	CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET);
	DELAY(10);

	/*
	 * Find out how large of an SEEPROM we have.
	 */
	fxp_autosize_eeprom(sc);

	/*
	 * Find out the chip revision; lump all 82557 revs together.
	 */
	fxp_read_eeprom(sc, &data, 5, 1);
	if ((data >> 8) == 1)
		sc->revision = FXP_REV_82557;
	else
		sc->revision = pci_get_revid(dev);

	/*
	 * Determine whether we must use the 503 serial interface.
	 */
	fxp_read_eeprom(sc, &data, 6, 1);
	if (sc->revision == FXP_REV_82557 && (data & FXP_PHY_DEVICE_MASK) != 0
	    && (data & FXP_PHY_SERIAL_ONLY))
		sc->flags |= FXP_FLAG_SERIAL_MEDIA;

	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "int_delay", CTLTYPE_INT | CTLFLAG_RW,
	    &sc->tunable_int_delay, 0, sysctl_hw_fxp_int_delay, "I",
	    "FXP driver receive interrupt microcode bundling delay");
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "bundle_max", CTLTYPE_INT | CTLFLAG_RW,
	    &sc->tunable_bundle_max, 0, sysctl_hw_fxp_bundle_max, "I",
	    "FXP driver receive interrupt microcode bundle size limit");
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "rnr", CTLFLAG_RD, &sc->rnr, 0,
	    "FXP RNR events");
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "noflow", CTLFLAG_RW, &sc->tunable_noflow, 0,
	    "FXP flow control disabled");

	/*
	 * Pull in device tunables.
	 */
	sc->tunable_int_delay = TUNABLE_INT_DELAY;
	sc->tunable_bundle_max = TUNABLE_BUNDLE_MAX;
	sc->tunable_noflow = 1;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "int_delay", &sc->tunable_int_delay);
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "bundle_max", &sc->tunable_bundle_max);
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "noflow", &sc->tunable_noflow);
	sc->rnr = 0;

	/*
	 * Enable workarounds for certain chip revision deficiencies.
	 *
	 * Systems based on the ICH2/ICH2-M chip from Intel, and possibly
	 * some systems based on a normal 82559 design, have a defect where
	 * the chip can cause a PCI protocol violation if it receives
	 * a CU_RESUME command when it is entering the IDLE state.  The
	 * workaround is to disable Dynamic Standby Mode, so the chip never
	 * deasserts CLKRUN#, and always remains in an active state.
	 *
	 * See Intel 82801BA/82801BAM Specification Update, Errata #30.
	 */
	i = pci_get_device(dev);
	if (i == 0x2449 || (i > 0x1030 && i < 0x1039) ||
	    sc->revision >= FXP_REV_82559_A0) {
		fxp_read_eeprom(sc, &data, 10, 1);
		if (data & 0x02) {			/* STB enable */
			uint16_t cksum;
			int i;

			device_printf(dev,
			    "Disabling dynamic standby mode in EEPROM\n");
			data &= ~0x02;
			fxp_write_eeprom(sc, &data, 10, 1);
			device_printf(dev, "New EEPROM ID: 0x%x\n", data);
			cksum = 0;
			for (i = 0; i < (1 << sc->eeprom_size) - 1; i++) {
				fxp_read_eeprom(sc, &data, i, 1);
				cksum += data;
			}
			i = (1 << sc->eeprom_size) - 1;
			cksum = 0xBABA - cksum;
			fxp_read_eeprom(sc, &data, i, 1);
			fxp_write_eeprom(sc, &cksum, i, 1);
			device_printf(dev,
			    "EEPROM checksum @ 0x%x: 0x%x -> 0x%x\n",
			    i, data, cksum);
#if 1
			/*
			 * If the user elects to continue, try the software
			 * workaround, as it is better than nothing.
			 */
			sc->flags |= FXP_FLAG_CU_RESUME_BUG;
#endif
		}
	}

	/*
	 * If we are not a 82557 chip, we can enable extended features.
	 */
	if (sc->revision != FXP_REV_82557) {
		/*
		 * If MWI is enabled in the PCI configuration, and there
		 * is a valid cacheline size (8 or 16 dwords), then tell
		 * the board to turn on MWI.
		 */
		if (val & PCIM_CMD_MWRICEN &&
		    pci_read_config(dev, PCIR_CACHELNSZ, 1) != 0)
			sc->flags |= FXP_FLAG_MWI_ENABLE;

		/* turn on the extended TxCB feature */
		sc->flags |= FXP_FLAG_EXT_TXCB;

		/* enable reception of long frames for VLAN */
		sc->flags |= FXP_FLAG_LONG_PKT_EN;
	} else {
		/* a hack to get long VLAN frames on a 82557 */
		sc->flags |= FXP_FLAG_SAVE_BAD;
	}

	/*
	 * Enable use of extended RFDs and TCBs for 82550
	 * and later chips. Note: we need extended TXCB support
	 * too, but that's already enabled by the code above.
	 * Be careful to do this only on the right devices.
	 */
	if (sc->revision == FXP_REV_82550 || sc->revision == FXP_REV_82550_C ||
	    sc->revision == FXP_REV_82551_E || sc->revision == FXP_REV_82551_F
	    || sc->revision == FXP_REV_82551_10) {
		sc->rfa_size = sizeof (struct fxp_rfa);
		sc->tx_cmd = FXP_CB_COMMAND_IPCBXMIT;
		sc->flags |= FXP_FLAG_EXT_RFA;
	} else {
		sc->rfa_size = sizeof (struct fxp_rfa) - FXP_RFAX_LEN;
		sc->tx_cmd = FXP_CB_COMMAND_XMIT;
	}

	/*
	 * Allocate DMA tags and DMA safe memory.
	 */
	sc->maxtxseg = FXP_NTXSEG;
	if (sc->flags & FXP_FLAG_EXT_RFA)
		sc->maxtxseg--;
	error = bus_dma_tag_create(NULL, 2, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES * sc->maxtxseg,
	    sc->maxtxseg, MCLBYTES, 0, busdma_lock_mutex, &Giant,
	    &sc->fxp_mtag);
	if (error) {
		device_printf(dev, "could not allocate dma tag\n");
		goto fail;
	}

	error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL, sizeof(struct fxp_stats), 1,
	    sizeof(struct fxp_stats), 0, busdma_lock_mutex, &Giant,
	    &sc->fxp_stag);
	if (error) {
		device_printf(dev, "could not allocate dma tag\n");
		goto fail;
	}

	error = bus_dmamem_alloc(sc->fxp_stag, (void **)&sc->fxp_stats,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->fxp_smap);
	if (error)
		goto fail;
	error = bus_dmamap_load(sc->fxp_stag, sc->fxp_smap, sc->fxp_stats,
	    sizeof(struct fxp_stats), fxp_dma_map_addr, &sc->stats_addr, 0);
	if (error) {
		device_printf(dev, "could not map the stats buffer\n");
		goto fail;
	}

	error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL, FXP_TXCB_SZ, 1,
	    FXP_TXCB_SZ, 0, busdma_lock_mutex, &Giant, &sc->cbl_tag);
	if (error) {
		device_printf(dev, "could not allocate dma tag\n");
		goto fail;
	}

	error = bus_dmamem_alloc(sc->cbl_tag, (void **)&sc->fxp_desc.cbl_list,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->cbl_map);
	if (error)
		goto fail;

	error = bus_dmamap_load(sc->cbl_tag, sc->cbl_map,
	    sc->fxp_desc.cbl_list, FXP_TXCB_SZ, fxp_dma_map_addr,
	    &sc->fxp_desc.cbl_addr, 0);
	if (error) {
		device_printf(dev, "could not map DMA memory\n");
		goto fail;
	}

	error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL, sizeof(struct fxp_cb_mcs), 1,
	    sizeof(struct fxp_cb_mcs), 0, busdma_lock_mutex, &Giant,
	    &sc->mcs_tag);
	if (error) {
		device_printf(dev, "could not allocate dma tag\n");
		goto fail;
	}

	error = bus_dmamem_alloc(sc->mcs_tag, (void **)&sc->mcsp,
	    BUS_DMA_NOWAIT, &sc->mcs_map);
	if (error)
		goto fail;
	error = bus_dmamap_load(sc->mcs_tag, sc->mcs_map, sc->mcsp,
	    sizeof(struct fxp_cb_mcs), fxp_dma_map_addr, &sc->mcs_addr, 0);
	if (error) {
		device_printf(dev, "can't map the multicast setup command\n");
		goto fail;
	}

	/*
	 * Pre-allocate the TX DMA maps and setup the pointers to
	 * the TX command blocks.
	 */
	txp = sc->fxp_desc.tx_list;
	tcbp = sc->fxp_desc.cbl_list;
	for (i = 0; i < FXP_NTXCB; i++) {
		txp[i].tx_cb = tcbp + i;
		error = bus_dmamap_create(sc->fxp_mtag, 0, &txp[i].tx_map);
		if (error) {
			device_printf(dev, "can't create DMA map for TX\n");
			goto fail;
		}
	}
	error = bus_dmamap_create(sc->fxp_mtag, 0, &sc->spare_map);
	if (error) {
		device_printf(dev, "can't create spare DMA map\n");
		goto fail;
	}

	/*
	 * Pre-allocate our receive buffers.
	 */
	sc->fxp_desc.rx_head = sc->fxp_desc.rx_tail = NULL;
	for (i = 0; i < FXP_NRFABUFS; i++) {
		rxp = &sc->fxp_desc.rx_list[i];
		error = bus_dmamap_create(sc->fxp_mtag, 0, &rxp->rx_map);
		if (error) {
			device_printf(dev, "can't create DMA map for RX\n");
			goto fail;
		}
		if (fxp_add_rfabuf(sc, rxp) != 0) {
			error = ENOMEM;
			goto fail;
		}
	}

	/*
	 * Read MAC address.
	 */
	fxp_read_eeprom(sc, myea, 0, 3);
	eaddr[0] = myea[0] & 0xff;
	eaddr[1] = myea[0] >> 8;
	eaddr[2] = myea[1] & 0xff;
	eaddr[3] = myea[1] >> 8;
	eaddr[4] = myea[2] & 0xff;
	eaddr[5] = myea[2] >> 8;
	if (bootverbose) {
		device_printf(dev, "PCI IDs: %04x %04x %04x %04x %04x\n",
		    pci_get_vendor(dev), pci_get_device(dev),
		    pci_get_subvendor(dev), pci_get_subdevice(dev),
		    pci_get_revid(dev));
		fxp_read_eeprom(sc, &data, 10, 1);
		device_printf(dev, "Dynamic Standby mode is %s\n",
		    data & 0x02 ? "enabled" : "disabled");
	}

	/*
	 * If this is only a 10Mbps device, then there is no MII, and
	 * the PHY will use a serial interface instead.
	 *
	 * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter
	 * doesn't have a programming interface of any sort.  The
	 * media is sensed automatically based on how the link partner
	 * is configured.  This is, in essence, manual configuration.
	 */
	if (sc->flags & FXP_FLAG_SERIAL_MEDIA) {
		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
	} else {
		if (mii_phy_probe(dev, &sc->miibus, fxp_ifmedia_upd,
		    fxp_ifmedia_sts)) {
	                device_printf(dev, "MII without any PHY!\n");
			error = ENXIO;
			goto fail;
		}
	}

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_init = fxp_init;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = fxp_ioctl;
	ifp->if_start = fxp_start;
	ifp->if_watchdog = fxp_watchdog;

	ifp->if_capabilities = ifp->if_capenable = 0;

	/* Enable checksum offload for 82550 or better chips */
	if (sc->flags & FXP_FLAG_EXT_RFA) {
		ifp->if_hwassist = FXP_CSUM_FEATURES;
		ifp->if_capabilities |= IFCAP_HWCSUM;
		ifp->if_capenable |= IFCAP_HWCSUM;
	}

#ifdef DEVICE_POLLING
	/* Inform the world we support polling. */
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	/*
	 * Attach the interface.
	 */
	ether_ifattach(ifp, eaddr);

	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capenable |= IFCAP_VLAN_MTU; /* the hw bits already set */

	/*
	 * Let the system queue as many packets as we have available
	 * TX descriptors.
	 */
	IFQ_SET_MAXLEN(&ifp->if_snd, FXP_NTXCB - 1);
	ifp->if_snd.ifq_drv_maxlen = FXP_NTXCB - 1;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Hook our interrupt after all initialization is complete.
	 */
	error = bus_setup_intr(dev, sc->fxp_res[1], INTR_TYPE_NET | INTR_MPSAFE,
			       fxp_intr, sc, &sc->ih);
	if (error) {
		device_printf(dev, "could not setup irq\n");
		ether_ifdetach(sc->ifp);
		goto fail;
	}

fail:
	if (error)
		fxp_release(sc);
	return (error);
}

/*
 * Release all resources.  The softc lock should not be held and the
 * interrupt should already be torn down.
 */
static void
fxp_release(struct fxp_softc *sc)
{
	struct fxp_rx *rxp;
	struct fxp_tx *txp;
	int i;

	FXP_LOCK_ASSERT(sc, MA_NOTOWNED);
	KASSERT(sc->ih == NULL,
	    ("fxp_release() called with intr handle still active"));
	if (sc->miibus)
		device_delete_child(sc->dev, sc->miibus);
	bus_generic_detach(sc->dev);
	ifmedia_removeall(&sc->sc_media);
	if (sc->fxp_desc.cbl_list) {
		bus_dmamap_unload(sc->cbl_tag, sc->cbl_map);
		bus_dmamem_free(sc->cbl_tag, sc->fxp_desc.cbl_list,
		    sc->cbl_map);
	}
	if (sc->fxp_stats) {
		bus_dmamap_unload(sc->fxp_stag, sc->fxp_smap);
		bus_dmamem_free(sc->fxp_stag, sc->fxp_stats, sc->fxp_smap);
	}
	if (sc->mcsp) {
		bus_dmamap_unload(sc->mcs_tag, sc->mcs_map);
		bus_dmamem_free(sc->mcs_tag, sc->mcsp, sc->mcs_map);
	}
	bus_release_resources(sc->dev, sc->fxp_spec, sc->fxp_res);
	if (sc->fxp_mtag) {
		for (i = 0; i < FXP_NRFABUFS; i++) {
			rxp = &sc->fxp_desc.rx_list[i];
			if (rxp->rx_mbuf != NULL) {
				bus_dmamap_sync(sc->fxp_mtag, rxp->rx_map,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(sc->fxp_mtag, rxp->rx_map);
				m_freem(rxp->rx_mbuf);
			}
			bus_dmamap_destroy(sc->fxp_mtag, rxp->rx_map);
		}
		bus_dmamap_destroy(sc->fxp_mtag, sc->spare_map);
		for (i = 0; i < FXP_NTXCB; i++) {
			txp = &sc->fxp_desc.tx_list[i];
			if (txp->tx_mbuf != NULL) {
				bus_dmamap_sync(sc->fxp_mtag, txp->tx_map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(sc->fxp_mtag, txp->tx_map);
				m_freem(txp->tx_mbuf);
			}
			bus_dmamap_destroy(sc->fxp_mtag, txp->tx_map);
		}
		bus_dma_tag_destroy(sc->fxp_mtag);
	}
	if (sc->fxp_stag)
		bus_dma_tag_destroy(sc->fxp_stag);
	if (sc->cbl_tag)
		bus_dma_tag_destroy(sc->cbl_tag);
	if (sc->mcs_tag)
		bus_dma_tag_destroy(sc->mcs_tag);
	if (sc->ifp)
		if_free(sc->ifp);

	mtx_destroy(&sc->sc_mtx);
}

/*
 * Detach interface.
 */
static int
fxp_detach(device_t dev)
{
	struct fxp_softc *sc = device_get_softc(dev);

#ifdef DEVICE_POLLING
	if (sc->ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(sc->ifp);
#endif

	FXP_LOCK(sc);
	sc->suspended = 1;	/* Do same thing as we do for suspend */
	/*
	 * Stop DMA and drop transmit queue, but disable interrupts first.
	 */
	CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_DISABLE);
	fxp_stop(sc);
	FXP_UNLOCK(sc);
	callout_drain(&sc->stat_ch);

	/*
	 * Close down routes etc.
	 */
	ether_ifdetach(sc->ifp);

	/*
	 * Unhook interrupt before dropping lock. This is to prevent
	 * races with fxp_intr().
	 */
	bus_teardown_intr(sc->dev, sc->fxp_res[1], sc->ih);
	sc->ih = NULL;

	/* Release our allocated resources. */
	fxp_release(sc);
	return (0);
}

/*
 * Device shutdown routine. Called at system shutdown after sync. The
 * main purpose of this routine is to shut off receiver DMA so that
 * kernel memory doesn't get clobbered during warmboot.
 */
static int
fxp_shutdown(device_t dev)
{
	struct fxp_softc *sc = device_get_softc(dev);

	/*
	 * Make sure that DMA is disabled prior to reboot. Not doing
	 * so could allow DMA to corrupt kernel memory during the
	 * reboot before the driver initializes.
	 */
	FXP_LOCK(sc);
	fxp_stop(sc);
	FXP_UNLOCK(sc);
	return (0);
}

/*
 * Device suspend routine.  Stop the interface and save some PCI
 * settings in case the BIOS doesn't restore them properly on
 * resume.
 */
static int
fxp_suspend(device_t dev)
{
	struct fxp_softc *sc = device_get_softc(dev);

	FXP_LOCK(sc);

	fxp_stop(sc);

	sc->suspended = 1;

	FXP_UNLOCK(sc);
	return (0);
}

/*
 * Device resume routine. re-enable busmastering, and restart the interface if
 * appropriate.
 */
static int
fxp_resume(device_t dev)
{
	struct fxp_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = sc->ifp;

	FXP_LOCK(sc);

	CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET);
	DELAY(10);

	/* reinitialize interface if necessary */
	if (ifp->if_flags & IFF_UP)
		fxp_init_body(sc);

	sc->suspended = 0;

	FXP_UNLOCK(sc);
	return (0);
}

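/*
 * Clock `length' bits of `data' out to the serial EEPROM, most
 * significant bit first, toggling the EESK clock line for each bit.
 */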
static void
fxp_eeprom_shiftin(struct fxp_softc *sc, int data, int length)
{
	uint16_t reg;
	int x;

	/*
	 * Shift in data.
	 */
	for (x = 1 << (length - 1); x; x >>= 1) {
		if (data & x)
			reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
		else
			reg = FXP_EEPROM_EECS;
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
		DELAY(1);
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg | FXP_EEPROM_EESK);
		DELAY(1);
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
		DELAY(1);
	}
}

/*
 * Read from the serial EEPROM. Basically, you manually shift in
 * the read opcode (one bit at a time) and then shift in the address,
 * and then you shift out the data (all of this one bit at a time).
 * The word size is 16 bits, so you have to provide the address for
 * every 16 bits of data.
 */
static uint16_t
fxp_eeprom_getword(struct fxp_softc *sc, int offset, int autosize)
{
	uint16_t reg, data;
	int x;

	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	/*
	 * Shift in read opcode.
	 */
	fxp_eeprom_shiftin(sc, FXP_EEPROM_OPC_READ, 3);
	/*
	 * Shift in address.
	 */
	data = 0;
	for (x = 1 << (sc->eeprom_size - 1); x; x >>= 1) {
		if (offset & x)
			reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
		else
			reg = FXP_EEPROM_EECS;
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
		DELAY(1);
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg | FXP_EEPROM_EESK);
		DELAY(1);
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
		DELAY(1);
		reg = CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO;
		data++;
		if (autosize && reg == 0) {
			sc->eeprom_size = data;
			break;
		}
	}
	/*
	 * Shift out data.
	 */
	data = 0;
	reg = FXP_EEPROM_EECS;
	for (x = 1 << 15; x; x >>= 1) {
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg | FXP_EEPROM_EESK);
		DELAY(1);
		if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO)
			data |= x;
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
		DELAY(1);
	}
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);

	return (data);
}

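/*
 * Write one 16-bit word to the serial EEPROM: enable erase/write,
 * shift in the write opcode, address and data, wait for the part to
 * signal completion, then disable erase/write again.
 */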
static void
fxp_eeprom_putword(struct fxp_softc *sc, int offset, uint16_t data)
{
	int i;

	/*
	 * Erase/write enable.
	 */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	fxp_eeprom_shiftin(sc, 0x4, 3);
	fxp_eeprom_shiftin(sc, 0x03 << (sc->eeprom_size - 2), sc->eeprom_size);
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);
	/*
	 * Shift in write opcode, address, data.
	 */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	fxp_eeprom_shiftin(sc, FXP_EEPROM_OPC_WRITE, 3);
	fxp_eeprom_shiftin(sc, offset, sc->eeprom_size);
	fxp_eeprom_shiftin(sc, data, 16);
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);
	/*
	 * Wait for EEPROM to finish up.
	 */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	DELAY(1);
	for (i = 0; i < 1000; i++) {
		if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO)
			break;
		DELAY(50);
	}
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);
	/*
	 * Erase/write disable.
	 */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	fxp_eeprom_shiftin(sc, 0x4, 3);
	fxp_eeprom_shiftin(sc, 0, sc->eeprom_size);
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);
}

/*
 * From NetBSD:
 *
 * Figure out EEPROM size.
 *
 * 559's can have either 64-word or 256-word EEPROMs, the 558
 * datasheet only talks about 64-word EEPROMs, and the 557 datasheet
 * talks about the existence of 16 to 256 word EEPROMs.
 *
 * The only known sizes are 64 and 256, where the 256 version is used
 * by CardBus cards to store CIS information.
 *
 * The address is shifted in msb-to-lsb, and after the last
 * address-bit the EEPROM is supposed to output a `dummy zero' bit,
 * after which follows the actual data. We try to detect this zero, by
 * probing the data-out bit in the EEPROM control register just after
 * having shifted in a bit. If the bit is zero, we assume we've
 * shifted enough address bits. The data-out should be tri-state,
 * before this, which should translate to a logical one.
 */
static void
fxp_autosize_eeprom(struct fxp_softc *sc)
{

	/* guess maximum size of 256 words */
	sc->eeprom_size = 8;

	/* autosize */
	(void) fxp_eeprom_getword(sc, 0, 1);
}

static void
fxp_read_eeprom(struct fxp_softc *sc, u_short *data, int offset, int words)
{
	int i;

	for (i = 0; i < words; i++)
		data[i] = fxp_eeprom_getword(sc, offset + i, 0);
}

static void
fxp_write_eeprom(struct fxp_softc *sc, u_short *data, int offset, int words)
{
	int i;

	for (i = 0; i < words; i++)
		fxp_eeprom_putword(sc, offset + i, data[i]);
}

/*
 * Grab the softc lock and call the real fxp_start_body() routine
 */
static void
fxp_start(struct ifnet *ifp)
{
	struct fxp_softc *sc = ifp->if_softc;

	FXP_LOCK(sc);
	fxp_start_body(ifp);
	FXP_UNLOCK(sc);
}

/*
 * Start packet transmission on the interface.
 * This routine must be called with the softc lock held, and is an
 * internal entry point only.
 */
static void
fxp_start_body(struct ifnet *ifp)
{
	struct fxp_softc *sc = ifp->if_softc;
	struct mbuf *mb_head;
	int error, txqueued;

	FXP_LOCK_ASSERT(sc, MA_OWNED);

	/*
	 * See if we need to suspend xmit until the multicast filter
	 * has been reprogrammed (which can only be done at the head
	 * of the command chain).
	 */
	if (sc->need_mcsetup)
		return;

	/*
	 * We're finished if there is nothing more to add to the list or if
	 * we're all filled up with buffers to transmit.
	 * NOTE: One TxCB is reserved to guarantee that fxp_mc_setup() can add
	 *       a NOP command when needed.
	 */
	txqueued = 0;
	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
	    sc->tx_queued < FXP_NTXCB - 1) {

		/*
		 * Grab a packet to transmit.
		 */
		IFQ_DRV_DEQUEUE(&ifp->if_snd, mb_head);
		if (mb_head == NULL)
			break;

		error = fxp_encap(sc, mb_head);
		if (error)
			break;
		txqueued = 1;
	}
	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);

	/*
	 * We're finished. If we added to the list, issue a RESUME to get DMA
	 * going again if suspended.
	 */
	if (txqueued) {
		fxp_scb_wait(sc);
		fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_RESUME);
	}
}

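/*
 * Map a single outgoing mbuf chain into the next available TxCB, set up
 * any checksum offload bits and the TBD array, and chain it onto the end
 * of the command list.
 */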
static int
fxp_encap(struct fxp_softc *sc, struct mbuf *m_head)
{
	struct ifnet *ifp;
	struct mbuf *m;
	struct fxp_tx *txp;
	struct fxp_cb_tx *cbp;
	bus_dma_segment_t segs[FXP_NTXSEG];
	int chainlen, error, i, nseg;

	FXP_LOCK_ASSERT(sc, MA_OWNED);
	ifp = sc->ifp;

	/*
	 * Get pointer to next available tx desc.
	 */
	txp = sc->fxp_desc.tx_last->tx_next;

	/*
	 * A note in Appendix B of the Intel 8255x 10/100 Mbps
	 * Ethernet Controller Family Open Source Software
	 * Developer Manual says:
	 *   Using software parsing is only allowed with legal
	 *   TCP/IP or UDP/IP packets.
	 *   ...
	 *   For all other datagrams, hardware parsing must
	 *   be used.
	 * Software parsing appears to truncate ICMP and
	 * fragmented UDP packets that contain one to three
	 * bytes in the second (and final) mbuf of the packet.
	 */
	if (sc->flags & FXP_FLAG_EXT_RFA)
		txp->tx_cb->ipcb_ip_activation_high =
		    FXP_IPCB_HARDWAREPARSING_ENABLE;

	/*
	 * Deal with TCP/IP checksum offload. Note that
	 * in order for TCP checksum offload to work,
	 * the pseudo header checksum must have already
	 * been computed and stored in the checksum field
	 * in the TCP header. The stack should have
	 * already done this for us.
	 */
	if (m_head->m_pkthdr.csum_flags) {
		if (m_head->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
			txp->tx_cb->ipcb_ip_schedule =
			    FXP_IPCB_TCPUDP_CHECKSUM_ENABLE;
			if (m_head->m_pkthdr.csum_flags & CSUM_TCP)
				txp->tx_cb->ipcb_ip_schedule |=
				    FXP_IPCB_TCP_PACKET;
		}

#ifdef FXP_IP_CSUM_WAR
		/*
		 * XXX The 82550 chip appears to have trouble
		 * dealing with IP header checksums in very small
		 * datagrams, namely fragments from 1 to 3 bytes
		 * in size. For example, say you want to transmit
		 * a UDP packet of 1473 bytes. The packet will be
		 * fragmented over two IP datagrams, the latter
		 * containing only one byte of data. The 82550 will
		 * botch the header checksum on the 1-byte fragment.
		 * As long as the datagram contains 4 or more bytes
		 * of data, you're ok.
		 *
                 * The following code attempts to work around this
		 * problem: if the datagram is less than 38 bytes
		 * in size (14 bytes ether header, 20 bytes IP header,
		 * plus 4 bytes of data), we punt and compute the IP
		 * header checksum by hand. This workaround doesn't
		 * work very well, however, since it can be fooled
		 * by things like VLAN tags and IP options that make
		 * the header sizes/offsets vary.
		 */

		if (m_head->m_pkthdr.csum_flags & CSUM_IP) {
			if (m_head->m_pkthdr.len < 38) {
				struct ip *ip;
				m_head->m_data += ETHER_HDR_LEN;
				ip = mtod(m_head, struct ip *);
				ip->ip_sum = in_cksum(m_head, ip->ip_hl << 2);
				m_head->m_data -= ETHER_HDR_LEN;
			} else {
				txp->tx_cb->ipcb_ip_activation_high =
				    FXP_IPCB_HARDWAREPARSING_ENABLE;
				txp->tx_cb->ipcb_ip_schedule |=
				    FXP_IPCB_IP_CHECKSUM_ENABLE;
			}
		}
#endif
	}

	chainlen = 0;
	for (m = m_head; m != NULL && chainlen <= sc->maxtxseg; m = m->m_next)
		chainlen++;
	if (chainlen > sc->maxtxseg) {
		struct mbuf *mn;

		/*
		 * We ran out of segments. We have to recopy this
		 * mbuf chain first. Bail out if we can't get the
		 * new buffers.
		 */
		mn = m_defrag(m_head, M_DONTWAIT);
		if (mn == NULL) {
			m_freem(m_head);
			return (-1);
		} else {
			m_head = mn;
		}
	}

	/*
	 * Go through each of the mbufs in the chain and initialize
	 * the transmit buffer descriptors with the physical address
	 * and size of the mbuf.
	 */
	error = bus_dmamap_load_mbuf_sg(sc->fxp_mtag, txp->tx_map,
	    m_head, segs, &nseg, 0);
	if (error) {
		device_printf(sc->dev, "can't map mbuf (error %d)\n", error);
		m_freem(m_head);
		return (-1);
	}

	KASSERT(nseg <= sc->maxtxseg, ("too many DMA segments"));

	cbp = txp->tx_cb;
	for (i = 0; i < nseg; i++) {
		KASSERT(segs[i].ds_len <= MCLBYTES, ("segment size too large"));
		/*
		 * If this is an 82550/82551, then we're using extended
		 * TxCBs _and_ we're using checksum offload. This means
		 * that the TxCB is really an IPCB. One major difference
		 * between the two is that with plain extended TxCBs,
		 * the bottom half of the TxCB contains two entries from
		 * the TBD array, whereas IPCBs contain just one entry:
		 * one entry (8 bytes) has been sacrificed for the TCP/IP
		 * checksum offload control bits. So to make things work
		 * right, we have to start filling in the TBD array
		 * starting from a different place depending on whether
		 * the chip is an 82550/82551 or not.
		 */
		if (sc->flags & FXP_FLAG_EXT_RFA) {
			cbp->tbd[i + 1].tb_addr = htole32(segs[i].ds_addr);
			cbp->tbd[i + 1].tb_size = htole32(segs[i].ds_len);
		} else {
			cbp->tbd[i].tb_addr = htole32(segs[i].ds_addr);
			cbp->tbd[i].tb_size = htole32(segs[i].ds_len);
		}
	}
	cbp->tbd_number = nseg;

	bus_dmamap_sync(sc->fxp_mtag, txp->tx_map, BUS_DMASYNC_PREWRITE);
	txp->tx_mbuf = m_head;
	txp->tx_cb->cb_status = 0;
	txp->tx_cb->byte_count = 0;
	if (sc->tx_queued != FXP_CXINT_THRESH - 1) {
		txp->tx_cb->cb_command =
		    htole16(sc->tx_cmd | FXP_CB_COMMAND_SF |
		    FXP_CB_COMMAND_S);
	} else {
		txp->tx_cb->cb_command =
		    htole16(sc->tx_cmd | FXP_CB_COMMAND_SF |
		    FXP_CB_COMMAND_S | FXP_CB_COMMAND_I);
		/*
		 * Set a 5 second timer just in case we don't hear
		 * from the card again.
		 */
		ifp->if_timer = 5;
	}
	txp->tx_cb->tx_threshold = tx_threshold;

	/*
	 * Advance the end of list forward.
	 */

#ifdef __alpha__
	/*
	 * On platforms which can't access memory in 16-bit
	 * granularities, we must prevent the card from DMA'ing
	 * up the status while we update the command field.
	 * This could cause us to overwrite the completion status.
	 * XXX This is probably bogus and we're _not_ looking
	 * for atomicity here.
	 */
	atomic_clear_16(&sc->fxp_desc.tx_last->tx_cb->cb_command,
	    htole16(FXP_CB_COMMAND_S));
#else
	sc->fxp_desc.tx_last->tx_cb->cb_command &= htole16(~FXP_CB_COMMAND_S);
#endif /*__alpha__*/
	sc->fxp_desc.tx_last = txp;

	/*
	 * Advance the beginning of the list forward if there are
	 * no other packets queued (when nothing is queued, tx_first
	 * sits on the last TxCB that was sent out).
	 */
	if (sc->tx_queued == 0)
		sc->fxp_desc.tx_first = txp;

	sc->tx_queued++;

	/*
	 * Pass packet to bpf if there is a listener.
	 */
	BPF_MTAP(ifp, m_head);
	return (0);
}

#ifdef DEVICE_POLLING
static poll_handler_t fxp_poll;

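/*
 * Polling entry point: process up to `count' received packets and, for
 * POLL_AND_CHECK_STATUS commands, acknowledge any other pending status
 * bits before handing off to fxp_intr_body().
 */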
static void
fxp_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct fxp_softc *sc = ifp->if_softc;
	uint8_t statack;

	FXP_LOCK(sc);
	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		FXP_UNLOCK(sc);
		return;
	}

	statack = FXP_SCB_STATACK_CXTNO | FXP_SCB_STATACK_CNA |
	    FXP_SCB_STATACK_FR;
	if (cmd == POLL_AND_CHECK_STATUS) {
		uint8_t tmp;

		tmp = CSR_READ_1(sc, FXP_CSR_SCB_STATACK);
		if (tmp == 0xff || tmp == 0) {
			FXP_UNLOCK(sc);
			return; /* nothing to do */
		}
		tmp &= ~statack;
		/* ack what we can */
		if (tmp != 0)
			CSR_WRITE_1(sc, FXP_CSR_SCB_STATACK, tmp);
		statack |= tmp;
	}
	fxp_intr_body(sc, ifp, statack, count);
	FXP_UNLOCK(sc);
}
#endif /* DEVICE_POLLING */

/*
 * Process interface interrupts.
 */
static void
fxp_intr(void *xsc)
{
	struct fxp_softc *sc = xsc;
	struct ifnet *ifp = sc->ifp;
	uint8_t statack;

	FXP_LOCK(sc);
	if (sc->suspended) {
		FXP_UNLOCK(sc);
		return;
	}

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING) {
		FXP_UNLOCK(sc);
		return;
	}
#endif
	while ((statack = CSR_READ_1(sc, FXP_CSR_SCB_STATACK)) != 0) {
		/*
		 * It should not be possible to have all bits set; the
		 * FXP_SCB_INTR_SWI bit always returns 0 on a read.  If
		 * all bits are set, this may indicate that the card has
		 * been physically ejected, so ignore it.
		 */
		if (statack == 0xff) {
			FXP_UNLOCK(sc);
			return;
		}

		/*
		 * First ACK all the interrupts in this pass.
		 */
		CSR_WRITE_1(sc, FXP_CSR_SCB_STATACK, statack);
		fxp_intr_body(sc, ifp, statack, -1);
	}
	FXP_UNLOCK(sc);
}

1528static void
1529fxp_txeof(struct fxp_softc *sc)
1530{
1531	struct fxp_tx *txp;
1532
1533	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREREAD);
1534	for (txp = sc->fxp_desc.tx_first; sc->tx_queued &&
1535	    (le16toh(txp->tx_cb->cb_status) & FXP_CB_STATUS_C) != 0;
1536	    txp = txp->tx_next) {
1537		if (txp->tx_mbuf != NULL) {
1538			bus_dmamap_sync(sc->fxp_mtag, txp->tx_map,
1539			    BUS_DMASYNC_POSTWRITE);
1540			bus_dmamap_unload(sc->fxp_mtag, txp->tx_map);
1541			m_freem(txp->tx_mbuf);
1542			txp->tx_mbuf = NULL;
1543			/* clear this to reset csum offload bits */
1544			txp->tx_cb->tbd[0].tb_addr = 0;
1545		}
1546		sc->tx_queued--;
1547	}
1548	sc->fxp_desc.tx_first = txp;
1549	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
1550}
1551
1552static void
1553fxp_intr_body(struct fxp_softc *sc, struct ifnet *ifp, uint8_t statack,
1554    int count)
1555{
1556	struct mbuf *m;
1557	struct fxp_rx *rxp;
1558	struct fxp_rfa *rfa;
1559	int rnr = (statack & FXP_SCB_STATACK_RNR) ? 1 : 0;
1560	int fxp_rc = 0;
1561
1562	FXP_LOCK_ASSERT(sc, MA_OWNED);
1563	if (rnr)
1564		sc->rnr++;
1565#ifdef DEVICE_POLLING
1566	/* Pick up a deferred RNR condition if `count' ran out last time. */
1567	if (sc->flags & FXP_FLAG_DEFERRED_RNR) {
1568		sc->flags &= ~FXP_FLAG_DEFERRED_RNR;
1569		rnr = 1;
1570	}
1571#endif
1572
1573	/*
1574	 * Free any finished transmit mbuf chains.
1575	 *
1576	 * Handle the CNA event like a CXTNO event. It used to
1577	 * be that this event (control unit not ready) was not
1578	 * encountered, but it is now with the SMPng modifications.
1579	 * The exact sequence of events that occur when the interface
1580	 * is brought up is different now, and if this event
1581	 * goes unhandled, the configuration/rxfilter setup sequence
1582	 * can stall for several seconds. The result is that no
1583	 * packets go out onto the wire for about 5 to 10 seconds
1584	 * after the interface is ifconfig'ed for the first time.
1585	 */
1586	if (statack & (FXP_SCB_STATACK_CXTNO | FXP_SCB_STATACK_CNA)) {
1587		fxp_txeof(sc);
1588
1589		ifp->if_timer = 0;
1590		if (sc->tx_queued == 0) {
1591			if (sc->need_mcsetup)
1592				fxp_mc_setup(sc);
1593		}
1594		/*
1595		 * Try to start more packets transmitting.
1596		 */
1597		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1598			fxp_start_body(ifp);
1599	}
1600
1601	/*
1602	 * Just return if nothing happened on the receive side.
1603	 */
1604	if (!rnr && (statack & FXP_SCB_STATACK_FR) == 0)
1605		return;
1606
1607	/*
1608	 * Process receiver interrupts. If a no-resource (RNR)
1609	 * condition exists, get whatever packets we can and
1610	 * re-start the receiver.
1611	 *
1612	 * When using polling, we do not process the list to completion,
1613	 * so when we get an RNR interrupt we must defer the restart
1614	 * until we hit the last buffer with the C bit set.
1615	 * If we run out of cycles and the head RFA has the C bit set,
1616	 * record the pending RNR in the FXP_FLAG_DEFERRED_RNR flag so
1617	 * that the info will be used in the subsequent polling cycle.
1618	 */
1619	for (;;) {
1620		rxp = sc->fxp_desc.rx_head;
1621		m = rxp->rx_mbuf;
1622		rfa = (struct fxp_rfa *)(m->m_ext.ext_buf +
1623		    RFA_ALIGNMENT_FUDGE);
1624		bus_dmamap_sync(sc->fxp_mtag, rxp->rx_map,
1625		    BUS_DMASYNC_POSTREAD);
1626
1627#ifdef DEVICE_POLLING /* loop at most count times if count >=0 */
1628		if (count >= 0 && count-- == 0) {
1629			if (rnr) {
1630				/* Defer RNR processing until the next time. */
1631				sc->flags |= FXP_FLAG_DEFERRED_RNR;
1632				rnr = 0;
1633			}
1634			break;
1635		}
1636#endif /* DEVICE_POLLING */
1637
1638		if ((le16toh(rfa->rfa_status) & FXP_RFA_STATUS_C) == 0)
1639			break;
1640
1641		/*
1642		 * Advance head forward.
1643		 */
1644		sc->fxp_desc.rx_head = rxp->rx_next;
1645
1646		/*
1647		 * Add a new buffer to the receive chain.
1648		 * If this fails, the old buffer is recycled
1649		 * instead.
1650		 */
1651		fxp_rc = fxp_add_rfabuf(sc, rxp);
1652		if (fxp_rc == 0) {
1653			int total_len;
1654
1655			/*
1656			 * Fetch packet length (the top 2 bits of
1657			 * actual_size are flags set by the controller
1658			 * upon completion), and drop the packet in case
1659			 * of bogus length or CRC errors.
1660			 */
1661			total_len = le16toh(rfa->actual_size) & 0x3fff;
1662			if (total_len < sizeof(struct ether_header) ||
1663			    total_len > MCLBYTES - RFA_ALIGNMENT_FUDGE -
1664				sc->rfa_size ||
1665			    le16toh(rfa->rfa_status) & FXP_RFA_STATUS_CRC) {
1666				m_freem(m);
1667				continue;
1668			}
1669
1670			/* Do IP checksum checking. */
1671			if (le16toh(rfa->rfa_status) & FXP_RFA_STATUS_PARSE) {
1672				if (rfa->rfax_csum_sts &
1673				    FXP_RFDX_CS_IP_CSUM_BIT_VALID)
1674					m->m_pkthdr.csum_flags |=
1675					    CSUM_IP_CHECKED;
1676				if (rfa->rfax_csum_sts &
1677				    FXP_RFDX_CS_IP_CSUM_VALID)
1678					m->m_pkthdr.csum_flags |=
1679					    CSUM_IP_VALID;
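				/*
				 * A hardware-verified TCP/UDP checksum is
				 * reported below as CSUM_DATA_VALID |
				 * CSUM_PSEUDO_HDR with csum_data set to
				 * 0xffff, the conventional way of telling the
				 * protocol layers that the checksum has
				 * already been verified.
				 */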
1680				if ((rfa->rfax_csum_sts &
1681				    FXP_RFDX_CS_TCPUDP_CSUM_BIT_VALID) &&
1682				    (rfa->rfax_csum_sts &
1683				    FXP_RFDX_CS_TCPUDP_CSUM_VALID)) {
1684					m->m_pkthdr.csum_flags |=
1685					    CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
1686					m->m_pkthdr.csum_data = 0xffff;
1687				}
1688			}
1689
1690			m->m_pkthdr.len = m->m_len = total_len;
1691			m->m_pkthdr.rcvif = ifp;
1692
1693			/*
1694			 * Drop locks before calling if_input() since it
1695			 * may re-enter fxp_start() in the netisr case.
1696			 * This would result in a lock reversal.  Better
1697			 * performance might be obtained by chaining all
1698			 * packets received, dropping the lock, and then
1699			 * calling if_input() on each one.
1700			 */
1701			FXP_UNLOCK(sc);
1702			(*ifp->if_input)(ifp, m);
1703			FXP_LOCK(sc);
1704		} else if (fxp_rc == ENOBUFS) {
1705			rnr = 0;
1706			break;
1707		}
1708	}
1709	if (rnr) {
1710		fxp_scb_wait(sc);
1711		CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL,
1712		    sc->fxp_desc.rx_head->rx_addr);
1713		fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_START);
1714	}
1715}
1716
1717/*
1718 * Update packet in/out/collision statistics. The i82557 doesn't
1719 * allow you to access these counters without doing a fairly
1720 * expensive DMA to get _all_ of the statistics it maintains, so
1721 * we do this operation here only once per second. The statistics
1722 * counters in the kernel are updated from the previous dump-stats
1723 * DMA and then a new dump-stats DMA is started. The on-chip
1724 * counters are zeroed when the DMA completes. If we can't start
1725 * the DMA immediately, we don't wait - we just prepare to read
1726 * them again next time.
1727 */
1728static void
1729fxp_tick(void *xsc)
1730{
1731	struct fxp_softc *sc = xsc;
1732	struct ifnet *ifp = sc->ifp;
1733	struct fxp_stats *sp = sc->fxp_stats;
1734
1735	FXP_LOCK_ASSERT(sc, MA_OWNED);
1736	bus_dmamap_sync(sc->fxp_stag, sc->fxp_smap, BUS_DMASYNC_POSTREAD);
1737	ifp->if_opackets += le32toh(sp->tx_good);
1738	ifp->if_collisions += le32toh(sp->tx_total_collisions);
1739	if (sp->rx_good) {
1740		ifp->if_ipackets += le32toh(sp->rx_good);
1741		sc->rx_idle_secs = 0;
1742	} else {
1743		/*
1744		 * Receiver's been idle for another second.
1745		 */
1746		sc->rx_idle_secs++;
1747	}
1748	ifp->if_ierrors +=
1749	    le32toh(sp->rx_crc_errors) +
1750	    le32toh(sp->rx_alignment_errors) +
1751	    le32toh(sp->rx_rnr_errors) +
1752	    le32toh(sp->rx_overrun_errors);
1753	/*
1754	 * If any transmit underruns occurred, bump up the transmit
1755	 * threshold by another 512 bytes (64 * 8).
1756	 */
1757	if (sp->tx_underruns) {
1758		ifp->if_oerrors += le32toh(sp->tx_underruns);
1759		if (tx_threshold < 192)
1760			tx_threshold += 64;
1761	}
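	/*
	 * The threshold is kept in units of 8 bytes, so each 64-unit bump
	 * above adds 512 bytes; bumps stop once the threshold reaches 192
	 * units (1536 bytes).
	 */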
1762
1763	/*
1764	 * Release any xmit buffers that have completed DMA. This isn't
1765	 * strictly necessary to do here, but it's advantageous for mbufs
1766	 * with external storage to be released in a timely manner rather
1767	 * than being deferred for a potentially long time. This limits
1768	 * the delay to a maximum of one second.
1769	 */
1770	fxp_txeof(sc);
1771
1772	/*
1773	 * If we haven't received any packets in FXP_MAX_RX_IDLE seconds,
1774	 * then assume the receiver has locked up and attempt to clear
1775	 * the condition by reprogramming the multicast filter. This is
1776	 * a work-around for a bug in the 82557 where the receiver locks
1777	 * up if it gets certain types of garbage in the synchronization
1778	 * bits prior to the packet header. This bug is supposed to only
1779	 * occur in 10Mbps mode, but has been seen to occur in 100Mbps
1780	 * mode as well (perhaps due to a 10/100 speed transition).
1781	 */
1782	if (sc->rx_idle_secs > FXP_MAX_RX_IDLE) {
1783		sc->rx_idle_secs = 0;
1784		fxp_mc_setup(sc);
1785	}
1786	/*
1787	 * If there is no pending command, start another stats
1788	 * dump. Otherwise punt for now.
1789	 */
1790	if (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) == 0) {
1791		/*
1792		 * Start another stats dump.
1793		 */
1794		bus_dmamap_sync(sc->fxp_stag, sc->fxp_smap,
1795		    BUS_DMASYNC_PREREAD);
1796		fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_DUMPRESET);
1797	} else {
1798		/*
1799		 * A previous command is still waiting to be accepted.
1800		 * Just zero our copy of the stats and wait for the
1801		 * next timer event to update them.
1802		 */
1803		sp->tx_good = 0;
1804		sp->tx_underruns = 0;
1805		sp->tx_total_collisions = 0;
1806
1807		sp->rx_good = 0;
1808		sp->rx_crc_errors = 0;
1809		sp->rx_alignment_errors = 0;
1810		sp->rx_rnr_errors = 0;
1811		sp->rx_overrun_errors = 0;
1812	}
1813	if (sc->miibus != NULL)
1814		mii_tick(device_get_softc(sc->miibus));
1815
1816	/*
1817	 * Schedule another timeout one second from now.
1818	 */
1819	callout_reset(&sc->stat_ch, hz, fxp_tick, sc);
1820}
1821
1822/*
1823 * Stop the interface. Cancels the statistics updater and resets
1824 * the interface.
1825 */
1826static void
1827fxp_stop(struct fxp_softc *sc)
1828{
1829	struct ifnet *ifp = sc->ifp;
1830	struct fxp_tx *txp;
1831	int i;
1832
1833	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1834	ifp->if_timer = 0;
1835
1836	/*
1837	 * Cancel stats updater.
1838	 */
1839	callout_stop(&sc->stat_ch);
1840
1841	/*
1842	 * Issue software reset, which also unloads the microcode.
1843	 */
1844	sc->flags &= ~FXP_FLAG_UCODE;
1845	CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SOFTWARE_RESET);
1846	DELAY(50);
1847
1848	/*
1849	 * Release any xmit buffers.
1850	 */
1851	txp = sc->fxp_desc.tx_list;
1852	if (txp != NULL) {
1853		for (i = 0; i < FXP_NTXCB; i++) {
1854 			if (txp[i].tx_mbuf != NULL) {
1855				bus_dmamap_sync(sc->fxp_mtag, txp[i].tx_map,
1856				    BUS_DMASYNC_POSTWRITE);
1857				bus_dmamap_unload(sc->fxp_mtag, txp[i].tx_map);
1858				m_freem(txp[i].tx_mbuf);
1859				txp[i].tx_mbuf = NULL;
1860				/* clear this to reset csum offload bits */
1861				txp[i].tx_cb->tbd[0].tb_addr = 0;
1862			}
1863		}
1864	}
1865	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
1866	sc->tx_queued = 0;
1867}
1868
1869/*
1870 * Watchdog/transmission timeout handler. Called when a
1871 * transmission is started on the interface, but no interrupt is
1872 * received before the timeout. This usually indicates that the
1873 * card has wedged for some reason.
1874 */
1875static void
1876fxp_watchdog(struct ifnet *ifp)
1877{
1878	struct fxp_softc *sc = ifp->if_softc;
1879
1880	FXP_LOCK(sc);
1881	device_printf(sc->dev, "device timeout\n");
1882	ifp->if_oerrors++;
1883
1884	fxp_init_body(sc);
1885	FXP_UNLOCK(sc);
1886}
1887
1888/*
1889 * Acquire locks and then call the real initialization function.  This
1890 * is necessary because ether_ioctl() calls if_init() and this would
1891 * result in mutex recursion if the mutex was held.
1892 */
1893static void
1894fxp_init(void *xsc)
1895{
1896	struct fxp_softc *sc = xsc;
1897
1898	FXP_LOCK(sc);
1899	fxp_init_body(sc);
1900	FXP_UNLOCK(sc);
1901}
1902
1903/*
1904 * Perform device initialization. This routine must be called with the
1905 * softc lock held.
1906 */
1907static void
1908fxp_init_body(struct fxp_softc *sc)
1909{
1910	struct ifnet *ifp = sc->ifp;
1911	struct fxp_cb_config *cbp;
1912	struct fxp_cb_ias *cb_ias;
1913	struct fxp_cb_tx *tcbp;
1914	struct fxp_tx *txp;
1915	struct fxp_cb_mcs *mcsp;
1916	int i, prm;
1917
1918	FXP_LOCK_ASSERT(sc, MA_OWNED);
1919	/*
1920	 * Cancel any pending I/O
1921	 */
1922	fxp_stop(sc);
1923
1924	prm = (ifp->if_flags & IFF_PROMISC) ? 1 : 0;
1925
1926	/*
1927	 * Initialize base of CBL and RFA memory. Loading with zero
1928	 * sets it up for regular linear addressing.
1929	 */
1930	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, 0);
1931	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_BASE);
1932
1933	fxp_scb_wait(sc);
1934	fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_BASE);
1935
1936	/*
1937	 * Initialize base of dump-stats buffer.
1938	 */
1939	fxp_scb_wait(sc);
1940	bus_dmamap_sync(sc->fxp_stag, sc->fxp_smap, BUS_DMASYNC_PREREAD);
1941	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->stats_addr);
1942	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_DUMP_ADR);
1943
1944	/*
1945	 * Attempt to load microcode if requested.
1946	 */
1947	if (ifp->if_flags & IFF_LINK0 && (sc->flags & FXP_FLAG_UCODE) == 0)
1948		fxp_load_ucode(sc);
1949
1950	/*
1951	 * Initialize the multicast address list.
1952	 */
1953	if (fxp_mc_addrs(sc)) {
1954		mcsp = sc->mcsp;
1955		mcsp->cb_status = 0;
1956		mcsp->cb_command =
1957		    htole16(FXP_CB_COMMAND_MCAS | FXP_CB_COMMAND_EL);
1958		mcsp->link_addr = 0xffffffff;
1959		/*
1960		 * Start the multicast setup command.
1961		 */
1962		fxp_scb_wait(sc);
1963		bus_dmamap_sync(sc->mcs_tag, sc->mcs_map, BUS_DMASYNC_PREWRITE);
1964		CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->mcs_addr);
1965		fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
1966		/* ...and wait for it to complete. */
1967		fxp_dma_wait(sc, &mcsp->cb_status, sc->mcs_tag, sc->mcs_map);
1968		bus_dmamap_sync(sc->mcs_tag, sc->mcs_map,
1969		    BUS_DMASYNC_POSTWRITE);
1970	}
1971
1972	/*
1973	 * We temporarily use memory that contains the TxCB list to
1974	 * construct the config CB. The TxCB list memory is rebuilt
1975	 * later.
1976	 */
1977	cbp = (struct fxp_cb_config *)sc->fxp_desc.cbl_list;
1978
1979	/*
1980	 * This bcopy is kind of disgusting, but there are a bunch of must be
1981	 * zero and must be one bits in this structure and this is the easiest
1982	 * way to initialize them all to proper values.
1983	 */
1984	bcopy(fxp_cb_config_template, cbp, sizeof(fxp_cb_config_template));
1985
1986	cbp->cb_status =	0;
1987	cbp->cb_command =	htole16(FXP_CB_COMMAND_CONFIG |
1988	    FXP_CB_COMMAND_EL);
1989	cbp->link_addr =	0xffffffff;	/* (no) next command */
1990	cbp->byte_count =	sc->flags & FXP_FLAG_EXT_RFA ? 32 : 22;
1991	cbp->rx_fifo_limit =	8;	/* rx fifo threshold (32 bytes) */
1992	cbp->tx_fifo_limit =	0;	/* tx fifo threshold (0 bytes) */
1993	cbp->adaptive_ifs =	0;	/* (no) adaptive interframe spacing */
1994	cbp->mwi_enable =	sc->flags & FXP_FLAG_MWI_ENABLE ? 1 : 0;
1995	cbp->type_enable =	0;	/* actually reserved */
1996	cbp->read_align_en =	sc->flags & FXP_FLAG_READ_ALIGN ? 1 : 0;
1997	cbp->end_wr_on_cl =	sc->flags & FXP_FLAG_WRITE_ALIGN ? 1 : 0;
1998	cbp->rx_dma_bytecount =	0;	/* (no) rx DMA max */
1999	cbp->tx_dma_bytecount =	0;	/* (no) tx DMA max */
2000	cbp->dma_mbce =		0;	/* (disable) dma max counters */
2001	cbp->late_scb =		0;	/* (don't) defer SCB update */
2002	cbp->direct_dma_dis =	1;	/* disable direct rcv dma mode */
2003	cbp->tno_int_or_tco_en = 0;	/* (disable) tx not okay interrupt */
2004	cbp->ci_int =		1;	/* interrupt on CU idle */
2005	cbp->ext_txcb_dis = 	sc->flags & FXP_FLAG_EXT_TXCB ? 0 : 1;
2006	cbp->ext_stats_dis = 	1;	/* disable extended counters */
2007	cbp->keep_overrun_rx = 	0;	/* don't pass overrun frames to host */
2008	cbp->save_bf =		sc->flags & FXP_FLAG_SAVE_BAD ? 1 : prm;
2009	cbp->disc_short_rx =	!prm;	/* discard short packets */
2010	cbp->underrun_retry =	1;	/* retry mode (once) on DMA underrun */
2011	cbp->two_frames =	0;	/* do not limit FIFO to 2 frames */
2012	cbp->dyn_tbd =		0;	/* (no) dynamic TBD mode */
2013	cbp->ext_rfa =		sc->flags & FXP_FLAG_EXT_RFA ? 1 : 0;
2014	cbp->mediatype =	sc->flags & FXP_FLAG_SERIAL_MEDIA ? 0 : 1;
2015	cbp->csma_dis =		0;	/* (don't) disable link */
2016	cbp->tcp_udp_cksum =	0;	/* (don't) enable checksum */
2017	cbp->vlan_tco =		0;	/* (don't) enable vlan wakeup */
2018	cbp->link_wake_en =	0;	/* (don't) assert PME# on link change */
2019	cbp->arp_wake_en =	0;	/* (don't) assert PME# on arp */
2020	cbp->mc_wake_en =	0;	/* (don't) enable PME# on mcmatch */
2021	cbp->nsai =		1;	/* (don't) disable source addr insert */
2022	cbp->preamble_length =	2;	/* (7 byte) preamble */
2023	cbp->loopback =		0;	/* (don't) loopback */
2024	cbp->linear_priority =	0;	/* (normal CSMA/CD operation) */
2025	cbp->linear_pri_mode =	0;	/* (wait after xmit only) */
2026	cbp->interfrm_spacing =	6;	/* (96 bits of) interframe spacing */
2027	cbp->promiscuous =	prm;	/* promiscuous mode */
2028	cbp->bcast_disable =	0;	/* (don't) disable broadcasts */
2029	cbp->wait_after_win =	0;	/* (don't) enable modified backoff alg*/
2030	cbp->ignore_ul =	0;	/* consider U/L bit in IA matching */
2031	cbp->crc16_en =		0;	/* (don't) enable crc-16 algorithm */
2032	cbp->crscdt =		sc->flags & FXP_FLAG_SERIAL_MEDIA ? 1 : 0;
2033
2034	cbp->stripping =	!prm;	/* truncate rx packet to byte count */
2035	cbp->padding =		1;	/* (do) pad short tx packets */
2036	cbp->rcv_crc_xfer =	0;	/* (don't) xfer CRC to host */
2037	cbp->long_rx_en =	sc->flags & FXP_FLAG_LONG_PKT_EN ? 1 : 0;
2038	cbp->ia_wake_en =	0;	/* (don't) wake up on address match */
2039	cbp->magic_pkt_dis =	0;	/* (don't) disable magic packet */
2040					/* must set wake_en in PMCSR also */
2041	cbp->force_fdx =	0;	/* (don't) force full duplex */
2042	cbp->fdx_pin_en =	1;	/* (enable) FDX# pin */
2043	cbp->multi_ia =		0;	/* (don't) accept multiple IAs */
2044	cbp->mc_all =		sc->flags & FXP_FLAG_ALL_MCAST ? 1 : 0;
2045	cbp->gamla_rx =		sc->flags & FXP_FLAG_EXT_RFA ? 1 : 0;
2046
2047	if (sc->tunable_noflow || sc->revision == FXP_REV_82557) {
2048		/*
2049		 * The 82557 has no hardware flow control; the values
2050		 * below are the defaults for the chip.
2051		 */
2052		cbp->fc_delay_lsb =	0;
2053		cbp->fc_delay_msb =	0x40;
2054		cbp->pri_fc_thresh =	3;
2055		cbp->tx_fc_dis =	0;
2056		cbp->rx_fc_restop =	0;
2057		cbp->rx_fc_restart =	0;
2058		cbp->fc_filter =	0;
2059		cbp->pri_fc_loc =	1;
2060	} else {
2061		cbp->fc_delay_lsb =	0x1f;
2062		cbp->fc_delay_msb =	0x01;
2063		cbp->pri_fc_thresh =	3;
2064		cbp->tx_fc_dis =	0;	/* enable transmit FC */
2065		cbp->rx_fc_restop =	1;	/* enable FC restop frames */
2066		cbp->rx_fc_restart =	1;	/* enable FC restart frames */
2067		cbp->fc_filter =	!prm;	/* drop FC frames to host */
2068		cbp->pri_fc_loc =	1;	/* FC pri location (byte31) */
2069	}
2070
2071	/*
2072	 * Start the config command/DMA.
2073	 */
2074	fxp_scb_wait(sc);
2075	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
2076	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->fxp_desc.cbl_addr);
2077	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
2078	/* ...and wait for it to complete. */
2079	fxp_dma_wait(sc, &cbp->cb_status, sc->cbl_tag, sc->cbl_map);
2080	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_POSTWRITE);
2081
2082	/*
2083	 * Now initialize the station address. Temporarily use the TxCB
2084	 * memory area like we did above for the config CB.
2085	 */
2086	cb_ias = (struct fxp_cb_ias *)sc->fxp_desc.cbl_list;
2087	cb_ias->cb_status = 0;
2088	cb_ias->cb_command = htole16(FXP_CB_COMMAND_IAS | FXP_CB_COMMAND_EL);
2089	cb_ias->link_addr = 0xffffffff;
2090	bcopy(IF_LLADDR(sc->ifp), cb_ias->macaddr, ETHER_ADDR_LEN);
2091
2092	/*
2093	 * Start the IAS (Individual Address Setup) command/DMA.
2094	 */
2095	fxp_scb_wait(sc);
2096	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
2097	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
2098	/* ...and wait for it to complete. */
2099	fxp_dma_wait(sc, &cb_ias->cb_status, sc->cbl_tag, sc->cbl_map);
2100	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_POSTWRITE);
2101
2102	/*
2103	 * Initialize transmit control block (TxCB) list.
2104	 */
2105	txp = sc->fxp_desc.tx_list;
2106	tcbp = sc->fxp_desc.cbl_list;
2107	bzero(tcbp, FXP_TXCB_SZ);
2108	for (i = 0; i < FXP_NTXCB; i++) {
2109		txp[i].tx_mbuf = NULL;
2110		tcbp[i].cb_status = htole16(FXP_CB_STATUS_C | FXP_CB_STATUS_OK);
2111		tcbp[i].cb_command = htole16(FXP_CB_COMMAND_NOP);
2112		tcbp[i].link_addr = htole32(sc->fxp_desc.cbl_addr +
2113		    (((i + 1) & FXP_TXCB_MASK) * sizeof(struct fxp_cb_tx)));
2114		if (sc->flags & FXP_FLAG_EXT_TXCB)
2115			tcbp[i].tbd_array_addr =
2116			    htole32(FXP_TXCB_DMA_ADDR(sc, &tcbp[i].tbd[2]));
2117		else
2118			tcbp[i].tbd_array_addr =
2119			    htole32(FXP_TXCB_DMA_ADDR(sc, &tcbp[i].tbd[0]));
2120		txp[i].tx_next = &txp[(i + 1) & FXP_TXCB_MASK];
2121	}
2122	/*
2123	 * Set the suspend flag on the first TxCB and start the control
2124	 * unit. It will execute the NOP and then suspend.
2125	 */
2126	tcbp->cb_command = htole16(FXP_CB_COMMAND_NOP | FXP_CB_COMMAND_S);
2127	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
2128	sc->fxp_desc.tx_first = sc->fxp_desc.tx_last = txp;
2129	sc->tx_queued = 1;
2130
2131	fxp_scb_wait(sc);
2132	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
2133
2134	/*
2135	 * Initialize receiver buffer area - RFA.
2136	 */
2137	fxp_scb_wait(sc);
2138	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->fxp_desc.rx_head->rx_addr);
2139	fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_START);
2140
2141	/*
2142	 * Set current media.
2143	 */
2144	if (sc->miibus != NULL)
2145		mii_mediachg(device_get_softc(sc->miibus));
2146
2147	ifp->if_drv_flags |= IFF_DRV_RUNNING;
2148	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2149
2150	/*
2151	 * Enable interrupts.
2152	 */
2153#ifdef DEVICE_POLLING
2154	/*
2155	 * ... but only do that if we are not polling. And because (presumably)
2156	 * the default is interrupts on, we need to disable them explicitly!
2157	 */
2158	if (ifp->if_capenable & IFCAP_POLLING)
2159		CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_DISABLE);
2160	else
2161#endif /* DEVICE_POLLING */
2162	CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, 0);
2163
2164	/*
2165	 * Start stats updater.
2166	 */
2167	callout_reset(&sc->stat_ch, hz, fxp_tick, sc);
2168}
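
/*
 * A note on the TxCB ring built in fxp_init_body() above: linking each
 * TxCB to "(i + 1) & FXP_TXCB_MASK" relies on the ring size being a power
 * of two (with FXP_TXCB_MASK presumably FXP_NTXCB - 1), so the mask acts
 * as a modulo and the last TxCB links back to the first.  A minimal
 * sketch of the same wrap, under that assumption:
 *
 *	next = (i + 1) & (FXP_NTXCB - 1);	== (i + 1) % FXP_NTXCB
 */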
2169
2170static int
2171fxp_serial_ifmedia_upd(struct ifnet *ifp)
2172{
2173
2174	return (0);
2175}
2176
2177static void
2178fxp_serial_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2179{
2180
2181	ifmr->ifm_active = IFM_ETHER|IFM_MANUAL;
2182}
2183
2184/*
2185 * Change media according to request.
2186 */
2187static int
2188fxp_ifmedia_upd(struct ifnet *ifp)
2189{
2190	struct fxp_softc *sc = ifp->if_softc;
2191	struct mii_data *mii;
2192
2193	mii = device_get_softc(sc->miibus);
2194	FXP_LOCK(sc);
2195	mii_mediachg(mii);
2196	FXP_UNLOCK(sc);
2197	return (0);
2198}
2199
2200/*
2201 * Notify the world which media we're using.
2202 */
2203static void
2204fxp_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2205{
2206	struct fxp_softc *sc = ifp->if_softc;
2207	struct mii_data *mii;
2208
2209	mii = device_get_softc(sc->miibus);
2210	FXP_LOCK(sc);
2211	mii_pollstat(mii);
2212	ifmr->ifm_active = mii->mii_media_active;
2213	ifmr->ifm_status = mii->mii_media_status;
2214
2215	if (IFM_SUBTYPE(ifmr->ifm_active) == IFM_10_T &&
2216	    sc->flags & FXP_FLAG_CU_RESUME_BUG)
2217		sc->cu_resume_bug = 1;
2218	else
2219		sc->cu_resume_bug = 0;
2220	FXP_UNLOCK(sc);
2221}
2222
2223/*
2224 * Add a buffer to the end of the RFA buffer list.
2225 * Return 0 if successful, or an errno on failure. A failure leaves
2226 * the previous mbuf and its DMA mapping in place, so the old buffer
2227 * is simply recycled.
2228 * The RFA struct is stuck at the beginning of the mbuf cluster and the
2229 * data pointer is fixed up to point just past it.
2230 */
2231static int
2232fxp_add_rfabuf(struct fxp_softc *sc, struct fxp_rx *rxp)
2233{
2234	struct mbuf *m;
2235	struct fxp_rfa *rfa, *p_rfa;
2236	struct fxp_rx *p_rx;
2237	bus_dmamap_t tmp_map;
2238	int error;
2239
2240	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
2241	if (m == NULL)
2242		return (ENOBUFS);
2243
2244	/*
2245	 * Move the data pointer up so that the incoming data packet
2246	 * will be 32-bit aligned.
2247	 */
2248	m->m_data += RFA_ALIGNMENT_FUDGE;
2249
2250	/*
2251	 * Get a pointer to the base of the mbuf cluster and move
2252	 * data start past it.
2253	 */
2254	rfa = mtod(m, struct fxp_rfa *);
2255	m->m_data += sc->rfa_size;
2256	rfa->size = htole16(MCLBYTES - sc->rfa_size - RFA_ALIGNMENT_FUDGE);
2257
2258	rfa->rfa_status = 0;
2259	rfa->rfa_control = htole16(FXP_RFA_CONTROL_EL);
2260	rfa->actual_size = 0;
2261
2262	/*
2263	 * Initialize the rest of the RFA.  Note that since the RFA
2264	 * is misaligned, we cannot store values directly.  We're thus
2265	 * using the le32enc() function which handles endianness and
2266	 * is also alignment-safe.
2267	 */
2268	le32enc(&rfa->link_addr, 0xffffffff);
2269	le32enc(&rfa->rbd_addr, 0xffffffff);
2270
2271	/* Map the RFA into DMA memory. */
2272	error = bus_dmamap_load(sc->fxp_mtag, sc->spare_map, rfa,
2273	    MCLBYTES - RFA_ALIGNMENT_FUDGE, fxp_dma_map_addr,
2274	    &rxp->rx_addr, 0);
2275	if (error) {
2276		m_freem(m);
2277		return (error);
2278	}
2279
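	/*
	 * The new cluster was loaded into the spare map above, so only now
	 * that the load has succeeded is the old mapping torn down and the
	 * two maps swapped; a failed load leaves the previous receive
	 * buffer and its mapping untouched.
	 */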
2280	bus_dmamap_unload(sc->fxp_mtag, rxp->rx_map);
2281	tmp_map = sc->spare_map;
2282	sc->spare_map = rxp->rx_map;
2283	rxp->rx_map = tmp_map;
2284	rxp->rx_mbuf = m;
2285
2286	bus_dmamap_sync(sc->fxp_mtag, rxp->rx_map,
2287	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2288
2289	/*
2290	 * If there are other buffers already on the list, attach this
2291	 * one to the end by fixing up the tail to point to this one.
2292	 */
2293	if (sc->fxp_desc.rx_head != NULL) {
2294		p_rx = sc->fxp_desc.rx_tail;
2295		p_rfa = (struct fxp_rfa *)
2296		    (p_rx->rx_mbuf->m_ext.ext_buf + RFA_ALIGNMENT_FUDGE);
2297		p_rx->rx_next = rxp;
2298		le32enc(&p_rfa->link_addr, rxp->rx_addr);
2299		p_rfa->rfa_control = 0;
2300		bus_dmamap_sync(sc->fxp_mtag, p_rx->rx_map,
2301		    BUS_DMASYNC_PREWRITE);
2302	} else {
2303		rxp->rx_next = NULL;
2304		sc->fxp_desc.rx_head = rxp;
2305	}
2306	sc->fxp_desc.rx_tail = rxp;
2307	return (0);
2308}
2309
2310static int
2311fxp_miibus_readreg(device_t dev, int phy, int reg)
2312{
2313	struct fxp_softc *sc = device_get_softc(dev);
2314	int count = 10000;
2315	int value;
2316
2317	CSR_WRITE_4(sc, FXP_CSR_MDICONTROL,
2318	    (FXP_MDI_READ << 26) | (reg << 16) | (phy << 21));
2319
2320	while (((value = CSR_READ_4(sc, FXP_CSR_MDICONTROL)) & 0x10000000) == 0
2321	    && count--)
2322		DELAY(10);
2323
2324	if (count <= 0)
2325		device_printf(dev, "fxp_miibus_readreg: timed out\n");
2326
2327	return (value & 0xffff);
2328}
2329
2330static void
2331fxp_miibus_writereg(device_t dev, int phy, int reg, int value)
2332{
2333	struct fxp_softc *sc = device_get_softc(dev);
2334	int count = 10000;
2335
2336	CSR_WRITE_4(sc, FXP_CSR_MDICONTROL,
2337	    (FXP_MDI_WRITE << 26) | (reg << 16) | (phy << 21) |
2338	    (value & 0xffff));
2339
2340	while ((CSR_READ_4(sc, FXP_CSR_MDICONTROL) & 0x10000000) == 0 &&
2341	    count--)
2342		DELAY(10);
2343
2344	if (count <= 0)
2345		device_printf(dev, "fxp_miibus_writereg: timed out\n");
2346}
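
/*
 * As the two MDI accessors above show, the MDI control word packs the
 * opcode at bit 26, the PHY address at bit 21, the register number at
 * bit 16 and the data in the low 16 bits; bit 28 (0x10000000) is the
 * ready flag that both routines poll, giving up after roughly 100ms.
 */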
2347
2348static int
2349fxp_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
2350{
2351	struct fxp_softc *sc = ifp->if_softc;
2352	struct ifreq *ifr = (struct ifreq *)data;
2353	struct mii_data *mii;
2354	int flag, mask, error = 0;
2355
2356	switch (command) {
2357	case SIOCSIFFLAGS:
2358		FXP_LOCK(sc);
2359		if (ifp->if_flags & IFF_ALLMULTI)
2360			sc->flags |= FXP_FLAG_ALL_MCAST;
2361		else
2362			sc->flags &= ~FXP_FLAG_ALL_MCAST;
2363
2364		/*
2365		 * If interface is marked up and not running, then start it.
2366		 * If it is marked down and running, stop it.
2367		 * XXX If it's up then re-initialize it. This is so flags
2368		 * such as IFF_PROMISC are handled.
2369		 */
2370		if (ifp->if_flags & IFF_UP) {
2371			fxp_init_body(sc);
2372		} else {
2373			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2374				fxp_stop(sc);
2375		}
2376		FXP_UNLOCK(sc);
2377		break;
2378
2379	case SIOCADDMULTI:
2380	case SIOCDELMULTI:
2381		FXP_LOCK(sc);
2382		if (ifp->if_flags & IFF_ALLMULTI)
2383			sc->flags |= FXP_FLAG_ALL_MCAST;
2384		else
2385			sc->flags &= ~FXP_FLAG_ALL_MCAST;
2386		/*
2387		 * Multicast list has changed; set the hardware filter
2388		 * accordingly.
2389		 */
2390		if ((sc->flags & FXP_FLAG_ALL_MCAST) == 0)
2391			fxp_mc_setup(sc);
2392		/*
2393		 * fxp_mc_setup() can set FXP_FLAG_ALL_MCAST, so check it
2394		 * again rather than else {}.
2395		 */
2396		if (sc->flags & FXP_FLAG_ALL_MCAST)
2397			fxp_init_body(sc);
2398		FXP_UNLOCK(sc);
2399		error = 0;
2400		break;
2401
2402	case SIOCSIFMEDIA:
2403	case SIOCGIFMEDIA:
2404		if (sc->miibus != NULL) {
2405			mii = device_get_softc(sc->miibus);
2406			error = ifmedia_ioctl(ifp, ifr,
2407			    &mii->mii_media, command);
2408		} else {
2409			error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, command);
2410		}
2411		break;
2412
2413	case SIOCSIFCAP:
2414		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
2415#ifdef DEVICE_POLLING
2416		if (mask & IFCAP_POLLING) {
2417			if (ifr->ifr_reqcap & IFCAP_POLLING) {
2418				error = ether_poll_register(fxp_poll, ifp);
2419				if (error)
2420					return(error);
2421					return (error);
2422				CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL,
2423				    FXP_SCB_INTR_DISABLE);
2424				ifp->if_capenable |= IFCAP_POLLING;
2425				FXP_UNLOCK(sc);
2426			} else {
2427				error = ether_poll_deregister(ifp);
2428				/* Enable interrupts in any case */
2429				FXP_LOCK(sc);
2430				CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, 0);
2431				ifp->if_capenable &= ~IFCAP_POLLING;
2432				FXP_UNLOCK(sc);
2433			}
2434		}
2435#endif
2436		if (mask & IFCAP_VLAN_MTU) {
2437			FXP_LOCK(sc);
2438			ifp->if_capenable ^= IFCAP_VLAN_MTU;
2439			if (sc->revision != FXP_REV_82557)
2440				flag = FXP_FLAG_LONG_PKT_EN;
2441			else /* a hack to get long frames on the old chip */
2442				flag = FXP_FLAG_SAVE_BAD;
2443			sc->flags ^= flag;
2444			if (ifp->if_flags & IFF_UP)
2445				fxp_init_body(sc);
2446			FXP_UNLOCK(sc);
2447		}
2448		break;
2449
2450	default:
2451		error = ether_ioctl(ifp, command, data);
2452	}
2453	return (error);
2454}
2455
2456/*
2457 * Fill in the multicast address list and return number of entries.
2458 */
2459static int
2460fxp_mc_addrs(struct fxp_softc *sc)
2461{
2462	struct fxp_cb_mcs *mcsp = sc->mcsp;
2463	struct ifnet *ifp = sc->ifp;
2464	struct ifmultiaddr *ifma;
2465	int nmcasts;
2466
2467	nmcasts = 0;
2468	if ((sc->flags & FXP_FLAG_ALL_MCAST) == 0) {
2469		IF_ADDR_LOCK(ifp);
2470		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2471			if (ifma->ifma_addr->sa_family != AF_LINK)
2472				continue;
2473			if (nmcasts >= MAXMCADDR) {
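			/*
			 * The exact-match multicast filter only holds
			 * MAXMCADDR addresses; beyond that, fall back to
			 * receiving all multicast frames.
			 */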
2474				sc->flags |= FXP_FLAG_ALL_MCAST;
2475				nmcasts = 0;
2476				break;
2477			}
2478			bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
2479			    &sc->mcsp->mc_addr[nmcasts][0], ETHER_ADDR_LEN);
2480			nmcasts++;
2481		}
2482		IF_ADDR_UNLOCK(ifp);
2483	}
2484	mcsp->mc_cnt = htole16(nmcasts * ETHER_ADDR_LEN);
2485	return (nmcasts);
2486}
2487
2488/*
2489 * Program the multicast filter.
2490 *
2491 * We have an artificial restriction that the multicast setup command
2492 * must be the first command in the chain, so we take steps to ensure
2493 * this. Requiring this allows us to keep up the performance of
2494 * the pre-initialized command ring (esp. link pointers) by not actually
2495 * inserting the mcsetup command in the ring - i.e. its link pointer
2496 * points to the TxCB ring, but the mcsetup descriptor itself is not part
2497 * of it. We then can do 'CU_START' on the mcsetup descriptor and have it
2498 * lead into the regular TxCB ring when it completes.
2499 *
2500 * This function must be called at splimp.
2501 * This function must be called with the softc lock held.
2502static void
2503fxp_mc_setup(struct fxp_softc *sc)
2504{
2505	struct fxp_cb_mcs *mcsp = sc->mcsp;
2506	struct ifnet *ifp = sc->ifp;
2507	struct fxp_tx *txp;
2508	int count;
2509
2510	FXP_LOCK_ASSERT(sc, MA_OWNED);
2511	/*
2512	 * If there are queued commands, we must wait until they are all
2513	 * completed. If we are already waiting, then add a NOP command
2514	 * with interrupt option so that we're notified when all commands
2515	 * have been completed - fxp_start() ensures that no additional
2516	 * TX commands will be added when need_mcsetup is true.
2517	 */
2518	if (sc->tx_queued) {
2519		/*
2520		 * need_mcsetup will be true if we are already waiting for the
2521		 * NOP command to be completed (see below). In this case, bail.
2522		 */
2523		if (sc->need_mcsetup)
2524			return;
2525		sc->need_mcsetup = 1;
2526
2527		/*
2528		 * Add a NOP command with interrupt so that we are notified
2529		 * when all TX commands have been processed.
2530		 */
2531		txp = sc->fxp_desc.tx_last->tx_next;
2532		txp->tx_mbuf = NULL;
2533		txp->tx_cb->cb_status = 0;
2534		txp->tx_cb->cb_command = htole16(FXP_CB_COMMAND_NOP |
2535		    FXP_CB_COMMAND_S | FXP_CB_COMMAND_I);
2536		/*
2537		 * Advance the end of list forward.
2538		 */
2539		sc->fxp_desc.tx_last->tx_cb->cb_command &=
2540		    htole16(~FXP_CB_COMMAND_S);
2541		bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
2542		sc->fxp_desc.tx_last = txp;
2543		sc->tx_queued++;
2544		/*
2545		 * Issue a resume in case the CU has just suspended.
2546		 */
2547		fxp_scb_wait(sc);
2548		fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_RESUME);
2549		/*
2550		 * Set a 5 second timer just in case we don't hear from the
2551		 * card again.
2552		 */
2553		ifp->if_timer = 5;
2554
2555		return;
2556	}
2557	sc->need_mcsetup = 0;
2558
2559	/*
2560	 * Initialize multicast setup descriptor.
2561	 */
2562	mcsp->cb_status = 0;
2563	mcsp->cb_command = htole16(FXP_CB_COMMAND_MCAS |
2564	    FXP_CB_COMMAND_S | FXP_CB_COMMAND_I);
2565	mcsp->link_addr = htole32(sc->fxp_desc.cbl_addr);
2566	txp = &sc->fxp_desc.mcs_tx;
2567	txp->tx_mbuf = NULL;
2568	txp->tx_cb = (struct fxp_cb_tx *)sc->mcsp;
2569	txp->tx_next = sc->fxp_desc.tx_list;
2570	(void) fxp_mc_addrs(sc);
2571	sc->fxp_desc.tx_first = sc->fxp_desc.tx_last = txp;
2572	sc->tx_queued = 1;
2573
2574	/*
2575	 * Wait until command unit is not active. This should never
2576	 * be the case when nothing is queued, but make sure anyway.
2577	 */
2578	count = 100;
2579	while ((CSR_READ_1(sc, FXP_CSR_SCB_RUSCUS) >> 6) ==
2580	    FXP_SCB_CUS_ACTIVE && --count)
2581		DELAY(10);
2582	if (count == 0) {
2583		device_printf(sc->dev, "command queue timeout\n");
2584		return;
2585	}
2586
2587	/*
2588	 * Start the multicast setup command.
2589	 */
2590	fxp_scb_wait(sc);
2591	bus_dmamap_sync(sc->mcs_tag, sc->mcs_map, BUS_DMASYNC_PREWRITE);
2592	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->mcs_addr);
2593	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
2594
2595	ifp->if_timer = 2;
2596	return;
2597}
2598
2599static uint32_t fxp_ucode_d101a[] = D101_A_RCVBUNDLE_UCODE;
2600static uint32_t fxp_ucode_d101b0[] = D101_B0_RCVBUNDLE_UCODE;
2601static uint32_t fxp_ucode_d101ma[] = D101M_B_RCVBUNDLE_UCODE;
2602static uint32_t fxp_ucode_d101s[] = D101S_RCVBUNDLE_UCODE;
2603static uint32_t fxp_ucode_d102[] = D102_B_RCVBUNDLE_UCODE;
2604static uint32_t fxp_ucode_d102c[] = D102_C_RCVBUNDLE_UCODE;
2605static uint32_t fxp_ucode_d102e[] = D102_E_RCVBUNDLE_UCODE;
2606
2607#define UCODE(x)	x, sizeof(x)/sizeof(uint32_t)
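/*
 * UCODE() expands to two initializers, the array itself and its length in
 * 32-bit words, filling the "ucode" and "length" members of struct ucode
 * below.
 */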
2608
2609struct ucode {
2610	uint32_t	revision;
2611	uint32_t	*ucode;
2612	int		length;
2613	u_short		int_delay_offset;
2614	u_short		bundle_max_offset;
2615} ucode_table[] = {
2616	{ FXP_REV_82558_A4, UCODE(fxp_ucode_d101a), D101_CPUSAVER_DWORD, 0 },
2617	{ FXP_REV_82558_B0, UCODE(fxp_ucode_d101b0), D101_CPUSAVER_DWORD, 0 },
2618	{ FXP_REV_82559_A0, UCODE(fxp_ucode_d101ma),
2619	    D101M_CPUSAVER_DWORD, D101M_CPUSAVER_BUNDLE_MAX_DWORD },
2620	{ FXP_REV_82559S_A, UCODE(fxp_ucode_d101s),
2621	    D101S_CPUSAVER_DWORD, D101S_CPUSAVER_BUNDLE_MAX_DWORD },
2622	{ FXP_REV_82550, UCODE(fxp_ucode_d102),
2623	    D102_B_CPUSAVER_DWORD, D102_B_CPUSAVER_BUNDLE_MAX_DWORD },
2624	{ FXP_REV_82550_C, UCODE(fxp_ucode_d102c),
2625	    D102_C_CPUSAVER_DWORD, D102_C_CPUSAVER_BUNDLE_MAX_DWORD },
2626	{ FXP_REV_82551_F, UCODE(fxp_ucode_d102e),
2627	    D102_E_CPUSAVER_DWORD, D102_E_CPUSAVER_BUNDLE_MAX_DWORD },
2628	{ 0, NULL, 0, 0, 0 }
2629};
2630
2631static void
2632fxp_load_ucode(struct fxp_softc *sc)
2633{
2634	struct ucode *uc;
2635	struct fxp_cb_ucode *cbp;
2636	int i;
2637
2638	for (uc = ucode_table; uc->ucode != NULL; uc++)
2639		if (sc->revision == uc->revision)
2640			break;
2641	if (uc->ucode == NULL)
2642		return;
2643	cbp = (struct fxp_cb_ucode *)sc->fxp_desc.cbl_list;
2644	cbp->cb_status = 0;
2645	cbp->cb_command = htole16(FXP_CB_COMMAND_UCODE | FXP_CB_COMMAND_EL);
2646	cbp->link_addr = 0xffffffff;    	/* (no) next command */
2647	for (i = 0; i < uc->length; i++)
2648		cbp->ucode[i] = htole32(uc->ucode[i]);
2649	if (uc->int_delay_offset)
2650		*(uint16_t *)&cbp->ucode[uc->int_delay_offset] =
2651		    htole16(sc->tunable_int_delay + sc->tunable_int_delay / 2);
2652	if (uc->bundle_max_offset)
2653		*(uint16_t *)&cbp->ucode[uc->bundle_max_offset] =
2654		    htole16(sc->tunable_bundle_max);
2655	/*
2656	 * Download the ucode to the chip.
2657	 */
2658	fxp_scb_wait(sc);
2659	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
2660	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->fxp_desc.cbl_addr);
2661	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
2662	/* ...and wait for it to complete. */
2663	fxp_dma_wait(sc, &cbp->cb_status, sc->cbl_tag, sc->cbl_map);
2664	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_POSTWRITE);
2665	device_printf(sc->dev,
2666	    "Microcode loaded, int_delay: %d usec  bundle_max: %d\n",
2667	    sc->tunable_int_delay,
2668	    uc->bundle_max_offset == 0 ? 0 : sc->tunable_bundle_max);
2669	sc->flags |= FXP_FLAG_UCODE;
2670}
2671
2672static int
2673sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
2674{
2675	int error, value;
2676
2677	value = *(int *)arg1;
2678	error = sysctl_handle_int(oidp, &value, 0, req);
2679	if (error || !req->newptr)
2680		return (error);
2681	if (value < low || value > high)
2682		return (EINVAL);
2683	*(int *)arg1 = value;
2684	return (0);
2685}
2686
2687/*
2688 * Interrupt delay is expressed in microseconds; a multiplier is used
2689 * to convert this to the appropriate clock ticks before using.
2690 */
2691static int
2692sysctl_hw_fxp_int_delay(SYSCTL_HANDLER_ARGS)
2693{
2694	return (sysctl_int_range(oidp, arg1, arg2, req, 300, 3000));
2695}
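
/*
 * The accepted range above (300 to 3000 microseconds) is the raw tunable;
 * fxp_load_ucode() patches the microcode with the tunable scaled by
 * roughly 1.5 (int_delay + int_delay / 2).
 */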
2696
2697static int
2698sysctl_hw_fxp_bundle_max(SYSCTL_HANDLER_ARGS)
2699{
2700	return (sysctl_int_range(oidp, arg1, arg2, req, 1, 0xffff));
2701}
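
/*
 * A minimal sketch of how handlers like the two above are typically
 * attached to the device's sysctl tree; the node name and description
 * here are illustrative assumptions and are not taken from this file:
 *
 *	SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->dev),
 *	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO,
 *	    "int_delay", CTLTYPE_INT | CTLFLAG_RW, &sc->tunable_int_delay,
 *	    0, sysctl_hw_fxp_int_delay, "I", "interrupt delay (usec)");
 */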
2702