Deleted Added
full compact
if_fxp.c (185329) if_fxp.c (185330)
1/*-
2 * Copyright (c) 1995, David Greenman
3 * Copyright (c) 2001 Jonathan Lemon <jlemon@freebsd.org>
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice unmodified, this list of conditions, and the following
11 * disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 *
28 */
29
30#include <sys/cdefs.h>
1/*-
2 * Copyright (c) 1995, David Greenman
3 * Copyright (c) 2001 Jonathan Lemon <jlemon@freebsd.org>
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice unmodified, this list of conditions, and the following
11 * disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 *
28 */
29
30#include <sys/cdefs.h>
31__FBSDID("$FreeBSD: head/sys/dev/fxp/if_fxp.c 185329 2008-11-26 06:36:53Z yongari $");
31__FBSDID("$FreeBSD: head/sys/dev/fxp/if_fxp.c 185330 2008-11-26 07:36:17Z yongari $");
32
33/*
34 * Intel EtherExpress Pro/100B PCI Fast Ethernet driver
35 */
36
37#ifdef HAVE_KERNEL_OPTION_HEADERS
38#include "opt_device_polling.h"
39#endif
40
41#include <sys/param.h>
42#include <sys/systm.h>
43#include <sys/bus.h>
44#include <sys/endian.h>
45#include <sys/kernel.h>
46#include <sys/mbuf.h>
47#include <sys/module.h>
48#include <sys/rman.h>
49#include <sys/socket.h>
50#include <sys/sockio.h>
51#include <sys/sysctl.h>
52
53#include <net/bpf.h>
54#include <net/ethernet.h>
55#include <net/if.h>
56#include <net/if_arp.h>
57#include <net/if_dl.h>
58#include <net/if_media.h>
59#include <net/if_types.h>
60#include <net/if_vlan_var.h>
61
62#include <netinet/in.h>
63#include <netinet/in_systm.h>
64#include <netinet/ip.h>
65#include <netinet/tcp.h>
66#include <netinet/udp.h>
67
68#include <machine/bus.h>
69#include <machine/in_cksum.h>
70#include <machine/resource.h>
71
72#include <dev/pci/pcivar.h>
73#include <dev/pci/pcireg.h> /* for PCIM_CMD_xxx */
74
75#include <dev/mii/mii.h>
76#include <dev/mii/miivar.h>
77
78#include <dev/fxp/if_fxpreg.h>
79#include <dev/fxp/if_fxpvar.h>
80#include <dev/fxp/rcvbundl.h>
81
82MODULE_DEPEND(fxp, pci, 1, 1, 1);
83MODULE_DEPEND(fxp, ether, 1, 1, 1);
84MODULE_DEPEND(fxp, miibus, 1, 1, 1);
85#include "miibus_if.h"
86
87/*
88 * NOTE! On the Alpha, we have an alignment constraint. The
89 * card DMAs the packet immediately following the RFA. However,
90 * the first thing in the packet is a 14-byte Ethernet header.
91 * This means that the packet is misaligned. To compensate,
92 * we actually offset the RFA 2 bytes into the cluster. This
 93 * aligns the packet after the Ethernet header at a 32-bit
94 * boundary. HOWEVER! This means that the RFA is misaligned!
95 */
96#define RFA_ALIGNMENT_FUDGE 2
97
98/*
99 * Set initial transmit threshold at 64 (512 bytes). This is
100 * increased by 64 (512 bytes) at a time, to maximum of 192
101 * (1536 bytes), if an underrun occurs.
102 */
103static int tx_threshold = 64;
104
105/*
106 * The configuration byte map has several undefined fields which
107 * must be one or must be zero. Set up a template for these bits
108 * only, (assuming a 82557 chip) leaving the actual configuration
109 * to fxp_init.
110 *
111 * See struct fxp_cb_config for the bit definitions.
112 */
113static u_char fxp_cb_config_template[] = {
114 0x0, 0x0, /* cb_status */
115 0x0, 0x0, /* cb_command */
116 0x0, 0x0, 0x0, 0x0, /* link_addr */
117 0x0, /* 0 */
118 0x0, /* 1 */
119 0x0, /* 2 */
120 0x0, /* 3 */
121 0x0, /* 4 */
122 0x0, /* 5 */
123 0x32, /* 6 */
124 0x0, /* 7 */
125 0x0, /* 8 */
126 0x0, /* 9 */
127 0x6, /* 10 */
128 0x0, /* 11 */
129 0x0, /* 12 */
130 0x0, /* 13 */
131 0xf2, /* 14 */
132 0x48, /* 15 */
133 0x0, /* 16 */
134 0x40, /* 17 */
135 0xf0, /* 18 */
136 0x0, /* 19 */
137 0x3f, /* 20 */
138 0x5 /* 21 */
139};
140
141struct fxp_ident {
142 uint16_t devid;
143 int16_t revid; /* -1 matches anything */
144 char *name;
145};
146
147/*
148 * Claim various Intel PCI device identifiers for this driver. The
149 * sub-vendor and sub-device field are extensively used to identify
150 * particular variants, but we don't currently differentiate between
151 * them.
152 */
153static struct fxp_ident fxp_ident_table[] = {
154 { 0x1029, -1, "Intel 82559 PCI/CardBus Pro/100" },
155 { 0x1030, -1, "Intel 82559 Pro/100 Ethernet" },
156 { 0x1031, -1, "Intel 82801CAM (ICH3) Pro/100 VE Ethernet" },
157 { 0x1032, -1, "Intel 82801CAM (ICH3) Pro/100 VE Ethernet" },
158 { 0x1033, -1, "Intel 82801CAM (ICH3) Pro/100 VM Ethernet" },
159 { 0x1034, -1, "Intel 82801CAM (ICH3) Pro/100 VM Ethernet" },
160 { 0x1035, -1, "Intel 82801CAM (ICH3) Pro/100 Ethernet" },
161 { 0x1036, -1, "Intel 82801CAM (ICH3) Pro/100 Ethernet" },
162 { 0x1037, -1, "Intel 82801CAM (ICH3) Pro/100 Ethernet" },
163 { 0x1038, -1, "Intel 82801CAM (ICH3) Pro/100 VM Ethernet" },
164 { 0x1039, -1, "Intel 82801DB (ICH4) Pro/100 VE Ethernet" },
165 { 0x103A, -1, "Intel 82801DB (ICH4) Pro/100 Ethernet" },
166 { 0x103B, -1, "Intel 82801DB (ICH4) Pro/100 VM Ethernet" },
167 { 0x103C, -1, "Intel 82801DB (ICH4) Pro/100 Ethernet" },
168 { 0x103D, -1, "Intel 82801DB (ICH4) Pro/100 VE Ethernet" },
169 { 0x103E, -1, "Intel 82801DB (ICH4) Pro/100 VM Ethernet" },
170 { 0x1050, -1, "Intel 82801BA (D865) Pro/100 VE Ethernet" },
171 { 0x1051, -1, "Intel 82562ET (ICH5/ICH5R) Pro/100 VE Ethernet" },
172 { 0x1059, -1, "Intel 82551QM Pro/100 M Mobile Connection" },
173 { 0x1064, -1, "Intel 82562EZ (ICH6)" },
174 { 0x1065, -1, "Intel 82562ET/EZ/GT/GZ PRO/100 VE Ethernet" },
175 { 0x1068, -1, "Intel 82801FBM (ICH6-M) Pro/100 VE Ethernet" },
176 { 0x1069, -1, "Intel 82562EM/EX/GX Pro/100 Ethernet" },
177 { 0x1091, -1, "Intel 82562GX Pro/100 Ethernet" },
178 { 0x1092, -1, "Intel Pro/100 VE Network Connection" },
179 { 0x1093, -1, "Intel Pro/100 VM Network Connection" },
180 { 0x1094, -1, "Intel Pro/100 946GZ (ICH7) Network Connection" },
181 { 0x1209, -1, "Intel 82559ER Embedded 10/100 Ethernet" },
182 { 0x1229, 0x01, "Intel 82557 Pro/100 Ethernet" },
183 { 0x1229, 0x02, "Intel 82557 Pro/100 Ethernet" },
184 { 0x1229, 0x03, "Intel 82557 Pro/100 Ethernet" },
185 { 0x1229, 0x04, "Intel 82558 Pro/100 Ethernet" },
186 { 0x1229, 0x05, "Intel 82558 Pro/100 Ethernet" },
187 { 0x1229, 0x06, "Intel 82559 Pro/100 Ethernet" },
188 { 0x1229, 0x07, "Intel 82559 Pro/100 Ethernet" },
189 { 0x1229, 0x08, "Intel 82559 Pro/100 Ethernet" },
190 { 0x1229, 0x09, "Intel 82559ER Pro/100 Ethernet" },
191 { 0x1229, 0x0c, "Intel 82550 Pro/100 Ethernet" },
192 { 0x1229, 0x0d, "Intel 82550 Pro/100 Ethernet" },
193 { 0x1229, 0x0e, "Intel 82550 Pro/100 Ethernet" },
194 { 0x1229, 0x0f, "Intel 82551 Pro/100 Ethernet" },
195 { 0x1229, 0x10, "Intel 82551 Pro/100 Ethernet" },
196 { 0x1229, -1, "Intel 82557/8/9 Pro/100 Ethernet" },
197 { 0x2449, -1, "Intel 82801BA/CAM (ICH2/3) Pro/100 Ethernet" },
198 { 0x27dc, -1, "Intel 82801GB (ICH7) 10/100 Ethernet" },
199 { 0, -1, NULL },
200};
201
202#ifdef FXP_IP_CSUM_WAR
203#define FXP_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
204#else
205#define FXP_CSUM_FEATURES (CSUM_TCP | CSUM_UDP)
206#endif
207
208static int fxp_probe(device_t dev);
209static int fxp_attach(device_t dev);
210static int fxp_detach(device_t dev);
211static int fxp_shutdown(device_t dev);
212static int fxp_suspend(device_t dev);
213static int fxp_resume(device_t dev);
214
215static void fxp_intr(void *xsc);
216static void fxp_rxcsum(struct fxp_softc *sc, struct ifnet *ifp,
217 struct mbuf *m, uint16_t status, int pos);
218static void fxp_intr_body(struct fxp_softc *sc, struct ifnet *ifp,
219 uint8_t statack, int count);
220static void fxp_init(void *xsc);
221static void fxp_init_body(struct fxp_softc *sc);
222static void fxp_tick(void *xsc);
223static void fxp_start(struct ifnet *ifp);
224static void fxp_start_body(struct ifnet *ifp);
225static int fxp_encap(struct fxp_softc *sc, struct mbuf **m_head);
226static void fxp_txeof(struct fxp_softc *sc);
227static void fxp_stop(struct fxp_softc *sc);
228static void fxp_release(struct fxp_softc *sc);
229static int fxp_ioctl(struct ifnet *ifp, u_long command,
230 caddr_t data);
231static void fxp_watchdog(struct fxp_softc *sc);
232static void fxp_add_rfabuf(struct fxp_softc *sc,
233 struct fxp_rx *rxp);
234static void fxp_discard_rfabuf(struct fxp_softc *sc,
235 struct fxp_rx *rxp);
236static int fxp_new_rfabuf(struct fxp_softc *sc,
237 struct fxp_rx *rxp);
238static int fxp_mc_addrs(struct fxp_softc *sc);
239static void fxp_mc_setup(struct fxp_softc *sc);
240static uint16_t fxp_eeprom_getword(struct fxp_softc *sc, int offset,
241 int autosize);
242static void fxp_eeprom_putword(struct fxp_softc *sc, int offset,
243 uint16_t data);
244static void fxp_autosize_eeprom(struct fxp_softc *sc);
245static void fxp_read_eeprom(struct fxp_softc *sc, u_short *data,
246 int offset, int words);
247static void fxp_write_eeprom(struct fxp_softc *sc, u_short *data,
248 int offset, int words);
249static int fxp_ifmedia_upd(struct ifnet *ifp);
250static void fxp_ifmedia_sts(struct ifnet *ifp,
251 struct ifmediareq *ifmr);
252static int fxp_serial_ifmedia_upd(struct ifnet *ifp);
253static void fxp_serial_ifmedia_sts(struct ifnet *ifp,
254 struct ifmediareq *ifmr);
255static int fxp_miibus_readreg(device_t dev, int phy, int reg);
256static void fxp_miibus_writereg(device_t dev, int phy, int reg,
257 int value);
258static void fxp_load_ucode(struct fxp_softc *sc);
259static int sysctl_int_range(SYSCTL_HANDLER_ARGS,
260 int low, int high);
261static int sysctl_hw_fxp_bundle_max(SYSCTL_HANDLER_ARGS);
262static int sysctl_hw_fxp_int_delay(SYSCTL_HANDLER_ARGS);
263static void fxp_scb_wait(struct fxp_softc *sc);
264static void fxp_scb_cmd(struct fxp_softc *sc, int cmd);
265static void fxp_dma_wait(struct fxp_softc *sc,
266 volatile uint16_t *status, bus_dma_tag_t dmat,
267 bus_dmamap_t map);
268
269static device_method_t fxp_methods[] = {
270 /* Device interface */
271 DEVMETHOD(device_probe, fxp_probe),
272 DEVMETHOD(device_attach, fxp_attach),
273 DEVMETHOD(device_detach, fxp_detach),
274 DEVMETHOD(device_shutdown, fxp_shutdown),
275 DEVMETHOD(device_suspend, fxp_suspend),
276 DEVMETHOD(device_resume, fxp_resume),
277
278 /* MII interface */
279 DEVMETHOD(miibus_readreg, fxp_miibus_readreg),
280 DEVMETHOD(miibus_writereg, fxp_miibus_writereg),
281
282 { 0, 0 }
283};
284
285static driver_t fxp_driver = {
286 "fxp",
287 fxp_methods,
288 sizeof(struct fxp_softc),
289};
290
291static devclass_t fxp_devclass;
292
293DRIVER_MODULE(fxp, pci, fxp_driver, fxp_devclass, 0, 0);
294DRIVER_MODULE(fxp, cardbus, fxp_driver, fxp_devclass, 0, 0);
295DRIVER_MODULE(miibus, fxp, miibus_driver, miibus_devclass, 0, 0);
296
297static struct resource_spec fxp_res_spec_mem[] = {
298 { SYS_RES_MEMORY, FXP_PCI_MMBA, RF_ACTIVE },
299 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
300 { -1, 0 }
301};
302
303static struct resource_spec fxp_res_spec_io[] = {
304 { SYS_RES_IOPORT, FXP_PCI_IOBA, RF_ACTIVE },
305 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
306 { -1, 0 }
307};
308
309/*
310 * Wait for the previous command to be accepted (but not necessarily
311 * completed).
312 */
313static void
314fxp_scb_wait(struct fxp_softc *sc)
315{
316 union {
317 uint16_t w;
318 uint8_t b[2];
319 } flowctl;
320 int i = 10000;
321
322 while (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) && --i)
323 DELAY(2);
324 if (i == 0) {
325 flowctl.b[0] = CSR_READ_1(sc, FXP_CSR_FLOWCONTROL);
326 flowctl.b[1] = CSR_READ_1(sc, FXP_CSR_FLOWCONTROL + 1);
327 device_printf(sc->dev, "SCB timeout: 0x%x 0x%x 0x%x 0x%x\n",
328 CSR_READ_1(sc, FXP_CSR_SCB_COMMAND),
329 CSR_READ_1(sc, FXP_CSR_SCB_STATACK),
330 CSR_READ_1(sc, FXP_CSR_SCB_RUSCUS), flowctl.w);
331 }
332}
333
334static void
335fxp_scb_cmd(struct fxp_softc *sc, int cmd)
336{
337
338 if (cmd == FXP_SCB_COMMAND_CU_RESUME && sc->cu_resume_bug) {
339 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_CB_COMMAND_NOP);
340 fxp_scb_wait(sc);
341 }
342 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, cmd);
343}
344
345static void
346fxp_dma_wait(struct fxp_softc *sc, volatile uint16_t *status,
347 bus_dma_tag_t dmat, bus_dmamap_t map)
348{
349 int i = 10000;
350
351 bus_dmamap_sync(dmat, map, BUS_DMASYNC_POSTREAD);
352 while (!(le16toh(*status) & FXP_CB_STATUS_C) && --i) {
353 DELAY(2);
354 bus_dmamap_sync(dmat, map, BUS_DMASYNC_POSTREAD);
355 }
356 if (i == 0)
357 device_printf(sc->dev, "DMA timeout\n");
358}
359
360/*
361 * Return identification string if this device is ours.
362 */
363static int
364fxp_probe(device_t dev)
365{
366 uint16_t devid;
367 uint8_t revid;
368 struct fxp_ident *ident;
369
370 if (pci_get_vendor(dev) == FXP_VENDORID_INTEL) {
371 devid = pci_get_device(dev);
372 revid = pci_get_revid(dev);
373 for (ident = fxp_ident_table; ident->name != NULL; ident++) {
374 if (ident->devid == devid &&
375 (ident->revid == revid || ident->revid == -1)) {
376 device_set_desc(dev, ident->name);
377 return (BUS_PROBE_DEFAULT);
378 }
379 }
380 }
381 return (ENXIO);
382}
383
384static void
385fxp_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
386{
387 uint32_t *addr;
388
389 if (error)
390 return;
391
392 KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
393 addr = arg;
394 *addr = segs->ds_addr;
395}
396
397static int
398fxp_attach(device_t dev)
399{
400 struct fxp_softc *sc;
401 struct fxp_cb_tx *tcbp;
402 struct fxp_tx *txp;
403 struct fxp_rx *rxp;
404 struct ifnet *ifp;
405 uint32_t val;
406 uint16_t data, myea[ETHER_ADDR_LEN / 2];
407 u_char eaddr[ETHER_ADDR_LEN];
408 int i, prefer_iomap;
409 int error;
410
411 error = 0;
412 sc = device_get_softc(dev);
413 sc->dev = dev;
414 mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
415 MTX_DEF);
416 callout_init_mtx(&sc->stat_ch, &sc->sc_mtx, 0);
417 ifmedia_init(&sc->sc_media, 0, fxp_serial_ifmedia_upd,
418 fxp_serial_ifmedia_sts);
419
420 ifp = sc->ifp = if_alloc(IFT_ETHER);
421 if (ifp == NULL) {
422 device_printf(dev, "can not if_alloc()\n");
423 error = ENOSPC;
424 goto fail;
425 }
426
427 /*
428 * Enable bus mastering.
429 */
430 pci_enable_busmaster(dev);
431 val = pci_read_config(dev, PCIR_COMMAND, 2);
432
433 /*
434 * Figure out which we should try first - memory mapping or i/o mapping?
435 * We default to memory mapping. Then we accept an override from the
436 * command line. Then we check to see which one is enabled.
437 */
438 prefer_iomap = 0;
439 resource_int_value(device_get_name(dev), device_get_unit(dev),
440 "prefer_iomap", &prefer_iomap);
441 if (prefer_iomap)
442 sc->fxp_spec = fxp_res_spec_io;
443 else
444 sc->fxp_spec = fxp_res_spec_mem;
445
446 error = bus_alloc_resources(dev, sc->fxp_spec, sc->fxp_res);
447 if (error) {
448 if (sc->fxp_spec == fxp_res_spec_mem)
449 sc->fxp_spec = fxp_res_spec_io;
450 else
451 sc->fxp_spec = fxp_res_spec_mem;
452 error = bus_alloc_resources(dev, sc->fxp_spec, sc->fxp_res);
453 }
454 if (error) {
455 device_printf(dev, "could not allocate resources\n");
456 error = ENXIO;
457 goto fail;
458 }
459
460 if (bootverbose) {
461 device_printf(dev, "using %s space register mapping\n",
462 sc->fxp_spec == fxp_res_spec_mem ? "memory" : "I/O");
463 }
464
465 /*
466 * Reset to a stable state.
467 */
468 CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET);
469 DELAY(10);
470
471 /*
472 * Find out how large of an SEEPROM we have.
473 */
474 fxp_autosize_eeprom(sc);
475
476 /*
477 * Find out the chip revision; lump all 82557 revs together.
478 */
479 fxp_read_eeprom(sc, &data, 5, 1);
480 if ((data >> 8) == 1)
481 sc->revision = FXP_REV_82557;
482 else
483 sc->revision = pci_get_revid(dev);
484
485 /*
486 * Determine whether we must use the 503 serial interface.
487 */
488 fxp_read_eeprom(sc, &data, 6, 1);
489 if (sc->revision == FXP_REV_82557 && (data & FXP_PHY_DEVICE_MASK) != 0
490 && (data & FXP_PHY_SERIAL_ONLY))
491 sc->flags |= FXP_FLAG_SERIAL_MEDIA;
492
493 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
494 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
495 OID_AUTO, "int_delay", CTLTYPE_INT | CTLFLAG_RW,
496 &sc->tunable_int_delay, 0, sysctl_hw_fxp_int_delay, "I",
497 "FXP driver receive interrupt microcode bundling delay");
498 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
499 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
500 OID_AUTO, "bundle_max", CTLTYPE_INT | CTLFLAG_RW,
501 &sc->tunable_bundle_max, 0, sysctl_hw_fxp_bundle_max, "I",
502 "FXP driver receive interrupt microcode bundle size limit");
503 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
504 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
505 OID_AUTO, "rnr", CTLFLAG_RD, &sc->rnr, 0,
506 "FXP RNR events");
507 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
508 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
509 OID_AUTO, "noflow", CTLFLAG_RW, &sc->tunable_noflow, 0,
510 "FXP flow control disabled");
511
512 /*
513 * Pull in device tunables.
514 */
515 sc->tunable_int_delay = TUNABLE_INT_DELAY;
516 sc->tunable_bundle_max = TUNABLE_BUNDLE_MAX;
517 sc->tunable_noflow = 1;
518 (void) resource_int_value(device_get_name(dev), device_get_unit(dev),
519 "int_delay", &sc->tunable_int_delay);
520 (void) resource_int_value(device_get_name(dev), device_get_unit(dev),
521 "bundle_max", &sc->tunable_bundle_max);
522 (void) resource_int_value(device_get_name(dev), device_get_unit(dev),
523 "noflow", &sc->tunable_noflow);
524 sc->rnr = 0;
525
526 /*
527 * Enable workarounds for certain chip revision deficiencies.
528 *
529 * Systems based on the ICH2/ICH2-M chip from Intel, and possibly
 530	 * some systems based on a normal 82559 design, have a defect where
531 * the chip can cause a PCI protocol violation if it receives
532 * a CU_RESUME command when it is entering the IDLE state. The
533 * workaround is to disable Dynamic Standby Mode, so the chip never
534 * deasserts CLKRUN#, and always remains in an active state.
535 *
536 * See Intel 82801BA/82801BAM Specification Update, Errata #30.
537 */
538 i = pci_get_device(dev);
539 if (i == 0x2449 || (i > 0x1030 && i < 0x1039) ||
540 sc->revision >= FXP_REV_82559_A0) {
541 fxp_read_eeprom(sc, &data, 10, 1);
542 if (data & 0x02) { /* STB enable */
543 uint16_t cksum;
544 int i;
545
546 device_printf(dev,
547 "Disabling dynamic standby mode in EEPROM\n");
548 data &= ~0x02;
549 fxp_write_eeprom(sc, &data, 10, 1);
550 device_printf(dev, "New EEPROM ID: 0x%x\n", data);
551 cksum = 0;
552 for (i = 0; i < (1 << sc->eeprom_size) - 1; i++) {
553 fxp_read_eeprom(sc, &data, i, 1);
554 cksum += data;
555 }
556 i = (1 << sc->eeprom_size) - 1;
557 cksum = 0xBABA - cksum;
558 fxp_read_eeprom(sc, &data, i, 1);
559 fxp_write_eeprom(sc, &cksum, i, 1);
560 device_printf(dev,
561 "EEPROM checksum @ 0x%x: 0x%x -> 0x%x\n",
562 i, data, cksum);
563#if 1
564 /*
565 * If the user elects to continue, try the software
566 * workaround, as it is better than nothing.
567 */
568 sc->flags |= FXP_FLAG_CU_RESUME_BUG;
569#endif
570 }
571 }
572
573 /*
574 * If we are not a 82557 chip, we can enable extended features.
575 */
576 if (sc->revision != FXP_REV_82557) {
577 /*
578 * If MWI is enabled in the PCI configuration, and there
579 * is a valid cacheline size (8 or 16 dwords), then tell
580 * the board to turn on MWI.
581 */
582 if (val & PCIM_CMD_MWRICEN &&
583 pci_read_config(dev, PCIR_CACHELNSZ, 1) != 0)
584 sc->flags |= FXP_FLAG_MWI_ENABLE;
585
586 /* turn on the extended TxCB feature */
587 sc->flags |= FXP_FLAG_EXT_TXCB;
588
589 /* enable reception of long frames for VLAN */
590 sc->flags |= FXP_FLAG_LONG_PKT_EN;
591 } else {
592 /* a hack to get long VLAN frames on a 82557 */
593 sc->flags |= FXP_FLAG_SAVE_BAD;
594 }
595
596 /* For 82559 or later chips, Rx checksum offload is supported. */
597 if (sc->revision >= FXP_REV_82559_A0)
598 sc->flags |= FXP_FLAG_82559_RXCSUM;
599 /*
600 * Enable use of extended RFDs and TCBs for 82550
601 * and later chips. Note: we need extended TXCB support
602 * too, but that's already enabled by the code above.
603 * Be careful to do this only on the right devices.
604 */
605 if (sc->revision == FXP_REV_82550 || sc->revision == FXP_REV_82550_C ||
606 sc->revision == FXP_REV_82551_E || sc->revision == FXP_REV_82551_F
607 || sc->revision == FXP_REV_82551_10) {
608 sc->rfa_size = sizeof (struct fxp_rfa);
609 sc->tx_cmd = FXP_CB_COMMAND_IPCBXMIT;
610 sc->flags |= FXP_FLAG_EXT_RFA;
611 /* Use extended RFA instead of 82559 checksum mode. */
612 sc->flags &= ~FXP_FLAG_82559_RXCSUM;
613 } else {
614 sc->rfa_size = sizeof (struct fxp_rfa) - FXP_RFAX_LEN;
615 sc->tx_cmd = FXP_CB_COMMAND_XMIT;
616 }
617
618 /*
619 * Allocate DMA tags and DMA safe memory.
620 */
621 sc->maxtxseg = FXP_NTXSEG;
32
33/*
34 * Intel EtherExpress Pro/100B PCI Fast Ethernet driver
35 */
36
37#ifdef HAVE_KERNEL_OPTION_HEADERS
38#include "opt_device_polling.h"
39#endif
40
41#include <sys/param.h>
42#include <sys/systm.h>
43#include <sys/bus.h>
44#include <sys/endian.h>
45#include <sys/kernel.h>
46#include <sys/mbuf.h>
47#include <sys/module.h>
48#include <sys/rman.h>
49#include <sys/socket.h>
50#include <sys/sockio.h>
51#include <sys/sysctl.h>
52
53#include <net/bpf.h>
54#include <net/ethernet.h>
55#include <net/if.h>
56#include <net/if_arp.h>
57#include <net/if_dl.h>
58#include <net/if_media.h>
59#include <net/if_types.h>
60#include <net/if_vlan_var.h>
61
62#include <netinet/in.h>
63#include <netinet/in_systm.h>
64#include <netinet/ip.h>
65#include <netinet/tcp.h>
66#include <netinet/udp.h>
67
68#include <machine/bus.h>
69#include <machine/in_cksum.h>
70#include <machine/resource.h>
71
72#include <dev/pci/pcivar.h>
73#include <dev/pci/pcireg.h> /* for PCIM_CMD_xxx */
74
75#include <dev/mii/mii.h>
76#include <dev/mii/miivar.h>
77
78#include <dev/fxp/if_fxpreg.h>
79#include <dev/fxp/if_fxpvar.h>
80#include <dev/fxp/rcvbundl.h>
81
82MODULE_DEPEND(fxp, pci, 1, 1, 1);
83MODULE_DEPEND(fxp, ether, 1, 1, 1);
84MODULE_DEPEND(fxp, miibus, 1, 1, 1);
85#include "miibus_if.h"
86
87/*
88 * NOTE! On the Alpha, we have an alignment constraint. The
89 * card DMAs the packet immediately following the RFA. However,
90 * the first thing in the packet is a 14-byte Ethernet header.
91 * This means that the packet is misaligned. To compensate,
92 * we actually offset the RFA 2 bytes into the cluster. This
 93 * aligns the packet after the Ethernet header at a 32-bit
94 * boundary. HOWEVER! This means that the RFA is misaligned!
95 */
96#define RFA_ALIGNMENT_FUDGE 2
97
98/*
99 * Set initial transmit threshold at 64 (512 bytes). This is
100 * increased by 64 (512 bytes) at a time, to maximum of 192
101 * (1536 bytes), if an underrun occurs.
102 */
103static int tx_threshold = 64;
104
105/*
106 * The configuration byte map has several undefined fields which
107 * must be one or must be zero. Set up a template for these bits
108 * only, (assuming a 82557 chip) leaving the actual configuration
109 * to fxp_init.
110 *
111 * See struct fxp_cb_config for the bit definitions.
112 */
113static u_char fxp_cb_config_template[] = {
114 0x0, 0x0, /* cb_status */
115 0x0, 0x0, /* cb_command */
116 0x0, 0x0, 0x0, 0x0, /* link_addr */
117 0x0, /* 0 */
118 0x0, /* 1 */
119 0x0, /* 2 */
120 0x0, /* 3 */
121 0x0, /* 4 */
122 0x0, /* 5 */
123 0x32, /* 6 */
124 0x0, /* 7 */
125 0x0, /* 8 */
126 0x0, /* 9 */
127 0x6, /* 10 */
128 0x0, /* 11 */
129 0x0, /* 12 */
130 0x0, /* 13 */
131 0xf2, /* 14 */
132 0x48, /* 15 */
133 0x0, /* 16 */
134 0x40, /* 17 */
135 0xf0, /* 18 */
136 0x0, /* 19 */
137 0x3f, /* 20 */
138 0x5 /* 21 */
139};
140
141struct fxp_ident {
142 uint16_t devid;
143 int16_t revid; /* -1 matches anything */
144 char *name;
145};
146
147/*
148 * Claim various Intel PCI device identifiers for this driver. The
149 * sub-vendor and sub-device field are extensively used to identify
150 * particular variants, but we don't currently differentiate between
151 * them.
152 */
153static struct fxp_ident fxp_ident_table[] = {
154 { 0x1029, -1, "Intel 82559 PCI/CardBus Pro/100" },
155 { 0x1030, -1, "Intel 82559 Pro/100 Ethernet" },
156 { 0x1031, -1, "Intel 82801CAM (ICH3) Pro/100 VE Ethernet" },
157 { 0x1032, -1, "Intel 82801CAM (ICH3) Pro/100 VE Ethernet" },
158 { 0x1033, -1, "Intel 82801CAM (ICH3) Pro/100 VM Ethernet" },
159 { 0x1034, -1, "Intel 82801CAM (ICH3) Pro/100 VM Ethernet" },
160 { 0x1035, -1, "Intel 82801CAM (ICH3) Pro/100 Ethernet" },
161 { 0x1036, -1, "Intel 82801CAM (ICH3) Pro/100 Ethernet" },
162 { 0x1037, -1, "Intel 82801CAM (ICH3) Pro/100 Ethernet" },
163 { 0x1038, -1, "Intel 82801CAM (ICH3) Pro/100 VM Ethernet" },
164 { 0x1039, -1, "Intel 82801DB (ICH4) Pro/100 VE Ethernet" },
165 { 0x103A, -1, "Intel 82801DB (ICH4) Pro/100 Ethernet" },
166 { 0x103B, -1, "Intel 82801DB (ICH4) Pro/100 VM Ethernet" },
167 { 0x103C, -1, "Intel 82801DB (ICH4) Pro/100 Ethernet" },
168 { 0x103D, -1, "Intel 82801DB (ICH4) Pro/100 VE Ethernet" },
169 { 0x103E, -1, "Intel 82801DB (ICH4) Pro/100 VM Ethernet" },
170 { 0x1050, -1, "Intel 82801BA (D865) Pro/100 VE Ethernet" },
171 { 0x1051, -1, "Intel 82562ET (ICH5/ICH5R) Pro/100 VE Ethernet" },
172 { 0x1059, -1, "Intel 82551QM Pro/100 M Mobile Connection" },
173 { 0x1064, -1, "Intel 82562EZ (ICH6)" },
174 { 0x1065, -1, "Intel 82562ET/EZ/GT/GZ PRO/100 VE Ethernet" },
175 { 0x1068, -1, "Intel 82801FBM (ICH6-M) Pro/100 VE Ethernet" },
176 { 0x1069, -1, "Intel 82562EM/EX/GX Pro/100 Ethernet" },
177 { 0x1091, -1, "Intel 82562GX Pro/100 Ethernet" },
178 { 0x1092, -1, "Intel Pro/100 VE Network Connection" },
179 { 0x1093, -1, "Intel Pro/100 VM Network Connection" },
180 { 0x1094, -1, "Intel Pro/100 946GZ (ICH7) Network Connection" },
181 { 0x1209, -1, "Intel 82559ER Embedded 10/100 Ethernet" },
182 { 0x1229, 0x01, "Intel 82557 Pro/100 Ethernet" },
183 { 0x1229, 0x02, "Intel 82557 Pro/100 Ethernet" },
184 { 0x1229, 0x03, "Intel 82557 Pro/100 Ethernet" },
185 { 0x1229, 0x04, "Intel 82558 Pro/100 Ethernet" },
186 { 0x1229, 0x05, "Intel 82558 Pro/100 Ethernet" },
187 { 0x1229, 0x06, "Intel 82559 Pro/100 Ethernet" },
188 { 0x1229, 0x07, "Intel 82559 Pro/100 Ethernet" },
189 { 0x1229, 0x08, "Intel 82559 Pro/100 Ethernet" },
190 { 0x1229, 0x09, "Intel 82559ER Pro/100 Ethernet" },
191 { 0x1229, 0x0c, "Intel 82550 Pro/100 Ethernet" },
192 { 0x1229, 0x0d, "Intel 82550 Pro/100 Ethernet" },
193 { 0x1229, 0x0e, "Intel 82550 Pro/100 Ethernet" },
194 { 0x1229, 0x0f, "Intel 82551 Pro/100 Ethernet" },
195 { 0x1229, 0x10, "Intel 82551 Pro/100 Ethernet" },
196 { 0x1229, -1, "Intel 82557/8/9 Pro/100 Ethernet" },
197 { 0x2449, -1, "Intel 82801BA/CAM (ICH2/3) Pro/100 Ethernet" },
198 { 0x27dc, -1, "Intel 82801GB (ICH7) 10/100 Ethernet" },
199 { 0, -1, NULL },
200};
201
202#ifdef FXP_IP_CSUM_WAR
203#define FXP_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
204#else
205#define FXP_CSUM_FEATURES (CSUM_TCP | CSUM_UDP)
206#endif
207
208static int fxp_probe(device_t dev);
209static int fxp_attach(device_t dev);
210static int fxp_detach(device_t dev);
211static int fxp_shutdown(device_t dev);
212static int fxp_suspend(device_t dev);
213static int fxp_resume(device_t dev);
214
215static void fxp_intr(void *xsc);
216static void fxp_rxcsum(struct fxp_softc *sc, struct ifnet *ifp,
217 struct mbuf *m, uint16_t status, int pos);
218static void fxp_intr_body(struct fxp_softc *sc, struct ifnet *ifp,
219 uint8_t statack, int count);
220static void fxp_init(void *xsc);
221static void fxp_init_body(struct fxp_softc *sc);
222static void fxp_tick(void *xsc);
223static void fxp_start(struct ifnet *ifp);
224static void fxp_start_body(struct ifnet *ifp);
225static int fxp_encap(struct fxp_softc *sc, struct mbuf **m_head);
226static void fxp_txeof(struct fxp_softc *sc);
227static void fxp_stop(struct fxp_softc *sc);
228static void fxp_release(struct fxp_softc *sc);
229static int fxp_ioctl(struct ifnet *ifp, u_long command,
230 caddr_t data);
231static void fxp_watchdog(struct fxp_softc *sc);
232static void fxp_add_rfabuf(struct fxp_softc *sc,
233 struct fxp_rx *rxp);
234static void fxp_discard_rfabuf(struct fxp_softc *sc,
235 struct fxp_rx *rxp);
236static int fxp_new_rfabuf(struct fxp_softc *sc,
237 struct fxp_rx *rxp);
238static int fxp_mc_addrs(struct fxp_softc *sc);
239static void fxp_mc_setup(struct fxp_softc *sc);
240static uint16_t fxp_eeprom_getword(struct fxp_softc *sc, int offset,
241 int autosize);
242static void fxp_eeprom_putword(struct fxp_softc *sc, int offset,
243 uint16_t data);
244static void fxp_autosize_eeprom(struct fxp_softc *sc);
245static void fxp_read_eeprom(struct fxp_softc *sc, u_short *data,
246 int offset, int words);
247static void fxp_write_eeprom(struct fxp_softc *sc, u_short *data,
248 int offset, int words);
249static int fxp_ifmedia_upd(struct ifnet *ifp);
250static void fxp_ifmedia_sts(struct ifnet *ifp,
251 struct ifmediareq *ifmr);
252static int fxp_serial_ifmedia_upd(struct ifnet *ifp);
253static void fxp_serial_ifmedia_sts(struct ifnet *ifp,
254 struct ifmediareq *ifmr);
255static int fxp_miibus_readreg(device_t dev, int phy, int reg);
256static void fxp_miibus_writereg(device_t dev, int phy, int reg,
257 int value);
258static void fxp_load_ucode(struct fxp_softc *sc);
259static int sysctl_int_range(SYSCTL_HANDLER_ARGS,
260 int low, int high);
261static int sysctl_hw_fxp_bundle_max(SYSCTL_HANDLER_ARGS);
262static int sysctl_hw_fxp_int_delay(SYSCTL_HANDLER_ARGS);
263static void fxp_scb_wait(struct fxp_softc *sc);
264static void fxp_scb_cmd(struct fxp_softc *sc, int cmd);
265static void fxp_dma_wait(struct fxp_softc *sc,
266 volatile uint16_t *status, bus_dma_tag_t dmat,
267 bus_dmamap_t map);
268
/*
 * Newbus method dispatch table for the fxp(4) driver.  The device
 * methods cover discovery, teardown and power events; the miibus
 * methods give the MII layer access to the PHY registers.
 */
static device_method_t fxp_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		fxp_probe),
	DEVMETHOD(device_attach,	fxp_attach),
	DEVMETHOD(device_detach,	fxp_detach),
	DEVMETHOD(device_shutdown,	fxp_shutdown),
	DEVMETHOD(device_suspend,	fxp_suspend),
	DEVMETHOD(device_resume,	fxp_resume),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	fxp_miibus_readreg),
	DEVMETHOD(miibus_writereg,	fxp_miibus_writereg),

	{ 0, 0 }	/* table terminator */
};
284
/* Driver description: name, method table, and per-instance softc size. */
static driver_t fxp_driver = {
	"fxp",
	fxp_methods,
	sizeof(struct fxp_softc),
};
290
static devclass_t fxp_devclass;

/*
 * Register the driver on both the PCI and CardBus buses, and hang a
 * miibus (PHY) instance beneath each fxp device.
 */
DRIVER_MODULE(fxp, pci, fxp_driver, fxp_devclass, 0, 0);
DRIVER_MODULE(fxp, cardbus, fxp_driver, fxp_devclass, 0, 0);
DRIVER_MODULE(miibus, fxp, miibus_driver, miibus_devclass, 0, 0);
296
/*
 * Resource specifications: one register window (memory BAR or I/O BAR)
 * plus a shareable interrupt line.  Which table is tried first is
 * decided at attach time via the "prefer_iomap" hint.
 */
static struct resource_spec fxp_res_spec_mem[] = {
	{ SYS_RES_MEMORY,	FXP_PCI_MMBA,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE },
	{ -1, 0 }	/* list terminator */
};

static struct resource_spec fxp_res_spec_io[] = {
	{ SYS_RES_IOPORT,	FXP_PCI_IOBA,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE },
	{ -1, 0 }	/* list terminator */
};
308
309/*
310 * Wait for the previous command to be accepted (but not necessarily
311 * completed).
312 */
313static void
314fxp_scb_wait(struct fxp_softc *sc)
315{
316 union {
317 uint16_t w;
318 uint8_t b[2];
319 } flowctl;
320 int i = 10000;
321
322 while (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) && --i)
323 DELAY(2);
324 if (i == 0) {
325 flowctl.b[0] = CSR_READ_1(sc, FXP_CSR_FLOWCONTROL);
326 flowctl.b[1] = CSR_READ_1(sc, FXP_CSR_FLOWCONTROL + 1);
327 device_printf(sc->dev, "SCB timeout: 0x%x 0x%x 0x%x 0x%x\n",
328 CSR_READ_1(sc, FXP_CSR_SCB_COMMAND),
329 CSR_READ_1(sc, FXP_CSR_SCB_STATACK),
330 CSR_READ_1(sc, FXP_CSR_SCB_RUSCUS), flowctl.w);
331 }
332}
333
334static void
335fxp_scb_cmd(struct fxp_softc *sc, int cmd)
336{
337
338 if (cmd == FXP_SCB_COMMAND_CU_RESUME && sc->cu_resume_bug) {
339 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_CB_COMMAND_NOP);
340 fxp_scb_wait(sc);
341 }
342 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, cmd);
343}
344
345static void
346fxp_dma_wait(struct fxp_softc *sc, volatile uint16_t *status,
347 bus_dma_tag_t dmat, bus_dmamap_t map)
348{
349 int i = 10000;
350
351 bus_dmamap_sync(dmat, map, BUS_DMASYNC_POSTREAD);
352 while (!(le16toh(*status) & FXP_CB_STATUS_C) && --i) {
353 DELAY(2);
354 bus_dmamap_sync(dmat, map, BUS_DMASYNC_POSTREAD);
355 }
356 if (i == 0)
357 device_printf(sc->dev, "DMA timeout\n");
358}
359
360/*
361 * Return identification string if this device is ours.
362 */
363static int
364fxp_probe(device_t dev)
365{
366 uint16_t devid;
367 uint8_t revid;
368 struct fxp_ident *ident;
369
370 if (pci_get_vendor(dev) == FXP_VENDORID_INTEL) {
371 devid = pci_get_device(dev);
372 revid = pci_get_revid(dev);
373 for (ident = fxp_ident_table; ident->name != NULL; ident++) {
374 if (ident->devid == devid &&
375 (ident->revid == revid || ident->revid == -1)) {
376 device_set_desc(dev, ident->name);
377 return (BUS_PROBE_DEFAULT);
378 }
379 }
380 }
381 return (ENXIO);
382}
383
384static void
385fxp_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
386{
387 uint32_t *addr;
388
389 if (error)
390 return;
391
392 KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
393 addr = arg;
394 *addr = segs->ds_addr;
395}
396
397static int
398fxp_attach(device_t dev)
399{
400 struct fxp_softc *sc;
401 struct fxp_cb_tx *tcbp;
402 struct fxp_tx *txp;
403 struct fxp_rx *rxp;
404 struct ifnet *ifp;
405 uint32_t val;
406 uint16_t data, myea[ETHER_ADDR_LEN / 2];
407 u_char eaddr[ETHER_ADDR_LEN];
408 int i, prefer_iomap;
409 int error;
410
411 error = 0;
412 sc = device_get_softc(dev);
413 sc->dev = dev;
414 mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
415 MTX_DEF);
416 callout_init_mtx(&sc->stat_ch, &sc->sc_mtx, 0);
417 ifmedia_init(&sc->sc_media, 0, fxp_serial_ifmedia_upd,
418 fxp_serial_ifmedia_sts);
419
420 ifp = sc->ifp = if_alloc(IFT_ETHER);
421 if (ifp == NULL) {
422 device_printf(dev, "can not if_alloc()\n");
423 error = ENOSPC;
424 goto fail;
425 }
426
427 /*
428 * Enable bus mastering.
429 */
430 pci_enable_busmaster(dev);
431 val = pci_read_config(dev, PCIR_COMMAND, 2);
432
433 /*
434 * Figure out which we should try first - memory mapping or i/o mapping?
435 * We default to memory mapping. Then we accept an override from the
436 * command line. Then we check to see which one is enabled.
437 */
438 prefer_iomap = 0;
439 resource_int_value(device_get_name(dev), device_get_unit(dev),
440 "prefer_iomap", &prefer_iomap);
441 if (prefer_iomap)
442 sc->fxp_spec = fxp_res_spec_io;
443 else
444 sc->fxp_spec = fxp_res_spec_mem;
445
446 error = bus_alloc_resources(dev, sc->fxp_spec, sc->fxp_res);
447 if (error) {
448 if (sc->fxp_spec == fxp_res_spec_mem)
449 sc->fxp_spec = fxp_res_spec_io;
450 else
451 sc->fxp_spec = fxp_res_spec_mem;
452 error = bus_alloc_resources(dev, sc->fxp_spec, sc->fxp_res);
453 }
454 if (error) {
455 device_printf(dev, "could not allocate resources\n");
456 error = ENXIO;
457 goto fail;
458 }
459
460 if (bootverbose) {
461 device_printf(dev, "using %s space register mapping\n",
462 sc->fxp_spec == fxp_res_spec_mem ? "memory" : "I/O");
463 }
464
465 /*
466 * Reset to a stable state.
467 */
468 CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET);
469 DELAY(10);
470
471 /*
472 * Find out how large of an SEEPROM we have.
473 */
474 fxp_autosize_eeprom(sc);
475
476 /*
477 * Find out the chip revision; lump all 82557 revs together.
478 */
479 fxp_read_eeprom(sc, &data, 5, 1);
480 if ((data >> 8) == 1)
481 sc->revision = FXP_REV_82557;
482 else
483 sc->revision = pci_get_revid(dev);
484
485 /*
486 * Determine whether we must use the 503 serial interface.
487 */
488 fxp_read_eeprom(sc, &data, 6, 1);
489 if (sc->revision == FXP_REV_82557 && (data & FXP_PHY_DEVICE_MASK) != 0
490 && (data & FXP_PHY_SERIAL_ONLY))
491 sc->flags |= FXP_FLAG_SERIAL_MEDIA;
492
493 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
494 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
495 OID_AUTO, "int_delay", CTLTYPE_INT | CTLFLAG_RW,
496 &sc->tunable_int_delay, 0, sysctl_hw_fxp_int_delay, "I",
497 "FXP driver receive interrupt microcode bundling delay");
498 SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
499 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
500 OID_AUTO, "bundle_max", CTLTYPE_INT | CTLFLAG_RW,
501 &sc->tunable_bundle_max, 0, sysctl_hw_fxp_bundle_max, "I",
502 "FXP driver receive interrupt microcode bundle size limit");
503 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
504 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
505 OID_AUTO, "rnr", CTLFLAG_RD, &sc->rnr, 0,
506 "FXP RNR events");
507 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
508 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
509 OID_AUTO, "noflow", CTLFLAG_RW, &sc->tunable_noflow, 0,
510 "FXP flow control disabled");
511
512 /*
513 * Pull in device tunables.
514 */
515 sc->tunable_int_delay = TUNABLE_INT_DELAY;
516 sc->tunable_bundle_max = TUNABLE_BUNDLE_MAX;
517 sc->tunable_noflow = 1;
518 (void) resource_int_value(device_get_name(dev), device_get_unit(dev),
519 "int_delay", &sc->tunable_int_delay);
520 (void) resource_int_value(device_get_name(dev), device_get_unit(dev),
521 "bundle_max", &sc->tunable_bundle_max);
522 (void) resource_int_value(device_get_name(dev), device_get_unit(dev),
523 "noflow", &sc->tunable_noflow);
524 sc->rnr = 0;
525
526 /*
527 * Enable workarounds for certain chip revision deficiencies.
528 *
529 * Systems based on the ICH2/ICH2-M chip from Intel, and possibly
530 * some systems based a normal 82559 design, have a defect where
531 * the chip can cause a PCI protocol violation if it receives
532 * a CU_RESUME command when it is entering the IDLE state. The
533 * workaround is to disable Dynamic Standby Mode, so the chip never
534 * deasserts CLKRUN#, and always remains in an active state.
535 *
536 * See Intel 82801BA/82801BAM Specification Update, Errata #30.
537 */
538 i = pci_get_device(dev);
539 if (i == 0x2449 || (i > 0x1030 && i < 0x1039) ||
540 sc->revision >= FXP_REV_82559_A0) {
541 fxp_read_eeprom(sc, &data, 10, 1);
542 if (data & 0x02) { /* STB enable */
543 uint16_t cksum;
544 int i;
545
546 device_printf(dev,
547 "Disabling dynamic standby mode in EEPROM\n");
548 data &= ~0x02;
549 fxp_write_eeprom(sc, &data, 10, 1);
550 device_printf(dev, "New EEPROM ID: 0x%x\n", data);
551 cksum = 0;
552 for (i = 0; i < (1 << sc->eeprom_size) - 1; i++) {
553 fxp_read_eeprom(sc, &data, i, 1);
554 cksum += data;
555 }
556 i = (1 << sc->eeprom_size) - 1;
557 cksum = 0xBABA - cksum;
558 fxp_read_eeprom(sc, &data, i, 1);
559 fxp_write_eeprom(sc, &cksum, i, 1);
560 device_printf(dev,
561 "EEPROM checksum @ 0x%x: 0x%x -> 0x%x\n",
562 i, data, cksum);
563#if 1
564 /*
565 * If the user elects to continue, try the software
566 * workaround, as it is better than nothing.
567 */
568 sc->flags |= FXP_FLAG_CU_RESUME_BUG;
569#endif
570 }
571 }
572
573 /*
574 * If we are not a 82557 chip, we can enable extended features.
575 */
576 if (sc->revision != FXP_REV_82557) {
577 /*
578 * If MWI is enabled in the PCI configuration, and there
579 * is a valid cacheline size (8 or 16 dwords), then tell
580 * the board to turn on MWI.
581 */
582 if (val & PCIM_CMD_MWRICEN &&
583 pci_read_config(dev, PCIR_CACHELNSZ, 1) != 0)
584 sc->flags |= FXP_FLAG_MWI_ENABLE;
585
586 /* turn on the extended TxCB feature */
587 sc->flags |= FXP_FLAG_EXT_TXCB;
588
589 /* enable reception of long frames for VLAN */
590 sc->flags |= FXP_FLAG_LONG_PKT_EN;
591 } else {
592 /* a hack to get long VLAN frames on a 82557 */
593 sc->flags |= FXP_FLAG_SAVE_BAD;
594 }
595
596 /* For 82559 or later chips, Rx checksum offload is supported. */
597 if (sc->revision >= FXP_REV_82559_A0)
598 sc->flags |= FXP_FLAG_82559_RXCSUM;
599 /*
600 * Enable use of extended RFDs and TCBs for 82550
601 * and later chips. Note: we need extended TXCB support
602 * too, but that's already enabled by the code above.
603 * Be careful to do this only on the right devices.
604 */
605 if (sc->revision == FXP_REV_82550 || sc->revision == FXP_REV_82550_C ||
606 sc->revision == FXP_REV_82551_E || sc->revision == FXP_REV_82551_F
607 || sc->revision == FXP_REV_82551_10) {
608 sc->rfa_size = sizeof (struct fxp_rfa);
609 sc->tx_cmd = FXP_CB_COMMAND_IPCBXMIT;
610 sc->flags |= FXP_FLAG_EXT_RFA;
611 /* Use extended RFA instead of 82559 checksum mode. */
612 sc->flags &= ~FXP_FLAG_82559_RXCSUM;
613 } else {
614 sc->rfa_size = sizeof (struct fxp_rfa) - FXP_RFAX_LEN;
615 sc->tx_cmd = FXP_CB_COMMAND_XMIT;
616 }
617
618 /*
619 * Allocate DMA tags and DMA safe memory.
620 */
621 sc->maxtxseg = FXP_NTXSEG;
622 if (sc->flags & FXP_FLAG_EXT_RFA)
622 sc->maxsegsize = MCLBYTES;
623 if (sc->flags & FXP_FLAG_EXT_RFA) {
623 sc->maxtxseg--;
624 sc->maxtxseg--;
625 sc->maxsegsize = FXP_TSO_SEGSIZE;
626 }
624 error = bus_dma_tag_create(bus_get_dma_tag(dev), 2, 0,
625 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
627 error = bus_dma_tag_create(bus_get_dma_tag(dev), 2, 0,
628 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
626 MCLBYTES * sc->maxtxseg, sc->maxtxseg, MCLBYTES, 0,
629 sc->maxsegsize * sc->maxtxseg + sizeof(struct ether_vlan_header),
630 sc->maxtxseg, sc->maxsegsize, 0,
627 busdma_lock_mutex, &Giant, &sc->fxp_mtag);
628 if (error) {
629 device_printf(dev, "could not allocate dma tag\n");
630 goto fail;
631 }
632
633 error = bus_dma_tag_create(bus_get_dma_tag(dev), 4, 0,
634 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
635 sizeof(struct fxp_stats), 1, sizeof(struct fxp_stats), 0,
636 busdma_lock_mutex, &Giant, &sc->fxp_stag);
637 if (error) {
638 device_printf(dev, "could not allocate dma tag\n");
639 goto fail;
640 }
641
642 error = bus_dmamem_alloc(sc->fxp_stag, (void **)&sc->fxp_stats,
643 BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->fxp_smap);
644 if (error)
645 goto fail;
646 error = bus_dmamap_load(sc->fxp_stag, sc->fxp_smap, sc->fxp_stats,
647 sizeof(struct fxp_stats), fxp_dma_map_addr, &sc->stats_addr, 0);
648 if (error) {
649 device_printf(dev, "could not map the stats buffer\n");
650 goto fail;
651 }
652
653 error = bus_dma_tag_create(bus_get_dma_tag(dev), 4, 0,
654 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
655 FXP_TXCB_SZ, 1, FXP_TXCB_SZ, 0,
656 busdma_lock_mutex, &Giant, &sc->cbl_tag);
657 if (error) {
658 device_printf(dev, "could not allocate dma tag\n");
659 goto fail;
660 }
661
662 error = bus_dmamem_alloc(sc->cbl_tag, (void **)&sc->fxp_desc.cbl_list,
663 BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->cbl_map);
664 if (error)
665 goto fail;
666
667 error = bus_dmamap_load(sc->cbl_tag, sc->cbl_map,
668 sc->fxp_desc.cbl_list, FXP_TXCB_SZ, fxp_dma_map_addr,
669 &sc->fxp_desc.cbl_addr, 0);
670 if (error) {
671 device_printf(dev, "could not map DMA memory\n");
672 goto fail;
673 }
674
675 error = bus_dma_tag_create(bus_get_dma_tag(dev), 4, 0,
676 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
677 sizeof(struct fxp_cb_mcs), 1, sizeof(struct fxp_cb_mcs), 0,
678 busdma_lock_mutex, &Giant, &sc->mcs_tag);
679 if (error) {
680 device_printf(dev, "could not allocate dma tag\n");
681 goto fail;
682 }
683
684 error = bus_dmamem_alloc(sc->mcs_tag, (void **)&sc->mcsp,
685 BUS_DMA_NOWAIT, &sc->mcs_map);
686 if (error)
687 goto fail;
688 error = bus_dmamap_load(sc->mcs_tag, sc->mcs_map, sc->mcsp,
689 sizeof(struct fxp_cb_mcs), fxp_dma_map_addr, &sc->mcs_addr, 0);
690 if (error) {
691 device_printf(dev, "can't map the multicast setup command\n");
692 goto fail;
693 }
694
695 /*
696 * Pre-allocate the TX DMA maps and setup the pointers to
697 * the TX command blocks.
698 */
699 txp = sc->fxp_desc.tx_list;
700 tcbp = sc->fxp_desc.cbl_list;
701 for (i = 0; i < FXP_NTXCB; i++) {
702 txp[i].tx_cb = tcbp + i;
703 error = bus_dmamap_create(sc->fxp_mtag, 0, &txp[i].tx_map);
704 if (error) {
705 device_printf(dev, "can't create DMA map for TX\n");
706 goto fail;
707 }
708 }
709 error = bus_dmamap_create(sc->fxp_mtag, 0, &sc->spare_map);
710 if (error) {
711 device_printf(dev, "can't create spare DMA map\n");
712 goto fail;
713 }
714
715 /*
716 * Pre-allocate our receive buffers.
717 */
718 sc->fxp_desc.rx_head = sc->fxp_desc.rx_tail = NULL;
719 for (i = 0; i < FXP_NRFABUFS; i++) {
720 rxp = &sc->fxp_desc.rx_list[i];
721 error = bus_dmamap_create(sc->fxp_mtag, 0, &rxp->rx_map);
722 if (error) {
723 device_printf(dev, "can't create DMA map for RX\n");
724 goto fail;
725 }
726 if (fxp_new_rfabuf(sc, rxp) != 0) {
727 error = ENOMEM;
728 goto fail;
729 }
730 fxp_add_rfabuf(sc, rxp);
731 }
732
733 /*
734 * Read MAC address.
735 */
736 fxp_read_eeprom(sc, myea, 0, 3);
737 eaddr[0] = myea[0] & 0xff;
738 eaddr[1] = myea[0] >> 8;
739 eaddr[2] = myea[1] & 0xff;
740 eaddr[3] = myea[1] >> 8;
741 eaddr[4] = myea[2] & 0xff;
742 eaddr[5] = myea[2] >> 8;
743 if (bootverbose) {
744 device_printf(dev, "PCI IDs: %04x %04x %04x %04x %04x\n",
745 pci_get_vendor(dev), pci_get_device(dev),
746 pci_get_subvendor(dev), pci_get_subdevice(dev),
747 pci_get_revid(dev));
748 fxp_read_eeprom(sc, &data, 10, 1);
749 device_printf(dev, "Dynamic Standby mode is %s\n",
750 data & 0x02 ? "enabled" : "disabled");
751 }
752
753 /*
754 * If this is only a 10Mbps device, then there is no MII, and
755 * the PHY will use a serial interface instead.
756 *
757 * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter
758 * doesn't have a programming interface of any sort. The
759 * media is sensed automatically based on how the link partner
760 * is configured. This is, in essence, manual configuration.
761 */
762 if (sc->flags & FXP_FLAG_SERIAL_MEDIA) {
763 ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
764 ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
765 } else {
766 if (mii_phy_probe(dev, &sc->miibus, fxp_ifmedia_upd,
767 fxp_ifmedia_sts)) {
768 device_printf(dev, "MII without any PHY!\n");
769 error = ENXIO;
770 goto fail;
771 }
772 }
773
774 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
775 ifp->if_init = fxp_init;
776 ifp->if_softc = sc;
777 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
778 ifp->if_ioctl = fxp_ioctl;
779 ifp->if_start = fxp_start;
780
781 ifp->if_capabilities = ifp->if_capenable = 0;
782
631 busdma_lock_mutex, &Giant, &sc->fxp_mtag);
632 if (error) {
633 device_printf(dev, "could not allocate dma tag\n");
634 goto fail;
635 }
636
637 error = bus_dma_tag_create(bus_get_dma_tag(dev), 4, 0,
638 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
639 sizeof(struct fxp_stats), 1, sizeof(struct fxp_stats), 0,
640 busdma_lock_mutex, &Giant, &sc->fxp_stag);
641 if (error) {
642 device_printf(dev, "could not allocate dma tag\n");
643 goto fail;
644 }
645
646 error = bus_dmamem_alloc(sc->fxp_stag, (void **)&sc->fxp_stats,
647 BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->fxp_smap);
648 if (error)
649 goto fail;
650 error = bus_dmamap_load(sc->fxp_stag, sc->fxp_smap, sc->fxp_stats,
651 sizeof(struct fxp_stats), fxp_dma_map_addr, &sc->stats_addr, 0);
652 if (error) {
653 device_printf(dev, "could not map the stats buffer\n");
654 goto fail;
655 }
656
657 error = bus_dma_tag_create(bus_get_dma_tag(dev), 4, 0,
658 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
659 FXP_TXCB_SZ, 1, FXP_TXCB_SZ, 0,
660 busdma_lock_mutex, &Giant, &sc->cbl_tag);
661 if (error) {
662 device_printf(dev, "could not allocate dma tag\n");
663 goto fail;
664 }
665
666 error = bus_dmamem_alloc(sc->cbl_tag, (void **)&sc->fxp_desc.cbl_list,
667 BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->cbl_map);
668 if (error)
669 goto fail;
670
671 error = bus_dmamap_load(sc->cbl_tag, sc->cbl_map,
672 sc->fxp_desc.cbl_list, FXP_TXCB_SZ, fxp_dma_map_addr,
673 &sc->fxp_desc.cbl_addr, 0);
674 if (error) {
675 device_printf(dev, "could not map DMA memory\n");
676 goto fail;
677 }
678
679 error = bus_dma_tag_create(bus_get_dma_tag(dev), 4, 0,
680 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
681 sizeof(struct fxp_cb_mcs), 1, sizeof(struct fxp_cb_mcs), 0,
682 busdma_lock_mutex, &Giant, &sc->mcs_tag);
683 if (error) {
684 device_printf(dev, "could not allocate dma tag\n");
685 goto fail;
686 }
687
688 error = bus_dmamem_alloc(sc->mcs_tag, (void **)&sc->mcsp,
689 BUS_DMA_NOWAIT, &sc->mcs_map);
690 if (error)
691 goto fail;
692 error = bus_dmamap_load(sc->mcs_tag, sc->mcs_map, sc->mcsp,
693 sizeof(struct fxp_cb_mcs), fxp_dma_map_addr, &sc->mcs_addr, 0);
694 if (error) {
695 device_printf(dev, "can't map the multicast setup command\n");
696 goto fail;
697 }
698
699 /*
700 * Pre-allocate the TX DMA maps and setup the pointers to
701 * the TX command blocks.
702 */
703 txp = sc->fxp_desc.tx_list;
704 tcbp = sc->fxp_desc.cbl_list;
705 for (i = 0; i < FXP_NTXCB; i++) {
706 txp[i].tx_cb = tcbp + i;
707 error = bus_dmamap_create(sc->fxp_mtag, 0, &txp[i].tx_map);
708 if (error) {
709 device_printf(dev, "can't create DMA map for TX\n");
710 goto fail;
711 }
712 }
713 error = bus_dmamap_create(sc->fxp_mtag, 0, &sc->spare_map);
714 if (error) {
715 device_printf(dev, "can't create spare DMA map\n");
716 goto fail;
717 }
718
719 /*
720 * Pre-allocate our receive buffers.
721 */
722 sc->fxp_desc.rx_head = sc->fxp_desc.rx_tail = NULL;
723 for (i = 0; i < FXP_NRFABUFS; i++) {
724 rxp = &sc->fxp_desc.rx_list[i];
725 error = bus_dmamap_create(sc->fxp_mtag, 0, &rxp->rx_map);
726 if (error) {
727 device_printf(dev, "can't create DMA map for RX\n");
728 goto fail;
729 }
730 if (fxp_new_rfabuf(sc, rxp) != 0) {
731 error = ENOMEM;
732 goto fail;
733 }
734 fxp_add_rfabuf(sc, rxp);
735 }
736
737 /*
738 * Read MAC address.
739 */
740 fxp_read_eeprom(sc, myea, 0, 3);
741 eaddr[0] = myea[0] & 0xff;
742 eaddr[1] = myea[0] >> 8;
743 eaddr[2] = myea[1] & 0xff;
744 eaddr[3] = myea[1] >> 8;
745 eaddr[4] = myea[2] & 0xff;
746 eaddr[5] = myea[2] >> 8;
747 if (bootverbose) {
748 device_printf(dev, "PCI IDs: %04x %04x %04x %04x %04x\n",
749 pci_get_vendor(dev), pci_get_device(dev),
750 pci_get_subvendor(dev), pci_get_subdevice(dev),
751 pci_get_revid(dev));
752 fxp_read_eeprom(sc, &data, 10, 1);
753 device_printf(dev, "Dynamic Standby mode is %s\n",
754 data & 0x02 ? "enabled" : "disabled");
755 }
756
757 /*
758 * If this is only a 10Mbps device, then there is no MII, and
759 * the PHY will use a serial interface instead.
760 *
761 * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter
762 * doesn't have a programming interface of any sort. The
763 * media is sensed automatically based on how the link partner
764 * is configured. This is, in essence, manual configuration.
765 */
766 if (sc->flags & FXP_FLAG_SERIAL_MEDIA) {
767 ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
768 ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
769 } else {
770 if (mii_phy_probe(dev, &sc->miibus, fxp_ifmedia_upd,
771 fxp_ifmedia_sts)) {
772 device_printf(dev, "MII without any PHY!\n");
773 error = ENXIO;
774 goto fail;
775 }
776 }
777
778 if_initname(ifp, device_get_name(dev), device_get_unit(dev));
779 ifp->if_init = fxp_init;
780 ifp->if_softc = sc;
781 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
782 ifp->if_ioctl = fxp_ioctl;
783 ifp->if_start = fxp_start;
784
785 ifp->if_capabilities = ifp->if_capenable = 0;
786
783 /* Enable checksum offload for 82550 or better chips */
787 /* Enable checksum offload/TSO for 82550 or better chips */
784 if (sc->flags & FXP_FLAG_EXT_RFA) {
788 if (sc->flags & FXP_FLAG_EXT_RFA) {
785 ifp->if_hwassist = FXP_CSUM_FEATURES;
786 ifp->if_capabilities |= IFCAP_HWCSUM;
787 ifp->if_capenable |= IFCAP_HWCSUM;
789 ifp->if_hwassist = FXP_CSUM_FEATURES | CSUM_TSO;
790 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4;
791 ifp->if_capenable |= IFCAP_HWCSUM | IFCAP_TSO4;
788 }
789
790 if (sc->flags & FXP_FLAG_82559_RXCSUM) {
791 ifp->if_capabilities |= IFCAP_RXCSUM;
792 ifp->if_capenable |= IFCAP_RXCSUM;
793 }
794
795#ifdef DEVICE_POLLING
796 /* Inform the world we support polling. */
797 ifp->if_capabilities |= IFCAP_POLLING;
798#endif
799
800 /*
801 * Attach the interface.
802 */
803 ether_ifattach(ifp, eaddr);
804
805 /*
806 * Tell the upper layer(s) we support long frames.
807 * Must appear after the call to ether_ifattach() because
808 * ether_ifattach() sets ifi_hdrlen to the default value.
809 */
810 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
811 ifp->if_capabilities |= IFCAP_VLAN_MTU;
812 ifp->if_capenable |= IFCAP_VLAN_MTU; /* the hw bits already set */
813
814 /*
815 * Let the system queue as many packets as we have available
816 * TX descriptors.
817 */
818 IFQ_SET_MAXLEN(&ifp->if_snd, FXP_NTXCB - 1);
819 ifp->if_snd.ifq_drv_maxlen = FXP_NTXCB - 1;
820 IFQ_SET_READY(&ifp->if_snd);
821
822 /*
823 * Hook our interrupt after all initialization is complete.
824 */
825 error = bus_setup_intr(dev, sc->fxp_res[1], INTR_TYPE_NET | INTR_MPSAFE,
826 NULL, fxp_intr, sc, &sc->ih);
827 if (error) {
828 device_printf(dev, "could not setup irq\n");
829 ether_ifdetach(sc->ifp);
830 goto fail;
831 }
832
833fail:
834 if (error)
835 fxp_release(sc);
836 return (error);
837}
838
/*
 * Release all resources.  The softc lock should not be held and the
 * interrupt should already be torn down.
 *
 * Teardown order matters: child devices (miibus) and media state go
 * first, then DMA-loaded buffers are unloaded and freed, then bus
 * resources, then per-buffer maps and finally the tags themselves.
 * Each step is guarded so this is safe to call from a partially
 * completed fxp_attach().
 */
static void
fxp_release(struct fxp_softc *sc)
{
	struct fxp_rx *rxp;
	struct fxp_tx *txp;
	int i;

	FXP_LOCK_ASSERT(sc, MA_NOTOWNED);
	KASSERT(sc->ih == NULL,
	    ("fxp_release() called with intr handle still active"));
	if (sc->miibus)
		device_delete_child(sc->dev, sc->miibus);
	bus_generic_detach(sc->dev);
	ifmedia_removeall(&sc->sc_media);
	/* Unload and free the TX command block list, if allocated. */
	if (sc->fxp_desc.cbl_list) {
		bus_dmamap_unload(sc->cbl_tag, sc->cbl_map);
		bus_dmamem_free(sc->cbl_tag, sc->fxp_desc.cbl_list,
		    sc->cbl_map);
	}
	/* Unload and free the statistics buffer, if allocated. */
	if (sc->fxp_stats) {
		bus_dmamap_unload(sc->fxp_stag, sc->fxp_smap);
		bus_dmamem_free(sc->fxp_stag, sc->fxp_stats, sc->fxp_smap);
	}
	/* Unload and free the multicast setup command, if allocated. */
	if (sc->mcsp) {
		bus_dmamap_unload(sc->mcs_tag, sc->mcs_map);
		bus_dmamem_free(sc->mcs_tag, sc->mcsp, sc->mcs_map);
	}
	bus_release_resources(sc->dev, sc->fxp_spec, sc->fxp_res);
	/*
	 * Free any mbufs still held by RX/TX slots, destroy the
	 * per-slot maps, then destroy the mbuf tag itself.
	 */
	if (sc->fxp_mtag) {
		for (i = 0; i < FXP_NRFABUFS; i++) {
			rxp = &sc->fxp_desc.rx_list[i];
			if (rxp->rx_mbuf != NULL) {
				bus_dmamap_sync(sc->fxp_mtag, rxp->rx_map,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(sc->fxp_mtag, rxp->rx_map);
				m_freem(rxp->rx_mbuf);
			}
			bus_dmamap_destroy(sc->fxp_mtag, rxp->rx_map);
		}
		bus_dmamap_destroy(sc->fxp_mtag, sc->spare_map);
		for (i = 0; i < FXP_NTXCB; i++) {
			txp = &sc->fxp_desc.tx_list[i];
			if (txp->tx_mbuf != NULL) {
				bus_dmamap_sync(sc->fxp_mtag, txp->tx_map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(sc->fxp_mtag, txp->tx_map);
				m_freem(txp->tx_mbuf);
			}
			bus_dmamap_destroy(sc->fxp_mtag, txp->tx_map);
		}
		bus_dma_tag_destroy(sc->fxp_mtag);
	}
	if (sc->fxp_stag)
		bus_dma_tag_destroy(sc->fxp_stag);
	if (sc->cbl_tag)
		bus_dma_tag_destroy(sc->cbl_tag);
	if (sc->mcs_tag)
		bus_dma_tag_destroy(sc->mcs_tag);
	if (sc->ifp)
		if_free(sc->ifp);

	mtx_destroy(&sc->sc_mtx);
}
906
/*
 * Detach interface.
 *
 * Ordering: stop the chip with interrupts masked, drain the stats
 * callout, detach from the network stack, tear down the interrupt
 * (fxp_release() asserts it is gone), then free everything.
 */
static int
fxp_detach(device_t dev)
{
	struct fxp_softc *sc = device_get_softc(dev);

#ifdef DEVICE_POLLING
	if (sc->ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(sc->ifp);
#endif

	FXP_LOCK(sc);
	sc->suspended = 1;	/* Do same thing as we do for suspend */
	/*
	 * Stop DMA and drop transmit queue, but disable interrupts first.
	 */
	CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_DISABLE);
	fxp_stop(sc);
	FXP_UNLOCK(sc);
	/* Wait for any in-flight stat_ch callout to finish (lock dropped). */
	callout_drain(&sc->stat_ch);

	/*
	 * Close down routes etc.
	 */
	ether_ifdetach(sc->ifp);

	/*
	 * Unhook interrupt before dropping lock. This is to prevent
	 * races with fxp_intr().
	 */
	bus_teardown_intr(sc->dev, sc->fxp_res[1], sc->ih);
	sc->ih = NULL;

	/* Release our allocated resources. */
	fxp_release(sc);
	return (0);
}
946
947/*
948 * Device shutdown routine. Called at system shutdown after sync. The
949 * main purpose of this routine is to shut off receiver DMA so that
950 * kernel memory doesn't get clobbered during warmboot.
951 */
952static int
953fxp_shutdown(device_t dev)
954{
955 struct fxp_softc *sc = device_get_softc(dev);
956
957 /*
958 * Make sure that DMA is disabled prior to reboot. Not doing
959 * do could allow DMA to corrupt kernel memory during the
960 * reboot before the driver initializes.
961 */
962 FXP_LOCK(sc);
963 fxp_stop(sc);
964 FXP_UNLOCK(sc);
965 return (0);
966}
967
968/*
969 * Device suspend routine. Stop the interface and save some PCI
970 * settings in case the BIOS doesn't restore them properly on
971 * resume.
972 */
973static int
974fxp_suspend(device_t dev)
975{
976 struct fxp_softc *sc = device_get_softc(dev);
977
978 FXP_LOCK(sc);
979
980 fxp_stop(sc);
981
982 sc->suspended = 1;
983
984 FXP_UNLOCK(sc);
985 return (0);
986}
987
988/*
989 * Device resume routine. re-enable busmastering, and restart the interface if
990 * appropriate.
991 */
992static int
993fxp_resume(device_t dev)
994{
995 struct fxp_softc *sc = device_get_softc(dev);
996 struct ifnet *ifp = sc->ifp;
997
998 FXP_LOCK(sc);
999
1000 CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET);
1001 DELAY(10);
1002
1003 /* reinitialize interface if necessary */
1004 if (ifp->if_flags & IFF_UP)
1005 fxp_init_body(sc);
1006
1007 sc->suspended = 0;
1008
1009 FXP_UNLOCK(sc);
1010 return (0);
1011}
1012
1013static void
1014fxp_eeprom_shiftin(struct fxp_softc *sc, int data, int length)
1015{
1016 uint16_t reg;
1017 int x;
1018
1019 /*
1020 * Shift in data.
1021 */
1022 for (x = 1 << (length - 1); x; x >>= 1) {
1023 if (data & x)
1024 reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
1025 else
1026 reg = FXP_EEPROM_EECS;
1027 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
1028 DELAY(1);
1029 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg | FXP_EEPROM_EESK);
1030 DELAY(1);
1031 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
1032 DELAY(1);
1033 }
1034}
1035
/*
 * Read from the serial EEPROM. Basically, you manually shift in
 * the read opcode (one bit at a time) and then shift in the address,
 * and then you shift out the data (all of this one bit at a time).
 * The word size is 16 bits, so you have to provide the address for
 * every 16 bits of data.
 *
 * When 'autosize' is nonzero the routine is being used to probe the
 * EEPROM's address width: it watches EEDO for the dummy-zero bit that
 * the part emits after the last address bit and records the number of
 * address bits seen so far in sc->eeprom_size (see the comment above
 * fxp_autosize_eeprom()).
 */
static uint16_t
fxp_eeprom_getword(struct fxp_softc *sc, int offset, int autosize)
{
	uint16_t reg, data;
	int x;

	/* Assert chip select. */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	/*
	 * Shift in read opcode.
	 */
	fxp_eeprom_shiftin(sc, FXP_EEPROM_OPC_READ, 3);
	/*
	 * Shift in address.
	 */
	data = 0;
	for (x = 1 << (sc->eeprom_size - 1); x; x >>= 1) {
		if (offset & x)
			reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
		else
			reg = FXP_EEPROM_EECS;
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
		DELAY(1);
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg | FXP_EEPROM_EESK);
		DELAY(1);
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
		DELAY(1);
		/* Sample EEDO after each address bit. */
		reg = CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO;
		data++;		/* here 'data' counts address bits shifted */
		if (autosize && reg == 0) {
			/* Dummy zero seen: this is the real address width. */
			sc->eeprom_size = data;
			break;
		}
	}
	/*
	 * Shift out data.
	 */
	data = 0;
	reg = FXP_EEPROM_EECS;
	for (x = 1 << 15; x; x >>= 1) {
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg | FXP_EEPROM_EESK);
		DELAY(1);
		if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO)
			data |= x;
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
		DELAY(1);
	}
	/* Deassert chip select. */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);

	return (data);
}
1094
/*
 * Write one 16-bit word to the serial EEPROM at 'offset'.
 * Sequence: enable erase/write, shift in the write opcode + address +
 * data, poll EEDO until the part reports completion, then disable
 * erase/write again so stray writes cannot corrupt the EEPROM.
 */
static void
fxp_eeprom_putword(struct fxp_softc *sc, int offset, uint16_t data)
{
	int i;

	/*
	 * Erase/write enable.
	 */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	fxp_eeprom_shiftin(sc, 0x4, 3);
	fxp_eeprom_shiftin(sc, 0x03 << (sc->eeprom_size - 2), sc->eeprom_size);
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);
	/*
	 * Shift in write opcode, address, data.
	 */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	fxp_eeprom_shiftin(sc, FXP_EEPROM_OPC_WRITE, 3);
	fxp_eeprom_shiftin(sc, offset, sc->eeprom_size);
	fxp_eeprom_shiftin(sc, data, 16);
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);
	/*
	 * Wait for EEPROM to finish up.
	 */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	DELAY(1);
	/* Poll EEDO; up to ~50ms (1000 * 50us) before giving up silently. */
	for (i = 0; i < 1000; i++) {
		if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO)
			break;
		DELAY(50);
	}
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);
	/*
	 * Erase/write disable.
	 */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	fxp_eeprom_shiftin(sc, 0x4, 3);
	fxp_eeprom_shiftin(sc, 0, sc->eeprom_size);
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);
}
1138
/*
 * From NetBSD:
 *
 * Figure out EEPROM size.
 *
 * 559's can have either 64-word or 256-word EEPROMs, the 558
 * datasheet only talks about 64-word EEPROMs, and the 557 datasheet
 * talks about the existance of 16 to 256 word EEPROMs.
 *
 * The only known sizes are 64 and 256, where the 256 version is used
 * by CardBus cards to store CIS information.
 *
 * The address is shifted in msb-to-lsb, and after the last
 * address-bit the EEPROM is supposed to output a `dummy zero' bit,
 * after which follows the actual data. We try to detect this zero, by
 * probing the data-out bit in the EEPROM control register just after
 * having shifted in a bit. If the bit is zero, we assume we've
 * shifted enough address bits. The data-out should be tri-state,
 * before this, which should translate to a logical one.
 */
static void
fxp_autosize_eeprom(struct fxp_softc *sc)
{

	/*
	 * guess maximum size of 256 words; eeprom_size is in address
	 * bits, so 8 bits -> 256 words.
	 */
	sc->eeprom_size = 8;

	/* autosize: getword() shrinks eeprom_size to the detected width */
	(void) fxp_eeprom_getword(sc, 0, 1);
}
1169
1170static void
1171fxp_read_eeprom(struct fxp_softc *sc, u_short *data, int offset, int words)
1172{
1173 int i;
1174
1175 for (i = 0; i < words; i++)
1176 data[i] = fxp_eeprom_getword(sc, offset + i, 0);
1177}
1178
1179static void
1180fxp_write_eeprom(struct fxp_softc *sc, u_short *data, int offset, int words)
1181{
1182 int i;
1183
1184 for (i = 0; i < words; i++)
1185 fxp_eeprom_putword(sc, offset + i, data[i]);
1186}
1187
1188/*
1189 * Grab the softc lock and call the real fxp_start_body() routine
1190 */
1191static void
1192fxp_start(struct ifnet *ifp)
1193{
1194 struct fxp_softc *sc = ifp->if_softc;
1195
1196 FXP_LOCK(sc);
1197 fxp_start_body(ifp);
1198 FXP_UNLOCK(sc);
1199}
1200
1201/*
1202 * Start packet transmission on the interface.
1203 * This routine must be called with the softc lock held, and is an
1204 * internal entry point only.
1205 */
1206static void
1207fxp_start_body(struct ifnet *ifp)
1208{
1209 struct fxp_softc *sc = ifp->if_softc;
1210 struct mbuf *mb_head;
1211 int txqueued;
1212
1213 FXP_LOCK_ASSERT(sc, MA_OWNED);
1214
1215 /*
1216 * See if we need to suspend xmit until the multicast filter
1217 * has been reprogrammed (which can only be done at the head
1218 * of the command chain).
1219 */
1220 if (sc->need_mcsetup)
1221 return;
1222
1223 if (sc->tx_queued > FXP_NTXCB_HIWAT)
1224 fxp_txeof(sc);
1225 /*
1226 * We're finished if there is nothing more to add to the list or if
1227 * we're all filled up with buffers to transmit.
1228 * NOTE: One TxCB is reserved to guarantee that fxp_mc_setup() can add
1229 * a NOP command when needed.
1230 */
1231 txqueued = 0;
1232 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
1233 sc->tx_queued < FXP_NTXCB - 1) {
1234
1235 /*
1236 * Grab a packet to transmit.
1237 */
1238 IFQ_DRV_DEQUEUE(&ifp->if_snd, mb_head);
1239 if (mb_head == NULL)
1240 break;
1241
1242 if (fxp_encap(sc, &mb_head)) {
1243 if (mb_head == NULL)
1244 break;
1245 IFQ_DRV_PREPEND(&ifp->if_snd, mb_head);
1246 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1247 }
1248 txqueued++;
1249 /*
1250 * Pass packet to bpf if there is a listener.
1251 */
1252 BPF_MTAP(ifp, mb_head);
1253 }
1254
1255 /*
1256 * We're finished. If we added to the list, issue a RESUME to get DMA
1257 * going again if suspended.
1258 */
1259 if (txqueued > 0) {
1260 bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
1261 fxp_scb_wait(sc);
1262 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_RESUME);
1263 /*
1264 * Set a 5 second timer just in case we don't hear
1265 * from the card again.
1266 */
1267 sc->watchdog_timer = 5;
1268 }
1269}
1270
1271static int
1272fxp_encap(struct fxp_softc *sc, struct mbuf **m_head)
1273{
1274 struct ifnet *ifp;
1275 struct mbuf *m;
1276 struct fxp_tx *txp;
1277 struct fxp_cb_tx *cbp;
792 }
793
794 if (sc->flags & FXP_FLAG_82559_RXCSUM) {
795 ifp->if_capabilities |= IFCAP_RXCSUM;
796 ifp->if_capenable |= IFCAP_RXCSUM;
797 }
798
799#ifdef DEVICE_POLLING
800 /* Inform the world we support polling. */
801 ifp->if_capabilities |= IFCAP_POLLING;
802#endif
803
804 /*
805 * Attach the interface.
806 */
807 ether_ifattach(ifp, eaddr);
808
809 /*
810 * Tell the upper layer(s) we support long frames.
811 * Must appear after the call to ether_ifattach() because
812 * ether_ifattach() sets ifi_hdrlen to the default value.
813 */
814 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
815 ifp->if_capabilities |= IFCAP_VLAN_MTU;
816 ifp->if_capenable |= IFCAP_VLAN_MTU; /* the hw bits already set */
817
818 /*
819 * Let the system queue as many packets as we have available
820 * TX descriptors.
821 */
822 IFQ_SET_MAXLEN(&ifp->if_snd, FXP_NTXCB - 1);
823 ifp->if_snd.ifq_drv_maxlen = FXP_NTXCB - 1;
824 IFQ_SET_READY(&ifp->if_snd);
825
826 /*
827 * Hook our interrupt after all initialization is complete.
828 */
829 error = bus_setup_intr(dev, sc->fxp_res[1], INTR_TYPE_NET | INTR_MPSAFE,
830 NULL, fxp_intr, sc, &sc->ih);
831 if (error) {
832 device_printf(dev, "could not setup irq\n");
833 ether_ifdetach(sc->ifp);
834 goto fail;
835 }
836
837fail:
838 if (error)
839 fxp_release(sc);
840 return (error);
841}
842
843/*
844 * Release all resources. The softc lock should not be held and the
845 * interrupt should already be torn down.
846 */
847static void
848fxp_release(struct fxp_softc *sc)
849{
850 struct fxp_rx *rxp;
851 struct fxp_tx *txp;
852 int i;
853
854 FXP_LOCK_ASSERT(sc, MA_NOTOWNED);
855 KASSERT(sc->ih == NULL,
856 ("fxp_release() called with intr handle still active"));
857 if (sc->miibus)
858 device_delete_child(sc->dev, sc->miibus);
859 bus_generic_detach(sc->dev);
860 ifmedia_removeall(&sc->sc_media);
861 if (sc->fxp_desc.cbl_list) {
862 bus_dmamap_unload(sc->cbl_tag, sc->cbl_map);
863 bus_dmamem_free(sc->cbl_tag, sc->fxp_desc.cbl_list,
864 sc->cbl_map);
865 }
866 if (sc->fxp_stats) {
867 bus_dmamap_unload(sc->fxp_stag, sc->fxp_smap);
868 bus_dmamem_free(sc->fxp_stag, sc->fxp_stats, sc->fxp_smap);
869 }
870 if (sc->mcsp) {
871 bus_dmamap_unload(sc->mcs_tag, sc->mcs_map);
872 bus_dmamem_free(sc->mcs_tag, sc->mcsp, sc->mcs_map);
873 }
874 bus_release_resources(sc->dev, sc->fxp_spec, sc->fxp_res);
875 if (sc->fxp_mtag) {
876 for (i = 0; i < FXP_NRFABUFS; i++) {
877 rxp = &sc->fxp_desc.rx_list[i];
878 if (rxp->rx_mbuf != NULL) {
879 bus_dmamap_sync(sc->fxp_mtag, rxp->rx_map,
880 BUS_DMASYNC_POSTREAD);
881 bus_dmamap_unload(sc->fxp_mtag, rxp->rx_map);
882 m_freem(rxp->rx_mbuf);
883 }
884 bus_dmamap_destroy(sc->fxp_mtag, rxp->rx_map);
885 }
886 bus_dmamap_destroy(sc->fxp_mtag, sc->spare_map);
887 for (i = 0; i < FXP_NTXCB; i++) {
888 txp = &sc->fxp_desc.tx_list[i];
889 if (txp->tx_mbuf != NULL) {
890 bus_dmamap_sync(sc->fxp_mtag, txp->tx_map,
891 BUS_DMASYNC_POSTWRITE);
892 bus_dmamap_unload(sc->fxp_mtag, txp->tx_map);
893 m_freem(txp->tx_mbuf);
894 }
895 bus_dmamap_destroy(sc->fxp_mtag, txp->tx_map);
896 }
897 bus_dma_tag_destroy(sc->fxp_mtag);
898 }
899 if (sc->fxp_stag)
900 bus_dma_tag_destroy(sc->fxp_stag);
901 if (sc->cbl_tag)
902 bus_dma_tag_destroy(sc->cbl_tag);
903 if (sc->mcs_tag)
904 bus_dma_tag_destroy(sc->mcs_tag);
905 if (sc->ifp)
906 if_free(sc->ifp);
907
908 mtx_destroy(&sc->sc_mtx);
909}
910
911/*
912 * Detach interface.
913 */
914static int
915fxp_detach(device_t dev)
916{
917 struct fxp_softc *sc = device_get_softc(dev);
918
919#ifdef DEVICE_POLLING
920 if (sc->ifp->if_capenable & IFCAP_POLLING)
921 ether_poll_deregister(sc->ifp);
922#endif
923
924 FXP_LOCK(sc);
925 sc->suspended = 1; /* Do same thing as we do for suspend */
926 /*
927 * Stop DMA and drop transmit queue, but disable interrupts first.
928 */
929 CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_DISABLE);
930 fxp_stop(sc);
931 FXP_UNLOCK(sc);
932 callout_drain(&sc->stat_ch);
933
934 /*
935 * Close down routes etc.
936 */
937 ether_ifdetach(sc->ifp);
938
939 /*
940 * Unhook interrupt before dropping lock. This is to prevent
941 * races with fxp_intr().
942 */
943 bus_teardown_intr(sc->dev, sc->fxp_res[1], sc->ih);
944 sc->ih = NULL;
945
946 /* Release our allocated resources. */
947 fxp_release(sc);
948 return (0);
949}
950
951/*
952 * Device shutdown routine. Called at system shutdown after sync. The
953 * main purpose of this routine is to shut off receiver DMA so that
954 * kernel memory doesn't get clobbered during warmboot.
955 */
956static int
957fxp_shutdown(device_t dev)
958{
959 struct fxp_softc *sc = device_get_softc(dev);
960
961 /*
962 * Make sure that DMA is disabled prior to reboot. Not doing
963 * do could allow DMA to corrupt kernel memory during the
964 * reboot before the driver initializes.
965 */
966 FXP_LOCK(sc);
967 fxp_stop(sc);
968 FXP_UNLOCK(sc);
969 return (0);
970}
971
972/*
973 * Device suspend routine. Stop the interface and save some PCI
974 * settings in case the BIOS doesn't restore them properly on
975 * resume.
976 */
977static int
978fxp_suspend(device_t dev)
979{
980 struct fxp_softc *sc = device_get_softc(dev);
981
982 FXP_LOCK(sc);
983
984 fxp_stop(sc);
985
986 sc->suspended = 1;
987
988 FXP_UNLOCK(sc);
989 return (0);
990}
991
992/*
993 * Device resume routine. re-enable busmastering, and restart the interface if
994 * appropriate.
995 */
996static int
997fxp_resume(device_t dev)
998{
999 struct fxp_softc *sc = device_get_softc(dev);
1000 struct ifnet *ifp = sc->ifp;
1001
1002 FXP_LOCK(sc);
1003
1004 CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET);
1005 DELAY(10);
1006
1007 /* reinitialize interface if necessary */
1008 if (ifp->if_flags & IFF_UP)
1009 fxp_init_body(sc);
1010
1011 sc->suspended = 0;
1012
1013 FXP_UNLOCK(sc);
1014 return (0);
1015}
1016
1017static void
1018fxp_eeprom_shiftin(struct fxp_softc *sc, int data, int length)
1019{
1020 uint16_t reg;
1021 int x;
1022
1023 /*
1024 * Shift in data.
1025 */
1026 for (x = 1 << (length - 1); x; x >>= 1) {
1027 if (data & x)
1028 reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
1029 else
1030 reg = FXP_EEPROM_EECS;
1031 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
1032 DELAY(1);
1033 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg | FXP_EEPROM_EESK);
1034 DELAY(1);
1035 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
1036 DELAY(1);
1037 }
1038}
1039
1040/*
1041 * Read from the serial EEPROM. Basically, you manually shift in
1042 * the read opcode (one bit at a time) and then shift in the address,
1043 * and then you shift out the data (all of this one bit at a time).
1044 * The word size is 16 bits, so you have to provide the address for
1045 * every 16 bits of data.
1046 */
1047static uint16_t
1048fxp_eeprom_getword(struct fxp_softc *sc, int offset, int autosize)
1049{
1050 uint16_t reg, data;
1051 int x;
1052
1053 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
1054 /*
1055 * Shift in read opcode.
1056 */
1057 fxp_eeprom_shiftin(sc, FXP_EEPROM_OPC_READ, 3);
1058 /*
1059 * Shift in address.
1060 */
1061 data = 0;
1062 for (x = 1 << (sc->eeprom_size - 1); x; x >>= 1) {
1063 if (offset & x)
1064 reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
1065 else
1066 reg = FXP_EEPROM_EECS;
1067 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
1068 DELAY(1);
1069 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg | FXP_EEPROM_EESK);
1070 DELAY(1);
1071 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
1072 DELAY(1);
1073 reg = CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO;
1074 data++;
1075 if (autosize && reg == 0) {
1076 sc->eeprom_size = data;
1077 break;
1078 }
1079 }
1080 /*
1081 * Shift out data.
1082 */
1083 data = 0;
1084 reg = FXP_EEPROM_EECS;
1085 for (x = 1 << 15; x; x >>= 1) {
1086 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg | FXP_EEPROM_EESK);
1087 DELAY(1);
1088 if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO)
1089 data |= x;
1090 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
1091 DELAY(1);
1092 }
1093 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
1094 DELAY(1);
1095
1096 return (data);
1097}
1098
1099static void
1100fxp_eeprom_putword(struct fxp_softc *sc, int offset, uint16_t data)
1101{
1102 int i;
1103
1104 /*
1105 * Erase/write enable.
1106 */
1107 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
1108 fxp_eeprom_shiftin(sc, 0x4, 3);
1109 fxp_eeprom_shiftin(sc, 0x03 << (sc->eeprom_size - 2), sc->eeprom_size);
1110 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
1111 DELAY(1);
1112 /*
1113 * Shift in write opcode, address, data.
1114 */
1115 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
1116 fxp_eeprom_shiftin(sc, FXP_EEPROM_OPC_WRITE, 3);
1117 fxp_eeprom_shiftin(sc, offset, sc->eeprom_size);
1118 fxp_eeprom_shiftin(sc, data, 16);
1119 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
1120 DELAY(1);
1121 /*
1122 * Wait for EEPROM to finish up.
1123 */
1124 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
1125 DELAY(1);
1126 for (i = 0; i < 1000; i++) {
1127 if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO)
1128 break;
1129 DELAY(50);
1130 }
1131 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
1132 DELAY(1);
1133 /*
1134 * Erase/write disable.
1135 */
1136 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
1137 fxp_eeprom_shiftin(sc, 0x4, 3);
1138 fxp_eeprom_shiftin(sc, 0, sc->eeprom_size);
1139 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
1140 DELAY(1);
1141}
1142
1143/*
1144 * From NetBSD:
1145 *
1146 * Figure out EEPROM size.
1147 *
1148 * 559's can have either 64-word or 256-word EEPROMs, the 558
1149 * datasheet only talks about 64-word EEPROMs, and the 557 datasheet
1150 * talks about the existance of 16 to 256 word EEPROMs.
1151 *
1152 * The only known sizes are 64 and 256, where the 256 version is used
1153 * by CardBus cards to store CIS information.
1154 *
1155 * The address is shifted in msb-to-lsb, and after the last
1156 * address-bit the EEPROM is supposed to output a `dummy zero' bit,
1157 * after which follows the actual data. We try to detect this zero, by
1158 * probing the data-out bit in the EEPROM control register just after
1159 * having shifted in a bit. If the bit is zero, we assume we've
1160 * shifted enough address bits. The data-out should be tri-state,
1161 * before this, which should translate to a logical one.
1162 */
1163static void
1164fxp_autosize_eeprom(struct fxp_softc *sc)
1165{
1166
1167 /* guess maximum size of 256 words */
1168 sc->eeprom_size = 8;
1169
1170 /* autosize */
1171 (void) fxp_eeprom_getword(sc, 0, 1);
1172}
1173
1174static void
1175fxp_read_eeprom(struct fxp_softc *sc, u_short *data, int offset, int words)
1176{
1177 int i;
1178
1179 for (i = 0; i < words; i++)
1180 data[i] = fxp_eeprom_getword(sc, offset + i, 0);
1181}
1182
1183static void
1184fxp_write_eeprom(struct fxp_softc *sc, u_short *data, int offset, int words)
1185{
1186 int i;
1187
1188 for (i = 0; i < words; i++)
1189 fxp_eeprom_putword(sc, offset + i, data[i]);
1190}
1191
1192/*
1193 * Grab the softc lock and call the real fxp_start_body() routine
1194 */
1195static void
1196fxp_start(struct ifnet *ifp)
1197{
1198 struct fxp_softc *sc = ifp->if_softc;
1199
1200 FXP_LOCK(sc);
1201 fxp_start_body(ifp);
1202 FXP_UNLOCK(sc);
1203}
1204
1205/*
1206 * Start packet transmission on the interface.
1207 * This routine must be called with the softc lock held, and is an
1208 * internal entry point only.
1209 */
1210static void
1211fxp_start_body(struct ifnet *ifp)
1212{
1213 struct fxp_softc *sc = ifp->if_softc;
1214 struct mbuf *mb_head;
1215 int txqueued;
1216
1217 FXP_LOCK_ASSERT(sc, MA_OWNED);
1218
1219 /*
1220 * See if we need to suspend xmit until the multicast filter
1221 * has been reprogrammed (which can only be done at the head
1222 * of the command chain).
1223 */
1224 if (sc->need_mcsetup)
1225 return;
1226
1227 if (sc->tx_queued > FXP_NTXCB_HIWAT)
1228 fxp_txeof(sc);
1229 /*
1230 * We're finished if there is nothing more to add to the list or if
1231 * we're all filled up with buffers to transmit.
1232 * NOTE: One TxCB is reserved to guarantee that fxp_mc_setup() can add
1233 * a NOP command when needed.
1234 */
1235 txqueued = 0;
1236 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
1237 sc->tx_queued < FXP_NTXCB - 1) {
1238
1239 /*
1240 * Grab a packet to transmit.
1241 */
1242 IFQ_DRV_DEQUEUE(&ifp->if_snd, mb_head);
1243 if (mb_head == NULL)
1244 break;
1245
1246 if (fxp_encap(sc, &mb_head)) {
1247 if (mb_head == NULL)
1248 break;
1249 IFQ_DRV_PREPEND(&ifp->if_snd, mb_head);
1250 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
1251 }
1252 txqueued++;
1253 /*
1254 * Pass packet to bpf if there is a listener.
1255 */
1256 BPF_MTAP(ifp, mb_head);
1257 }
1258
1259 /*
1260 * We're finished. If we added to the list, issue a RESUME to get DMA
1261 * going again if suspended.
1262 */
1263 if (txqueued > 0) {
1264 bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
1265 fxp_scb_wait(sc);
1266 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_RESUME);
1267 /*
1268 * Set a 5 second timer just in case we don't hear
1269 * from the card again.
1270 */
1271 sc->watchdog_timer = 5;
1272 }
1273}
1274
1275static int
1276fxp_encap(struct fxp_softc *sc, struct mbuf **m_head)
1277{
1278 struct ifnet *ifp;
1279 struct mbuf *m;
1280 struct fxp_tx *txp;
1281 struct fxp_cb_tx *cbp;
1282 struct tcphdr *tcp;
1278 bus_dma_segment_t segs[FXP_NTXSEG];
1283 bus_dma_segment_t segs[FXP_NTXSEG];
1279 int error, i, nseg;
1284 int error, i, nseg, tcp_payload;
1280
1281 FXP_LOCK_ASSERT(sc, MA_OWNED);
1282 ifp = sc->ifp;
1283
1285
1286 FXP_LOCK_ASSERT(sc, MA_OWNED);
1287 ifp = sc->ifp;
1288
1289 tcp_payload = 0;
1290 tcp = NULL;
1284 /*
1285 * Get pointer to next available tx desc.
1286 */
1287 txp = sc->fxp_desc.tx_last->tx_next;
1288
1289 /*
1290 * A note in Appendix B of the Intel 8255x 10/100 Mbps
1291 * Ethernet Controller Family Open Source Software
1292 * Developer Manual says:
1293 * Using software parsing is only allowed with legal
1294 * TCP/IP or UDP/IP packets.
1295 * ...
1296 * For all other datagrams, hardware parsing must
1297 * be used.
1298 * Software parsing appears to truncate ICMP and
1299 * fragmented UDP packets that contain one to three
1300 * bytes in the second (and final) mbuf of the packet.
1301 */
1302 if (sc->flags & FXP_FLAG_EXT_RFA)
1303 txp->tx_cb->ipcb_ip_activation_high =
1304 FXP_IPCB_HARDWAREPARSING_ENABLE;
1305
1306 m = *m_head;
1307 /*
1308 * Deal with TCP/IP checksum offload. Note that
1309 * in order for TCP checksum offload to work,
1310 * the pseudo header checksum must have already
1311 * been computed and stored in the checksum field
1312 * in the TCP header. The stack should have
1313 * already done this for us.
1314 */
1315 if (m->m_pkthdr.csum_flags & FXP_CSUM_FEATURES) {
1316 txp->tx_cb->ipcb_ip_schedule = FXP_IPCB_TCPUDP_CHECKSUM_ENABLE;
1317 if (m->m_pkthdr.csum_flags & CSUM_TCP)
1318 txp->tx_cb->ipcb_ip_schedule |= FXP_IPCB_TCP_PACKET;
1319
1320#ifdef FXP_IP_CSUM_WAR
1321 /*
1322 * XXX The 82550 chip appears to have trouble
1323 * dealing with IP header checksums in very small
1324 * datagrams, namely fragments from 1 to 3 bytes
1325 * in size. For example, say you want to transmit
1326 * a UDP packet of 1473 bytes. The packet will be
1327 * fragmented over two IP datagrams, the latter
1328 * containing only one byte of data. The 82550 will
1329 * botch the header checksum on the 1-byte fragment.
1330 * As long as the datagram contains 4 or more bytes
1331 * of data, you're ok.
1332 *
1333 * The following code attempts to work around this
1334 * problem: if the datagram is less than 38 bytes
1335 * in size (14 bytes ether header, 20 bytes IP header,
1336 * plus 4 bytes of data), we punt and compute the IP
1337 * header checksum by hand. This workaround doesn't
1338 * work very well, however, since it can be fooled
1339 * by things like VLAN tags and IP options that make
1340 * the header sizes/offsets vary.
1341 */
1342
1343 if (m->m_pkthdr.csum_flags & CSUM_IP) {
1344 if (m->m_pkthdr.len < 38) {
1345 struct ip *ip;
1346 m->m_data += ETHER_HDR_LEN;
1347 ip = mtod(m, struct ip *);
1348 ip->ip_sum = in_cksum(m, ip->ip_hl << 2);
1349 m->m_data -= ETHER_HDR_LEN;
1350 m->m_pkthdr.csum_flags &= ~CSUM_IP;
1351 } else {
1352 txp->tx_cb->ipcb_ip_activation_high =
1353 FXP_IPCB_HARDWAREPARSING_ENABLE;
1354 txp->tx_cb->ipcb_ip_schedule |=
1355 FXP_IPCB_IP_CHECKSUM_ENABLE;
1356 }
1357 }
1358#endif
1359 }
1360
1291 /*
1292 * Get pointer to next available tx desc.
1293 */
1294 txp = sc->fxp_desc.tx_last->tx_next;
1295
1296 /*
1297 * A note in Appendix B of the Intel 8255x 10/100 Mbps
1298 * Ethernet Controller Family Open Source Software
1299 * Developer Manual says:
1300 * Using software parsing is only allowed with legal
1301 * TCP/IP or UDP/IP packets.
1302 * ...
1303 * For all other datagrams, hardware parsing must
1304 * be used.
1305 * Software parsing appears to truncate ICMP and
1306 * fragmented UDP packets that contain one to three
1307 * bytes in the second (and final) mbuf of the packet.
1308 */
1309 if (sc->flags & FXP_FLAG_EXT_RFA)
1310 txp->tx_cb->ipcb_ip_activation_high =
1311 FXP_IPCB_HARDWAREPARSING_ENABLE;
1312
1313 m = *m_head;
1314 /*
1315 * Deal with TCP/IP checksum offload. Note that
1316 * in order for TCP checksum offload to work,
1317 * the pseudo header checksum must have already
1318 * been computed and stored in the checksum field
1319 * in the TCP header. The stack should have
1320 * already done this for us.
1321 */
1322 if (m->m_pkthdr.csum_flags & FXP_CSUM_FEATURES) {
1323 txp->tx_cb->ipcb_ip_schedule = FXP_IPCB_TCPUDP_CHECKSUM_ENABLE;
1324 if (m->m_pkthdr.csum_flags & CSUM_TCP)
1325 txp->tx_cb->ipcb_ip_schedule |= FXP_IPCB_TCP_PACKET;
1326
1327#ifdef FXP_IP_CSUM_WAR
1328 /*
1329 * XXX The 82550 chip appears to have trouble
1330 * dealing with IP header checksums in very small
1331 * datagrams, namely fragments from 1 to 3 bytes
1332 * in size. For example, say you want to transmit
1333 * a UDP packet of 1473 bytes. The packet will be
1334 * fragmented over two IP datagrams, the latter
1335 * containing only one byte of data. The 82550 will
1336 * botch the header checksum on the 1-byte fragment.
1337 * As long as the datagram contains 4 or more bytes
1338 * of data, you're ok.
1339 *
1340 * The following code attempts to work around this
1341 * problem: if the datagram is less than 38 bytes
1342 * in size (14 bytes ether header, 20 bytes IP header,
1343 * plus 4 bytes of data), we punt and compute the IP
1344 * header checksum by hand. This workaround doesn't
1345 * work very well, however, since it can be fooled
1346 * by things like VLAN tags and IP options that make
1347 * the header sizes/offsets vary.
1348 */
1349
1350 if (m->m_pkthdr.csum_flags & CSUM_IP) {
1351 if (m->m_pkthdr.len < 38) {
1352 struct ip *ip;
1353 m->m_data += ETHER_HDR_LEN;
1354 ip = mtod(m, struct ip *);
1355 ip->ip_sum = in_cksum(m, ip->ip_hl << 2);
1356 m->m_data -= ETHER_HDR_LEN;
1357 m->m_pkthdr.csum_flags &= ~CSUM_IP;
1358 } else {
1359 txp->tx_cb->ipcb_ip_activation_high =
1360 FXP_IPCB_HARDWAREPARSING_ENABLE;
1361 txp->tx_cb->ipcb_ip_schedule |=
1362 FXP_IPCB_IP_CHECKSUM_ENABLE;
1363 }
1364 }
1365#endif
1366 }
1367
1368 if (m->m_pkthdr.csum_flags & CSUM_TSO) {
1369 /*
1370 * 82550/82551 requires ethernet/IP/TCP headers must be
1371 * contained in the first active transmit buffer.
1372 */
1373 struct ether_header *eh;
1374 struct ip *ip;
1375 uint32_t ip_off, poff;
1376
1377 if (M_WRITABLE(*m_head) == 0) {
1378 /* Get a writable copy. */
1379 m = m_dup(*m_head, M_DONTWAIT);
1380 m_freem(*m_head);
1381 if (m == NULL) {
1382 *m_head = NULL;
1383 return (ENOBUFS);
1384 }
1385 *m_head = m;
1386 }
1387 ip_off = sizeof(struct ether_header);
1388 m = m_pullup(*m_head, ip_off);
1389 if (m == NULL) {
1390 *m_head = NULL;
1391 return (ENOBUFS);
1392 }
1393 eh = mtod(m, struct ether_header *);
1394 /* Check the existence of VLAN tag. */
1395 if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
1396 ip_off = sizeof(struct ether_vlan_header);
1397 m = m_pullup(m, ip_off);
1398 if (m == NULL) {
1399 *m_head = NULL;
1400 return (ENOBUFS);
1401 }
1402 }
1403 m = m_pullup(m, ip_off + sizeof(struct ip));
1404 if (m == NULL) {
1405 *m_head = NULL;
1406 return (ENOBUFS);
1407 }
1408 ip = (struct ip *)(mtod(m, char *) + ip_off);
1409 poff = ip_off + (ip->ip_hl << 2);
1410 m = m_pullup(m, poff + sizeof(struct tcphdr));
1411 if (m == NULL) {
1412 *m_head = NULL;
1413 return (ENOBUFS);
1414 }
1415 tcp = (struct tcphdr *)(mtod(m, char *) + poff);
1416 m = m_pullup(m, poff + sizeof(struct tcphdr) + tcp->th_off);
1417 if (m == NULL) {
1418 *m_head = NULL;
1419 return (ENOBUFS);
1420 }
1421
1422 /*
1423 * Since 82550/82551 doesn't modify IP length and pseudo
1424 * checksum in the first frame driver should compute it.
1425 */
1426 ip->ip_sum = 0;
1427 ip->ip_len = htons(ifp->if_mtu);
1428 tcp->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
1429 htons(IPPROTO_TCP + (tcp->th_off << 2) +
1430 m->m_pkthdr.tso_segsz));
1431 /* Compute total TCP payload. */
1432 tcp_payload = m->m_pkthdr.len - ip_off - (ip->ip_hl << 2);
1433 tcp_payload -= tcp->th_off << 2;
1434 *m_head = m;
1435 }
1436
1361 error = bus_dmamap_load_mbuf_sg(sc->fxp_mtag, txp->tx_map, *m_head,
1362 segs, &nseg, 0);
1363 if (error == EFBIG) {
1364 m = m_collapse(*m_head, M_DONTWAIT, sc->maxtxseg);
1365 if (m == NULL) {
1366 m_freem(*m_head);
1367 *m_head = NULL;
1368 return (ENOMEM);
1369 }
1370 *m_head = m;
1371 error = bus_dmamap_load_mbuf_sg(sc->fxp_mtag, txp->tx_map,
1372 *m_head, segs, &nseg, 0);
1373 if (error != 0) {
1374 m_freem(*m_head);
1375 *m_head = NULL;
1376 return (ENOMEM);
1377 }
1378 } else if (error != 0)
1379 return (error);
1380 if (nseg == 0) {
1381 m_freem(*m_head);
1382 *m_head = NULL;
1383 return (EIO);
1384 }
1385
1386 KASSERT(nseg <= sc->maxtxseg, ("too many DMA segments"));
1387 bus_dmamap_sync(sc->fxp_mtag, txp->tx_map, BUS_DMASYNC_PREWRITE);
1388
1389 cbp = txp->tx_cb;
1390 for (i = 0; i < nseg; i++) {
1437 error = bus_dmamap_load_mbuf_sg(sc->fxp_mtag, txp->tx_map, *m_head,
1438 segs, &nseg, 0);
1439 if (error == EFBIG) {
1440 m = m_collapse(*m_head, M_DONTWAIT, sc->maxtxseg);
1441 if (m == NULL) {
1442 m_freem(*m_head);
1443 *m_head = NULL;
1444 return (ENOMEM);
1445 }
1446 *m_head = m;
1447 error = bus_dmamap_load_mbuf_sg(sc->fxp_mtag, txp->tx_map,
1448 *m_head, segs, &nseg, 0);
1449 if (error != 0) {
1450 m_freem(*m_head);
1451 *m_head = NULL;
1452 return (ENOMEM);
1453 }
1454 } else if (error != 0)
1455 return (error);
1456 if (nseg == 0) {
1457 m_freem(*m_head);
1458 *m_head = NULL;
1459 return (EIO);
1460 }
1461
1462 KASSERT(nseg <= sc->maxtxseg, ("too many DMA segments"));
1463 bus_dmamap_sync(sc->fxp_mtag, txp->tx_map, BUS_DMASYNC_PREWRITE);
1464
1465 cbp = txp->tx_cb;
1466 for (i = 0; i < nseg; i++) {
1391 KASSERT(segs[i].ds_len <= MCLBYTES, ("segment size too large"));
1392 /*
1393 * If this is an 82550/82551, then we're using extended
1394 * TxCBs _and_ we're using checksum offload. This means
1395 * that the TxCB is really an IPCB. One major difference
1396 * between the two is that with plain extended TxCBs,
1397 * the bottom half of the TxCB contains two entries from
1398 * the TBD array, whereas IPCBs contain just one entry:
1399 * one entry (8 bytes) has been sacrificed for the TCP/IP
1400 * checksum offload control bits. So to make things work
1401 * right, we have to start filling in the TBD array
1402 * starting from a different place depending on whether
1403 * the chip is an 82550/82551 or not.
1404 */
1405 if (sc->flags & FXP_FLAG_EXT_RFA) {
1467 /*
1468 * If this is an 82550/82551, then we're using extended
1469 * TxCBs _and_ we're using checksum offload. This means
1470 * that the TxCB is really an IPCB. One major difference
1471 * between the two is that with plain extended TxCBs,
1472 * the bottom half of the TxCB contains two entries from
1473 * the TBD array, whereas IPCBs contain just one entry:
1474 * one entry (8 bytes) has been sacrificed for the TCP/IP
1475 * checksum offload control bits. So to make things work
1476 * right, we have to start filling in the TBD array
1477 * starting from a different place depending on whether
1478 * the chip is an 82550/82551 or not.
1479 */
1480 if (sc->flags & FXP_FLAG_EXT_RFA) {
1406 cbp->tbd[i + 1].tb_addr = htole32(segs[i].ds_addr);
1407 cbp->tbd[i + 1].tb_size = htole32(segs[i].ds_len);
1481 cbp->tbd[i + 2].tb_addr = htole32(segs[i].ds_addr);
1482 cbp->tbd[i + 2].tb_size = htole32(segs[i].ds_len);
1408 } else {
1409 cbp->tbd[i].tb_addr = htole32(segs[i].ds_addr);
1410 cbp->tbd[i].tb_size = htole32(segs[i].ds_len);
1411 }
1412 }
1483 } else {
1484 cbp->tbd[i].tb_addr = htole32(segs[i].ds_addr);
1485 cbp->tbd[i].tb_size = htole32(segs[i].ds_len);
1486 }
1487 }
1413 cbp->tbd_number = nseg;
1488 if (sc->flags & FXP_FLAG_EXT_RFA) {
1489 /* Configure dynamic TBD for 82550/82551. */
1490 cbp->tbd_number = 0xFF;
1491 cbp->tbd[nseg + 1].tb_size |= htole32(0x8000);
1492 } else
1493 cbp->tbd_number = nseg;
1494 /* Configure TSO. */
1495 if (m->m_pkthdr.csum_flags & CSUM_TSO) {
1496 cbp->tbd[-1].tb_size = htole32(m->m_pkthdr.tso_segsz << 16);
1497 cbp->tbd[1].tb_size = htole32(tcp_payload << 16);
1498 cbp->ipcb_ip_schedule |= FXP_IPCB_LARGESEND_ENABLE |
1499 FXP_IPCB_IP_CHECKSUM_ENABLE |
1500 FXP_IPCB_TCP_PACKET |
1501 FXP_IPCB_TCPUDP_CHECKSUM_ENABLE;
1502 }
1414
1415 txp->tx_mbuf = m;
1416 txp->tx_cb->cb_status = 0;
1417 txp->tx_cb->byte_count = 0;
1418 if (sc->tx_queued != FXP_CXINT_THRESH - 1)
1419 txp->tx_cb->cb_command =
1420 htole16(sc->tx_cmd | FXP_CB_COMMAND_SF |
1421 FXP_CB_COMMAND_S);
1422 else
1423 txp->tx_cb->cb_command =
1424 htole16(sc->tx_cmd | FXP_CB_COMMAND_SF |
1425 FXP_CB_COMMAND_S | FXP_CB_COMMAND_I);
1503
1504 txp->tx_mbuf = m;
1505 txp->tx_cb->cb_status = 0;
1506 txp->tx_cb->byte_count = 0;
1507 if (sc->tx_queued != FXP_CXINT_THRESH - 1)
1508 txp->tx_cb->cb_command =
1509 htole16(sc->tx_cmd | FXP_CB_COMMAND_SF |
1510 FXP_CB_COMMAND_S);
1511 else
1512 txp->tx_cb->cb_command =
1513 htole16(sc->tx_cmd | FXP_CB_COMMAND_SF |
1514 FXP_CB_COMMAND_S | FXP_CB_COMMAND_I);
1426 txp->tx_cb->tx_threshold = tx_threshold;
1515 if ((m->m_pkthdr.csum_flags & CSUM_TSO) == 0)
1516 txp->tx_cb->tx_threshold = tx_threshold;
1427
1428 /*
1429 * Advance the end of list forward.
1430 */
1431
1432#ifdef __alpha__
1433 /*
1434 * On platforms which can't access memory in 16-bit
1435 * granularities, we must prevent the card from DMA'ing
1436 * up the status while we update the command field.
1437 * This could cause us to overwrite the completion status.
1438 * XXX This is probably bogus and we're _not_ looking
1439 * for atomicity here.
1440 */
1441 atomic_clear_16(&sc->fxp_desc.tx_last->tx_cb->cb_command,
1442 htole16(FXP_CB_COMMAND_S));
1443#else
1444 sc->fxp_desc.tx_last->tx_cb->cb_command &= htole16(~FXP_CB_COMMAND_S);
1445#endif /*__alpha__*/
1446 sc->fxp_desc.tx_last = txp;
1447
1448 /*
1449 * Advance the beginning of the list forward if there are
1450 * no other packets queued (when nothing is queued, tx_first
1451 * sits on the last TxCB that was sent out).
1452 */
1453 if (sc->tx_queued == 0)
1454 sc->fxp_desc.tx_first = txp;
1455
1456 sc->tx_queued++;
1457
1458 return (0);
1459}
1460
1461#ifdef DEVICE_POLLING
1462static poll_handler_t fxp_poll;
1463
1464static void
1465fxp_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
1466{
1467 struct fxp_softc *sc = ifp->if_softc;
1468 uint8_t statack;
1469
1470 FXP_LOCK(sc);
1471 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1472 FXP_UNLOCK(sc);
1473 return;
1474 }
1475
1476 statack = FXP_SCB_STATACK_CXTNO | FXP_SCB_STATACK_CNA |
1477 FXP_SCB_STATACK_FR;
1478 if (cmd == POLL_AND_CHECK_STATUS) {
1479 uint8_t tmp;
1480
1481 tmp = CSR_READ_1(sc, FXP_CSR_SCB_STATACK);
1482 if (tmp == 0xff || tmp == 0) {
1483 FXP_UNLOCK(sc);
1484 return; /* nothing to do */
1485 }
1486 tmp &= ~statack;
1487 /* ack what we can */
1488 if (tmp != 0)
1489 CSR_WRITE_1(sc, FXP_CSR_SCB_STATACK, tmp);
1490 statack |= tmp;
1491 }
1492 fxp_intr_body(sc, ifp, statack, count);
1493 FXP_UNLOCK(sc);
1494}
1495#endif /* DEVICE_POLLING */
1496
1497/*
1498 * Process interface interrupts.
1499 */
1500static void
1501fxp_intr(void *xsc)
1502{
1503 struct fxp_softc *sc = xsc;
1504 struct ifnet *ifp = sc->ifp;
1505 uint8_t statack;
1506
1507 FXP_LOCK(sc);
1508 if (sc->suspended) {
1509 FXP_UNLOCK(sc);
1510 return;
1511 }
1512
1513#ifdef DEVICE_POLLING
1514 if (ifp->if_capenable & IFCAP_POLLING) {
1515 FXP_UNLOCK(sc);
1516 return;
1517 }
1518#endif
1519 while ((statack = CSR_READ_1(sc, FXP_CSR_SCB_STATACK)) != 0) {
1520 /*
1521 * It should not be possible to have all bits set; the
1522 * FXP_SCB_INTR_SWI bit always returns 0 on a read. If
1523 * all bits are set, this may indicate that the card has
1524 * been physically ejected, so ignore it.
1525 */
1526 if (statack == 0xff) {
1527 FXP_UNLOCK(sc);
1528 return;
1529 }
1530
1531 /*
1532 * First ACK all the interrupts in this pass.
1533 */
1534 CSR_WRITE_1(sc, FXP_CSR_SCB_STATACK, statack);
1535 fxp_intr_body(sc, ifp, statack, -1);
1536 }
1537 FXP_UNLOCK(sc);
1538}
1539
1540static void
1541fxp_txeof(struct fxp_softc *sc)
1542{
1543 struct ifnet *ifp;
1544 struct fxp_tx *txp;
1545
1546 ifp = sc->ifp;
1547 bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREREAD);
1548 for (txp = sc->fxp_desc.tx_first; sc->tx_queued &&
1549 (le16toh(txp->tx_cb->cb_status) & FXP_CB_STATUS_C) != 0;
1550 txp = txp->tx_next) {
1551 if (txp->tx_mbuf != NULL) {
1552 bus_dmamap_sync(sc->fxp_mtag, txp->tx_map,
1553 BUS_DMASYNC_POSTWRITE);
1554 bus_dmamap_unload(sc->fxp_mtag, txp->tx_map);
1555 m_freem(txp->tx_mbuf);
1556 txp->tx_mbuf = NULL;
1557 /* clear this to reset csum offload bits */
1558 txp->tx_cb->tbd[0].tb_addr = 0;
1559 }
1560 sc->tx_queued--;
1561 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1562 }
1563 sc->fxp_desc.tx_first = txp;
1564 bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
1565 if (sc->tx_queued == 0) {
1566 sc->watchdog_timer = 0;
1567 if (sc->need_mcsetup)
1568 fxp_mc_setup(sc);
1569 }
1570}
1571
/*
 * Translate the controller's receive checksum results into mbuf
 * csum_flags/csum_data for the stack.
 *
 * Controllers without the 82559 extended RX checksum feature report
 * validated IP and TCP/UDP checksums via RFA status bits, which map
 * directly onto csum_flags.  The 82559 scheme instead appends the raw
 * 16-bit checksum it computed to the frame (read from offset 'pos');
 * in that case we parse the Ethernet/IP headers ourselves, bail out
 * on anything we cannot verify (non-IPv4, fragments, short or
 * inconsistent lengths), and adjust the hardware sum for IP options
 * before exporting it in csum_data.
 */
static void
fxp_rxcsum(struct fxp_softc *sc, struct ifnet *ifp, struct mbuf *m,
    uint16_t status, int pos)
{
	struct ether_header *eh;
	struct ip *ip;
	struct udphdr *uh;
	int32_t hlen, len, pktlen, temp32;
	uint16_t csum, *opts;

	if ((sc->flags & FXP_FLAG_82559_RXCSUM) == 0) {
		/* Pre-82559 parse mode: the status bits carry the verdict. */
		if ((status & FXP_RFA_STATUS_PARSE) != 0) {
			if (status & FXP_RFDX_CS_IP_CSUM_BIT_VALID)
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
			if (status & FXP_RFDX_CS_IP_CSUM_VALID)
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
			if ((status & FXP_RFDX_CS_TCPUDP_CSUM_BIT_VALID) &&
			    (status & FXP_RFDX_CS_TCPUDP_CSUM_VALID)) {
				m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
				    CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}
		return;
	}

	/* 82559 mode: verify the headers before trusting the raw sum. */
	pktlen = m->m_pkthdr.len;
	if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
		return;
	eh = mtod(m, struct ether_header *);
	if (eh->ether_type != htons(ETHERTYPE_IP))
		return;
	ip = (struct ip *)(eh + 1);
	if (ip->ip_v != IPVERSION)
		return;

	hlen = ip->ip_hl << 2;
	pktlen -= sizeof(struct ether_header);
	if (hlen < sizeof(struct ip))
		return;
	if (ntohs(ip->ip_len) < hlen)
		return;
	if (ntohs(ip->ip_len) != pktlen)
		return;
	if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
		return;	/* can't handle fragmented packet */

	switch (ip->ip_p) {
	case IPPROTO_TCP:
		if (pktlen < (hlen + sizeof(struct tcphdr)))
			return;
		break;
	case IPPROTO_UDP:
		if (pktlen < (hlen + sizeof(struct udphdr)))
			return;
		uh = (struct udphdr *)((caddr_t)ip + hlen);
		if (uh->uh_sum == 0)
			return;	/* no checksum */
		break;
	default:
		return;
	}
	/* Extract computed checksum. */
	csum = be16dec(mtod(m, char *) + pos);
	/*
	 * Checksum fixup for IP options: one's-complement subtract each
	 * option word from the hardware sum, folding the carry back in.
	 */
	len = hlen - sizeof(struct ip);
	if (len > 0) {
		opts = (uint16_t *)(ip + 1);
		for (; len > 0; len -= sizeof(uint16_t), opts++) {
			temp32 = csum - *opts;
			temp32 = (temp32 >> 16) + (temp32 & 65535);
			csum = temp32 & 65535;
		}
	}
	m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
	m->m_pkthdr.csum_data = csum;
}
1649
/*
 * Common interrupt service work, shared by fxp_intr() and fxp_poll().
 *
 * 'statack' holds the already-acknowledged SCB status bits to service;
 * 'count' bounds the number of received frames handled in one
 * DEVICE_POLLING pass (a negative count means "no limit").  Must be
 * called with the softc lock held; the lock is dropped transiently
 * around if_input().
 */
static void
fxp_intr_body(struct fxp_softc *sc, struct ifnet *ifp, uint8_t statack,
    int count)
{
	struct mbuf *m;
	struct fxp_rx *rxp;
	struct fxp_rfa *rfa;
	int rnr = (statack & FXP_SCB_STATACK_RNR) ? 1 : 0;
	uint16_t status;

	FXP_LOCK_ASSERT(sc, MA_OWNED);
	if (rnr)
		sc->rnr++;
#ifdef DEVICE_POLLING
	/* Pick up a deferred RNR condition if `count' ran out last time. */
	if (sc->flags & FXP_FLAG_DEFERRED_RNR) {
		sc->flags &= ~FXP_FLAG_DEFERRED_RNR;
		rnr = 1;
	}
#endif

	/*
	 * Free any finished transmit mbuf chains.
	 *
	 * Handle the CNA event like a CXTNO event. It used to
	 * be that this event (control unit not ready) was not
	 * encountered, but it is now with the SMPng modifications.
	 * The exact sequence of events that occur when the interface
	 * is brought up are different now, and if this event
	 * goes unhandled, the configuration/rxfilter setup sequence
	 * can stall for several seconds. The result is that no
	 * packets go out onto the wire for about 5 to 10 seconds
	 * after the interface is ifconfig'ed for the first time.
	 */
	if (statack & (FXP_SCB_STATACK_CXTNO | FXP_SCB_STATACK_CNA))
		fxp_txeof(sc);

	/*
	 * Try to start more packets transmitting.
	 */
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		fxp_start_body(ifp);

	/*
	 * Just return if nothing happened on the receive side.
	 */
	if (!rnr && (statack & FXP_SCB_STATACK_FR) == 0)
		return;

	/*
	 * Process receiver interrupts. If a no-resource (RNR)
	 * condition exists, get whatever packets we can and
	 * re-start the receiver.
	 *
	 * When using polling, we do not process the list to completion,
	 * so when we get an RNR interrupt we must defer the restart
	 * until we hit the last buffer with the C bit set.
	 * If we run out of cycles and rfa_headm has the C bit set,
	 * record the pending RNR in the FXP_FLAG_DEFERRED_RNR flag so
	 * that the info will be used in the subsequent polling cycle.
	 */
	for (;;) {
		rxp = sc->fxp_desc.rx_head;
		m = rxp->rx_mbuf;
		/* The RFA lives at the front of the cluster, before the data. */
		rfa = (struct fxp_rfa *)(m->m_ext.ext_buf +
		    RFA_ALIGNMENT_FUDGE);
		bus_dmamap_sync(sc->fxp_mtag, rxp->rx_map,
		    BUS_DMASYNC_POSTREAD);

#ifdef DEVICE_POLLING /* loop at most count times if count >=0 */
		if (count >= 0 && count-- == 0) {
			if (rnr) {
				/* Defer RNR processing until the next time. */
				sc->flags |= FXP_FLAG_DEFERRED_RNR;
				rnr = 0;
			}
			break;
		}
#endif /* DEVICE_POLLING */

		status = le16toh(rfa->rfa_status);
		if ((status & FXP_RFA_STATUS_C) == 0)
			break;

		/*
		 * Advance head forward.
		 */
		sc->fxp_desc.rx_head = rxp->rx_next;

		/*
		 * Add a new buffer to the receive chain.
		 * If this fails, the old buffer is recycled
		 * instead.
		 */
		if (fxp_new_rfabuf(sc, rxp) == 0) {
			int total_len;

			/*
			 * Fetch packet length (the top 2 bits of
			 * actual_size are flags set by the controller
			 * upon completion), and drop the packet in case
			 * of bogus length or CRC errors.
			 */
			total_len = le16toh(rfa->actual_size) & 0x3fff;
			if ((sc->flags & FXP_FLAG_82559_RXCSUM) != 0 &&
			    (ifp->if_capenable & IFCAP_RXCSUM) != 0) {
				/* Adjust for appended checksum bytes. */
				total_len -= 2;
			}
			if (total_len < sizeof(struct ether_header) ||
			    total_len > MCLBYTES - RFA_ALIGNMENT_FUDGE -
			    sc->rfa_size || status & FXP_RFA_STATUS_CRC) {
				m_freem(m);
				continue;
			}

			m->m_pkthdr.len = m->m_len = total_len;
			m->m_pkthdr.rcvif = ifp;

			/* Do IP checksum checking. */
			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
				fxp_rxcsum(sc, ifp, m, status, total_len);
			/*
			 * Drop locks before calling if_input() since it
			 * may re-enter fxp_start() in the netisr case.
			 * This would result in a lock reversal. Better
			 * performance might be obtained by chaining all
			 * packets received, dropping the lock, and then
			 * calling if_input() on each one.
			 */
			FXP_UNLOCK(sc);
			(*ifp->if_input)(ifp, m);
			FXP_LOCK(sc);
		} else {
			/* Reuse RFA and loaded DMA map. */
			ifp->if_iqdrops++;
			fxp_discard_rfabuf(sc, rxp);
		}
		fxp_add_rfabuf(sc, rxp);
	}
	/* Restart the receive unit at the current head after an RNR. */
	if (rnr) {
		fxp_scb_wait(sc);
		CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL,
		    sc->fxp_desc.rx_head->rx_addr);
		fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_START);
	}
}
1797
/*
 * Update packet in/out/collision statistics. The i82557 doesn't
 * allow you to access these counters without doing a fairly
 * expensive DMA to get _all_ of the statistics it maintains, so
 * we do this operation here only once per second. The statistics
 * counters in the kernel are updated from the previous dump-stats
 * DMA and then a new dump-stats DMA is started. The on-chip
 * counters are zeroed when the DMA completes. If we can't start
 * the DMA immediately, we don't wait - we just prepare to read
 * them again next time.
 *
 * Runs from the sc->stat_ch callout once per second with the softc
 * lock held; it also drives TX reclamation and the watchdog.
 */
static void
fxp_tick(void *xsc)
{
	struct fxp_softc *sc = xsc;
	struct ifnet *ifp = sc->ifp;
	struct fxp_stats *sp = sc->fxp_stats;

	FXP_LOCK_ASSERT(sc, MA_OWNED);
	bus_dmamap_sync(sc->fxp_stag, sc->fxp_smap, BUS_DMASYNC_POSTREAD);
	ifp->if_opackets += le32toh(sp->tx_good);
	ifp->if_collisions += le32toh(sp->tx_total_collisions);
	if (sp->rx_good) {
		ifp->if_ipackets += le32toh(sp->rx_good);
		sc->rx_idle_secs = 0;
	} else {
		/*
		 * Receiver's been idle for another second.
		 */
		sc->rx_idle_secs++;
	}
	ifp->if_ierrors +=
	    le32toh(sp->rx_crc_errors) +
	    le32toh(sp->rx_alignment_errors) +
	    le32toh(sp->rx_rnr_errors) +
	    le32toh(sp->rx_overrun_errors);
	/*
	 * If any transmit underruns occurred, bump up the transmit
	 * threshold by another 512 bytes (64 * 8).
	 */
	if (sp->tx_underruns) {
		ifp->if_oerrors += le32toh(sp->tx_underruns);
		if (tx_threshold < 192)
			tx_threshold += 64;
	}

	/*
	 * Release any xmit buffers that have completed DMA. This isn't
	 * strictly necessary to do here, but it's advantageous for mbufs
	 * with external storage to be released in a timely manner rather
	 * than being deferred for a potentially long time. This limits
	 * the delay to a maximum of one second.
	 */
	fxp_txeof(sc);

	/*
	 * If we haven't received any packets in FXP_MAC_RX_IDLE seconds,
	 * then assume the receiver has locked up and attempt to clear
	 * the condition by reprogramming the multicast filter. This is
	 * a work-around for a bug in the 82557 where the receiver locks
	 * up if it gets certain types of garbage in the synchronization
	 * bits prior to the packet header. This bug is supposed to only
	 * occur in 10Mbps mode, but has been seen to occur in 100Mbps
	 * mode as well (perhaps due to a 10/100 speed transition).
	 */
	if (sc->rx_idle_secs > FXP_MAX_RX_IDLE) {
		sc->rx_idle_secs = 0;
		fxp_mc_setup(sc);
	}
	/*
	 * If there is no pending command, start another stats
	 * dump. Otherwise punt for now.
	 */
	if (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) == 0) {
		/*
		 * Start another stats dump.
		 */
		bus_dmamap_sync(sc->fxp_stag, sc->fxp_smap,
		    BUS_DMASYNC_PREREAD);
		fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_DUMPRESET);
	} else {
		/*
		 * A previous command is still waiting to be accepted.
		 * Just zero our copy of the stats and wait for the
		 * next timer event to update them.
		 */
		sp->tx_good = 0;
		sp->tx_underruns = 0;
		sp->tx_total_collisions = 0;

		sp->rx_good = 0;
		sp->rx_crc_errors = 0;
		sp->rx_alignment_errors = 0;
		sp->rx_rnr_errors = 0;
		sp->rx_overrun_errors = 0;
	}
	/* Periodic MII housekeeping (link state etc.), if a PHY is attached. */
	if (sc->miibus != NULL)
		mii_tick(device_get_softc(sc->miibus));

	/*
	 * Check that chip hasn't hung.
	 */
	fxp_watchdog(sc);

	/*
	 * Schedule another timeout one second from now.
	 */
	callout_reset(&sc->stat_ch, hz, fxp_tick, sc);
}
1907
1908/*
1909 * Stop the interface. Cancels the statistics updater and resets
1910 * the interface.
1911 */
1912static void
1913fxp_stop(struct fxp_softc *sc)
1914{
1915 struct ifnet *ifp = sc->ifp;
1916 struct fxp_tx *txp;
1917 int i;
1918
1919 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
1920 sc->watchdog_timer = 0;
1921
1922 /*
1923 * Cancel stats updater.
1924 */
1925 callout_stop(&sc->stat_ch);
1926
1927 /*
1928 * Issue software reset, which also unloads the microcode.
1929 */
1930 sc->flags &= ~FXP_FLAG_UCODE;
1931 CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SOFTWARE_RESET);
1932 DELAY(50);
1933
1934 /*
1935 * Release any xmit buffers.
1936 */
1937 txp = sc->fxp_desc.tx_list;
1938 if (txp != NULL) {
1939 for (i = 0; i < FXP_NTXCB; i++) {
1940 if (txp[i].tx_mbuf != NULL) {
1941 bus_dmamap_sync(sc->fxp_mtag, txp[i].tx_map,
1942 BUS_DMASYNC_POSTWRITE);
1943 bus_dmamap_unload(sc->fxp_mtag, txp[i].tx_map);
1944 m_freem(txp[i].tx_mbuf);
1945 txp[i].tx_mbuf = NULL;
1946 /* clear this to reset csum offload bits */
1947 txp[i].tx_cb->tbd[0].tb_addr = 0;
1948 }
1949 }
1950 }
1951 bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
1952 sc->tx_queued = 0;
1953}
1954
1955/*
1956 * Watchdog/transmission transmit timeout handler. Called when a
1957 * transmission is started on the interface, but no interrupt is
1958 * received before the timeout. This usually indicates that the
1959 * card has wedged for some reason.
1960 */
1961static void
1962fxp_watchdog(struct fxp_softc *sc)
1963{
1964
1965 FXP_LOCK_ASSERT(sc, MA_OWNED);
1966
1967 if (sc->watchdog_timer == 0 || --sc->watchdog_timer)
1968 return;
1969
1970 device_printf(sc->dev, "device timeout\n");
1971 sc->ifp->if_oerrors++;
1972
1973 fxp_init_body(sc);
1974}
1975
/*
 * Locked wrapper around fxp_init_body().  Needed because
 * ether_ioctl() calls if_init(), and taking the mutex there
 * directly would recurse if it were already held.
 */
static void
fxp_init(void *xsc)
{
	struct fxp_softc *sc;

	sc = xsc;
	FXP_LOCK(sc);
	fxp_init_body(sc);
	FXP_UNLOCK(sc);
}
1990
1991/*
1992 * Perform device initialization. This routine must be called with the
1993 * softc lock held.
1994 */
1995static void
1996fxp_init_body(struct fxp_softc *sc)
1997{
1998 struct ifnet *ifp = sc->ifp;
1999 struct fxp_cb_config *cbp;
2000 struct fxp_cb_ias *cb_ias;
2001 struct fxp_cb_tx *tcbp;
2002 struct fxp_tx *txp;
2003 struct fxp_cb_mcs *mcsp;
2004 int i, prm;
2005
2006 FXP_LOCK_ASSERT(sc, MA_OWNED);
2007 /*
2008 * Cancel any pending I/O
2009 */
2010 fxp_stop(sc);
2011
2012 prm = (ifp->if_flags & IFF_PROMISC) ? 1 : 0;
2013
2014 /*
2015 * Initialize base of CBL and RFA memory. Loading with zero
2016 * sets it up for regular linear addressing.
2017 */
2018 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, 0);
2019 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_BASE);
2020
2021 fxp_scb_wait(sc);
2022 fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_BASE);
2023
2024 /*
2025 * Initialize base of dump-stats buffer.
2026 */
2027 fxp_scb_wait(sc);
2028 bus_dmamap_sync(sc->fxp_stag, sc->fxp_smap, BUS_DMASYNC_PREREAD);
2029 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->stats_addr);
2030 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_DUMP_ADR);
2031
2032 /*
2033 * Attempt to load microcode if requested.
2034 */
2035 if (ifp->if_flags & IFF_LINK0 && (sc->flags & FXP_FLAG_UCODE) == 0)
2036 fxp_load_ucode(sc);
2037
2038 /*
2039 * Initialize the multicast address list.
2040 */
2041 if (fxp_mc_addrs(sc)) {
2042 mcsp = sc->mcsp;
2043 mcsp->cb_status = 0;
2044 mcsp->cb_command =
2045 htole16(FXP_CB_COMMAND_MCAS | FXP_CB_COMMAND_EL);
2046 mcsp->link_addr = 0xffffffff;
2047 /*
2048 * Start the multicast setup command.
2049 */
2050 fxp_scb_wait(sc);
2051 bus_dmamap_sync(sc->mcs_tag, sc->mcs_map, BUS_DMASYNC_PREWRITE);
2052 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->mcs_addr);
2053 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
2054 /* ...and wait for it to complete. */
2055 fxp_dma_wait(sc, &mcsp->cb_status, sc->mcs_tag, sc->mcs_map);
2056 bus_dmamap_sync(sc->mcs_tag, sc->mcs_map,
2057 BUS_DMASYNC_POSTWRITE);
2058 }
2059
2060 /*
2061 * We temporarily use memory that contains the TxCB list to
2062 * construct the config CB. The TxCB list memory is rebuilt
2063 * later.
2064 */
2065 cbp = (struct fxp_cb_config *)sc->fxp_desc.cbl_list;
2066
2067 /*
2068 * This bcopy is kind of disgusting, but there are a bunch of must be
2069 * zero and must be one bits in this structure and this is the easiest
2070 * way to initialize them all to proper values.
2071 */
2072 bcopy(fxp_cb_config_template, cbp, sizeof(fxp_cb_config_template));
2073
2074 cbp->cb_status = 0;
2075 cbp->cb_command = htole16(FXP_CB_COMMAND_CONFIG |
2076 FXP_CB_COMMAND_EL);
2077 cbp->link_addr = 0xffffffff; /* (no) next command */
2078 cbp->byte_count = sc->flags & FXP_FLAG_EXT_RFA ? 32 : 22;
2079 cbp->rx_fifo_limit = 8; /* rx fifo threshold (32 bytes) */
2080 cbp->tx_fifo_limit = 0; /* tx fifo threshold (0 bytes) */
2081 cbp->adaptive_ifs = 0; /* (no) adaptive interframe spacing */
2082 cbp->mwi_enable = sc->flags & FXP_FLAG_MWI_ENABLE ? 1 : 0;
2083 cbp->type_enable = 0; /* actually reserved */
2084 cbp->read_align_en = sc->flags & FXP_FLAG_READ_ALIGN ? 1 : 0;
2085 cbp->end_wr_on_cl = sc->flags & FXP_FLAG_WRITE_ALIGN ? 1 : 0;
2086 cbp->rx_dma_bytecount = 0; /* (no) rx DMA max */
2087 cbp->tx_dma_bytecount = 0; /* (no) tx DMA max */
2088 cbp->dma_mbce = 0; /* (disable) dma max counters */
2089 cbp->late_scb = 0; /* (don't) defer SCB update */
2090 cbp->direct_dma_dis = 1; /* disable direct rcv dma mode */
2091 cbp->tno_int_or_tco_en =0; /* (disable) tx not okay interrupt */
2092 cbp->ci_int = 1; /* interrupt on CU idle */
2093 cbp->ext_txcb_dis = sc->flags & FXP_FLAG_EXT_TXCB ? 0 : 1;
2094 cbp->ext_stats_dis = 1; /* disable extended counters */
2095 cbp->keep_overrun_rx = 0; /* don't pass overrun frames to host */
2096 cbp->save_bf = sc->flags & FXP_FLAG_SAVE_BAD ? 1 : prm;
2097 cbp->disc_short_rx = !prm; /* discard short packets */
2098 cbp->underrun_retry = 1; /* retry mode (once) on DMA underrun */
2099 cbp->two_frames = 0; /* do not limit FIFO to 2 frames */
1517
1518 /*
1519 * Advance the end of list forward.
1520 */
1521
1522#ifdef __alpha__
1523 /*
1524 * On platforms which can't access memory in 16-bit
1525 * granularities, we must prevent the card from DMA'ing
1526 * up the status while we update the command field.
1527 * This could cause us to overwrite the completion status.
1528 * XXX This is probably bogus and we're _not_ looking
1529 * for atomicity here.
1530 */
1531 atomic_clear_16(&sc->fxp_desc.tx_last->tx_cb->cb_command,
1532 htole16(FXP_CB_COMMAND_S));
1533#else
1534 sc->fxp_desc.tx_last->tx_cb->cb_command &= htole16(~FXP_CB_COMMAND_S);
1535#endif /*__alpha__*/
1536 sc->fxp_desc.tx_last = txp;
1537
1538 /*
1539 * Advance the beginning of the list forward if there are
1540 * no other packets queued (when nothing is queued, tx_first
1541 * sits on the last TxCB that was sent out).
1542 */
1543 if (sc->tx_queued == 0)
1544 sc->fxp_desc.tx_first = txp;
1545
1546 sc->tx_queued++;
1547
1548 return (0);
1549}
1550
#ifdef DEVICE_POLLING
static poll_handler_t fxp_poll;

/*
 * DEVICE_POLLING entry point: service up to 'count' received frames
 * plus TX completions without relying on interrupts.  For
 * POLL_AND_CHECK_STATUS passes, also pick up and acknowledge any
 * status bits beyond the ones we always service.
 */
static void
fxp_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct fxp_softc *sc = ifp->if_softc;
	uint8_t mask;

	FXP_LOCK(sc);
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		FXP_UNLOCK(sc);
		return;
	}

	/* Status bits serviced on every poll pass. */
	mask = FXP_SCB_STATACK_CXTNO | FXP_SCB_STATACK_CNA |
	    FXP_SCB_STATACK_FR;
	if (cmd == POLL_AND_CHECK_STATUS) {
		uint8_t pending;

		pending = CSR_READ_1(sc, FXP_CSR_SCB_STATACK);
		if (pending == 0xff || pending == 0) {
			FXP_UNLOCK(sc);
			return; /* nothing to do */
		}
		/* ack what we can */
		pending &= ~mask;
		if (pending != 0)
			CSR_WRITE_1(sc, FXP_CSR_SCB_STATACK, pending);
		mask |= pending;
	}
	fxp_intr_body(sc, ifp, mask, count);
	FXP_UNLOCK(sc);
}
#endif /* DEVICE_POLLING */
1586
1587/*
1588 * Process interface interrupts.
1589 */
1590static void
1591fxp_intr(void *xsc)
1592{
1593 struct fxp_softc *sc = xsc;
1594 struct ifnet *ifp = sc->ifp;
1595 uint8_t statack;
1596
1597 FXP_LOCK(sc);
1598 if (sc->suspended) {
1599 FXP_UNLOCK(sc);
1600 return;
1601 }
1602
1603#ifdef DEVICE_POLLING
1604 if (ifp->if_capenable & IFCAP_POLLING) {
1605 FXP_UNLOCK(sc);
1606 return;
1607 }
1608#endif
1609 while ((statack = CSR_READ_1(sc, FXP_CSR_SCB_STATACK)) != 0) {
1610 /*
1611 * It should not be possible to have all bits set; the
1612 * FXP_SCB_INTR_SWI bit always returns 0 on a read. If
1613 * all bits are set, this may indicate that the card has
1614 * been physically ejected, so ignore it.
1615 */
1616 if (statack == 0xff) {
1617 FXP_UNLOCK(sc);
1618 return;
1619 }
1620
1621 /*
1622 * First ACK all the interrupts in this pass.
1623 */
1624 CSR_WRITE_1(sc, FXP_CSR_SCB_STATACK, statack);
1625 fxp_intr_body(sc, ifp, statack, -1);
1626 }
1627 FXP_UNLOCK(sc);
1628}
1629
1630static void
1631fxp_txeof(struct fxp_softc *sc)
1632{
1633 struct ifnet *ifp;
1634 struct fxp_tx *txp;
1635
1636 ifp = sc->ifp;
1637 bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREREAD);
1638 for (txp = sc->fxp_desc.tx_first; sc->tx_queued &&
1639 (le16toh(txp->tx_cb->cb_status) & FXP_CB_STATUS_C) != 0;
1640 txp = txp->tx_next) {
1641 if (txp->tx_mbuf != NULL) {
1642 bus_dmamap_sync(sc->fxp_mtag, txp->tx_map,
1643 BUS_DMASYNC_POSTWRITE);
1644 bus_dmamap_unload(sc->fxp_mtag, txp->tx_map);
1645 m_freem(txp->tx_mbuf);
1646 txp->tx_mbuf = NULL;
1647 /* clear this to reset csum offload bits */
1648 txp->tx_cb->tbd[0].tb_addr = 0;
1649 }
1650 sc->tx_queued--;
1651 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
1652 }
1653 sc->fxp_desc.tx_first = txp;
1654 bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
1655 if (sc->tx_queued == 0) {
1656 sc->watchdog_timer = 0;
1657 if (sc->need_mcsetup)
1658 fxp_mc_setup(sc);
1659 }
1660}
1661
/*
 * Translate the controller's receive checksum results into mbuf
 * csum_flags/csum_data for the stack.
 *
 * Controllers without the 82559 extended RX checksum feature report
 * validated IP and TCP/UDP checksums via RFA status bits, which map
 * directly onto csum_flags.  The 82559 scheme instead appends the raw
 * 16-bit checksum it computed to the frame (read from offset 'pos');
 * in that case we parse the Ethernet/IP headers ourselves, bail out
 * on anything we cannot verify (non-IPv4, fragments, short or
 * inconsistent lengths), and adjust the hardware sum for IP options
 * before exporting it in csum_data.
 */
static void
fxp_rxcsum(struct fxp_softc *sc, struct ifnet *ifp, struct mbuf *m,
    uint16_t status, int pos)
{
	struct ether_header *eh;
	struct ip *ip;
	struct udphdr *uh;
	int32_t hlen, len, pktlen, temp32;
	uint16_t csum, *opts;

	if ((sc->flags & FXP_FLAG_82559_RXCSUM) == 0) {
		/* Pre-82559 parse mode: the status bits carry the verdict. */
		if ((status & FXP_RFA_STATUS_PARSE) != 0) {
			if (status & FXP_RFDX_CS_IP_CSUM_BIT_VALID)
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
			if (status & FXP_RFDX_CS_IP_CSUM_VALID)
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
			if ((status & FXP_RFDX_CS_TCPUDP_CSUM_BIT_VALID) &&
			    (status & FXP_RFDX_CS_TCPUDP_CSUM_VALID)) {
				m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
				    CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}
		return;
	}

	/* 82559 mode: verify the headers before trusting the raw sum. */
	pktlen = m->m_pkthdr.len;
	if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
		return;
	eh = mtod(m, struct ether_header *);
	if (eh->ether_type != htons(ETHERTYPE_IP))
		return;
	ip = (struct ip *)(eh + 1);
	if (ip->ip_v != IPVERSION)
		return;

	hlen = ip->ip_hl << 2;
	pktlen -= sizeof(struct ether_header);
	if (hlen < sizeof(struct ip))
		return;
	if (ntohs(ip->ip_len) < hlen)
		return;
	if (ntohs(ip->ip_len) != pktlen)
		return;
	if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
		return;	/* can't handle fragmented packet */

	switch (ip->ip_p) {
	case IPPROTO_TCP:
		if (pktlen < (hlen + sizeof(struct tcphdr)))
			return;
		break;
	case IPPROTO_UDP:
		if (pktlen < (hlen + sizeof(struct udphdr)))
			return;
		uh = (struct udphdr *)((caddr_t)ip + hlen);
		if (uh->uh_sum == 0)
			return;	/* no checksum */
		break;
	default:
		return;
	}
	/* Extract computed checksum. */
	csum = be16dec(mtod(m, char *) + pos);
	/*
	 * Checksum fixup for IP options: one's-complement subtract each
	 * option word from the hardware sum, folding the carry back in.
	 */
	len = hlen - sizeof(struct ip);
	if (len > 0) {
		opts = (uint16_t *)(ip + 1);
		for (; len > 0; len -= sizeof(uint16_t), opts++) {
			temp32 = csum - *opts;
			temp32 = (temp32 >> 16) + (temp32 & 65535);
			csum = temp32 & 65535;
		}
	}
	m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
	m->m_pkthdr.csum_data = csum;
}
1739
/*
 * Common interrupt service work, shared by fxp_intr() and fxp_poll().
 *
 * 'statack' holds the already-acknowledged SCB status bits to service;
 * 'count' bounds the number of received frames handled in one
 * DEVICE_POLLING pass (a negative count means "no limit").  Must be
 * called with the softc lock held; the lock is dropped transiently
 * around if_input().
 */
static void
fxp_intr_body(struct fxp_softc *sc, struct ifnet *ifp, uint8_t statack,
    int count)
{
	struct mbuf *m;
	struct fxp_rx *rxp;
	struct fxp_rfa *rfa;
	int rnr = (statack & FXP_SCB_STATACK_RNR) ? 1 : 0;
	uint16_t status;

	FXP_LOCK_ASSERT(sc, MA_OWNED);
	if (rnr)
		sc->rnr++;
#ifdef DEVICE_POLLING
	/* Pick up a deferred RNR condition if `count' ran out last time. */
	if (sc->flags & FXP_FLAG_DEFERRED_RNR) {
		sc->flags &= ~FXP_FLAG_DEFERRED_RNR;
		rnr = 1;
	}
#endif

	/*
	 * Free any finished transmit mbuf chains.
	 *
	 * Handle the CNA event like a CXTNO event. It used to
	 * be that this event (control unit not ready) was not
	 * encountered, but it is now with the SMPng modifications.
	 * The exact sequence of events that occur when the interface
	 * is brought up are different now, and if this event
	 * goes unhandled, the configuration/rxfilter setup sequence
	 * can stall for several seconds. The result is that no
	 * packets go out onto the wire for about 5 to 10 seconds
	 * after the interface is ifconfig'ed for the first time.
	 */
	if (statack & (FXP_SCB_STATACK_CXTNO | FXP_SCB_STATACK_CNA))
		fxp_txeof(sc);

	/*
	 * Try to start more packets transmitting.
	 */
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		fxp_start_body(ifp);

	/*
	 * Just return if nothing happened on the receive side.
	 */
	if (!rnr && (statack & FXP_SCB_STATACK_FR) == 0)
		return;

	/*
	 * Process receiver interrupts. If a no-resource (RNR)
	 * condition exists, get whatever packets we can and
	 * re-start the receiver.
	 *
	 * When using polling, we do not process the list to completion,
	 * so when we get an RNR interrupt we must defer the restart
	 * until we hit the last buffer with the C bit set.
	 * If we run out of cycles and rfa_headm has the C bit set,
	 * record the pending RNR in the FXP_FLAG_DEFERRED_RNR flag so
	 * that the info will be used in the subsequent polling cycle.
	 */
	for (;;) {
		rxp = sc->fxp_desc.rx_head;
		m = rxp->rx_mbuf;
		/* The RFA lives at the front of the cluster, before the data. */
		rfa = (struct fxp_rfa *)(m->m_ext.ext_buf +
		    RFA_ALIGNMENT_FUDGE);
		bus_dmamap_sync(sc->fxp_mtag, rxp->rx_map,
		    BUS_DMASYNC_POSTREAD);

#ifdef DEVICE_POLLING /* loop at most count times if count >=0 */
		if (count >= 0 && count-- == 0) {
			if (rnr) {
				/* Defer RNR processing until the next time. */
				sc->flags |= FXP_FLAG_DEFERRED_RNR;
				rnr = 0;
			}
			break;
		}
#endif /* DEVICE_POLLING */

		status = le16toh(rfa->rfa_status);
		if ((status & FXP_RFA_STATUS_C) == 0)
			break;

		/*
		 * Advance head forward.
		 */
		sc->fxp_desc.rx_head = rxp->rx_next;

		/*
		 * Add a new buffer to the receive chain.
		 * If this fails, the old buffer is recycled
		 * instead.
		 */
		if (fxp_new_rfabuf(sc, rxp) == 0) {
			int total_len;

			/*
			 * Fetch packet length (the top 2 bits of
			 * actual_size are flags set by the controller
			 * upon completion), and drop the packet in case
			 * of bogus length or CRC errors.
			 */
			total_len = le16toh(rfa->actual_size) & 0x3fff;
			if ((sc->flags & FXP_FLAG_82559_RXCSUM) != 0 &&
			    (ifp->if_capenable & IFCAP_RXCSUM) != 0) {
				/* Adjust for appended checksum bytes. */
				total_len -= 2;
			}
			if (total_len < sizeof(struct ether_header) ||
			    total_len > MCLBYTES - RFA_ALIGNMENT_FUDGE -
			    sc->rfa_size || status & FXP_RFA_STATUS_CRC) {
				m_freem(m);
				continue;
			}

			m->m_pkthdr.len = m->m_len = total_len;
			m->m_pkthdr.rcvif = ifp;

			/* Do IP checksum checking. */
			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
				fxp_rxcsum(sc, ifp, m, status, total_len);
			/*
			 * Drop locks before calling if_input() since it
			 * may re-enter fxp_start() in the netisr case.
			 * This would result in a lock reversal. Better
			 * performance might be obtained by chaining all
			 * packets received, dropping the lock, and then
			 * calling if_input() on each one.
			 */
			FXP_UNLOCK(sc);
			(*ifp->if_input)(ifp, m);
			FXP_LOCK(sc);
		} else {
			/* Reuse RFA and loaded DMA map. */
			ifp->if_iqdrops++;
			fxp_discard_rfabuf(sc, rxp);
		}
		fxp_add_rfabuf(sc, rxp);
	}
	/* Restart the receive unit at the current head after an RNR. */
	if (rnr) {
		fxp_scb_wait(sc);
		CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL,
		    sc->fxp_desc.rx_head->rx_addr);
		fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_START);
	}
}
1887
/*
 * Update packet in/out/collision statistics. The i82557 doesn't
 * allow you to access these counters without doing a fairly
 * expensive DMA to get _all_ of the statistics it maintains, so
 * we do this operation here only once per second. The statistics
 * counters in the kernel are updated from the previous dump-stats
 * DMA and then a new dump-stats DMA is started. The on-chip
 * counters are zeroed when the DMA completes. If we can't start
 * the DMA immediately, we don't wait - we just prepare to read
 * them again next time.
 *
 * Runs from the sc->stat_ch callout once per second with the softc
 * lock held; it also drives TX reclamation and the watchdog.
 */
static void
fxp_tick(void *xsc)
{
	struct fxp_softc *sc = xsc;
	struct ifnet *ifp = sc->ifp;
	struct fxp_stats *sp = sc->fxp_stats;

	FXP_LOCK_ASSERT(sc, MA_OWNED);
	bus_dmamap_sync(sc->fxp_stag, sc->fxp_smap, BUS_DMASYNC_POSTREAD);
	ifp->if_opackets += le32toh(sp->tx_good);
	ifp->if_collisions += le32toh(sp->tx_total_collisions);
	if (sp->rx_good) {
		ifp->if_ipackets += le32toh(sp->rx_good);
		sc->rx_idle_secs = 0;
	} else {
		/*
		 * Receiver's been idle for another second.
		 */
		sc->rx_idle_secs++;
	}
	ifp->if_ierrors +=
	    le32toh(sp->rx_crc_errors) +
	    le32toh(sp->rx_alignment_errors) +
	    le32toh(sp->rx_rnr_errors) +
	    le32toh(sp->rx_overrun_errors);
	/*
	 * If any transmit underruns occurred, bump up the transmit
	 * threshold by another 512 bytes (64 * 8).
	 */
	if (sp->tx_underruns) {
		ifp->if_oerrors += le32toh(sp->tx_underruns);
		if (tx_threshold < 192)
			tx_threshold += 64;
	}

	/*
	 * Release any xmit buffers that have completed DMA. This isn't
	 * strictly necessary to do here, but it's advantageous for mbufs
	 * with external storage to be released in a timely manner rather
	 * than being deferred for a potentially long time. This limits
	 * the delay to a maximum of one second.
	 */
	fxp_txeof(sc);

	/*
	 * If we haven't received any packets in FXP_MAC_RX_IDLE seconds,
	 * then assume the receiver has locked up and attempt to clear
	 * the condition by reprogramming the multicast filter. This is
	 * a work-around for a bug in the 82557 where the receiver locks
	 * up if it gets certain types of garbage in the synchronization
	 * bits prior to the packet header. This bug is supposed to only
	 * occur in 10Mbps mode, but has been seen to occur in 100Mbps
	 * mode as well (perhaps due to a 10/100 speed transition).
	 */
	if (sc->rx_idle_secs > FXP_MAX_RX_IDLE) {
		sc->rx_idle_secs = 0;
		fxp_mc_setup(sc);
	}
	/*
	 * If there is no pending command, start another stats
	 * dump. Otherwise punt for now.
	 */
	if (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) == 0) {
		/*
		 * Start another stats dump.
		 */
		bus_dmamap_sync(sc->fxp_stag, sc->fxp_smap,
		    BUS_DMASYNC_PREREAD);
		fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_DUMPRESET);
	} else {
		/*
		 * A previous command is still waiting to be accepted.
		 * Just zero our copy of the stats and wait for the
		 * next timer event to update them.
		 */
		sp->tx_good = 0;
		sp->tx_underruns = 0;
		sp->tx_total_collisions = 0;

		sp->rx_good = 0;
		sp->rx_crc_errors = 0;
		sp->rx_alignment_errors = 0;
		sp->rx_rnr_errors = 0;
		sp->rx_overrun_errors = 0;
	}
	/* Periodic MII housekeeping (link state etc.), if a PHY is attached. */
	if (sc->miibus != NULL)
		mii_tick(device_get_softc(sc->miibus));

	/*
	 * Check that chip hasn't hung.
	 */
	fxp_watchdog(sc);

	/*
	 * Schedule another timeout one second from now.
	 */
	callout_reset(&sc->stat_ch, hz, fxp_tick, sc);
}
1997
1998/*
1999 * Stop the interface. Cancels the statistics updater and resets
2000 * the interface.
2001 */
2002static void
2003fxp_stop(struct fxp_softc *sc)
2004{
2005 struct ifnet *ifp = sc->ifp;
2006 struct fxp_tx *txp;
2007 int i;
2008
2009 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
2010 sc->watchdog_timer = 0;
2011
2012 /*
2013 * Cancel stats updater.
2014 */
2015 callout_stop(&sc->stat_ch);
2016
2017 /*
2018 * Issue software reset, which also unloads the microcode.
2019 */
2020 sc->flags &= ~FXP_FLAG_UCODE;
2021 CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SOFTWARE_RESET);
2022 DELAY(50);
2023
2024 /*
2025 * Release any xmit buffers.
2026 */
2027 txp = sc->fxp_desc.tx_list;
2028 if (txp != NULL) {
2029 for (i = 0; i < FXP_NTXCB; i++) {
2030 if (txp[i].tx_mbuf != NULL) {
2031 bus_dmamap_sync(sc->fxp_mtag, txp[i].tx_map,
2032 BUS_DMASYNC_POSTWRITE);
2033 bus_dmamap_unload(sc->fxp_mtag, txp[i].tx_map);
2034 m_freem(txp[i].tx_mbuf);
2035 txp[i].tx_mbuf = NULL;
2036 /* clear this to reset csum offload bits */
2037 txp[i].tx_cb->tbd[0].tb_addr = 0;
2038 }
2039 }
2040 }
2041 bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
2042 sc->tx_queued = 0;
2043}
2044
2045/*
2046 * Watchdog/transmission transmit timeout handler. Called when a
2047 * transmission is started on the interface, but no interrupt is
2048 * received before the timeout. This usually indicates that the
2049 * card has wedged for some reason.
2050 */
2051static void
2052fxp_watchdog(struct fxp_softc *sc)
2053{
2054
2055 FXP_LOCK_ASSERT(sc, MA_OWNED);
2056
2057 if (sc->watchdog_timer == 0 || --sc->watchdog_timer)
2058 return;
2059
2060 device_printf(sc->dev, "device timeout\n");
2061 sc->ifp->if_oerrors++;
2062
2063 fxp_init_body(sc);
2064}
2065
2066/*
2067 * Acquire locks and then call the real initialization function. This
2068 * is necessary because ether_ioctl() calls if_init() and this would
2069 * result in mutex recursion if the mutex was held.
2070 */
2071static void
2072fxp_init(void *xsc)
2073{
2074 struct fxp_softc *sc = xsc;
2075
2076 FXP_LOCK(sc);
2077 fxp_init_body(sc);
2078 FXP_UNLOCK(sc);
2079}
2080
2081/*
2082 * Perform device initialization. This routine must be called with the
2083 * softc lock held.
2084 */
2085static void
2086fxp_init_body(struct fxp_softc *sc)
2087{
2088 struct ifnet *ifp = sc->ifp;
2089 struct fxp_cb_config *cbp;
2090 struct fxp_cb_ias *cb_ias;
2091 struct fxp_cb_tx *tcbp;
2092 struct fxp_tx *txp;
2093 struct fxp_cb_mcs *mcsp;
2094 int i, prm;
2095
2096 FXP_LOCK_ASSERT(sc, MA_OWNED);
2097 /*
2098 * Cancel any pending I/O
2099 */
2100 fxp_stop(sc);
2101
2102 prm = (ifp->if_flags & IFF_PROMISC) ? 1 : 0;
2103
2104 /*
2105 * Initialize base of CBL and RFA memory. Loading with zero
2106 * sets it up for regular linear addressing.
2107 */
2108 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, 0);
2109 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_BASE);
2110
2111 fxp_scb_wait(sc);
2112 fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_BASE);
2113
2114 /*
2115 * Initialize base of dump-stats buffer.
2116 */
2117 fxp_scb_wait(sc);
2118 bus_dmamap_sync(sc->fxp_stag, sc->fxp_smap, BUS_DMASYNC_PREREAD);
2119 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->stats_addr);
2120 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_DUMP_ADR);
2121
2122 /*
2123 * Attempt to load microcode if requested.
2124 */
2125 if (ifp->if_flags & IFF_LINK0 && (sc->flags & FXP_FLAG_UCODE) == 0)
2126 fxp_load_ucode(sc);
2127
2128 /*
2129 * Initialize the multicast address list.
2130 */
2131 if (fxp_mc_addrs(sc)) {
2132 mcsp = sc->mcsp;
2133 mcsp->cb_status = 0;
2134 mcsp->cb_command =
2135 htole16(FXP_CB_COMMAND_MCAS | FXP_CB_COMMAND_EL);
2136 mcsp->link_addr = 0xffffffff;
2137 /*
2138 * Start the multicast setup command.
2139 */
2140 fxp_scb_wait(sc);
2141 bus_dmamap_sync(sc->mcs_tag, sc->mcs_map, BUS_DMASYNC_PREWRITE);
2142 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->mcs_addr);
2143 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
2144 /* ...and wait for it to complete. */
2145 fxp_dma_wait(sc, &mcsp->cb_status, sc->mcs_tag, sc->mcs_map);
2146 bus_dmamap_sync(sc->mcs_tag, sc->mcs_map,
2147 BUS_DMASYNC_POSTWRITE);
2148 }
2149
2150 /*
2151 * We temporarily use memory that contains the TxCB list to
2152 * construct the config CB. The TxCB list memory is rebuilt
2153 * later.
2154 */
2155 cbp = (struct fxp_cb_config *)sc->fxp_desc.cbl_list;
2156
2157 /*
2158 * This bcopy is kind of disgusting, but there are a bunch of must be
2159 * zero and must be one bits in this structure and this is the easiest
2160 * way to initialize them all to proper values.
2161 */
2162 bcopy(fxp_cb_config_template, cbp, sizeof(fxp_cb_config_template));
2163
2164 cbp->cb_status = 0;
2165 cbp->cb_command = htole16(FXP_CB_COMMAND_CONFIG |
2166 FXP_CB_COMMAND_EL);
2167 cbp->link_addr = 0xffffffff; /* (no) next command */
2168 cbp->byte_count = sc->flags & FXP_FLAG_EXT_RFA ? 32 : 22;
2169 cbp->rx_fifo_limit = 8; /* rx fifo threshold (32 bytes) */
2170 cbp->tx_fifo_limit = 0; /* tx fifo threshold (0 bytes) */
2171 cbp->adaptive_ifs = 0; /* (no) adaptive interframe spacing */
2172 cbp->mwi_enable = sc->flags & FXP_FLAG_MWI_ENABLE ? 1 : 0;
2173 cbp->type_enable = 0; /* actually reserved */
2174 cbp->read_align_en = sc->flags & FXP_FLAG_READ_ALIGN ? 1 : 0;
2175 cbp->end_wr_on_cl = sc->flags & FXP_FLAG_WRITE_ALIGN ? 1 : 0;
2176 cbp->rx_dma_bytecount = 0; /* (no) rx DMA max */
2177 cbp->tx_dma_bytecount = 0; /* (no) tx DMA max */
2178 cbp->dma_mbce = 0; /* (disable) dma max counters */
2179 cbp->late_scb = 0; /* (don't) defer SCB update */
2180 cbp->direct_dma_dis = 1; /* disable direct rcv dma mode */
2181 cbp->tno_int_or_tco_en =0; /* (disable) tx not okay interrupt */
2182 cbp->ci_int = 1; /* interrupt on CU idle */
2183 cbp->ext_txcb_dis = sc->flags & FXP_FLAG_EXT_TXCB ? 0 : 1;
2184 cbp->ext_stats_dis = 1; /* disable extended counters */
2185 cbp->keep_overrun_rx = 0; /* don't pass overrun frames to host */
2186 cbp->save_bf = sc->flags & FXP_FLAG_SAVE_BAD ? 1 : prm;
2187 cbp->disc_short_rx = !prm; /* discard short packets */
2188 cbp->underrun_retry = 1; /* retry mode (once) on DMA underrun */
2189 cbp->two_frames = 0; /* do not limit FIFO to 2 frames */
2100 cbp->dyn_tbd = 0; /* (no) dynamic TBD mode */
2190 cbp->dyn_tbd = sc->flags & FXP_FLAG_EXT_RFA ? 1 : 0;
2101 cbp->ext_rfa = sc->flags & FXP_FLAG_EXT_RFA ? 1 : 0;
2102 cbp->mediatype = sc->flags & FXP_FLAG_SERIAL_MEDIA ? 0 : 1;
2103 cbp->csma_dis = 0; /* (don't) disable link */
2104 cbp->tcp_udp_cksum = ((sc->flags & FXP_FLAG_82559_RXCSUM) != 0 &&
2105 (ifp->if_capenable & IFCAP_RXCSUM) != 0) ? 1 : 0;
2106 cbp->vlan_tco = 0; /* (don't) enable vlan wakeup */
2107 cbp->link_wake_en = 0; /* (don't) assert PME# on link change */
2108 cbp->arp_wake_en = 0; /* (don't) assert PME# on arp */
2109 cbp->mc_wake_en = 0; /* (don't) enable PME# on mcmatch */
2110 cbp->nsai = 1; /* (don't) disable source addr insert */
2111 cbp->preamble_length = 2; /* (7 byte) preamble */
2112 cbp->loopback = 0; /* (don't) loopback */
2113 cbp->linear_priority = 0; /* (normal CSMA/CD operation) */
2114 cbp->linear_pri_mode = 0; /* (wait after xmit only) */
2115 cbp->interfrm_spacing = 6; /* (96 bits of) interframe spacing */
2116 cbp->promiscuous = prm; /* promiscuous mode */
2117 cbp->bcast_disable = 0; /* (don't) disable broadcasts */
2118 cbp->wait_after_win = 0; /* (don't) enable modified backoff alg*/
2119 cbp->ignore_ul = 0; /* consider U/L bit in IA matching */
2120 cbp->crc16_en = 0; /* (don't) enable crc-16 algorithm */
2121 cbp->crscdt = sc->flags & FXP_FLAG_SERIAL_MEDIA ? 1 : 0;
2122
2123 cbp->stripping = !prm; /* truncate rx packet to byte count */
2124 cbp->padding = 1; /* (do) pad short tx packets */
2125 cbp->rcv_crc_xfer = 0; /* (don't) xfer CRC to host */
2126 cbp->long_rx_en = sc->flags & FXP_FLAG_LONG_PKT_EN ? 1 : 0;
2127 cbp->ia_wake_en = 0; /* (don't) wake up on address match */
2128 cbp->magic_pkt_dis = 0; /* (don't) disable magic packet */
2129 /* must set wake_en in PMCSR also */
2130 cbp->force_fdx = 0; /* (don't) force full duplex */
2131 cbp->fdx_pin_en = 1; /* (enable) FDX# pin */
2132 cbp->multi_ia = 0; /* (don't) accept multiple IAs */
2133 cbp->mc_all = sc->flags & FXP_FLAG_ALL_MCAST ? 1 : 0;
2134 cbp->gamla_rx = sc->flags & FXP_FLAG_EXT_RFA ? 1 : 0;
2135
2136 if (sc->tunable_noflow || sc->revision == FXP_REV_82557) {
2137 /*
2138 * The 82557 has no hardware flow control, the values
2139 * below are the defaults for the chip.
2140 */
2141 cbp->fc_delay_lsb = 0;
2142 cbp->fc_delay_msb = 0x40;
2143 cbp->pri_fc_thresh = 3;
2144 cbp->tx_fc_dis = 0;
2145 cbp->rx_fc_restop = 0;
2146 cbp->rx_fc_restart = 0;
2147 cbp->fc_filter = 0;
2148 cbp->pri_fc_loc = 1;
2149 } else {
2150 cbp->fc_delay_lsb = 0x1f;
2151 cbp->fc_delay_msb = 0x01;
2152 cbp->pri_fc_thresh = 3;
2153 cbp->tx_fc_dis = 0; /* enable transmit FC */
2154 cbp->rx_fc_restop = 1; /* enable FC restop frames */
2155 cbp->rx_fc_restart = 1; /* enable FC restart frames */
2156 cbp->fc_filter = !prm; /* drop FC frames to host */
2157 cbp->pri_fc_loc = 1; /* FC pri location (byte31) */
2158 }
2159
2160 /*
2161 * Start the config command/DMA.
2162 */
2163 fxp_scb_wait(sc);
2164 bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
2165 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->fxp_desc.cbl_addr);
2166 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
2167 /* ...and wait for it to complete. */
2168 fxp_dma_wait(sc, &cbp->cb_status, sc->cbl_tag, sc->cbl_map);
2169 bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_POSTWRITE);
2170
2171 /*
2172 * Now initialize the station address. Temporarily use the TxCB
2173 * memory area like we did above for the config CB.
2174 */
2175 cb_ias = (struct fxp_cb_ias *)sc->fxp_desc.cbl_list;
2176 cb_ias->cb_status = 0;
2177 cb_ias->cb_command = htole16(FXP_CB_COMMAND_IAS | FXP_CB_COMMAND_EL);
2178 cb_ias->link_addr = 0xffffffff;
2179 bcopy(IF_LLADDR(sc->ifp), cb_ias->macaddr, ETHER_ADDR_LEN);
2180
2181 /*
2182 * Start the IAS (Individual Address Setup) command/DMA.
2183 */
2184 fxp_scb_wait(sc);
2185 bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
2186 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
2187 /* ...and wait for it to complete. */
2188 fxp_dma_wait(sc, &cb_ias->cb_status, sc->cbl_tag, sc->cbl_map);
2189 bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_POSTWRITE);
2190
2191 /*
2192 * Initialize transmit control block (TxCB) list.
2193 */
2194 txp = sc->fxp_desc.tx_list;
2195 tcbp = sc->fxp_desc.cbl_list;
2196 bzero(tcbp, FXP_TXCB_SZ);
2197 for (i = 0; i < FXP_NTXCB; i++) {
2198 txp[i].tx_mbuf = NULL;
2199 tcbp[i].cb_status = htole16(FXP_CB_STATUS_C | FXP_CB_STATUS_OK);
2200 tcbp[i].cb_command = htole16(FXP_CB_COMMAND_NOP);
2201 tcbp[i].link_addr = htole32(sc->fxp_desc.cbl_addr +
2202 (((i + 1) & FXP_TXCB_MASK) * sizeof(struct fxp_cb_tx)));
2203 if (sc->flags & FXP_FLAG_EXT_TXCB)
2204 tcbp[i].tbd_array_addr =
2205 htole32(FXP_TXCB_DMA_ADDR(sc, &tcbp[i].tbd[2]));
2206 else
2207 tcbp[i].tbd_array_addr =
2208 htole32(FXP_TXCB_DMA_ADDR(sc, &tcbp[i].tbd[0]));
2209 txp[i].tx_next = &txp[(i + 1) & FXP_TXCB_MASK];
2210 }
2211 /*
2212 * Set the suspend flag on the first TxCB and start the control
2213 * unit. It will execute the NOP and then suspend.
2214 */
2215 tcbp->cb_command = htole16(FXP_CB_COMMAND_NOP | FXP_CB_COMMAND_S);
2216 bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
2217 sc->fxp_desc.tx_first = sc->fxp_desc.tx_last = txp;
2218 sc->tx_queued = 1;
2219
2220 fxp_scb_wait(sc);
2221 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
2222
2223 /*
2224 * Initialize receiver buffer area - RFA.
2225 */
2226 fxp_scb_wait(sc);
2227 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->fxp_desc.rx_head->rx_addr);
2228 fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_START);
2229
2230 /*
2231 * Set current media.
2232 */
2233 if (sc->miibus != NULL)
2234 mii_mediachg(device_get_softc(sc->miibus));
2235
2236 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2237 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2238
2239 /*
2240 * Enable interrupts.
2241 */
2242#ifdef DEVICE_POLLING
2243 /*
2244 * ... but only do that if we are not polling. And because (presumably)
2245 * the default is interrupts on, we need to disable them explicitly!
2246 */
2247 if (ifp->if_capenable & IFCAP_POLLING )
2248 CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_DISABLE);
2249 else
2250#endif /* DEVICE_POLLING */
2251 CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, 0);
2252
2253 /*
2254 * Start stats updater.
2255 */
2256 callout_reset(&sc->stat_ch, hz, fxp_tick, sc);
2257}
2258
2259static int
2260fxp_serial_ifmedia_upd(struct ifnet *ifp)
2261{
2262
2263 return (0);
2264}
2265
2266static void
2267fxp_serial_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2268{
2269
2270 ifmr->ifm_active = IFM_ETHER|IFM_MANUAL;
2271}
2272
2273/*
2274 * Change media according to request.
2275 */
2276static int
2277fxp_ifmedia_upd(struct ifnet *ifp)
2278{
2279 struct fxp_softc *sc = ifp->if_softc;
2280 struct mii_data *mii;
2281
2282 mii = device_get_softc(sc->miibus);
2283 FXP_LOCK(sc);
2284 if (mii->mii_instance) {
2285 struct mii_softc *miisc;
2286 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
2287 mii_phy_reset(miisc);
2288 }
2289 mii_mediachg(mii);
2290 FXP_UNLOCK(sc);
2291 return (0);
2292}
2293
2294/*
2295 * Notify the world which media we're using.
2296 */
2297static void
2298fxp_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2299{
2300 struct fxp_softc *sc = ifp->if_softc;
2301 struct mii_data *mii;
2302
2303 mii = device_get_softc(sc->miibus);
2304 FXP_LOCK(sc);
2305 mii_pollstat(mii);
2306 ifmr->ifm_active = mii->mii_media_active;
2307 ifmr->ifm_status = mii->mii_media_status;
2308
2309 if (IFM_SUBTYPE(ifmr->ifm_active) == IFM_10_T &&
2310 sc->flags & FXP_FLAG_CU_RESUME_BUG)
2311 sc->cu_resume_bug = 1;
2312 else
2313 sc->cu_resume_bug = 0;
2314 FXP_UNLOCK(sc);
2315}
2316
2317/*
2318 * Add a buffer to the end of the RFA buffer list.
2319 * Return 0 if successful, 1 for failure. A failure results in
2320 * reusing the RFA buffer.
2321 * The RFA struct is stuck at the beginning of mbuf cluster and the
2322 * data pointer is fixed up to point just past it.
2323 */
2324static int
2325fxp_new_rfabuf(struct fxp_softc *sc, struct fxp_rx *rxp)
2326{
2327 struct mbuf *m;
2328 struct fxp_rfa *rfa;
2329 bus_dmamap_t tmp_map;
2330 int error;
2331
2332 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
2333 if (m == NULL)
2334 return (ENOBUFS);
2335
2336 /*
2337 * Move the data pointer up so that the incoming data packet
2338 * will be 32-bit aligned.
2339 */
2340 m->m_data += RFA_ALIGNMENT_FUDGE;
2341
2342 /*
2343 * Get a pointer to the base of the mbuf cluster and move
2344 * data start past it.
2345 */
2346 rfa = mtod(m, struct fxp_rfa *);
2347 m->m_data += sc->rfa_size;
2348 rfa->size = htole16(MCLBYTES - sc->rfa_size - RFA_ALIGNMENT_FUDGE);
2349
2350 rfa->rfa_status = 0;
2351 rfa->rfa_control = htole16(FXP_RFA_CONTROL_EL);
2352 rfa->actual_size = 0;
2353 m->m_len = m->m_pkthdr.len = MCLBYTES - RFA_ALIGNMENT_FUDGE -
2354 sc->rfa_size;
2355
2356 /*
2357 * Initialize the rest of the RFA. Note that since the RFA
2358 * is misaligned, we cannot store values directly. We're thus
2359 * using the le32enc() function which handles endianness and
2360 * is also alignment-safe.
2361 */
2362 le32enc(&rfa->link_addr, 0xffffffff);
2363 le32enc(&rfa->rbd_addr, 0xffffffff);
2364
2365 /* Map the RFA into DMA memory. */
2366 error = bus_dmamap_load(sc->fxp_mtag, sc->spare_map, rfa,
2367 MCLBYTES - RFA_ALIGNMENT_FUDGE, fxp_dma_map_addr,
2368 &rxp->rx_addr, 0);
2369 if (error) {
2370 m_freem(m);
2371 return (error);
2372 }
2373
2374 bus_dmamap_unload(sc->fxp_mtag, rxp->rx_map);
2375 tmp_map = sc->spare_map;
2376 sc->spare_map = rxp->rx_map;
2377 rxp->rx_map = tmp_map;
2378 rxp->rx_mbuf = m;
2379
2380 bus_dmamap_sync(sc->fxp_mtag, rxp->rx_map,
2381 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2382 return (0);
2383}
2384
2385static void
2386fxp_add_rfabuf(struct fxp_softc *sc, struct fxp_rx *rxp)
2387{
2388 struct fxp_rfa *p_rfa;
2389 struct fxp_rx *p_rx;
2390
2391 /*
2392 * If there are other buffers already on the list, attach this
2393 * one to the end by fixing up the tail to point to this one.
2394 */
2395 if (sc->fxp_desc.rx_head != NULL) {
2396 p_rx = sc->fxp_desc.rx_tail;
2397 p_rfa = (struct fxp_rfa *)
2398 (p_rx->rx_mbuf->m_ext.ext_buf + RFA_ALIGNMENT_FUDGE);
2399 p_rx->rx_next = rxp;
2400 le32enc(&p_rfa->link_addr, rxp->rx_addr);
2401 p_rfa->rfa_control = 0;
2402 bus_dmamap_sync(sc->fxp_mtag, p_rx->rx_map,
2403 BUS_DMASYNC_PREWRITE);
2404 } else {
2405 rxp->rx_next = NULL;
2406 sc->fxp_desc.rx_head = rxp;
2407 }
2408 sc->fxp_desc.rx_tail = rxp;
2409}
2410
2411static void
2412fxp_discard_rfabuf(struct fxp_softc *sc, struct fxp_rx *rxp)
2413{
2414 struct mbuf *m;
2415 struct fxp_rfa *rfa;
2416
2417 m = rxp->rx_mbuf;
2418 m->m_data = m->m_ext.ext_buf;
2419 /*
2420 * Move the data pointer up so that the incoming data packet
2421 * will be 32-bit aligned.
2422 */
2423 m->m_data += RFA_ALIGNMENT_FUDGE;
2424
2425 /*
2426 * Get a pointer to the base of the mbuf cluster and move
2427 * data start past it.
2428 */
2429 rfa = mtod(m, struct fxp_rfa *);
2430 m->m_data += sc->rfa_size;
2431 rfa->size = htole16(MCLBYTES - sc->rfa_size - RFA_ALIGNMENT_FUDGE);
2432
2433 rfa->rfa_status = 0;
2434 rfa->rfa_control = htole16(FXP_RFA_CONTROL_EL);
2435 rfa->actual_size = 0;
2436
2437 /*
2438 * Initialize the rest of the RFA. Note that since the RFA
2439 * is misaligned, we cannot store values directly. We're thus
2440 * using the le32enc() function which handles endianness and
2441 * is also alignment-safe.
2442 */
2443 le32enc(&rfa->link_addr, 0xffffffff);
2444 le32enc(&rfa->rbd_addr, 0xffffffff);
2445
2446 bus_dmamap_sync(sc->fxp_mtag, rxp->rx_map,
2447 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2448}
2449
2450static int
2451fxp_miibus_readreg(device_t dev, int phy, int reg)
2452{
2453 struct fxp_softc *sc = device_get_softc(dev);
2454 int count = 10000;
2455 int value;
2456
2457 CSR_WRITE_4(sc, FXP_CSR_MDICONTROL,
2458 (FXP_MDI_READ << 26) | (reg << 16) | (phy << 21));
2459
2460 while (((value = CSR_READ_4(sc, FXP_CSR_MDICONTROL)) & 0x10000000) == 0
2461 && count--)
2462 DELAY(10);
2463
2464 if (count <= 0)
2465 device_printf(dev, "fxp_miibus_readreg: timed out\n");
2466
2467 return (value & 0xffff);
2468}
2469
2470static void
2471fxp_miibus_writereg(device_t dev, int phy, int reg, int value)
2472{
2473 struct fxp_softc *sc = device_get_softc(dev);
2474 int count = 10000;
2475
2476 CSR_WRITE_4(sc, FXP_CSR_MDICONTROL,
2477 (FXP_MDI_WRITE << 26) | (reg << 16) | (phy << 21) |
2478 (value & 0xffff));
2479
2480 while ((CSR_READ_4(sc, FXP_CSR_MDICONTROL) & 0x10000000) == 0 &&
2481 count--)
2482 DELAY(10);
2483
2484 if (count <= 0)
2485 device_printf(dev, "fxp_miibus_writereg: timed out\n");
2486}
2487
2488static int
2489fxp_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
2490{
2491 struct fxp_softc *sc = ifp->if_softc;
2492 struct ifreq *ifr = (struct ifreq *)data;
2493 struct mii_data *mii;
2494 int flag, mask, error = 0, reinit;
2495
2496 switch (command) {
2497 case SIOCSIFFLAGS:
2498 FXP_LOCK(sc);
2499 if (ifp->if_flags & IFF_ALLMULTI)
2500 sc->flags |= FXP_FLAG_ALL_MCAST;
2501 else
2502 sc->flags &= ~FXP_FLAG_ALL_MCAST;
2503
2504 /*
2505 * If interface is marked up and not running, then start it.
2506 * If it is marked down and running, stop it.
2507 * XXX If it's up then re-initialize it. This is so flags
2508 * such as IFF_PROMISC are handled.
2509 */
2510 if (ifp->if_flags & IFF_UP) {
2511 fxp_init_body(sc);
2512 } else {
2513 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
2514 fxp_stop(sc);
2515 }
2516 FXP_UNLOCK(sc);
2517 break;
2518
2519 case SIOCADDMULTI:
2520 case SIOCDELMULTI:
2521 FXP_LOCK(sc);
2522 if (ifp->if_flags & IFF_ALLMULTI)
2523 sc->flags |= FXP_FLAG_ALL_MCAST;
2524 else
2525 sc->flags &= ~FXP_FLAG_ALL_MCAST;
2526 /*
2527 * Multicast list has changed; set the hardware filter
2528 * accordingly.
2529 */
2530 if ((sc->flags & FXP_FLAG_ALL_MCAST) == 0)
2531 fxp_mc_setup(sc);
2532 /*
2533 * fxp_mc_setup() can set FXP_FLAG_ALL_MCAST, so check it
2534 * again rather than else {}.
2535 */
2536 if (sc->flags & FXP_FLAG_ALL_MCAST)
2537 fxp_init_body(sc);
2538 FXP_UNLOCK(sc);
2539 error = 0;
2540 break;
2541
2542 case SIOCSIFMEDIA:
2543 case SIOCGIFMEDIA:
2544 if (sc->miibus != NULL) {
2545 mii = device_get_softc(sc->miibus);
2546 error = ifmedia_ioctl(ifp, ifr,
2547 &mii->mii_media, command);
2548 } else {
2549 error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, command);
2550 }
2551 break;
2552
2553 case SIOCSIFCAP:
2554 reinit = 0;
2555 mask = ifp->if_capenable ^ ifr->ifr_reqcap;
2556#ifdef DEVICE_POLLING
2557 if (mask & IFCAP_POLLING) {
2558 if (ifr->ifr_reqcap & IFCAP_POLLING) {
2559 error = ether_poll_register(fxp_poll, ifp);
2560 if (error)
2561 return(error);
2562 FXP_LOCK(sc);
2563 CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL,
2564 FXP_SCB_INTR_DISABLE);
2565 ifp->if_capenable |= IFCAP_POLLING;
2566 FXP_UNLOCK(sc);
2567 } else {
2568 error = ether_poll_deregister(ifp);
2569 /* Enable interrupts in any case */
2570 FXP_LOCK(sc);
2571 CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, 0);
2572 ifp->if_capenable &= ~IFCAP_POLLING;
2573 FXP_UNLOCK(sc);
2574 }
2575 }
2576#endif
2577 FXP_LOCK(sc);
2578 if ((mask & IFCAP_TXCSUM) != 0 &&
2579 (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
2580 ifp->if_capenable ^= IFCAP_TXCSUM;
2581 if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
2582 ifp->if_hwassist |= FXP_CSUM_FEATURES;
2583 else
2584 ifp->if_hwassist &= ~FXP_CSUM_FEATURES;
2585 }
2586 if ((mask & IFCAP_RXCSUM) != 0 &&
2587 (ifp->if_capabilities & IFCAP_RXCSUM) != 0) {
2588 ifp->if_capenable ^= IFCAP_RXCSUM;
2589 if ((sc->flags & FXP_FLAG_82559_RXCSUM) != 0)
2590 reinit++;
2591 }
2191 cbp->ext_rfa = sc->flags & FXP_FLAG_EXT_RFA ? 1 : 0;
2192 cbp->mediatype = sc->flags & FXP_FLAG_SERIAL_MEDIA ? 0 : 1;
2193 cbp->csma_dis = 0; /* (don't) disable link */
2194 cbp->tcp_udp_cksum = ((sc->flags & FXP_FLAG_82559_RXCSUM) != 0 &&
2195 (ifp->if_capenable & IFCAP_RXCSUM) != 0) ? 1 : 0;
2196 cbp->vlan_tco = 0; /* (don't) enable vlan wakeup */
2197 cbp->link_wake_en = 0; /* (don't) assert PME# on link change */
2198 cbp->arp_wake_en = 0; /* (don't) assert PME# on arp */
2199 cbp->mc_wake_en = 0; /* (don't) enable PME# on mcmatch */
2200 cbp->nsai = 1; /* (don't) disable source addr insert */
2201 cbp->preamble_length = 2; /* (7 byte) preamble */
2202 cbp->loopback = 0; /* (don't) loopback */
2203 cbp->linear_priority = 0; /* (normal CSMA/CD operation) */
2204 cbp->linear_pri_mode = 0; /* (wait after xmit only) */
2205 cbp->interfrm_spacing = 6; /* (96 bits of) interframe spacing */
2206 cbp->promiscuous = prm; /* promiscuous mode */
2207 cbp->bcast_disable = 0; /* (don't) disable broadcasts */
2208 cbp->wait_after_win = 0; /* (don't) enable modified backoff alg*/
2209 cbp->ignore_ul = 0; /* consider U/L bit in IA matching */
2210 cbp->crc16_en = 0; /* (don't) enable crc-16 algorithm */
2211 cbp->crscdt = sc->flags & FXP_FLAG_SERIAL_MEDIA ? 1 : 0;
2212
2213 cbp->stripping = !prm; /* truncate rx packet to byte count */
2214 cbp->padding = 1; /* (do) pad short tx packets */
2215 cbp->rcv_crc_xfer = 0; /* (don't) xfer CRC to host */
2216 cbp->long_rx_en = sc->flags & FXP_FLAG_LONG_PKT_EN ? 1 : 0;
2217 cbp->ia_wake_en = 0; /* (don't) wake up on address match */
2218 cbp->magic_pkt_dis = 0; /* (don't) disable magic packet */
2219 /* must set wake_en in PMCSR also */
2220 cbp->force_fdx = 0; /* (don't) force full duplex */
2221 cbp->fdx_pin_en = 1; /* (enable) FDX# pin */
2222 cbp->multi_ia = 0; /* (don't) accept multiple IAs */
2223 cbp->mc_all = sc->flags & FXP_FLAG_ALL_MCAST ? 1 : 0;
2224 cbp->gamla_rx = sc->flags & FXP_FLAG_EXT_RFA ? 1 : 0;
2225
2226 if (sc->tunable_noflow || sc->revision == FXP_REV_82557) {
2227 /*
2228 * The 82557 has no hardware flow control, the values
2229 * below are the defaults for the chip.
2230 */
2231 cbp->fc_delay_lsb = 0;
2232 cbp->fc_delay_msb = 0x40;
2233 cbp->pri_fc_thresh = 3;
2234 cbp->tx_fc_dis = 0;
2235 cbp->rx_fc_restop = 0;
2236 cbp->rx_fc_restart = 0;
2237 cbp->fc_filter = 0;
2238 cbp->pri_fc_loc = 1;
2239 } else {
2240 cbp->fc_delay_lsb = 0x1f;
2241 cbp->fc_delay_msb = 0x01;
2242 cbp->pri_fc_thresh = 3;
2243 cbp->tx_fc_dis = 0; /* enable transmit FC */
2244 cbp->rx_fc_restop = 1; /* enable FC restop frames */
2245 cbp->rx_fc_restart = 1; /* enable FC restart frames */
2246 cbp->fc_filter = !prm; /* drop FC frames to host */
2247 cbp->pri_fc_loc = 1; /* FC pri location (byte31) */
2248 }
2249
2250 /*
2251 * Start the config command/DMA.
2252 */
2253 fxp_scb_wait(sc);
2254 bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
2255 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->fxp_desc.cbl_addr);
2256 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
2257 /* ...and wait for it to complete. */
2258 fxp_dma_wait(sc, &cbp->cb_status, sc->cbl_tag, sc->cbl_map);
2259 bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_POSTWRITE);
2260
2261 /*
2262 * Now initialize the station address. Temporarily use the TxCB
2263 * memory area like we did above for the config CB.
2264 */
2265 cb_ias = (struct fxp_cb_ias *)sc->fxp_desc.cbl_list;
2266 cb_ias->cb_status = 0;
2267 cb_ias->cb_command = htole16(FXP_CB_COMMAND_IAS | FXP_CB_COMMAND_EL);
2268 cb_ias->link_addr = 0xffffffff;
2269 bcopy(IF_LLADDR(sc->ifp), cb_ias->macaddr, ETHER_ADDR_LEN);
2270
2271 /*
2272 * Start the IAS (Individual Address Setup) command/DMA.
2273 */
2274 fxp_scb_wait(sc);
2275 bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
2276 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
2277 /* ...and wait for it to complete. */
2278 fxp_dma_wait(sc, &cb_ias->cb_status, sc->cbl_tag, sc->cbl_map);
2279 bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_POSTWRITE);
2280
2281 /*
2282 * Initialize transmit control block (TxCB) list.
2283 */
2284 txp = sc->fxp_desc.tx_list;
2285 tcbp = sc->fxp_desc.cbl_list;
2286 bzero(tcbp, FXP_TXCB_SZ);
2287 for (i = 0; i < FXP_NTXCB; i++) {
2288 txp[i].tx_mbuf = NULL;
2289 tcbp[i].cb_status = htole16(FXP_CB_STATUS_C | FXP_CB_STATUS_OK);
2290 tcbp[i].cb_command = htole16(FXP_CB_COMMAND_NOP);
2291 tcbp[i].link_addr = htole32(sc->fxp_desc.cbl_addr +
2292 (((i + 1) & FXP_TXCB_MASK) * sizeof(struct fxp_cb_tx)));
2293 if (sc->flags & FXP_FLAG_EXT_TXCB)
2294 tcbp[i].tbd_array_addr =
2295 htole32(FXP_TXCB_DMA_ADDR(sc, &tcbp[i].tbd[2]));
2296 else
2297 tcbp[i].tbd_array_addr =
2298 htole32(FXP_TXCB_DMA_ADDR(sc, &tcbp[i].tbd[0]));
2299 txp[i].tx_next = &txp[(i + 1) & FXP_TXCB_MASK];
2300 }
2301 /*
2302 * Set the suspend flag on the first TxCB and start the control
2303 * unit. It will execute the NOP and then suspend.
2304 */
2305 tcbp->cb_command = htole16(FXP_CB_COMMAND_NOP | FXP_CB_COMMAND_S);
2306 bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
2307 sc->fxp_desc.tx_first = sc->fxp_desc.tx_last = txp;
2308 sc->tx_queued = 1;
2309
2310 fxp_scb_wait(sc);
2311 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
2312
2313 /*
2314 * Initialize receiver buffer area - RFA.
2315 */
2316 fxp_scb_wait(sc);
2317 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->fxp_desc.rx_head->rx_addr);
2318 fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_START);
2319
2320 /*
2321 * Set current media.
2322 */
2323 if (sc->miibus != NULL)
2324 mii_mediachg(device_get_softc(sc->miibus));
2325
2326 ifp->if_drv_flags |= IFF_DRV_RUNNING;
2327 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
2328
2329 /*
2330 * Enable interrupts.
2331 */
2332#ifdef DEVICE_POLLING
2333 /*
2334 * ... but only do that if we are not polling. And because (presumably)
2335 * the default is interrupts on, we need to disable them explicitly!
2336 */
2337 if (ifp->if_capenable & IFCAP_POLLING )
2338 CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_DISABLE);
2339 else
2340#endif /* DEVICE_POLLING */
2341 CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, 0);
2342
2343 /*
2344 * Start stats updater.
2345 */
2346 callout_reset(&sc->stat_ch, hz, fxp_tick, sc);
2347}
2348
2349static int
2350fxp_serial_ifmedia_upd(struct ifnet *ifp)
2351{
2352
2353 return (0);
2354}
2355
2356static void
2357fxp_serial_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2358{
2359
2360 ifmr->ifm_active = IFM_ETHER|IFM_MANUAL;
2361}
2362
2363/*
2364 * Change media according to request.
2365 */
2366static int
2367fxp_ifmedia_upd(struct ifnet *ifp)
2368{
2369 struct fxp_softc *sc = ifp->if_softc;
2370 struct mii_data *mii;
2371
2372 mii = device_get_softc(sc->miibus);
2373 FXP_LOCK(sc);
2374 if (mii->mii_instance) {
2375 struct mii_softc *miisc;
2376 LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
2377 mii_phy_reset(miisc);
2378 }
2379 mii_mediachg(mii);
2380 FXP_UNLOCK(sc);
2381 return (0);
2382}
2383
2384/*
2385 * Notify the world which media we're using.
2386 */
2387static void
2388fxp_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2389{
2390 struct fxp_softc *sc = ifp->if_softc;
2391 struct mii_data *mii;
2392
2393 mii = device_get_softc(sc->miibus);
2394 FXP_LOCK(sc);
2395 mii_pollstat(mii);
2396 ifmr->ifm_active = mii->mii_media_active;
2397 ifmr->ifm_status = mii->mii_media_status;
2398
2399 if (IFM_SUBTYPE(ifmr->ifm_active) == IFM_10_T &&
2400 sc->flags & FXP_FLAG_CU_RESUME_BUG)
2401 sc->cu_resume_bug = 1;
2402 else
2403 sc->cu_resume_bug = 0;
2404 FXP_UNLOCK(sc);
2405}
2406
2407/*
2408 * Add a buffer to the end of the RFA buffer list.
2409 * Return 0 if successful, 1 for failure. A failure results in
2410 * reusing the RFA buffer.
2411 * The RFA struct is stuck at the beginning of mbuf cluster and the
2412 * data pointer is fixed up to point just past it.
2413 */
2414static int
2415fxp_new_rfabuf(struct fxp_softc *sc, struct fxp_rx *rxp)
2416{
2417 struct mbuf *m;
2418 struct fxp_rfa *rfa;
2419 bus_dmamap_t tmp_map;
2420 int error;
2421
2422 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
2423 if (m == NULL)
2424 return (ENOBUFS);
2425
2426 /*
2427 * Move the data pointer up so that the incoming data packet
2428 * will be 32-bit aligned.
2429 */
2430 m->m_data += RFA_ALIGNMENT_FUDGE;
2431
2432 /*
2433 * Get a pointer to the base of the mbuf cluster and move
2434 * data start past it.
2435 */
2436 rfa = mtod(m, struct fxp_rfa *);
2437 m->m_data += sc->rfa_size;
2438 rfa->size = htole16(MCLBYTES - sc->rfa_size - RFA_ALIGNMENT_FUDGE);
2439
2440 rfa->rfa_status = 0;
2441 rfa->rfa_control = htole16(FXP_RFA_CONTROL_EL);
2442 rfa->actual_size = 0;
2443 m->m_len = m->m_pkthdr.len = MCLBYTES - RFA_ALIGNMENT_FUDGE -
2444 sc->rfa_size;
2445
2446 /*
2447 * Initialize the rest of the RFA. Note that since the RFA
2448 * is misaligned, we cannot store values directly. We're thus
2449 * using the le32enc() function which handles endianness and
2450 * is also alignment-safe.
2451 */
2452 le32enc(&rfa->link_addr, 0xffffffff);
2453 le32enc(&rfa->rbd_addr, 0xffffffff);
2454
2455 /* Map the RFA into DMA memory. */
2456 error = bus_dmamap_load(sc->fxp_mtag, sc->spare_map, rfa,
2457 MCLBYTES - RFA_ALIGNMENT_FUDGE, fxp_dma_map_addr,
2458 &rxp->rx_addr, 0);
2459 if (error) {
2460 m_freem(m);
2461 return (error);
2462 }
2463
2464 bus_dmamap_unload(sc->fxp_mtag, rxp->rx_map);
2465 tmp_map = sc->spare_map;
2466 sc->spare_map = rxp->rx_map;
2467 rxp->rx_map = tmp_map;
2468 rxp->rx_mbuf = m;
2469
2470 bus_dmamap_sync(sc->fxp_mtag, rxp->rx_map,
2471 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2472 return (0);
2473}
2474
2475static void
2476fxp_add_rfabuf(struct fxp_softc *sc, struct fxp_rx *rxp)
2477{
2478 struct fxp_rfa *p_rfa;
2479 struct fxp_rx *p_rx;
2480
2481 /*
2482 * If there are other buffers already on the list, attach this
2483 * one to the end by fixing up the tail to point to this one.
2484 */
2485 if (sc->fxp_desc.rx_head != NULL) {
2486 p_rx = sc->fxp_desc.rx_tail;
2487 p_rfa = (struct fxp_rfa *)
2488 (p_rx->rx_mbuf->m_ext.ext_buf + RFA_ALIGNMENT_FUDGE);
2489 p_rx->rx_next = rxp;
2490 le32enc(&p_rfa->link_addr, rxp->rx_addr);
2491 p_rfa->rfa_control = 0;
2492 bus_dmamap_sync(sc->fxp_mtag, p_rx->rx_map,
2493 BUS_DMASYNC_PREWRITE);
2494 } else {
2495 rxp->rx_next = NULL;
2496 sc->fxp_desc.rx_head = rxp;
2497 }
2498 sc->fxp_desc.rx_tail = rxp;
2499}
2500
/*
 * Recycle an RX descriptor's existing mbuf in place (e.g. when the
 * received frame is being dropped): rewind the data pointer, rebuild
 * the RFA header inside the cluster and mark it end-of-list so the
 * chip can reuse it.  The DMA mapping is left intact.
 */
static void
fxp_discard_rfabuf(struct fxp_softc *sc, struct fxp_rx *rxp)
{
	struct mbuf *m;
	struct fxp_rfa *rfa;

	m = rxp->rx_mbuf;
	m->m_data = m->m_ext.ext_buf;
	/*
	 * Move the data pointer up so that the incoming data packet
	 * will be 32-bit aligned.
	 */
	m->m_data += RFA_ALIGNMENT_FUDGE;

	/*
	 * The RFA lives at the (fudged) base of the cluster; move the
	 * mbuf data start past it so it points at the packet area.
	 */
	rfa = mtod(m, struct fxp_rfa *);
	m->m_data += sc->rfa_size;
	/* Usable packet space: cluster minus RFA header and fudge. */
	rfa->size = htole16(MCLBYTES - sc->rfa_size - RFA_ALIGNMENT_FUDGE);

	rfa->rfa_status = 0;
	/* EL: this RFA is (for now) the last one in the receive list. */
	rfa->rfa_control = htole16(FXP_RFA_CONTROL_EL);
	rfa->actual_size = 0;

	/*
	 * Initialize the rest of the RFA. Note that since the RFA
	 * is misaligned, we cannot store values directly. We're thus
	 * using the le32enc() function which handles endianness and
	 * is also alignment-safe.
	 */
	le32enc(&rfa->link_addr, 0xffffffff);
	le32enc(&rfa->rbd_addr, 0xffffffff);

	bus_dmamap_sync(sc->fxp_mtag, rxp->rx_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
2539
/*
 * MII bus read: start an MDI read cycle for (phy, reg) and busy-wait
 * (up to 10000 polls, 10us apart, i.e. ~100ms) for completion, then
 * return the 16-bit register value.
 */
static int
fxp_miibus_readreg(device_t dev, int phy, int reg)
{
	struct fxp_softc *sc = device_get_softc(dev);
	int count = 10000;
	int value;

	/* Opcode in bits 27:26, PHY address in 25:21, register in 20:16. */
	CSR_WRITE_4(sc, FXP_CSR_MDICONTROL,
	    (FXP_MDI_READ << 26) | (reg << 16) | (phy << 21));

	/* Bit 28 (0x10000000) signals that the MDI cycle has finished. */
	while (((value = CSR_READ_4(sc, FXP_CSR_MDICONTROL)) & 0x10000000) == 0
	    && count--)
		DELAY(10);

	/*
	 * NOTE(review): if the ready bit comes up exactly on the last
	 * permitted iteration, count can be 0 here and a spurious
	 * timeout is reported even though value is valid — confirm
	 * whether this edge case is worth tightening.
	 */
	if (count <= 0)
		device_printf(dev, "fxp_miibus_readreg: timed out\n");

	return (value & 0xffff);
}
2559
2560static void
2561fxp_miibus_writereg(device_t dev, int phy, int reg, int value)
2562{
2563 struct fxp_softc *sc = device_get_softc(dev);
2564 int count = 10000;
2565
2566 CSR_WRITE_4(sc, FXP_CSR_MDICONTROL,
2567 (FXP_MDI_WRITE << 26) | (reg << 16) | (phy << 21) |
2568 (value & 0xffff));
2569
2570 while ((CSR_READ_4(sc, FXP_CSR_MDICONTROL) & 0x10000000) == 0 &&
2571 count--)
2572 DELAY(10);
2573
2574 if (count <= 0)
2575 device_printf(dev, "fxp_miibus_writereg: timed out\n");
2576}
2577
/*
 * Interface ioctl handler: interface flags (up/down/promisc), multicast
 * list changes, media selection and capability toggles.  Returns 0 or
 * an errno value.
 */
static int
fxp_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct fxp_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int flag, mask, error = 0, reinit;

	switch (command) {
	case SIOCSIFFLAGS:
		FXP_LOCK(sc);
		/* Mirror IFF_ALLMULTI into the softc flag word. */
		if (ifp->if_flags & IFF_ALLMULTI)
			sc->flags |= FXP_FLAG_ALL_MCAST;
		else
			sc->flags &= ~FXP_FLAG_ALL_MCAST;

		/*
		 * If interface is marked up and not running, then start it.
		 * If it is marked down and running, stop it.
		 * XXX If it's up then re-initialize it. This is so flags
		 * such as IFF_PROMISC are handled.
		 */
		if (ifp->if_flags & IFF_UP) {
			fxp_init_body(sc);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				fxp_stop(sc);
		}
		FXP_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		FXP_LOCK(sc);
		if (ifp->if_flags & IFF_ALLMULTI)
			sc->flags |= FXP_FLAG_ALL_MCAST;
		else
			sc->flags &= ~FXP_FLAG_ALL_MCAST;
		/*
		 * Multicast list has changed; set the hardware filter
		 * accordingly.
		 */
		if ((sc->flags & FXP_FLAG_ALL_MCAST) == 0)
			fxp_mc_setup(sc);
		/*
		 * fxp_mc_setup() can set FXP_FLAG_ALL_MCAST, so check it
		 * again rather than else {}.
		 */
		if (sc->flags & FXP_FLAG_ALL_MCAST)
			fxp_init_body(sc);
		FXP_UNLOCK(sc);
		error = 0;
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		/* Hand media requests to the PHY driver if we have one. */
		if (sc->miibus != NULL) {
			mii = device_get_softc(sc->miibus);
			error = ifmedia_ioctl(ifp, ifr,
			    &mii->mii_media, command);
		} else {
			error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, command);
		}
		break;

	case SIOCSIFCAP:
		reinit = 0;
		/* Bits the caller wants toggled relative to current state. */
		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(fxp_poll, ifp);
				if (error)
					return(error);
				FXP_LOCK(sc);
				/* Polling mode: mask chip interrupts. */
				CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL,
				    FXP_SCB_INTR_DISABLE);
				ifp->if_capenable |= IFCAP_POLLING;
				FXP_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupts in any case */
				FXP_LOCK(sc);
				CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, 0);
				ifp->if_capenable &= ~IFCAP_POLLING;
				FXP_UNLOCK(sc);
			}
		}
#endif
		FXP_LOCK(sc);
		/* TX checksum offload: keep if_hwassist in step. */
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
				ifp->if_hwassist |= FXP_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~FXP_CSUM_FEATURES;
		}
		/* RX checksum: 82559-style hw csum needs a reinit to apply. */
		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_RXCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
			if ((sc->flags & FXP_FLAG_82559_RXCSUM) != 0)
				reinit++;
		}
		/* TCP segmentation offload toggle. */
		if ((mask & IFCAP_TSO4) != 0 &&
		    (ifp->if_capabilities & IFCAP_TSO4) != 0) {
			ifp->if_capenable ^= IFCAP_TSO4;
			if ((ifp->if_capenable & IFCAP_TSO4) != 0)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}
		if ((mask & IFCAP_VLAN_MTU) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_MTU) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_MTU;
			if (sc->revision != FXP_REV_82557)
				flag = FXP_FLAG_LONG_PKT_EN;
			else /* a hack to get long frames on the old chip */
				flag = FXP_FLAG_SAVE_BAD;
			sc->flags ^= flag;
			if (ifp->if_flags & IFF_UP)
				reinit++;
		}
		/* Re-init once if any toggle above required it. */
		if (reinit > 0)
			fxp_init_body(sc);
		FXP_UNLOCK(sc);
		break;

	default:
		error = ether_ioctl(ifp, command, data);
	}
	return (error);
}
2613
2614/*
2615 * Fill in the multicast address list and return number of entries.
2616 */
2617static int
2618fxp_mc_addrs(struct fxp_softc *sc)
2619{
2620 struct fxp_cb_mcs *mcsp = sc->mcsp;
2621 struct ifnet *ifp = sc->ifp;
2622 struct ifmultiaddr *ifma;
2623 int nmcasts;
2624
2625 nmcasts = 0;
2626 if ((sc->flags & FXP_FLAG_ALL_MCAST) == 0) {
2627 IF_ADDR_LOCK(ifp);
2628 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2629 if (ifma->ifma_addr->sa_family != AF_LINK)
2630 continue;
2631 if (nmcasts >= MAXMCADDR) {
2632 sc->flags |= FXP_FLAG_ALL_MCAST;
2633 nmcasts = 0;
2634 break;
2635 }
2636 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
2637 &sc->mcsp->mc_addr[nmcasts][0], ETHER_ADDR_LEN);
2638 nmcasts++;
2639 }
2640 IF_ADDR_UNLOCK(ifp);
2641 }
2642 mcsp->mc_cnt = htole16(nmcasts * ETHER_ADDR_LEN);
2643 return (nmcasts);
2644}
2645
/*
 * Program the multicast filter.
 *
 * We have an artificial restriction that the multicast setup command
 * must be the first command in the chain, so we take steps to ensure
 * this. By requiring this, it allows us to keep up the performance of
 * the pre-initialized command ring (esp. link pointers) by not actually
 * inserting the mcsetup command in the ring - i.e. its link pointer
 * points to the TxCB ring, but the mcsetup descriptor itself is not part
 * of it. We then can do 'CU_START' on the mcsetup descriptor and have it
 * lead into the regular TxCB ring when it completes.
 *
 * This function must be called with the softc lock held (see the
 * FXP_LOCK_ASSERT below; the old "splimp" requirement predates the
 * mutex conversion).
 */
static void
fxp_mc_setup(struct fxp_softc *sc)
{
	struct fxp_cb_mcs *mcsp = sc->mcsp;
	struct fxp_tx *txp;
	int count;

	FXP_LOCK_ASSERT(sc, MA_OWNED);
	/*
	 * If there are queued commands, we must wait until they are all
	 * completed. If we are already waiting, then add a NOP command
	 * with interrupt option so that we're notified when all commands
	 * have been completed - fxp_start() ensures that no additional
	 * TX commands will be added when need_mcsetup is true.
	 */
	if (sc->tx_queued) {
		/*
		 * need_mcsetup will be true if we are already waiting for the
		 * NOP command to be completed (see below). In this case, bail.
		 */
		if (sc->need_mcsetup)
			return;
		sc->need_mcsetup = 1;

		/*
		 * Add a NOP command with interrupt so that we are notified
		 * when all TX commands have been processed.
		 */
		txp = sc->fxp_desc.tx_last->tx_next;
		txp->tx_mbuf = NULL;
		txp->tx_cb->cb_status = 0;
		txp->tx_cb->cb_command = htole16(FXP_CB_COMMAND_NOP |
		    FXP_CB_COMMAND_S | FXP_CB_COMMAND_I);
		/*
		 * Advance the end of list forward: drop the suspend (S)
		 * bit from the old last descriptor so the CU proceeds to
		 * the NOP just queued.
		 */
		sc->fxp_desc.tx_last->tx_cb->cb_command &=
		    htole16(~FXP_CB_COMMAND_S);
		bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
		sc->fxp_desc.tx_last = txp;
		sc->tx_queued++;
		/*
		 * Issue a resume in case the CU has just suspended.
		 */
		fxp_scb_wait(sc);
		fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_RESUME);
		/*
		 * Set a 5 second timer just in case we don't hear from the
		 * card again.
		 */
		sc->watchdog_timer = 5;

		return;
	}
	sc->need_mcsetup = 0;

	/*
	 * Initialize multicast setup descriptor.  Its link pointer leads
	 * back into the regular TxCB ring (see block comment above).
	 */
	mcsp->cb_status = 0;
	mcsp->cb_command = htole16(FXP_CB_COMMAND_MCAS |
	    FXP_CB_COMMAND_S | FXP_CB_COMMAND_I);
	mcsp->link_addr = htole32(sc->fxp_desc.cbl_addr);
	txp = &sc->fxp_desc.mcs_tx;
	txp->tx_mbuf = NULL;
	txp->tx_cb = (struct fxp_cb_tx *)sc->mcsp;
	txp->tx_next = sc->fxp_desc.tx_list;
	/* Fill in the address list; may set FXP_FLAG_ALL_MCAST instead. */
	(void) fxp_mc_addrs(sc);
	sc->fxp_desc.tx_first = sc->fxp_desc.tx_last = txp;
	sc->tx_queued = 1;

	/*
	 * Wait until command unit is not active. This should never
	 * be the case when nothing is queued, but make sure anyway.
	 */
	count = 100;
	while ((CSR_READ_1(sc, FXP_CSR_SCB_RUSCUS) >> 6) ==
	    FXP_SCB_CUS_ACTIVE && --count)
		DELAY(10);
	if (count == 0) {
		device_printf(sc->dev, "command queue timeout\n");
		return;
	}

	/*
	 * Start the multicast setup command.
	 */
	fxp_scb_wait(sc);
	bus_dmamap_sync(sc->mcs_tag, sc->mcs_map, BUS_DMASYNC_PREWRITE);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->mcs_addr);
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);

	/* Expect completion within 2 seconds or the watchdog fires. */
	sc->watchdog_timer = 2;
	return;
}
2755
/*
 * CPUSaver receive-bundling microcode images, one per supported chip
 * revision.  The *_RCVBUNDLE_UCODE macros expand to dword array
 * initializers.
 */
static uint32_t fxp_ucode_d101a[] = D101_A_RCVBUNDLE_UCODE;
static uint32_t fxp_ucode_d101b0[] = D101_B0_RCVBUNDLE_UCODE;
static uint32_t fxp_ucode_d101ma[] = D101M_B_RCVBUNDLE_UCODE;
static uint32_t fxp_ucode_d101s[] = D101S_RCVBUNDLE_UCODE;
static uint32_t fxp_ucode_d102[] = D102_B_RCVBUNDLE_UCODE;
static uint32_t fxp_ucode_d102c[] = D102_C_RCVBUNDLE_UCODE;
static uint32_t fxp_ucode_d102e[] = D102_E_RCVBUNDLE_UCODE;

/* Expands to the "ucode, length" initializer pair for ucode_table. */
#define UCODE(x)	x, sizeof(x)/sizeof(uint32_t)

struct ucode {
	uint32_t revision;		/* chip revision this image is for */
	uint32_t *ucode;		/* microcode image (dwords) */
	int length;			/* number of dwords in ucode[] */
	u_short int_delay_offset;	/* dword index patched with the
					   interrupt delay (0 = no patch) */
	u_short bundle_max_offset;	/* dword index patched with the
					   bundle maximum (0 = no patch) */
} ucode_table[] = {
	{ FXP_REV_82558_A4, UCODE(fxp_ucode_d101a), D101_CPUSAVER_DWORD, 0 },
	{ FXP_REV_82558_B0, UCODE(fxp_ucode_d101b0), D101_CPUSAVER_DWORD, 0 },
	{ FXP_REV_82559_A0, UCODE(fxp_ucode_d101ma),
	    D101M_CPUSAVER_DWORD, D101M_CPUSAVER_BUNDLE_MAX_DWORD },
	{ FXP_REV_82559S_A, UCODE(fxp_ucode_d101s),
	    D101S_CPUSAVER_DWORD, D101S_CPUSAVER_BUNDLE_MAX_DWORD },
	{ FXP_REV_82550, UCODE(fxp_ucode_d102),
	    D102_B_CPUSAVER_DWORD, D102_B_CPUSAVER_BUNDLE_MAX_DWORD },
	{ FXP_REV_82550_C, UCODE(fxp_ucode_d102c),
	    D102_C_CPUSAVER_DWORD, D102_C_CPUSAVER_BUNDLE_MAX_DWORD },
	{ FXP_REV_82551_F, UCODE(fxp_ucode_d102e),
	    D102_E_CPUSAVER_DWORD, D102_E_CPUSAVER_BUNDLE_MAX_DWORD },
	{ 0, NULL, 0, 0, 0 }	/* table terminator */
};
2787
/*
 * Download the receive-bundling microcode matching the chip revision
 * (if any), patching in the tunable interrupt delay and bundle
 * maximum, then start the command and wait for the chip to accept it.
 * Sets FXP_FLAG_UCODE on success; silently returns if no image matches.
 */
static void
fxp_load_ucode(struct fxp_softc *sc)
{
	struct ucode *uc;
	struct fxp_cb_ucode *cbp;
	int i;

	/* Find the image for this chip revision, if one exists. */
	for (uc = ucode_table; uc->ucode != NULL; uc++)
		if (sc->revision == uc->revision)
			break;
	if (uc->ucode == NULL)
		return;
	/* Build a one-shot UCODE command at the head of the CB list. */
	cbp = (struct fxp_cb_ucode *)sc->fxp_desc.cbl_list;
	cbp->cb_status = 0;
	cbp->cb_command = htole16(FXP_CB_COMMAND_UCODE | FXP_CB_COMMAND_EL);
	cbp->link_addr = 0xffffffff;	/* (no) next command */
	for (i = 0; i < uc->length; i++)
		cbp->ucode[i] = htole32(uc->ucode[i]);
	/* Interrupt delay is patched in as 1.5x the tunable value. */
	if (uc->int_delay_offset)
		*(uint16_t *)&cbp->ucode[uc->int_delay_offset] =
		    htole16(sc->tunable_int_delay + sc->tunable_int_delay / 2);
	if (uc->bundle_max_offset)
		*(uint16_t *)&cbp->ucode[uc->bundle_max_offset] =
		    htole16(sc->tunable_bundle_max);
	/*
	 * Download the ucode to the chip.
	 */
	fxp_scb_wait(sc);
	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->fxp_desc.cbl_addr);
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
	/* ...and wait for it to complete. */
	fxp_dma_wait(sc, &cbp->cb_status, sc->cbl_tag, sc->cbl_map);
	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_POSTWRITE);
	device_printf(sc->dev,
	    "Microcode loaded, int_delay: %d usec bundle_max: %d\n",
	    sc->tunable_int_delay,
	    uc->bundle_max_offset == 0 ? 0 : sc->tunable_bundle_max);
	sc->flags |= FXP_FLAG_UCODE;
}
2828
2829static int
2830sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
2831{
2832 int error, value;
2833
2834 value = *(int *)arg1;
2835 error = sysctl_handle_int(oidp, &value, 0, req);
2836 if (error || !req->newptr)
2837 return (error);
2838 if (value < low || value > high)
2839 return (EINVAL);
2840 *(int *)arg1 = value;
2841 return (0);
2842}
2843
/*
 * Interrupt delay is expressed in microseconds, a multiplier is used
 * to convert this to the appropriate clock ticks before using.
 * Accepted range: 300..3000 usec.
 */
static int
sysctl_hw_fxp_int_delay(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_int_range(oidp, arg1, arg2, req, 300, 3000));
}
2853
/* Sysctl handler for the RX bundling maximum; accepted range 1..65535. */
static int
sysctl_hw_fxp_bundle_max(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_int_range(oidp, arg1, arg2, req, 1, 0xffff));
}
2690 if ((mask & IFCAP_VLAN_MTU) != 0 &&
2691 (ifp->if_capabilities & IFCAP_VLAN_MTU) != 0) {
2692 ifp->if_capenable ^= IFCAP_VLAN_MTU;
2693 if (sc->revision != FXP_REV_82557)
2694 flag = FXP_FLAG_LONG_PKT_EN;
2695 else /* a hack to get long frames on the old chip */
2696 flag = FXP_FLAG_SAVE_BAD;
2697 sc->flags ^= flag;
2698 if (ifp->if_flags & IFF_UP)
2699 reinit++;
2700 }
2701 if (reinit > 0)
2702 fxp_init_body(sc);
2703 FXP_UNLOCK(sc);
2704 break;
2705
2706 default:
2707 error = ether_ioctl(ifp, command, data);
2708 }
2709 return (error);
2710}
2711
2712/*
2713 * Fill in the multicast address list and return number of entries.
2714 */
2715static int
2716fxp_mc_addrs(struct fxp_softc *sc)
2717{
2718 struct fxp_cb_mcs *mcsp = sc->mcsp;
2719 struct ifnet *ifp = sc->ifp;
2720 struct ifmultiaddr *ifma;
2721 int nmcasts;
2722
2723 nmcasts = 0;
2724 if ((sc->flags & FXP_FLAG_ALL_MCAST) == 0) {
2725 IF_ADDR_LOCK(ifp);
2726 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2727 if (ifma->ifma_addr->sa_family != AF_LINK)
2728 continue;
2729 if (nmcasts >= MAXMCADDR) {
2730 sc->flags |= FXP_FLAG_ALL_MCAST;
2731 nmcasts = 0;
2732 break;
2733 }
2734 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
2735 &sc->mcsp->mc_addr[nmcasts][0], ETHER_ADDR_LEN);
2736 nmcasts++;
2737 }
2738 IF_ADDR_UNLOCK(ifp);
2739 }
2740 mcsp->mc_cnt = htole16(nmcasts * ETHER_ADDR_LEN);
2741 return (nmcasts);
2742}
2743
/*
 * Program the multicast filter.
 *
 * We have an artificial restriction that the multicast setup command
 * must be the first command in the chain, so we take steps to ensure
 * this. By requiring this, it allows us to keep up the performance of
 * the pre-initialized command ring (esp. link pointers) by not actually
 * inserting the mcsetup command in the ring - i.e. its link pointer
 * points to the TxCB ring, but the mcsetup descriptor itself is not part
 * of it. We then can do 'CU_START' on the mcsetup descriptor and have it
 * lead into the regular TxCB ring when it completes.
 *
 * This function must be called with the softc lock held (see the
 * FXP_LOCK_ASSERT below; the old "splimp" requirement predates the
 * mutex conversion).
 */
static void
fxp_mc_setup(struct fxp_softc *sc)
{
	struct fxp_cb_mcs *mcsp = sc->mcsp;
	struct fxp_tx *txp;
	int count;

	FXP_LOCK_ASSERT(sc, MA_OWNED);
	/*
	 * If there are queued commands, we must wait until they are all
	 * completed. If we are already waiting, then add a NOP command
	 * with interrupt option so that we're notified when all commands
	 * have been completed - fxp_start() ensures that no additional
	 * TX commands will be added when need_mcsetup is true.
	 */
	if (sc->tx_queued) {
		/*
		 * need_mcsetup will be true if we are already waiting for the
		 * NOP command to be completed (see below). In this case, bail.
		 */
		if (sc->need_mcsetup)
			return;
		sc->need_mcsetup = 1;

		/*
		 * Add a NOP command with interrupt so that we are notified
		 * when all TX commands have been processed.
		 */
		txp = sc->fxp_desc.tx_last->tx_next;
		txp->tx_mbuf = NULL;
		txp->tx_cb->cb_status = 0;
		txp->tx_cb->cb_command = htole16(FXP_CB_COMMAND_NOP |
		    FXP_CB_COMMAND_S | FXP_CB_COMMAND_I);
		/*
		 * Advance the end of list forward: drop the suspend (S)
		 * bit from the old last descriptor so the CU proceeds to
		 * the NOP just queued.
		 */
		sc->fxp_desc.tx_last->tx_cb->cb_command &=
		    htole16(~FXP_CB_COMMAND_S);
		bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
		sc->fxp_desc.tx_last = txp;
		sc->tx_queued++;
		/*
		 * Issue a resume in case the CU has just suspended.
		 */
		fxp_scb_wait(sc);
		fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_RESUME);
		/*
		 * Set a 5 second timer just in case we don't hear from the
		 * card again.
		 */
		sc->watchdog_timer = 5;

		return;
	}
	sc->need_mcsetup = 0;

	/*
	 * Initialize multicast setup descriptor.  Its link pointer leads
	 * back into the regular TxCB ring (see block comment above).
	 */
	mcsp->cb_status = 0;
	mcsp->cb_command = htole16(FXP_CB_COMMAND_MCAS |
	    FXP_CB_COMMAND_S | FXP_CB_COMMAND_I);
	mcsp->link_addr = htole32(sc->fxp_desc.cbl_addr);
	txp = &sc->fxp_desc.mcs_tx;
	txp->tx_mbuf = NULL;
	txp->tx_cb = (struct fxp_cb_tx *)sc->mcsp;
	txp->tx_next = sc->fxp_desc.tx_list;
	/* Fill in the address list; may set FXP_FLAG_ALL_MCAST instead. */
	(void) fxp_mc_addrs(sc);
	sc->fxp_desc.tx_first = sc->fxp_desc.tx_last = txp;
	sc->tx_queued = 1;

	/*
	 * Wait until command unit is not active. This should never
	 * be the case when nothing is queued, but make sure anyway.
	 */
	count = 100;
	while ((CSR_READ_1(sc, FXP_CSR_SCB_RUSCUS) >> 6) ==
	    FXP_SCB_CUS_ACTIVE && --count)
		DELAY(10);
	if (count == 0) {
		device_printf(sc->dev, "command queue timeout\n");
		return;
	}

	/*
	 * Start the multicast setup command.
	 */
	fxp_scb_wait(sc);
	bus_dmamap_sync(sc->mcs_tag, sc->mcs_map, BUS_DMASYNC_PREWRITE);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->mcs_addr);
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);

	/* Expect completion within 2 seconds or the watchdog fires. */
	sc->watchdog_timer = 2;
	return;
}
2853
/*
 * CPUSaver receive-bundling microcode images, one per supported chip
 * revision.  The *_RCVBUNDLE_UCODE macros expand to dword array
 * initializers.
 */
static uint32_t fxp_ucode_d101a[] = D101_A_RCVBUNDLE_UCODE;
static uint32_t fxp_ucode_d101b0[] = D101_B0_RCVBUNDLE_UCODE;
static uint32_t fxp_ucode_d101ma[] = D101M_B_RCVBUNDLE_UCODE;
static uint32_t fxp_ucode_d101s[] = D101S_RCVBUNDLE_UCODE;
static uint32_t fxp_ucode_d102[] = D102_B_RCVBUNDLE_UCODE;
static uint32_t fxp_ucode_d102c[] = D102_C_RCVBUNDLE_UCODE;
static uint32_t fxp_ucode_d102e[] = D102_E_RCVBUNDLE_UCODE;

/* Expands to the "ucode, length" initializer pair for ucode_table. */
#define UCODE(x)	x, sizeof(x)/sizeof(uint32_t)

struct ucode {
	uint32_t revision;		/* chip revision this image is for */
	uint32_t *ucode;		/* microcode image (dwords) */
	int length;			/* number of dwords in ucode[] */
	u_short int_delay_offset;	/* dword index patched with the
					   interrupt delay (0 = no patch) */
	u_short bundle_max_offset;	/* dword index patched with the
					   bundle maximum (0 = no patch) */
} ucode_table[] = {
	{ FXP_REV_82558_A4, UCODE(fxp_ucode_d101a), D101_CPUSAVER_DWORD, 0 },
	{ FXP_REV_82558_B0, UCODE(fxp_ucode_d101b0), D101_CPUSAVER_DWORD, 0 },
	{ FXP_REV_82559_A0, UCODE(fxp_ucode_d101ma),
	    D101M_CPUSAVER_DWORD, D101M_CPUSAVER_BUNDLE_MAX_DWORD },
	{ FXP_REV_82559S_A, UCODE(fxp_ucode_d101s),
	    D101S_CPUSAVER_DWORD, D101S_CPUSAVER_BUNDLE_MAX_DWORD },
	{ FXP_REV_82550, UCODE(fxp_ucode_d102),
	    D102_B_CPUSAVER_DWORD, D102_B_CPUSAVER_BUNDLE_MAX_DWORD },
	{ FXP_REV_82550_C, UCODE(fxp_ucode_d102c),
	    D102_C_CPUSAVER_DWORD, D102_C_CPUSAVER_BUNDLE_MAX_DWORD },
	{ FXP_REV_82551_F, UCODE(fxp_ucode_d102e),
	    D102_E_CPUSAVER_DWORD, D102_E_CPUSAVER_BUNDLE_MAX_DWORD },
	{ 0, NULL, 0, 0, 0 }	/* table terminator */
};
2885
/*
 * Download the receive-bundling microcode matching the chip revision
 * (if any), patching in the tunable interrupt delay and bundle
 * maximum, then start the command and wait for the chip to accept it.
 * Sets FXP_FLAG_UCODE on success; silently returns if no image matches.
 */
static void
fxp_load_ucode(struct fxp_softc *sc)
{
	struct ucode *uc;
	struct fxp_cb_ucode *cbp;
	int i;

	/* Find the image for this chip revision, if one exists. */
	for (uc = ucode_table; uc->ucode != NULL; uc++)
		if (sc->revision == uc->revision)
			break;
	if (uc->ucode == NULL)
		return;
	/* Build a one-shot UCODE command at the head of the CB list. */
	cbp = (struct fxp_cb_ucode *)sc->fxp_desc.cbl_list;
	cbp->cb_status = 0;
	cbp->cb_command = htole16(FXP_CB_COMMAND_UCODE | FXP_CB_COMMAND_EL);
	cbp->link_addr = 0xffffffff;	/* (no) next command */
	for (i = 0; i < uc->length; i++)
		cbp->ucode[i] = htole32(uc->ucode[i]);
	/* Interrupt delay is patched in as 1.5x the tunable value. */
	if (uc->int_delay_offset)
		*(uint16_t *)&cbp->ucode[uc->int_delay_offset] =
		    htole16(sc->tunable_int_delay + sc->tunable_int_delay / 2);
	if (uc->bundle_max_offset)
		*(uint16_t *)&cbp->ucode[uc->bundle_max_offset] =
		    htole16(sc->tunable_bundle_max);
	/*
	 * Download the ucode to the chip.
	 */
	fxp_scb_wait(sc);
	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->fxp_desc.cbl_addr);
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
	/* ...and wait for it to complete. */
	fxp_dma_wait(sc, &cbp->cb_status, sc->cbl_tag, sc->cbl_map);
	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_POSTWRITE);
	device_printf(sc->dev,
	    "Microcode loaded, int_delay: %d usec bundle_max: %d\n",
	    sc->tunable_int_delay,
	    uc->bundle_max_offset == 0 ? 0 : sc->tunable_bundle_max);
	sc->flags |= FXP_FLAG_UCODE;
}
2926
2927static int
2928sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
2929{
2930 int error, value;
2931
2932 value = *(int *)arg1;
2933 error = sysctl_handle_int(oidp, &value, 0, req);
2934 if (error || !req->newptr)
2935 return (error);
2936 if (value < low || value > high)
2937 return (EINVAL);
2938 *(int *)arg1 = value;
2939 return (0);
2940}
2941
/*
 * Interrupt delay is expressed in microseconds, a multiplier is used
 * to convert this to the appropriate clock ticks before using.
 * Accepted range: 300..3000 usec.
 */
static int
sysctl_hw_fxp_int_delay(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_int_range(oidp, arg1, arg2, req, 300, 3000));
}
2951
/* Sysctl handler for the RX bundling maximum; accepted range 1..65535. */
static int
sysctl_hw_fxp_bundle_max(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_int_range(oidp, arg1, arg2, req, 1, 0xffff));
}