/*
 * (diff-viewer artifact removed: this file is if_fxp.c as changed from
 * revision 112982 to revision 113017)
 */
1/*-
2 * Copyright (c) 1995, David Greenman
3 * Copyright (c) 2001 Jonathan Lemon <jlemon@freebsd.org>
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice unmodified, this list of conditions, and the following
11 * disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 *
1/*-
2 * Copyright (c) 1995, David Greenman
3 * Copyright (c) 2001 Jonathan Lemon <jlemon@freebsd.org>
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice unmodified, this list of conditions, and the following
11 * disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 *
28 * $FreeBSD: head/sys/dev/fxp/if_fxp.c 112982 2003-04-02 16:47:16Z mux $
28 * $FreeBSD: head/sys/dev/fxp/if_fxp.c 113017 2003-04-03 14:08:35Z mux $
29 */
30
31/*
32 * Intel EtherExpress Pro/100B PCI Fast Ethernet driver
33 */
34
35#include <sys/param.h>
36#include <sys/systm.h>
37#include <sys/mbuf.h>
38 /* #include <sys/mutex.h> */
39#include <sys/kernel.h>
40#include <sys/socket.h>
41#include <sys/sysctl.h>
42
43#include <net/if.h>
44#include <net/if_dl.h>
45#include <net/if_media.h>
46
47#include <net/bpf.h>
48#include <sys/sockio.h>
49#include <sys/bus.h>
50#include <machine/bus.h>
51#include <sys/rman.h>
52#include <machine/resource.h>
53
54#include <net/ethernet.h>
55#include <net/if_arp.h>
56
57#include <machine/clock.h> /* for DELAY */
58
59#include <net/if_types.h>
60#include <net/if_vlan_var.h>
61
62#ifdef FXP_IP_CSUM_WAR
63#include <netinet/in.h>
64#include <netinet/in_systm.h>
65#include <netinet/ip.h>
66#include <machine/in_cksum.h>
67#endif
68
69#include <pci/pcivar.h>
70#include <pci/pcireg.h> /* for PCIM_CMD_xxx */
71
72#include <dev/mii/mii.h>
73#include <dev/mii/miivar.h>
74
75#include <dev/fxp/if_fxpreg.h>
76#include <dev/fxp/if_fxpvar.h>
77#include <dev/fxp/rcvbundl.h>
78
79MODULE_DEPEND(fxp, miibus, 1, 1, 1);
80#include "miibus_if.h"
81
82/*
83 * NOTE! On the Alpha, we have an alignment constraint. The
84 * card DMAs the packet immediately following the RFA. However,
85 * the first thing in the packet is a 14-byte Ethernet header.
86 * This means that the packet is misaligned. To compensate,
87 * we actually offset the RFA 2 bytes into the cluster. This
88 * alignes the packet after the Ethernet header at a 32-bit
89 * boundary. HOWEVER! This means that the RFA is misaligned!
90 */
91#define RFA_ALIGNMENT_FUDGE 2
92
93/*
94 * Set initial transmit threshold at 64 (512 bytes). This is
95 * increased by 64 (512 bytes) at a time, to maximum of 192
96 * (1536 bytes), if an underrun occurs.
97 */
98static int tx_threshold = 64;
99
100/*
101 * The configuration byte map has several undefined fields which
102 * must be one or must be zero. Set up a template for these bits
103 * only, (assuming a 82557 chip) leaving the actual configuration
104 * to fxp_init.
105 *
106 * See struct fxp_cb_config for the bit definitions.
107 */
108static u_char fxp_cb_config_template[] = {
109 0x0, 0x0, /* cb_status */
110 0x0, 0x0, /* cb_command */
111 0x0, 0x0, 0x0, 0x0, /* link_addr */
112 0x0, /* 0 */
113 0x0, /* 1 */
114 0x0, /* 2 */
115 0x0, /* 3 */
116 0x0, /* 4 */
117 0x0, /* 5 */
118 0x32, /* 6 */
119 0x0, /* 7 */
120 0x0, /* 8 */
121 0x0, /* 9 */
122 0x6, /* 10 */
123 0x0, /* 11 */
124 0x0, /* 12 */
125 0x0, /* 13 */
126 0xf2, /* 14 */
127 0x48, /* 15 */
128 0x0, /* 16 */
129 0x40, /* 17 */
130 0xf0, /* 18 */
131 0x0, /* 19 */
132 0x3f, /* 20 */
133 0x5 /* 21 */
134};
135
136struct fxp_ident {
137 u_int16_t devid;
138 char *name;
139};
140
141/*
142 * Claim various Intel PCI device identifiers for this driver. The
143 * sub-vendor and sub-device field are extensively used to identify
144 * particular variants, but we don't currently differentiate between
145 * them.
146 */
147static struct fxp_ident fxp_ident_table[] = {
148 { 0x1029, "Intel 82559 PCI/CardBus Pro/100" },
149 { 0x1030, "Intel 82559 Pro/100 Ethernet" },
150 { 0x1031, "Intel 82801CAM (ICH3) Pro/100 VE Ethernet" },
151 { 0x1032, "Intel 82801CAM (ICH3) Pro/100 VE Ethernet" },
152 { 0x1033, "Intel 82801CAM (ICH3) Pro/100 VM Ethernet" },
153 { 0x1034, "Intel 82801CAM (ICH3) Pro/100 VM Ethernet" },
154 { 0x1035, "Intel 82801CAM (ICH3) Pro/100 Ethernet" },
155 { 0x1036, "Intel 82801CAM (ICH3) Pro/100 Ethernet" },
156 { 0x1037, "Intel 82801CAM (ICH3) Pro/100 Ethernet" },
157 { 0x1038, "Intel 82801CAM (ICH3) Pro/100 VM Ethernet" },
158 { 0x1039, "Intel 82801DB (ICH4) Pro/100 VE Ethernet" },
159 { 0x103A, "Intel 82801DB (ICH4) Pro/100 Ethernet" },
160 { 0x103B, "Intel 82801DB (ICH4) Pro/100 VM Ethernet" },
161 { 0x103C, "Intel 82801DB (ICH4) Pro/100 Ethernet" },
162 { 0x103D, "Intel 82801DB (ICH4) Pro/100 VE Ethernet" },
163 { 0x103E, "Intel 82801DB (ICH4) Pro/100 VM Ethernet" },
164 { 0x1059, "Intel 82551QM Pro/100 M Mobile Connection" },
165 { 0x1209, "Intel 82559ER Embedded 10/100 Ethernet" },
166 { 0x1229, "Intel 82557/8/9 EtherExpress Pro/100(B) Ethernet" },
167 { 0x2449, "Intel 82801BA/CAM (ICH2/3) Pro/100 Ethernet" },
168 { 0, NULL },
169};
170
171#ifdef FXP_IP_CSUM_WAR
172#define FXP_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
173#else
174#define FXP_CSUM_FEATURES (CSUM_TCP | CSUM_UDP)
175#endif
176
177static int fxp_probe(device_t dev);
178static int fxp_attach(device_t dev);
179static int fxp_detach(device_t dev);
180static int fxp_shutdown(device_t dev);
181static int fxp_suspend(device_t dev);
182static int fxp_resume(device_t dev);
183
184static void fxp_intr(void *xsc);
185static void fxp_init(void *xsc);
186static void fxp_tick(void *xsc);
187static void fxp_powerstate_d0(device_t dev);
188static void fxp_start(struct ifnet *ifp);
189static void fxp_stop(struct fxp_softc *sc);
190static void fxp_release(struct fxp_softc *sc);
191static int fxp_ioctl(struct ifnet *ifp, u_long command,
192 caddr_t data);
193static void fxp_watchdog(struct ifnet *ifp);
194static int fxp_add_rfabuf(struct fxp_softc *sc,
195 struct fxp_rx *rxp);
196static int fxp_mc_addrs(struct fxp_softc *sc);
197static void fxp_mc_setup(struct fxp_softc *sc);
198static u_int16_t fxp_eeprom_getword(struct fxp_softc *sc, int offset,
199 int autosize);
200static void fxp_eeprom_putword(struct fxp_softc *sc, int offset,
201 u_int16_t data);
202static void fxp_autosize_eeprom(struct fxp_softc *sc);
203static void fxp_read_eeprom(struct fxp_softc *sc, u_short *data,
204 int offset, int words);
205static void fxp_write_eeprom(struct fxp_softc *sc, u_short *data,
206 int offset, int words);
207static int fxp_ifmedia_upd(struct ifnet *ifp);
208static void fxp_ifmedia_sts(struct ifnet *ifp,
209 struct ifmediareq *ifmr);
210static int fxp_serial_ifmedia_upd(struct ifnet *ifp);
211static void fxp_serial_ifmedia_sts(struct ifnet *ifp,
212 struct ifmediareq *ifmr);
213static volatile int fxp_miibus_readreg(device_t dev, int phy, int reg);
214static void fxp_miibus_writereg(device_t dev, int phy, int reg,
215 int value);
216static void fxp_load_ucode(struct fxp_softc *sc);
217static int sysctl_int_range(SYSCTL_HANDLER_ARGS,
218 int low, int high);
219static int sysctl_hw_fxp_bundle_max(SYSCTL_HANDLER_ARGS);
220static int sysctl_hw_fxp_int_delay(SYSCTL_HANDLER_ARGS);
221static __inline void fxp_lwcopy(volatile u_int32_t *src,
222 volatile u_int32_t *dst);
223static __inline void fxp_scb_wait(struct fxp_softc *sc);
224static __inline void fxp_scb_cmd(struct fxp_softc *sc, int cmd);
225static __inline void fxp_dma_wait(volatile u_int16_t *status,
226 struct fxp_softc *sc);
227
228static device_method_t fxp_methods[] = {
229 /* Device interface */
230 DEVMETHOD(device_probe, fxp_probe),
231 DEVMETHOD(device_attach, fxp_attach),
232 DEVMETHOD(device_detach, fxp_detach),
233 DEVMETHOD(device_shutdown, fxp_shutdown),
234 DEVMETHOD(device_suspend, fxp_suspend),
235 DEVMETHOD(device_resume, fxp_resume),
236
237 /* MII interface */
238 DEVMETHOD(miibus_readreg, fxp_miibus_readreg),
239 DEVMETHOD(miibus_writereg, fxp_miibus_writereg),
240
241 { 0, 0 }
242};
243
244static driver_t fxp_driver = {
245 "fxp",
246 fxp_methods,
247 sizeof(struct fxp_softc),
248};
249
250static devclass_t fxp_devclass;
251
252DRIVER_MODULE(if_fxp, pci, fxp_driver, fxp_devclass, 0, 0);
253DRIVER_MODULE(if_fxp, cardbus, fxp_driver, fxp_devclass, 0, 0);
254DRIVER_MODULE(miibus, fxp, miibus_driver, miibus_devclass, 0, 0);
255
256static int fxp_rnr;
257SYSCTL_INT(_hw, OID_AUTO, fxp_rnr, CTLFLAG_RW, &fxp_rnr, 0, "fxp rnr events");
258
259/*
260 * Inline function to copy a 16-bit aligned 32-bit quantity.
261 */
262static __inline void
263fxp_lwcopy(volatile u_int32_t *src, volatile u_int32_t *dst)
264{
265#ifdef __i386__
266 *dst = *src;
267#else
268 volatile u_int16_t *a = (volatile u_int16_t *)src;
269 volatile u_int16_t *b = (volatile u_int16_t *)dst;
270
271 b[0] = a[0];
272 b[1] = a[1];
273#endif
274}
275
276/*
277 * Wait for the previous command to be accepted (but not necessarily
278 * completed).
279 */
280static __inline void
281fxp_scb_wait(struct fxp_softc *sc)
282{
283 int i = 10000;
284
285 while (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) && --i)
286 DELAY(2);
287 if (i == 0)
288 device_printf(sc->dev, "SCB timeout: 0x%x 0x%x 0x%x 0x%x\n",
289 CSR_READ_1(sc, FXP_CSR_SCB_COMMAND),
290 CSR_READ_1(sc, FXP_CSR_SCB_STATACK),
291 CSR_READ_1(sc, FXP_CSR_SCB_RUSCUS),
292 CSR_READ_2(sc, FXP_CSR_FLOWCONTROL));
293}
294
295static __inline void
296fxp_scb_cmd(struct fxp_softc *sc, int cmd)
297{
298
299 if (cmd == FXP_SCB_COMMAND_CU_RESUME && sc->cu_resume_bug) {
300 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_CB_COMMAND_NOP);
301 fxp_scb_wait(sc);
302 }
303 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, cmd);
304}
305
306static __inline void
307fxp_dma_wait(volatile u_int16_t *status, struct fxp_softc *sc)
308{
309 int i = 10000;
310
311 while (!(*status & FXP_CB_STATUS_C) && --i)
312 DELAY(2);
313 if (i == 0)
314 device_printf(sc->dev, "DMA timeout\n");
315}
316
317/*
318 * Return identification string if this is device is ours.
319 */
320static int
321fxp_probe(device_t dev)
322{
323 u_int16_t devid;
324 struct fxp_ident *ident;
325
326 if (pci_get_vendor(dev) == FXP_VENDORID_INTEL) {
327 devid = pci_get_device(dev);
328 for (ident = fxp_ident_table; ident->name != NULL; ident++) {
329 if (ident->devid == devid) {
330 device_set_desc(dev, ident->name);
331 return (0);
332 }
333 }
334 }
335 return (ENXIO);
336}
337
338static void
339fxp_powerstate_d0(device_t dev)
340{
341#if __FreeBSD_version >= 430002
342 u_int32_t iobase, membase, irq;
343
344 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
345 /* Save important PCI config data. */
346 iobase = pci_read_config(dev, FXP_PCI_IOBA, 4);
347 membase = pci_read_config(dev, FXP_PCI_MMBA, 4);
348 irq = pci_read_config(dev, PCIR_INTLINE, 4);
349
350 /* Reset the power state. */
351 device_printf(dev, "chip is in D%d power mode "
352 "-- setting to D0\n", pci_get_powerstate(dev));
353
354 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
355
356 /* Restore PCI config data. */
357 pci_write_config(dev, FXP_PCI_IOBA, iobase, 4);
358 pci_write_config(dev, FXP_PCI_MMBA, membase, 4);
359 pci_write_config(dev, PCIR_INTLINE, irq, 4);
360 }
361#endif
362}
363
364static void
365fxp_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
366{
367 u_int32_t *addr;
368
369 if (error)
370 return;
371
372 KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
373 addr = arg;
374 *addr = segs->ds_addr;
375}
376
377static int
378fxp_attach(device_t dev)
379{
380 int error = 0;
381 struct fxp_softc *sc = device_get_softc(dev);
382 struct ifnet *ifp;
383 struct fxp_rx *rxp;
384 u_int32_t val;
385 u_int16_t data;
386 int i, rid, m1, m2, prefer_iomap;
387 int s;
388
389 bzero(sc, sizeof(*sc));
390 sc->dev = dev;
391 callout_handle_init(&sc->stat_ch);
392 sysctl_ctx_init(&sc->sysctl_ctx);
393 mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
394 MTX_DEF | MTX_RECURSE);
395
396 s = splimp();
397
398 /*
399 * Enable bus mastering. Enable memory space too, in case
400 * BIOS/Prom forgot about it.
401 */
402 val = pci_read_config(dev, PCIR_COMMAND, 2);
403 val |= (PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN);
404 pci_write_config(dev, PCIR_COMMAND, val, 2);
405 val = pci_read_config(dev, PCIR_COMMAND, 2);
406
407 fxp_powerstate_d0(dev);
408
409 /*
410 * Figure out which we should try first - memory mapping or i/o mapping?
411 * We default to memory mapping. Then we accept an override from the
412 * command line. Then we check to see which one is enabled.
413 */
414 m1 = PCIM_CMD_MEMEN;
415 m2 = PCIM_CMD_PORTEN;
416 prefer_iomap = 0;
417 if (resource_int_value(device_get_name(dev), device_get_unit(dev),
418 "prefer_iomap", &prefer_iomap) == 0 && prefer_iomap != 0) {
419 m1 = PCIM_CMD_PORTEN;
420 m2 = PCIM_CMD_MEMEN;
421 }
422
423 if (val & m1) {
424 sc->rtp =
425 (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
426 sc->rgd = (m1 == PCIM_CMD_MEMEN)? FXP_PCI_MMBA : FXP_PCI_IOBA;
427 sc->mem = bus_alloc_resource(dev, sc->rtp, &sc->rgd,
428 0, ~0, 1, RF_ACTIVE);
429 }
430 if (sc->mem == NULL && (val & m2)) {
431 sc->rtp =
432 (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
433 sc->rgd = (m2 == PCIM_CMD_MEMEN)? FXP_PCI_MMBA : FXP_PCI_IOBA;
434 sc->mem = bus_alloc_resource(dev, sc->rtp, &sc->rgd,
435 0, ~0, 1, RF_ACTIVE);
436 }
437
438 if (!sc->mem) {
439 device_printf(dev, "could not map device registers\n");
440 error = ENXIO;
441 goto fail;
442 }
443 if (bootverbose) {
444 device_printf(dev, "using %s space register mapping\n",
445 sc->rtp == SYS_RES_MEMORY? "memory" : "I/O");
446 }
447
448 sc->sc_st = rman_get_bustag(sc->mem);
449 sc->sc_sh = rman_get_bushandle(sc->mem);
450
451 /*
452 * Allocate our interrupt.
453 */
454 rid = 0;
455 sc->irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
456 RF_SHAREABLE | RF_ACTIVE);
457 if (sc->irq == NULL) {
458 device_printf(dev, "could not map interrupt\n");
459 error = ENXIO;
460 goto fail;
461 }
462
463 error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET,
464 fxp_intr, sc, &sc->ih);
465 if (error) {
466 device_printf(dev, "could not setup irq\n");
467 goto fail;
468 }
469
470 /*
471 * Reset to a stable state.
472 */
473 CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET);
474 DELAY(10);
475
476 /*
477 * Find out how large of an SEEPROM we have.
478 */
479 fxp_autosize_eeprom(sc);
480
481 /*
482 * Determine whether we must use the 503 serial interface.
483 */
484 fxp_read_eeprom(sc, &data, 6, 1);
485 if ((data & FXP_PHY_DEVICE_MASK) != 0 &&
486 (data & FXP_PHY_SERIAL_ONLY))
487 sc->flags |= FXP_FLAG_SERIAL_MEDIA;
488
489 /*
490 * Create the sysctl tree
491 */
492 sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
493 SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
494 device_get_nameunit(dev), CTLFLAG_RD, 0, "");
495 if (sc->sysctl_tree == NULL)
496 goto fail;
497 SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
498 OID_AUTO, "int_delay", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_PRISON,
499 &sc->tunable_int_delay, 0, sysctl_hw_fxp_int_delay, "I",
500 "FXP driver receive interrupt microcode bundling delay");
501 SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
502 OID_AUTO, "bundle_max", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_PRISON,
503 &sc->tunable_bundle_max, 0, sysctl_hw_fxp_bundle_max, "I",
504 "FXP driver receive interrupt microcode bundle size limit");
505
506 /*
507 * Pull in device tunables.
508 */
509 sc->tunable_int_delay = TUNABLE_INT_DELAY;
510 sc->tunable_bundle_max = TUNABLE_BUNDLE_MAX;
511 (void) resource_int_value(device_get_name(dev), device_get_unit(dev),
512 "int_delay", &sc->tunable_int_delay);
513 (void) resource_int_value(device_get_name(dev), device_get_unit(dev),
514 "bundle_max", &sc->tunable_bundle_max);
515
516 /*
517 * Find out the chip revision; lump all 82557 revs together.
518 */
519 fxp_read_eeprom(sc, &data, 5, 1);
520 if ((data >> 8) == 1)
521 sc->revision = FXP_REV_82557;
522 else
523 sc->revision = pci_get_revid(dev);
524
525 /*
526 * Enable workarounds for certain chip revision deficiencies.
527 *
528 * Systems based on the ICH2/ICH2-M chip from Intel, and possibly
529 * some systems based a normal 82559 design, have a defect where
530 * the chip can cause a PCI protocol violation if it receives
531 * a CU_RESUME command when it is entering the IDLE state. The
532 * workaround is to disable Dynamic Standby Mode, so the chip never
533 * deasserts CLKRUN#, and always remains in an active state.
534 *
535 * See Intel 82801BA/82801BAM Specification Update, Errata #30.
536 */
537 i = pci_get_device(dev);
538 if (i == 0x2449 || (i > 0x1030 && i < 0x1039) ||
539 sc->revision >= FXP_REV_82559_A0) {
540 fxp_read_eeprom(sc, &data, 10, 1);
541 if (data & 0x02) { /* STB enable */
542 u_int16_t cksum;
543 int i;
544
545 device_printf(dev,
546 "Disabling dynamic standby mode in EEPROM\n");
547 data &= ~0x02;
548 fxp_write_eeprom(sc, &data, 10, 1);
549 device_printf(dev, "New EEPROM ID: 0x%x\n", data);
550 cksum = 0;
551 for (i = 0; i < (1 << sc->eeprom_size) - 1; i++) {
552 fxp_read_eeprom(sc, &data, i, 1);
553 cksum += data;
554 }
555 i = (1 << sc->eeprom_size) - 1;
556 cksum = 0xBABA - cksum;
557 fxp_read_eeprom(sc, &data, i, 1);
558 fxp_write_eeprom(sc, &cksum, i, 1);
559 device_printf(dev,
560 "EEPROM checksum @ 0x%x: 0x%x -> 0x%x\n",
561 i, data, cksum);
562#if 1
563 /*
564 * If the user elects to continue, try the software
565 * workaround, as it is better than nothing.
566 */
567 sc->flags |= FXP_FLAG_CU_RESUME_BUG;
568#endif
569 }
570 }
571
572 /*
573 * If we are not a 82557 chip, we can enable extended features.
574 */
575 if (sc->revision != FXP_REV_82557) {
576 /*
577 * If MWI is enabled in the PCI configuration, and there
578 * is a valid cacheline size (8 or 16 dwords), then tell
579 * the board to turn on MWI.
580 */
581 if (val & PCIM_CMD_MWRICEN &&
582 pci_read_config(dev, PCIR_CACHELNSZ, 1) != 0)
583 sc->flags |= FXP_FLAG_MWI_ENABLE;
584
585 /* turn on the extended TxCB feature */
586 sc->flags |= FXP_FLAG_EXT_TXCB;
587
588 /* enable reception of long frames for VLAN */
589 sc->flags |= FXP_FLAG_LONG_PKT_EN;
590 }
591
592 /*
593 * Enable use of extended RFDs and TCBs for 82550
594 * and later chips. Note: we need extended TXCB support
595 * too, but that's already enabled by the code above.
596 * Be careful to do this only on the right devices.
597 */
598
599 if (sc->revision == FXP_REV_82550 || sc->revision == FXP_REV_82550_C) {
600 sc->rfa_size = sizeof (struct fxp_rfa);
601 sc->tx_cmd = FXP_CB_COMMAND_IPCBXMIT;
602 sc->flags |= FXP_FLAG_EXT_RFA;
603 } else {
604 sc->rfa_size = sizeof (struct fxp_rfa) - FXP_RFAX_LEN;
605 sc->tx_cmd = FXP_CB_COMMAND_XMIT;
606 }
607
608 /*
609 * Allocate DMA tags and DMA safe memory.
610 */
611 error = bus_dma_tag_create(NULL, 2, 0, BUS_SPACE_MAXADDR_32BIT,
612 BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
613 sc->flags & FXP_FLAG_EXT_RFA ? FXP_NTXSEG - 1 : FXP_NTXSEG,
614 BUS_SPACE_MAXSIZE_32BIT, 0, &sc->fxp_mtag);
615 if (error) {
616 device_printf(dev, "could not allocate dma tag\n");
617 goto fail;
618 }
619
620 error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT,
621 BUS_SPACE_MAXADDR, NULL, NULL, sizeof(struct fxp_stats), 1,
622 BUS_SPACE_MAXSIZE_32BIT, 0, &sc->fxp_stag);
623 if (error) {
624 device_printf(dev, "could not allocate dma tag\n");
625 goto fail;
626 }
627
628 error = bus_dmamem_alloc(sc->fxp_stag, (void **)&sc->fxp_stats,
629 BUS_DMA_NOWAIT, &sc->fxp_smap);
630 if (error)
631 goto failmem;
632 error = bus_dmamap_load(sc->fxp_stag, sc->fxp_smap, sc->fxp_stats,
633 sizeof(struct fxp_stats), fxp_dma_map_addr, &sc->stats_addr, 0);
634 if (error) {
635 device_printf(dev, "could not map the stats buffer\n");
636 goto fail;
637 }
638 bzero(sc->fxp_stats, sizeof(struct fxp_stats));
639
640 error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT,
641 BUS_SPACE_MAXADDR, NULL, NULL, FXP_TXCB_SZ, 1,
642 BUS_SPACE_MAXSIZE_32BIT, 0, &sc->cbl_tag);
643 if (error) {
644 device_printf(dev, "could not allocate dma tag\n");
645 goto fail;
646 }
647
648 error = bus_dmamem_alloc(sc->cbl_tag, (void **)&sc->fxp_desc.cbl_list,
649 BUS_DMA_NOWAIT, &sc->cbl_map);
650 if (error)
651 goto failmem;
652 bzero(sc->fxp_desc.cbl_list, FXP_TXCB_SZ);
653
654 error = bus_dmamap_load(sc->cbl_tag, sc->cbl_map,
655 sc->fxp_desc.cbl_list, FXP_TXCB_SZ, fxp_dma_map_addr,
656 &sc->fxp_desc.cbl_addr, 0);
657 if (error) {
658 device_printf(dev, "could not map DMA memory\n");
659 goto fail;
660 }
661
662 error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT,
663 BUS_SPACE_MAXADDR, NULL, NULL, sizeof(struct fxp_cb_mcs), 1,
664 BUS_SPACE_MAXSIZE_32BIT, 0, &sc->mcs_tag);
665 if (error) {
666 device_printf(dev, "could not allocate dma tag\n");
667 goto fail;
668 }
669
670 error = bus_dmamem_alloc(sc->mcs_tag, (void **)&sc->mcsp,
671 BUS_DMA_NOWAIT, &sc->mcs_map);
672 if (error)
673 goto failmem;
674 error = bus_dmamap_load(sc->mcs_tag, sc->mcs_map, sc->mcsp,
675 sizeof(struct fxp_cb_mcs), fxp_dma_map_addr, &sc->mcs_addr, 0);
676 if (error) {
677 device_printf(dev, "can't map the multicast setup command\n");
678 goto fail;
679 }
680
681 /*
682 * Pre-allocate the TX DMA maps.
683 */
684 for (i = 0; i < FXP_NRFABUFS; i++) {
685 error = bus_dmamap_create(sc->fxp_mtag, 0,
686 &sc->fxp_desc.tx_list[i].tx_map);
687 if (error) {
688 device_printf(dev, "can't create DMA map for TX\n");
689 goto fail;
690 }
691 }
692 error = bus_dmamap_create(sc->fxp_mtag, 0, &sc->spare_map);
693 if (error) {
694 device_printf(dev, "can't create spare DMA map\n");
695 goto fail;
696 }
697
698 /*
699 * Pre-allocate our receive buffers.
700 */
701 sc->fxp_desc.rx_head = sc->fxp_desc.rx_tail = NULL;
702 for (i = 0; i < FXP_NRFABUFS; i++) {
703 rxp = &sc->fxp_desc.rx_list[i];
704 rxp->rx_mbuf = NULL;
705 error = bus_dmamap_create(sc->fxp_mtag, 0, &rxp->rx_map);
706 if (error) {
707 device_printf(dev, "can't create DMA map for RX\n");
708 goto fail;
709 }
710 if (fxp_add_rfabuf(sc, rxp) != 0)
711 goto failmem;
712 }
713
714 /*
715 * Read MAC address.
716 */
717 fxp_read_eeprom(sc, (u_int16_t *)sc->arpcom.ac_enaddr, 0, 3);
718 device_printf(dev, "Ethernet address %6D%s\n",
719 sc->arpcom.ac_enaddr, ":",
720 sc->flags & FXP_FLAG_SERIAL_MEDIA ? ", 10Mbps" : "");
721 if (bootverbose) {
722 device_printf(dev, "PCI IDs: %04x %04x %04x %04x %04x\n",
723 pci_get_vendor(dev), pci_get_device(dev),
724 pci_get_subvendor(dev), pci_get_subdevice(dev),
725 pci_get_revid(dev));
726 fxp_read_eeprom(sc, &data, 10, 1);
727 device_printf(dev, "Dynamic Standby mode is %s\n",
728 data & 0x02 ? "enabled" : "disabled");
729 }
730
731 /*
732 * If this is only a 10Mbps device, then there is no MII, and
733 * the PHY will use a serial interface instead.
734 *
735 * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter
736 * doesn't have a programming interface of any sort. The
737 * media is sensed automatically based on how the link partner
738 * is configured. This is, in essence, manual configuration.
739 */
740 if (sc->flags & FXP_FLAG_SERIAL_MEDIA) {
741 ifmedia_init(&sc->sc_media, 0, fxp_serial_ifmedia_upd,
742 fxp_serial_ifmedia_sts);
743 ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
744 ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
745 } else {
746 if (mii_phy_probe(dev, &sc->miibus, fxp_ifmedia_upd,
747 fxp_ifmedia_sts)) {
748 device_printf(dev, "MII without any PHY!\n");
749 error = ENXIO;
750 goto fail;
751 }
752 }
753
754 ifp = &sc->arpcom.ac_if;
755 ifp->if_unit = device_get_unit(dev);
756 ifp->if_name = "fxp";
757 ifp->if_output = ether_output;
758 ifp->if_baudrate = 100000000;
759 ifp->if_init = fxp_init;
760 ifp->if_softc = sc;
761 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
762 ifp->if_ioctl = fxp_ioctl;
763 ifp->if_start = fxp_start;
764 ifp->if_watchdog = fxp_watchdog;
765
766 /* Enable checksum offload for 82550 or better chips */
767
768 if (sc->flags & FXP_FLAG_EXT_RFA) {
769 ifp->if_hwassist = FXP_CSUM_FEATURES;
770 ifp->if_capabilities = IFCAP_HWCSUM;
771 ifp->if_capenable = ifp->if_capabilities;
772 }
773
774 /*
775 * Attach the interface.
776 */
777 ether_ifattach(ifp, sc->arpcom.ac_enaddr);
778
779 /*
780 * Tell the upper layer(s) we support long frames.
781 */
782 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
783 ifp->if_capabilities |= IFCAP_VLAN_MTU;
784
785 /*
786 * Let the system queue as many packets as we have available
787 * TX descriptors.
788 */
789 ifp->if_snd.ifq_maxlen = FXP_NTXCB - 1;
790
791 splx(s);
792 return (0);
793
794failmem:
795 device_printf(dev, "Failed to malloc memory\n");
796 error = ENOMEM;
797fail:
798 splx(s);
799 fxp_release(sc);
800 return (error);
801}
802
803/*
804 * release all resources
805 */
806static void
807fxp_release(struct fxp_softc *sc)
808{
809 struct fxp_rx *rxp;
810 struct fxp_tx *txp;
811 int i;
812
813 for (i = 0; i < FXP_NRFABUFS; i++) {
814 rxp = &sc->fxp_desc.rx_list[i];
815 if (rxp->rx_mbuf != NULL) {
816 bus_dmamap_sync(sc->fxp_mtag, rxp->rx_map,
817 BUS_DMASYNC_POSTREAD);
818 bus_dmamap_unload(sc->fxp_mtag, rxp->rx_map);
819 m_freem(rxp->rx_mbuf);
820 }
821 bus_dmamap_destroy(sc->fxp_mtag, rxp->rx_map);
822 }
823 bus_dmamap_destroy(sc->fxp_mtag, sc->spare_map);
824
825 for (i = 0; i < FXP_NTXCB; i++) {
826 txp = &sc->fxp_desc.tx_list[i];
827 if (txp->tx_mbuf != NULL) {
828 bus_dmamap_sync(sc->fxp_mtag, txp->tx_map,
829 BUS_DMASYNC_POSTWRITE);
830 bus_dmamap_unload(sc->fxp_mtag, txp->tx_map);
831 m_freem(txp->tx_mbuf);
832 }
833 bus_dmamap_destroy(sc->fxp_mtag, txp->tx_map);
834 }
835
836 bus_generic_detach(sc->dev);
837 if (sc->miibus)
838 device_delete_child(sc->dev, sc->miibus);
839
840 if (sc->fxp_desc.cbl_list) {
841 bus_dmamap_unload(sc->cbl_tag, sc->cbl_map);
842 bus_dmamem_free(sc->cbl_tag, sc->fxp_desc.cbl_list,
843 sc->cbl_map);
844 }
845 if (sc->fxp_stats) {
846 bus_dmamap_unload(sc->fxp_stag, sc->fxp_smap);
847 bus_dmamem_free(sc->fxp_stag, sc->fxp_stats, sc->fxp_smap);
848 }
849 if (sc->mcsp) {
850 bus_dmamap_unload(sc->mcs_tag, sc->mcs_map);
851 bus_dmamem_free(sc->mcs_tag, sc->mcsp, sc->mcs_map);
852 }
853 if (sc->ih)
854 bus_teardown_intr(sc->dev, sc->irq, sc->ih);
855 if (sc->irq)
856 bus_release_resource(sc->dev, SYS_RES_IRQ, 0, sc->irq);
857 if (sc->mem)
858 bus_release_resource(sc->dev, sc->rtp, sc->rgd, sc->mem);
859 if (sc->fxp_mtag)
860 bus_dma_tag_destroy(sc->fxp_mtag);
861 if (sc->fxp_stag)
862 bus_dma_tag_destroy(sc->fxp_stag);
863 if (sc->cbl_tag)
864 bus_dma_tag_destroy(sc->cbl_tag);
865 if (sc->mcs_tag)
866 bus_dma_tag_destroy(sc->mcs_tag);
867
868 sysctl_ctx_free(&sc->sysctl_ctx);
869
870 mtx_destroy(&sc->sc_mtx);
871}
872
873/*
874 * Detach interface.
875 */
876static int
877fxp_detach(device_t dev)
878{
879 struct fxp_softc *sc = device_get_softc(dev);
880 int s;
881
882 /* disable interrupts */
883 CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_DISABLE);
884
885 s = splimp();
886
887 /*
888 * Stop DMA and drop transmit queue.
889 */
890 fxp_stop(sc);
891
892 /*
893 * Close down routes etc.
894 */
895 ether_ifdetach(&sc->arpcom.ac_if);
896
897 /*
898 * Free all media structures.
899 */
900 ifmedia_removeall(&sc->sc_media);
901
902 splx(s);
903
904 /* Release our allocated resources. */
905 fxp_release(sc);
906
907 return (0);
908}
909
910/*
911 * Device shutdown routine. Called at system shutdown after sync. The
912 * main purpose of this routine is to shut off receiver DMA so that
913 * kernel memory doesn't get clobbered during warmboot.
914 */
915static int
916fxp_shutdown(device_t dev)
917{
918 /*
919 * Make sure that DMA is disabled prior to reboot. Not doing
920 * do could allow DMA to corrupt kernel memory during the
921 * reboot before the driver initializes.
922 */
923 fxp_stop((struct fxp_softc *) device_get_softc(dev));
924 return (0);
925}
926
927/*
928 * Device suspend routine. Stop the interface and save some PCI
929 * settings in case the BIOS doesn't restore them properly on
930 * resume.
931 */
932static int
933fxp_suspend(device_t dev)
934{
935 struct fxp_softc *sc = device_get_softc(dev);
936 int i, s;
937
938 s = splimp();
939
940 fxp_stop(sc);
941
942 for (i = 0; i < 5; i++)
943 sc->saved_maps[i] = pci_read_config(dev, PCIR_MAPS + i * 4, 4);
944 sc->saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4);
945 sc->saved_intline = pci_read_config(dev, PCIR_INTLINE, 1);
946 sc->saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
947 sc->saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
948
949 sc->suspended = 1;
950
951 splx(s);
952 return (0);
953}
954
955/*
956 * Device resume routine. Restore some PCI settings in case the BIOS
957 * doesn't, re-enable busmastering, and restart the interface if
958 * appropriate.
959 */
960static int
961fxp_resume(device_t dev)
962{
963 struct fxp_softc *sc = device_get_softc(dev);
964 struct ifnet *ifp = &sc->sc_if;
965 u_int16_t pci_command;
966 int i, s;
967
968 s = splimp();
969
970 fxp_powerstate_d0(dev);
971
972 /* better way to do this? */
973 for (i = 0; i < 5; i++)
974 pci_write_config(dev, PCIR_MAPS + i * 4, sc->saved_maps[i], 4);
975 pci_write_config(dev, PCIR_BIOS, sc->saved_biosaddr, 4);
976 pci_write_config(dev, PCIR_INTLINE, sc->saved_intline, 1);
977 pci_write_config(dev, PCIR_CACHELNSZ, sc->saved_cachelnsz, 1);
978 pci_write_config(dev, PCIR_LATTIMER, sc->saved_lattimer, 1);
979
980 /* reenable busmastering */
981 pci_command = pci_read_config(dev, PCIR_COMMAND, 2);
982 pci_command |= (PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN);
983 pci_write_config(dev, PCIR_COMMAND, pci_command, 2);
984
985 CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET);
986 DELAY(10);
987
988 /* reinitialize interface if necessary */
989 if (ifp->if_flags & IFF_UP)
990 fxp_init(sc);
991
992 sc->suspended = 0;
993
994 splx(s);
995 return (0);
996}
997
998static void
999fxp_eeprom_shiftin(struct fxp_softc *sc, int data, int length)
1000{
1001 u_int16_t reg;
1002 int x;
1003
1004 /*
1005 * Shift in data.
1006 */
1007 for (x = 1 << (length - 1); x; x >>= 1) {
1008 if (data & x)
1009 reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
1010 else
1011 reg = FXP_EEPROM_EECS;
1012 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
1013 DELAY(1);
1014 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg | FXP_EEPROM_EESK);
1015 DELAY(1);
1016 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
1017 DELAY(1);
1018 }
1019}
1020
/*
 * Read from the serial EEPROM. Basically, you manually shift in
 * the read opcode (one bit at a time) and then shift in the address,
 * and then you shift out the data (all of this one bit at a time).
 * The word size is 16 bits, so you have to provide the address for
 * every 16 bits of data.
 *
 * If 'autosize' is set, this also detects the EEPROM's address width:
 * the part pulls EEDO low (the "dummy zero") as soon as it has seen a
 * complete address, and the number of bits shifted up to that point is
 * recorded in sc->eeprom_size (see fxp_autosize_eeprom()).
 */
static u_int16_t
fxp_eeprom_getword(struct fxp_softc *sc, int offset, int autosize)
{
	u_int16_t reg, data;
	int x;

	/* Assert chip select. */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	/*
	 * Shift in read opcode.
	 */
	fxp_eeprom_shiftin(sc, FXP_EEPROM_OPC_READ, 3);
	/*
	 * Shift in address.  While autosizing, 'data' counts the
	 * address bits shifted so far.
	 */
	data = 0;
	for (x = 1 << (sc->eeprom_size - 1); x; x >>= 1) {
		if (offset & x)
			reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
		else
			reg = FXP_EEPROM_EECS;
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
		DELAY(1);
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg | FXP_EEPROM_EESK);
		DELAY(1);
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
		DELAY(1);
		/* Sample the data-out pin after clocking in this bit. */
		reg = CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO;
		data++;
		if (autosize && reg == 0) {
			/* Dummy zero seen: 'data' address bits is the size. */
			sc->eeprom_size = data;
			break;
		}
	}
	/*
	 * Shift out data.
	 */
	data = 0;
	reg = FXP_EEPROM_EECS;
	for (x = 1 << 15; x; x >>= 1) {
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg | FXP_EEPROM_EESK);
		DELAY(1);
		if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO)
			data |= x;
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
		DELAY(1);
	}
	/* Deselect the EEPROM. */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);

	return (data);
}
1079
/*
 * Write one 16-bit word to the serial EEPROM at word address 'offset'.
 *
 * Sequence: erase/write enable (opcode 0x4 with the top two address
 * bits set, the standard 93Cxx-style EWEN command), then the WRITE
 * opcode + address + data, then poll EEDO until the part signals the
 * internal write cycle is done (up to ~50ms), then erase/write
 * disable.  Chip select is dropped between commands to delimit them.
 */
static void
fxp_eeprom_putword(struct fxp_softc *sc, int offset, u_int16_t data)
{
	int i;

	/*
	 * Erase/write enable.
	 */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	fxp_eeprom_shiftin(sc, 0x4, 3);
	fxp_eeprom_shiftin(sc, 0x03 << (sc->eeprom_size - 2), sc->eeprom_size);
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);
	/*
	 * Shift in write opcode, address, data.
	 */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	fxp_eeprom_shiftin(sc, FXP_EEPROM_OPC_WRITE, 3);
	fxp_eeprom_shiftin(sc, offset, sc->eeprom_size);
	fxp_eeprom_shiftin(sc, data, 16);
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);
	/*
	 * Wait for EEPROM to finish up.
	 */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	DELAY(1);
	/* Poll up to 1000 * 50us; EEDO high means the write completed. */
	for (i = 0; i < 1000; i++) {
		if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO)
			break;
		DELAY(50);
	}
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);
	/*
	 * Erase/write disable.
	 */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	fxp_eeprom_shiftin(sc, 0x4, 3);
	fxp_eeprom_shiftin(sc, 0, sc->eeprom_size);
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);
}
1123
1124/*
1125 * From NetBSD:
1126 *
1127 * Figure out EEPROM size.
1128 *
1129 * 559's can have either 64-word or 256-word EEPROMs, the 558
1130 * datasheet only talks about 64-word EEPROMs, and the 557 datasheet
1131 * talks about the existance of 16 to 256 word EEPROMs.
1132 *
1133 * The only known sizes are 64 and 256, where the 256 version is used
1134 * by CardBus cards to store CIS information.
1135 *
1136 * The address is shifted in msb-to-lsb, and after the last
1137 * address-bit the EEPROM is supposed to output a `dummy zero' bit,
1138 * after which follows the actual data. We try to detect this zero, by
1139 * probing the data-out bit in the EEPROM control register just after
1140 * having shifted in a bit. If the bit is zero, we assume we've
1141 * shifted enough address bits. The data-out should be tri-state,
1142 * before this, which should translate to a logical one.
1143 */
1144static void
1145fxp_autosize_eeprom(struct fxp_softc *sc)
1146{
1147
1148 /* guess maximum size of 256 words */
1149 sc->eeprom_size = 8;
1150
1151 /* autosize */
1152 (void) fxp_eeprom_getword(sc, 0, 1);
1153}
1154
1155static void
1156fxp_read_eeprom(struct fxp_softc *sc, u_short *data, int offset, int words)
1157{
1158 int i;
1159
1160 for (i = 0; i < words; i++)
1161 data[i] = fxp_eeprom_getword(sc, offset + i, 0);
1162}
1163
1164static void
1165fxp_write_eeprom(struct fxp_softc *sc, u_short *data, int offset, int words)
1166{
1167 int i;
1168
1169 for (i = 0; i < words; i++)
1170 fxp_eeprom_putword(sc, offset + i, data[i]);
1171}
1172
/*
 * bus_dmamap_load callback for transmit mbufs: copy the resulting DMA
 * segment list into the TBD array of the TxCB currently being filled
 * by fxp_start() (the descriptor after tx_last).  On error, nothing
 * is written and fxp_start() inspects the bus_dmamap_load return.
 */
static void
fxp_dma_map_txbuf(void *arg, bus_dma_segment_t *segs, int nseg,
    bus_size_t mapsize, int error)
{
	struct fxp_softc *sc;
	struct fxp_cb_tx *txp;
	int i;

	if (error)
		return;

	KASSERT(nseg <= FXP_NTXSEG, ("too many DMA segments"));

	sc = arg;
	/* The TxCB being filled is the one after the current tail. */
	txp = sc->fxp_desc.tx_last->tx_next->tx_cb;
	for (i = 0; i < nseg; i++) {
		KASSERT(segs[i].ds_len <= MCLBYTES, ("segment size too large"));
		/*
		 * If this is an 82550/82551, then we're using extended
		 * TxCBs _and_ we're using checksum offload. This means
		 * that the TxCB is really an IPCB. One major difference
		 * between the two is that with plain extended TxCBs,
		 * the bottom half of the TxCB contains two entries from
		 * the TBD array, whereas IPCBs contain just one entry:
		 * one entry (8 bytes) has been sacrificed for the TCP/IP
		 * checksum offload control bits. So to make things work
		 * right, we have to start filling in the TBD array
		 * starting from a different place depending on whether
		 * the chip is an 82550/82551 or not.
		 */
		if (sc->flags & FXP_FLAG_EXT_RFA) {
			txp->tbd[i + 1].tb_addr = segs[i].ds_addr;
			txp->tbd[i + 1].tb_size = segs[i].ds_len;
		} else {
			txp->tbd[i].tb_addr = segs[i].ds_addr;
			txp->tbd[i].tb_size = segs[i].ds_len;
		}
	}
	txp->tbd_number = nseg;
}
1213
/*
 * Start packet transmission on the interface.
 *
 * Dequeues packets from the interface send queue and appends a TxCB
 * for each to the command list, leaving one TxCB free so that
 * fxp_mc_setup() can always insert a NOP.  After queueing, a
 * CU_RESUME is issued in case the command unit had gone idle.
 */
static void
fxp_start(struct ifnet *ifp)
{
	struct fxp_softc *sc = ifp->if_softc;
	struct fxp_tx *txp;
	struct mbuf *mb_head;
	int error;

	/*
	 * See if we need to suspend xmit until the multicast filter
	 * has been reprogrammed (which can only be done at the head
	 * of the command chain).
	 */
	if (sc->need_mcsetup) {
		return;
	}

	txp = NULL;

	/*
	 * We're finished if there is nothing more to add to the list or if
	 * we're all filled up with buffers to transmit.
	 * NOTE: One TxCB is reserved to guarantee that fxp_mc_setup() can add
	 * a NOP command when needed.
	 */
	while (ifp->if_snd.ifq_head != NULL && sc->tx_queued < FXP_NTXCB - 1) {

		/*
		 * Grab a packet to transmit.
		 */
		IF_DEQUEUE(&ifp->if_snd, mb_head);

		/*
		 * Get pointer to next available tx desc.
		 */
		txp = sc->fxp_desc.tx_last->tx_next;

		/*
		 * Deal with TCP/IP checksum offload. Note that
		 * in order for TCP checksum offload to work,
		 * the pseudo header checksum must have already
		 * been computed and stored in the checksum field
		 * in the TCP header. The stack should have
		 * already done this for us.
		 */

		if (mb_head->m_pkthdr.csum_flags) {
			if (mb_head->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
				txp->tx_cb->ipcb_ip_activation_high =
				    FXP_IPCB_HARDWAREPARSING_ENABLE;
				txp->tx_cb->ipcb_ip_schedule =
				    FXP_IPCB_TCPUDP_CHECKSUM_ENABLE;
				if (mb_head->m_pkthdr.csum_flags & CSUM_TCP)
					txp->tx_cb->ipcb_ip_schedule |=
					    FXP_IPCB_TCP_PACKET;
			}
#ifdef FXP_IP_CSUM_WAR
		/*
		 * XXX The 82550 chip appears to have trouble
		 * dealing with IP header checksums in very small
		 * datagrams, namely fragments from 1 to 3 bytes
		 * in size. For example, say you want to transmit
		 * a UDP packet of 1473 bytes. The packet will be
		 * fragmented over two IP datagrams, the latter
		 * containing only one byte of data. The 82550 will
		 * botch the header checksum on the 1-byte fragment.
		 * As long as the datagram contains 4 or more bytes
		 * of data, you're ok.
		 *
		 * The following code attempts to work around this
		 * problem: if the datagram is less than 38 bytes
		 * in size (14 bytes ether header, 20 bytes IP header,
		 * plus 4 bytes of data), we punt and compute the IP
		 * header checksum by hand. This workaround doesn't
		 * work very well, however, since it can be fooled
		 * by things like VLAN tags and IP options that make
		 * the header sizes/offsets vary.
		 */

			if (mb_head->m_pkthdr.csum_flags & CSUM_IP) {
				if (mb_head->m_pkthdr.len < 38) {
					struct ip *ip;
					mb_head->m_data += ETHER_HDR_LEN;
					ip = mtod(mb_head, struct ip *);
					ip->ip_sum = in_cksum(mb_head,
					    ip->ip_hl << 2);
					mb_head->m_data -= ETHER_HDR_LEN;
				} else {
					txp->tx_cb->ipcb_ip_activation_high =
					    FXP_IPCB_HARDWAREPARSING_ENABLE;
					txp->tx_cb->ipcb_ip_schedule |=
					    FXP_IPCB_IP_CHECKSUM_ENABLE;
				}
			}
#endif
		}

		/*
		 * Go through each of the mbufs in the chain and initialize
		 * the transmit buffer descriptors with the physical address
		 * and size of the mbuf.
		 */
		error = bus_dmamap_load_mbuf(sc->fxp_mtag, txp->tx_map,
		    mb_head, fxp_dma_map_txbuf, sc, 0);

		/* EFBIG is retried below after compacting the chain. */
		if (error && error != EFBIG) {
			device_printf(sc->dev, "can't map mbuf (error %d)\n",
			    error);
			m_freem(mb_head);
			break;
		}

		if (error) {
			struct mbuf *mn;

			/*
			 * We ran out of segments. We have to recopy this
			 * mbuf chain first. Bail out if we can't get the
			 * new buffers.
			 */
			MGETHDR(mn, M_DONTWAIT, MT_DATA);
			if (mn == NULL) {
				m_freem(mb_head);
				break;
			}
			if (mb_head->m_pkthdr.len > MHLEN) {
				MCLGET(mn, M_DONTWAIT);
				if ((mn->m_flags & M_EXT) == 0) {
					m_freem(mn);
					m_freem(mb_head);
					break;
				}
			}
			/* Flatten the chain into the single new mbuf. */
			m_copydata(mb_head, 0, mb_head->m_pkthdr.len,
			    mtod(mn, caddr_t));
			mn->m_pkthdr.len = mn->m_len = mb_head->m_pkthdr.len;
			m_freem(mb_head);
			mb_head = mn;
			error = bus_dmamap_load_mbuf(sc->fxp_mtag, txp->tx_map,
			    mb_head, fxp_dma_map_txbuf, sc, 0);
			if (error) {
				device_printf(sc->dev,
				    "can't map mbuf (error %d)\n", error);
				m_freem(mb_head);
				break;
			}
		}

		bus_dmamap_sync(sc->fxp_mtag, txp->tx_map,
		    BUS_DMASYNC_PREWRITE);

		txp->tx_mbuf = mb_head;
		txp->tx_cb->cb_status = 0;
		txp->tx_cb->byte_count = 0;
		/*
		 * Only request a completion interrupt every
		 * FXP_CXINT_THRESH packets to cut interrupt load.
		 */
		if (sc->tx_queued != FXP_CXINT_THRESH - 1) {
			txp->tx_cb->cb_command =
			    sc->tx_cmd | FXP_CB_COMMAND_SF |
			    FXP_CB_COMMAND_S;
		} else {
			txp->tx_cb->cb_command =
			    sc->tx_cmd | FXP_CB_COMMAND_SF |
			    FXP_CB_COMMAND_S | FXP_CB_COMMAND_I;
			/*
			 * Set a 5 second timer just in case we don't hear
			 * from the card again.
			 */
			ifp->if_timer = 5;
		}
		txp->tx_cb->tx_threshold = tx_threshold;

		/*
		 * Advance the end of list forward.
		 */

#ifdef __alpha__
		/*
		 * On platforms which can't access memory in 16-bit
		 * granularities, we must prevent the card from DMA'ing
		 * up the status while we update the command field.
		 * This could cause us to overwrite the completion status.
		 */
		atomic_clear_short(&sc->fxp_desc.tx_last->tx_cb->cb_command,
		    FXP_CB_COMMAND_S);
#else
		sc->fxp_desc.tx_last->tx_cb->cb_command &= ~FXP_CB_COMMAND_S;
#endif /*__alpha__*/
		sc->fxp_desc.tx_last = txp;

		/*
		 * Advance the beginning of the list forward if there are
		 * no other packets queued (when nothing is queued, tx_first
		 * sits on the last TxCB that was sent out).
		 */
		if (sc->tx_queued == 0)
			sc->fxp_desc.tx_first = txp;

		sc->tx_queued++;

		/*
		 * Pass packet to bpf if there is a listener.
		 */
		BPF_MTAP(ifp, mb_head);
	}
	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);

	/*
	 * We're finished. If we added to the list, issue a RESUME to get DMA
	 * going again if suspended.
	 */
	if (txp != NULL) {
		fxp_scb_wait(sc);
		fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_RESUME);
	}
}
1431
static void fxp_intr_body(struct fxp_softc *sc, u_int8_t statack, int count);

#ifdef DEVICE_POLLING
static poll_handler_t fxp_poll;

/*
 * DEVICE_POLLING entry point.  On POLL_DEREGISTER, re-enable chip
 * interrupts.  Otherwise run the common interrupt body with a
 * synthesized status; POLL_AND_CHECK_STATUS additionally reads and
 * acknowledges any extra pending status bits from the chip.
 */
static void
fxp_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct fxp_softc *sc = ifp->if_softc;
	u_int8_t statack;

	if (cmd == POLL_DEREGISTER) {	/* final call, enable interrupts */
		CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, 0);
		return;
	}
	/* Pretend the usual rx/tx completion events occurred. */
	statack = FXP_SCB_STATACK_CXTNO | FXP_SCB_STATACK_CNA |
	    FXP_SCB_STATACK_FR;
	if (cmd == POLL_AND_CHECK_STATUS) {
		u_int8_t tmp;

		tmp = CSR_READ_1(sc, FXP_CSR_SCB_STATACK);
		/* 0xff may mean the card was ejected; 0 means no events. */
		if (tmp == 0xff || tmp == 0)
			return; /* nothing to do */
		tmp &= ~statack;
		/* ack what we can */
		if (tmp != 0)
			CSR_WRITE_1(sc, FXP_CSR_SCB_STATACK, tmp);
		statack |= tmp;
	}
	fxp_intr_body(sc, statack, count);
}
#endif /* DEVICE_POLLING */
1464
/*
 * Process interface interrupts.
 *
 * Loops acknowledging and servicing status bits until the chip reports
 * none pending.  Under DEVICE_POLLING this also handles the switch to
 * polling mode by registering the poll handler and masking interrupts.
 */
static void
fxp_intr(void *xsc)
{
	struct fxp_softc *sc = xsc;
	u_int8_t statack;

#ifdef DEVICE_POLLING
	struct ifnet *ifp = &sc->sc_if;

	/* Already polling: the poll handler will do the work. */
	if (ifp->if_flags & IFF_POLLING)
		return;
	if (ether_poll_register(fxp_poll, ifp)) {
		/* disable interrupts */
		CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_DISABLE);
		fxp_poll(ifp, 0, 1);
		return;
	}
#endif

	/* Ignore stray interrupts while suspended. */
	if (sc->suspended) {
		return;
	}

	while ((statack = CSR_READ_1(sc, FXP_CSR_SCB_STATACK)) != 0) {
		/*
		 * It should not be possible to have all bits set; the
		 * FXP_SCB_INTR_SWI bit always returns 0 on a read. If
		 * all bits are set, this may indicate that the card has
		 * been physically ejected, so ignore it.
		 */
		if (statack == 0xff)
			return;

		/*
		 * First ACK all the interrupts in this pass.
		 */
		CSR_WRITE_1(sc, FXP_CSR_SCB_STATACK, statack);
		fxp_intr_body(sc, statack, -1);
	}
}
1508
/*
 * Reclaim completed transmit descriptors.  Walks the TxCB list from
 * tx_first, and for every command the chip has marked complete
 * (FXP_CB_STATUS_C) unloads the DMA map and frees the mbuf, then
 * leaves tx_first pointing at the first still-pending TxCB.
 */
static void
fxp_txeof(struct fxp_softc *sc)
{
	struct fxp_tx *txp;

	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREREAD);
	for (txp = sc->fxp_desc.tx_first; sc->tx_queued &&
	    (txp->tx_cb->cb_status & FXP_CB_STATUS_C) != 0;
	    txp = txp->tx_next) {
		/* tx_mbuf can be NULL for command-only TxCBs (e.g. NOPs). */
		if (txp->tx_mbuf != NULL) {
			bus_dmamap_sync(sc->fxp_mtag, txp->tx_map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->fxp_mtag, txp->tx_map);
			m_freem(txp->tx_mbuf);
			txp->tx_mbuf = NULL;
			/* clear this to reset csum offload bits */
			txp->tx_cb->tbd[0].tb_addr = 0;
		}
		sc->tx_queued--;
	}
	sc->fxp_desc.tx_first = txp;
	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
}
1532
/*
 * Common interrupt/poll worker: reclaim completed transmits, then
 * drain the receive frame area, handing completed frames to the
 * stack.  'statack' is the set of already-acknowledged status bits;
 * 'count' bounds the number of rx frames processed (-1 = unbounded,
 * used from the interrupt path; >= 0 from DEVICE_POLLING).
 */
static void
fxp_intr_body(struct fxp_softc *sc, u_int8_t statack, int count)
{
	struct ifnet *ifp = &sc->sc_if;
	struct mbuf *m;
	struct fxp_rx *rxp;
	struct fxp_rfa *rfa;
	int rnr = (statack & FXP_SCB_STATACK_RNR) ? 1 : 0;

	if (rnr)
		fxp_rnr++;
#ifdef DEVICE_POLLING
	/* Pick up a deferred RNR condition if `count' ran out last time. */
	if (sc->flags & FXP_FLAG_DEFERRED_RNR) {
		sc->flags &= ~FXP_FLAG_DEFERRED_RNR;
		rnr = 1;
	}
#endif

	/*
	 * Free any finished transmit mbuf chains.
	 *
	 * Handle the CNA event like a CXTNO event. It used to
	 * be that this event (control unit not ready) was not
	 * encountered, but it is now with the SMPng modifications.
	 * The exact sequence of events that occur when the interface
	 * is brought up are different now, and if this event
	 * goes unhandled, the configuration/rxfilter setup sequence
	 * can stall for several seconds. The result is that no
	 * packets go out onto the wire for about 5 to 10 seconds
	 * after the interface is ifconfig'ed for the first time.
	 */
	if (statack & (FXP_SCB_STATACK_CXTNO | FXP_SCB_STATACK_CNA)) {
		fxp_txeof(sc);

		ifp->if_timer = 0;
		if (sc->tx_queued == 0) {
			/* Run a pending multicast setup now that tx is idle. */
			if (sc->need_mcsetup)
				fxp_mc_setup(sc);
		}
		/*
		 * Try to start more packets transmitting.
		 */
		if (ifp->if_snd.ifq_head != NULL)
			fxp_start(ifp);
	}

	/*
	 * Just return if nothing happened on the receive side.
	 */
	if (!rnr && (statack & FXP_SCB_STATACK_FR) == 0)
		return;

	/*
	 * Process receiver interrupts. If a no-resource (RNR)
	 * condition exists, get whatever packets we can and
	 * re-start the receiver.
	 *
	 * When using polling, we do not process the list to completion,
	 * so when we get an RNR interrupt we must defer the restart
	 * until we hit the last buffer with the C bit set.
	 * If we run out of cycles and rfa_headm has the C bit set,
	 * record the pending RNR in the FXP_FLAG_DEFERRED_RNR flag so
	 * that the info will be used in the subsequent polling cycle.
	 */
	for (;;) {
		rxp = sc->fxp_desc.rx_head;
		m = rxp->rx_mbuf;
		/* The RFA lives at the front of the mbuf's cluster. */
		rfa = (struct fxp_rfa *)(m->m_ext.ext_buf +
		    RFA_ALIGNMENT_FUDGE);
		bus_dmamap_sync(sc->fxp_mtag, rxp->rx_map,
		    BUS_DMASYNC_POSTREAD);

#ifdef DEVICE_POLLING /* loop at most count times if count >=0 */
		if (count >= 0 && count-- == 0) {
			if (rnr) {
				/* Defer RNR processing until the next time. */
				sc->flags |= FXP_FLAG_DEFERRED_RNR;
				rnr = 0;
			}
			break;
		}
#endif /* DEVICE_POLLING */

		/* Stop at the first frame the chip hasn't completed. */
		if ((rfa->rfa_status & FXP_RFA_STATUS_C) == 0)
			break;

		/*
		 * Advance head forward.
		 */
		sc->fxp_desc.rx_head = rxp->rx_next;

		/*
		 * Add a new buffer to the receive chain.
		 * If this fails, the old buffer is recycled
		 * instead.
		 */
		if (fxp_add_rfabuf(sc, rxp) == 0) {
			int total_len;

			/*
			 * Fetch packet length (the top 2 bits of
			 * actual_size are flags set by the controller
			 * upon completion), and drop the packet in case
			 * of bogus length or CRC errors.
			 */
			total_len = rfa->actual_size & 0x3fff;
			if (total_len < sizeof(struct ether_header) ||
			    total_len > MCLBYTES - RFA_ALIGNMENT_FUDGE -
				sc->rfa_size ||
			    rfa->rfa_status & FXP_RFA_STATUS_CRC) {
				m_freem(m);
				continue;
			}

			/* Do IP checksum checking. */
			if (rfa->rfa_status & FXP_RFA_STATUS_PARSE) {
				if (rfa->rfax_csum_sts &
				    FXP_RFDX_CS_IP_CSUM_BIT_VALID)
					m->m_pkthdr.csum_flags |=
					    CSUM_IP_CHECKED;
				if (rfa->rfax_csum_sts &
				    FXP_RFDX_CS_IP_CSUM_VALID)
					m->m_pkthdr.csum_flags |=
					    CSUM_IP_VALID;
				if ((rfa->rfax_csum_sts &
				    FXP_RFDX_CS_TCPUDP_CSUM_BIT_VALID) &&
				    (rfa->rfax_csum_sts &
				    FXP_RFDX_CS_TCPUDP_CSUM_VALID)) {
					m->m_pkthdr.csum_flags |=
					    CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
					m->m_pkthdr.csum_data = 0xffff;
				}
			}

			m->m_pkthdr.len = m->m_len = total_len;
			m->m_pkthdr.rcvif = ifp;

			(*ifp->if_input)(ifp, m);
		}
	}
	if (rnr) {
		/* Restart the receiver at the new head of the RFA list. */
		fxp_scb_wait(sc);
		CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL,
		    sc->fxp_desc.rx_head->rx_addr);
		fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_START);
	}
}
1681
/*
 * Update packet in/out/collision statistics. The i82557 doesn't
 * allow you to access these counters without doing a fairly
 * expensive DMA to get _all_ of the statistics it maintains, so
 * we do this operation here only once per second. The statistics
 * counters in the kernel are updated from the previous dump-stats
 * DMA and then a new dump-stats DMA is started. The on-chip
 * counters are zeroed when the DMA completes. If we can't start
 * the DMA immediately, we don't wait - we just prepare to read
 * them again next time.
 */
static void
fxp_tick(void *xsc)
{
	struct fxp_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_if;
	struct fxp_stats *sp = sc->fxp_stats;
	int s;

	/* Fold the counters from the last dump-stats DMA into ifnet. */
	bus_dmamap_sync(sc->fxp_stag, sc->fxp_smap, BUS_DMASYNC_POSTREAD);
	ifp->if_opackets += sp->tx_good;
	ifp->if_collisions += sp->tx_total_collisions;
	if (sp->rx_good) {
		ifp->if_ipackets += sp->rx_good;
		sc->rx_idle_secs = 0;
	} else {
		/*
		 * Receiver's been idle for another second.
		 */
		sc->rx_idle_secs++;
	}
	ifp->if_ierrors +=
	    sp->rx_crc_errors +
	    sp->rx_alignment_errors +
	    sp->rx_rnr_errors +
	    sp->rx_overrun_errors;
	/*
	 * If any transmit underruns occurred, bump up the transmit
	 * threshold by another 512 bytes (64 * 8).
	 */
	if (sp->tx_underruns) {
		ifp->if_oerrors += sp->tx_underruns;
		if (tx_threshold < 192)
			tx_threshold += 64;
	}
	s = splimp();
	/*
	 * Release any xmit buffers that have completed DMA. This isn't
	 * strictly necessary to do here, but it's advantageous for mbufs
	 * with external storage to be released in a timely manner rather
	 * than being deferred for a potentially long time. This limits
	 * the delay to a maximum of one second.
	 */
	fxp_txeof(sc);

	/*
	 * If we haven't received any packets in FXP_MAX_RX_IDLE seconds,
	 * then assume the receiver has locked up and attempt to clear
	 * the condition by reprogramming the multicast filter. This is
	 * a work-around for a bug in the 82557 where the receiver locks
	 * up if it gets certain types of garbage in the synchronization
	 * bits prior to the packet header. This bug is supposed to only
	 * occur in 10Mbps mode, but has been seen to occur in 100Mbps
	 * mode as well (perhaps due to a 10/100 speed transition).
	 */
	if (sc->rx_idle_secs > FXP_MAX_RX_IDLE) {
		sc->rx_idle_secs = 0;
		fxp_mc_setup(sc);
	}
	/*
	 * If there is no pending command, start another stats
	 * dump. Otherwise punt for now.
	 */
	if (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) == 0) {
		/*
		 * Start another stats dump.
		 */
		bus_dmamap_sync(sc->fxp_stag, sc->fxp_smap,
		    BUS_DMASYNC_PREREAD);
		fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_DUMPRESET);
	} else {
		/*
		 * A previous command is still waiting to be accepted.
		 * Just zero our copy of the stats and wait for the
		 * next timer event to update them.
		 */
		sp->tx_good = 0;
		sp->tx_underruns = 0;
		sp->tx_total_collisions = 0;

		sp->rx_good = 0;
		sp->rx_crc_errors = 0;
		sp->rx_alignment_errors = 0;
		sp->rx_rnr_errors = 0;
		sp->rx_overrun_errors = 0;
	}
	/* Drive PHY autonegotiation/link state. */
	if (sc->miibus != NULL)
		mii_tick(device_get_softc(sc->miibus));
	splx(s);
	/*
	 * Schedule another timeout one second from now.
	 */
	sc->stat_ch = timeout(fxp_tick, sc, hz);
}
1786
/*
 * Stop the interface. Cancels the statistics updater and resets
 * the interface.  Also releases all queued transmit mbufs and their
 * DMA maps, leaving the TxCB list empty (tx_queued == 0).
 */
static void
fxp_stop(struct fxp_softc *sc)
{
	struct ifnet *ifp = &sc->sc_if;
	struct fxp_tx *txp;
	int i;

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

#ifdef DEVICE_POLLING
	ether_poll_deregister(ifp);
#endif
	/*
	 * Cancel stats updater.
	 */
	untimeout(fxp_tick, sc, sc->stat_ch);

	/*
	 * Issue software reset, which also unloads the microcode.
	 */
	sc->flags &= ~FXP_FLAG_UCODE;
	CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SOFTWARE_RESET);
	DELAY(50);

	/*
	 * Release any xmit buffers.
	 */
	txp = sc->fxp_desc.tx_list;
	if (txp != NULL) {
		for (i = 0; i < FXP_NTXCB; i++) {
			if (txp[i].tx_mbuf != NULL) {
				bus_dmamap_sync(sc->fxp_mtag, txp[i].tx_map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(sc->fxp_mtag, txp[i].tx_map);
				m_freem(txp[i].tx_mbuf);
				txp[i].tx_mbuf = NULL;
				/* clear this to reset csum offload bits */
				txp[i].tx_cb->tbd[0].tb_addr = 0;
			}
		}
	}
	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
	sc->tx_queued = 0;
}
1836
1837/*
1838 * Watchdog/transmission transmit timeout handler. Called when a
1839 * transmission is started on the interface, but no interrupt is
1840 * received before the timeout. This usually indicates that the
1841 * card has wedged for some reason.
1842 */
1843static void
1844fxp_watchdog(struct ifnet *ifp)
1845{
1846 struct fxp_softc *sc = ifp->if_softc;
1847
1848 device_printf(sc->dev, "device timeout\n");
1849 ifp->if_oerrors++;
1850
1851 fxp_init(sc);
1852}
1853
1854static void
1855fxp_init(void *xsc)
1856{
1857 struct fxp_softc *sc = xsc;
1858 struct ifnet *ifp = &sc->sc_if;
1859 struct fxp_cb_config *cbp;
1860 struct fxp_cb_ias *cb_ias;
1861 struct fxp_cb_tx *tcbp;
1862 struct fxp_tx *txp;
1863 struct fxp_cb_mcs *mcsp;
1864 int i, prm, s;
1865
1866 s = splimp();
1867 /*
1868 * Cancel any pending I/O
1869 */
1870 fxp_stop(sc);
1871
1872 prm = (ifp->if_flags & IFF_PROMISC) ? 1 : 0;
1873
1874 /*
1875 * Initialize base of CBL and RFA memory. Loading with zero
1876 * sets it up for regular linear addressing.
1877 */
1878 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, 0);
1879 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_BASE);
1880
1881 fxp_scb_wait(sc);
1882 fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_BASE);
1883
1884 /*
1885 * Initialize base of dump-stats buffer.
1886 */
1887 fxp_scb_wait(sc);
1888 bus_dmamap_sync(sc->fxp_stag, sc->fxp_smap, BUS_DMASYNC_PREREAD);
1889 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->stats_addr);
1890 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_DUMP_ADR);
1891
1892 /*
1893 * Attempt to load microcode if requested.
1894 */
1895 if (ifp->if_flags & IFF_LINK0 && (sc->flags & FXP_FLAG_UCODE) == 0)
1896 fxp_load_ucode(sc);
1897
1898 /*
1899 * Initialize the multicast address list.
1900 */
1901 if (fxp_mc_addrs(sc)) {
1902 mcsp = sc->mcsp;
1903 mcsp->cb_status = 0;
1904 mcsp->cb_command = FXP_CB_COMMAND_MCAS | FXP_CB_COMMAND_EL;
1905 mcsp->link_addr = -1;
1906 /*
1907 * Start the multicast setup command.
1908 */
1909 fxp_scb_wait(sc);
1910 bus_dmamap_sync(sc->mcs_tag, sc->mcs_map, BUS_DMASYNC_PREWRITE);
1911 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->mcs_addr);
1912 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
1913 /* ...and wait for it to complete. */
1914 fxp_dma_wait(&mcsp->cb_status, sc);
1915 bus_dmamap_sync(sc->mcs_tag, sc->mcs_map,
1916 BUS_DMASYNC_POSTWRITE);
1917 }
1918
1919 /*
1920 * We temporarily use memory that contains the TxCB list to
1921 * construct the config CB. The TxCB list memory is rebuilt
1922 * later.
1923 */
1924 cbp = (struct fxp_cb_config *)sc->fxp_desc.cbl_list;
1925
1926 /*
1927 * This bcopy is kind of disgusting, but there are a bunch of must be
1928 * zero and must be one bits in this structure and this is the easiest
1929 * way to initialize them all to proper values.
1930 */
1931 bcopy(fxp_cb_config_template, cbp, sizeof(fxp_cb_config_template));
1932
1933 cbp->cb_status = 0;
1934 cbp->cb_command = FXP_CB_COMMAND_CONFIG | FXP_CB_COMMAND_EL;
1935 cbp->link_addr = -1; /* (no) next command */
1936 cbp->byte_count = sc->flags & FXP_FLAG_EXT_RFA ? 32 : 22;
1937 cbp->rx_fifo_limit = 8; /* rx fifo threshold (32 bytes) */
1938 cbp->tx_fifo_limit = 0; /* tx fifo threshold (0 bytes) */
1939 cbp->adaptive_ifs = 0; /* (no) adaptive interframe spacing */
1940 cbp->mwi_enable = sc->flags & FXP_FLAG_MWI_ENABLE ? 1 : 0;
1941 cbp->type_enable = 0; /* actually reserved */
1942 cbp->read_align_en = sc->flags & FXP_FLAG_READ_ALIGN ? 1 : 0;
1943 cbp->end_wr_on_cl = sc->flags & FXP_FLAG_WRITE_ALIGN ? 1 : 0;
1944 cbp->rx_dma_bytecount = 0; /* (no) rx DMA max */
1945 cbp->tx_dma_bytecount = 0; /* (no) tx DMA max */
1946 cbp->dma_mbce = 0; /* (disable) dma max counters */
1947 cbp->late_scb = 0; /* (don't) defer SCB update */
1948 cbp->direct_dma_dis = 1; /* disable direct rcv dma mode */
1949 cbp->tno_int_or_tco_en =0; /* (disable) tx not okay interrupt */
1950 cbp->ci_int = 1; /* interrupt on CU idle */
1951 cbp->ext_txcb_dis = sc->flags & FXP_FLAG_EXT_TXCB ? 0 : 1;
1952 cbp->ext_stats_dis = 1; /* disable extended counters */
1953 cbp->keep_overrun_rx = 0; /* don't pass overrun frames to host */
1954 cbp->save_bf = sc->revision == FXP_REV_82557 ? 1 : prm;
1955 cbp->disc_short_rx = !prm; /* discard short packets */
1956 cbp->underrun_retry = 1; /* retry mode (once) on DMA underrun */
1957 cbp->two_frames = 0; /* do not limit FIFO to 2 frames */
1958 cbp->dyn_tbd = 0; /* (no) dynamic TBD mode */
1959 cbp->ext_rfa = sc->flags & FXP_FLAG_EXT_RFA ? 1 : 0;
1960 cbp->mediatype = sc->flags & FXP_FLAG_SERIAL_MEDIA ? 0 : 1;
1961 cbp->csma_dis = 0; /* (don't) disable link */
1962 cbp->tcp_udp_cksum = 0; /* (don't) enable checksum */
1963 cbp->vlan_tco = 0; /* (don't) enable vlan wakeup */
1964 cbp->link_wake_en = 0; /* (don't) assert PME# on link change */
1965 cbp->arp_wake_en = 0; /* (don't) assert PME# on arp */
1966 cbp->mc_wake_en = 0; /* (don't) enable PME# on mcmatch */
1967 cbp->nsai = 1; /* (don't) disable source addr insert */
1968 cbp->preamble_length = 2; /* (7 byte) preamble */
1969 cbp->loopback = 0; /* (don't) loopback */
1970 cbp->linear_priority = 0; /* (normal CSMA/CD operation) */
1971 cbp->linear_pri_mode = 0; /* (wait after xmit only) */
1972 cbp->interfrm_spacing = 6; /* (96 bits of) interframe spacing */
1973 cbp->promiscuous = prm; /* promiscuous mode */
1974 cbp->bcast_disable = 0; /* (don't) disable broadcasts */
1975 cbp->wait_after_win = 0; /* (don't) enable modified backoff alg*/
1976 cbp->ignore_ul = 0; /* consider U/L bit in IA matching */
1977 cbp->crc16_en = 0; /* (don't) enable crc-16 algorithm */
1978 cbp->crscdt = sc->flags & FXP_FLAG_SERIAL_MEDIA ? 1 : 0;
1979
1980 cbp->stripping = !prm; /* truncate rx packet to byte count */
1981 cbp->padding = 1; /* (do) pad short tx packets */
1982 cbp->rcv_crc_xfer = 0; /* (don't) xfer CRC to host */
1983 cbp->long_rx_en = sc->flags & FXP_FLAG_LONG_PKT_EN ? 1 : 0;
1984 cbp->ia_wake_en = 0; /* (don't) wake up on address match */
1985 cbp->magic_pkt_dis = 0; /* (don't) disable magic packet */
1986 /* must set wake_en in PMCSR also */
1987 cbp->force_fdx = 0; /* (don't) force full duplex */
1988 cbp->fdx_pin_en = 1; /* (enable) FDX# pin */
1989 cbp->multi_ia = 0; /* (don't) accept multiple IAs */
1990 cbp->mc_all = sc->flags & FXP_FLAG_ALL_MCAST ? 1 : 0;
1991 cbp->gamla_rx = sc->flags & FXP_FLAG_EXT_RFA ? 1 : 0;
1992
1993 if (sc->revision == FXP_REV_82557) {
1994 /*
1995 * The 82557 has no hardware flow control, the values
1996 * below are the defaults for the chip.
1997 */
1998 cbp->fc_delay_lsb = 0;
1999 cbp->fc_delay_msb = 0x40;
2000 cbp->pri_fc_thresh = 3;
2001 cbp->tx_fc_dis = 0;
2002 cbp->rx_fc_restop = 0;
2003 cbp->rx_fc_restart = 0;
2004 cbp->fc_filter = 0;
2005 cbp->pri_fc_loc = 1;
2006 } else {
2007 cbp->fc_delay_lsb = 0x1f;
2008 cbp->fc_delay_msb = 0x01;
2009 cbp->pri_fc_thresh = 3;
2010 cbp->tx_fc_dis = 0; /* enable transmit FC */
2011 cbp->rx_fc_restop = 1; /* enable FC restop frames */
2012 cbp->rx_fc_restart = 1; /* enable FC restart frames */
2013 cbp->fc_filter = !prm; /* drop FC frames to host */
2014 cbp->pri_fc_loc = 1; /* FC pri location (byte31) */
2015 }
2016
2017 /*
2018 * Start the config command/DMA.
2019 */
2020 fxp_scb_wait(sc);
2021 bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
2022 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->fxp_desc.cbl_addr);
2023 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
2024 /* ...and wait for it to complete. */
2025 fxp_dma_wait(&cbp->cb_status, sc);
2026 bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_POSTWRITE);
2027
2028 /*
2029 * Now initialize the station address. Temporarily use the TxCB
2030 * memory area like we did above for the config CB.
2031 */
2032 cb_ias = (struct fxp_cb_ias *)sc->fxp_desc.cbl_list;
2033 cb_ias->cb_status = 0;
2034 cb_ias->cb_command = FXP_CB_COMMAND_IAS | FXP_CB_COMMAND_EL;
2035 cb_ias->link_addr = -1;
29 */
30
31/*
32 * Intel EtherExpress Pro/100B PCI Fast Ethernet driver
33 */
34
35#include <sys/param.h>
36#include <sys/systm.h>
37#include <sys/mbuf.h>
38 /* #include <sys/mutex.h> */
39#include <sys/kernel.h>
40#include <sys/socket.h>
41#include <sys/sysctl.h>
42
43#include <net/if.h>
44#include <net/if_dl.h>
45#include <net/if_media.h>
46
47#include <net/bpf.h>
48#include <sys/sockio.h>
49#include <sys/bus.h>
50#include <machine/bus.h>
51#include <sys/rman.h>
52#include <machine/resource.h>
53
54#include <net/ethernet.h>
55#include <net/if_arp.h>
56
57#include <machine/clock.h> /* for DELAY */
58
59#include <net/if_types.h>
60#include <net/if_vlan_var.h>
61
62#ifdef FXP_IP_CSUM_WAR
63#include <netinet/in.h>
64#include <netinet/in_systm.h>
65#include <netinet/ip.h>
66#include <machine/in_cksum.h>
67#endif
68
69#include <pci/pcivar.h>
70#include <pci/pcireg.h> /* for PCIM_CMD_xxx */
71
72#include <dev/mii/mii.h>
73#include <dev/mii/miivar.h>
74
75#include <dev/fxp/if_fxpreg.h>
76#include <dev/fxp/if_fxpvar.h>
77#include <dev/fxp/rcvbundl.h>
78
79MODULE_DEPEND(fxp, miibus, 1, 1, 1);
80#include "miibus_if.h"
81
82/*
83 * NOTE! On the Alpha, we have an alignment constraint. The
84 * card DMAs the packet immediately following the RFA. However,
85 * the first thing in the packet is a 14-byte Ethernet header.
86 * This means that the packet is misaligned. To compensate,
87 * we actually offset the RFA 2 bytes into the cluster. This
88 * alignes the packet after the Ethernet header at a 32-bit
89 * boundary. HOWEVER! This means that the RFA is misaligned!
90 */
91#define RFA_ALIGNMENT_FUDGE 2
92
93/*
94 * Set initial transmit threshold at 64 (512 bytes). This is
95 * increased by 64 (512 bytes) at a time, to maximum of 192
96 * (1536 bytes), if an underrun occurs.
97 */
98static int tx_threshold = 64;
99
100/*
101 * The configuration byte map has several undefined fields which
102 * must be one or must be zero. Set up a template for these bits
103 * only, (assuming a 82557 chip) leaving the actual configuration
104 * to fxp_init.
105 *
106 * See struct fxp_cb_config for the bit definitions.
107 */
static u_char fxp_cb_config_template[] = {
	0x0, 0x0,		/* cb_status */
	0x0, 0x0,		/* cb_command */
	0x0, 0x0, 0x0, 0x0,	/* link_addr */
	0x0,	/*  0 */
	0x0,	/*  1 */
	0x0,	/*  2 */
	0x0,	/*  3 */
	0x0,	/*  4 */
	0x0,	/*  5 */
	0x32,	/*  6 */
	0x0,	/*  7 */
	0x0,	/*  8 */
	0x0,	/*  9 */
	0x6,	/* 10 */
	0x0,	/* 11 */
	0x0,	/* 12 */
	0x0,	/* 13 */
	0xf2,	/* 14 */
	0x48,	/* 15 */
	0x0,	/* 16 */
	0x40,	/* 17 */
	0xf0,	/* 18 */
	0x0,	/* 19 */
	0x3f,	/* 20 */
	0x5	/* 21 */
	/*
	 * Non-zero bytes above hold the "must be one" bits for an 82557
	 * configure command; all configurable fields are left zero here
	 * and are filled in by fxp_init at run time.
	 */
};
135
/*
 * PCI device ID to human-readable name mapping used by fxp_probe.
 */
struct fxp_ident {
	u_int16_t	devid;	/* PCI device ID (vendor is always Intel) */
	char		*name;	/* description passed to device_set_desc */
};
140
141/*
142 * Claim various Intel PCI device identifiers for this driver. The
143 * sub-vendor and sub-device field are extensively used to identify
144 * particular variants, but we don't currently differentiate between
145 * them.
146 */
static struct fxp_ident fxp_ident_table[] = {
    { 0x1029,		"Intel 82559 PCI/CardBus Pro/100" },
    { 0x1030,		"Intel 82559 Pro/100 Ethernet" },
    { 0x1031,		"Intel 82801CAM (ICH3) Pro/100 VE Ethernet" },
    { 0x1032,		"Intel 82801CAM (ICH3) Pro/100 VE Ethernet" },
    { 0x1033,		"Intel 82801CAM (ICH3) Pro/100 VM Ethernet" },
    { 0x1034,		"Intel 82801CAM (ICH3) Pro/100 VM Ethernet" },
    { 0x1035,		"Intel 82801CAM (ICH3) Pro/100 Ethernet" },
    { 0x1036,		"Intel 82801CAM (ICH3) Pro/100 Ethernet" },
    { 0x1037,		"Intel 82801CAM (ICH3) Pro/100 Ethernet" },
    { 0x1038,		"Intel 82801CAM (ICH3) Pro/100 VM Ethernet" },
    { 0x1039,		"Intel 82801DB (ICH4) Pro/100 VE Ethernet" },
    { 0x103A,		"Intel 82801DB (ICH4) Pro/100 Ethernet" },
    { 0x103B,		"Intel 82801DB (ICH4) Pro/100 VM Ethernet" },
    { 0x103C,		"Intel 82801DB (ICH4) Pro/100 Ethernet" },
    { 0x103D,		"Intel 82801DB (ICH4) Pro/100 VE Ethernet" },
    { 0x103E,		"Intel 82801DB (ICH4) Pro/100 VM Ethernet" },
    { 0x1059,		"Intel 82551QM Pro/100 M Mobile Connection" },
    { 0x1209,		"Intel 82559ER Embedded 10/100 Ethernet" },
    { 0x1229,		"Intel 82557/8/9 EtherExpress Pro/100(B) Ethernet" },
    { 0x2449,		"Intel 82801BA/CAM (ICH2/3) Pro/100 Ethernet" },
    { 0,		NULL },	/* list terminator; fxp_probe stops here */
};
170
171#ifdef FXP_IP_CSUM_WAR
172#define FXP_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP)
173#else
174#define FXP_CSUM_FEATURES (CSUM_TCP | CSUM_UDP)
175#endif
176
177static int fxp_probe(device_t dev);
178static int fxp_attach(device_t dev);
179static int fxp_detach(device_t dev);
180static int fxp_shutdown(device_t dev);
181static int fxp_suspend(device_t dev);
182static int fxp_resume(device_t dev);
183
184static void fxp_intr(void *xsc);
185static void fxp_init(void *xsc);
186static void fxp_tick(void *xsc);
187static void fxp_powerstate_d0(device_t dev);
188static void fxp_start(struct ifnet *ifp);
189static void fxp_stop(struct fxp_softc *sc);
190static void fxp_release(struct fxp_softc *sc);
191static int fxp_ioctl(struct ifnet *ifp, u_long command,
192 caddr_t data);
193static void fxp_watchdog(struct ifnet *ifp);
194static int fxp_add_rfabuf(struct fxp_softc *sc,
195 struct fxp_rx *rxp);
196static int fxp_mc_addrs(struct fxp_softc *sc);
197static void fxp_mc_setup(struct fxp_softc *sc);
198static u_int16_t fxp_eeprom_getword(struct fxp_softc *sc, int offset,
199 int autosize);
200static void fxp_eeprom_putword(struct fxp_softc *sc, int offset,
201 u_int16_t data);
202static void fxp_autosize_eeprom(struct fxp_softc *sc);
203static void fxp_read_eeprom(struct fxp_softc *sc, u_short *data,
204 int offset, int words);
205static void fxp_write_eeprom(struct fxp_softc *sc, u_short *data,
206 int offset, int words);
207static int fxp_ifmedia_upd(struct ifnet *ifp);
208static void fxp_ifmedia_sts(struct ifnet *ifp,
209 struct ifmediareq *ifmr);
210static int fxp_serial_ifmedia_upd(struct ifnet *ifp);
211static void fxp_serial_ifmedia_sts(struct ifnet *ifp,
212 struct ifmediareq *ifmr);
213static volatile int fxp_miibus_readreg(device_t dev, int phy, int reg);
214static void fxp_miibus_writereg(device_t dev, int phy, int reg,
215 int value);
216static void fxp_load_ucode(struct fxp_softc *sc);
217static int sysctl_int_range(SYSCTL_HANDLER_ARGS,
218 int low, int high);
219static int sysctl_hw_fxp_bundle_max(SYSCTL_HANDLER_ARGS);
220static int sysctl_hw_fxp_int_delay(SYSCTL_HANDLER_ARGS);
221static __inline void fxp_lwcopy(volatile u_int32_t *src,
222 volatile u_int32_t *dst);
223static __inline void fxp_scb_wait(struct fxp_softc *sc);
224static __inline void fxp_scb_cmd(struct fxp_softc *sc, int cmd);
225static __inline void fxp_dma_wait(volatile u_int16_t *status,
226 struct fxp_softc *sc);
227
/*
 * newbus glue: device and MII bus method table for the fxp driver.
 */
static device_method_t fxp_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		fxp_probe),
	DEVMETHOD(device_attach,	fxp_attach),
	DEVMETHOD(device_detach,	fxp_detach),
	DEVMETHOD(device_shutdown,	fxp_shutdown),
	DEVMETHOD(device_suspend,	fxp_suspend),
	DEVMETHOD(device_resume,	fxp_resume),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	fxp_miibus_readreg),
	DEVMETHOD(miibus_writereg,	fxp_miibus_writereg),

	{ 0, 0 }
};

static driver_t fxp_driver = {
	"fxp",
	fxp_methods,
	sizeof(struct fxp_softc),
};

static devclass_t fxp_devclass;

/* The driver attaches on both plain PCI and CardBus parents. */
DRIVER_MODULE(if_fxp, pci, fxp_driver, fxp_devclass, 0, 0);
DRIVER_MODULE(if_fxp, cardbus, fxp_driver, fxp_devclass, 0, 0);
DRIVER_MODULE(miibus, fxp, miibus_driver, miibus_devclass, 0, 0);

/* Global counter of receiver-not-ready events, exported read/write. */
static int fxp_rnr;
SYSCTL_INT(_hw, OID_AUTO, fxp_rnr, CTLFLAG_RW, &fxp_rnr, 0, "fxp rnr events");
258
259/*
260 * Inline function to copy a 16-bit aligned 32-bit quantity.
261 */
/*
 * Copy a 32-bit quantity that is only guaranteed to be 16-bit aligned.
 * On i386 a misaligned 32-bit load/store is legal, so a plain assignment
 * suffices; elsewhere the copy is done as two 16-bit halves.
 */
static __inline void
fxp_lwcopy(volatile u_int32_t *src, volatile u_int32_t *dst)
{
#ifdef __i386__
	*dst = *src;
#else
	volatile u_int16_t *sp = (volatile u_int16_t *)src;
	volatile u_int16_t *dp = (volatile u_int16_t *)dst;
	int half;

	for (half = 0; half < 2; half++)
		dp[half] = sp[half];
#endif
}
275
276/*
277 * Wait for the previous command to be accepted (but not necessarily
278 * completed).
279 */
280static __inline void
281fxp_scb_wait(struct fxp_softc *sc)
282{
283 int i = 10000;
284
285 while (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) && --i)
286 DELAY(2);
287 if (i == 0)
288 device_printf(sc->dev, "SCB timeout: 0x%x 0x%x 0x%x 0x%x\n",
289 CSR_READ_1(sc, FXP_CSR_SCB_COMMAND),
290 CSR_READ_1(sc, FXP_CSR_SCB_STATACK),
291 CSR_READ_1(sc, FXP_CSR_SCB_RUSCUS),
292 CSR_READ_2(sc, FXP_CSR_FLOWCONTROL));
293}
294
295static __inline void
296fxp_scb_cmd(struct fxp_softc *sc, int cmd)
297{
298
299 if (cmd == FXP_SCB_COMMAND_CU_RESUME && sc->cu_resume_bug) {
300 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_CB_COMMAND_NOP);
301 fxp_scb_wait(sc);
302 }
303 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, cmd);
304}
305
306static __inline void
307fxp_dma_wait(volatile u_int16_t *status, struct fxp_softc *sc)
308{
309 int i = 10000;
310
311 while (!(*status & FXP_CB_STATUS_C) && --i)
312 DELAY(2);
313 if (i == 0)
314 device_printf(sc->dev, "DMA timeout\n");
315}
316
317/*
318 * Return identification string if this is device is ours.
319 */
320static int
321fxp_probe(device_t dev)
322{
323 u_int16_t devid;
324 struct fxp_ident *ident;
325
326 if (pci_get_vendor(dev) == FXP_VENDORID_INTEL) {
327 devid = pci_get_device(dev);
328 for (ident = fxp_ident_table; ident->name != NULL; ident++) {
329 if (ident->devid == devid) {
330 device_set_desc(dev, ident->name);
331 return (0);
332 }
333 }
334 }
335 return (ENXIO);
336}
337
/*
 * Force the device into the D0 (fully on) power state, preserving the
 * BAR and interrupt-line config registers across the transition since
 * moving out of D3 can reset them.  No-op if already in D0.
 */
static void
fxp_powerstate_d0(device_t dev)
{
#if __FreeBSD_version >= 430002
	u_int32_t iobase, membase, irq;

	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		/* Save important PCI config data. */
		iobase = pci_read_config(dev, FXP_PCI_IOBA, 4);
		membase = pci_read_config(dev, FXP_PCI_MMBA, 4);
		/*
		 * NOTE(review): a 4-byte access at PCIR_INTLINE spans
		 * intline/intpin/min_gnt/max_lat as one dword; presumably
		 * intentional so all four are restored together — confirm.
		 */
		irq = pci_read_config(dev, PCIR_INTLINE, 4);

		/* Reset the power state. */
		device_printf(dev, "chip is in D%d power mode "
		    "-- setting to D0\n", pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		/* Restore PCI config data. */
		pci_write_config(dev, FXP_PCI_IOBA, iobase, 4);
		pci_write_config(dev, FXP_PCI_MMBA, membase, 4);
		pci_write_config(dev, PCIR_INTLINE, irq, 4);
	}
#endif
}
363
/*
 * bus_dmamap_load callback: store the single segment's bus address into
 * the u_int32_t pointed to by 'arg'.  Every load in this driver maps
 * physically contiguous memory, hence the single-segment assertion.
 */
static void
fxp_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	u_int32_t *addr;

	if (error)
		return;

	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
	addr = arg;
	*addr = segs->ds_addr;
}
376
377static int
378fxp_attach(device_t dev)
379{
380 int error = 0;
381 struct fxp_softc *sc = device_get_softc(dev);
382 struct ifnet *ifp;
383 struct fxp_rx *rxp;
384 u_int32_t val;
385 u_int16_t data;
386 int i, rid, m1, m2, prefer_iomap;
387 int s;
388
389 bzero(sc, sizeof(*sc));
390 sc->dev = dev;
391 callout_handle_init(&sc->stat_ch);
392 sysctl_ctx_init(&sc->sysctl_ctx);
393 mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
394 MTX_DEF | MTX_RECURSE);
395
396 s = splimp();
397
398 /*
399 * Enable bus mastering. Enable memory space too, in case
400 * BIOS/Prom forgot about it.
401 */
402 val = pci_read_config(dev, PCIR_COMMAND, 2);
403 val |= (PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN);
404 pci_write_config(dev, PCIR_COMMAND, val, 2);
405 val = pci_read_config(dev, PCIR_COMMAND, 2);
406
407 fxp_powerstate_d0(dev);
408
409 /*
410 * Figure out which we should try first - memory mapping or i/o mapping?
411 * We default to memory mapping. Then we accept an override from the
412 * command line. Then we check to see which one is enabled.
413 */
414 m1 = PCIM_CMD_MEMEN;
415 m2 = PCIM_CMD_PORTEN;
416 prefer_iomap = 0;
417 if (resource_int_value(device_get_name(dev), device_get_unit(dev),
418 "prefer_iomap", &prefer_iomap) == 0 && prefer_iomap != 0) {
419 m1 = PCIM_CMD_PORTEN;
420 m2 = PCIM_CMD_MEMEN;
421 }
422
423 if (val & m1) {
424 sc->rtp =
425 (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
426 sc->rgd = (m1 == PCIM_CMD_MEMEN)? FXP_PCI_MMBA : FXP_PCI_IOBA;
427 sc->mem = bus_alloc_resource(dev, sc->rtp, &sc->rgd,
428 0, ~0, 1, RF_ACTIVE);
429 }
430 if (sc->mem == NULL && (val & m2)) {
431 sc->rtp =
432 (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
433 sc->rgd = (m2 == PCIM_CMD_MEMEN)? FXP_PCI_MMBA : FXP_PCI_IOBA;
434 sc->mem = bus_alloc_resource(dev, sc->rtp, &sc->rgd,
435 0, ~0, 1, RF_ACTIVE);
436 }
437
438 if (!sc->mem) {
439 device_printf(dev, "could not map device registers\n");
440 error = ENXIO;
441 goto fail;
442 }
443 if (bootverbose) {
444 device_printf(dev, "using %s space register mapping\n",
445 sc->rtp == SYS_RES_MEMORY? "memory" : "I/O");
446 }
447
448 sc->sc_st = rman_get_bustag(sc->mem);
449 sc->sc_sh = rman_get_bushandle(sc->mem);
450
451 /*
452 * Allocate our interrupt.
453 */
454 rid = 0;
455 sc->irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
456 RF_SHAREABLE | RF_ACTIVE);
457 if (sc->irq == NULL) {
458 device_printf(dev, "could not map interrupt\n");
459 error = ENXIO;
460 goto fail;
461 }
462
463 error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET,
464 fxp_intr, sc, &sc->ih);
465 if (error) {
466 device_printf(dev, "could not setup irq\n");
467 goto fail;
468 }
469
470 /*
471 * Reset to a stable state.
472 */
473 CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET);
474 DELAY(10);
475
476 /*
477 * Find out how large of an SEEPROM we have.
478 */
479 fxp_autosize_eeprom(sc);
480
481 /*
482 * Determine whether we must use the 503 serial interface.
483 */
484 fxp_read_eeprom(sc, &data, 6, 1);
485 if ((data & FXP_PHY_DEVICE_MASK) != 0 &&
486 (data & FXP_PHY_SERIAL_ONLY))
487 sc->flags |= FXP_FLAG_SERIAL_MEDIA;
488
489 /*
490 * Create the sysctl tree
491 */
492 sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
493 SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
494 device_get_nameunit(dev), CTLFLAG_RD, 0, "");
495 if (sc->sysctl_tree == NULL)
496 goto fail;
497 SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
498 OID_AUTO, "int_delay", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_PRISON,
499 &sc->tunable_int_delay, 0, sysctl_hw_fxp_int_delay, "I",
500 "FXP driver receive interrupt microcode bundling delay");
501 SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
502 OID_AUTO, "bundle_max", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_PRISON,
503 &sc->tunable_bundle_max, 0, sysctl_hw_fxp_bundle_max, "I",
504 "FXP driver receive interrupt microcode bundle size limit");
505
506 /*
507 * Pull in device tunables.
508 */
509 sc->tunable_int_delay = TUNABLE_INT_DELAY;
510 sc->tunable_bundle_max = TUNABLE_BUNDLE_MAX;
511 (void) resource_int_value(device_get_name(dev), device_get_unit(dev),
512 "int_delay", &sc->tunable_int_delay);
513 (void) resource_int_value(device_get_name(dev), device_get_unit(dev),
514 "bundle_max", &sc->tunable_bundle_max);
515
516 /*
517 * Find out the chip revision; lump all 82557 revs together.
518 */
519 fxp_read_eeprom(sc, &data, 5, 1);
520 if ((data >> 8) == 1)
521 sc->revision = FXP_REV_82557;
522 else
523 sc->revision = pci_get_revid(dev);
524
525 /*
526 * Enable workarounds for certain chip revision deficiencies.
527 *
528 * Systems based on the ICH2/ICH2-M chip from Intel, and possibly
529 * some systems based a normal 82559 design, have a defect where
530 * the chip can cause a PCI protocol violation if it receives
531 * a CU_RESUME command when it is entering the IDLE state. The
532 * workaround is to disable Dynamic Standby Mode, so the chip never
533 * deasserts CLKRUN#, and always remains in an active state.
534 *
535 * See Intel 82801BA/82801BAM Specification Update, Errata #30.
536 */
537 i = pci_get_device(dev);
538 if (i == 0x2449 || (i > 0x1030 && i < 0x1039) ||
539 sc->revision >= FXP_REV_82559_A0) {
540 fxp_read_eeprom(sc, &data, 10, 1);
541 if (data & 0x02) { /* STB enable */
542 u_int16_t cksum;
543 int i;
544
545 device_printf(dev,
546 "Disabling dynamic standby mode in EEPROM\n");
547 data &= ~0x02;
548 fxp_write_eeprom(sc, &data, 10, 1);
549 device_printf(dev, "New EEPROM ID: 0x%x\n", data);
550 cksum = 0;
551 for (i = 0; i < (1 << sc->eeprom_size) - 1; i++) {
552 fxp_read_eeprom(sc, &data, i, 1);
553 cksum += data;
554 }
555 i = (1 << sc->eeprom_size) - 1;
556 cksum = 0xBABA - cksum;
557 fxp_read_eeprom(sc, &data, i, 1);
558 fxp_write_eeprom(sc, &cksum, i, 1);
559 device_printf(dev,
560 "EEPROM checksum @ 0x%x: 0x%x -> 0x%x\n",
561 i, data, cksum);
562#if 1
563 /*
564 * If the user elects to continue, try the software
565 * workaround, as it is better than nothing.
566 */
567 sc->flags |= FXP_FLAG_CU_RESUME_BUG;
568#endif
569 }
570 }
571
572 /*
573 * If we are not a 82557 chip, we can enable extended features.
574 */
575 if (sc->revision != FXP_REV_82557) {
576 /*
577 * If MWI is enabled in the PCI configuration, and there
578 * is a valid cacheline size (8 or 16 dwords), then tell
579 * the board to turn on MWI.
580 */
581 if (val & PCIM_CMD_MWRICEN &&
582 pci_read_config(dev, PCIR_CACHELNSZ, 1) != 0)
583 sc->flags |= FXP_FLAG_MWI_ENABLE;
584
585 /* turn on the extended TxCB feature */
586 sc->flags |= FXP_FLAG_EXT_TXCB;
587
588 /* enable reception of long frames for VLAN */
589 sc->flags |= FXP_FLAG_LONG_PKT_EN;
590 }
591
592 /*
593 * Enable use of extended RFDs and TCBs for 82550
594 * and later chips. Note: we need extended TXCB support
595 * too, but that's already enabled by the code above.
596 * Be careful to do this only on the right devices.
597 */
598
599 if (sc->revision == FXP_REV_82550 || sc->revision == FXP_REV_82550_C) {
600 sc->rfa_size = sizeof (struct fxp_rfa);
601 sc->tx_cmd = FXP_CB_COMMAND_IPCBXMIT;
602 sc->flags |= FXP_FLAG_EXT_RFA;
603 } else {
604 sc->rfa_size = sizeof (struct fxp_rfa) - FXP_RFAX_LEN;
605 sc->tx_cmd = FXP_CB_COMMAND_XMIT;
606 }
607
608 /*
609 * Allocate DMA tags and DMA safe memory.
610 */
611 error = bus_dma_tag_create(NULL, 2, 0, BUS_SPACE_MAXADDR_32BIT,
612 BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES,
613 sc->flags & FXP_FLAG_EXT_RFA ? FXP_NTXSEG - 1 : FXP_NTXSEG,
614 BUS_SPACE_MAXSIZE_32BIT, 0, &sc->fxp_mtag);
615 if (error) {
616 device_printf(dev, "could not allocate dma tag\n");
617 goto fail;
618 }
619
620 error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT,
621 BUS_SPACE_MAXADDR, NULL, NULL, sizeof(struct fxp_stats), 1,
622 BUS_SPACE_MAXSIZE_32BIT, 0, &sc->fxp_stag);
623 if (error) {
624 device_printf(dev, "could not allocate dma tag\n");
625 goto fail;
626 }
627
628 error = bus_dmamem_alloc(sc->fxp_stag, (void **)&sc->fxp_stats,
629 BUS_DMA_NOWAIT, &sc->fxp_smap);
630 if (error)
631 goto failmem;
632 error = bus_dmamap_load(sc->fxp_stag, sc->fxp_smap, sc->fxp_stats,
633 sizeof(struct fxp_stats), fxp_dma_map_addr, &sc->stats_addr, 0);
634 if (error) {
635 device_printf(dev, "could not map the stats buffer\n");
636 goto fail;
637 }
638 bzero(sc->fxp_stats, sizeof(struct fxp_stats));
639
640 error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT,
641 BUS_SPACE_MAXADDR, NULL, NULL, FXP_TXCB_SZ, 1,
642 BUS_SPACE_MAXSIZE_32BIT, 0, &sc->cbl_tag);
643 if (error) {
644 device_printf(dev, "could not allocate dma tag\n");
645 goto fail;
646 }
647
648 error = bus_dmamem_alloc(sc->cbl_tag, (void **)&sc->fxp_desc.cbl_list,
649 BUS_DMA_NOWAIT, &sc->cbl_map);
650 if (error)
651 goto failmem;
652 bzero(sc->fxp_desc.cbl_list, FXP_TXCB_SZ);
653
654 error = bus_dmamap_load(sc->cbl_tag, sc->cbl_map,
655 sc->fxp_desc.cbl_list, FXP_TXCB_SZ, fxp_dma_map_addr,
656 &sc->fxp_desc.cbl_addr, 0);
657 if (error) {
658 device_printf(dev, "could not map DMA memory\n");
659 goto fail;
660 }
661
662 error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT,
663 BUS_SPACE_MAXADDR, NULL, NULL, sizeof(struct fxp_cb_mcs), 1,
664 BUS_SPACE_MAXSIZE_32BIT, 0, &sc->mcs_tag);
665 if (error) {
666 device_printf(dev, "could not allocate dma tag\n");
667 goto fail;
668 }
669
670 error = bus_dmamem_alloc(sc->mcs_tag, (void **)&sc->mcsp,
671 BUS_DMA_NOWAIT, &sc->mcs_map);
672 if (error)
673 goto failmem;
674 error = bus_dmamap_load(sc->mcs_tag, sc->mcs_map, sc->mcsp,
675 sizeof(struct fxp_cb_mcs), fxp_dma_map_addr, &sc->mcs_addr, 0);
676 if (error) {
677 device_printf(dev, "can't map the multicast setup command\n");
678 goto fail;
679 }
680
681 /*
682 * Pre-allocate the TX DMA maps.
683 */
684 for (i = 0; i < FXP_NRFABUFS; i++) {
685 error = bus_dmamap_create(sc->fxp_mtag, 0,
686 &sc->fxp_desc.tx_list[i].tx_map);
687 if (error) {
688 device_printf(dev, "can't create DMA map for TX\n");
689 goto fail;
690 }
691 }
692 error = bus_dmamap_create(sc->fxp_mtag, 0, &sc->spare_map);
693 if (error) {
694 device_printf(dev, "can't create spare DMA map\n");
695 goto fail;
696 }
697
698 /*
699 * Pre-allocate our receive buffers.
700 */
701 sc->fxp_desc.rx_head = sc->fxp_desc.rx_tail = NULL;
702 for (i = 0; i < FXP_NRFABUFS; i++) {
703 rxp = &sc->fxp_desc.rx_list[i];
704 rxp->rx_mbuf = NULL;
705 error = bus_dmamap_create(sc->fxp_mtag, 0, &rxp->rx_map);
706 if (error) {
707 device_printf(dev, "can't create DMA map for RX\n");
708 goto fail;
709 }
710 if (fxp_add_rfabuf(sc, rxp) != 0)
711 goto failmem;
712 }
713
714 /*
715 * Read MAC address.
716 */
717 fxp_read_eeprom(sc, (u_int16_t *)sc->arpcom.ac_enaddr, 0, 3);
718 device_printf(dev, "Ethernet address %6D%s\n",
719 sc->arpcom.ac_enaddr, ":",
720 sc->flags & FXP_FLAG_SERIAL_MEDIA ? ", 10Mbps" : "");
721 if (bootverbose) {
722 device_printf(dev, "PCI IDs: %04x %04x %04x %04x %04x\n",
723 pci_get_vendor(dev), pci_get_device(dev),
724 pci_get_subvendor(dev), pci_get_subdevice(dev),
725 pci_get_revid(dev));
726 fxp_read_eeprom(sc, &data, 10, 1);
727 device_printf(dev, "Dynamic Standby mode is %s\n",
728 data & 0x02 ? "enabled" : "disabled");
729 }
730
731 /*
732 * If this is only a 10Mbps device, then there is no MII, and
733 * the PHY will use a serial interface instead.
734 *
735 * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter
736 * doesn't have a programming interface of any sort. The
737 * media is sensed automatically based on how the link partner
738 * is configured. This is, in essence, manual configuration.
739 */
740 if (sc->flags & FXP_FLAG_SERIAL_MEDIA) {
741 ifmedia_init(&sc->sc_media, 0, fxp_serial_ifmedia_upd,
742 fxp_serial_ifmedia_sts);
743 ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
744 ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
745 } else {
746 if (mii_phy_probe(dev, &sc->miibus, fxp_ifmedia_upd,
747 fxp_ifmedia_sts)) {
748 device_printf(dev, "MII without any PHY!\n");
749 error = ENXIO;
750 goto fail;
751 }
752 }
753
754 ifp = &sc->arpcom.ac_if;
755 ifp->if_unit = device_get_unit(dev);
756 ifp->if_name = "fxp";
757 ifp->if_output = ether_output;
758 ifp->if_baudrate = 100000000;
759 ifp->if_init = fxp_init;
760 ifp->if_softc = sc;
761 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
762 ifp->if_ioctl = fxp_ioctl;
763 ifp->if_start = fxp_start;
764 ifp->if_watchdog = fxp_watchdog;
765
766 /* Enable checksum offload for 82550 or better chips */
767
768 if (sc->flags & FXP_FLAG_EXT_RFA) {
769 ifp->if_hwassist = FXP_CSUM_FEATURES;
770 ifp->if_capabilities = IFCAP_HWCSUM;
771 ifp->if_capenable = ifp->if_capabilities;
772 }
773
774 /*
775 * Attach the interface.
776 */
777 ether_ifattach(ifp, sc->arpcom.ac_enaddr);
778
779 /*
780 * Tell the upper layer(s) we support long frames.
781 */
782 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
783 ifp->if_capabilities |= IFCAP_VLAN_MTU;
784
785 /*
786 * Let the system queue as many packets as we have available
787 * TX descriptors.
788 */
789 ifp->if_snd.ifq_maxlen = FXP_NTXCB - 1;
790
791 splx(s);
792 return (0);
793
794failmem:
795 device_printf(dev, "Failed to malloc memory\n");
796 error = ENOMEM;
797fail:
798 splx(s);
799 fxp_release(sc);
800 return (error);
801}
802
803/*
804 * release all resources
805 */
/*
 * release all resources
 *
 * Tear down everything fxp_attach may have acquired, in the reverse
 * order of acquisition where it matters: loaded maps are unloaded
 * before their memory is freed, and DMA tags are destroyed last.
 * Safe to call on a partially attached softc since every step is
 * guarded by a NULL/zero check or operates on zeroed state.
 */
static void
fxp_release(struct fxp_softc *sc)
{
	struct fxp_rx *rxp;
	struct fxp_tx *txp;
	int i;

	/* Unload, free, and destroy the map of every posted RX buffer. */
	for (i = 0; i < FXP_NRFABUFS; i++) {
		rxp = &sc->fxp_desc.rx_list[i];
		if (rxp->rx_mbuf != NULL) {
			bus_dmamap_sync(sc->fxp_mtag, rxp->rx_map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->fxp_mtag, rxp->rx_map);
			m_freem(rxp->rx_mbuf);
		}
		bus_dmamap_destroy(sc->fxp_mtag, rxp->rx_map);
	}
	bus_dmamap_destroy(sc->fxp_mtag, sc->spare_map);

	/* Same for any mbufs still attached to TX control blocks. */
	for (i = 0; i < FXP_NTXCB; i++) {
		txp = &sc->fxp_desc.tx_list[i];
		if (txp->tx_mbuf != NULL) {
			bus_dmamap_sync(sc->fxp_mtag, txp->tx_map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->fxp_mtag, txp->tx_map);
			m_freem(txp->tx_mbuf);
		}
		bus_dmamap_destroy(sc->fxp_mtag, txp->tx_map);
	}

	/* Detach children (the miibus) before freeing DMA memory. */
	bus_generic_detach(sc->dev);
	if (sc->miibus)
		device_delete_child(sc->dev, sc->miibus);

	if (sc->fxp_desc.cbl_list) {
		bus_dmamap_unload(sc->cbl_tag, sc->cbl_map);
		bus_dmamem_free(sc->cbl_tag, sc->fxp_desc.cbl_list,
		    sc->cbl_map);
	}
	if (sc->fxp_stats) {
		bus_dmamap_unload(sc->fxp_stag, sc->fxp_smap);
		bus_dmamem_free(sc->fxp_stag, sc->fxp_stats, sc->fxp_smap);
	}
	if (sc->mcsp) {
		bus_dmamap_unload(sc->mcs_tag, sc->mcs_map);
		bus_dmamem_free(sc->mcs_tag, sc->mcsp, sc->mcs_map);
	}
	if (sc->ih)
		bus_teardown_intr(sc->dev, sc->irq, sc->ih);
	if (sc->irq)
		bus_release_resource(sc->dev, SYS_RES_IRQ, 0, sc->irq);
	if (sc->mem)
		bus_release_resource(sc->dev, sc->rtp, sc->rgd, sc->mem);
	if (sc->fxp_mtag)
		bus_dma_tag_destroy(sc->fxp_mtag);
	if (sc->fxp_stag)
		bus_dma_tag_destroy(sc->fxp_stag);
	if (sc->cbl_tag)
		bus_dma_tag_destroy(sc->cbl_tag);
	if (sc->mcs_tag)
		bus_dma_tag_destroy(sc->mcs_tag);

	sysctl_ctx_free(&sc->sysctl_ctx);

	mtx_destroy(&sc->sc_mtx);
}
872
873/*
874 * Detach interface.
875 */
/*
 * Detach interface.
 *
 * Disables chip interrupts, stops DMA, detaches from the network
 * stack, and then releases all driver resources.  Always succeeds.
 */
static int
fxp_detach(device_t dev)
{
	struct fxp_softc *sc = device_get_softc(dev);
	int s;

	/* disable interrupts */
	CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_DISABLE);

	s = splimp();

	/*
	 * Stop DMA and drop transmit queue.
	 */
	fxp_stop(sc);

	/*
	 * Close down routes etc.
	 */
	ether_ifdetach(&sc->arpcom.ac_if);

	/*
	 * Free all media structures.
	 */
	ifmedia_removeall(&sc->sc_media);

	splx(s);

	/* Release our allocated resources. */
	fxp_release(sc);

	return (0);
}
909
910/*
911 * Device shutdown routine. Called at system shutdown after sync. The
912 * main purpose of this routine is to shut off receiver DMA so that
913 * kernel memory doesn't get clobbered during warmboot.
914 */
915static int
916fxp_shutdown(device_t dev)
917{
918 /*
919 * Make sure that DMA is disabled prior to reboot. Not doing
920 * do could allow DMA to corrupt kernel memory during the
921 * reboot before the driver initializes.
922 */
923 fxp_stop((struct fxp_softc *) device_get_softc(dev));
924 return (0);
925}
926
927/*
928 * Device suspend routine. Stop the interface and save some PCI
929 * settings in case the BIOS doesn't restore them properly on
930 * resume.
931 */
/*
 * Device suspend routine. Stop the interface and save some PCI
 * settings in case the BIOS doesn't restore them properly on
 * resume.  The saved values are written back by fxp_resume().
 */
static int
fxp_suspend(device_t dev)
{
	struct fxp_softc *sc = device_get_softc(dev);
	int i, s;

	s = splimp();

	fxp_stop(sc);

	/* Save the five BARs plus the expansion-ROM and latency regs. */
	for (i = 0; i < 5; i++)
		sc->saved_maps[i] = pci_read_config(dev, PCIR_MAPS + i * 4, 4);
	sc->saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4);
	sc->saved_intline = pci_read_config(dev, PCIR_INTLINE, 1);
	sc->saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	sc->saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);

	sc->suspended = 1;

	splx(s);
	return (0);
}
954
955/*
956 * Device resume routine. Restore some PCI settings in case the BIOS
957 * doesn't, re-enable busmastering, and restart the interface if
958 * appropriate.
959 */
/*
 * Device resume routine. Restore some PCI settings in case the BIOS
 * doesn't, re-enable busmastering, and restart the interface if
 * appropriate.
 */
static int
fxp_resume(device_t dev)
{
	struct fxp_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->sc_if;
	u_int16_t pci_command;
	int i, s;

	s = splimp();

	fxp_powerstate_d0(dev);

	/* better way to do this? */
	/* Write back the registers fxp_suspend() saved. */
	for (i = 0; i < 5; i++)
		pci_write_config(dev, PCIR_MAPS + i * 4, sc->saved_maps[i], 4);
	pci_write_config(dev, PCIR_BIOS, sc->saved_biosaddr, 4);
	pci_write_config(dev, PCIR_INTLINE, sc->saved_intline, 1);
	pci_write_config(dev, PCIR_CACHELNSZ, sc->saved_cachelnsz, 1);
	pci_write_config(dev, PCIR_LATTIMER, sc->saved_lattimer, 1);

	/* reenable busmastering */
	pci_command = pci_read_config(dev, PCIR_COMMAND, 2);
	pci_command |= (PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN);
	pci_write_config(dev, PCIR_COMMAND, pci_command, 2);

	/* Put the chip back into a known state before reinitializing. */
	CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET);
	DELAY(10);

	/* reinitialize interface if necessary */
	if (ifp->if_flags & IFF_UP)
		fxp_init(sc);

	sc->suspended = 0;

	splx(s);
	return (0);
}
997
/*
 * Clock 'length' bits of 'data' into the serial EEPROM, MSB first.
 * For each bit: present it on EEDI with chip select held, then pulse
 * EESK high and low with 1us settle times around each edge.
 */
static void
fxp_eeprom_shiftin(struct fxp_softc *sc, int data, int length)
{
	u_int16_t reg;
	int x;

	/*
	 * Shift in data.
	 */
	for (x = 1 << (length - 1); x; x >>= 1) {
		if (data & x)
			reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
		else
			reg = FXP_EEPROM_EECS;
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
		DELAY(1);
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg | FXP_EEPROM_EESK);
		DELAY(1);
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
		DELAY(1);
	}
}
1020
1021/*
1022 * Read from the serial EEPROM. Basically, you manually shift in
1023 * the read opcode (one bit at a time) and then shift in the address,
1024 * and then you shift out the data (all of this one bit at a time).
1025 * The word size is 16 bits, so you have to provide the address for
1026 * every 16 bits of data.
1027 */
/*
 * Read from the serial EEPROM. Basically, you manually shift in
 * the read opcode (one bit at a time) and then shift in the address,
 * and then you shift out the data (all of this one bit at a time).
 * The word size is 16 bits, so you have to provide the address for
 * every 16 bits of data.
 *
 * With 'autosize' set, this doubles as the EEPROM size probe: while
 * shifting in address bits it counts them (reusing 'data' as the
 * counter) and watches EEDO for the dummy zero the part emits once it
 * has seen a complete address, recording that count in
 * sc->eeprom_size.
 */
static u_int16_t
fxp_eeprom_getword(struct fxp_softc *sc, int offset, int autosize)
{
	u_int16_t reg, data;
	int x;

	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	/*
	 * Shift in read opcode.
	 */
	fxp_eeprom_shiftin(sc, FXP_EEPROM_OPC_READ, 3);
	/*
	 * Shift in address.
	 */
	data = 0;
	for (x = 1 << (sc->eeprom_size - 1); x; x >>= 1) {
		if (offset & x)
			reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
		else
			reg = FXP_EEPROM_EECS;
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
		DELAY(1);
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg | FXP_EEPROM_EESK);
		DELAY(1);
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
		DELAY(1);
		reg = CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO;
		/* 'data' counts address bits shifted so far (autosize). */
		data++;
		if (autosize && reg == 0) {
			sc->eeprom_size = data;
			break;
		}
	}
	/*
	 * Shift out data.
	 */
	data = 0;
	reg = FXP_EEPROM_EECS;
	for (x = 1 << 15; x; x >>= 1) {
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg | FXP_EEPROM_EESK);
		DELAY(1);
		if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO)
			data |= x;
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
		DELAY(1);
	}
	/* Deselect the EEPROM when done. */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);

	return (data);
}
1079
/*
 * Write one 16-bit word to the serial EEPROM at 'offset'.  The
 * sequence is: erase/write-enable, then the write opcode + address +
 * data, then poll EEDO for write completion (up to 1000 * 50us),
 * then erase/write-disable.  Chip select is dropped between phases.
 */
static void
fxp_eeprom_putword(struct fxp_softc *sc, int offset, u_int16_t data)
{
	int i;

	/*
	 * Erase/write enable.
	 */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	fxp_eeprom_shiftin(sc, 0x4, 3);
	/* EWEN: address field of all-ones in the top two bits. */
	fxp_eeprom_shiftin(sc, 0x03 << (sc->eeprom_size - 2), sc->eeprom_size);
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);
	/*
	 * Shift in write opcode, address, data.
	 */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	fxp_eeprom_shiftin(sc, FXP_EEPROM_OPC_WRITE, 3);
	fxp_eeprom_shiftin(sc, offset, sc->eeprom_size);
	fxp_eeprom_shiftin(sc, data, 16);
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);
	/*
	 * Wait for EEPROM to finish up.
	 */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	DELAY(1);
	for (i = 0; i < 1000; i++) {
		if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO)
			break;
		DELAY(50);
	}
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);
	/*
	 * Erase/write disable.
	 */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	fxp_eeprom_shiftin(sc, 0x4, 3);
	/* EWDS: all-zero address field. */
	fxp_eeprom_shiftin(sc, 0, sc->eeprom_size);
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);
}
1123
1124/*
1125 * From NetBSD:
1126 *
1127 * Figure out EEPROM size.
1128 *
1129 * 559's can have either 64-word or 256-word EEPROMs, the 558
1130 * datasheet only talks about 64-word EEPROMs, and the 557 datasheet
 * talks about the existence of 16 to 256 word EEPROMs.
1132 *
1133 * The only known sizes are 64 and 256, where the 256 version is used
1134 * by CardBus cards to store CIS information.
1135 *
1136 * The address is shifted in msb-to-lsb, and after the last
1137 * address-bit the EEPROM is supposed to output a `dummy zero' bit,
1138 * after which follows the actual data. We try to detect this zero, by
1139 * probing the data-out bit in the EEPROM control register just after
1140 * having shifted in a bit. If the bit is zero, we assume we've
1141 * shifted enough address bits. The data-out should be tri-state,
1142 * before this, which should translate to a logical one.
1143 */
1144static void
1145fxp_autosize_eeprom(struct fxp_softc *sc)
1146{
1147
1148 /* guess maximum size of 256 words */
1149 sc->eeprom_size = 8;
1150
1151 /* autosize */
1152 (void) fxp_eeprom_getword(sc, 0, 1);
1153}
1154
1155static void
1156fxp_read_eeprom(struct fxp_softc *sc, u_short *data, int offset, int words)
1157{
1158 int i;
1159
1160 for (i = 0; i < words; i++)
1161 data[i] = fxp_eeprom_getword(sc, offset + i, 0);
1162}
1163
1164static void
1165fxp_write_eeprom(struct fxp_softc *sc, u_short *data, int offset, int words)
1166{
1167 int i;
1168
1169 for (i = 0; i < words; i++)
1170 fxp_eeprom_putword(sc, offset + i, data[i]);
1171}
1172
1173static void
1174fxp_dma_map_txbuf(void *arg, bus_dma_segment_t *segs, int nseg,
1175 bus_size_t mapsize, int error)
1176{
1177 struct fxp_softc *sc;
1178 struct fxp_cb_tx *txp;
1179 int i;
1180
1181 if (error)
1182 return;
1183
1184 KASSERT(nseg <= FXP_NTXSEG, ("too many DMA segments"));
1185
1186 sc = arg;
1187 txp = sc->fxp_desc.tx_last->tx_next->tx_cb;
1188 for (i = 0; i < nseg; i++) {
1189 KASSERT(segs[i].ds_len <= MCLBYTES, ("segment size too large"));
1190 /*
1191 * If this is an 82550/82551, then we're using extended
1192 * TxCBs _and_ we're using checksum offload. This means
1193 * that the TxCB is really an IPCB. One major difference
1194 * between the two is that with plain extended TxCBs,
1195 * the bottom half of the TxCB contains two entries from
1196 * the TBD array, whereas IPCBs contain just one entry:
1197 * one entry (8 bytes) has been sacrificed for the TCP/IP
1198 * checksum offload control bits. So to make things work
1199 * right, we have to start filling in the TBD array
1200 * starting from a different place depending on whether
1201 * the chip is an 82550/82551 or not.
1202 */
1203 if (sc->flags & FXP_FLAG_EXT_RFA) {
1204 txp->tbd[i + 1].tb_addr = segs[i].ds_addr;
1205 txp->tbd[i + 1].tb_size = segs[i].ds_len;
1206 } else {
1207 txp->tbd[i].tb_addr = segs[i].ds_addr;
1208 txp->tbd[i].tb_size = segs[i].ds_len;
1209 }
1210 }
1211 txp->tbd_number = nseg;
1212}
1213
1214/*
1215 * Start packet transmission on the interface.
1216 */
static void
fxp_start(struct ifnet *ifp)
{
	struct fxp_softc *sc = ifp->if_softc;
	struct fxp_tx *txp;
	struct mbuf *mb_head;
	int error;

	/*
	 * See if we need to suspend xmit until the multicast filter
	 * has been reprogrammed (which can only be done at the head
	 * of the command chain).
	 */
	if (sc->need_mcsetup) {
		return;
	}

	txp = NULL;

	/*
	 * We're finished if there is nothing more to add to the list or if
	 * we're all filled up with buffers to transmit.
	 * NOTE: One TxCB is reserved to guarantee that fxp_mc_setup() can add
	 * a NOP command when needed.
	 */
	while (ifp->if_snd.ifq_head != NULL && sc->tx_queued < FXP_NTXCB - 1) {

		/*
		 * Grab a packet to transmit.
		 */
		IF_DEQUEUE(&ifp->if_snd, mb_head);

		/*
		 * Get pointer to next available tx desc.
		 */
		txp = sc->fxp_desc.tx_last->tx_next;

		/*
		 * Deal with TCP/IP checksum offload. Note that
		 * in order for TCP checksum offload to work,
		 * the pseudo header checksum must have already
		 * been computed and stored in the checksum field
		 * in the TCP header. The stack should have
		 * already done this for us.
		 */

		if (mb_head->m_pkthdr.csum_flags) {
			if (mb_head->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
				txp->tx_cb->ipcb_ip_activation_high =
				    FXP_IPCB_HARDWAREPARSING_ENABLE;
				txp->tx_cb->ipcb_ip_schedule =
				    FXP_IPCB_TCPUDP_CHECKSUM_ENABLE;
				if (mb_head->m_pkthdr.csum_flags & CSUM_TCP)
					txp->tx_cb->ipcb_ip_schedule |=
					    FXP_IPCB_TCP_PACKET;
			}
#ifdef FXP_IP_CSUM_WAR
		/*
		 * XXX The 82550 chip appears to have trouble
		 * dealing with IP header checksums in very small
		 * datagrams, namely fragments from 1 to 3 bytes
		 * in size. For example, say you want to transmit
		 * a UDP packet of 1473 bytes. The packet will be
		 * fragmented over two IP datagrams, the latter
		 * containing only one byte of data. The 82550 will
		 * botch the header checksum on the 1-byte fragment.
		 * As long as the datagram contains 4 or more bytes
		 * of data, you're ok.
		 *
		 * The following code attempts to work around this
		 * problem: if the datagram is less than 38 bytes
		 * in size (14 bytes ether header, 20 bytes IP header,
		 * plus 4 bytes of data), we punt and compute the IP
		 * header checksum by hand. This workaround doesn't
		 * work very well, however, since it can be fooled
		 * by things like VLAN tags and IP options that make
		 * the header sizes/offsets vary.
		 */

		if (mb_head->m_pkthdr.csum_flags & CSUM_IP) {
			if (mb_head->m_pkthdr.len < 38) {
				struct ip *ip;
				/* Skip past the Ethernet header to
				 * checksum the IP header in software. */
				mb_head->m_data += ETHER_HDR_LEN;
				ip = mtod(mb_head, struct ip *);
				ip->ip_sum = in_cksum(mb_head,
				    ip->ip_hl << 2);
				mb_head->m_data -= ETHER_HDR_LEN;
			} else {
				txp->tx_cb->ipcb_ip_activation_high =
				    FXP_IPCB_HARDWAREPARSING_ENABLE;
				txp->tx_cb->ipcb_ip_schedule |=
				    FXP_IPCB_IP_CHECKSUM_ENABLE;
			}
		}
#endif
		}

		/*
		 * Go through each of the mbufs in the chain and initialize
		 * the transmit buffer descriptors with the physical address
		 * and size of the mbuf.
		 */
		error = bus_dmamap_load_mbuf(sc->fxp_mtag, txp->tx_map,
		    mb_head, fxp_dma_map_txbuf, sc, 0);

		if (error && error != EFBIG) {
			device_printf(sc->dev, "can't map mbuf (error %d)\n",
			    error);
			m_freem(mb_head);
			break;
		}

		if (error) {
			struct mbuf *mn;

			/*
			 * We ran out of segments (EFBIG). We have to
			 * recopy this mbuf chain into a single contiguous
			 * buffer first. Bail out if we can't get the
			 * new buffers.
			 */
			MGETHDR(mn, M_DONTWAIT, MT_DATA);
			if (mn == NULL) {
				m_freem(mb_head);
				break;
			}
			if (mb_head->m_pkthdr.len > MHLEN) {
				MCLGET(mn, M_DONTWAIT);
				if ((mn->m_flags & M_EXT) == 0) {
					m_freem(mn);
					m_freem(mb_head);
					break;
				}
			}
			m_copydata(mb_head, 0, mb_head->m_pkthdr.len,
			    mtod(mn, caddr_t));
			mn->m_pkthdr.len = mn->m_len = mb_head->m_pkthdr.len;
			m_freem(mb_head);
			mb_head = mn;
			/* Retry the load with the single-mbuf copy. */
			error = bus_dmamap_load_mbuf(sc->fxp_mtag, txp->tx_map,
			    mb_head, fxp_dma_map_txbuf, sc, 0);
			if (error) {
				device_printf(sc->dev,
				    "can't map mbuf (error %d)\n", error);
				m_freem(mb_head);
				break;
			}
		}

		bus_dmamap_sync(sc->fxp_mtag, txp->tx_map,
		    BUS_DMASYNC_PREWRITE);

		txp->tx_mbuf = mb_head;
		txp->tx_cb->cb_status = 0;
		txp->tx_cb->byte_count = 0;
		/*
		 * Request a completion interrupt (FXP_CB_COMMAND_I) only
		 * every FXP_CXINT_THRESH packets, to cut interrupt load.
		 */
		if (sc->tx_queued != FXP_CXINT_THRESH - 1) {
			txp->tx_cb->cb_command =
			    sc->tx_cmd | FXP_CB_COMMAND_SF |
			    FXP_CB_COMMAND_S;
		} else {
			txp->tx_cb->cb_command =
			    sc->tx_cmd | FXP_CB_COMMAND_SF |
			    FXP_CB_COMMAND_S | FXP_CB_COMMAND_I;
			/*
			 * Set a 5 second timer just in case we don't hear
			 * from the card again.
			 */
			ifp->if_timer = 5;
		}
		txp->tx_cb->tx_threshold = tx_threshold;

		/*
		 * Advance the end of list forward.
		 */

#ifdef __alpha__
		/*
		 * On platforms which can't access memory in 16-bit
		 * granularities, we must prevent the card from DMA'ing
		 * up the status while we update the command field.
		 * This could cause us to overwrite the completion status.
		 */
		atomic_clear_short(&sc->fxp_desc.tx_last->tx_cb->cb_command,
		    FXP_CB_COMMAND_S);
#else
		sc->fxp_desc.tx_last->tx_cb->cb_command &= ~FXP_CB_COMMAND_S;
#endif /*__alpha__*/
		sc->fxp_desc.tx_last = txp;

		/*
		 * Advance the beginning of the list forward if there are
		 * no other packets queued (when nothing is queued, tx_first
		 * sits on the last TxCB that was sent out).
		 */
		if (sc->tx_queued == 0)
			sc->fxp_desc.tx_first = txp;

		sc->tx_queued++;

		/*
		 * Pass packet to bpf if there is a listener.
		 */
		BPF_MTAP(ifp, mb_head);
	}
	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);

	/*
	 * We're finished. If we added to the list, issue a RESUME to get DMA
	 * going again if suspended.
	 */
	if (txp != NULL) {
		fxp_scb_wait(sc);
		fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_RESUME);
	}
}
1431
1432static void fxp_intr_body(struct fxp_softc *sc, u_int8_t statack, int count);
1433
1434#ifdef DEVICE_POLLING
1435static poll_handler_t fxp_poll;
1436
/*
 * Polling entry point (DEVICE_POLLING): service TX/RX without taking
 * interrupts.  `count' bounds the number of RX frames processed per call.
 */
static void
fxp_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct fxp_softc *sc = ifp->if_softc;
	u_int8_t statack;

	if (cmd == POLL_DEREGISTER) {	/* final call, enable interrupts */
		CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, 0);
		return;
	}
	/* Pretend the usual TX/RX events occurred so both paths run. */
	statack = FXP_SCB_STATACK_CXTNO | FXP_SCB_STATACK_CNA |
	    FXP_SCB_STATACK_FR;
	if (cmd == POLL_AND_CHECK_STATUS) {
		u_int8_t tmp;

		tmp = CSR_READ_1(sc, FXP_CSR_SCB_STATACK);
		/* 0xff likely means the card is gone; 0 means no events. */
		if (tmp == 0xff || tmp == 0)
			return; /* nothing to do */
		tmp &= ~statack;
		/* ack what we can */
		if (tmp != 0)
			CSR_WRITE_1(sc, FXP_CSR_SCB_STATACK, tmp);
		statack |= tmp;
	}
	fxp_intr_body(sc, statack, count);
}
1463#endif /* DEVICE_POLLING */
1464
1465/*
1466 * Process interface interrupts.
1467 */
static void
fxp_intr(void *xsc)
{
	struct fxp_softc *sc = xsc;
	u_int8_t statack;

#ifdef DEVICE_POLLING
	struct ifnet *ifp = &sc->sc_if;

	/* When polling is active, the interrupt handler does nothing. */
	if (ifp->if_flags & IFF_POLLING)
		return;
	if (ether_poll_register(fxp_poll, ifp)) {
		/* disable interrupts */
		CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_DISABLE);
		fxp_poll(ifp, 0, 1);
		return;
	}
#endif

	/* Ignore spurious interrupts while the device is suspended. */
	if (sc->suspended) {
		return;
	}

	/* Loop until the chip reports no further pending events. */
	while ((statack = CSR_READ_1(sc, FXP_CSR_SCB_STATACK)) != 0) {
		/*
		 * It should not be possible to have all bits set; the
		 * FXP_SCB_INTR_SWI bit always returns 0 on a read. If
		 * all bits are set, this may indicate that the card has
		 * been physically ejected, so ignore it.
		 */
		if (statack == 0xff)
			return;

		/*
		 * First ACK all the interrupts in this pass.
		 */
		CSR_WRITE_1(sc, FXP_CSR_SCB_STATACK, statack);
		fxp_intr_body(sc, statack, -1);
	}
}
1508
/*
 * Reclaim transmit buffers whose commands the chip has completed
 * (FXP_CB_STATUS_C set), and advance tx_first past them.
 */
static void
fxp_txeof(struct fxp_softc *sc)
{
	struct fxp_tx *txp;

	/* Pull the latest completion status out of the CBL memory. */
	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREREAD);
	for (txp = sc->fxp_desc.tx_first; sc->tx_queued &&
	    (txp->tx_cb->cb_status & FXP_CB_STATUS_C) != 0;
	    txp = txp->tx_next) {
		if (txp->tx_mbuf != NULL) {
			bus_dmamap_sync(sc->fxp_mtag, txp->tx_map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->fxp_mtag, txp->tx_map);
			m_freem(txp->tx_mbuf);
			txp->tx_mbuf = NULL;
			/* clear this to reset csum offload bits */
			txp->tx_cb->tbd[0].tb_addr = 0;
		}
		sc->tx_queued--;
	}
	sc->fxp_desc.tx_first = txp;
	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
}
1532
/*
 * Common interrupt/poll work: reclaim completed transmits, refill and
 * process received frames, and restart the receiver on an RNR
 * (receiver-not-ready) condition.  `count' < 0 means process to
 * completion; `count' >= 0 bounds RX processing (polling mode).
 */
static void
fxp_intr_body(struct fxp_softc *sc, u_int8_t statack, int count)
{
	struct ifnet *ifp = &sc->sc_if;
	struct mbuf *m;
	struct fxp_rx *rxp;
	struct fxp_rfa *rfa;
	int rnr = (statack & FXP_SCB_STATACK_RNR) ? 1 : 0;

	if (rnr)
		fxp_rnr++;
#ifdef DEVICE_POLLING
	/* Pick up a deferred RNR condition if `count' ran out last time. */
	if (sc->flags & FXP_FLAG_DEFERRED_RNR) {
		sc->flags &= ~FXP_FLAG_DEFERRED_RNR;
		rnr = 1;
	}
#endif

	/*
	 * Free any finished transmit mbuf chains.
	 *
	 * Handle the CNA event like a CXTNO event. It used to
	 * be that this event (control unit not ready) was not
	 * encountered, but it is now with the SMPng modifications.
	 * The exact sequence of events that occur when the interface
	 * is brought up are different now, and if this event
	 * goes unhandled, the configuration/rxfilter setup sequence
	 * can stall for several seconds. The result is that no
	 * packets go out onto the wire for about 5 to 10 seconds
	 * after the interface is ifconfig'ed for the first time.
	 */
	if (statack & (FXP_SCB_STATACK_CXTNO | FXP_SCB_STATACK_CNA)) {
		fxp_txeof(sc);

		ifp->if_timer = 0;
		if (sc->tx_queued == 0) {
			/* Run a pending multicast setup now that TX is idle. */
			if (sc->need_mcsetup)
				fxp_mc_setup(sc);
		}
		/*
		 * Try to start more packets transmitting.
		 */
		if (ifp->if_snd.ifq_head != NULL)
			fxp_start(ifp);
	}

	/*
	 * Just return if nothing happened on the receive side.
	 */
	if (!rnr && (statack & FXP_SCB_STATACK_FR) == 0)
		return;

	/*
	 * Process receiver interrupts. If a no-resource (RNR)
	 * condition exists, get whatever packets we can and
	 * re-start the receiver.
	 *
	 * When using polling, we do not process the list to completion,
	 * so when we get an RNR interrupt we must defer the restart
	 * until we hit the last buffer with the C bit set.
	 * If we run out of cycles and rfa_headm has the C bit set,
	 * record the pending RNR in the FXP_FLAG_DEFERRED_RNR flag so
	 * that the info will be used in the subsequent polling cycle.
	 */
	for (;;) {
		rxp = sc->fxp_desc.rx_head;
		m = rxp->rx_mbuf;
		/* The RFA header lives at the front of the cluster,
		 * offset by the alignment fudge. */
		rfa = (struct fxp_rfa *)(m->m_ext.ext_buf +
		    RFA_ALIGNMENT_FUDGE);
		bus_dmamap_sync(sc->fxp_mtag, rxp->rx_map,
		    BUS_DMASYNC_POSTREAD);

#ifdef DEVICE_POLLING /* loop at most count times if count >=0 */
		if (count >= 0 && count-- == 0) {
			if (rnr) {
				/* Defer RNR processing until the next time. */
				sc->flags |= FXP_FLAG_DEFERRED_RNR;
				rnr = 0;
			}
			break;
		}
#endif /* DEVICE_POLLING */

		/* Stop at the first descriptor the chip hasn't completed. */
		if ((rfa->rfa_status & FXP_RFA_STATUS_C) == 0)
			break;

		/*
		 * Advance head forward.
		 */
		sc->fxp_desc.rx_head = rxp->rx_next;

		/*
		 * Add a new buffer to the receive chain.
		 * If this fails, the old buffer is recycled
		 * instead.
		 */
		if (fxp_add_rfabuf(sc, rxp) == 0) {
			int total_len;

			/*
			 * Fetch packet length (the top 2 bits of
			 * actual_size are flags set by the controller
			 * upon completion), and drop the packet in case
			 * of bogus length or CRC errors.
			 */
			total_len = rfa->actual_size & 0x3fff;
			if (total_len < sizeof(struct ether_header) ||
			    total_len > MCLBYTES - RFA_ALIGNMENT_FUDGE -
				sc->rfa_size ||
			    rfa->rfa_status & FXP_RFA_STATUS_CRC) {
				m_freem(m);
				continue;
			}

			/* Do IP checksum checking. */
			if (rfa->rfa_status & FXP_RFA_STATUS_PARSE) {
				if (rfa->rfax_csum_sts &
				    FXP_RFDX_CS_IP_CSUM_BIT_VALID)
					m->m_pkthdr.csum_flags |=
					    CSUM_IP_CHECKED;
				if (rfa->rfax_csum_sts &
				    FXP_RFDX_CS_IP_CSUM_VALID)
					m->m_pkthdr.csum_flags |=
					    CSUM_IP_VALID;
				if ((rfa->rfax_csum_sts &
				    FXP_RFDX_CS_TCPUDP_CSUM_BIT_VALID) &&
				    (rfa->rfax_csum_sts &
				    FXP_RFDX_CS_TCPUDP_CSUM_VALID)) {
					m->m_pkthdr.csum_flags |=
					    CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
					m->m_pkthdr.csum_data = 0xffff;
				}
			}

			m->m_pkthdr.len = m->m_len = total_len;
			m->m_pkthdr.rcvif = ifp;

			/* Hand the frame up to the network stack. */
			(*ifp->if_input)(ifp, m);
		}
	}
	if (rnr) {
		/* Restart the receiver at the current head of the RFA. */
		fxp_scb_wait(sc);
		CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL,
		    sc->fxp_desc.rx_head->rx_addr);
		fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_START);
	}
}
1681
1682/*
1683 * Update packet in/out/collision statistics. The i82557 doesn't
1684 * allow you to access these counters without doing a fairly
1685 * expensive DMA to get _all_ of the statistics it maintains, so
1686 * we do this operation here only once per second. The statistics
1687 * counters in the kernel are updated from the previous dump-stats
1688 * DMA and then a new dump-stats DMA is started. The on-chip
1689 * counters are zeroed when the DMA completes. If we can't start
1690 * the DMA immediately, we don't wait - we just prepare to read
1691 * them again next time.
1692 */
static void
fxp_tick(void *xsc)
{
	struct fxp_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_if;
	struct fxp_stats *sp = sc->fxp_stats;
	int s;

	/* Fold the chip's last dump-stats DMA into the ifnet counters. */
	bus_dmamap_sync(sc->fxp_stag, sc->fxp_smap, BUS_DMASYNC_POSTREAD);
	ifp->if_opackets += sp->tx_good;
	ifp->if_collisions += sp->tx_total_collisions;
	if (sp->rx_good) {
		ifp->if_ipackets += sp->rx_good;
		sc->rx_idle_secs = 0;
	} else {
		/*
		 * Receiver's been idle for another second.
		 */
		sc->rx_idle_secs++;
	}
	ifp->if_ierrors +=
	    sp->rx_crc_errors +
	    sp->rx_alignment_errors +
	    sp->rx_rnr_errors +
	    sp->rx_overrun_errors;
	/*
	 * If any transmit underruns occurred, bump up the transmit
	 * threshold by another 512 bytes (64 * 8).
	 */
	if (sp->tx_underruns) {
		ifp->if_oerrors += sp->tx_underruns;
		if (tx_threshold < 192)
			tx_threshold += 64;
	}
	s = splimp();
	/*
	 * Release any xmit buffers that have completed DMA. This isn't
	 * strictly necessary to do here, but it's advantageous for mbufs
	 * with external storage to be released in a timely manner rather
	 * than being deferred for a potentially long time. This limits
	 * the delay to a maximum of one second.
	 */
	fxp_txeof(sc);

	/*
	 * If we haven't received any packets in FXP_MAC_RX_IDLE seconds,
	 * then assume the receiver has locked up and attempt to clear
	 * the condition by reprogramming the multicast filter. This is
	 * a work-around for a bug in the 82557 where the receiver locks
	 * up if it gets certain types of garbage in the synchronization
	 * bits prior to the packet header. This bug is supposed to only
	 * occur in 10Mbps mode, but has been seen to occur in 100Mbps
	 * mode as well (perhaps due to a 10/100 speed transition).
	 */
	if (sc->rx_idle_secs > FXP_MAX_RX_IDLE) {
		sc->rx_idle_secs = 0;
		fxp_mc_setup(sc);
	}
	/*
	 * If there is no pending command, start another stats
	 * dump. Otherwise punt for now.
	 */
	if (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) == 0) {
		/*
		 * Start another stats dump.
		 */
		bus_dmamap_sync(sc->fxp_stag, sc->fxp_smap,
		    BUS_DMASYNC_PREREAD);
		fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_DUMPRESET);
	} else {
		/*
		 * A previous command is still waiting to be accepted.
		 * Just zero our copy of the stats and wait for the
		 * next timer event to update them.
		 */
		sp->tx_good = 0;
		sp->tx_underruns = 0;
		sp->tx_total_collisions = 0;

		sp->rx_good = 0;
		sp->rx_crc_errors = 0;
		sp->rx_alignment_errors = 0;
		sp->rx_rnr_errors = 0;
		sp->rx_overrun_errors = 0;
	}
	/* Tick the PHY state machine too, if we have one. */
	if (sc->miibus != NULL)
		mii_tick(device_get_softc(sc->miibus));
	splx(s);
	/*
	 * Schedule another timeout one second from now.
	 */
	sc->stat_ch = timeout(fxp_tick, sc, hz);
}
1786
1787/*
1788 * Stop the interface. Cancels the statistics updater and resets
1789 * the interface.
1790 */
static void
fxp_stop(struct fxp_softc *sc)
{
	struct ifnet *ifp = &sc->sc_if;
	struct fxp_tx *txp;
	int i;

	/* Mark the interface down and clear the watchdog timer. */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

#ifdef DEVICE_POLLING
	ether_poll_deregister(ifp);
#endif
	/*
	 * Cancel stats updater.
	 */
	untimeout(fxp_tick, sc, sc->stat_ch);

	/*
	 * Issue software reset, which also unloads the microcode.
	 */
	sc->flags &= ~FXP_FLAG_UCODE;
	CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SOFTWARE_RESET);
	DELAY(50);

	/*
	 * Release any xmit buffers.
	 */
	txp = sc->fxp_desc.tx_list;
	if (txp != NULL) {
		for (i = 0; i < FXP_NTXCB; i++) {
			if (txp[i].tx_mbuf != NULL) {
				bus_dmamap_sync(sc->fxp_mtag, txp[i].tx_map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(sc->fxp_mtag, txp[i].tx_map);
				m_freem(txp[i].tx_mbuf);
				txp[i].tx_mbuf = NULL;
				/* clear this to reset csum offload bits */
				txp[i].tx_cb->tbd[0].tb_addr = 0;
			}
		}
	}
	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
	sc->tx_queued = 0;
}
1836
1837/*
1838 * Watchdog/transmission transmit timeout handler. Called when a
1839 * transmission is started on the interface, but no interrupt is
1840 * received before the timeout. This usually indicates that the
1841 * card has wedged for some reason.
1842 */
1843static void
1844fxp_watchdog(struct ifnet *ifp)
1845{
1846 struct fxp_softc *sc = ifp->if_softc;
1847
1848 device_printf(sc->dev, "device timeout\n");
1849 ifp->if_oerrors++;
1850
1851 fxp_init(sc);
1852}
1853
1854static void
1855fxp_init(void *xsc)
1856{
1857 struct fxp_softc *sc = xsc;
1858 struct ifnet *ifp = &sc->sc_if;
1859 struct fxp_cb_config *cbp;
1860 struct fxp_cb_ias *cb_ias;
1861 struct fxp_cb_tx *tcbp;
1862 struct fxp_tx *txp;
1863 struct fxp_cb_mcs *mcsp;
1864 int i, prm, s;
1865
1866 s = splimp();
1867 /*
1868 * Cancel any pending I/O
1869 */
1870 fxp_stop(sc);
1871
1872 prm = (ifp->if_flags & IFF_PROMISC) ? 1 : 0;
1873
1874 /*
1875 * Initialize base of CBL and RFA memory. Loading with zero
1876 * sets it up for regular linear addressing.
1877 */
1878 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, 0);
1879 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_BASE);
1880
1881 fxp_scb_wait(sc);
1882 fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_BASE);
1883
1884 /*
1885 * Initialize base of dump-stats buffer.
1886 */
1887 fxp_scb_wait(sc);
1888 bus_dmamap_sync(sc->fxp_stag, sc->fxp_smap, BUS_DMASYNC_PREREAD);
1889 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->stats_addr);
1890 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_DUMP_ADR);
1891
1892 /*
1893 * Attempt to load microcode if requested.
1894 */
1895 if (ifp->if_flags & IFF_LINK0 && (sc->flags & FXP_FLAG_UCODE) == 0)
1896 fxp_load_ucode(sc);
1897
1898 /*
1899 * Initialize the multicast address list.
1900 */
1901 if (fxp_mc_addrs(sc)) {
1902 mcsp = sc->mcsp;
1903 mcsp->cb_status = 0;
1904 mcsp->cb_command = FXP_CB_COMMAND_MCAS | FXP_CB_COMMAND_EL;
1905 mcsp->link_addr = -1;
1906 /*
1907 * Start the multicast setup command.
1908 */
1909 fxp_scb_wait(sc);
1910 bus_dmamap_sync(sc->mcs_tag, sc->mcs_map, BUS_DMASYNC_PREWRITE);
1911 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->mcs_addr);
1912 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
1913 /* ...and wait for it to complete. */
1914 fxp_dma_wait(&mcsp->cb_status, sc);
1915 bus_dmamap_sync(sc->mcs_tag, sc->mcs_map,
1916 BUS_DMASYNC_POSTWRITE);
1917 }
1918
1919 /*
1920 * We temporarily use memory that contains the TxCB list to
1921 * construct the config CB. The TxCB list memory is rebuilt
1922 * later.
1923 */
1924 cbp = (struct fxp_cb_config *)sc->fxp_desc.cbl_list;
1925
1926 /*
1927 * This bcopy is kind of disgusting, but there are a bunch of must be
1928 * zero and must be one bits in this structure and this is the easiest
1929 * way to initialize them all to proper values.
1930 */
1931 bcopy(fxp_cb_config_template, cbp, sizeof(fxp_cb_config_template));
1932
1933 cbp->cb_status = 0;
1934 cbp->cb_command = FXP_CB_COMMAND_CONFIG | FXP_CB_COMMAND_EL;
1935 cbp->link_addr = -1; /* (no) next command */
1936 cbp->byte_count = sc->flags & FXP_FLAG_EXT_RFA ? 32 : 22;
1937 cbp->rx_fifo_limit = 8; /* rx fifo threshold (32 bytes) */
1938 cbp->tx_fifo_limit = 0; /* tx fifo threshold (0 bytes) */
1939 cbp->adaptive_ifs = 0; /* (no) adaptive interframe spacing */
1940 cbp->mwi_enable = sc->flags & FXP_FLAG_MWI_ENABLE ? 1 : 0;
1941 cbp->type_enable = 0; /* actually reserved */
1942 cbp->read_align_en = sc->flags & FXP_FLAG_READ_ALIGN ? 1 : 0;
1943 cbp->end_wr_on_cl = sc->flags & FXP_FLAG_WRITE_ALIGN ? 1 : 0;
1944 cbp->rx_dma_bytecount = 0; /* (no) rx DMA max */
1945 cbp->tx_dma_bytecount = 0; /* (no) tx DMA max */
1946 cbp->dma_mbce = 0; /* (disable) dma max counters */
1947 cbp->late_scb = 0; /* (don't) defer SCB update */
1948 cbp->direct_dma_dis = 1; /* disable direct rcv dma mode */
1949 cbp->tno_int_or_tco_en =0; /* (disable) tx not okay interrupt */
1950 cbp->ci_int = 1; /* interrupt on CU idle */
1951 cbp->ext_txcb_dis = sc->flags & FXP_FLAG_EXT_TXCB ? 0 : 1;
1952 cbp->ext_stats_dis = 1; /* disable extended counters */
1953 cbp->keep_overrun_rx = 0; /* don't pass overrun frames to host */
1954 cbp->save_bf = sc->revision == FXP_REV_82557 ? 1 : prm;
1955 cbp->disc_short_rx = !prm; /* discard short packets */
1956 cbp->underrun_retry = 1; /* retry mode (once) on DMA underrun */
1957 cbp->two_frames = 0; /* do not limit FIFO to 2 frames */
1958 cbp->dyn_tbd = 0; /* (no) dynamic TBD mode */
1959 cbp->ext_rfa = sc->flags & FXP_FLAG_EXT_RFA ? 1 : 0;
1960 cbp->mediatype = sc->flags & FXP_FLAG_SERIAL_MEDIA ? 0 : 1;
1961 cbp->csma_dis = 0; /* (don't) disable link */
1962 cbp->tcp_udp_cksum = 0; /* (don't) enable checksum */
1963 cbp->vlan_tco = 0; /* (don't) enable vlan wakeup */
1964 cbp->link_wake_en = 0; /* (don't) assert PME# on link change */
1965 cbp->arp_wake_en = 0; /* (don't) assert PME# on arp */
1966 cbp->mc_wake_en = 0; /* (don't) enable PME# on mcmatch */
1967 cbp->nsai = 1; /* (don't) disable source addr insert */
1968 cbp->preamble_length = 2; /* (7 byte) preamble */
1969 cbp->loopback = 0; /* (don't) loopback */
1970 cbp->linear_priority = 0; /* (normal CSMA/CD operation) */
1971 cbp->linear_pri_mode = 0; /* (wait after xmit only) */
1972 cbp->interfrm_spacing = 6; /* (96 bits of) interframe spacing */
1973 cbp->promiscuous = prm; /* promiscuous mode */
1974 cbp->bcast_disable = 0; /* (don't) disable broadcasts */
1975 cbp->wait_after_win = 0; /* (don't) enable modified backoff alg*/
1976 cbp->ignore_ul = 0; /* consider U/L bit in IA matching */
1977 cbp->crc16_en = 0; /* (don't) enable crc-16 algorithm */
1978 cbp->crscdt = sc->flags & FXP_FLAG_SERIAL_MEDIA ? 1 : 0;
1979
1980 cbp->stripping = !prm; /* truncate rx packet to byte count */
1981 cbp->padding = 1; /* (do) pad short tx packets */
1982 cbp->rcv_crc_xfer = 0; /* (don't) xfer CRC to host */
1983 cbp->long_rx_en = sc->flags & FXP_FLAG_LONG_PKT_EN ? 1 : 0;
1984 cbp->ia_wake_en = 0; /* (don't) wake up on address match */
1985 cbp->magic_pkt_dis = 0; /* (don't) disable magic packet */
1986 /* must set wake_en in PMCSR also */
1987 cbp->force_fdx = 0; /* (don't) force full duplex */
1988 cbp->fdx_pin_en = 1; /* (enable) FDX# pin */
1989 cbp->multi_ia = 0; /* (don't) accept multiple IAs */
1990 cbp->mc_all = sc->flags & FXP_FLAG_ALL_MCAST ? 1 : 0;
1991 cbp->gamla_rx = sc->flags & FXP_FLAG_EXT_RFA ? 1 : 0;
1992
1993 if (sc->revision == FXP_REV_82557) {
1994 /*
1995 * The 82557 has no hardware flow control, the values
1996 * below are the defaults for the chip.
1997 */
1998 cbp->fc_delay_lsb = 0;
1999 cbp->fc_delay_msb = 0x40;
2000 cbp->pri_fc_thresh = 3;
2001 cbp->tx_fc_dis = 0;
2002 cbp->rx_fc_restop = 0;
2003 cbp->rx_fc_restart = 0;
2004 cbp->fc_filter = 0;
2005 cbp->pri_fc_loc = 1;
2006 } else {
2007 cbp->fc_delay_lsb = 0x1f;
2008 cbp->fc_delay_msb = 0x01;
2009 cbp->pri_fc_thresh = 3;
2010 cbp->tx_fc_dis = 0; /* enable transmit FC */
2011 cbp->rx_fc_restop = 1; /* enable FC restop frames */
2012 cbp->rx_fc_restart = 1; /* enable FC restart frames */
2013 cbp->fc_filter = !prm; /* drop FC frames to host */
2014 cbp->pri_fc_loc = 1; /* FC pri location (byte31) */
2015 }
2016
2017 /*
2018 * Start the config command/DMA.
2019 */
2020 fxp_scb_wait(sc);
2021 bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
2022 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->fxp_desc.cbl_addr);
2023 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
2024 /* ...and wait for it to complete. */
2025 fxp_dma_wait(&cbp->cb_status, sc);
2026 bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_POSTWRITE);
2027
2028 /*
2029 * Now initialize the station address. Temporarily use the TxCB
2030 * memory area like we did above for the config CB.
2031 */
2032 cb_ias = (struct fxp_cb_ias *)sc->fxp_desc.cbl_list;
2033 cb_ias->cb_status = 0;
2034 cb_ias->cb_command = FXP_CB_COMMAND_IAS | FXP_CB_COMMAND_EL;
2035 cb_ias->link_addr = -1;
2036 bcopy(sc->arpcom.ac_enaddr,
2037 (void *)(uintptr_t)(volatile void *)cb_ias->macaddr,
2036 bcopy(sc->arpcom.ac_enaddr, cb_ias->macaddr,
2038 sizeof(sc->arpcom.ac_enaddr));
2039
2040 /*
2041 * Start the IAS (Individual Address Setup) command/DMA.
2042 */
2043 fxp_scb_wait(sc);
2044 bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
2045 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
2046 /* ...and wait for it to complete. */
2047 fxp_dma_wait(&cb_ias->cb_status, sc);
2048 bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_POSTWRITE);
2049
2050 /*
2051 * Initialize transmit control block (TxCB) list.
2052 */
2053 txp = sc->fxp_desc.tx_list;
2054 tcbp = sc->fxp_desc.cbl_list;
2055 bzero(tcbp, FXP_TXCB_SZ);
2056 for (i = 0; i < FXP_NTXCB; i++) {
2057 txp[i].tx_cb = tcbp + i;
2058 txp[i].tx_mbuf = NULL;
2059 tcbp[i].cb_status = FXP_CB_STATUS_C | FXP_CB_STATUS_OK;
2060 tcbp[i].cb_command = FXP_CB_COMMAND_NOP;
2061 tcbp[i].link_addr = sc->fxp_desc.cbl_addr +
2062 (((i + 1) & FXP_TXCB_MASK) * sizeof(struct fxp_cb_tx));
2063 if (sc->flags & FXP_FLAG_EXT_TXCB)
2064 tcbp[i].tbd_array_addr =
2065 FXP_TXCB_DMA_ADDR(sc, &tcbp[i].tbd[2]);
2066 else
2067 tcbp[i].tbd_array_addr =
2068 FXP_TXCB_DMA_ADDR(sc, &tcbp[i].tbd[0]);
2069 txp[i].tx_next = &txp[(i + 1) & FXP_TXCB_MASK];
2070 }
2071 /*
2072 * Set the suspend flag on the first TxCB and start the control
2073 * unit. It will execute the NOP and then suspend.
2074 */
2075 tcbp->cb_command = FXP_CB_COMMAND_NOP | FXP_CB_COMMAND_S;
2076 bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
2077 sc->fxp_desc.tx_first = sc->fxp_desc.tx_last = txp;
2078 sc->tx_queued = 1;
2079
2080 fxp_scb_wait(sc);
2081 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
2082
2083 /*
2084 * Initialize receiver buffer area - RFA.
2085 */
2086 fxp_scb_wait(sc);
2087 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->fxp_desc.rx_head->rx_addr);
2088 fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_START);
2089
2090 /*
2091 * Set current media.
2092 */
2093 if (sc->miibus != NULL)
2094 mii_mediachg(device_get_softc(sc->miibus));
2095
2096 ifp->if_flags |= IFF_RUNNING;
2097 ifp->if_flags &= ~IFF_OACTIVE;
2098
2099 /*
2100 * Enable interrupts.
2101 */
2102#ifdef DEVICE_POLLING
2103 /*
2104 * ... but only do that if we are not polling. And because (presumably)
2105 * the default is interrupts on, we need to disable them explicitly!
2106 */
2107 if ( ifp->if_flags & IFF_POLLING )
2108 CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_DISABLE);
2109 else
2110#endif /* DEVICE_POLLING */
2111 CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, 0);
2112 splx(s);
2113
2114 /*
2115 * Start stats updater.
2116 */
2117 sc->stat_ch = timeout(fxp_tick, sc, hz);
2118}
2119
2120static int
2121fxp_serial_ifmedia_upd(struct ifnet *ifp)
2122{
2123
2124 return (0);
2125}
2126
2127static void
2128fxp_serial_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2129{
2130
2131 ifmr->ifm_active = IFM_ETHER|IFM_MANUAL;
2132}
2133
2134/*
2135 * Change media according to request.
2136 */
2137static int
2138fxp_ifmedia_upd(struct ifnet *ifp)
2139{
2140 struct fxp_softc *sc = ifp->if_softc;
2141 struct mii_data *mii;
2142
2143 mii = device_get_softc(sc->miibus);
2144 mii_mediachg(mii);
2145 return (0);
2146}
2147
2148/*
2149 * Notify the world which media we're using.
2150 */
2151static void
2152fxp_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2153{
2154 struct fxp_softc *sc = ifp->if_softc;
2155 struct mii_data *mii;
2156
2157 mii = device_get_softc(sc->miibus);
2158 mii_pollstat(mii);
2159 ifmr->ifm_active = mii->mii_media_active;
2160 ifmr->ifm_status = mii->mii_media_status;
2161
2162 if (ifmr->ifm_status & IFM_10_T && sc->flags & FXP_FLAG_CU_RESUME_BUG)
2163 sc->cu_resume_bug = 1;
2164 else
2165 sc->cu_resume_bug = 0;
2166}
2167
2168/*
2169 * Add a buffer to the end of the RFA buffer list.
2170 * Return 0 if successful, 1 for failure. A failure results in
2171 * adding the 'oldm' (if non-NULL) on to the end of the list -
2172 * tossing out its old contents and recycling it.
2173 * The RFA struct is stuck at the beginning of mbuf cluster and the
2174 * data pointer is fixed up to point just past it.
2175 */
2176static int
2177fxp_add_rfabuf(struct fxp_softc *sc, struct fxp_rx *rxp)
2178{
2179 struct mbuf *m;
2180 struct fxp_rfa *rfa, *p_rfa;
2181 struct fxp_rx *p_rx;
2182 bus_dmamap_t tmp_map;
2183 u_int32_t v;
2184 int error;
2185
2186 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
2187 if (m == NULL)
2188 return (ENOBUFS);
2189
2190 /*
2191 * Move the data pointer up so that the incoming data packet
2192 * will be 32-bit aligned.
2193 */
2194 m->m_data += RFA_ALIGNMENT_FUDGE;
2195
2196 /*
2197 * Get a pointer to the base of the mbuf cluster and move
2198 * data start past it.
2199 */
2200 rfa = mtod(m, struct fxp_rfa *);
2201 m->m_data += sc->rfa_size;
2202 rfa->size = MCLBYTES - sc->rfa_size - RFA_ALIGNMENT_FUDGE;
2203
2204 /*
2205 * Initialize the rest of the RFA. Note that since the RFA
2206 * is misaligned, we cannot store values directly. Instead,
2207 * we use an optimized, inline copy.
2208 */
2209
2210 rfa->rfa_status = 0;
2211 rfa->rfa_control = FXP_RFA_CONTROL_EL;
2212 rfa->actual_size = 0;
2213
2214 v = -1;
2215 fxp_lwcopy(&v, (volatile u_int32_t *) rfa->link_addr);
2216 fxp_lwcopy(&v, (volatile u_int32_t *) rfa->rbd_addr);
2217
2218 /* Map the RFA into DMA memory. */
2219 error = bus_dmamap_load(sc->fxp_mtag, sc->spare_map, rfa,
2220 MCLBYTES - RFA_ALIGNMENT_FUDGE, fxp_dma_map_addr,
2221 &rxp->rx_addr, 0);
2222 if (error) {
2223 m_freem(m);
2224 return (error);
2225 }
2226
2227 bus_dmamap_unload(sc->fxp_mtag, rxp->rx_map);
2228 tmp_map = sc->spare_map;
2229 sc->spare_map = rxp->rx_map;
2230 rxp->rx_map = tmp_map;
2231 rxp->rx_mbuf = m;
2232
2233 bus_dmamap_sync(sc->fxp_mtag, rxp->rx_map, BUS_DMASYNC_PREREAD);
2234
2235 /*
2236 * If there are other buffers already on the list, attach this
2237 * one to the end by fixing up the tail to point to this one.
2238 */
2239 if (sc->fxp_desc.rx_head != NULL) {
2240 p_rx = sc->fxp_desc.rx_tail;
2241 p_rfa = (struct fxp_rfa *)
2242 (p_rx->rx_mbuf->m_ext.ext_buf + RFA_ALIGNMENT_FUDGE);
2243 p_rx->rx_next = rxp;
2244 fxp_lwcopy(&rxp->rx_addr,
2245 (volatile u_int32_t *)p_rfa->link_addr);
2246 p_rfa->rfa_control = 0;
2247 bus_dmamap_sync(sc->fxp_mtag, p_rx->rx_map,
2248 BUS_DMASYNC_PREREAD);
2249 } else {
2250 rxp->rx_next = NULL;
2251 sc->fxp_desc.rx_head = rxp;
2252 }
2253 sc->fxp_desc.rx_tail = rxp;
2254 return (0);
2255}
2256
2257static volatile int
2258fxp_miibus_readreg(device_t dev, int phy, int reg)
2259{
2260 struct fxp_softc *sc = device_get_softc(dev);
2261 int count = 10000;
2262 int value;
2263
2264 CSR_WRITE_4(sc, FXP_CSR_MDICONTROL,
2265 (FXP_MDI_READ << 26) | (reg << 16) | (phy << 21));
2266
2267 while (((value = CSR_READ_4(sc, FXP_CSR_MDICONTROL)) & 0x10000000) == 0
2268 && count--)
2269 DELAY(10);
2270
2271 if (count <= 0)
2272 device_printf(dev, "fxp_miibus_readreg: timed out\n");
2273
2274 return (value & 0xffff);
2275}
2276
2277static void
2278fxp_miibus_writereg(device_t dev, int phy, int reg, int value)
2279{
2280 struct fxp_softc *sc = device_get_softc(dev);
2281 int count = 10000;
2282
2283 CSR_WRITE_4(sc, FXP_CSR_MDICONTROL,
2284 (FXP_MDI_WRITE << 26) | (reg << 16) | (phy << 21) |
2285 (value & 0xffff));
2286
2287 while ((CSR_READ_4(sc, FXP_CSR_MDICONTROL) & 0x10000000) == 0 &&
2288 count--)
2289 DELAY(10);
2290
2291 if (count <= 0)
2292 device_printf(dev, "fxp_miibus_writereg: timed out\n");
2293}
2294
2295static int
2296fxp_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
2297{
2298 struct fxp_softc *sc = ifp->if_softc;
2299 struct ifreq *ifr = (struct ifreq *)data;
2300 struct mii_data *mii;
2301 int s, error = 0;
2302
2303 s = splimp();
2304
2305 switch (command) {
2306 case SIOCSIFFLAGS:
2307 if (ifp->if_flags & IFF_ALLMULTI)
2308 sc->flags |= FXP_FLAG_ALL_MCAST;
2309 else
2310 sc->flags &= ~FXP_FLAG_ALL_MCAST;
2311
2312 /*
2313 * If interface is marked up and not running, then start it.
2314 * If it is marked down and running, stop it.
2315 * XXX If it's up then re-initialize it. This is so flags
2316 * such as IFF_PROMISC are handled.
2317 */
2318 if (ifp->if_flags & IFF_UP) {
2319 fxp_init(sc);
2320 } else {
2321 if (ifp->if_flags & IFF_RUNNING)
2322 fxp_stop(sc);
2323 }
2324 break;
2325
2326 case SIOCADDMULTI:
2327 case SIOCDELMULTI:
2328 if (ifp->if_flags & IFF_ALLMULTI)
2329 sc->flags |= FXP_FLAG_ALL_MCAST;
2330 else
2331 sc->flags &= ~FXP_FLAG_ALL_MCAST;
2332 /*
2333 * Multicast list has changed; set the hardware filter
2334 * accordingly.
2335 */
2336 if ((sc->flags & FXP_FLAG_ALL_MCAST) == 0)
2337 fxp_mc_setup(sc);
2338 /*
2339 * fxp_mc_setup() can set FXP_FLAG_ALL_MCAST, so check it
2340 * again rather than else {}.
2341 */
2342 if (sc->flags & FXP_FLAG_ALL_MCAST)
2343 fxp_init(sc);
2344 error = 0;
2345 break;
2346
2347 case SIOCSIFMEDIA:
2348 case SIOCGIFMEDIA:
2349 if (sc->miibus != NULL) {
2350 mii = device_get_softc(sc->miibus);
2351 error = ifmedia_ioctl(ifp, ifr,
2352 &mii->mii_media, command);
2353 } else {
2354 error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, command);
2355 }
2356 break;
2357
2358 default:
2359 error = ether_ioctl(ifp, command, data);
2360 }
2361 splx(s);
2362 return (error);
2363}
2364
2365/*
2366 * Fill in the multicast address list and return number of entries.
2367 */
2368static int
2369fxp_mc_addrs(struct fxp_softc *sc)
2370{
2371 struct fxp_cb_mcs *mcsp = sc->mcsp;
2372 struct ifnet *ifp = &sc->sc_if;
2373 struct ifmultiaddr *ifma;
2374 int nmcasts;
2375
2376 nmcasts = 0;
2377 if ((sc->flags & FXP_FLAG_ALL_MCAST) == 0) {
2378#if __FreeBSD_version < 500000
2379 LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2380#else
2381 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2382#endif
2383 if (ifma->ifma_addr->sa_family != AF_LINK)
2384 continue;
2385 if (nmcasts >= MAXMCADDR) {
2386 sc->flags |= FXP_FLAG_ALL_MCAST;
2387 nmcasts = 0;
2388 break;
2389 }
2390 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
2037 sizeof(sc->arpcom.ac_enaddr));
2038
2039 /*
2040 * Start the IAS (Individual Address Setup) command/DMA.
2041 */
2042 fxp_scb_wait(sc);
2043 bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
2044 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
2045 /* ...and wait for it to complete. */
2046 fxp_dma_wait(&cb_ias->cb_status, sc);
2047 bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_POSTWRITE);
2048
2049 /*
2050 * Initialize transmit control block (TxCB) list.
2051 */
2052 txp = sc->fxp_desc.tx_list;
2053 tcbp = sc->fxp_desc.cbl_list;
2054 bzero(tcbp, FXP_TXCB_SZ);
2055 for (i = 0; i < FXP_NTXCB; i++) {
2056 txp[i].tx_cb = tcbp + i;
2057 txp[i].tx_mbuf = NULL;
2058 tcbp[i].cb_status = FXP_CB_STATUS_C | FXP_CB_STATUS_OK;
2059 tcbp[i].cb_command = FXP_CB_COMMAND_NOP;
2060 tcbp[i].link_addr = sc->fxp_desc.cbl_addr +
2061 (((i + 1) & FXP_TXCB_MASK) * sizeof(struct fxp_cb_tx));
2062 if (sc->flags & FXP_FLAG_EXT_TXCB)
2063 tcbp[i].tbd_array_addr =
2064 FXP_TXCB_DMA_ADDR(sc, &tcbp[i].tbd[2]);
2065 else
2066 tcbp[i].tbd_array_addr =
2067 FXP_TXCB_DMA_ADDR(sc, &tcbp[i].tbd[0]);
2068 txp[i].tx_next = &txp[(i + 1) & FXP_TXCB_MASK];
2069 }
2070 /*
2071 * Set the suspend flag on the first TxCB and start the control
2072 * unit. It will execute the NOP and then suspend.
2073 */
2074 tcbp->cb_command = FXP_CB_COMMAND_NOP | FXP_CB_COMMAND_S;
2075 bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
2076 sc->fxp_desc.tx_first = sc->fxp_desc.tx_last = txp;
2077 sc->tx_queued = 1;
2078
2079 fxp_scb_wait(sc);
2080 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
2081
2082 /*
2083 * Initialize receiver buffer area - RFA.
2084 */
2085 fxp_scb_wait(sc);
2086 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->fxp_desc.rx_head->rx_addr);
2087 fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_START);
2088
2089 /*
2090 * Set current media.
2091 */
2092 if (sc->miibus != NULL)
2093 mii_mediachg(device_get_softc(sc->miibus));
2094
2095 ifp->if_flags |= IFF_RUNNING;
2096 ifp->if_flags &= ~IFF_OACTIVE;
2097
2098 /*
2099 * Enable interrupts.
2100 */
2101#ifdef DEVICE_POLLING
2102 /*
2103 * ... but only do that if we are not polling. And because (presumably)
2104 * the default is interrupts on, we need to disable them explicitly!
2105 */
2106 if ( ifp->if_flags & IFF_POLLING )
2107 CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_DISABLE);
2108 else
2109#endif /* DEVICE_POLLING */
2110 CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, 0);
2111 splx(s);
2112
2113 /*
2114 * Start stats updater.
2115 */
2116 sc->stat_ch = timeout(fxp_tick, sc, hz);
2117}
2118
2119static int
2120fxp_serial_ifmedia_upd(struct ifnet *ifp)
2121{
2122
2123 return (0);
2124}
2125
2126static void
2127fxp_serial_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2128{
2129
2130 ifmr->ifm_active = IFM_ETHER|IFM_MANUAL;
2131}
2132
2133/*
2134 * Change media according to request.
2135 */
2136static int
2137fxp_ifmedia_upd(struct ifnet *ifp)
2138{
2139 struct fxp_softc *sc = ifp->if_softc;
2140 struct mii_data *mii;
2141
2142 mii = device_get_softc(sc->miibus);
2143 mii_mediachg(mii);
2144 return (0);
2145}
2146
2147/*
2148 * Notify the world which media we're using.
2149 */
2150static void
2151fxp_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
2152{
2153 struct fxp_softc *sc = ifp->if_softc;
2154 struct mii_data *mii;
2155
2156 mii = device_get_softc(sc->miibus);
2157 mii_pollstat(mii);
2158 ifmr->ifm_active = mii->mii_media_active;
2159 ifmr->ifm_status = mii->mii_media_status;
2160
2161 if (ifmr->ifm_status & IFM_10_T && sc->flags & FXP_FLAG_CU_RESUME_BUG)
2162 sc->cu_resume_bug = 1;
2163 else
2164 sc->cu_resume_bug = 0;
2165}
2166
2167/*
2168 * Add a buffer to the end of the RFA buffer list.
2169 * Return 0 if successful, 1 for failure. A failure results in
2170 * adding the 'oldm' (if non-NULL) on to the end of the list -
2171 * tossing out its old contents and recycling it.
2172 * The RFA struct is stuck at the beginning of mbuf cluster and the
2173 * data pointer is fixed up to point just past it.
2174 */
2175static int
2176fxp_add_rfabuf(struct fxp_softc *sc, struct fxp_rx *rxp)
2177{
2178 struct mbuf *m;
2179 struct fxp_rfa *rfa, *p_rfa;
2180 struct fxp_rx *p_rx;
2181 bus_dmamap_t tmp_map;
2182 u_int32_t v;
2183 int error;
2184
2185 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
2186 if (m == NULL)
2187 return (ENOBUFS);
2188
2189 /*
2190 * Move the data pointer up so that the incoming data packet
2191 * will be 32-bit aligned.
2192 */
2193 m->m_data += RFA_ALIGNMENT_FUDGE;
2194
2195 /*
2196 * Get a pointer to the base of the mbuf cluster and move
2197 * data start past it.
2198 */
2199 rfa = mtod(m, struct fxp_rfa *);
2200 m->m_data += sc->rfa_size;
2201 rfa->size = MCLBYTES - sc->rfa_size - RFA_ALIGNMENT_FUDGE;
2202
2203 /*
2204 * Initialize the rest of the RFA. Note that since the RFA
2205 * is misaligned, we cannot store values directly. Instead,
2206 * we use an optimized, inline copy.
2207 */
2208
2209 rfa->rfa_status = 0;
2210 rfa->rfa_control = FXP_RFA_CONTROL_EL;
2211 rfa->actual_size = 0;
2212
2213 v = -1;
2214 fxp_lwcopy(&v, (volatile u_int32_t *) rfa->link_addr);
2215 fxp_lwcopy(&v, (volatile u_int32_t *) rfa->rbd_addr);
2216
2217 /* Map the RFA into DMA memory. */
2218 error = bus_dmamap_load(sc->fxp_mtag, sc->spare_map, rfa,
2219 MCLBYTES - RFA_ALIGNMENT_FUDGE, fxp_dma_map_addr,
2220 &rxp->rx_addr, 0);
2221 if (error) {
2222 m_freem(m);
2223 return (error);
2224 }
2225
2226 bus_dmamap_unload(sc->fxp_mtag, rxp->rx_map);
2227 tmp_map = sc->spare_map;
2228 sc->spare_map = rxp->rx_map;
2229 rxp->rx_map = tmp_map;
2230 rxp->rx_mbuf = m;
2231
2232 bus_dmamap_sync(sc->fxp_mtag, rxp->rx_map, BUS_DMASYNC_PREREAD);
2233
2234 /*
2235 * If there are other buffers already on the list, attach this
2236 * one to the end by fixing up the tail to point to this one.
2237 */
2238 if (sc->fxp_desc.rx_head != NULL) {
2239 p_rx = sc->fxp_desc.rx_tail;
2240 p_rfa = (struct fxp_rfa *)
2241 (p_rx->rx_mbuf->m_ext.ext_buf + RFA_ALIGNMENT_FUDGE);
2242 p_rx->rx_next = rxp;
2243 fxp_lwcopy(&rxp->rx_addr,
2244 (volatile u_int32_t *)p_rfa->link_addr);
2245 p_rfa->rfa_control = 0;
2246 bus_dmamap_sync(sc->fxp_mtag, p_rx->rx_map,
2247 BUS_DMASYNC_PREREAD);
2248 } else {
2249 rxp->rx_next = NULL;
2250 sc->fxp_desc.rx_head = rxp;
2251 }
2252 sc->fxp_desc.rx_tail = rxp;
2253 return (0);
2254}
2255
2256static volatile int
2257fxp_miibus_readreg(device_t dev, int phy, int reg)
2258{
2259 struct fxp_softc *sc = device_get_softc(dev);
2260 int count = 10000;
2261 int value;
2262
2263 CSR_WRITE_4(sc, FXP_CSR_MDICONTROL,
2264 (FXP_MDI_READ << 26) | (reg << 16) | (phy << 21));
2265
2266 while (((value = CSR_READ_4(sc, FXP_CSR_MDICONTROL)) & 0x10000000) == 0
2267 && count--)
2268 DELAY(10);
2269
2270 if (count <= 0)
2271 device_printf(dev, "fxp_miibus_readreg: timed out\n");
2272
2273 return (value & 0xffff);
2274}
2275
2276static void
2277fxp_miibus_writereg(device_t dev, int phy, int reg, int value)
2278{
2279 struct fxp_softc *sc = device_get_softc(dev);
2280 int count = 10000;
2281
2282 CSR_WRITE_4(sc, FXP_CSR_MDICONTROL,
2283 (FXP_MDI_WRITE << 26) | (reg << 16) | (phy << 21) |
2284 (value & 0xffff));
2285
2286 while ((CSR_READ_4(sc, FXP_CSR_MDICONTROL) & 0x10000000) == 0 &&
2287 count--)
2288 DELAY(10);
2289
2290 if (count <= 0)
2291 device_printf(dev, "fxp_miibus_writereg: timed out\n");
2292}
2293
2294static int
2295fxp_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
2296{
2297 struct fxp_softc *sc = ifp->if_softc;
2298 struct ifreq *ifr = (struct ifreq *)data;
2299 struct mii_data *mii;
2300 int s, error = 0;
2301
2302 s = splimp();
2303
2304 switch (command) {
2305 case SIOCSIFFLAGS:
2306 if (ifp->if_flags & IFF_ALLMULTI)
2307 sc->flags |= FXP_FLAG_ALL_MCAST;
2308 else
2309 sc->flags &= ~FXP_FLAG_ALL_MCAST;
2310
2311 /*
2312 * If interface is marked up and not running, then start it.
2313 * If it is marked down and running, stop it.
2314 * XXX If it's up then re-initialize it. This is so flags
2315 * such as IFF_PROMISC are handled.
2316 */
2317 if (ifp->if_flags & IFF_UP) {
2318 fxp_init(sc);
2319 } else {
2320 if (ifp->if_flags & IFF_RUNNING)
2321 fxp_stop(sc);
2322 }
2323 break;
2324
2325 case SIOCADDMULTI:
2326 case SIOCDELMULTI:
2327 if (ifp->if_flags & IFF_ALLMULTI)
2328 sc->flags |= FXP_FLAG_ALL_MCAST;
2329 else
2330 sc->flags &= ~FXP_FLAG_ALL_MCAST;
2331 /*
2332 * Multicast list has changed; set the hardware filter
2333 * accordingly.
2334 */
2335 if ((sc->flags & FXP_FLAG_ALL_MCAST) == 0)
2336 fxp_mc_setup(sc);
2337 /*
2338 * fxp_mc_setup() can set FXP_FLAG_ALL_MCAST, so check it
2339 * again rather than else {}.
2340 */
2341 if (sc->flags & FXP_FLAG_ALL_MCAST)
2342 fxp_init(sc);
2343 error = 0;
2344 break;
2345
2346 case SIOCSIFMEDIA:
2347 case SIOCGIFMEDIA:
2348 if (sc->miibus != NULL) {
2349 mii = device_get_softc(sc->miibus);
2350 error = ifmedia_ioctl(ifp, ifr,
2351 &mii->mii_media, command);
2352 } else {
2353 error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, command);
2354 }
2355 break;
2356
2357 default:
2358 error = ether_ioctl(ifp, command, data);
2359 }
2360 splx(s);
2361 return (error);
2362}
2363
2364/*
2365 * Fill in the multicast address list and return number of entries.
2366 */
2367static int
2368fxp_mc_addrs(struct fxp_softc *sc)
2369{
2370 struct fxp_cb_mcs *mcsp = sc->mcsp;
2371 struct ifnet *ifp = &sc->sc_if;
2372 struct ifmultiaddr *ifma;
2373 int nmcasts;
2374
2375 nmcasts = 0;
2376 if ((sc->flags & FXP_FLAG_ALL_MCAST) == 0) {
2377#if __FreeBSD_version < 500000
2378 LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2379#else
2380 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2381#endif
2382 if (ifma->ifma_addr->sa_family != AF_LINK)
2383 continue;
2384 if (nmcasts >= MAXMCADDR) {
2385 sc->flags |= FXP_FLAG_ALL_MCAST;
2386 nmcasts = 0;
2387 break;
2388 }
2389 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
2391 (void *)(uintptr_t)(volatile void *)
2392 &sc->mcsp->mc_addr[nmcasts][0], 6);
2390 &sc->mcsp->mc_addr[nmcasts][0], 6);
2393 nmcasts++;
2394 }
2395 }
2396 mcsp->mc_cnt = nmcasts * 6;
2397 return (nmcasts);
2398}
2399
2400/*
2401 * Program the multicast filter.
2402 *
2403 * We have an artificial restriction that the multicast setup command
2404 * must be the first command in the chain, so we take steps to ensure
2405 * this. By requiring this, it allows us to keep up the performance of
2406 * the pre-initialized command ring (esp. link pointers) by not actually
2407 * inserting the mcsetup command in the ring - i.e. its link pointer
2408 * points to the TxCB ring, but the mcsetup descriptor itself is not part
2409 * of it. We then can do 'CU_START' on the mcsetup descriptor and have it
2410 * lead into the regular TxCB ring when it completes.
2411 *
2412 * This function must be called at splimp.
2413 */
2414static void
2415fxp_mc_setup(struct fxp_softc *sc)
2416{
2417 struct fxp_cb_mcs *mcsp = sc->mcsp;
2418 struct ifnet *ifp = &sc->sc_if;
2419 struct fxp_tx *txp;
2420 int count;
2421
2422 /*
2423 * If there are queued commands, we must wait until they are all
2424 * completed. If we are already waiting, then add a NOP command
2425 * with interrupt option so that we're notified when all commands
2426 * have been completed - fxp_start() ensures that no additional
2427 * TX commands will be added when need_mcsetup is true.
2428 */
2429 if (sc->tx_queued) {
2430 /*
2431 * need_mcsetup will be true if we are already waiting for the
2432 * NOP command to be completed (see below). In this case, bail.
2433 */
2434 if (sc->need_mcsetup)
2435 return;
2436 sc->need_mcsetup = 1;
2437
2438 /*
2439 * Add a NOP command with interrupt so that we are notified
2440 * when all TX commands have been processed.
2441 */
2442 txp = sc->fxp_desc.tx_last->tx_next;
2443 txp->tx_mbuf = NULL;
2444 txp->tx_cb->cb_status = 0;
2445 txp->tx_cb->cb_command = FXP_CB_COMMAND_NOP |
2446 FXP_CB_COMMAND_S | FXP_CB_COMMAND_I;
2447 bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
2448 /*
2449 * Advance the end of list forward.
2450 */
2451 sc->fxp_desc.tx_last->tx_cb->cb_command &= ~FXP_CB_COMMAND_S;
2452 sc->fxp_desc.tx_last = txp;
2453 sc->tx_queued++;
2454 /*
2455 * Issue a resume in case the CU has just suspended.
2456 */
2457 fxp_scb_wait(sc);
2458 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_RESUME);
2459 /*
2460 * Set a 5 second timer just in case we don't hear from the
2461 * card again.
2462 */
2463 ifp->if_timer = 5;
2464
2465 return;
2466 }
2467 sc->need_mcsetup = 0;
2468
2469 /*
2470 * Initialize multicast setup descriptor.
2471 */
2472 mcsp->cb_status = 0;
2473 mcsp->cb_command = FXP_CB_COMMAND_MCAS |
2474 FXP_CB_COMMAND_S | FXP_CB_COMMAND_I;
2475 mcsp->link_addr = sc->fxp_desc.cbl_addr;
2476 txp = &sc->fxp_desc.mcs_tx;
2477 txp->tx_mbuf = NULL;
2478 txp->tx_cb = (struct fxp_cb_tx *)sc->mcsp;
2479 txp->tx_next = sc->fxp_desc.tx_list;
2480 (void) fxp_mc_addrs(sc);
2481 sc->fxp_desc.tx_first = sc->fxp_desc.tx_last = txp;
2482 sc->tx_queued = 1;
2483
2484 /*
2485 * Wait until command unit is not active. This should never
2486 * be the case when nothing is queued, but make sure anyway.
2487 */
2488 count = 100;
2489 while ((CSR_READ_1(sc, FXP_CSR_SCB_RUSCUS) >> 6) ==
2490 FXP_SCB_CUS_ACTIVE && --count)
2491 DELAY(10);
2492 if (count == 0) {
2493 device_printf(sc->dev, "command queue timeout\n");
2494 return;
2495 }
2496
2497 /*
2498 * Start the multicast setup command.
2499 */
2500 fxp_scb_wait(sc);
2501 bus_dmamap_sync(sc->mcs_tag, sc->mcs_map, BUS_DMASYNC_PREWRITE);
2502 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->mcs_addr);
2503 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
2504
2505 ifp->if_timer = 2;
2506 return;
2507}
2508
2509static u_int32_t fxp_ucode_d101a[] = D101_A_RCVBUNDLE_UCODE;
2510static u_int32_t fxp_ucode_d101b0[] = D101_B0_RCVBUNDLE_UCODE;
2511static u_int32_t fxp_ucode_d101ma[] = D101M_B_RCVBUNDLE_UCODE;
2512static u_int32_t fxp_ucode_d101s[] = D101S_RCVBUNDLE_UCODE;
2513static u_int32_t fxp_ucode_d102[] = D102_B_RCVBUNDLE_UCODE;
2514static u_int32_t fxp_ucode_d102c[] = D102_C_RCVBUNDLE_UCODE;
2515
2516#define UCODE(x) x, sizeof(x)
2517
2518struct ucode {
2519 u_int32_t revision;
2520 u_int32_t *ucode;
2521 int length;
2522 u_short int_delay_offset;
2523 u_short bundle_max_offset;
2524} ucode_table[] = {
2525 { FXP_REV_82558_A4, UCODE(fxp_ucode_d101a), D101_CPUSAVER_DWORD, 0 },
2526 { FXP_REV_82558_B0, UCODE(fxp_ucode_d101b0), D101_CPUSAVER_DWORD, 0 },
2527 { FXP_REV_82559_A0, UCODE(fxp_ucode_d101ma),
2528 D101M_CPUSAVER_DWORD, D101M_CPUSAVER_BUNDLE_MAX_DWORD },
2529 { FXP_REV_82559S_A, UCODE(fxp_ucode_d101s),
2530 D101S_CPUSAVER_DWORD, D101S_CPUSAVER_BUNDLE_MAX_DWORD },
2531 { FXP_REV_82550, UCODE(fxp_ucode_d102),
2532 D102_B_CPUSAVER_DWORD, D102_B_CPUSAVER_BUNDLE_MAX_DWORD },
2533 { FXP_REV_82550_C, UCODE(fxp_ucode_d102c),
2534 D102_C_CPUSAVER_DWORD, D102_C_CPUSAVER_BUNDLE_MAX_DWORD },
2535 { 0, NULL, 0, 0, 0 }
2536};
2537
2538static void
2539fxp_load_ucode(struct fxp_softc *sc)
2540{
2541 struct ucode *uc;
2542 struct fxp_cb_ucode *cbp;
2543
2544 for (uc = ucode_table; uc->ucode != NULL; uc++)
2545 if (sc->revision == uc->revision)
2546 break;
2547 if (uc->ucode == NULL)
2548 return;
2549 cbp = (struct fxp_cb_ucode *)sc->fxp_desc.cbl_list;
2550 cbp->cb_status = 0;
2551 cbp->cb_command = FXP_CB_COMMAND_UCODE | FXP_CB_COMMAND_EL;
2552 cbp->link_addr = -1; /* (no) next command */
2553 memcpy(cbp->ucode, uc->ucode, uc->length);
2554 if (uc->int_delay_offset)
2555 *(u_short *)&cbp->ucode[uc->int_delay_offset] =
2556 sc->tunable_int_delay + sc->tunable_int_delay / 2;
2557 if (uc->bundle_max_offset)
2558 *(u_short *)&cbp->ucode[uc->bundle_max_offset] =
2559 sc->tunable_bundle_max;
2560 /*
2561 * Download the ucode to the chip.
2562 */
2563 fxp_scb_wait(sc);
2564 bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
2565 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->fxp_desc.cbl_addr);
2566 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
2567 /* ...and wait for it to complete. */
2568 fxp_dma_wait(&cbp->cb_status, sc);
2569 bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_POSTWRITE);
2570 device_printf(sc->dev,
2571 "Microcode loaded, int_delay: %d usec bundle_max: %d\n",
2572 sc->tunable_int_delay,
2573 uc->bundle_max_offset == 0 ? 0 : sc->tunable_bundle_max);
2574 sc->flags |= FXP_FLAG_UCODE;
2575}
2576
2577static int
2578sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
2579{
2580 int error, value;
2581
2582 value = *(int *)arg1;
2583 error = sysctl_handle_int(oidp, &value, 0, req);
2584 if (error || !req->newptr)
2585 return (error);
2586 if (value < low || value > high)
2587 return (EINVAL);
2588 *(int *)arg1 = value;
2589 return (0);
2590}
2591
2592/*
2593 * Interrupt delay is expressed in microseconds, a multiplier is used
2594 * to convert this to the appropriate clock ticks before using.
2595 */
2596static int
2597sysctl_hw_fxp_int_delay(SYSCTL_HANDLER_ARGS)
2598{
2599 return (sysctl_int_range(oidp, arg1, arg2, req, 300, 3000));
2600}
2601
2602static int
2603sysctl_hw_fxp_bundle_max(SYSCTL_HANDLER_ARGS)
2604{
2605 return (sysctl_int_range(oidp, arg1, arg2, req, 1, 0xffff));
2606}
2391 nmcasts++;
2392 }
2393 }
2394 mcsp->mc_cnt = nmcasts * 6;
2395 return (nmcasts);
2396}
2397
2398/*
2399 * Program the multicast filter.
2400 *
2401 * We have an artificial restriction that the multicast setup command
2402 * must be the first command in the chain, so we take steps to ensure
2403 * this. By requiring this, it allows us to keep up the performance of
2404 * the pre-initialized command ring (esp. link pointers) by not actually
2405 * inserting the mcsetup command in the ring - i.e. its link pointer
2406 * points to the TxCB ring, but the mcsetup descriptor itself is not part
2407 * of it. We then can do 'CU_START' on the mcsetup descriptor and have it
2408 * lead into the regular TxCB ring when it completes.
2409 *
2410 * This function must be called at splimp.
2411 */
2412static void
2413fxp_mc_setup(struct fxp_softc *sc)
2414{
2415 struct fxp_cb_mcs *mcsp = sc->mcsp;
2416 struct ifnet *ifp = &sc->sc_if;
2417 struct fxp_tx *txp;
2418 int count;
2419
2420 /*
2421 * If there are queued commands, we must wait until they are all
2422 * completed. If we are already waiting, then add a NOP command
2423 * with interrupt option so that we're notified when all commands
2424 * have been completed - fxp_start() ensures that no additional
2425 * TX commands will be added when need_mcsetup is true.
2426 */
2427 if (sc->tx_queued) {
2428 /*
2429 * need_mcsetup will be true if we are already waiting for the
2430 * NOP command to be completed (see below). In this case, bail.
2431 */
2432 if (sc->need_mcsetup)
2433 return;
2434 sc->need_mcsetup = 1;
2435
2436 /*
2437 * Add a NOP command with interrupt so that we are notified
2438 * when all TX commands have been processed.
2439 */
2440 txp = sc->fxp_desc.tx_last->tx_next;
2441 txp->tx_mbuf = NULL;
2442 txp->tx_cb->cb_status = 0;
2443 txp->tx_cb->cb_command = FXP_CB_COMMAND_NOP |
2444 FXP_CB_COMMAND_S | FXP_CB_COMMAND_I;
2445 bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
2446 /*
2447 * Advance the end of list forward.
2448 */
2449 sc->fxp_desc.tx_last->tx_cb->cb_command &= ~FXP_CB_COMMAND_S;
2450 sc->fxp_desc.tx_last = txp;
2451 sc->tx_queued++;
2452 /*
2453 * Issue a resume in case the CU has just suspended.
2454 */
2455 fxp_scb_wait(sc);
2456 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_RESUME);
2457 /*
2458 * Set a 5 second timer just in case we don't hear from the
2459 * card again.
2460 */
2461 ifp->if_timer = 5;
2462
2463 return;
2464 }
2465 sc->need_mcsetup = 0;
2466
2467 /*
2468 * Initialize multicast setup descriptor.
2469 */
2470 mcsp->cb_status = 0;
2471 mcsp->cb_command = FXP_CB_COMMAND_MCAS |
2472 FXP_CB_COMMAND_S | FXP_CB_COMMAND_I;
2473 mcsp->link_addr = sc->fxp_desc.cbl_addr;
2474 txp = &sc->fxp_desc.mcs_tx;
2475 txp->tx_mbuf = NULL;
2476 txp->tx_cb = (struct fxp_cb_tx *)sc->mcsp;
2477 txp->tx_next = sc->fxp_desc.tx_list;
2478 (void) fxp_mc_addrs(sc);
2479 sc->fxp_desc.tx_first = sc->fxp_desc.tx_last = txp;
2480 sc->tx_queued = 1;
2481
2482 /*
2483 * Wait until command unit is not active. This should never
2484 * be the case when nothing is queued, but make sure anyway.
2485 */
2486 count = 100;
2487 while ((CSR_READ_1(sc, FXP_CSR_SCB_RUSCUS) >> 6) ==
2488 FXP_SCB_CUS_ACTIVE && --count)
2489 DELAY(10);
2490 if (count == 0) {
2491 device_printf(sc->dev, "command queue timeout\n");
2492 return;
2493 }
2494
2495 /*
2496 * Start the multicast setup command.
2497 */
2498 fxp_scb_wait(sc);
2499 bus_dmamap_sync(sc->mcs_tag, sc->mcs_map, BUS_DMASYNC_PREWRITE);
2500 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->mcs_addr);
2501 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
2502
2503 ifp->if_timer = 2;
2504 return;
2505}
2506
2507static u_int32_t fxp_ucode_d101a[] = D101_A_RCVBUNDLE_UCODE;
2508static u_int32_t fxp_ucode_d101b0[] = D101_B0_RCVBUNDLE_UCODE;
2509static u_int32_t fxp_ucode_d101ma[] = D101M_B_RCVBUNDLE_UCODE;
2510static u_int32_t fxp_ucode_d101s[] = D101S_RCVBUNDLE_UCODE;
2511static u_int32_t fxp_ucode_d102[] = D102_B_RCVBUNDLE_UCODE;
2512static u_int32_t fxp_ucode_d102c[] = D102_C_RCVBUNDLE_UCODE;
2513
2514#define UCODE(x) x, sizeof(x)
2515
2516struct ucode {
2517 u_int32_t revision;
2518 u_int32_t *ucode;
2519 int length;
2520 u_short int_delay_offset;
2521 u_short bundle_max_offset;
2522} ucode_table[] = {
2523 { FXP_REV_82558_A4, UCODE(fxp_ucode_d101a), D101_CPUSAVER_DWORD, 0 },
2524 { FXP_REV_82558_B0, UCODE(fxp_ucode_d101b0), D101_CPUSAVER_DWORD, 0 },
2525 { FXP_REV_82559_A0, UCODE(fxp_ucode_d101ma),
2526 D101M_CPUSAVER_DWORD, D101M_CPUSAVER_BUNDLE_MAX_DWORD },
2527 { FXP_REV_82559S_A, UCODE(fxp_ucode_d101s),
2528 D101S_CPUSAVER_DWORD, D101S_CPUSAVER_BUNDLE_MAX_DWORD },
2529 { FXP_REV_82550, UCODE(fxp_ucode_d102),
2530 D102_B_CPUSAVER_DWORD, D102_B_CPUSAVER_BUNDLE_MAX_DWORD },
2531 { FXP_REV_82550_C, UCODE(fxp_ucode_d102c),
2532 D102_C_CPUSAVER_DWORD, D102_C_CPUSAVER_BUNDLE_MAX_DWORD },
2533 { 0, NULL, 0, 0, 0 }
2534};
2535
2536static void
2537fxp_load_ucode(struct fxp_softc *sc)
2538{
2539 struct ucode *uc;
2540 struct fxp_cb_ucode *cbp;
2541
2542 for (uc = ucode_table; uc->ucode != NULL; uc++)
2543 if (sc->revision == uc->revision)
2544 break;
2545 if (uc->ucode == NULL)
2546 return;
2547 cbp = (struct fxp_cb_ucode *)sc->fxp_desc.cbl_list;
2548 cbp->cb_status = 0;
2549 cbp->cb_command = FXP_CB_COMMAND_UCODE | FXP_CB_COMMAND_EL;
2550 cbp->link_addr = -1; /* (no) next command */
2551 memcpy(cbp->ucode, uc->ucode, uc->length);
2552 if (uc->int_delay_offset)
2553 *(u_short *)&cbp->ucode[uc->int_delay_offset] =
2554 sc->tunable_int_delay + sc->tunable_int_delay / 2;
2555 if (uc->bundle_max_offset)
2556 *(u_short *)&cbp->ucode[uc->bundle_max_offset] =
2557 sc->tunable_bundle_max;
2558 /*
2559 * Download the ucode to the chip.
2560 */
2561 fxp_scb_wait(sc);
2562 bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
2563 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->fxp_desc.cbl_addr);
2564 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
2565 /* ...and wait for it to complete. */
2566 fxp_dma_wait(&cbp->cb_status, sc);
2567 bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_POSTWRITE);
2568 device_printf(sc->dev,
2569 "Microcode loaded, int_delay: %d usec bundle_max: %d\n",
2570 sc->tunable_int_delay,
2571 uc->bundle_max_offset == 0 ? 0 : sc->tunable_bundle_max);
2572 sc->flags |= FXP_FLAG_UCODE;
2573}
2574
2575static int
2576sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
2577{
2578 int error, value;
2579
2580 value = *(int *)arg1;
2581 error = sysctl_handle_int(oidp, &value, 0, req);
2582 if (error || !req->newptr)
2583 return (error);
2584 if (value < low || value > high)
2585 return (EINVAL);
2586 *(int *)arg1 = value;
2587 return (0);
2588}
2589
2590/*
2591 * Interrupt delay is expressed in microseconds, a multiplier is used
2592 * to convert this to the appropriate clock ticks before using.
2593 */
2594static int
2595sysctl_hw_fxp_int_delay(SYSCTL_HANDLER_ARGS)
2596{
2597 return (sysctl_int_range(oidp, arg1, arg2, req, 300, 3000));
2598}
2599
2600static int
2601sysctl_hw_fxp_bundle_max(SYSCTL_HANDLER_ARGS)
2602{
2603 return (sysctl_int_range(oidp, arg1, arg2, req, 1, 0xffff));
2604}