if_fxp.c revision 143069
1/*- 2 * Copyright (c) 1995, David Greenman 3 * Copyright (c) 2001 Jonathan Lemon <jlemon@freebsd.org> 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice unmodified, this list of conditions, and the following 11 * disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 
27 * 28 */ 29 30#include <sys/cdefs.h> 31__FBSDID("$FreeBSD: head/sys/dev/fxp/if_fxp.c 143069 2005-03-03 00:22:59Z mux $"); 32 33/* 34 * Intel EtherExpress Pro/100B PCI Fast Ethernet driver 35 */ 36 37#include <sys/param.h> 38#include <sys/systm.h> 39#include <sys/endian.h> 40#include <sys/mbuf.h> 41 /* #include <sys/mutex.h> */ 42#include <sys/kernel.h> 43#include <sys/module.h> 44#include <sys/socket.h> 45#include <sys/sysctl.h> 46 47#include <net/if.h> 48#include <net/if_dl.h> 49#include <net/if_media.h> 50 51#include <net/bpf.h> 52#include <sys/sockio.h> 53#include <sys/bus.h> 54#include <machine/bus.h> 55#include <sys/rman.h> 56#include <machine/resource.h> 57 58#include <net/ethernet.h> 59#include <net/if_arp.h> 60 61#include <machine/clock.h> /* for DELAY */ 62 63#include <net/if_types.h> 64#include <net/if_vlan_var.h> 65 66#ifdef FXP_IP_CSUM_WAR 67#include <netinet/in.h> 68#include <netinet/in_systm.h> 69#include <netinet/ip.h> 70#include <machine/in_cksum.h> 71#endif 72 73#include <dev/pci/pcivar.h> 74#include <dev/pci/pcireg.h> /* for PCIM_CMD_xxx */ 75 76#include <dev/mii/mii.h> 77#include <dev/mii/miivar.h> 78 79#include <dev/fxp/if_fxpreg.h> 80#include <dev/fxp/if_fxpvar.h> 81#include <dev/fxp/rcvbundl.h> 82 83MODULE_DEPEND(fxp, pci, 1, 1, 1); 84MODULE_DEPEND(fxp, ether, 1, 1, 1); 85MODULE_DEPEND(fxp, miibus, 1, 1, 1); 86#include "miibus_if.h" 87 88/* 89 * NOTE! On the Alpha, we have an alignment constraint. The 90 * card DMAs the packet immediately following the RFA. However, 91 * the first thing in the packet is a 14-byte Ethernet header. 92 * This means that the packet is misaligned. To compensate, 93 * we actually offset the RFA 2 bytes into the cluster. This 94 * alignes the packet after the Ethernet header at a 32-bit 95 * boundary. HOWEVER! This means that the RFA is misaligned! 96 */ 97#define RFA_ALIGNMENT_FUDGE 2 98 99/* 100 * Set initial transmit threshold at 64 (512 bytes). 
This is
 * increased by 64 (512 bytes) at a time, to maximum of 192
 * (1536 bytes), if an underrun occurs.
 */
static int tx_threshold = 64;

/*
 * The configuration byte map has several undefined fields which
 * must be one or must be zero.  Set up a template for these bits
 * only, (assuming a 82557 chip) leaving the actual configuration
 * to fxp_init.
 *
 * See struct fxp_cb_config for the bit definitions.
 */
static u_char fxp_cb_config_template[] = {
	0x0, 0x0,		/* cb_status */
	0x0, 0x0,		/* cb_command */
	0x0, 0x0, 0x0, 0x0,	/* link_addr */
	0x0,	/*  0 */
	0x0,	/*  1 */
	0x0,	/*  2 */
	0x0,	/*  3 */
	0x0,	/*  4 */
	0x0,	/*  5 */
	0x32,	/*  6 */
	0x0,	/*  7 */
	0x0,	/*  8 */
	0x0,	/*  9 */
	0x6,	/* 10 */
	0x0,	/* 11 */
	0x0,	/* 12 */
	0x0,	/* 13 */
	0xf2,	/* 14 */
	0x48,	/* 15 */
	0x0,	/* 16 */
	0x40,	/* 17 */
	0xf0,	/* 18 */
	0x0,	/* 19 */
	0x3f,	/* 20 */
	0x5	/* 21 */
};

/* One PCI match entry; a revid of -1 matches any revision of that devid. */
struct fxp_ident {
	u_int16_t	devid;
	int16_t		revid;		/* -1 matches anything */
	char		*name;
};

/*
 * Claim various Intel PCI device identifiers for this driver.  The
 * sub-vendor and sub-device field are extensively used to identify
 * particular variants, but we don't currently differentiate between
 * them.
 */
static struct fxp_ident fxp_ident_table[] = {
	{ 0x1029,	-1,	"Intel 82559 PCI/CardBus Pro/100" },
	{ 0x1030,	-1,	"Intel 82559 Pro/100 Ethernet" },
	{ 0x1031,	-1,	"Intel 82801CAM (ICH3) Pro/100 VE Ethernet" },
	{ 0x1032,	-1,	"Intel 82801CAM (ICH3) Pro/100 VE Ethernet" },
	{ 0x1033,	-1,	"Intel 82801CAM (ICH3) Pro/100 VM Ethernet" },
	{ 0x1034,	-1,	"Intel 82801CAM (ICH3) Pro/100 VM Ethernet" },
	{ 0x1035,	-1,	"Intel 82801CAM (ICH3) Pro/100 Ethernet" },
	{ 0x1036,	-1,	"Intel 82801CAM (ICH3) Pro/100 Ethernet" },
	{ 0x1037,	-1,	"Intel 82801CAM (ICH3) Pro/100 Ethernet" },
	{ 0x1038,	-1,	"Intel 82801CAM (ICH3) Pro/100 VM Ethernet" },
	{ 0x1039,	-1,	"Intel 82801DB (ICH4) Pro/100 VE Ethernet" },
	{ 0x103A,	-1,	"Intel 82801DB (ICH4) Pro/100 Ethernet" },
	{ 0x103B,	-1,	"Intel 82801DB (ICH4) Pro/100 VM Ethernet" },
	{ 0x103C,	-1,	"Intel 82801DB (ICH4) Pro/100 Ethernet" },
	{ 0x103D,	-1,	"Intel 82801DB (ICH4) Pro/100 VE Ethernet" },
	{ 0x103E,	-1,	"Intel 82801DB (ICH4) Pro/100 VM Ethernet" },
	{ 0x1050,	-1,	"Intel 82801BA (D865) Pro/100 VE Ethernet" },
	{ 0x1051,	-1,	"Intel 82562ET (ICH5/ICH5R) Pro/100 VE Ethernet" },
	{ 0x1059,	-1,	"Intel 82551QM Pro/100 M Mobile Connection" },
	{ 0x1064,	-1,	"Intel 82562EZ (ICH6)" },
	{ 0x1209,	-1,	"Intel 82559ER Embedded 10/100 Ethernet" },
	{ 0x1229,	0x01,	"Intel 82557 Pro/100 Ethernet" },
	{ 0x1229,	0x02,	"Intel 82557 Pro/100 Ethernet" },
	{ 0x1229,	0x03,	"Intel 82557 Pro/100 Ethernet" },
	{ 0x1229,	0x04,	"Intel 82558 Pro/100 Ethernet" },
	{ 0x1229,	0x05,	"Intel 82558 Pro/100 Ethernet" },
	{ 0x1229,	0x06,	"Intel 82559 Pro/100 Ethernet" },
	{ 0x1229,	0x07,	"Intel 82559 Pro/100 Ethernet" },
	{ 0x1229,	0x08,	"Intel 82559 Pro/100 Ethernet" },
	{ 0x1229,	0x09,	"Intel 82559ER Pro/100 Ethernet" },
	{ 0x1229,	0x0c,	"Intel 82550 Pro/100 Ethernet" },
	{ 0x1229,	0x0d,	"Intel 82550 Pro/100 Ethernet" },
	{ 0x1229,	0x0e,	"Intel 82550 Pro/100 Ethernet" },
	{ 0x1229,	0x0f,	"Intel 82551 Pro/100 Ethernet" },
	{ 0x1229,	0x10,	"Intel 82551 Pro/100 Ethernet" },
	{ 0x1229,	-1,	"Intel 82557/8/9 Pro/100 Ethernet" },
	{ 0x2449,	-1,	"Intel 82801BA/CAM (ICH2/3) Pro/100 Ethernet" },
	{ 0,		-1,	NULL },
};

#ifdef FXP_IP_CSUM_WAR
#define FXP_CSUM_FEATURES    (CSUM_IP | CSUM_TCP | CSUM_UDP)
#else
#define FXP_CSUM_FEATURES    (CSUM_TCP | CSUM_UDP)
#endif

/* Forward declarations for the driver entry points and local helpers. */
static int		fxp_probe(device_t dev);
static int		fxp_attach(device_t dev);
static int		fxp_detach(device_t dev);
static int		fxp_shutdown(device_t dev);
static int		fxp_suspend(device_t dev);
static int		fxp_resume(device_t dev);

static void		fxp_intr(void *xsc);
static void		fxp_intr_body(struct fxp_softc *sc, struct ifnet *ifp,
			    u_int8_t statack, int count);
static void		fxp_init(void *xsc);
static void		fxp_init_body(struct fxp_softc *sc);
static void		fxp_tick(void *xsc);
static void		fxp_start(struct ifnet *ifp);
static void		fxp_start_body(struct ifnet *ifp);
static void		fxp_stop(struct fxp_softc *sc);
static void		fxp_release(struct fxp_softc *sc);
static int		fxp_ioctl(struct ifnet *ifp, u_long command,
			    caddr_t data);
static void		fxp_watchdog(struct ifnet *ifp);
static int		fxp_add_rfabuf(struct fxp_softc *sc,
			    struct fxp_rx *rxp);
static int		fxp_mc_addrs(struct fxp_softc *sc);
static void		fxp_mc_setup(struct fxp_softc *sc);
static u_int16_t	fxp_eeprom_getword(struct fxp_softc *sc, int offset,
			    int autosize);
static void		fxp_eeprom_putword(struct fxp_softc *sc, int offset,
			    u_int16_t data);
static void		fxp_autosize_eeprom(struct fxp_softc *sc);
static void		fxp_read_eeprom(struct fxp_softc *sc, u_short *data,
			    int offset, int words);
static void		fxp_write_eeprom(struct fxp_softc *sc, u_short *data,
			    int offset, int words);
static int		fxp_ifmedia_upd(struct ifnet *ifp);
static void		fxp_ifmedia_sts(struct ifnet *ifp,
			    struct ifmediareq *ifmr);
static int		fxp_serial_ifmedia_upd(struct ifnet *ifp);
static void		fxp_serial_ifmedia_sts(struct ifnet *ifp,
			    struct ifmediareq *ifmr);
/*
 * NOTE(review): 'volatile' on a function return type has no effect in C
 * and is likely vestigial here — confirm against if_fxp.c upstream.
 */
static volatile int	fxp_miibus_readreg(device_t dev, int phy, int reg);
static void		fxp_miibus_writereg(device_t dev, int phy, int reg,
			    int value);
static void		fxp_load_ucode(struct fxp_softc *sc);
static int		sysctl_int_range(SYSCTL_HANDLER_ARGS,
			    int low, int high);
static int		sysctl_hw_fxp_bundle_max(SYSCTL_HANDLER_ARGS);
static int		sysctl_hw_fxp_int_delay(SYSCTL_HANDLER_ARGS);
static void		fxp_scb_wait(struct fxp_softc *sc);
static void		fxp_scb_cmd(struct fxp_softc *sc, int cmd);
static void		fxp_dma_wait(struct fxp_softc *sc,
			    volatile u_int16_t *status, bus_dma_tag_t dmat,
			    bus_dmamap_t map);

static device_method_t fxp_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		fxp_probe),
	DEVMETHOD(device_attach,	fxp_attach),
	DEVMETHOD(device_detach,	fxp_detach),
	DEVMETHOD(device_shutdown,	fxp_shutdown),
	DEVMETHOD(device_suspend,	fxp_suspend),
	DEVMETHOD(device_resume,	fxp_resume),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	fxp_miibus_readreg),
	DEVMETHOD(miibus_writereg,	fxp_miibus_writereg),

	{ 0, 0 }
};

static driver_t fxp_driver = {
	"fxp",
	fxp_methods,
	sizeof(struct fxp_softc),
};

static devclass_t fxp_devclass;

DRIVER_MODULE(fxp, pci, fxp_driver, fxp_devclass, 0, 0);
DRIVER_MODULE(fxp, cardbus, fxp_driver, fxp_devclass, 0, 0);
DRIVER_MODULE(miibus, fxp, miibus_driver, miibus_devclass, 0, 0);

/*
 * Wait for the previous command to be accepted (but not necessarily
 * completed.
 */
static void
fxp_scb_wait(struct fxp_softc *sc)
{
	int i = 10000;

	/*
	 * The chip zeroes the SCB command byte once it has accepted the
	 * command; spin (up to ~20ms) until that happens.
	 * NOTE(review): if the byte clears on the very last iteration,
	 * i reaches 0 and a timeout is still reported.
	 */
	while (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) && --i)
		DELAY(2);
	if (i == 0)
		device_printf(sc->dev, "SCB timeout: 0x%x 0x%x 0x%x 0x%x\n",
		    CSR_READ_1(sc, FXP_CSR_SCB_COMMAND),
		    CSR_READ_1(sc, FXP_CSR_SCB_STATACK),
		    CSR_READ_1(sc, FXP_CSR_SCB_RUSCUS),
		    CSR_READ_2(sc, FXP_CSR_FLOWCONTROL));
}

/*
 * Issue a command to the SCB command register.  On parts with the
 * CU_RESUME bug (see the Dynamic Standby workaround in fxp_attach()),
 * a NOP is written and waited on first.
 */
static void
fxp_scb_cmd(struct fxp_softc *sc, int cmd)
{

	if (cmd == FXP_SCB_COMMAND_CU_RESUME && sc->cu_resume_bug) {
		CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_CB_COMMAND_NOP);
		fxp_scb_wait(sc);
	}
	CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, cmd);
}

/*
 * Poll a chip-written (little-endian) status word until the complete
 * bit is set, re-syncing the DMA map before every read so we observe
 * the device's latest store.
 */
static void
fxp_dma_wait(struct fxp_softc *sc, volatile u_int16_t *status,
    bus_dma_tag_t dmat, bus_dmamap_t map)
{
	int i = 10000;

	bus_dmamap_sync(dmat, map, BUS_DMASYNC_POSTREAD);
	while (!(le16toh(*status) & FXP_CB_STATUS_C) && --i) {
		DELAY(2);
		bus_dmamap_sync(dmat, map, BUS_DMASYNC_POSTREAD);
	}
	if (i == 0)
		device_printf(sc->dev, "DMA timeout\n");
}

/*
 * Return identification string if this device is ours.
 */
static int
fxp_probe(device_t dev)
{
	u_int16_t devid;
	u_int8_t revid;
	struct fxp_ident *ident;

	if (pci_get_vendor(dev) == FXP_VENDORID_INTEL) {
		devid = pci_get_device(dev);
		revid = pci_get_revid(dev);
		/* Scan the table; exact-revid entries precede the wildcards. */
		for (ident = fxp_ident_table; ident->name != NULL; ident++) {
			if (ident->devid == devid &&
			    (ident->revid == revid || ident->revid == -1)) {
				device_set_desc(dev, ident->name);
				return (0);
			}
		}
	}
	return (ENXIO);
}

/*
 * busdma callback: store the single segment's bus address into the
 * u_int32_t pointed to by arg.  Asserts there is exactly one segment.
 */
static void
fxp_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	u_int32_t *addr;

	if (error)
		return;

	KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg));
	addr = arg;
	*addr = segs->ds_addr;
}

/*
 * Attach routine: map registers, size the EEPROM, apply chip-revision
 * workarounds, allocate DMA resources, read the MAC address, probe the
 * PHY (or fall back to the serial interface), and attach the ifnet.
 */
static int
fxp_attach(device_t dev)
{
	struct fxp_softc *sc;
	struct fxp_cb_tx *tcbp;
	struct fxp_tx *txp;
	struct fxp_rx *rxp;
	struct ifnet *ifp;
	u_int32_t val;
	u_int16_t data, myea[ETHER_ADDR_LEN / 2];
	int i, rid, m1, m2, prefer_iomap, maxtxseg;
	int error, s;

	error = 0;
	sc = device_get_softc(dev);
	sc->dev = dev;
	callout_init(&sc->stat_ch, CALLOUT_MPSAFE);
	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);
	ifmedia_init(&sc->sc_media, 0, fxp_serial_ifmedia_upd,
	    fxp_serial_ifmedia_sts);

	s = splimp();

	/*
	 * Enable bus mastering.
	 */
	pci_enable_busmaster(dev);
	val = pci_read_config(dev, PCIR_COMMAND, 2);

	/*
	 * Figure out which we should try first - memory mapping or i/o mapping?
	 * We default to memory mapping. Then we accept an override from the
	 * command line. Then we check to see which one is enabled.
	 */
	m1 = PCIM_CMD_MEMEN;
	m2 = PCIM_CMD_PORTEN;
	prefer_iomap = 0;
	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "prefer_iomap", &prefer_iomap) == 0 && prefer_iomap != 0) {
		m1 = PCIM_CMD_PORTEN;
		m2 = PCIM_CMD_MEMEN;
	}

	/* Try the preferred mapping first, then fall back to the other. */
	sc->rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
	sc->rgd = (m1 == PCIM_CMD_MEMEN)? FXP_PCI_MMBA : FXP_PCI_IOBA;
	sc->mem = bus_alloc_resource_any(dev, sc->rtp, &sc->rgd, RF_ACTIVE);
	if (sc->mem == NULL) {
		sc->rtp =
		    (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT;
		sc->rgd = (m2 == PCIM_CMD_MEMEN)? FXP_PCI_MMBA : FXP_PCI_IOBA;
		sc->mem = bus_alloc_resource_any(dev, sc->rtp, &sc->rgd,
		    RF_ACTIVE);
	}

	if (!sc->mem) {
		error = ENXIO;
		goto fail;
	}
	if (bootverbose) {
		device_printf(dev, "using %s space register mapping\n",
		    sc->rtp == SYS_RES_MEMORY? "memory" : "I/O");
	}

	sc->sc_st = rman_get_bustag(sc->mem);
	sc->sc_sh = rman_get_bushandle(sc->mem);

	/*
	 * Allocate our interrupt.
	 */
	rid = 0;
	sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (sc->irq == NULL) {
		device_printf(dev, "could not map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/*
	 * Reset to a stable state.
	 */
	CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET);
	DELAY(10);

	/*
	 * Find out how large of an SEEPROM we have.
	 */
	fxp_autosize_eeprom(sc);

	/*
	 * Find out the chip revision; lump all 82557 revs together.
	 */
	fxp_read_eeprom(sc, &data, 5, 1);
	if ((data >> 8) == 1)
		sc->revision = FXP_REV_82557;
	else
		sc->revision = pci_get_revid(dev);

	/*
	 * Determine whether we must use the 503 serial interface.
	 */
	fxp_read_eeprom(sc, &data, 6, 1);
	if (sc->revision == FXP_REV_82557 && (data & FXP_PHY_DEVICE_MASK) != 0
	    && (data & FXP_PHY_SERIAL_ONLY))
		sc->flags |= FXP_FLAG_SERIAL_MEDIA;

	/* Export the microcode tunables and counters via sysctl. */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "int_delay", CTLTYPE_INT | CTLFLAG_RW,
	    &sc->tunable_int_delay, 0, sysctl_hw_fxp_int_delay, "I",
	    "FXP driver receive interrupt microcode bundling delay");
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "bundle_max", CTLTYPE_INT | CTLFLAG_RW,
	    &sc->tunable_bundle_max, 0, sysctl_hw_fxp_bundle_max, "I",
	    "FXP driver receive interrupt microcode bundle size limit");
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "rnr", CTLFLAG_RD, &sc->rnr, 0,
	    "FXP RNR events");
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "noflow", CTLFLAG_RW, &sc->tunable_noflow, 0,
	    "FXP flow control disabled");

	/*
	 * Pull in device tunables.
	 */
	sc->tunable_int_delay = TUNABLE_INT_DELAY;
	sc->tunable_bundle_max = TUNABLE_BUNDLE_MAX;
	sc->tunable_noflow = 1;
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "int_delay", &sc->tunable_int_delay);
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "bundle_max", &sc->tunable_bundle_max);
	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "noflow", &sc->tunable_noflow);
	sc->rnr = 0;

	/*
	 * Enable workarounds for certain chip revision deficiencies.
	 *
	 * Systems based on the ICH2/ICH2-M chip from Intel, and possibly
	 * some systems based a normal 82559 design, have a defect where
	 * the chip can cause a PCI protocol violation if it receives
	 * a CU_RESUME command when it is entering the IDLE state. The
	 * workaround is to disable Dynamic Standby Mode, so the chip never
	 * deasserts CLKRUN#, and always remains in an active state.
	 *
	 * See Intel 82801BA/82801BAM Specification Update, Errata #30.
	 */
	i = pci_get_device(dev);
	if (i == 0x2449 || (i > 0x1030 && i < 0x1039) ||
	    sc->revision >= FXP_REV_82559_A0) {
		fxp_read_eeprom(sc, &data, 10, 1);
		if (data & 0x02) {			/* STB enable */
			u_int16_t cksum;
			int i;

			device_printf(dev,
			    "Disabling dynamic standby mode in EEPROM\n");
			data &= ~0x02;
			fxp_write_eeprom(sc, &data, 10, 1);
			device_printf(dev, "New EEPROM ID: 0x%x\n", data);
			/*
			 * Recompute the EEPROM checksum (words must sum to
			 * 0xBABA) and store it in the last word.
			 */
			cksum = 0;
			for (i = 0; i < (1 << sc->eeprom_size) - 1; i++) {
				fxp_read_eeprom(sc, &data, i, 1);
				cksum += data;
			}
			i = (1 << sc->eeprom_size) - 1;
			cksum = 0xBABA - cksum;
			fxp_read_eeprom(sc, &data, i, 1);
			fxp_write_eeprom(sc, &cksum, i, 1);
			device_printf(dev,
			    "EEPROM checksum @ 0x%x: 0x%x -> 0x%x\n",
			    i, data, cksum);
#if 1
			/*
			 * If the user elects to continue, try the software
			 * workaround, as it is better than nothing.
			 */
			sc->flags |= FXP_FLAG_CU_RESUME_BUG;
#endif
		}
	}

	/*
	 * If we are not a 82557 chip, we can enable extended features.
	 */
	if (sc->revision != FXP_REV_82557) {
		/*
		 * If MWI is enabled in the PCI configuration, and there
		 * is a valid cacheline size (8 or 16 dwords), then tell
		 * the board to turn on MWI.
		 */
		if (val & PCIM_CMD_MWRICEN &&
		    pci_read_config(dev, PCIR_CACHELNSZ, 1) != 0)
			sc->flags |= FXP_FLAG_MWI_ENABLE;

		/* turn on the extended TxCB feature */
		sc->flags |= FXP_FLAG_EXT_TXCB;

		/* enable reception of long frames for VLAN */
		sc->flags |= FXP_FLAG_LONG_PKT_EN;
	} else {
		/* a hack to get long VLAN frames on a 82557 */
		sc->flags |= FXP_FLAG_SAVE_BAD;
	}

	/*
	 * Enable use of extended RFDs and TCBs for 82550
	 * and later chips. Note: we need extended TXCB support
	 * too, but that's already enabled by the code above.
	 * Be careful to do this only on the right devices.
	 */

	if (sc->revision == FXP_REV_82550 || sc->revision == FXP_REV_82550_C) {
		sc->rfa_size = sizeof (struct fxp_rfa);
		sc->tx_cmd = FXP_CB_COMMAND_IPCBXMIT;
		sc->flags |= FXP_FLAG_EXT_RFA;
	} else {
		sc->rfa_size = sizeof (struct fxp_rfa) - FXP_RFAX_LEN;
		sc->tx_cmd = FXP_CB_COMMAND_XMIT;
	}

	/*
	 * Allocate DMA tags and DMA safe memory.
	 */
	maxtxseg = sc->flags & FXP_FLAG_EXT_RFA ? FXP_NTXSEG - 1 : FXP_NTXSEG;
	error = bus_dma_tag_create(NULL, 2, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES * maxtxseg,
	    maxtxseg, MCLBYTES, 0, busdma_lock_mutex, &Giant, &sc->fxp_mtag);
	if (error) {
		device_printf(dev, "could not allocate dma tag\n");
		goto fail;
	}

	error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL, sizeof(struct fxp_stats), 1,
	    sizeof(struct fxp_stats), 0, busdma_lock_mutex, &Giant,
	    &sc->fxp_stag);
	if (error) {
		device_printf(dev, "could not allocate dma tag\n");
		goto fail;
	}

	error = bus_dmamem_alloc(sc->fxp_stag, (void **)&sc->fxp_stats,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->fxp_smap);
	if (error)
		goto fail;
	error = bus_dmamap_load(sc->fxp_stag, sc->fxp_smap, sc->fxp_stats,
	    sizeof(struct fxp_stats), fxp_dma_map_addr, &sc->stats_addr, 0);
	if (error) {
		device_printf(dev, "could not map the stats buffer\n");
		goto fail;
	}

	error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL, FXP_TXCB_SZ, 1,
	    FXP_TXCB_SZ, 0, busdma_lock_mutex, &Giant, &sc->cbl_tag);
	if (error) {
		device_printf(dev, "could not allocate dma tag\n");
		goto fail;
	}

	error = bus_dmamem_alloc(sc->cbl_tag, (void **)&sc->fxp_desc.cbl_list,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->cbl_map);
	if (error)
		goto fail;

	error = bus_dmamap_load(sc->cbl_tag, sc->cbl_map,
	    sc->fxp_desc.cbl_list, FXP_TXCB_SZ, fxp_dma_map_addr,
	    &sc->fxp_desc.cbl_addr, 0);
	if (error) {
		device_printf(dev, "could not map DMA memory\n");
		goto fail;
	}

	error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL, sizeof(struct fxp_cb_mcs), 1,
	    sizeof(struct fxp_cb_mcs), 0, busdma_lock_mutex, &Giant,
	    &sc->mcs_tag);
	if (error) {
		device_printf(dev, "could not allocate dma tag\n");
		goto fail;
	}

	error = bus_dmamem_alloc(sc->mcs_tag, (void **)&sc->mcsp,
	    BUS_DMA_NOWAIT, &sc->mcs_map);
	if (error)
		goto fail;
	error = bus_dmamap_load(sc->mcs_tag, sc->mcs_map, sc->mcsp,
	    sizeof(struct fxp_cb_mcs), fxp_dma_map_addr, &sc->mcs_addr, 0);
	if (error) {
		device_printf(dev, "can't map the multicast setup command\n");
		goto fail;
	}

	/*
	 * Pre-allocate the TX DMA maps and setup the pointers to
	 * the TX command blocks.
	 */
	txp = sc->fxp_desc.tx_list;
	tcbp = sc->fxp_desc.cbl_list;
	for (i = 0; i < FXP_NTXCB; i++) {
		txp[i].tx_cb = tcbp + i;
		error = bus_dmamap_create(sc->fxp_mtag, 0, &txp[i].tx_map);
		if (error) {
			device_printf(dev, "can't create DMA map for TX\n");
			goto fail;
		}
	}
	error = bus_dmamap_create(sc->fxp_mtag, 0, &sc->spare_map);
	if (error) {
		device_printf(dev, "can't create spare DMA map\n");
		goto fail;
	}

	/*
	 * Pre-allocate our receive buffers.
	 */
	sc->fxp_desc.rx_head = sc->fxp_desc.rx_tail = NULL;
	for (i = 0; i < FXP_NRFABUFS; i++) {
		rxp = &sc->fxp_desc.rx_list[i];
		error = bus_dmamap_create(sc->fxp_mtag, 0, &rxp->rx_map);
		if (error) {
			device_printf(dev, "can't create DMA map for RX\n");
			goto fail;
		}
		if (fxp_add_rfabuf(sc, rxp) != 0) {
			error = ENOMEM;
			goto fail;
		}
	}

	/*
	 * Read MAC address.  The EEPROM stores it as three little-endian
	 * 16-bit words starting at word 0.
	 */
	fxp_read_eeprom(sc, myea, 0, 3);
	sc->arpcom.ac_enaddr[0] = myea[0] & 0xff;
	sc->arpcom.ac_enaddr[1] = myea[0] >> 8;
	sc->arpcom.ac_enaddr[2] = myea[1] & 0xff;
	sc->arpcom.ac_enaddr[3] = myea[1] >> 8;
	sc->arpcom.ac_enaddr[4] = myea[2] & 0xff;
	sc->arpcom.ac_enaddr[5] = myea[2] >> 8;
	if (bootverbose) {
		device_printf(dev, "PCI IDs: %04x %04x %04x %04x %04x\n",
		    pci_get_vendor(dev), pci_get_device(dev),
		    pci_get_subvendor(dev), pci_get_subdevice(dev),
		    pci_get_revid(dev));
		fxp_read_eeprom(sc, &data, 10, 1);
		device_printf(dev, "Dynamic Standby mode is %s\n",
		    data & 0x02 ? "enabled" : "disabled");
	}

	/*
	 * If this is only a 10Mbps device, then there is no MII, and
	 * the PHY will use a serial interface instead.
	 *
	 * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter
	 * doesn't have a programming interface of any sort. The
	 * media is sensed automatically based on how the link partner
	 * is configured. This is, in essence, manual configuration.
	 */
	if (sc->flags & FXP_FLAG_SERIAL_MEDIA) {
		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
	} else {
		if (mii_phy_probe(dev, &sc->miibus, fxp_ifmedia_upd,
		    fxp_ifmedia_sts)) {
			device_printf(dev, "MII without any PHY!\n");
			error = ENXIO;
			goto fail;
		}
	}

	ifp = &sc->arpcom.ac_if;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_baudrate = 100000000;
	ifp->if_init = fxp_init;
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = fxp_ioctl;
	ifp->if_start = fxp_start;
	ifp->if_watchdog = fxp_watchdog;

	ifp->if_capabilities = ifp->if_capenable = 0;

	/* Enable checksum offload for 82550 or better chips */
	if (sc->flags & FXP_FLAG_EXT_RFA) {
		ifp->if_hwassist = FXP_CSUM_FEATURES;
		ifp->if_capabilities |= IFCAP_HWCSUM;
		ifp->if_capenable |= IFCAP_HWCSUM;
	}

#ifdef DEVICE_POLLING
	/* Inform the world we support polling. */
	ifp->if_capabilities |= IFCAP_POLLING;
	ifp->if_capenable |= IFCAP_POLLING;
#endif

	/*
	 * Attach the interface.
	 */
	ether_ifattach(ifp, sc->arpcom.ac_enaddr);

	/*
	 * Tell the upper layer(s) we support long frames.
	 * Must appear after the call to ether_ifattach() because
	 * ether_ifattach() sets ifi_hdrlen to the default value.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capenable |= IFCAP_VLAN_MTU; /* the hw bits already set */

	/*
	 * Let the system queue as many packets as we have available
	 * TX descriptors.
	 */
	IFQ_SET_MAXLEN(&ifp->if_snd, FXP_NTXCB - 1);
	ifp->if_snd.ifq_drv_maxlen = FXP_NTXCB - 1;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Hook our interrupt after all initialization is complete.
790 */ 791 error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE, 792 fxp_intr, sc, &sc->ih); 793 if (error) { 794 device_printf(dev, "could not setup irq\n"); 795 ether_ifdetach(&sc->arpcom.ac_if); 796 goto fail; 797 } 798 799fail: 800 splx(s); 801 if (error) 802 fxp_release(sc); 803 return (error); 804} 805 806/* 807 * Release all resources. The softc lock should not be held and the 808 * interrupt should already be torn down. 809 */ 810static void 811fxp_release(struct fxp_softc *sc) 812{ 813 struct fxp_rx *rxp; 814 struct fxp_tx *txp; 815 int i; 816 817 FXP_LOCK_ASSERT(sc, MA_NOTOWNED); 818 KASSERT(sc->ih == NULL, 819 ("fxp_release() called with intr handle still active")); 820 if (sc->miibus) 821 device_delete_child(sc->dev, sc->miibus); 822 bus_generic_detach(sc->dev); 823 ifmedia_removeall(&sc->sc_media); 824 if (sc->fxp_desc.cbl_list) { 825 bus_dmamap_unload(sc->cbl_tag, sc->cbl_map); 826 bus_dmamem_free(sc->cbl_tag, sc->fxp_desc.cbl_list, 827 sc->cbl_map); 828 } 829 if (sc->fxp_stats) { 830 bus_dmamap_unload(sc->fxp_stag, sc->fxp_smap); 831 bus_dmamem_free(sc->fxp_stag, sc->fxp_stats, sc->fxp_smap); 832 } 833 if (sc->mcsp) { 834 bus_dmamap_unload(sc->mcs_tag, sc->mcs_map); 835 bus_dmamem_free(sc->mcs_tag, sc->mcsp, sc->mcs_map); 836 } 837 if (sc->irq) 838 bus_release_resource(sc->dev, SYS_RES_IRQ, 0, sc->irq); 839 if (sc->mem) 840 bus_release_resource(sc->dev, sc->rtp, sc->rgd, sc->mem); 841 if (sc->fxp_mtag) { 842 for (i = 0; i < FXP_NRFABUFS; i++) { 843 rxp = &sc->fxp_desc.rx_list[i]; 844 if (rxp->rx_mbuf != NULL) { 845 bus_dmamap_sync(sc->fxp_mtag, rxp->rx_map, 846 BUS_DMASYNC_POSTREAD); 847 bus_dmamap_unload(sc->fxp_mtag, rxp->rx_map); 848 m_freem(rxp->rx_mbuf); 849 } 850 bus_dmamap_destroy(sc->fxp_mtag, rxp->rx_map); 851 } 852 bus_dmamap_destroy(sc->fxp_mtag, sc->spare_map); 853 bus_dma_tag_destroy(sc->fxp_mtag); 854 } 855 if (sc->fxp_stag) { 856 for (i = 0; i < FXP_NTXCB; i++) { 857 txp = &sc->fxp_desc.tx_list[i]; 858 if (txp->tx_mbuf != 
NULL) { 859 bus_dmamap_sync(sc->fxp_mtag, txp->tx_map, 860 BUS_DMASYNC_POSTWRITE); 861 bus_dmamap_unload(sc->fxp_mtag, txp->tx_map); 862 m_freem(txp->tx_mbuf); 863 } 864 bus_dmamap_destroy(sc->fxp_mtag, txp->tx_map); 865 } 866 bus_dma_tag_destroy(sc->fxp_stag); 867 } 868 if (sc->cbl_tag) 869 bus_dma_tag_destroy(sc->cbl_tag); 870 if (sc->mcs_tag) 871 bus_dma_tag_destroy(sc->mcs_tag); 872 873 mtx_destroy(&sc->sc_mtx); 874} 875 876/* 877 * Detach interface. 878 */ 879static int 880fxp_detach(device_t dev) 881{ 882 struct fxp_softc *sc = device_get_softc(dev); 883 int s; 884 885 FXP_LOCK(sc); 886 s = splimp(); 887 888 sc->suspended = 1; /* Do same thing as we do for suspend */ 889 /* 890 * Close down routes etc. 891 */ 892 ether_ifdetach(&sc->arpcom.ac_if); 893 894 /* 895 * Stop DMA and drop transmit queue, but disable interrupts first. 896 */ 897 CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_DISABLE); 898 fxp_stop(sc); 899 FXP_UNLOCK(sc); 900 901 /* 902 * Unhook interrupt before dropping lock. This is to prevent 903 * races with fxp_intr(). 904 */ 905 bus_teardown_intr(sc->dev, sc->irq, sc->ih); 906 sc->ih = NULL; 907 908 splx(s); 909 910 /* Release our allocated resources. */ 911 fxp_release(sc); 912 return (0); 913} 914 915/* 916 * Device shutdown routine. Called at system shutdown after sync. The 917 * main purpose of this routine is to shut off receiver DMA so that 918 * kernel memory doesn't get clobbered during warmboot. 919 */ 920static int 921fxp_shutdown(device_t dev) 922{ 923 /* 924 * Make sure that DMA is disabled prior to reboot. Not doing 925 * do could allow DMA to corrupt kernel memory during the 926 * reboot before the driver initializes. 927 */ 928 fxp_stop((struct fxp_softc *) device_get_softc(dev)); 929 return (0); 930} 931 932/* 933 * Device suspend routine. Stop the interface and save some PCI 934 * settings in case the BIOS doesn't restore them properly on 935 * resume. 
936 */ 937static int 938fxp_suspend(device_t dev) 939{ 940 struct fxp_softc *sc = device_get_softc(dev); 941 int i, s; 942 943 FXP_LOCK(sc); 944 s = splimp(); 945 946 fxp_stop(sc); 947 948 for (i = 0; i < 5; i++) 949 sc->saved_maps[i] = pci_read_config(dev, PCIR_BAR(i), 4); 950 sc->saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4); 951 sc->saved_intline = pci_read_config(dev, PCIR_INTLINE, 1); 952 sc->saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1); 953 sc->saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1); 954 955 sc->suspended = 1; 956 957 FXP_UNLOCK(sc); 958 splx(s); 959 return (0); 960} 961 962/* 963 * Device resume routine. Restore some PCI settings in case the BIOS 964 * doesn't, re-enable busmastering, and restart the interface if 965 * appropriate. 966 */ 967static int 968fxp_resume(device_t dev) 969{ 970 struct fxp_softc *sc = device_get_softc(dev); 971 struct ifnet *ifp = &sc->sc_if; 972 u_int16_t pci_command; 973 int i, s; 974 975 FXP_LOCK(sc); 976 s = splimp(); 977 978 /* better way to do this? 
*/ 979 for (i = 0; i < 5; i++) 980 pci_write_config(dev, PCIR_BAR(i), sc->saved_maps[i], 4); 981 pci_write_config(dev, PCIR_BIOS, sc->saved_biosaddr, 4); 982 pci_write_config(dev, PCIR_INTLINE, sc->saved_intline, 1); 983 pci_write_config(dev, PCIR_CACHELNSZ, sc->saved_cachelnsz, 1); 984 pci_write_config(dev, PCIR_LATTIMER, sc->saved_lattimer, 1); 985 986 /* reenable busmastering */ 987 pci_command = pci_read_config(dev, PCIR_COMMAND, 2); 988 pci_command |= (PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN); 989 pci_write_config(dev, PCIR_COMMAND, pci_command, 2); 990 991 CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET); 992 DELAY(10); 993 994 /* reinitialize interface if necessary */ 995 if (ifp->if_flags & IFF_UP) 996 fxp_init_body(sc); 997 998 sc->suspended = 0; 999 1000 FXP_UNLOCK(sc); 1001 splx(s); 1002 return (0); 1003} 1004 1005static void 1006fxp_eeprom_shiftin(struct fxp_softc *sc, int data, int length) 1007{ 1008 u_int16_t reg; 1009 int x; 1010 1011 /* 1012 * Shift in data. 1013 */ 1014 for (x = 1 << (length - 1); x; x >>= 1) { 1015 if (data & x) 1016 reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI; 1017 else 1018 reg = FXP_EEPROM_EECS; 1019 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg); 1020 DELAY(1); 1021 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg | FXP_EEPROM_EESK); 1022 DELAY(1); 1023 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg); 1024 DELAY(1); 1025 } 1026} 1027 1028/* 1029 * Read from the serial EEPROM. Basically, you manually shift in 1030 * the read opcode (one bit at a time) and then shift in the address, 1031 * and then you shift out the data (all of this one bit at a time). 1032 * The word size is 16 bits, so you have to provide the address for 1033 * every 16 bits of data. 1034 */ 1035static u_int16_t 1036fxp_eeprom_getword(struct fxp_softc *sc, int offset, int autosize) 1037{ 1038 u_int16_t reg, data; 1039 int x; 1040 1041 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS); 1042 /* 1043 * Shift in read opcode. 
1044 */ 1045 fxp_eeprom_shiftin(sc, FXP_EEPROM_OPC_READ, 3); 1046 /* 1047 * Shift in address. 1048 */ 1049 data = 0; 1050 for (x = 1 << (sc->eeprom_size - 1); x; x >>= 1) { 1051 if (offset & x) 1052 reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI; 1053 else 1054 reg = FXP_EEPROM_EECS; 1055 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg); 1056 DELAY(1); 1057 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg | FXP_EEPROM_EESK); 1058 DELAY(1); 1059 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg); 1060 DELAY(1); 1061 reg = CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO; 1062 data++; 1063 if (autosize && reg == 0) { 1064 sc->eeprom_size = data; 1065 break; 1066 } 1067 } 1068 /* 1069 * Shift out data. 1070 */ 1071 data = 0; 1072 reg = FXP_EEPROM_EECS; 1073 for (x = 1 << 15; x; x >>= 1) { 1074 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg | FXP_EEPROM_EESK); 1075 DELAY(1); 1076 if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO) 1077 data |= x; 1078 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg); 1079 DELAY(1); 1080 } 1081 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0); 1082 DELAY(1); 1083 1084 return (data); 1085} 1086 1087static void 1088fxp_eeprom_putword(struct fxp_softc *sc, int offset, u_int16_t data) 1089{ 1090 int i; 1091 1092 /* 1093 * Erase/write enable. 1094 */ 1095 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS); 1096 fxp_eeprom_shiftin(sc, 0x4, 3); 1097 fxp_eeprom_shiftin(sc, 0x03 << (sc->eeprom_size - 2), sc->eeprom_size); 1098 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0); 1099 DELAY(1); 1100 /* 1101 * Shift in write opcode, address, data. 1102 */ 1103 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS); 1104 fxp_eeprom_shiftin(sc, FXP_EEPROM_OPC_WRITE, 3); 1105 fxp_eeprom_shiftin(sc, offset, sc->eeprom_size); 1106 fxp_eeprom_shiftin(sc, data, 16); 1107 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0); 1108 DELAY(1); 1109 /* 1110 * Wait for EEPROM to finish up. 
1111 */ 1112 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS); 1113 DELAY(1); 1114 for (i = 0; i < 1000; i++) { 1115 if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO) 1116 break; 1117 DELAY(50); 1118 } 1119 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0); 1120 DELAY(1); 1121 /* 1122 * Erase/write disable. 1123 */ 1124 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS); 1125 fxp_eeprom_shiftin(sc, 0x4, 3); 1126 fxp_eeprom_shiftin(sc, 0, sc->eeprom_size); 1127 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0); 1128 DELAY(1); 1129} 1130 1131/* 1132 * From NetBSD: 1133 * 1134 * Figure out EEPROM size. 1135 * 1136 * 559's can have either 64-word or 256-word EEPROMs, the 558 1137 * datasheet only talks about 64-word EEPROMs, and the 557 datasheet 1138 * talks about the existance of 16 to 256 word EEPROMs. 1139 * 1140 * The only known sizes are 64 and 256, where the 256 version is used 1141 * by CardBus cards to store CIS information. 1142 * 1143 * The address is shifted in msb-to-lsb, and after the last 1144 * address-bit the EEPROM is supposed to output a `dummy zero' bit, 1145 * after which follows the actual data. We try to detect this zero, by 1146 * probing the data-out bit in the EEPROM control register just after 1147 * having shifted in a bit. If the bit is zero, we assume we've 1148 * shifted enough address bits. The data-out should be tri-state, 1149 * before this, which should translate to a logical one. 
 */
static void
fxp_autosize_eeprom(struct fxp_softc *sc)
{

	/*
	 * Guess maximum size of 256 words; eeprom_size holds the
	 * number of address bits (2^8 = 256), not the word count.
	 */
	sc->eeprom_size = 8;

	/* autosize: the probe read corrects eeprom_size in place */
	(void) fxp_eeprom_getword(sc, 0, 1);
}

/*
 * Read `words' 16-bit words from the EEPROM starting at word
 * `offset' into `data'.
 */
static void
fxp_read_eeprom(struct fxp_softc *sc, u_short *data, int offset, int words)
{
	int i;

	for (i = 0; i < words; i++)
		data[i] = fxp_eeprom_getword(sc, offset + i, 0);
}

/*
 * Write `words' 16-bit words from `data' to the EEPROM starting at
 * word `offset'.
 */
static void
fxp_write_eeprom(struct fxp_softc *sc, u_short *data, int offset, int words)
{
	int i;

	for (i = 0; i < words; i++)
		fxp_eeprom_putword(sc, offset + i, data[i]);
}

/*
 * busdma callback for a transmit mbuf chain: fill in the TBD array of
 * the next free TxCB with the physical address/length of each DMA
 * segment. `arg' is the softc (see bus_dmamap_load_mbuf() callers in
 * fxp_start_body()).
 */
static void
fxp_dma_map_txbuf(void *arg, bus_dma_segment_t *segs, int nseg,
    bus_size_t mapsize, int error)
{
	struct fxp_softc *sc;
	struct fxp_cb_tx *txp;
	int i;

	if (error)
		return;

	KASSERT(nseg <= FXP_NTXSEG, ("too many DMA segments"));

	sc = arg;
	txp = sc->fxp_desc.tx_last->tx_next->tx_cb;
	for (i = 0; i < nseg; i++) {
		KASSERT(segs[i].ds_len <= MCLBYTES, ("segment size too large"));
		/*
		 * If this is an 82550/82551, then we're using extended
		 * TxCBs _and_ we're using checksum offload. This means
		 * that the TxCB is really an IPCB. One major difference
		 * between the two is that with plain extended TxCBs,
		 * the bottom half of the TxCB contains two entries from
		 * the TBD array, whereas IPCBs contain just one entry:
		 * one entry (8 bytes) has been sacrificed for the TCP/IP
		 * checksum offload control bits. So to make things work
		 * right, we have to start filling in the TBD array
		 * starting from a different place depending on whether
		 * the chip is an 82550/82551 or not.
		 */
		if (sc->flags & FXP_FLAG_EXT_RFA) {
			txp->tbd[i + 1].tb_addr = htole32(segs[i].ds_addr);
			txp->tbd[i + 1].tb_size = htole32(segs[i].ds_len);
		} else {
			txp->tbd[i].tb_addr = htole32(segs[i].ds_addr);
			txp->tbd[i].tb_size = htole32(segs[i].ds_len);
		}
	}
	txp->tbd_number = nseg;
}

/*
 * Grab the softc lock and call the real fxp_start_body() routine
 */
static void
fxp_start(struct ifnet *ifp)
{
	struct fxp_softc *sc = ifp->if_softc;

	FXP_LOCK(sc);
	fxp_start_body(ifp);
	FXP_UNLOCK(sc);
}

/*
 * Start packet transmission on the interface.
 * This routine must be called with the softc lock held, and is an
 * internal entry point only.
 */
static void
fxp_start_body(struct ifnet *ifp)
{
	struct fxp_softc *sc = ifp->if_softc;
	struct fxp_tx *txp;
	struct mbuf *mb_head;
	int error;

	FXP_LOCK_ASSERT(sc, MA_OWNED);
	/*
	 * See if we need to suspend xmit until the multicast filter
	 * has been reprogrammed (which can only be done at the head
	 * of the command chain).
	 */
	if (sc->need_mcsetup) {
		return;
	}

	txp = NULL;

	/*
	 * We're finished if there is nothing more to add to the list or if
	 * we're all filled up with buffers to transmit.
	 * NOTE: One TxCB is reserved to guarantee that fxp_mc_setup() can add
	 * a NOP command when needed.
	 */
	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd) &&
	    sc->tx_queued < FXP_NTXCB - 1) {

		/*
		 * Grab a packet to transmit.
		 */
		IFQ_DRV_DEQUEUE(&ifp->if_snd, mb_head);
		if (mb_head == NULL)
			break;

		/*
		 * Get pointer to next available tx desc.
		 */
		txp = sc->fxp_desc.tx_last->tx_next;

		/*
		 * A note in Appendix B of the Intel 8255x 10/100 Mbps
		 * Ethernet Controller Family Open Source Software
		 * Developer Manual says:
		 *    Using software parsing is only allowed with legal
		 *    TCP/IP or UDP/IP packets.
		 *    ...
		 *    For all other datagrams, hardware parsing must
		 *    be used.
		 * Software parsing appears to truncate ICMP and
		 * fragmented UDP packets that contain one to three
		 * bytes in the second (and final) mbuf of the packet.
		 */
		if (sc->flags & FXP_FLAG_EXT_RFA)
			txp->tx_cb->ipcb_ip_activation_high =
			    FXP_IPCB_HARDWAREPARSING_ENABLE;

		/*
		 * Deal with TCP/IP checksum offload. Note that
		 * in order for TCP checksum offload to work,
		 * the pseudo header checksum must have already
		 * been computed and stored in the checksum field
		 * in the TCP header. The stack should have
		 * already done this for us.
		 */

		if (mb_head->m_pkthdr.csum_flags) {
			if (mb_head->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
				txp->tx_cb->ipcb_ip_schedule =
				    FXP_IPCB_TCPUDP_CHECKSUM_ENABLE;
				if (mb_head->m_pkthdr.csum_flags & CSUM_TCP)
					txp->tx_cb->ipcb_ip_schedule |=
					    FXP_IPCB_TCP_PACKET;
			}
#ifdef FXP_IP_CSUM_WAR
			/*
			 * XXX The 82550 chip appears to have trouble
			 * dealing with IP header checksums in very small
			 * datagrams, namely fragments from 1 to 3 bytes
			 * in size. For example, say you want to transmit
			 * a UDP packet of 1473 bytes. The packet will be
			 * fragmented over two IP datagrams, the latter
			 * containing only one byte of data. The 82550 will
			 * botch the header checksum on the 1-byte fragment.
			 * As long as the datagram contains 4 or more bytes
			 * of data, you're ok.
			 *
			 * The following code attempts to work around this
			 * problem: if the datagram is less than 38 bytes
			 * in size (14 bytes ether header, 20 bytes IP header,
			 * plus 4 bytes of data), we punt and compute the IP
			 * header checksum by hand. This workaround doesn't
			 * work very well, however, since it can be fooled
			 * by things like VLAN tags and IP options that make
			 * the header sizes/offsets vary.
			 */

			if (mb_head->m_pkthdr.csum_flags & CSUM_IP) {
				if (mb_head->m_pkthdr.len < 38) {
					struct ip *ip;
					mb_head->m_data += ETHER_HDR_LEN;
					ip = mtod(mb_head, struct ip *);
					ip->ip_sum = in_cksum(mb_head,
					    ip->ip_hl << 2);
					mb_head->m_data -= ETHER_HDR_LEN;
				} else {
					txp->tx_cb->ipcb_ip_activation_high =
					    FXP_IPCB_HARDWAREPARSING_ENABLE;
					txp->tx_cb->ipcb_ip_schedule |=
					    FXP_IPCB_IP_CHECKSUM_ENABLE;
				}
			}
#endif
		}

		/*
		 * Go through each of the mbufs in the chain and initialize
		 * the transmit buffer descriptors with the physical address
		 * and size of the mbuf.
		 */
		error = bus_dmamap_load_mbuf(sc->fxp_mtag, txp->tx_map,
		    mb_head, fxp_dma_map_txbuf, sc, 0);

		if (error && error != EFBIG) {
			device_printf(sc->dev, "can't map mbuf (error %d)\n",
			    error);
			m_freem(mb_head);
			break;
		}

		if (error) {
			struct mbuf *mn;

			/*
			 * We ran out of segments. We have to recopy this
			 * mbuf chain first. Bail out if we can't get the
			 * new buffers.
			 */
			mn = m_defrag(mb_head, M_DONTWAIT);
			if (mn == NULL) {
				m_freem(mb_head);
				break;
			} else {
				mb_head = mn;
			}
			error = bus_dmamap_load_mbuf(sc->fxp_mtag, txp->tx_map,
			    mb_head, fxp_dma_map_txbuf, sc, 0);
			if (error) {
				device_printf(sc->dev,
				    "can't map mbuf (error %d)\n", error);
				m_freem(mb_head);
				break;
			}
		}

		bus_dmamap_sync(sc->fxp_mtag, txp->tx_map,
		    BUS_DMASYNC_PREWRITE);

		txp->tx_mbuf = mb_head;
		txp->tx_cb->cb_status = 0;
		txp->tx_cb->byte_count = 0;
		/*
		 * Only request a completion interrupt every
		 * FXP_CXINT_THRESH packets, to limit interrupt load.
		 */
		if (sc->tx_queued != FXP_CXINT_THRESH - 1) {
			txp->tx_cb->cb_command =
			    htole16(sc->tx_cmd | FXP_CB_COMMAND_SF |
			    FXP_CB_COMMAND_S);
		} else {
			txp->tx_cb->cb_command =
			    htole16(sc->tx_cmd | FXP_CB_COMMAND_SF |
			    FXP_CB_COMMAND_S | FXP_CB_COMMAND_I);
			/*
			 * Set a 5 second timer just in case we don't hear
			 * from the card again.
			 */
			ifp->if_timer = 5;
		}
		txp->tx_cb->tx_threshold = tx_threshold;

		/*
		 * Advance the end of list forward.
		 */

#ifdef __alpha__
		/*
		 * On platforms which can't access memory in 16-bit
		 * granularities, we must prevent the card from DMA'ing
		 * up the status while we update the command field.
		 * This could cause us to overwrite the completion status.
		 * XXX This is probably bogus and we're _not_ looking
		 * for atomicity here.
		 */
		atomic_clear_16(&sc->fxp_desc.tx_last->tx_cb->cb_command,
		    htole16(FXP_CB_COMMAND_S));
#else
		sc->fxp_desc.tx_last->tx_cb->cb_command &=
		    htole16(~FXP_CB_COMMAND_S);
#endif /*__alpha__*/
		sc->fxp_desc.tx_last = txp;

		/*
		 * Advance the beginning of the list forward if there are
		 * no other packets queued (when nothing is queued, tx_first
		 * sits on the last TxCB that was sent out).
		 */
		if (sc->tx_queued == 0)
			sc->fxp_desc.tx_first = txp;

		sc->tx_queued++;

		/*
		 * Pass packet to bpf if there is a listener.
		 */
		BPF_MTAP(ifp, mb_head);
	}
	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);

	/*
	 * We're finished. If we added to the list, issue a RESUME to get DMA
	 * going again if suspended.
	 */
	if (txp != NULL) {
		fxp_scb_wait(sc);
		fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_RESUME);
	}
}

#ifdef DEVICE_POLLING
static poll_handler_t fxp_poll;

/*
 * Polling entry point: acks and processes pending events, handling at
 * most `count' receive frames per call (see fxp_intr_body()).
 */
static void
fxp_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct fxp_softc *sc = ifp->if_softc;
	u_int8_t statack;

	FXP_LOCK(sc);
	if (!(ifp->if_capenable & IFCAP_POLLING)) {
		ether_poll_deregister(ifp);
		cmd = POLL_DEREGISTER;
	}
	if (cmd == POLL_DEREGISTER) {	/* final call, enable interrupts */
		CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, 0);
		FXP_UNLOCK(sc);
		return;
	}
	statack = FXP_SCB_STATACK_CXTNO | FXP_SCB_STATACK_CNA |
	    FXP_SCB_STATACK_FR;
	if (cmd == POLL_AND_CHECK_STATUS) {
		u_int8_t tmp;

		tmp = CSR_READ_1(sc, FXP_CSR_SCB_STATACK);
		if (tmp == 0xff || tmp == 0) {
			FXP_UNLOCK(sc);
			return; /* nothing to do */
		}
		tmp &= ~statack;
		/* ack what we can */
		if (tmp != 0)
			CSR_WRITE_1(sc, FXP_CSR_SCB_STATACK, tmp);
		statack |= tmp;
	}
	fxp_intr_body(sc, ifp, statack, count);
	FXP_UNLOCK(sc);
}
#endif /* DEVICE_POLLING */

/*
 * Process interface interrupts.
 */
static void
fxp_intr(void *xsc)
{
	struct fxp_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_if;
	u_int8_t statack;

	FXP_LOCK(sc);
	/* Ignore spurious interrupts while the device is suspended. */
	if (sc->suspended) {
		FXP_UNLOCK(sc);
		return;
	}

#ifdef DEVICE_POLLING
	if (ifp->if_flags & IFF_POLLING) {
		FXP_UNLOCK(sc);
		return;
	}
	if ((ifp->if_capenable & IFCAP_POLLING) &&
	    ether_poll_register(fxp_poll, ifp)) {
		/* disable interrupts */
		CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_DISABLE);
		FXP_UNLOCK(sc);
		fxp_poll(ifp, 0, 1);
		return;
	}
#endif
	while ((statack = CSR_READ_1(sc, FXP_CSR_SCB_STATACK)) != 0) {
		/*
		 * It should not be possible to have all bits set; the
		 * FXP_SCB_INTR_SWI bit always returns 0 on a read. If
		 * all bits are set, this may indicate that the card has
		 * been physically ejected, so ignore it.
		 */
		if (statack == 0xff) {
			FXP_UNLOCK(sc);
			return;
		}

		/*
		 * First ACK all the interrupts in this pass.
		 */
		CSR_WRITE_1(sc, FXP_CSR_SCB_STATACK, statack);
		/* count == -1: no polling budget, process to completion */
		fxp_intr_body(sc, ifp, statack, -1);
	}
	FXP_UNLOCK(sc);
}

/*
 * Reclaim completed transmit descriptors: unload/free the mbuf of
 * every TxCB whose complete (C) bit is set, starting at tx_first.
 * Called with the softc lock held.
 */
static void
fxp_txeof(struct fxp_softc *sc)
{
	struct fxp_tx *txp;

	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREREAD);
	for (txp = sc->fxp_desc.tx_first; sc->tx_queued &&
	    (le16toh(txp->tx_cb->cb_status) & FXP_CB_STATUS_C) != 0;
	    txp = txp->tx_next) {
		if (txp->tx_mbuf != NULL) {
			bus_dmamap_sync(sc->fxp_mtag, txp->tx_map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->fxp_mtag, txp->tx_map);
			m_freem(txp->tx_mbuf);
			txp->tx_mbuf = NULL;
			/* clear this to reset csum offload bits */
			txp->tx_cb->tbd[0].tb_addr = 0;
		}
		sc->tx_queued--;
	}
	sc->fxp_desc.tx_first = txp;
	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
}

/*
 * Main interrupt/poll worker: reclaim finished transmits and process
 * received frames for the events in `statack'. `count' bounds the
 * number of rx frames handled per call when polling; -1 means no
 * bound. Called with the softc lock held (dropped around if_input()).
 */
static void
fxp_intr_body(struct fxp_softc *sc, struct ifnet *ifp, u_int8_t statack,
    int count)
{
	struct mbuf *m;
	struct fxp_rx *rxp;
	struct fxp_rfa *rfa;
	int rnr = (statack & FXP_SCB_STATACK_RNR) ? 1 : 0;

	FXP_LOCK_ASSERT(sc, MA_OWNED);
	if (rnr)
		sc->rnr++;
#ifdef DEVICE_POLLING
	/* Pick up a deferred RNR condition if `count' ran out last time. */
	if (sc->flags & FXP_FLAG_DEFERRED_RNR) {
		sc->flags &= ~FXP_FLAG_DEFERRED_RNR;
		rnr = 1;
	}
#endif

	/*
	 * Free any finished transmit mbuf chains.
	 *
	 * Handle the CNA event like a CXTNO event. It used to
	 * be that this event (control unit not ready) was not
	 * encountered, but it is now with the SMPng modifications.
	 * The exact sequence of events that occur when the interface
	 * is brought up are different now, and if this event
	 * goes unhandled, the configuration/rxfilter setup sequence
	 * can stall for several seconds. The result is that no
	 * packets go out onto the wire for about 5 to 10 seconds
	 * after the interface is ifconfig'ed for the first time.
	 */
	if (statack & (FXP_SCB_STATACK_CXTNO | FXP_SCB_STATACK_CNA)) {
		fxp_txeof(sc);

		ifp->if_timer = 0;
		if (sc->tx_queued == 0) {
			if (sc->need_mcsetup)
				fxp_mc_setup(sc);
		}
		/*
		 * Try to start more packets transmitting.
		 */
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			fxp_start_body(ifp);
	}

	/*
	 * Just return if nothing happened on the receive side.
	 */
	if (!rnr && (statack & FXP_SCB_STATACK_FR) == 0)
		return;

	/*
	 * Process receiver interrupts. If a no-resource (RNR)
	 * condition exists, get whatever packets we can and
	 * re-start the receiver.
	 *
	 * When using polling, we do not process the list to completion,
	 * so when we get an RNR interrupt we must defer the restart
	 * until we hit the last buffer with the C bit set.
	 * If we run out of cycles and rfa_headm has the C bit set,
	 * record the pending RNR in the FXP_FLAG_DEFERRED_RNR flag so
	 * that the info will be used in the subsequent polling cycle.
	 */
	for (;;) {
		rxp = sc->fxp_desc.rx_head;
		m = rxp->rx_mbuf;
		rfa = (struct fxp_rfa *)(m->m_ext.ext_buf +
		    RFA_ALIGNMENT_FUDGE);
		bus_dmamap_sync(sc->fxp_mtag, rxp->rx_map,
		    BUS_DMASYNC_POSTREAD);

#ifdef DEVICE_POLLING /* loop at most count times if count >=0 */
		if (count >= 0 && count-- == 0) {
			if (rnr) {
				/* Defer RNR processing until the next time. */
				sc->flags |= FXP_FLAG_DEFERRED_RNR;
				rnr = 0;
			}
			break;
		}
#endif /* DEVICE_POLLING */

		if ((le16toh(rfa->rfa_status) & FXP_RFA_STATUS_C) == 0)
			break;

		/*
		 * Advance head forward.
		 */
		sc->fxp_desc.rx_head = rxp->rx_next;

		/*
		 * Add a new buffer to the receive chain.
		 * If this fails, the old buffer is recycled
		 * instead.
		 */
		if (fxp_add_rfabuf(sc, rxp) == 0) {
			int total_len;

			/*
			 * Fetch packet length (the top 2 bits of
			 * actual_size are flags set by the controller
			 * upon completion), and drop the packet in case
			 * of bogus length or CRC errors.
			 */
			total_len = le16toh(rfa->actual_size) & 0x3fff;
			if (total_len < sizeof(struct ether_header) ||
			    total_len > MCLBYTES - RFA_ALIGNMENT_FUDGE -
			    sc->rfa_size ||
			    le16toh(rfa->rfa_status) & FXP_RFA_STATUS_CRC) {
				m_freem(m);
				continue;
			}

			/* Do IP checksum checking. */
			if (le16toh(rfa->rfa_status) & FXP_RFA_STATUS_PARSE) {
				if (rfa->rfax_csum_sts &
				    FXP_RFDX_CS_IP_CSUM_BIT_VALID)
					m->m_pkthdr.csum_flags |=
					    CSUM_IP_CHECKED;
				if (rfa->rfax_csum_sts &
				    FXP_RFDX_CS_IP_CSUM_VALID)
					m->m_pkthdr.csum_flags |=
					    CSUM_IP_VALID;
				if ((rfa->rfax_csum_sts &
				    FXP_RFDX_CS_TCPUDP_CSUM_BIT_VALID) &&
				    (rfa->rfax_csum_sts &
				    FXP_RFDX_CS_TCPUDP_CSUM_VALID)) {
					m->m_pkthdr.csum_flags |=
					    CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
					m->m_pkthdr.csum_data = 0xffff;
				}
			}

			m->m_pkthdr.len = m->m_len = total_len;
			m->m_pkthdr.rcvif = ifp;

			/*
			 * Drop locks before calling if_input() since it
			 * may re-enter fxp_start() in the netisr case.
			 * This would result in a lock reversal. Better
			 * performance might be obtained by chaining all
			 * packets received, dropping the lock, and then
			 * calling if_input() on each one.
			 */
			FXP_UNLOCK(sc);
			(*ifp->if_input)(ifp, m);
			FXP_LOCK(sc);
		}
	}
	if (rnr) {
		fxp_scb_wait(sc);
		CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL,
		    sc->fxp_desc.rx_head->rx_addr);
		fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_START);
	}
}

/*
 * Update packet in/out/collision statistics.
 The i82557 doesn't
 * allow you to access these counters without doing a fairly
 * expensive DMA to get _all_ of the statistics it maintains, so
 * we do this operation here only once per second. The statistics
 * counters in the kernel are updated from the previous dump-stats
 * DMA and then a new dump-stats DMA is started. The on-chip
 * counters are zeroed when the DMA completes. If we can't start
 * the DMA immediately, we don't wait - we just prepare to read
 * them again next time.
 */
static void
fxp_tick(void *xsc)
{
	struct fxp_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_if;
	struct fxp_stats *sp = sc->fxp_stats;
	int s;

	FXP_LOCK(sc);
	s = splimp();
	bus_dmamap_sync(sc->fxp_stag, sc->fxp_smap, BUS_DMASYNC_POSTREAD);
	ifp->if_opackets += le32toh(sp->tx_good);
	ifp->if_collisions += le32toh(sp->tx_total_collisions);
	if (sp->rx_good) {
		ifp->if_ipackets += le32toh(sp->rx_good);
		sc->rx_idle_secs = 0;
	} else {
		/*
		 * Receiver's been idle for another second.
		 */
		sc->rx_idle_secs++;
	}
	ifp->if_ierrors +=
	    le32toh(sp->rx_crc_errors) +
	    le32toh(sp->rx_alignment_errors) +
	    le32toh(sp->rx_rnr_errors) +
	    le32toh(sp->rx_overrun_errors);
	/*
	 * If any transmit underruns occurred, bump up the transmit
	 * threshold by another 512 bytes (64 * 8).
	 */
	if (sp->tx_underruns) {
		ifp->if_oerrors += le32toh(sp->tx_underruns);
		if (tx_threshold < 192)
			tx_threshold += 64;
	}

	/*
	 * Release any xmit buffers that have completed DMA. This isn't
	 * strictly necessary to do here, but it's advantagous for mbufs
	 * with external storage to be released in a timely manner rather
	 * than being defered for a potentially long time. This limits
	 * the delay to a maximum of one second.
	 */
	fxp_txeof(sc);

	/*
	 * If we haven't received any packets in FXP_MAX_RX_IDLE seconds,
	 * then assume the receiver has locked up and attempt to clear
	 * the condition by reprogramming the multicast filter. This is
	 * a work-around for a bug in the 82557 where the receiver locks
	 * up if it gets certain types of garbage in the syncronization
	 * bits prior to the packet header. This bug is supposed to only
	 * occur in 10Mbps mode, but has been seen to occur in 100Mbps
	 * mode as well (perhaps due to a 10/100 speed transition).
	 */
	if (sc->rx_idle_secs > FXP_MAX_RX_IDLE) {
		sc->rx_idle_secs = 0;
		fxp_mc_setup(sc);
	}
	/*
	 * If there is no pending command, start another stats
	 * dump. Otherwise punt for now.
	 */
	if (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) == 0) {
		/*
		 * Start another stats dump.
		 */
		bus_dmamap_sync(sc->fxp_stag, sc->fxp_smap,
		    BUS_DMASYNC_PREREAD);
		fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_DUMPRESET);
	} else {
		/*
		 * A previous command is still waiting to be accepted.
		 * Just zero our copy of the stats and wait for the
		 * next timer event to update them.
		 */
		sp->tx_good = 0;
		sp->tx_underruns = 0;
		sp->tx_total_collisions = 0;

		sp->rx_good = 0;
		sp->rx_crc_errors = 0;
		sp->rx_alignment_errors = 0;
		sp->rx_rnr_errors = 0;
		sp->rx_overrun_errors = 0;
	}
	if (sc->miibus != NULL)
		mii_tick(device_get_softc(sc->miibus));

	/*
	 * Schedule another timeout one second from now.
	 */
	callout_reset(&sc->stat_ch, hz, fxp_tick, sc);
	FXP_UNLOCK(sc);
	splx(s);
}

/*
 * Stop the interface. Cancels the statistics updater and resets
 * the interface.
 */
static void
fxp_stop(struct fxp_softc *sc)
{
	struct ifnet *ifp = &sc->sc_if;
	struct fxp_tx *txp;
	int i;

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

#ifdef DEVICE_POLLING
	ether_poll_deregister(ifp);
#endif
	/*
	 * Cancel stats updater.
	 */
	callout_stop(&sc->stat_ch);

	/*
	 * Issue software reset, which also unloads the microcode.
	 */
	sc->flags &= ~FXP_FLAG_UCODE;
	CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SOFTWARE_RESET);
	DELAY(50);

	/*
	 * Release any xmit buffers.
	 */
	txp = sc->fxp_desc.tx_list;
	if (txp != NULL) {
		for (i = 0; i < FXP_NTXCB; i++) {
			if (txp[i].tx_mbuf != NULL) {
				bus_dmamap_sync(sc->fxp_mtag, txp[i].tx_map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(sc->fxp_mtag, txp[i].tx_map);
				m_freem(txp[i].tx_mbuf);
				txp[i].tx_mbuf = NULL;
				/* clear this to reset csum offload bits */
				txp[i].tx_cb->tbd[0].tb_addr = 0;
			}
		}
	}
	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
	sc->tx_queued = 0;
}

/*
 * Watchdog/transmission transmit timeout handler. Called when a
 * transmission is started on the interface, but no interrupt is
 * received before the timeout. This usually indicates that the
 * card has wedged for some reason.
 */
static void
fxp_watchdog(struct ifnet *ifp)
{
	struct fxp_softc *sc = ifp->if_softc;

	FXP_LOCK(sc);
	device_printf(sc->dev, "device timeout\n");
	ifp->if_oerrors++;

	/* Reset and reinitialize the interface to recover. */
	fxp_init_body(sc);
	FXP_UNLOCK(sc);
}

/*
 * Acquire locks and then call the real initialization function. This
 * is necessary because ether_ioctl() calls if_init() and this would
 * result in mutex recursion if the mutex was held.
1922 */ 1923static void 1924fxp_init(void *xsc) 1925{ 1926 struct fxp_softc *sc = xsc; 1927 1928 FXP_LOCK(sc); 1929 fxp_init_body(sc); 1930 FXP_UNLOCK(sc); 1931} 1932 1933/* 1934 * Perform device initialization. This routine must be called with the 1935 * softc lock held. 1936 */ 1937static void 1938fxp_init_body(struct fxp_softc *sc) 1939{ 1940 struct ifnet *ifp = &sc->sc_if; 1941 struct fxp_cb_config *cbp; 1942 struct fxp_cb_ias *cb_ias; 1943 struct fxp_cb_tx *tcbp; 1944 struct fxp_tx *txp; 1945 struct fxp_cb_mcs *mcsp; 1946 int i, prm, s; 1947 1948 FXP_LOCK_ASSERT(sc, MA_OWNED); 1949 s = splimp(); 1950 /* 1951 * Cancel any pending I/O 1952 */ 1953 fxp_stop(sc); 1954 1955 prm = (ifp->if_flags & IFF_PROMISC) ? 1 : 0; 1956 1957 /* 1958 * Initialize base of CBL and RFA memory. Loading with zero 1959 * sets it up for regular linear addressing. 1960 */ 1961 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, 0); 1962 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_BASE); 1963 1964 fxp_scb_wait(sc); 1965 fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_BASE); 1966 1967 /* 1968 * Initialize base of dump-stats buffer. 1969 */ 1970 fxp_scb_wait(sc); 1971 bus_dmamap_sync(sc->fxp_stag, sc->fxp_smap, BUS_DMASYNC_PREREAD); 1972 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->stats_addr); 1973 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_DUMP_ADR); 1974 1975 /* 1976 * Attempt to load microcode if requested. 1977 */ 1978 if (ifp->if_flags & IFF_LINK0 && (sc->flags & FXP_FLAG_UCODE) == 0) 1979 fxp_load_ucode(sc); 1980 1981 /* 1982 * Initialize the multicast address list. 1983 */ 1984 if (fxp_mc_addrs(sc)) { 1985 mcsp = sc->mcsp; 1986 mcsp->cb_status = 0; 1987 mcsp->cb_command = 1988 htole16(FXP_CB_COMMAND_MCAS | FXP_CB_COMMAND_EL); 1989 mcsp->link_addr = 0xffffffff; 1990 /* 1991 * Start the multicast setup command. 
1992 */ 1993 fxp_scb_wait(sc); 1994 bus_dmamap_sync(sc->mcs_tag, sc->mcs_map, BUS_DMASYNC_PREWRITE); 1995 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->mcs_addr); 1996 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START); 1997 /* ...and wait for it to complete. */ 1998 fxp_dma_wait(sc, &mcsp->cb_status, sc->mcs_tag, sc->mcs_map); 1999 bus_dmamap_sync(sc->mcs_tag, sc->mcs_map, 2000 BUS_DMASYNC_POSTWRITE); 2001 } 2002 2003 /* 2004 * We temporarily use memory that contains the TxCB list to 2005 * construct the config CB. The TxCB list memory is rebuilt 2006 * later. 2007 */ 2008 cbp = (struct fxp_cb_config *)sc->fxp_desc.cbl_list; 2009 2010 /* 2011 * This bcopy is kind of disgusting, but there are a bunch of must be 2012 * zero and must be one bits in this structure and this is the easiest 2013 * way to initialize them all to proper values. 2014 */ 2015 bcopy(fxp_cb_config_template, cbp, sizeof(fxp_cb_config_template)); 2016 2017 cbp->cb_status = 0; 2018 cbp->cb_command = htole16(FXP_CB_COMMAND_CONFIG | 2019 FXP_CB_COMMAND_EL); 2020 cbp->link_addr = 0xffffffff; /* (no) next command */ 2021 cbp->byte_count = sc->flags & FXP_FLAG_EXT_RFA ? 32 : 22; 2022 cbp->rx_fifo_limit = 8; /* rx fifo threshold (32 bytes) */ 2023 cbp->tx_fifo_limit = 0; /* tx fifo threshold (0 bytes) */ 2024 cbp->adaptive_ifs = 0; /* (no) adaptive interframe spacing */ 2025 cbp->mwi_enable = sc->flags & FXP_FLAG_MWI_ENABLE ? 1 : 0; 2026 cbp->type_enable = 0; /* actually reserved */ 2027 cbp->read_align_en = sc->flags & FXP_FLAG_READ_ALIGN ? 1 : 0; 2028 cbp->end_wr_on_cl = sc->flags & FXP_FLAG_WRITE_ALIGN ? 
1 : 0; 2029 cbp->rx_dma_bytecount = 0; /* (no) rx DMA max */ 2030 cbp->tx_dma_bytecount = 0; /* (no) tx DMA max */ 2031 cbp->dma_mbce = 0; /* (disable) dma max counters */ 2032 cbp->late_scb = 0; /* (don't) defer SCB update */ 2033 cbp->direct_dma_dis = 1; /* disable direct rcv dma mode */ 2034 cbp->tno_int_or_tco_en =0; /* (disable) tx not okay interrupt */ 2035 cbp->ci_int = 1; /* interrupt on CU idle */ 2036 cbp->ext_txcb_dis = sc->flags & FXP_FLAG_EXT_TXCB ? 0 : 1; 2037 cbp->ext_stats_dis = 1; /* disable extended counters */ 2038 cbp->keep_overrun_rx = 0; /* don't pass overrun frames to host */ 2039 cbp->save_bf = sc->flags & FXP_FLAG_SAVE_BAD ? 1 : prm; 2040 cbp->disc_short_rx = !prm; /* discard short packets */ 2041 cbp->underrun_retry = 1; /* retry mode (once) on DMA underrun */ 2042 cbp->two_frames = 0; /* do not limit FIFO to 2 frames */ 2043 cbp->dyn_tbd = 0; /* (no) dynamic TBD mode */ 2044 cbp->ext_rfa = sc->flags & FXP_FLAG_EXT_RFA ? 1 : 0; 2045 cbp->mediatype = sc->flags & FXP_FLAG_SERIAL_MEDIA ? 
0 : 1; 2046 cbp->csma_dis = 0; /* (don't) disable link */ 2047 cbp->tcp_udp_cksum = 0; /* (don't) enable checksum */ 2048 cbp->vlan_tco = 0; /* (don't) enable vlan wakeup */ 2049 cbp->link_wake_en = 0; /* (don't) assert PME# on link change */ 2050 cbp->arp_wake_en = 0; /* (don't) assert PME# on arp */ 2051 cbp->mc_wake_en = 0; /* (don't) enable PME# on mcmatch */ 2052 cbp->nsai = 1; /* (don't) disable source addr insert */ 2053 cbp->preamble_length = 2; /* (7 byte) preamble */ 2054 cbp->loopback = 0; /* (don't) loopback */ 2055 cbp->linear_priority = 0; /* (normal CSMA/CD operation) */ 2056 cbp->linear_pri_mode = 0; /* (wait after xmit only) */ 2057 cbp->interfrm_spacing = 6; /* (96 bits of) interframe spacing */ 2058 cbp->promiscuous = prm; /* promiscuous mode */ 2059 cbp->bcast_disable = 0; /* (don't) disable broadcasts */ 2060 cbp->wait_after_win = 0; /* (don't) enable modified backoff alg*/ 2061 cbp->ignore_ul = 0; /* consider U/L bit in IA matching */ 2062 cbp->crc16_en = 0; /* (don't) enable crc-16 algorithm */ 2063 cbp->crscdt = sc->flags & FXP_FLAG_SERIAL_MEDIA ? 1 : 0; 2064 2065 cbp->stripping = !prm; /* truncate rx packet to byte count */ 2066 cbp->padding = 1; /* (do) pad short tx packets */ 2067 cbp->rcv_crc_xfer = 0; /* (don't) xfer CRC to host */ 2068 cbp->long_rx_en = sc->flags & FXP_FLAG_LONG_PKT_EN ? 1 : 0; 2069 cbp->ia_wake_en = 0; /* (don't) wake up on address match */ 2070 cbp->magic_pkt_dis = 0; /* (don't) disable magic packet */ 2071 /* must set wake_en in PMCSR also */ 2072 cbp->force_fdx = 0; /* (don't) force full duplex */ 2073 cbp->fdx_pin_en = 1; /* (enable) FDX# pin */ 2074 cbp->multi_ia = 0; /* (don't) accept multiple IAs */ 2075 cbp->mc_all = sc->flags & FXP_FLAG_ALL_MCAST ? 1 : 0; 2076 cbp->gamla_rx = sc->flags & FXP_FLAG_EXT_RFA ? 1 : 0; 2077 2078 if (sc->tunable_noflow || sc->revision == FXP_REV_82557) { 2079 /* 2080 * The 82557 has no hardware flow control, the values 2081 * below are the defaults for the chip. 
2082 */ 2083 cbp->fc_delay_lsb = 0; 2084 cbp->fc_delay_msb = 0x40; 2085 cbp->pri_fc_thresh = 3; 2086 cbp->tx_fc_dis = 0; 2087 cbp->rx_fc_restop = 0; 2088 cbp->rx_fc_restart = 0; 2089 cbp->fc_filter = 0; 2090 cbp->pri_fc_loc = 1; 2091 } else { 2092 cbp->fc_delay_lsb = 0x1f; 2093 cbp->fc_delay_msb = 0x01; 2094 cbp->pri_fc_thresh = 3; 2095 cbp->tx_fc_dis = 0; /* enable transmit FC */ 2096 cbp->rx_fc_restop = 1; /* enable FC restop frames */ 2097 cbp->rx_fc_restart = 1; /* enable FC restart frames */ 2098 cbp->fc_filter = !prm; /* drop FC frames to host */ 2099 cbp->pri_fc_loc = 1; /* FC pri location (byte31) */ 2100 } 2101 2102 /* 2103 * Start the config command/DMA. 2104 */ 2105 fxp_scb_wait(sc); 2106 bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE); 2107 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->fxp_desc.cbl_addr); 2108 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START); 2109 /* ...and wait for it to complete. */ 2110 fxp_dma_wait(sc, &cbp->cb_status, sc->cbl_tag, sc->cbl_map); 2111 bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_POSTWRITE); 2112 2113 /* 2114 * Now initialize the station address. Temporarily use the TxCB 2115 * memory area like we did above for the config CB. 2116 */ 2117 cb_ias = (struct fxp_cb_ias *)sc->fxp_desc.cbl_list; 2118 cb_ias->cb_status = 0; 2119 cb_ias->cb_command = htole16(FXP_CB_COMMAND_IAS | FXP_CB_COMMAND_EL); 2120 cb_ias->link_addr = 0xffffffff; 2121 bcopy(sc->arpcom.ac_enaddr, cb_ias->macaddr, 2122 sizeof(sc->arpcom.ac_enaddr)); 2123 2124 /* 2125 * Start the IAS (Individual Address Setup) command/DMA. 2126 */ 2127 fxp_scb_wait(sc); 2128 bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE); 2129 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START); 2130 /* ...and wait for it to complete. */ 2131 fxp_dma_wait(sc, &cb_ias->cb_status, sc->cbl_tag, sc->cbl_map); 2132 bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_POSTWRITE); 2133 2134 /* 2135 * Initialize transmit control block (TxCB) list. 
2136 */ 2137 txp = sc->fxp_desc.tx_list; 2138 tcbp = sc->fxp_desc.cbl_list; 2139 bzero(tcbp, FXP_TXCB_SZ); 2140 for (i = 0; i < FXP_NTXCB; i++) { 2141 txp[i].tx_mbuf = NULL; 2142 tcbp[i].cb_status = htole16(FXP_CB_STATUS_C | FXP_CB_STATUS_OK); 2143 tcbp[i].cb_command = htole16(FXP_CB_COMMAND_NOP); 2144 tcbp[i].link_addr = htole32(sc->fxp_desc.cbl_addr + 2145 (((i + 1) & FXP_TXCB_MASK) * sizeof(struct fxp_cb_tx))); 2146 if (sc->flags & FXP_FLAG_EXT_TXCB) 2147 tcbp[i].tbd_array_addr = 2148 htole32(FXP_TXCB_DMA_ADDR(sc, &tcbp[i].tbd[2])); 2149 else 2150 tcbp[i].tbd_array_addr = 2151 htole32(FXP_TXCB_DMA_ADDR(sc, &tcbp[i].tbd[0])); 2152 txp[i].tx_next = &txp[(i + 1) & FXP_TXCB_MASK]; 2153 } 2154 /* 2155 * Set the suspend flag on the first TxCB and start the control 2156 * unit. It will execute the NOP and then suspend. 2157 */ 2158 tcbp->cb_command = htole16(FXP_CB_COMMAND_NOP | FXP_CB_COMMAND_S); 2159 bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE); 2160 sc->fxp_desc.tx_first = sc->fxp_desc.tx_last = txp; 2161 sc->tx_queued = 1; 2162 2163 fxp_scb_wait(sc); 2164 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START); 2165 2166 /* 2167 * Initialize receiver buffer area - RFA. 2168 */ 2169 fxp_scb_wait(sc); 2170 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->fxp_desc.rx_head->rx_addr); 2171 fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_START); 2172 2173 /* 2174 * Set current media. 2175 */ 2176 if (sc->miibus != NULL) 2177 mii_mediachg(device_get_softc(sc->miibus)); 2178 2179 ifp->if_flags |= IFF_RUNNING; 2180 ifp->if_flags &= ~IFF_OACTIVE; 2181 2182 /* 2183 * Enable interrupts. 2184 */ 2185#ifdef DEVICE_POLLING 2186 /* 2187 * ... but only do that if we are not polling. And because (presumably) 2188 * the default is interrupts on, we need to disable them explicitly! 
2189 */ 2190 if ( ifp->if_flags & IFF_POLLING ) 2191 CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_DISABLE); 2192 else 2193#endif /* DEVICE_POLLING */ 2194 CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, 0); 2195 2196 /* 2197 * Start stats updater. 2198 */ 2199 callout_reset(&sc->stat_ch, hz, fxp_tick, sc); 2200 splx(s); 2201} 2202 2203static int 2204fxp_serial_ifmedia_upd(struct ifnet *ifp) 2205{ 2206 2207 return (0); 2208} 2209 2210static void 2211fxp_serial_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 2212{ 2213 2214 ifmr->ifm_active = IFM_ETHER|IFM_MANUAL; 2215} 2216 2217/* 2218 * Change media according to request. 2219 */ 2220static int 2221fxp_ifmedia_upd(struct ifnet *ifp) 2222{ 2223 struct fxp_softc *sc = ifp->if_softc; 2224 struct mii_data *mii; 2225 2226 mii = device_get_softc(sc->miibus); 2227 mii_mediachg(mii); 2228 return (0); 2229} 2230 2231/* 2232 * Notify the world which media we're using. 2233 */ 2234static void 2235fxp_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 2236{ 2237 struct fxp_softc *sc = ifp->if_softc; 2238 struct mii_data *mii; 2239 2240 mii = device_get_softc(sc->miibus); 2241 mii_pollstat(mii); 2242 ifmr->ifm_active = mii->mii_media_active; 2243 ifmr->ifm_status = mii->mii_media_status; 2244 2245 if (ifmr->ifm_status & IFM_10_T && sc->flags & FXP_FLAG_CU_RESUME_BUG) 2246 sc->cu_resume_bug = 1; 2247 else 2248 sc->cu_resume_bug = 0; 2249} 2250 2251/* 2252 * Add a buffer to the end of the RFA buffer list. 2253 * Return 0 if successful, 1 for failure. A failure results in 2254 * adding the 'oldm' (if non-NULL) on to the end of the list - 2255 * tossing out its old contents and recycling it. 2256 * The RFA struct is stuck at the beginning of mbuf cluster and the 2257 * data pointer is fixed up to point just past it. 
2258 */ 2259static int 2260fxp_add_rfabuf(struct fxp_softc *sc, struct fxp_rx *rxp) 2261{ 2262 struct mbuf *m; 2263 struct fxp_rfa *rfa, *p_rfa; 2264 struct fxp_rx *p_rx; 2265 bus_dmamap_t tmp_map; 2266 int error; 2267 2268 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 2269 if (m == NULL) 2270 return (ENOBUFS); 2271 2272 /* 2273 * Move the data pointer up so that the incoming data packet 2274 * will be 32-bit aligned. 2275 */ 2276 m->m_data += RFA_ALIGNMENT_FUDGE; 2277 2278 /* 2279 * Get a pointer to the base of the mbuf cluster and move 2280 * data start past it. 2281 */ 2282 rfa = mtod(m, struct fxp_rfa *); 2283 m->m_data += sc->rfa_size; 2284 rfa->size = htole16(MCLBYTES - sc->rfa_size - RFA_ALIGNMENT_FUDGE); 2285 2286 rfa->rfa_status = 0; 2287 rfa->rfa_control = htole16(FXP_RFA_CONTROL_EL); 2288 rfa->actual_size = 0; 2289 2290 /* 2291 * Initialize the rest of the RFA. Note that since the RFA 2292 * is misaligned, we cannot store values directly. We're thus 2293 * using the le32enc() function which handles endianness and 2294 * is also alignment-safe. 2295 */ 2296 le32enc(&rfa->link_addr, 0xffffffff); 2297 le32enc(&rfa->rbd_addr, 0xffffffff); 2298 2299 /* Map the RFA into DMA memory. */ 2300 error = bus_dmamap_load(sc->fxp_mtag, sc->spare_map, rfa, 2301 MCLBYTES - RFA_ALIGNMENT_FUDGE, fxp_dma_map_addr, 2302 &rxp->rx_addr, 0); 2303 if (error) { 2304 m_freem(m); 2305 return (error); 2306 } 2307 2308 bus_dmamap_unload(sc->fxp_mtag, rxp->rx_map); 2309 tmp_map = sc->spare_map; 2310 sc->spare_map = rxp->rx_map; 2311 rxp->rx_map = tmp_map; 2312 rxp->rx_mbuf = m; 2313 2314 bus_dmamap_sync(sc->fxp_mtag, rxp->rx_map, 2315 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2316 2317 /* 2318 * If there are other buffers already on the list, attach this 2319 * one to the end by fixing up the tail to point to this one. 
2320 */ 2321 if (sc->fxp_desc.rx_head != NULL) { 2322 p_rx = sc->fxp_desc.rx_tail; 2323 p_rfa = (struct fxp_rfa *) 2324 (p_rx->rx_mbuf->m_ext.ext_buf + RFA_ALIGNMENT_FUDGE); 2325 p_rx->rx_next = rxp; 2326 le32enc(&p_rfa->link_addr, rxp->rx_addr); 2327 p_rfa->rfa_control = 0; 2328 bus_dmamap_sync(sc->fxp_mtag, p_rx->rx_map, 2329 BUS_DMASYNC_PREWRITE); 2330 } else { 2331 rxp->rx_next = NULL; 2332 sc->fxp_desc.rx_head = rxp; 2333 } 2334 sc->fxp_desc.rx_tail = rxp; 2335 return (0); 2336} 2337 2338static volatile int 2339fxp_miibus_readreg(device_t dev, int phy, int reg) 2340{ 2341 struct fxp_softc *sc = device_get_softc(dev); 2342 int count = 10000; 2343 int value; 2344 2345 CSR_WRITE_4(sc, FXP_CSR_MDICONTROL, 2346 (FXP_MDI_READ << 26) | (reg << 16) | (phy << 21)); 2347 2348 while (((value = CSR_READ_4(sc, FXP_CSR_MDICONTROL)) & 0x10000000) == 0 2349 && count--) 2350 DELAY(10); 2351 2352 if (count <= 0) 2353 device_printf(dev, "fxp_miibus_readreg: timed out\n"); 2354 2355 return (value & 0xffff); 2356} 2357 2358static void 2359fxp_miibus_writereg(device_t dev, int phy, int reg, int value) 2360{ 2361 struct fxp_softc *sc = device_get_softc(dev); 2362 int count = 10000; 2363 2364 CSR_WRITE_4(sc, FXP_CSR_MDICONTROL, 2365 (FXP_MDI_WRITE << 26) | (reg << 16) | (phy << 21) | 2366 (value & 0xffff)); 2367 2368 while ((CSR_READ_4(sc, FXP_CSR_MDICONTROL) & 0x10000000) == 0 && 2369 count--) 2370 DELAY(10); 2371 2372 if (count <= 0) 2373 device_printf(dev, "fxp_miibus_writereg: timed out\n"); 2374} 2375 2376static int 2377fxp_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 2378{ 2379 struct fxp_softc *sc = ifp->if_softc; 2380 struct ifreq *ifr = (struct ifreq *)data; 2381 struct mii_data *mii; 2382 int flag, mask, s, error = 0; 2383 2384 /* 2385 * Detaching causes us to call ioctl with the mutex owned. Preclude 2386 * that by saying we're busy if the lock is already held. 
2387 */ 2388 if (FXP_LOCKED(sc)) 2389 return (EBUSY); 2390 2391 FXP_LOCK(sc); 2392 s = splimp(); 2393 2394 switch (command) { 2395 case SIOCSIFFLAGS: 2396 if (ifp->if_flags & IFF_ALLMULTI) 2397 sc->flags |= FXP_FLAG_ALL_MCAST; 2398 else 2399 sc->flags &= ~FXP_FLAG_ALL_MCAST; 2400 2401 /* 2402 * If interface is marked up and not running, then start it. 2403 * If it is marked down and running, stop it. 2404 * XXX If it's up then re-initialize it. This is so flags 2405 * such as IFF_PROMISC are handled. 2406 */ 2407 if (ifp->if_flags & IFF_UP) { 2408 fxp_init_body(sc); 2409 } else { 2410 if (ifp->if_flags & IFF_RUNNING) 2411 fxp_stop(sc); 2412 } 2413 break; 2414 2415 case SIOCADDMULTI: 2416 case SIOCDELMULTI: 2417 if (ifp->if_flags & IFF_ALLMULTI) 2418 sc->flags |= FXP_FLAG_ALL_MCAST; 2419 else 2420 sc->flags &= ~FXP_FLAG_ALL_MCAST; 2421 /* 2422 * Multicast list has changed; set the hardware filter 2423 * accordingly. 2424 */ 2425 if ((sc->flags & FXP_FLAG_ALL_MCAST) == 0) 2426 fxp_mc_setup(sc); 2427 /* 2428 * fxp_mc_setup() can set FXP_FLAG_ALL_MCAST, so check it 2429 * again rather than else {}. 
2430 */ 2431 if (sc->flags & FXP_FLAG_ALL_MCAST) 2432 fxp_init_body(sc); 2433 error = 0; 2434 break; 2435 2436 case SIOCSIFMEDIA: 2437 case SIOCGIFMEDIA: 2438 if (sc->miibus != NULL) { 2439 mii = device_get_softc(sc->miibus); 2440 error = ifmedia_ioctl(ifp, ifr, 2441 &mii->mii_media, command); 2442 } else { 2443 error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, command); 2444 } 2445 break; 2446 2447 case SIOCSIFCAP: 2448 mask = ifp->if_capenable ^ ifr->ifr_reqcap; 2449 if (mask & IFCAP_POLLING) 2450 ifp->if_capenable ^= IFCAP_POLLING; 2451 if (mask & IFCAP_VLAN_MTU) { 2452 ifp->if_capenable ^= IFCAP_VLAN_MTU; 2453 if (sc->revision != FXP_REV_82557) 2454 flag = FXP_FLAG_LONG_PKT_EN; 2455 else /* a hack to get long frames on the old chip */ 2456 flag = FXP_FLAG_SAVE_BAD; 2457 sc->flags ^= flag; 2458 if (ifp->if_flags & IFF_UP) 2459 fxp_init_body(sc); 2460 } 2461 break; 2462 2463 default: 2464 /* 2465 * ether_ioctl() will eventually call fxp_start() which 2466 * will result in mutex recursion so drop it first. 2467 */ 2468 FXP_UNLOCK(sc); 2469 error = ether_ioctl(ifp, command, data); 2470 } 2471 if (FXP_LOCKED(sc)) 2472 FXP_UNLOCK(sc); 2473 splx(s); 2474 return (error); 2475} 2476 2477/* 2478 * Fill in the multicast address list and return number of entries. 
2479 */ 2480static int 2481fxp_mc_addrs(struct fxp_softc *sc) 2482{ 2483 struct fxp_cb_mcs *mcsp = sc->mcsp; 2484 struct ifnet *ifp = &sc->sc_if; 2485 struct ifmultiaddr *ifma; 2486 int nmcasts; 2487 2488 nmcasts = 0; 2489 if ((sc->flags & FXP_FLAG_ALL_MCAST) == 0) { 2490#if __FreeBSD_version < 500000 2491 LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2492#else 2493 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2494#endif 2495 if (ifma->ifma_addr->sa_family != AF_LINK) 2496 continue; 2497 if (nmcasts >= MAXMCADDR) { 2498 sc->flags |= FXP_FLAG_ALL_MCAST; 2499 nmcasts = 0; 2500 break; 2501 } 2502 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 2503 &sc->mcsp->mc_addr[nmcasts][0], ETHER_ADDR_LEN); 2504 nmcasts++; 2505 } 2506 } 2507 mcsp->mc_cnt = htole16(nmcasts * ETHER_ADDR_LEN); 2508 return (nmcasts); 2509} 2510 2511/* 2512 * Program the multicast filter. 2513 * 2514 * We have an artificial restriction that the multicast setup command 2515 * must be the first command in the chain, so we take steps to ensure 2516 * this. By requiring this, it allows us to keep up the performance of 2517 * the pre-initialized command ring (esp. link pointers) by not actually 2518 * inserting the mcsetup command in the ring - i.e. its link pointer 2519 * points to the TxCB ring, but the mcsetup descriptor itself is not part 2520 * of it. We then can do 'CU_START' on the mcsetup descriptor and have it 2521 * lead into the regular TxCB ring when it completes. 2522 * 2523 * This function must be called at splimp. 2524 */ 2525static void 2526fxp_mc_setup(struct fxp_softc *sc) 2527{ 2528 struct fxp_cb_mcs *mcsp = sc->mcsp; 2529 struct ifnet *ifp = &sc->sc_if; 2530 struct fxp_tx *txp; 2531 int count; 2532 2533 FXP_LOCK_ASSERT(sc, MA_OWNED); 2534 /* 2535 * If there are queued commands, we must wait until they are all 2536 * completed. 
If we are already waiting, then add a NOP command 2537 * with interrupt option so that we're notified when all commands 2538 * have been completed - fxp_start() ensures that no additional 2539 * TX commands will be added when need_mcsetup is true. 2540 */ 2541 if (sc->tx_queued) { 2542 /* 2543 * need_mcsetup will be true if we are already waiting for the 2544 * NOP command to be completed (see below). In this case, bail. 2545 */ 2546 if (sc->need_mcsetup) 2547 return; 2548 sc->need_mcsetup = 1; 2549 2550 /* 2551 * Add a NOP command with interrupt so that we are notified 2552 * when all TX commands have been processed. 2553 */ 2554 txp = sc->fxp_desc.tx_last->tx_next; 2555 txp->tx_mbuf = NULL; 2556 txp->tx_cb->cb_status = 0; 2557 txp->tx_cb->cb_command = htole16(FXP_CB_COMMAND_NOP | 2558 FXP_CB_COMMAND_S | FXP_CB_COMMAND_I); 2559 /* 2560 * Advance the end of list forward. 2561 */ 2562 sc->fxp_desc.tx_last->tx_cb->cb_command &= 2563 htole16(~FXP_CB_COMMAND_S); 2564 bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE); 2565 sc->fxp_desc.tx_last = txp; 2566 sc->tx_queued++; 2567 /* 2568 * Issue a resume in case the CU has just suspended. 2569 */ 2570 fxp_scb_wait(sc); 2571 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_RESUME); 2572 /* 2573 * Set a 5 second timer just in case we don't hear from the 2574 * card again. 2575 */ 2576 ifp->if_timer = 5; 2577 2578 return; 2579 } 2580 sc->need_mcsetup = 0; 2581 2582 /* 2583 * Initialize multicast setup descriptor. 2584 */ 2585 mcsp->cb_status = 0; 2586 mcsp->cb_command = htole16(FXP_CB_COMMAND_MCAS | 2587 FXP_CB_COMMAND_S | FXP_CB_COMMAND_I); 2588 mcsp->link_addr = htole32(sc->fxp_desc.cbl_addr); 2589 txp = &sc->fxp_desc.mcs_tx; 2590 txp->tx_mbuf = NULL; 2591 txp->tx_cb = (struct fxp_cb_tx *)sc->mcsp; 2592 txp->tx_next = sc->fxp_desc.tx_list; 2593 (void) fxp_mc_addrs(sc); 2594 sc->fxp_desc.tx_first = sc->fxp_desc.tx_last = txp; 2595 sc->tx_queued = 1; 2596 2597 /* 2598 * Wait until command unit is not active. 
This should never 2599 * be the case when nothing is queued, but make sure anyway. 2600 */ 2601 count = 100; 2602 while ((CSR_READ_1(sc, FXP_CSR_SCB_RUSCUS) >> 6) == 2603 FXP_SCB_CUS_ACTIVE && --count) 2604 DELAY(10); 2605 if (count == 0) { 2606 device_printf(sc->dev, "command queue timeout\n"); 2607 return; 2608 } 2609 2610 /* 2611 * Start the multicast setup command. 2612 */ 2613 fxp_scb_wait(sc); 2614 bus_dmamap_sync(sc->mcs_tag, sc->mcs_map, BUS_DMASYNC_PREWRITE); 2615 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->mcs_addr); 2616 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START); 2617 2618 ifp->if_timer = 2; 2619 return; 2620} 2621 2622static u_int32_t fxp_ucode_d101a[] = D101_A_RCVBUNDLE_UCODE; 2623static u_int32_t fxp_ucode_d101b0[] = D101_B0_RCVBUNDLE_UCODE; 2624static u_int32_t fxp_ucode_d101ma[] = D101M_B_RCVBUNDLE_UCODE; 2625static u_int32_t fxp_ucode_d101s[] = D101S_RCVBUNDLE_UCODE; 2626static u_int32_t fxp_ucode_d102[] = D102_B_RCVBUNDLE_UCODE; 2627static u_int32_t fxp_ucode_d102c[] = D102_C_RCVBUNDLE_UCODE; 2628 2629#define UCODE(x) x, sizeof(x)/sizeof(u_int32_t) 2630 2631struct ucode { 2632 u_int32_t revision; 2633 u_int32_t *ucode; 2634 int length; 2635 u_short int_delay_offset; 2636 u_short bundle_max_offset; 2637} ucode_table[] = { 2638 { FXP_REV_82558_A4, UCODE(fxp_ucode_d101a), D101_CPUSAVER_DWORD, 0 }, 2639 { FXP_REV_82558_B0, UCODE(fxp_ucode_d101b0), D101_CPUSAVER_DWORD, 0 }, 2640 { FXP_REV_82559_A0, UCODE(fxp_ucode_d101ma), 2641 D101M_CPUSAVER_DWORD, D101M_CPUSAVER_BUNDLE_MAX_DWORD }, 2642 { FXP_REV_82559S_A, UCODE(fxp_ucode_d101s), 2643 D101S_CPUSAVER_DWORD, D101S_CPUSAVER_BUNDLE_MAX_DWORD }, 2644 { FXP_REV_82550, UCODE(fxp_ucode_d102), 2645 D102_B_CPUSAVER_DWORD, D102_B_CPUSAVER_BUNDLE_MAX_DWORD }, 2646 { FXP_REV_82550_C, UCODE(fxp_ucode_d102c), 2647 D102_C_CPUSAVER_DWORD, D102_C_CPUSAVER_BUNDLE_MAX_DWORD }, 2648 { 0, NULL, 0, 0, 0 } 2649}; 2650 2651static void 2652fxp_load_ucode(struct fxp_softc *sc) 2653{ 2654 struct ucode *uc; 2655 struct fxp_cb_ucode 
*cbp; 2656 int i; 2657 2658 for (uc = ucode_table; uc->ucode != NULL; uc++) 2659 if (sc->revision == uc->revision) 2660 break; 2661 if (uc->ucode == NULL) 2662 return; 2663 cbp = (struct fxp_cb_ucode *)sc->fxp_desc.cbl_list; 2664 cbp->cb_status = 0; 2665 cbp->cb_command = htole16(FXP_CB_COMMAND_UCODE | FXP_CB_COMMAND_EL); 2666 cbp->link_addr = 0xffffffff; /* (no) next command */ 2667 for (i = 0; i < uc->length; i++) 2668 cbp->ucode[i] = htole32(uc->ucode[i]); 2669 if (uc->int_delay_offset) 2670 *(u_int16_t *)&cbp->ucode[uc->int_delay_offset] = 2671 htole16(sc->tunable_int_delay + sc->tunable_int_delay / 2); 2672 if (uc->bundle_max_offset) 2673 *(u_int16_t *)&cbp->ucode[uc->bundle_max_offset] = 2674 htole16(sc->tunable_bundle_max); 2675 /* 2676 * Download the ucode to the chip. 2677 */ 2678 fxp_scb_wait(sc); 2679 bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE); 2680 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->fxp_desc.cbl_addr); 2681 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START); 2682 /* ...and wait for it to complete. */ 2683 fxp_dma_wait(sc, &cbp->cb_status, sc->cbl_tag, sc->cbl_map); 2684 bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_POSTWRITE); 2685 device_printf(sc->dev, 2686 "Microcode loaded, int_delay: %d usec bundle_max: %d\n", 2687 sc->tunable_int_delay, 2688 uc->bundle_max_offset == 0 ? 0 : sc->tunable_bundle_max); 2689 sc->flags |= FXP_FLAG_UCODE; 2690} 2691 2692static int 2693sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high) 2694{ 2695 int error, value; 2696 2697 value = *(int *)arg1; 2698 error = sysctl_handle_int(oidp, &value, 0, req); 2699 if (error || !req->newptr) 2700 return (error); 2701 if (value < low || value > high) 2702 return (EINVAL); 2703 *(int *)arg1 = value; 2704 return (0); 2705} 2706 2707/* 2708 * Interrupt delay is expressed in microseconds, a multiplier is used 2709 * to convert this to the appropriate clock ticks before using. 
2710 */ 2711static int 2712sysctl_hw_fxp_int_delay(SYSCTL_HANDLER_ARGS) 2713{ 2714 return (sysctl_int_range(oidp, arg1, arg2, req, 300, 3000)); 2715} 2716 2717static int 2718sysctl_hw_fxp_bundle_max(SYSCTL_HANDLER_ARGS) 2719{ 2720 return (sysctl_int_range(oidp, arg1, arg2, req, 1, 0xffff)); 2721} 2722