/*-
 * Copyright (c) 1995, David Greenman
 * Copyright (c) 2001 Jonathan Lemon <jlemon@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
2737684Sdfr * 2837684Sdfr */ 2937684Sdfr 3037684Sdfr#include <sys/cdefs.h> 3137684Sdfr__FBSDID("$FreeBSD: head/sys/dev/fxp/if_fxp.c 214302 2010-10-24 21:59:51Z yongari $"); 3237684Sdfr 3337684Sdfr/* 3437684Sdfr * Intel EtherExpress Pro/100B PCI Fast Ethernet driver 3537684Sdfr */ 3637684Sdfr 3737684Sdfr#ifdef HAVE_KERNEL_OPTION_HEADERS 3837684Sdfr#include "opt_device_polling.h" 3978388Sbenno#endif 4078388Sbenno 4137684Sdfr#include <sys/param.h> 4237684Sdfr#include <sys/systm.h> 4337684Sdfr#include <sys/bus.h> 4437684Sdfr#include <sys/endian.h> 4537684Sdfr#include <sys/kernel.h> 4637684Sdfr#include <sys/mbuf.h> 4778388Sbenno#include <sys/lock.h> 4878388Sbenno#include <sys/module.h> 4978388Sbenno#include <sys/mutex.h> 5078388Sbenno#include <sys/rman.h> 51176617Smarcel#include <sys/socket.h> 5278388Sbenno#include <sys/sockio.h> 5378388Sbenno#include <sys/sysctl.h> 5437684Sdfr 5537684Sdfr#include <net/bpf.h> 5678388Sbenno#include <net/ethernet.h> 5778388Sbenno#include <net/if.h> 5884665Smp#include <net/if_arp.h> 5984665Smp#include <net/if_dl.h> 6037684Sdfr#include <net/if_media.h> 6137684Sdfr#include <net/if_types.h> 6237684Sdfr#include <net/if_vlan_var.h> 6337684Sdfr 6437684Sdfr#include <netinet/in.h> 6537684Sdfr#include <netinet/in_systm.h> 6637684Sdfr#include <netinet/ip.h> 6778388Sbenno#include <netinet/tcp.h> 6878388Sbenno#include <netinet/udp.h> 6978388Sbenno 7078388Sbenno#include <machine/bus.h> 7178388Sbenno#include <machine/in_cksum.h> 7278388Sbenno#include <machine/resource.h> 7378388Sbenno 7478388Sbenno#include <dev/pci/pcivar.h> 7578388Sbenno#include <dev/pci/pcireg.h> /* for PCIM_CMD_xxx */ 7678388Sbenno 7778388Sbenno#include <dev/mii/mii.h> 7878388Sbenno#include <dev/mii/miivar.h> 7978388Sbenno 8078388Sbenno#include <dev/fxp/if_fxpreg.h> 8178388Sbenno#include <dev/fxp/if_fxpvar.h> 8278388Sbenno#include <dev/fxp/rcvbundl.h> 8378388Sbenno 8478388SbennoMODULE_DEPEND(fxp, pci, 1, 1, 1); 8578388SbennoMODULE_DEPEND(fxp, ether, 1, 1, 1); 
8678388SbennoMODULE_DEPEND(fxp, miibus, 1, 1, 1); 8778388Sbenno#include "miibus_if.h" 8837684Sdfr 8937684Sdfr/* 9037684Sdfr * NOTE! On the Alpha, we have an alignment constraint. The 9137684Sdfr * card DMAs the packet immediately following the RFA. However, 9237684Sdfr * the first thing in the packet is a 14-byte Ethernet header. 9337684Sdfr * This means that the packet is misaligned. To compensate, 9478388Sbenno * we actually offset the RFA 2 bytes into the cluster. This 9578388Sbenno * alignes the packet after the Ethernet header at a 32-bit 9678388Sbenno * boundary. HOWEVER! This means that the RFA is misaligned! 9778388Sbenno */ 9878388Sbenno#define RFA_ALIGNMENT_FUDGE 2 9978388Sbenno 10078388Sbenno/* 10178388Sbenno * Set initial transmit threshold at 64 (512 bytes). This is 10278388Sbenno * increased by 64 (512 bytes) at a time, to maximum of 192 10337684Sdfr * (1536 bytes), if an underrun occurs. 10437684Sdfr */ 10578388Sbennostatic int tx_threshold = 64; 10637684Sdfr 10737684Sdfr/* 10837684Sdfr * The configuration byte map has several undefined fields which 10937684Sdfr * must be one or must be zero. Set up a template for these bits. 11037684Sdfr * The actual configuration is performed in fxp_init. 11137684Sdfr * 11237684Sdfr * See struct fxp_cb_config for the bit definitions. 
11337684Sdfr */ 11437684Sdfrstatic u_char fxp_cb_config_template[] = { 11537684Sdfr 0x0, 0x0, /* cb_status */ 11637684Sdfr 0x0, 0x0, /* cb_command */ 11737684Sdfr 0x0, 0x0, 0x0, 0x0, /* link_addr */ 11837684Sdfr 0x0, /* 0 */ 11937684Sdfr 0x0, /* 1 */ 12037684Sdfr 0x0, /* 2 */ 12137684Sdfr 0x0, /* 3 */ 12237684Sdfr 0x0, /* 4 */ 12337684Sdfr 0x0, /* 5 */ 12437684Sdfr 0x32, /* 6 */ 12537684Sdfr 0x0, /* 7 */ 12637684Sdfr 0x0, /* 8 */ 12737684Sdfr 0x0, /* 9 */ 12837684Sdfr 0x6, /* 10 */ 12937684Sdfr 0x0, /* 11 */ 13037684Sdfr 0x0, /* 12 */ 13137684Sdfr 0x0, /* 13 */ 13237684Sdfr 0xf2, /* 14 */ 13337684Sdfr 0x48, /* 15 */ 13437684Sdfr 0x0, /* 16 */ 13537684Sdfr 0x40, /* 17 */ 13637684Sdfr 0xf0, /* 18 */ 13737684Sdfr 0x0, /* 19 */ 13837684Sdfr 0x3f, /* 20 */ 13937684Sdfr 0x5, /* 21 */ 14037684Sdfr 0x0, /* 22 */ 14137684Sdfr 0x0, /* 23 */ 14237684Sdfr 0x0, /* 24 */ 143 0x0, /* 25 */ 144 0x0, /* 26 */ 145 0x0, /* 27 */ 146 0x0, /* 28 */ 147 0x0, /* 29 */ 148 0x0, /* 30 */ 149 0x0 /* 31 */ 150}; 151 152/* 153 * Claim various Intel PCI device identifiers for this driver. The 154 * sub-vendor and sub-device field are extensively used to identify 155 * particular variants, but we don't currently differentiate between 156 * them. 
157 */ 158static struct fxp_ident fxp_ident_table[] = { 159 { 0x1029, -1, 0, "Intel 82559 PCI/CardBus Pro/100" }, 160 { 0x1030, -1, 0, "Intel 82559 Pro/100 Ethernet" }, 161 { 0x1031, -1, 3, "Intel 82801CAM (ICH3) Pro/100 VE Ethernet" }, 162 { 0x1032, -1, 3, "Intel 82801CAM (ICH3) Pro/100 VE Ethernet" }, 163 { 0x1033, -1, 3, "Intel 82801CAM (ICH3) Pro/100 VM Ethernet" }, 164 { 0x1034, -1, 3, "Intel 82801CAM (ICH3) Pro/100 VM Ethernet" }, 165 { 0x1035, -1, 3, "Intel 82801CAM (ICH3) Pro/100 Ethernet" }, 166 { 0x1036, -1, 3, "Intel 82801CAM (ICH3) Pro/100 Ethernet" }, 167 { 0x1037, -1, 3, "Intel 82801CAM (ICH3) Pro/100 Ethernet" }, 168 { 0x1038, -1, 3, "Intel 82801CAM (ICH3) Pro/100 VM Ethernet" }, 169 { 0x1039, -1, 4, "Intel 82801DB (ICH4) Pro/100 VE Ethernet" }, 170 { 0x103A, -1, 4, "Intel 82801DB (ICH4) Pro/100 Ethernet" }, 171 { 0x103B, -1, 4, "Intel 82801DB (ICH4) Pro/100 VM Ethernet" }, 172 { 0x103C, -1, 4, "Intel 82801DB (ICH4) Pro/100 Ethernet" }, 173 { 0x103D, -1, 4, "Intel 82801DB (ICH4) Pro/100 VE Ethernet" }, 174 { 0x103E, -1, 4, "Intel 82801DB (ICH4) Pro/100 VM Ethernet" }, 175 { 0x1050, -1, 5, "Intel 82801BA (D865) Pro/100 VE Ethernet" }, 176 { 0x1051, -1, 5, "Intel 82562ET (ICH5/ICH5R) Pro/100 VE Ethernet" }, 177 { 0x1059, -1, 0, "Intel 82551QM Pro/100 M Mobile Connection" }, 178 { 0x1064, -1, 6, "Intel 82562EZ (ICH6)" }, 179 { 0x1065, -1, 6, "Intel 82562ET/EZ/GT/GZ PRO/100 VE Ethernet" }, 180 { 0x1068, -1, 6, "Intel 82801FBM (ICH6-M) Pro/100 VE Ethernet" }, 181 { 0x1069, -1, 6, "Intel 82562EM/EX/GX Pro/100 Ethernet" }, 182 { 0x1091, -1, 7, "Intel 82562GX Pro/100 Ethernet" }, 183 { 0x1092, -1, 7, "Intel Pro/100 VE Network Connection" }, 184 { 0x1093, -1, 7, "Intel Pro/100 VM Network Connection" }, 185 { 0x1094, -1, 7, "Intel Pro/100 946GZ (ICH7) Network Connection" }, 186 { 0x1209, -1, 0, "Intel 82559ER Embedded 10/100 Ethernet" }, 187 { 0x1229, 0x01, 0, "Intel 82557 Pro/100 Ethernet" }, 188 { 0x1229, 0x02, 0, "Intel 82557 Pro/100 Ethernet" }, 189 { 
0x1229, 0x03, 0, "Intel 82557 Pro/100 Ethernet" }, 190 { 0x1229, 0x04, 0, "Intel 82558 Pro/100 Ethernet" }, 191 { 0x1229, 0x05, 0, "Intel 82558 Pro/100 Ethernet" }, 192 { 0x1229, 0x06, 0, "Intel 82559 Pro/100 Ethernet" }, 193 { 0x1229, 0x07, 0, "Intel 82559 Pro/100 Ethernet" }, 194 { 0x1229, 0x08, 0, "Intel 82559 Pro/100 Ethernet" }, 195 { 0x1229, 0x09, 0, "Intel 82559ER Pro/100 Ethernet" }, 196 { 0x1229, 0x0c, 0, "Intel 82550 Pro/100 Ethernet" }, 197 { 0x1229, 0x0d, 0, "Intel 82550 Pro/100 Ethernet" }, 198 { 0x1229, 0x0e, 0, "Intel 82550 Pro/100 Ethernet" }, 199 { 0x1229, 0x0f, 0, "Intel 82551 Pro/100 Ethernet" }, 200 { 0x1229, 0x10, 0, "Intel 82551 Pro/100 Ethernet" }, 201 { 0x1229, -1, 0, "Intel 82557/8/9 Pro/100 Ethernet" }, 202 { 0x2449, -1, 2, "Intel 82801BA/CAM (ICH2/3) Pro/100 Ethernet" }, 203 { 0x27dc, -1, 7, "Intel 82801GB (ICH7) 10/100 Ethernet" }, 204 { 0, -1, 0, NULL }, 205}; 206 207#ifdef FXP_IP_CSUM_WAR 208#define FXP_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 209#else 210#define FXP_CSUM_FEATURES (CSUM_TCP | CSUM_UDP) 211#endif 212 213static int fxp_probe(device_t dev); 214static int fxp_attach(device_t dev); 215static int fxp_detach(device_t dev); 216static int fxp_shutdown(device_t dev); 217static int fxp_suspend(device_t dev); 218static int fxp_resume(device_t dev); 219 220static struct fxp_ident *fxp_find_ident(device_t dev); 221static void fxp_intr(void *xsc); 222static void fxp_rxcsum(struct fxp_softc *sc, struct ifnet *ifp, 223 struct mbuf *m, uint16_t status, int pos); 224static int fxp_intr_body(struct fxp_softc *sc, struct ifnet *ifp, 225 uint8_t statack, int count); 226static void fxp_init(void *xsc); 227static void fxp_init_body(struct fxp_softc *sc); 228static void fxp_tick(void *xsc); 229static void fxp_start(struct ifnet *ifp); 230static void fxp_start_body(struct ifnet *ifp); 231static int fxp_encap(struct fxp_softc *sc, struct mbuf **m_head); 232static void fxp_txeof(struct fxp_softc *sc); 233static void fxp_stop(struct fxp_softc 
*sc); 234static void fxp_release(struct fxp_softc *sc); 235static int fxp_ioctl(struct ifnet *ifp, u_long command, 236 caddr_t data); 237static void fxp_watchdog(struct fxp_softc *sc); 238static void fxp_add_rfabuf(struct fxp_softc *sc, 239 struct fxp_rx *rxp); 240static void fxp_discard_rfabuf(struct fxp_softc *sc, 241 struct fxp_rx *rxp); 242static int fxp_new_rfabuf(struct fxp_softc *sc, 243 struct fxp_rx *rxp); 244static int fxp_mc_addrs(struct fxp_softc *sc); 245static void fxp_mc_setup(struct fxp_softc *sc); 246static uint16_t fxp_eeprom_getword(struct fxp_softc *sc, int offset, 247 int autosize); 248static void fxp_eeprom_putword(struct fxp_softc *sc, int offset, 249 uint16_t data); 250static void fxp_autosize_eeprom(struct fxp_softc *sc); 251static void fxp_read_eeprom(struct fxp_softc *sc, u_short *data, 252 int offset, int words); 253static void fxp_write_eeprom(struct fxp_softc *sc, u_short *data, 254 int offset, int words); 255static int fxp_ifmedia_upd(struct ifnet *ifp); 256static void fxp_ifmedia_sts(struct ifnet *ifp, 257 struct ifmediareq *ifmr); 258static int fxp_serial_ifmedia_upd(struct ifnet *ifp); 259static void fxp_serial_ifmedia_sts(struct ifnet *ifp, 260 struct ifmediareq *ifmr); 261static int fxp_miibus_readreg(device_t dev, int phy, int reg); 262static int fxp_miibus_writereg(device_t dev, int phy, int reg, 263 int value); 264static void fxp_load_ucode(struct fxp_softc *sc); 265static void fxp_update_stats(struct fxp_softc *sc); 266static void fxp_sysctl_node(struct fxp_softc *sc); 267static int sysctl_int_range(SYSCTL_HANDLER_ARGS, 268 int low, int high); 269static int sysctl_hw_fxp_bundle_max(SYSCTL_HANDLER_ARGS); 270static int sysctl_hw_fxp_int_delay(SYSCTL_HANDLER_ARGS); 271static void fxp_scb_wait(struct fxp_softc *sc); 272static void fxp_scb_cmd(struct fxp_softc *sc, int cmd); 273static void fxp_dma_wait(struct fxp_softc *sc, 274 volatile uint16_t *status, bus_dma_tag_t dmat, 275 bus_dmamap_t map); 276 277static device_method_t 
fxp_methods[] = { 278 /* Device interface */ 279 DEVMETHOD(device_probe, fxp_probe), 280 DEVMETHOD(device_attach, fxp_attach), 281 DEVMETHOD(device_detach, fxp_detach), 282 DEVMETHOD(device_shutdown, fxp_shutdown), 283 DEVMETHOD(device_suspend, fxp_suspend), 284 DEVMETHOD(device_resume, fxp_resume), 285 286 /* MII interface */ 287 DEVMETHOD(miibus_readreg, fxp_miibus_readreg), 288 DEVMETHOD(miibus_writereg, fxp_miibus_writereg), 289 290 { 0, 0 } 291}; 292 293static driver_t fxp_driver = { 294 "fxp", 295 fxp_methods, 296 sizeof(struct fxp_softc), 297}; 298 299static devclass_t fxp_devclass; 300 301DRIVER_MODULE(fxp, pci, fxp_driver, fxp_devclass, 0, 0); 302DRIVER_MODULE(miibus, fxp, miibus_driver, miibus_devclass, 0, 0); 303 304static struct resource_spec fxp_res_spec_mem[] = { 305 { SYS_RES_MEMORY, FXP_PCI_MMBA, RF_ACTIVE }, 306 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, 307 { -1, 0 } 308}; 309 310static struct resource_spec fxp_res_spec_io[] = { 311 { SYS_RES_IOPORT, FXP_PCI_IOBA, RF_ACTIVE }, 312 { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, 313 { -1, 0 } 314}; 315 316/* 317 * Wait for the previous command to be accepted (but not necessarily 318 * completed). 
319 */ 320static void 321fxp_scb_wait(struct fxp_softc *sc) 322{ 323 union { 324 uint16_t w; 325 uint8_t b[2]; 326 } flowctl; 327 int i = 10000; 328 329 while (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) && --i) 330 DELAY(2); 331 if (i == 0) { 332 flowctl.b[0] = CSR_READ_1(sc, FXP_CSR_FLOWCONTROL); 333 flowctl.b[1] = CSR_READ_1(sc, FXP_CSR_FLOWCONTROL + 1); 334 device_printf(sc->dev, "SCB timeout: 0x%x 0x%x 0x%x 0x%x\n", 335 CSR_READ_1(sc, FXP_CSR_SCB_COMMAND), 336 CSR_READ_1(sc, FXP_CSR_SCB_STATACK), 337 CSR_READ_1(sc, FXP_CSR_SCB_RUSCUS), flowctl.w); 338 } 339} 340 341static void 342fxp_scb_cmd(struct fxp_softc *sc, int cmd) 343{ 344 345 if (cmd == FXP_SCB_COMMAND_CU_RESUME && sc->cu_resume_bug) { 346 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_CB_COMMAND_NOP); 347 fxp_scb_wait(sc); 348 } 349 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, cmd); 350} 351 352static void 353fxp_dma_wait(struct fxp_softc *sc, volatile uint16_t *status, 354 bus_dma_tag_t dmat, bus_dmamap_t map) 355{ 356 int i; 357 358 for (i = 10000; i > 0; i--) { 359 DELAY(2); 360 bus_dmamap_sync(dmat, map, 361 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 362 if ((le16toh(*status) & FXP_CB_STATUS_C) != 0) 363 break; 364 } 365 if (i == 0) 366 device_printf(sc->dev, "DMA timeout\n"); 367} 368 369static struct fxp_ident * 370fxp_find_ident(device_t dev) 371{ 372 uint16_t devid; 373 uint8_t revid; 374 struct fxp_ident *ident; 375 376 if (pci_get_vendor(dev) == FXP_VENDORID_INTEL) { 377 devid = pci_get_device(dev); 378 revid = pci_get_revid(dev); 379 for (ident = fxp_ident_table; ident->name != NULL; ident++) { 380 if (ident->devid == devid && 381 (ident->revid == revid || ident->revid == -1)) { 382 return (ident); 383 } 384 } 385 } 386 return (NULL); 387} 388 389/* 390 * Return identification string if this device is ours. 
391 */ 392static int 393fxp_probe(device_t dev) 394{ 395 struct fxp_ident *ident; 396 397 ident = fxp_find_ident(dev); 398 if (ident != NULL) { 399 device_set_desc(dev, ident->name); 400 return (BUS_PROBE_DEFAULT); 401 } 402 return (ENXIO); 403} 404 405static void 406fxp_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) 407{ 408 uint32_t *addr; 409 410 if (error) 411 return; 412 413 KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); 414 addr = arg; 415 *addr = segs->ds_addr; 416} 417 418static int 419fxp_attach(device_t dev) 420{ 421 struct fxp_softc *sc; 422 struct fxp_cb_tx *tcbp; 423 struct fxp_tx *txp; 424 struct fxp_rx *rxp; 425 struct ifnet *ifp; 426 uint32_t val; 427 uint16_t data, myea[ETHER_ADDR_LEN / 2]; 428 u_char eaddr[ETHER_ADDR_LEN]; 429 int i, pmc, prefer_iomap; 430 int error; 431 432 error = 0; 433 sc = device_get_softc(dev); 434 sc->dev = dev; 435 mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 436 MTX_DEF); 437 callout_init_mtx(&sc->stat_ch, &sc->sc_mtx, 0); 438 ifmedia_init(&sc->sc_media, 0, fxp_serial_ifmedia_upd, 439 fxp_serial_ifmedia_sts); 440 441 ifp = sc->ifp = if_alloc(IFT_ETHER); 442 if (ifp == NULL) { 443 device_printf(dev, "can not if_alloc()\n"); 444 error = ENOSPC; 445 goto fail; 446 } 447 448 /* 449 * Enable bus mastering. 450 */ 451 pci_enable_busmaster(dev); 452 val = pci_read_config(dev, PCIR_COMMAND, 2); 453 454 /* 455 * Figure out which we should try first - memory mapping or i/o mapping? 456 * We default to memory mapping. Then we accept an override from the 457 * command line. Then we check to see which one is enabled. 
458 */ 459 prefer_iomap = 0; 460 resource_int_value(device_get_name(dev), device_get_unit(dev), 461 "prefer_iomap", &prefer_iomap); 462 if (prefer_iomap) 463 sc->fxp_spec = fxp_res_spec_io; 464 else 465 sc->fxp_spec = fxp_res_spec_mem; 466 467 error = bus_alloc_resources(dev, sc->fxp_spec, sc->fxp_res); 468 if (error) { 469 if (sc->fxp_spec == fxp_res_spec_mem) 470 sc->fxp_spec = fxp_res_spec_io; 471 else 472 sc->fxp_spec = fxp_res_spec_mem; 473 error = bus_alloc_resources(dev, sc->fxp_spec, sc->fxp_res); 474 } 475 if (error) { 476 device_printf(dev, "could not allocate resources\n"); 477 error = ENXIO; 478 goto fail; 479 } 480 481 if (bootverbose) { 482 device_printf(dev, "using %s space register mapping\n", 483 sc->fxp_spec == fxp_res_spec_mem ? "memory" : "I/O"); 484 } 485 486 /* 487 * Put CU/RU idle state and prepare full reset. 488 */ 489 CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET); 490 DELAY(10); 491 /* Full reset and disable interrupts. */ 492 CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SOFTWARE_RESET); 493 DELAY(10); 494 CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_DISABLE); 495 496 /* 497 * Find out how large of an SEEPROM we have. 498 */ 499 fxp_autosize_eeprom(sc); 500 501 /* 502 * Find out the chip revision; lump all 82557 revs together. 503 */ 504 sc->ident = fxp_find_ident(dev); 505 if (sc->ident->ich > 0) { 506 /* Assume ICH controllers are 82559. */ 507 sc->revision = FXP_REV_82559_A0; 508 } else { 509 fxp_read_eeprom(sc, &data, 5, 1); 510 if ((data >> 8) == 1) 511 sc->revision = FXP_REV_82557; 512 else 513 sc->revision = pci_get_revid(dev); 514 } 515 516 /* 517 * Check availability of WOL. 82559ER does not support WOL. 518 */ 519 if (sc->revision >= FXP_REV_82558_A4 && 520 sc->revision != FXP_REV_82559S_A) { 521 fxp_read_eeprom(sc, &data, 10, 1); 522 if ((data & 0x20) != 0 && 523 pci_find_extcap(sc->dev, PCIY_PMG, &pmc) == 0) 524 sc->flags |= FXP_FLAG_WOLCAP; 525 } 526 527 /* Receiver lock-up workaround detection. 
*/ 528 fxp_read_eeprom(sc, &data, 3, 1); 529 if ((data & 0x03) != 0x03) { 530 sc->flags |= FXP_FLAG_RXBUG; 531 device_printf(dev, "Enabling Rx lock-up workaround\n"); 532 } 533 534 /* 535 * Determine whether we must use the 503 serial interface. 536 */ 537 fxp_read_eeprom(sc, &data, 6, 1); 538 if (sc->revision == FXP_REV_82557 && (data & FXP_PHY_DEVICE_MASK) != 0 539 && (data & FXP_PHY_SERIAL_ONLY)) 540 sc->flags |= FXP_FLAG_SERIAL_MEDIA; 541 542 fxp_sysctl_node(sc); 543 /* 544 * Enable workarounds for certain chip revision deficiencies. 545 * 546 * Systems based on the ICH2/ICH2-M chip from Intel, and possibly 547 * some systems based a normal 82559 design, have a defect where 548 * the chip can cause a PCI protocol violation if it receives 549 * a CU_RESUME command when it is entering the IDLE state. The 550 * workaround is to disable Dynamic Standby Mode, so the chip never 551 * deasserts CLKRUN#, and always remains in an active state. 552 * 553 * See Intel 82801BA/82801BAM Specification Update, Errata #30. 554 */ 555 if ((sc->ident->ich >= 2 && sc->ident->ich <= 3) || 556 (sc->ident->ich == 0 && sc->revision >= FXP_REV_82559_A0)) { 557 fxp_read_eeprom(sc, &data, 10, 1); 558 if (data & 0x02) { /* STB enable */ 559 uint16_t cksum; 560 int i; 561 562 device_printf(dev, 563 "Disabling dynamic standby mode in EEPROM\n"); 564 data &= ~0x02; 565 fxp_write_eeprom(sc, &data, 10, 1); 566 device_printf(dev, "New EEPROM ID: 0x%x\n", data); 567 cksum = 0; 568 for (i = 0; i < (1 << sc->eeprom_size) - 1; i++) { 569 fxp_read_eeprom(sc, &data, i, 1); 570 cksum += data; 571 } 572 i = (1 << sc->eeprom_size) - 1; 573 cksum = 0xBABA - cksum; 574 fxp_read_eeprom(sc, &data, i, 1); 575 fxp_write_eeprom(sc, &cksum, i, 1); 576 device_printf(dev, 577 "EEPROM checksum @ 0x%x: 0x%x -> 0x%x\n", 578 i, data, cksum); 579#if 1 580 /* 581 * If the user elects to continue, try the software 582 * workaround, as it is better than nothing. 
583 */ 584 sc->flags |= FXP_FLAG_CU_RESUME_BUG; 585#endif 586 } 587 } 588 589 /* 590 * If we are not a 82557 chip, we can enable extended features. 591 */ 592 if (sc->revision != FXP_REV_82557) { 593 /* 594 * If MWI is enabled in the PCI configuration, and there 595 * is a valid cacheline size (8 or 16 dwords), then tell 596 * the board to turn on MWI. 597 */ 598 if (val & PCIM_CMD_MWRICEN && 599 pci_read_config(dev, PCIR_CACHELNSZ, 1) != 0) 600 sc->flags |= FXP_FLAG_MWI_ENABLE; 601 602 /* turn on the extended TxCB feature */ 603 sc->flags |= FXP_FLAG_EXT_TXCB; 604 605 /* enable reception of long frames for VLAN */ 606 sc->flags |= FXP_FLAG_LONG_PKT_EN; 607 } else { 608 /* a hack to get long VLAN frames on a 82557 */ 609 sc->flags |= FXP_FLAG_SAVE_BAD; 610 } 611 612 /* For 82559 or later chips, Rx checksum offload is supported. */ 613 if (sc->revision >= FXP_REV_82559_A0) { 614 /* 82559ER does not support Rx checksum offloading. */ 615 if (sc->ident->devid != 0x1209) 616 sc->flags |= FXP_FLAG_82559_RXCSUM; 617 } 618 /* 619 * Enable use of extended RFDs and TCBs for 82550 620 * and later chips. Note: we need extended TXCB support 621 * too, but that's already enabled by the code above. 622 * Be careful to do this only on the right devices. 623 */ 624 if (sc->revision == FXP_REV_82550 || sc->revision == FXP_REV_82550_C || 625 sc->revision == FXP_REV_82551_E || sc->revision == FXP_REV_82551_F 626 || sc->revision == FXP_REV_82551_10) { 627 sc->rfa_size = sizeof (struct fxp_rfa); 628 sc->tx_cmd = FXP_CB_COMMAND_IPCBXMIT; 629 sc->flags |= FXP_FLAG_EXT_RFA; 630 /* Use extended RFA instead of 82559 checksum mode. */ 631 sc->flags &= ~FXP_FLAG_82559_RXCSUM; 632 } else { 633 sc->rfa_size = sizeof (struct fxp_rfa) - FXP_RFAX_LEN; 634 sc->tx_cmd = FXP_CB_COMMAND_XMIT; 635 } 636 637 /* 638 * Allocate DMA tags and DMA safe memory. 
639 */ 640 sc->maxtxseg = FXP_NTXSEG; 641 sc->maxsegsize = MCLBYTES; 642 if (sc->flags & FXP_FLAG_EXT_RFA) { 643 sc->maxtxseg--; 644 sc->maxsegsize = FXP_TSO_SEGSIZE; 645 } 646 error = bus_dma_tag_create(bus_get_dma_tag(dev), 2, 0, 647 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 648 sc->maxsegsize * sc->maxtxseg + sizeof(struct ether_vlan_header), 649 sc->maxtxseg, sc->maxsegsize, 0, 650 busdma_lock_mutex, &Giant, &sc->fxp_txmtag); 651 if (error) { 652 device_printf(dev, "could not create TX DMA tag\n"); 653 goto fail; 654 } 655 656 error = bus_dma_tag_create(bus_get_dma_tag(dev), 2, 0, 657 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 658 MCLBYTES, 1, MCLBYTES, 0, 659 busdma_lock_mutex, &Giant, &sc->fxp_rxmtag); 660 if (error) { 661 device_printf(dev, "could not create RX DMA tag\n"); 662 goto fail; 663 } 664 665 error = bus_dma_tag_create(bus_get_dma_tag(dev), 4, 0, 666 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 667 sizeof(struct fxp_stats), 1, sizeof(struct fxp_stats), 0, 668 busdma_lock_mutex, &Giant, &sc->fxp_stag); 669 if (error) { 670 device_printf(dev, "could not create stats DMA tag\n"); 671 goto fail; 672 } 673 674 error = bus_dmamem_alloc(sc->fxp_stag, (void **)&sc->fxp_stats, 675 BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->fxp_smap); 676 if (error) { 677 device_printf(dev, "could not allocate stats DMA memory\n"); 678 goto fail; 679 } 680 error = bus_dmamap_load(sc->fxp_stag, sc->fxp_smap, sc->fxp_stats, 681 sizeof(struct fxp_stats), fxp_dma_map_addr, &sc->stats_addr, 0); 682 if (error) { 683 device_printf(dev, "could not load the stats DMA buffer\n"); 684 goto fail; 685 } 686 687 error = bus_dma_tag_create(bus_get_dma_tag(dev), 4, 0, 688 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 689 FXP_TXCB_SZ, 1, FXP_TXCB_SZ, 0, 690 busdma_lock_mutex, &Giant, &sc->cbl_tag); 691 if (error) { 692 device_printf(dev, "could not create TxCB DMA tag\n"); 693 goto fail; 694 } 695 696 error = bus_dmamem_alloc(sc->cbl_tag, (void 
**)&sc->fxp_desc.cbl_list, 697 BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->cbl_map); 698 if (error) { 699 device_printf(dev, "could not allocate TxCB DMA memory\n"); 700 goto fail; 701 } 702 703 error = bus_dmamap_load(sc->cbl_tag, sc->cbl_map, 704 sc->fxp_desc.cbl_list, FXP_TXCB_SZ, fxp_dma_map_addr, 705 &sc->fxp_desc.cbl_addr, 0); 706 if (error) { 707 device_printf(dev, "could not load TxCB DMA buffer\n"); 708 goto fail; 709 } 710 711 error = bus_dma_tag_create(bus_get_dma_tag(dev), 4, 0, 712 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 713 sizeof(struct fxp_cb_mcs), 1, sizeof(struct fxp_cb_mcs), 0, 714 busdma_lock_mutex, &Giant, &sc->mcs_tag); 715 if (error) { 716 device_printf(dev, 717 "could not create multicast setup DMA tag\n"); 718 goto fail; 719 } 720 721 error = bus_dmamem_alloc(sc->mcs_tag, (void **)&sc->mcsp, 722 BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->mcs_map); 723 if (error) { 724 device_printf(dev, 725 "could not allocate multicast setup DMA memory\n"); 726 goto fail; 727 } 728 error = bus_dmamap_load(sc->mcs_tag, sc->mcs_map, sc->mcsp, 729 sizeof(struct fxp_cb_mcs), fxp_dma_map_addr, &sc->mcs_addr, 0); 730 if (error) { 731 device_printf(dev, 732 "can't load the multicast setup DMA buffer\n"); 733 goto fail; 734 } 735 736 /* 737 * Pre-allocate the TX DMA maps and setup the pointers to 738 * the TX command blocks. 739 */ 740 txp = sc->fxp_desc.tx_list; 741 tcbp = sc->fxp_desc.cbl_list; 742 for (i = 0; i < FXP_NTXCB; i++) { 743 txp[i].tx_cb = tcbp + i; 744 error = bus_dmamap_create(sc->fxp_txmtag, 0, &txp[i].tx_map); 745 if (error) { 746 device_printf(dev, "can't create DMA map for TX\n"); 747 goto fail; 748 } 749 } 750 error = bus_dmamap_create(sc->fxp_rxmtag, 0, &sc->spare_map); 751 if (error) { 752 device_printf(dev, "can't create spare DMA map\n"); 753 goto fail; 754 } 755 756 /* 757 * Pre-allocate our receive buffers. 
758 */ 759 sc->fxp_desc.rx_head = sc->fxp_desc.rx_tail = NULL; 760 for (i = 0; i < FXP_NRFABUFS; i++) { 761 rxp = &sc->fxp_desc.rx_list[i]; 762 error = bus_dmamap_create(sc->fxp_rxmtag, 0, &rxp->rx_map); 763 if (error) { 764 device_printf(dev, "can't create DMA map for RX\n"); 765 goto fail; 766 } 767 if (fxp_new_rfabuf(sc, rxp) != 0) { 768 error = ENOMEM; 769 goto fail; 770 } 771 fxp_add_rfabuf(sc, rxp); 772 } 773 774 /* 775 * Read MAC address. 776 */ 777 fxp_read_eeprom(sc, myea, 0, 3); 778 eaddr[0] = myea[0] & 0xff; 779 eaddr[1] = myea[0] >> 8; 780 eaddr[2] = myea[1] & 0xff; 781 eaddr[3] = myea[1] >> 8; 782 eaddr[4] = myea[2] & 0xff; 783 eaddr[5] = myea[2] >> 8; 784 if (bootverbose) { 785 device_printf(dev, "PCI IDs: %04x %04x %04x %04x %04x\n", 786 pci_get_vendor(dev), pci_get_device(dev), 787 pci_get_subvendor(dev), pci_get_subdevice(dev), 788 pci_get_revid(dev)); 789 fxp_read_eeprom(sc, &data, 10, 1); 790 device_printf(dev, "Dynamic Standby mode is %s\n", 791 data & 0x02 ? "enabled" : "disabled"); 792 } 793 794 /* 795 * If this is only a 10Mbps device, then there is no MII, and 796 * the PHY will use a serial interface instead. 797 * 798 * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter 799 * doesn't have a programming interface of any sort. The 800 * media is sensed automatically based on how the link partner 801 * is configured. This is, in essence, manual configuration. 802 */ 803 if (sc->flags & FXP_FLAG_SERIAL_MEDIA) { 804 ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL); 805 ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL); 806 } else { 807 /* 808 * i82557 wedge when isolating all of their PHYs. 
809 */ 810 error = mii_attach(dev, &sc->miibus, ifp, fxp_ifmedia_upd, 811 fxp_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, 812 MII_OFFSET_ANY, MIIF_NOISOLATE); 813 if (error != 0) { 814 device_printf(dev, "attaching PHYs failed\n"); 815 goto fail; 816 } 817 } 818 819 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 820 ifp->if_init = fxp_init; 821 ifp->if_softc = sc; 822 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 823 ifp->if_ioctl = fxp_ioctl; 824 ifp->if_start = fxp_start; 825 826 ifp->if_capabilities = ifp->if_capenable = 0; 827 828 /* Enable checksum offload/TSO for 82550 or better chips */ 829 if (sc->flags & FXP_FLAG_EXT_RFA) { 830 ifp->if_hwassist = FXP_CSUM_FEATURES | CSUM_TSO; 831 ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4; 832 ifp->if_capenable |= IFCAP_HWCSUM | IFCAP_TSO4; 833 } 834 835 if (sc->flags & FXP_FLAG_82559_RXCSUM) { 836 ifp->if_capabilities |= IFCAP_RXCSUM; 837 ifp->if_capenable |= IFCAP_RXCSUM; 838 } 839 840 if (sc->flags & FXP_FLAG_WOLCAP) { 841 ifp->if_capabilities |= IFCAP_WOL_MAGIC; 842 ifp->if_capenable |= IFCAP_WOL_MAGIC; 843 } 844 845#ifdef DEVICE_POLLING 846 /* Inform the world we support polling. */ 847 ifp->if_capabilities |= IFCAP_POLLING; 848#endif 849 850 /* 851 * Attach the interface. 852 */ 853 ether_ifattach(ifp, eaddr); 854 855 /* 856 * Tell the upper layer(s) we support long frames. 857 * Must appear after the call to ether_ifattach() because 858 * ether_ifattach() sets ifi_hdrlen to the default value. 
859 */ 860 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 861 ifp->if_capabilities |= IFCAP_VLAN_MTU; 862 ifp->if_capenable |= IFCAP_VLAN_MTU; /* the hw bits already set */ 863 if ((sc->flags & FXP_FLAG_EXT_RFA) != 0) { 864 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | 865 IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO; 866 ifp->if_capenable |= IFCAP_VLAN_HWTAGGING | 867 IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO; 868 } 869 870 /* 871 * Let the system queue as many packets as we have available 872 * TX descriptors. 873 */ 874 IFQ_SET_MAXLEN(&ifp->if_snd, FXP_NTXCB - 1); 875 ifp->if_snd.ifq_drv_maxlen = FXP_NTXCB - 1; 876 IFQ_SET_READY(&ifp->if_snd); 877 878 /* 879 * Hook our interrupt after all initialization is complete. 880 */ 881 error = bus_setup_intr(dev, sc->fxp_res[1], INTR_TYPE_NET | INTR_MPSAFE, 882 NULL, fxp_intr, sc, &sc->ih); 883 if (error) { 884 device_printf(dev, "could not setup irq\n"); 885 ether_ifdetach(sc->ifp); 886 goto fail; 887 } 888 889 /* 890 * Configure hardware to reject magic frames otherwise 891 * system will hang on recipt of magic frames. 892 */ 893 if ((sc->flags & FXP_FLAG_WOLCAP) != 0) { 894 FXP_LOCK(sc); 895 /* Clear wakeup events. */ 896 CSR_WRITE_1(sc, FXP_CSR_PMDR, CSR_READ_1(sc, FXP_CSR_PMDR)); 897 fxp_init_body(sc); 898 fxp_stop(sc); 899 FXP_UNLOCK(sc); 900 } 901 902fail: 903 if (error) 904 fxp_release(sc); 905 return (error); 906} 907 908/* 909 * Release all resources. The softc lock should not be held and the 910 * interrupt should already be torn down. 
 */
static void
fxp_release(struct fxp_softc *sc)
{
	struct fxp_rx *rxp;
	struct fxp_tx *txp;
	int i;

	/*
	 * Must run unlocked and only after the interrupt handler has
	 * been torn down; we are about to free the resources the
	 * interrupt path uses.
	 */
	FXP_LOCK_ASSERT(sc, MA_NOTOWNED);
	KASSERT(sc->ih == NULL,
	    ("fxp_release() called with intr handle still active"));
	if (sc->miibus)
		device_delete_child(sc->dev, sc->miibus);
	bus_generic_detach(sc->dev);
	ifmedia_removeall(&sc->sc_media);
	/* Free the command block list, statistics and mcast setup DMA areas. */
	if (sc->fxp_desc.cbl_list) {
		bus_dmamap_unload(sc->cbl_tag, sc->cbl_map);
		bus_dmamem_free(sc->cbl_tag, sc->fxp_desc.cbl_list,
		    sc->cbl_map);
	}
	if (sc->fxp_stats) {
		bus_dmamap_unload(sc->fxp_stag, sc->fxp_smap);
		bus_dmamem_free(sc->fxp_stag, sc->fxp_stats, sc->fxp_smap);
	}
	if (sc->mcsp) {
		bus_dmamap_unload(sc->mcs_tag, sc->mcs_map);
		bus_dmamem_free(sc->mcs_tag, sc->mcsp, sc->mcs_map);
	}
	bus_release_resources(sc->dev, sc->fxp_spec, sc->fxp_res);
	/* Free any mbufs still held by the receive ring, then its maps/tag. */
	if (sc->fxp_rxmtag) {
		for (i = 0; i < FXP_NRFABUFS; i++) {
			rxp = &sc->fxp_desc.rx_list[i];
			if (rxp->rx_mbuf != NULL) {
				bus_dmamap_sync(sc->fxp_rxmtag, rxp->rx_map,
				    BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(sc->fxp_rxmtag, rxp->rx_map);
				m_freem(rxp->rx_mbuf);
			}
			bus_dmamap_destroy(sc->fxp_rxmtag, rxp->rx_map);
		}
		bus_dmamap_destroy(sc->fxp_rxmtag, sc->spare_map);
		bus_dma_tag_destroy(sc->fxp_rxmtag);
	}
	/* Likewise for the transmit ring. */
	if (sc->fxp_txmtag) {
		for (i = 0; i < FXP_NTXCB; i++) {
			txp = &sc->fxp_desc.tx_list[i];
			if (txp->tx_mbuf != NULL) {
				bus_dmamap_sync(sc->fxp_txmtag, txp->tx_map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(sc->fxp_txmtag, txp->tx_map);
				m_freem(txp->tx_mbuf);
			}
			bus_dmamap_destroy(sc->fxp_txmtag, txp->tx_map);
		}
		bus_dma_tag_destroy(sc->fxp_txmtag);
	}
	if (sc->fxp_stag)
		bus_dma_tag_destroy(sc->fxp_stag);
	if (sc->cbl_tag)
		bus_dma_tag_destroy(sc->cbl_tag);
	if (sc->mcs_tag)
		bus_dma_tag_destroy(sc->mcs_tag);
	if (sc->ifp)
		if_free(sc->ifp);

	mtx_destroy(&sc->sc_mtx);
}

/*
 * Detach interface.
 */
static int
fxp_detach(device_t dev)
{
	struct fxp_softc *sc = device_get_softc(dev);

#ifdef DEVICE_POLLING
	if (sc->ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(sc->ifp);
#endif

	FXP_LOCK(sc);
	/*
	 * Stop DMA and drop transmit queue, but disable interrupts first.
	 */
	CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_DISABLE);
	fxp_stop(sc);
	FXP_UNLOCK(sc);
	/* Wait for any in-flight fxp_tick() callout to finish. */
	callout_drain(&sc->stat_ch);

	/*
	 * Close down routes etc.
	 */
	ether_ifdetach(sc->ifp);

	/*
	 * Unhook interrupt before dropping lock. This is to prevent
	 * races with fxp_intr().
	 */
	bus_teardown_intr(sc->dev, sc->fxp_res[1], sc->ih);
	sc->ih = NULL;

	/* Release our allocated resources. */
	fxp_release(sc);
	return (0);
}

/*
 * Device shutdown routine. Called at system shutdown after sync. The
 * main purpose of this routine is to shut off receiver DMA so that
 * kernel memory doesn't get clobbered during warmboot.
 */
static int
fxp_shutdown(device_t dev)
{

	/*
	 * Make sure that DMA is disabled prior to reboot. Not doing
	 * so could allow DMA to corrupt kernel memory during the
	 * reboot before the driver initializes.
	 */
	return (fxp_suspend(dev));
}

/*
 * Device suspend routine.  Stop the interface and save some PCI
 * settings in case the BIOS doesn't restore them properly on
 * resume.
 */
static int
fxp_suspend(device_t dev)
{
	struct fxp_softc *sc = device_get_softc(dev);
	struct ifnet *ifp;
	int pmc;
	uint16_t pmstat;

	FXP_LOCK(sc);

	ifp = sc->ifp;
	if (pci_find_extcap(sc->dev, PCIY_PMG, &pmc) == 0) {
		pmstat = pci_read_config(sc->dev, pmc + PCIR_POWER_STATUS, 2);
		pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
		if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) {
			/* Request PME. */
			pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
			sc->flags |= FXP_FLAG_WOL;
			/* Reconfigure hardware to accept magic frames. */
			fxp_init_body(sc);
		}
		pci_write_config(sc->dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
	}
	fxp_stop(sc);

	sc->suspended = 1;

	FXP_UNLOCK(sc);
	return (0);
}

/*
 * Device resume routine. re-enable busmastering, and restart the interface if
 * appropriate.
 */
static int
fxp_resume(device_t dev)
{
	struct fxp_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = sc->ifp;
	int pmc;
	uint16_t pmstat;

	FXP_LOCK(sc);

	if (pci_find_extcap(sc->dev, PCIY_PMG, &pmc) == 0) {
		sc->flags &= ~FXP_FLAG_WOL;
		pmstat = pci_read_config(sc->dev, pmc + PCIR_POWER_STATUS, 2);
		/* Disable PME and clear PME status. */
		pmstat &= ~PCIM_PSTAT_PMEENABLE;
		pci_write_config(sc->dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
		if ((sc->flags & FXP_FLAG_WOLCAP) != 0)
			CSR_WRITE_1(sc, FXP_CSR_PMDR,
			    CSR_READ_1(sc, FXP_CSR_PMDR));
	}

	CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET);
	DELAY(10);

	/* reinitialize interface if necessary */
	if (ifp->if_flags & IFF_UP)
		fxp_init_body(sc);

	sc->suspended = 0;

	FXP_UNLOCK(sc);
	return (0);
}

/*
 * Clock `length' significant bits of `data' into the serial EEPROM,
 * MSB first, one bit per EESK clock pulse.
 */
static void
fxp_eeprom_shiftin(struct fxp_softc *sc, int data, int length)
{
	uint16_t reg;
	int x;

	/*
	 * Shift in data.
	 */
	for (x = 1 << (length - 1); x; x >>= 1) {
		if (data & x)
			reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
		else
			reg = FXP_EEPROM_EECS;
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
		DELAY(1);
		/* Raise then drop the serial clock to latch this bit. */
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg | FXP_EEPROM_EESK);
		DELAY(1);
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
		DELAY(1);
	}
}

/*
 * Read from the serial EEPROM. Basically, you manually shift in
 * the read opcode (one bit at a time) and then shift in the address,
 * and then you shift out the data (all of this one bit at a time).
 * The word size is 16 bits, so you have to provide the address for
 * every 16 bits of data.
 */
static uint16_t
fxp_eeprom_getword(struct fxp_softc *sc, int offset, int autosize)
{
	uint16_t reg, data;
	int x;

	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	/*
	 * Shift in read opcode.
	 */
	fxp_eeprom_shiftin(sc, FXP_EEPROM_OPC_READ, 3);
	/*
	 * Shift in address.
 */
	data = 0;
	for (x = 1 << (sc->eeprom_size - 1); x; x >>= 1) {
		if (offset & x)
			reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
		else
			reg = FXP_EEPROM_EECS;
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
		DELAY(1);
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg | FXP_EEPROM_EESK);
		DELAY(1);
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
		DELAY(1);
		reg = CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO;
		data++;
		/*
		 * When autosizing: once enough address bits have been
		 * shifted in, the EEPROM emits a `dummy zero' on
		 * data-out; `data' then holds the address width in
		 * bits, which is recorded as the EEPROM size.
		 */
		if (autosize && reg == 0) {
			sc->eeprom_size = data;
			break;
		}
	}
	/*
	 * Shift out data.
	 */
	data = 0;
	reg = FXP_EEPROM_EECS;
	for (x = 1 << 15; x; x >>= 1) {
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg | FXP_EEPROM_EESK);
		DELAY(1);
		if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO)
			data |= x;
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
		DELAY(1);
	}
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);

	return (data);
}

/*
 * Write one 16-bit word to the serial EEPROM at word offset `offset'.
 */
static void
fxp_eeprom_putword(struct fxp_softc *sc, int offset, uint16_t data)
{
	int i;

	/*
	 * Erase/write enable.
	 */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	fxp_eeprom_shiftin(sc, 0x4, 3);
	fxp_eeprom_shiftin(sc, 0x03 << (sc->eeprom_size - 2), sc->eeprom_size);
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);
	/*
	 * Shift in write opcode, address, data.
	 */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	fxp_eeprom_shiftin(sc, FXP_EEPROM_OPC_WRITE, 3);
	fxp_eeprom_shiftin(sc, offset, sc->eeprom_size);
	fxp_eeprom_shiftin(sc, data, 16);
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);
	/*
	 * Wait for EEPROM to finish up (EEDO goes high when the
	 * internal programming cycle is complete; poll up to 50ms).
	 */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	DELAY(1);
	for (i = 0; i < 1000; i++) {
		if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO)
			break;
		DELAY(50);
	}
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);
	/*
	 * Erase/write disable.
	 */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	fxp_eeprom_shiftin(sc, 0x4, 3);
	fxp_eeprom_shiftin(sc, 0, sc->eeprom_size);
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);
}

/*
 * From NetBSD:
 *
 * Figure out EEPROM size.
 *
 * 559's can have either 64-word or 256-word EEPROMs, the 558
 * datasheet only talks about 64-word EEPROMs, and the 557 datasheet
 * talks about the existence of 16 to 256 word EEPROMs.
 *
 * The only known sizes are 64 and 256, where the 256 version is used
 * by CardBus cards to store CIS information.
 *
 * The address is shifted in msb-to-lsb, and after the last
 * address-bit the EEPROM is supposed to output a `dummy zero' bit,
 * after which follows the actual data. We try to detect this zero, by
 * probing the data-out bit in the EEPROM control register just after
 * having shifted in a bit. If the bit is zero, we assume we've
 * shifted enough address bits. The data-out should be tri-state,
 * before this, which should translate to a logical one.
1254 */ 1255static void 1256fxp_autosize_eeprom(struct fxp_softc *sc) 1257{ 1258 1259 /* guess maximum size of 256 words */ 1260 sc->eeprom_size = 8; 1261 1262 /* autosize */ 1263 (void) fxp_eeprom_getword(sc, 0, 1); 1264} 1265 1266static void 1267fxp_read_eeprom(struct fxp_softc *sc, u_short *data, int offset, int words) 1268{ 1269 int i; 1270 1271 for (i = 0; i < words; i++) 1272 data[i] = fxp_eeprom_getword(sc, offset + i, 0); 1273} 1274 1275static void 1276fxp_write_eeprom(struct fxp_softc *sc, u_short *data, int offset, int words) 1277{ 1278 int i; 1279 1280 for (i = 0; i < words; i++) 1281 fxp_eeprom_putword(sc, offset + i, data[i]); 1282} 1283 1284/* 1285 * Grab the softc lock and call the real fxp_start_body() routine 1286 */ 1287static void 1288fxp_start(struct ifnet *ifp) 1289{ 1290 struct fxp_softc *sc = ifp->if_softc; 1291 1292 FXP_LOCK(sc); 1293 fxp_start_body(ifp); 1294 FXP_UNLOCK(sc); 1295} 1296 1297/* 1298 * Start packet transmission on the interface. 1299 * This routine must be called with the softc lock held, and is an 1300 * internal entry point only. 1301 */ 1302static void 1303fxp_start_body(struct ifnet *ifp) 1304{ 1305 struct fxp_softc *sc = ifp->if_softc; 1306 struct mbuf *mb_head; 1307 int txqueued; 1308 1309 FXP_LOCK_ASSERT(sc, MA_OWNED); 1310 1311 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 1312 IFF_DRV_RUNNING) 1313 return; 1314 1315 if (sc->tx_queued > FXP_NTXCB_HIWAT) 1316 fxp_txeof(sc); 1317 /* 1318 * We're finished if there is nothing more to add to the list or if 1319 * we're all filled up with buffers to transmit. 1320 * NOTE: One TxCB is reserved to guarantee that fxp_mc_setup() can add 1321 * a NOP command when needed. 1322 */ 1323 txqueued = 0; 1324 while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd) && 1325 sc->tx_queued < FXP_NTXCB - 1) { 1326 1327 /* 1328 * Grab a packet to transmit. 
1329 */ 1330 IFQ_DRV_DEQUEUE(&ifp->if_snd, mb_head); 1331 if (mb_head == NULL) 1332 break; 1333 1334 if (fxp_encap(sc, &mb_head)) { 1335 if (mb_head == NULL) 1336 break; 1337 IFQ_DRV_PREPEND(&ifp->if_snd, mb_head); 1338 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1339 } 1340 txqueued++; 1341 /* 1342 * Pass packet to bpf if there is a listener. 1343 */ 1344 BPF_MTAP(ifp, mb_head); 1345 } 1346 1347 /* 1348 * We're finished. If we added to the list, issue a RESUME to get DMA 1349 * going again if suspended. 1350 */ 1351 if (txqueued > 0) { 1352 bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, 1353 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1354 fxp_scb_wait(sc); 1355 fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_RESUME); 1356 /* 1357 * Set a 5 second timer just in case we don't hear 1358 * from the card again. 1359 */ 1360 sc->watchdog_timer = 5; 1361 } 1362} 1363 1364static int 1365fxp_encap(struct fxp_softc *sc, struct mbuf **m_head) 1366{ 1367 struct ifnet *ifp; 1368 struct mbuf *m; 1369 struct fxp_tx *txp; 1370 struct fxp_cb_tx *cbp; 1371 struct tcphdr *tcp; 1372 bus_dma_segment_t segs[FXP_NTXSEG]; 1373 int error, i, nseg, tcp_payload; 1374 1375 FXP_LOCK_ASSERT(sc, MA_OWNED); 1376 ifp = sc->ifp; 1377 1378 tcp_payload = 0; 1379 tcp = NULL; 1380 /* 1381 * Get pointer to next available tx desc. 1382 */ 1383 txp = sc->fxp_desc.tx_last->tx_next; 1384 1385 /* 1386 * A note in Appendix B of the Intel 8255x 10/100 Mbps 1387 * Ethernet Controller Family Open Source Software 1388 * Developer Manual says: 1389 * Using software parsing is only allowed with legal 1390 * TCP/IP or UDP/IP packets. 1391 * ... 1392 * For all other datagrams, hardware parsing must 1393 * be used. 1394 * Software parsing appears to truncate ICMP and 1395 * fragmented UDP packets that contain one to three 1396 * bytes in the second (and final) mbuf of the packet. 
1397 */ 1398 if (sc->flags & FXP_FLAG_EXT_RFA) 1399 txp->tx_cb->ipcb_ip_activation_high = 1400 FXP_IPCB_HARDWAREPARSING_ENABLE; 1401 1402 m = *m_head; 1403 if (m->m_pkthdr.csum_flags & CSUM_TSO) { 1404 /* 1405 * 82550/82551 requires ethernet/IP/TCP headers must be 1406 * contained in the first active transmit buffer. 1407 */ 1408 struct ether_header *eh; 1409 struct ip *ip; 1410 uint32_t ip_off, poff; 1411 1412 if (M_WRITABLE(*m_head) == 0) { 1413 /* Get a writable copy. */ 1414 m = m_dup(*m_head, M_DONTWAIT); 1415 m_freem(*m_head); 1416 if (m == NULL) { 1417 *m_head = NULL; 1418 return (ENOBUFS); 1419 } 1420 *m_head = m; 1421 } 1422 ip_off = sizeof(struct ether_header); 1423 m = m_pullup(*m_head, ip_off); 1424 if (m == NULL) { 1425 *m_head = NULL; 1426 return (ENOBUFS); 1427 } 1428 eh = mtod(m, struct ether_header *); 1429 /* Check the existence of VLAN tag. */ 1430 if (eh->ether_type == htons(ETHERTYPE_VLAN)) { 1431 ip_off = sizeof(struct ether_vlan_header); 1432 m = m_pullup(m, ip_off); 1433 if (m == NULL) { 1434 *m_head = NULL; 1435 return (ENOBUFS); 1436 } 1437 } 1438 m = m_pullup(m, ip_off + sizeof(struct ip)); 1439 if (m == NULL) { 1440 *m_head = NULL; 1441 return (ENOBUFS); 1442 } 1443 ip = (struct ip *)(mtod(m, char *) + ip_off); 1444 poff = ip_off + (ip->ip_hl << 2); 1445 m = m_pullup(m, poff + sizeof(struct tcphdr)); 1446 if (m == NULL) { 1447 *m_head = NULL; 1448 return (ENOBUFS); 1449 } 1450 tcp = (struct tcphdr *)(mtod(m, char *) + poff); 1451 m = m_pullup(m, poff + sizeof(struct tcphdr) + tcp->th_off); 1452 if (m == NULL) { 1453 *m_head = NULL; 1454 return (ENOBUFS); 1455 } 1456 1457 /* 1458 * Since 82550/82551 doesn't modify IP length and pseudo 1459 * checksum in the first frame driver should compute it. 
1460 */ 1461 ip = (struct ip *)(mtod(m, char *) + ip_off); 1462 tcp = (struct tcphdr *)(mtod(m, char *) + poff); 1463 ip->ip_sum = 0; 1464 ip->ip_len = htons(m->m_pkthdr.tso_segsz + (ip->ip_hl << 2) + 1465 (tcp->th_off << 2)); 1466 tcp->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, 1467 htons(IPPROTO_TCP + (tcp->th_off << 2) + 1468 m->m_pkthdr.tso_segsz)); 1469 /* Compute total TCP payload. */ 1470 tcp_payload = m->m_pkthdr.len - ip_off - (ip->ip_hl << 2); 1471 tcp_payload -= tcp->th_off << 2; 1472 *m_head = m; 1473 } else if (m->m_pkthdr.csum_flags & FXP_CSUM_FEATURES) { 1474 /* 1475 * Deal with TCP/IP checksum offload. Note that 1476 * in order for TCP checksum offload to work, 1477 * the pseudo header checksum must have already 1478 * been computed and stored in the checksum field 1479 * in the TCP header. The stack should have 1480 * already done this for us. 1481 */ 1482 txp->tx_cb->ipcb_ip_schedule = FXP_IPCB_TCPUDP_CHECKSUM_ENABLE; 1483 if (m->m_pkthdr.csum_flags & CSUM_TCP) 1484 txp->tx_cb->ipcb_ip_schedule |= FXP_IPCB_TCP_PACKET; 1485 1486#ifdef FXP_IP_CSUM_WAR 1487 /* 1488 * XXX The 82550 chip appears to have trouble 1489 * dealing with IP header checksums in very small 1490 * datagrams, namely fragments from 1 to 3 bytes 1491 * in size. For example, say you want to transmit 1492 * a UDP packet of 1473 bytes. The packet will be 1493 * fragmented over two IP datagrams, the latter 1494 * containing only one byte of data. The 82550 will 1495 * botch the header checksum on the 1-byte fragment. 1496 * As long as the datagram contains 4 or more bytes 1497 * of data, you're ok. 1498 * 1499 * The following code attempts to work around this 1500 * problem: if the datagram is less than 38 bytes 1501 * in size (14 bytes ether header, 20 bytes IP header, 1502 * plus 4 bytes of data), we punt and compute the IP 1503 * header checksum by hand. 
This workaround doesn't 1504 * work very well, however, since it can be fooled 1505 * by things like VLAN tags and IP options that make 1506 * the header sizes/offsets vary. 1507 */ 1508 1509 if (m->m_pkthdr.csum_flags & CSUM_IP) { 1510 if (m->m_pkthdr.len < 38) { 1511 struct ip *ip; 1512 m->m_data += ETHER_HDR_LEN; 1513 ip = mtod(m, struct ip *); 1514 ip->ip_sum = in_cksum(m, ip->ip_hl << 2); 1515 m->m_data -= ETHER_HDR_LEN; 1516 m->m_pkthdr.csum_flags &= ~CSUM_IP; 1517 } else { 1518 txp->tx_cb->ipcb_ip_activation_high = 1519 FXP_IPCB_HARDWAREPARSING_ENABLE; 1520 txp->tx_cb->ipcb_ip_schedule |= 1521 FXP_IPCB_IP_CHECKSUM_ENABLE; 1522 } 1523 } 1524#endif 1525 } 1526 1527 error = bus_dmamap_load_mbuf_sg(sc->fxp_txmtag, txp->tx_map, *m_head, 1528 segs, &nseg, 0); 1529 if (error == EFBIG) { 1530 m = m_collapse(*m_head, M_DONTWAIT, sc->maxtxseg); 1531 if (m == NULL) { 1532 m_freem(*m_head); 1533 *m_head = NULL; 1534 return (ENOMEM); 1535 } 1536 *m_head = m; 1537 error = bus_dmamap_load_mbuf_sg(sc->fxp_txmtag, txp->tx_map, 1538 *m_head, segs, &nseg, 0); 1539 if (error != 0) { 1540 m_freem(*m_head); 1541 *m_head = NULL; 1542 return (ENOMEM); 1543 } 1544 } else if (error != 0) 1545 return (error); 1546 if (nseg == 0) { 1547 m_freem(*m_head); 1548 *m_head = NULL; 1549 return (EIO); 1550 } 1551 1552 KASSERT(nseg <= sc->maxtxseg, ("too many DMA segments")); 1553 bus_dmamap_sync(sc->fxp_txmtag, txp->tx_map, BUS_DMASYNC_PREWRITE); 1554 1555 cbp = txp->tx_cb; 1556 for (i = 0; i < nseg; i++) { 1557 /* 1558 * If this is an 82550/82551, then we're using extended 1559 * TxCBs _and_ we're using checksum offload. This means 1560 * that the TxCB is really an IPCB. One major difference 1561 * between the two is that with plain extended TxCBs, 1562 * the bottom half of the TxCB contains two entries from 1563 * the TBD array, whereas IPCBs contain just one entry: 1564 * one entry (8 bytes) has been sacrificed for the TCP/IP 1565 * checksum offload control bits. 
So to make things work 1566 * right, we have to start filling in the TBD array 1567 * starting from a different place depending on whether 1568 * the chip is an 82550/82551 or not. 1569 */ 1570 if (sc->flags & FXP_FLAG_EXT_RFA) { 1571 cbp->tbd[i + 1].tb_addr = htole32(segs[i].ds_addr); 1572 cbp->tbd[i + 1].tb_size = htole32(segs[i].ds_len); 1573 } else { 1574 cbp->tbd[i].tb_addr = htole32(segs[i].ds_addr); 1575 cbp->tbd[i].tb_size = htole32(segs[i].ds_len); 1576 } 1577 } 1578 if (sc->flags & FXP_FLAG_EXT_RFA) { 1579 /* Configure dynamic TBD for 82550/82551. */ 1580 cbp->tbd_number = 0xFF; 1581 cbp->tbd[nseg].tb_size |= htole32(0x8000); 1582 } else 1583 cbp->tbd_number = nseg; 1584 /* Configure TSO. */ 1585 if (m->m_pkthdr.csum_flags & CSUM_TSO) { 1586 cbp->tbd[-1].tb_size = htole32(m->m_pkthdr.tso_segsz << 16); 1587 cbp->tbd[1].tb_size |= htole32(tcp_payload << 16); 1588 cbp->ipcb_ip_schedule |= FXP_IPCB_LARGESEND_ENABLE | 1589 FXP_IPCB_IP_CHECKSUM_ENABLE | 1590 FXP_IPCB_TCP_PACKET | 1591 FXP_IPCB_TCPUDP_CHECKSUM_ENABLE; 1592 } 1593 /* Configure VLAN hardware tag insertion. */ 1594 if ((m->m_flags & M_VLANTAG) != 0) { 1595 cbp->ipcb_vlan_id = htons(m->m_pkthdr.ether_vtag); 1596 txp->tx_cb->ipcb_ip_activation_high |= 1597 FXP_IPCB_INSERTVLAN_ENABLE; 1598 } 1599 1600 txp->tx_mbuf = m; 1601 txp->tx_cb->cb_status = 0; 1602 txp->tx_cb->byte_count = 0; 1603 if (sc->tx_queued != FXP_CXINT_THRESH - 1) 1604 txp->tx_cb->cb_command = 1605 htole16(sc->tx_cmd | FXP_CB_COMMAND_SF | 1606 FXP_CB_COMMAND_S); 1607 else 1608 txp->tx_cb->cb_command = 1609 htole16(sc->tx_cmd | FXP_CB_COMMAND_SF | 1610 FXP_CB_COMMAND_S | FXP_CB_COMMAND_I); 1611 if ((m->m_pkthdr.csum_flags & CSUM_TSO) == 0) 1612 txp->tx_cb->tx_threshold = tx_threshold; 1613 1614 /* 1615 * Advance the end of list forward. 
1616 */ 1617 1618#ifdef __alpha__ 1619 /* 1620 * On platforms which can't access memory in 16-bit 1621 * granularities, we must prevent the card from DMA'ing 1622 * up the status while we update the command field. 1623 * This could cause us to overwrite the completion status. 1624 * XXX This is probably bogus and we're _not_ looking 1625 * for atomicity here. 1626 */ 1627 atomic_clear_16(&sc->fxp_desc.tx_last->tx_cb->cb_command, 1628 htole16(FXP_CB_COMMAND_S)); 1629#else 1630 sc->fxp_desc.tx_last->tx_cb->cb_command &= htole16(~FXP_CB_COMMAND_S); 1631#endif /*__alpha__*/ 1632 sc->fxp_desc.tx_last = txp; 1633 1634 /* 1635 * Advance the beginning of the list forward if there are 1636 * no other packets queued (when nothing is queued, tx_first 1637 * sits on the last TxCB that was sent out). 1638 */ 1639 if (sc->tx_queued == 0) 1640 sc->fxp_desc.tx_first = txp; 1641 1642 sc->tx_queued++; 1643 1644 return (0); 1645} 1646 1647#ifdef DEVICE_POLLING 1648static poll_handler_t fxp_poll; 1649 1650static int 1651fxp_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 1652{ 1653 struct fxp_softc *sc = ifp->if_softc; 1654 uint8_t statack; 1655 int rx_npkts = 0; 1656 1657 FXP_LOCK(sc); 1658 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 1659 FXP_UNLOCK(sc); 1660 return (rx_npkts); 1661 } 1662 1663 statack = FXP_SCB_STATACK_CXTNO | FXP_SCB_STATACK_CNA | 1664 FXP_SCB_STATACK_FR; 1665 if (cmd == POLL_AND_CHECK_STATUS) { 1666 uint8_t tmp; 1667 1668 tmp = CSR_READ_1(sc, FXP_CSR_SCB_STATACK); 1669 if (tmp == 0xff || tmp == 0) { 1670 FXP_UNLOCK(sc); 1671 return (rx_npkts); /* nothing to do */ 1672 } 1673 tmp &= ~statack; 1674 /* ack what we can */ 1675 if (tmp != 0) 1676 CSR_WRITE_1(sc, FXP_CSR_SCB_STATACK, tmp); 1677 statack |= tmp; 1678 } 1679 rx_npkts = fxp_intr_body(sc, ifp, statack, count); 1680 FXP_UNLOCK(sc); 1681 return (rx_npkts); 1682} 1683#endif /* DEVICE_POLLING */ 1684 1685/* 1686 * Process interface interrupts. 
 */
static void
fxp_intr(void *xsc)
{
	struct fxp_softc *sc = xsc;
	struct ifnet *ifp = sc->ifp;
	uint8_t statack;

	FXP_LOCK(sc);
	if (sc->suspended) {
		FXP_UNLOCK(sc);
		return;
	}

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING) {
		FXP_UNLOCK(sc);
		return;
	}
#endif
	while ((statack = CSR_READ_1(sc, FXP_CSR_SCB_STATACK)) != 0) {
		/*
		 * It should not be possible to have all bits set; the
		 * FXP_SCB_INTR_SWI bit always returns 0 on a read. If
		 * all bits are set, this may indicate that the card has
		 * been physically ejected, so ignore it.
		 */
		if (statack == 0xff) {
			FXP_UNLOCK(sc);
			return;
		}

		/*
		 * First ACK all the interrupts in this pass.
		 */
		CSR_WRITE_1(sc, FXP_CSR_SCB_STATACK, statack);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			fxp_intr_body(sc, ifp, statack, -1);
	}
	FXP_UNLOCK(sc);
}

/*
 * Reclaim all completed transmit control blocks: unload and free
 * their mbufs and advance tx_first past them.
 */
static void
fxp_txeof(struct fxp_softc *sc)
{
	struct ifnet *ifp;
	struct fxp_tx *txp;

	ifp = sc->ifp;
	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	for (txp = sc->fxp_desc.tx_first; sc->tx_queued &&
	    (le16toh(txp->tx_cb->cb_status) & FXP_CB_STATUS_C) != 0;
	    txp = txp->tx_next) {
		if (txp->tx_mbuf != NULL) {
			bus_dmamap_sync(sc->fxp_txmtag, txp->tx_map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->fxp_txmtag, txp->tx_map);
			m_freem(txp->tx_mbuf);
			txp->tx_mbuf = NULL;
			/* clear this to reset csum offload bits */
			txp->tx_cb->tbd[0].tb_addr = 0;
		}
		sc->tx_queued--;
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	}
	sc->fxp_desc.tx_first = txp;
	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	if (sc->tx_queued == 0)
		sc->watchdog_timer = 0;
}

/*
 * Fill in receive checksum-offload results for `m'.  For pre-82559
 * parts the parse/checksum bits in `status' are used directly; for
 * 82559 RX checksum mode the raw 16-bit checksum appended to the
 * frame (at byte offset `pos') is validated against the packet
 * headers and stored for the stack.
 */
static void
fxp_rxcsum(struct fxp_softc *sc, struct ifnet *ifp, struct mbuf *m,
    uint16_t status, int pos)
{
	struct ether_header *eh;
	struct ip *ip;
	struct udphdr *uh;
	int32_t hlen, len, pktlen, temp32;
	uint16_t csum, *opts;

	if ((sc->flags & FXP_FLAG_82559_RXCSUM) == 0) {
		if ((status & FXP_RFA_STATUS_PARSE) != 0) {
			if (status & FXP_RFDX_CS_IP_CSUM_BIT_VALID)
				m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
			if (status & FXP_RFDX_CS_IP_CSUM_VALID)
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
			if ((status & FXP_RFDX_CS_TCPUDP_CSUM_BIT_VALID) &&
			    (status & FXP_RFDX_CS_TCPUDP_CSUM_VALID)) {
				m->m_pkthdr.csum_flags |= CSUM_DATA_VALID |
				    CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}
		return;
	}

	/* Sanity-check the frame: only unfragmented IPv4 TCP/UDP handled. */
	pktlen = m->m_pkthdr.len;
	if (pktlen < sizeof(struct ether_header) + sizeof(struct ip))
		return;
	eh = mtod(m, struct ether_header *);
	if (eh->ether_type != htons(ETHERTYPE_IP))
		return;
	ip = (struct ip *)(eh + 1);
	if (ip->ip_v != IPVERSION)
		return;

	hlen = ip->ip_hl << 2;
	pktlen -= sizeof(struct ether_header);
	if (hlen < sizeof(struct ip))
		return;
	if (ntohs(ip->ip_len) < hlen)
		return;
	if (ntohs(ip->ip_len) != pktlen)
		return;
	if (ip->ip_off & htons(IP_MF | IP_OFFMASK))
		return;	/* can't handle fragmented packet */

	switch (ip->ip_p) {
	case IPPROTO_TCP:
		if (pktlen < (hlen + sizeof(struct tcphdr)))
			return;
		break;
	case IPPROTO_UDP:
		if (pktlen < (hlen + sizeof(struct udphdr)))
			return;
		uh = (struct udphdr *)((caddr_t)ip + hlen);
		if (uh->uh_sum == 0)
			return;	/* no checksum */
		break;
	default:
		return;
	}
	/* Extract computed checksum. */
	csum = be16dec(mtod(m, char *) + pos);
	/*
	 * Checksum fixup for IP options: the hardware checksum covered
	 * the option words too, so subtract them back out using
	 * one's-complement arithmetic.
	 */
	len = hlen - sizeof(struct ip);
	if (len > 0) {
		opts = (uint16_t *)(ip + 1);
		for (; len > 0; len -= sizeof(uint16_t), opts++) {
			temp32 = csum - *opts;
			temp32 = (temp32 >> 16) + (temp32 & 65535);
			csum = temp32 & 65535;
		}
	}
	m->m_pkthdr.csum_flags |= CSUM_DATA_VALID;
	m->m_pkthdr.csum_data = csum;
}

/*
 * Main interrupt/poll service routine.  Returns the number of
 * packets passed up to the stack.  `count' bounds the RX work when
 * polling (count >= 0); -1 means no bound.
 */
static int
fxp_intr_body(struct fxp_softc *sc, struct ifnet *ifp, uint8_t statack,
    int count)
{
	struct mbuf *m;
	struct fxp_rx *rxp;
	struct fxp_rfa *rfa;
	int rnr = (statack & FXP_SCB_STATACK_RNR) ? 1 : 0;
	int rx_npkts;
	uint16_t status;

	rx_npkts = 0;
	FXP_LOCK_ASSERT(sc, MA_OWNED);

	if (rnr)
		sc->rnr++;
#ifdef DEVICE_POLLING
	/* Pick up a deferred RNR condition if `count' ran out last time. */
	if (sc->flags & FXP_FLAG_DEFERRED_RNR) {
		sc->flags &= ~FXP_FLAG_DEFERRED_RNR;
		rnr = 1;
	}
#endif

	/*
	 * Free any finished transmit mbuf chains.
	 *
	 * Handle the CNA event like a CXTNO event. It used to
	 * be that this event (control unit not ready) was not
	 * encountered, but it is now with the SMPng modifications.
	 * The exact sequence of events that occur when the interface
	 * is brought up are different now, and if this event
	 * goes unhandled, the configuration/rxfilter setup sequence
	 * can stall for several seconds. The result is that no
	 * packets go out onto the wire for about 5 to 10 seconds
	 * after the interface is ifconfig'ed for the first time.
	 */
	if (statack & (FXP_SCB_STATACK_CXTNO | FXP_SCB_STATACK_CNA))
		fxp_txeof(sc);

	/*
	 * Try to start more packets transmitting.
	 */
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		fxp_start_body(ifp);

	/*
	 * Just return if nothing happened on the receive side.
 */
	if (!rnr && (statack & FXP_SCB_STATACK_FR) == 0)
		return (rx_npkts);

	/*
	 * Process receiver interrupts. If a no-resource (RNR)
	 * condition exists, get whatever packets we can and
	 * re-start the receiver.
	 *
	 * When using polling, we do not process the list to completion,
	 * so when we get an RNR interrupt we must defer the restart
	 * until we hit the last buffer with the C bit set.
	 * If we run out of cycles and rfa_headm has the C bit set,
	 * record the pending RNR in the FXP_FLAG_DEFERRED_RNR flag so
	 * that the info will be used in the subsequent polling cycle.
	 */
	for (;;) {
		rxp = sc->fxp_desc.rx_head;
		m = rxp->rx_mbuf;
		/* The RFA lives at the front of the receive buffer itself. */
		rfa = (struct fxp_rfa *)(m->m_ext.ext_buf +
		    RFA_ALIGNMENT_FUDGE);
		bus_dmamap_sync(sc->fxp_rxmtag, rxp->rx_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

#ifdef DEVICE_POLLING /* loop at most count times if count >=0 */
		if (count >= 0 && count-- == 0) {
			if (rnr) {
				/* Defer RNR processing until the next time. */
				sc->flags |= FXP_FLAG_DEFERRED_RNR;
				rnr = 0;
			}
			break;
		}
#endif /* DEVICE_POLLING */

		status = le16toh(rfa->rfa_status);
		if ((status & FXP_RFA_STATUS_C) == 0)
			break;

		if ((status & FXP_RFA_STATUS_RNR) != 0)
			rnr++;
		/*
		 * Advance head forward.
		 */
		sc->fxp_desc.rx_head = rxp->rx_next;

		/*
		 * Add a new buffer to the receive chain.
		 * If this fails, the old buffer is recycled
		 * instead.
		 */
		if (fxp_new_rfabuf(sc, rxp) == 0) {
			int total_len;

			/*
			 * Fetch packet length (the top 2 bits of
			 * actual_size are flags set by the controller
			 * upon completion), and drop the packet in case
			 * of bogus length or CRC errors.
			 */
			total_len = le16toh(rfa->actual_size) & 0x3fff;
			if ((sc->flags & FXP_FLAG_82559_RXCSUM) != 0 &&
			    (ifp->if_capenable & IFCAP_RXCSUM) != 0) {
				/* Adjust for appended checksum bytes. */
				total_len -= 2;
			}
			if (total_len < sizeof(struct ether_header) ||
			    total_len > (MCLBYTES - RFA_ALIGNMENT_FUDGE -
			    sc->rfa_size) ||
			    status & (FXP_RFA_STATUS_CRC |
			    FXP_RFA_STATUS_ALIGN)) {
				m_freem(m);
				fxp_add_rfabuf(sc, rxp);
				continue;
			}

			m->m_pkthdr.len = m->m_len = total_len;
			m->m_pkthdr.rcvif = ifp;

			/* Do IP checksum checking. */
			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
				fxp_rxcsum(sc, ifp, m, status, total_len);
			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
			    (status & FXP_RFA_STATUS_VLAN) != 0) {
				m->m_pkthdr.ether_vtag =
				    ntohs(rfa->rfax_vlan_id);
				m->m_flags |= M_VLANTAG;
			}
			/*
			 * Drop locks before calling if_input() since it
			 * may re-enter fxp_start() in the netisr case.
			 * This would result in a lock reversal. Better
			 * performance might be obtained by chaining all
			 * packets received, dropping the lock, and then
			 * calling if_input() on each one.
			 */
			FXP_UNLOCK(sc);
			(*ifp->if_input)(ifp, m);
			FXP_LOCK(sc);
			rx_npkts++;
			/* The interface may have been stopped while unlocked. */
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
				return (rx_npkts);
		} else {
			/* Reuse RFA and loaded DMA map. */
			ifp->if_iqdrops++;
			fxp_discard_rfabuf(sc, rxp);
		}
		fxp_add_rfabuf(sc, rxp);
	}
	if (rnr) {
		/* Restart the receive unit at the new ring head. */
		fxp_scb_wait(sc);
		CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL,
		    sc->fxp_desc.rx_head->rx_addr);
		fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_START);
	}
	return (rx_npkts);
}

/*
 * Fold the most recent dump-stats DMA area into the accumulated
 * hardware statistics and interface counters, then re-arm the area.
 * Called with the softc lock held.
 */
static void
fxp_update_stats(struct fxp_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	struct fxp_stats *sp = sc->fxp_stats;
	struct fxp_hwstats *hsp;
	uint32_t *status;

	FXP_LOCK_ASSERT(sc, MA_OWNED);

	bus_dmamap_sync(sc->fxp_stag, sc->fxp_smap,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	/*
	 * Update statistical counters.  The location of the completion
	 * word depends on how much of the stats area this chip
	 * revision actually dumps.
	 */
	if (sc->revision >= FXP_REV_82559_A0)
		status = &sp->completion_status;
	else if (sc->revision >= FXP_REV_82558_A4)
		status = (uint32_t *)&sp->tx_tco;
	else
		status = &sp->tx_pause;
	if (*status == htole32(FXP_STATS_DR_COMPLETE)) {
		hsp = &sc->fxp_hwstats;
		hsp->tx_good += le32toh(sp->tx_good);
		hsp->tx_maxcols += le32toh(sp->tx_maxcols);
		hsp->tx_latecols += le32toh(sp->tx_latecols);
		hsp->tx_underruns += le32toh(sp->tx_underruns);
		hsp->tx_lostcrs += le32toh(sp->tx_lostcrs);
		hsp->tx_deffered += le32toh(sp->tx_deffered);
		hsp->tx_single_collisions += le32toh(sp->tx_single_collisions);
		hsp->tx_multiple_collisions +=
		    le32toh(sp->tx_multiple_collisions);
		hsp->tx_total_collisions += le32toh(sp->tx_total_collisions);
		hsp->rx_good += le32toh(sp->rx_good);
		hsp->rx_crc_errors += le32toh(sp->rx_crc_errors);
		hsp->rx_alignment_errors += le32toh(sp->rx_alignment_errors);
		hsp->rx_rnr_errors += le32toh(sp->rx_rnr_errors);
		hsp->rx_overrun_errors += le32toh(sp->rx_overrun_errors);
		hsp->rx_cdt_errors += le32toh(sp->rx_cdt_errors);
		hsp->rx_shortframes += le32toh(sp->rx_shortframes);
		hsp->tx_pause += le32toh(sp->tx_pause);
		hsp->rx_pause += le32toh(sp->rx_pause);
		hsp->rx_controls += le32toh(sp->rx_controls);
		hsp->tx_tco += le16toh(sp->tx_tco);
		hsp->rx_tco += le16toh(sp->rx_tco);

		ifp->if_opackets += le32toh(sp->tx_good);
		ifp->if_collisions += le32toh(sp->tx_total_collisions);
		if (sp->rx_good) {
			ifp->if_ipackets += le32toh(sp->rx_good);
			sc->rx_idle_secs = 0;
		} else if (sc->flags & FXP_FLAG_RXBUG) {
			/*
			 * Receiver's been idle for another second.
			 */
			sc->rx_idle_secs++;
		}
		ifp->if_ierrors +=
		    le32toh(sp->rx_crc_errors) +
		    le32toh(sp->rx_alignment_errors) +
		    le32toh(sp->rx_rnr_errors) +
		    le32toh(sp->rx_overrun_errors);
		/*
		 * If any transmit underruns occurred, bump up the transmit
		 * threshold by another 512 bytes (64 * 8).
		 */
		if (sp->tx_underruns) {
			ifp->if_oerrors += le32toh(sp->tx_underruns);
			if (tx_threshold < 192)
				tx_threshold += 64;
		}
		/* Re-arm the dump area for the next stats DMA. */
		*status = 0;
		bus_dmamap_sync(sc->fxp_stag, sc->fxp_smap,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}

/*
 * Update packet in/out/collision statistics. The i82557 doesn't
 * allow you to access these counters without doing a fairly
 * expensive DMA to get _all_ of the statistics it maintains, so
 * we do this operation here only once per second. The statistics
 * counters in the kernel are updated from the previous dump-stats
 * DMA and then a new dump-stats DMA is started. The on-chip
 * counters are zeroed when the DMA completes. If we can't start
 * the DMA immediately, we don't wait - we just prepare to read
 * them again next time.
 */
static void
fxp_tick(void *xsc)
{
	struct fxp_softc *sc = xsc;
	struct ifnet *ifp = sc->ifp;

	FXP_LOCK_ASSERT(sc, MA_OWNED);

	/* Update statistical counters. */
	fxp_update_stats(sc);

	/*
	 * Release any xmit buffers that have completed DMA. This isn't
	 * strictly necessary to do here, but it's advantageous for mbufs
	 * with external storage to be released in a timely manner rather
	 * than being deferred for a potentially long time. This limits
	 * the delay to a maximum of one second.
	 */
	fxp_txeof(sc);

	/*
	 * If we haven't received any packets in FXP_MAC_RX_IDLE seconds,
	 * then assume the receiver has locked up and attempt to clear
	 * the condition by reprogramming the multicast filter. This is
	 * a work-around for a bug in the 82557 where the receiver locks
	 * up if it gets certain types of garbage in the synchronization
	 * bits prior to the packet header. This bug is supposed to only
	 * occur in 10Mbps mode, but has been seen to occur in 100Mbps
	 * mode as well (perhaps due to a 10/100 speed transition).
	 */
	if (sc->rx_idle_secs > FXP_MAX_RX_IDLE) {
		sc->rx_idle_secs = 0;
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			fxp_init_body(sc);
		return;
	}
	/*
	 * If there is no pending command, start another stats
	 * dump. Otherwise punt for now.
	 */
	if (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) == 0) {
		/*
		 * Start another stats dump.
		 */
		fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_DUMPRESET);
	}
	if (sc->miibus != NULL)
		mii_tick(device_get_softc(sc->miibus));

	/*
	 * Check that chip hasn't hung.
	 */
	fxp_watchdog(sc);

	/*
	 * Schedule another timeout one second from now.
	 */
	callout_reset(&sc->stat_ch, hz, fxp_tick, sc);
}

/*
 * Stop the interface. Cancels the statistics updater and resets
 * the interface.
 */
static void
fxp_stop(struct fxp_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	struct fxp_tx *txp;
	int i;

	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->watchdog_timer = 0;

	/*
	 * Cancel stats updater.
	 */
	callout_stop(&sc->stat_ch);

	/*
	 * Preserve PCI configuration, configure, IA/multicast
	 * setup and put RU and CU into idle state.
	 */
	CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET);
	DELAY(50);
	/* Disable interrupts. */
	CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_DISABLE);

	/* Harvest the final statistics dump before the chip is idled. */
	fxp_update_stats(sc);

	/*
	 * Release any xmit buffers.
	 */
	txp = sc->fxp_desc.tx_list;
	if (txp != NULL) {
		for (i = 0; i < FXP_NTXCB; i++) {
			if (txp[i].tx_mbuf != NULL) {
				bus_dmamap_sync(sc->fxp_txmtag, txp[i].tx_map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(sc->fxp_txmtag,
				    txp[i].tx_map);
				m_freem(txp[i].tx_mbuf);
				txp[i].tx_mbuf = NULL;
				/* clear this to reset csum offload bits */
				txp[i].tx_cb->tbd[0].tb_addr = 0;
			}
		}
	}
	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	sc->tx_queued = 0;
}

/*
 * Watchdog/transmission transmit timeout handler. Called when a
 * transmission is started on the interface, but no interrupt is
 * received before the timeout. This usually indicates that the
 * card has wedged for some reason.
 */
static void
fxp_watchdog(struct fxp_softc *sc)
{

	FXP_LOCK_ASSERT(sc, MA_OWNED);

	/* Fire only when an armed timer has just counted down to zero. */
	if (sc->watchdog_timer == 0 || --sc->watchdog_timer)
		return;

	device_printf(sc->dev, "device timeout\n");
	sc->ifp->if_oerrors++;

	/* Reset and reinitialize the interface to recover. */
	fxp_init_body(sc);
}

/*
 * Acquire locks and then call the real initialization function. This
 * is necessary because ether_ioctl() calls if_init() and this would
 * result in mutex recursion if the mutex was held.
 */
static void
fxp_init(void *xsc)
{
	struct fxp_softc *sc = xsc;

	FXP_LOCK(sc);
	fxp_init_body(sc);
	FXP_UNLOCK(sc);
}

/*
 * Perform device initialization.
 * This routine must be called with the
 * softc lock held.
 */
static void
fxp_init_body(struct fxp_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	struct fxp_cb_config *cbp;
	struct fxp_cb_ias *cb_ias;
	struct fxp_cb_tx *tcbp;
	struct fxp_tx *txp;
	int i, prm;

	FXP_LOCK_ASSERT(sc, MA_OWNED);
	/*
	 * Cancel any pending I/O
	 */
	fxp_stop(sc);

	/*
	 * Issue software reset, which also unloads the microcode.
	 */
	sc->flags &= ~FXP_FLAG_UCODE;
	CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SOFTWARE_RESET);
	DELAY(50);

	prm = (ifp->if_flags & IFF_PROMISC) ? 1 : 0;

	/*
	 * Initialize base of CBL and RFA memory. Loading with zero
	 * sets it up for regular linear addressing.
	 */
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, 0);
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_BASE);

	fxp_scb_wait(sc);
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_BASE);

	/*
	 * Initialize base of dump-stats buffer.
	 */
	fxp_scb_wait(sc);
	bzero(sc->fxp_stats, sizeof(struct fxp_stats));
	bus_dmamap_sync(sc->fxp_stag, sc->fxp_smap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->stats_addr);
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_DUMP_ADR);

	/*
	 * Attempt to load microcode if requested.
	 * For ICH based controllers do not load microcode.
	 */
	if (sc->ident->ich == 0) {
		if (ifp->if_flags & IFF_LINK0 &&
		    (sc->flags & FXP_FLAG_UCODE) == 0)
			fxp_load_ucode(sc);
	}

	/*
	 * Set IFF_ALLMULTI status. It's needed in configure action
	 * command.
	 */
	fxp_mc_addrs(sc);

	/*
	 * We temporarily use memory that contains the TxCB list to
	 * construct the config CB. The TxCB list memory is rebuilt
	 * later.
	 */
	cbp = (struct fxp_cb_config *)sc->fxp_desc.cbl_list;

	/*
	 * This bcopy is kind of disgusting, but there are a bunch of must be
	 * zero and must be one bits in this structure and this is the easiest
	 * way to initialize them all to proper values.
	 */
	bcopy(fxp_cb_config_template, cbp, sizeof(fxp_cb_config_template));

	cbp->cb_status = 0;
	cbp->cb_command = htole16(FXP_CB_COMMAND_CONFIG |
	    FXP_CB_COMMAND_EL);
	cbp->link_addr = 0xffffffff;	/* (no) next command */
	/* Extended RFA mode needs the larger 32-byte configuration. */
	cbp->byte_count = sc->flags & FXP_FLAG_EXT_RFA ? 32 : 22;
	cbp->rx_fifo_limit = 8;		/* rx fifo threshold (32 bytes) */
	cbp->tx_fifo_limit = 0;		/* tx fifo threshold (0 bytes) */
	cbp->adaptive_ifs = 0;		/* (no) adaptive interframe spacing */
	cbp->mwi_enable = sc->flags & FXP_FLAG_MWI_ENABLE ? 1 : 0;
	cbp->type_enable = 0;		/* actually reserved */
	cbp->read_align_en = sc->flags & FXP_FLAG_READ_ALIGN ? 1 : 0;
	cbp->end_wr_on_cl = sc->flags & FXP_FLAG_WRITE_ALIGN ? 1 : 0;
	cbp->rx_dma_bytecount = 0;	/* (no) rx DMA max */
	cbp->tx_dma_bytecount = 0;	/* (no) tx DMA max */
	cbp->dma_mbce = 0;		/* (disable) dma max counters */
	cbp->late_scb = 0;		/* (don't) defer SCB update */
	cbp->direct_dma_dis = 1;	/* disable direct rcv dma mode */
	cbp->tno_int_or_tco_en = 0;	/* (disable) tx not okay interrupt */
	cbp->ci_int = 1;		/* interrupt on CU idle */
	cbp->ext_txcb_dis = sc->flags & FXP_FLAG_EXT_TXCB ? 0 : 1;
	cbp->ext_stats_dis = 1;		/* disable extended counters */
	cbp->keep_overrun_rx = 0;	/* don't pass overrun frames to host */
	cbp->save_bf = sc->flags & FXP_FLAG_SAVE_BAD ? 1 : prm;
	cbp->disc_short_rx = !prm;	/* discard short packets */
	cbp->underrun_retry = 1;	/* retry mode (once) on DMA underrun */
	cbp->two_frames = 0;		/* do not limit FIFO to 2 frames */
	cbp->dyn_tbd = sc->flags & FXP_FLAG_EXT_RFA ? 1 : 0;
	cbp->ext_rfa = sc->flags & FXP_FLAG_EXT_RFA ? 1 : 0;
	cbp->mediatype = sc->flags & FXP_FLAG_SERIAL_MEDIA ? 0 : 1;
	cbp->csma_dis = 0;		/* (don't) disable link */
	cbp->tcp_udp_cksum = ((sc->flags & FXP_FLAG_82559_RXCSUM) != 0 &&
	    (ifp->if_capenable & IFCAP_RXCSUM) != 0) ? 1 : 0;
	cbp->vlan_tco = 0;		/* (don't) enable vlan wakeup */
	cbp->link_wake_en = 0;		/* (don't) assert PME# on link change */
	cbp->arp_wake_en = 0;		/* (don't) assert PME# on arp */
	cbp->mc_wake_en = 0;		/* (don't) enable PME# on mcmatch */
	cbp->nsai = 1;			/* (don't) disable source addr insert */
	cbp->preamble_length = 2;	/* (7 byte) preamble */
	cbp->loopback = 0;		/* (don't) loopback */
	cbp->linear_priority = 0;	/* (normal CSMA/CD operation) */
	cbp->linear_pri_mode = 0;	/* (wait after xmit only) */
	cbp->interfrm_spacing = 6;	/* (96 bits of) interframe spacing */
	cbp->promiscuous = prm;		/* promiscuous mode */
	cbp->bcast_disable = 0;		/* (don't) disable broadcasts */
	cbp->wait_after_win = 0;	/* (don't) enable modified backoff alg*/
	cbp->ignore_ul = 0;		/* consider U/L bit in IA matching */
	cbp->crc16_en = 0;		/* (don't) enable crc-16 algorithm */
	cbp->crscdt = sc->flags & FXP_FLAG_SERIAL_MEDIA ? 1 : 0;

	cbp->stripping = !prm;		/* truncate rx packet to byte count */
	cbp->padding = 1;		/* (do) pad short tx packets */
	cbp->rcv_crc_xfer = 0;		/* (don't) xfer CRC to host */
	cbp->long_rx_en = sc->flags & FXP_FLAG_LONG_PKT_EN ? 1 : 0;
	cbp->ia_wake_en = 0;		/* (don't) wake up on address match */
	cbp->magic_pkt_dis = sc->flags & FXP_FLAG_WOL ? 0 : 1;
	cbp->force_fdx = 0;		/* (don't) force full duplex */
	cbp->fdx_pin_en = 1;		/* (enable) FDX# pin */
	cbp->multi_ia = 0;		/* (don't) accept multiple IAs */
	cbp->mc_all = ifp->if_flags & IFF_ALLMULTI ? 1 : prm;
	cbp->gamla_rx = sc->flags & FXP_FLAG_EXT_RFA ? 1 : 0;
	cbp->vlan_strip_en = ((sc->flags & FXP_FLAG_EXT_RFA) != 0 &&
	    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) ? 1 : 0;

	if (sc->tunable_noflow || sc->revision == FXP_REV_82557) {
		/*
		 * The 82557 has no hardware flow control, the values
		 * below are the defaults for the chip.
		 */
		cbp->fc_delay_lsb = 0;
		cbp->fc_delay_msb = 0x40;
		cbp->pri_fc_thresh = 3;
		cbp->tx_fc_dis = 0;
		cbp->rx_fc_restop = 0;
		cbp->rx_fc_restart = 0;
		cbp->fc_filter = 0;
		cbp->pri_fc_loc = 1;
	} else {
		cbp->fc_delay_lsb = 0x1f;
		cbp->fc_delay_msb = 0x01;
		cbp->pri_fc_thresh = 3;
		cbp->tx_fc_dis = 0;	/* enable transmit FC */
		cbp->rx_fc_restop = 1;	/* enable FC restop frames */
		cbp->rx_fc_restart = 1;	/* enable FC restart frames */
		cbp->fc_filter = !prm;	/* drop FC frames to host */
		cbp->pri_fc_loc = 1;	/* FC pri location (byte31) */
	}

	/* Enable 82558 and 82559 extended statistics functionality. */
	if (sc->revision >= FXP_REV_82558_A4) {
		if (sc->revision >= FXP_REV_82559_A0) {
			/*
			 * Extend configuration table size to 32
			 * to include TCO configuration.
			 */
			cbp->byte_count = 32;
			cbp->ext_stats_dis = 1;
			/* Enable TCO stats. */
			cbp->tno_int_or_tco_en = 1;
			cbp->gamla_rx = 1;
		} else
			cbp->ext_stats_dis = 0;
	}

	/*
	 * Start the config command/DMA.
	 */
	fxp_scb_wait(sc);
	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->fxp_desc.cbl_addr);
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
	/* ...and wait for it to complete. */
	fxp_dma_wait(sc, &cbp->cb_status, sc->cbl_tag, sc->cbl_map);

	/*
	 * Now initialize the station address. Temporarily use the TxCB
	 * memory area like we did above for the config CB.
	 */
	cb_ias = (struct fxp_cb_ias *)sc->fxp_desc.cbl_list;
	cb_ias->cb_status = 0;
	cb_ias->cb_command = htole16(FXP_CB_COMMAND_IAS | FXP_CB_COMMAND_EL);
	cb_ias->link_addr = 0xffffffff;
	bcopy(IF_LLADDR(sc->ifp), cb_ias->macaddr, ETHER_ADDR_LEN);

	/*
	 * Start the IAS (Individual Address Setup) command/DMA.
	 */
	fxp_scb_wait(sc);
	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->fxp_desc.cbl_addr);
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
	/* ...and wait for it to complete. */
	fxp_dma_wait(sc, &cb_ias->cb_status, sc->cbl_tag, sc->cbl_map);

	/*
	 * Initialize the multicast address list.
	 */
	fxp_mc_setup(sc);

	/*
	 * Initialize transmit control block (TxCB) list.
	 */
	txp = sc->fxp_desc.tx_list;
	tcbp = sc->fxp_desc.cbl_list;
	bzero(tcbp, FXP_TXCB_SZ);
	for (i = 0; i < FXP_NTXCB; i++) {
		txp[i].tx_mbuf = NULL;
		tcbp[i].cb_status = htole16(FXP_CB_STATUS_C | FXP_CB_STATUS_OK);
		tcbp[i].cb_command = htole16(FXP_CB_COMMAND_NOP);
		/* Link each TxCB to the next, wrapping at FXP_NTXCB. */
		tcbp[i].link_addr = htole32(sc->fxp_desc.cbl_addr +
		    (((i + 1) & FXP_TXCB_MASK) * sizeof(struct fxp_cb_tx)));
		if (sc->flags & FXP_FLAG_EXT_TXCB)
			tcbp[i].tbd_array_addr =
			    htole32(FXP_TXCB_DMA_ADDR(sc, &tcbp[i].tbd[2]));
		else
			tcbp[i].tbd_array_addr =
			    htole32(FXP_TXCB_DMA_ADDR(sc, &tcbp[i].tbd[0]));
		txp[i].tx_next = &txp[(i + 1) & FXP_TXCB_MASK];
	}
	/*
	 * Set the suspend flag on the first TxCB and start the control
	 * unit. It will execute the NOP and then suspend.
	 */
	tcbp->cb_command = htole16(FXP_CB_COMMAND_NOP | FXP_CB_COMMAND_S);
	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	sc->fxp_desc.tx_first = sc->fxp_desc.tx_last = txp;
	sc->tx_queued = 1;

	fxp_scb_wait(sc);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->fxp_desc.cbl_addr);
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);

	/*
	 * Initialize receiver buffer area - RFA.
	 */
	fxp_scb_wait(sc);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->fxp_desc.rx_head->rx_addr);
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_START);

	/*
	 * Set current media.
	 */
	if (sc->miibus != NULL)
		mii_mediachg(device_get_softc(sc->miibus));

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	/*
	 * Enable interrupts.
	 */
#ifdef DEVICE_POLLING
	/*
	 * ... but only do that if we are not polling. And because (presumably)
	 * the default is interrupts on, we need to disable them explicitly!
	 */
	if (ifp->if_capenable & IFCAP_POLLING )
		CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_DISABLE);
	else
#endif /* DEVICE_POLLING */
	CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, 0);

	/*
	 * Start stats updater.
	 */
	callout_reset(&sc->stat_ch, hz, fxp_tick, sc);
}

/*
 * ifmedia "set" callback for serial-media (non-MII) boards: there is
 * nothing to program, so simply report success.
 */
static int
fxp_serial_ifmedia_upd(struct ifnet *ifp)
{

	return (0);
}

/*
 * ifmedia "status" callback for serial-media (non-MII) boards: the
 * media is fixed, so always report manual Ethernet.
 */
static void
fxp_serial_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{

	ifmr->ifm_active = IFM_ETHER|IFM_MANUAL;
}

/*
 * Change media according to request.
 */
static int
fxp_ifmedia_upd(struct ifnet *ifp)
{
	struct fxp_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc->miibus);
	FXP_LOCK(sc);
	if (mii->mii_instance) {
		struct mii_softc *miisc;
		/* Multiple PHYs present: reset each before the change. */
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);
	FXP_UNLOCK(sc);
	return (0);
}

/*
 * Notify the world which media we're using.
 */
static void
fxp_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct fxp_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc->miibus);
	FXP_LOCK(sc);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	/* Track whether the CU-resume workaround applies at 10baseT. */
	if (IFM_SUBTYPE(ifmr->ifm_active) == IFM_10_T &&
	    sc->flags & FXP_FLAG_CU_RESUME_BUG)
		sc->cu_resume_bug = 1;
	else
		sc->cu_resume_bug = 0;
	FXP_UNLOCK(sc);
}

/*
 * Add a buffer to the end of the RFA buffer list.
 * Return 0 if successful, 1 for failure. A failure results in
 * reusing the RFA buffer.
 * The RFA struct is stuck at the beginning of mbuf cluster and the
 * data pointer is fixed up to point just past it.
 */
static int
fxp_new_rfabuf(struct fxp_softc *sc, struct fxp_rx *rxp)
{
	struct mbuf *m;
	struct fxp_rfa *rfa;
	bus_dmamap_t tmp_map;
	int error;

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);

	/*
	 * Move the data pointer up so that the incoming data packet
	 * will be 32-bit aligned.
	 */
	m->m_data += RFA_ALIGNMENT_FUDGE;

	/*
	 * Get a pointer to the base of the mbuf cluster and move
	 * data start past it.
	 */
	rfa = mtod(m, struct fxp_rfa *);
	m->m_data += sc->rfa_size;
	rfa->size = htole16(MCLBYTES - sc->rfa_size - RFA_ALIGNMENT_FUDGE);

	rfa->rfa_status = 0;
	/* New tail of the list: mark it End-of-List. */
	rfa->rfa_control = htole16(FXP_RFA_CONTROL_EL);
	rfa->actual_size = 0;
	m->m_len = m->m_pkthdr.len = MCLBYTES - RFA_ALIGNMENT_FUDGE -
	    sc->rfa_size;

	/*
	 * Initialize the rest of the RFA. Note that since the RFA
	 * is misaligned, we cannot store values directly. We're thus
	 * using the le32enc() function which handles endianness and
	 * is also alignment-safe.
	 */
	le32enc(&rfa->link_addr, 0xffffffff);
	le32enc(&rfa->rbd_addr, 0xffffffff);

	/* Map the RFA into DMA memory. */
	error = bus_dmamap_load(sc->fxp_rxmtag, sc->spare_map, rfa,
	    MCLBYTES - RFA_ALIGNMENT_FUDGE, fxp_dma_map_addr,
	    &rxp->rx_addr, BUS_DMA_NOWAIT);
	if (error) {
		m_freem(m);
		return (error);
	}

	/*
	 * The load went into the spare map; swap it with the slot's
	 * map so the old buffer's map becomes the new spare.
	 */
	if (rxp->rx_mbuf != NULL)
		bus_dmamap_unload(sc->fxp_rxmtag, rxp->rx_map);
	tmp_map = sc->spare_map;
	sc->spare_map = rxp->rx_map;
	rxp->rx_map = tmp_map;
	rxp->rx_mbuf = m;

	bus_dmamap_sync(sc->fxp_rxmtag, rxp->rx_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	return (0);
}

/*
 * Link an (already mapped) RFA buffer onto the tail of the receive
 * list, clearing the previous tail's End-of-List bit so the chip
 * continues into the new buffer.
 */
static void
fxp_add_rfabuf(struct fxp_softc *sc, struct fxp_rx *rxp)
{
	struct fxp_rfa *p_rfa;
	struct fxp_rx *p_rx;

	/*
	 * If there are other buffers already on the list, attach this
	 * one to the end by fixing up the tail to point to this one.
	 */
	if (sc->fxp_desc.rx_head != NULL) {
		p_rx = sc->fxp_desc.rx_tail;
		p_rfa = (struct fxp_rfa *)
		    (p_rx->rx_mbuf->m_ext.ext_buf + RFA_ALIGNMENT_FUDGE);
		p_rx->rx_next = rxp;
		le32enc(&p_rfa->link_addr, rxp->rx_addr);
		p_rfa->rfa_control = 0;
		bus_dmamap_sync(sc->fxp_rxmtag, p_rx->rx_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	} else {
		rxp->rx_next = NULL;
		sc->fxp_desc.rx_head = rxp;
	}
	sc->fxp_desc.rx_tail = rxp;
}

/*
 * Reinitialize an RFA buffer in place so it can be handed back to the
 * chip without allocating a replacement (used when a new mbuf could
 * not be obtained or the frame was bad).
 */
static void
fxp_discard_rfabuf(struct fxp_softc *sc, struct fxp_rx *rxp)
{
	struct mbuf *m;
	struct fxp_rfa *rfa;

	m = rxp->rx_mbuf;
	m->m_data = m->m_ext.ext_buf;
	/*
	 * Move the data pointer up so that the incoming data packet
	 * will be 32-bit aligned.
	 */
	m->m_data += RFA_ALIGNMENT_FUDGE;

	/*
	 * Get a pointer to the base of the mbuf cluster and move
	 * data start past it.
	 */
	rfa = mtod(m, struct fxp_rfa *);
	m->m_data += sc->rfa_size;
	rfa->size = htole16(MCLBYTES - sc->rfa_size - RFA_ALIGNMENT_FUDGE);

	rfa->rfa_status = 0;
	rfa->rfa_control = htole16(FXP_RFA_CONTROL_EL);
	rfa->actual_size = 0;

	/*
	 * Initialize the rest of the RFA. Note that since the RFA
	 * is misaligned, we cannot store values directly. We're thus
	 * using the le32enc() function which handles endianness and
	 * is also alignment-safe.
	 */
	le32enc(&rfa->link_addr, 0xffffffff);
	le32enc(&rfa->rbd_addr, 0xffffffff);

	bus_dmamap_sync(sc->fxp_rxmtag, rxp->rx_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

/*
 * MII bus read: issue a read over the MDI control register and poll
 * (up to ~100ms) for the ready bit before returning the 16-bit value.
 */
static int
fxp_miibus_readreg(device_t dev, int phy, int reg)
{
	struct fxp_softc *sc = device_get_softc(dev);
	int count = 10000;
	int value;

	CSR_WRITE_4(sc, FXP_CSR_MDICONTROL,
	    (FXP_MDI_READ << 26) | (reg << 16) | (phy << 21));

	while (((value = CSR_READ_4(sc, FXP_CSR_MDICONTROL)) & 0x10000000) == 0
	    && count--)
		DELAY(10);

	if (count <= 0)
		device_printf(dev, "fxp_miibus_readreg: timed out\n");

	return (value & 0xffff);
}

/*
 * MII bus write: issue a write over the MDI control register and poll
 * for completion.  Always returns 0 per the miibus contract.
 */
static int
fxp_miibus_writereg(device_t dev, int phy, int reg, int value)
{
	struct fxp_softc *sc = device_get_softc(dev);
	int count = 10000;

	CSR_WRITE_4(sc, FXP_CSR_MDICONTROL,
	    (FXP_MDI_WRITE << 26) | (reg << 16) | (phy << 21) |
	    (value & 0xffff));

	while ((CSR_READ_4(sc, FXP_CSR_MDICONTROL) & 0x10000000) == 0 &&
	    count--)
		DELAY(10);

	if (count <= 0)
		device_printf(dev, "fxp_miibus_writereg: timed out\n");
	return (0);
}

/*
 * Handle interface ioctls: flags, multicast, media and capability
 * changes.  Unknown requests fall through to ether_ioctl().
 */
static int
fxp_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct fxp_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int flag, mask, error = 0, reinit;

	switch (command) {
	case SIOCSIFFLAGS:
		FXP_LOCK(sc);
		/*
		 * If interface is marked up and not running, then start it.
		 * If it is marked down and running, stop it.
		 * XXX If it's up then re-initialize it. This is so flags
		 * such as IFF_PROMISC are handled.
		 */
		if (ifp->if_flags & IFF_UP) {
			if (((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) &&
			    ((ifp->if_flags ^ sc->if_flags) &
			    (IFF_PROMISC | IFF_ALLMULTI | IFF_LINK0)) != 0)
				fxp_init_body(sc);
			else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
				fxp_init_body(sc);
		} else {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
				fxp_stop(sc);
		}
		sc->if_flags = ifp->if_flags;
		FXP_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* fxp_init() takes the lock itself; see its comment. */
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			fxp_init(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		if (sc->miibus != NULL) {
			mii = device_get_softc(sc->miibus);
			error = ifmedia_ioctl(ifp, ifr,
			    &mii->mii_media, command);
		} else {
			error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, command);
		}
		break;

	case SIOCSIFCAP:
		reinit = 0;
		mask = ifp->if_capenable ^ ifr->ifr_reqcap;
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(fxp_poll, ifp);
				if (error)
					return(error);
				FXP_LOCK(sc);
				CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL,
				    FXP_SCB_INTR_DISABLE);
				ifp->if_capenable |= IFCAP_POLLING;
				FXP_UNLOCK(sc);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupts in any case */
				FXP_LOCK(sc);
				CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, 0);
				ifp->if_capenable &= ~IFCAP_POLLING;
				FXP_UNLOCK(sc);
			}
		}
#endif
		FXP_LOCK(sc);
		if ((mask & IFCAP_TXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_TXCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if ((ifp->if_capenable & IFCAP_TXCSUM) != 0)
				ifp->if_hwassist |= FXP_CSUM_FEATURES;
			else
				ifp->if_hwassist &= ~FXP_CSUM_FEATURES;
		}
		if ((mask & IFCAP_RXCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_RXCSUM) != 0) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
			/* 82559 rx csum lives in the config block. */
			if ((sc->flags & FXP_FLAG_82559_RXCSUM) != 0)
				reinit++;
		}
		if ((mask & IFCAP_TSO4) != 0 &&
		    (ifp->if_capabilities & IFCAP_TSO4) != 0) {
			ifp->if_capenable ^= IFCAP_TSO4;
			if ((ifp->if_capenable & IFCAP_TSO4) != 0)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}
		if ((mask & IFCAP_WOL_MAGIC) != 0 &&
		    (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0)
			ifp->if_capenable ^= IFCAP_WOL_MAGIC;
		if ((mask & IFCAP_VLAN_MTU) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_MTU) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_MTU;
			if (sc->revision != FXP_REV_82557)
				flag = FXP_FLAG_LONG_PKT_EN;
			else /* a hack to get long frames on the old chip */
				flag = FXP_FLAG_SAVE_BAD;
			sc->flags ^= flag;
			if (ifp->if_flags & IFF_UP)
				reinit++;
		}
		if ((mask & IFCAP_VLAN_HWCSUM) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0)
			ifp->if_capenable ^= IFCAP_VLAN_HWCSUM;
		if ((mask & IFCAP_VLAN_HWTSO) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWTSO) != 0)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
		if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
		    (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			/* HWTSO/HWCSUM on VLANs require hw tagging. */
			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0)
				ifp->if_capenable &=
				    ~(IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWCSUM);
			reinit++;
		}
		if (reinit > 0 && ifp->if_flags & IFF_UP)
			fxp_init_body(sc);
		FXP_UNLOCK(sc);
		VLAN_CAPABILITIES(ifp);
		break;

	default:
		error = ether_ioctl(ifp, command, data);
	}
	return (error);
}

/*
 * Fill in the multicast address list and return number of entries.
 */
static int
fxp_mc_addrs(struct fxp_softc *sc)
{
	struct fxp_cb_mcs *mcsp = sc->mcsp;
	struct ifnet *ifp = sc->ifp;
	struct ifmultiaddr *ifma;
	int nmcasts;

	nmcasts = 0;
	if ((ifp->if_flags & IFF_ALLMULTI) == 0) {
		if_maddr_rlock(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			/*
			 * Too many addresses for the hardware filter:
			 * fall back to receiving all multicast.
			 */
			if (nmcasts >= MAXMCADDR) {
				ifp->if_flags |= IFF_ALLMULTI;
				nmcasts = 0;
				break;
			}
			bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
			    &sc->mcsp->mc_addr[nmcasts][0], ETHER_ADDR_LEN);
			nmcasts++;
		}
		if_maddr_runlock(ifp);
	}
	/* The command takes a byte count, not an address count. */
	mcsp->mc_cnt = htole16(nmcasts * ETHER_ADDR_LEN);
	return (nmcasts);
}

/*
 * Program the multicast filter.
 *
 * We have an artificial restriction that the multicast setup command
 * must be the first command in the chain, so we take steps to ensure
 * this. By requiring this, it allows us to keep up the performance of
 * the pre-initialized command ring (esp. link pointers) by not actually
 * inserting the mcsetup command in the ring - i.e. its link pointer
 * points to the TxCB ring, but the mcsetup descriptor itself is not part
 * of it. We then can do 'CU_START' on the mcsetup descriptor and have it
 * lead into the regular TxCB ring when it completes.
 */
static void
fxp_mc_setup(struct fxp_softc *sc)
{
	struct fxp_cb_mcs *mcsp;
	int count;

	FXP_LOCK_ASSERT(sc, MA_OWNED);

	mcsp = sc->mcsp;
	mcsp->cb_status = 0;
	mcsp->cb_command = htole16(FXP_CB_COMMAND_MCAS | FXP_CB_COMMAND_EL);
	mcsp->link_addr = 0xffffffff;
	fxp_mc_addrs(sc);

	/*
	 * Wait until command unit is idle. This should never be the
	 * case when nothing is queued, but make sure anyway.
	 */
	count = 100;
	while ((CSR_READ_1(sc, FXP_CSR_SCB_RUSCUS) >> 6) !=
	    FXP_SCB_CUS_IDLE && --count)
		DELAY(10);
	if (count == 0) {
		device_printf(sc->dev, "command queue timeout\n");
		return;
	}

	/*
	 * Start the multicast setup command.
	 */
	fxp_scb_wait(sc);
	bus_dmamap_sync(sc->mcs_tag, sc->mcs_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->mcs_addr);
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
	/* ...and wait for it to complete. */
	fxp_dma_wait(sc, &mcsp->cb_status, sc->mcs_tag, sc->mcs_map);
}

/* Receive-bundling microcode images, one per supported chip stepping. */
static uint32_t fxp_ucode_d101a[] = D101_A_RCVBUNDLE_UCODE;
static uint32_t fxp_ucode_d101b0[] = D101_B0_RCVBUNDLE_UCODE;
static uint32_t fxp_ucode_d101ma[] = D101M_B_RCVBUNDLE_UCODE;
static uint32_t fxp_ucode_d101s[] = D101S_RCVBUNDLE_UCODE;
static uint32_t fxp_ucode_d102[] = D102_B_RCVBUNDLE_UCODE;
static uint32_t fxp_ucode_d102c[] = D102_C_RCVBUNDLE_UCODE;
static uint32_t fxp_ucode_d102e[] = D102_E_RCVBUNDLE_UCODE;

/* Expands to the image pointer plus its length in dwords. */
#define UCODE(x) x, sizeof(x)/sizeof(uint32_t)

/*
 * Table mapping chip revisions to their microcode image and the dword
 * offsets of the patchable int-delay / bundle-max parameters (0 means
 * the parameter is not patchable for that image).
 */
struct ucode {
	uint32_t revision;
	uint32_t *ucode;
	int length;
	u_short int_delay_offset;
	u_short bundle_max_offset;
} ucode_table[] = {
	{ FXP_REV_82558_A4, UCODE(fxp_ucode_d101a), D101_CPUSAVER_DWORD, 0 },
	{ FXP_REV_82558_B0, UCODE(fxp_ucode_d101b0), D101_CPUSAVER_DWORD, 0 },
	{ FXP_REV_82559_A0, UCODE(fxp_ucode_d101ma),
	    D101M_CPUSAVER_DWORD, D101M_CPUSAVER_BUNDLE_MAX_DWORD },
	{ FXP_REV_82559S_A, UCODE(fxp_ucode_d101s),
	    D101S_CPUSAVER_DWORD, D101S_CPUSAVER_BUNDLE_MAX_DWORD },
	{ FXP_REV_82550, UCODE(fxp_ucode_d102),
	    D102_B_CPUSAVER_DWORD, D102_B_CPUSAVER_BUNDLE_MAX_DWORD },
	{ FXP_REV_82550_C, UCODE(fxp_ucode_d102c),
	    D102_C_CPUSAVER_DWORD, D102_C_CPUSAVER_BUNDLE_MAX_DWORD },
	{ FXP_REV_82551_F, UCODE(fxp_ucode_d102e),
	    D102_E_CPUSAVER_DWORD, D102_E_CPUSAVER_BUNDLE_MAX_DWORD },
	{ 0, NULL, 0, 0, 0 }
};

/*
 * Locate the microcode image for this chip revision, patch in the
 * tunable interrupt-delay and bundle-max values, and download it to
 * the chip via a UCODE command block.  Silently does nothing if no
 * image matches the revision.
 */
static void
fxp_load_ucode(struct fxp_softc *sc)
{
	struct ucode *uc;
	struct fxp_cb_ucode *cbp;
	int i;

	for (uc = ucode_table; uc->ucode != NULL; uc++)
		if (sc->revision == uc->revision)
			break;
	if (uc->ucode == NULL)
		return;
	cbp = (struct fxp_cb_ucode *)sc->fxp_desc.cbl_list;
	cbp->cb_status = 0;
	cbp->cb_command = htole16(FXP_CB_COMMAND_UCODE | FXP_CB_COMMAND_EL);
	cbp->link_addr = 0xffffffff;	/* (no) next command */
	for (i = 0; i < uc->length; i++)
		cbp->ucode[i] = htole32(uc->ucode[i]);
	/* Patch the tunables directly into the copied image. */
	if (uc->int_delay_offset)
		*(uint16_t *)&cbp->ucode[uc->int_delay_offset] =
		    htole16(sc->tunable_int_delay + sc->tunable_int_delay / 2);
	if (uc->bundle_max_offset)
		*(uint16_t *)&cbp->ucode[uc->bundle_max_offset] =
		    htole16(sc->tunable_bundle_max);
	/*
	 * Download the ucode to the chip.
	 */
	fxp_scb_wait(sc);
	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->fxp_desc.cbl_addr);
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START);
	/* ...and wait for it to complete. */
	fxp_dma_wait(sc, &cbp->cb_status, sc->cbl_tag, sc->cbl_map);
	device_printf(sc->dev,
	    "Microcode loaded, int_delay: %d usec bundle_max: %d\n",
	    sc->tunable_int_delay,
	    uc->bundle_max_offset == 0 ? 0 : sc->tunable_bundle_max);
	sc->flags |= FXP_FLAG_UCODE;
}

/* Shorthand for registering one read-only hardware statistic. */
#define FXP_SYSCTL_STAT_ADD(c, h, n, p, d)	\
    SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d)

/*
 * Create the per-device sysctl tree (tunables plus the Rx/Tx hardware
 * statistics nodes) and pull in any hints-file tunable overrides.
 */
static void
fxp_sysctl_node(struct fxp_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *child, *parent;
	struct sysctl_oid *tree;
	struct fxp_hwstats *hsp;

	ctx = device_get_sysctl_ctx(sc->dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));

	SYSCTL_ADD_PROC(ctx, child,
	    OID_AUTO, "int_delay", CTLTYPE_INT | CTLFLAG_RW,
	    &sc->tunable_int_delay, 0, sysctl_hw_fxp_int_delay, "I",
	    "FXP driver receive interrupt microcode bundling delay");
	SYSCTL_ADD_PROC(ctx, child,
	    OID_AUTO, "bundle_max", CTLTYPE_INT | CTLFLAG_RW,
	    &sc->tunable_bundle_max, 0, sysctl_hw_fxp_bundle_max, "I",
	    "FXP driver receive interrupt microcode bundle size limit");
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rnr", CTLFLAG_RD, &sc->rnr, 0,
	    "FXP RNR events");
	SYSCTL_ADD_INT(ctx, child,
	    OID_AUTO, "noflow", CTLFLAG_RW, &sc->tunable_noflow, 0,
	    "FXP flow control disabled");

	/*
	 * Pull in device tunables.
	 */
	sc->tunable_int_delay = TUNABLE_INT_DELAY;
	sc->tunable_bundle_max = TUNABLE_BUNDLE_MAX;
	sc->tunable_noflow = 1;
	(void) resource_int_value(device_get_name(sc->dev),
	    device_get_unit(sc->dev), "int_delay", &sc->tunable_int_delay);
	(void) resource_int_value(device_get_name(sc->dev),
	    device_get_unit(sc->dev), "bundle_max", &sc->tunable_bundle_max);
	(void) resource_int_value(device_get_name(sc->dev),
	    device_get_unit(sc->dev), "noflow", &sc->tunable_noflow);
	sc->rnr = 0;

	hsp = &sc->fxp_hwstats;
	tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD,
	    NULL, "FXP statistics");
	parent = SYSCTL_CHILDREN(tree);

	/* Rx MAC statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD,
	    NULL, "Rx MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	FXP_SYSCTL_STAT_ADD(ctx, child, "good_frames",
	    &hsp->rx_good, "Good frames");
	FXP_SYSCTL_STAT_ADD(ctx, child, "crc_errors",
	    &hsp->rx_crc_errors, "CRC errors");
	FXP_SYSCTL_STAT_ADD(ctx, child, "alignment_errors",
	    &hsp->rx_alignment_errors, "Alignment errors");
	FXP_SYSCTL_STAT_ADD(ctx, child, "rnr_errors",
	    &hsp->rx_rnr_errors, "RNR errors");
	FXP_SYSCTL_STAT_ADD(ctx, child, "overrun_errors",
	    &hsp->rx_overrun_errors, "Overrun errors");
	FXP_SYSCTL_STAT_ADD(ctx, child, "cdt_errors",
	    &hsp->rx_cdt_errors, "Collision detect errors");
	FXP_SYSCTL_STAT_ADD(ctx, child, "shortframes",
	    &hsp->rx_shortframes, "Short frame errors");
	/* Pause/control counters only exist from the 82558 A4 on. */
	if (sc->revision >= FXP_REV_82558_A4) {
		FXP_SYSCTL_STAT_ADD(ctx, child, "pause",
		    &hsp->rx_pause, "Pause frames");
		FXP_SYSCTL_STAT_ADD(ctx, child, "controls",
		    &hsp->rx_controls, "Unsupported control frames");
	}
	if (sc->revision >= FXP_REV_82559_A0)
		FXP_SYSCTL_STAT_ADD(ctx, child, "tco",
		    &hsp->rx_tco, "TCO frames");

	/* Tx MAC statistics. */
	tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD,
	    NULL, "Tx MAC statistics");
	child = SYSCTL_CHILDREN(tree);
	FXP_SYSCTL_STAT_ADD(ctx, child, "good_frames",
	    &hsp->tx_good, "Good frames");
	FXP_SYSCTL_STAT_ADD(ctx, child, "maxcols",
	    &hsp->tx_maxcols, "Maximum collisions errors");
	FXP_SYSCTL_STAT_ADD(ctx, child, "latecols",
	    &hsp->tx_latecols, "Late collisions errors");
	FXP_SYSCTL_STAT_ADD(ctx, child, "underruns",
	    &hsp->tx_underruns, "Underrun errors");
	FXP_SYSCTL_STAT_ADD(ctx, child, "lostcrs",
	    &hsp->tx_lostcrs, "Lost carrier sense");
	FXP_SYSCTL_STAT_ADD(ctx, child, "deffered",
	    &hsp->tx_deffered, "Deferred");
	FXP_SYSCTL_STAT_ADD(ctx, child, "single_collisions",
	    &hsp->tx_single_collisions, "Single collisions");
	FXP_SYSCTL_STAT_ADD(ctx, child, "multiple_collisions",
	    &hsp->tx_multiple_collisions, "Multiple collisions");
	FXP_SYSCTL_STAT_ADD(ctx, child, "total_collisions",
	    &hsp->tx_total_collisions, "Total collisions");
	if (sc->revision >= FXP_REV_82558_A4)
		FXP_SYSCTL_STAT_ADD(ctx, child, "pause",
		    &hsp->tx_pause, "Pause frames");
	if (sc->revision >= FXP_REV_82559_A0)
		FXP_SYSCTL_STAT_ADD(ctx, child, "tco",
		    &hsp->tx_tco, "TCO frames");
}

#undef FXP_SYSCTL_STAT_ADD

/*
 * Common sysctl handler body: accept the new integer only if it lies
 * within [low, high], otherwise return EINVAL.
 */
static int
sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
{
	int error, value;

	value = *(int *)arg1;
	error = sysctl_handle_int(oidp, &value, 0, req);
	/* Error, or a read-only request: nothing more to do. */
	if (error || !req->newptr)
		return (error);
	if (value < low || value > high)
		return (EINVAL);
	*(int *)arg1 = value;
	return (0);
}

/*
 * Interrupt delay is expressed in microseconds, a multiplier is used
 * to convert this to the appropriate clock ticks before using.
3176 */ 3177static int 3178sysctl_hw_fxp_int_delay(SYSCTL_HANDLER_ARGS) 3179{ 3180 return (sysctl_int_range(oidp, arg1, arg2, req, 300, 3000)); 3181} 3182 3183static int 3184sysctl_hw_fxp_bundle_max(SYSCTL_HANDLER_ARGS) 3185{ 3186 return (sysctl_int_range(oidp, arg1, arg2, req, 1, 0xffff)); 3187} 3188