/* if_fxp.c, revision 65983 */
1/* 2 * Copyright (c) 1995, David Greenman 3 * All rights reserved. 4 * 5 * Modifications to support NetBSD and media selection: 6 * Copyright (c) 1997 Jason R. Thorpe. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice unmodified, this list of conditions, and the following 13 * disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 * SUCH DAMAGE. 
29 * 30 * $FreeBSD: head/sys/dev/fxp/if_fxp.c 65983 2000-09-17 13:26:25Z cp $ 31 */ 32 33/* 34 * Intel EtherExpress Pro/100B PCI Fast Ethernet driver 35 */ 36 37#include <sys/param.h> 38#include <sys/systm.h> 39#include <sys/mbuf.h> 40#include <sys/malloc.h> 41#include <sys/kernel.h> 42#include <sys/socket.h> 43 44#include <net/if.h> 45#include <net/if_dl.h> 46#include <net/if_media.h> 47 48#ifdef NS 49#include <netns/ns.h> 50#include <netns/ns_if.h> 51#endif 52 53#include <net/bpf.h> 54 55#if defined(__NetBSD__) 56 57#include <sys/ioctl.h> 58#include <sys/errno.h> 59#include <sys/device.h> 60 61#include <net/if_dl.h> 62#include <net/if_ether.h> 63 64#include <netinet/if_inarp.h> 65 66#include <vm/vm.h> 67 68#include <machine/cpu.h> 69#include <machine/bus.h> 70#include <machine/intr.h> 71 72#include <dev/pci/if_fxpreg.h> 73#include <dev/pci/if_fxpvar.h> 74 75#include <dev/pci/pcivar.h> 76#include <dev/pci/pcireg.h> 77#include <dev/pci/pcidevs.h> 78 79 80#else /* __FreeBSD__ */ 81 82#include <sys/sockio.h> 83#include <sys/bus.h> 84#include <machine/bus.h> 85#include <sys/rman.h> 86#include <machine/resource.h> 87#include <machine/mutex.h> 88 89#include <net/ethernet.h> 90#include <net/if_arp.h> 91 92#include <vm/vm.h> /* for vtophys */ 93#include <vm/pmap.h> /* for vtophys */ 94#include <machine/clock.h> /* for DELAY */ 95 96#include <pci/pcivar.h> 97#include <pci/pcireg.h> /* for PCIM_CMD_xxx */ 98#include <pci/if_fxpreg.h> 99#include <pci/if_fxpvar.h> 100 101#endif /* __NetBSD__ */ 102 103#ifdef __alpha__ /* XXX */ 104/* XXX XXX NEED REAL DMA MAPPING SUPPORT XXX XXX */ 105#undef vtophys 106#define vtophys(va) alpha_XXX_dmamap((vm_offset_t)(va)) 107#endif /* __alpha__ */ 108 109/* 110 * NOTE! On the Alpha, we have an alignment constraint. The 111 * card DMAs the packet immediately following the RFA. However, 112 * the first thing in the packet is a 14-byte Ethernet header. 113 * This means that the packet is misaligned. 
To compensate, 114 * we actually offset the RFA 2 bytes into the cluster. This 115 * alignes the packet after the Ethernet header at a 32-bit 116 * boundary. HOWEVER! This means that the RFA is misaligned! 117 */ 118#define RFA_ALIGNMENT_FUDGE 2 119 120/* 121 * Inline function to copy a 16-bit aligned 32-bit quantity. 122 */ 123static __inline void fxp_lwcopy __P((volatile u_int32_t *, 124 volatile u_int32_t *)); 125static __inline void 126fxp_lwcopy(src, dst) 127 volatile u_int32_t *src, *dst; 128{ 129#ifdef __i386__ 130 *dst = *src; 131#else 132 volatile u_int16_t *a = (volatile u_int16_t *)src; 133 volatile u_int16_t *b = (volatile u_int16_t *)dst; 134 135 b[0] = a[0]; 136 b[1] = a[1]; 137#endif 138} 139 140/* 141 * Template for default configuration parameters. 142 * See struct fxp_cb_config for the bit definitions. 143 */ 144static u_char fxp_cb_config_template[] = { 145 0x0, 0x0, /* cb_status */ 146 0x80, 0x2, /* cb_command */ 147 0xff, 0xff, 0xff, 0xff, /* link_addr */ 148 0x16, /* 0 */ 149 0x8, /* 1 */ 150 0x0, /* 2 */ 151 0x0, /* 3 */ 152 0x0, /* 4 */ 153 0x80, /* 5 */ 154 0xb2, /* 6 */ 155 0x3, /* 7 */ 156 0x1, /* 8 */ 157 0x0, /* 9 */ 158 0x26, /* 10 */ 159 0x0, /* 11 */ 160 0x60, /* 12 */ 161 0x0, /* 13 */ 162 0xf2, /* 14 */ 163 0x48, /* 15 */ 164 0x0, /* 16 */ 165 0x40, /* 17 */ 166 0xf3, /* 18 */ 167 0x0, /* 19 */ 168 0x3f, /* 20 */ 169 0x5 /* 21 */ 170}; 171 172/* Supported media types. 
*/ 173struct fxp_supported_media { 174 const int fsm_phy; /* PHY type */ 175 const int *fsm_media; /* the media array */ 176 const int fsm_nmedia; /* the number of supported media */ 177 const int fsm_defmedia; /* default media for this PHY */ 178}; 179 180static const int fxp_media_standard[] = { 181 IFM_ETHER|IFM_10_T, 182 IFM_ETHER|IFM_10_T|IFM_FDX, 183 IFM_ETHER|IFM_100_TX, 184 IFM_ETHER|IFM_100_TX|IFM_FDX, 185 IFM_ETHER|IFM_AUTO, 186}; 187#define FXP_MEDIA_STANDARD_DEFMEDIA (IFM_ETHER|IFM_AUTO) 188 189static const int fxp_media_default[] = { 190 IFM_ETHER|IFM_MANUAL, /* XXX IFM_AUTO ? */ 191}; 192#define FXP_MEDIA_DEFAULT_DEFMEDIA (IFM_ETHER|IFM_MANUAL) 193 194static const struct fxp_supported_media fxp_media[] = { 195 { FXP_PHY_DP83840, fxp_media_standard, 196 sizeof(fxp_media_standard) / sizeof(fxp_media_standard[0]), 197 FXP_MEDIA_STANDARD_DEFMEDIA }, 198 { FXP_PHY_DP83840A, fxp_media_standard, 199 sizeof(fxp_media_standard) / sizeof(fxp_media_standard[0]), 200 FXP_MEDIA_STANDARD_DEFMEDIA }, 201 { FXP_PHY_82553A, fxp_media_standard, 202 sizeof(fxp_media_standard) / sizeof(fxp_media_standard[0]), 203 FXP_MEDIA_STANDARD_DEFMEDIA }, 204 { FXP_PHY_82553C, fxp_media_standard, 205 sizeof(fxp_media_standard) / sizeof(fxp_media_standard[0]), 206 FXP_MEDIA_STANDARD_DEFMEDIA }, 207 { FXP_PHY_82555, fxp_media_standard, 208 sizeof(fxp_media_standard) / sizeof(fxp_media_standard[0]), 209 FXP_MEDIA_STANDARD_DEFMEDIA }, 210 { FXP_PHY_82555B, fxp_media_standard, 211 sizeof(fxp_media_standard) / sizeof(fxp_media_standard[0]), 212 FXP_MEDIA_STANDARD_DEFMEDIA }, 213 { FXP_PHY_80C24, fxp_media_default, 214 sizeof(fxp_media_default) / sizeof(fxp_media_default[0]), 215 FXP_MEDIA_DEFAULT_DEFMEDIA }, 216}; 217#define NFXPMEDIA (sizeof(fxp_media) / sizeof(fxp_media[0])) 218 219static int fxp_mediachange __P((struct ifnet *)); 220static void fxp_mediastatus __P((struct ifnet *, struct ifmediareq *)); 221static void fxp_set_media __P((struct fxp_softc *, int)); 222static __inline 
void fxp_scb_wait __P((struct fxp_softc *)); 223static FXP_INTR_TYPE fxp_intr __P((void *)); 224static void fxp_start __P((struct ifnet *)); 225static int fxp_ioctl __P((struct ifnet *, 226 FXP_IOCTLCMD_TYPE, caddr_t)); 227static void fxp_init __P((void *)); 228static void fxp_stop __P((struct fxp_softc *)); 229static void fxp_watchdog __P((struct ifnet *)); 230static int fxp_add_rfabuf __P((struct fxp_softc *, struct mbuf *)); 231static int fxp_mdi_read __P((struct fxp_softc *, int, int)); 232static void fxp_mdi_write __P((struct fxp_softc *, int, int, int)); 233static void fxp_autosize_eeprom __P((struct fxp_softc *)); 234static void fxp_read_eeprom __P((struct fxp_softc *, u_int16_t *, 235 int, int)); 236static int fxp_attach_common __P((struct fxp_softc *, u_int8_t *)); 237static void fxp_stats_update __P((void *)); 238static void fxp_mc_setup __P((struct fxp_softc *)); 239 240/* 241 * Set initial transmit threshold at 64 (512 bytes). This is 242 * increased by 64 (512 bytes) at a time, to maximum of 192 243 * (1536 bytes), if an underrun occurs. 244 */ 245static int tx_threshold = 64; 246 247/* 248 * Number of transmit control blocks. This determines the number 249 * of transmit buffers that can be chained in the CB list. 250 * This must be a power of two. 251 */ 252#define FXP_NTXCB 128 253 254/* 255 * Number of completed TX commands at which point an interrupt 256 * will be generated to garbage collect the attached buffers. 257 * Must be at least one less than FXP_NTXCB, and should be 258 * enough less so that the transmitter doesn't becomes idle 259 * during the buffer rundown (which would reduce performance). 260 */ 261#define FXP_CXINT_THRESH 120 262 263/* 264 * TxCB list index mask. This is used to do list wrap-around. 265 */ 266#define FXP_TXCB_MASK (FXP_NTXCB - 1) 267 268/* 269 * Number of receive frame area buffers. These are large so chose 270 * wisely. 
271 */ 272#define FXP_NRFABUFS 64 273 274/* 275 * Maximum number of seconds that the receiver can be idle before we 276 * assume it's dead and attempt to reset it by reprogramming the 277 * multicast filter. This is part of a work-around for a bug in the 278 * NIC. See fxp_stats_update(). 279 */ 280#define FXP_MAX_RX_IDLE 15 281 282/* 283 * Wait for the previous command to be accepted (but not necessarily 284 * completed). 285 */ 286static __inline void 287fxp_scb_wait(sc) 288 struct fxp_softc *sc; 289{ 290 int i = 10000; 291 292 while (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) && --i); 293} 294 295/************************************************************* 296 * Operating system-specific autoconfiguration glue 297 *************************************************************/ 298 299#if defined(__NetBSD__) 300 301#ifdef __BROKEN_INDIRECT_CONFIG 302static int fxp_match __P((struct device *, void *, void *)); 303#else 304static int fxp_match __P((struct device *, struct cfdata *, void *)); 305#endif 306static void fxp_attach __P((struct device *, struct device *, void *)); 307 308static void fxp_shutdown __P((void *)); 309 310/* Compensate for lack of a generic ether_ioctl() */ 311static int fxp_ether_ioctl __P((struct ifnet *, 312 FXP_IOCTLCMD_TYPE, caddr_t)); 313#define ether_ioctl fxp_ether_ioctl 314 315struct cfattach fxp_ca = { 316 sizeof(struct fxp_softc), fxp_match, fxp_attach 317}; 318 319struct cfdriver fxp_cd = { 320 NULL, "fxp", DV_IFNET 321}; 322 323/* 324 * Check if a device is an 82557. 
325 */ 326static int 327fxp_match(parent, match, aux) 328 struct device *parent; 329#ifdef __BROKEN_INDIRECT_CONFIG 330 void *match; 331#else 332 struct cfdata *match; 333#endif 334 void *aux; 335{ 336 struct pci_attach_args *pa = aux; 337 338 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_INTEL) 339 return (0); 340 341 switch (PCI_PRODUCT(pa->pa_id)) { 342 case PCI_PRODUCT_INTEL_82557: 343 return (1); 344 } 345 346 return (0); 347} 348 349static void 350fxp_attach(parent, self, aux) 351 struct device *parent, *self; 352 void *aux; 353{ 354 struct fxp_softc *sc = (struct fxp_softc *)self; 355 struct pci_attach_args *pa = aux; 356 pci_chipset_tag_t pc = pa->pa_pc; 357 pci_intr_handle_t ih; 358 const char *intrstr = NULL; 359 u_int8_t enaddr[6]; 360 struct ifnet *ifp; 361 362 /* 363 * Map control/status registers. 364 */ 365 if (pci_mapreg_map(pa, FXP_PCI_MMBA, PCI_MAPREG_TYPE_MEM, 0, 366 &sc->sc_st, &sc->sc_sh, NULL, NULL)) { 367 printf(": can't map registers\n"); 368 return; 369 } 370 printf(": Intel EtherExpress Pro 10/100B Ethernet\n"); 371 372 /* 373 * Allocate our interrupt. 374 */ 375 if (pci_intr_map(pc, pa->pa_intrtag, pa->pa_intrpin, 376 pa->pa_intrline, &ih)) { 377 printf("%s: couldn't map interrupt\n", sc->sc_dev.dv_xname); 378 return; 379 } 380 intrstr = pci_intr_string(pc, ih); 381 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, fxp_intr, sc); 382 if (sc->sc_ih == NULL) { 383 printf("%s: couldn't establish interrupt", 384 sc->sc_dev.dv_xname); 385 if (intrstr != NULL) 386 printf(" at %s", intrstr); 387 printf("\n"); 388 return; 389 } 390 printf("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr); 391 392 /* Do generic parts of attach. */ 393 if (fxp_attach_common(sc, enaddr)) { 394 /* Failed! */ 395 return; 396 } 397 398 printf("%s: Ethernet address %s%s\n", sc->sc_dev.dv_xname, 399 ether_sprintf(enaddr), sc->phy_10Mbps_only ? 
", 10Mbps" : ""); 400 401 ifp = &sc->sc_ethercom.ec_if; 402 bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ); 403 ifp->if_softc = sc; 404 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 405 ifp->if_ioctl = fxp_ioctl; 406 ifp->if_start = fxp_start; 407 ifp->if_watchdog = fxp_watchdog; 408 409 /* 410 * Attach the interface. 411 */ 412 if_attach(ifp); 413 /* 414 * Let the system queue as many packets as we have available 415 * TX descriptors. 416 */ 417 ifp->if_snd.ifq_maxlen = FXP_NTXCB - 1; 418 ether_ifattach(ifp, enaddr); 419 bpfattach(&sc->sc_ethercom.ec_if.if_bpf, ifp, DLT_EN10MB, 420 sizeof(struct ether_header)); 421 422 /* 423 * Add shutdown hook so that DMA is disabled prior to reboot. Not 424 * doing do could allow DMA to corrupt kernel memory during the 425 * reboot before the driver initializes. 426 */ 427 shutdownhook_establish(fxp_shutdown, sc); 428} 429 430/* 431 * Device shutdown routine. Called at system shutdown after sync. The 432 * main purpose of this routine is to shut off receiver DMA so that 433 * kernel memory doesn't get clobbered during warmboot. 434 */ 435static void 436fxp_shutdown(sc) 437 void *sc; 438{ 439 fxp_stop((struct fxp_softc *) sc); 440} 441 442static int 443fxp_ether_ioctl(ifp, cmd, data) 444 struct ifnet *ifp; 445 FXP_IOCTLCMD_TYPE cmd; 446 caddr_t data; 447{ 448 struct ifaddr *ifa = (struct ifaddr *) data; 449 struct fxp_softc *sc = ifp->if_softc; 450 451 switch (cmd) { 452 case SIOCSIFADDR: 453 ifp->if_flags |= IFF_UP; 454 455 switch (ifa->ifa_addr->sa_family) { 456#ifdef INET 457 case AF_INET: 458 fxp_init(sc); 459 arp_ifinit(ifp, ifa); 460 break; 461#endif 462#ifdef NS 463 case AF_NS: 464 { 465 register struct ns_addr *ina = &IA_SNS(ifa)->sns_addr; 466 467 if (ns_nullhost(*ina)) 468 ina->x_host = *(union ns_host *) 469 LLADDR(ifp->if_sadl); 470 else 471 bcopy(ina->x_host.c_host, LLADDR(ifp->if_sadl), 472 ifp->if_addrlen); 473 /* Set new address. 
*/ 474 fxp_init(sc); 475 break; 476 } 477#endif 478 default: 479 fxp_init(sc); 480 break; 481 } 482 break; 483 484 default: 485 return (EINVAL); 486 } 487 488 return (0); 489} 490 491#else /* __FreeBSD__ */ 492 493/* 494 * Return identification string if this is device is ours. 495 */ 496static int 497fxp_probe(device_t dev) 498{ 499 if (pci_get_vendor(dev) == FXP_VENDORID_INTEL) { 500 switch (pci_get_device(dev)) { 501 502 case FXP_DEVICEID_i82557: 503 device_set_desc(dev, "Intel Pro 10/100B/100+ Ethernet"); 504 return 0; 505 case FXP_DEVICEID_i82559: 506 device_set_desc(dev, "Intel InBusiness 10/100 Ethernet"); 507 return 0; 508 case FXP_DEVICEID_i82559ER: 509 device_set_desc(dev, "Intel Embedded 10/100 Ethernet"); 510 return 0; 511 default: 512 break; 513 } 514 } 515 516 return ENXIO; 517} 518 519static int 520fxp_attach(device_t dev) 521{ 522 int error = 0; 523 struct fxp_softc *sc = device_get_softc(dev); 524 struct ifnet *ifp; 525 FXP_SPLVAR(s) 526 u_long val; 527 int rid; 528 529#if !defined(__NetBSD__) 530 mtx_init(&sc->sc_mtx, "fxp", MTX_DEF); 531#endif 532 callout_handle_init(&sc->stat_ch); 533 534 FXP_LOCK(sc, s); 535 536 /* 537 * Enable bus mastering. 538 */ 539 val = pci_read_config(dev, PCIR_COMMAND, 2); 540 val |= (PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN); 541 pci_write_config(dev, PCIR_COMMAND, val, 2); 542 543 /* 544 * Map control/status registers. 545 */ 546 rid = FXP_PCI_MMBA; 547 sc->mem = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 548 0, ~0, 1, RF_ACTIVE); 549 if (!sc->mem) { 550 device_printf(dev, "could not map memory\n"); 551 error = ENXIO; 552 goto fail; 553 } 554 555 sc->sc_st = rman_get_bustag(sc->mem); 556 sc->sc_sh = rman_get_bushandle(sc->mem); 557 558 /* 559 * Allocate our interrupt. 
560 */ 561 rid = 0; 562 sc->irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1, 563 RF_SHAREABLE | RF_ACTIVE); 564 if (sc->irq == NULL) { 565 device_printf(dev, "could not map interrupt\n"); 566 error = ENXIO; 567 goto fail; 568 } 569 570 error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET, 571 fxp_intr, sc, &sc->ih); 572 if (error) { 573 device_printf(dev, "could not setup irq\n"); 574 goto fail; 575 } 576 577 /* Do generic parts of attach. */ 578 if (fxp_attach_common(sc, sc->arpcom.ac_enaddr)) { 579 /* Failed! */ 580 bus_teardown_intr(dev, sc->irq, sc->ih); 581 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq); 582 bus_release_resource(dev, SYS_RES_MEMORY, FXP_PCI_MMBA, sc->mem); 583 error = ENXIO; 584 goto fail; 585 } 586 587 device_printf(dev, "Ethernet address %6D%s\n", 588 sc->arpcom.ac_enaddr, ":", sc->phy_10Mbps_only ? ", 10Mbps" : ""); 589 590 ifp = &sc->arpcom.ac_if; 591 ifp->if_unit = device_get_unit(dev); 592 ifp->if_name = "fxp"; 593 ifp->if_output = ether_output; 594 ifp->if_baudrate = 100000000; 595 ifp->if_init = fxp_init; 596 ifp->if_softc = sc; 597 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 598 ifp->if_ioctl = fxp_ioctl; 599 ifp->if_start = fxp_start; 600 ifp->if_watchdog = fxp_watchdog; 601 602 /* 603 * Attach the interface. 604 */ 605 ether_ifattach(ifp, ETHER_BPF_SUPPORTED); 606 /* 607 * Let the system queue as many packets as we have available 608 * TX descriptors. 609 */ 610 ifp->if_snd.ifq_maxlen = FXP_NTXCB - 1; 611 612 FXP_UNLOCK(sc, s); 613 return 0; 614 615 fail: 616 FXP_UNLOCK(sc, s); 617 mtx_destroy(&sc->sc_mtx); 618 return error; 619} 620 621/* 622 * Detach interface. 623 */ 624static int 625fxp_detach(device_t dev) 626{ 627 struct fxp_softc *sc = device_get_softc(dev); 628 FXP_SPLVAR(s) 629 630 FXP_LOCK(sc, s); 631 632 /* 633 * Close down routes etc. 634 */ 635 ether_ifdetach(&sc->arpcom.ac_if, ETHER_BPF_SUPPORTED); 636 637 /* 638 * Stop DMA and drop transmit queue. 
639 */ 640 fxp_stop(sc); 641 642 /* 643 * Deallocate resources. 644 */ 645 bus_teardown_intr(dev, sc->irq, sc->ih); 646 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq); 647 bus_release_resource(dev, SYS_RES_MEMORY, FXP_PCI_MMBA, sc->mem); 648 649 /* 650 * Free all the receive buffers. 651 */ 652 if (sc->rfa_headm != NULL) 653 m_freem(sc->rfa_headm); 654 655 /* 656 * Free all media structures. 657 */ 658 ifmedia_removeall(&sc->sc_media); 659 660 /* 661 * Free anciliary structures. 662 */ 663 free(sc->cbl_base, M_DEVBUF); 664 free(sc->fxp_stats, M_DEVBUF); 665 free(sc->mcsp, M_DEVBUF); 666 667 FXP_UNLOCK(sc, s); 668 669 return 0; 670} 671 672/* 673 * Device shutdown routine. Called at system shutdown after sync. The 674 * main purpose of this routine is to shut off receiver DMA so that 675 * kernel memory doesn't get clobbered during warmboot. 676 */ 677static int 678fxp_shutdown(device_t dev) 679{ 680 /* 681 * Make sure that DMA is disabled prior to reboot. Not doing 682 * do could allow DMA to corrupt kernel memory during the 683 * reboot before the driver initializes. 684 */ 685 fxp_stop((struct fxp_softc *) device_get_softc(dev)); 686 return 0; 687} 688 689static device_method_t fxp_methods[] = { 690 /* Device interface */ 691 DEVMETHOD(device_probe, fxp_probe), 692 DEVMETHOD(device_attach, fxp_attach), 693 DEVMETHOD(device_detach, fxp_detach), 694 DEVMETHOD(device_shutdown, fxp_shutdown), 695 696 { 0, 0 } 697}; 698 699static driver_t fxp_driver = { 700 "fxp", 701 fxp_methods, 702 sizeof(struct fxp_softc), 703}; 704 705static devclass_t fxp_devclass; 706 707DRIVER_MODULE(if_fxp, pci, fxp_driver, fxp_devclass, 0, 0); 708 709#endif /* __NetBSD__ */ 710 711/************************************************************* 712 * End of operating system-specific autoconfiguration glue 713 *************************************************************/ 714 715/* 716 * Do generic parts of attach. 
717 */ 718static int 719fxp_attach_common(sc, enaddr) 720 struct fxp_softc *sc; 721 u_int8_t *enaddr; 722{ 723 u_int16_t data; 724 int i, nmedia, defmedia; 725 const int *media; 726 727 /* 728 * Reset to a stable state. 729 */ 730 CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET); 731 DELAY(10); 732 733 sc->cbl_base = malloc(sizeof(struct fxp_cb_tx) * FXP_NTXCB, 734 M_DEVBUF, M_NOWAIT); 735 if (sc->cbl_base == NULL) 736 goto fail; 737 bzero(sc->cbl_base, sizeof(struct fxp_cb_tx) * FXP_NTXCB); 738 739 sc->fxp_stats = malloc(sizeof(struct fxp_stats), M_DEVBUF, M_NOWAIT); 740 if (sc->fxp_stats == NULL) 741 goto fail; 742 bzero(sc->fxp_stats, sizeof(struct fxp_stats)); 743 744 sc->mcsp = malloc(sizeof(struct fxp_cb_mcs), M_DEVBUF, M_NOWAIT); 745 if (sc->mcsp == NULL) 746 goto fail; 747 748 /* 749 * Pre-allocate our receive buffers. 750 */ 751 for (i = 0; i < FXP_NRFABUFS; i++) { 752 if (fxp_add_rfabuf(sc, NULL) != 0) { 753 goto fail; 754 } 755 } 756 757 /* 758 * Find out how large of an SEEPROM we have. 759 */ 760 fxp_autosize_eeprom(sc); 761 762 /* 763 * Get info about the primary PHY 764 */ 765 fxp_read_eeprom(sc, (u_int16_t *)&data, 6, 1); 766 sc->phy_primary_addr = data & 0xff; 767 sc->phy_primary_device = (data >> 8) & 0x3f; 768 sc->phy_10Mbps_only = data >> 15; 769 770 /* 771 * Read MAC address. 772 */ 773 fxp_read_eeprom(sc, (u_int16_t *)enaddr, 0, 3); 774 775 /* 776 * Initialize the media structures. 
777 */ 778 779 media = fxp_media_default; 780 nmedia = sizeof(fxp_media_default) / sizeof(fxp_media_default[0]); 781 defmedia = FXP_MEDIA_DEFAULT_DEFMEDIA; 782 783 for (i = 0; i < NFXPMEDIA; i++) { 784 if (sc->phy_primary_device == fxp_media[i].fsm_phy) { 785 media = fxp_media[i].fsm_media; 786 nmedia = fxp_media[i].fsm_nmedia; 787 defmedia = fxp_media[i].fsm_defmedia; 788 } 789 } 790 791 ifmedia_init(&sc->sc_media, 0, fxp_mediachange, fxp_mediastatus); 792 for (i = 0; i < nmedia; i++) { 793 if (IFM_SUBTYPE(media[i]) == IFM_100_TX && sc->phy_10Mbps_only) 794 continue; 795 ifmedia_add(&sc->sc_media, media[i], 0, NULL); 796 } 797 ifmedia_set(&sc->sc_media, defmedia); 798 799 return (0); 800 801 fail: 802 printf(FXP_FORMAT ": Failed to malloc memory\n", FXP_ARGS(sc)); 803 if (sc->cbl_base) 804 free(sc->cbl_base, M_DEVBUF); 805 if (sc->fxp_stats) 806 free(sc->fxp_stats, M_DEVBUF); 807 if (sc->mcsp) 808 free(sc->mcsp, M_DEVBUF); 809 /* frees entire chain */ 810 if (sc->rfa_headm) 811 m_freem(sc->rfa_headm); 812 813 return (ENOMEM); 814} 815 816/* 817 * From NetBSD: 818 * 819 * Figure out EEPROM size. 820 * 821 * 559's can have either 64-word or 256-word EEPROMs, the 558 822 * datasheet only talks about 64-word EEPROMs, and the 557 datasheet 823 * talks about the existance of 16 to 256 word EEPROMs. 824 * 825 * The only known sizes are 64 and 256, where the 256 version is used 826 * by CardBus cards to store CIS information. 827 * 828 * The address is shifted in msb-to-lsb, and after the last 829 * address-bit the EEPROM is supposed to output a `dummy zero' bit, 830 * after which follows the actual data. We try to detect this zero, by 831 * probing the data-out bit in the EEPROM control register just after 832 * having shifted in a bit. If the bit is zero, we assume we've 833 * shifted enough address bits. The data-out should be tri-state, 834 * before this, which should translate to a logical one. 
835 * 836 * Other ways to do this would be to try to read a register with known 837 * contents with a varying number of address bits, but no such 838 * register seem to be available. The high bits of register 10 are 01 839 * on the 558 and 559, but apparently not on the 557. 840 * 841 * The Linux driver computes a checksum on the EEPROM data, but the 842 * value of this checksum is not very well documented. 843 */ 844static void 845fxp_autosize_eeprom(sc) 846 struct fxp_softc *sc; 847{ 848 u_int16_t reg; 849 int x; 850 851 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS); 852 /* 853 * Shift in read opcode. 854 */ 855 for (x = 3; x > 0; x--) { 856 if (FXP_EEPROM_OPC_READ & (1 << (x - 1))) { 857 reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI; 858 } else { 859 reg = FXP_EEPROM_EECS; 860 } 861 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg); 862 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 863 reg | FXP_EEPROM_EESK); 864 DELAY(1); 865 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg); 866 DELAY(1); 867 } 868 /* 869 * Shift in address. 870 * Wait for the dummy zero following a correct address shift. 871 */ 872 for (x = 1; x <= 8; x++) { 873 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS); 874 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 875 FXP_EEPROM_EECS | FXP_EEPROM_EESK); 876 DELAY(1); 877 if ((CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO) == 0) 878 break; 879 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS); 880 DELAY(1); 881 } 882 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0); 883 DELAY(1); 884 sc->eeprom_size = x; 885} 886/* 887 * Read from the serial EEPROM. Basically, you manually shift in 888 * the read opcode (one bit at a time) and then shift in the address, 889 * and then you shift out the data (all of this one bit at a time). 890 * The word size is 16 bits, so you have to provide the address for 891 * every 16 bits of data. 
892 */ 893static void 894fxp_read_eeprom(sc, data, offset, words) 895 struct fxp_softc *sc; 896 u_short *data; 897 int offset; 898 int words; 899{ 900 u_int16_t reg; 901 int i, x; 902 903 for (i = 0; i < words; i++) { 904 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS); 905 /* 906 * Shift in read opcode. 907 */ 908 for (x = 3; x > 0; x--) { 909 if (FXP_EEPROM_OPC_READ & (1 << (x - 1))) { 910 reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI; 911 } else { 912 reg = FXP_EEPROM_EECS; 913 } 914 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg); 915 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 916 reg | FXP_EEPROM_EESK); 917 DELAY(1); 918 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg); 919 DELAY(1); 920 } 921 /* 922 * Shift in address. 923 */ 924 for (x = sc->eeprom_size; x > 0; x--) { 925 if ((i + offset) & (1 << (x - 1))) { 926 reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI; 927 } else { 928 reg = FXP_EEPROM_EECS; 929 } 930 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg); 931 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 932 reg | FXP_EEPROM_EESK); 933 DELAY(1); 934 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg); 935 DELAY(1); 936 } 937 reg = FXP_EEPROM_EECS; 938 data[i] = 0; 939 /* 940 * Shift out data. 941 */ 942 for (x = 16; x > 0; x--) { 943 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 944 reg | FXP_EEPROM_EESK); 945 DELAY(1); 946 if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & 947 FXP_EEPROM_EEDO) 948 data[i] |= (1 << (x - 1)); 949 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg); 950 DELAY(1); 951 } 952 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0); 953 DELAY(1); 954 } 955} 956 957/* 958 * Start packet transmission on the interface. 959 */ 960static void 961fxp_start(ifp) 962 struct ifnet *ifp; 963{ 964 struct fxp_softc *sc = ifp->if_softc; 965 struct fxp_cb_tx *txp; 966 967#if !defined(__NetBSD__) 968 FXP_LOCK(sc, s); 969#endif 970 /* 971 * See if we need to suspend xmit until the multicast filter 972 * has been reprogrammed (which can only be done at the head 973 * of the command chain). 
 */
	if (sc->need_mcsetup) {
		FXP_UNLOCK(sc, s);
		return;
	}

	txp = NULL;

	/*
	 * We're finished if there is nothing more to add to the list or if
	 * we're all filled up with buffers to transmit.
	 * NOTE: One TxCB is reserved to guarantee that fxp_mc_setup() can add
	 * a NOP command when needed.
	 */
	while (ifp->if_snd.ifq_head != NULL && sc->tx_queued < FXP_NTXCB - 1) {
		struct mbuf *m, *mb_head;
		int segment;

		/*
		 * Grab a packet to transmit.
		 */
		IF_DEQUEUE(&ifp->if_snd, mb_head);

		/*
		 * Get pointer to next available tx desc.
		 */
		txp = sc->cbl_last->next;

		/*
		 * Go through each of the mbufs in the chain and initialize
		 * the transmit buffer descriptors with the physical address
		 * and size of the mbuf.
		 */
tbdinit:
		for (m = mb_head, segment = 0; m != NULL; m = m->m_next) {
			if (m->m_len != 0) {
				if (segment == FXP_NTXSEG)
					break;
				txp->tbd[segment].tb_addr =
				    vtophys(mtod(m, vm_offset_t));
				txp->tbd[segment].tb_size = m->m_len;
				segment++;
			}
		}
		if (m != NULL) {
			struct mbuf *mn;

			/*
			 * We ran out of segments. We have to recopy this mbuf
			 * chain first. Bail out if we can't get the new buffers.
			 * The copy is linearized into a single mbuf (cluster if
			 * the packet doesn't fit in MHLEN), then we retry the
			 * descriptor fill from the top.
			 */
			MGETHDR(mn, M_DONTWAIT, MT_DATA);
			if (mn == NULL) {
				m_freem(mb_head);
				break;
			}
			if (mb_head->m_pkthdr.len > MHLEN) {
				MCLGET(mn, M_DONTWAIT);
				if ((mn->m_flags & M_EXT) == 0) {
					m_freem(mn);
					m_freem(mb_head);
					break;
				}
			}
			m_copydata(mb_head, 0, mb_head->m_pkthdr.len,
			    mtod(mn, caddr_t));
			mn->m_pkthdr.len = mn->m_len = mb_head->m_pkthdr.len;
			m_freem(mb_head);
			mb_head = mn;
			goto tbdinit;
		}

		txp->tbd_number = segment;
		txp->mb_head = mb_head;
		txp->cb_status = 0;
		if (sc->tx_queued != FXP_CXINT_THRESH - 1) {
			txp->cb_command =
			    FXP_CB_COMMAND_XMIT | FXP_CB_COMMAND_SF | FXP_CB_COMMAND_S;
		} else {
			/* Request a completion interrupt at the threshold. */
			txp->cb_command =
			    FXP_CB_COMMAND_XMIT | FXP_CB_COMMAND_SF | FXP_CB_COMMAND_S | FXP_CB_COMMAND_I;
			/*
			 * Set a 5 second timer just in case we don't hear from the
			 * card again.
			 */
			ifp->if_timer = 5;
		}
		txp->tx_threshold = tx_threshold;

		/*
		 * Advance the end of list forward.
		 */

#ifdef __alpha__
		/*
		 * On platforms which can't access memory in 16-bit
		 * granularities, we must prevent the card from DMA'ing
		 * up the status while we update the command field.
		 * This could cause us to overwrite the completion status.
		 */
		atomic_clear_short(&sc->cbl_last->cb_command,
		    FXP_CB_COMMAND_S);
#else
		sc->cbl_last->cb_command &= ~FXP_CB_COMMAND_S;
#endif /*__alpha__*/
		sc->cbl_last = txp;

		/*
		 * Advance the beginning of the list forward if there are
		 * no other packets queued (when nothing is queued, cbl_first
		 * sits on the last TxCB that was sent out).
		 */
		if (sc->tx_queued == 0)
			sc->cbl_first = txp;

		sc->tx_queued++;

		/*
		 * Pass packet to bpf if there is a listener.
		 */
		if (ifp->if_bpf)
			bpf_mtap(FXP_BPFTAP_ARG(ifp), mb_head);
	}

	/*
	 * We're finished.
 If we added to the list, issue a RESUME to get DMA
	 * going again if suspended.
	 */
	if (txp != NULL) {
		fxp_scb_wait(sc);
		CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_RESUME);
	}
#if !defined(__NetBSD__)
	FXP_UNLOCK(sc, s);
#endif
}

/*
 * Process interface interrupts.
 */
static FXP_INTR_TYPE
fxp_intr(arg)
	void *arg;
{
	struct fxp_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_if;
	u_int8_t statack;
#if defined(__NetBSD__)
	int claimed = 0;
#else

	FXP_LOCK(sc, s);
#endif

	while ((statack = CSR_READ_1(sc, FXP_CSR_SCB_STATACK)) != 0) {
#if defined(__NetBSD__)
		claimed = 1;
#endif
		/*
		 * First ACK all the interrupts in this pass.
		 */
		CSR_WRITE_1(sc, FXP_CSR_SCB_STATACK, statack);

		/*
		 * Free any finished transmit mbuf chains.
		 *
		 * Handle the CNA event like a CXTNO event. It used to
		 * be that this event (control unit not ready) was not
		 * encountered, but it is now with the SMPng modifications.
		 * The exact sequence of events that occur when the interface
		 * is brought up are different now, and if this event
		 * goes unhandled, the configuration/rxfilter setup sequence
		 * can stall for several seconds. The result is that no
		 * packets go out onto the wire for about 5 to 10 seconds
		 * after the interface is ifconfig'ed for the first time.
		 */
		if (statack & (FXP_SCB_STATACK_CXTNO | FXP_SCB_STATACK_CNA)) {
			struct fxp_cb_tx *txp;

			/* Walk completed TxCBs and release their mbufs. */
			for (txp = sc->cbl_first; sc->tx_queued &&
			    (txp->cb_status & FXP_CB_STATUS_C) != 0;
			    txp = txp->next) {
				if (txp->mb_head != NULL) {
					m_freem(txp->mb_head);
					txp->mb_head = NULL;
				}
				sc->tx_queued--;
			}
			sc->cbl_first = txp;
			ifp->if_timer = 0;
			if (sc->tx_queued == 0) {
				/* Run a deferred multicast setup, if any. */
				if (sc->need_mcsetup)
					fxp_mc_setup(sc);
			}
			/*
			 * Try to start more packets transmitting.
			 */
			if (ifp->if_snd.ifq_head != NULL)
				fxp_start(ifp);
		}
		/*
		 * Process receiver interrupts. If a no-resource (RNR)
		 * condition exists, get whatever packets we can and
		 * re-start the receiver.
		 */
		if (statack & (FXP_SCB_STATACK_FR | FXP_SCB_STATACK_RNR)) {
			struct mbuf *m;
			struct fxp_rfa *rfa;
rcvloop:
			/* The RFA lives inside the cluster, offset by the
			 * alignment fudge (see RFA_ALIGNMENT_FUDGE). */
			m = sc->rfa_headm;
			rfa = (struct fxp_rfa *)(m->m_ext.ext_buf +
			    RFA_ALIGNMENT_FUDGE);

			if (rfa->rfa_status & FXP_RFA_STATUS_C) {
				/*
				 * Remove first packet from the chain.
				 */
				sc->rfa_headm = m->m_next;
				m->m_next = NULL;

				/*
				 * Add a new buffer to the receive chain.
				 * If this fails, the old buffer is recycled
				 * instead.
				 */
				if (fxp_add_rfabuf(sc, m) == 0) {
					struct ether_header *eh;
					int total_len;

					total_len = rfa->actual_size &
					    (MCLBYTES - 1);
					if (total_len <
					    sizeof(struct ether_header)) {
						m_freem(m);
						goto rcvloop;
					}
					m->m_pkthdr.rcvif = ifp;
					m->m_pkthdr.len = m->m_len = total_len;
					eh = mtod(m, struct ether_header *);
					m->m_data +=
					    sizeof(struct ether_header);
					m->m_len -=
					    sizeof(struct ether_header);
					m->m_pkthdr.len = m->m_len;
					ether_input(ifp, eh, m);
				}
				goto rcvloop;
			}
			if (statack & FXP_SCB_STATACK_RNR) {
				fxp_scb_wait(sc);
				CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL,
				    vtophys(sc->rfa_headm->m_ext.ext_buf) +
				    RFA_ALIGNMENT_FUDGE);
				CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND,
				    FXP_SCB_COMMAND_RU_START);
			}
		}
	}
#if defined(__NetBSD__)
	return (claimed);
#else
	FXP_UNLOCK(sc, s);
#endif
}

/*
 * Update packet in/out/collision statistics. The i82557 doesn't
 * allow you to access these counters without doing a fairly
 * expensive DMA to get _all_ of the statistics it maintains, so
 * we do this operation here only once per second. The statistics
 * counters in the kernel are updated from the previous dump-stats
 * DMA and then a new dump-stats DMA is started. The on-chip
 * counters are zeroed when the DMA completes. If we can't start
 * the DMA immediately, we don't wait - we just prepare to read
 * them again next time.
 */
static void
fxp_stats_update(arg)
	void *arg;
{
	struct fxp_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_if;
	struct fxp_stats *sp = sc->fxp_stats;
	struct fxp_cb_tx *txp;
	FXP_SPLVAR(s)

	/* Fold in the counters from the previous dump-stats DMA. */
	ifp->if_opackets += sp->tx_good;
	ifp->if_collisions += sp->tx_total_collisions;
	if (sp->rx_good) {
		ifp->if_ipackets += sp->rx_good;
		sc->rx_idle_secs = 0;
	} else {
		/*
		 * Receiver's been idle for another second.
		 */
		sc->rx_idle_secs++;
	}
	ifp->if_ierrors +=
	    sp->rx_crc_errors +
	    sp->rx_alignment_errors +
	    sp->rx_rnr_errors +
	    sp->rx_overrun_errors;
	/*
	 * If any transmit underruns occurred, bump up the transmit
	 * threshold by another 512 bytes (64 * 8).
	 */
	if (sp->tx_underruns) {
		ifp->if_oerrors += sp->tx_underruns;
		if (tx_threshold < 192)
			tx_threshold += 64;
	}
	FXP_LOCK(sc, s);
	/*
	 * Release any xmit buffers that have completed DMA. This isn't
	 * strictly necessary to do here, but it's advantageous for mbufs
	 * with external storage to be released in a timely manner rather
	 * than being deferred for a potentially long time. This limits
	 * the delay to a maximum of one second.
	 */
	for (txp = sc->cbl_first; sc->tx_queued &&
	    (txp->cb_status & FXP_CB_STATUS_C) != 0;
	    txp = txp->next) {
		if (txp->mb_head != NULL) {
			m_freem(txp->mb_head);
			txp->mb_head = NULL;
		}
		sc->tx_queued--;
	}
	sc->cbl_first = txp;
	/*
	 * If we haven't received any packets in FXP_MAX_RX_IDLE seconds,
	 * then assume the receiver has locked up and attempt to clear
	 * the condition by reprogramming the multicast filter. This is
	 * a work-around for a bug in the 82557 where the receiver locks
	 * up if it gets certain types of garbage in the synchronization
	 * bits prior to the packet header.  This bug is supposed to only
	 * occur in 10Mbps mode, but has been seen to occur in 100Mbps
	 * mode as well (perhaps due to a 10/100 speed transition).
	 */
	if (sc->rx_idle_secs > FXP_MAX_RX_IDLE) {
		sc->rx_idle_secs = 0;
		fxp_mc_setup(sc);
	}
	/*
	 * If there is no pending command, start another stats
	 * dump. Otherwise punt for now.
	 */
	if (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) == 0) {
		/*
		 * Start another stats dump.
		 */
		CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND,
		    FXP_SCB_COMMAND_CU_DUMPRESET);
	} else {
		/*
		 * A previous command is still waiting to be accepted.
		 * Just zero our copy of the stats and wait for the
		 * next timer event to update them.
		 */
		sp->tx_good = 0;
		sp->tx_underruns = 0;
		sp->tx_total_collisions = 0;

		sp->rx_good = 0;
		sp->rx_crc_errors = 0;
		sp->rx_alignment_errors = 0;
		sp->rx_rnr_errors = 0;
		sp->rx_overrun_errors = 0;
	}
	FXP_UNLOCK(sc, s);
	/*
	 * Schedule another timeout one second from now.
	 */
	sc->stat_ch = timeout(fxp_stats_update, sc, hz);
}

/*
 * Stop the interface. Cancels the statistics updater and resets
 * the interface.
 */
static void
fxp_stop(sc)
	struct fxp_softc *sc;
{
	struct ifnet *ifp = &sc->sc_if;
	struct fxp_cb_tx *txp;
	int i;

#if !defined(__NetBSD__)
	FXP_LOCK(sc, s);
#endif

	/*
	 * Cancel stats updater.
	 */
	untimeout(fxp_stats_update, sc, sc->stat_ch);

	/*
	 * Issue software reset
	 */
	CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET);
	DELAY(10);

	/*
	 * Release any xmit buffers.
	 */
	txp = sc->cbl_base;
	if (txp != NULL) {
		for (i = 0; i < FXP_NTXCB; i++) {
			if (txp[i].mb_head != NULL) {
				m_freem(txp[i].mb_head);
				txp[i].mb_head = NULL;
			}
		}
	}
	sc->tx_queued = 0;

	/*
	 * Free all the receive buffers then reallocate/reinitialize
	 */
	if (sc->rfa_headm != NULL)
		m_freem(sc->rfa_headm);
	sc->rfa_headm = NULL;
	sc->rfa_tailm = NULL;
	for (i = 0; i < FXP_NRFABUFS; i++) {
		if (fxp_add_rfabuf(sc, NULL) != 0) {
			/*
			 * This "can't happen" - we're at splimp()
			 * and we just freed all the buffers we need
			 * above.
			 */
			panic("fxp_stop: no buffers!");
		}
	}

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
#if !defined(__NetBSD__)
	FXP_UNLOCK(sc, s);
#endif
}

/*
 * Watchdog/transmission transmit timeout handler. Called when a
 * transmission is started on the interface, but no interrupt is
 * received before the timeout. This usually indicates that the
 * card has wedged for some reason.
 */
static void
fxp_watchdog(ifp)
	struct ifnet *ifp;
{
	struct fxp_softc *sc = ifp->if_softc;

	printf(FXP_FORMAT ": device timeout\n", FXP_ARGS(sc));
	ifp->if_oerrors++;

	/* Reset and reinitialize the chip to recover. */
	fxp_init(sc);
}

/*
 * Initialize the interface: reset the chip, program its
 * configuration block, load the station address, rebuild the TxCB
 * ring, hand the RFA to the receive unit, select media, and start
 * the periodic statistics updater.
 */
static void
fxp_init(xsc)
	void *xsc;
{
	struct fxp_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_if;
	struct fxp_cb_config *cbp;
	struct fxp_cb_ias *cb_ias;
	struct fxp_cb_tx *txp;
	int i, prm;
	FXP_SPLVAR(s)

	FXP_LOCK(sc, s);
	/*
	 * Cancel any pending I/O
	 */
	fxp_stop(sc);

	prm = (ifp->if_flags & IFF_PROMISC) ? 1 : 0;

	/*
	 * Initialize base of CBL and RFA memory. Loading with zero
	 * sets it up for regular linear addressing.
	 */
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, 0);
	CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_BASE);

	fxp_scb_wait(sc);
	CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_RU_BASE);

	/*
	 * Initialize base of dump-stats buffer.
	 */
	fxp_scb_wait(sc);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, vtophys(sc->fxp_stats));
	CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_DUMP_ADR);

	/*
	 * We temporarily use memory that contains the TxCB list to
	 * construct the config CB. The TxCB list memory is rebuilt
	 * later.
	 */
	cbp = (struct fxp_cb_config *) sc->cbl_base;

	/*
	 * This bcopy is kind of disgusting, but there are a bunch of must be
	 * zero and must be one bits in this structure and this is the easiest
	 * way to initialize them all to proper values.
	 */
	bcopy(fxp_cb_config_template,
	    (void *)(uintptr_t)(volatile void *)&cbp->cb_status,
	    sizeof(fxp_cb_config_template));

	cbp->cb_status =	0;
	cbp->cb_command =	FXP_CB_COMMAND_CONFIG | FXP_CB_COMMAND_EL;
	cbp->link_addr =	-1;	/* (no) next command */
	cbp->byte_count =	22;	/* (22) bytes to config */
	cbp->rx_fifo_limit =	8;	/* rx fifo threshold (32 bytes) */
	cbp->tx_fifo_limit =	0;	/* tx fifo threshold (0 bytes) */
	cbp->adaptive_ifs =	0;	/* (no) adaptive interframe spacing */
	cbp->rx_dma_bytecount =	0;	/* (no) rx DMA max */
	cbp->tx_dma_bytecount =	0;	/* (no) tx DMA max */
	cbp->dma_bce =		0;	/* (disable) dma max counters */
	cbp->late_scb =		0;	/* (don't) defer SCB update */
	cbp->tno_int =		0;	/* (disable) tx not okay interrupt */
	cbp->ci_int =		1;	/* interrupt on CU idle */
	cbp->save_bf =		prm;	/* save bad frames */
	cbp->disc_short_rx =	!prm;	/* discard short packets */
	cbp->underrun_retry =	1;	/* retry mode (1) on DMA underrun */
	cbp->mediatype =	!sc->phy_10Mbps_only; /* interface mode */
	cbp->nsai =		1;	/* (don't) disable source addr insert */
	cbp->preamble_length =	2;	/* (7 byte) preamble */
	cbp->loopback =		0;	/* (don't) loopback */
	cbp->linear_priority =	0;	/* (normal CSMA/CD operation) */
	cbp->linear_pri_mode =	0;	/* (wait after xmit only) */
	cbp->interfrm_spacing =	6;	/* (96 bits of) interframe spacing */
	cbp->promiscuous =	prm;	/* promiscuous mode */
	cbp->bcast_disable =	0;	/* (don't) disable broadcasts */
	cbp->crscdt =		0;	/* (CRS only) */
	cbp->stripping =	!prm;	/* truncate rx packet to byte count */
	cbp->padding =		1;	/* (do) pad short tx packets */
	cbp->rcv_crc_xfer =	0;	/* (don't) xfer CRC to host */
	cbp->force_fdx =	0;	/* (don't) force full duplex */
	cbp->fdx_pin_en =	1;	/* (enable) FDX# pin */
	cbp->multi_ia =		0;	/* (don't) accept multiple IAs */
	cbp->mc_all =		sc->all_mcasts;/* accept all multicasts */

	/*
	 * Start the config command/DMA.
	 */
	fxp_scb_wait(sc);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, vtophys(&cbp->cb_status));
	CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_START);
	/*
	 * ...and wait for it to complete.
	 * NOTE(review): unbounded busy-wait; spins forever if the device
	 * never sets the C (complete) bit.
	 */
	while (!(cbp->cb_status & FXP_CB_STATUS_C));

	/*
	 * Now initialize the station address. Temporarily use the TxCB
	 * memory area like we did above for the config CB.
	 */
	cb_ias = (struct fxp_cb_ias *) sc->cbl_base;
	cb_ias->cb_status = 0;
	cb_ias->cb_command = FXP_CB_COMMAND_IAS | FXP_CB_COMMAND_EL;
	cb_ias->link_addr = -1;
#if defined(__NetBSD__)
	bcopy(LLADDR(ifp->if_sadl), (void *)cb_ias->macaddr, 6);
#else
	bcopy(sc->arpcom.ac_enaddr,
	    (void *)(uintptr_t)(volatile void *)cb_ias->macaddr,
	    sizeof(sc->arpcom.ac_enaddr));
#endif /* __NetBSD__ */

	/*
	 * Start the IAS (Individual Address Setup) command/DMA.
	 * NOTE(review): no SCB_GENERAL write here — relies on the pointer
	 * programmed for the config command above, which also targeted
	 * cbl_base. Confirm against the i82557 SCB documentation.
	 */
	fxp_scb_wait(sc);
	CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_START);
	/* ...and wait for it to complete (unbounded busy-wait, as above). */
	while (!(cb_ias->cb_status & FXP_CB_STATUS_C));

	/*
	 * Initialize transmit control block (TxCB) list.
	 * Each TxCB starts as a completed NOP, circularly linked both in
	 * bus-address space (link_addr) and virtually (next).
	 */

	txp = sc->cbl_base;
	bzero(txp, sizeof(struct fxp_cb_tx) * FXP_NTXCB);
	for (i = 0; i < FXP_NTXCB; i++) {
		txp[i].cb_status = FXP_CB_STATUS_C | FXP_CB_STATUS_OK;
		txp[i].cb_command = FXP_CB_COMMAND_NOP;
		txp[i].link_addr = vtophys(&txp[(i + 1) & FXP_TXCB_MASK].cb_status);
		txp[i].tbd_array_addr = vtophys(&txp[i].tbd[0]);
		txp[i].next = &txp[(i + 1) & FXP_TXCB_MASK];
	}
	/*
	 * Set the suspend flag on the first TxCB and start the control
	 * unit. It will execute the NOP and then suspend.
	 */
	txp->cb_command = FXP_CB_COMMAND_NOP | FXP_CB_COMMAND_S;
	sc->cbl_first = sc->cbl_last = txp;
	sc->tx_queued = 1;

	fxp_scb_wait(sc);
	CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_START);

	/*
	 * Initialize receiver buffer area - RFA.
	 */
	fxp_scb_wait(sc);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL,
	    vtophys(sc->rfa_headm->m_ext.ext_buf) + RFA_ALIGNMENT_FUDGE);
	CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_RU_START);

	/*
	 * Set current media.
	 */
	fxp_set_media(sc, sc->sc_media.ifm_cur->ifm_media);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	FXP_UNLOCK(sc, s);

	/*
	 * Start stats updater.
	 */
	sc->stat_ch = timeout(fxp_stats_update, sc, hz);
}

/*
 * Program the PHY for the requested media. For PHYs without a
 * programming interface (Seeq 80c24) this is a no-op.
 */
static void
fxp_set_media(sc, media)
	struct fxp_softc *sc;
	int media;
{

	switch (sc->phy_primary_device) {
	case FXP_PHY_DP83840:
	case FXP_PHY_DP83840A:
		fxp_mdi_write(sc, sc->phy_primary_addr, FXP_DP83840_PCR,
		    fxp_mdi_read(sc, sc->phy_primary_addr, FXP_DP83840_PCR) |
		    FXP_DP83840_PCR_LED4_MODE |	/* LED4 always indicates duplex */
		    FXP_DP83840_PCR_F_CONNECT |	/* force link disconnect bypass */
		    FXP_DP83840_PCR_BIT10);	/* XXX I have no idea */
		/* fall through */
	case FXP_PHY_82553A:
	case FXP_PHY_82553C: /* untested */
	case FXP_PHY_82555:
	case FXP_PHY_82555B:
		if (IFM_SUBTYPE(media) != IFM_AUTO) {
			int flags;

			/* Force speed/duplex; clear autonegotiation. */
			flags = (IFM_SUBTYPE(media) == IFM_100_TX) ?
			    FXP_PHY_BMCR_SPEED_100M : 0;
			flags |= (media & IFM_FDX) ?
			    FXP_PHY_BMCR_FULLDUPLEX : 0;
			fxp_mdi_write(sc, sc->phy_primary_addr,
			    FXP_PHY_BMCR,
			    (fxp_mdi_read(sc, sc->phy_primary_addr,
			    FXP_PHY_BMCR) &
			    ~(FXP_PHY_BMCR_AUTOEN | FXP_PHY_BMCR_SPEED_100M |
			    FXP_PHY_BMCR_FULLDUPLEX)) | flags);
		} else {
			/* Enable autonegotiation. */
			fxp_mdi_write(sc, sc->phy_primary_addr,
			    FXP_PHY_BMCR,
			    (fxp_mdi_read(sc, sc->phy_primary_addr,
			    FXP_PHY_BMCR) | FXP_PHY_BMCR_AUTOEN));
		}
		break;
	/*
	 * The Seeq 80c24 doesn't have a PHY programming interface, so do
	 * nothing.
	 */
	case FXP_PHY_80C24:
		break;
	default:
		printf(FXP_FORMAT
		    ": warning: unsupported PHY, type = %d, addr = %d\n",
		     FXP_ARGS(sc), sc->phy_primary_device,
		     sc->phy_primary_addr);
	}
}

/*
 * Change media according to request.
 */
int
fxp_mediachange(ifp)
	struct ifnet *ifp;
{
	struct fxp_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->sc_media;

	/* Only Ethernet media are supported. */
	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	fxp_set_media(sc, ifm->ifm_media);
	return (0);
}

/*
 * Notify the world which media we're using.
 */
void
fxp_mediastatus(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct fxp_softc *sc = ifp->if_softc;
	int flags, stsflags;

	switch (sc->phy_primary_device) {
	case FXP_PHY_82555:
	case FXP_PHY_82555B:
	case FXP_PHY_DP83840:
	case FXP_PHY_DP83840A:
		ifmr->ifm_status = IFM_AVALID; /* IFM_ACTIVE will be valid */
		ifmr->ifm_active = IFM_ETHER;
		/*
		 * the following is not an error.
		 * You need to read this register twice to get current
		 * status. This is correct documented behaviour, the
		 * first read gets latched values.
		 */
		stsflags = fxp_mdi_read(sc, sc->phy_primary_addr, FXP_PHY_STS);
		stsflags = fxp_mdi_read(sc, sc->phy_primary_addr, FXP_PHY_STS);
		if (stsflags & FXP_PHY_STS_LINK_STS)
			ifmr->ifm_status |= IFM_ACTIVE;

		/*
		 * If we are in auto mode, then try report the result.
		 */
		flags = fxp_mdi_read(sc, sc->phy_primary_addr, FXP_PHY_BMCR);
		if (flags & FXP_PHY_BMCR_AUTOEN) {
			ifmr->ifm_active |= IFM_AUTO; /* XXX presently 0 */
			if (stsflags & FXP_PHY_STS_AUTO_DONE) {
				/*
				 * Intel and National parts report
				 * differently on what they found.
				 */
				if ((sc->phy_primary_device == FXP_PHY_82555)
				    || (sc->phy_primary_device == FXP_PHY_82555B)) {
					flags = fxp_mdi_read(sc,
					    sc->phy_primary_addr,
					    FXP_PHY_USC);

					if (flags & FXP_PHY_USC_SPEED)
						ifmr->ifm_active |= IFM_100_TX;
					else
						ifmr->ifm_active |= IFM_10_T;

					if (flags & FXP_PHY_USC_DUPLEX)
						ifmr->ifm_active |= IFM_FDX;
				} else { /* it's National. only know speed */
					flags = fxp_mdi_read(sc,
					    sc->phy_primary_addr,
					    FXP_DP83840_PAR);

					if (flags & FXP_DP83840_PAR_SPEED_10)
						ifmr->ifm_active |= IFM_10_T;
					else
						ifmr->ifm_active |= IFM_100_TX;
				}
			}
		} else { /* in manual mode.. just report what we were set to */
			if (flags & FXP_PHY_BMCR_SPEED_100M)
				ifmr->ifm_active |= IFM_100_TX;
			else
				ifmr->ifm_active |= IFM_10_T;

			if (flags & FXP_PHY_BMCR_FULLDUPLEX)
				ifmr->ifm_active |= IFM_FDX;
		}
		break;

	case FXP_PHY_80C24:
	default:
		/* NOTE(review): ifm_status is left unset on this path. */
		ifmr->ifm_active = IFM_ETHER|IFM_MANUAL; /* XXX IFM_AUTO ? */
	}
}

/*
 * Add a buffer to the end of the RFA buffer list.
 * Return 0 if successful, 1 for failure. A failure results in
 * adding the 'oldm' (if non-NULL) on to the end of the list -
 * tossing out its old contents and recycling it.
 * The RFA struct is stuck at the beginning of mbuf cluster and the
 * data pointer is fixed up to point just past it.
 */
static int
fxp_add_rfabuf(sc, oldm)
	struct fxp_softc *sc;
	struct mbuf *oldm;
{
	u_int32_t v;
	struct mbuf *m;
	struct fxp_rfa *rfa, *p_rfa;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m != NULL) {
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			/* No cluster available; recycle the old mbuf. */
			m_freem(m);
			if (oldm == NULL)
				return 1;
			m = oldm;
			m->m_data = m->m_ext.ext_buf;
		}
	} else {
		/* No mbuf available; recycle the old one if we have it. */
		if (oldm == NULL)
			return 1;
		m = oldm;
		m->m_data = m->m_ext.ext_buf;
	}

	/*
	 * Move the data pointer up so that the incoming data packet
	 * will be 32-bit aligned.
	 */
	m->m_data += RFA_ALIGNMENT_FUDGE;

	/*
	 * Get a pointer to the base of the mbuf cluster and move
	 * data start past it.
	 */
	rfa = mtod(m, struct fxp_rfa *);
	m->m_data += sizeof(struct fxp_rfa);
	rfa->size = (u_int16_t)(MCLBYTES - sizeof(struct fxp_rfa) - RFA_ALIGNMENT_FUDGE);

	/*
	 * Initialize the rest of the RFA.  Note that since the RFA
	 * is misaligned, we cannot store values directly.  Instead,
	 * we use an optimized, inline copy.
	 */

	rfa->rfa_status = 0;
	rfa->rfa_control = FXP_RFA_CONTROL_EL;
	rfa->actual_size = 0;

	/* Terminate the link/RBD pointers (all-ones = null). */
	v = -1;
	fxp_lwcopy(&v, (volatile u_int32_t *) rfa->link_addr);
	fxp_lwcopy(&v, (volatile u_int32_t *) rfa->rbd_addr);

	/*
	 * If there are other buffers already on the list, attach this
	 * one to the end by fixing up the tail to point to this one.
	 */
	if (sc->rfa_headm != NULL) {
		p_rfa = (struct fxp_rfa *) (sc->rfa_tailm->m_ext.ext_buf +
		    RFA_ALIGNMENT_FUDGE);
		sc->rfa_tailm->m_next = m;
		v = vtophys(rfa);
		fxp_lwcopy(&v, (volatile u_int32_t *) p_rfa->link_addr);
		/* Previous tail is no longer end-of-list. */
		p_rfa->rfa_control = 0;
	} else {
		sc->rfa_headm = m;
	}
	sc->rfa_tailm = m;

	/* Non-zero (failure) exactly when the old mbuf was recycled. */
	return (m == oldm);
}

/*
 * Read a 16-bit PHY register via the MDI interface.
 * NOTE(review): the `volatile` qualifier on this return type is
 * meaningless for a function and is ignored by the compiler.
 */
static volatile int
fxp_mdi_read(sc, phy, reg)
	struct fxp_softc *sc;
	int phy;
	int reg;
{
	int count = 10000;
	int value;

	CSR_WRITE_4(sc, FXP_CSR_MDICONTROL,
	    (FXP_MDI_READ << 26) | (reg << 16) | (phy << 21));

	/* Poll the ready bit (bit 28), up to 10000 * 10us = 100ms. */
	while (((value = CSR_READ_4(sc, FXP_CSR_MDICONTROL)) & 0x10000000) == 0
	    && count--)
		DELAY(10);

	if (count <= 0)
		printf(FXP_FORMAT ": fxp_mdi_read: timed out\n",
		    FXP_ARGS(sc));

	return (value & 0xffff);
}

/*
 * Write a 16-bit PHY register via the MDI interface.
 */
static void
fxp_mdi_write(sc, phy, reg, value)
	struct fxp_softc *sc;
	int phy;
	int reg;
	int value;
{
	int count = 10000;

	CSR_WRITE_4(sc, FXP_CSR_MDICONTROL,
	    (FXP_MDI_WRITE << 26) | (reg << 16) | (phy << 21) |
	    (value & 0xffff));

	/* Poll the ready bit (bit 28), up to 10000 * 10us = 100ms. */
	while((CSR_READ_4(sc, FXP_CSR_MDICONTROL) & 0x10000000) == 0 &&
	    count--)
		DELAY(10);

	if (count <= 0)
		printf(FXP_FORMAT ": fxp_mdi_write: timed out\n",
		    FXP_ARGS(sc));
}

/*
 * Handle interface ioctls: addresses, flags, multicast membership
 * and media selection. Unknown commands return EINVAL.
 */
static int
fxp_ioctl(ifp, command, data)
	struct ifnet *ifp;
	FXP_IOCTLCMD_TYPE command;
	caddr_t data;
{
	struct fxp_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	FXP_SPLVAR(s)
	int error = 0;

	FXP_LOCK(sc, s);

	switch (command) {

	case SIOCSIFADDR:
#if !defined(__NetBSD__)
	case SIOCGIFADDR:
	case SIOCSIFMTU:
#endif
		error = ether_ioctl(ifp, command, data);
		break;

	case SIOCSIFFLAGS:
		sc->all_mcasts = (ifp->if_flags & IFF_ALLMULTI) ? 1 : 0;

		/*
		 * If interface is marked up and not running, then start it.
		 * If it is marked down and running, stop it.
		 * XXX If it's up then re-initialize it. This is so flags
		 * such as IFF_PROMISC are handled.
		 */
		if (ifp->if_flags & IFF_UP) {
			fxp_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				fxp_stop(sc);
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		sc->all_mcasts = (ifp->if_flags & IFF_ALLMULTI) ? 1 : 0;
#if defined(__NetBSD__)
		error = (command == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_ethercom) :
		    ether_delmulti(ifr, &sc->sc_ethercom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware
			 * filter accordingly.
			 */
			if (!sc->all_mcasts)
				fxp_mc_setup(sc);
			/*
			 * fxp_mc_setup() can turn on all_mcasts if we run
			 * out of space, so check it again rather than else {}.
			 */
			if (sc->all_mcasts)
				fxp_init(sc);
			error = 0;
		}
#else /* __FreeBSD__ */
		/*
		 * Multicast list has changed; set the hardware filter
		 * accordingly.
		 */
		if (!sc->all_mcasts)
			fxp_mc_setup(sc);
		/*
		 * fxp_mc_setup() can turn on sc->all_mcasts, so check it
		 * again rather than else {}.
		 */
		if (sc->all_mcasts)
			fxp_init(sc);
		error = 0;
#endif /* __NetBSD__ */
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, command);
		break;

	default:
		error = EINVAL;
	}
	FXP_UNLOCK(sc, s);
	return (error);
}

/*
 * Program the multicast filter.
 *
 * We have an artificial restriction that the multicast setup command
 * must be the first command in the chain, so we take steps to ensure
 * this. By requiring this, it allows us to keep up the performance of
 * the pre-initialized command ring (esp.
link pointers) by not actually
 * inserting the mcsetup command in the ring - i.e. its link pointer
 * points to the TxCB ring, but the mcsetup descriptor itself is not part
 * of it. We then can do 'CU_START' on the mcsetup descriptor and have it
 * lead into the regular TxCB ring when it completes.
 *
 * This function must be called at splimp.
 */
static void
fxp_mc_setup(sc)
	struct fxp_softc *sc;
{
	struct fxp_cb_mcs *mcsp = sc->mcsp;
	struct ifnet *ifp = &sc->sc_if;
	struct ifmultiaddr *ifma;
	int nmcasts;

	/*
	 * If there are queued commands, we must wait until they are all
	 * completed. If we are already waiting, then add a NOP command
	 * with interrupt option so that we're notified when all commands
	 * have been completed - fxp_start() ensures that no additional
	 * TX commands will be added when need_mcsetup is true.
	 */
	if (sc->tx_queued) {
		struct fxp_cb_tx *txp;

		/*
		 * need_mcsetup will be true if we are already waiting for the
		 * NOP command to be completed (see below). In this case, bail.
		 */
		if (sc->need_mcsetup)
			return;
		sc->need_mcsetup = 1;

		/*
		 * Add a NOP command with interrupt so that we are notified
		 * when all TX commands have been processed; fxp_intr() will
		 * then re-enter this function with the ring drained.
		 */
		txp = sc->cbl_last->next;
		txp->mb_head = NULL;
		txp->cb_status = 0;
		txp->cb_command = FXP_CB_COMMAND_NOP | FXP_CB_COMMAND_S | FXP_CB_COMMAND_I;
		/*
		 * Advance the end of list forward.
		 */
		sc->cbl_last->cb_command &= ~FXP_CB_COMMAND_S;
		sc->cbl_last = txp;
		sc->tx_queued++;
		/*
		 * Issue a resume in case the CU has just suspended.
		 */
		fxp_scb_wait(sc);
		CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_RESUME);
		/*
		 * Set a 5 second timer just in case we don't hear from the
		 * card again.
		 */
		ifp->if_timer = 5;

		return;
	}
	sc->need_mcsetup = 0;

	/*
	 * Initialize multicast setup descriptor. Its link pointer leads
	 * back into the regular TxCB ring (see the comment above).
	 */
	mcsp->next = sc->cbl_base;
	mcsp->mb_head = NULL;
	mcsp->cb_status = 0;
	mcsp->cb_command = FXP_CB_COMMAND_MCAS | FXP_CB_COMMAND_S | FXP_CB_COMMAND_I;
	mcsp->link_addr = vtophys(&sc->cbl_base->cb_status);

	/* Gather link-level multicast addresses into the descriptor. */
	nmcasts = 0;
	if (!sc->all_mcasts) {
		for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL;
		    ifma = ifma->ifma_link.le_next) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			/*
			 * Table overflow: fall back to hardware
			 * receive-all-multicasts mode.
			 */
			if (nmcasts >= MAXMCADDR) {
				sc->all_mcasts = 1;
				nmcasts = 0;
				break;
			}
			bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
			    (void *)(uintptr_t)(volatile void *)
				&sc->mcsp->mc_addr[nmcasts][0], 6);
			nmcasts++;
		}
	}
	mcsp->mc_cnt = nmcasts * 6;	/* byte count: 6 bytes per address */
	sc->cbl_first = sc->cbl_last = (struct fxp_cb_tx *) mcsp;
	sc->tx_queued = 1;

	/*
	 * Wait until command unit is not active. This should never
	 * be the case when nothing is queued, but make sure anyway.
	 * NOTE(review): unbounded busy-wait on hardware state.
	 */
	while ((CSR_READ_1(sc, FXP_CSR_SCB_RUSCUS) >> 6) ==
	    FXP_SCB_CUS_ACTIVE) ;

	/*
	 * Start the multicast setup command.
	 */
	fxp_scb_wait(sc);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, vtophys(&mcsp->cb_status));
	CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_START);

	/* Expect completion (or give up) within 2 seconds. */
	ifp->if_timer = 2;
	return;
}