if_fxp.c revision 50987
1/* 2 * Copyright (c) 1995, David Greenman 3 * All rights reserved. 4 * 5 * Modifications to support NetBSD and media selection: 6 * Copyright (c) 1997 Jason R. Thorpe. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice unmodified, this list of conditions, and the following 13 * disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 * SUCH DAMAGE. 
29 * 30 * $FreeBSD: head/sys/dev/fxp/if_fxp.c 50987 1999-09-06 06:15:18Z peter $ 31 */ 32 33/* 34 * Intel EtherExpress Pro/100B PCI Fast Ethernet driver 35 */ 36 37#include "bpf.h" 38 39#include <sys/param.h> 40#include <sys/systm.h> 41#include <sys/mbuf.h> 42#include <sys/malloc.h> 43#include <sys/kernel.h> 44#include <sys/socket.h> 45 46#include <net/if.h> 47#include <net/if_dl.h> 48#include <net/if_media.h> 49 50#ifdef NS 51#include <netns/ns.h> 52#include <netns/ns_if.h> 53#endif 54 55#if NBPF > 0 56#include <net/bpf.h> 57#endif 58 59#if defined(__NetBSD__) 60 61#include <sys/ioctl.h> 62#include <sys/errno.h> 63#include <sys/device.h> 64 65#include <net/if_dl.h> 66#include <net/if_ether.h> 67 68#include <netinet/if_inarp.h> 69 70#include <vm/vm.h> 71 72#include <machine/cpu.h> 73#include <machine/bus.h> 74#include <machine/intr.h> 75 76#include <dev/pci/if_fxpreg.h> 77#include <dev/pci/if_fxpvar.h> 78 79#include <dev/pci/pcivar.h> 80#include <dev/pci/pcireg.h> 81#include <dev/pci/pcidevs.h> 82 83#ifdef __alpha__ /* XXX */ 84/* XXX XXX NEED REAL DMA MAPPING SUPPORT XXX XXX */ 85#undef vtophys 86#define vtophys(va) alpha_XXX_dmamap((vm_offset_t)(va)) 87#endif /* __alpha__ */ 88 89#else /* __FreeBSD__ */ 90 91#include <sys/sockio.h> 92#include <sys/bus.h> 93#include <machine/bus.h> 94#include <sys/rman.h> 95#include <machine/resource.h> 96 97#include <net/ethernet.h> 98#include <net/if_arp.h> 99 100#include <vm/vm.h> /* for vtophys */ 101#include <vm/pmap.h> /* for vtophys */ 102#include <machine/clock.h> /* for DELAY */ 103 104#include <pci/pcivar.h> 105#include <pci/pcireg.h> /* for PCIM_CMD_xxx */ 106#include <pci/if_fxpreg.h> 107#include <pci/if_fxpvar.h> 108 109#endif /* __NetBSD__ */ 110 111#include "opt_bdg.h" 112#ifdef BRIDGE 113#include <net/if_types.h> 114#include <net/bridge.h> 115#endif 116 117/* 118 * NOTE! On the Alpha, we have an alignment constraint. The 119 * card DMAs the packet immediately following the RFA. 
 * However, the first thing in the packet is a 14-byte Ethernet header.
 * This means that the packet is misaligned.  To compensate,
 * we actually offset the RFA 2 bytes into the cluster.  This
 * aligns the packet after the Ethernet header at a 32-bit
 * boundary.  HOWEVER!  This means that the RFA is misaligned!
 */
#define RFA_ALIGNMENT_FUDGE	2

/*
 * Inline function to copy a 16-bit aligned 32-bit quantity.
 * The copy is done as two 16-bit halves, so only 16-bit alignment
 * of src and dst is required.
 */
static __inline void fxp_lwcopy __P((volatile u_int32_t *,
	volatile u_int32_t *));
static __inline void
fxp_lwcopy(src, dst)
	volatile u_int32_t *src, *dst;
{
	volatile u_int16_t *a = (volatile u_int16_t *)src;
	volatile u_int16_t *b = (volatile u_int16_t *)dst;

	b[0] = a[0];
	b[1] = a[1];
}

/*
 * Template for default configuration parameters.
 * See struct fxp_cb_config for the bit definitions.
 * Bytes 0..21 below are the configuration bytes proper; the
 * leading fields are the CB header (status/command/link).
 */
static u_char fxp_cb_config_template[] = {
	0x0, 0x0,		/* cb_status */
	0x80, 0x2,		/* cb_command */
	0xff, 0xff, 0xff, 0xff,	/* link_addr */
	0x16,	/*  0 */
	0x8,	/*  1 */
	0x0,	/*  2 */
	0x0,	/*  3 */
	0x0,	/*  4 */
	0x80,	/*  5 */
	0xb2,	/*  6 */
	0x3,	/*  7 */
	0x1,	/*  8 */
	0x0,	/*  9 */
	0x26,	/* 10 */
	0x0,	/* 11 */
	0x60,	/* 12 */
	0x0,	/* 13 */
	0xf2,	/* 14 */
	0x48,	/* 15 */
	0x0,	/* 16 */
	0x40,	/* 17 */
	0xf3,	/* 18 */
	0x0,	/* 19 */
	0x3f,	/* 20 */
	0x5	/* 21 */
};

/*
 * Supported media types: one entry per known PHY, mapping the PHY
 * type to the list of media it supports and its default medium.
 */
struct fxp_supported_media {
	const int	fsm_phy;	/* PHY type */
	const int	*fsm_media;	/* the media array */
	const int	fsm_nmedia;	/* the number of supported media */
	const int	fsm_defmedia;	/* default media for this PHY */
};

static const int fxp_media_standard[] = {
	IFM_ETHER|IFM_10_T,
	IFM_ETHER|IFM_10_T|IFM_FDX,
	IFM_ETHER|IFM_100_TX,
	IFM_ETHER|IFM_100_TX|IFM_FDX,
	IFM_ETHER|IFM_AUTO,
};
#define FXP_MEDIA_STANDARD_DEFMEDIA	(IFM_ETHER|IFM_AUTO)

static const int fxp_media_default[] = {
	IFM_ETHER|IFM_MANUAL,		/* XXX IFM_AUTO ? */
};
#define FXP_MEDIA_DEFAULT_DEFMEDIA	(IFM_ETHER|IFM_MANUAL)

static const struct fxp_supported_media fxp_media[] = {
	{ FXP_PHY_DP83840, fxp_media_standard,
	  sizeof(fxp_media_standard) / sizeof(fxp_media_standard[0]),
	  FXP_MEDIA_STANDARD_DEFMEDIA },
	{ FXP_PHY_DP83840A, fxp_media_standard,
	  sizeof(fxp_media_standard) / sizeof(fxp_media_standard[0]),
	  FXP_MEDIA_STANDARD_DEFMEDIA },
	{ FXP_PHY_82553A, fxp_media_standard,
	  sizeof(fxp_media_standard) / sizeof(fxp_media_standard[0]),
	  FXP_MEDIA_STANDARD_DEFMEDIA },
	{ FXP_PHY_82553C, fxp_media_standard,
	  sizeof(fxp_media_standard) / sizeof(fxp_media_standard[0]),
	  FXP_MEDIA_STANDARD_DEFMEDIA },
	{ FXP_PHY_82555, fxp_media_standard,
	  sizeof(fxp_media_standard) / sizeof(fxp_media_standard[0]),
	  FXP_MEDIA_STANDARD_DEFMEDIA },
	{ FXP_PHY_82555B, fxp_media_standard,
	  sizeof(fxp_media_standard) / sizeof(fxp_media_standard[0]),
	  FXP_MEDIA_STANDARD_DEFMEDIA },
	{ FXP_PHY_80C24, fxp_media_default,
	  sizeof(fxp_media_default) / sizeof(fxp_media_default[0]),
	  FXP_MEDIA_DEFAULT_DEFMEDIA },
};
#define NFXPMEDIA (sizeof(fxp_media) / sizeof(fxp_media[0]))

static int fxp_mediachange	__P((struct ifnet *));
static void fxp_mediastatus	__P((struct ifnet *, struct ifmediareq *));
static void fxp_set_media	__P((struct fxp_softc *, int));
static __inline void fxp_scb_wait	__P((struct fxp_softc *));
static FXP_INTR_TYPE fxp_intr	__P((void *));
static void fxp_start		__P((struct ifnet *));
static int fxp_ioctl		__P((struct ifnet *,
				    FXP_IOCTLCMD_TYPE, caddr_t));
static void fxp_init		__P((void *));
static void fxp_stop		__P((struct fxp_softc *));
static void fxp_watchdog	__P((struct ifnet *));
static int fxp_add_rfabuf	__P((struct fxp_softc *, struct mbuf *));
static int fxp_mdi_read		__P((struct fxp_softc *, int, int));
static void fxp_mdi_write	__P((struct fxp_softc *, int, int, int));
static void fxp_read_eeprom	__P((struct fxp_softc *, u_int16_t *,
				    int, int));
static int fxp_attach_common	__P((struct fxp_softc *, u_int8_t *));
static void fxp_stats_update	__P((void *));
static void fxp_mc_setup	__P((struct fxp_softc *));

/*
 * Set initial transmit threshold at 64 (512 bytes). This is
 * increased by 64 (512 bytes) at a time, to maximum of 192
 * (1536 bytes), if an underrun occurs.
 */
static int tx_threshold = 64;

/*
 * Number of transmit control blocks. This determines the number
 * of transmit buffers that can be chained in the CB list.
 * This must be a power of two.
 */
#define FXP_NTXCB	128

/*
 * Number of completed TX commands at which point an interrupt
 * will be generated to garbage collect the attached buffers.
 * Must be at least one less than FXP_NTXCB, and should be
 * enough less so that the transmitter doesn't become idle
 * during the buffer rundown (which would reduce performance).
 */
#define FXP_CXINT_THRESH 120

/*
 * TxCB list index mask. This is used to do list wrap-around.
 */
#define FXP_TXCB_MASK	(FXP_NTXCB - 1)

/*
 * Number of receive frame area buffers. These are large, so choose
 * wisely.
274 */ 275#define FXP_NRFABUFS 64 276 277/* 278 * Maximum number of seconds that the receiver can be idle before we 279 * assume it's dead and attempt to reset it by reprogramming the 280 * multicast filter. This is part of a work-around for a bug in the 281 * NIC. See fxp_stats_update(). 282 */ 283#define FXP_MAX_RX_IDLE 15 284 285/* 286 * Wait for the previous command to be accepted (but not necessarily 287 * completed). 288 */ 289static __inline void 290fxp_scb_wait(sc) 291 struct fxp_softc *sc; 292{ 293 int i = 10000; 294 295 while (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) && --i); 296} 297 298/************************************************************* 299 * Operating system-specific autoconfiguration glue 300 *************************************************************/ 301 302#if defined(__NetBSD__) 303 304#ifdef __BROKEN_INDIRECT_CONFIG 305static int fxp_match __P((struct device *, void *, void *)); 306#else 307static int fxp_match __P((struct device *, struct cfdata *, void *)); 308#endif 309static void fxp_attach __P((struct device *, struct device *, void *)); 310 311static void fxp_shutdown __P((void *)); 312 313/* Compensate for lack of a generic ether_ioctl() */ 314static int fxp_ether_ioctl __P((struct ifnet *, 315 FXP_IOCTLCMD_TYPE, caddr_t)); 316#define ether_ioctl fxp_ether_ioctl 317 318struct cfattach fxp_ca = { 319 sizeof(struct fxp_softc), fxp_match, fxp_attach 320}; 321 322struct cfdriver fxp_cd = { 323 NULL, "fxp", DV_IFNET 324}; 325 326/* 327 * Check if a device is an 82557. 
328 */ 329static int 330fxp_match(parent, match, aux) 331 struct device *parent; 332#ifdef __BROKEN_INDIRECT_CONFIG 333 void *match; 334#else 335 struct cfdata *match; 336#endif 337 void *aux; 338{ 339 struct pci_attach_args *pa = aux; 340 341 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_INTEL) 342 return (0); 343 344 switch (PCI_PRODUCT(pa->pa_id)) { 345 case PCI_PRODUCT_INTEL_82557: 346 return (1); 347 } 348 349 return (0); 350} 351 352static void 353fxp_attach(parent, self, aux) 354 struct device *parent, *self; 355 void *aux; 356{ 357 struct fxp_softc *sc = (struct fxp_softc *)self; 358 struct pci_attach_args *pa = aux; 359 pci_chipset_tag_t pc = pa->pa_pc; 360 pci_intr_handle_t ih; 361 const char *intrstr = NULL; 362 u_int8_t enaddr[6]; 363 struct ifnet *ifp; 364 365 /* 366 * Map control/status registers. 367 */ 368 if (pci_mapreg_map(pa, FXP_PCI_MMBA, PCI_MAPREG_TYPE_MEM, 0, 369 &sc->sc_st, &sc->sc_sh, NULL, NULL)) { 370 printf(": can't map registers\n"); 371 return; 372 } 373 printf(": Intel EtherExpress Pro 10/100B Ethernet\n"); 374 375 /* 376 * Allocate our interrupt. 377 */ 378 if (pci_intr_map(pc, pa->pa_intrtag, pa->pa_intrpin, 379 pa->pa_intrline, &ih)) { 380 printf("%s: couldn't map interrupt\n", sc->sc_dev.dv_xname); 381 return; 382 } 383 intrstr = pci_intr_string(pc, ih); 384 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, fxp_intr, sc); 385 if (sc->sc_ih == NULL) { 386 printf("%s: couldn't establish interrupt", 387 sc->sc_dev.dv_xname); 388 if (intrstr != NULL) 389 printf(" at %s", intrstr); 390 printf("\n"); 391 return; 392 } 393 printf("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr); 394 395 /* Do generic parts of attach. */ 396 if (fxp_attach_common(sc, enaddr)) { 397 /* Failed! */ 398 return; 399 } 400 401 printf("%s: Ethernet address %s%s\n", sc->sc_dev.dv_xname, 402 ether_sprintf(enaddr), sc->phy_10Mbps_only ? 
", 10Mbps" : ""); 403 404 ifp = &sc->sc_ethercom.ec_if; 405 bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ); 406 ifp->if_softc = sc; 407 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 408 ifp->if_ioctl = fxp_ioctl; 409 ifp->if_start = fxp_start; 410 ifp->if_watchdog = fxp_watchdog; 411 412 /* 413 * Attach the interface. 414 */ 415 if_attach(ifp); 416 /* 417 * Let the system queue as many packets as we have available 418 * TX descriptors. 419 */ 420 ifp->if_snd.ifq_maxlen = FXP_NTXCB - 1; 421 ether_ifattach(ifp, enaddr); 422#if NBPF > 0 423 bpfattach(&sc->sc_ethercom.ec_if.if_bpf, ifp, DLT_EN10MB, 424 sizeof(struct ether_header)); 425#endif 426 427 /* 428 * Add shutdown hook so that DMA is disabled prior to reboot. Not 429 * doing do could allow DMA to corrupt kernel memory during the 430 * reboot before the driver initializes. 431 */ 432 shutdownhook_establish(fxp_shutdown, sc); 433} 434 435/* 436 * Device shutdown routine. Called at system shutdown after sync. The 437 * main purpose of this routine is to shut off receiver DMA so that 438 * kernel memory doesn't get clobbered during warmboot. 439 */ 440static void 441fxp_shutdown(sc) 442 void *sc; 443{ 444 fxp_stop((struct fxp_softc *) sc); 445} 446 447static int 448fxp_ether_ioctl(ifp, cmd, data) 449 struct ifnet *ifp; 450 FXP_IOCTLCMD_TYPE cmd; 451 caddr_t data; 452{ 453 struct ifaddr *ifa = (struct ifaddr *) data; 454 struct fxp_softc *sc = ifp->if_softc; 455 456 switch (cmd) { 457 case SIOCSIFADDR: 458 ifp->if_flags |= IFF_UP; 459 460 switch (ifa->ifa_addr->sa_family) { 461#ifdef INET 462 case AF_INET: 463 fxp_init(sc); 464 arp_ifinit(ifp, ifa); 465 break; 466#endif 467#ifdef NS 468 case AF_NS: 469 { 470 register struct ns_addr *ina = &IA_SNS(ifa)->sns_addr; 471 472 if (ns_nullhost(*ina)) 473 ina->x_host = *(union ns_host *) 474 LLADDR(ifp->if_sadl); 475 else 476 bcopy(ina->x_host.c_host, LLADDR(ifp->if_sadl), 477 ifp->if_addrlen); 478 /* Set new address. 
*/ 479 fxp_init(sc); 480 break; 481 } 482#endif 483 default: 484 fxp_init(sc); 485 break; 486 } 487 break; 488 489 default: 490 return (EINVAL); 491 } 492 493 return (0); 494} 495 496#else /* __FreeBSD__ */ 497 498/* 499 * Return identification string if this is device is ours. 500 */ 501static int 502fxp_probe(device_t dev) 503{ 504 if ((pci_get_vendor(dev) == FXP_VENDORID_INTEL) && 505 (pci_get_device(dev) == FXP_DEVICEID_i82557)) { 506 device_set_desc(dev, "Intel EtherExpress Pro 10/100B Ethernet"); 507 return 0; 508 } 509 if ((pci_get_vendor(dev) == FXP_VENDORID_INTEL) && 510 (pci_get_device(dev) == FXP_DEVICEID_i82559)) { 511 device_set_desc(dev, "Intel InBusiness 10/100 Ethernet"); 512 return 0; 513 } 514 515 return ENXIO; 516} 517 518static int 519fxp_attach(device_t dev) 520{ 521 int error = 0; 522 struct fxp_softc *sc = device_get_softc(dev); 523 struct ifnet *ifp; 524 int s; 525 u_long val; 526 int rid; 527 528 callout_handle_init(&sc->stat_ch); 529 530 s = splimp(); 531 532 /* 533 * Enable bus mastering. 534 */ 535 val = pci_read_config(dev, PCIR_COMMAND, 2); 536 val |= (PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN); 537 pci_write_config(dev, PCIR_COMMAND, val, 2); 538 539 /* 540 * Map control/status registers. 541 */ 542 rid = FXP_PCI_MMBA; 543 sc->mem = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 544 0, ~0, 1, RF_ACTIVE); 545 if (!sc->mem) { 546 device_printf(dev, "could not map memory\n"); 547 error = ENXIO; 548 goto fail; 549 } 550 sc->csr = rman_get_virtual(sc->mem); /* XXX use bus_space */ 551 552 /* 553 * Allocate our interrupt. 
554 */ 555 rid = 0; 556 sc->irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1, 557 RF_SHAREABLE | RF_ACTIVE); 558 if (sc->irq == NULL) { 559 device_printf(dev, "could not map interrupt\n"); 560 error = ENXIO; 561 goto fail; 562 } 563 564 error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET, 565 fxp_intr, sc, &sc->ih); 566 if (error) { 567 device_printf(dev, "could not setup irq\n"); 568 goto fail; 569 } 570 571 /* Do generic parts of attach. */ 572 if (fxp_attach_common(sc, sc->arpcom.ac_enaddr)) { 573 /* Failed! */ 574 bus_teardown_intr(dev, sc->irq, sc->ih); 575 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq); 576 bus_release_resource(dev, SYS_RES_MEMORY, FXP_PCI_MMBA, sc->mem); 577 error = ENXIO; 578 goto fail; 579 } 580 581 device_printf(dev, "Ethernet address %6D%s\n", 582 sc->arpcom.ac_enaddr, ":", sc->phy_10Mbps_only ? ", 10Mbps" : ""); 583 584 ifp = &sc->arpcom.ac_if; 585 ifp->if_unit = device_get_unit(dev); 586 ifp->if_name = "fxp"; 587 ifp->if_output = ether_output; 588 ifp->if_baudrate = 100000000; 589 ifp->if_init = fxp_init; 590 ifp->if_softc = sc; 591 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 592 ifp->if_ioctl = fxp_ioctl; 593 ifp->if_start = fxp_start; 594 ifp->if_watchdog = fxp_watchdog; 595 596 /* 597 * Attach the interface. 598 */ 599 if_attach(ifp); 600 /* 601 * Let the system queue as many packets as we have available 602 * TX descriptors. 603 */ 604 ifp->if_snd.ifq_maxlen = FXP_NTXCB - 1; 605 ether_ifattach(ifp); 606#if NBPF > 0 607 bpfattach(ifp, DLT_EN10MB, sizeof(struct ether_header)); 608#endif 609 610 splx(s); 611 return 0; 612 613 fail: 614 splx(s); 615 return error; 616} 617 618/* 619 * Detach interface. 620 */ 621static int 622fxp_detach(device_t dev) 623{ 624 struct fxp_softc *sc = device_get_softc(dev); 625 int s; 626 627 s = splimp(); 628 629 /* 630 * Close down routes etc. 631 */ 632 if_detach(&sc->arpcom.ac_if); 633 634 /* 635 * Stop DMA and drop transmit queue. 
636 */ 637 fxp_stop(sc); 638 639 /* 640 * Deallocate resources. 641 */ 642 bus_teardown_intr(dev, sc->irq, sc->ih); 643 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq); 644 bus_release_resource(dev, SYS_RES_MEMORY, FXP_PCI_MMBA, sc->mem); 645 646 /* 647 * Free all the receive buffers. 648 */ 649 if (sc->rfa_headm != NULL) 650 m_freem(sc->rfa_headm); 651 652 /* 653 * Free all media structures. 654 */ 655 ifmedia_removeall(&sc->sc_media); 656 657 /* 658 * Free anciliary structures. 659 */ 660 free(sc->cbl_base, M_DEVBUF); 661 free(sc->fxp_stats, M_DEVBUF); 662 free(sc->mcsp, M_DEVBUF); 663 664 splx(s); 665 666 return 0; 667} 668 669/* 670 * Device shutdown routine. Called at system shutdown after sync. The 671 * main purpose of this routine is to shut off receiver DMA so that 672 * kernel memory doesn't get clobbered during warmboot. 673 */ 674static int 675fxp_shutdown(device_t dev) 676{ 677 /* 678 * Make sure that DMA is disabled prior to reboot. Not doing 679 * do could allow DMA to corrupt kernel memory during the 680 * reboot before the driver initializes. 681 */ 682 fxp_stop((struct fxp_softc *) device_get_softc(dev)); 683 return 0; 684} 685 686static device_method_t fxp_methods[] = { 687 /* Device interface */ 688 DEVMETHOD(device_probe, fxp_probe), 689 DEVMETHOD(device_attach, fxp_attach), 690 DEVMETHOD(device_detach, fxp_detach), 691 DEVMETHOD(device_shutdown, fxp_shutdown), 692 693 { 0, 0 } 694}; 695 696static driver_t fxp_driver = { 697 "fxp", 698 fxp_methods, 699 sizeof(struct fxp_softc), 700}; 701 702static devclass_t fxp_devclass; 703 704DRIVER_MODULE(fxp, pci, fxp_driver, fxp_devclass, 0, 0); 705 706#endif /* __NetBSD__ */ 707 708/************************************************************* 709 * End of operating system-specific autoconfiguration glue 710 *************************************************************/ 711 712/* 713 * Do generic parts of attach. 
 */
static int
fxp_attach_common(sc, enaddr)
	struct fxp_softc *sc;
	u_int8_t *enaddr;
{
	u_int16_t data;
	int i, nmedia, defmedia;
	const int *media;

	/*
	 * Reset to a stable state.
	 */
	CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET);
	DELAY(10);

	sc->cbl_base = malloc(sizeof(struct fxp_cb_tx) * FXP_NTXCB,
	    M_DEVBUF, M_NOWAIT);
	if (sc->cbl_base == NULL)
		goto fail;
	bzero(sc->cbl_base, sizeof(struct fxp_cb_tx) * FXP_NTXCB);

	sc->fxp_stats = malloc(sizeof(struct fxp_stats), M_DEVBUF, M_NOWAIT);
	if (sc->fxp_stats == NULL)
		goto fail;
	bzero(sc->fxp_stats, sizeof(struct fxp_stats));

	sc->mcsp = malloc(sizeof(struct fxp_cb_mcs), M_DEVBUF, M_NOWAIT);
	if (sc->mcsp == NULL)
		goto fail;

	/*
	 * Pre-allocate our receive buffers.
	 */
	for (i = 0; i < FXP_NRFABUFS; i++) {
		if (fxp_add_rfabuf(sc, NULL) != 0) {
			goto fail;
		}
	}

	/*
	 * Get info about the primary PHY from EEPROM word 6: per the
	 * masks below, bits 7:0 are the PHY address, bits 13:8 the
	 * PHY device type, and bit 15 flags a 10Mbps-only part.
	 */
	fxp_read_eeprom(sc, (u_int16_t *)&data, 6, 1);
	sc->phy_primary_addr = data & 0xff;
	sc->phy_primary_device = (data >> 8) & 0x3f;
	sc->phy_10Mbps_only = data >> 15;

	/*
	 * Read MAC address (EEPROM words 0-2, three 16-bit words).
	 */
	fxp_read_eeprom(sc, (u_int16_t *)enaddr, 0, 3);

	/*
	 * Initialize the media structures: start with the default
	 * table, then switch to the table for the detected PHY.
	 */

	media = fxp_media_default;
	nmedia = sizeof(fxp_media_default) / sizeof(fxp_media_default[0]);
	defmedia = FXP_MEDIA_DEFAULT_DEFMEDIA;

	for (i = 0; i < NFXPMEDIA; i++) {
		if (sc->phy_primary_device == fxp_media[i].fsm_phy) {
			media = fxp_media[i].fsm_media;
			nmedia = fxp_media[i].fsm_nmedia;
			defmedia = fxp_media[i].fsm_defmedia;
		}
	}

	ifmedia_init(&sc->sc_media, 0, fxp_mediachange, fxp_mediastatus);
	for (i = 0; i < nmedia; i++) {
		/* Skip 100Mbps media on 10Mbps-only hardware. */
		if (IFM_SUBTYPE(media[i]) == IFM_100_TX && sc->phy_10Mbps_only)
			continue;
		ifmedia_add(&sc->sc_media, media[i], 0, NULL);
	}
	ifmedia_set(&sc->sc_media, defmedia);

	return (0);

 fail:
	printf(FXP_FORMAT ": Failed to malloc memory\n", FXP_ARGS(sc));
	if (sc->cbl_base)
		free(sc->cbl_base, M_DEVBUF);
	if (sc->fxp_stats)
		free(sc->fxp_stats, M_DEVBUF);
	if (sc->mcsp)
		free(sc->mcsp, M_DEVBUF);
	/* frees entire chain */
	if (sc->rfa_headm)
		m_freem(sc->rfa_headm);

	return (ENOMEM);
}

/*
 * Read from the serial EEPROM. Basically, you manually shift in
 * the read opcode (one bit at a time) and then shift in the address,
 * and then you shift out the data (all of this one bit at a time).
 * The word size is 16 bits, so you have to provide the address for
 * every 16 bits of data.
 */
static void
fxp_read_eeprom(sc, data, offset, words)
	struct fxp_softc *sc;
	u_short *data;
	int offset;
	int words;
{
	u_int16_t reg;
	int i, x;

	for (i = 0; i < words; i++) {
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
		/*
		 * Shift in read opcode.
829 */ 830 for (x = 3; x > 0; x--) { 831 if (FXP_EEPROM_OPC_READ & (1 << (x - 1))) { 832 reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI; 833 } else { 834 reg = FXP_EEPROM_EECS; 835 } 836 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg); 837 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 838 reg | FXP_EEPROM_EESK); 839 DELAY(1); 840 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg); 841 DELAY(1); 842 } 843 /* 844 * Shift in address. 845 */ 846 for (x = 6; x > 0; x--) { 847 if ((i + offset) & (1 << (x - 1))) { 848 reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI; 849 } else { 850 reg = FXP_EEPROM_EECS; 851 } 852 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg); 853 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 854 reg | FXP_EEPROM_EESK); 855 DELAY(1); 856 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg); 857 DELAY(1); 858 } 859 reg = FXP_EEPROM_EECS; 860 data[i] = 0; 861 /* 862 * Shift out data. 863 */ 864 for (x = 16; x > 0; x--) { 865 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 866 reg | FXP_EEPROM_EESK); 867 DELAY(1); 868 if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & 869 FXP_EEPROM_EEDO) 870 data[i] |= (1 << (x - 1)); 871 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg); 872 DELAY(1); 873 } 874 CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0); 875 DELAY(1); 876 } 877} 878 879/* 880 * Start packet transmission on the interface. 881 */ 882static void 883fxp_start(ifp) 884 struct ifnet *ifp; 885{ 886 struct fxp_softc *sc = ifp->if_softc; 887 struct fxp_cb_tx *txp; 888 889 /* 890 * See if we need to suspend xmit until the multicast filter 891 * has been reprogrammed (which can only be done at the head 892 * of the command chain). 893 */ 894 if (sc->need_mcsetup) 895 return; 896 897 txp = NULL; 898 899 /* 900 * We're finished if there is nothing more to add to the list or if 901 * we're all filled up with buffers to transmit. 902 * NOTE: One TxCB is reserved to guarantee that fxp_mc_setup() can add 903 * a NOP command when needed. 
904 */ 905 while (ifp->if_snd.ifq_head != NULL && sc->tx_queued < FXP_NTXCB - 1) { 906 struct mbuf *m, *mb_head; 907 int segment; 908 909 /* 910 * Grab a packet to transmit. 911 */ 912 IF_DEQUEUE(&ifp->if_snd, mb_head); 913 914 /* 915 * Get pointer to next available tx desc. 916 */ 917 txp = sc->cbl_last->next; 918 919 /* 920 * Go through each of the mbufs in the chain and initialize 921 * the transmit buffer descriptors with the physical address 922 * and size of the mbuf. 923 */ 924tbdinit: 925 for (m = mb_head, segment = 0; m != NULL; m = m->m_next) { 926 if (m->m_len != 0) { 927 if (segment == FXP_NTXSEG) 928 break; 929 txp->tbd[segment].tb_addr = 930 vtophys(mtod(m, vm_offset_t)); 931 txp->tbd[segment].tb_size = m->m_len; 932 segment++; 933 } 934 } 935 if (m != NULL) { 936 struct mbuf *mn; 937 938 /* 939 * We ran out of segments. We have to recopy this mbuf 940 * chain first. Bail out if we can't get the new buffers. 941 */ 942 MGETHDR(mn, M_DONTWAIT, MT_DATA); 943 if (mn == NULL) { 944 m_freem(mb_head); 945 break; 946 } 947 if (mb_head->m_pkthdr.len > MHLEN) { 948 MCLGET(mn, M_DONTWAIT); 949 if ((mn->m_flags & M_EXT) == 0) { 950 m_freem(mn); 951 m_freem(mb_head); 952 break; 953 } 954 } 955 m_copydata(mb_head, 0, mb_head->m_pkthdr.len, 956 mtod(mn, caddr_t)); 957 mn->m_pkthdr.len = mn->m_len = mb_head->m_pkthdr.len; 958 m_freem(mb_head); 959 mb_head = mn; 960 goto tbdinit; 961 } 962 963 txp->tbd_number = segment; 964 txp->mb_head = mb_head; 965 txp->cb_status = 0; 966 if (sc->tx_queued != FXP_CXINT_THRESH - 1) { 967 txp->cb_command = 968 FXP_CB_COMMAND_XMIT | FXP_CB_COMMAND_SF | FXP_CB_COMMAND_S; 969 } else { 970 txp->cb_command = 971 FXP_CB_COMMAND_XMIT | FXP_CB_COMMAND_SF | FXP_CB_COMMAND_S | FXP_CB_COMMAND_I; 972 /* 973 * Set a 5 second timer just in case we don't hear from the 974 * card again. 975 */ 976 ifp->if_timer = 5; 977 } 978 txp->tx_threshold = tx_threshold; 979 980 /* 981 * Advance the end of list forward. 
982 */ 983 sc->cbl_last->cb_command &= ~FXP_CB_COMMAND_S; 984 sc->cbl_last = txp; 985 986 /* 987 * Advance the beginning of the list forward if there are 988 * no other packets queued (when nothing is queued, cbl_first 989 * sits on the last TxCB that was sent out). 990 */ 991 if (sc->tx_queued == 0) 992 sc->cbl_first = txp; 993 994 sc->tx_queued++; 995 996#if NBPF > 0 997 /* 998 * Pass packet to bpf if there is a listener. 999 */ 1000 if (ifp->if_bpf) 1001 bpf_mtap(FXP_BPFTAP_ARG(ifp), mb_head); 1002#endif 1003 } 1004 1005 /* 1006 * We're finished. If we added to the list, issue a RESUME to get DMA 1007 * going again if suspended. 1008 */ 1009 if (txp != NULL) { 1010 fxp_scb_wait(sc); 1011 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_RESUME); 1012 } 1013} 1014 1015/* 1016 * Process interface interrupts. 1017 */ 1018static FXP_INTR_TYPE 1019fxp_intr(arg) 1020 void *arg; 1021{ 1022 struct fxp_softc *sc = arg; 1023 struct ifnet *ifp = &sc->sc_if; 1024 u_int8_t statack; 1025#if defined(__NetBSD__) 1026 int claimed = 0; 1027#endif 1028 1029 while ((statack = CSR_READ_1(sc, FXP_CSR_SCB_STATACK)) != 0) { 1030#if defined(__NetBSD__) 1031 claimed = 1; 1032#endif 1033 /* 1034 * First ACK all the interrupts in this pass. 1035 */ 1036 CSR_WRITE_1(sc, FXP_CSR_SCB_STATACK, statack); 1037 1038 /* 1039 * Free any finished transmit mbuf chains. 1040 */ 1041 if (statack & FXP_SCB_STATACK_CXTNO) { 1042 struct fxp_cb_tx *txp; 1043 1044 for (txp = sc->cbl_first; sc->tx_queued && 1045 (txp->cb_status & FXP_CB_STATUS_C) != 0; 1046 txp = txp->next) { 1047 if (txp->mb_head != NULL) { 1048 m_freem(txp->mb_head); 1049 txp->mb_head = NULL; 1050 } 1051 sc->tx_queued--; 1052 } 1053 sc->cbl_first = txp; 1054 ifp->if_timer = 0; 1055 if (sc->tx_queued == 0) { 1056 if (sc->need_mcsetup) 1057 fxp_mc_setup(sc); 1058 } 1059 /* 1060 * Try to start more packets transmitting. 1061 */ 1062 if (ifp->if_snd.ifq_head != NULL) 1063 fxp_start(ifp); 1064 } 1065 /* 1066 * Process receiver interrupts. 
If a no-resource (RNR) 1067 * condition exists, get whatever packets we can and 1068 * re-start the receiver. 1069 */ 1070 if (statack & (FXP_SCB_STATACK_FR | FXP_SCB_STATACK_RNR)) { 1071 struct mbuf *m; 1072 struct fxp_rfa *rfa; 1073rcvloop: 1074 m = sc->rfa_headm; 1075 rfa = (struct fxp_rfa *)(m->m_ext.ext_buf + 1076 RFA_ALIGNMENT_FUDGE); 1077 1078 if (rfa->rfa_status & FXP_RFA_STATUS_C) { 1079 /* 1080 * Remove first packet from the chain. 1081 */ 1082 sc->rfa_headm = m->m_next; 1083 m->m_next = NULL; 1084 1085 /* 1086 * Add a new buffer to the receive chain. 1087 * If this fails, the old buffer is recycled 1088 * instead. 1089 */ 1090 if (fxp_add_rfabuf(sc, m) == 0) { 1091 struct ether_header *eh; 1092 u_int16_t total_len; 1093 1094 total_len = rfa->actual_size & 1095 (MCLBYTES - 1); 1096 if (total_len < 1097 sizeof(struct ether_header)) { 1098 m_freem(m); 1099 goto rcvloop; 1100 } 1101 m->m_pkthdr.rcvif = ifp; 1102 m->m_pkthdr.len = m->m_len = 1103 total_len ; 1104 eh = mtod(m, struct ether_header *); 1105#if NBPF > 0 1106 if (ifp->if_bpf) 1107 bpf_tap(FXP_BPFTAP_ARG(ifp), 1108 mtod(m, caddr_t), 1109 total_len); 1110#endif /* NBPF > 0 */ 1111#ifdef BRIDGE 1112 if (do_bridge) { 1113 struct ifnet *bdg_ifp ; 1114 bdg_ifp = bridge_in(m); 1115 if (bdg_ifp == BDG_DROP) 1116 goto dropit ; 1117 if (bdg_ifp != BDG_LOCAL) 1118 bdg_forward(&m, bdg_ifp); 1119 if (bdg_ifp != BDG_LOCAL && 1120 bdg_ifp != BDG_BCAST && 1121 bdg_ifp != BDG_MCAST) 1122 goto dropit ; 1123 goto getit ; 1124 } 1125#endif 1126 /* 1127 * Only pass this packet up 1128 * if it is for us. 
1129 */ 1130 if ((ifp->if_flags & 1131 IFF_PROMISC) && 1132 (rfa->rfa_status & 1133 FXP_RFA_STATUS_IAMATCH) && 1134 (eh->ether_dhost[0] & 1) 1135 == 0) { 1136#ifdef BRIDGE 1137dropit: 1138#endif 1139 if (m) 1140 m_freem(m); 1141 goto rcvloop; 1142 } 1143#ifdef BRIDGE 1144getit: 1145#endif 1146 m->m_data += 1147 sizeof(struct ether_header); 1148 m->m_len -= 1149 sizeof(struct ether_header); 1150 m->m_pkthdr.len = m->m_len ; 1151 ether_input(ifp, eh, m); 1152 } 1153 goto rcvloop; 1154 } 1155 if (statack & FXP_SCB_STATACK_RNR) { 1156 fxp_scb_wait(sc); 1157 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, 1158 vtophys(sc->rfa_headm->m_ext.ext_buf) + 1159 RFA_ALIGNMENT_FUDGE); 1160 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, 1161 FXP_SCB_COMMAND_RU_START); 1162 } 1163 } 1164 } 1165#if defined(__NetBSD__) 1166 return (claimed); 1167#endif 1168} 1169 1170/* 1171 * Update packet in/out/collision statistics. The i82557 doesn't 1172 * allow you to access these counters without doing a fairly 1173 * expensive DMA to get _all_ of the statistics it maintains, so 1174 * we do this operation here only once per second. The statistics 1175 * counters in the kernel are updated from the previous dump-stats 1176 * DMA and then a new dump-stats DMA is started. The on-chip 1177 * counters are zeroed when the DMA completes. If we can't start 1178 * the DMA immediately, we don't wait - we just prepare to read 1179 * them again next time. 1180 */ 1181static void 1182fxp_stats_update(arg) 1183 void *arg; 1184{ 1185 struct fxp_softc *sc = arg; 1186 struct ifnet *ifp = &sc->sc_if; 1187 struct fxp_stats *sp = sc->fxp_stats; 1188 struct fxp_cb_tx *txp; 1189 int s; 1190 1191 ifp->if_opackets += sp->tx_good; 1192 ifp->if_collisions += sp->tx_total_collisions; 1193 if (sp->rx_good) { 1194 ifp->if_ipackets += sp->rx_good; 1195 sc->rx_idle_secs = 0; 1196 } else { 1197 /* 1198 * Receiver's been idle for another second. 
1199 */ 1200 sc->rx_idle_secs++; 1201 } 1202 ifp->if_ierrors += 1203 sp->rx_crc_errors + 1204 sp->rx_alignment_errors + 1205 sp->rx_rnr_errors + 1206 sp->rx_overrun_errors; 1207 /* 1208 * If any transmit underruns occured, bump up the transmit 1209 * threshold by another 512 bytes (64 * 8). 1210 */ 1211 if (sp->tx_underruns) { 1212 ifp->if_oerrors += sp->tx_underruns; 1213 if (tx_threshold < 192) 1214 tx_threshold += 64; 1215 } 1216 s = splimp(); 1217 /* 1218 * Release any xmit buffers that have completed DMA. This isn't 1219 * strictly necessary to do here, but it's advantagous for mbufs 1220 * with external storage to be released in a timely manner rather 1221 * than being defered for a potentially long time. This limits 1222 * the delay to a maximum of one second. 1223 */ 1224 for (txp = sc->cbl_first; sc->tx_queued && 1225 (txp->cb_status & FXP_CB_STATUS_C) != 0; 1226 txp = txp->next) { 1227 if (txp->mb_head != NULL) { 1228 m_freem(txp->mb_head); 1229 txp->mb_head = NULL; 1230 } 1231 sc->tx_queued--; 1232 } 1233 sc->cbl_first = txp; 1234 /* 1235 * If we haven't received any packets in FXP_MAC_RX_IDLE seconds, 1236 * then assume the receiver has locked up and attempt to clear 1237 * the condition by reprogramming the multicast filter. This is 1238 * a work-around for a bug in the 82557 where the receiver locks 1239 * up if it gets certain types of garbage in the syncronization 1240 * bits prior to the packet header. This bug is supposed to only 1241 * occur in 10Mbps mode, but has been seen to occur in 100Mbps 1242 * mode as well (perhaps due to a 10/100 speed transition). 1243 */ 1244 if (sc->rx_idle_secs > FXP_MAX_RX_IDLE) { 1245 sc->rx_idle_secs = 0; 1246 fxp_mc_setup(sc); 1247 } 1248 /* 1249 * If there is no pending command, start another stats 1250 * dump. Otherwise punt for now. 1251 */ 1252 if (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) == 0) { 1253 /* 1254 * Start another stats dump. 
1255 */ 1256 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, 1257 FXP_SCB_COMMAND_CU_DUMPRESET); 1258 } else { 1259 /* 1260 * A previous command is still waiting to be accepted. 1261 * Just zero our copy of the stats and wait for the 1262 * next timer event to update them. 1263 */ 1264 sp->tx_good = 0; 1265 sp->tx_underruns = 0; 1266 sp->tx_total_collisions = 0; 1267 1268 sp->rx_good = 0; 1269 sp->rx_crc_errors = 0; 1270 sp->rx_alignment_errors = 0; 1271 sp->rx_rnr_errors = 0; 1272 sp->rx_overrun_errors = 0; 1273 } 1274 splx(s); 1275 /* 1276 * Schedule another timeout one second from now. 1277 */ 1278 sc->stat_ch = timeout(fxp_stats_update, sc, hz); 1279} 1280 1281/* 1282 * Stop the interface. Cancels the statistics updater and resets 1283 * the interface. 1284 */ 1285static void 1286fxp_stop(sc) 1287 struct fxp_softc *sc; 1288{ 1289 struct ifnet *ifp = &sc->sc_if; 1290 struct fxp_cb_tx *txp; 1291 int i; 1292 1293 /* 1294 * Cancel stats updater. 1295 */ 1296 untimeout(fxp_stats_update, sc, sc->stat_ch); 1297 1298 /* 1299 * Issue software reset 1300 */ 1301 CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET); 1302 DELAY(10); 1303 1304 /* 1305 * Release any xmit buffers. 1306 */ 1307 txp = sc->cbl_base; 1308 if (txp != NULL) { 1309 for (i = 0; i < FXP_NTXCB; i++) { 1310 if (txp[i].mb_head != NULL) { 1311 m_freem(txp[i].mb_head); 1312 txp[i].mb_head = NULL; 1313 } 1314 } 1315 } 1316 sc->tx_queued = 0; 1317 1318 /* 1319 * Free all the receive buffers then reallocate/reinitialize 1320 */ 1321 if (sc->rfa_headm != NULL) 1322 m_freem(sc->rfa_headm); 1323 sc->rfa_headm = NULL; 1324 sc->rfa_tailm = NULL; 1325 for (i = 0; i < FXP_NRFABUFS; i++) { 1326 if (fxp_add_rfabuf(sc, NULL) != 0) { 1327 /* 1328 * This "can't happen" - we're at splimp() 1329 * and we just freed all the buffers we need 1330 * above. 
1331 */ 1332 panic("fxp_stop: no buffers!"); 1333 } 1334 } 1335 1336 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 1337 ifp->if_timer = 0; 1338} 1339 1340/* 1341 * Watchdog/transmission transmit timeout handler. Called when a 1342 * transmission is started on the interface, but no interrupt is 1343 * received before the timeout. This usually indicates that the 1344 * card has wedged for some reason. 1345 */ 1346static void 1347fxp_watchdog(ifp) 1348 struct ifnet *ifp; 1349{ 1350 struct fxp_softc *sc = ifp->if_softc; 1351 1352 printf(FXP_FORMAT ": device timeout\n", FXP_ARGS(sc)); 1353 ifp->if_oerrors++; 1354 1355 fxp_init(sc); 1356} 1357 1358static void 1359fxp_init(xsc) 1360 void *xsc; 1361{ 1362 struct fxp_softc *sc = xsc; 1363 struct ifnet *ifp = &sc->sc_if; 1364 struct fxp_cb_config *cbp; 1365 struct fxp_cb_ias *cb_ias; 1366 struct fxp_cb_tx *txp; 1367 int i, s, prm; 1368 1369 s = splimp(); 1370 /* 1371 * Cancel any pending I/O 1372 */ 1373 fxp_stop(sc); 1374 1375 prm = (ifp->if_flags & IFF_PROMISC) ? 1 : 0; 1376 1377 /* 1378 * Initialize base of CBL and RFA memory. Loading with zero 1379 * sets it up for regular linear addressing. 1380 */ 1381 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, 0); 1382 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_BASE); 1383 1384 fxp_scb_wait(sc); 1385 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_RU_BASE); 1386 1387 /* 1388 * Initialize base of dump-stats buffer. 1389 */ 1390 fxp_scb_wait(sc); 1391 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, vtophys(sc->fxp_stats)); 1392 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_DUMP_ADR); 1393 1394 /* 1395 * We temporarily use memory that contains the TxCB list to 1396 * construct the config CB. The TxCB list memory is rebuilt 1397 * later. 
1398 */ 1399 cbp = (struct fxp_cb_config *) sc->cbl_base; 1400 1401 /* 1402 * This bcopy is kind of disgusting, but there are a bunch of must be 1403 * zero and must be one bits in this structure and this is the easiest 1404 * way to initialize them all to proper values. 1405 */ 1406 bcopy(fxp_cb_config_template, (volatile void *)&cbp->cb_status, 1407 sizeof(fxp_cb_config_template)); 1408 1409 cbp->cb_status = 0; 1410 cbp->cb_command = FXP_CB_COMMAND_CONFIG | FXP_CB_COMMAND_EL; 1411 cbp->link_addr = -1; /* (no) next command */ 1412 cbp->byte_count = 22; /* (22) bytes to config */ 1413 cbp->rx_fifo_limit = 8; /* rx fifo threshold (32 bytes) */ 1414 cbp->tx_fifo_limit = 0; /* tx fifo threshold (0 bytes) */ 1415 cbp->adaptive_ifs = 0; /* (no) adaptive interframe spacing */ 1416 cbp->rx_dma_bytecount = 0; /* (no) rx DMA max */ 1417 cbp->tx_dma_bytecount = 0; /* (no) tx DMA max */ 1418 cbp->dma_bce = 0; /* (disable) dma max counters */ 1419 cbp->late_scb = 0; /* (don't) defer SCB update */ 1420 cbp->tno_int = 0; /* (disable) tx not okay interrupt */ 1421 cbp->ci_int = 1; /* interrupt on CU idle */ 1422 cbp->save_bf = prm; /* save bad frames */ 1423 cbp->disc_short_rx = !prm; /* discard short packets */ 1424 cbp->underrun_retry = 1; /* retry mode (1) on DMA underrun */ 1425 cbp->mediatype = !sc->phy_10Mbps_only; /* interface mode */ 1426 cbp->nsai = 1; /* (don't) disable source addr insert */ 1427 cbp->preamble_length = 2; /* (7 byte) preamble */ 1428 cbp->loopback = 0; /* (don't) loopback */ 1429 cbp->linear_priority = 0; /* (normal CSMA/CD operation) */ 1430 cbp->linear_pri_mode = 0; /* (wait after xmit only) */ 1431 cbp->interfrm_spacing = 6; /* (96 bits of) interframe spacing */ 1432 cbp->promiscuous = prm; /* promiscuous mode */ 1433 cbp->bcast_disable = 0; /* (don't) disable broadcasts */ 1434 cbp->crscdt = 0; /* (CRS only) */ 1435 cbp->stripping = !prm; /* truncate rx packet to byte count */ 1436 cbp->padding = 1; /* (do) pad short tx packets */ 1437 
cbp->rcv_crc_xfer = 0; /* (don't) xfer CRC to host */ 1438 cbp->force_fdx = 0; /* (don't) force full duplex */ 1439 cbp->fdx_pin_en = 1; /* (enable) FDX# pin */ 1440 cbp->multi_ia = 0; /* (don't) accept multiple IAs */ 1441 cbp->mc_all = sc->all_mcasts;/* accept all multicasts */ 1442 1443 /* 1444 * Start the config command/DMA. 1445 */ 1446 fxp_scb_wait(sc); 1447 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, vtophys(&cbp->cb_status)); 1448 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_START); 1449 /* ...and wait for it to complete. */ 1450 while (!(cbp->cb_status & FXP_CB_STATUS_C)); 1451 1452 /* 1453 * Now initialize the station address. Temporarily use the TxCB 1454 * memory area like we did above for the config CB. 1455 */ 1456 cb_ias = (struct fxp_cb_ias *) sc->cbl_base; 1457 cb_ias->cb_status = 0; 1458 cb_ias->cb_command = FXP_CB_COMMAND_IAS | FXP_CB_COMMAND_EL; 1459 cb_ias->link_addr = -1; 1460#if defined(__NetBSD__) 1461 bcopy(LLADDR(ifp->if_sadl), (void *)cb_ias->macaddr, 6); 1462#else 1463 bcopy(sc->arpcom.ac_enaddr, (volatile void *)cb_ias->macaddr, 1464 sizeof(sc->arpcom.ac_enaddr)); 1465#endif /* __NetBSD__ */ 1466 1467 /* 1468 * Start the IAS (Individual Address Setup) command/DMA. 1469 */ 1470 fxp_scb_wait(sc); 1471 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_START); 1472 /* ...and wait for it to complete. */ 1473 while (!(cb_ias->cb_status & FXP_CB_STATUS_C)); 1474 1475 /* 1476 * Initialize transmit control block (TxCB) list. 1477 */ 1478 1479 txp = sc->cbl_base; 1480 bzero(txp, sizeof(struct fxp_cb_tx) * FXP_NTXCB); 1481 for (i = 0; i < FXP_NTXCB; i++) { 1482 txp[i].cb_status = FXP_CB_STATUS_C | FXP_CB_STATUS_OK; 1483 txp[i].cb_command = FXP_CB_COMMAND_NOP; 1484 txp[i].link_addr = vtophys(&txp[(i + 1) & FXP_TXCB_MASK].cb_status); 1485 txp[i].tbd_array_addr = vtophys(&txp[i].tbd[0]); 1486 txp[i].next = &txp[(i + 1) & FXP_TXCB_MASK]; 1487 } 1488 /* 1489 * Set the suspend flag on the first TxCB and start the control 1490 * unit. 
It will execute the NOP and then suspend. 1491 */ 1492 txp->cb_command = FXP_CB_COMMAND_NOP | FXP_CB_COMMAND_S; 1493 sc->cbl_first = sc->cbl_last = txp; 1494 sc->tx_queued = 1; 1495 1496 fxp_scb_wait(sc); 1497 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_START); 1498 1499 /* 1500 * Initialize receiver buffer area - RFA. 1501 */ 1502 fxp_scb_wait(sc); 1503 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, 1504 vtophys(sc->rfa_headm->m_ext.ext_buf) + RFA_ALIGNMENT_FUDGE); 1505 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_RU_START); 1506 1507 /* 1508 * Set current media. 1509 */ 1510 fxp_set_media(sc, sc->sc_media.ifm_cur->ifm_media); 1511 1512 ifp->if_flags |= IFF_RUNNING; 1513 ifp->if_flags &= ~IFF_OACTIVE; 1514 splx(s); 1515 1516 /* 1517 * Start stats updater. 1518 */ 1519 sc->stat_ch = timeout(fxp_stats_update, sc, hz); 1520} 1521 1522static void 1523fxp_set_media(sc, media) 1524 struct fxp_softc *sc; 1525 int media; 1526{ 1527 1528 switch (sc->phy_primary_device) { 1529 case FXP_PHY_DP83840: 1530 case FXP_PHY_DP83840A: 1531 fxp_mdi_write(sc, sc->phy_primary_addr, FXP_DP83840_PCR, 1532 fxp_mdi_read(sc, sc->phy_primary_addr, FXP_DP83840_PCR) | 1533 FXP_DP83840_PCR_LED4_MODE | /* LED4 always indicates duplex */ 1534 FXP_DP83840_PCR_F_CONNECT | /* force link disconnect bypass */ 1535 FXP_DP83840_PCR_BIT10); /* XXX I have no idea */ 1536 /* fall through */ 1537 case FXP_PHY_82553A: 1538 case FXP_PHY_82553C: /* untested */ 1539 case FXP_PHY_82555: 1540 case FXP_PHY_82555B: 1541 if (IFM_SUBTYPE(media) != IFM_AUTO) { 1542 int flags; 1543 1544 flags = (IFM_SUBTYPE(media) == IFM_100_TX) ? 1545 FXP_PHY_BMCR_SPEED_100M : 0; 1546 flags |= (media & IFM_FDX) ? 
1547 FXP_PHY_BMCR_FULLDUPLEX : 0; 1548 fxp_mdi_write(sc, sc->phy_primary_addr, 1549 FXP_PHY_BMCR, 1550 (fxp_mdi_read(sc, sc->phy_primary_addr, 1551 FXP_PHY_BMCR) & 1552 ~(FXP_PHY_BMCR_AUTOEN | FXP_PHY_BMCR_SPEED_100M | 1553 FXP_PHY_BMCR_FULLDUPLEX)) | flags); 1554 } else { 1555 fxp_mdi_write(sc, sc->phy_primary_addr, 1556 FXP_PHY_BMCR, 1557 (fxp_mdi_read(sc, sc->phy_primary_addr, 1558 FXP_PHY_BMCR) | FXP_PHY_BMCR_AUTOEN)); 1559 } 1560 break; 1561 /* 1562 * The Seeq 80c24 doesn't have a PHY programming interface, so do 1563 * nothing. 1564 */ 1565 case FXP_PHY_80C24: 1566 break; 1567 default: 1568 printf(FXP_FORMAT 1569 ": warning: unsupported PHY, type = %d, addr = %d\n", 1570 FXP_ARGS(sc), sc->phy_primary_device, 1571 sc->phy_primary_addr); 1572 } 1573} 1574 1575/* 1576 * Change media according to request. 1577 */ 1578int 1579fxp_mediachange(ifp) 1580 struct ifnet *ifp; 1581{ 1582 struct fxp_softc *sc = ifp->if_softc; 1583 struct ifmedia *ifm = &sc->sc_media; 1584 1585 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 1586 return (EINVAL); 1587 1588 fxp_set_media(sc, ifm->ifm_media); 1589 return (0); 1590} 1591 1592/* 1593 * Notify the world which media we're using. 1594 */ 1595void 1596fxp_mediastatus(ifp, ifmr) 1597 struct ifnet *ifp; 1598 struct ifmediareq *ifmr; 1599{ 1600 struct fxp_softc *sc = ifp->if_softc; 1601 int flags, stsflags; 1602 1603 switch (sc->phy_primary_device) { 1604 case FXP_PHY_82555: 1605 case FXP_PHY_82555B: 1606 case FXP_PHY_DP83840: 1607 case FXP_PHY_DP83840A: 1608 ifmr->ifm_status = IFM_AVALID; /* IFM_ACTIVE will be valid */ 1609 ifmr->ifm_active = IFM_ETHER; 1610 /* 1611 * the following is not an error. 1612 * You need to read this register twice to get current 1613 * status. This is correct documented behaviour, the 1614 * first read gets latched values. 
1615 */ 1616 stsflags = fxp_mdi_read(sc, sc->phy_primary_addr, FXP_PHY_STS); 1617 stsflags = fxp_mdi_read(sc, sc->phy_primary_addr, FXP_PHY_STS); 1618 if (stsflags & FXP_PHY_STS_LINK_STS) 1619 ifmr->ifm_status |= IFM_ACTIVE; 1620 1621 /* 1622 * If we are in auto mode, then try report the result. 1623 */ 1624 flags = fxp_mdi_read(sc, sc->phy_primary_addr, FXP_PHY_BMCR); 1625 if (flags & FXP_PHY_BMCR_AUTOEN) { 1626 ifmr->ifm_active |= IFM_AUTO; /* XXX presently 0 */ 1627 if (stsflags & FXP_PHY_STS_AUTO_DONE) { 1628 /* 1629 * Intel and National parts report 1630 * differently on what they found. 1631 */ 1632 if ((sc->phy_primary_device == FXP_PHY_82555) 1633 || (sc->phy_primary_device == FXP_PHY_82555B)) { 1634 flags = fxp_mdi_read(sc, 1635 sc->phy_primary_addr, 1636 FXP_PHY_USC); 1637 1638 if (flags & FXP_PHY_USC_SPEED) 1639 ifmr->ifm_active |= IFM_100_TX; 1640 else 1641 ifmr->ifm_active |= IFM_10_T; 1642 1643 if (flags & FXP_PHY_USC_DUPLEX) 1644 ifmr->ifm_active |= IFM_FDX; 1645 } else { /* it's National. only know speed */ 1646 flags = fxp_mdi_read(sc, 1647 sc->phy_primary_addr, 1648 FXP_DP83840_PAR); 1649 1650 if (flags & FXP_DP83840_PAR_SPEED_10) 1651 ifmr->ifm_active |= IFM_10_T; 1652 else 1653 ifmr->ifm_active |= IFM_100_TX; 1654 } 1655 } 1656 } else { /* in manual mode.. just report what we were set to */ 1657 if (flags & FXP_PHY_BMCR_SPEED_100M) 1658 ifmr->ifm_active |= IFM_100_TX; 1659 else 1660 ifmr->ifm_active |= IFM_10_T; 1661 1662 if (flags & FXP_PHY_BMCR_FULLDUPLEX) 1663 ifmr->ifm_active |= IFM_FDX; 1664 } 1665 break; 1666 1667 case FXP_PHY_80C24: 1668 default: 1669 ifmr->ifm_active = IFM_ETHER|IFM_MANUAL; /* XXX IFM_AUTO ? */ 1670 } 1671} 1672 1673/* 1674 * Add a buffer to the end of the RFA buffer list. 1675 * Return 0 if successful, 1 for failure. A failure results in 1676 * adding the 'oldm' (if non-NULL) on to the end of the list - 1677 * tossing out its old contents and recycling it. 
1678 * The RFA struct is stuck at the beginning of mbuf cluster and the 1679 * data pointer is fixed up to point just past it. 1680 */ 1681static int 1682fxp_add_rfabuf(sc, oldm) 1683 struct fxp_softc *sc; 1684 struct mbuf *oldm; 1685{ 1686 u_int32_t v; 1687 struct mbuf *m; 1688 struct fxp_rfa *rfa, *p_rfa; 1689 1690 MGETHDR(m, M_DONTWAIT, MT_DATA); 1691 if (m != NULL) { 1692 MCLGET(m, M_DONTWAIT); 1693 if ((m->m_flags & M_EXT) == 0) { 1694 m_freem(m); 1695 if (oldm == NULL) 1696 return 1; 1697 m = oldm; 1698 m->m_data = m->m_ext.ext_buf; 1699 } 1700 } else { 1701 if (oldm == NULL) 1702 return 1; 1703 m = oldm; 1704 m->m_data = m->m_ext.ext_buf; 1705 } 1706 1707 /* 1708 * Move the data pointer up so that the incoming data packet 1709 * will be 32-bit aligned. 1710 */ 1711 m->m_data += RFA_ALIGNMENT_FUDGE; 1712 1713 /* 1714 * Get a pointer to the base of the mbuf cluster and move 1715 * data start past it. 1716 */ 1717 rfa = mtod(m, struct fxp_rfa *); 1718 m->m_data += sizeof(struct fxp_rfa); 1719 rfa->size = MCLBYTES - sizeof(struct fxp_rfa) - RFA_ALIGNMENT_FUDGE; 1720 1721 /* 1722 * Initialize the rest of the RFA. Note that since the RFA 1723 * is misaligned, we cannot store values directly. Instead, 1724 * we use an optimized, inline copy. 1725 */ 1726 rfa->rfa_status = 0; 1727 rfa->rfa_control = FXP_RFA_CONTROL_EL; 1728 rfa->actual_size = 0; 1729 1730 v = -1; 1731 fxp_lwcopy(&v, &rfa->link_addr); 1732 fxp_lwcopy(&v, &rfa->rbd_addr); 1733 1734 /* 1735 * If there are other buffers already on the list, attach this 1736 * one to the end by fixing up the tail to point to this one. 
1737 */ 1738 if (sc->rfa_headm != NULL) { 1739 p_rfa = (struct fxp_rfa *) (sc->rfa_tailm->m_ext.ext_buf + 1740 RFA_ALIGNMENT_FUDGE); 1741 sc->rfa_tailm->m_next = m; 1742 v = vtophys(rfa); 1743 fxp_lwcopy(&v, &p_rfa->link_addr); 1744 p_rfa->rfa_control &= ~FXP_RFA_CONTROL_EL; 1745 } else { 1746 sc->rfa_headm = m; 1747 } 1748 sc->rfa_tailm = m; 1749 1750 return (m == oldm); 1751} 1752 1753static volatile int 1754fxp_mdi_read(sc, phy, reg) 1755 struct fxp_softc *sc; 1756 int phy; 1757 int reg; 1758{ 1759 int count = 10000; 1760 int value; 1761 1762 CSR_WRITE_4(sc, FXP_CSR_MDICONTROL, 1763 (FXP_MDI_READ << 26) | (reg << 16) | (phy << 21)); 1764 1765 while (((value = CSR_READ_4(sc, FXP_CSR_MDICONTROL)) & 0x10000000) == 0 1766 && count--) 1767 DELAY(10); 1768 1769 if (count <= 0) 1770 printf(FXP_FORMAT ": fxp_mdi_read: timed out\n", 1771 FXP_ARGS(sc)); 1772 1773 return (value & 0xffff); 1774} 1775 1776static void 1777fxp_mdi_write(sc, phy, reg, value) 1778 struct fxp_softc *sc; 1779 int phy; 1780 int reg; 1781 int value; 1782{ 1783 int count = 10000; 1784 1785 CSR_WRITE_4(sc, FXP_CSR_MDICONTROL, 1786 (FXP_MDI_WRITE << 26) | (reg << 16) | (phy << 21) | 1787 (value & 0xffff)); 1788 1789 while((CSR_READ_4(sc, FXP_CSR_MDICONTROL) & 0x10000000) == 0 && 1790 count--) 1791 DELAY(10); 1792 1793 if (count <= 0) 1794 printf(FXP_FORMAT ": fxp_mdi_write: timed out\n", 1795 FXP_ARGS(sc)); 1796} 1797 1798static int 1799fxp_ioctl(ifp, command, data) 1800 struct ifnet *ifp; 1801 FXP_IOCTLCMD_TYPE command; 1802 caddr_t data; 1803{ 1804 struct fxp_softc *sc = ifp->if_softc; 1805 struct ifreq *ifr = (struct ifreq *)data; 1806 int s, error = 0; 1807 1808 s = splimp(); 1809 1810 switch (command) { 1811 1812 case SIOCSIFADDR: 1813#if !defined(__NetBSD__) 1814 case SIOCGIFADDR: 1815 case SIOCSIFMTU: 1816#endif 1817 error = ether_ioctl(ifp, command, data); 1818 break; 1819 1820 case SIOCSIFFLAGS: 1821 sc->all_mcasts = (ifp->if_flags & IFF_ALLMULTI) ? 
1 : 0; 1822 1823 /* 1824 * If interface is marked up and not running, then start it. 1825 * If it is marked down and running, stop it. 1826 * XXX If it's up then re-initialize it. This is so flags 1827 * such as IFF_PROMISC are handled. 1828 */ 1829 if (ifp->if_flags & IFF_UP) { 1830 fxp_init(sc); 1831 } else { 1832 if (ifp->if_flags & IFF_RUNNING) 1833 fxp_stop(sc); 1834 } 1835 break; 1836 1837 case SIOCADDMULTI: 1838 case SIOCDELMULTI: 1839 sc->all_mcasts = (ifp->if_flags & IFF_ALLMULTI) ? 1 : 0; 1840#if defined(__NetBSD__) 1841 error = (command == SIOCADDMULTI) ? 1842 ether_addmulti(ifr, &sc->sc_ethercom) : 1843 ether_delmulti(ifr, &sc->sc_ethercom); 1844 1845 if (error == ENETRESET) { 1846 /* 1847 * Multicast list has changed; set the hardware 1848 * filter accordingly. 1849 */ 1850 if (!sc->all_mcasts) 1851 fxp_mc_setup(sc); 1852 /* 1853 * fxp_mc_setup() can turn on all_mcasts if we run 1854 * out of space, so check it again rather than else {}. 1855 */ 1856 if (sc->all_mcasts) 1857 fxp_init(sc); 1858 error = 0; 1859 } 1860#else /* __FreeBSD__ */ 1861 /* 1862 * Multicast list has changed; set the hardware filter 1863 * accordingly. 1864 */ 1865 if (!sc->all_mcasts) 1866 fxp_mc_setup(sc); 1867 /* 1868 * fxp_mc_setup() can turn on sc->all_mcasts, so check it 1869 * again rather than else {}. 1870 */ 1871 if (sc->all_mcasts) 1872 fxp_init(sc); 1873 error = 0; 1874#endif /* __NetBSD__ */ 1875 break; 1876 1877 case SIOCSIFMEDIA: 1878 case SIOCGIFMEDIA: 1879 error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, command); 1880 break; 1881 1882 default: 1883 error = EINVAL; 1884 } 1885 (void) splx(s); 1886 return (error); 1887} 1888 1889/* 1890 * Program the multicast filter. 1891 * 1892 * We have an artificial restriction that the multicast setup command 1893 * must be the first command in the chain, so we take steps to ensure 1894 * this. By requiring this, it allows us to keep up the performance of 1895 * the pre-initialized command ring (esp. 
link pointers) by not actually 1896 * inserting the mcsetup command in the ring - i.e. its link pointer 1897 * points to the TxCB ring, but the mcsetup descriptor itself is not part 1898 * of it. We then can do 'CU_START' on the mcsetup descriptor and have it 1899 * lead into the regular TxCB ring when it completes. 1900 * 1901 * This function must be called at splimp. 1902 */ 1903static void 1904fxp_mc_setup(sc) 1905 struct fxp_softc *sc; 1906{ 1907 struct fxp_cb_mcs *mcsp = sc->mcsp; 1908 struct ifnet *ifp = &sc->sc_if; 1909 struct ifmultiaddr *ifma; 1910 int nmcasts; 1911 1912 /* 1913 * If there are queued commands, we must wait until they are all 1914 * completed. If we are already waiting, then add a NOP command 1915 * with interrupt option so that we're notified when all commands 1916 * have been completed - fxp_start() ensures that no additional 1917 * TX commands will be added when need_mcsetup is true. 1918 */ 1919 if (sc->tx_queued) { 1920 struct fxp_cb_tx *txp; 1921 1922 /* 1923 * need_mcsetup will be true if we are already waiting for the 1924 * NOP command to be completed (see below). In this case, bail. 1925 */ 1926 if (sc->need_mcsetup) 1927 return; 1928 sc->need_mcsetup = 1; 1929 1930 /* 1931 * Add a NOP command with interrupt so that we are notified when all 1932 * TX commands have been processed. 1933 */ 1934 txp = sc->cbl_last->next; 1935 txp->mb_head = NULL; 1936 txp->cb_status = 0; 1937 txp->cb_command = FXP_CB_COMMAND_NOP | FXP_CB_COMMAND_S | FXP_CB_COMMAND_I; 1938 /* 1939 * Advance the end of list forward. 1940 */ 1941 sc->cbl_last->cb_command &= ~FXP_CB_COMMAND_S; 1942 sc->cbl_last = txp; 1943 sc->tx_queued++; 1944 /* 1945 * Issue a resume in case the CU has just suspended. 1946 */ 1947 fxp_scb_wait(sc); 1948 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_RESUME); 1949 /* 1950 * Set a 5 second timer just in case we don't hear from the 1951 * card again. 
1952 */ 1953 ifp->if_timer = 5; 1954 1955 return; 1956 } 1957 sc->need_mcsetup = 0; 1958 1959 /* 1960 * Initialize multicast setup descriptor. 1961 */ 1962 mcsp->next = sc->cbl_base; 1963 mcsp->mb_head = NULL; 1964 mcsp->cb_status = 0; 1965 mcsp->cb_command = FXP_CB_COMMAND_MCAS | FXP_CB_COMMAND_S | FXP_CB_COMMAND_I; 1966 mcsp->link_addr = vtophys(&sc->cbl_base->cb_status); 1967 1968 nmcasts = 0; 1969 if (!sc->all_mcasts) { 1970 for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL; 1971 ifma = ifma->ifma_link.le_next) { 1972 if (ifma->ifma_addr->sa_family != AF_LINK) 1973 continue; 1974 if (nmcasts >= MAXMCADDR) { 1975 sc->all_mcasts = 1; 1976 nmcasts = 0; 1977 break; 1978 } 1979 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 1980 (volatile void *) &sc->mcsp->mc_addr[nmcasts][0], 6); 1981 nmcasts++; 1982 } 1983 } 1984 mcsp->mc_cnt = nmcasts * 6; 1985 sc->cbl_first = sc->cbl_last = (struct fxp_cb_tx *) mcsp; 1986 sc->tx_queued = 1; 1987 1988 /* 1989 * Wait until command unit is not active. This should never 1990 * be the case when nothing is queued, but make sure anyway. 1991 */ 1992 while ((CSR_READ_1(sc, FXP_CSR_SCB_RUSCUS) >> 6) == 1993 FXP_SCB_CUS_ACTIVE) ; 1994 1995 /* 1996 * Start the multicast setup command. 1997 */ 1998 fxp_scb_wait(sc); 1999 CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, vtophys(&mcsp->cb_status)); 2000 CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_START); 2001 2002 ifp->if_timer = 2; 2003 return; 2004} 2005