if_nge.c (192298) | if_nge.c (192506) |
---|---|
1/*- 2 * Copyright (c) 2001 Wind River Systems 3 * Copyright (c) 1997, 1998, 1999, 2000, 2001 4 * Bill Paul <wpaul@bsdi.com>. All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: --- 18 unchanged lines hidden (view full) --- 27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 31 * THE POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34#include <sys/cdefs.h> | 1/*- 2 * Copyright (c) 2001 Wind River Systems 3 * Copyright (c) 1997, 1998, 1999, 2000, 2001 4 * Bill Paul <wpaul@bsdi.com>. All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: --- 18 unchanged lines hidden (view full) --- 27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 31 * THE POSSIBILITY OF SUCH DAMAGE. 32 */ 33 34#include <sys/cdefs.h> |
35__FBSDID("$FreeBSD: head/sys/dev/nge/if_nge.c 192298 2009-05-18 07:10:48Z yongari $"); | 35__FBSDID("$FreeBSD: head/sys/dev/nge/if_nge.c 192506 2009-05-21 02:12:10Z yongari $"); |
36 37/* 38 * National Semiconductor DP83820/DP83821 gigabit ethernet driver 39 * for FreeBSD. Datasheets are available from: 40 * 41 * http://www.national.com/ds/DP/DP83820.pdf 42 * http://www.national.com/ds/DP/DP83821.pdf 43 * --- 45 unchanged lines hidden (view full) --- 89 */ 90 91#ifdef HAVE_KERNEL_OPTION_HEADERS 92#include "opt_device_polling.h" 93#endif 94 95#include <sys/param.h> 96#include <sys/systm.h> | 36 37/* 38 * National Semiconductor DP83820/DP83821 gigabit ethernet driver 39 * for FreeBSD. Datasheets are available from: 40 * 41 * http://www.national.com/ds/DP/DP83820.pdf 42 * http://www.national.com/ds/DP/DP83821.pdf 43 * --- 45 unchanged lines hidden (view full) --- 89 */ 90 91#ifdef HAVE_KERNEL_OPTION_HEADERS 92#include "opt_device_polling.h" 93#endif 94 95#include <sys/param.h> 96#include <sys/systm.h> |
97#include <sys/sockio.h> 98#include <sys/mbuf.h> | 97#include <sys/bus.h> 98#include <sys/endian.h> 99#include <sys/kernel.h> 100#include <sys/lock.h> |
99#include <sys/malloc.h> | 101#include <sys/malloc.h> |
102#include <sys/mbuf.h> |
|
100#include <sys/module.h> | 103#include <sys/module.h> |
101#include <sys/kernel.h> | 104#include <sys/mutex.h> 105#include <sys/rman.h> |
102#include <sys/socket.h> | 106#include <sys/socket.h> |
107#include <sys/sockio.h> 108#include <sys/sysctl.h> |
|
103 | 109 |
110#include <net/bpf.h> |
|
104#include <net/if.h> 105#include <net/if_arp.h> 106#include <net/ethernet.h> 107#include <net/if_dl.h> 108#include <net/if_media.h> 109#include <net/if_types.h> 110#include <net/if_vlan_var.h> 111 | 111#include <net/if.h> 112#include <net/if_arp.h> 113#include <net/ethernet.h> 114#include <net/if_dl.h> 115#include <net/if_media.h> 116#include <net/if_types.h> 117#include <net/if_vlan_var.h> 118 |
112#include <net/bpf.h> 113 114#include <vm/vm.h> /* for vtophys */ 115#include <vm/pmap.h> /* for vtophys */ 116#include <machine/bus.h> 117#include <machine/resource.h> 118#include <sys/bus.h> 119#include <sys/rman.h> 120 | |
121#include <dev/mii/mii.h> 122#include <dev/mii/miivar.h> 123 124#include <dev/pci/pcireg.h> 125#include <dev/pci/pcivar.h> 126 | 119#include <dev/mii/mii.h> 120#include <dev/mii/miivar.h> 121 122#include <dev/pci/pcireg.h> 123#include <dev/pci/pcivar.h> 124 |
127#define NGE_USEIOSPACE | 125#include <machine/bus.h> |
128 129#include <dev/nge/if_ngereg.h> 130 | 126 127#include <dev/nge/if_ngereg.h> 128 |
129/* "device miibus" required. See GENERIC if you get errors here. */ 130#include "miibus_if.h" 131 |
|
131MODULE_DEPEND(nge, pci, 1, 1, 1); 132MODULE_DEPEND(nge, ether, 1, 1, 1); 133MODULE_DEPEND(nge, miibus, 1, 1, 1); 134 | 132MODULE_DEPEND(nge, pci, 1, 1, 1); 133MODULE_DEPEND(nge, ether, 1, 1, 1); 134MODULE_DEPEND(nge, miibus, 1, 1, 1); 135 |
135/* "device miibus" required. See GENERIC if you get errors here. */ 136#include "miibus_if.h" 137 | |
138#define NGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 139 140/* 141 * Various supported device vendors/types and their names. 142 */ 143static struct nge_type nge_devs[] = { 144 { NGE_VENDORID, NGE_DEVICEID, 145 "National Semiconductor Gigabit Ethernet" }, 146 { 0, 0, NULL } 147}; 148 149static int nge_probe(device_t); 150static int nge_attach(device_t); 151static int nge_detach(device_t); | 136#define NGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) 137 138/* 139 * Various supported device vendors/types and their names. 140 */ 141static struct nge_type nge_devs[] = { 142 { NGE_VENDORID, NGE_DEVICEID, 143 "National Semiconductor Gigabit Ethernet" }, 144 { 0, 0, NULL } 145}; 146 147static int nge_probe(device_t); 148static int nge_attach(device_t); 149static int nge_detach(device_t); |
150static int nge_shutdown(device_t); 151static int nge_suspend(device_t); 152static int nge_resume(device_t); |
|
152 | 153 |
153static int nge_newbuf(struct nge_softc *, struct nge_desc *, struct mbuf *); 154static int nge_encap(struct nge_softc *, struct mbuf *, uint32_t *); 155#ifdef NGE_FIXUP_RX 156static __inline void nge_fixup_rx (struct mbuf *); | 154static __inline void nge_discard_rxbuf(struct nge_softc *, int); 155static int nge_newbuf(struct nge_softc *, int); 156static int nge_encap(struct nge_softc *, struct mbuf **); 157#ifndef __NO_STRICT_ALIGNMENT 158static __inline void nge_fixup_rx(struct mbuf *); |
157#endif 158static void nge_rxeof(struct nge_softc *); 159static void nge_txeof(struct nge_softc *); 160static void nge_intr(void *); 161static void nge_tick(void *); | 159#endif 160static void nge_rxeof(struct nge_softc *); 161static void nge_txeof(struct nge_softc *); 162static void nge_intr(void *); 163static void nge_tick(void *); |
164static void nge_stats_update(struct nge_softc *); |
|
162static void nge_start(struct ifnet *); 163static void nge_start_locked(struct ifnet *); 164static int nge_ioctl(struct ifnet *, u_long, caddr_t); 165static void nge_init(void *); 166static void nge_init_locked(struct nge_softc *); | 165static void nge_start(struct ifnet *); 166static void nge_start_locked(struct ifnet *); 167static int nge_ioctl(struct ifnet *, u_long, caddr_t); 168static void nge_init(void *); 169static void nge_init_locked(struct nge_softc *); |
170static int nge_stop_mac(struct nge_softc *); |
|
167static void nge_stop(struct nge_softc *); | 171static void nge_stop(struct nge_softc *); |
168static void nge_watchdog(struct ifnet *); 169static int nge_shutdown(device_t); 170static int nge_ifmedia_upd(struct ifnet *); 171static void nge_ifmedia_upd_locked(struct ifnet *); 172static void nge_ifmedia_sts(struct ifnet *, struct ifmediareq *); | 172static void nge_wol(struct nge_softc *); 173static void nge_watchdog(struct nge_softc *); 174static int nge_mediachange(struct ifnet *); 175static void nge_mediastatus(struct ifnet *, struct ifmediareq *); |
173 174static void nge_delay(struct nge_softc *); 175static void nge_eeprom_idle(struct nge_softc *); 176static void nge_eeprom_putbyte(struct nge_softc *, int); 177static void nge_eeprom_getword(struct nge_softc *, int, uint16_t *); | 176 177static void nge_delay(struct nge_softc *); 178static void nge_eeprom_idle(struct nge_softc *); 179static void nge_eeprom_putbyte(struct nge_softc *, int); 180static void nge_eeprom_getword(struct nge_softc *, int, uint16_t *); |
178static void nge_read_eeprom(struct nge_softc *, caddr_t, int, int, int); | 181static void nge_read_eeprom(struct nge_softc *, caddr_t, int, int); |
179 180static void nge_mii_sync(struct nge_softc *); 181static void nge_mii_send(struct nge_softc *, uint32_t, int); 182static int nge_mii_readreg(struct nge_softc *, struct nge_mii_frame *); 183static int nge_mii_writereg(struct nge_softc *, struct nge_mii_frame *); 184 185static int nge_miibus_readreg(device_t, int, int); 186static int nge_miibus_writereg(device_t, int, int, int); 187static void nge_miibus_statchg(device_t); 188 | 182 183static void nge_mii_sync(struct nge_softc *); 184static void nge_mii_send(struct nge_softc *, uint32_t, int); 185static int nge_mii_readreg(struct nge_softc *, struct nge_mii_frame *); 186static int nge_mii_writereg(struct nge_softc *, struct nge_mii_frame *); 187 188static int nge_miibus_readreg(device_t, int, int); 189static int nge_miibus_writereg(device_t, int, int, int); 190static void nge_miibus_statchg(device_t); 191 |
189static void nge_setmulti(struct nge_softc *); | 192static void nge_rxfilter(struct nge_softc *); |
190static void nge_reset(struct nge_softc *); | 193static void nge_reset(struct nge_softc *); |
194static void nge_dmamap_cb(void *, bus_dma_segment_t *, int, int); 195static int nge_dma_alloc(struct nge_softc *); 196static void nge_dma_free(struct nge_softc *); |
|
191static int nge_list_rx_init(struct nge_softc *); 192static int nge_list_tx_init(struct nge_softc *); | 197static int nge_list_rx_init(struct nge_softc *); 198static int nge_list_tx_init(struct nge_softc *); |
199static void nge_sysctl_node(struct nge_softc *); 200static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int); 201static int sysctl_hw_nge_int_holdoff(SYSCTL_HANDLER_ARGS); |
|
193 | 202 |
194#ifdef NGE_USEIOSPACE 195#define NGE_RES SYS_RES_IOPORT 196#define NGE_RID NGE_PCI_LOIO 197#else 198#define NGE_RES SYS_RES_MEMORY 199#define NGE_RID NGE_PCI_LOMEM 200#endif 201 | |
202static device_method_t nge_methods[] = { 203 /* Device interface */ 204 DEVMETHOD(device_probe, nge_probe), 205 DEVMETHOD(device_attach, nge_attach), 206 DEVMETHOD(device_detach, nge_detach), 207 DEVMETHOD(device_shutdown, nge_shutdown), | 203static device_method_t nge_methods[] = { 204 /* Device interface */ 205 DEVMETHOD(device_probe, nge_probe), 206 DEVMETHOD(device_attach, nge_attach), 207 DEVMETHOD(device_detach, nge_detach), 208 DEVMETHOD(device_shutdown, nge_shutdown), |
209 DEVMETHOD(device_suspend, nge_suspend), 210 DEVMETHOD(device_resume, nge_resume), |
|
208 209 /* bus interface */ 210 DEVMETHOD(bus_print_child, bus_generic_print_child), 211 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 212 213 /* MII interface */ 214 DEVMETHOD(miibus_readreg, nge_miibus_readreg), 215 DEVMETHOD(miibus_writereg, nge_miibus_writereg), 216 DEVMETHOD(miibus_statchg, nge_miibus_statchg), 217 | 211 212 /* bus interface */ 213 DEVMETHOD(bus_print_child, bus_generic_print_child), 214 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 215 216 /* MII interface */ 217 DEVMETHOD(miibus_readreg, nge_miibus_readreg), 218 DEVMETHOD(miibus_writereg, nge_miibus_writereg), 219 DEVMETHOD(miibus_statchg, nge_miibus_statchg), 220 |
218 { 0, 0 } | 221 { NULL, NULL } |
219}; 220 221static driver_t nge_driver = { 222 "nge", 223 nge_methods, 224 sizeof(struct nge_softc) 225}; 226 --- 118 unchanged lines hidden (view full) --- 345 346 *dest = word; 347} 348 349/* 350 * Read a sequence of words from the EEPROM. 351 */ 352static void | 222}; 223 224static driver_t nge_driver = { 225 "nge", 226 nge_methods, 227 sizeof(struct nge_softc) 228}; 229 --- 118 unchanged lines hidden (view full) --- 348 349 *dest = word; 350} 351 352/* 353 * Read a sequence of words from the EEPROM. 354 */ 355static void |
353nge_read_eeprom(struct nge_softc *sc, caddr_t dest, int off, int cnt, int swap) | 356nge_read_eeprom(struct nge_softc *sc, caddr_t dest, int off, int cnt) |
354{ 355 int i; 356 uint16_t word = 0, *ptr; 357 358 for (i = 0; i < cnt; i++) { 359 nge_eeprom_getword(sc, off + i, &word); 360 ptr = (uint16_t *)(dest + (i * 2)); | 357{ 358 int i; 359 uint16_t word = 0, *ptr; 360 361 for (i = 0; i < cnt; i++) { 362 nge_eeprom_getword(sc, off + i, &word); 363 ptr = (uint16_t *)(dest + (i * 2)); |
361 if (swap) 362 *ptr = ntohs(word); 363 else 364 *ptr = word; | 364 *ptr = word; |
365 } 366} 367 368/* 369 * Sync the PHYs by setting data bit and strobing the clock 32 times. 370 */ 371static void 372nge_mii_sync(struct nge_softc *sc) --- 162 unchanged lines hidden (view full) --- 535 return (0); 536} 537 538static int 539nge_miibus_readreg(device_t dev, int phy, int reg) 540{ 541 struct nge_softc *sc; 542 struct nge_mii_frame frame; | 365 } 366} 367 368/* 369 * Sync the PHYs by setting data bit and strobing the clock 32 times. 370 */ 371static void 372nge_mii_sync(struct nge_softc *sc) --- 162 unchanged lines hidden (view full) --- 535 return (0); 536} 537 538static int 539nge_miibus_readreg(device_t dev, int phy, int reg) 540{ 541 struct nge_softc *sc; 542 struct nge_mii_frame frame; |
543 int rv; |
|
543 544 sc = device_get_softc(dev); | 544 545 sc = device_get_softc(dev); |
546 if ((sc->nge_flags & NGE_FLAG_TBI) != 0) { 547 /* Pretend PHY is at address 0. */ 548 if (phy != 0) 549 return (0); 550 switch (reg) { 551 case MII_BMCR: 552 reg = NGE_TBI_BMCR; 553 break; 554 case MII_BMSR: 555 /* 83820/83821 has different bit layout for BMSR. */ 556 rv = BMSR_ANEG | BMSR_EXTCAP | BMSR_EXTSTAT; 557 reg = CSR_READ_4(sc, NGE_TBI_BMSR); 558 if ((reg & NGE_TBIBMSR_ANEG_DONE) != 0) 559 rv |= BMSR_ACOMP; 560 if ((reg & NGE_TBIBMSR_LINKSTAT) != 0) 561 rv |= BMSR_LINK; 562 return (rv); 563 case MII_ANAR: 564 reg = NGE_TBI_ANAR; 565 break; 566 case MII_ANLPAR: 567 reg = NGE_TBI_ANLPAR; 568 break; 569 case MII_ANER: 570 reg = NGE_TBI_ANER; 571 break; 572 case MII_EXTSR: 573 reg = NGE_TBI_ESR; 574 break; 575 case MII_PHYIDR1: 576 case MII_PHYIDR2: 577 return (0); 578 default: 579 device_printf(sc->nge_dev, 580 "bad phy register read : %d\n", reg); 581 return (0); 582 } 583 return (CSR_READ_4(sc, reg)); 584 } |
|
545 546 bzero((char *)&frame, sizeof(frame)); 547 548 frame.mii_phyaddr = phy; 549 frame.mii_regaddr = reg; 550 nge_mii_readreg(sc, &frame); 551 552 return (frame.mii_data); 553} 554 555static int 556nge_miibus_writereg(device_t dev, int phy, int reg, int data) 557{ 558 struct nge_softc *sc; 559 struct nge_mii_frame frame; 560 561 sc = device_get_softc(dev); | 585 586 bzero((char *)&frame, sizeof(frame)); 587 588 frame.mii_phyaddr = phy; 589 frame.mii_regaddr = reg; 590 nge_mii_readreg(sc, &frame); 591 592 return (frame.mii_data); 593} 594 595static int 596nge_miibus_writereg(device_t dev, int phy, int reg, int data) 597{ 598 struct nge_softc *sc; 599 struct nge_mii_frame frame; 600 601 sc = device_get_softc(dev); |
602 if ((sc->nge_flags & NGE_FLAG_TBI) != 0) { 603 /* Pretend PHY is at address 0. */ 604 if (phy != 0) 605 return (0); 606 switch (reg) { 607 case MII_BMCR: 608 reg = NGE_TBI_BMCR; 609 break; 610 case MII_BMSR: 611 return (0); 612 case MII_ANAR: 613 reg = NGE_TBI_ANAR; 614 break; 615 case MII_ANLPAR: 616 reg = NGE_TBI_ANLPAR; 617 break; 618 case MII_ANER: 619 reg = NGE_TBI_ANER; 620 break; 621 case MII_EXTSR: 622 reg = NGE_TBI_ESR; 623 break; 624 case MII_PHYIDR1: 625 case MII_PHYIDR2: 626 return (0); 627 default: 628 device_printf(sc->nge_dev, 629 "bad phy register write : %d\n", reg); 630 return (0); 631 } 632 CSR_WRITE_4(sc, reg, data); 633 return (0); 634 } |
|
562 563 bzero((char *)&frame, sizeof(frame)); 564 565 frame.mii_phyaddr = phy; 566 frame.mii_regaddr = reg; 567 frame.mii_data = data; 568 nge_mii_writereg(sc, &frame); 569 570 return (0); 571} 572 | 635 636 bzero((char *)&frame, sizeof(frame)); 637 638 frame.mii_phyaddr = phy; 639 frame.mii_regaddr = reg; 640 frame.mii_data = data; 641 nge_mii_writereg(sc, &frame); 642 643 return (0); 644} 645 |
646/* 647 * media status/link state change handler. 648 */ |
|
573static void 574nge_miibus_statchg(device_t dev) 575{ | 649static void 650nge_miibus_statchg(device_t dev) 651{ |
576 int status; | |
577 struct nge_softc *sc; 578 struct mii_data *mii; | 652 struct nge_softc *sc; 653 struct mii_data *mii; |
654 struct ifnet *ifp; 655 struct nge_txdesc *txd; 656 uint32_t done, reg, status; 657 int i; |
|
579 580 sc = device_get_softc(dev); | 658 659 sc = device_get_softc(dev); |
581 if (sc->nge_tbi) { 582 if (IFM_SUBTYPE(sc->nge_ifmedia.ifm_cur->ifm_media) 583 == IFM_AUTO) { 584 status = CSR_READ_4(sc, NGE_TBI_ANLPAR); 585 if (status == 0 || status & NGE_TBIANAR_FDX) { 586 NGE_SETBIT(sc, NGE_TX_CFG, 587 (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR)); 588 NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX); 589 } else { 590 NGE_CLRBIT(sc, NGE_TX_CFG, 591 (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR)); 592 NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX); 593 } | 660 NGE_LOCK_ASSERT(sc); |
594 | 661 |
595 } else if ((sc->nge_ifmedia.ifm_cur->ifm_media & IFM_GMASK) 596 != IFM_FDX) { 597 NGE_CLRBIT(sc, NGE_TX_CFG, 598 (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR)); 599 NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX); 600 } else { 601 NGE_SETBIT(sc, NGE_TX_CFG, 602 (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR)); 603 NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX); | 662 mii = device_get_softc(sc->nge_miibus); 663 ifp = sc->nge_ifp; 664 if (mii == NULL || ifp == NULL || 665 (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 666 return; 667 668 sc->nge_flags &= ~NGE_FLAG_LINK; 669 if ((mii->mii_media_status & (IFM_AVALID | IFM_ACTIVE)) == 670 (IFM_AVALID | IFM_ACTIVE)) { 671 switch (IFM_SUBTYPE(mii->mii_media_active)) { 672 case IFM_10_T: 673 case IFM_100_TX: 674 case IFM_1000_T: 675 case IFM_1000_SX: 676 case IFM_1000_LX: 677 case IFM_1000_CX: 678 sc->nge_flags |= NGE_FLAG_LINK; 679 break; 680 default: 681 break; |
604 } | 682 } |
605 } else { 606 mii = device_get_softc(sc->nge_miibus); | 683 } |
607 | 684 |
608 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { 609 NGE_SETBIT(sc, NGE_TX_CFG, 610 (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR)); | 685 /* Stop Tx/Rx MACs. */ 686 if (nge_stop_mac(sc) == ETIMEDOUT) 687 device_printf(sc->nge_dev, 688 "%s: unable to stop Tx/Rx MAC\n", __func__); 689 nge_txeof(sc); 690 nge_rxeof(sc); 691 if (sc->nge_head != NULL) { 692 m_freem(sc->nge_head); 693 sc->nge_head = sc->nge_tail = NULL; 694 } 695 696 /* Release queued frames. */ 697 for (i = 0; i < NGE_TX_RING_CNT; i++) { 698 txd = &sc->nge_cdata.nge_txdesc[i]; 699 if (txd->tx_m != NULL) { 700 bus_dmamap_sync(sc->nge_cdata.nge_tx_tag, 701 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); 702 bus_dmamap_unload(sc->nge_cdata.nge_tx_tag, 703 txd->tx_dmamap); 704 m_freem(txd->tx_m); 705 txd->tx_m = NULL; 706 } 707 } 708 709 /* Program MAC with resolved speed/duplex. */ 710 if ((sc->nge_flags & NGE_FLAG_LINK) != 0) { 711 if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { 712 NGE_SETBIT(sc, NGE_TX_CFG, 713 (NGE_TXCFG_IGN_HBEAT | NGE_TXCFG_IGN_CARR)); |
611 NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX); | 714 NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX); |
715#ifdef notyet 716 /* Enable flow-control. */ 717 if ((IFM_OPTIONS(mii->mii_media_active) & 718 (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE)) != 0) 719 NGE_SETBIT(sc, NGE_PAUSECSR, 720 NGE_PAUSECSR_PAUSE_ENB); 721#endif |
|
612 } else { 613 NGE_CLRBIT(sc, NGE_TX_CFG, | 722 } else { 723 NGE_CLRBIT(sc, NGE_TX_CFG, |
614 (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR)); | 724 (NGE_TXCFG_IGN_HBEAT | NGE_TXCFG_IGN_CARR)); |
615 NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX); | 725 NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX); |
726 NGE_CLRBIT(sc, NGE_PAUSECSR, NGE_PAUSECSR_PAUSE_ENB); |
|
616 } | 727 } |
617 | |
618 /* If we have a 1000Mbps link, set the mode_1000 bit. */ | 728 /* If we have a 1000Mbps link, set the mode_1000 bit. */ |
619 if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T || 620 IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) { 621 NGE_SETBIT(sc, NGE_CFG, NGE_CFG_MODE_1000); 622 } else { 623 NGE_CLRBIT(sc, NGE_CFG, NGE_CFG_MODE_1000); | 729 reg = CSR_READ_4(sc, NGE_CFG); 730 switch (IFM_SUBTYPE(mii->mii_media_active)) { 731 case IFM_1000_SX: 732 case IFM_1000_LX: 733 case IFM_1000_CX: 734 case IFM_1000_T: 735 reg |= NGE_CFG_MODE_1000; 736 break; 737 default: 738 reg &= ~NGE_CFG_MODE_1000; 739 break; |
624 } | 740 } |
741 CSR_WRITE_4(sc, NGE_CFG, reg); 742 743 /* Reset Tx/Rx MAC. */ 744 reg = CSR_READ_4(sc, NGE_CSR); 745 reg |= NGE_CSR_TX_RESET | NGE_CSR_RX_RESET; 746 CSR_WRITE_4(sc, NGE_CSR, reg); 747 /* Check the completion of reset. */ 748 done = 0; 749 for (i = 0; i < NGE_TIMEOUT; i++) { 750 DELAY(1); 751 status = CSR_READ_4(sc, NGE_ISR); 752 if ((status & NGE_ISR_RX_RESET_DONE) != 0) 753 done |= NGE_ISR_RX_RESET_DONE; 754 if ((status & NGE_ISR_TX_RESET_DONE) != 0) 755 done |= NGE_ISR_TX_RESET_DONE; 756 if (done == 757 (NGE_ISR_TX_RESET_DONE | NGE_ISR_RX_RESET_DONE)) 758 break; 759 } 760 if (i == NGE_TIMEOUT) 761 device_printf(sc->nge_dev, 762 "%s: unable to reset Tx/Rx MAC\n", __func__); 763 /* Reuse Rx buffer and reset consumer pointer. */ 764 sc->nge_cdata.nge_rx_cons = 0; 765 /* 766 * It seems that resetting Rx/Tx MAC results in 767 * resetting Tx/Rx descriptor pointer registers such 768 * that reloading Tx/Rx lists address are needed. 769 */ 770 CSR_WRITE_4(sc, NGE_RX_LISTPTR_HI, 771 NGE_ADDR_HI(sc->nge_rdata.nge_rx_ring_paddr)); 772 CSR_WRITE_4(sc, NGE_RX_LISTPTR_LO, 773 NGE_ADDR_LO(sc->nge_rdata.nge_rx_ring_paddr)); 774 CSR_WRITE_4(sc, NGE_TX_LISTPTR_HI, 775 NGE_ADDR_HI(sc->nge_rdata.nge_tx_ring_paddr)); 776 CSR_WRITE_4(sc, NGE_TX_LISTPTR_LO, 777 NGE_ADDR_LO(sc->nge_rdata.nge_tx_ring_paddr)); 778 /* Reinitialize Tx buffers. */ 779 nge_list_tx_init(sc); 780 781 /* Restart Rx MAC. */ 782 reg = CSR_READ_4(sc, NGE_CSR); 783 reg |= NGE_CSR_RX_ENABLE; 784 CSR_WRITE_4(sc, NGE_CSR, reg); 785 for (i = 0; i < NGE_TIMEOUT; i++) { 786 if ((CSR_READ_4(sc, NGE_CSR) & NGE_CSR_RX_ENABLE) != 0) 787 break; 788 DELAY(1); 789 } 790 if (i == NGE_TIMEOUT) 791 device_printf(sc->nge_dev, 792 "%s: unable to restart Rx MAC\n", __func__); |
|
625 } | 793 } |
794 795 /* Data LED off for TBI mode */ 796 if ((sc->nge_flags & NGE_FLAG_TBI) != 0) 797 CSR_WRITE_4(sc, NGE_GPIO, 798 CSR_READ_4(sc, NGE_GPIO) & ~NGE_GPIO_GP3_OUT); |
|
626} 627 628static void | 799} 800 801static void |
629nge_setmulti(struct nge_softc *sc) | 802nge_rxfilter(struct nge_softc *sc) |
630{ 631 struct ifnet *ifp; 632 struct ifmultiaddr *ifma; | 803{ 804 struct ifnet *ifp; 805 struct ifmultiaddr *ifma; |
633 uint32_t h = 0, i, filtsave; | 806 uint32_t h, i, rxfilt; |
634 int bit, index; 635 636 NGE_LOCK_ASSERT(sc); 637 ifp = sc->nge_ifp; 638 | 807 int bit, index; 808 809 NGE_LOCK_ASSERT(sc); 810 ifp = sc->nge_ifp; 811 |
639 if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { 640 NGE_CLRBIT(sc, NGE_RXFILT_CTL, 641 NGE_RXFILTCTL_MCHASH|NGE_RXFILTCTL_UCHASH); 642 NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ALLMULTI); 643 return; | 812 /* Make sure to stop Rx filtering. */ 813 rxfilt = CSR_READ_4(sc, NGE_RXFILT_CTL); 814 rxfilt &= ~NGE_RXFILTCTL_ENABLE; 815 CSR_WRITE_4(sc, NGE_RXFILT_CTL, rxfilt); 816 CSR_BARRIER_WRITE_4(sc, NGE_RXFILT_CTL); 817 818 rxfilt &= ~(NGE_RXFILTCTL_ALLMULTI | NGE_RXFILTCTL_ALLPHYS); 819 rxfilt &= ~NGE_RXFILTCTL_BROAD; 820 /* 821 * We don't want to use the hash table for matching unicast 822 * addresses. 823 */ 824 rxfilt &= ~(NGE_RXFILTCTL_MCHASH | NGE_RXFILTCTL_UCHASH); 825 826 /* 827 * For the NatSemi chip, we have to explicitly enable the 828 * reception of ARP frames, as well as turn on the 'perfect 829 * match' filter where we store the station address, otherwise 830 * we won't receive unicasts meant for this host. 831 */ 832 rxfilt |= NGE_RXFILTCTL_ARP | NGE_RXFILTCTL_PERFECT; 833 834 /* 835 * Set the capture broadcast bit to capture broadcast frames. 836 */ 837 if ((ifp->if_flags & IFF_BROADCAST) != 0) 838 rxfilt |= NGE_RXFILTCTL_BROAD; 839 840 if ((ifp->if_flags & IFF_PROMISC) != 0 || 841 (ifp->if_flags & IFF_ALLMULTI) != 0) { 842 rxfilt |= NGE_RXFILTCTL_ALLMULTI; 843 if ((ifp->if_flags & IFF_PROMISC) != 0) 844 rxfilt |= NGE_RXFILTCTL_ALLPHYS; 845 goto done; |
644 } 645 646 /* 647 * We have to explicitly enable the multicast hash table 648 * on the NatSemi chip if we want to use it, which we do. | 846 } 847 848 /* 849 * We have to explicitly enable the multicast hash table 850 * on the NatSemi chip if we want to use it, which we do. |
649 * We also have to tell it that we don't want to use the 650 * hash table for matching unicast addresses. | |
651 */ | 851 */ |
652 NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_MCHASH); 653 NGE_CLRBIT(sc, NGE_RXFILT_CTL, 654 NGE_RXFILTCTL_ALLMULTI|NGE_RXFILTCTL_UCHASH); | 852 rxfilt |= NGE_RXFILTCTL_MCHASH; |
655 | 853 |
656 filtsave = CSR_READ_4(sc, NGE_RXFILT_CTL); 657 | |
658 /* first, zot all the existing hash bits */ 659 for (i = 0; i < NGE_MCAST_FILTER_LEN; i += 2) { 660 CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_MCAST_LO + i); 661 CSR_WRITE_4(sc, NGE_RXFILT_DATA, 0); 662 } 663 664 /* 665 * From the 11 bits returned by the crc routine, the top 7 --- 10 unchanged lines hidden (view full) --- 676 index = (h >> 4) & 0x7F; 677 bit = h & 0xF; 678 CSR_WRITE_4(sc, NGE_RXFILT_CTL, 679 NGE_FILTADDR_MCAST_LO + (index * 2)); 680 NGE_SETBIT(sc, NGE_RXFILT_DATA, (1 << bit)); 681 } 682 IF_ADDR_UNLOCK(ifp); 683 | 854 /* first, zot all the existing hash bits */ 855 for (i = 0; i < NGE_MCAST_FILTER_LEN; i += 2) { 856 CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_MCAST_LO + i); 857 CSR_WRITE_4(sc, NGE_RXFILT_DATA, 0); 858 } 859 860 /* 861 * From the 11 bits returned by the crc routine, the top 7 --- 10 unchanged lines hidden (view full) --- 872 index = (h >> 4) & 0x7F; 873 bit = h & 0xF; 874 CSR_WRITE_4(sc, NGE_RXFILT_CTL, 875 NGE_FILTADDR_MCAST_LO + (index * 2)); 876 NGE_SETBIT(sc, NGE_RXFILT_DATA, (1 << bit)); 877 } 878 IF_ADDR_UNLOCK(ifp); 879 |
684 CSR_WRITE_4(sc, NGE_RXFILT_CTL, filtsave); | 880done: 881 CSR_WRITE_4(sc, NGE_RXFILT_CTL, rxfilt); 882 /* Turn the receive filter on. */ 883 rxfilt |= NGE_RXFILTCTL_ENABLE; 884 CSR_WRITE_4(sc, NGE_RXFILT_CTL, rxfilt); 885 CSR_BARRIER_WRITE_4(sc, NGE_RXFILT_CTL); |
685} 686 687static void 688nge_reset(struct nge_softc *sc) 689{ | 886} 887 888static void 889nge_reset(struct nge_softc *sc) 890{ |
891 uint32_t v; |
|
690 int i; 691 692 NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RESET); 693 694 for (i = 0; i < NGE_TIMEOUT; i++) { 695 if (!(CSR_READ_4(sc, NGE_CSR) & NGE_CSR_RESET)) 696 break; | 892 int i; 893 894 NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RESET); 895 896 for (i = 0; i < NGE_TIMEOUT; i++) { 897 if (!(CSR_READ_4(sc, NGE_CSR) & NGE_CSR_RESET)) 898 break; |
899 DELAY(1); |
|
697 } 698 699 if (i == NGE_TIMEOUT) 700 device_printf(sc->nge_dev, "reset never completed\n"); 701 702 /* Wait a little while for the chip to get its brains in order. */ 703 DELAY(1000); 704 705 /* 706 * If this is a NetSemi chip, make sure to clear 707 * PME mode. 708 */ 709 CSR_WRITE_4(sc, NGE_CLKRUN, NGE_CLKRUN_PMESTS); 710 CSR_WRITE_4(sc, NGE_CLKRUN, 0); | 900 } 901 902 if (i == NGE_TIMEOUT) 903 device_printf(sc->nge_dev, "reset never completed\n"); 904 905 /* Wait a little while for the chip to get its brains in order. */ 906 DELAY(1000); 907 908 /* 909 * If this is a NetSemi chip, make sure to clear 910 * PME mode. 911 */ 912 CSR_WRITE_4(sc, NGE_CLKRUN, NGE_CLKRUN_PMESTS); 913 CSR_WRITE_4(sc, NGE_CLKRUN, 0); |
914 915 /* Clear WOL events which may interfere normal Rx filter opertaion. */ 916 CSR_WRITE_4(sc, NGE_WOLCSR, 0); 917 918 /* 919 * Only DP83820 supports 64bits addressing/data transfers and 920 * 64bit addressing requires different descriptor structures. 921 * To make it simple, disable 64bit addressing/data transfers. 922 */ 923 v = CSR_READ_4(sc, NGE_CFG); 924 v &= ~(NGE_CFG_64BIT_ADDR_ENB | NGE_CFG_64BIT_DATA_ENB); 925 CSR_WRITE_4(sc, NGE_CFG, v); |
|
711} 712 713/* 714 * Probe for a NatSemi chip. Check the PCI vendor and device 715 * IDs against our list and return a device name if we find a match. 716 */ 717static int 718nge_probe(device_t dev) --- 16 unchanged lines hidden (view full) --- 735 736/* 737 * Attach the interface. Allocate softc structures, do ifmedia 738 * setup and ethernet/BPF attach. 739 */ 740static int 741nge_attach(device_t dev) 742{ | 926} 927 928/* 929 * Probe for a NatSemi chip. Check the PCI vendor and device 930 * IDs against our list and return a device name if we find a match. 931 */ 932static int 933nge_probe(device_t dev) --- 16 unchanged lines hidden (view full) --- 950 951/* 952 * Attach the interface. Allocate softc structures, do ifmedia 953 * setup and ethernet/BPF attach. 954 */ 955static int 956nge_attach(device_t dev) 957{ |
743 u_char eaddr[ETHER_ADDR_LEN]; | 958 uint8_t eaddr[ETHER_ADDR_LEN]; 959 uint16_t ea[ETHER_ADDR_LEN/2], ea_temp, reg; |
744 struct nge_softc *sc; | 960 struct nge_softc *sc; |
745 struct ifnet *ifp = NULL; 746 int error = 0, rid; | 961 struct ifnet *ifp; 962 int error, i, rid; |
747 | 963 |
964 error = 0; |
|
748 sc = device_get_softc(dev); 749 sc->nge_dev = dev; 750 751 NGE_LOCK_INIT(sc, device_get_nameunit(dev)); 752 callout_init_mtx(&sc->nge_stat_ch, &sc->nge_mtx, 0); 753 754 /* 755 * Map control/status registers. 756 */ 757 pci_enable_busmaster(dev); 758 | 965 sc = device_get_softc(dev); 966 sc->nge_dev = dev; 967 968 NGE_LOCK_INIT(sc, device_get_nameunit(dev)); 969 callout_init_mtx(&sc->nge_stat_ch, &sc->nge_mtx, 0); 970 971 /* 972 * Map control/status registers. 973 */ 974 pci_enable_busmaster(dev); 975 |
759 rid = NGE_RID; 760 sc->nge_res = bus_alloc_resource_any(dev, NGE_RES, &rid, RF_ACTIVE); | 976#ifdef NGE_USEIOSPACE 977 sc->nge_res_type = SYS_RES_IOPORT; 978 sc->nge_res_id = PCIR_BAR(0); 979#else 980 sc->nge_res_type = SYS_RES_MEMORY; 981 sc->nge_res_id = PCIR_BAR(1); 982#endif 983 sc->nge_res = bus_alloc_resource_any(dev, sc->nge_res_type, 984 &sc->nge_res_id, RF_ACTIVE); |
761 762 if (sc->nge_res == NULL) { | 985 986 if (sc->nge_res == NULL) { |
763 device_printf(dev, "couldn't map ports/memory\n"); 764 error = ENXIO; 765 goto fail; | 987 if (sc->nge_res_type == SYS_RES_MEMORY) { 988 sc->nge_res_type = SYS_RES_IOPORT; 989 sc->nge_res_id = PCIR_BAR(0); 990 } else { 991 sc->nge_res_type = SYS_RES_MEMORY; 992 sc->nge_res_id = PCIR_BAR(1); 993 } 994 sc->nge_res = bus_alloc_resource_any(dev, sc->nge_res_type, 995 &sc->nge_res_id, RF_ACTIVE); 996 if (sc->nge_res == NULL) { 997 device_printf(dev, "couldn't allocate %s resources\n", 998 sc->nge_res_type == SYS_RES_MEMORY ? "memory" : 999 "I/O"); 1000 NGE_LOCK_DESTROY(sc); 1001 return (ENXIO); 1002 } |
766 } 767 | 1003 } 1004 |
768 sc->nge_btag = rman_get_bustag(sc->nge_res); 769 sc->nge_bhandle = rman_get_bushandle(sc->nge_res); 770 | |
771 /* Allocate interrupt */ 772 rid = 0; 773 sc->nge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 774 RF_SHAREABLE | RF_ACTIVE); 775 776 if (sc->nge_irq == NULL) { 777 device_printf(dev, "couldn't map interrupt\n"); 778 error = ENXIO; 779 goto fail; 780 } 781 | 1005 /* Allocate interrupt */ 1006 rid = 0; 1007 sc->nge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 1008 RF_SHAREABLE | RF_ACTIVE); 1009 1010 if (sc->nge_irq == NULL) { 1011 device_printf(dev, "couldn't map interrupt\n"); 1012 error = ENXIO; 1013 goto fail; 1014 } 1015 |
1016 /* Enable MWI. */ 1017 reg = pci_read_config(dev, PCIR_COMMAND, 2); 1018 reg |= PCIM_CMD_MWRICEN; 1019 pci_write_config(dev, PCIR_COMMAND, reg, 2); 1020 |
|
782 /* Reset the adapter. */ 783 nge_reset(sc); 784 785 /* 786 * Get station address from the EEPROM. 787 */ | 1021 /* Reset the adapter. */ 1022 nge_reset(sc); 1023 1024 /* 1025 * Get station address from the EEPROM. 1026 */ |
788 nge_read_eeprom(sc, (caddr_t)&eaddr[4], NGE_EE_NODEADDR, 1, 0); 789 nge_read_eeprom(sc, (caddr_t)&eaddr[2], NGE_EE_NODEADDR + 1, 1, 0); 790 nge_read_eeprom(sc, (caddr_t)&eaddr[0], NGE_EE_NODEADDR + 2, 1, 0); | 1027 nge_read_eeprom(sc, (caddr_t)ea, NGE_EE_NODEADDR, 3); 1028 for (i = 0; i < ETHER_ADDR_LEN / 2; i++) 1029 ea[i] = le16toh(ea[i]); 1030 ea_temp = ea[0]; 1031 ea[0] = ea[2]; 1032 ea[2] = ea_temp; 1033 bcopy(ea, eaddr, sizeof(eaddr)); |
791 | 1034 |
792 sc->nge_ldata = contigmalloc(sizeof(struct nge_list_data), M_DEVBUF, 793 M_NOWAIT|M_ZERO, 0, 0xffffffff, PAGE_SIZE, 0); 794 795 if (sc->nge_ldata == NULL) { 796 device_printf(dev, "no memory for list buffers!\n"); | 1035 if (nge_dma_alloc(sc) != 0) { |
797 error = ENXIO; 798 goto fail; 799 } 800 | 1036 error = ENXIO; 1037 goto fail; 1038 } 1039 |
1040 nge_sysctl_node(sc); 1041 |
|
801 ifp = sc->nge_ifp = if_alloc(IFT_ETHER); 802 if (ifp == NULL) { | 1042 ifp = sc->nge_ifp = if_alloc(IFT_ETHER); 1043 if (ifp == NULL) { |
803 device_printf(dev, "can not if_alloc()\n"); | 1044 device_printf(dev, "can not allocate ifnet structure\n"); |
804 error = ENOSPC; 805 goto fail; 806 } 807 ifp->if_softc = sc; 808 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); | 1045 error = ENOSPC; 1046 goto fail; 1047 } 1048 ifp->if_softc = sc; 1049 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); |
809 ifp->if_mtu = ETHERMTU; | |
810 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 811 ifp->if_ioctl = nge_ioctl; 812 ifp->if_start = nge_start; | 1050 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1051 ifp->if_ioctl = nge_ioctl; 1052 ifp->if_start = nge_start; |
813 ifp->if_watchdog = nge_watchdog; | |
814 ifp->if_init = nge_init; | 1053 ifp->if_init = nge_init; |
815 ifp->if_snd.ifq_maxlen = NGE_TX_LIST_CNT - 1; | 1054 ifp->if_snd.ifq_drv_maxlen = NGE_TX_RING_CNT - 1; 1055 IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen); 1056 IFQ_SET_READY(&ifp->if_snd); |
816 ifp->if_hwassist = NGE_CSUM_FEATURES; | 1057 ifp->if_hwassist = NGE_CSUM_FEATURES; |
817 ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING; | 1058 ifp->if_capabilities = IFCAP_HWCSUM; 1059 /* 1060 * It seems that some hardwares doesn't provide 3.3V auxiliary 1061 * supply(3VAUX) to drive PME such that checking PCI power 1062 * management capability is necessary. 1063 */ 1064 if (pci_find_extcap(sc->nge_dev, PCIY_PMG, &i) == 0) 1065 ifp->if_capabilities |= IFCAP_WOL; |
818 ifp->if_capenable = ifp->if_capabilities; | 1066 ifp->if_capenable = ifp->if_capabilities; |
819#ifdef DEVICE_POLLING 820 ifp->if_capabilities |= IFCAP_POLLING; 821#endif | |
822 | 1067 |
1068 if ((CSR_READ_4(sc, NGE_CFG) & NGE_CFG_TBI_EN) != 0) { 1069 sc->nge_flags |= NGE_FLAG_TBI; 1070 device_printf(dev, "Using TBI\n"); 1071 /* Configure GPIO. */ 1072 CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO) 1073 | NGE_GPIO_GP4_OUT 1074 | NGE_GPIO_GP1_OUTENB | NGE_GPIO_GP2_OUTENB 1075 | NGE_GPIO_GP3_OUTENB 1076 | NGE_GPIO_GP3_IN | NGE_GPIO_GP4_IN); 1077 } 1078 |
|
823 /* 824 * Do MII setup. 825 */ | 1079 /* 1080 * Do MII setup. 1081 */ |
826 /* XXX: leaked on error */ 827 if (mii_phy_probe(dev, &sc->nge_miibus, 828 nge_ifmedia_upd, nge_ifmedia_sts)) { 829 if (CSR_READ_4(sc, NGE_CFG) & NGE_CFG_TBI_EN) { 830 sc->nge_tbi = 1; 831 device_printf(dev, "Using TBI\n"); 832 833 sc->nge_miibus = dev; 834 835 ifmedia_init(&sc->nge_ifmedia, 0, nge_ifmedia_upd, 836 nge_ifmedia_sts); 837#define ADD(m, c) ifmedia_add(&sc->nge_ifmedia, (m), (c), NULL) 838 ADD(IFM_MAKEWORD(IFM_ETHER, IFM_NONE, 0, 0), 0); 839 ADD(IFM_MAKEWORD(IFM_ETHER, IFM_1000_SX, 0, 0), 0); 840 ADD(IFM_MAKEWORD(IFM_ETHER, IFM_1000_SX, IFM_FDX, 0),0); 841 ADD(IFM_MAKEWORD(IFM_ETHER, IFM_AUTO, 0, 0), 0); 842#undef ADD 843 device_printf(dev, " 1000baseSX, 1000baseSX-FDX, auto\n"); 844 845 ifmedia_set(&sc->nge_ifmedia, 846 IFM_MAKEWORD(IFM_ETHER, IFM_AUTO, 0, 0)); 847 848 CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO) 849 | NGE_GPIO_GP4_OUT 850 | NGE_GPIO_GP1_OUTENB | NGE_GPIO_GP2_OUTENB 851 | NGE_GPIO_GP3_OUTENB 852 | NGE_GPIO_GP3_IN | NGE_GPIO_GP4_IN); 853 854 } else { 855 device_printf(dev, "MII without any PHY!\n"); 856 error = ENXIO; 857 goto fail; 858 } | 1082 error = mii_phy_probe(dev, &sc->nge_miibus, nge_mediachange, 1083 nge_mediastatus); 1084 if (error != 0) { 1085 device_printf(dev, "no PHY found!\n"); 1086 goto fail; |
859 } 860 861 /* 862 * Call MI attach routine. 863 */ 864 ether_ifattach(ifp, eaddr); 865 | 1087 } 1088 1089 /* 1090 * Call MI attach routine. 1091 */ 1092 ether_ifattach(ifp, eaddr); 1093 |
1094 /* VLAN capability setup. */ 1095 ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING; 1096 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM; 1097 ifp->if_capenable = ifp->if_capabilities; 1098#ifdef DEVICE_POLLING 1099 ifp->if_capabilities |= IFCAP_POLLING; 1100#endif |
|
866 /* | 1101 /* |
1102 * Tell the upper layer(s) we support long frames. 1103 * Must appear after the call to ether_ifattach() because 1104 * ether_ifattach() sets ifi_hdrlen to the default value. 1105 */ 1106 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 1107 1108 /* |
|
867 * Hookup IRQ last. 868 */ 869 error = bus_setup_intr(dev, sc->nge_irq, INTR_TYPE_NET | INTR_MPSAFE, 870 NULL, nge_intr, sc, &sc->nge_intrhand); 871 if (error) { 872 device_printf(dev, "couldn't set up irq\n"); 873 goto fail; 874 } 875 | 1109 * Hookup IRQ last. 1110 */ 1111 error = bus_setup_intr(dev, sc->nge_irq, INTR_TYPE_NET | INTR_MPSAFE, 1112 NULL, nge_intr, sc, &sc->nge_intrhand); 1113 if (error) { 1114 device_printf(dev, "couldn't set up irq\n"); 1115 goto fail; 1116 } 1117 |
876 return (0); 877 | |
878fail: | 1118fail: |
879 if (sc->nge_ldata) 880 contigfree(sc->nge_ldata, 881 sizeof(struct nge_list_data), M_DEVBUF); 882 if (ifp) 883 if_free(ifp); 884 if (sc->nge_irq) 885 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nge_irq); 886 if (sc->nge_res) 887 bus_release_resource(dev, NGE_RES, NGE_RID, sc->nge_res); 888 NGE_LOCK_DESTROY(sc); | 1119 if (error != 0) 1120 nge_detach(dev); |
889 return (error); 890} 891 892static int 893nge_detach(device_t dev) 894{ 895 struct nge_softc *sc; 896 struct ifnet *ifp; 897 898 sc = device_get_softc(dev); 899 ifp = sc->nge_ifp; 900 901#ifdef DEVICE_POLLING | 1121 return (error); 1122} 1123 1124static int 1125nge_detach(device_t dev) 1126{ 1127 struct nge_softc *sc; 1128 struct ifnet *ifp; 1129 1130 sc = device_get_softc(dev); 1131 ifp = sc->nge_ifp; 1132 1133#ifdef DEVICE_POLLING |
902 if (ifp->if_capenable & IFCAP_POLLING) | 1134 if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING) |
903 ether_poll_deregister(ifp); 904#endif | 1135 ether_poll_deregister(ifp); 1136#endif |
905 NGE_LOCK(sc); 906 nge_reset(sc); 907 nge_stop(sc); 908 NGE_UNLOCK(sc); 909 callout_drain(&sc->nge_stat_ch); 910 ether_ifdetach(ifp); | |
911 | 1137 |
912 bus_generic_detach(dev); 913 if (!sc->nge_tbi) { | 1138 if (device_is_attached(dev)) { 1139 NGE_LOCK(sc); 1140 sc->nge_flags |= NGE_FLAG_DETACH; 1141 nge_stop(sc); 1142 NGE_UNLOCK(sc); 1143 callout_drain(&sc->nge_stat_ch); 1144 if (ifp != NULL) 1145 ether_ifdetach(ifp); 1146 } 1147 1148 if (sc->nge_miibus != NULL) { |
914 device_delete_child(dev, sc->nge_miibus); | 1149 device_delete_child(dev, sc->nge_miibus); |
1150 sc->nge_miibus = NULL; |
|
915 } | 1151 } |
916 bus_teardown_intr(dev, sc->nge_irq, sc->nge_intrhand); 917 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nge_irq); 918 bus_release_resource(dev, NGE_RES, NGE_RID, sc->nge_res); | 1152 bus_generic_detach(dev); 1153 if (sc->nge_intrhand != NULL) 1154 bus_teardown_intr(dev, sc->nge_irq, sc->nge_intrhand); 1155 if (sc->nge_irq != NULL) 1156 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nge_irq); 1157 if (sc->nge_res != NULL) 1158 bus_release_resource(dev, sc->nge_res_type, sc->nge_res_id, 1159 sc->nge_res); |
919 | 1160 |
920 contigfree(sc->nge_ldata, sizeof(struct nge_list_data), M_DEVBUF); 921 if_free(ifp); | 1161 nge_dma_free(sc); 1162 if (ifp != NULL) 1163 if_free(ifp); |
922 923 NGE_LOCK_DESTROY(sc); 924 925 return (0); 926} 927 | 1164 1165 NGE_LOCK_DESTROY(sc); 1166 1167 return (0); 1168} 1169 |
1170struct nge_dmamap_arg { 1171 bus_addr_t nge_busaddr; 1172}; 1173 1174static void 1175nge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1176{ 1177 struct nge_dmamap_arg *ctx; 1178 1179 if (error != 0) 1180 return; 1181 ctx = arg; 1182 ctx->nge_busaddr = segs[0].ds_addr; 1183} 1184 1185static int 1186nge_dma_alloc(struct nge_softc *sc) 1187{ 1188 struct nge_dmamap_arg ctx; 1189 struct nge_txdesc *txd; 1190 struct nge_rxdesc *rxd; 1191 int error, i; 1192 1193 /* Create parent DMA tag. */ 1194 error = bus_dma_tag_create( 1195 bus_get_dma_tag(sc->nge_dev), /* parent */ 1196 1, 0, /* alignment, boundary */ 1197 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ 1198 BUS_SPACE_MAXADDR, /* highaddr */ 1199 NULL, NULL, /* filter, filterarg */ 1200 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 1201 0, /* nsegments */ 1202 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 1203 0, /* flags */ 1204 NULL, NULL, /* lockfunc, lockarg */ 1205 &sc->nge_cdata.nge_parent_tag); 1206 if (error != 0) { 1207 device_printf(sc->nge_dev, "failed to create parent DMA tag\n"); 1208 goto fail; 1209 } 1210 /* Create tag for Tx ring. */ 1211 error = bus_dma_tag_create(sc->nge_cdata.nge_parent_tag,/* parent */ 1212 NGE_RING_ALIGN, 0, /* alignment, boundary */ 1213 BUS_SPACE_MAXADDR, /* lowaddr */ 1214 BUS_SPACE_MAXADDR, /* highaddr */ 1215 NULL, NULL, /* filter, filterarg */ 1216 NGE_TX_RING_SIZE, /* maxsize */ 1217 1, /* nsegments */ 1218 NGE_TX_RING_SIZE, /* maxsegsize */ 1219 0, /* flags */ 1220 NULL, NULL, /* lockfunc, lockarg */ 1221 &sc->nge_cdata.nge_tx_ring_tag); 1222 if (error != 0) { 1223 device_printf(sc->nge_dev, "failed to create Tx ring DMA tag\n"); 1224 goto fail; 1225 } 1226 1227 /* Create tag for Rx ring. 
*/ 1228 error = bus_dma_tag_create(sc->nge_cdata.nge_parent_tag,/* parent */ 1229 NGE_RING_ALIGN, 0, /* alignment, boundary */ 1230 BUS_SPACE_MAXADDR, /* lowaddr */ 1231 BUS_SPACE_MAXADDR, /* highaddr */ 1232 NULL, NULL, /* filter, filterarg */ 1233 NGE_RX_RING_SIZE, /* maxsize */ 1234 1, /* nsegments */ 1235 NGE_RX_RING_SIZE, /* maxsegsize */ 1236 0, /* flags */ 1237 NULL, NULL, /* lockfunc, lockarg */ 1238 &sc->nge_cdata.nge_rx_ring_tag); 1239 if (error != 0) { 1240 device_printf(sc->nge_dev, 1241 "failed to create Rx ring DMA tag\n"); 1242 goto fail; 1243 } 1244 1245 /* Create tag for Tx buffers. */ 1246 error = bus_dma_tag_create(sc->nge_cdata.nge_parent_tag,/* parent */ 1247 1, 0, /* alignment, boundary */ 1248 BUS_SPACE_MAXADDR, /* lowaddr */ 1249 BUS_SPACE_MAXADDR, /* highaddr */ 1250 NULL, NULL, /* filter, filterarg */ 1251 MCLBYTES * NGE_MAXTXSEGS, /* maxsize */ 1252 NGE_MAXTXSEGS, /* nsegments */ 1253 MCLBYTES, /* maxsegsize */ 1254 0, /* flags */ 1255 NULL, NULL, /* lockfunc, lockarg */ 1256 &sc->nge_cdata.nge_tx_tag); 1257 if (error != 0) { 1258 device_printf(sc->nge_dev, "failed to create Tx DMA tag\n"); 1259 goto fail; 1260 } 1261 1262 /* Create tag for Rx buffers. */ 1263 error = bus_dma_tag_create(sc->nge_cdata.nge_parent_tag,/* parent */ 1264 NGE_RX_ALIGN, 0, /* alignment, boundary */ 1265 BUS_SPACE_MAXADDR, /* lowaddr */ 1266 BUS_SPACE_MAXADDR, /* highaddr */ 1267 NULL, NULL, /* filter, filterarg */ 1268 MCLBYTES, /* maxsize */ 1269 1, /* nsegments */ 1270 MCLBYTES, /* maxsegsize */ 1271 0, /* flags */ 1272 NULL, NULL, /* lockfunc, lockarg */ 1273 &sc->nge_cdata.nge_rx_tag); 1274 if (error != 0) { 1275 device_printf(sc->nge_dev, "failed to create Rx DMA tag\n"); 1276 goto fail; 1277 } 1278 1279 /* Allocate DMA'able memory and load the DMA map for Tx ring. 
*/ 1280 error = bus_dmamem_alloc(sc->nge_cdata.nge_tx_ring_tag, 1281 (void **)&sc->nge_rdata.nge_tx_ring, BUS_DMA_WAITOK | 1282 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->nge_cdata.nge_tx_ring_map); 1283 if (error != 0) { 1284 device_printf(sc->nge_dev, 1285 "failed to allocate DMA'able memory for Tx ring\n"); 1286 goto fail; 1287 } 1288 1289 ctx.nge_busaddr = 0; 1290 error = bus_dmamap_load(sc->nge_cdata.nge_tx_ring_tag, 1291 sc->nge_cdata.nge_tx_ring_map, sc->nge_rdata.nge_tx_ring, 1292 NGE_TX_RING_SIZE, nge_dmamap_cb, &ctx, 0); 1293 if (error != 0 || ctx.nge_busaddr == 0) { 1294 device_printf(sc->nge_dev, 1295 "failed to load DMA'able memory for Tx ring\n"); 1296 goto fail; 1297 } 1298 sc->nge_rdata.nge_tx_ring_paddr = ctx.nge_busaddr; 1299 1300 /* Allocate DMA'able memory and load the DMA map for Rx ring. */ 1301 error = bus_dmamem_alloc(sc->nge_cdata.nge_rx_ring_tag, 1302 (void **)&sc->nge_rdata.nge_rx_ring, BUS_DMA_WAITOK | 1303 BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->nge_cdata.nge_rx_ring_map); 1304 if (error != 0) { 1305 device_printf(sc->nge_dev, 1306 "failed to allocate DMA'able memory for Rx ring\n"); 1307 goto fail; 1308 } 1309 1310 ctx.nge_busaddr = 0; 1311 error = bus_dmamap_load(sc->nge_cdata.nge_rx_ring_tag, 1312 sc->nge_cdata.nge_rx_ring_map, sc->nge_rdata.nge_rx_ring, 1313 NGE_RX_RING_SIZE, nge_dmamap_cb, &ctx, 0); 1314 if (error != 0 || ctx.nge_busaddr == 0) { 1315 device_printf(sc->nge_dev, 1316 "failed to load DMA'able memory for Rx ring\n"); 1317 goto fail; 1318 } 1319 sc->nge_rdata.nge_rx_ring_paddr = ctx.nge_busaddr; 1320 1321 /* Create DMA maps for Tx buffers. */ 1322 for (i = 0; i < NGE_TX_RING_CNT; i++) { 1323 txd = &sc->nge_cdata.nge_txdesc[i]; 1324 txd->tx_m = NULL; 1325 txd->tx_dmamap = NULL; 1326 error = bus_dmamap_create(sc->nge_cdata.nge_tx_tag, 0, 1327 &txd->tx_dmamap); 1328 if (error != 0) { 1329 device_printf(sc->nge_dev, 1330 "failed to create Tx dmamap\n"); 1331 goto fail; 1332 } 1333 } 1334 /* Create DMA maps for Rx buffers. 
*/ 1335 if ((error = bus_dmamap_create(sc->nge_cdata.nge_rx_tag, 0, 1336 &sc->nge_cdata.nge_rx_sparemap)) != 0) { 1337 device_printf(sc->nge_dev, 1338 "failed to create spare Rx dmamap\n"); 1339 goto fail; 1340 } 1341 for (i = 0; i < NGE_RX_RING_CNT; i++) { 1342 rxd = &sc->nge_cdata.nge_rxdesc[i]; 1343 rxd->rx_m = NULL; 1344 rxd->rx_dmamap = NULL; 1345 error = bus_dmamap_create(sc->nge_cdata.nge_rx_tag, 0, 1346 &rxd->rx_dmamap); 1347 if (error != 0) { 1348 device_printf(sc->nge_dev, 1349 "failed to create Rx dmamap\n"); 1350 goto fail; 1351 } 1352 } 1353 1354fail: 1355 return (error); 1356} 1357 1358static void 1359nge_dma_free(struct nge_softc *sc) 1360{ 1361 struct nge_txdesc *txd; 1362 struct nge_rxdesc *rxd; 1363 int i; 1364 1365 /* Tx ring. */ 1366 if (sc->nge_cdata.nge_tx_ring_tag) { 1367 if (sc->nge_cdata.nge_tx_ring_map) 1368 bus_dmamap_unload(sc->nge_cdata.nge_tx_ring_tag, 1369 sc->nge_cdata.nge_tx_ring_map); 1370 if (sc->nge_cdata.nge_tx_ring_map && 1371 sc->nge_rdata.nge_tx_ring) 1372 bus_dmamem_free(sc->nge_cdata.nge_tx_ring_tag, 1373 sc->nge_rdata.nge_tx_ring, 1374 sc->nge_cdata.nge_tx_ring_map); 1375 sc->nge_rdata.nge_tx_ring = NULL; 1376 sc->nge_cdata.nge_tx_ring_map = NULL; 1377 bus_dma_tag_destroy(sc->nge_cdata.nge_tx_ring_tag); 1378 sc->nge_cdata.nge_tx_ring_tag = NULL; 1379 } 1380 /* Rx ring. */ 1381 if (sc->nge_cdata.nge_rx_ring_tag) { 1382 if (sc->nge_cdata.nge_rx_ring_map) 1383 bus_dmamap_unload(sc->nge_cdata.nge_rx_ring_tag, 1384 sc->nge_cdata.nge_rx_ring_map); 1385 if (sc->nge_cdata.nge_rx_ring_map && 1386 sc->nge_rdata.nge_rx_ring) 1387 bus_dmamem_free(sc->nge_cdata.nge_rx_ring_tag, 1388 sc->nge_rdata.nge_rx_ring, 1389 sc->nge_cdata.nge_rx_ring_map); 1390 sc->nge_rdata.nge_rx_ring = NULL; 1391 sc->nge_cdata.nge_rx_ring_map = NULL; 1392 bus_dma_tag_destroy(sc->nge_cdata.nge_rx_ring_tag); 1393 sc->nge_cdata.nge_rx_ring_tag = NULL; 1394 } 1395 /* Tx buffers. 
*/ 1396 if (sc->nge_cdata.nge_tx_tag) { 1397 for (i = 0; i < NGE_TX_RING_CNT; i++) { 1398 txd = &sc->nge_cdata.nge_txdesc[i]; 1399 if (txd->tx_dmamap) { 1400 bus_dmamap_destroy(sc->nge_cdata.nge_tx_tag, 1401 txd->tx_dmamap); 1402 txd->tx_dmamap = NULL; 1403 } 1404 } 1405 bus_dma_tag_destroy(sc->nge_cdata.nge_tx_tag); 1406 sc->nge_cdata.nge_tx_tag = NULL; 1407 } 1408 /* Rx buffers. */ 1409 if (sc->nge_cdata.nge_rx_tag) { 1410 for (i = 0; i < NGE_RX_RING_CNT; i++) { 1411 rxd = &sc->nge_cdata.nge_rxdesc[i]; 1412 if (rxd->rx_dmamap) { 1413 bus_dmamap_destroy(sc->nge_cdata.nge_rx_tag, 1414 rxd->rx_dmamap); 1415 rxd->rx_dmamap = NULL; 1416 } 1417 } 1418 if (sc->nge_cdata.nge_rx_sparemap) { 1419 bus_dmamap_destroy(sc->nge_cdata.nge_rx_tag, 1420 sc->nge_cdata.nge_rx_sparemap); 1421 sc->nge_cdata.nge_rx_sparemap = 0; 1422 } 1423 bus_dma_tag_destroy(sc->nge_cdata.nge_rx_tag); 1424 sc->nge_cdata.nge_rx_tag = NULL; 1425 } 1426 1427 if (sc->nge_cdata.nge_parent_tag) { 1428 bus_dma_tag_destroy(sc->nge_cdata.nge_parent_tag); 1429 sc->nge_cdata.nge_parent_tag = NULL; 1430 } 1431} 1432 |
|
928/* 929 * Initialize the transmit descriptors. 930 */ 931static int 932nge_list_tx_init(struct nge_softc *sc) 933{ | 1433/* 1434 * Initialize the transmit descriptors. 1435 */ 1436static int 1437nge_list_tx_init(struct nge_softc *sc) 1438{ |
934 struct nge_list_data *ld; 935 struct nge_ring_data *cd; | 1439 struct nge_ring_data *rd; 1440 struct nge_txdesc *txd; 1441 bus_addr_t addr; |
936 int i; 937 | 1442 int i; 1443 |
938 cd = &sc->nge_cdata; 939 ld = sc->nge_ldata; | 1444 sc->nge_cdata.nge_tx_prod = 0; 1445 sc->nge_cdata.nge_tx_cons = 0; 1446 sc->nge_cdata.nge_tx_cnt = 0; |
940 | 1447 |
941 for (i = 0; i < NGE_TX_LIST_CNT; i++) { 942 if (i == (NGE_TX_LIST_CNT - 1)) { 943 ld->nge_tx_list[i].nge_nextdesc = 944 &ld->nge_tx_list[0]; 945 ld->nge_tx_list[i].nge_next = 946 vtophys(&ld->nge_tx_list[0]); 947 } else { 948 ld->nge_tx_list[i].nge_nextdesc = 949 &ld->nge_tx_list[i + 1]; 950 ld->nge_tx_list[i].nge_next = 951 vtophys(&ld->nge_tx_list[i + 1]); 952 } 953 ld->nge_tx_list[i].nge_mbuf = NULL; 954 ld->nge_tx_list[i].nge_ptr = 0; 955 ld->nge_tx_list[i].nge_ctl = 0; | 1448 rd = &sc->nge_rdata; 1449 bzero(rd->nge_tx_ring, sizeof(struct nge_desc) * NGE_TX_RING_CNT); 1450 for (i = 0; i < NGE_TX_RING_CNT; i++) { 1451 if (i == NGE_TX_RING_CNT - 1) 1452 addr = NGE_TX_RING_ADDR(sc, 0); 1453 else 1454 addr = NGE_TX_RING_ADDR(sc, i + 1); 1455 rd->nge_tx_ring[i].nge_next = htole32(NGE_ADDR_LO(addr)); 1456 txd = &sc->nge_cdata.nge_txdesc[i]; 1457 txd->tx_m = NULL; |
956 } 957 | 1458 } 1459 |
958 cd->nge_tx_prod = cd->nge_tx_cons = cd->nge_tx_cnt = 0; | 1460 bus_dmamap_sync(sc->nge_cdata.nge_tx_ring_tag, 1461 sc->nge_cdata.nge_tx_ring_map, 1462 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); |
959 960 return (0); 961} 962 | 1463 1464 return (0); 1465} 1466 |
963 | |
964/* 965 * Initialize the RX descriptors and allocate mbufs for them. Note that 966 * we arrange the descriptors in a closed ring, so that the last descriptor 967 * points back to the first. 968 */ 969static int 970nge_list_rx_init(struct nge_softc *sc) 971{ | 1467/* 1468 * Initialize the RX descriptors and allocate mbufs for them. Note that 1469 * we arrange the descriptors in a closed ring, so that the last descriptor 1470 * points back to the first. 1471 */ 1472static int 1473nge_list_rx_init(struct nge_softc *sc) 1474{ |
972 struct nge_list_data *ld; 973 struct nge_ring_data *cd; | 1475 struct nge_ring_data *rd; 1476 bus_addr_t addr; |
974 int i; 975 | 1477 int i; 1478 |
976 ld = sc->nge_ldata; 977 cd = &sc->nge_cdata; | 1479 sc->nge_cdata.nge_rx_cons = 0; 1480 sc->nge_head = sc->nge_tail = NULL; |
978 | 1481 |
979 for (i = 0; i < NGE_RX_LIST_CNT; i++) { 980 if (nge_newbuf(sc, &ld->nge_rx_list[i], NULL) == ENOBUFS) | 1482 rd = &sc->nge_rdata; 1483 bzero(rd->nge_rx_ring, sizeof(struct nge_desc) * NGE_RX_RING_CNT); 1484 for (i = 0; i < NGE_RX_RING_CNT; i++) { 1485 if (nge_newbuf(sc, i) != 0) |
981 return (ENOBUFS); | 1486 return (ENOBUFS); |
982 if (i == (NGE_RX_LIST_CNT - 1)) { 983 ld->nge_rx_list[i].nge_nextdesc = 984 &ld->nge_rx_list[0]; 985 ld->nge_rx_list[i].nge_next = 986 vtophys(&ld->nge_rx_list[0]); 987 } else { 988 ld->nge_rx_list[i].nge_nextdesc = 989 &ld->nge_rx_list[i + 1]; 990 ld->nge_rx_list[i].nge_next = 991 vtophys(&ld->nge_rx_list[i + 1]); 992 } | 1487 if (i == NGE_RX_RING_CNT - 1) 1488 addr = NGE_RX_RING_ADDR(sc, 0); 1489 else 1490 addr = NGE_RX_RING_ADDR(sc, i + 1); 1491 rd->nge_rx_ring[i].nge_next = htole32(NGE_ADDR_LO(addr)); |
993 } 994 | 1492 } 1493 |
995 cd->nge_rx_prod = 0; 996 sc->nge_head = sc->nge_tail = NULL; | 1494 bus_dmamap_sync(sc->nge_cdata.nge_rx_ring_tag, 1495 sc->nge_cdata.nge_rx_ring_map, 1496 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); |
997 998 return (0); 999} 1000 | 1497 1498 return (0); 1499} 1500 |
1501static __inline void 1502nge_discard_rxbuf(struct nge_softc *sc, int idx) 1503{ 1504 struct nge_desc *desc; 1505 1506 desc = &sc->nge_rdata.nge_rx_ring[idx]; 1507 desc->nge_cmdsts = htole32(MCLBYTES - sizeof(uint64_t)); 1508 desc->nge_extsts = 0; 1509} 1510 |
|
1001/* 1002 * Initialize an RX descriptor and attach an MBUF cluster. 1003 */ 1004static int | 1511/* 1512 * Initialize an RX descriptor and attach an MBUF cluster. 1513 */ 1514static int |
1005nge_newbuf(struct nge_softc *sc, struct nge_desc *c, struct mbuf *m) | 1515nge_newbuf(struct nge_softc *sc, int idx) |
1006{ | 1516{ |
1517 struct nge_desc *desc; 1518 struct nge_rxdesc *rxd; 1519 struct mbuf *m; 1520 bus_dma_segment_t segs[1]; 1521 bus_dmamap_t map; 1522 int nsegs; |
|
1007 | 1523 |
1008 if (m == NULL) { 1009 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 1010 if (m == NULL) 1011 return (ENOBUFS); 1012 } else 1013 m->m_data = m->m_ext.ext_buf; 1014 | 1524 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 1525 if (m == NULL) 1526 return (ENOBUFS); |
1015 m->m_len = m->m_pkthdr.len = MCLBYTES; | 1527 m->m_len = m->m_pkthdr.len = MCLBYTES; |
1016 | |
1017 m_adj(m, sizeof(uint64_t)); 1018 | 1528 m_adj(m, sizeof(uint64_t)); 1529 |
1019 c->nge_mbuf = m; 1020 c->nge_ptr = vtophys(mtod(m, caddr_t)); 1021 c->nge_ctl = m->m_len; 1022 c->nge_extsts = 0; | 1530 if (bus_dmamap_load_mbuf_sg(sc->nge_cdata.nge_rx_tag, 1531 sc->nge_cdata.nge_rx_sparemap, m, segs, &nsegs, 0) != 0) { 1532 m_freem(m); 1533 return (ENOBUFS); 1534 } 1535 KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); |
1023 | 1536 |
1537 rxd = &sc->nge_cdata.nge_rxdesc[idx]; 1538 if (rxd->rx_m != NULL) { 1539 bus_dmamap_sync(sc->nge_cdata.nge_rx_tag, rxd->rx_dmamap, 1540 BUS_DMASYNC_POSTREAD); 1541 bus_dmamap_unload(sc->nge_cdata.nge_rx_tag, rxd->rx_dmamap); 1542 } 1543 map = rxd->rx_dmamap; 1544 rxd->rx_dmamap = sc->nge_cdata.nge_rx_sparemap; 1545 sc->nge_cdata.nge_rx_sparemap = map; 1546 bus_dmamap_sync(sc->nge_cdata.nge_rx_tag, rxd->rx_dmamap, 1547 BUS_DMASYNC_PREREAD); 1548 rxd->rx_m = m; 1549 desc = &sc->nge_rdata.nge_rx_ring[idx]; 1550 desc->nge_ptr = htole32(NGE_ADDR_LO(segs[0].ds_addr)); 1551 desc->nge_cmdsts = htole32(segs[0].ds_len); 1552 desc->nge_extsts = 0; 1553 |
|
1024 return (0); 1025} 1026 | 1554 return (0); 1555} 1556 |
1027#ifdef NGE_FIXUP_RX | 1557#ifndef __NO_STRICT_ALIGNMENT |
1028static __inline void 1029nge_fixup_rx(struct mbuf *m) 1030{ | 1558static __inline void 1559nge_fixup_rx(struct mbuf *m) 1560{ |
1031 int i; 1032 uint16_t *src, *dst; | 1561 int i; 1562 uint16_t *src, *dst; |
1033 1034 src = mtod(m, uint16_t *); 1035 dst = src - 1; 1036 1037 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++) 1038 *dst++ = *src++; 1039 1040 m->m_data -= ETHER_ALIGN; | 1563 1564 src = mtod(m, uint16_t *); 1565 dst = src - 1; 1566 1567 for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++) 1568 *dst++ = *src++; 1569 1570 m->m_data -= ETHER_ALIGN; |
1041 1042 return; | |
1043} 1044#endif 1045 1046/* 1047 * A frame has been uploaded: pass the resulting mbuf chain up to 1048 * the higher level protocols. 1049 */ 1050static void 1051nge_rxeof(struct nge_softc *sc) 1052{ 1053 struct mbuf *m; 1054 struct ifnet *ifp; 1055 struct nge_desc *cur_rx; | 1571} 1572#endif 1573 1574/* 1575 * A frame has been uploaded: pass the resulting mbuf chain up to 1576 * the higher level protocols. 1577 */ 1578static void 1579nge_rxeof(struct nge_softc *sc) 1580{ 1581 struct mbuf *m; 1582 struct ifnet *ifp; 1583 struct nge_desc *cur_rx; |
1056 int i, total_len = 0; 1057 uint32_t rxstat; | 1584 struct nge_rxdesc *rxd; 1585 int cons, prog, total_len; 1586 uint32_t cmdsts, extsts; |
1058 1059 NGE_LOCK_ASSERT(sc); | 1587 1588 NGE_LOCK_ASSERT(sc); |
1589 |
|
1060 ifp = sc->nge_ifp; | 1590 ifp = sc->nge_ifp; |
1061 i = sc->nge_cdata.nge_rx_prod; | 1591 cons = sc->nge_cdata.nge_rx_cons; |
1062 | 1592 |
1063 while (NGE_OWNDESC(&sc->nge_ldata->nge_rx_list[i])) { 1064 uint32_t extsts; | 1593 bus_dmamap_sync(sc->nge_cdata.nge_rx_ring_tag, 1594 sc->nge_cdata.nge_rx_ring_map, 1595 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); |
1065 | 1596 |
1597 for (prog = 0; prog < NGE_RX_RING_CNT && 1598 (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0; 1599 NGE_INC(cons, NGE_RX_RING_CNT)) { |
|
1066#ifdef DEVICE_POLLING 1067 if (ifp->if_capenable & IFCAP_POLLING) { 1068 if (sc->rxcycles <= 0) 1069 break; 1070 sc->rxcycles--; 1071 } 1072#endif | 1600#ifdef DEVICE_POLLING 1601 if (ifp->if_capenable & IFCAP_POLLING) { 1602 if (sc->rxcycles <= 0) 1603 break; 1604 sc->rxcycles--; 1605 } 1606#endif |
1607 cur_rx = &sc->nge_rdata.nge_rx_ring[cons]; 1608 cmdsts = le32toh(cur_rx->nge_cmdsts); 1609 extsts = le32toh(cur_rx->nge_extsts); 1610 if ((cmdsts & NGE_CMDSTS_OWN) == 0) 1611 break; 1612 prog++; 1613 rxd = &sc->nge_cdata.nge_rxdesc[cons]; 1614 m = rxd->rx_m; 1615 total_len = cmdsts & NGE_CMDSTS_BUFLEN; |
|
1073 | 1616 |
1074 cur_rx = &sc->nge_ldata->nge_rx_list[i]; 1075 rxstat = cur_rx->nge_rxstat; 1076 extsts = cur_rx->nge_extsts; 1077 m = cur_rx->nge_mbuf; 1078 cur_rx->nge_mbuf = NULL; 1079 total_len = NGE_RXBYTES(cur_rx); 1080 NGE_INC(i, NGE_RX_LIST_CNT); 1081 1082 if (rxstat & NGE_CMDSTS_MORE) { | 1617 if ((cmdsts & NGE_CMDSTS_MORE) != 0) { 1618 if (nge_newbuf(sc, cons) != 0) { 1619 ifp->if_iqdrops++; 1620 if (sc->nge_head != NULL) { 1621 m_freem(sc->nge_head); 1622 sc->nge_head = sc->nge_tail = NULL; 1623 } 1624 nge_discard_rxbuf(sc, cons); 1625 continue; 1626 } |
1083 m->m_len = total_len; 1084 if (sc->nge_head == NULL) { 1085 m->m_pkthdr.len = total_len; 1086 sc->nge_head = sc->nge_tail = m; 1087 } else { 1088 m->m_flags &= ~M_PKTHDR; 1089 sc->nge_head->m_pkthdr.len += total_len; 1090 sc->nge_tail->m_next = m; 1091 sc->nge_tail = m; 1092 } | 1627 m->m_len = total_len; 1628 if (sc->nge_head == NULL) { 1629 m->m_pkthdr.len = total_len; 1630 sc->nge_head = sc->nge_tail = m; 1631 } else { 1632 m->m_flags &= ~M_PKTHDR; 1633 sc->nge_head->m_pkthdr.len += total_len; 1634 sc->nge_tail->m_next = m; 1635 sc->nge_tail = m; 1636 } |
1093 nge_newbuf(sc, cur_rx, NULL); | |
1094 continue; 1095 } 1096 1097 /* 1098 * If an error occurs, update stats, clear the 1099 * status word and leave the mbuf cluster in place: 1100 * it should simply get re-used next time this descriptor 1101 * comes up in the ring. 1102 */ | 1637 continue; 1638 } 1639 1640 /* 1641 * If an error occurs, update stats, clear the 1642 * status word and leave the mbuf cluster in place: 1643 * it should simply get re-used next time this descriptor 1644 * comes up in the ring. 1645 */ |
1103 if (!(rxstat & NGE_CMDSTS_PKT_OK)) { 1104 ifp->if_ierrors++; 1105 if (sc->nge_head != NULL) { 1106 m_freem(sc->nge_head); 1107 sc->nge_head = sc->nge_tail = NULL; | 1646 if ((cmdsts & NGE_CMDSTS_PKT_OK) == 0) { 1647 if ((cmdsts & NGE_RXSTAT_RUNT) && 1648 total_len >= (ETHER_MIN_LEN - ETHER_CRC_LEN - 4)) { 1649 /* 1650 * Work-around hardware bug, accept runt frames 1651 * if its length is larger than or equal to 56. 1652 */ 1653 } else { 1654 /* 1655 * Input error counters are updated by hardware. 1656 */ 1657 if (sc->nge_head != NULL) { 1658 m_freem(sc->nge_head); 1659 sc->nge_head = sc->nge_tail = NULL; 1660 } 1661 nge_discard_rxbuf(sc, cons); 1662 continue; |
1108 } | 1663 } |
1109 nge_newbuf(sc, cur_rx, m); 1110 continue; | |
1111 } 1112 1113 /* Try conjure up a replacement mbuf. */ 1114 | 1664 } 1665 1666 /* Try conjure up a replacement mbuf. */ 1667 |
1115 if (nge_newbuf(sc, cur_rx, NULL)) { 1116 ifp->if_ierrors++; | 1668 if (nge_newbuf(sc, cons) != 0) { 1669 ifp->if_iqdrops++; |
1117 if (sc->nge_head != NULL) { 1118 m_freem(sc->nge_head); 1119 sc->nge_head = sc->nge_tail = NULL; 1120 } | 1670 if (sc->nge_head != NULL) { 1671 m_freem(sc->nge_head); 1672 sc->nge_head = sc->nge_tail = NULL; 1673 } |
1121 nge_newbuf(sc, cur_rx, m); | 1674 nge_discard_rxbuf(sc, cons); |
1122 continue; 1123 } 1124 | 1675 continue; 1676 } 1677 |
1678 /* Chain received mbufs. */ |
|
1125 if (sc->nge_head != NULL) { 1126 m->m_len = total_len; 1127 m->m_flags &= ~M_PKTHDR; 1128 sc->nge_tail->m_next = m; 1129 m = sc->nge_head; 1130 m->m_pkthdr.len += total_len; 1131 sc->nge_head = sc->nge_tail = NULL; 1132 } else 1133 m->m_pkthdr.len = m->m_len = total_len; 1134 1135 /* 1136 * Ok. NatSemi really screwed up here. This is the 1137 * only gigE chip I know of with alignment constraints 1138 * on receive buffers. RX buffers must be 64-bit aligned. 1139 */ 1140 /* 1141 * By popular demand, ignore the alignment problems | 1679 if (sc->nge_head != NULL) { 1680 m->m_len = total_len; 1681 m->m_flags &= ~M_PKTHDR; 1682 sc->nge_tail->m_next = m; 1683 m = sc->nge_head; 1684 m->m_pkthdr.len += total_len; 1685 sc->nge_head = sc->nge_tail = NULL; 1686 } else 1687 m->m_pkthdr.len = m->m_len = total_len; 1688 1689 /* 1690 * Ok. NatSemi really screwed up here. This is the 1691 * only gigE chip I know of with alignment constraints 1692 * on receive buffers. RX buffers must be 64-bit aligned. 1693 */ 1694 /* 1695 * By popular demand, ignore the alignment problems |
1142 * on the Intel x86 platform. The performance hit | 1696 * on the non-strict alignment platform. The performance hit |
1143 * incurred due to unaligned accesses is much smaller 1144 * than the hit produced by forcing buffer copies all 1145 * the time, especially with jumbo frames. We still 1146 * need to fix up the alignment everywhere else though. 1147 */ | 1697 * incurred due to unaligned accesses is much smaller 1698 * than the hit produced by forcing buffer copies all 1699 * the time, especially with jumbo frames. We still 1700 * need to fix up the alignment everywhere else though. 1701 */ |
1148#ifdef NGE_FIXUP_RX | 1702#ifndef __NO_STRICT_ALIGNMENT |
1149 nge_fixup_rx(m); 1150#endif | 1703 nge_fixup_rx(m); 1704#endif |
1151 1152 ifp->if_ipackets++; | |
1153 m->m_pkthdr.rcvif = ifp; | 1705 m->m_pkthdr.rcvif = ifp; |
1706 ifp->if_ipackets++; |
|
1154 | 1707 |
1155 /* Do IP checksum checking. */ 1156 if (extsts & NGE_RXEXTSTS_IPPKT) 1157 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 1158 if (!(extsts & NGE_RXEXTSTS_IPCSUMERR)) 1159 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 1160 if ((extsts & NGE_RXEXTSTS_TCPPKT && 1161 !(extsts & NGE_RXEXTSTS_TCPCSUMERR)) || 1162 (extsts & NGE_RXEXTSTS_UDPPKT && 1163 !(extsts & NGE_RXEXTSTS_UDPCSUMERR))) { 1164 m->m_pkthdr.csum_flags |= 1165 CSUM_DATA_VALID|CSUM_PSEUDO_HDR; 1166 m->m_pkthdr.csum_data = 0xffff; | 1708 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) { 1709 /* Do IP checksum checking. */ 1710 if ((extsts & NGE_RXEXTSTS_IPPKT) != 0) 1711 m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 1712 if ((extsts & NGE_RXEXTSTS_IPCSUMERR) == 0) 1713 m->m_pkthdr.csum_flags |= CSUM_IP_VALID; 1714 if ((extsts & NGE_RXEXTSTS_TCPPKT && 1715 !(extsts & NGE_RXEXTSTS_TCPCSUMERR)) || 1716 (extsts & NGE_RXEXTSTS_UDPPKT && 1717 !(extsts & NGE_RXEXTSTS_UDPCSUMERR))) { 1718 m->m_pkthdr.csum_flags |= 1719 CSUM_DATA_VALID | CSUM_PSEUDO_HDR; 1720 m->m_pkthdr.csum_data = 0xffff; 1721 } |
1167 } 1168 1169 /* 1170 * If we received a packet with a vlan tag, pass it 1171 * to vlan_input() instead of ether_input(). 1172 */ | 1722 } 1723 1724 /* 1725 * If we received a packet with a vlan tag, pass it 1726 * to vlan_input() instead of ether_input(). 1727 */ |
1173 if (extsts & NGE_RXEXTSTS_VLANPKT) { | 1728 if ((extsts & NGE_RXEXTSTS_VLANPKT) != 0 && 1729 (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) { |
1174 m->m_pkthdr.ether_vtag = | 1730 m->m_pkthdr.ether_vtag = |
1175 ntohs(extsts & NGE_RXEXTSTS_VTCI); | 1731 bswap16(extsts & NGE_RXEXTSTS_VTCI); |
1176 m->m_flags |= M_VLANTAG; 1177 } 1178 NGE_UNLOCK(sc); 1179 (*ifp->if_input)(ifp, m); 1180 NGE_LOCK(sc); 1181 } 1182 | 1732 m->m_flags |= M_VLANTAG; 1733 } 1734 NGE_UNLOCK(sc); 1735 (*ifp->if_input)(ifp, m); 1736 NGE_LOCK(sc); 1737 } 1738 |
1183 sc->nge_cdata.nge_rx_prod = i; | 1739 if (prog > 0) { 1740 sc->nge_cdata.nge_rx_cons = cons; 1741 bus_dmamap_sync(sc->nge_cdata.nge_rx_ring_tag, 1742 sc->nge_cdata.nge_rx_ring_map, 1743 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1744 } |
1184} 1185 1186/* 1187 * A frame was downloaded to the chip. It's safe for us to clean up 1188 * the list buffers. 1189 */ | 1745} 1746 1747/* 1748 * A frame was downloaded to the chip. It's safe for us to clean up 1749 * the list buffers. 1750 */ |
1190 | |
1191static void 1192nge_txeof(struct nge_softc *sc) 1193{ | 1751static void 1752nge_txeof(struct nge_softc *sc) 1753{ |
1194 struct nge_desc *cur_tx; | 1754 struct nge_desc *cur_tx; 1755 struct nge_txdesc *txd; |
1195 struct ifnet *ifp; | 1756 struct ifnet *ifp; |
1196 uint32_t idx; | 1757 uint32_t cmdsts; 1758 int cons, prod; |
1197 1198 NGE_LOCK_ASSERT(sc); 1199 ifp = sc->nge_ifp; 1200 | 1759 1760 NGE_LOCK_ASSERT(sc); 1761 ifp = sc->nge_ifp; 1762 |
1763 cons = sc->nge_cdata.nge_tx_cons; 1764 prod = sc->nge_cdata.nge_tx_prod; 1765 if (cons == prod) 1766 return; 1767 1768 bus_dmamap_sync(sc->nge_cdata.nge_tx_ring_tag, 1769 sc->nge_cdata.nge_tx_ring_map, 1770 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1771 |
|
1201 /* 1202 * Go through our tx list and free mbufs for those 1203 * frames that have been transmitted. 1204 */ | 1772 /* 1773 * Go through our tx list and free mbufs for those 1774 * frames that have been transmitted. 1775 */ |
1205 idx = sc->nge_cdata.nge_tx_cons; 1206 while (idx != sc->nge_cdata.nge_tx_prod) { 1207 cur_tx = &sc->nge_ldata->nge_tx_list[idx]; 1208 1209 if (NGE_OWNDESC(cur_tx)) | 1776 for (; cons != prod; NGE_INC(cons, NGE_TX_RING_CNT)) { 1777 cur_tx = &sc->nge_rdata.nge_tx_ring[cons]; 1778 cmdsts = le32toh(cur_tx->nge_cmdsts); 1779 if ((cmdsts & NGE_CMDSTS_OWN) != 0) |
1210 break; | 1780 break; |
1211 1212 if (cur_tx->nge_ctl & NGE_CMDSTS_MORE) { 1213 sc->nge_cdata.nge_tx_cnt--; 1214 NGE_INC(idx, NGE_TX_LIST_CNT); | 1781 sc->nge_cdata.nge_tx_cnt--; 1782 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1783 if ((cmdsts & NGE_CMDSTS_MORE) != 0) |
1215 continue; | 1784 continue; |
1216 } | |
1217 | 1785 |
1218 if (!(cur_tx->nge_ctl & NGE_CMDSTS_PKT_OK)) { | 1786 txd = &sc->nge_cdata.nge_txdesc[cons]; 1787 bus_dmamap_sync(sc->nge_cdata.nge_tx_tag, txd->tx_dmamap, 1788 BUS_DMASYNC_POSTWRITE); 1789 bus_dmamap_unload(sc->nge_cdata.nge_tx_tag, txd->tx_dmamap); 1790 if ((cmdsts & NGE_CMDSTS_PKT_OK) == 0) { |
1219 ifp->if_oerrors++; | 1791 ifp->if_oerrors++; |
1220 if (cur_tx->nge_txstat & NGE_TXSTAT_EXCESSCOLLS) | 1792 if ((cmdsts & NGE_TXSTAT_EXCESSCOLLS) != 0) |
1221 ifp->if_collisions++; | 1793 ifp->if_collisions++; |
1222 if (cur_tx->nge_txstat & NGE_TXSTAT_OUTOFWINCOLL) | 1794 if ((cmdsts & NGE_TXSTAT_OUTOFWINCOLL) != 0) |
1223 ifp->if_collisions++; | 1795 ifp->if_collisions++; |
1224 } | 1796 } else 1797 ifp->if_opackets++; |
1225 | 1798 |
1226 ifp->if_collisions += 1227 (cur_tx->nge_txstat & NGE_TXSTAT_COLLCNT) >> 16; 1228 1229 ifp->if_opackets++; 1230 if (cur_tx->nge_mbuf != NULL) { 1231 m_freem(cur_tx->nge_mbuf); 1232 cur_tx->nge_mbuf = NULL; 1233 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1234 } 1235 1236 sc->nge_cdata.nge_tx_cnt--; 1237 NGE_INC(idx, NGE_TX_LIST_CNT); | 1799 ifp->if_collisions += (cmdsts & NGE_TXSTAT_COLLCNT) >> 16; 1800 KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbuf!\n", 1801 __func__)); 1802 m_freem(txd->tx_m); 1803 txd->tx_m = NULL; |
1238 } 1239 | 1804 } 1805 |
1240 sc->nge_cdata.nge_tx_cons = idx; 1241 1242 if (idx == sc->nge_cdata.nge_tx_prod) 1243 ifp->if_timer = 0; | 1806 sc->nge_cdata.nge_tx_cons = cons; 1807 if (sc->nge_cdata.nge_tx_cnt == 0) 1808 sc->nge_watchdog_timer = 0; |
1244} 1245 1246static void 1247nge_tick(void *xsc) 1248{ 1249 struct nge_softc *sc; 1250 struct mii_data *mii; | 1809} 1810 1811static void 1812nge_tick(void *xsc) 1813{ 1814 struct nge_softc *sc; 1815 struct mii_data *mii; |
1251 struct ifnet *ifp; | |
1252 1253 sc = xsc; 1254 NGE_LOCK_ASSERT(sc); | 1816 1817 sc = xsc; 1818 NGE_LOCK_ASSERT(sc); |
1819 mii = device_get_softc(sc->nge_miibus); 1820 mii_tick(mii); 1821 /* 1822 * For PHYs that does not reset established link, it is 1823 * necessary to check whether driver still have a valid 1824 * link(e.g link state change callback is not called). 1825 * Otherwise, driver think it lost link because driver 1826 * initialization routine clears link state flag. 1827 */ 1828 if ((sc->nge_flags & NGE_FLAG_LINK) == 0) 1829 nge_miibus_statchg(sc->nge_dev); 1830 nge_stats_update(sc); 1831 nge_watchdog(sc); 1832 callout_reset(&sc->nge_stat_ch, hz, nge_tick, sc); 1833} 1834 1835static void 1836nge_stats_update(struct nge_softc *sc) 1837{ 1838 struct ifnet *ifp; 1839 struct nge_stats now, *stats, *nstats; 1840 1841 NGE_LOCK_ASSERT(sc); 1842 |
|
1255 ifp = sc->nge_ifp; | 1843 ifp = sc->nge_ifp; |
1844 stats = &now; 1845 stats->rx_pkts_errs = 1846 CSR_READ_4(sc, NGE_MIB_RXERRPKT) & 0xFFFF; 1847 stats->rx_crc_errs = 1848 CSR_READ_4(sc, NGE_MIB_RXERRFCS) & 0xFFFF; 1849 stats->rx_fifo_oflows = 1850 CSR_READ_4(sc, NGE_MIB_RXERRMISSEDPKT) & 0xFFFF; 1851 stats->rx_align_errs = 1852 CSR_READ_4(sc, NGE_MIB_RXERRALIGN) & 0xFFFF; 1853 stats->rx_sym_errs = 1854 CSR_READ_4(sc, NGE_MIB_RXERRSYM) & 0xFFFF; 1855 stats->rx_pkts_jumbos = 1856 CSR_READ_4(sc, NGE_MIB_RXERRGIANT) & 0xFFFF; 1857 stats->rx_len_errs = 1858 CSR_READ_4(sc, NGE_MIB_RXERRRANGLEN) & 0xFFFF; 1859 stats->rx_unctl_frames = 1860 CSR_READ_4(sc, NGE_MIB_RXBADOPCODE) & 0xFFFF; 1861 stats->rx_pause = 1862 CSR_READ_4(sc, NGE_MIB_RXPAUSEPKTS) & 0xFFFF; 1863 stats->tx_pause = 1864 CSR_READ_4(sc, NGE_MIB_TXPAUSEPKTS) & 0xFFFF; 1865 stats->tx_seq_errs = 1866 CSR_READ_4(sc, NGE_MIB_TXERRSQE) & 0xFF; |
|
1256 | 1867 |
1257 if (sc->nge_tbi) { 1258 if (!sc->nge_link) { 1259 if (CSR_READ_4(sc, NGE_TBI_BMSR) 1260 & NGE_TBIBMSR_ANEG_DONE) { 1261 if (bootverbose) 1262 device_printf(sc->nge_dev, 1263 "gigabit link up\n"); 1264 nge_miibus_statchg(sc->nge_miibus); 1265 sc->nge_link++; 1266 if (ifp->if_snd.ifq_head != NULL) 1267 nge_start_locked(ifp); 1268 } 1269 } 1270 } else { 1271 mii = device_get_softc(sc->nge_miibus); 1272 mii_tick(mii); | 1868 /* 1869 * Since we've accept errored frames exclude Rx length errors. 1870 */ 1871 ifp->if_ierrors += stats->rx_pkts_errs + stats->rx_crc_errs + 1872 stats->rx_fifo_oflows + stats->rx_sym_errs; |
1273 | 1873 |
1274 if (!sc->nge_link) { 1275 if (mii->mii_media_status & IFM_ACTIVE && 1276 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 1277 sc->nge_link++; 1278 if (IFM_SUBTYPE(mii->mii_media_active) 1279 == IFM_1000_T && bootverbose) 1280 device_printf(sc->nge_dev, 1281 "gigabit link up\n"); 1282 if (ifp->if_snd.ifq_head != NULL) 1283 nge_start_locked(ifp); 1284 } 1285 } 1286 } 1287 callout_reset(&sc->nge_stat_ch, hz, nge_tick, sc); | 1874 nstats = &sc->nge_stats; 1875 nstats->rx_pkts_errs += stats->rx_pkts_errs; 1876 nstats->rx_crc_errs += stats->rx_crc_errs; 1877 nstats->rx_fifo_oflows += stats->rx_fifo_oflows; 1878 nstats->rx_align_errs += stats->rx_align_errs; 1879 nstats->rx_sym_errs += stats->rx_sym_errs; 1880 nstats->rx_pkts_jumbos += stats->rx_pkts_jumbos; 1881 nstats->rx_len_errs += stats->rx_len_errs; 1882 nstats->rx_unctl_frames += stats->rx_unctl_frames; 1883 nstats->rx_pause += stats->rx_pause; 1884 nstats->tx_pause += stats->tx_pause; 1885 nstats->tx_seq_errs += stats->tx_seq_errs; |
1288} 1289 1290#ifdef DEVICE_POLLING 1291static poll_handler_t nge_poll; 1292 1293static void 1294nge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 1295{ | 1886} 1887 1888#ifdef DEVICE_POLLING 1889static poll_handler_t nge_poll; 1890 1891static void 1892nge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 1893{ |
1296 struct nge_softc *sc = ifp->if_softc; | 1894 struct nge_softc *sc; |
1297 | 1895 |
1896 sc = ifp->if_softc; 1897 |
|
1298 NGE_LOCK(sc); | 1898 NGE_LOCK(sc); |
1299 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { | 1899 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { |
1300 NGE_UNLOCK(sc); 1301 return; 1302 } 1303 1304 /* 1305 * On the nge, reading the status register also clears it. 1306 * So before returning to intr mode we must make sure that all 1307 * possible pending sources of interrupts have been served. 1308 * In practice this means run to completion the *eof routines, | 1900 NGE_UNLOCK(sc); 1901 return; 1902 } 1903 1904 /* 1905 * On the nge, reading the status register also clears it. 1906 * So before returning to intr mode we must make sure that all 1907 * possible pending sources of interrupts have been served. 1908 * In practice this means run to completion the *eof routines, |
1309 * and then call the interrupt routine | 1909 * and then call the interrupt routine. |
1310 */ 1311 sc->rxcycles = count; 1312 nge_rxeof(sc); 1313 nge_txeof(sc); | 1910 */ 1911 sc->rxcycles = count; 1912 nge_rxeof(sc); 1913 nge_txeof(sc); |
1314 if (ifp->if_snd.ifq_head != NULL) | 1914 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) |
1315 nge_start_locked(ifp); 1316 1317 if (sc->rxcycles > 0 || cmd == POLL_AND_CHECK_STATUS) { 1318 uint32_t status; 1319 1320 /* Reading the ISR register clears all interrupts. */ 1321 status = CSR_READ_4(sc, NGE_ISR); 1322 | 1915 nge_start_locked(ifp); 1916 1917 if (sc->rxcycles > 0 || cmd == POLL_AND_CHECK_STATUS) { 1918 uint32_t status; 1919 1920 /* Reading the ISR register clears all interrupts. */ 1921 status = CSR_READ_4(sc, NGE_ISR); 1922 |
1323 if (status & (NGE_ISR_RX_ERR|NGE_ISR_RX_OFLOW)) | 1923 if ((status & (NGE_ISR_RX_ERR|NGE_ISR_RX_OFLOW)) != 0) |
1324 nge_rxeof(sc); 1325 | 1924 nge_rxeof(sc); 1925 |
1326 if (status & (NGE_ISR_RX_IDLE)) | 1926 if ((status & NGE_ISR_RX_IDLE) != 0) |
1327 NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE); 1328 | 1927 NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE); 1928 |
1329 if (status & NGE_ISR_SYSERR) { 1330 nge_reset(sc); | 1929 if ((status & NGE_ISR_SYSERR) != 0) { 1930 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; |
1331 nge_init_locked(sc); 1332 } 1333 } 1334 NGE_UNLOCK(sc); 1335} 1336#endif /* DEVICE_POLLING */ 1337 1338static void 1339nge_intr(void *arg) 1340{ 1341 struct nge_softc *sc; 1342 struct ifnet *ifp; 1343 uint32_t status; 1344 | 1931 nge_init_locked(sc); 1932 } 1933 } 1934 NGE_UNLOCK(sc); 1935} 1936#endif /* DEVICE_POLLING */ 1937 1938static void 1939nge_intr(void *arg) 1940{ 1941 struct nge_softc *sc; 1942 struct ifnet *ifp; 1943 uint32_t status; 1944 |
1345 sc = arg; | 1945 sc = (struct nge_softc *)arg; |
1346 ifp = sc->nge_ifp; 1347 1348 NGE_LOCK(sc); | 1946 ifp = sc->nge_ifp; 1947 1948 NGE_LOCK(sc); |
1949 1950 if ((sc->nge_flags & NGE_FLAG_SUSPENDED) != 0) 1951 goto done_locked; 1952 1953 /* Reading the ISR register clears all interrupts. */ 1954 status = CSR_READ_4(sc, NGE_ISR); 1955 if (status == 0xffffffff || (status & NGE_INTRS) == 0) 1956 goto done_locked; |
|
1349#ifdef DEVICE_POLLING | 1957#ifdef DEVICE_POLLING |
1350 if (ifp->if_capenable & IFCAP_POLLING) { 1351 NGE_UNLOCK(sc); 1352 return; 1353 } | 1958 if ((ifp->if_capenable & IFCAP_POLLING) != 0) 1959 goto done_locked; |
1354#endif | 1960#endif |
1961 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) 1962 goto done_locked; |
|
1355 | 1963 |
 1356 /* Suppress unwanted interrupts */ 1357 if (!(ifp->if_flags & IFF_UP)) { 1358 nge_stop(sc); 1359 NGE_UNLOCK(sc); 1360 return; 1361 } 1362 | 
1363 /* Disable interrupts. */ 1364 CSR_WRITE_4(sc, NGE_IER, 0); 1365 1366 /* Data LED on for TBI mode */ | 1964 /* Disable interrupts. */ 1965 CSR_WRITE_4(sc, NGE_IER, 0); 1966 1967 /* Data LED on for TBI mode */ |
1367 if (sc->nge_tbi) 1368 CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO) 1369 | NGE_GPIO_GP3_OUT); | 1968 if ((sc->nge_flags & NGE_FLAG_TBI) != 0) 1969 CSR_WRITE_4(sc, NGE_GPIO, 1970 CSR_READ_4(sc, NGE_GPIO) | NGE_GPIO_GP3_OUT); |
1370 | 1971 |
1371 for (;;) { 1372 /* Reading the ISR register clears all interrupts. */ 1373 status = CSR_READ_4(sc, NGE_ISR); 1374 1375 if ((status & NGE_INTRS) == 0) 1376 break; 1377 1378 if ((status & NGE_ISR_TX_DESC_OK) || 1379 (status & NGE_ISR_TX_ERR) || 1380 (status & NGE_ISR_TX_OK) || 1381 (status & NGE_ISR_TX_IDLE)) | 1972 for (; (status & NGE_INTRS) != 0;) { 1973 if ((status & (NGE_ISR_TX_DESC_OK | NGE_ISR_TX_ERR | 1974 NGE_ISR_TX_OK | NGE_ISR_TX_IDLE)) != 0) |
1382 nge_txeof(sc); 1383 | 1975 nge_txeof(sc); 1976 |
1384 if ((status & NGE_ISR_RX_DESC_OK) || 1385 (status & NGE_ISR_RX_ERR) || 1386 (status & NGE_ISR_RX_OFLOW) || 1387 (status & NGE_ISR_RX_FIFO_OFLOW) || 1388 (status & NGE_ISR_RX_IDLE) || 1389 (status & NGE_ISR_RX_OK)) | 1977 if ((status & (NGE_ISR_RX_DESC_OK | NGE_ISR_RX_ERR | 1978 NGE_ISR_RX_OFLOW | NGE_ISR_RX_FIFO_OFLOW | 1979 NGE_ISR_RX_IDLE | NGE_ISR_RX_OK)) != 0) |
1390 nge_rxeof(sc); 1391 | 1980 nge_rxeof(sc); 1981 |
1392 if ((status & NGE_ISR_RX_IDLE)) | 1982 if ((status & NGE_ISR_RX_IDLE) != 0) |
1393 NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE); 1394 | 1983 NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE); 1984 |
1395 if (status & NGE_ISR_SYSERR) { 1396 nge_reset(sc); | 1985 if ((status & NGE_ISR_SYSERR) != 0) { |
1397 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1398 nge_init_locked(sc); 1399 } | 1986 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1987 nge_init_locked(sc); 1988 } |
1400 1401#if 0 1402 /* 1403 * XXX: nge_tick() is not ready to be called this way 1404 * it screws up the aneg timeout because mii_tick() is 1405 * only to be called once per second. 1406 */ 1407 if (status & NGE_IMR_PHY_INTR) { 1408 sc->nge_link = 0; 1409 nge_tick(sc); 1410 } 1411#endif | 1989 /* Reading the ISR register clears all interrupts. */ 1990 status = CSR_READ_4(sc, NGE_ISR); |
1412 } 1413 1414 /* Re-enable interrupts. */ 1415 CSR_WRITE_4(sc, NGE_IER, 1); 1416 | 1991 } 1992 1993 /* Re-enable interrupts. */ 1994 CSR_WRITE_4(sc, NGE_IER, 1); 1995 |
1417 if (ifp->if_snd.ifq_head != NULL) | 1996 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) |
1418 nge_start_locked(ifp); 1419 1420 /* Data LED off for TBI mode */ | 1997 nge_start_locked(ifp); 1998 1999 /* Data LED off for TBI mode */ |
2000 if ((sc->nge_flags & NGE_FLAG_TBI) != 0) 2001 CSR_WRITE_4(sc, NGE_GPIO, 2002 CSR_READ_4(sc, NGE_GPIO) & ~NGE_GPIO_GP3_OUT); |
|
1421 | 2003 |
1422 if (sc->nge_tbi) 1423 CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO) 1424 & ~NGE_GPIO_GP3_OUT); 1425 | 2004done_locked: |
1426 NGE_UNLOCK(sc); 1427} 1428 1429/* 1430 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data 1431 * pointers to the fragment pointers. 1432 */ 1433static int | 2005 NGE_UNLOCK(sc); 2006} 2007 2008/* 2009 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data 2010 * pointers to the fragment pointers. 2011 */ 2012static int |
1434nge_encap(struct nge_softc *sc, struct mbuf *m_head, uint32_t *txidx) | 2013nge_encap(struct nge_softc *sc, struct mbuf **m_head) |
1435{ | 2014{ |
1436 struct nge_desc *f = NULL; | 2015 struct nge_txdesc *txd, *txd_last; 2016 struct nge_desc *desc; |
1437 struct mbuf *m; | 2017 struct mbuf *m; |
1438 int frag, cur, cnt = 0; | 2018 bus_dmamap_t map; 2019 bus_dma_segment_t txsegs[NGE_MAXTXSEGS]; 2020 int error, i, nsegs, prod, si; |
1439 | 2021 |
1440 /* 1441 * Start packing the mbufs in this chain into 1442 * the fragment pointers. Stop when we run out 1443 * of fragments or hit the end of the mbuf chain. 1444 */ 1445 m = m_head; 1446 cur = frag = *txidx; | 2022 NGE_LOCK_ASSERT(sc); |
1447 | 2023 |
1448 for (m = m_head; m != NULL; m = m->m_next) { 1449 if (m->m_len != 0) { 1450 if ((NGE_TX_LIST_CNT - 1451 (sc->nge_cdata.nge_tx_cnt + cnt)) < 2) 1452 return (ENOBUFS); 1453 f = &sc->nge_ldata->nge_tx_list[frag]; 1454 f->nge_ctl = NGE_CMDSTS_MORE | m->m_len; 1455 f->nge_ptr = vtophys(mtod(m, vm_offset_t)); 1456 if (cnt != 0) 1457 f->nge_ctl |= NGE_CMDSTS_OWN; 1458 cur = frag; 1459 NGE_INC(frag, NGE_TX_LIST_CNT); 1460 cnt++; | 2024 m = *m_head; 2025 prod = sc->nge_cdata.nge_tx_prod; 2026 txd = &sc->nge_cdata.nge_txdesc[prod]; 2027 txd_last = txd; 2028 map = txd->tx_dmamap; 2029 error = bus_dmamap_load_mbuf_sg(sc->nge_cdata.nge_tx_tag, map, 2030 *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT); 2031 if (error == EFBIG) { 2032 m = m_collapse(*m_head, M_DONTWAIT, NGE_MAXTXSEGS); 2033 if (m == NULL) { 2034 m_freem(*m_head); 2035 *m_head = NULL; 2036 return (ENOBUFS); |
1461 } | 2037 } |
2038 *m_head = m; 2039 error = bus_dmamap_load_mbuf_sg(sc->nge_cdata.nge_tx_tag, 2040 map, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT); 2041 if (error != 0) { 2042 m_freem(*m_head); 2043 *m_head = NULL; 2044 return (error); 2045 } 2046 } else if (error != 0) 2047 return (error); 2048 if (nsegs == 0) { 2049 m_freem(*m_head); 2050 *m_head = NULL; 2051 return (EIO); |
|
1462 } 1463 | 2052 } 2053 |
1464 if (m != NULL) | 2054 /* Check number of available descriptors. */ 2055 if (sc->nge_cdata.nge_tx_cnt + nsegs >= (NGE_TX_RING_CNT - 1)) { 2056 bus_dmamap_unload(sc->nge_cdata.nge_tx_tag, map); |
1465 return (ENOBUFS); | 2057 return (ENOBUFS); |
2058 } |
|
1466 | 2059 |
1467 sc->nge_ldata->nge_tx_list[*txidx].nge_extsts = 0; 1468 if (m_head->m_pkthdr.csum_flags) { 1469 if (m_head->m_pkthdr.csum_flags & CSUM_IP) 1470 sc->nge_ldata->nge_tx_list[*txidx].nge_extsts |= 1471 NGE_TXEXTSTS_IPCSUM; 1472 if (m_head->m_pkthdr.csum_flags & CSUM_TCP) 1473 sc->nge_ldata->nge_tx_list[*txidx].nge_extsts |= 1474 NGE_TXEXTSTS_TCPCSUM; 1475 if (m_head->m_pkthdr.csum_flags & CSUM_UDP) 1476 sc->nge_ldata->nge_tx_list[*txidx].nge_extsts |= 1477 NGE_TXEXTSTS_UDPCSUM; | 2060 bus_dmamap_sync(sc->nge_cdata.nge_tx_tag, map, BUS_DMASYNC_PREWRITE); 2061 2062 si = prod; 2063 for (i = 0; i < nsegs; i++) { 2064 desc = &sc->nge_rdata.nge_tx_ring[prod]; 2065 desc->nge_ptr = htole32(NGE_ADDR_LO(txsegs[i].ds_addr)); 2066 if (i == 0) 2067 desc->nge_cmdsts = htole32(txsegs[i].ds_len | 2068 NGE_CMDSTS_MORE); 2069 else 2070 desc->nge_cmdsts = htole32(txsegs[i].ds_len | 2071 NGE_CMDSTS_MORE | NGE_CMDSTS_OWN); 2072 desc->nge_extsts = 0; 2073 sc->nge_cdata.nge_tx_cnt++; 2074 NGE_INC(prod, NGE_TX_RING_CNT); |
1478 } | 2075 } |
2076 /* Update producer index. */ 2077 sc->nge_cdata.nge_tx_prod = prod; |
|
1479 | 2078 |
 1480 if (m_head->m_flags & M_VLANTAG) { 1481 sc->nge_ldata->nge_tx_list[cur].nge_extsts |= 1482 (NGE_TXEXTSTS_VLANPKT|htons(m_head->m_pkthdr.ether_vtag)); | 2079 prod = (prod + NGE_TX_RING_CNT - 1) % NGE_TX_RING_CNT; 2080 desc = &sc->nge_rdata.nge_tx_ring[prod]; 2081 /* Check if we have a VLAN tag to insert. */ 2082 if ((m->m_flags & M_VLANTAG) != 0) 2083 desc->nge_extsts |= htole32(NGE_TXEXTSTS_VLANPKT | 2084 bswap16(m->m_pkthdr.ether_vtag)); 2085 /* Set EOP on the last descriptor. */ 2086 desc->nge_cmdsts &= htole32(~NGE_CMDSTS_MORE); 2087 2088 /* Set checksum offload in the first descriptor. */ 2089 desc = &sc->nge_rdata.nge_tx_ring[si]; 2090 if ((m->m_pkthdr.csum_flags & NGE_CSUM_FEATURES) != 0) { 2091 if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0) 2092 desc->nge_extsts |= htole32(NGE_TXEXTSTS_IPCSUM); 2093 if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0) 2094 desc->nge_extsts |= htole32(NGE_TXEXTSTS_TCPCSUM); 2095 if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0) 2096 desc->nge_extsts |= htole32(NGE_TXEXTSTS_UDPCSUM);
1483 } | 2097 } |
2098 /* Lastly, turn the first descriptor ownership to hardware. */ 2099 desc->nge_cmdsts |= htole32(NGE_CMDSTS_OWN); |
|
1484 | 2100 |
1485 sc->nge_ldata->nge_tx_list[cur].nge_mbuf = m_head; 1486 sc->nge_ldata->nge_tx_list[cur].nge_ctl &= ~NGE_CMDSTS_MORE; 1487 sc->nge_ldata->nge_tx_list[*txidx].nge_ctl |= NGE_CMDSTS_OWN; 1488 sc->nge_cdata.nge_tx_cnt += cnt; 1489 *txidx = frag; | 2101 txd = &sc->nge_cdata.nge_txdesc[prod]; 2102 map = txd_last->tx_dmamap; 2103 txd_last->tx_dmamap = txd->tx_dmamap; 2104 txd->tx_dmamap = map; 2105 txd->tx_m = m; |
1490 1491 return (0); 1492} 1493 1494/* 1495 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 1496 * to the mbuf data regions directly in the transmit lists. We also save a 1497 * copy of the pointers since the transmit list fragment pointers are --- 10 unchanged lines hidden (view full) --- 1508 nge_start_locked(ifp); 1509 NGE_UNLOCK(sc); 1510} 1511 1512static void 1513nge_start_locked(struct ifnet *ifp) 1514{ 1515 struct nge_softc *sc; | 2106 2107 return (0); 2108} 2109 2110/* 2111 * Main transmit routine. To avoid having to do mbuf copies, we put pointers 2112 * to the mbuf data regions directly in the transmit lists. We also save a 2113 * copy of the pointers since the transmit list fragment pointers are --- 10 unchanged lines hidden (view full) --- 2124 nge_start_locked(ifp); 2125 NGE_UNLOCK(sc); 2126} 2127 2128static void 2129nge_start_locked(struct ifnet *ifp) 2130{ 2131 struct nge_softc *sc; |
1516 struct mbuf *m_head = NULL; 1517 uint32_t idx; | 2132 struct mbuf *m_head; 2133 int enq; |
1518 1519 sc = ifp->if_softc; 1520 | 2134 2135 sc = ifp->if_softc; 2136 |
1521 if (!sc->nge_link) 1522 return; | 2137 NGE_LOCK_ASSERT(sc); |
1523 | 2138 |
1524 idx = sc->nge_cdata.nge_tx_prod; 1525 1526 if (ifp->if_drv_flags & IFF_DRV_OACTIVE) | 2139 if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 2140 IFF_DRV_RUNNING || (sc->nge_flags & NGE_FLAG_LINK) == 0) |
1527 return; 1528 | 2141 return; 2142 |
1529 while (sc->nge_ldata->nge_tx_list[idx].nge_mbuf == NULL) { 1530 IF_DEQUEUE(&ifp->if_snd, m_head); | 2143 for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && 2144 sc->nge_cdata.nge_tx_cnt < NGE_TX_RING_CNT - 2; ) { 2145 IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); |
1531 if (m_head == NULL) 1532 break; | 2146 if (m_head == NULL) 2147 break; |
1533 1534 if (nge_encap(sc, m_head, &idx)) { 1535 IF_PREPEND(&ifp->if_snd, m_head); | 2148 /* 2149 * Pack the data into the transmit ring. If we 2150 * don't have room, set the OACTIVE flag and wait 2151 * for the NIC to drain the ring. 2152 */ 2153 if (nge_encap(sc, &m_head)) { 2154 if (m_head == NULL) 2155 break; 2156 IFQ_DRV_PREPEND(&ifp->if_snd, m_head); |
1536 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 1537 break; 1538 } 1539 | 2157 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 2158 break; 2159 } 2160 |
2161 enq++; |
|
1540 /* 1541 * If there's a BPF listener, bounce a copy of this frame 1542 * to him. 1543 */ 1544 ETHER_BPF_MTAP(ifp, m_head); | 2162 /* 2163 * If there's a BPF listener, bounce a copy of this frame 2164 * to him. 2165 */ 2166 ETHER_BPF_MTAP(ifp, m_head); |
1545 | |
1546 } 1547 | 2167 } 2168 |
1548 /* Transmit */ 1549 sc->nge_cdata.nge_tx_prod = idx; 1550 NGE_SETBIT(sc, NGE_CSR, NGE_CSR_TX_ENABLE); | 2169 if (enq > 0) { 2170 bus_dmamap_sync(sc->nge_cdata.nge_tx_ring_tag, 2171 sc->nge_cdata.nge_tx_ring_map, 2172 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2173 /* Transmit */ 2174 NGE_SETBIT(sc, NGE_CSR, NGE_CSR_TX_ENABLE); |
1551 | 2175 |
1552 /* 1553 * Set a timeout in case the chip goes out to lunch. 1554 */ 1555 ifp->if_timer = 5; | 2176 /* Set a timeout in case the chip goes out to lunch. */ 2177 sc->nge_watchdog_timer = 5; 2178 } |
1556} 1557 1558static void 1559nge_init(void *xsc) 1560{ 1561 struct nge_softc *sc = xsc; 1562 1563 NGE_LOCK(sc); 1564 nge_init_locked(sc); 1565 NGE_UNLOCK(sc); 1566} 1567 1568static void 1569nge_init_locked(struct nge_softc *sc) 1570{ 1571 struct ifnet *ifp = sc->nge_ifp; 1572 struct mii_data *mii; | 2179} 2180 2181static void 2182nge_init(void *xsc) 2183{ 2184 struct nge_softc *sc = xsc; 2185 2186 NGE_LOCK(sc); 2187 nge_init_locked(sc); 2188 NGE_UNLOCK(sc); 2189} 2190 2191static void 2192nge_init_locked(struct nge_softc *sc) 2193{ 2194 struct ifnet *ifp = sc->nge_ifp; 2195 struct mii_data *mii; |
2196 uint8_t *eaddr; 2197 uint32_t reg; |
|
1573 1574 NGE_LOCK_ASSERT(sc); 1575 | 2198 2199 NGE_LOCK_ASSERT(sc); 2200 |
1576 if (ifp->if_drv_flags & IFF_DRV_RUNNING) | 2201 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) |
1577 return; 1578 1579 /* 1580 * Cancel pending I/O and free all RX/TX buffers. 1581 */ 1582 nge_stop(sc); 1583 | 2202 return; 2203 2204 /* 2205 * Cancel pending I/O and free all RX/TX buffers. 2206 */ 2207 nge_stop(sc); 2208 |
1584 if (sc->nge_tbi) { 1585 mii = NULL; 1586 } else { 1587 mii = device_get_softc(sc->nge_miibus); 1588 } | 2209 /* Reset the adapter. */ 2210 nge_reset(sc); |
1589 | 2211 |
1590 /* Set MAC address */ | 2212 /* Disable Rx filter prior to programming Rx filter. */ 2213 CSR_WRITE_4(sc, NGE_RXFILT_CTL, 0); 2214 CSR_BARRIER_WRITE_4(sc, NGE_RXFILT_CTL); 2215 2216 mii = device_get_softc(sc->nge_miibus); 2217 2218 /* Set MAC address. */ 2219 eaddr = IF_LLADDR(sc->nge_ifp); |
1591 CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR0); | 2220 CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR0); |
1592 CSR_WRITE_4(sc, NGE_RXFILT_DATA, 1593 ((uint16_t *)IF_LLADDR(sc->nge_ifp))[0]); | 2221 CSR_WRITE_4(sc, NGE_RXFILT_DATA, (eaddr[1] << 8) | eaddr[0]); |
1594 CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR1); | 2222 CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR1); |
1595 CSR_WRITE_4(sc, NGE_RXFILT_DATA, 1596 ((uint16_t *)IF_LLADDR(sc->nge_ifp))[1]); | 2223 CSR_WRITE_4(sc, NGE_RXFILT_DATA, (eaddr[3] << 8) | eaddr[2]); |
1597 CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR2); | 2224 CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR2); |
1598 CSR_WRITE_4(sc, NGE_RXFILT_DATA, 1599 ((uint16_t *)IF_LLADDR(sc->nge_ifp))[2]); | 2225 CSR_WRITE_4(sc, NGE_RXFILT_DATA, (eaddr[5] << 8) | eaddr[4]); |
1600 1601 /* Init circular RX list. */ 1602 if (nge_list_rx_init(sc) == ENOBUFS) { 1603 device_printf(sc->nge_dev, "initialization failed: no " 1604 "memory for rx buffers\n"); 1605 nge_stop(sc); 1606 return; 1607 } --- 7 unchanged lines hidden (view full) --- 1615 * For the NatSemi chip, we have to explicitly enable the 1616 * reception of ARP frames, as well as turn on the 'perfect 1617 * match' filter where we store the station address, otherwise 1618 * we won't receive unicasts meant for this host. 1619 */ 1620 NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ARP); 1621 NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_PERFECT); 1622 | 2226 2227 /* Init circular RX list. */ 2228 if (nge_list_rx_init(sc) == ENOBUFS) { 2229 device_printf(sc->nge_dev, "initialization failed: no " 2230 "memory for rx buffers\n"); 2231 nge_stop(sc); 2232 return; 2233 } --- 7 unchanged lines hidden (view full) --- 2241 * For the NatSemi chip, we have to explicitly enable the 2242 * reception of ARP frames, as well as turn on the 'perfect 2243 * match' filter where we store the station address, otherwise 2244 * we won't receive unicasts meant for this host. 2245 */ 2246 NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ARP); 2247 NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_PERFECT); 2248 |
1623 /* If we want promiscuous mode, set the allframes bit. */ 1624 if (ifp->if_flags & IFF_PROMISC) { 1625 NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ALLPHYS); 1626 } else { 1627 NGE_CLRBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ALLPHYS); 1628 } 1629 | |
1630 /* 1631 * Set the capture broadcast bit to capture broadcast frames. 1632 */ 1633 if (ifp->if_flags & IFF_BROADCAST) { 1634 NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_BROAD); 1635 } else { 1636 NGE_CLRBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_BROAD); 1637 } 1638 | 2249 /* 2250 * Set the capture broadcast bit to capture broadcast frames. 2251 */ 2252 if (ifp->if_flags & IFF_BROADCAST) { 2253 NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_BROAD); 2254 } else { 2255 NGE_CLRBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_BROAD); 2256 } 2257 |
2258 /* Turn the receive filter on. */ 2259 NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ENABLE); 2260 2261 /* Set Rx filter. */ 2262 nge_rxfilter(sc); 2263 2264 /* Disable PRIQ ctl. */ 2265 CSR_WRITE_4(sc, NGE_PRIOQCTL, 0); 2266 |
|
1639 /* | 2267 /* |
 1640 * Load the multicast filter. | 2268 * Set pause frames parameters. 2269 * Rx stat FIFO hi-threshold : 2 or more packets 2270 * Rx stat FIFO lo-threshold : less than 2 packets 2271 * Rx data FIFO hi-threshold : 2K or more bytes 2272 * Rx data FIFO lo-threshold : less than 2K bytes 2273 * pause time : (512ns * 0xffff) -> 33.55ms
1641 */ | 2274 */ |
1642 nge_setmulti(sc); | 2275 CSR_WRITE_4(sc, NGE_PAUSECSR, 2276 NGE_PAUSECSR_PAUSE_ON_MCAST | 2277 NGE_PAUSECSR_PAUSE_ON_DA | 2278 ((1 << 24) & NGE_PAUSECSR_RX_STATFIFO_THR_HI) | 2279 ((1 << 22) & NGE_PAUSECSR_RX_STATFIFO_THR_LO) | 2280 ((1 << 20) & NGE_PAUSECSR_RX_DATAFIFO_THR_HI) | 2281 ((1 << 18) & NGE_PAUSECSR_RX_DATAFIFO_THR_LO) | 2282 NGE_PAUSECSR_CNT); |
1643 | 2283 |
1644 /* Turn the receive filter on */ 1645 NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ENABLE); 1646 | |
1647 /* 1648 * Load the address of the RX and TX lists. 1649 */ | 2284 /* 2285 * Load the address of the RX and TX lists. 2286 */ |
1650 CSR_WRITE_4(sc, NGE_RX_LISTPTR, 1651 vtophys(&sc->nge_ldata->nge_rx_list[0])); 1652 CSR_WRITE_4(sc, NGE_TX_LISTPTR, 1653 vtophys(&sc->nge_ldata->nge_tx_list[0])); | 2287 CSR_WRITE_4(sc, NGE_RX_LISTPTR_HI, 2288 NGE_ADDR_HI(sc->nge_rdata.nge_rx_ring_paddr)); 2289 CSR_WRITE_4(sc, NGE_RX_LISTPTR_LO, 2290 NGE_ADDR_LO(sc->nge_rdata.nge_rx_ring_paddr)); 2291 CSR_WRITE_4(sc, NGE_TX_LISTPTR_HI, 2292 NGE_ADDR_HI(sc->nge_rdata.nge_tx_ring_paddr)); 2293 CSR_WRITE_4(sc, NGE_TX_LISTPTR_LO, 2294 NGE_ADDR_LO(sc->nge_rdata.nge_tx_ring_paddr)); |
1654 | 2295 |
1655 /* Set RX configuration */ | 2296 /* Set RX configuration. */ |
1656 CSR_WRITE_4(sc, NGE_RX_CFG, NGE_RXCFG); | 2297 CSR_WRITE_4(sc, NGE_RX_CFG, NGE_RXCFG); |
2298 2299 CSR_WRITE_4(sc, NGE_VLAN_IP_RXCTL, 0); |
|
1657 /* 1658 * Enable hardware checksum validation for all IPv4 1659 * packets, do not reject packets with bad checksums. 1660 */ | 2300 /* 2301 * Enable hardware checksum validation for all IPv4 2302 * packets, do not reject packets with bad checksums. 2303 */ |
1661 CSR_WRITE_4(sc, NGE_VLAN_IP_RXCTL, NGE_VIPRXCTL_IPCSUM_ENB); | 2304 if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) 2305 NGE_SETBIT(sc, NGE_VLAN_IP_RXCTL, NGE_VIPRXCTL_IPCSUM_ENB); |
1662 1663 /* 1664 * Tell the chip to detect and strip VLAN tag info from 1665 * received frames. The tag will be provided in the extsts 1666 * field in the RX descriptors. 1667 */ | 2306 2307 /* 2308 * Tell the chip to detect and strip VLAN tag info from 2309 * received frames. The tag will be provided in the extsts 2310 * field in the RX descriptors. 2311 */ |
1668 NGE_SETBIT(sc, NGE_VLAN_IP_RXCTL, 1669 NGE_VIPRXCTL_TAG_DETECT_ENB|NGE_VIPRXCTL_TAG_STRIP_ENB); | 2312 NGE_SETBIT(sc, NGE_VLAN_IP_RXCTL, NGE_VIPRXCTL_TAG_DETECT_ENB); 2313 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) 2314 NGE_SETBIT(sc, NGE_VLAN_IP_RXCTL, NGE_VIPRXCTL_TAG_STRIP_ENB); |
1670 | 2315 |
1671 /* Set TX configuration */ | 2316 /* Set TX configuration. */ |
1672 CSR_WRITE_4(sc, NGE_TX_CFG, NGE_TXCFG); 1673 1674 /* 1675 * Enable TX IPv4 checksumming on a per-packet basis. 1676 */ 1677 CSR_WRITE_4(sc, NGE_VLAN_IP_TXCTL, NGE_VIPTXCTL_CSUM_PER_PKT); 1678 1679 /* 1680 * Tell the chip to insert VLAN tags on a per-packet basis as 1681 * dictated by the code in the frame encapsulation routine. 1682 */ 1683 NGE_SETBIT(sc, NGE_VLAN_IP_TXCTL, NGE_VIPTXCTL_TAG_PER_PKT); 1684 | 2317 CSR_WRITE_4(sc, NGE_TX_CFG, NGE_TXCFG); 2318 2319 /* 2320 * Enable TX IPv4 checksumming on a per-packet basis. 2321 */ 2322 CSR_WRITE_4(sc, NGE_VLAN_IP_TXCTL, NGE_VIPTXCTL_CSUM_PER_PKT); 2323 2324 /* 2325 * Tell the chip to insert VLAN tags on a per-packet basis as 2326 * dictated by the code in the frame encapsulation routine. 2327 */ 2328 NGE_SETBIT(sc, NGE_VLAN_IP_TXCTL, NGE_VIPTXCTL_TAG_PER_PKT); 2329 |
1685 /* Set full/half duplex mode. */ 1686 if (sc->nge_tbi) { 1687 if ((sc->nge_ifmedia.ifm_cur->ifm_media & IFM_GMASK) 1688 == IFM_FDX) { 1689 NGE_SETBIT(sc, NGE_TX_CFG, 1690 (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR)); 1691 NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX); 1692 } else { 1693 NGE_CLRBIT(sc, NGE_TX_CFG, 1694 (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR)); 1695 NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX); 1696 } 1697 } else { 1698 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { 1699 NGE_SETBIT(sc, NGE_TX_CFG, 1700 (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR)); 1701 NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX); 1702 } else { 1703 NGE_CLRBIT(sc, NGE_TX_CFG, 1704 (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR)); 1705 NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX); 1706 } 1707 } 1708 1709 nge_tick(sc); 1710 | |
1711 /* 1712 * Enable the delivery of PHY interrupts based on 1713 * link/speed/duplex status changes. Also enable the 1714 * extsts field in the DMA descriptors (needed for 1715 * TCP/IP checksum offload on transmit). 1716 */ | 2330 /* 2331 * Enable the delivery of PHY interrupts based on 2332 * link/speed/duplex status changes. Also enable the 2333 * extsts field in the DMA descriptors (needed for 2334 * TCP/IP checksum offload on transmit). 2335 */ |
1717 NGE_SETBIT(sc, NGE_CFG, NGE_CFG_PHYINTR_SPD| 1718 NGE_CFG_PHYINTR_LNK|NGE_CFG_PHYINTR_DUP|NGE_CFG_EXTSTS_ENB); | 2336 NGE_SETBIT(sc, NGE_CFG, NGE_CFG_PHYINTR_SPD | 2337 NGE_CFG_PHYINTR_LNK | NGE_CFG_PHYINTR_DUP | NGE_CFG_EXTSTS_ENB); |
1719 1720 /* 1721 * Configure interrupt holdoff (moderation). We can 1722 * have the chip delay interrupt delivery for a certain 1723 * period. Units are in 100us, and the max setting 1724 * is 25500us (0xFF x 100us). Default is a 100us holdoff. 1725 */ | 2338 2339 /* 2340 * Configure interrupt holdoff (moderation). We can 2341 * have the chip delay interrupt delivery for a certain 2342 * period. Units are in 100us, and the max setting 2343 * is 25500us (0xFF x 100us). Default is a 100us holdoff. 2344 */ |
1726 CSR_WRITE_4(sc, NGE_IHR, 0x01); | 2345 CSR_WRITE_4(sc, NGE_IHR, sc->nge_int_holdoff); |
1727 1728 /* | 2346 2347 /* |
2348 * Enable MAC statistics counters and clear. 2349 */ 2350 reg = CSR_READ_4(sc, NGE_MIBCTL); 2351 reg &= ~NGE_MIBCTL_FREEZE_CNT; 2352 reg |= NGE_MIBCTL_CLEAR_CNT; 2353 CSR_WRITE_4(sc, NGE_MIBCTL, reg); 2354 2355 /* |
|
1729 * Enable interrupts. 1730 */ 1731 CSR_WRITE_4(sc, NGE_IMR, NGE_INTRS); 1732#ifdef DEVICE_POLLING 1733 /* 1734 * ... only enable interrupts if we are not polling, make sure 1735 * they are off otherwise. 1736 */ | 2356 * Enable interrupts. 2357 */ 2358 CSR_WRITE_4(sc, NGE_IMR, NGE_INTRS); 2359#ifdef DEVICE_POLLING 2360 /* 2361 * ... only enable interrupts if we are not polling, make sure 2362 * they are off otherwise. 2363 */ |
1737 if (ifp->if_capenable & IFCAP_POLLING) | 2364 if ((ifp->if_capenable & IFCAP_POLLING) != 0) |
1738 CSR_WRITE_4(sc, NGE_IER, 0); 1739 else 1740#endif 1741 CSR_WRITE_4(sc, NGE_IER, 1); 1742 | 2365 CSR_WRITE_4(sc, NGE_IER, 0); 2366 else 2367#endif 2368 CSR_WRITE_4(sc, NGE_IER, 1); 2369 |
1743 /* Enable receiver and transmitter. */ 1744 NGE_CLRBIT(sc, NGE_CSR, NGE_CSR_TX_DISABLE|NGE_CSR_RX_DISABLE); 1745 NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE); | 2370 sc->nge_flags &= ~NGE_FLAG_LINK; 2371 mii_mediachg(mii); |
1746 | 2372 |
1747 nge_ifmedia_upd_locked(ifp); | 2373 sc->nge_watchdog_timer = 0; 2374 callout_reset(&sc->nge_stat_ch, hz, nge_tick, sc); |
1748 1749 ifp->if_drv_flags |= IFF_DRV_RUNNING; 1750 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1751} 1752 1753/* 1754 * Set media options. 1755 */ 1756static int | 2375 2376 ifp->if_drv_flags |= IFF_DRV_RUNNING; 2377 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2378} 2379 2380/* 2381 * Set media options. 2382 */ 2383static int |
1757nge_ifmedia_upd(struct ifnet *ifp) | 2384nge_mediachange(struct ifnet *ifp) |
1758{ 1759 struct nge_softc *sc; | 2385{ 2386 struct nge_softc *sc; |
2387 struct mii_data *mii; 2388 struct mii_softc *miisc; 2389 int error; |
|
1760 1761 sc = ifp->if_softc; 1762 NGE_LOCK(sc); | 2390 2391 sc = ifp->if_softc; 2392 NGE_LOCK(sc); |
1763 nge_ifmedia_upd_locked(ifp); | 2393 mii = device_get_softc(sc->nge_miibus); 2394 if (mii->mii_instance) { 2395 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 2396 mii_phy_reset(miisc); 2397 } 2398 error = mii_mediachg(mii); |
1764 NGE_UNLOCK(sc); | 2399 NGE_UNLOCK(sc); |
1765 return (0); 1766} | |
1767 | 2400 |
1768static void 1769nge_ifmedia_upd_locked(struct ifnet *ifp) 1770{ 1771 struct nge_softc *sc; 1772 struct mii_data *mii; 1773 1774 sc = ifp->if_softc; 1775 NGE_LOCK_ASSERT(sc); 1776 1777 if (sc->nge_tbi) { 1778 if (IFM_SUBTYPE(sc->nge_ifmedia.ifm_cur->ifm_media) 1779 == IFM_AUTO) { 1780 CSR_WRITE_4(sc, NGE_TBI_ANAR, 1781 CSR_READ_4(sc, NGE_TBI_ANAR) 1782 | NGE_TBIANAR_HDX | NGE_TBIANAR_FDX 1783 | NGE_TBIANAR_PS1 | NGE_TBIANAR_PS2); 1784 CSR_WRITE_4(sc, NGE_TBI_BMCR, NGE_TBIBMCR_ENABLE_ANEG 1785 | NGE_TBIBMCR_RESTART_ANEG); 1786 CSR_WRITE_4(sc, NGE_TBI_BMCR, NGE_TBIBMCR_ENABLE_ANEG); 1787 } else if ((sc->nge_ifmedia.ifm_cur->ifm_media 1788 & IFM_GMASK) == IFM_FDX) { 1789 NGE_SETBIT(sc, NGE_TX_CFG, 1790 (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR)); 1791 NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX); 1792 1793 CSR_WRITE_4(sc, NGE_TBI_ANAR, 0); 1794 CSR_WRITE_4(sc, NGE_TBI_BMCR, 0); 1795 } else { 1796 NGE_CLRBIT(sc, NGE_TX_CFG, 1797 (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR)); 1798 NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX); 1799 1800 CSR_WRITE_4(sc, NGE_TBI_ANAR, 0); 1801 CSR_WRITE_4(sc, NGE_TBI_BMCR, 0); 1802 } 1803 1804 CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO) 1805 & ~NGE_GPIO_GP3_OUT); 1806 } else { 1807 mii = device_get_softc(sc->nge_miibus); 1808 sc->nge_link = 0; 1809 if (mii->mii_instance) { 1810 struct mii_softc *miisc; 1811 1812 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 1813 mii_phy_reset(miisc); 1814 } 1815 mii_mediachg(mii); 1816 } | 2401 return (error); |
1817} 1818 1819/* 1820 * Report current media status. 1821 */ 1822static void | 2402} 2403 2404/* 2405 * Report current media status. 2406 */ 2407static void |
1823nge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) | 2408nge_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) |
1824{ 1825 struct nge_softc *sc; 1826 struct mii_data *mii; 1827 1828 sc = ifp->if_softc; | 2409{ 2410 struct nge_softc *sc; 2411 struct mii_data *mii; 2412 2413 sc = ifp->if_softc; |
1829 | |
1830 NGE_LOCK(sc); | 2414 NGE_LOCK(sc); |
1831 if (sc->nge_tbi) { 1832 ifmr->ifm_status = IFM_AVALID; 1833 ifmr->ifm_active = IFM_ETHER; 1834 1835 if (CSR_READ_4(sc, NGE_TBI_BMSR) & NGE_TBIBMSR_ANEG_DONE) { 1836 ifmr->ifm_status |= IFM_ACTIVE; 1837 } 1838 if (CSR_READ_4(sc, NGE_TBI_BMCR) & NGE_TBIBMCR_LOOPBACK) 1839 ifmr->ifm_active |= IFM_LOOP; 1840 if (!CSR_READ_4(sc, NGE_TBI_BMSR) & NGE_TBIBMSR_ANEG_DONE) { 1841 ifmr->ifm_active |= IFM_NONE; 1842 ifmr->ifm_status = 0; 1843 NGE_UNLOCK(sc); 1844 return; 1845 } 1846 ifmr->ifm_active |= IFM_1000_SX; 1847 if (IFM_SUBTYPE(sc->nge_ifmedia.ifm_cur->ifm_media) 1848 == IFM_AUTO) { 1849 ifmr->ifm_active |= IFM_AUTO; 1850 if (CSR_READ_4(sc, NGE_TBI_ANLPAR) 1851 & NGE_TBIANAR_FDX) { 1852 ifmr->ifm_active |= IFM_FDX; 1853 }else if (CSR_READ_4(sc, NGE_TBI_ANLPAR) 1854 & NGE_TBIANAR_HDX) { 1855 ifmr->ifm_active |= IFM_HDX; 1856 } 1857 } else if ((sc->nge_ifmedia.ifm_cur->ifm_media & IFM_GMASK) 1858 == IFM_FDX) 1859 ifmr->ifm_active |= IFM_FDX; 1860 else 1861 ifmr->ifm_active |= IFM_HDX; 1862 1863 } else { 1864 mii = device_get_softc(sc->nge_miibus); 1865 mii_pollstat(mii); 1866 ifmr->ifm_active = mii->mii_media_active; 1867 ifmr->ifm_status = mii->mii_media_status; 1868 } | 2415 mii = device_get_softc(sc->nge_miibus); 2416 mii_pollstat(mii); |
1869 NGE_UNLOCK(sc); | 2417 NGE_UNLOCK(sc); |
2418 ifmr->ifm_active = mii->mii_media_active; 2419 ifmr->ifm_status = mii->mii_media_status; |
|
1870} 1871 1872static int 1873nge_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 1874{ 1875 struct nge_softc *sc = ifp->if_softc; 1876 struct ifreq *ifr = (struct ifreq *) data; 1877 struct mii_data *mii; | 2420} 2421 2422static int 2423nge_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 2424{ 2425 struct nge_softc *sc = ifp->if_softc; 2426 struct ifreq *ifr = (struct ifreq *) data; 2427 struct mii_data *mii; |
1878 int error = 0; | 2428 int error = 0, mask; |
1879 1880 switch (command) { 1881 case SIOCSIFMTU: | 2429 2430 switch (command) { 2431 case SIOCSIFMTU: |
1882 if (ifr->ifr_mtu > NGE_JUMBO_MTU) | 2432 if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > NGE_JUMBO_MTU) |
1883 error = EINVAL; 1884 else { 1885 NGE_LOCK(sc); 1886 ifp->if_mtu = ifr->ifr_mtu; 1887 /* 1888 * Workaround: if the MTU is larger than 1889 * 8152 (TX FIFO size minus 64 minus 18), turn off 1890 * TX checksum offloading. 1891 */ 1892 if (ifr->ifr_mtu >= 8152) { 1893 ifp->if_capenable &= ~IFCAP_TXCSUM; | 2433 error = EINVAL; 2434 else { 2435 NGE_LOCK(sc); 2436 ifp->if_mtu = ifr->ifr_mtu; 2437 /* 2438 * Workaround: if the MTU is larger than 2439 * 8152 (TX FIFO size minus 64 minus 18), turn off 2440 * TX checksum offloading. 2441 */ 2442 if (ifr->ifr_mtu >= 8152) { 2443 ifp->if_capenable &= ~IFCAP_TXCSUM; |
1894 ifp->if_hwassist = 0; | 2444 ifp->if_hwassist &= ~NGE_CSUM_FEATURES; |
1895 } else { 1896 ifp->if_capenable |= IFCAP_TXCSUM; | 2445 } else { 2446 ifp->if_capenable |= IFCAP_TXCSUM; |
1897 ifp->if_hwassist = NGE_CSUM_FEATURES; | 2447 ifp->if_hwassist |= NGE_CSUM_FEATURES; |
1898 } 1899 NGE_UNLOCK(sc); | 2448 } 2449 NGE_UNLOCK(sc); |
2450 VLAN_CAPABILITIES(ifp); |
|
1900 } 1901 break; 1902 case SIOCSIFFLAGS: 1903 NGE_LOCK(sc); | 2451 } 2452 break; 2453 case SIOCSIFFLAGS: 2454 NGE_LOCK(sc); |
1904 if (ifp->if_flags & IFF_UP) { 1905 if (ifp->if_drv_flags & IFF_DRV_RUNNING && 1906 ifp->if_flags & IFF_PROMISC && 1907 !(sc->nge_if_flags & IFF_PROMISC)) { 1908 NGE_SETBIT(sc, NGE_RXFILT_CTL, 1909 NGE_RXFILTCTL_ALLPHYS| 1910 NGE_RXFILTCTL_ALLMULTI); 1911 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING && 1912 !(ifp->if_flags & IFF_PROMISC) && 1913 sc->nge_if_flags & IFF_PROMISC) { 1914 NGE_CLRBIT(sc, NGE_RXFILT_CTL, 1915 NGE_RXFILTCTL_ALLPHYS); 1916 if (!(ifp->if_flags & IFF_ALLMULTI)) 1917 NGE_CLRBIT(sc, NGE_RXFILT_CTL, 1918 NGE_RXFILTCTL_ALLMULTI); | 2455 if ((ifp->if_flags & IFF_UP) != 0) { 2456 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 2457 if ((ifp->if_flags ^ sc->nge_if_flags) & 2458 (IFF_PROMISC | IFF_ALLMULTI)) 2459 nge_rxfilter(sc); |
1919 } else { | 2460 } else { |
1920 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1921 nge_init_locked(sc); | 2461 if ((sc->nge_flags & NGE_FLAG_DETACH) == 0) 2462 nge_init_locked(sc); |
1922 } 1923 } else { | 2463 } 2464 } else { |
1924 if (ifp->if_drv_flags & IFF_DRV_RUNNING) | 2465 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) |
1925 nge_stop(sc); 1926 } 1927 sc->nge_if_flags = ifp->if_flags; 1928 NGE_UNLOCK(sc); 1929 error = 0; 1930 break; 1931 case SIOCADDMULTI: 1932 case SIOCDELMULTI: 1933 NGE_LOCK(sc); | 2466 nge_stop(sc); 2467 } 2468 sc->nge_if_flags = ifp->if_flags; 2469 NGE_UNLOCK(sc); 2470 error = 0; 2471 break; 2472 case SIOCADDMULTI: 2473 case SIOCDELMULTI: 2474 NGE_LOCK(sc); |
1934 nge_setmulti(sc); | 2475 nge_rxfilter(sc); |
1935 NGE_UNLOCK(sc); 1936 error = 0; 1937 break; 1938 case SIOCGIFMEDIA: 1939 case SIOCSIFMEDIA: | 2476 NGE_UNLOCK(sc); 2477 error = 0; 2478 break; 2479 case SIOCGIFMEDIA: 2480 case SIOCSIFMEDIA: |
1940 if (sc->nge_tbi) { 1941 error = ifmedia_ioctl(ifp, ifr, &sc->nge_ifmedia, 1942 command); 1943 } else { 1944 mii = device_get_softc(sc->nge_miibus); 1945 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, 1946 command); 1947 } | 2481 mii = device_get_softc(sc->nge_miibus); 2482 error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); |
1948 break; 1949 case SIOCSIFCAP: | 2483 break; 2484 case SIOCSIFCAP: |
2485 NGE_LOCK(sc); 2486 mask = ifr->ifr_reqcap ^ ifp->if_capenable; |
|
1950#ifdef DEVICE_POLLING | 2487#ifdef DEVICE_POLLING |
1951 if (ifr->ifr_reqcap & IFCAP_POLLING && 1952 !(ifp->if_capenable & IFCAP_POLLING)) { 1953 error = ether_poll_register(nge_poll, ifp); 1954 if (error) 1955 return (error); 1956 NGE_LOCK(sc); 1957 /* Disable interrupts */ 1958 CSR_WRITE_4(sc, NGE_IER, 0); 1959 ifp->if_capenable |= IFCAP_POLLING; 1960 NGE_UNLOCK(sc); 1961 return (error); | 2488 if ((mask & IFCAP_POLLING) != 0 && 2489 (IFCAP_POLLING & ifp->if_capabilities) != 0) { 2490 ifp->if_capenable ^= IFCAP_POLLING; 2491 if ((IFCAP_POLLING & ifp->if_capenable) != 0) { 2492 error = ether_poll_register(nge_poll, ifp); 2493 if (error != 0) { 2494 NGE_UNLOCK(sc); 2495 break; 2496 } 2497 /* Disable interrupts. */ 2498 CSR_WRITE_4(sc, NGE_IER, 0); 2499 } else { 2500 error = ether_poll_deregister(ifp); 2501 /* Enable interrupts. */ 2502 CSR_WRITE_4(sc, NGE_IER, 1); 2503 } 2504 } 2505#endif /* DEVICE_POLLING */ 2506 if ((mask & IFCAP_TXCSUM) != 0 && 2507 (IFCAP_TXCSUM & ifp->if_capabilities) != 0) { 2508 ifp->if_capenable ^= IFCAP_TXCSUM; 2509 if ((IFCAP_TXCSUM & ifp->if_capenable) != 0) 2510 ifp->if_hwassist |= NGE_CSUM_FEATURES; 2511 else 2512 ifp->if_hwassist &= ~NGE_CSUM_FEATURES; 2513 } 2514 if ((mask & IFCAP_RXCSUM) != 0 && 2515 (IFCAP_RXCSUM & ifp->if_capabilities) != 0) 2516 ifp->if_capenable ^= IFCAP_RXCSUM; |
1962 | 2517 |
2518 if ((mask & IFCAP_WOL) != 0 && 2519 (ifp->if_capabilities & IFCAP_WOL) != 0) { 2520 if ((mask & IFCAP_WOL_UCAST) != 0) 2521 ifp->if_capenable ^= IFCAP_WOL_UCAST; 2522 if ((mask & IFCAP_WOL_MCAST) != 0) 2523 ifp->if_capenable ^= IFCAP_WOL_MCAST; 2524 if ((mask & IFCAP_WOL_MAGIC) != 0) 2525 ifp->if_capenable ^= IFCAP_WOL_MAGIC; |
|
1963 } | 2526 } |
1964 if (!(ifr->ifr_reqcap & IFCAP_POLLING) && 1965 ifp->if_capenable & IFCAP_POLLING) { 1966 error = ether_poll_deregister(ifp); 1967 /* Enable interrupts. */ 1968 NGE_LOCK(sc); 1969 CSR_WRITE_4(sc, NGE_IER, 1); 1970 ifp->if_capenable &= ~IFCAP_POLLING; 1971 NGE_UNLOCK(sc); 1972 return (error); | 2527 2528 if ((mask & IFCAP_VLAN_HWCSUM) != 0 && 2529 (ifp->if_capabilities & IFCAP_VLAN_HWCSUM) != 0) 2530 ifp->if_capenable ^= IFCAP_VLAN_HWCSUM; 2531 if ((mask & IFCAP_VLAN_HWTAGGING) != 0 && 2532 (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) { 2533 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 2534 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { 2535 if ((ifp->if_capenable & 2536 IFCAP_VLAN_HWTAGGING) != 0) 2537 NGE_SETBIT(sc, 2538 NGE_VLAN_IP_RXCTL, 2539 NGE_VIPRXCTL_TAG_STRIP_ENB); 2540 else 2541 NGE_CLRBIT(sc, 2542 NGE_VLAN_IP_RXCTL, 2543 NGE_VIPRXCTL_TAG_STRIP_ENB); 2544 } |
1973 } | 2545 } |
1974#endif /* DEVICE_POLLING */ | 2546 /* 2547 * Both VLAN hardware tagging and checksum offload is 2548 * required to do checksum offload on VLAN interface. 2549 */ 2550 if ((ifp->if_capenable & IFCAP_TXCSUM) == 0) 2551 ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM; 2552 if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) 2553 ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM; 2554 NGE_UNLOCK(sc); 2555 VLAN_CAPABILITIES(ifp); |
1975 break; 1976 default: 1977 error = ether_ioctl(ifp, command, data); 1978 break; 1979 } 1980 1981 return (error); 1982} 1983 1984static void | 2556 break; 2557 default: 2558 error = ether_ioctl(ifp, command, data); 2559 break; 2560 } 2561 2562 return (error); 2563} 2564 2565static void |
1985nge_watchdog(struct ifnet *ifp) | 2566nge_watchdog(struct nge_softc *sc) |
1986{ | 2567{ |
1987 struct nge_softc *sc; | 2568 struct ifnet *ifp; |
1988 | 2569 |
1989 sc = ifp->if_softc; | 2570 NGE_LOCK_ASSERT(sc); |
1990 | 2571 |
2572 if (sc->nge_watchdog_timer == 0 || --sc->nge_watchdog_timer) 2573 return; 2574 2575 ifp = sc->nge_ifp; |
|
1991 ifp->if_oerrors++; 1992 if_printf(ifp, "watchdog timeout\n"); 1993 | 2576 ifp->if_oerrors++; 2577 if_printf(ifp, "watchdog timeout\n"); 2578 |
1994 NGE_LOCK(sc); 1995 nge_stop(sc); 1996 nge_reset(sc); | |
1997 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 1998 nge_init_locked(sc); 1999 | 2579 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2580 nge_init_locked(sc); 2581 |
2000 if (ifp->if_snd.ifq_head != NULL) | 2582 if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) |
2001 nge_start_locked(ifp); | 2583 nge_start_locked(ifp); |
2584} |
|
2002 | 2585 |
2003 NGE_UNLOCK(sc); | 2586static int 2587nge_stop_mac(struct nge_softc *sc) 2588{ 2589 uint32_t reg; 2590 int i; 2591 2592 NGE_LOCK_ASSERT(sc); 2593 2594 reg = CSR_READ_4(sc, NGE_CSR); 2595 if ((reg & (NGE_CSR_TX_ENABLE | NGE_CSR_RX_ENABLE)) != 0) { 2596 reg &= ~(NGE_CSR_TX_ENABLE | NGE_CSR_RX_ENABLE); 2597 reg |= NGE_CSR_TX_DISABLE | NGE_CSR_RX_DISABLE; 2598 CSR_WRITE_4(sc, NGE_CSR, reg); 2599 for (i = 0; i < NGE_TIMEOUT; i++) { 2600 DELAY(1); 2601 if ((CSR_READ_4(sc, NGE_CSR) & 2602 (NGE_CSR_RX_ENABLE | NGE_CSR_TX_ENABLE)) == 0) 2603 break; 2604 } 2605 if (i == NGE_TIMEOUT) 2606 return (ETIMEDOUT); 2607 } 2608 2609 return (0); |
2004} 2005 2006/* 2007 * Stop the adapter and free any mbufs allocated to the 2008 * RX and TX lists. 2009 */ 2010static void 2011nge_stop(struct nge_softc *sc) 2012{ | 2610} 2611 2612/* 2613 * Stop the adapter and free any mbufs allocated to the 2614 * RX and TX lists. 2615 */ 2616static void 2617nge_stop(struct nge_softc *sc) 2618{ |
2619 struct nge_txdesc *txd; 2620 struct nge_rxdesc *rxd; |
|
2013 int i; 2014 struct ifnet *ifp; | 2621 int i; 2622 struct ifnet *ifp; |
2015 struct mii_data *mii; | |
2016 2017 NGE_LOCK_ASSERT(sc); 2018 ifp = sc->nge_ifp; | 2623 2624 NGE_LOCK_ASSERT(sc); 2625 ifp = sc->nge_ifp; |
2019 ifp->if_timer = 0; 2020 if (sc->nge_tbi) { 2021 mii = NULL; 2022 } else { 2023 mii = device_get_softc(sc->nge_miibus); 2024 } | |
2025 | 2626 |
2627 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 2628 sc->nge_flags &= ~NGE_FLAG_LINK; |
|
2026 callout_stop(&sc->nge_stat_ch); | 2629 callout_stop(&sc->nge_stat_ch); |
2630 sc->nge_watchdog_timer = 0; 2631 |
|
2027 CSR_WRITE_4(sc, NGE_IER, 0); 2028 CSR_WRITE_4(sc, NGE_IMR, 0); | 2632 CSR_WRITE_4(sc, NGE_IER, 0); 2633 CSR_WRITE_4(sc, NGE_IMR, 0); |
2029 NGE_SETBIT(sc, NGE_CSR, NGE_CSR_TX_DISABLE|NGE_CSR_RX_DISABLE); 2030 DELAY(1000); 2031 CSR_WRITE_4(sc, NGE_TX_LISTPTR, 0); 2032 CSR_WRITE_4(sc, NGE_RX_LISTPTR, 0); | 2634 if (nge_stop_mac(sc) == ETIMEDOUT) 2635 device_printf(sc->nge_dev, 2636 "%s: unable to stop Tx/Rx MAC\n", __func__); 2637 CSR_WRITE_4(sc, NGE_TX_LISTPTR_HI, 0); 2638 CSR_WRITE_4(sc, NGE_TX_LISTPTR_LO, 0); 2639 CSR_WRITE_4(sc, NGE_RX_LISTPTR_HI, 0); 2640 CSR_WRITE_4(sc, NGE_RX_LISTPTR_LO, 0); 2641 nge_stats_update(sc); 2642 if (sc->nge_head != NULL) { 2643 m_freem(sc->nge_head); 2644 sc->nge_head = sc->nge_tail = NULL; 2645 } |
2033 | 2646 |
2034 if (!sc->nge_tbi) 2035 mii_down(mii); 2036 2037 sc->nge_link = 0; 2038 | |
2039 /* | 2647 /* |
2040 * Free data in the RX lists. | 2648 * Free RX and TX mbufs still in the queues. |
2041 */ | 2649 */ |
2042 for (i = 0; i < NGE_RX_LIST_CNT; i++) { 2043 if (sc->nge_ldata->nge_rx_list[i].nge_mbuf != NULL) { 2044 m_freem(sc->nge_ldata->nge_rx_list[i].nge_mbuf); 2045 sc->nge_ldata->nge_rx_list[i].nge_mbuf = NULL; | 2650 for (i = 0; i < NGE_RX_RING_CNT; i++) { 2651 rxd = &sc->nge_cdata.nge_rxdesc[i]; 2652 if (rxd->rx_m != NULL) { 2653 bus_dmamap_sync(sc->nge_cdata.nge_rx_tag, 2654 rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); 2655 bus_dmamap_unload(sc->nge_cdata.nge_rx_tag, 2656 rxd->rx_dmamap); 2657 m_freem(rxd->rx_m); 2658 rxd->rx_m = NULL; |
2046 } 2047 } | 2659 } 2660 } |
2048 bzero((char *)&sc->nge_ldata->nge_rx_list, 2049 sizeof(sc->nge_ldata->nge_rx_list)); 2050 2051 /* 2052 * Free the TX list buffers. 2053 */ 2054 for (i = 0; i < NGE_TX_LIST_CNT; i++) { 2055 if (sc->nge_ldata->nge_tx_list[i].nge_mbuf != NULL) { 2056 m_freem(sc->nge_ldata->nge_tx_list[i].nge_mbuf); 2057 sc->nge_ldata->nge_tx_list[i].nge_mbuf = NULL; | 2661 for (i = 0; i < NGE_TX_RING_CNT; i++) { 2662 txd = &sc->nge_cdata.nge_txdesc[i]; 2663 if (txd->tx_m != NULL) { 2664 bus_dmamap_sync(sc->nge_cdata.nge_tx_tag, 2665 txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); 2666 bus_dmamap_unload(sc->nge_cdata.nge_tx_tag, 2667 txd->tx_dmamap); 2668 m_freem(txd->tx_m); 2669 txd->tx_m = NULL; |
2058 } 2059 } | 2670 } 2671 } |
2672} |
|
2060 | 2673 |
2061 bzero((char *)&sc->nge_ldata->nge_tx_list, 2062 sizeof(sc->nge_ldata->nge_tx_list)); | 2674/* 2675 * Before setting WOL bits, caller should have stopped Receiver. 2676 */ 2677static void 2678nge_wol(struct nge_softc *sc) 2679{ 2680 struct ifnet *ifp; 2681 uint32_t reg; 2682 uint16_t pmstat; 2683 int pmc; |
2063 | 2684 |
2064 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); | 2685 NGE_LOCK_ASSERT(sc); 2686 2687 if (pci_find_extcap(sc->nge_dev, PCIY_PMG, &pmc) != 0) 2688 return; 2689 2690 ifp = sc->nge_ifp; 2691 if ((ifp->if_capenable & IFCAP_WOL) == 0) { 2692 /* Disable WOL & disconnect CLKRUN to save power. */ 2693 CSR_WRITE_4(sc, NGE_WOLCSR, 0); 2694 CSR_WRITE_4(sc, NGE_CLKRUN, 0); 2695 } else { 2696 if (nge_stop_mac(sc) == ETIMEDOUT) 2697 device_printf(sc->nge_dev, 2698 "%s: unable to stop Tx/Rx MAC\n", __func__); 2699 /* 2700 * Make sure wake frames will be buffered in the Rx FIFO. 2701 * (i.e. Silent Rx mode.) 2702 */ 2703 CSR_WRITE_4(sc, NGE_RX_LISTPTR_HI, 0); 2704 CSR_BARRIER_WRITE_4(sc, NGE_RX_LISTPTR_HI); 2705 CSR_WRITE_4(sc, NGE_RX_LISTPTR_LO, 0); 2706 CSR_BARRIER_WRITE_4(sc, NGE_RX_LISTPTR_LO); 2707 /* Enable Rx again. */ 2708 NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE); 2709 CSR_BARRIER_WRITE_4(sc, NGE_CSR); 2710 2711 /* Configure WOL events. */ 2712 reg = 0; 2713 if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0) 2714 reg |= NGE_WOLCSR_WAKE_ON_UNICAST; 2715 if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0) 2716 reg |= NGE_WOLCSR_WAKE_ON_MULTICAST; 2717 if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) 2718 reg |= NGE_WOLCSR_WAKE_ON_MAGICPKT; 2719 CSR_WRITE_4(sc, NGE_WOLCSR, reg); 2720 2721 /* Activate CLKRUN. */ 2722 reg = CSR_READ_4(sc, NGE_CLKRUN); 2723 reg |= NGE_CLKRUN_PMEENB | NGE_CLNRUN_CLKRUN_ENB; 2724 CSR_WRITE_4(sc, NGE_CLKRUN, reg); 2725 } 2726 2727 /* Request PME. */ 2728 pmstat = pci_read_config(sc->nge_dev, pmc + PCIR_POWER_STATUS, 2); 2729 pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); 2730 if ((ifp->if_capenable & IFCAP_WOL) != 0) 2731 pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; 2732 pci_write_config(sc->nge_dev, pmc + PCIR_POWER_STATUS, pmstat, 2); |
2065} 2066 2067/* 2068 * Stop all chip I/O so that the kernel's probe routines don't 2069 * get confused by errant DMAs when rebooting. 2070 */ 2071static int 2072nge_shutdown(device_t dev) 2073{ | 2733} 2734 2735/* 2736 * Stop all chip I/O so that the kernel's probe routines don't 2737 * get confused by errant DMAs when rebooting. 2738 */ 2739static int 2740nge_shutdown(device_t dev) 2741{ |
2742 2743 return (nge_suspend(dev)); 2744} 2745 2746static int 2747nge_suspend(device_t dev) 2748{ |
|
2074 struct nge_softc *sc; 2075 2076 sc = device_get_softc(dev); 2077 2078 NGE_LOCK(sc); | 2749 struct nge_softc *sc; 2750 2751 sc = device_get_softc(dev); 2752 2753 NGE_LOCK(sc); |
2079 nge_reset(sc); | |
2080 nge_stop(sc); | 2754 nge_stop(sc); |
2755 nge_wol(sc); 2756 sc->nge_flags |= NGE_FLAG_SUSPENDED; |
|
2081 NGE_UNLOCK(sc); 2082 2083 return (0); 2084} | 2757 NGE_UNLOCK(sc); 2758 2759 return (0); 2760} |
2761 2762static int 2763nge_resume(device_t dev) 2764{ 2765 struct nge_softc *sc; 2766 struct ifnet *ifp; 2767 uint16_t pmstat; 2768 int pmc; 2769 2770 sc = device_get_softc(dev); 2771 2772 NGE_LOCK(sc); 2773 ifp = sc->nge_ifp; 2774 if (pci_find_extcap(sc->nge_dev, PCIY_PMG, &pmc) == 0) { 2775 /* Disable PME and clear PME status. */ 2776 pmstat = pci_read_config(sc->nge_dev, 2777 pmc + PCIR_POWER_STATUS, 2); 2778 if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) { 2779 pmstat &= ~PCIM_PSTAT_PMEENABLE; 2780 pci_write_config(sc->nge_dev, 2781 pmc + PCIR_POWER_STATUS, pmstat, 2); 2782 } 2783 } 2784 if (ifp->if_flags & IFF_UP) { 2785 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 2786 nge_init_locked(sc); 2787 } 2788 2789 sc->nge_flags &= ~NGE_FLAG_SUSPENDED; 2790 NGE_UNLOCK(sc); 2791 2792 return (0); 2793} 2794 2795#define NGE_SYSCTL_STAT_ADD32(c, h, n, p, d) \ 2796 SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d) 2797 2798static void 2799nge_sysctl_node(struct nge_softc *sc) 2800{ 2801 struct sysctl_ctx_list *ctx; 2802 struct sysctl_oid_list *child, *parent; 2803 struct sysctl_oid *tree; 2804 struct nge_stats *stats; 2805 int error; 2806 2807 ctx = device_get_sysctl_ctx(sc->nge_dev); 2808 child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->nge_dev)); 2809 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "int_holdoff", 2810 CTLTYPE_INT | CTLFLAG_RW, &sc->nge_int_holdoff, 0, 2811 sysctl_hw_nge_int_holdoff, "I", "NGE interrupt moderation"); 2812 /* Pull in device tunables. 
*/ 2813 sc->nge_int_holdoff = NGE_INT_HOLDOFF_DEFAULT; 2814 error = resource_int_value(device_get_name(sc->nge_dev), 2815 device_get_unit(sc->nge_dev), "int_holdoff", &sc->nge_int_holdoff); 2816 if (error == 0) { 2817 if (sc->nge_int_holdoff < NGE_INT_HOLDOFF_MIN || 2818 sc->nge_int_holdoff > NGE_INT_HOLDOFF_MAX ) { 2819 device_printf(sc->nge_dev, 2820 "int_holdoff value out of range; " 2821 "using default: %d(%d us)\n", 2822 NGE_INT_HOLDOFF_DEFAULT, 2823 NGE_INT_HOLDOFF_DEFAULT * 100); 2824 sc->nge_int_holdoff = NGE_INT_HOLDOFF_DEFAULT; 2825 } 2826 } 2827 2828 stats = &sc->nge_stats; 2829 tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD, 2830 NULL, "NGE statistics"); 2831 parent = SYSCTL_CHILDREN(tree); 2832 2833 /* Rx statistics. */ 2834 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD, 2835 NULL, "Rx MAC statistics"); 2836 child = SYSCTL_CHILDREN(tree); 2837 NGE_SYSCTL_STAT_ADD32(ctx, child, "pkts_errs", 2838 &stats->rx_pkts_errs, 2839 "Packet errors including both wire errors and FIFO overruns"); 2840 NGE_SYSCTL_STAT_ADD32(ctx, child, "crc_errs", 2841 &stats->rx_crc_errs, "CRC errors"); 2842 NGE_SYSCTL_STAT_ADD32(ctx, child, "fifo_oflows", 2843 &stats->rx_fifo_oflows, "FIFO overflows"); 2844 NGE_SYSCTL_STAT_ADD32(ctx, child, "align_errs", 2845 &stats->rx_align_errs, "Frame alignment errors"); 2846 NGE_SYSCTL_STAT_ADD32(ctx, child, "sym_errs", 2847 &stats->rx_sym_errs, "One or more symbol errors"); 2848 NGE_SYSCTL_STAT_ADD32(ctx, child, "pkts_jumbos", 2849 &stats->rx_pkts_jumbos, 2850 "Packets received with length greater than 1518 bytes"); 2851 NGE_SYSCTL_STAT_ADD32(ctx, child, "len_errs", 2852 &stats->rx_len_errs, "In Range Length errors"); 2853 NGE_SYSCTL_STAT_ADD32(ctx, child, "unctl_frames", 2854 &stats->rx_unctl_frames, "Control frames with unsupported opcode"); 2855 NGE_SYSCTL_STAT_ADD32(ctx, child, "pause", 2856 &stats->rx_pause, "Pause frames"); 2857 2858 /* Tx statistics. 
*/ 2859 tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD, 2860 NULL, "Tx MAC statistics"); 2861 child = SYSCTL_CHILDREN(tree); 2862 NGE_SYSCTL_STAT_ADD32(ctx, child, "pause", 2863 &stats->tx_pause, "Pause frames"); 2864 NGE_SYSCTL_STAT_ADD32(ctx, child, "seq_errs", 2865 &stats->tx_seq_errs, 2866 "Loss of collision heartbeat during transmission"); 2867} 2868 2869#undef NGE_SYSCTL_STAT_ADD32 2870 2871static int 2872sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high) 2873{ 2874 int error, value; 2875 2876 if (arg1 == NULL) 2877 return (EINVAL); 2878 value = *(int *)arg1; 2879 error = sysctl_handle_int(oidp, &value, 0, req); 2880 if (error != 0 || req->newptr == NULL) 2881 return (error); 2882 if (value < low || value > high) 2883 return (EINVAL); 2884 *(int *)arg1 = value; 2885 2886 return (0); 2887} 2888 2889static int 2890sysctl_hw_nge_int_holdoff(SYSCTL_HANDLER_ARGS) 2891{ 2892 2893 return (sysctl_int_range(oidp, arg1, arg2, req, NGE_INT_HOLDOFF_MIN, 2894 NGE_INT_HOLDOFF_MAX)); 2895} |
|