if_dc.c revision 129878
162587Sitojun/* 262587Sitojun * Copyright (c) 1997, 1998, 1999 362587Sitojun * Bill Paul <wpaul@ee.columbia.edu>. All rights reserved. 453541Sshin * 553541Sshin * Redistribution and use in source and binary forms, with or without 653541Sshin * modification, are permitted provided that the following conditions 753541Sshin * are met: 853541Sshin * 1. Redistributions of source code must retain the above copyright 953541Sshin * notice, this list of conditions and the following disclaimer. 1053541Sshin * 2. Redistributions in binary form must reproduce the above copyright 1153541Sshin * notice, this list of conditions and the following disclaimer in the 1253541Sshin * documentation and/or other materials provided with the distribution. 1353541Sshin * 3. All advertising materials mentioning features or use of this software 1453541Sshin * must display the following acknowledgement: 1553541Sshin * This product includes software developed by Bill Paul. 1653541Sshin * 4. Neither the name of the author nor the names of any co-contributors 1753541Sshin * may be used to endorse or promote products derived from this software 1853541Sshin * without specific prior written permission. 1953541Sshin * 2053541Sshin * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 2153541Sshin * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 2253541Sshin * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 2353541Sshin * ARE DISCLAIMED. 
IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 2453541Sshin * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 2553541Sshin * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 2653541Sshin * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 2753541Sshin * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 2853541Sshin * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 2953541Sshin * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 3053541Sshin * THE POSSIBILITY OF SUCH DAMAGE. 3153541Sshin */ 3253541Sshin 3353541Sshin#include <sys/cdefs.h> 3453541Sshin__FBSDID("$FreeBSD: head/sys/dev/dc/if_dc.c 129878 2004-05-30 20:00:41Z phk $"); 3553541Sshin 3653541Sshin/* 3753541Sshin * DEC "tulip" clone ethernet driver. Supports the DEC/Intel 21143 3853541Sshin * series chips and several workalikes including the following: 3953541Sshin * 4053541Sshin * Macronix 98713/98715/98725/98727/98732 PMAC (www.macronix.com) 4153541Sshin * Macronix/Lite-On 82c115 PNIC II (www.macronix.com) 4253541Sshin * Lite-On 82c168/82c169 PNIC (www.litecom.com) 4353541Sshin * ASIX Electronics AX88140A (www.asix.com.tw) 4453541Sshin * ASIX Electronics AX88141 (www.asix.com.tw) 4553541Sshin * ADMtek AL981 (www.admtek.com.tw) 4653541Sshin * ADMtek AN985 (www.admtek.com.tw) 4753541Sshin * Netgear FA511 (www.netgear.com) Appears to be rebadged ADMTek AN985 4853541Sshin * Davicom DM9100, DM9102, DM9102A (www.davicom8.com) 4953541Sshin * Accton EN1217 (www.accton.com) 5053541Sshin * Xircom X3201 (www.xircom.com) 5153541Sshin * Abocom FE2500 5253541Sshin * Conexant LANfinity (www.conexant.com) 5353541Sshin * 3Com OfficeConnect 10/100B 3CSOHO100B (www.3com.com) 5453541Sshin * 5553541Sshin * Datasheets for the 21143 are available at developer.intel.com. 5653541Sshin * Datasheets for the clone parts can be found at their respective sites. 
5753541Sshin * (Except for the PNIC; see www.freebsd.org/~wpaul/PNIC/pnic.ps.gz.) 5853541Sshin * The PNIC II is essentially a Macronix 98715A chip; the only difference 5953541Sshin * worth noting is that its multicast hash table is only 128 bits wide 6053541Sshin * instead of 512. 6153541Sshin * 6253541Sshin * Written by Bill Paul <wpaul@ee.columbia.edu> 6353541Sshin * Electrical Engineering Department 6453541Sshin * Columbia University, New York City 6553541Sshin */ 6653541Sshin/* 6753541Sshin * The Intel 21143 is the successor to the DEC 21140. It is basically 6853541Sshin * the same as the 21140 but with a few new features. The 21143 supports 6953541Sshin * three kinds of media attachments: 7053541Sshin * 7153541Sshin * o MII port, for 10Mbps and 100Mbps support and NWAY 7253541Sshin * autonegotiation provided by an external PHY. 7353541Sshin * o SYM port, for symbol mode 100Mbps support. 7453541Sshin * o 10baseT port. 7553541Sshin * o AUI/BNC port. 7653541Sshin * 7753541Sshin * The 100Mbps SYM port and 10baseT port can be used together in 7853541Sshin * combination with the internal NWAY support to create a 10/100 7953541Sshin * autosensing configuration. 8053541Sshin * 8153541Sshin * Note that not all tulip workalikes are handled in this driver: we only 8253541Sshin * deal with those which are relatively well behaved. The Winbond is 8353541Sshin * handled separately due to its different register offsets and the 8453541Sshin * special handling needed for its various bugs. The PNIC is handled 8553541Sshin * here, but I'm not thrilled about it. 8653541Sshin * 8753541Sshin * All of the workalike chips use some form of MII transceiver support 8853541Sshin * with the exception of the Macronix chips, which also have a SYM port. 8953541Sshin * The ASIX AX88140A is also documented to have a SYM port, but all 9053541Sshin * the cards I've seen use an MII transceiver, probably because the 9153541Sshin * AX88140A doesn't support internal NWAY. 
9262587Sitojun */ 9353541Sshin 9453541Sshin#include <sys/param.h> 9562587Sitojun#include <sys/endian.h> 9653541Sshin#include <sys/systm.h> 9753541Sshin#include <sys/sockio.h> 9853541Sshin#include <sys/mbuf.h> 9953541Sshin#include <sys/malloc.h> 10053541Sshin#include <sys/kernel.h> 10153541Sshin#include <sys/module.h> 10253541Sshin#include <sys/socket.h> 10353541Sshin#include <sys/sysctl.h> 10462587Sitojun 10553541Sshin#include <net/if.h> 10653541Sshin#include <net/if_arp.h> 10753541Sshin#include <net/ethernet.h> 10853541Sshin#include <net/if_dl.h> 10953541Sshin#include <net/if_media.h> 11053541Sshin#include <net/if_types.h> 11162587Sitojun#include <net/if_vlan_var.h> 11253541Sshin 11353541Sshin#include <net/bpf.h> 11453541Sshin 11553541Sshin#include <machine/bus_pio.h> 11653541Sshin#include <machine/bus_memio.h> 11753541Sshin#include <machine/bus.h> 11853541Sshin#include <machine/resource.h> 11953541Sshin#include <sys/bus.h> 12053541Sshin#include <sys/rman.h> 12153541Sshin 12253541Sshin#include <dev/mii/mii.h> 12353541Sshin#include <dev/mii/miivar.h> 12453541Sshin 12553541Sshin#include <dev/pci/pcireg.h> 12653541Sshin#include <dev/pci/pcivar.h> 12753541Sshin 12853541Sshin#define DC_USEIOSPACE 12953541Sshin#ifdef __alpha__ 13053541Sshin#define SRM_MEDIA 13153541Sshin#endif 13253541Sshin 13353541Sshin#include <pci/if_dcreg.h> 13453541Sshin 13553541Sshin#ifdef __sparc64__ 13653541Sshin#include <dev/ofw/openfirm.h> 13753541Sshin#include <machine/ofw_machdep.h> 13853541Sshin#endif 13953541Sshin 14053541SshinMODULE_DEPEND(dc, pci, 1, 1, 1); 14153541SshinMODULE_DEPEND(dc, ether, 1, 1, 1); 14253541SshinMODULE_DEPEND(dc, miibus, 1, 1, 1); 14353541Sshin 14453541Sshin/* "controller miibus0" required. See GENERIC if you get errors here. */ 14553541Sshin#include "miibus_if.h" 14653541Sshin 14753541Sshin/* 14853541Sshin * Various supported device vendors/types and their names. 
14953541Sshin */ 15053541Sshinstatic struct dc_type dc_devs[] = { 15153541Sshin { DC_VENDORID_DEC, DC_DEVICEID_21143, 15253541Sshin "Intel 21143 10/100BaseTX" }, 15353541Sshin { DC_VENDORID_DAVICOM, DC_DEVICEID_DM9009, 15453541Sshin "Davicom DM9009 10/100BaseTX" }, 15553541Sshin { DC_VENDORID_DAVICOM, DC_DEVICEID_DM9100, 15653541Sshin "Davicom DM9100 10/100BaseTX" }, 15753541Sshin { DC_VENDORID_DAVICOM, DC_DEVICEID_DM9102, 15853541Sshin "Davicom DM9102 10/100BaseTX" }, 15953541Sshin { DC_VENDORID_DAVICOM, DC_DEVICEID_DM9102, 16053541Sshin "Davicom DM9102A 10/100BaseTX" }, 16153541Sshin { DC_VENDORID_ADMTEK, DC_DEVICEID_AL981, 16253541Sshin "ADMtek AL981 10/100BaseTX" }, 16353541Sshin { DC_VENDORID_ADMTEK, DC_DEVICEID_AN985, 16453541Sshin "ADMtek AN985 10/100BaseTX" }, 16553541Sshin { DC_VENDORID_ADMTEK, DC_DEVICEID_ADM9511, 16653541Sshin "ADMtek ADM9511 10/100BaseTX" }, 16753541Sshin { DC_VENDORID_ADMTEK, DC_DEVICEID_ADM9513, 16853541Sshin "ADMtek ADM9513 10/100BaseTX" }, 16953541Sshin { DC_VENDORID_ADMTEK, DC_DEVICEID_FA511, 17053541Sshin "Netgear FA511 10/100BaseTX" }, 17153541Sshin { DC_VENDORID_ASIX, DC_DEVICEID_AX88140A, 17253541Sshin "ASIX AX88140A 10/100BaseTX" }, 17353541Sshin { DC_VENDORID_ASIX, DC_DEVICEID_AX88140A, 17453541Sshin "ASIX AX88141 10/100BaseTX" }, 17553541Sshin { DC_VENDORID_MX, DC_DEVICEID_98713, 17653541Sshin "Macronix 98713 10/100BaseTX" }, 17753541Sshin { DC_VENDORID_MX, DC_DEVICEID_98713, 17853541Sshin "Macronix 98713A 10/100BaseTX" }, 17953541Sshin { DC_VENDORID_CP, DC_DEVICEID_98713_CP, 18053541Sshin "Compex RL100-TX 10/100BaseTX" }, 18153541Sshin { DC_VENDORID_CP, DC_DEVICEID_98713_CP, 18253541Sshin "Compex RL100-TX 10/100BaseTX" }, 18353541Sshin { DC_VENDORID_MX, DC_DEVICEID_987x5, 18453541Sshin "Macronix 98715/98715A 10/100BaseTX" }, 18553541Sshin { DC_VENDORID_MX, DC_DEVICEID_987x5, 18653541Sshin "Macronix 98715AEC-C 10/100BaseTX" }, 18753541Sshin { DC_VENDORID_MX, DC_DEVICEID_987x5, 18853541Sshin "Macronix 98725 10/100BaseTX" }, 
18953541Sshin { DC_VENDORID_MX, DC_DEVICEID_98727, 19053541Sshin "Macronix 98727/98732 10/100BaseTX" }, 19153541Sshin { DC_VENDORID_LO, DC_DEVICEID_82C115, 19253541Sshin "LC82C115 PNIC II 10/100BaseTX" }, 19353541Sshin { DC_VENDORID_LO, DC_DEVICEID_82C168, 19453541Sshin "82c168 PNIC 10/100BaseTX" }, 19553541Sshin { DC_VENDORID_LO, DC_DEVICEID_82C168, 19653541Sshin "82c169 PNIC 10/100BaseTX" }, 19753541Sshin { DC_VENDORID_ACCTON, DC_DEVICEID_EN1217, 19853541Sshin "Accton EN1217 10/100BaseTX" }, 19953541Sshin { DC_VENDORID_ACCTON, DC_DEVICEID_EN2242, 20053541Sshin "Accton EN2242 MiniPCI 10/100BaseTX" }, 20153541Sshin { DC_VENDORID_XIRCOM, DC_DEVICEID_X3201, 20253541Sshin "Xircom X3201 10/100BaseTX" }, 20353541Sshin { DC_VENDORID_ABOCOM, DC_DEVICEID_FE2500, 20453541Sshin "Abocom FE2500 10/100BaseTX" }, 20553541Sshin { DC_VENDORID_ABOCOM, DC_DEVICEID_FE2500MX, 20653541Sshin "Abocom FE2500MX 10/100BaseTX" }, 20753541Sshin { DC_VENDORID_CONEXANT, DC_DEVICEID_RS7112, 20853541Sshin "Conexant LANfinity MiniPCI 10/100BaseTX" }, 20953541Sshin { DC_VENDORID_HAWKING, DC_DEVICEID_HAWKING_PN672TX, 21053541Sshin "Hawking CB102 CardBus 10/100" }, 21153541Sshin { DC_VENDORID_PLANEX, DC_DEVICEID_FNW3602T, 21253541Sshin "PlaneX FNW-3602-T CardBus 10/100" }, 21353541Sshin { DC_VENDORID_3COM, DC_DEVICEID_3CSOHOB, 21453541Sshin "3Com OfficeConnect 10/100B" }, 21553541Sshin { DC_VENDORID_MICROSOFT, DC_DEVICEID_MSMN120, 21653541Sshin "Microsoft MN-120 CardBus 10/100" }, 21753541Sshin { DC_VENDORID_MICROSOFT, DC_DEVICEID_MSMN130, 21853541Sshin "Microsoft MN-130 10/100" }, 21953541Sshin { DC_VENDORID_MICROSOFT, DC_DEVICEID_MSMN130_FAKE, 22053541Sshin "Microsoft MN-130 10/100" }, 22153541Sshin { 0, 0, NULL } 22253541Sshin}; 22353541Sshin 22453541Sshinstatic int dc_probe (device_t); 22553541Sshinstatic int dc_attach (device_t); 22653541Sshinstatic int dc_detach (device_t); 22753541Sshinstatic int dc_suspend (device_t); 22853541Sshinstatic int dc_resume (device_t); 22953541Sshin#ifndef 
BURN_BRIDGES 23053541Sshinstatic void dc_acpi (device_t); 23153541Sshin#endif 23253541Sshinstatic struct dc_type *dc_devtype (device_t); 23353541Sshinstatic int dc_newbuf (struct dc_softc *, int, int); 23453541Sshinstatic int dc_encap (struct dc_softc *, struct mbuf **); 23553541Sshinstatic void dc_pnic_rx_bug_war (struct dc_softc *, int); 23653541Sshinstatic int dc_rx_resync (struct dc_softc *); 23753541Sshinstatic void dc_rxeof (struct dc_softc *); 23853541Sshinstatic void dc_txeof (struct dc_softc *); 23953541Sshinstatic void dc_tick (void *); 24053541Sshinstatic void dc_tx_underrun (struct dc_softc *); 24153541Sshinstatic void dc_intr (void *); 24253541Sshinstatic void dc_start (struct ifnet *); 24353541Sshinstatic int dc_ioctl (struct ifnet *, u_long, caddr_t); 24462604Sitojunstatic void dc_init (void *); 24562604Sitojunstatic void dc_stop (struct dc_softc *); 24653541Sshinstatic void dc_watchdog (struct ifnet *); 24753541Sshinstatic void dc_shutdown (device_t); 24862604Sitojunstatic int dc_ifmedia_upd (struct ifnet *); 24953541Sshinstatic void dc_ifmedia_sts (struct ifnet *, struct ifmediareq *); 25053541Sshin 25153541Sshinstatic void dc_delay (struct dc_softc *); 25253541Sshinstatic void dc_eeprom_idle (struct dc_softc *); 25362604Sitojunstatic void dc_eeprom_putbyte (struct dc_softc *, int); 25453541Sshinstatic void dc_eeprom_getword (struct dc_softc *, int, u_int16_t *); 25553541Sshinstatic void dc_eeprom_getword_pnic 25653541Sshin (struct dc_softc *, int, u_int16_t *); 25753541Sshinstatic void dc_eeprom_getword_xircom 25862604Sitojun (struct dc_softc *, int, u_int16_t *); 25953541Sshinstatic void dc_eeprom_width (struct dc_softc *); 26053541Sshinstatic void dc_read_eeprom (struct dc_softc *, caddr_t, int, int, int); 26153541Sshin 26253541Sshinstatic void dc_mii_writebit (struct dc_softc *, int); 26353541Sshinstatic int dc_mii_readbit (struct dc_softc *); 26453541Sshinstatic void dc_mii_sync (struct dc_softc *); 26553541Sshinstatic void dc_mii_send (struct 
dc_softc *, u_int32_t, int); 26653541Sshinstatic int dc_mii_readreg (struct dc_softc *, struct dc_mii_frame *); 26753541Sshinstatic int dc_mii_writereg (struct dc_softc *, struct dc_mii_frame *); 26853541Sshinstatic int dc_miibus_readreg (device_t, int, int); 26953541Sshinstatic int dc_miibus_writereg (device_t, int, int, int); 27053541Sshinstatic void dc_miibus_statchg (device_t); 27153541Sshinstatic void dc_miibus_mediainit (device_t); 27253541Sshin 27353541Sshinstatic void dc_setcfg (struct dc_softc *, int); 27453541Sshinstatic uint32_t dc_mchash_le (struct dc_softc *, const uint8_t *); 27553541Sshinstatic uint32_t dc_mchash_be (const uint8_t *); 27653541Sshinstatic void dc_setfilt_21143 (struct dc_softc *); 27753541Sshinstatic void dc_setfilt_asix (struct dc_softc *); 27853541Sshinstatic void dc_setfilt_admtek (struct dc_softc *); 27953541Sshinstatic void dc_setfilt_xircom (struct dc_softc *); 28053541Sshin 28153541Sshinstatic void dc_setfilt (struct dc_softc *); 28253541Sshin 28353541Sshinstatic void dc_reset (struct dc_softc *); 28453541Sshinstatic int dc_list_rx_init (struct dc_softc *); 28553541Sshinstatic int dc_list_tx_init (struct dc_softc *); 28653541Sshin 28753541Sshinstatic void dc_read_srom (struct dc_softc *, int); 28853541Sshinstatic void dc_parse_21143_srom (struct dc_softc *); 28953541Sshinstatic void dc_decode_leaf_sia (struct dc_softc *, struct dc_eblock_sia *); 29053541Sshinstatic void dc_decode_leaf_mii (struct dc_softc *, struct dc_eblock_mii *); 29153541Sshinstatic void dc_decode_leaf_sym (struct dc_softc *, struct dc_eblock_sym *); 29253541Sshinstatic void dc_apply_fixup (struct dc_softc *, int); 29353541Sshin 29453541Sshinstatic void dc_dma_map_txbuf (void *, bus_dma_segment_t *, int, bus_size_t, 29553541Sshin int); 29653541Sshinstatic void dc_dma_map_rxbuf (void *, bus_dma_segment_t *, int, bus_size_t, 29753541Sshin int); 29853541Sshin 29953541Sshin#ifdef DC_USEIOSPACE 30053541Sshin#define DC_RES SYS_RES_IOPORT 30153541Sshin#define 
DC_RID DC_PCI_CFBIO 30253541Sshin#else 30353541Sshin#define DC_RES SYS_RES_MEMORY 30453541Sshin#define DC_RID DC_PCI_CFBMA 30553541Sshin#endif 30653541Sshin 30753541Sshinstatic device_method_t dc_methods[] = { 30853541Sshin /* Device interface */ 30953541Sshin DEVMETHOD(device_probe, dc_probe), 31053541Sshin DEVMETHOD(device_attach, dc_attach), 31153541Sshin DEVMETHOD(device_detach, dc_detach), 31253541Sshin DEVMETHOD(device_suspend, dc_suspend), 31353541Sshin DEVMETHOD(device_resume, dc_resume), 31453541Sshin DEVMETHOD(device_shutdown, dc_shutdown), 31553541Sshin 31653541Sshin /* bus interface */ 31753541Sshin DEVMETHOD(bus_print_child, bus_generic_print_child), 31853541Sshin DEVMETHOD(bus_driver_added, bus_generic_driver_added), 31953541Sshin 32053541Sshin /* MII interface */ 32153541Sshin DEVMETHOD(miibus_readreg, dc_miibus_readreg), 32253541Sshin DEVMETHOD(miibus_writereg, dc_miibus_writereg), 32353541Sshin DEVMETHOD(miibus_statchg, dc_miibus_statchg), 32453541Sshin DEVMETHOD(miibus_mediainit, dc_miibus_mediainit), 32553541Sshin 32653541Sshin { 0, 0 } 32753541Sshin}; 32853541Sshin 32953541Sshinstatic driver_t dc_driver = { 33053541Sshin "dc", 33153541Sshin dc_methods, 33253541Sshin sizeof(struct dc_softc) 33353541Sshin}; 33453541Sshin 33553541Sshinstatic devclass_t dc_devclass; 33653541Sshin#ifdef __i386__ 33753541Sshinstatic int dc_quick = 1; 33853541SshinSYSCTL_INT(_hw, OID_AUTO, dc_quick, CTLFLAG_RW, &dc_quick, 0, 33953541Sshin "do not m_devget() in dc driver"); 34053541Sshin#endif 34153541Sshin 34253541SshinDRIVER_MODULE(dc, cardbus, dc_driver, dc_devclass, 0, 0); 34353541SshinDRIVER_MODULE(dc, pci, dc_driver, dc_devclass, 0, 0); 34453541SshinDRIVER_MODULE(miibus, dc, miibus_driver, miibus_devclass, 0, 0); 34553541Sshin 34653541Sshin#define DC_SETBIT(sc, reg, x) \ 34753541Sshin CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x)) 34853541Sshin 34953541Sshin#define DC_CLRBIT(sc, reg, x) \ 35053541Sshin CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x)) 
35153541Sshin 35253541Sshin#define SIO_SET(x) DC_SETBIT(sc, DC_SIO, (x)) 35353541Sshin#define SIO_CLR(x) DC_CLRBIT(sc, DC_SIO, (x)) 35453541Sshin 35553541Sshin#define IS_MPSAFE 0 35653541Sshin 35753541Sshinstatic void 35853541Sshindc_delay(struct dc_softc *sc) 35953541Sshin{ 36053541Sshin int idx; 36153541Sshin 36253541Sshin for (idx = (300 / 33) + 1; idx > 0; idx--) 36353541Sshin CSR_READ_4(sc, DC_BUSCTL); 36453541Sshin} 36553541Sshin 36653541Sshinstatic void 36753541Sshindc_eeprom_width(struct dc_softc *sc) 36853541Sshin{ 36953541Sshin int i; 37053541Sshin 37153541Sshin /* Force EEPROM to idle state. */ 37253541Sshin dc_eeprom_idle(sc); 37353541Sshin 37453541Sshin /* Enter EEPROM access mode. */ 37553541Sshin CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL); 37653541Sshin dc_delay(sc); 37753541Sshin DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ); 37853541Sshin dc_delay(sc); 37953541Sshin DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 38053541Sshin dc_delay(sc); 38153541Sshin DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS); 38253541Sshin dc_delay(sc); 38353541Sshin 38453541Sshin for (i = 3; i--;) { 38553541Sshin if (6 & (1 << i)) 38653541Sshin DC_SETBIT(sc, DC_SIO, DC_SIO_EE_DATAIN); 38753541Sshin else 38853541Sshin DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_DATAIN); 38953541Sshin dc_delay(sc); 39053541Sshin DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK); 39153541Sshin dc_delay(sc); 39253541Sshin DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 39353541Sshin dc_delay(sc); 39453541Sshin } 39553541Sshin 39653541Sshin for (i = 1; i <= 12; i++) { 39753541Sshin DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK); 39853541Sshin dc_delay(sc); 39953541Sshin if (!(CSR_READ_4(sc, DC_SIO) & DC_SIO_EE_DATAOUT)) { 40053541Sshin DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 40153541Sshin dc_delay(sc); 40253541Sshin break; 40353541Sshin } 40453541Sshin DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 40553541Sshin dc_delay(sc); 40653541Sshin } 40753541Sshin 40853541Sshin /* Turn off EEPROM access mode. 
*/ 40953541Sshin dc_eeprom_idle(sc); 41053541Sshin 41153541Sshin if (i < 4 || i > 12) 41253541Sshin sc->dc_romwidth = 6; 41353541Sshin else 41453541Sshin sc->dc_romwidth = i; 41553541Sshin 41653541Sshin /* Enter EEPROM access mode. */ 41753541Sshin CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL); 41853541Sshin dc_delay(sc); 41953541Sshin DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ); 42053541Sshin dc_delay(sc); 42153541Sshin DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 42253541Sshin dc_delay(sc); 42353541Sshin DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS); 42453541Sshin dc_delay(sc); 42553541Sshin 42653541Sshin /* Turn off EEPROM access mode. */ 42753541Sshin dc_eeprom_idle(sc); 42853541Sshin} 42953541Sshin 43053541Sshinstatic void 43153541Sshindc_eeprom_idle(struct dc_softc *sc) 43253541Sshin{ 43353541Sshin int i; 43453541Sshin 43553541Sshin CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL); 43653541Sshin dc_delay(sc); 43753541Sshin DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ); 43853541Sshin dc_delay(sc); 43953541Sshin DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 44053541Sshin dc_delay(sc); 44153541Sshin DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS); 44253541Sshin dc_delay(sc); 44353541Sshin 44453541Sshin for (i = 0; i < 25; i++) { 44553541Sshin DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 44653541Sshin dc_delay(sc); 44753541Sshin DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK); 44853541Sshin dc_delay(sc); 44953541Sshin } 45053541Sshin 45153541Sshin DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 45253541Sshin dc_delay(sc); 45353541Sshin DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CS); 45453541Sshin dc_delay(sc); 45553541Sshin CSR_WRITE_4(sc, DC_SIO, 0x00000000); 45653541Sshin} 45753541Sshin 45862587Sitojun/* 45962587Sitojun * Send a read command and address to the EEPROM, check for ACK. 
46062587Sitojun */ 46162587Sitojunstatic void 46262587Sitojundc_eeprom_putbyte(struct dc_softc *sc, int addr) 46362587Sitojun{ 46462587Sitojun int d, i; 46562587Sitojun 46662587Sitojun d = DC_EECMD_READ >> 6; 46762587Sitojun for (i = 3; i--; ) { 46862587Sitojun if (d & (1 << i)) 46962587Sitojun DC_SETBIT(sc, DC_SIO, DC_SIO_EE_DATAIN); 47062587Sitojun else 47162587Sitojun DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_DATAIN); 47262587Sitojun dc_delay(sc); 47362587Sitojun DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK); 47462587Sitojun dc_delay(sc); 47562587Sitojun DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 47653541Sshin dc_delay(sc); 47753541Sshin } 47853541Sshin 47953541Sshin /* 48053541Sshin * Feed in each bit and strobe the clock. 48153541Sshin */ 48253541Sshin for (i = sc->dc_romwidth; i--;) { 48353541Sshin if (addr & (1 << i)) { 48453541Sshin SIO_SET(DC_SIO_EE_DATAIN); 48553541Sshin } else { 48653541Sshin SIO_CLR(DC_SIO_EE_DATAIN); 48753541Sshin } 48853541Sshin dc_delay(sc); 48953541Sshin SIO_SET(DC_SIO_EE_CLK); 49053541Sshin dc_delay(sc); 49153541Sshin SIO_CLR(DC_SIO_EE_CLK); 49253541Sshin dc_delay(sc); 49353541Sshin } 49453541Sshin} 49553541Sshin 49653541Sshin/* 49753541Sshin * Read a word of data stored in the EEPROM at address 'addr.' 498 * The PNIC 82c168/82c169 has its own non-standard way to read 499 * the EEPROM. 500 */ 501static void 502dc_eeprom_getword_pnic(struct dc_softc *sc, int addr, u_int16_t *dest) 503{ 504 int i; 505 u_int32_t r; 506 507 CSR_WRITE_4(sc, DC_PN_SIOCTL, DC_PN_EEOPCODE_READ | addr); 508 509 for (i = 0; i < DC_TIMEOUT; i++) { 510 DELAY(1); 511 r = CSR_READ_4(sc, DC_SIO); 512 if (!(r & DC_PN_SIOCTL_BUSY)) { 513 *dest = (u_int16_t)(r & 0xFFFF); 514 return; 515 } 516 } 517} 518 519/* 520 * Read a word of data stored in the EEPROM at address 'addr.' 521 * The Xircom X3201 has its own non-standard way to read 522 * the EEPROM, too. 
523 */ 524static void 525dc_eeprom_getword_xircom(struct dc_softc *sc, int addr, u_int16_t *dest) 526{ 527 528 SIO_SET(DC_SIO_ROMSEL | DC_SIO_ROMCTL_READ); 529 530 addr *= 2; 531 CSR_WRITE_4(sc, DC_ROM, addr | 0x160); 532 *dest = (u_int16_t)CSR_READ_4(sc, DC_SIO) & 0xff; 533 addr += 1; 534 CSR_WRITE_4(sc, DC_ROM, addr | 0x160); 535 *dest |= ((u_int16_t)CSR_READ_4(sc, DC_SIO) & 0xff) << 8; 536 537 SIO_CLR(DC_SIO_ROMSEL | DC_SIO_ROMCTL_READ); 538} 539 540/* 541 * Read a word of data stored in the EEPROM at address 'addr.' 542 */ 543static void 544dc_eeprom_getword(struct dc_softc *sc, int addr, u_int16_t *dest) 545{ 546 int i; 547 u_int16_t word = 0; 548 549 /* Force EEPROM to idle state. */ 550 dc_eeprom_idle(sc); 551 552 /* Enter EEPROM access mode. */ 553 CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL); 554 dc_delay(sc); 555 DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ); 556 dc_delay(sc); 557 DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); 558 dc_delay(sc); 559 DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS); 560 dc_delay(sc); 561 562 /* 563 * Send address of word we want to read. 564 */ 565 dc_eeprom_putbyte(sc, addr); 566 567 /* 568 * Start reading bits from EEPROM. 569 */ 570 for (i = 0x8000; i; i >>= 1) { 571 SIO_SET(DC_SIO_EE_CLK); 572 dc_delay(sc); 573 if (CSR_READ_4(sc, DC_SIO) & DC_SIO_EE_DATAOUT) 574 word |= i; 575 dc_delay(sc); 576 SIO_CLR(DC_SIO_EE_CLK); 577 dc_delay(sc); 578 } 579 580 /* Turn off EEPROM access mode. */ 581 dc_eeprom_idle(sc); 582 583 *dest = word; 584} 585 586/* 587 * Read a sequence of words from the EEPROM. 
588 */ 589static void 590dc_read_eeprom(struct dc_softc *sc, caddr_t dest, int off, int cnt, int swap) 591{ 592 int i; 593 u_int16_t word = 0, *ptr; 594 595 for (i = 0; i < cnt; i++) { 596 if (DC_IS_PNIC(sc)) 597 dc_eeprom_getword_pnic(sc, off + i, &word); 598 else if (DC_IS_XIRCOM(sc)) 599 dc_eeprom_getword_xircom(sc, off + i, &word); 600 else 601 dc_eeprom_getword(sc, off + i, &word); 602 ptr = (u_int16_t *)(dest + (i * 2)); 603 if (swap) 604 *ptr = ntohs(word); 605 else 606 *ptr = word; 607 } 608} 609 610/* 611 * The following two routines are taken from the Macronix 98713 612 * Application Notes pp.19-21. 613 */ 614/* 615 * Write a bit to the MII bus. 616 */ 617static void 618dc_mii_writebit(struct dc_softc *sc, int bit) 619{ 620 621 if (bit) 622 CSR_WRITE_4(sc, DC_SIO, 623 DC_SIO_ROMCTL_WRITE | DC_SIO_MII_DATAOUT); 624 else 625 CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_WRITE); 626 627 DC_SETBIT(sc, DC_SIO, DC_SIO_MII_CLK); 628 DC_CLRBIT(sc, DC_SIO, DC_SIO_MII_CLK); 629} 630 631/* 632 * Read a bit from the MII bus. 633 */ 634static int 635dc_mii_readbit(struct dc_softc *sc) 636{ 637 638 CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_READ | DC_SIO_MII_DIR); 639 CSR_READ_4(sc, DC_SIO); 640 DC_SETBIT(sc, DC_SIO, DC_SIO_MII_CLK); 641 DC_CLRBIT(sc, DC_SIO, DC_SIO_MII_CLK); 642 if (CSR_READ_4(sc, DC_SIO) & DC_SIO_MII_DATAIN) 643 return (1); 644 645 return (0); 646} 647 648/* 649 * Sync the PHYs by setting data bit and strobing the clock 32 times. 650 */ 651static void 652dc_mii_sync(struct dc_softc *sc) 653{ 654 int i; 655 656 CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_WRITE); 657 658 for (i = 0; i < 32; i++) 659 dc_mii_writebit(sc, 1); 660} 661 662/* 663 * Clock a series of bits through the MII. 664 */ 665static void 666dc_mii_send(struct dc_softc *sc, u_int32_t bits, int cnt) 667{ 668 int i; 669 670 for (i = (0x1 << (cnt - 1)); i; i >>= 1) 671 dc_mii_writebit(sc, bits & i); 672} 673 674/* 675 * Read an PHY register through the MII. 
676 */ 677static int 678dc_mii_readreg(struct dc_softc *sc, struct dc_mii_frame *frame) 679{ 680 int i, ack; 681 682 DC_LOCK(sc); 683 684 /* 685 * Set up frame for RX. 686 */ 687 frame->mii_stdelim = DC_MII_STARTDELIM; 688 frame->mii_opcode = DC_MII_READOP; 689 frame->mii_turnaround = 0; 690 frame->mii_data = 0; 691 692 /* 693 * Sync the PHYs. 694 */ 695 dc_mii_sync(sc); 696 697 /* 698 * Send command/address info. 699 */ 700 dc_mii_send(sc, frame->mii_stdelim, 2); 701 dc_mii_send(sc, frame->mii_opcode, 2); 702 dc_mii_send(sc, frame->mii_phyaddr, 5); 703 dc_mii_send(sc, frame->mii_regaddr, 5); 704 705#ifdef notdef 706 /* Idle bit */ 707 dc_mii_writebit(sc, 1); 708 dc_mii_writebit(sc, 0); 709#endif 710 711 /* Check for ack. */ 712 ack = dc_mii_readbit(sc); 713 714 /* 715 * Now try reading data bits. If the ack failed, we still 716 * need to clock through 16 cycles to keep the PHY(s) in sync. 717 */ 718 if (ack) { 719 for (i = 0; i < 16; i++) 720 dc_mii_readbit(sc); 721 goto fail; 722 } 723 724 for (i = 0x8000; i; i >>= 1) { 725 if (!ack) { 726 if (dc_mii_readbit(sc)) 727 frame->mii_data |= i; 728 } 729 } 730 731fail: 732 733 dc_mii_writebit(sc, 0); 734 dc_mii_writebit(sc, 0); 735 736 DC_UNLOCK(sc); 737 738 if (ack) 739 return (1); 740 return (0); 741} 742 743/* 744 * Write to a PHY register through the MII. 745 */ 746static int 747dc_mii_writereg(struct dc_softc *sc, struct dc_mii_frame *frame) 748{ 749 750 DC_LOCK(sc); 751 /* 752 * Set up frame for TX. 753 */ 754 755 frame->mii_stdelim = DC_MII_STARTDELIM; 756 frame->mii_opcode = DC_MII_WRITEOP; 757 frame->mii_turnaround = DC_MII_TURNAROUND; 758 759 /* 760 * Sync the PHYs. 761 */ 762 dc_mii_sync(sc); 763 764 dc_mii_send(sc, frame->mii_stdelim, 2); 765 dc_mii_send(sc, frame->mii_opcode, 2); 766 dc_mii_send(sc, frame->mii_phyaddr, 5); 767 dc_mii_send(sc, frame->mii_regaddr, 5); 768 dc_mii_send(sc, frame->mii_turnaround, 2); 769 dc_mii_send(sc, frame->mii_data, 16); 770 771 /* Idle bit. 
*/ 772 dc_mii_writebit(sc, 0); 773 dc_mii_writebit(sc, 0); 774 775 DC_UNLOCK(sc); 776 777 return (0); 778} 779 780static int 781dc_miibus_readreg(device_t dev, int phy, int reg) 782{ 783 struct dc_mii_frame frame; 784 struct dc_softc *sc; 785 int i, rval, phy_reg = 0; 786 787 sc = device_get_softc(dev); 788 bzero(&frame, sizeof(frame)); 789 790 /* 791 * Note: both the AL981 and AN985 have internal PHYs, 792 * however the AL981 provides direct access to the PHY 793 * registers while the AN985 uses a serial MII interface. 794 * The AN985's MII interface is also buggy in that you 795 * can read from any MII address (0 to 31), but only address 1 796 * behaves normally. To deal with both cases, we pretend 797 * that the PHY is at MII address 1. 798 */ 799 if (DC_IS_ADMTEK(sc) && phy != DC_ADMTEK_PHYADDR) 800 return (0); 801 802 /* 803 * Note: the ukphy probes of the RS7112 report a PHY at 804 * MII address 0 (possibly HomePNA?) and 1 (ethernet) 805 * so we only respond to correct one. 806 */ 807 if (DC_IS_CONEXANT(sc) && phy != DC_CONEXANT_PHYADDR) 808 return (0); 809 810 if (sc->dc_pmode != DC_PMODE_MII) { 811 if (phy == (MII_NPHY - 1)) { 812 switch (reg) { 813 case MII_BMSR: 814 /* 815 * Fake something to make the probe 816 * code think there's a PHY here. 817 */ 818 return (BMSR_MEDIAMASK); 819 break; 820 case MII_PHYIDR1: 821 if (DC_IS_PNIC(sc)) 822 return (DC_VENDORID_LO); 823 return (DC_VENDORID_DEC); 824 break; 825 case MII_PHYIDR2: 826 if (DC_IS_PNIC(sc)) 827 return (DC_DEVICEID_82C168); 828 return (DC_DEVICEID_21143); 829 break; 830 default: 831 return (0); 832 break; 833 } 834 } else 835 return (0); 836 } 837 838 if (DC_IS_PNIC(sc)) { 839 CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_READ | 840 (phy << 23) | (reg << 18)); 841 for (i = 0; i < DC_TIMEOUT; i++) { 842 DELAY(1); 843 rval = CSR_READ_4(sc, DC_PN_MII); 844 if (!(rval & DC_PN_MII_BUSY)) { 845 rval &= 0xFFFF; 846 return (rval == 0xFFFF ? 
0 : rval); 847 } 848 } 849 return (0); 850 } 851 852 if (DC_IS_COMET(sc)) { 853 switch (reg) { 854 case MII_BMCR: 855 phy_reg = DC_AL_BMCR; 856 break; 857 case MII_BMSR: 858 phy_reg = DC_AL_BMSR; 859 break; 860 case MII_PHYIDR1: 861 phy_reg = DC_AL_VENID; 862 break; 863 case MII_PHYIDR2: 864 phy_reg = DC_AL_DEVID; 865 break; 866 case MII_ANAR: 867 phy_reg = DC_AL_ANAR; 868 break; 869 case MII_ANLPAR: 870 phy_reg = DC_AL_LPAR; 871 break; 872 case MII_ANER: 873 phy_reg = DC_AL_ANER; 874 break; 875 default: 876 printf("dc%d: phy_read: bad phy register %x\n", 877 sc->dc_unit, reg); 878 return (0); 879 break; 880 } 881 882 rval = CSR_READ_4(sc, phy_reg) & 0x0000FFFF; 883 884 if (rval == 0xFFFF) 885 return (0); 886 return (rval); 887 } 888 889 frame.mii_phyaddr = phy; 890 frame.mii_regaddr = reg; 891 if (sc->dc_type == DC_TYPE_98713) { 892 phy_reg = CSR_READ_4(sc, DC_NETCFG); 893 CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL); 894 } 895 dc_mii_readreg(sc, &frame); 896 if (sc->dc_type == DC_TYPE_98713) 897 CSR_WRITE_4(sc, DC_NETCFG, phy_reg); 898 899 return (frame.mii_data); 900} 901 902static int 903dc_miibus_writereg(device_t dev, int phy, int reg, int data) 904{ 905 struct dc_softc *sc; 906 struct dc_mii_frame frame; 907 int i, phy_reg = 0; 908 909 sc = device_get_softc(dev); 910 bzero(&frame, sizeof(frame)); 911 912 if (DC_IS_ADMTEK(sc) && phy != DC_ADMTEK_PHYADDR) 913 return (0); 914 915 if (DC_IS_CONEXANT(sc) && phy != DC_CONEXANT_PHYADDR) 916 return (0); 917 918 if (DC_IS_PNIC(sc)) { 919 CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_WRITE | 920 (phy << 23) | (reg << 10) | data); 921 for (i = 0; i < DC_TIMEOUT; i++) { 922 if (!(CSR_READ_4(sc, DC_PN_MII) & DC_PN_MII_BUSY)) 923 break; 924 } 925 return (0); 926 } 927 928 if (DC_IS_COMET(sc)) { 929 switch (reg) { 930 case MII_BMCR: 931 phy_reg = DC_AL_BMCR; 932 break; 933 case MII_BMSR: 934 phy_reg = DC_AL_BMSR; 935 break; 936 case MII_PHYIDR1: 937 phy_reg = DC_AL_VENID; 938 break; 939 case MII_PHYIDR2: 940 
phy_reg = DC_AL_DEVID; 941 break; 942 case MII_ANAR: 943 phy_reg = DC_AL_ANAR; 944 break; 945 case MII_ANLPAR: 946 phy_reg = DC_AL_LPAR; 947 break; 948 case MII_ANER: 949 phy_reg = DC_AL_ANER; 950 break; 951 default: 952 printf("dc%d: phy_write: bad phy register %x\n", 953 sc->dc_unit, reg); 954 return (0); 955 break; 956 } 957 958 CSR_WRITE_4(sc, phy_reg, data); 959 return (0); 960 } 961 962 frame.mii_phyaddr = phy; 963 frame.mii_regaddr = reg; 964 frame.mii_data = data; 965 966 if (sc->dc_type == DC_TYPE_98713) { 967 phy_reg = CSR_READ_4(sc, DC_NETCFG); 968 CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL); 969 } 970 dc_mii_writereg(sc, &frame); 971 if (sc->dc_type == DC_TYPE_98713) 972 CSR_WRITE_4(sc, DC_NETCFG, phy_reg); 973 974 return (0); 975} 976 977static void 978dc_miibus_statchg(device_t dev) 979{ 980 struct dc_softc *sc; 981 struct mii_data *mii; 982 struct ifmedia *ifm; 983 984 sc = device_get_softc(dev); 985 if (DC_IS_ADMTEK(sc)) 986 return; 987 988 mii = device_get_softc(sc->dc_miibus); 989 ifm = &mii->mii_media; 990 if (DC_IS_DAVICOM(sc) && 991 IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) { 992 dc_setcfg(sc, ifm->ifm_media); 993 sc->dc_if_media = ifm->ifm_media; 994 } else { 995 dc_setcfg(sc, mii->mii_media_active); 996 sc->dc_if_media = mii->mii_media_active; 997 } 998} 999 1000/* 1001 * Special support for DM9102A cards with HomePNA PHYs. Note: 1002 * with the Davicom DM9102A/DM9801 eval board that I have, it seems 1003 * to be impossible to talk to the management interface of the DM9801 1004 * PHY (its MDIO pin is not connected to anything). Consequently, 1005 * the driver has to just 'know' about the additional mode and deal 1006 * with it itself. 
*sigh* 1007 */ 1008static void 1009dc_miibus_mediainit(device_t dev) 1010{ 1011 struct dc_softc *sc; 1012 struct mii_data *mii; 1013 struct ifmedia *ifm; 1014 int rev; 1015 1016 rev = pci_read_config(dev, DC_PCI_CFRV, 4) & 0xFF; 1017 1018 sc = device_get_softc(dev); 1019 mii = device_get_softc(sc->dc_miibus); 1020 ifm = &mii->mii_media; 1021 1022 if (DC_IS_DAVICOM(sc) && rev >= DC_REVISION_DM9102A) 1023 ifmedia_add(ifm, IFM_ETHER | IFM_HPNA_1, 0, NULL); 1024} 1025 1026#define DC_POLY 0xEDB88320 1027#define DC_BITS_512 9 1028#define DC_BITS_128 7 1029#define DC_BITS_64 6 1030 1031static uint32_t 1032dc_mchash_le(struct dc_softc *sc, const uint8_t *addr) 1033{ 1034 uint32_t crc; 1035 int idx, bit; 1036 uint8_t data; 1037 1038 /* Compute CRC for the address value. */ 1039 crc = 0xFFFFFFFF; /* initial value */ 1040 1041 for (idx = 0; idx < 6; idx++) { 1042 for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1) 1043 crc = (crc >> 1) ^ (((crc ^ data) & 1) ? DC_POLY : 0); 1044 } 1045 1046 /* 1047 * The hash table on the PNIC II and the MX98715AEC-C/D/E 1048 * chips is only 128 bits wide. 1049 */ 1050 if (sc->dc_flags & DC_128BIT_HASH) 1051 return (crc & ((1 << DC_BITS_128) - 1)); 1052 1053 /* The hash table on the MX98715BEC is only 64 bits wide. */ 1054 if (sc->dc_flags & DC_64BIT_HASH) 1055 return (crc & ((1 << DC_BITS_64) - 1)); 1056 1057 /* Xircom's hash filtering table is different (read: weird) */ 1058 /* Xircom uses the LEAST significant bits */ 1059 if (DC_IS_XIRCOM(sc)) { 1060 if ((crc & 0x180) == 0x180) 1061 return ((crc & 0x0F) + (crc & 0x70) * 3 + (14 << 4)); 1062 else 1063 return ((crc & 0x1F) + ((crc >> 1) & 0xF0) * 3 + 1064 (12 << 4)); 1065 } 1066 1067 return (crc & ((1 << DC_BITS_512) - 1)); 1068} 1069 1070/* 1071 * Calculate CRC of a multicast group address, return the lower 6 bits. 
1072 */ 1073static uint32_t 1074dc_mchash_be(const uint8_t *addr) 1075{ 1076 uint32_t crc, carry; 1077 int idx, bit; 1078 uint8_t data; 1079 1080 /* Compute CRC for the address value. */ 1081 crc = 0xFFFFFFFF; /* initial value */ 1082 1083 for (idx = 0; idx < 6; idx++) { 1084 for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1) { 1085 carry = ((crc & 0x80000000) ? 1 : 0) ^ (data & 0x01); 1086 data >>= 1; 1087 crc <<= 1; 1088 if (carry) 1089 crc = (crc ^ 0x04c11db6) | carry; 1090 } 1091 } 1092 1093 /* Return the filter bit position. */ 1094 return ((crc >> 26) & 0x0000003F); 1095} 1096 1097/* 1098 * 21143-style RX filter setup routine. Filter programming is done by 1099 * downloading a special setup frame into the TX engine. 21143, Macronix, 1100 * PNIC, PNIC II and Davicom chips are programmed this way. 1101 * 1102 * We always program the chip using 'hash perfect' mode, i.e. one perfect 1103 * address (our node address) and a 512-bit hash filter for multicast 1104 * frames. We also sneak the broadcast address into the hash filter since 1105 * we need that too. 1106 */ 1107static void 1108dc_setfilt_21143(struct dc_softc *sc) 1109{ 1110 struct dc_desc *sframe; 1111 u_int32_t h, *sp; 1112 struct ifmultiaddr *ifma; 1113 struct ifnet *ifp; 1114 int i; 1115 1116 ifp = &sc->arpcom.ac_if; 1117 1118 i = sc->dc_cdata.dc_tx_prod; 1119 DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT); 1120 sc->dc_cdata.dc_tx_cnt++; 1121 sframe = &sc->dc_ldata->dc_tx_list[i]; 1122 sp = sc->dc_cdata.dc_sbuf; 1123 bzero(sp, DC_SFRAME_LEN); 1124 1125 sframe->dc_data = htole32(sc->dc_saddr); 1126 sframe->dc_ctl = htole32(DC_SFRAME_LEN | DC_TXCTL_SETUP | 1127 DC_TXCTL_TLINK | DC_FILTER_HASHPERF | DC_TXCTL_FINT); 1128 1129 sc->dc_cdata.dc_tx_chain[i] = (struct mbuf *)sc->dc_cdata.dc_sbuf; 1130 1131 /* If we want promiscuous mode, set the allframes bit. 
*/ 1132 if (ifp->if_flags & IFF_PROMISC) 1133 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); 1134 else 1135 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); 1136 1137 if (ifp->if_flags & IFF_ALLMULTI) 1138 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); 1139 else 1140 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); 1141 1142 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1143 if (ifma->ifma_addr->sa_family != AF_LINK) 1144 continue; 1145 h = dc_mchash_le(sc, 1146 LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); 1147 sp[h >> 4] |= htole32(1 << (h & 0xF)); 1148 } 1149 1150 if (ifp->if_flags & IFF_BROADCAST) { 1151 h = dc_mchash_le(sc, ifp->if_broadcastaddr); 1152 sp[h >> 4] |= htole32(1 << (h & 0xF)); 1153 } 1154 1155 /* Set our MAC address */ 1156 sp[39] = DC_SP_MAC(((u_int16_t *)sc->arpcom.ac_enaddr)[0]); 1157 sp[40] = DC_SP_MAC(((u_int16_t *)sc->arpcom.ac_enaddr)[1]); 1158 sp[41] = DC_SP_MAC(((u_int16_t *)sc->arpcom.ac_enaddr)[2]); 1159 1160 sframe->dc_status = htole32(DC_TXSTAT_OWN); 1161 CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF); 1162 1163 /* 1164 * The PNIC takes an exceedingly long time to process its 1165 * setup frame; wait 10ms after posting the setup frame 1166 * before proceeding, just so it has time to swallow its 1167 * medicine. 1168 */ 1169 DELAY(10000); 1170 1171 ifp->if_timer = 5; 1172} 1173 1174static void 1175dc_setfilt_admtek(struct dc_softc *sc) 1176{ 1177 struct ifnet *ifp; 1178 struct ifmultiaddr *ifma; 1179 int h = 0; 1180 u_int32_t hashes[2] = { 0, 0 }; 1181 1182 ifp = &sc->arpcom.ac_if; 1183 1184 /* Init our MAC address. */ 1185 CSR_WRITE_4(sc, DC_AL_PAR0, *(u_int32_t *)(&sc->arpcom.ac_enaddr[0])); 1186 CSR_WRITE_4(sc, DC_AL_PAR1, *(u_int32_t *)(&sc->arpcom.ac_enaddr[4])); 1187 1188 /* If we want promiscuous mode, set the allframes bit. 
*/ 1189 if (ifp->if_flags & IFF_PROMISC) 1190 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); 1191 else 1192 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); 1193 1194 if (ifp->if_flags & IFF_ALLMULTI) 1195 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); 1196 else 1197 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); 1198 1199 /* First, zot all the existing hash bits. */ 1200 CSR_WRITE_4(sc, DC_AL_MAR0, 0); 1201 CSR_WRITE_4(sc, DC_AL_MAR1, 0); 1202 1203 /* 1204 * If we're already in promisc or allmulti mode, we 1205 * don't have to bother programming the multicast filter. 1206 */ 1207 if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) 1208 return; 1209 1210 /* Now program new ones. */ 1211 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1212 if (ifma->ifma_addr->sa_family != AF_LINK) 1213 continue; 1214 if (DC_IS_CENTAUR(sc)) 1215 h = dc_mchash_le(sc, 1216 LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); 1217 else 1218 h = dc_mchash_be( 1219 LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); 1220 if (h < 32) 1221 hashes[0] |= (1 << h); 1222 else 1223 hashes[1] |= (1 << (h - 32)); 1224 } 1225 1226 CSR_WRITE_4(sc, DC_AL_MAR0, hashes[0]); 1227 CSR_WRITE_4(sc, DC_AL_MAR1, hashes[1]); 1228} 1229 1230static void 1231dc_setfilt_asix(struct dc_softc *sc) 1232{ 1233 struct ifnet *ifp; 1234 struct ifmultiaddr *ifma; 1235 int h = 0; 1236 u_int32_t hashes[2] = { 0, 0 }; 1237 1238 ifp = &sc->arpcom.ac_if; 1239 1240 /* Init our MAC address */ 1241 CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_PAR0); 1242 CSR_WRITE_4(sc, DC_AX_FILTDATA, 1243 *(u_int32_t *)(&sc->arpcom.ac_enaddr[0])); 1244 CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_PAR1); 1245 CSR_WRITE_4(sc, DC_AX_FILTDATA, 1246 *(u_int32_t *)(&sc->arpcom.ac_enaddr[4])); 1247 1248 /* If we want promiscuous mode, set the allframes bit. 
*/ 1249 if (ifp->if_flags & IFF_PROMISC) 1250 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); 1251 else 1252 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); 1253 1254 if (ifp->if_flags & IFF_ALLMULTI) 1255 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); 1256 else 1257 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); 1258 1259 /* 1260 * The ASIX chip has a special bit to enable reception 1261 * of broadcast frames. 1262 */ 1263 if (ifp->if_flags & IFF_BROADCAST) 1264 DC_SETBIT(sc, DC_NETCFG, DC_AX_NETCFG_RX_BROAD); 1265 else 1266 DC_CLRBIT(sc, DC_NETCFG, DC_AX_NETCFG_RX_BROAD); 1267 1268 /* first, zot all the existing hash bits */ 1269 CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR0); 1270 CSR_WRITE_4(sc, DC_AX_FILTDATA, 0); 1271 CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR1); 1272 CSR_WRITE_4(sc, DC_AX_FILTDATA, 0); 1273 1274 /* 1275 * If we're already in promisc or allmulti mode, we 1276 * don't have to bother programming the multicast filter. 1277 */ 1278 if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) 1279 return; 1280 1281 /* now program new ones */ 1282 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1283 if (ifma->ifma_addr->sa_family != AF_LINK) 1284 continue; 1285 h = dc_mchash_be(LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); 1286 if (h < 32) 1287 hashes[0] |= (1 << h); 1288 else 1289 hashes[1] |= (1 << (h - 32)); 1290 } 1291 1292 CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR0); 1293 CSR_WRITE_4(sc, DC_AX_FILTDATA, hashes[0]); 1294 CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR1); 1295 CSR_WRITE_4(sc, DC_AX_FILTDATA, hashes[1]); 1296} 1297 1298static void 1299dc_setfilt_xircom(struct dc_softc *sc) 1300{ 1301 struct ifnet *ifp; 1302 struct ifmultiaddr *ifma; 1303 struct dc_desc *sframe; 1304 u_int32_t h, *sp; 1305 int i; 1306 1307 ifp = &sc->arpcom.ac_if; 1308 DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_TX_ON | DC_NETCFG_RX_ON)); 1309 1310 i = sc->dc_cdata.dc_tx_prod; 1311 DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT); 1312 
sc->dc_cdata.dc_tx_cnt++; 1313 sframe = &sc->dc_ldata->dc_tx_list[i]; 1314 sp = sc->dc_cdata.dc_sbuf; 1315 bzero(sp, DC_SFRAME_LEN); 1316 1317 sframe->dc_data = htole32(sc->dc_saddr); 1318 sframe->dc_ctl = htole32(DC_SFRAME_LEN | DC_TXCTL_SETUP | 1319 DC_TXCTL_TLINK | DC_FILTER_HASHPERF | DC_TXCTL_FINT); 1320 1321 sc->dc_cdata.dc_tx_chain[i] = (struct mbuf *)sc->dc_cdata.dc_sbuf; 1322 1323 /* If we want promiscuous mode, set the allframes bit. */ 1324 if (ifp->if_flags & IFF_PROMISC) 1325 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); 1326 else 1327 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); 1328 1329 if (ifp->if_flags & IFF_ALLMULTI) 1330 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); 1331 else 1332 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); 1333 1334 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1335 if (ifma->ifma_addr->sa_family != AF_LINK) 1336 continue; 1337 h = dc_mchash_le(sc, 1338 LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); 1339 sp[h >> 4] |= htole32(1 << (h & 0xF)); 1340 } 1341 1342 if (ifp->if_flags & IFF_BROADCAST) { 1343 h = dc_mchash_le(sc, ifp->if_broadcastaddr); 1344 sp[h >> 4] |= htole32(1 << (h & 0xF)); 1345 } 1346 1347 /* Set our MAC address */ 1348 sp[0] = DC_SP_MAC(((u_int16_t *)sc->arpcom.ac_enaddr)[0]); 1349 sp[1] = DC_SP_MAC(((u_int16_t *)sc->arpcom.ac_enaddr)[1]); 1350 sp[2] = DC_SP_MAC(((u_int16_t *)sc->arpcom.ac_enaddr)[2]); 1351 1352 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON); 1353 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ON); 1354 ifp->if_flags |= IFF_RUNNING; 1355 sframe->dc_status = htole32(DC_TXSTAT_OWN); 1356 CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF); 1357 1358 /* 1359 * Wait some time... 
1360 */ 1361 DELAY(1000); 1362 1363 ifp->if_timer = 5; 1364} 1365 1366static void 1367dc_setfilt(struct dc_softc *sc) 1368{ 1369 1370 if (DC_IS_INTEL(sc) || DC_IS_MACRONIX(sc) || DC_IS_PNIC(sc) || 1371 DC_IS_PNICII(sc) || DC_IS_DAVICOM(sc) || DC_IS_CONEXANT(sc)) 1372 dc_setfilt_21143(sc); 1373 1374 if (DC_IS_ASIX(sc)) 1375 dc_setfilt_asix(sc); 1376 1377 if (DC_IS_ADMTEK(sc)) 1378 dc_setfilt_admtek(sc); 1379 1380 if (DC_IS_XIRCOM(sc)) 1381 dc_setfilt_xircom(sc); 1382} 1383 1384/* 1385 * In order to fiddle with the 'full-duplex' and '100Mbps' bits in 1386 * the netconfig register, we first have to put the transmit and/or 1387 * receive logic in the idle state. 1388 */ 1389static void 1390dc_setcfg(struct dc_softc *sc, int media) 1391{ 1392 int i, restart = 0, watchdogreg; 1393 u_int32_t isr; 1394 1395 if (IFM_SUBTYPE(media) == IFM_NONE) 1396 return; 1397 1398 if (CSR_READ_4(sc, DC_NETCFG) & (DC_NETCFG_TX_ON | DC_NETCFG_RX_ON)) { 1399 restart = 1; 1400 DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_TX_ON | DC_NETCFG_RX_ON)); 1401 1402 for (i = 0; i < DC_TIMEOUT; i++) { 1403 isr = CSR_READ_4(sc, DC_ISR); 1404 if (isr & DC_ISR_TX_IDLE && 1405 ((isr & DC_ISR_RX_STATE) == DC_RXSTATE_STOPPED || 1406 (isr & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT)) 1407 break; 1408 DELAY(10); 1409 } 1410 1411 if (i == DC_TIMEOUT) 1412 printf("dc%d: failed to force tx and " 1413 "rx to idle state\n", sc->dc_unit); 1414 } 1415 1416 if (IFM_SUBTYPE(media) == IFM_100_TX) { 1417 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_SPEEDSEL); 1418 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_HEARTBEAT); 1419 if (sc->dc_pmode == DC_PMODE_MII) { 1420 if (DC_IS_INTEL(sc)) { 1421 /* There's a write enable bit here that reads as 1. 
*/ 1422 watchdogreg = CSR_READ_4(sc, DC_WATCHDOG); 1423 watchdogreg &= ~DC_WDOG_CTLWREN; 1424 watchdogreg |= DC_WDOG_JABBERDIS; 1425 CSR_WRITE_4(sc, DC_WATCHDOG, watchdogreg); 1426 } else { 1427 DC_SETBIT(sc, DC_WATCHDOG, DC_WDOG_JABBERDIS); 1428 } 1429 DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_PCS | 1430 DC_NETCFG_PORTSEL | DC_NETCFG_SCRAMBLER)); 1431 if (sc->dc_type == DC_TYPE_98713) 1432 DC_SETBIT(sc, DC_NETCFG, (DC_NETCFG_PCS | 1433 DC_NETCFG_SCRAMBLER)); 1434 if (!DC_IS_DAVICOM(sc)) 1435 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL); 1436 DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF); 1437 if (DC_IS_INTEL(sc)) 1438 dc_apply_fixup(sc, IFM_AUTO); 1439 } else { 1440 if (DC_IS_PNIC(sc)) { 1441 DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_SPEEDSEL); 1442 DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_100TX_LOOP); 1443 DC_SETBIT(sc, DC_PN_NWAY, DC_PN_NWAY_SPEEDSEL); 1444 } 1445 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL); 1446 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PCS); 1447 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_SCRAMBLER); 1448 if (DC_IS_INTEL(sc)) 1449 dc_apply_fixup(sc, 1450 (media & IFM_GMASK) == IFM_FDX ? 1451 IFM_100_TX | IFM_FDX : IFM_100_TX); 1452 } 1453 } 1454 1455 if (IFM_SUBTYPE(media) == IFM_10_T) { 1456 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_SPEEDSEL); 1457 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_HEARTBEAT); 1458 if (sc->dc_pmode == DC_PMODE_MII) { 1459 /* There's a write enable bit here that reads as 1. 
*/ 1460 if (DC_IS_INTEL(sc)) { 1461 watchdogreg = CSR_READ_4(sc, DC_WATCHDOG); 1462 watchdogreg &= ~DC_WDOG_CTLWREN; 1463 watchdogreg |= DC_WDOG_JABBERDIS; 1464 CSR_WRITE_4(sc, DC_WATCHDOG, watchdogreg); 1465 } else { 1466 DC_SETBIT(sc, DC_WATCHDOG, DC_WDOG_JABBERDIS); 1467 } 1468 DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_PCS | 1469 DC_NETCFG_PORTSEL | DC_NETCFG_SCRAMBLER)); 1470 if (sc->dc_type == DC_TYPE_98713) 1471 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PCS); 1472 if (!DC_IS_DAVICOM(sc)) 1473 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL); 1474 DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF); 1475 if (DC_IS_INTEL(sc)) 1476 dc_apply_fixup(sc, IFM_AUTO); 1477 } else { 1478 if (DC_IS_PNIC(sc)) { 1479 DC_PN_GPIO_CLRBIT(sc, DC_PN_GPIO_SPEEDSEL); 1480 DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_100TX_LOOP); 1481 DC_CLRBIT(sc, DC_PN_NWAY, DC_PN_NWAY_SPEEDSEL); 1482 } 1483 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL); 1484 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PCS); 1485 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_SCRAMBLER); 1486 if (DC_IS_INTEL(sc)) { 1487 DC_CLRBIT(sc, DC_SIARESET, DC_SIA_RESET); 1488 DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF); 1489 if ((media & IFM_GMASK) == IFM_FDX) 1490 DC_SETBIT(sc, DC_10BTCTRL, 0x7F3D); 1491 else 1492 DC_SETBIT(sc, DC_10BTCTRL, 0x7F3F); 1493 DC_SETBIT(sc, DC_SIARESET, DC_SIA_RESET); 1494 DC_CLRBIT(sc, DC_10BTCTRL, 1495 DC_TCTL_AUTONEGENBL); 1496 dc_apply_fixup(sc, 1497 (media & IFM_GMASK) == IFM_FDX ? 1498 IFM_10_T | IFM_FDX : IFM_10_T); 1499 DELAY(20000); 1500 } 1501 } 1502 } 1503 1504 /* 1505 * If this is a Davicom DM9102A card with a DM9801 HomePNA 1506 * PHY and we want HomePNA mode, set the portsel bit to turn 1507 * on the external MII port. 
1508 */ 1509 if (DC_IS_DAVICOM(sc)) { 1510 if (IFM_SUBTYPE(media) == IFM_HPNA_1) { 1511 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL); 1512 sc->dc_link = 1; 1513 } else { 1514 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL); 1515 } 1516 } 1517 1518 if ((media & IFM_GMASK) == IFM_FDX) { 1519 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_FULLDUPLEX); 1520 if (sc->dc_pmode == DC_PMODE_SYM && DC_IS_PNIC(sc)) 1521 DC_SETBIT(sc, DC_PN_NWAY, DC_PN_NWAY_DUPLEX); 1522 } else { 1523 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_FULLDUPLEX); 1524 if (sc->dc_pmode == DC_PMODE_SYM && DC_IS_PNIC(sc)) 1525 DC_CLRBIT(sc, DC_PN_NWAY, DC_PN_NWAY_DUPLEX); 1526 } 1527 1528 if (restart) 1529 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON | DC_NETCFG_RX_ON); 1530} 1531 1532static void 1533dc_reset(struct dc_softc *sc) 1534{ 1535 int i; 1536 1537 DC_SETBIT(sc, DC_BUSCTL, DC_BUSCTL_RESET); 1538 1539 for (i = 0; i < DC_TIMEOUT; i++) { 1540 DELAY(10); 1541 if (!(CSR_READ_4(sc, DC_BUSCTL) & DC_BUSCTL_RESET)) 1542 break; 1543 } 1544 1545 if (DC_IS_ASIX(sc) || DC_IS_ADMTEK(sc) || DC_IS_CONEXANT(sc) || 1546 DC_IS_XIRCOM(sc) || DC_IS_INTEL(sc)) { 1547 DELAY(10000); 1548 DC_CLRBIT(sc, DC_BUSCTL, DC_BUSCTL_RESET); 1549 i = 0; 1550 } 1551 1552 if (i == DC_TIMEOUT) 1553 printf("dc%d: reset never completed!\n", sc->dc_unit); 1554 1555 /* Wait a little while for the chip to get its brains in order. */ 1556 DELAY(1000); 1557 1558 CSR_WRITE_4(sc, DC_IMR, 0x00000000); 1559 CSR_WRITE_4(sc, DC_BUSCTL, 0x00000000); 1560 CSR_WRITE_4(sc, DC_NETCFG, 0x00000000); 1561 1562 /* 1563 * Bring the SIA out of reset. In some cases, it looks 1564 * like failing to unreset the SIA soon enough gets it 1565 * into a state where it will never come out of reset 1566 * until we reset the whole chip again. 
1567 */ 1568 if (DC_IS_INTEL(sc)) { 1569 DC_SETBIT(sc, DC_SIARESET, DC_SIA_RESET); 1570 CSR_WRITE_4(sc, DC_10BTCTRL, 0); 1571 CSR_WRITE_4(sc, DC_WATCHDOG, 0); 1572 } 1573} 1574 1575static struct dc_type * 1576dc_devtype(device_t dev) 1577{ 1578 struct dc_type *t; 1579 u_int32_t rev; 1580 1581 t = dc_devs; 1582 1583 while (t->dc_name != NULL) { 1584 if ((pci_get_vendor(dev) == t->dc_vid) && 1585 (pci_get_device(dev) == t->dc_did)) { 1586 /* Check the PCI revision */ 1587 rev = pci_read_config(dev, DC_PCI_CFRV, 4) & 0xFF; 1588 if (t->dc_did == DC_DEVICEID_98713 && 1589 rev >= DC_REVISION_98713A) 1590 t++; 1591 if (t->dc_did == DC_DEVICEID_98713_CP && 1592 rev >= DC_REVISION_98713A) 1593 t++; 1594 if (t->dc_did == DC_DEVICEID_987x5 && 1595 rev >= DC_REVISION_98715AEC_C) 1596 t++; 1597 if (t->dc_did == DC_DEVICEID_987x5 && 1598 rev >= DC_REVISION_98725) 1599 t++; 1600 if (t->dc_did == DC_DEVICEID_AX88140A && 1601 rev >= DC_REVISION_88141) 1602 t++; 1603 if (t->dc_did == DC_DEVICEID_82C168 && 1604 rev >= DC_REVISION_82C169) 1605 t++; 1606 if (t->dc_did == DC_DEVICEID_DM9102 && 1607 rev >= DC_REVISION_DM9102A) 1608 t++; 1609 /* 1610 * The Microsoft MN-130 has a device ID of 0x0002, 1611 * which happens to be the same as the PNIC 82c168. 1612 * To keep dc_attach() from getting confused, we 1613 * pretend its ID is something different. 1614 * XXX: ideally, dc_attach() should be checking 1615 * vendorid+deviceid together to avoid such 1616 * collisions. 1617 */ 1618 if (t->dc_vid == DC_VENDORID_MICROSOFT && 1619 t->dc_did == DC_DEVICEID_MSMN130) 1620 t++; 1621 return (t); 1622 } 1623 t++; 1624 } 1625 1626 return (NULL); 1627} 1628 1629/* 1630 * Probe for a 21143 or clone chip. Check the PCI vendor and device 1631 * IDs against our list and return a device name if we find a match. 1632 * We do a little bit of extra work to identify the exact type of 1633 * chip. The MX98713 and MX98713A have the same PCI vendor/device ID, 1634 * but different revision IDs. 
The same is true for 98715/98715A 1635 * chips and the 98725, as well as the ASIX and ADMtek chips. In some 1636 * cases, the exact chip revision affects driver behavior. 1637 */ 1638static int 1639dc_probe(device_t dev) 1640{ 1641 struct dc_type *t; 1642 1643 t = dc_devtype(dev); 1644 1645 if (t != NULL) { 1646 device_set_desc(dev, t->dc_name); 1647 return (0); 1648 } 1649 1650 return (ENXIO); 1651} 1652 1653#ifndef BURN_BRIDGES 1654static void 1655dc_acpi(device_t dev) 1656{ 1657 int unit; 1658 u_int32_t iobase, membase, irq; 1659 1660 unit = device_get_unit(dev); 1661 1662 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) { 1663 /* Save important PCI config data. */ 1664 iobase = pci_read_config(dev, DC_PCI_CFBIO, 4); 1665 membase = pci_read_config(dev, DC_PCI_CFBMA, 4); 1666 irq = pci_read_config(dev, DC_PCI_CFIT, 4); 1667 1668 /* Reset the power state. */ 1669 printf("dc%d: chip is in D%d power mode " 1670 "-- setting to D0\n", unit, 1671 pci_get_powerstate(dev)); 1672 pci_set_powerstate(dev, PCI_POWERSTATE_D0); 1673 1674 /* Restore PCI config data. 
*/ 1675 pci_write_config(dev, DC_PCI_CFBIO, iobase, 4); 1676 pci_write_config(dev, DC_PCI_CFBMA, membase, 4); 1677 pci_write_config(dev, DC_PCI_CFIT, irq, 4); 1678 } 1679} 1680#endif 1681 1682static void 1683dc_apply_fixup(struct dc_softc *sc, int media) 1684{ 1685 struct dc_mediainfo *m; 1686 u_int8_t *p; 1687 int i; 1688 u_int32_t reg; 1689 1690 m = sc->dc_mi; 1691 1692 while (m != NULL) { 1693 if (m->dc_media == media) 1694 break; 1695 m = m->dc_next; 1696 } 1697 1698 if (m == NULL) 1699 return; 1700 1701 for (i = 0, p = m->dc_reset_ptr; i < m->dc_reset_len; i++, p += 2) { 1702 reg = (p[0] | (p[1] << 8)) << 16; 1703 CSR_WRITE_4(sc, DC_WATCHDOG, reg); 1704 } 1705 1706 for (i = 0, p = m->dc_gp_ptr; i < m->dc_gp_len; i++, p += 2) { 1707 reg = (p[0] | (p[1] << 8)) << 16; 1708 CSR_WRITE_4(sc, DC_WATCHDOG, reg); 1709 } 1710} 1711 1712static void 1713dc_decode_leaf_sia(struct dc_softc *sc, struct dc_eblock_sia *l) 1714{ 1715 struct dc_mediainfo *m; 1716 1717 m = malloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_NOWAIT | M_ZERO); 1718 switch (l->dc_sia_code & ~DC_SIA_CODE_EXT) { 1719 case DC_SIA_CODE_10BT: 1720 m->dc_media = IFM_10_T; 1721 break; 1722 case DC_SIA_CODE_10BT_FDX: 1723 m->dc_media = IFM_10_T | IFM_FDX; 1724 break; 1725 case DC_SIA_CODE_10B2: 1726 m->dc_media = IFM_10_2; 1727 break; 1728 case DC_SIA_CODE_10B5: 1729 m->dc_media = IFM_10_5; 1730 break; 1731 default: 1732 break; 1733 } 1734 1735 /* 1736 * We need to ignore CSR13, CSR14, CSR15 for SIA mode. 1737 * Things apparently already work for cards that do 1738 * supply Media Specific Data. 
1739 */ 1740 if (l->dc_sia_code & DC_SIA_CODE_EXT) { 1741 m->dc_gp_len = 2; 1742 m->dc_gp_ptr = 1743 (u_int8_t *)&l->dc_un.dc_sia_ext.dc_sia_gpio_ctl; 1744 } else { 1745 m->dc_gp_len = 2; 1746 m->dc_gp_ptr = 1747 (u_int8_t *)&l->dc_un.dc_sia_noext.dc_sia_gpio_ctl; 1748 } 1749 1750 m->dc_next = sc->dc_mi; 1751 sc->dc_mi = m; 1752 1753 sc->dc_pmode = DC_PMODE_SIA; 1754} 1755 1756static void 1757dc_decode_leaf_sym(struct dc_softc *sc, struct dc_eblock_sym *l) 1758{ 1759 struct dc_mediainfo *m; 1760 1761 m = malloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_NOWAIT | M_ZERO); 1762 if (l->dc_sym_code == DC_SYM_CODE_100BT) 1763 m->dc_media = IFM_100_TX; 1764 1765 if (l->dc_sym_code == DC_SYM_CODE_100BT_FDX) 1766 m->dc_media = IFM_100_TX | IFM_FDX; 1767 1768 m->dc_gp_len = 2; 1769 m->dc_gp_ptr = (u_int8_t *)&l->dc_sym_gpio_ctl; 1770 1771 m->dc_next = sc->dc_mi; 1772 sc->dc_mi = m; 1773 1774 sc->dc_pmode = DC_PMODE_SYM; 1775} 1776 1777static void 1778dc_decode_leaf_mii(struct dc_softc *sc, struct dc_eblock_mii *l) 1779{ 1780 struct dc_mediainfo *m; 1781 u_int8_t *p; 1782 1783 m = malloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_NOWAIT | M_ZERO); 1784 /* We abuse IFM_AUTO to represent MII. 
*/ 1785 m->dc_media = IFM_AUTO; 1786 m->dc_gp_len = l->dc_gpr_len; 1787 1788 p = (u_int8_t *)l; 1789 p += sizeof(struct dc_eblock_mii); 1790 m->dc_gp_ptr = p; 1791 p += 2 * l->dc_gpr_len; 1792 m->dc_reset_len = *p; 1793 p++; 1794 m->dc_reset_ptr = p; 1795 1796 m->dc_next = sc->dc_mi; 1797 sc->dc_mi = m; 1798} 1799 1800static void 1801dc_read_srom(struct dc_softc *sc, int bits) 1802{ 1803 int size; 1804 1805 size = 2 << bits; 1806 sc->dc_srom = malloc(size, M_DEVBUF, M_NOWAIT); 1807 dc_read_eeprom(sc, (caddr_t)sc->dc_srom, 0, (size / 2), 0); 1808} 1809 1810static void 1811dc_parse_21143_srom(struct dc_softc *sc) 1812{ 1813 struct dc_leaf_hdr *lhdr; 1814 struct dc_eblock_hdr *hdr; 1815 int have_mii, i, loff; 1816 char *ptr; 1817 1818 have_mii = 0; 1819 loff = sc->dc_srom[27]; 1820 lhdr = (struct dc_leaf_hdr *)&(sc->dc_srom[loff]); 1821 1822 ptr = (char *)lhdr; 1823 ptr += sizeof(struct dc_leaf_hdr) - 1; 1824 /* 1825 * Look if we got a MII media block. 1826 */ 1827 for (i = 0; i < lhdr->dc_mcnt; i++) { 1828 hdr = (struct dc_eblock_hdr *)ptr; 1829 if (hdr->dc_type == DC_EBLOCK_MII) 1830 have_mii++; 1831 1832 ptr += (hdr->dc_len & 0x7F); 1833 ptr++; 1834 } 1835 1836 /* 1837 * Do the same thing again. Only use SIA and SYM media 1838 * blocks if no MII media block is available. 1839 */ 1840 ptr = (char *)lhdr; 1841 ptr += sizeof(struct dc_leaf_hdr) - 1; 1842 for (i = 0; i < lhdr->dc_mcnt; i++) { 1843 hdr = (struct dc_eblock_hdr *)ptr; 1844 switch (hdr->dc_type) { 1845 case DC_EBLOCK_MII: 1846 dc_decode_leaf_mii(sc, (struct dc_eblock_mii *)hdr); 1847 break; 1848 case DC_EBLOCK_SIA: 1849 if (! have_mii) 1850 dc_decode_leaf_sia(sc, 1851 (struct dc_eblock_sia *)hdr); 1852 break; 1853 case DC_EBLOCK_SYM: 1854 if (! have_mii) 1855 dc_decode_leaf_sym(sc, 1856 (struct dc_eblock_sym *)hdr); 1857 break; 1858 default: 1859 /* Don't care. Yet. 
*/ 1860 break; 1861 } 1862 ptr += (hdr->dc_len & 0x7F); 1863 ptr++; 1864 } 1865} 1866 1867static void 1868dc_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1869{ 1870 u_int32_t *paddr; 1871 1872 KASSERT(nseg == 1, ("wrong number of segments, should be 1")); 1873 paddr = arg; 1874 *paddr = segs->ds_addr; 1875} 1876 1877/* 1878 * Attach the interface. Allocate softc structures, do ifmedia 1879 * setup and ethernet/BPF attach. 1880 */ 1881static int 1882dc_attach(device_t dev) 1883{ 1884 int tmp = 0; 1885 u_char eaddr[ETHER_ADDR_LEN]; 1886 u_int32_t command; 1887 struct dc_softc *sc; 1888 struct ifnet *ifp; 1889 u_int32_t revision; 1890 int unit, error = 0, rid, mac_offset; 1891 int i; 1892 u_int8_t *mac; 1893 1894 sc = device_get_softc(dev); 1895 unit = device_get_unit(dev); 1896 1897 mtx_init(&sc->dc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, 1898 MTX_DEF | MTX_RECURSE); 1899#ifndef BURN_BRIDGES 1900 /* 1901 * Handle power management nonsense. 1902 */ 1903 dc_acpi(dev); 1904#endif 1905 /* 1906 * Map control/status registers. 1907 */ 1908 pci_enable_busmaster(dev); 1909 1910 rid = DC_RID; 1911 sc->dc_res = bus_alloc_resource_any(dev, DC_RES, &rid, RF_ACTIVE); 1912 1913 if (sc->dc_res == NULL) { 1914 printf("dc%d: couldn't map ports/memory\n", unit); 1915 error = ENXIO; 1916 goto fail; 1917 } 1918 1919 sc->dc_btag = rman_get_bustag(sc->dc_res); 1920 sc->dc_bhandle = rman_get_bushandle(sc->dc_res); 1921 1922 /* Allocate interrupt. */ 1923 rid = 0; 1924 sc->dc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 1925 RF_SHAREABLE | RF_ACTIVE); 1926 1927 if (sc->dc_irq == NULL) { 1928 printf("dc%d: couldn't map interrupt\n", unit); 1929 error = ENXIO; 1930 goto fail; 1931 } 1932 1933 /* Need this info to decide on a chip type. 
*/ 1934 sc->dc_info = dc_devtype(dev); 1935 revision = pci_read_config(dev, DC_PCI_CFRV, 4) & 0x000000FF; 1936 1937 /* Get the eeprom width, but PNIC and XIRCOM have diff eeprom */ 1938 if (sc->dc_info->dc_did != DC_DEVICEID_82C168 && 1939 sc->dc_info->dc_did != DC_DEVICEID_X3201) 1940 dc_eeprom_width(sc); 1941 1942 switch (sc->dc_info->dc_did) { 1943 case DC_DEVICEID_21143: 1944 sc->dc_type = DC_TYPE_21143; 1945 sc->dc_flags |= DC_TX_POLL | DC_TX_USE_TX_INTR; 1946 sc->dc_flags |= DC_REDUCED_MII_POLL; 1947 /* Save EEPROM contents so we can parse them later. */ 1948 dc_read_srom(sc, sc->dc_romwidth); 1949 break; 1950 case DC_DEVICEID_DM9009: 1951 case DC_DEVICEID_DM9100: 1952 case DC_DEVICEID_DM9102: 1953 sc->dc_type = DC_TYPE_DM9102; 1954 sc->dc_flags |= DC_TX_COALESCE | DC_TX_INTR_ALWAYS; 1955 sc->dc_flags |= DC_REDUCED_MII_POLL | DC_TX_STORENFWD; 1956 sc->dc_flags |= DC_TX_ALIGN; 1957 sc->dc_pmode = DC_PMODE_MII; 1958 /* Increase the latency timer value. */ 1959 command = pci_read_config(dev, DC_PCI_CFLT, 4); 1960 command &= 0xFFFF00FF; 1961 command |= 0x00008000; 1962 pci_write_config(dev, DC_PCI_CFLT, command, 4); 1963 break; 1964 case DC_DEVICEID_AL981: 1965 sc->dc_type = DC_TYPE_AL981; 1966 sc->dc_flags |= DC_TX_USE_TX_INTR; 1967 sc->dc_flags |= DC_TX_ADMTEK_WAR; 1968 sc->dc_pmode = DC_PMODE_MII; 1969 dc_read_srom(sc, sc->dc_romwidth); 1970 break; 1971 case DC_DEVICEID_AN985: 1972 case DC_DEVICEID_ADM9511: 1973 case DC_DEVICEID_ADM9513: 1974 case DC_DEVICEID_FA511: 1975 case DC_DEVICEID_FE2500: 1976 case DC_DEVICEID_EN2242: 1977 case DC_DEVICEID_HAWKING_PN672TX: 1978 case DC_DEVICEID_3CSOHOB: 1979 case DC_DEVICEID_MSMN120: 1980 case DC_DEVICEID_MSMN130_FAKE: /* XXX avoid collision with PNIC*/ 1981 sc->dc_type = DC_TYPE_AN985; 1982 sc->dc_flags |= DC_64BIT_HASH; 1983 sc->dc_flags |= DC_TX_USE_TX_INTR; 1984 sc->dc_flags |= DC_TX_ADMTEK_WAR; 1985 sc->dc_pmode = DC_PMODE_MII; 1986 /* Don't read SROM for - auto-loaded on reset */ 1987 break; 1988 case 
DC_DEVICEID_98713: 1989 case DC_DEVICEID_98713_CP: 1990 if (revision < DC_REVISION_98713A) { 1991 sc->dc_type = DC_TYPE_98713; 1992 } 1993 if (revision >= DC_REVISION_98713A) { 1994 sc->dc_type = DC_TYPE_98713A; 1995 sc->dc_flags |= DC_21143_NWAY; 1996 } 1997 sc->dc_flags |= DC_REDUCED_MII_POLL; 1998 sc->dc_flags |= DC_TX_POLL | DC_TX_USE_TX_INTR; 1999 break; 2000 case DC_DEVICEID_987x5: 2001 case DC_DEVICEID_EN1217: 2002 /* 2003 * Macronix MX98715AEC-C/D/E parts have only a 2004 * 128-bit hash table. We need to deal with these 2005 * in the same manner as the PNIC II so that we 2006 * get the right number of bits out of the 2007 * CRC routine. 2008 */ 2009 if (revision >= DC_REVISION_98715AEC_C && 2010 revision < DC_REVISION_98725) 2011 sc->dc_flags |= DC_128BIT_HASH; 2012 sc->dc_type = DC_TYPE_987x5; 2013 sc->dc_flags |= DC_TX_POLL | DC_TX_USE_TX_INTR; 2014 sc->dc_flags |= DC_REDUCED_MII_POLL | DC_21143_NWAY; 2015 break; 2016 case DC_DEVICEID_98727: 2017 sc->dc_type = DC_TYPE_987x5; 2018 sc->dc_flags |= DC_TX_POLL | DC_TX_USE_TX_INTR; 2019 sc->dc_flags |= DC_REDUCED_MII_POLL | DC_21143_NWAY; 2020 break; 2021 case DC_DEVICEID_82C115: 2022 sc->dc_type = DC_TYPE_PNICII; 2023 sc->dc_flags |= DC_TX_POLL | DC_TX_USE_TX_INTR | DC_128BIT_HASH; 2024 sc->dc_flags |= DC_REDUCED_MII_POLL | DC_21143_NWAY; 2025 break; 2026 case DC_DEVICEID_82C168: 2027 sc->dc_type = DC_TYPE_PNIC; 2028 sc->dc_flags |= DC_TX_STORENFWD | DC_TX_INTR_ALWAYS; 2029 sc->dc_flags |= DC_PNIC_RX_BUG_WAR; 2030 sc->dc_pnic_rx_buf = malloc(DC_RXLEN * 5, M_DEVBUF, M_NOWAIT); 2031 if (revision < DC_REVISION_82C169) 2032 sc->dc_pmode = DC_PMODE_SYM; 2033 break; 2034 case DC_DEVICEID_AX88140A: 2035 sc->dc_type = DC_TYPE_ASIX; 2036 sc->dc_flags |= DC_TX_USE_TX_INTR | DC_TX_INTR_FIRSTFRAG; 2037 sc->dc_flags |= DC_REDUCED_MII_POLL; 2038 sc->dc_pmode = DC_PMODE_MII; 2039 break; 2040 case DC_DEVICEID_X3201: 2041 sc->dc_type = DC_TYPE_XIRCOM; 2042 sc->dc_flags |= DC_TX_INTR_ALWAYS | DC_TX_COALESCE | 2043 DC_TX_ALIGN; 
2044 /* 2045 * We don't actually need to coalesce, but we're doing 2046 * it to obtain a double word aligned buffer. 2047 * The DC_TX_COALESCE flag is required. 2048 */ 2049 sc->dc_pmode = DC_PMODE_MII; 2050 break; 2051 case DC_DEVICEID_RS7112: 2052 sc->dc_type = DC_TYPE_CONEXANT; 2053 sc->dc_flags |= DC_TX_INTR_ALWAYS; 2054 sc->dc_flags |= DC_REDUCED_MII_POLL; 2055 sc->dc_pmode = DC_PMODE_MII; 2056 dc_read_srom(sc, sc->dc_romwidth); 2057 break; 2058 default: 2059 printf("dc%d: unknown device: %x\n", sc->dc_unit, 2060 sc->dc_info->dc_did); 2061 break; 2062 } 2063 2064 /* Save the cache line size. */ 2065 if (DC_IS_DAVICOM(sc)) 2066 sc->dc_cachesize = 0; 2067 else 2068 sc->dc_cachesize = pci_read_config(dev, 2069 DC_PCI_CFLT, 4) & 0xFF; 2070 2071 /* Reset the adapter. */ 2072 dc_reset(sc); 2073 2074 /* Take 21143 out of snooze mode */ 2075 if (DC_IS_INTEL(sc) || DC_IS_XIRCOM(sc)) { 2076 command = pci_read_config(dev, DC_PCI_CFDD, 4); 2077 command &= ~(DC_CFDD_SNOOZE_MODE | DC_CFDD_SLEEP_MODE); 2078 pci_write_config(dev, DC_PCI_CFDD, command, 4); 2079 } 2080 2081 /* 2082 * Try to learn something about the supported media. 2083 * We know that ASIX and ADMtek and Davicom devices 2084 * will *always* be using MII media, so that's a no-brainer. 2085 * The tricky ones are the Macronix/PNIC II and the 2086 * Intel 21143. 2087 */ 2088 if (DC_IS_INTEL(sc)) 2089 dc_parse_21143_srom(sc); 2090 else if (DC_IS_MACRONIX(sc) || DC_IS_PNICII(sc)) { 2091 if (sc->dc_type == DC_TYPE_98713) 2092 sc->dc_pmode = DC_PMODE_MII; 2093 else 2094 sc->dc_pmode = DC_PMODE_SYM; 2095 } else if (!sc->dc_pmode) 2096 sc->dc_pmode = DC_PMODE_MII; 2097 2098 /* 2099 * Get station address from the EEPROM. 
2100 */ 2101 switch(sc->dc_type) { 2102 case DC_TYPE_98713: 2103 case DC_TYPE_98713A: 2104 case DC_TYPE_987x5: 2105 case DC_TYPE_PNICII: 2106 dc_read_eeprom(sc, (caddr_t)&mac_offset, 2107 (DC_EE_NODEADDR_OFFSET / 2), 1, 0); 2108 dc_read_eeprom(sc, (caddr_t)&eaddr, (mac_offset / 2), 3, 0); 2109 break; 2110 case DC_TYPE_PNIC: 2111 dc_read_eeprom(sc, (caddr_t)&eaddr, 0, 3, 1); 2112 break; 2113 case DC_TYPE_DM9102: 2114 dc_read_eeprom(sc, (caddr_t)&eaddr, DC_EE_NODEADDR, 3, 0); 2115#ifdef __sparc64__ 2116 /* 2117 * If this is an onboard dc(4) the station address read from 2118 * the EEPROM is all zero and we have to get it from the fcode. 2119 */ 2120 for (i = 0; i < ETHER_ADDR_LEN; i++) 2121 if (eaddr[i] != 0x00) 2122 break; 2123 if (i >= ETHER_ADDR_LEN && OF_getetheraddr2(dev, eaddr) == -1) 2124 OF_getetheraddr(dev, eaddr); 2125#endif 2126 break; 2127 case DC_TYPE_21143: 2128 case DC_TYPE_ASIX: 2129 dc_read_eeprom(sc, (caddr_t)&eaddr, DC_EE_NODEADDR, 3, 0); 2130 break; 2131 case DC_TYPE_AL981: 2132 case DC_TYPE_AN985: 2133 *(u_int32_t *)(&eaddr[0]) = CSR_READ_4(sc, DC_AL_PAR0); 2134 *(u_int16_t *)(&eaddr[4]) = CSR_READ_4(sc, DC_AL_PAR1); 2135 break; 2136 case DC_TYPE_CONEXANT: 2137 bcopy(sc->dc_srom + DC_CONEXANT_EE_NODEADDR, &eaddr, 2138 ETHER_ADDR_LEN); 2139 break; 2140 case DC_TYPE_XIRCOM: 2141 /* The MAC comes from the CIS. */ 2142 mac = pci_get_ether(dev); 2143 if (!mac) { 2144 device_printf(dev, "No station address in CIS!\n"); 2145 error = ENXIO; 2146 goto fail; 2147 } 2148 bcopy(mac, eaddr, ETHER_ADDR_LEN); 2149 break; 2150 default: 2151 dc_read_eeprom(sc, (caddr_t)&eaddr, DC_EE_NODEADDR, 3, 0); 2152 break; 2153 } 2154 2155 sc->dc_unit = unit; 2156 bcopy(eaddr, &sc->arpcom.ac_enaddr, ETHER_ADDR_LEN); 2157 2158 /* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. 
*/ 2159 error = bus_dma_tag_create(NULL, PAGE_SIZE, 0, BUS_SPACE_MAXADDR_32BIT, 2160 BUS_SPACE_MAXADDR, NULL, NULL, sizeof(struct dc_list_data), 1, 2161 sizeof(struct dc_list_data), 0, NULL, NULL, &sc->dc_ltag); 2162 if (error) { 2163 printf("dc%d: failed to allocate busdma tag\n", unit); 2164 error = ENXIO; 2165 goto fail; 2166 } 2167 error = bus_dmamem_alloc(sc->dc_ltag, (void **)&sc->dc_ldata, 2168 BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->dc_lmap); 2169 if (error) { 2170 printf("dc%d: failed to allocate DMA safe memory\n", unit); 2171 error = ENXIO; 2172 goto fail; 2173 } 2174 error = bus_dmamap_load(sc->dc_ltag, sc->dc_lmap, sc->dc_ldata, 2175 sizeof(struct dc_list_data), dc_dma_map_addr, &sc->dc_laddr, 2176 BUS_DMA_NOWAIT); 2177 if (error) { 2178 printf("dc%d: cannot get address of the descriptors\n", unit); 2179 error = ENXIO; 2180 goto fail; 2181 } 2182 2183 /* 2184 * Allocate a busdma tag and DMA safe memory for the multicast 2185 * setup frame. 2186 */ 2187 error = bus_dma_tag_create(NULL, PAGE_SIZE, 0, BUS_SPACE_MAXADDR_32BIT, 2188 BUS_SPACE_MAXADDR, NULL, NULL, DC_SFRAME_LEN + DC_MIN_FRAMELEN, 1, 2189 DC_SFRAME_LEN + DC_MIN_FRAMELEN, 0, NULL, NULL, &sc->dc_stag); 2190 if (error) { 2191 printf("dc%d: failed to allocate busdma tag\n", unit); 2192 error = ENXIO; 2193 goto fail; 2194 } 2195 error = bus_dmamem_alloc(sc->dc_stag, (void **)&sc->dc_cdata.dc_sbuf, 2196 BUS_DMA_NOWAIT, &sc->dc_smap); 2197 if (error) { 2198 printf("dc%d: failed to allocate DMA safe memory\n", unit); 2199 error = ENXIO; 2200 goto fail; 2201 } 2202 error = bus_dmamap_load(sc->dc_stag, sc->dc_smap, sc->dc_cdata.dc_sbuf, 2203 DC_SFRAME_LEN, dc_dma_map_addr, &sc->dc_saddr, BUS_DMA_NOWAIT); 2204 if (error) { 2205 printf("dc%d: cannot get address of the descriptors\n", unit); 2206 error = ENXIO; 2207 goto fail; 2208 } 2209 2210 /* Allocate a busdma tag for mbufs. 
*/ 2211 error = bus_dma_tag_create(NULL, PAGE_SIZE, 0, BUS_SPACE_MAXADDR_32BIT, 2212 BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES * DC_TX_LIST_CNT, 2213 DC_TX_LIST_CNT, MCLBYTES, 0, NULL, NULL, &sc->dc_mtag); 2214 if (error) { 2215 printf("dc%d: failed to allocate busdma tag\n", unit); 2216 error = ENXIO; 2217 goto fail; 2218 } 2219 2220 /* Create the TX/RX busdma maps. */ 2221 for (i = 0; i < DC_TX_LIST_CNT; i++) { 2222 error = bus_dmamap_create(sc->dc_mtag, 0, 2223 &sc->dc_cdata.dc_tx_map[i]); 2224 if (error) { 2225 printf("dc%d: failed to init TX ring\n", unit); 2226 error = ENXIO; 2227 goto fail; 2228 } 2229 } 2230 for (i = 0; i < DC_RX_LIST_CNT; i++) { 2231 error = bus_dmamap_create(sc->dc_mtag, 0, 2232 &sc->dc_cdata.dc_rx_map[i]); 2233 if (error) { 2234 printf("dc%d: failed to init RX ring\n", unit); 2235 error = ENXIO; 2236 goto fail; 2237 } 2238 } 2239 error = bus_dmamap_create(sc->dc_mtag, 0, &sc->dc_sparemap); 2240 if (error) { 2241 printf("dc%d: failed to init RX ring\n", unit); 2242 error = ENXIO; 2243 goto fail; 2244 } 2245 2246 ifp = &sc->arpcom.ac_if; 2247 ifp->if_softc = sc; 2248 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 2249 /* XXX: bleah, MTU gets overwritten in ether_ifattach() */ 2250 ifp->if_mtu = ETHERMTU; 2251 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2252 ifp->if_ioctl = dc_ioctl; 2253 ifp->if_start = dc_start; 2254 ifp->if_watchdog = dc_watchdog; 2255 ifp->if_init = dc_init; 2256 ifp->if_baudrate = 10000000; 2257 ifp->if_snd.ifq_maxlen = DC_TX_LIST_CNT - 1; 2258 2259 /* 2260 * Do MII setup. If this is a 21143, check for a PHY on the 2261 * MII bus after applying any necessary fixups to twiddle the 2262 * GPIO bits. If we don't end up finding a PHY, restore the 2263 * old selection (SIA only or SIA/SYM) and attach the dcphy 2264 * driver instead. 
2265 */ 2266 if (DC_IS_INTEL(sc)) { 2267 dc_apply_fixup(sc, IFM_AUTO); 2268 tmp = sc->dc_pmode; 2269 sc->dc_pmode = DC_PMODE_MII; 2270 } 2271 2272 error = mii_phy_probe(dev, &sc->dc_miibus, 2273 dc_ifmedia_upd, dc_ifmedia_sts); 2274 2275 if (error && DC_IS_INTEL(sc)) { 2276 sc->dc_pmode = tmp; 2277 if (sc->dc_pmode != DC_PMODE_SIA) 2278 sc->dc_pmode = DC_PMODE_SYM; 2279 sc->dc_flags |= DC_21143_NWAY; 2280 mii_phy_probe(dev, &sc->dc_miibus, 2281 dc_ifmedia_upd, dc_ifmedia_sts); 2282 /* 2283 * For non-MII cards, we need to have the 21143 2284 * drive the LEDs. Except there are some systems 2285 * like the NEC VersaPro NoteBook PC which have no 2286 * LEDs, and twiddling these bits has adverse effects 2287 * on them. (I.e. you suddenly can't get a link.) 2288 */ 2289 if (pci_read_config(dev, DC_PCI_CSID, 4) != 0x80281033) 2290 sc->dc_flags |= DC_TULIP_LEDS; 2291 error = 0; 2292 } 2293 2294 if (error) { 2295 printf("dc%d: MII without any PHY!\n", sc->dc_unit); 2296 goto fail; 2297 } 2298 2299 if (DC_IS_XIRCOM(sc)) { 2300 /* 2301 * setup General Purpose Port mode and data so the tulip 2302 * can talk to the MII. 2303 */ 2304 CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_WRITE_EN | DC_SIAGP_INT1_EN | 2305 DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT); 2306 DELAY(10); 2307 CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_INT1_EN | 2308 DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT); 2309 DELAY(10); 2310 } 2311 2312 if (DC_IS_ADMTEK(sc)) { 2313 /* 2314 * Set automatic TX underrun recovery for the ADMtek chips 2315 */ 2316 DC_SETBIT(sc, DC_AL_CR, DC_AL_CR_ATUR); 2317 } 2318 2319 /* 2320 * Tell the upper layer(s) we support long frames. 2321 */ 2322 ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); 2323 ifp->if_capabilities |= IFCAP_VLAN_MTU; 2324#ifdef DEVICE_POLLING 2325 ifp->if_capabilities |= IFCAP_POLLING; 2326#endif 2327 ifp->if_capenable = ifp->if_capabilities; 2328 2329 callout_init(&sc->dc_stat_ch, IS_MPSAFE ? 
CALLOUT_MPSAFE : 0); 2330 2331#ifdef SRM_MEDIA 2332 sc->dc_srm_media = 0; 2333 2334 /* Remember the SRM console media setting */ 2335 if (DC_IS_INTEL(sc)) { 2336 command = pci_read_config(dev, DC_PCI_CFDD, 4); 2337 command &= ~(DC_CFDD_SNOOZE_MODE | DC_CFDD_SLEEP_MODE); 2338 switch ((command >> 8) & 0xff) { 2339 case 3: 2340 sc->dc_srm_media = IFM_10_T; 2341 break; 2342 case 4: 2343 sc->dc_srm_media = IFM_10_T | IFM_FDX; 2344 break; 2345 case 5: 2346 sc->dc_srm_media = IFM_100_TX; 2347 break; 2348 case 6: 2349 sc->dc_srm_media = IFM_100_TX | IFM_FDX; 2350 break; 2351 } 2352 if (sc->dc_srm_media) 2353 sc->dc_srm_media |= IFM_ACTIVE | IFM_ETHER; 2354 } 2355#endif 2356 2357 /* 2358 * Call MI attach routine. 2359 */ 2360 ether_ifattach(ifp, eaddr); 2361 2362 /* Hook interrupt last to avoid having to lock softc */ 2363 error = bus_setup_intr(dev, sc->dc_irq, INTR_TYPE_NET | 2364 (IS_MPSAFE ? INTR_MPSAFE : 0), 2365 dc_intr, sc, &sc->dc_intrhand); 2366 2367 if (error) { 2368 printf("dc%d: couldn't set up irq\n", unit); 2369 ether_ifdetach(ifp); 2370 goto fail; 2371 } 2372 2373fail: 2374 if (error) 2375 dc_detach(dev); 2376 return (error); 2377} 2378 2379/* 2380 * Shutdown hardware and free up resources. This can be called any 2381 * time after the mutex has been initialized. It is called in both 2382 * the error case in attach and the normal detach case so it needs 2383 * to be careful about only freeing resources that have actually been 2384 * allocated. 
 */
static int
dc_detach(device_t dev)
{
	struct dc_softc *sc;
	struct ifnet *ifp;
	struct dc_mediainfo *m;
	int i;

	sc = device_get_softc(dev);
	/*
	 * The mutex must be set up before attach can fail, so it is
	 * valid here in both the normal-detach and attach-error paths.
	 */
	KASSERT(mtx_initialized(&sc->dc_mtx), ("dc mutex not initialized"));
	DC_LOCK(sc);

	ifp = &sc->arpcom.ac_if;

	/* These should only be active if attach succeeded */
	if (device_is_attached(dev)) {
		dc_stop(sc);
		ether_ifdetach(ifp);
	}
	/* Tear down the MII bus child before the generic bus detach. */
	if (sc->dc_miibus)
		device_delete_child(dev, sc->dc_miibus);
	bus_generic_detach(dev);

	/* Release the interrupt handler and bus resources, if present. */
	if (sc->dc_intrhand)
		bus_teardown_intr(dev, sc->dc_irq, sc->dc_intrhand);
	if (sc->dc_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->dc_irq);
	if (sc->dc_res)
		bus_release_resource(dev, DC_RES, DC_RID, sc->dc_res);

	/* Free the DMA'able memory, maps and tags allocated in attach. */
	if (sc->dc_cdata.dc_sbuf != NULL)
		bus_dmamem_free(sc->dc_stag, sc->dc_cdata.dc_sbuf, sc->dc_smap);
	if (sc->dc_ldata != NULL)
		bus_dmamem_free(sc->dc_ltag, sc->dc_ldata, sc->dc_lmap);
	for (i = 0; i < DC_TX_LIST_CNT; i++)
		bus_dmamap_destroy(sc->dc_mtag, sc->dc_cdata.dc_tx_map[i]);
	for (i = 0; i < DC_RX_LIST_CNT; i++)
		bus_dmamap_destroy(sc->dc_mtag, sc->dc_cdata.dc_rx_map[i]);
	bus_dmamap_destroy(sc->dc_mtag, sc->dc_sparemap);
	if (sc->dc_stag)
		bus_dma_tag_destroy(sc->dc_stag);
	if (sc->dc_mtag)
		bus_dma_tag_destroy(sc->dc_mtag);
	if (sc->dc_ltag)
		bus_dma_tag_destroy(sc->dc_ltag);

	/* Only allocated for PNIC chips; free() tolerates NULL. */
	free(sc->dc_pnic_rx_buf, M_DEVBUF);

	/* Walk and free the media info list built from the SROM. */
	while (sc->dc_mi != NULL) {
		m = sc->dc_mi->dc_next;
		free(sc->dc_mi, M_DEVBUF);
		sc->dc_mi = m;
	}
	free(sc->dc_srom, M_DEVBUF);

	DC_UNLOCK(sc);
	mtx_destroy(&sc->dc_mtx);

	return (0);
}

/*
 * Initialize the transmit descriptors.
 */
static int
dc_list_tx_init(struct dc_softc *sc)
{
	struct dc_chain_data *cd;
	struct dc_list_data *ld;
	int i, nexti;

	cd = &sc->dc_cdata;
	ld = sc->dc_ldata;
	/*
	 * Link each TX descriptor to its successor, wrapping the last
	 * one back to slot 0 so the list forms a closed ring, and clear
	 * the data/control words and software mbuf pointers.
	 */
	for (i = 0; i < DC_TX_LIST_CNT; i++) {
		if (i == DC_TX_LIST_CNT - 1)
			nexti = 0;
		else
			nexti = i + 1;
		ld->dc_tx_list[i].dc_next = htole32(DC_TXDESC(sc, nexti));
		cd->dc_tx_chain[i] = NULL;
		ld->dc_tx_list[i].dc_data = 0;
		ld->dc_tx_list[i].dc_ctl = 0;
	}

	cd->dc_tx_prod = cd->dc_tx_cons = cd->dc_tx_cnt = 0;
	/* Flush the cleared descriptors out to memory for the chip. */
	bus_dmamap_sync(sc->dc_ltag, sc->dc_lmap,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	return (0);
}


/*
 * Initialize the RX descriptors and allocate mbufs for them. Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.
 *
 * Returns 0 on success or ENOBUFS if an mbuf cluster could not be
 * allocated for some slot.
 */
static int
dc_list_rx_init(struct dc_softc *sc)
{
	struct dc_chain_data *cd;
	struct dc_list_data *ld;
	int i, nexti;

	cd = &sc->dc_cdata;
	ld = sc->dc_ldata;

	for (i = 0; i < DC_RX_LIST_CNT; i++) {
		/* Attach a fresh cluster and hand the slot to the chip. */
		if (dc_newbuf(sc, i, 1) != 0)
			return (ENOBUFS);
		if (i == DC_RX_LIST_CNT - 1)
			nexti = 0;
		else
			nexti = i + 1;
		ld->dc_rx_list[i].dc_next = htole32(DC_RXDESC(sc, nexti));
	}

	cd->dc_rx_prod = 0;
	/* Flush the ring to memory before the chip starts using it. */
	bus_dmamap_sync(sc->dc_ltag, sc->dc_lmap,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	return (0);
}

/*
 * Busdma load callback for dc_newbuf(): stores the bus address of the
 * (single) mapped segment into the RX descriptor selected by dc_rx_cur,
 * or latches a load failure into dc_rx_err for the caller to check.
 */
static void
dc_dma_map_rxbuf(arg, segs, nseg, mapsize, error)
	void *arg;
	bus_dma_segment_t *segs;
	int nseg;
	bus_size_t mapsize;
	int error;
{
	struct dc_softc *sc;
	struct dc_desc *c;

	sc = arg;
	c = &sc->dc_ldata->dc_rx_list[sc->dc_cdata.dc_rx_cur];
	if (error) {
		sc->dc_cdata.dc_rx_err = error;
		return;
	}

	KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
	sc->dc_cdata.dc_rx_err = 0;
	c->dc_data = htole32(segs->ds_addr);
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 * If 'alloc' is non-zero a new cluster is allocated and DMA-mapped;
 * otherwise the mbuf already attached to slot 'i' is recycled in place.
 */
static int
dc_newbuf(struct dc_softc *sc, int i, int alloc)
{
	struct mbuf *m_new;
	bus_dmamap_t tmp;
	int error;

	if (alloc) {
		m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m_new == NULL)
			return (ENOBUFS);
	} else {
		/* Reuse the existing mbuf; rewind its data pointer. */
		m_new = sc->dc_cdata.dc_rx_chain[i];
		m_new->m_data = m_new->m_ext.ext_buf;
	}
	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	/* Skip 8 bytes at the front, presumably to keep the payload
	 * 64-bit aligned -- confirm against the chip's buffer rules. */
	m_adj(m_new, sizeof(u_int64_t));

	/*
	 * If this is a PNIC chip, zero the buffer. This is part
	 * of the workaround for the receive bug in the 82c168 and
	 * 82c169 chips.
	 */
	if (sc->dc_flags & DC_PNIC_RX_BUG_WAR)
		bzero(mtod(m_new, char *), m_new->m_len);

	/* No need to remap the mbuf if we're reusing it. */
	if (alloc) {
		/* dc_dma_map_rxbuf() uses dc_rx_cur to find the slot. */
		sc->dc_cdata.dc_rx_cur = i;
		error = bus_dmamap_load_mbuf(sc->dc_mtag, sc->dc_sparemap,
		    m_new, dc_dma_map_rxbuf, sc, 0);
		if (error) {
			m_freem(m_new);
			return (error);
		}
		if (sc->dc_cdata.dc_rx_err != 0) {
			m_freem(m_new);
			return (sc->dc_cdata.dc_rx_err);
		}
		/*
		 * Swap the freshly loaded spare map into the ring slot
		 * and keep the slot's old map as the new spare, so the
		 * old map can be unloaded without losing the mapping.
		 */
		bus_dmamap_unload(sc->dc_mtag, sc->dc_cdata.dc_rx_map[i]);
		tmp = sc->dc_cdata.dc_rx_map[i];
		sc->dc_cdata.dc_rx_map[i] = sc->dc_sparemap;
		sc->dc_sparemap = tmp;
		sc->dc_cdata.dc_rx_chain[i] = m_new;
	}

	/* Hand the descriptor back to the chip (set OWN). */
	sc->dc_ldata->dc_rx_list[i].dc_ctl = htole32(DC_RXCTL_RLINK | DC_RXLEN);
	sc->dc_ldata->dc_rx_list[i].dc_status = htole32(DC_RXSTAT_OWN);
	bus_dmamap_sync(sc->dc_mtag, sc->dc_cdata.dc_rx_map[i],
	    BUS_DMASYNC_PREREAD);
	bus_dmamap_sync(sc->dc_ltag, sc->dc_lmap,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	return (0);
}

/*
 * Grrrrr.
 * The PNIC chip has a terrible bug in it that manifests itself during
 * periods of heavy activity.
The exact mode of failure is difficult to
 * pinpoint: sometimes it only happens in promiscuous mode, sometimes it
 * will happen on slow machines. The bug is that sometimes instead of
 * uploading one complete frame during reception, it uploads what looks
 * like the entire contents of its FIFO memory. The frame we want is at
 * the end of the whole mess, but we never know exactly how much data has
 * been uploaded, so salvaging the frame is hard.
 *
 * There is only one way to do it reliably, and it's disgusting.
 * Here's what we know:
 *
 * - We know there will always be somewhere between one and three extra
 *   descriptors uploaded.
 *
 * - We know the desired received frame will always be at the end of the
 *   total data upload.
 *
 * - We know the size of the desired received frame because it will be
 *   provided in the length field of the status word in the last descriptor.
 *
 * Here's what we do:
 *
 * - When we allocate buffers for the receive ring, we bzero() them.
 *   This means that we know that the buffer contents should be all
 *   zeros, except for data uploaded by the chip.
 *
 * - We also force the PNIC chip to upload frames that include the
 *   ethernet CRC at the end.
 *
 * - We gather all of the bogus frame data into a single buffer.
 *
 * - We then position a pointer at the end of this buffer and scan
 *   backwards until we encounter the first non-zero byte of data.
 *   This is the end of the received frame. We know we will encounter
 *   some data at the end of the frame because the CRC will always be
 *   there, so even if the sender transmits a packet of all zeros,
 *   we won't be fooled.
 *
 * - We know the size of the actual received frame, so we subtract
 *   that value from the current pointer location. This brings us
 *   to the start of the actual received packet.
 *
 * - We copy this into an mbuf and pass it on, along with the actual
 *   frame length.
 *
 * The performance hit is tremendous, but it beats dropping frames all
 * the time.
 */

#define DC_WHOLEFRAME	(DC_RXSTAT_FIRSTFRAG | DC_RXSTAT_LASTFRAG)
static void
dc_pnic_rx_bug_war(struct dc_softc *sc, int idx)
{
	struct dc_desc *cur_rx;
	struct dc_desc *c = NULL;
	struct mbuf *m = NULL;
	unsigned char *ptr;
	int i, total_len;
	u_int32_t rxstat = 0;

	/*
	 * dc_pnic_rx_bug_save was recorded by dc_rxeof() at the first
	 * fragment of the bogus upload; idx is the last fragment.
	 */
	i = sc->dc_pnic_rx_bug_save;
	cur_rx = &sc->dc_ldata->dc_rx_list[idx];
	ptr = sc->dc_pnic_rx_buf;
	bzero(ptr, DC_RXLEN * 5);

	/* Copy all the bytes from the bogus buffers. */
	while (1) {
		c = &sc->dc_ldata->dc_rx_list[i];
		rxstat = le32toh(c->dc_status);
		m = sc->dc_cdata.dc_rx_chain[i];
		bcopy(mtod(m, char *), ptr, DC_RXLEN);
		ptr += DC_RXLEN;
		/* If this is the last buffer, break out. */
		if (i == idx || rxstat & DC_RXSTAT_LASTFRAG)
			break;
		/* Re-arm the drained descriptor (reuses its mbuf). */
		dc_newbuf(sc, i, 0);
		DC_INC(i, DC_RX_LIST_CNT);
	}

	/* Find the length of the actual receive frame. */
	total_len = DC_RXBYTES(rxstat);

	/* Scan backwards until we hit a non-zero byte. */
	while (*ptr == 0x00)
		ptr--;

	/* Round off. */
	if ((uintptr_t)(ptr) & 0x3)
		ptr -= 1;

	/* Now find the start of the frame. */
	ptr -= total_len;
	if (ptr < sc->dc_pnic_rx_buf)
		ptr = sc->dc_pnic_rx_buf;

	/*
	 * Now copy the salvaged frame to the last mbuf and fake up
	 * the status word to make it look like a successful
	 * frame reception.
	 */
	dc_newbuf(sc, i, 0);
	bcopy(ptr, mtod(m, char *), total_len);
	cur_rx->dc_status = htole32(rxstat | DC_RXSTAT_FIRSTFRAG);
}

/*
 * This routine searches the RX ring for dirty descriptors in the
 * event that the rxeof routine falls out of sync with the chip's
 * current descriptor pointer. This may happen sometimes as a result
 * of a "no RX buffer available" condition that happens when the chip
 * consumes all of the RX buffers before the driver has a chance to
 * process the RX ring. This routine may need to be called more than
 * once to bring the driver back in sync with the chip, however we
 * should still be getting RX DONE interrupts to drive the search
 * for new packets in the RX ring, so we should catch up eventually.
 *
 * Returns 0 if the ring was genuinely empty, or EAGAIN after moving
 * dc_rx_prod forward so the caller can re-run dc_rxeof().
 */
static int
dc_rx_resync(struct dc_softc *sc)
{
	struct dc_desc *cur_rx;
	int i, pos;

	pos = sc->dc_cdata.dc_rx_prod;

	/* Find the first descriptor the host owns (OWN bit clear). */
	for (i = 0; i < DC_RX_LIST_CNT; i++) {
		cur_rx = &sc->dc_ldata->dc_rx_list[pos];
		if (!(le32toh(cur_rx->dc_status) & DC_RXSTAT_OWN))
			break;
		DC_INC(pos, DC_RX_LIST_CNT);
	}

	/* If the ring really is empty, then just return. */
	if (i == DC_RX_LIST_CNT)
		return (0);

	/* We've fallen behind the chip: catch it. */
	sc->dc_cdata.dc_rx_prod = pos;

	return (EAGAIN);
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
static void
dc_rxeof(struct dc_softc *sc)
{
	struct mbuf *m;
	struct ifnet *ifp;
	struct dc_desc *cur_rx;
	int i, total_len = 0;
	u_int32_t rxstat;

	DC_LOCK_ASSERT(sc);

	ifp = &sc->arpcom.ac_if;
	i = sc->dc_cdata.dc_rx_prod;

	/* Pull the chip's latest descriptor status words into memory. */
	bus_dmamap_sync(sc->dc_ltag, sc->dc_lmap, BUS_DMASYNC_POSTREAD);
	/* Walk the ring until we hit a descriptor the chip still owns. */
	while (!(le32toh(sc->dc_ldata->dc_rx_list[i].dc_status) &
	    DC_RXSTAT_OWN)) {
#ifdef DEVICE_POLLING
		/* In polling mode, honor the per-call packet budget. */
		if (ifp->if_flags & IFF_POLLING) {
			if (sc->rxcycles <= 0)
				break;
			sc->rxcycles--;
		}
#endif
		cur_rx = &sc->dc_ldata->dc_rx_list[i];
		rxstat = le32toh(cur_rx->dc_status);
		m = sc->dc_cdata.dc_rx_chain[i];
		bus_dmamap_sync(sc->dc_mtag, sc->dc_cdata.dc_rx_map[i],
		    BUS_DMASYNC_POSTREAD);
		total_len = DC_RXBYTES(rxstat);

		if (sc->dc_flags & DC_PNIC_RX_BUG_WAR) {
			/*
			 * A frame that is not marked both FIRSTFRAG and
			 * LASTFRAG is a symptom of the PNIC FIFO-dump bug:
			 * remember where the mess started, keep collecting
			 * fragments until the last one, then let
			 * dc_pnic_rx_bug_war() salvage the real frame.
			 */
			if ((rxstat & DC_WHOLEFRAME) != DC_WHOLEFRAME) {
				if (rxstat & DC_RXSTAT_FIRSTFRAG)
					sc->dc_pnic_rx_bug_save = i;
				if ((rxstat & DC_RXSTAT_LASTFRAG) == 0) {
					DC_INC(i, DC_RX_LIST_CNT);
					continue;
				}
				dc_pnic_rx_bug_war(sc, i);
				rxstat = le32toh(cur_rx->dc_status);
				total_len = DC_RXBYTES(rxstat);
			}
		}

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring. However, don't report long
		 * frames as errors since they could be vlans.
		 */
		if ((rxstat & DC_RXSTAT_RXERR)) {
			if (!(rxstat & DC_RXSTAT_GIANT) ||
			    (rxstat & (DC_RXSTAT_CRCERR | DC_RXSTAT_DRIBBLE |
			    DC_RXSTAT_MIIERE | DC_RXSTAT_COLLSEEN |
			    DC_RXSTAT_RUNT | DC_RXSTAT_DE))) {
				ifp->if_ierrors++;
				if (rxstat & DC_RXSTAT_COLLSEEN)
					ifp->if_collisions++;
				dc_newbuf(sc, i, 0);
				if (rxstat & DC_RXSTAT_CRCERR) {
					DC_INC(i, DC_RX_LIST_CNT);
					continue;
				} else {
					/* Non-CRC error: reinit the chip. */
					dc_init(sc);
					return;
				}
			}
		}

		/* No errors; receive the packet. */
		total_len -= ETHER_CRC_LEN;
#ifdef __i386__
		/*
		 * On the x86 we do not have alignment problems, so try to
		 * allocate a new buffer for the receive ring, and pass up
		 * the one where the packet is already, saving the expensive
		 * copy done in m_devget().
		 * If we are on an architecture with alignment problems, or
		 * if the allocation fails, then use m_devget and leave the
		 * existing buffer in the receive ring.
		 */
		if (dc_quick && dc_newbuf(sc, i, 1) == 0) {
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = total_len;
			DC_INC(i, DC_RX_LIST_CNT);
		} else
#endif
		{
			struct mbuf *m0;

			m0 = m_devget(mtod(m, char *), total_len,
			    ETHER_ALIGN, ifp, NULL);
			dc_newbuf(sc, i, 0);
			DC_INC(i, DC_RX_LIST_CNT);
			if (m0 == NULL) {
				ifp->if_ierrors++;
				continue;
			}
			m = m0;
		}

		ifp->if_ipackets++;
		/* Drop the driver lock across the call into the stack. */
		DC_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		DC_LOCK(sc);
	}

	sc->dc_cdata.dc_rx_prod = i;
}

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
2852 */ 2853 2854static void 2855dc_txeof(struct dc_softc *sc) 2856{ 2857 struct dc_desc *cur_tx = NULL; 2858 struct ifnet *ifp; 2859 int idx; 2860 u_int32_t ctl, txstat; 2861 2862 ifp = &sc->arpcom.ac_if; 2863 2864 /* 2865 * Go through our tx list and free mbufs for those 2866 * frames that have been transmitted. 2867 */ 2868 bus_dmamap_sync(sc->dc_ltag, sc->dc_lmap, BUS_DMASYNC_POSTREAD); 2869 idx = sc->dc_cdata.dc_tx_cons; 2870 while (idx != sc->dc_cdata.dc_tx_prod) { 2871 2872 cur_tx = &sc->dc_ldata->dc_tx_list[idx]; 2873 txstat = le32toh(cur_tx->dc_status); 2874 ctl = le32toh(cur_tx->dc_ctl); 2875 2876 if (txstat & DC_TXSTAT_OWN) 2877 break; 2878 2879 if (!(ctl & DC_TXCTL_LASTFRAG) || ctl & DC_TXCTL_SETUP) { 2880 if (ctl & DC_TXCTL_SETUP) { 2881 /* 2882 * Yes, the PNIC is so brain damaged 2883 * that it will sometimes generate a TX 2884 * underrun error while DMAing the RX 2885 * filter setup frame. If we detect this, 2886 * we have to send the setup frame again, 2887 * or else the filter won't be programmed 2888 * correctly. 2889 */ 2890 if (DC_IS_PNIC(sc)) { 2891 if (txstat & DC_TXSTAT_ERRSUM) 2892 dc_setfilt(sc); 2893 } 2894 sc->dc_cdata.dc_tx_chain[idx] = NULL; 2895 } 2896 sc->dc_cdata.dc_tx_cnt--; 2897 DC_INC(idx, DC_TX_LIST_CNT); 2898 continue; 2899 } 2900 2901 if (DC_IS_XIRCOM(sc) || DC_IS_CONEXANT(sc)) { 2902 /* 2903 * XXX: Why does my Xircom taunt me so? 2904 * For some reason it likes setting the CARRLOST flag 2905 * even when the carrier is there. wtf?!? 2906 * Who knows, but Conexant chips have the 2907 * same problem. Maybe they took lessons 2908 * from Xircom. 
2909 */ 2910 if (/*sc->dc_type == DC_TYPE_21143 &&*/ 2911 sc->dc_pmode == DC_PMODE_MII && 2912 ((txstat & 0xFFFF) & ~(DC_TXSTAT_ERRSUM | 2913 DC_TXSTAT_NOCARRIER))) 2914 txstat &= ~DC_TXSTAT_ERRSUM; 2915 } else { 2916 if (/*sc->dc_type == DC_TYPE_21143 &&*/ 2917 sc->dc_pmode == DC_PMODE_MII && 2918 ((txstat & 0xFFFF) & ~(DC_TXSTAT_ERRSUM | 2919 DC_TXSTAT_NOCARRIER | DC_TXSTAT_CARRLOST))) 2920 txstat &= ~DC_TXSTAT_ERRSUM; 2921 } 2922 2923 if (txstat & DC_TXSTAT_ERRSUM) { 2924 ifp->if_oerrors++; 2925 if (txstat & DC_TXSTAT_EXCESSCOLL) 2926 ifp->if_collisions++; 2927 if (txstat & DC_TXSTAT_LATECOLL) 2928 ifp->if_collisions++; 2929 if (!(txstat & DC_TXSTAT_UNDERRUN)) { 2930 dc_init(sc); 2931 return; 2932 } 2933 } 2934 2935 ifp->if_collisions += (txstat & DC_TXSTAT_COLLCNT) >> 3; 2936 2937 ifp->if_opackets++; 2938 if (sc->dc_cdata.dc_tx_chain[idx] != NULL) { 2939 bus_dmamap_sync(sc->dc_mtag, 2940 sc->dc_cdata.dc_tx_map[idx], 2941 BUS_DMASYNC_POSTWRITE); 2942 bus_dmamap_unload(sc->dc_mtag, 2943 sc->dc_cdata.dc_tx_map[idx]); 2944 m_freem(sc->dc_cdata.dc_tx_chain[idx]); 2945 sc->dc_cdata.dc_tx_chain[idx] = NULL; 2946 } 2947 2948 sc->dc_cdata.dc_tx_cnt--; 2949 DC_INC(idx, DC_TX_LIST_CNT); 2950 } 2951 2952 if (idx != sc->dc_cdata.dc_tx_cons) { 2953 /* Some buffers have been freed. */ 2954 sc->dc_cdata.dc_tx_cons = idx; 2955 ifp->if_flags &= ~IFF_OACTIVE; 2956 } 2957 ifp->if_timer = (sc->dc_cdata.dc_tx_cnt == 0) ? 
0 : 5; 2958} 2959 2960static void 2961dc_tick(void *xsc) 2962{ 2963 struct dc_softc *sc; 2964 struct mii_data *mii; 2965 struct ifnet *ifp; 2966 u_int32_t r; 2967 2968 sc = xsc; 2969 DC_LOCK(sc); 2970 ifp = &sc->arpcom.ac_if; 2971 mii = device_get_softc(sc->dc_miibus); 2972 2973 if (sc->dc_flags & DC_REDUCED_MII_POLL) { 2974 if (sc->dc_flags & DC_21143_NWAY) { 2975 r = CSR_READ_4(sc, DC_10BTSTAT); 2976 if (IFM_SUBTYPE(mii->mii_media_active) == 2977 IFM_100_TX && (r & DC_TSTAT_LS100)) { 2978 sc->dc_link = 0; 2979 mii_mediachg(mii); 2980 } 2981 if (IFM_SUBTYPE(mii->mii_media_active) == 2982 IFM_10_T && (r & DC_TSTAT_LS10)) { 2983 sc->dc_link = 0; 2984 mii_mediachg(mii); 2985 } 2986 if (sc->dc_link == 0) 2987 mii_tick(mii); 2988 } else { 2989 r = CSR_READ_4(sc, DC_ISR); 2990 if ((r & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT && 2991 sc->dc_cdata.dc_tx_cnt == 0) { 2992 mii_tick(mii); 2993 if (!(mii->mii_media_status & IFM_ACTIVE)) 2994 sc->dc_link = 0; 2995 } 2996 } 2997 } else 2998 mii_tick(mii); 2999 3000 /* 3001 * When the init routine completes, we expect to be able to send 3002 * packets right away, and in fact the network code will send a 3003 * gratuitous ARP the moment the init routine marks the interface 3004 * as running. However, even though the MAC may have been initialized, 3005 * there may be a delay of a few seconds before the PHY completes 3006 * autonegotiation and the link is brought up. Any transmissions 3007 * made during that delay will be lost. Dealing with this is tricky: 3008 * we can't just pause in the init routine while waiting for the 3009 * PHY to come ready since that would bring the whole system to 3010 * a screeching halt for several seconds. 3011 * 3012 * What we do here is prevent the TX start routine from sending 3013 * any packets until a link has been established. After the 3014 * interface has been initialized, the tick routine will poll 3015 * the state of the PHY until the IFM_ACTIVE flag is set. 
Until 3016 * that time, packets will stay in the send queue, and once the 3017 * link comes up, they will be flushed out to the wire. 3018 */ 3019 if (!sc->dc_link && mii->mii_media_status & IFM_ACTIVE && 3020 IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { 3021 sc->dc_link++; 3022 if (ifp->if_snd.ifq_head != NULL) 3023 dc_start(ifp); 3024 } 3025 3026 if (sc->dc_flags & DC_21143_NWAY && !sc->dc_link) 3027 callout_reset(&sc->dc_stat_ch, hz/10, dc_tick, sc); 3028 else 3029 callout_reset(&sc->dc_stat_ch, hz, dc_tick, sc); 3030 3031 DC_UNLOCK(sc); 3032} 3033 3034/* 3035 * A transmit underrun has occurred. Back off the transmit threshold, 3036 * or switch to store and forward mode if we have to. 3037 */ 3038static void 3039dc_tx_underrun(struct dc_softc *sc) 3040{ 3041 u_int32_t isr; 3042 int i; 3043 3044 if (DC_IS_DAVICOM(sc)) 3045 dc_init(sc); 3046 3047 if (DC_IS_INTEL(sc)) { 3048 /* 3049 * The real 21143 requires that the transmitter be idle 3050 * in order to change the transmit threshold or store 3051 * and forward state. 
3052 */ 3053 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON); 3054 3055 for (i = 0; i < DC_TIMEOUT; i++) { 3056 isr = CSR_READ_4(sc, DC_ISR); 3057 if (isr & DC_ISR_TX_IDLE) 3058 break; 3059 DELAY(10); 3060 } 3061 if (i == DC_TIMEOUT) { 3062 printf("dc%d: failed to force tx to idle state\n", 3063 sc->dc_unit); 3064 dc_init(sc); 3065 } 3066 } 3067 3068 printf("dc%d: TX underrun -- ", sc->dc_unit); 3069 sc->dc_txthresh += DC_TXTHRESH_INC; 3070 if (sc->dc_txthresh > DC_TXTHRESH_MAX) { 3071 printf("using store and forward mode\n"); 3072 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD); 3073 } else { 3074 printf("increasing TX threshold\n"); 3075 DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_THRESH); 3076 DC_SETBIT(sc, DC_NETCFG, sc->dc_txthresh); 3077 } 3078 3079 if (DC_IS_INTEL(sc)) 3080 DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON); 3081} 3082 3083#ifdef DEVICE_POLLING 3084static poll_handler_t dc_poll; 3085 3086static void 3087dc_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) 3088{ 3089 struct dc_softc *sc = ifp->if_softc; 3090 3091 if (!(ifp->if_capenable & IFCAP_POLLING)) { 3092 ether_poll_deregister(ifp); 3093 cmd = POLL_DEREGISTER; 3094 } 3095 if (cmd == POLL_DEREGISTER) { /* final call, enable interrupts */ 3096 /* Re-enable interrupts. 
 */
		CSR_WRITE_4(sc, DC_IMR, DC_INTRS);
		return;
	}
	DC_LOCK(sc);
	/* rxcycles bounds how much RX work dc_rxeof may do this pass. */
	sc->rxcycles = count;
	dc_rxeof(sc);
	dc_txeof(sc);
	if (ifp->if_snd.ifq_head != NULL && !(ifp->if_flags & IFF_OACTIVE))
		dc_start(ifp);

	if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */
		u_int32_t status;

		status = CSR_READ_4(sc, DC_ISR);
		status &= (DC_ISR_RX_WATDOGTIMEO | DC_ISR_RX_NOBUF |
		    DC_ISR_TX_NOBUF | DC_ISR_TX_IDLE | DC_ISR_TX_UNDERRUN |
		    DC_ISR_BUS_ERR);
		if (!status) {
			DC_UNLOCK(sc);
			return;
		}
		/* ack what we have */
		CSR_WRITE_4(sc, DC_ISR, status);

		if (status & (DC_ISR_RX_WATDOGTIMEO | DC_ISR_RX_NOBUF)) {
			/* Missed-frame/overflow counters live in one CSR. */
			u_int32_t r = CSR_READ_4(sc, DC_FRAMESDISCARDED);
			ifp->if_ierrors += (r & 0xffff) + ((r >> 17) & 0x7ff);

			if (dc_rx_resync(sc))
				dc_rxeof(sc);
		}
		/* restart transmit unit if necessary */
		if (status & DC_ISR_TX_IDLE && sc->dc_cdata.dc_tx_cnt)
			CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);

		if (status & DC_ISR_TX_UNDERRUN)
			dc_tx_underrun(sc);

		if (status & DC_ISR_BUS_ERR) {
			printf("dc_poll: dc%d bus error\n", sc->dc_unit);
			dc_reset(sc);
			dc_init(sc);
		}
	}
	DC_UNLOCK(sc);
}
#endif /* DEVICE_POLLING */

/*
 * Interrupt handler: repeatedly read, acknowledge, and service ISR
 * events until no interesting status bits remain set.
 */
static void
dc_intr(void *arg)
{
	struct dc_softc *sc;
	struct ifnet *ifp;
	u_int32_t status;

	sc = arg;

	if (sc->suspended)
		return;

	/* Cheap pre-check (before taking the lock) for shared interrupts. */
	if ((CSR_READ_4(sc, DC_ISR) & DC_INTRS) == 0)
		return;

	DC_LOCK(sc);
	ifp = &sc->arpcom.ac_if;
#ifdef DEVICE_POLLING
	if (ifp->if_flags & IFF_POLLING)
		goto done;
	if ((ifp->if_capenable & IFCAP_POLLING) &&
	    ether_poll_register(dc_poll, ifp)) { /* ok, disable interrupts */
		CSR_WRITE_4(sc, DC_IMR, 0x00000000);
		goto done;
	}
#endif

	/* Suppress unwanted interrupts */
	if (!(ifp->if_flags & IFF_UP)) {
		if (CSR_READ_4(sc, DC_ISR) & DC_INTRS)
			dc_stop(sc);
		DC_UNLOCK(sc);
		return;
	}

	/* Disable interrupts. */
	CSR_WRITE_4(sc, DC_IMR, 0x00000000);

	/* 0xFFFFFFFF reads back when the card has been removed. */
	while (((status = CSR_READ_4(sc, DC_ISR)) & DC_INTRS)
	    && status != 0xFFFFFFFF) {

		CSR_WRITE_4(sc, DC_ISR, status);

		if (status & DC_ISR_RX_OK) {
			int curpkts;
			curpkts = ifp->if_ipackets;
			dc_rxeof(sc);
			/* No packets consumed: RX ring lost sync, resync. */
			if (curpkts == ifp->if_ipackets) {
				while (dc_rx_resync(sc))
					dc_rxeof(sc);
			}
		}

		if (status & (DC_ISR_TX_OK | DC_ISR_TX_NOBUF))
			dc_txeof(sc);

		if (status & DC_ISR_TX_IDLE) {
			dc_txeof(sc);
			/* Descriptors still pending: restart the TX engine. */
			if (sc->dc_cdata.dc_tx_cnt) {
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);
				CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);
			}
		}

		if (status & DC_ISR_TX_UNDERRUN)
			dc_tx_underrun(sc);

		if ((status & DC_ISR_RX_WATDOGTIMEO)
		    || (status & DC_ISR_RX_NOBUF)) {
			int curpkts;
			curpkts = ifp->if_ipackets;
			dc_rxeof(sc);
			if (curpkts == ifp->if_ipackets) {
				while (dc_rx_resync(sc))
					dc_rxeof(sc);
			}
		}

		if (status & DC_ISR_BUS_ERR) {
			dc_reset(sc);
			dc_init(sc);
		}
	}

	/* Re-enable interrupts.
 */
	CSR_WRITE_4(sc, DC_IMR, DC_INTRS);

	if (ifp->if_snd.ifq_head != NULL)
		dc_start(ifp);

#ifdef DEVICE_POLLING
done:
#endif

	DC_UNLOCK(sc);
}

/*
 * bus_dma callback: translate the DMA segments of one mbuf chain into
 * TX descriptors starting at dc_tx_prod.  Callbacks cannot return a
 * value, so failures are reported through sc->dc_cdata.dc_tx_err.
 */
static void
dc_dma_map_txbuf(arg, segs, nseg, mapsize, error)
	void *arg;
	bus_dma_segment_t *segs;
	int nseg;
	bus_size_t mapsize;
	int error;
{
	struct dc_softc *sc;
	struct dc_desc *f;
	int cur, first, frag, i;

	sc = arg;
	if (error) {
		sc->dc_cdata.dc_tx_err = error;
		return;
	}

	first = cur = frag = sc->dc_cdata.dc_tx_prod;
	for (i = 0; i < nseg; i++) {
		/*
		 * ADMtek workaround: a frame may not wrap across the end
		 * of the descriptor ring; bail out and report ENOBUFS.
		 */
		if ((sc->dc_flags & DC_TX_ADMTEK_WAR) &&
		    (frag == (DC_TX_LIST_CNT - 1)) &&
		    (first != sc->dc_cdata.dc_tx_first)) {
			bus_dmamap_unload(sc->dc_mtag,
			    sc->dc_cdata.dc_tx_map[first]);
			sc->dc_cdata.dc_tx_err = ENOBUFS;
			return;
		}

		f = &sc->dc_ldata->dc_tx_list[frag];
		f->dc_ctl = htole32(DC_TXCTL_TLINK | segs[i].ds_len);
		if (i == 0) {
			/* First descriptor: OWN is deliberately left clear. */
			f->dc_status = 0;
			f->dc_ctl |= htole32(DC_TXCTL_FIRSTFRAG);
		} else
			f->dc_status = htole32(DC_TXSTAT_OWN);
		f->dc_data = htole32(segs[i].ds_addr);
		cur = frag;
		DC_INC(frag, DC_TX_LIST_CNT);
	}

	sc->dc_cdata.dc_tx_err = 0;
	sc->dc_cdata.dc_tx_prod = frag;
	sc->dc_cdata.dc_tx_cnt += nseg;
	sc->dc_ldata->dc_tx_list[cur].dc_ctl |= htole32(DC_TXCTL_LASTFRAG);
	sc->dc_cdata.dc_tx_chain[cur] = sc->dc_cdata.dc_tx_mapping;
	if (sc->dc_flags & DC_TX_INTR_FIRSTFRAG)
		sc->dc_ldata->dc_tx_list[first].dc_ctl |=
		    htole32(DC_TXCTL_FINT);
	if (sc->dc_flags & DC_TX_INTR_ALWAYS)
		sc->dc_ldata->dc_tx_list[cur].dc_ctl |= htole32(DC_TXCTL_FINT);
	if (sc->dc_flags & DC_TX_USE_TX_INTR && sc->dc_cdata.dc_tx_cnt > 64)
		sc->dc_ldata->dc_tx_list[cur].dc_ctl |= htole32(DC_TXCTL_FINT);
	/*
	 * Hand the whole chain to the chip only now, by setting OWN on
	 * the first descriptor last, so it never sees a partial frame.
	 */
	sc->dc_ldata->dc_tx_list[first].dc_status = htole32(DC_TXSTAT_OWN);
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
static int
dc_encap(struct dc_softc *sc, struct mbuf **m_head)
{
	struct mbuf *m;
	int error, idx, chainlen = 0;

	/*
	 * If there's no way we can send any packets, return now.
	 */
	if (DC_TX_LIST_CNT - sc->dc_cdata.dc_tx_cnt < 6)
		return (ENOBUFS);

	/*
	 * Count the number of frags in this chain to see if
	 * we need to m_defrag.  Since the descriptor list is shared
	 * by all packets, we'll m_defrag long chains so that they
	 * do not use up the entire list, even if they would fit.
	 */
	for (m = *m_head; m != NULL; m = m->m_next)
		chainlen++;

	if ((chainlen > DC_TX_LIST_CNT / 4) ||
	    ((DC_TX_LIST_CNT - (chainlen + sc->dc_cdata.dc_tx_cnt)) < 6)) {
		m = m_defrag(*m_head, M_DONTWAIT);
		if (m == NULL)
			return (ENOBUFS);
		*m_head = m;
	}

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers.  Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	idx = sc->dc_cdata.dc_tx_prod;
	sc->dc_cdata.dc_tx_mapping = *m_head;
	/* dc_dma_map_txbuf runs synchronously and fills the descriptors. */
	error = bus_dmamap_load_mbuf(sc->dc_mtag, sc->dc_cdata.dc_tx_map[idx],
	    *m_head, dc_dma_map_txbuf, sc, 0);
	if (error)
		return (error);
	if (sc->dc_cdata.dc_tx_err != 0)
		return (sc->dc_cdata.dc_tx_err);
	bus_dmamap_sync(sc->dc_mtag, sc->dc_cdata.dc_tx_map[idx],
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->dc_ltag, sc->dc_lmap,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	return (0);
}

/*
 * Main transmit routine.  To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists.  We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */
static void
dc_start(struct ifnet *ifp)
{
	struct dc_softc *sc;
	struct mbuf *m_head = NULL, *m;
	int idx;

	sc = ifp->if_softc;

	DC_LOCK(sc);

	/*
	 * No link yet: let a few packets accumulate rather than draining
	 * the queue into a dead wire.  NOTE(review): the threshold of 10
	 * appears arbitrary/historical — confirm against dc_tick's flush.
	 */
	if (!sc->dc_link && ifp->if_snd.ifq_len < 10) {
		DC_UNLOCK(sc);
		return;
	}

	if (ifp->if_flags & IFF_OACTIVE) {
		DC_UNLOCK(sc);
		return;
	}

	idx = sc->dc_cdata.dc_tx_first = sc->dc_cdata.dc_tx_prod;

	/* Fill descriptors until the ring or the send queue is exhausted. */
	while (sc->dc_cdata.dc_tx_chain[idx] == NULL) {
		IF_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/* Chips needing coalesced/aligned buffers get a defrag. */
		if (sc->dc_flags & DC_TX_COALESCE &&
		    (m_head->m_next != NULL ||
		    sc->dc_flags & DC_TX_ALIGN)) {
			m = m_defrag(m_head, M_DONTWAIT);
			if (m == NULL) {
				IF_PREPEND(&ifp->if_snd, m_head);
				ifp->if_flags |= IFF_OACTIVE;
				break;
			} else {
				m_head = m;
			}
		}

		if (dc_encap(sc, &m_head)) {
			/* Ring full: requeue the packet and stall the queue. */
			IF_PREPEND(&ifp->if_snd, m_head);
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		idx = sc->dc_cdata.dc_tx_prod;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, m_head);

		/* Some chips can only queue one frame at a time. */
		if (sc->dc_flags & DC_TX_ONE) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
	}

	/* Transmit */
	if (!(sc->dc_flags & DC_TX_POLL))
		CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;

	DC_UNLOCK(sc);
}

/*
 * Stop, reset, and reprogram the chip, then bring the interface up.
 * Also the dc_watchdog/ioctl recovery path.
 */
static void
dc_init(void *xsc)
{
	struct dc_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mii_data *mii;

	DC_LOCK(sc);

	mii = device_get_softc(sc->dc_miibus);

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	dc_stop(sc);
	dc_reset(sc);

	/*
	 * Set cache alignment and burst length.
	 */
	if (DC_IS_ASIX(sc) || DC_IS_DAVICOM(sc))
		CSR_WRITE_4(sc, DC_BUSCTL, 0);
	else
		CSR_WRITE_4(sc, DC_BUSCTL, DC_BUSCTL_MRME | DC_BUSCTL_MRLE);
	/*
	 * Evenly share the bus between receive and transmit process.
	 */
	if (DC_IS_INTEL(sc))
		DC_SETBIT(sc, DC_BUSCTL, DC_BUSCTL_ARBITRATION);
	if (DC_IS_DAVICOM(sc) || DC_IS_INTEL(sc)) {
		DC_SETBIT(sc, DC_BUSCTL, DC_BURSTLEN_USECA);
	} else {
		DC_SETBIT(sc, DC_BUSCTL, DC_BURSTLEN_16LONG);
	}
	if (sc->dc_flags & DC_TX_POLL)
		DC_SETBIT(sc, DC_BUSCTL, DC_TXPOLL_1);
	switch(sc->dc_cachesize) {
	case 32:
		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_32LONG);
		break;
	case 16:
		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_16LONG);
		break;
	case 8:
		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_8LONG);
		break;
	case 0:
	default:
		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_NONE);
		break;
	}

	if (sc->dc_flags & DC_TX_STORENFWD)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
	else {
		if (sc->dc_txthresh > DC_TXTHRESH_MAX) {
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
		} else {
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
			DC_SETBIT(sc, DC_NETCFG, sc->dc_txthresh);
		}
	}

	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_NO_RXCRC);
	DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_BACKOFF);

	if (DC_IS_MACRONIX(sc) || DC_IS_PNICII(sc)) {
		/*
		 * The app notes for the 98713 and 98715A say that
		 * in order to have the chips operate properly, a magic
		 * number must be written to CSR16.  Macronix does not
		 * document the meaning of these bits so there's no way
		 * to know exactly what they do.  The 98713 has a magic
		 * number all its own; the rest all use a different one.
 */
		DC_CLRBIT(sc, DC_MX_MAGICPACKET, 0xFFFF0000);
		if (sc->dc_type == DC_TYPE_98713)
			DC_SETBIT(sc, DC_MX_MAGICPACKET, DC_MX_MAGIC_98713);
		else
			DC_SETBIT(sc, DC_MX_MAGICPACKET, DC_MX_MAGIC_98715);
	}

	if (DC_IS_XIRCOM(sc)) {
		/*
		 * setup General Purpose Port mode and data so the tulip
		 * can talk to the MII.
		 */
		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_WRITE_EN | DC_SIAGP_INT1_EN |
		    DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
		DELAY(10);
		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_INT1_EN |
		    DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
		DELAY(10);
	}

	DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_THRESH);
	DC_SETBIT(sc, DC_NETCFG, DC_TXTHRESH_MIN);

	/* Init circular RX list. */
	if (dc_list_rx_init(sc) == ENOBUFS) {
		printf("dc%d: initialization failed: no "
		    "memory for rx buffers\n", sc->dc_unit);
		dc_stop(sc);
		DC_UNLOCK(sc);
		return;
	}

	/*
	 * Init TX descriptors.
	 */
	dc_list_tx_init(sc);

	/*
	 * Load the address of the RX list.
	 */
	CSR_WRITE_4(sc, DC_RXADDR, DC_RXDESC(sc, 0));
	CSR_WRITE_4(sc, DC_TXADDR, DC_TXDESC(sc, 0));

	/*
	 * Enable interrupts.
	 */
#ifdef DEVICE_POLLING
	/*
	 * ... but only if we are not polling, and make sure they are off in
	 * the case of polling.  Some cards (e.g. fxp) turn interrupts on
	 * after a reset.
	 */
	if (ifp->if_flags & IFF_POLLING)
		CSR_WRITE_4(sc, DC_IMR, 0x00000000);
	else
#endif
	/* NB: when DEVICE_POLLING is defined this is the "else" body. */
	CSR_WRITE_4(sc, DC_IMR, DC_INTRS);
	CSR_WRITE_4(sc, DC_ISR, 0xFFFFFFFF);

	/* Enable transmitter. */
	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);

	/*
	 * If this is an Intel 21143 and we're not using the
	 * MII port, program the LED control pins so we get
	 * link and activity indications.
	 */
	if (sc->dc_flags & DC_TULIP_LEDS) {
		CSR_WRITE_4(sc, DC_WATCHDOG,
		    DC_WDOG_CTLWREN | DC_WDOG_LINK | DC_WDOG_ACTIVITY);
		CSR_WRITE_4(sc, DC_WATCHDOG, 0);
	}

	/*
	 * Load the RX/multicast filter.  We do this sort of late
	 * because the filter programming scheme on the 21143 and
	 * some clones requires DMAing a setup frame via the TX
	 * engine, and we need the transmitter enabled for that.
	 */
	dc_setfilt(sc);

	/* Enable receiver. */
	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ON);
	CSR_WRITE_4(sc, DC_RXSTART, 0xFFFFFFFF);

	mii_mediachg(mii);
	dc_setcfg(sc, sc->dc_if_media);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Don't start the ticker if this is a homePNA link. */
	if (IFM_SUBTYPE(mii->mii_media.ifm_media) == IFM_HPNA_1)
		sc->dc_link = 1;
	else {
		if (sc->dc_flags & DC_21143_NWAY)
			callout_reset(&sc->dc_stat_ch, hz/10, dc_tick, sc);
		else
			callout_reset(&sc->dc_stat_ch, hz, dc_tick, sc);
	}

#ifdef SRM_MEDIA
	/* Apply media remembered from the SRM console (Alpha), once. */
	if(sc->dc_srm_media) {
		struct ifreq ifr;

		ifr.ifr_media = sc->dc_srm_media;
		ifmedia_ioctl(ifp, &ifr, &mii->mii_media, SIOCSIFMEDIA);
		sc->dc_srm_media = 0;
	}
#endif
	DC_UNLOCK(sc);
}

/*
 * Set media options.
 */
static int
dc_ifmedia_upd(struct ifnet *ifp)
{
	struct dc_softc *sc;
	struct mii_data *mii;
	struct ifmedia *ifm;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->dc_miibus);
	mii_mediachg(mii);
	ifm = &mii->mii_media;

	/* Davicom HomePNA is configured directly; others renegotiate. */
	if (DC_IS_DAVICOM(sc) &&
	    IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1)
		dc_setcfg(sc, ifm->ifm_media);
	else
		sc->dc_link = 0;

	return (0);
}

/*
 * Report current media status.
 */
static void
dc_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct dc_softc *sc;
	struct mii_data *mii;
	struct ifmedia *ifm;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->dc_miibus);
	mii_pollstat(mii);
	ifm = &mii->mii_media;
	/* Davicom HomePNA has no MII status to report; echo the media. */
	if (DC_IS_DAVICOM(sc)) {
		if (IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) {
			ifmr->ifm_active = ifm->ifm_media;
			ifmr->ifm_status = 0;
			return;
		}
	}
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

/*
 * Interface ioctl handler: flags, multicast filter, media, and
 * capability changes; everything else goes to ether_ioctl().
 */
static int
dc_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct dc_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int error = 0;

	DC_LOCK(sc);

	switch (command) {
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/* Only reprogram the filter if PROMISC/ALLMULTI changed. */
			int need_setfilt = (ifp->if_flags ^ sc->dc_if_flags) &
			    (IFF_PROMISC | IFF_ALLMULTI);

			if (ifp->if_flags & IFF_RUNNING) {
				if (need_setfilt)
					dc_setfilt(sc);
			} else {
				sc->dc_txthresh = 0;
				dc_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				dc_stop(sc);
		}
		sc->dc_if_flags = ifp->if_flags;
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		dc_setfilt(sc);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->dc_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
#ifdef SRM_MEDIA
		/* Explicit media selection overrides the SRM default. */
		if (sc->dc_srm_media)
			sc->dc_srm_media = 0;
#endif
		break;
	case SIOCSIFCAP:
		ifp->if_capenable &= ~IFCAP_POLLING;
		ifp->if_capenable |= ifr->ifr_reqcap & IFCAP_POLLING;
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	DC_UNLOCK(sc);

	return (error);
}

/*
 * TX watchdog: the chip failed to complete a send within if_timer
 * seconds (set in dc_start); reset and reinitialize it.
 */
static void
dc_watchdog(struct ifnet *ifp)
{
	struct dc_softc *sc;

	sc = ifp->if_softc;

	DC_LOCK(sc);

	ifp->if_oerrors++;
	printf("dc%d: watchdog timeout\n", sc->dc_unit);

	dc_stop(sc);
	dc_reset(sc);
	dc_init(sc);

	if (ifp->if_snd.ifq_head != NULL)
		dc_start(ifp);

	DC_UNLOCK(sc);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
dc_stop(struct dc_softc *sc)
{
	struct ifnet *ifp;
	struct dc_list_data *ld;
	struct dc_chain_data *cd;
	int i;
	u_int32_t ctl;

	DC_LOCK(sc);

	ifp = &sc->arpcom.ac_if;
	ifp->if_timer = 0;
	ld = sc->dc_ldata;
	cd = &sc->dc_cdata;

	callout_stop(&sc->dc_stat_ch);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
#ifdef DEVICE_POLLING
	ether_poll_deregister(ifp);
#endif

	DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_RX_ON | DC_NETCFG_TX_ON));
	CSR_WRITE_4(sc, DC_IMR, 0x00000000);
	CSR_WRITE_4(sc, DC_TXADDR, 0x00000000);
	CSR_WRITE_4(sc, DC_RXADDR, 0x00000000);
	sc->dc_link = 0;

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < DC_RX_LIST_CNT; i++) {
		if (cd->dc_rx_chain[i] != NULL) {
			m_freem(cd->dc_rx_chain[i]);
			cd->dc_rx_chain[i] = NULL;
		}
	}
	bzero(&ld->dc_rx_list, sizeof(ld->dc_rx_list));

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < DC_TX_LIST_CNT; i++) {
		if (cd->dc_tx_chain[i] != NULL) {
			ctl = le32toh(ld->dc_tx_list[i].dc_ctl);
			/*
			 * Setup frames and non-final fragments carry no
			 * mbuf/DMA-map of their own: just clear the slot.
			 */
			if ((ctl & DC_TXCTL_SETUP) ||
			    !(ctl & DC_TXCTL_LASTFRAG)) {
				cd->dc_tx_chain[i] = NULL;
				continue;
			}
			bus_dmamap_unload(sc->dc_mtag, cd->dc_tx_map[i]);
			m_freem(cd->dc_tx_chain[i]);
			cd->dc_tx_chain[i] = NULL;
		}
	}
	bzero(&ld->dc_tx_list, sizeof(ld->dc_tx_list));

	DC_UNLOCK(sc);
}

/*
 * Device suspend routine.
Stop the interface and save some PCI
 * settings in case the BIOS doesn't restore them properly on
 * resume.
 */
static int
dc_suspend(device_t dev)
{
	struct dc_softc *sc;
	int i, s;

	/* NOTE(review): splimp()/splx() look like pre-SMPng leftovers here;
	 * the driver otherwise uses DC_LOCK — confirm before removing. */
	s = splimp();

	sc = device_get_softc(dev);

	dc_stop(sc);

	/* Save the BARs plus interrupt line, cacheline size and latency. */
	for (i = 0; i < 5; i++)
		sc->saved_maps[i] = pci_read_config(dev, PCIR_BAR(i), 4);
	sc->saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4);
	sc->saved_intline = pci_read_config(dev, PCIR_INTLINE, 1);
	sc->saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	sc->saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);

	sc->suspended = 1;

	splx(s);
	return (0);
}

/*
 * Device resume routine.  Restore some PCI settings in case the BIOS
 * doesn't, re-enable busmastering, and restart the interface if
 * appropriate.
 */
static int
dc_resume(device_t dev)
{
	struct dc_softc *sc;
	struct ifnet *ifp;
	int i, s;

	s = splimp();

	sc = device_get_softc(dev);
	ifp = &sc->arpcom.ac_if;
#ifndef BURN_BRIDGES
	dc_acpi(dev);
#endif
	/* better way to do this? */
	for (i = 0; i < 5; i++)
		pci_write_config(dev, PCIR_BAR(i), sc->saved_maps[i], 4);
	pci_write_config(dev, PCIR_BIOS, sc->saved_biosaddr, 4);
	pci_write_config(dev, PCIR_INTLINE, sc->saved_intline, 1);
	pci_write_config(dev, PCIR_CACHELNSZ, sc->saved_cachelnsz, 1);
	pci_write_config(dev, PCIR_LATTIMER, sc->saved_lattimer, 1);

	/* reenable busmastering */
	pci_enable_busmaster(dev);
	pci_enable_io(dev, DC_RES);

	/* reinitialize interface if necessary */
	if (ifp->if_flags & IFF_UP)
		dc_init(sc);

	sc->suspended = 0;

	splx(s);
	return (0);
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
dc_shutdown(device_t dev)
{
	struct dc_softc *sc;

	sc = device_get_softc(dev);

	/* dc_stop() halts RX/TX DMA and masks interrupts. */
	dc_stop(sc);
}