/* if_tl.c revision 195049 */
1309260Scognet/*- 2309260Scognet * Copyright (c) 1997, 1998 3309260Scognet * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 4309260Scognet * 5309260Scognet * Redistribution and use in source and binary forms, with or without 6309260Scognet * modification, are permitted provided that the following conditions 7309260Scognet * are met: 8309260Scognet * 1. Redistributions of source code must retain the above copyright 9309260Scognet * notice, this list of conditions and the following disclaimer. 10309260Scognet * 2. Redistributions in binary form must reproduce the above copyright 11309260Scognet * notice, this list of conditions and the following disclaimer in the 12309260Scognet * documentation and/or other materials provided with the distribution. 13309260Scognet * 3. All advertising materials mentioning features or use of this software 14309260Scognet * must display the following acknowledgement: 15309260Scognet * This product includes software developed by Bill Paul. 16309260Scognet * 4. Neither the name of the author nor the names of any co-contributors 17309260Scognet * may be used to endorse or promote products derived from this software 18309260Scognet * without specific prior written permission. 19309260Scognet * 20309260Scognet * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 21309260Scognet * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22309260Scognet * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23309260Scognet * ARE DISCLAIMED. 
IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 24309260Scognet * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25309260Scognet * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26309260Scognet * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27309260Scognet * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28309260Scognet * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29309260Scognet * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 30309260Scognet * THE POSSIBILITY OF SUCH DAMAGE. 31309260Scognet */ 32309260Scognet 33309260Scognet#include <sys/cdefs.h> 34309260Scognet__FBSDID("$FreeBSD: head/sys/dev/tl/if_tl.c 195049 2009-06-26 11:45:06Z rwatson $"); 35309260Scognet 36309260Scognet/* 37309260Scognet * Texas Instruments ThunderLAN driver for FreeBSD 2.2.6 and 3.x. 38309260Scognet * Supports many Compaq PCI NICs based on the ThunderLAN ethernet controller, 39309260Scognet * the National Semiconductor DP83840A physical interface and the 40309260Scognet * Microchip Technology 24Cxx series serial EEPROM. 
41309260Scognet * 42309260Scognet * Written using the following four documents: 43309260Scognet * 44309260Scognet * Texas Instruments ThunderLAN Programmer's Guide (www.ti.com) 45309260Scognet * National Semiconductor DP83840A data sheet (www.national.com) 46309260Scognet * Microchip Technology 24C02C data sheet (www.microchip.com) 47309260Scognet * Micro Linear ML6692 100BaseTX only PHY data sheet (www.microlinear.com) 48309260Scognet * 49309260Scognet * Written by Bill Paul <wpaul@ctr.columbia.edu> 50309260Scognet * Electrical Engineering Department 51309260Scognet * Columbia University, New York City 52309260Scognet */ 53309260Scognet/* 54309260Scognet * Some notes about the ThunderLAN: 55309260Scognet * 56309260Scognet * The ThunderLAN controller is a single chip containing PCI controller 57309260Scognet * logic, approximately 3K of on-board SRAM, a LAN controller, and media 58309260Scognet * independent interface (MII) bus. The MII allows the ThunderLAN chip to 59309260Scognet * control up to 32 different physical interfaces (PHYs). The ThunderLAN 60309260Scognet * also has a built-in 10baseT PHY, allowing a single ThunderLAN controller 61309260Scognet * to act as a complete ethernet interface. 62309260Scognet * 63309260Scognet * Other PHYs may be attached to the ThunderLAN; the Compaq 10/100 cards 64309260Scognet * use a National Semiconductor DP83840A PHY that supports 10 or 100Mb/sec 65309260Scognet * in full or half duplex. Some of the Compaq Deskpro machines use a 66309260Scognet * Level 1 LXT970 PHY with the same capabilities. Certain Olicom adapters 67309260Scognet * use a Micro Linear ML6692 100BaseTX only PHY, which can be used in 68309260Scognet * concert with the ThunderLAN's internal PHY to provide full 10/100 69309260Scognet * support. This is cheaper than using a standalone external PHY for both 70309260Scognet * 10/100 modes and letting the ThunderLAN's internal PHY go to waste. 
71309260Scognet * A serial EEPROM is also attached to the ThunderLAN chip to provide 72309260Scognet * power-up default register settings and for storing the adapter's 73309260Scognet * station address. Although not supported by this driver, the ThunderLAN 74309260Scognet * chip can also be connected to token ring PHYs. 75309260Scognet * 76309260Scognet * The ThunderLAN has a set of registers which can be used to issue 77309260Scognet * commands, acknowledge interrupts, and to manipulate other internal 78309260Scognet * registers on its DIO bus. The primary registers can be accessed 79309260Scognet * using either programmed I/O (inb/outb) or via PCI memory mapping, 80309260Scognet * depending on how the card is configured during the PCI probing 81309260Scognet * phase. It is even possible to have both PIO and memory mapped 82309260Scognet * access turned on at the same time. 83309260Scognet * 84309260Scognet * Frame reception and transmission with the ThunderLAN chip is done 85309260Scognet * using frame 'lists.' A list structure looks more or less like this: 86309260Scognet * 87309260Scognet * struct tl_frag { 88309260Scognet * u_int32_t fragment_address; 89309260Scognet * u_int32_t fragment_size; 90309260Scognet * }; 91309260Scognet * struct tl_list { 92309260Scognet * u_int32_t forward_pointer; 93309260Scognet * u_int16_t cstat; 94309260Scognet * u_int16_t frame_size; 95309260Scognet * struct tl_frag fragments[10]; 96309260Scognet * }; 97309260Scognet * 98309260Scognet * The forward pointer in the list header can be either a 0 or the address 99309260Scognet * of another list, which allows several lists to be linked together. Each 100309260Scognet * list contains up to 10 fragment descriptors. This means the chip allows 101309260Scognet * ethernet frames to be broken up into up to 10 chunks for transfer to 102309260Scognet * and from the SRAM. 
Note that the forward pointer and fragment buffer 103309260Scognet * addresses are physical memory addresses, not virtual. Note also that 104309260Scognet * a single ethernet frame can not span lists: if the host wants to 105309260Scognet * transmit a frame and the frame data is split up over more than 10 106309260Scognet * buffers, the frame has to collapsed before it can be transmitted. 107309260Scognet * 108309260Scognet * To receive frames, the driver sets up a number of lists and populates 109309260Scognet * the fragment descriptors, then it sends an RX GO command to the chip. 110309260Scognet * When a frame is received, the chip will DMA it into the memory regions 111309260Scognet * specified by the fragment descriptors and then trigger an RX 'end of 112309260Scognet * frame interrupt' when done. The driver may choose to use only one 113309260Scognet * fragment per list; this may result is slighltly less efficient use 114309260Scognet * of memory in exchange for improving performance. 115309260Scognet * 116309260Scognet * To transmit frames, the driver again sets up lists and fragment 117309260Scognet * descriptors, only this time the buffers contain frame data that 118309260Scognet * is to be DMA'ed into the chip instead of out of it. Once the chip 119309260Scognet * has transfered the data into its on-board SRAM, it will trigger a 120309260Scognet * TX 'end of frame' interrupt. It will also generate an 'end of channel' 121309260Scognet * interrupt when it reaches the end of the list. 122309260Scognet */ 123309260Scognet/* 124309260Scognet * Some notes about this driver: 125309260Scognet * 126309260Scognet * The ThunderLAN chip provides a couple of different ways to organize 127309260Scognet * reception, transmission and interrupt handling. The simplest approach 128309260Scognet * is to use one list each for transmission and reception. 
In this mode, 129309260Scognet * the ThunderLAN will generate two interrupts for every received frame 130309260Scognet * (one RX EOF and one RX EOC) and two for each transmitted frame (one 131309260Scognet * TX EOF and one TX EOC). This may make the driver simpler but it hurts 132309260Scognet * performance to have to handle so many interrupts. 133309260Scognet * 134309260Scognet * Initially I wanted to create a circular list of receive buffers so 135309260Scognet * that the ThunderLAN chip would think there was an infinitely long 136309260Scognet * receive channel and never deliver an RXEOC interrupt. However this 137309260Scognet * doesn't work correctly under heavy load: while the manual says the 138309260Scognet * chip will trigger an RXEOF interrupt each time a frame is copied into 139309260Scognet * memory, you can't count on the chip waiting around for you to acknowledge 140309260Scognet * the interrupt before it starts trying to DMA the next frame. The result 141309260Scognet * is that the chip might traverse the entire circular list and then wrap 142309260Scognet * around before you have a chance to do anything about it. Consequently, 143309260Scognet * the receive list is terminated (with a 0 in the forward pointer in the 144309260Scognet * last element). Each time an RXEOF interrupt arrives, the used list 145309260Scognet * is shifted to the end of the list. This gives the appearance of an 146309260Scognet * infinitely large RX chain so long as the driver doesn't fall behind 147309260Scognet * the chip and allow all of the lists to be filled up. 148309260Scognet * 149309260Scognet * If all the lists are filled, the adapter will deliver an RX 'end of 150309260Scognet * channel' interrupt when it hits the 0 forward pointer at the end of 151309260Scognet * the chain. The RXEOC handler then cleans out the RX chain and resets 152309260Scognet * the list head pointer in the ch_parm register and restarts the receiver. 
153309260Scognet * 154309260Scognet * For frame transmission, it is possible to program the ThunderLAN's 155309260Scognet * transmit interrupt threshold so that the chip can acknowledge multiple 156309260Scognet * lists with only a single TX EOF interrupt. This allows the driver to 157309260Scognet * queue several frames in one shot, and only have to handle a total 158309260Scognet * two interrupts (one TX EOF and one TX EOC) no matter how many frames 159309260Scognet * are transmitted. Frame transmission is done directly out of the 160309260Scognet * mbufs passed to the tl_start() routine via the interface send queue. 161309260Scognet * The driver simply sets up the fragment descriptors in the transmit 162309260Scognet * lists to point to the mbuf data regions and sends a TX GO command. 163309260Scognet * 164309260Scognet * Note that since the RX and TX lists themselves are always used 165309260Scognet * only by the driver, the are malloc()ed once at driver initialization 166309260Scognet * time and never free()ed. 167309260Scognet * 168309260Scognet * Also, in order to remain as platform independent as possible, this 169309260Scognet * driver uses memory mapped register access to manipulate the card 170309260Scognet * as opposed to programmed I/O. This avoids the use of the inb/outb 171309260Scognet * (and related) instructions which are specific to the i386 platform. 172309260Scognet * 173309260Scognet * Using these techniques, this driver achieves very high performance 174309260Scognet * by minimizing the amount of interrupts generated during large 175309260Scognet * transfers and by completely avoiding buffer copies. Frame transfer 176309260Scognet * to and from the ThunderLAN chip is performed entirely by the chip 177309260Scognet * itself thereby reducing the load on the host CPU. 
178309260Scognet */ 179309260Scognet 180309260Scognet#include <sys/param.h> 181309260Scognet#include <sys/systm.h> 182309260Scognet#include <sys/sockio.h> 183309260Scognet#include <sys/mbuf.h> 184309260Scognet#include <sys/malloc.h> 185309260Scognet#include <sys/kernel.h> 186309260Scognet#include <sys/module.h> 187309260Scognet#include <sys/socket.h> 188309260Scognet 189309260Scognet#include <net/if.h> 190309260Scognet#include <net/if_arp.h> 191309260Scognet#include <net/ethernet.h> 192309260Scognet#include <net/if_dl.h> 193309260Scognet#include <net/if_media.h> 194309260Scognet#include <net/if_types.h> 195309260Scognet 196309260Scognet#include <net/bpf.h> 197309260Scognet 198309260Scognet#include <vm/vm.h> /* for vtophys */ 199309260Scognet#include <vm/pmap.h> /* for vtophys */ 200309260Scognet#include <machine/bus.h> 201309260Scognet#include <machine/resource.h> 202309260Scognet#include <sys/bus.h> 203309260Scognet#include <sys/rman.h> 204309260Scognet 205309260Scognet#include <dev/mii/mii.h> 206309260Scognet#include <dev/mii/miivar.h> 207309260Scognet 208309260Scognet#include <dev/pci/pcireg.h> 209309260Scognet#include <dev/pci/pcivar.h> 210309260Scognet 211309260Scognet/* 212309260Scognet * Default to using PIO register access mode to pacify certain 213309260Scognet * laptop docking stations with built-in ThunderLAN chips that 214309260Scognet * don't seem to handle memory mapped mode properly. 215309260Scognet */ 216309260Scognet#define TL_USEIOSPACE 217309260Scognet 218309260Scognet#include <dev/tl/if_tlreg.h> 219309260Scognet 220309260ScognetMODULE_DEPEND(tl, pci, 1, 1, 1); 221309260ScognetMODULE_DEPEND(tl, ether, 1, 1, 1); 222309260ScognetMODULE_DEPEND(tl, miibus, 1, 1, 1); 223309260Scognet 224309260Scognet/* "device miibus" required. See GENERIC if you get errors here. */ 225309260Scognet#include "miibus_if.h" 226309260Scognet 227309260Scognet/* 228309260Scognet * Various supported device vendors/types and their names. 
229309260Scognet */ 230309260Scognet 231309260Scognetstatic struct tl_type tl_devs[] = { 232309260Scognet { TI_VENDORID, TI_DEVICEID_THUNDERLAN, 233309260Scognet "Texas Instruments ThunderLAN" }, 234309260Scognet { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10, 235309260Scognet "Compaq Netelligent 10" }, 236309260Scognet { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100, 237309260Scognet "Compaq Netelligent 10/100" }, 238309260Scognet { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_PROLIANT, 239309260Scognet "Compaq Netelligent 10/100 Proliant" }, 240309260Scognet { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_DUAL, 241309260Scognet "Compaq Netelligent 10/100 Dual Port" }, 242309260Scognet { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETFLEX_3P_INTEGRATED, 243309260Scognet "Compaq NetFlex-3/P Integrated" }, 244309260Scognet { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETFLEX_3P, 245309260Scognet "Compaq NetFlex-3/P" }, 246309260Scognet { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETFLEX_3P_BNC, 247309260Scognet "Compaq NetFlex 3/P w/ BNC" }, 248309260Scognet { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_EMBEDDED, 249309260Scognet "Compaq Netelligent 10/100 TX Embedded UTP" }, 250309260Scognet { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_T2_UTP_COAX, 251309260Scognet "Compaq Netelligent 10 T/2 PCI UTP/Coax" }, 252309260Scognet { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_TX_UTP, 253309260Scognet "Compaq Netelligent 10/100 TX UTP" }, 254309260Scognet { OLICOM_VENDORID, OLICOM_DEVICEID_OC2183, 255309260Scognet "Olicom OC-2183/2185" }, 256309260Scognet { OLICOM_VENDORID, OLICOM_DEVICEID_OC2325, 257309260Scognet "Olicom OC-2325" }, 258309260Scognet { OLICOM_VENDORID, OLICOM_DEVICEID_OC2326, 259309260Scognet "Olicom OC-2326 10/100 TX UTP" }, 260309260Scognet { 0, 0, NULL } 261309260Scognet}; 262309260Scognet 263309260Scognetstatic int tl_probe(device_t); 264309260Scognetstatic int tl_attach(device_t); 265309260Scognetstatic int tl_detach(device_t); 266309260Scognetstatic int tl_intvec_rxeoc(void *, 
u_int32_t); 267309260Scognetstatic int tl_intvec_txeoc(void *, u_int32_t); 268309260Scognetstatic int tl_intvec_txeof(void *, u_int32_t); 269309260Scognetstatic int tl_intvec_rxeof(void *, u_int32_t); 270309260Scognetstatic int tl_intvec_adchk(void *, u_int32_t); 271309260Scognetstatic int tl_intvec_netsts(void *, u_int32_t); 272309260Scognet 273309260Scognetstatic int tl_newbuf(struct tl_softc *, struct tl_chain_onefrag *); 274309260Scognetstatic void tl_stats_update(void *); 275309260Scognetstatic int tl_encap(struct tl_softc *, struct tl_chain *, struct mbuf *); 276309260Scognet 277309260Scognetstatic void tl_intr(void *); 278309260Scognetstatic void tl_start(struct ifnet *); 279309260Scognetstatic void tl_start_locked(struct ifnet *); 280309260Scognetstatic int tl_ioctl(struct ifnet *, u_long, caddr_t); 281309260Scognetstatic void tl_init(void *); 282309260Scognetstatic void tl_init_locked(struct tl_softc *); 283309260Scognetstatic void tl_stop(struct tl_softc *); 284309260Scognetstatic void tl_watchdog(struct ifnet *); 285309260Scognetstatic int tl_shutdown(device_t); 286309260Scognetstatic int tl_ifmedia_upd(struct ifnet *); 287309260Scognetstatic void tl_ifmedia_sts(struct ifnet *, struct ifmediareq *); 288309260Scognet 289309260Scognetstatic u_int8_t tl_eeprom_putbyte(struct tl_softc *, int); 290309260Scognetstatic u_int8_t tl_eeprom_getbyte(struct tl_softc *, int, u_int8_t *); 291309260Scognetstatic int tl_read_eeprom(struct tl_softc *, caddr_t, int, int); 292309260Scognet 293309260Scognetstatic void tl_mii_sync(struct tl_softc *); 294309260Scognetstatic void tl_mii_send(struct tl_softc *, u_int32_t, int); 295309260Scognetstatic int tl_mii_readreg(struct tl_softc *, struct tl_mii_frame *); 296309260Scognetstatic int tl_mii_writereg(struct tl_softc *, struct tl_mii_frame *); 297309260Scognetstatic int tl_miibus_readreg(device_t, int, int); 298309260Scognetstatic int tl_miibus_writereg(device_t, int, int, int); 299309260Scognetstatic void 
tl_miibus_statchg(device_t); 300309260Scognet 301309260Scognetstatic void tl_setmode(struct tl_softc *, int); 302309260Scognetstatic uint32_t tl_mchash(const uint8_t *); 303static void tl_setmulti(struct tl_softc *); 304static void tl_setfilt(struct tl_softc *, caddr_t, int); 305static void tl_softreset(struct tl_softc *, int); 306static void tl_hardreset(device_t); 307static int tl_list_rx_init(struct tl_softc *); 308static int tl_list_tx_init(struct tl_softc *); 309 310static u_int8_t tl_dio_read8(struct tl_softc *, int); 311static u_int16_t tl_dio_read16(struct tl_softc *, int); 312static u_int32_t tl_dio_read32(struct tl_softc *, int); 313static void tl_dio_write8(struct tl_softc *, int, int); 314static void tl_dio_write16(struct tl_softc *, int, int); 315static void tl_dio_write32(struct tl_softc *, int, int); 316static void tl_dio_setbit(struct tl_softc *, int, int); 317static void tl_dio_clrbit(struct tl_softc *, int, int); 318static void tl_dio_setbit16(struct tl_softc *, int, int); 319static void tl_dio_clrbit16(struct tl_softc *, int, int); 320 321#ifdef TL_USEIOSPACE 322#define TL_RES SYS_RES_IOPORT 323#define TL_RID TL_PCI_LOIO 324#else 325#define TL_RES SYS_RES_MEMORY 326#define TL_RID TL_PCI_LOMEM 327#endif 328 329static device_method_t tl_methods[] = { 330 /* Device interface */ 331 DEVMETHOD(device_probe, tl_probe), 332 DEVMETHOD(device_attach, tl_attach), 333 DEVMETHOD(device_detach, tl_detach), 334 DEVMETHOD(device_shutdown, tl_shutdown), 335 336 /* bus interface */ 337 DEVMETHOD(bus_print_child, bus_generic_print_child), 338 DEVMETHOD(bus_driver_added, bus_generic_driver_added), 339 340 /* MII interface */ 341 DEVMETHOD(miibus_readreg, tl_miibus_readreg), 342 DEVMETHOD(miibus_writereg, tl_miibus_writereg), 343 DEVMETHOD(miibus_statchg, tl_miibus_statchg), 344 345 { 0, 0 } 346}; 347 348static driver_t tl_driver = { 349 "tl", 350 tl_methods, 351 sizeof(struct tl_softc) 352}; 353 354static devclass_t tl_devclass; 355 356DRIVER_MODULE(tl, pci, 
tl_driver, tl_devclass, 0, 0); 357DRIVER_MODULE(miibus, tl, miibus_driver, miibus_devclass, 0, 0); 358 359static u_int8_t tl_dio_read8(sc, reg) 360 struct tl_softc *sc; 361 int reg; 362{ 363 CSR_WRITE_2(sc, TL_DIO_ADDR, reg); 364 return(CSR_READ_1(sc, TL_DIO_DATA + (reg & 3))); 365} 366 367static u_int16_t tl_dio_read16(sc, reg) 368 struct tl_softc *sc; 369 int reg; 370{ 371 CSR_WRITE_2(sc, TL_DIO_ADDR, reg); 372 return(CSR_READ_2(sc, TL_DIO_DATA + (reg & 3))); 373} 374 375static u_int32_t tl_dio_read32(sc, reg) 376 struct tl_softc *sc; 377 int reg; 378{ 379 CSR_WRITE_2(sc, TL_DIO_ADDR, reg); 380 return(CSR_READ_4(sc, TL_DIO_DATA + (reg & 3))); 381} 382 383static void tl_dio_write8(sc, reg, val) 384 struct tl_softc *sc; 385 int reg; 386 int val; 387{ 388 CSR_WRITE_2(sc, TL_DIO_ADDR, reg); 389 CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), val); 390 return; 391} 392 393static void tl_dio_write16(sc, reg, val) 394 struct tl_softc *sc; 395 int reg; 396 int val; 397{ 398 CSR_WRITE_2(sc, TL_DIO_ADDR, reg); 399 CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), val); 400 return; 401} 402 403static void tl_dio_write32(sc, reg, val) 404 struct tl_softc *sc; 405 int reg; 406 int val; 407{ 408 CSR_WRITE_2(sc, TL_DIO_ADDR, reg); 409 CSR_WRITE_4(sc, TL_DIO_DATA + (reg & 3), val); 410 return; 411} 412 413static void 414tl_dio_setbit(sc, reg, bit) 415 struct tl_softc *sc; 416 int reg; 417 int bit; 418{ 419 u_int8_t f; 420 421 CSR_WRITE_2(sc, TL_DIO_ADDR, reg); 422 f = CSR_READ_1(sc, TL_DIO_DATA + (reg & 3)); 423 f |= bit; 424 CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), f); 425 426 return; 427} 428 429static void 430tl_dio_clrbit(sc, reg, bit) 431 struct tl_softc *sc; 432 int reg; 433 int bit; 434{ 435 u_int8_t f; 436 437 CSR_WRITE_2(sc, TL_DIO_ADDR, reg); 438 f = CSR_READ_1(sc, TL_DIO_DATA + (reg & 3)); 439 f &= ~bit; 440 CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), f); 441 442 return; 443} 444 445static void tl_dio_setbit16(sc, reg, bit) 446 struct tl_softc *sc; 447 int reg; 448 int bit; 449{ 450 
u_int16_t f; 451 452 CSR_WRITE_2(sc, TL_DIO_ADDR, reg); 453 f = CSR_READ_2(sc, TL_DIO_DATA + (reg & 3)); 454 f |= bit; 455 CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), f); 456 457 return; 458} 459 460static void tl_dio_clrbit16(sc, reg, bit) 461 struct tl_softc *sc; 462 int reg; 463 int bit; 464{ 465 u_int16_t f; 466 467 CSR_WRITE_2(sc, TL_DIO_ADDR, reg); 468 f = CSR_READ_2(sc, TL_DIO_DATA + (reg & 3)); 469 f &= ~bit; 470 CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), f); 471 472 return; 473} 474 475/* 476 * Send an instruction or address to the EEPROM, check for ACK. 477 */ 478static u_int8_t tl_eeprom_putbyte(sc, byte) 479 struct tl_softc *sc; 480 int byte; 481{ 482 register int i, ack = 0; 483 484 /* 485 * Make sure we're in TX mode. 486 */ 487 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ETXEN); 488 489 /* 490 * Feed in each bit and stobe the clock. 491 */ 492 for (i = 0x80; i; i >>= 1) { 493 if (byte & i) { 494 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_EDATA); 495 } else { 496 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_EDATA); 497 } 498 DELAY(1); 499 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK); 500 DELAY(1); 501 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK); 502 } 503 504 /* 505 * Turn off TX mode. 506 */ 507 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ETXEN); 508 509 /* 510 * Check for ack. 511 */ 512 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK); 513 ack = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_EDATA; 514 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK); 515 516 return(ack); 517} 518 519/* 520 * Read a byte of data stored in the EEPROM at address 'addr.' 521 */ 522static u_int8_t tl_eeprom_getbyte(sc, addr, dest) 523 struct tl_softc *sc; 524 int addr; 525 u_int8_t *dest; 526{ 527 register int i; 528 u_int8_t byte = 0; 529 device_t tl_dev = sc->tl_dev; 530 531 tl_dio_write8(sc, TL_NETSIO, 0); 532 533 EEPROM_START; 534 535 /* 536 * Send write control code to EEPROM. 
537 */ 538 if (tl_eeprom_putbyte(sc, EEPROM_CTL_WRITE)) { 539 device_printf(tl_dev, "failed to send write command, status: %x\n", 540 tl_dio_read8(sc, TL_NETSIO)); 541 return(1); 542 } 543 544 /* 545 * Send address of byte we want to read. 546 */ 547 if (tl_eeprom_putbyte(sc, addr)) { 548 device_printf(tl_dev, "failed to send address, status: %x\n", 549 tl_dio_read8(sc, TL_NETSIO)); 550 return(1); 551 } 552 553 EEPROM_STOP; 554 EEPROM_START; 555 /* 556 * Send read control code to EEPROM. 557 */ 558 if (tl_eeprom_putbyte(sc, EEPROM_CTL_READ)) { 559 device_printf(tl_dev, "failed to send write command, status: %x\n", 560 tl_dio_read8(sc, TL_NETSIO)); 561 return(1); 562 } 563 564 /* 565 * Start reading bits from EEPROM. 566 */ 567 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ETXEN); 568 for (i = 0x80; i; i >>= 1) { 569 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK); 570 DELAY(1); 571 if (tl_dio_read8(sc, TL_NETSIO) & TL_SIO_EDATA) 572 byte |= i; 573 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK); 574 DELAY(1); 575 } 576 577 EEPROM_STOP; 578 579 /* 580 * No ACK generated for read, so just return byte. 581 */ 582 583 *dest = byte; 584 585 return(0); 586} 587 588/* 589 * Read a sequence of bytes from the EEPROM. 590 */ 591static int 592tl_read_eeprom(sc, dest, off, cnt) 593 struct tl_softc *sc; 594 caddr_t dest; 595 int off; 596 int cnt; 597{ 598 int err = 0, i; 599 u_int8_t byte = 0; 600 601 for (i = 0; i < cnt; i++) { 602 err = tl_eeprom_getbyte(sc, off + i, &byte); 603 if (err) 604 break; 605 *(dest + i) = byte; 606 } 607 608 return(err ? 
1 : 0); 609} 610 611static void 612tl_mii_sync(sc) 613 struct tl_softc *sc; 614{ 615 register int i; 616 617 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MTXEN); 618 619 for (i = 0; i < 32; i++) { 620 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK); 621 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK); 622 } 623 624 return; 625} 626 627static void 628tl_mii_send(sc, bits, cnt) 629 struct tl_softc *sc; 630 u_int32_t bits; 631 int cnt; 632{ 633 int i; 634 635 for (i = (0x1 << (cnt - 1)); i; i >>= 1) { 636 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK); 637 if (bits & i) { 638 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MDATA); 639 } else { 640 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MDATA); 641 } 642 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK); 643 } 644} 645 646static int 647tl_mii_readreg(sc, frame) 648 struct tl_softc *sc; 649 struct tl_mii_frame *frame; 650 651{ 652 int i, ack; 653 int minten = 0; 654 655 tl_mii_sync(sc); 656 657 /* 658 * Set up frame for RX. 659 */ 660 frame->mii_stdelim = TL_MII_STARTDELIM; 661 frame->mii_opcode = TL_MII_READOP; 662 frame->mii_turnaround = 0; 663 frame->mii_data = 0; 664 665 /* 666 * Turn off MII interrupt by forcing MINTEN low. 667 */ 668 minten = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MINTEN; 669 if (minten) { 670 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MINTEN); 671 } 672 673 /* 674 * Turn on data xmit. 675 */ 676 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MTXEN); 677 678 /* 679 * Send command/address info. 680 */ 681 tl_mii_send(sc, frame->mii_stdelim, 2); 682 tl_mii_send(sc, frame->mii_opcode, 2); 683 tl_mii_send(sc, frame->mii_phyaddr, 5); 684 tl_mii_send(sc, frame->mii_regaddr, 5); 685 686 /* 687 * Turn off xmit. 
688 */ 689 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MTXEN); 690 691 /* Idle bit */ 692 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK); 693 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK); 694 695 /* Check for ack */ 696 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK); 697 ack = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MDATA; 698 699 /* Complete the cycle */ 700 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK); 701 702 /* 703 * Now try reading data bits. If the ack failed, we still 704 * need to clock through 16 cycles to keep the PHYs in sync. 705 */ 706 if (ack) { 707 for(i = 0; i < 16; i++) { 708 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK); 709 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK); 710 } 711 goto fail; 712 } 713 714 for (i = 0x8000; i; i >>= 1) { 715 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK); 716 if (!ack) { 717 if (tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MDATA) 718 frame->mii_data |= i; 719 } 720 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK); 721 } 722 723fail: 724 725 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK); 726 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK); 727 728 /* Reenable interrupts */ 729 if (minten) { 730 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MINTEN); 731 } 732 733 if (ack) 734 return(1); 735 return(0); 736} 737 738static int 739tl_mii_writereg(sc, frame) 740 struct tl_softc *sc; 741 struct tl_mii_frame *frame; 742 743{ 744 int minten; 745 746 tl_mii_sync(sc); 747 748 /* 749 * Set up frame for TX. 750 */ 751 752 frame->mii_stdelim = TL_MII_STARTDELIM; 753 frame->mii_opcode = TL_MII_WRITEOP; 754 frame->mii_turnaround = TL_MII_TURNAROUND; 755 756 /* 757 * Turn off MII interrupt by forcing MINTEN low. 758 */ 759 minten = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MINTEN; 760 if (minten) { 761 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MINTEN); 762 } 763 764 /* 765 * Turn on data output. 
766 */ 767 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MTXEN); 768 769 tl_mii_send(sc, frame->mii_stdelim, 2); 770 tl_mii_send(sc, frame->mii_opcode, 2); 771 tl_mii_send(sc, frame->mii_phyaddr, 5); 772 tl_mii_send(sc, frame->mii_regaddr, 5); 773 tl_mii_send(sc, frame->mii_turnaround, 2); 774 tl_mii_send(sc, frame->mii_data, 16); 775 776 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK); 777 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK); 778 779 /* 780 * Turn off xmit. 781 */ 782 tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MTXEN); 783 784 /* Reenable interrupts */ 785 if (minten) 786 tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MINTEN); 787 788 return(0); 789} 790 791static int 792tl_miibus_readreg(dev, phy, reg) 793 device_t dev; 794 int phy, reg; 795{ 796 struct tl_softc *sc; 797 struct tl_mii_frame frame; 798 799 sc = device_get_softc(dev); 800 bzero((char *)&frame, sizeof(frame)); 801 802 frame.mii_phyaddr = phy; 803 frame.mii_regaddr = reg; 804 tl_mii_readreg(sc, &frame); 805 806 return(frame.mii_data); 807} 808 809static int 810tl_miibus_writereg(dev, phy, reg, data) 811 device_t dev; 812 int phy, reg, data; 813{ 814 struct tl_softc *sc; 815 struct tl_mii_frame frame; 816 817 sc = device_get_softc(dev); 818 bzero((char *)&frame, sizeof(frame)); 819 820 frame.mii_phyaddr = phy; 821 frame.mii_regaddr = reg; 822 frame.mii_data = data; 823 824 tl_mii_writereg(sc, &frame); 825 826 return(0); 827} 828 829static void 830tl_miibus_statchg(dev) 831 device_t dev; 832{ 833 struct tl_softc *sc; 834 struct mii_data *mii; 835 836 sc = device_get_softc(dev); 837 mii = device_get_softc(sc->tl_miibus); 838 839 if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { 840 tl_dio_setbit(sc, TL_NETCMD, TL_CMD_DUPLEX); 841 } else { 842 tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_DUPLEX); 843 } 844 845 return; 846} 847 848/* 849 * Set modes for bitrate devices. 
 */
static void
tl_setmode(sc, media)
	struct tl_softc		*sc;
	int			media;
{
	/* AUI/BNC: raise MTXD1 to route the MII pins to the bitrate PHY. */
	if (IFM_SUBTYPE(media) == IFM_10_5)
		tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_MTXD1);
	if (IFM_SUBTYPE(media) == IFM_10_T) {
		tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_MTXD1);
		/* MTXD3 clear selects full duplex, set selects half duplex. */
		if ((media & IFM_GMASK) == IFM_FDX) {
			tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_MTXD3);
			tl_dio_setbit(sc, TL_NETCMD, TL_CMD_DUPLEX);
		} else {
			tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_MTXD3);
			tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_DUPLEX);
		}
	}

	return;
}

/*
 * Calculate the hash of a MAC address for programming the multicast hash
 * table. This hash is simply the address split into 6-bit chunks
 * XOR'd, e.g.
 * byte: 000000|00 1111|1111 22|222222|333333|33 4444|4444 55|555555
 * bit:  765432|10 7654|3210 76|543210|765432|10 7654|3210 76|543210
 * Bytes 0-2 and 3-5 are symmetrical, so are folded together. Then
 * the folded 24-bit value is split into 6-bit portions and XOR'd.
 * Returns a value in the range 0-63 (one bit of the 64-bit hash table).
 */
static uint32_t
tl_mchash(addr)
	const uint8_t *addr;
{
	int t;

	t = (addr[0] ^ addr[3]) << 16 | (addr[1] ^ addr[4]) << 8 |
	    (addr[2] ^ addr[5]);
	return ((t >> 18) ^ (t >> 12) ^ (t >> 6) ^ t) & 0x3f;
}

/*
 * The ThunderLAN has a perfect MAC address filter in addition to
 * the multicast hash filter. The perfect filter can be programmed
 * with up to four MAC addresses. The first one is always used to
 * hold the station address, which leaves us free to use the other
 * three for multicast addresses.
 *
 * Program the 6-byte address 'addr' into perfect-filter entry 'slot'
 * (0-3) by writing it one byte at a time through the DIO window.
 */
static void
tl_setfilt(sc, addr, slot)
	struct tl_softc		*sc;
	caddr_t			addr;
	int			slot;
{
	int			i;
	u_int16_t		regaddr;

	/* Each AREG slot is ETHER_ADDR_LEN bytes wide, starting at AREG0. */
	regaddr = TL_AREG0_B5 + (slot * ETHER_ADDR_LEN);

	for (i = 0; i < ETHER_ADDR_LEN; i++)
		tl_dio_write8(sc, regaddr + i, *(addr + i));

	return;
}

/*
 * XXX In FreeBSD 3.0, multicast addresses are managed using a doubly
 * linked list. This is fine, except addresses are added from the head
 * end of the list. We want to arrange for 224.0.0.1 (the "all hosts")
 * group to always be in the perfect filter, but as more groups are added,
 * the 224.0.0.1 entry (which is always added first) gets pushed down
 * the list and ends up at the tail. So after 3 or 4 multicast groups
 * are added, the all-hosts entry gets pushed out of the perfect filter
 * and into the hash table.
 *
 * Because the multicast list is a doubly-linked list as opposed to a
 * circular queue, we don't have the ability to just grab the tail of
 * the list and traverse it backwards. Instead, we have to traverse
 * the list once to find the tail, then traverse it again backwards to
 * update the multicast filter.
 */
static void
tl_setmulti(sc)
	struct tl_softc		*sc;
{
	struct ifnet		*ifp;
	u_int32_t		hashes[2] = { 0, 0 };
	int			h, i;
	struct ifmultiaddr	*ifma;
	u_int8_t		dummy[] = { 0, 0, 0, 0, 0 ,0 };

	ifp = sc->tl_ifp;

	/* First, zot all the existing filters. */
	for (i = 1; i < 4; i++)
		tl_setfilt(sc, (caddr_t)&dummy, i);
	tl_dio_write32(sc, TL_HASH1, 0);
	tl_dio_write32(sc, TL_HASH2, 0);

	/* Now program new ones. */
	if (ifp->if_flags & IFF_ALLMULTI) {
		/* All-ones hash accepts every multicast frame. */
		hashes[0] = 0xFFFFFFFF;
		hashes[1] = 0xFFFFFFFF;
	} else {
		/* Slot 0 holds the station address; start at slot 1. */
		i = 1;
		if_maddr_rlock(ifp);
		/* Walk in reverse so the oldest entries land in the
		 * perfect filter (see the XXX comment above). */
		TAILQ_FOREACH_REVERSE(ifma, &ifp->if_multiaddrs, ifmultihead, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			/*
			 * Program the first three multicast groups
			 * into the perfect filter. For all others,
			 * use the hash table.
			 */
			if (i < 4) {
				tl_setfilt(sc,
				    LLADDR((struct sockaddr_dl *)ifma->ifma_addr), i);
				i++;
				continue;
			}

			h = tl_mchash(
			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));
		}
		if_maddr_runlock(ifp);
	}

	tl_dio_write32(sc, TL_HASH1, hashes[0]);
	tl_dio_write32(sc, TL_HASH2, hashes[1]);

	return;
}

/*
 * This routine is recommended by the ThunderLAN manual to ensure that
 * the internal PHY is powered up correctly. It also recommends a one
 * second pause at the end to 'wait for the clocks to start' but in my
 * experience this isn't necessary.
 */
static void
tl_hardreset(dev)
	device_t		dev;
{
	struct tl_softc		*sc;
	int			i;
	u_int16_t		flags;

	sc = device_get_softc(dev);

	tl_mii_sync(sc);

	/* Isolate and power down every possible PHY address first. */
	flags = BMCR_LOOP|BMCR_ISO|BMCR_PDOWN;

	for (i = 0; i < MII_NPHY; i++)
		tl_miibus_writereg(dev, i, MII_BMCR, flags);

	/* PHY address 31 is the ThunderLAN's internal PHY. */
	tl_miibus_writereg(dev, 31, MII_BMCR, BMCR_ISO);
	DELAY(50000);
	tl_miibus_writereg(dev, 31, MII_BMCR, BMCR_LOOP|BMCR_ISO);
	tl_mii_sync(sc);
	/* NOTE(review): unbounded busy-wait -- if the PHY never clears
	 * BMCR_RESET this spins forever; consider a bounded retry. */
	while(tl_miibus_readreg(dev, 31, MII_BMCR) & BMCR_RESET);

	DELAY(50000);
	return;
}

/*
 * Soft-reset the chip: clear statistics, clear the address/hash
 * registers, reprogram NETCONFIG, and reload the interrupt pacing
 * timer and TX threshold. 'internal' selects the internal PHY
 * (unless this is a bitrate-only card).
 */
static void
tl_softreset(sc, internal)
	struct tl_softc		*sc;
	int			internal;
{
	u_int32_t		cmd, dummy, i;

	/* Assert the adapter reset bit. */
	CMD_SET(sc, TL_CMD_ADRST);

	/* Turn off interrupts */
	CMD_SET(sc, TL_CMD_INTSOFF);

	/* First, clear the stats registers (they clear on read).
	 * NOTE(review): five reads of the same offset -- presumably the
	 * DIO address auto-increments across the five stats registers;
	 * confirm against tl_dio_read32. */
	for (i = 0; i < 5; i++)
		dummy = tl_dio_read32(sc, TL_TXGOODFRAMES);

	/* Clear Areg and Hash registers.
	 * NOTE(review): eight writes to the same offset -- same
	 * auto-increment assumption as above; confirm. */
	for (i = 0; i < 8; i++)
		tl_dio_write32(sc, TL_AREG0_B5, 0x00000000);

	/*
	 * Set up Netconfig register. Enable one channel and
	 * one fragment mode.
	 */
	tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_ONECHAN|TL_CFG_ONEFRAG);
	if (internal && !sc->tl_bitrate) {
		tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_PHYEN);
	} else {
		tl_dio_clrbit16(sc, TL_NETCONFIG, TL_CFG_PHYEN);
	}

	/* Handle cards with bitrate devices. */
	if (sc->tl_bitrate)
		tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_BITRATE);

	/*
	 * Load adapter irq pacing timer and tx threshold.
	 * We make the transmit threshold 1 initially but we may
	 * change that later.
	 */
	cmd = CSR_READ_4(sc, TL_HOSTCMD);
	cmd |= TL_CMD_NES;
	cmd &= ~(TL_CMD_RT|TL_CMD_EOC|TL_CMD_ACK_MASK|TL_CMD_CHSEL_MASK);
	CMD_PUT(sc, cmd | (TL_CMD_LDTHR | TX_THR));
	CMD_PUT(sc, cmd | (TL_CMD_LDTMR | 0x00000003));

	/* Unreset the MII */
	tl_dio_setbit(sc, TL_NETSIO, TL_SIO_NMRST);

	/* Take the adapter out of reset */
	tl_dio_setbit(sc, TL_NETCMD, TL_CMD_NRESET|TL_CMD_NWRAP);

	/* Wait for things to settle down a little. */
	DELAY(500);

	return;
}

/*
 * Probe for a ThunderLAN chip. Check the PCI vendor and device IDs
 * against our list and return its name if we find a match.
 */
static int
tl_probe(dev)
	device_t		dev;
{
	struct tl_type		*t;

	t = tl_devs;

	/* tl_devs is a NULL-name-terminated table of supported IDs. */
	while(t->tl_name != NULL) {
		if ((pci_get_vendor(dev) == t->tl_vid) &&
		    (pci_get_device(dev) == t->tl_did)) {
			device_set_desc(dev, t->tl_name);
			return (BUS_PROBE_DEFAULT);
		}
		t++;
	}

	return(ENXIO);
}

/*
 * Attach routine: map registers, allocate the interrupt and DMA-able
 * list memory, read the station address from the EEPROM, probe for
 * PHYs (falling back to bitrate mode if none are found) and attach
 * the ethernet interface. On any failure, falls through to tl_detach()
 * which releases whatever was allocated.
 */
static int
tl_attach(dev)
	device_t		dev;
{
	int			i;
	u_int16_t		did, vid;
	struct tl_type		*t;
	struct ifnet		*ifp;
	struct tl_softc		*sc;
	int			unit, error = 0, rid;
	u_char			eaddr[6];

	vid = pci_get_vendor(dev);
	did = pci_get_device(dev);
	sc = device_get_softc(dev);
	sc->tl_dev = dev;
	unit = device_get_unit(dev);

	/* Re-locate our table entry; probe already verified it exists. */
	t = tl_devs;
	while(t->tl_name != NULL) {
		if (vid == t->tl_vid && did == t->tl_did)
			break;
		t++;
	}

	if (t->tl_name == NULL) {
		device_printf(dev, "unknown device!?\n");
		return (ENXIO);
	}

	mtx_init(&sc->tl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

#ifdef TL_USEIOSPACE

	rid = TL_PCI_LOIO;
	sc->tl_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
	    RF_ACTIVE);

	/*
	 * Some cards have the I/O and memory mapped address registers
	 * reversed. Try both combinations before giving up.
	 */
	if (sc->tl_res == NULL) {
		rid = TL_PCI_LOMEM;
		sc->tl_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
		    RF_ACTIVE);
	}
#else
	rid = TL_PCI_LOMEM;
	sc->tl_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	/* Same reversed-BAR fallback as the I/O-space case above. */
	if (sc->tl_res == NULL) {
		rid = TL_PCI_LOIO;
		sc->tl_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
		    RF_ACTIVE);
	}
#endif

	if (sc->tl_res == NULL) {
		device_printf(dev, "couldn't map ports/memory\n");
		error = ENXIO;
		goto fail;
	}

	sc->tl_btag = rman_get_bustag(sc->tl_res);
	sc->tl_bhandle = rman_get_bushandle(sc->tl_res);

#ifdef notdef
	/*
	 * The ThunderLAN manual suggests jacking the PCI latency
	 * timer all the way up to its maximum value. I'm not sure
	 * if this is really necessary, but what the manual wants,
	 * the manual gets.
	 */
	command = pci_read_config(dev, TL_PCI_LATENCY_TIMER, 4);
	command |= 0x0000FF00;
	pci_write_config(dev, TL_PCI_LATENCY_TIMER, command, 4);
#endif

	/* Allocate interrupt */
	rid = 0;
	sc->tl_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->tl_irq == NULL) {
		device_printf(dev, "couldn't map interrupt\n");
		error = ENXIO;
		goto fail;
	}

	/*
	 * Now allocate memory for the TX and RX lists.
	 * Must be physically contiguous and page aligned since the
	 * chip DMAs descriptors directly from this block.
	 */
	sc->tl_ldata = contigmalloc(sizeof(struct tl_list_data), M_DEVBUF,
	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);

	if (sc->tl_ldata == NULL) {
		device_printf(dev, "no memory for list buffers!\n");
		error = ENXIO;
		goto fail;
	}

	bzero(sc->tl_ldata, sizeof(struct tl_list_data));

	sc->tl_dinfo = t;
	/* Olicom cards keep the station address at a different offset. */
	if (t->tl_vid == COMPAQ_VENDORID || t->tl_vid == TI_VENDORID)
		sc->tl_eeaddr = TL_EEPROM_EADDR;
	if (t->tl_vid == OLICOM_VENDORID)
		sc->tl_eeaddr = TL_EEPROM_EADDR_OC;

	/* Reset the adapter. */
	tl_softreset(sc, 1);
	tl_hardreset(dev);
	tl_softreset(sc, 1);

	/*
	 * Get station address from the EEPROM.
	 */
	if (tl_read_eeprom(sc, eaddr, sc->tl_eeaddr, ETHER_ADDR_LEN)) {
		device_printf(dev, "failed to read station address\n");
		error = ENXIO;
		goto fail;
	}

	/*
	 * XXX Olicom, in its desire to be different from the
	 * rest of the world, has done strange things with the
	 * encoding of the station address in the EEPROM. First
	 * of all, they store the address at offset 0xF8 rather
	 * than at 0x83 like the ThunderLAN manual suggests.
	 * Second, they store the address in three 16-bit words in
	 * network byte order, as opposed to storing it sequentially
	 * like all the other ThunderLAN cards. In order to get
	 * the station address in a form that matches what the Olicom
	 * diagnostic utility specifies, we have to byte-swap each
	 * word. To make things even more confusing, neither 00:00:28
	 * nor 00:00:24 appear in the IEEE OUI database.
	 */
	if (sc->tl_dinfo->tl_vid == OLICOM_VENDORID) {
		for (i = 0; i < ETHER_ADDR_LEN; i += 2) {
			u_int16_t		*p;
			p = (u_int16_t *)&eaddr[i];
			*p = ntohs(*p);
		}
	}

	ifp = sc->tl_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not if_alloc()\n");
		error = ENOSPC;
		goto fail;
	}
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = tl_ioctl;
	ifp->if_start = tl_start;
	ifp->if_watchdog = tl_watchdog;
	ifp->if_init = tl_init;
	ifp->if_mtu = ETHERMTU;
	ifp->if_snd.ifq_maxlen = TL_TX_LIST_CNT - 1;
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	ifp->if_capenable |= IFCAP_VLAN_MTU;
	callout_init_mtx(&sc->tl_stat_callout, &sc->tl_mtx, 0);

	/* Reset the adapter again. */
	tl_softreset(sc, 1);
	tl_hardreset(dev);
	tl_softreset(sc, 1);

	/*
	 * Do MII setup. If no PHYs are found, then this is a
	 * bitrate ThunderLAN chip that only supports 10baseT
	 * and AUI/BNC.
	 */
	if (mii_phy_probe(dev, &sc->tl_miibus,
	    tl_ifmedia_upd, tl_ifmedia_sts)) {
		struct ifmedia		*ifm;
		sc->tl_bitrate = 1;
		ifmedia_init(&sc->ifmedia, 0, tl_ifmedia_upd, tl_ifmedia_sts);
		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_5, 0, NULL);
		ifmedia_set(&sc->ifmedia, IFM_ETHER|IFM_10_T);
		/* Reset again, this time setting bitrate mode. */
		tl_softreset(sc, 1);
		ifm = &sc->ifmedia;
		ifm->ifm_media = ifm->ifm_cur->ifm_media;
		tl_ifmedia_upd(ifp);
	}

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr);

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->tl_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, tl_intr, sc, &sc->tl_intrhand);

	if (error) {
		device_printf(dev, "couldn't set up irq\n");
		ether_ifdetach(ifp);
		goto fail;
	}

fail:
	if (error)
		tl_detach(dev);

	return(error);
}

/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
tl_detach(dev)
	device_t		dev;
{
	struct tl_softc		*sc;
	struct ifnet		*ifp;

	sc = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc->tl_mtx), ("tl mutex not initialized"));
	ifp = sc->tl_ifp;

	/* These should only be active if attach succeeded */
	if (device_is_attached(dev)) {
		TL_LOCK(sc);
		tl_stop(sc);
		TL_UNLOCK(sc);
		/* Drain without the lock held (callout is mtx-aware). */
		callout_drain(&sc->tl_stat_callout);
		ether_ifdetach(ifp);
	}
	if (sc->tl_miibus)
		device_delete_child(dev, sc->tl_miibus);
	bus_generic_detach(dev);

	if (sc->tl_ldata)
		contigfree(sc->tl_ldata, sizeof(struct tl_list_data), M_DEVBUF);
	if (sc->tl_bitrate)
		ifmedia_removeall(&sc->ifmedia);

	if (sc->tl_intrhand)
		bus_teardown_intr(dev, sc->tl_irq, sc->tl_intrhand);
	if (sc->tl_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->tl_irq);
	if (sc->tl_res)
		bus_release_resource(dev, TL_RES, TL_RID, sc->tl_res);

	if (ifp)
		if_free(ifp);

	mtx_destroy(&sc->tl_mtx);

	return(0);
}

/*
 * Initialize the transmit lists.
 */
static int
tl_list_tx_init(sc)
	struct tl_softc		*sc;
{
	struct tl_chain_data	*cd;
	struct tl_list_data	*ld;
	int			i;

	cd = &sc->tl_cdata;
	ld = sc->tl_ldata;
	/* Link every software TX chain entry to its hardware list slot
	 * and string the entries into a NULL-terminated free list. */
	for (i = 0; i < TL_TX_LIST_CNT; i++) {
		cd->tl_tx_chain[i].tl_ptr = &ld->tl_tx_list[i];
		if (i == (TL_TX_LIST_CNT - 1))
			cd->tl_tx_chain[i].tl_next = NULL;
		else
			cd->tl_tx_chain[i].tl_next = &cd->tl_tx_chain[i + 1];
	}

	cd->tl_tx_free = &cd->tl_tx_chain[0];
	cd->tl_tx_tail = cd->tl_tx_head = NULL;
	/* TXEOC handshake starts in the 'ok to issue GO' state. */
	sc->tl_txeoc = 1;

	return(0);
}

/*
 * Initialize the RX lists and allocate mbufs for them.
 * Returns ENOBUFS if any mbuf cluster allocation fails.
 */
static int
tl_list_rx_init(sc)
	struct tl_softc		*sc;
{
	struct tl_chain_data	*cd;
	struct tl_list_data	*ld;
	int			i;

	cd = &sc->tl_cdata;
	ld = sc->tl_ldata;

	for (i = 0; i < TL_RX_LIST_CNT; i++) {
		cd->tl_rx_chain[i].tl_ptr =
		    (struct tl_list_onefrag *)&ld->tl_rx_list[i];
		if (tl_newbuf(sc, &cd->tl_rx_chain[i]) == ENOBUFS)
			return(ENOBUFS);
		/* Chain the hardware lists by physical address; the
		 * final entry gets a 0 forward pointer (end of chain). */
		if (i == (TL_RX_LIST_CNT - 1)) {
			cd->tl_rx_chain[i].tl_next = NULL;
			ld->tl_rx_list[i].tlist_fptr = 0;
		} else {
			cd->tl_rx_chain[i].tl_next = &cd->tl_rx_chain[i + 1];
			ld->tl_rx_list[i].tlist_fptr =
			    vtophys(&ld->tl_rx_list[i + 1]);
		}
	}

	cd->tl_rx_head = &cd->tl_rx_chain[0];
	cd->tl_rx_tail = &cd->tl_rx_chain[TL_RX_LIST_CNT - 1];

	return(0);
}

/*
 * Attach a fresh mbuf cluster to RX chain entry 'c' and reset the
 * entry's hardware list so the chip can DMA into it.
 * Returns ENOBUFS if no cluster could be allocated.
 */
static int
tl_newbuf(sc, c)
	struct tl_softc		*sc;
	struct tl_chain_onefrag	*c;
{
	struct mbuf		*m_new = NULL;

	m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m_new == NULL)
		return(ENOBUFS);

	c->tl_mbuf = m_new;
	c->tl_next = NULL;
	c->tl_ptr->tlist_frsize = MCLBYTES;
	c->tl_ptr->tlist_fptr = 0;
	c->tl_ptr->tl_frag.tlist_dadr = vtophys(mtod(m_new, caddr_t));
	c->tl_ptr->tl_frag.tlist_dcnt = MCLBYTES;
	/* Mark READY last so the chip never sees a half-built entry. */
	c->tl_ptr->tlist_cstat = TL_CSTAT_READY;

	return(0);
}
/*
 * Interrupt handler for RX 'end of frame' condition (EOF). This
 * tells us that a full ethernet frame has been captured and we need
 * to handle it.
 *
 * Reception is done using 'lists' which consist of a header and a
 * series of 10 data count/data address pairs that point to buffers.
 * Initially you're supposed to create a list, populate it with pointers
 * to buffers, then load the physical address of the list into the
 * ch_parm register. The adapter is then supposed to DMA the received
 * frame into the buffers for you.
 *
 * To make things as fast as possible, we have the chip DMA directly
 * into mbufs. This saves us from having to do a buffer copy: we can
 * just hand the mbufs directly to ether_input(). Once the frame has
 * been sent on its way, the 'list' structure is assigned a new buffer
 * and moved to the end of the RX chain. As long as we stay ahead of
 * the chip, it will always think it has an endless receive channel.
 *
 * If we happen to fall behind and the chip manages to fill up all of
 * the buffers, it will generate an end of channel interrupt and wait
 * for us to empty the chain and restart the receiver.
 */
static int
tl_intvec_rxeof(xsc, type)
	void			*xsc;
	u_int32_t		type;
{
	struct tl_softc		*sc;
	int			r = 0, total_len = 0;
	struct ether_header	*eh;
	struct mbuf		*m;
	struct ifnet		*ifp;
	struct tl_chain_onefrag	*cur_rx;

	sc = xsc;
	ifp = sc->tl_ifp;

	TL_LOCK_ASSERT(sc);

	/* Harvest every completed frame at the head of the chain;
	 * 'r' counts them and becomes part of the interrupt ACK. */
	while(sc->tl_cdata.tl_rx_head != NULL) {
		cur_rx = sc->tl_cdata.tl_rx_head;
		if (!(cur_rx->tl_ptr->tlist_cstat & TL_CSTAT_FRAMECMP))
			break;
		r++;
		sc->tl_cdata.tl_rx_head = cur_rx->tl_next;
		m = cur_rx->tl_mbuf;
		total_len = cur_rx->tl_ptr->tlist_frsize;

		if (tl_newbuf(sc, cur_rx) == ENOBUFS) {
			/* No replacement cluster: recycle the old mbuf
			 * for this slot and drop the frame. */
			ifp->if_ierrors++;
			cur_rx->tl_ptr->tlist_frsize = MCLBYTES;
			cur_rx->tl_ptr->tlist_cstat = TL_CSTAT_READY;
			cur_rx->tl_ptr->tl_frag.tlist_dcnt = MCLBYTES;
			continue;
		}

		/* Re-link the refreshed entry at the tail of the chain so
		 * the chip sees an endless receive channel. */
		sc->tl_cdata.tl_rx_tail->tl_ptr->tlist_fptr =
		    vtophys(cur_rx->tl_ptr);
		sc->tl_cdata.tl_rx_tail->tl_next = cur_rx;
		sc->tl_cdata.tl_rx_tail = cur_rx;

		/*
		 * Note: when the ThunderLAN chip is in 'capture all
		 * frames' mode, it will receive its own transmissions.
		 * We don't need to process our own transmissions,
		 * so we drop them here and continue.
		 */
		eh = mtod(m, struct ether_header *);
		/*if (ifp->if_flags & IFF_PROMISC && */
		if (!bcmp(eh->ether_shost, IF_LLADDR(sc->tl_ifp),
		    ETHER_ADDR_LEN)) {
			m_freem(m);
			continue;
		}

		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = total_len;

		/* Drop the lock across if_input() to avoid recursing
		 * into the driver with it held. */
		TL_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		TL_LOCK(sc);
	}

	return(r);
}

/*
 * The RX-EOC condition hits when the ch_parm address hasn't been
 * initialized or the adapter reached a list with a forward pointer
 * of 0 (which indicates the end of the chain). In our case, this means
 * the card has hit the end of the receive buffer chain and we need to
 * empty out the buffers and shift the pointer back to the beginning again.
 */
static int
tl_intvec_rxeoc(xsc, type)
	void			*xsc;
	u_int32_t		type;
{
	struct tl_softc		*sc;
	int			r;
	struct tl_chain_data	*cd;


	sc = xsc;
	cd = &sc->tl_cdata;

	/* Flush out the receive queue and ack RXEOF interrupts. */
	r = tl_intvec_rxeof(xsc, type);
	CMD_PUT(sc, TL_CMD_ACK | r | (type & ~(0x00100000)));
	r = 1;
	/* Rewind the software chain to the start ... */
	cd->tl_rx_head = &cd->tl_rx_chain[0];
	cd->tl_rx_tail = &cd->tl_rx_chain[TL_RX_LIST_CNT - 1];
	/* ... reload ch_parm, and have the caller restart the RX channel. */
	CSR_WRITE_4(sc, TL_CH_PARM, vtophys(sc->tl_cdata.tl_rx_head->tl_ptr));
	r |= (TL_CMD_GO|TL_CMD_RT);
	return(r);
}

/*
 * TX 'end of frame' handler: reclaim completed transmit descriptors.
 * Returns the number of frames completed (folded into the ACK).
 */
static int
tl_intvec_txeof(xsc, type)
	void			*xsc;
	u_int32_t		type;
{
	struct tl_softc		*sc;
	int			r = 0;
	struct tl_chain		*cur_tx;

	sc = xsc;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been sent.
	 */
	while (sc->tl_cdata.tl_tx_head != NULL) {
		cur_tx = sc->tl_cdata.tl_tx_head;
		if (!(cur_tx->tl_ptr->tlist_cstat & TL_CSTAT_FRAMECMP))
			break;
		sc->tl_cdata.tl_tx_head = cur_tx->tl_next;

		r++;
		m_freem(cur_tx->tl_mbuf);
		cur_tx->tl_mbuf = NULL;

		/* Return the descriptor to the free list. */
		cur_tx->tl_next = sc->tl_cdata.tl_tx_free;
		sc->tl_cdata.tl_tx_free = cur_tx;
		/* A zero forward pointer marks the end of this burst. */
		if (!cur_tx->tl_ptr->tlist_fptr)
			break;
	}

	return(r);
}

/*
 * The transmit end of channel interrupt. The adapter triggers this
 * interrupt to tell us it hit the end of the current transmit list.
 *
 * A note about this: it's possible for a condition to arise where
 * tl_start() may try to send frames between TXEOF and TXEOC interrupts.
 * You have to avoid this since the chip expects things to go in a
 * particular order: transmit, acknowledge TXEOF, acknowledge TXEOC.
 * When the TXEOF handler is called, it will free all of the transmitted
 * frames and reset the tx_head pointer to NULL. However, a TXEOC
 * interrupt should be received and acknowledged before any more frames
 * are queued for transmission. If tl_start() is called after TXEOF
 * resets the tx_head pointer but _before_ the TXEOC interrupt arrives,
 * it could attempt to issue a transmit command prematurely.
 *
 * To guard against this, tl_start() will only issue transmit commands
 * if the tl_txeoc flag is set, and only the TXEOC interrupt handler
 * can set this flag once tl_start() has cleared it.
 */
static int
tl_intvec_txeoc(xsc, type)
	void			*xsc;
	u_int32_t		type;
{
	struct tl_softc		*sc;
	struct ifnet		*ifp;
	u_int32_t		cmd;

	sc = xsc;
	ifp = sc->tl_ifp;

	/* Clear the timeout timer. */
	ifp->if_timer = 0;

	if (sc->tl_cdata.tl_tx_head == NULL) {
		/* Nothing pending: allow tl_start() to issue GO again. */
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		sc->tl_cdata.tl_tx_tail = NULL;
		sc->tl_txeoc = 1;
	} else {
		sc->tl_txeoc = 0;
		/* First we have to ack the EOC interrupt. */
		CMD_PUT(sc, TL_CMD_ACK | 0x00000001 | type);
		/* Then load the address of the next TX list. */
		CSR_WRITE_4(sc, TL_CH_PARM,
		    vtophys(sc->tl_cdata.tl_tx_head->tl_ptr));
		/* Restart TX channel. */
		cmd = CSR_READ_4(sc, TL_HOSTCMD);
		cmd &= ~TL_CMD_RT;
		cmd |= TL_CMD_GO|TL_CMD_INTSON;
		CMD_PUT(sc, cmd);
		/* Already acked above -- tell tl_intr() not to ack again. */
		return(0);
	}

	return(1);
}

/*
 * Adapter check interrupt: the chip detected an internal fault.
 * Log the failure address and reinitialize the whole adapter.
 */
static int
tl_intvec_adchk(xsc, type)
	void			*xsc;
	u_int32_t		type;
{
	struct tl_softc		*sc;

	sc = xsc;

	if (type)
		device_printf(sc->tl_dev, "adapter check: %x\n",
		    (unsigned int)CSR_READ_4(sc, TL_CH_PARM));

	tl_softreset(sc, 1);
	tl_stop(sc);
	tl_init_locked(sc);
	CMD_SET(sc, TL_CMD_INTSON);

	return(0);
}

/*
 * Network status interrupt: read and clear (write-back) the status
 * bits, then log them.
 */
static int
tl_intvec_netsts(xsc, type)
	void			*xsc;
	u_int32_t		type;
{
	struct tl_softc		*sc;
	u_int16_t		netsts;

	sc = xsc;

	netsts = tl_dio_read16(sc, TL_NETSTS);
	tl_dio_write16(sc, TL_NETSTS, netsts);

	device_printf(sc->tl_dev, "network status: %x\n", netsts);

	return(1);
}

/*
 * Main interrupt handler: decode the interrupt type/vector from the
 * HOST_INT register, dispatch to the per-type handler, and ack with
 * whatever value the handler returned (0 means the handler already
 * acked, or no ack is wanted).
 */
static void
tl_intr(xsc)
	void			*xsc;
{
	struct tl_softc		*sc;
	struct ifnet		*ifp;
	int			r = 0;
	u_int32_t		type = 0;
	u_int16_t		ints = 0;
	u_int8_t		ivec = 0;

	sc = xsc;
	TL_LOCK(sc);

	/* Disable interrupts */
	ints = CSR_READ_2(sc, TL_HOST_INT);
	CSR_WRITE_2(sc, TL_HOST_INT, ints);
	/* The raw register value (shifted) is echoed back in the ack. */
	type = (ints << 16) & 0xFFFF0000;
	ivec = (ints & TL_VEC_MASK) >> 5;
	ints = (ints & TL_INT_MASK) >> 2;

	ifp = sc->tl_ifp;

	switch(ints) {
	case (TL_INTR_INVALID):
#ifdef DIAGNOSTIC
		device_printf(sc->tl_dev, "got an invalid interrupt!\n");
#endif
		/* Re-enable interrupts but don't ack this one. */
		CMD_PUT(sc, type);
		r = 0;
		break;
	case (TL_INTR_TXEOF):
		r = tl_intvec_txeof((void *)sc, type);
		break;
	case (TL_INTR_TXEOC):
		r = tl_intvec_txeoc((void *)sc, type);
		break;
	case (TL_INTR_STATOFLOW):
		tl_stats_update(sc);
		r = 1;
		break;
	case (TL_INTR_RXEOF):
		r = tl_intvec_rxeof((void *)sc, type);
		break;
	case (TL_INTR_DUMMY):
		device_printf(sc->tl_dev, "got a dummy interrupt\n");
		r = 1;
		break;
	case (TL_INTR_ADCHK):
		/* Vector 0 means network status, non-zero adapter check. */
		if (ivec)
			r = tl_intvec_adchk((void *)sc, type);
		else
			r = tl_intvec_netsts((void *)sc, type);
		break;
	case (TL_INTR_RXEOC):
		r = tl_intvec_rxeoc((void *)sc, type);
		break;
	default:
		device_printf(sc->tl_dev, "bogus interrupt type\n");
		break;
	}

	/* Re-enable interrupts */
	if (r) {
		CMD_PUT(sc, TL_CMD_ACK | r | type);
	}

	if (ifp->if_snd.ifq_head != NULL)
		tl_start_locked(ifp);

	TL_UNLOCK(sc);

	return;
}

/*
 * Periodic (1 Hz) statistics harvest: read the five clear-on-read
 * statistics registers into a local struct, fold them into the ifnet
 * counters, bump the TX threshold on underruns, and tick the MII.
 */
static void
tl_stats_update(xsc)
	void			*xsc;
{
	struct tl_softc		*sc;
	struct ifnet		*ifp;
	struct tl_stats		tl_stats;
	struct mii_data		*mii;
	u_int32_t		*p;

	bzero((char *)&tl_stats, sizeof(struct tl_stats));

	sc = xsc;
	TL_LOCK_ASSERT(sc);
	ifp = sc->tl_ifp;

	p = (u_int32_t *)&tl_stats;

	/* ADDR_INC makes the DIO window auto-advance across the
	 * five consecutive statistics registers. */
	CSR_WRITE_2(sc, TL_DIO_ADDR, TL_TXGOODFRAMES|TL_DIO_ADDR_INC);
	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
	*p++ = CSR_READ_4(sc, TL_DIO_DATA);

	ifp->if_opackets += tl_tx_goodframes(tl_stats);
	ifp->if_collisions += tl_stats.tl_tx_single_collision +
	    tl_stats.tl_tx_multi_collision;
	ifp->if_ipackets += tl_rx_goodframes(tl_stats);
	ifp->if_ierrors += tl_stats.tl_crc_errors + tl_stats.tl_code_errors +
	    tl_rx_overrun(tl_stats);
	ifp->if_oerrors += tl_tx_underrun(tl_stats);

	if (tl_tx_underrun(tl_stats)) {
		u_int8_t		tx_thresh;
		tx_thresh = tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_TXTHRESH;
		if (tx_thresh != TL_AC_TXTHRESH_WHOLEPKT) {
			/* Threshold lives in the high nibble; step it up. */
			tx_thresh >>= 4;
			tx_thresh++;
			device_printf(sc->tl_dev, "tx underrun -- increasing "
			    "tx threshold to %d bytes\n",
			    (64 * (tx_thresh * 4)));
			tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH);
			tl_dio_setbit(sc, TL_ACOMMIT, tx_thresh << 4);
		}
	}

	callout_reset(&sc->tl_stat_callout, hz, tl_stats_update, sc);

	if (!sc->tl_bitrate) {
		mii = device_get_softc(sc->tl_miibus);
		mii_tick(mii);
	}

	return;
}

/*
 * Encapsulate an mbuf chain in a list by coupling the mbuf data
 * pointers to the fragment pointers.
 * Returns 0 on success, 1 if an mbuf could not be allocated for the
 * copy-and-coalesce fallback (in which case m_head is untouched).
 */
static int
tl_encap(sc, c, m_head)
	struct tl_softc		*sc;
	struct tl_chain		*c;
	struct mbuf		*m_head;
{
	int			frag = 0;
	struct tl_frag		*f = NULL;
	int			total_len;
	struct mbuf		*m;
	struct ifnet		*ifp = sc->tl_ifp;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	m = m_head;
	total_len = 0;

	for (m = m_head, frag = 0; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			if (frag == TL_MAXFRAGS)
				break;
			total_len+= m->m_len;
			c->tl_ptr->tl_frag[frag].tlist_dadr =
			    vtophys(mtod(m, vm_offset_t));
			c->tl_ptr->tl_frag[frag].tlist_dcnt = m->m_len;
			frag++;
		}
	}

	/*
	 * Handle special cases.
	 * Special case #1: we used up all 10 fragments, but
	 * we have more mbufs left in the chain. Copy the
	 * data into an mbuf cluster. Note that we don't
	 * bother clearing the values in the other fragment
	 * pointers/counters; it wouldn't gain us anything,
	 * and would waste cycles.
	 */
	if (m != NULL) {
		struct mbuf		*m_new = NULL;

		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			if_printf(ifp, "no memory for tx list\n");
			return(1);
		}
		if (m_head->m_pkthdr.len > MHLEN) {
			MCLGET(m_new, M_DONTWAIT);
			if (!(m_new->m_flags & M_EXT)) {
				m_freem(m_new);
				if_printf(ifp, "no memory for tx list\n");
				return(1);
			}
		}
		m_copydata(m_head, 0, m_head->m_pkthdr.len,
		    mtod(m_new, caddr_t));
		m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
		m_freem(m_head);
		m_head = m_new;
		f = &c->tl_ptr->tl_frag[0];
		f->tlist_dadr = vtophys(mtod(m_new, caddr_t));
		f->tlist_dcnt = total_len = m_new->m_len;
		frag = 1;
	}

	/*
	 * Special case #2: the frame is smaller than the minimum
	 * frame size. We have to pad it to make the chip happy.
	 */
	if (total_len < TL_MIN_FRAMELEN) {
		if (frag == TL_MAXFRAGS)
			if_printf(ifp,
			    "all frags filled but frame still to small!\n");
		/* Point the pad fragment at a shared zero-filled buffer. */
		f = &c->tl_ptr->tl_frag[frag];
		f->tlist_dcnt = TL_MIN_FRAMELEN - total_len;
		f->tlist_dadr = vtophys(&sc->tl_ldata->tl_pad);
		total_len += f->tlist_dcnt;
		frag++;
	}

	c->tl_mbuf = m_head;
	/* TL_LAST_FRAG in the final count terminates the fragment list. */
	c->tl_ptr->tl_frag[frag - 1].tlist_dcnt |= TL_LAST_FRAG;
	c->tl_ptr->tlist_frsize = total_len;
	c->tl_ptr->tlist_cstat = TL_CSTAT_READY;
	c->tl_ptr->tlist_fptr = 0;

	return(0);
}

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */
static void
tl_start(ifp)
	struct ifnet		*ifp;
{
	struct tl_softc		*sc;

	/* Locked wrapper around tl_start_locked(). */
	sc = ifp->if_softc;
	TL_LOCK(sc);
	tl_start_locked(ifp);
	TL_UNLOCK(sc);
}

static void
tl_start_locked(ifp)
	struct ifnet		*ifp;
{
	struct tl_softc		*sc;
	struct mbuf		*m_head = NULL;
	u_int32_t		cmd;
	struct tl_chain		*prev = NULL, *cur_tx = NULL, *start_tx;

	sc = ifp->if_softc;
	TL_LOCK_ASSERT(sc);

	/*
	 * Check for an available queue slot. If there are none,
	 * punt.
	 */
	if (sc->tl_cdata.tl_tx_free == NULL) {
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
		return;
	}

	start_tx = sc->tl_cdata.tl_tx_free;

	while(sc->tl_cdata.tl_tx_free != NULL) {
		IF_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/* Pick a chain member off the free list. */
		cur_tx = sc->tl_cdata.tl_tx_free;
		sc->tl_cdata.tl_tx_free = cur_tx->tl_next;

		cur_tx->tl_next = NULL;

		/* Pack the data into the list. */
		tl_encap(sc, cur_tx, m_head);

		/* Chain it together (forward pointers are physical). */
		if (prev != NULL) {
			prev->tl_next = cur_tx;
			prev->tl_ptr->tlist_fptr = vtophys(cur_tx->tl_ptr);
		}
		prev = cur_tx;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, cur_tx->tl_mbuf);
	}

	/*
	 * If there are no packets queued, bail.
	 */
	if (cur_tx == NULL)
		return;

	/*
	 * That's all we can stands, we can't stands no more.
	 * If there are no other transfers pending, then issue the
	 * TX GO command to the adapter to start things moving.
	 * Otherwise, just leave the data in the queue and let
	 * the EOF/EOC interrupt handler send.
	 */
	if (sc->tl_cdata.tl_tx_head == NULL) {
		sc->tl_cdata.tl_tx_head = start_tx;
		sc->tl_cdata.tl_tx_tail = cur_tx;

		/* Only issue GO if the TXEOC handshake permits it
		 * (see the comment above tl_intvec_txeoc()). */
		if (sc->tl_txeoc) {
			sc->tl_txeoc = 0;
			CSR_WRITE_4(sc, TL_CH_PARM, vtophys(start_tx->tl_ptr));
			cmd = CSR_READ_4(sc, TL_HOSTCMD);
			cmd &= ~TL_CMD_RT;
			cmd |= TL_CMD_GO|TL_CMD_INTSON;
			CMD_PUT(sc, cmd);
		}
	} else {
		/* Append this burst to the pending chain. */
		sc->tl_cdata.tl_tx_tail->tl_next = start_tx;
		sc->tl_cdata.tl_tx_tail = cur_tx;
	}

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;

	return;
}

static void
tl_init(xsc)
	void			*xsc;
{
	struct tl_softc		*sc = xsc;

	/* Locked wrapper around tl_init_locked(). */
	TL_LOCK(sc);
	tl_init_locked(sc);
	TL_UNLOCK(sc);
}

/*
 * Bring the interface up: program filters, rebuild the RX/TX lists,
 * set the media, and start the receive channel and stats callout.
 */
static void
tl_init_locked(sc)
	struct tl_softc		*sc;
{
	struct ifnet		*ifp = sc->tl_ifp;
	struct mii_data		*mii;

	TL_LOCK_ASSERT(sc);

	/* NOTE(review): redundant -- ifp was already initialized to
	 * sc->tl_ifp in its declaration above. */
	ifp = sc->tl_ifp;

	/*
	 * Cancel pending I/O.
	 */
	tl_stop(sc);

	/* Initialize TX FIFO threshold */
	tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH);
	tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH_16LONG);

	/* Set PCI burst size */
	tl_dio_write8(sc, TL_BSIZEREG, TL_RXBURST_16LONG|TL_TXBURST_16LONG);

	/*
	 * Set 'capture all frames' bit for promiscuous mode.
	 */
	if (ifp->if_flags & IFF_PROMISC)
		tl_dio_setbit(sc, TL_NETCMD, TL_CMD_CAF);
	else
		tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_CAF);

	/*
	 * Set capture broadcast bit to capture broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST)
		tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_NOBRX);
	else
		tl_dio_setbit(sc, TL_NETCMD, TL_CMD_NOBRX);

	tl_dio_write16(sc, TL_MAXRX, MCLBYTES);

	/* Init our MAC address */
	tl_setfilt(sc, IF_LLADDR(sc->tl_ifp), 0);

	/* Init multicast filter, if needed.
	 */
	tl_setmulti(sc);

	/* Init circular RX list. */
	if (tl_list_rx_init(sc) == ENOBUFS) {
		device_printf(sc->tl_dev,
		    "initialization failed: no memory for rx buffers\n");
		tl_stop(sc);
		return;
	}

	/* Init TX pointers. */
	tl_list_tx_init(sc);

	/* Enable PCI interrupts. */
	CMD_SET(sc, TL_CMD_INTSON);

	/* Load the address of the rx list */
	CMD_SET(sc, TL_CMD_RT);
	CSR_WRITE_4(sc, TL_CH_PARM, vtophys(&sc->tl_ldata->tl_rx_list[0]));

	if (!sc->tl_bitrate) {
		if (sc->tl_miibus != NULL) {
			mii = device_get_softc(sc->tl_miibus);
			mii_mediachg(mii);
		}
	} else {
		tl_ifmedia_upd(ifp);
	}

	/* Send the RX go command */
	CMD_SET(sc, TL_CMD_GO|TL_CMD_NES|TL_CMD_RT);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	/* Start the stats update counter */
	callout_reset(&sc->tl_stat_callout, hz, tl_stats_update, sc);

	return;
}

/*
 * Set media options.
 */
static int
tl_ifmedia_upd(ifp)
	struct ifnet		*ifp;
{
	struct tl_softc		*sc;
	struct mii_data		*mii = NULL;

	sc = ifp->if_softc;

	TL_LOCK(sc);
	/* Bitrate cards are programmed directly; MII cards go
	 * through the miibus layer. */
	if (sc->tl_bitrate)
		tl_setmode(sc, sc->ifmedia.ifm_media);
	else {
		mii = device_get_softc(sc->tl_miibus);
		mii_mediachg(mii);
	}
	TL_UNLOCK(sc);

	return(0);
}

/*
 * Report current media status.
2163 */ 2164static void 2165tl_ifmedia_sts(ifp, ifmr) 2166 struct ifnet *ifp; 2167 struct ifmediareq *ifmr; 2168{ 2169 struct tl_softc *sc; 2170 struct mii_data *mii; 2171 2172 sc = ifp->if_softc; 2173 2174 TL_LOCK(sc); 2175 ifmr->ifm_active = IFM_ETHER; 2176 2177 if (sc->tl_bitrate) { 2178 if (tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_MTXD1) 2179 ifmr->ifm_active = IFM_ETHER|IFM_10_5; 2180 else 2181 ifmr->ifm_active = IFM_ETHER|IFM_10_T; 2182 if (tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_MTXD3) 2183 ifmr->ifm_active |= IFM_HDX; 2184 else 2185 ifmr->ifm_active |= IFM_FDX; 2186 return; 2187 } else { 2188 mii = device_get_softc(sc->tl_miibus); 2189 mii_pollstat(mii); 2190 ifmr->ifm_active = mii->mii_media_active; 2191 ifmr->ifm_status = mii->mii_media_status; 2192 } 2193 TL_UNLOCK(sc); 2194 2195 return; 2196} 2197 2198static int 2199tl_ioctl(ifp, command, data) 2200 struct ifnet *ifp; 2201 u_long command; 2202 caddr_t data; 2203{ 2204 struct tl_softc *sc = ifp->if_softc; 2205 struct ifreq *ifr = (struct ifreq *) data; 2206 int error = 0; 2207 2208 switch(command) { 2209 case SIOCSIFFLAGS: 2210 TL_LOCK(sc); 2211 if (ifp->if_flags & IFF_UP) { 2212 if (ifp->if_drv_flags & IFF_DRV_RUNNING && 2213 ifp->if_flags & IFF_PROMISC && 2214 !(sc->tl_if_flags & IFF_PROMISC)) { 2215 tl_dio_setbit(sc, TL_NETCMD, TL_CMD_CAF); 2216 tl_setmulti(sc); 2217 } else if (ifp->if_drv_flags & IFF_DRV_RUNNING && 2218 !(ifp->if_flags & IFF_PROMISC) && 2219 sc->tl_if_flags & IFF_PROMISC) { 2220 tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_CAF); 2221 tl_setmulti(sc); 2222 } else 2223 tl_init_locked(sc); 2224 } else { 2225 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 2226 tl_stop(sc); 2227 } 2228 } 2229 sc->tl_if_flags = ifp->if_flags; 2230 TL_UNLOCK(sc); 2231 error = 0; 2232 break; 2233 case SIOCADDMULTI: 2234 case SIOCDELMULTI: 2235 TL_LOCK(sc); 2236 tl_setmulti(sc); 2237 TL_UNLOCK(sc); 2238 error = 0; 2239 break; 2240 case SIOCSIFMEDIA: 2241 case SIOCGIFMEDIA: 2242 if (sc->tl_bitrate) 2243 error = ifmedia_ioctl(ifp, 
ifr, &sc->ifmedia, command); 2244 else { 2245 struct mii_data *mii; 2246 mii = device_get_softc(sc->tl_miibus); 2247 error = ifmedia_ioctl(ifp, ifr, 2248 &mii->mii_media, command); 2249 } 2250 break; 2251 default: 2252 error = ether_ioctl(ifp, command, data); 2253 break; 2254 } 2255 2256 return(error); 2257} 2258 2259static void 2260tl_watchdog(ifp) 2261 struct ifnet *ifp; 2262{ 2263 struct tl_softc *sc; 2264 2265 sc = ifp->if_softc; 2266 2267 if_printf(ifp, "device timeout\n"); 2268 2269 TL_LOCK(sc); 2270 ifp->if_oerrors++; 2271 2272 tl_softreset(sc, 1); 2273 tl_init_locked(sc); 2274 TL_UNLOCK(sc); 2275 2276 return; 2277} 2278 2279/* 2280 * Stop the adapter and free any mbufs allocated to the 2281 * RX and TX lists. 2282 */ 2283static void 2284tl_stop(sc) 2285 struct tl_softc *sc; 2286{ 2287 register int i; 2288 struct ifnet *ifp; 2289 2290 TL_LOCK_ASSERT(sc); 2291 2292 ifp = sc->tl_ifp; 2293 2294 /* Stop the stats updater. */ 2295 callout_stop(&sc->tl_stat_callout); 2296 2297 /* Stop the transmitter */ 2298 CMD_CLR(sc, TL_CMD_RT); 2299 CMD_SET(sc, TL_CMD_STOP); 2300 CSR_WRITE_4(sc, TL_CH_PARM, 0); 2301 2302 /* Stop the receiver */ 2303 CMD_SET(sc, TL_CMD_RT); 2304 CMD_SET(sc, TL_CMD_STOP); 2305 CSR_WRITE_4(sc, TL_CH_PARM, 0); 2306 2307 /* 2308 * Disable host interrupts. 2309 */ 2310 CMD_SET(sc, TL_CMD_INTSOFF); 2311 2312 /* 2313 * Clear list pointer. 2314 */ 2315 CSR_WRITE_4(sc, TL_CH_PARM, 0); 2316 2317 /* 2318 * Free the RX lists. 2319 */ 2320 for (i = 0; i < TL_RX_LIST_CNT; i++) { 2321 if (sc->tl_cdata.tl_rx_chain[i].tl_mbuf != NULL) { 2322 m_freem(sc->tl_cdata.tl_rx_chain[i].tl_mbuf); 2323 sc->tl_cdata.tl_rx_chain[i].tl_mbuf = NULL; 2324 } 2325 } 2326 bzero((char *)&sc->tl_ldata->tl_rx_list, 2327 sizeof(sc->tl_ldata->tl_rx_list)); 2328 2329 /* 2330 * Free the TX list buffers. 
2331 */ 2332 for (i = 0; i < TL_TX_LIST_CNT; i++) { 2333 if (sc->tl_cdata.tl_tx_chain[i].tl_mbuf != NULL) { 2334 m_freem(sc->tl_cdata.tl_tx_chain[i].tl_mbuf); 2335 sc->tl_cdata.tl_tx_chain[i].tl_mbuf = NULL; 2336 } 2337 } 2338 bzero((char *)&sc->tl_ldata->tl_tx_list, 2339 sizeof(sc->tl_ldata->tl_tx_list)); 2340 2341 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 2342 2343 return; 2344} 2345 2346/* 2347 * Stop all chip I/O so that the kernel's probe routines don't 2348 * get confused by errant DMAs when rebooting. 2349 */ 2350static int 2351tl_shutdown(dev) 2352 device_t dev; 2353{ 2354 struct tl_softc *sc; 2355 2356 sc = device_get_softc(dev); 2357 2358 TL_LOCK(sc); 2359 tl_stop(sc); 2360 TL_UNLOCK(sc); 2361 2362 return (0); 2363} 2364