if_tl.c revision 36464
1227753Stheraven/* 2227753Stheraven * Copyright (c) 1997, 1998 3227753Stheraven * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 4227753Stheraven * 5227753Stheraven * Redistribution and use in source and binary forms, with or without 6227753Stheraven * modification, are permitted provided that the following conditions 7227753Stheraven * are met: 8227753Stheraven * 1. Redistributions of source code must retain the above copyright 9227753Stheraven * notice, this list of conditions and the following disclaimer. 10227753Stheraven * 2. Redistributions in binary form must reproduce the above copyright 11227753Stheraven * notice, this list of conditions and the following disclaimer in the 12227753Stheraven * documentation and/or other materials provided with the distribution. 13227753Stheraven * 3. All advertising materials mentioning features or use of this software 14227753Stheraven * must display the following acknowledgement: 15227753Stheraven * This product includes software developed by Bill Paul. 16227753Stheraven * 4. Neither the name of the author nor the names of any co-contributors 17227753Stheraven * may be used to endorse or promote products derived from this software 18227753Stheraven * without specific prior written permission. 19227753Stheraven * 20227753Stheraven * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 21227753Stheraven * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22227753Stheraven * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23227753Stheraven * ARE DISCLAIMED. 
IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 24227753Stheraven * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25227753Stheraven * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26227753Stheraven * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27227753Stheraven * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28227753Stheraven * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29227753Stheraven * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 30227753Stheraven * THE POSSIBILITY OF SUCH DAMAGE. 31227753Stheraven * 32227753Stheraven * $Id: if_tl.c,v 1.6 1998/05/26 23:42:24 wpaul Exp $ 33227753Stheraven */ 34 35/* 36 * Texas Instruments ThunderLAN driver for FreeBSD 2.2.6 and 3.x. 37 * Supports many Compaq PCI NICs based on the ThunderLAN ethernet controller, 38 * the National Semiconductor DP83840A physical interface and the 39 * Microchip Technology 24Cxx series serial EEPROM. 40 * 41 * Written using the following three documents: 42 * 43 * Texas Instruments ThunderLAN Programmer's Guide (www.ti.com) 44 * National Semiconductor DP83840A data sheet (www.national.com) 45 * Microchip Technology 24C02C data sheet (www.microchip.com) 46 * 47 * Written by Bill Paul <wpaul@ctr.columbia.edu> 48 * Electrical Engineering Department 49 * Columbia University, New York City 50 */ 51 52/* 53 * Some notes about the ThunderLAN: 54 * 55 * The ThunderLAN controller is a single chip containing PCI controller 56 * logic, approximately 3K of on-board SRAM, a LAN controller, and media 57 * independent interface (MII). The MII allows the ThunderLAN chip to 58 * control up to 32 different physical interfaces (PHYs). The ThunderLAN 59 * also has a built-in 10baseT PHY, allowing a single ThunderLAN controller 60 * to act as a complete ethernet interface. 
61 * 62 * Other PHYs may be attached to the ThunderLAN; the Compaq 10/100 cards 63 * use a National Semiconductor DP83840A PHY that supports 10 or 100Mb/sec 64 * in full or half duplex. Some of the Compaq Deskpro machines use a 65 * Level 1 LXT970 PHY with the same capabilities. A serial EEPROM is also 66 * attached to the ThunderLAN chip to provide power-up default register 67 * settings and for storing the adapter's stattion address. Although not 68 * supported by this driver, the ThunderLAN chip can also be connected 69 * to token ring PHYs. 70 * 71 * It is important to note that while it is possible to have multiple 72 * PHYs attached to the ThunderLAN's MII, only one PHY may be active at 73 * any time. (This makes me wonder exactly how the dual port Compaq 74 * adapter is supposed to work.) This driver attempts to compensate for 75 * this in the following way: 76 * 77 * When the ThunderLAN chip is probed, the probe routine attempts to 78 * locate all attached PHYs by checking all 32 possible PHY addresses 79 * (0x00 to 0x1F). Each PHY is attached as a separate logical interface. 80 * The driver allows any one interface to be brought up at any given 81 * time: if an attempt is made to bring up a second PHY while another 82 * PHY is already enabled, the driver will return an error. 83 * 84 * The ThunderLAN has a set of registers which can be used to issue 85 * command, acknowledge interrupts, and to manipulate other internal 86 * registers on its DIO bus. The primary registers can be accessed 87 * using either programmed I/O (inb/outb) or via PCI memory mapping, 88 * depending on how the card is configured during the PCI probing 89 * phase. It is even possible to have both PIO and memory mapped 90 * access turned on at the same time. 91 * 92 * Frame reception and transmission with the ThunderLAN chip is done 93 * using frame 'lists.' 
A list structure looks more or less like this: 94 * 95 * struct tl_frag { 96 * u_int32_t fragment_address; 97 * u_int32_t fragment_size; 98 * }; 99 * struct tl_list { 100 * u_int32_t forward_pointer; 101 * u_int16_t cstat; 102 * u_int16_t frame_size; 103 * struct tl_frag fragments[10]; 104 * }; 105 * 106 * The forward pointer in the list header can be either a 0 or the address 107 * of another list, which allows several lists to be linked together. Each 108 * list contains up to 10 fragment descriptors. This means the chip allows 109 * ethernet frames to be broken up into up to 10 chunks for transfer to 110 * and from the SRAM. Note that the forward pointer and fragment buffer 111 * addresses are physical memory addresses, not virtual. Note also that 112 * a single ethernet frame can not span lists: if the host wants to 113 * transmit a frame and the frame data is split up over more than 10 114 * buffers, the frame has to collapsed before it can be transmitted. 115 * 116 * To receive frames, the driver sets up a number of lists and populates 117 * the fragment descriptors, then it sends an RX GO command to the chip. 118 * When a frame is received, the chip will DMA it into the memory regions 119 * specified by the fragment descriptors and then trigger an RX 'end of 120 * frame interrupt' when done. The driver may choose to use only one 121 * fragment per list; this may result is slighltly less efficient use 122 * of memory in exchange for improving performance. 123 * 124 * To transmit frames, the driver again sets up lists and fragment 125 * descriptors, only this time the buffers contain frame data that 126 * is to be DMA'ed into the chip instead of out of it. Once the chip 127 * has transfered the data into its on-board SRAM, it will trigger a 128 * TX 'end of frame' interrupt. It will also generate an 'end of channel' 129 * interrupt when it reaches the end of the list. 
130 */ 131 132/* 133 * Some notes about this driver: 134 * 135 * The ThunderLAN chip provides a couple of different ways to organize 136 * reception, transmission and interrupt handling. The simplest approach 137 * is to use one list each for transmission and reception. In this mode, 138 * the ThunderLAN will generate two interrupts for every received frame 139 * (one RX EOF and one RX EOC) and two for each transmitted frame (one 140 * TX EOF and one TX EOC). This may make the driver simpler but it hurts 141 * performance to have to handle so many interrupts. 142 * 143 * Initially I wanted to create a circular list of receive buffers so 144 * that the ThunderLAN chip would think there was an infinitely long 145 * receive channel and never deliver an RXEOC interrupt. However this 146 * doesn't work correctly under heavy load: while the manual says the 147 * chip will trigger an RXEOF interrupt each time a frame is copied into 148 * memory, you can't count on the chip waiting around for you to acknowledge 149 * the interrupt before it starts trying to DMA the next frame. The result 150 * is that the chip might traverse the entire circular list and then wrap 151 * around before you have a chance to do anything about it. Consequently, 152 * the receive list is terminated (with a 0 in the forward pointer in the 153 * last element). Each time an RXEOF interrupt arrives, the used list 154 * is shifted to the end of the list. This gives the appearance of an 155 * infinitely large RX chain so long as the driver doesn't fall behind 156 * the chip and allow all of the lists to be filled up. 157 * 158 * If all the lists are filled, the adapter will deliver an RX 'end of 159 * channel' interrupt when it hits the 0 forward pointer at the end of 160 * the chain. The RXEOC handler then cleans out the RX chain and resets 161 * the list head pointer in the ch_parm register and restarts the receiver. 
162 * 163 * For frame transmission, it is possible to program the ThunderLAN's 164 * transmit interrupt threshold so that the chip can acknowledge multiple 165 * lists with only a single TX EOF interrupt. This allows the driver to 166 * queue several frames in one shot, and only have to handle a total 167 * two interrupts (one TX EOF and one TX EOC) no matter how many frames 168 * are transmitted. Frame transmission is done directly out of the 169 * mbufs passed to the tl_start() routine via the interface send queue. 170 * The driver simply sets up the fragment descriptors in the transmit 171 * lists to point to the mbuf data regions and sends a TX GO command. 172 * 173 * Note that since the RX and TX lists themselves are always used 174 * only by the driver, the are malloc()ed once at driver initialization 175 * time and never free()ed. 176 * 177 * Also, in order to remain as platform independent as possible, this 178 * driver uses memory mapped register access to manipulate the card 179 * as opposed to programmed I/O. This avoids the use of the inb/outb 180 * (and related) instructions which are specific to the i386 platform. 181 * 182 * Using these techniques, this driver achieves very high performance 183 * by minimizing the amount of interrupts generated during large 184 * transfers and by completely avoiding buffer copies. Frame transfer 185 * to and from the ThunderLAN chip is performed entirely by the chip 186 * itself thereby reducing the load on the host CPU. 
187 */ 188 189#include "bpfilter.h" 190 191#include <sys/param.h> 192#include <sys/systm.h> 193#include <sys/sockio.h> 194#include <sys/mbuf.h> 195#include <sys/malloc.h> 196#include <sys/kernel.h> 197#include <sys/socket.h> 198#include <sys/syslog.h> 199 200#include <net/if.h> 201#include <net/if_arp.h> 202#include <net/ethernet.h> 203#include <net/if_dl.h> 204#include <net/if_mib.h> 205#include <net/if_media.h> 206#include <net/if_types.h> 207 208#ifdef INET 209#include <netinet/in.h> 210#include <netinet/in_systm.h> 211#include <netinet/in_var.h> 212#include <netinet/ip.h> 213#include <netinet/if_ether.h> 214#endif 215 216#ifdef IPX 217#include <netipx/ipx.h> 218#include <netipx/ipx_if.h> 219#endif 220 221#ifdef NS 222#include <netns/ns.h> 223#include <netns/ns_if.h> 224#endif 225 226#if NBPFILTER > 0 227#include <net/bpf.h> 228#include <net/bpfdesc.h> 229#endif 230 231#include <vm/vm.h> /* for vtophys */ 232#include <vm/vm_param.h> /* for vtophys */ 233#include <vm/pmap.h> /* for vtophys */ 234#include <machine/clock.h> /* for DELAY */ 235 236#include <pci/pcireg.h> 237#include <pci/pcivar.h> 238 239#include <pci/if_tlreg.h> 240 241#ifndef lint 242static char rcsid[] = 243 "$Id: if_tl.c,v 1.6 1998/05/26 23:42:24 wpaul Exp $"; 244#endif 245 246/* 247 * Various supported device vendors/types and their names. 
248 */ 249 250static struct tl_type tl_devs[] = { 251 { TI_VENDORID, TI_DEVICEID_THUNDERLAN, 252 "Texas Instruments ThunderLAN" }, 253 { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10, 254 "Compaq Netelligent 10" }, 255 { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100, 256 "Compaq Netelligent 10/100" }, 257 { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_PROLIANT, 258 "Compaq Netelligent 10/100 Proliant" }, 259 { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_DUAL, 260 "Compaq Netelligent 10/100 Dual Port" }, 261 { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETFLEX_3P_INTEGRATED, 262 "Compaq NetFlex-3/P Integrated" }, 263 { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETFLEX_3P, 264 "Compaq NetFlex-3/P" }, 265 { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETFLEX_3P_BNC, 266 "Compaq NetFlex 3/P w/ BNC" }, 267 { COMPAQ_VENDORID, COMPAQ_DEVICEID_DESKPRO_4000_5233MMX, 268 "Compaq Deskpro 4000 5233MMX" }, 269 { 0, 0, NULL } 270}; 271 272/* 273 * Various supported PHY vendors/types and their names. Note that 274 * this driver will work with pretty much any MII-compliant PHY, 275 * so failure to positively identify the chip is not a fatal error. 
276 */ 277 278static struct tl_type tl_phys[] = { 279 { TI_PHY_VENDORID, TI_PHY_10BT, "<TI ThunderLAN 10BT (internal)>" }, 280 { TI_PHY_VENDORID, TI_PHY_100VGPMI, "<TI TNETE211 100VG Any-LAN>" }, 281 { NS_PHY_VENDORID, NS_PHY_83840A, "<National Semiconductor DP83840A>"}, 282 { LEVEL1_PHY_VENDORID, LEVEL1_PHY_LXT970, "<Level 1 LXT970>" }, 283 { INTEL_PHY_VENDORID, INTEL_PHY_82555, "<Intel 82555>" }, 284 { SEEQ_PHY_VENDORID, SEEQ_PHY_80220, "<SEEQ 80220>" }, 285 { 0, 0, "<MII-compliant physical interface>" } 286}; 287 288static struct tl_iflist *tl_iflist = NULL; 289static unsigned long tl_count; 290 291static char *tl_probe __P((pcici_t, pcidi_t)); 292static void tl_attach_ctlr __P((pcici_t, int)); 293static int tl_attach_phy __P((struct tl_csr *, int, char *, 294 int, struct tl_iflist *)); 295static int tl_intvec_invalid __P((void *, u_int32_t)); 296static int tl_intvec_dummy __P((void *, u_int32_t)); 297static int tl_intvec_rxeoc __P((void *, u_int32_t)); 298static int tl_intvec_txeoc __P((void *, u_int32_t)); 299static int tl_intvec_txeof __P((void *, u_int32_t)); 300static int tl_intvec_rxeof __P((void *, u_int32_t)); 301static int tl_intvec_adchk __P((void *, u_int32_t)); 302static int tl_intvec_netsts __P((void *, u_int32_t)); 303static int tl_intvec_statoflow __P((void *, u_int32_t)); 304 305static int tl_newbuf __P((struct tl_softc *, struct tl_chain *)); 306static void tl_stats_update __P((void *)); 307static int tl_encap __P((struct tl_softc *, struct tl_chain *, 308 struct mbuf *)); 309 310static void tl_intr __P((void *)); 311static void tl_start __P((struct ifnet *)); 312static int tl_ioctl __P((struct ifnet *, int, caddr_t)); 313static void tl_init __P((void *)); 314static void tl_stop __P((struct tl_softc *)); 315static void tl_watchdog __P((struct ifnet *)); 316static void tl_shutdown __P((int, void *)); 317static int tl_ifmedia_upd __P((struct ifnet *)); 318static void tl_ifmedia_sts __P((struct ifnet *, struct ifmediareq *)); 319 320static u_int8_t 
tl_eeprom_putbyte __P((struct tl_csr *, u_int8_t)); 321static u_int8_t tl_eeprom_getbyte __P((struct tl_csr *, u_int8_t , 322 u_int8_t * )); 323static int tl_read_eeprom __P((struct tl_csr *, caddr_t, int, int)); 324 325static void tl_mii_sync __P((struct tl_csr *)); 326static void tl_mii_send __P((struct tl_csr *, u_int32_t, int)); 327static int tl_mii_readreg __P((struct tl_csr *, struct tl_mii_frame *)); 328static int tl_mii_writereg __P((struct tl_csr *, struct tl_mii_frame *)); 329static u_int16_t tl_phy_readreg __P((struct tl_softc *, int)); 330static void tl_phy_writereg __P((struct tl_softc *, u_int16_t, u_int16_t)); 331 332static void tl_autoneg __P((struct tl_softc *, int, int)); 333static void tl_setmode __P((struct tl_softc *, int)); 334static int tl_calchash __P((unsigned char *)); 335static void tl_setmulti __P((struct tl_softc *)); 336static void tl_softreset __P((struct tl_csr *, int)); 337static int tl_list_rx_init __P((struct tl_softc *)); 338static int tl_list_tx_init __P((struct tl_softc *)); 339 340/* 341 * ThunderLAN adapters typically have a serial EEPROM containing 342 * configuration information. The main reason we're interested in 343 * it is because it also contains the adapters's station address. 344 * 345 * Access to the EEPROM is a bit goofy since it is a serial device: 346 * you have to do reads and writes one bit at a time. The state of 347 * the DATA bit can only change while the CLOCK line is held low. 348 * Transactions work basically like this: 349 * 350 * 1) Send the EEPROM_START sequence to prepare the EEPROM for 351 * accepting commands. This pulls the clock high, sets 352 * the data bit to 0, enables transmission to the EEPROM, 353 * pulls the data bit up to 1, then pulls the clock low. 354 * The idea is to do a 0 to 1 transition of the data bit 355 * while the clock pin is held high. 356 * 357 * 2) To write a bit to the EEPROM, set the TXENABLE bit, then 358 * set the EDATA bit to send a 1 or clear it to send a 0. 
359 * Finally, set and then clear ECLOK. Strobing the clock 360 * transmits the bit. After 8 bits have been written, the 361 * EEPROM should respond with an ACK, which should be read. 362 * 363 * 3) To read a bit from the EEPROM, clear the TXENABLE bit, 364 * then set ECLOK. The bit can then be read by reading EDATA. 365 * ECLOCK should then be cleared again. This can be repeated 366 * 8 times to read a whole byte, after which the 367 * 368 * 4) We need to send the address byte to the EEPROM. For this 369 * we have to send the write control byte to the EEPROM to 370 * tell it to accept data. The byte is 0xA0. The EEPROM should 371 * ack this. The address byte can be send after that. 372 * 373 * 5) Now we have to tell the EEPROM to send us data. For that we 374 * have to transmit the read control byte, which is 0xA1. This 375 * byte should also be acked. We can then read the data bits 376 * from the EEPROM. 377 * 378 * 6) When we're all finished, send the EEPROM_STOP sequence. 379 * 380 * Note that we use the ThunderLAN's NetSio register to access the 381 * EEPROM, however there is an alternate method. There is a PCI NVRAM 382 * register at PCI offset 0xB4 which can also be used with minor changes. 383 * The difference is that access to PCI registers via pci_conf_read() 384 * and pci_conf_write() is done using programmed I/O, which we want to 385 * avoid. 386 */ 387 388/* 389 * Note that EEPROM_START leaves transmission enabled. 390 */ 391#define EEPROM_START \ 392 DIO_SEL(TL_NETSIO); \ 393 DIO_BYTE1_SET(TL_SIO_ECLOK); /* Pull clock pin high */ \ 394 DIO_BYTE1_SET(TL_SIO_EDATA); /* Set DATA bit to 1 */ \ 395 DIO_BYTE1_SET(TL_SIO_ETXEN); /* Enable xmit to write bit */ \ 396 DIO_BYTE1_CLR(TL_SIO_EDATA); /* Pull DATA bit to 0 again */ \ 397 DIO_BYTE1_CLR(TL_SIO_ECLOK); /* Pull clock low again */ 398 399/* 400 * EEPROM_STOP ends access to the EEPROM and clears the ETXEN bit so 401 * that no further data can be written to the EEPROM I/O pin. 
 */
#define EEPROM_STOP \
	DIO_SEL(TL_NETSIO); \
	DIO_BYTE1_CLR(TL_SIO_ETXEN);	/* Disable xmit */ \
	DIO_BYTE1_CLR(TL_SIO_EDATA);	/* Pull DATA to 0 */ \
	DIO_BYTE1_SET(TL_SIO_ECLOK);	/* Pull clock high */ \
	DIO_BYTE1_SET(TL_SIO_ETXEN);	/* Enable xmit */ \
	DIO_BYTE1_SET(TL_SIO_EDATA);	/* Toggle DATA to 1 */ \
	DIO_BYTE1_CLR(TL_SIO_ETXEN);	/* Disable xmit. */ \
	DIO_BYTE1_CLR(TL_SIO_ECLOK);	/* Pull clock low again */

/*
 * Send an instruction or address byte to the EEPROM, MSB first,
 * then check for an ACK.
 *
 * csr:  register window of the adapter (DIO_* macros operate on it)
 * byte: the 8 bits to shift out
 *
 * Returns the state of the EDATA pin after the ACK clock: 0 means
 * the EEPROM acknowledged the byte, non-zero means no ACK.
 */
static u_int8_t tl_eeprom_putbyte(csr, byte)
	struct tl_csr		*csr;
	u_int8_t		byte;
{
	register int		i, ack = 0;

	/*
	 * Make sure we're in TX mode.
	 */
	DIO_SEL(TL_NETSIO);
	DIO_BYTE1_SET(TL_SIO_ETXEN);

	/*
	 * Feed in each bit and strobe the clock.  The DATA pin may
	 * only change while ECLOK is low, so set data first, then
	 * pulse the clock.
	 */
	for (i = 0x80; i; i >>= 1) {
		DIO_SEL(TL_NETSIO);
		if (byte & i) {
			DIO_BYTE1_SET(TL_SIO_EDATA);
		} else {
			DIO_BYTE1_CLR(TL_SIO_EDATA);
		}
		DIO_BYTE1_SET(TL_SIO_ECLOK);
		DIO_BYTE1_CLR(TL_SIO_ECLOK);
	}

	/*
	 * Turn off TX mode so the EEPROM can drive the DATA pin
	 * for the ACK bit.
	 */
	DIO_BYTE1_CLR(TL_SIO_ETXEN);

	/*
	 * Check for ack.
	 */
	DIO_BYTE1_SET(TL_SIO_ECLOK);
	ack = DIO_BYTE1_GET(TL_SIO_EDATA);
	DIO_BYTE1_CLR(TL_SIO_ECLOK);

	return(ack);
}

/*
 * Read a byte of data stored in the EEPROM at address 'addr.'
 *
 * Performs a full random-read transaction: START, write control
 * byte, address byte, repeated START, read control byte, then 8
 * data bits, then STOP.  The result is stored through 'dest'.
 *
 * Returns 0 on success, 1 if any of the command/address bytes was
 * not ACKed by the EEPROM.
 * NOTE(review): on a missing ACK this returns without issuing
 * EEPROM_STOP, leaving ETXEN asserted — presumably harmless since
 * the next EEPROM_START re-initializes the pins; confirm against
 * the 24Cxx data sheet.
 */
static u_int8_t tl_eeprom_getbyte(csr, addr, dest)
	struct tl_csr		*csr;
	u_int8_t		addr;
	u_int8_t		*dest;
{
	register int		i;
	u_int8_t		byte = 0;

	EEPROM_START;
	/*
	 * Send write control code to EEPROM.
	 */
	if (tl_eeprom_putbyte(csr, EEPROM_CTL_WRITE))
		return(1);

	/*
	 * Send address of byte we want to read.
	 */
	if (tl_eeprom_putbyte(csr, addr))
		return(1);

	EEPROM_STOP;
	EEPROM_START;
	/*
	 * Send read control code to EEPROM.
	 */
	if (tl_eeprom_putbyte(csr, EEPROM_CTL_READ))
		return(1);

	/*
	 * Start reading bits from EEPROM: disable our transmitter so
	 * the EEPROM can drive EDATA, then clock in 8 bits, MSB first.
	 */
	DIO_SEL(TL_NETSIO);
	DIO_BYTE1_CLR(TL_SIO_ETXEN);
	for (i = 0x80; i; i >>= 1) {
		DIO_SEL(TL_NETSIO);
		DIO_BYTE1_SET(TL_SIO_ECLOK);
		if (DIO_BYTE1_GET(TL_SIO_EDATA))
			byte |= i;
		DIO_BYTE1_CLR(TL_SIO_ECLOK);
	}

	EEPROM_STOP;

	/*
	 * No ACK generated for read, so just return byte.
	 */

	*dest = byte;

	return(0);
}

/*
 * Synchronize the MII: with the MII transmitter disabled, clock out
 * 32 cycles so any PHY state machine is flushed back to idle.
 */
static void tl_mii_sync(csr)
	struct tl_csr		*csr;
{
	register int		i;

	DIO_SEL(TL_NETSIO);
	DIO_BYTE1_CLR(TL_SIO_MTXEN);

	for (i = 0; i < 32; i++) {
		DIO_BYTE1_SET(TL_SIO_MCLK);
		DIO_BYTE1_CLR(TL_SIO_MCLK);
	}

	return;
}

/*
 * Shift the low 'cnt' bits of 'bits' out on the MII management
 * interface, MSB first.  Data is set while MCLK is low and latched
 * on the rising edge.
 */
static void tl_mii_send(csr, bits, cnt)
	struct tl_csr		*csr;
	u_int32_t		bits;
	int			cnt;
{
	int			i;

	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
		DIO_BYTE1_CLR(TL_SIO_MCLK);
		if (bits & i) {
			DIO_BYTE1_SET(TL_SIO_MDATA);
		} else {
			DIO_BYTE1_CLR(TL_SIO_MDATA);
		}
		DIO_BYTE1_SET(TL_SIO_MCLK);
	}
}

/*
 * Read an MII management frame (register read) from a PHY.  The
 * frame's phyaddr/regaddr fields select the target; on success the
 * result is placed in frame->mii_data.
 *
 * Runs at splimp() and temporarily forces MINTEN low so the PHY
 * cannot interrupt us mid-transaction.
 *
 * Returns 0 on success, 1 if the PHY did not ACK the command.
 */
static int tl_mii_readreg(csr, frame)
	struct tl_csr		*csr;
	struct tl_mii_frame	*frame;

{
	int			i, ack, s;
	int			minten = 0;

	s = splimp();

	tl_mii_sync(csr);

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = TL_MII_STARTDELIM;
	frame->mii_opcode = TL_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	/*
	 * Select the NETSIO register. We will be using it
	 * to communicate indirectly with the MII.
	 */

	DIO_SEL(TL_NETSIO);

	/*
	 * Turn off MII interrupt by forcing MINTEN low.
	 * Remember the previous state so we can restore it below.
	 */
	minten = DIO_BYTE1_GET(TL_SIO_MINTEN);
	if (minten) {
		DIO_BYTE1_CLR(TL_SIO_MINTEN);
	}

	/*
	 * Turn on data xmit.
	 */
	DIO_BYTE1_SET(TL_SIO_MTXEN);

	/*
	 * Send command/address info.
	 */
	tl_mii_send(csr, frame->mii_stdelim, 2);
	tl_mii_send(csr, frame->mii_opcode, 2);
	tl_mii_send(csr, frame->mii_phyaddr, 5);
	tl_mii_send(csr, frame->mii_regaddr, 5);

	/*
	 * Turn off xmit so the PHY can drive MDATA for the
	 * turnaround and data bits.
	 */
	DIO_BYTE1_CLR(TL_SIO_MTXEN);

	/* Idle bit */
	DIO_BYTE1_CLR(TL_SIO_MCLK);
	DIO_BYTE1_SET(TL_SIO_MCLK);

	/* Check for ack */
	DIO_BYTE1_CLR(TL_SIO_MCLK);
	ack = DIO_BYTE1_GET(TL_SIO_MDATA);

	/* Complete the cycle */
	DIO_BYTE1_SET(TL_SIO_MCLK);

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHYs in sync.
	 */
	if (ack) {
		for(i = 0; i < 16; i++) {
			DIO_BYTE1_CLR(TL_SIO_MCLK);
			DIO_BYTE1_SET(TL_SIO_MCLK);
		}
		goto fail;
	}

	for (i = 0x8000; i; i >>= 1) {
		DIO_BYTE1_CLR(TL_SIO_MCLK);
		if (!ack) {
			if (DIO_BYTE1_GET(TL_SIO_MDATA))
				frame->mii_data |= i;
		}
		DIO_BYTE1_SET(TL_SIO_MCLK);
	}

fail:

	DIO_BYTE1_CLR(TL_SIO_MCLK);
	DIO_BYTE1_SET(TL_SIO_MCLK);

	/* Reenable interrupts if they were on when we started. */
	if (minten) {
		DIO_BYTE1_SET(TL_SIO_MINTEN);
	}

	splx(s);

	if (ack)
		return(1);
	return(0);
}

/*
 * Write an MII management frame (register write) to a PHY.  The
 * frame's phyaddr/regaddr/data fields must be filled in by the
 * caller.
 *
 * As with tl_mii_readreg(), MINTEN is forced low for the duration
 * and restored afterwards.  Always returns 0.
 * NOTE(review): unlike the read path, tl_mii_sync() here runs
 * before splimp() is taken — looks inconsistent; confirm intent.
 */
static int tl_mii_writereg(csr, frame)
	struct tl_csr		*csr;
	struct tl_mii_frame	*frame;

{
	int			s;
	int			minten;

	tl_mii_sync(csr);

	s = splimp();
	/*
	 * Set up frame for TX.
	 */

	frame->mii_stdelim = TL_MII_STARTDELIM;
	frame->mii_opcode = TL_MII_WRITEOP;
	frame->mii_turnaround = TL_MII_TURNAROUND;

	/*
	 * Select the NETSIO register. We will be using it
	 * to communicate indirectly with the MII.
	 */

	DIO_SEL(TL_NETSIO);

	/*
	 * Turn off MII interrupt by forcing MINTEN low.
	 */
	minten = DIO_BYTE1_GET(TL_SIO_MINTEN);
	if (minten) {
		DIO_BYTE1_CLR(TL_SIO_MINTEN);
	}

	/*
	 * Turn on data output.
	 */
	DIO_BYTE1_SET(TL_SIO_MTXEN);

	tl_mii_send(csr, frame->mii_stdelim, 2);
	tl_mii_send(csr, frame->mii_opcode, 2);
	tl_mii_send(csr, frame->mii_phyaddr, 5);
	tl_mii_send(csr, frame->mii_regaddr, 5);
	tl_mii_send(csr, frame->mii_turnaround, 2);
	tl_mii_send(csr, frame->mii_data, 16);

	/* One extra clock to finish the cycle. */
	DIO_BYTE1_SET(TL_SIO_MCLK);
	DIO_BYTE1_CLR(TL_SIO_MCLK);

	/*
	 * Turn off xmit.
	 */
	DIO_BYTE1_CLR(TL_SIO_MTXEN);

	/* Reenable interrupts if they were on when we started. */
	if (minten)
		DIO_BYTE1_SET(TL_SIO_MINTEN);

	splx(s);

	return(0);
}

/*
 * Convenience wrapper: read PHY register 'reg' on the PHY whose
 * address is recorded in sc->tl_phy_addr.  Returns the 16-bit
 * register value (0 if the read failed, since the frame is zeroed).
 */
static u_int16_t tl_phy_readreg(sc, reg)
	struct tl_softc		*sc;
	int			reg;
{
	struct tl_mii_frame	frame;
	struct tl_csr		*csr;

	bzero((char *)&frame, sizeof(frame));

	/* csr is referenced implicitly by the DIO_* macros below. */
	csr = sc->csr;

	frame.mii_phyaddr = sc->tl_phy_addr;
	frame.mii_regaddr = reg;
	tl_mii_readreg(sc->csr, &frame);

	/* Reenable MII interrupts, just in case. */
	DIO_SEL(TL_NETSIO);
	DIO_BYTE1_SET(TL_SIO_MINTEN);

	return(frame.mii_data);
}

/*
 * Convenience wrapper: write 'data' to PHY register 'reg' on the
 * PHY whose address is recorded in sc->tl_phy_addr.
 */
static void tl_phy_writereg(sc, reg, data)
	struct tl_softc		*sc;
	u_int16_t		reg;
	u_int16_t		data;
{
	struct tl_mii_frame	frame;
	struct tl_csr		*csr;

	bzero((char *)&frame, sizeof(frame));

	/* csr is referenced implicitly by the DIO_* macros below. */
	csr = sc->csr;
	frame.mii_phyaddr = sc->tl_phy_addr;
	frame.mii_regaddr = reg;
	frame.mii_data = data;

	tl_mii_writereg(sc->csr, &frame);

	/* Reenable MII interrupts, just in case. */
	DIO_SEL(TL_NETSIO);
	DIO_BYTE1_SET(TL_SIO_MINTEN);

	return;
}

/*
 * Read a sequence of 'cnt' bytes from the EEPROM starting at
 * offset 'off' into the buffer 'dest'.
 *
 * Returns 0 on success, 1 if any individual byte read failed
 * (in which case 'dest' is only partially filled).
 */
static int tl_read_eeprom(csr, dest, off, cnt)
	struct tl_csr		*csr;
	caddr_t			dest;
	int			off;
	int			cnt;
{
	int			err = 0, i;
	u_int8_t		byte = 0;

	for (i = 0; i < cnt; i++) {
		err = tl_eeprom_getbyte(csr, off + i, &byte);
		if (err)
			break;
		*(dest + i) = byte;
	}

	return(err ? 1 : 0);
}

/*
 * Initiate autonegotiation with a link partner.
 *
 * Note that the Texas Instruments ThunderLAN programmer's guide
 * fails to mention one very important point about autonegotiation.
 * Autonegotiation is done largely by the PHY, independent of the
 * ThunderLAN chip itself: the PHY sets the flags in the BMCR
 * register to indicate what modes were selected and if link status
 * is good. In fact, the PHY does pretty much all of the work itself,
 * except for one small detail.
 *
 * The PHY may negotiate a full-duplex or half-duplex link, and set
 * the PHY_BMCR_DUPLEX bit accordingly, but the ThunderLAN's 'NetCommand'
 * register _also_ has a half-duplex/full-duplex bit, and you MUST ALSO
 * SET THIS BIT MANUALLY TO CORRESPOND TO THE MODE SELECTED FOR THE PHY!
 * In other words, both the ThunderLAN chip and the PHY have to be
 * programmed for full-duplex mode in order for full-duplex to actually
 * work. So in order for autonegotiation to really work right, we have
 * to wait for the link to come up, check the BMCR register, then set
 * the ThunderLAN for full or half-duplex as needed.
 *
 * I struggled for two days to figure this out, so I'm making a point
 * of drawing attention to this fact. I think it's very strange that
 * the ThunderLAN doesn't automagically track the duplex state of the
 * PHY, but there you have it.
 *
 * Also, when using a National Semiconductor DP83840A PHY, we have to
 * allow a full three seconds for autonegotiation to complete. So what
 * we do is flip the autonegotiation restart bit, then set a timeout
 * to wake us up in three seconds to check the link state.
810 */ 811static void tl_autoneg(sc, flag, verbose) 812 struct tl_softc *sc; 813 int flag; 814 int verbose; 815{ 816 u_int16_t phy_sts = 0, media = 0; 817 struct ifnet *ifp; 818 struct ifmedia *ifm; 819 struct tl_csr *csr; 820 821 ifm = &sc->ifmedia; 822 ifp = &sc->arpcom.ac_if; 823 csr = sc->csr; 824 825 /* 826 * First, see if autoneg is supported. If not, there's 827 * no point in continuing. 828 */ 829 phy_sts = tl_phy_readreg(sc, PHY_BMSR); 830 if (!(phy_sts & PHY_BMSR_CANAUTONEG)) { 831 if (verbose) 832 printf("tl%d: autonegotiation not supported\n", 833 sc->tl_unit); 834 return; 835 } 836 837 switch (flag) { 838 case TL_FLAG_FORCEDELAY: 839 /* 840 * XXX Never use this option anywhere but in the probe 841 * routine: making the kernel stop dead in its tracks 842 * for three whole seconds after we've gone multi-user 843 * is really bad manners. 844 */ 845 phy_sts = tl_phy_readreg(sc, PHY_BMCR); 846 phy_sts |= PHY_BMCR_AUTONEGENBL|PHY_BMCR_AUTONEGRSTR; 847 tl_phy_writereg(sc, PHY_BMCR, phy_sts); 848 DELAY(3000000); 849 break; 850 case TL_FLAG_SCHEDDELAY: 851 phy_sts = tl_phy_readreg(sc, PHY_BMCR); 852 phy_sts |= PHY_BMCR_AUTONEGENBL|PHY_BMCR_AUTONEGRSTR; 853 tl_phy_writereg(sc, PHY_BMCR, phy_sts); 854 ifp->if_timer = 3; 855 sc->tl_autoneg = 1; 856 return; 857 case TL_FLAG_DELAYTIMEO: 858 ifp->if_timer = 0; 859 sc->tl_autoneg = 0; 860 break; 861 default: 862 printf("tl%d: invalid autoneg flag: %d\n", flag, sc->tl_unit); 863 return; 864 } 865 866 /* 867 * Read the BMSR register twice: the LINKSTAT bit is a 868 * latching bit. 869 */ 870 tl_phy_readreg(sc, PHY_BMSR); 871 phy_sts = tl_phy_readreg(sc, PHY_BMSR); 872 if (phy_sts & PHY_BMSR_AUTONEGCOMP) { 873 if (verbose) 874 printf("tl%d: autoneg complete, ", sc->tl_unit); 875 phy_sts = tl_phy_readreg(sc, PHY_BMSR); 876 } else { 877 if (verbose) 878 printf("tl%d: autoneg not complete, ", sc->tl_unit); 879 } 880 881 /* Link is good. Report modes and set duplex mode. 
*/ 882 if (phy_sts & PHY_BMSR_LINKSTAT) { 883 if (verbose) 884 printf("link status good "); 885 media = tl_phy_readreg(sc, PHY_BMCR); 886 887 /* Set the DUPLEX bit in the NetCmd register accordingly. */ 888 if (media & PHY_BMCR_DUPLEX) { 889 if (verbose) 890 printf("(full-duplex, "); 891 ifm->ifm_media |= IFM_FDX; 892 ifm->ifm_media &= ~IFM_HDX; 893 DIO_SEL(TL_NETCMD); 894 DIO_BYTE0_SET(TL_CMD_DUPLEX); 895 } else { 896 if (verbose) 897 printf("(half-duplex, "); 898 ifm->ifm_media &= ~IFM_FDX; 899 ifm->ifm_media |= IFM_HDX; 900 DIO_SEL(TL_NETCMD); 901 DIO_BYTE0_CLR(TL_CMD_DUPLEX); 902 } 903 904 if (media & PHY_BMCR_SPEEDSEL) { 905 if (verbose) 906 printf("100Mb/s)\n"); 907 ifm->ifm_media |= IFM_100_TX; 908 ifm->ifm_media &= ~IFM_10_T; 909 } else { 910 if (verbose) 911 printf("10Mb/s)\n"); 912 ifm->ifm_media &= ~IFM_100_TX; 913 ifm->ifm_media |= IFM_10_T; 914 } 915 916 /* Turn off autoneg */ 917 media &= ~PHY_BMCR_AUTONEGENBL; 918 tl_phy_writereg(sc, PHY_BMCR, media); 919 } else { 920 if (verbose) 921 printf("no carrier\n"); 922 } 923 924 return; 925} 926 927/* 928 * Set speed and duplex mode. Also program autoneg advertisements 929 * accordingly. 
 */
/*
 * sc:    softc of the interface to configure
 * media: an ifmedia word (IFM_AUTO/IFM_10_T/IFM_100_TX/IFM_10_5/
 *        IFM_LOOP subtype, plus IFM_FDX/IFM_HDX in the global bits)
 *
 * Programs the PHY's BMCR and (eventually) ANAR registers and keeps
 * the ThunderLAN NetCmd duplex bit in step with the PHY, since the
 * chip does not track the PHY's duplex state by itself.
 */
static void tl_setmode(sc, media)
	struct tl_softc		*sc;
	int			media;
{
	u_int16_t		bmcr, anar, ctl;
	struct tl_csr		*csr;

	/* csr is referenced implicitly by the DIO_* macros below. */
	csr = sc->csr;
	bmcr = tl_phy_readreg(sc, PHY_BMCR);
	anar = tl_phy_readreg(sc, PHY_ANAR);
	ctl = tl_phy_readreg(sc, TL_PHY_CTL);
	/* Leave NETCMD selected for the DIO_BYTE0_* duplex updates below. */
	DIO_SEL(TL_NETCMD);

	/* Start from a clean slate: clear all mode bits we may set. */
	bmcr &= ~(PHY_BMCR_SPEEDSEL|PHY_BMCR_DUPLEX|PHY_BMCR_AUTONEGENBL|
		PHY_BMCR_LOOPBK);
	anar &= ~(PHY_ANAR_100BT4|PHY_ANAR_100BTXFULL|PHY_ANAR_100BTXHALF|
		PHY_ANAR_10BTFULL|PHY_ANAR_10BTHALF);

	ctl &= ~PHY_CTL_AUISEL;

	if (IFM_SUBTYPE(media) == IFM_LOOP)
		bmcr |= PHY_BMCR_LOOPBK;

	if (IFM_SUBTYPE(media) == IFM_AUTO)
		bmcr |= PHY_BMCR_AUTONEGENBL;

	/* 10base5 (AUI) is selected via the TI vendor-specific register. */
	if (IFM_SUBTYPE(media) == IFM_10_5)
		ctl |= PHY_CTL_AUISEL;

	if (IFM_SUBTYPE(media) == IFM_100_TX) {
		bmcr |= PHY_BMCR_SPEEDSEL;
		if ((media & IFM_GMASK) == IFM_FDX) {
			bmcr |= PHY_BMCR_DUPLEX;
			anar |= PHY_ANAR_100BTXFULL;
			DIO_BYTE0_SET(TL_CMD_DUPLEX);
		} else if ((media & IFM_GMASK) == IFM_HDX) {
			bmcr &= ~PHY_BMCR_DUPLEX;
			anar |= PHY_ANAR_100BTXHALF;
			DIO_BYTE0_CLR(TL_CMD_DUPLEX);
		} else {
			/* No duplex specified: same as half-duplex. */
			bmcr &= ~PHY_BMCR_DUPLEX;
			anar |= PHY_ANAR_100BTXHALF;
			DIO_BYTE0_CLR(TL_CMD_DUPLEX);
		}
	}

	if (IFM_SUBTYPE(media) == IFM_10_T) {
		bmcr &= ~PHY_BMCR_SPEEDSEL;
		if ((media & IFM_GMASK) == IFM_FDX) {
			bmcr |= PHY_BMCR_DUPLEX;
			anar |= PHY_ANAR_10BTFULL;
			DIO_BYTE0_SET(TL_CMD_DUPLEX);
		} else if ((media & IFM_GMASK) == IFM_HDX) {
			bmcr &= ~PHY_BMCR_DUPLEX;
			anar |= PHY_ANAR_10BTHALF;
			DIO_BYTE0_CLR(TL_CMD_DUPLEX);
		} else {
			/* No duplex specified: same as half-duplex. */
			bmcr &= ~PHY_BMCR_DUPLEX;
			anar |= PHY_ANAR_10BTHALF;
			DIO_BYTE0_CLR(TL_CMD_DUPLEX);
		}
	}

	tl_phy_writereg(sc, PHY_BMCR, bmcr);
#ifdef notyet
	/* Advertisement programming is computed above but not yet enabled. */
	tl_phy_writereg(sc, PHY_ANAR, anar);
#endif
	tl_phy_writereg(sc, TL_PHY_CTL, ctl);

	return;
}

/*
 * Calculate the hash of a MAC address for programming the
multicast hash
 * table. This hash is simply the address split into 6-bit chunks
 * XOR'd, e.g.
 * byte: 000000|00 1111|1111 22|222222|333333|33 4444|4444 55|555555
 * bit:  765432|10 7654|3210 76|543210|765432|10 7654|3210 76|543210
 * Bytes 0-2 and 3-5 are symmetrical, so are folded together. Then
 * the folded 24-bit value is split into 6-bit portions and XOR'd.
 */
static int tl_calchash(addr)
	unsigned char		*addr;
{
	int			t;

	/* Fold bytes 0-2 with bytes 3-5 into one 24-bit value... */
	t = (addr[0] ^ addr[3]) << 16 | (addr[1] ^ addr[4]) << 8 |
		(addr[2] ^ addr[5]);
	/* ...then XOR its four 6-bit chunks together. */
	return ((t >> 18) ^ (t >> 12) ^ (t >> 6) ^ t) & 0x3f;
}

/*
 * Program the chip's 64-bit multicast hash filter from the
 * interface's multicast address list.
 */
static void tl_setmulti(sc)
	struct tl_softc		*sc;
{
	struct ifnet		*ifp;
	struct tl_csr		*csr;
	u_int32_t		hashes[2] = { 0, 0 };
	int			h;
	struct ifmultiaddr	*ifma;

	csr = sc->csr;
	ifp = &sc->arpcom.ac_if;

	/*
	 * With more than 64 groups, or when all-multicast is
	 * requested, just accept every multicast frame.
	 */
	if (sc->arpcom.ac_multicnt > 64 || ifp->if_flags & IFF_ALLMULTI) {
		hashes[0] = 0xFFFFFFFF;
		hashes[1] = 0xFFFFFFFF;
	} else {
		for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL;
			ifma = ifma->ifma_link.le_next) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			/* Hash selects one of 64 filter bits. */
			h = tl_calchash(
				LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));
		}
	}

	DIO_SEL(TL_HASH1);
	DIO_LONG_PUT(hashes[0]);
	DIO_SEL(TL_HASH2);
	DIO_LONG_PUT(hashes[1]);

	return;
}

/*
 * Reset the chip and bring it to a runnable state. 'internal' selects
 * whether the built-in 10baseT PHY is enabled afterwards.
 */
static void tl_softreset(csr, internal)
	struct tl_csr		*csr;
	int			internal;
{
	u_int32_t		cmd, dummy;

	/* Assert the adapter reset bit. */
	csr->tl_host_cmd |= TL_CMD_ADRST;
	/* Turn off interrupts */
	csr->tl_host_cmd |= TL_CMD_INTSOFF;

	/* First, clear the stats registers (reading them clears them). */
	DIO_SEL(TL_TXGOODFRAMES|TL_DIO_ADDR_INC);
	DIO_LONG_GET(dummy);
	DIO_LONG_GET(dummy);
	DIO_LONG_GET(dummy);
	DIO_LONG_GET(dummy);
	DIO_LONG_GET(dummy);

	/* Clear Areg and Hash registers */
	DIO_SEL(TL_AREG0_B5|TL_DIO_ADDR_INC);
	DIO_LONG_PUT(0x00000000);
	DIO_LONG_PUT(0x00000000);
	DIO_LONG_PUT(0x00000000);
	DIO_LONG_PUT(0x00000000);
	DIO_LONG_PUT(0x00000000);
	DIO_LONG_PUT(0x00000000);
	DIO_LONG_PUT(0x00000000);
	DIO_LONG_PUT(0x00000000);

	/*
	 * Set up Netconfig register. Enable one channel and
	 * one fragment mode.
	 */
	DIO_SEL(TL_NETCONFIG);
	DIO_WORD0_SET(TL_CFG_ONECHAN|TL_CFG_ONEFRAG);
	/* Enable the built-in PHY only when asked to. */
	if (internal) {
		DIO_SEL(TL_NETCONFIG);
		DIO_WORD0_SET(TL_CFG_PHYEN);
	} else {
		DIO_SEL(TL_NETCONFIG);
		DIO_WORD0_CLR(TL_CFG_PHYEN);
	}

	/* Set PCI burst size */
	DIO_SEL(TL_BSIZEREG);
	DIO_BYTE1_SET(0x33);

	/*
	 * Load adapter irq pacing timer and tx threshold.
	 * We make the transmit threshold 1 initially but we may
	 * change that later.
	 */
	cmd = csr->tl_host_cmd;
	cmd |= TL_CMD_NES;
	cmd &= ~(TL_CMD_RT|TL_CMD_EOC|TL_CMD_ACK_MASK|TL_CMD_CHSEL_MASK);
	csr->tl_host_cmd = cmd | (TL_CMD_LDTHR | TX_THR);
	csr->tl_host_cmd = cmd | (TL_CMD_LDTMR | 0x00000003);

	/* Unreset the MII */
	DIO_SEL(TL_NETSIO);
	DIO_BYTE1_SET(TL_SIO_NMRST);

	/* Clear status register (bits are write-one-to-clear). */
	DIO_SEL(TL_NETSTS);
	DIO_BYTE2_SET(TL_STS_MIRQ);
	DIO_BYTE2_SET(TL_STS_HBEAT);
	DIO_BYTE2_SET(TL_STS_TXSTOP);
	DIO_BYTE2_SET(TL_STS_RXSTOP);

	/* Enable network status interrupts for everything. */
	DIO_SEL(TL_NETMASK);
	DIO_BYTE3_SET(TL_MASK_MASK7|TL_MASK_MASK6|
			TL_MASK_MASK5|TL_MASK_MASK4);

	/* Take the adapter out of reset */
	DIO_SEL(TL_NETCMD);
	DIO_BYTE0_SET(TL_CMD_NRESET|TL_CMD_NWRAP);

	/* Wait for things to settle down a little. */
	DELAY(500);

	return;
}

/*
 * Probe for a ThunderLAN chip. Check the PCI vendor and device IDs
 * against our list and return its name if we find a match. Note that
 * we also save a pointer to the tl_type struct for this card since we
 * will need it for the softc struct and attach routine later.
 */
static char *
tl_probe(config_id, device_id)
	pcici_t			config_id;
	pcidi_t			device_id;
{
	struct tl_type		*t;
	struct tl_iflist	*new;

	t = tl_devs;

	while(t->tl_name != NULL) {
		/* device_id packs vendor in the low word, device in
		 * the high word. */
		if ((device_id & 0xFFFF) == t->tl_vid &&
			((device_id >> 16) & 0xFFFF) == t->tl_did) {
			new = malloc(sizeof(struct tl_iflist),
					M_DEVBUF, M_NOWAIT);
			if (new == NULL) {
				printf("no memory for controller struct!\n");
				break;
			}
			bzero(new, sizeof(struct tl_iflist));
			new->tl_config_id = config_id;
			new->tl_dinfo = t;
			/* Prepend to the global controller list; the
			 * attach routine looks it up by config_id. */
			new->tl_next = tl_iflist;
			tl_iflist = new;
			return(t->tl_name);
		}
		t++;
	}

	return(NULL);
}

/*
 * The ThunderLAN controller can support multiple PHYs. Logically,
 * this means we have to be able to deal with each PHY as a separate
 * interface. We therefore consider ThunderLAN devices as follows:
 *
 * o Each ThunderLAN controller device is assigned the name tlcX where
 *   X is the controller's unit number. Each ThunderLAN device found
 *   is assigned a different number.
 *
 * o Each PHY on each controller is assigned the name tlX. X starts at
 *   0 and is incremented each time an additional PHY is found.
 *
 * So, if you had two dual-channel ThunderLAN cards, you'd have
 * tlc0 and tlc1 (the controllers) and tl0, tl1, tl2, tl3 (the logical
 * interfaces). I think. I'm still not sure how dual channel controllers
 * work as I've yet to see one.
1198 */ 1199 1200/* 1201 * Do the interface setup and attach for a PHY on a particular 1202 * ThunderLAN chip. Also also set up interrupt vectors. 1203 */ 1204static int tl_attach_phy(csr, tl_unit, eaddr, tl_phy, ilist) 1205 struct tl_csr *csr; 1206 int tl_unit; 1207 char *eaddr; 1208 int tl_phy; 1209 struct tl_iflist *ilist; 1210{ 1211 struct tl_softc *sc; 1212 struct ifnet *ifp; 1213 int phy_ctl; 1214 struct tl_type *p = tl_phys; 1215 struct tl_mii_frame frame; 1216 int i, media = IFM_ETHER|IFM_100_TX|IFM_FDX; 1217 unsigned int round; 1218 caddr_t roundptr; 1219 1220 if (tl_phy != TL_PHYADDR_MAX) 1221 tl_softreset(csr, 0); 1222 1223 /* Reset the PHY again, just in case. */ 1224 bzero((char *)&frame, sizeof(frame)); 1225 frame.mii_phyaddr = tl_phy; 1226 frame.mii_regaddr = TL_PHY_GENCTL; 1227 frame.mii_data = PHY_BMCR_RESET; 1228 tl_mii_writereg(csr, &frame); 1229 DELAY(500); 1230 frame.mii_data = 0; 1231 1232 /* First, allocate memory for the softc struct. */ 1233 sc = malloc(sizeof(struct tl_softc), M_DEVBUF, M_NOWAIT); 1234 if (sc == NULL) { 1235 printf("tlc%d: no memory for softc struct!\n", ilist->tlc_unit); 1236 return(1); 1237 } 1238 1239 bzero(sc, sizeof(struct tl_softc)); 1240 1241 /* 1242 * Now allocate memory for the TX and RX lists. Note that 1243 * we actually allocate 8 bytes more than we really need: 1244 * this is because we need to adjust the final address to 1245 * be aligned on a quadword (64-bit) boundary in order to 1246 * make the chip happy. If the list structures aren't properly 1247 * aligned, DMA fails and the chip generates an adapter check 1248 * interrupt and has to be reset. If you set up the softc struct 1249 * just right you can sort of obtain proper alignment 'by chance.' 1250 * But I don't want to depend on this, so instead the alignment 1251 * is forced here. 
1252 */ 1253 sc->tl_ldata_ptr = malloc(sizeof(struct tl_list_data) + 8, 1254 M_DEVBUF, M_NOWAIT); 1255 1256 if (sc->tl_ldata_ptr == NULL) { 1257 free(sc, M_DEVBUF); 1258 printf("tlc%d: no memory for list buffers!\n", ilist->tlc_unit); 1259 return(1); 1260 } 1261 1262 /* 1263 * Convoluted but satisfies my ANSI sensibilities. GCC lets 1264 * you do casts on the LHS of an assignment, but ANSI doesn't 1265 * allow that. 1266 */ 1267 sc->tl_ldata = (struct tl_list_data *)sc->tl_ldata_ptr; 1268 round = (unsigned int)sc->tl_ldata_ptr & 0xF; 1269 roundptr = sc->tl_ldata_ptr; 1270 for (i = 0; i < 8; i++) { 1271 if (round % 8) { 1272 round++; 1273 roundptr++; 1274 } else 1275 break; 1276 } 1277 sc->tl_ldata = (struct tl_list_data *)roundptr; 1278 1279 bzero(sc->tl_ldata, sizeof(struct tl_list_data)); 1280 1281 sc->csr = csr; 1282 sc->tl_dinfo = ilist->tl_dinfo; 1283 sc->tl_ctlr = ilist->tlc_unit; 1284 sc->tl_unit = tl_unit; 1285 sc->tl_phy_addr = tl_phy; 1286 sc->tl_iflist = ilist; 1287 callout_handle_init(&sc->tl_stat_ch); 1288 1289 frame.mii_regaddr = TL_PHY_VENID; 1290 tl_mii_readreg(csr, &frame); 1291 sc->tl_phy_vid = frame.mii_data; 1292 1293 frame.mii_regaddr = TL_PHY_DEVID; 1294 tl_mii_readreg(csr, &frame); 1295 sc->tl_phy_did = frame.mii_data; 1296 1297 frame.mii_regaddr = TL_PHY_GENSTS; 1298 tl_mii_readreg(csr, &frame); 1299 sc->tl_phy_sts = frame.mii_data; 1300 1301 frame.mii_regaddr = TL_PHY_GENCTL; 1302 tl_mii_readreg(csr, &frame); 1303 phy_ctl = frame.mii_data; 1304 1305 /* 1306 * PHY revision numbers tend to vary a bit. Our algorithm here 1307 * is to check everything but the 8 least significant bits. 
1308 */ 1309 while(p->tl_vid) { 1310 if (sc->tl_phy_vid == p->tl_vid && 1311 (sc->tl_phy_did | 0x000F) == p->tl_did) { 1312 sc->tl_pinfo = p; 1313 break; 1314 } 1315 p++; 1316 } 1317 if (sc->tl_pinfo == NULL) { 1318 sc->tl_pinfo = &tl_phys[PHY_UNKNOWN]; 1319 } 1320 1321 bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN); 1322 ifp = &sc->arpcom.ac_if; 1323 ifp->if_softc = sc; 1324 ifp->if_unit = tl_unit; 1325 ifp->if_name = "tl"; 1326 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1327 ifp->if_ioctl = tl_ioctl; 1328 ifp->if_output = ether_output; 1329 ifp->if_start = tl_start; 1330 ifp->if_watchdog = tl_watchdog; 1331 ifp->if_init = tl_init; 1332 1333 if (sc->tl_phy_sts & PHY_BMSR_100BT4 || 1334 sc->tl_phy_sts & PHY_BMSR_100BTXFULL || 1335 sc->tl_phy_sts & PHY_BMSR_100BTXHALF) 1336 ifp->if_baudrate = 100000000; 1337 else 1338 ifp->if_baudrate = 10000000; 1339 1340 ilist->tl_sc[tl_phy] = sc; 1341 1342 printf("tl%d at tlc%d physical interface %d\n", ifp->if_unit, 1343 sc->tl_ctlr, 1344 sc->tl_phy_addr); 1345 1346 printf("tl%d: %s ", ifp->if_unit, sc->tl_pinfo->tl_name); 1347 1348 if (sc->tl_phy_sts & PHY_BMSR_100BT4 || 1349 sc->tl_phy_sts & PHY_BMSR_100BTXHALF || 1350 sc->tl_phy_sts & PHY_BMSR_100BTXHALF) 1351 printf("10/100Mbps "); 1352 else { 1353 media &= ~IFM_100_TX; 1354 media |= IFM_10_T; 1355 printf("10Mbps "); 1356 } 1357 1358 if (sc->tl_phy_sts & PHY_BMSR_100BTXFULL || 1359 sc->tl_phy_sts & PHY_BMSR_10BTFULL) 1360 printf("full duplex "); 1361 else { 1362 printf("half duplex "); 1363 media &= ~IFM_FDX; 1364 } 1365 1366 if (sc->tl_phy_sts & PHY_BMSR_CANAUTONEG) { 1367 media = IFM_ETHER|IFM_AUTO; 1368 printf("autonegotiating\n"); 1369 } else 1370 printf("\n"); 1371 1372 /* If this isn't a known PHY, print the PHY indentifier info. */ 1373 if (sc->tl_pinfo->tl_vid == 0) 1374 printf("tl%d: vendor id: %04x product id: %04x\n", 1375 sc->tl_unit, sc->tl_phy_vid, sc->tl_phy_did); 1376 1377 /* Set up ifmedia data and callbacks. 
*/ 1378 ifmedia_init(&sc->ifmedia, 0, tl_ifmedia_upd, tl_ifmedia_sts); 1379 1380 /* 1381 * All ThunderLANs support at least 10baseT half duplex. 1382 * They also support AUI selection if used in 10Mb/s modes. 1383 * They all also support a loopback mode. 1384 */ 1385 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL); 1386 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL); 1387 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_5, 0, NULL); 1388 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_LOOP, 0, NULL); 1389 1390 /* Some ThunderLAN PHYs support autonegotiation. */ 1391 if (sc->tl_phy_sts & PHY_BMSR_CANAUTONEG) 1392 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL); 1393 1394 /* Some support 10baseT full duplex. */ 1395 if (sc->tl_phy_sts & PHY_BMSR_10BTFULL) 1396 ifmedia_add(&sc->ifmedia, 1397 IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL); 1398 1399 /* Some support 100BaseTX half duplex. */ 1400 if (sc->tl_phy_sts & PHY_BMSR_100BTXHALF) 1401 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_TX, 0, NULL); 1402 if (sc->tl_phy_sts & PHY_BMSR_100BTXHALF) 1403 ifmedia_add(&sc->ifmedia, 1404 IFM_ETHER|IFM_100_TX|IFM_HDX, 0, NULL); 1405 1406 /* Some support 100BaseTX full duplex. */ 1407 if (sc->tl_phy_sts & PHY_BMSR_100BTXFULL) 1408 ifmedia_add(&sc->ifmedia, 1409 IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL); 1410 1411 /* Some also support 100BaseT4. */ 1412 if (sc->tl_phy_sts & PHY_BMSR_100BT4) 1413 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_T4, 0, NULL); 1414 1415 /* Set default media. */ 1416 ifmedia_set(&sc->ifmedia, media); 1417 1418 /* 1419 * Kick off an autonegotiation session if this PHY supports it. 1420 * This is necessary to make sure the chip's duplex mode matches 1421 * the PHY's duplex mode. It may not: once enabled, the PHY may 1422 * autonegotiate full-duplex mode with its link partner, but the 1423 * ThunderLAN chip defaults to half-duplex and stays there unless 1424 * told otherwise. 
1425 */ 1426 if (sc->tl_phy_sts & PHY_BMSR_CANAUTONEG) 1427 tl_autoneg(sc, TL_FLAG_FORCEDELAY, 0); 1428 1429 /* 1430 * Call MI attach routines. 1431 */ 1432 if_attach(ifp); 1433 ether_ifattach(ifp); 1434 1435#if NBPFILTER > 0 1436 bpfattach(ifp, DLT_EN10MB, sizeof(struct ether_header)); 1437#endif 1438 1439 return(0); 1440} 1441 1442static void 1443tl_attach_ctlr(config_id, unit) 1444 pcici_t config_id; 1445 int unit; 1446{ 1447 int s, i, phys = 0; 1448 vm_offset_t pbase, vbase; 1449 struct tl_csr *csr; 1450 char eaddr[ETHER_ADDR_LEN]; 1451 struct tl_mii_frame frame; 1452 u_int32_t command; 1453 struct tl_iflist *ilist; 1454 1455 s = splimp(); 1456 1457 for (ilist = tl_iflist; ilist != NULL; ilist = ilist->tl_next) 1458 if (ilist->tl_config_id == config_id) 1459 break; 1460 1461 if (ilist == NULL) { 1462 printf("couldn't match config id with controller struct\n"); 1463 goto fail; 1464 } 1465 1466 /* 1467 * Map control/status registers. 1468 */ 1469 pci_conf_write(config_id, PCI_COMMAND_STATUS_REG, 1470 PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN); 1471 1472 command = pci_conf_read(config_id, PCI_COMMAND_STATUS_REG); 1473 1474 if (!(command & PCIM_CMD_MEMEN)) { 1475 printf("tlc%d: failed to enable memory mapping!\n", unit); 1476 goto fail; 1477 } 1478 1479 if (!pci_map_mem(config_id, TL_PCI_LOMEM, &vbase, &pbase)) { 1480 printf ("tlc%d: couldn't map memory\n", unit); 1481 goto fail; 1482 } 1483 1484 csr = (struct tl_csr *)vbase; 1485 1486 ilist->csr = csr; 1487 ilist->tl_active_phy = TL_PHYS_IDLE; 1488 ilist->tlc_unit = unit; 1489 1490 /* Allocate interrupt */ 1491 if (!pci_map_int(config_id, tl_intr, ilist, &net_imask)) { 1492 printf("tlc%d: couldn't map interrupt\n", unit); 1493 goto fail; 1494 } 1495 1496 /* Reset the adapter. */ 1497 tl_softreset(csr, 1); 1498 1499 /* 1500 * Get station address from the EEPROM. 
1501 */ 1502 if (tl_read_eeprom(csr, (caddr_t)&eaddr, 1503 TL_EEPROM_EADDR, ETHER_ADDR_LEN)) { 1504 printf("tlc%d: failed to read station address\n", unit); 1505 goto fail; 1506 } 1507 1508 /* 1509 * A ThunderLAN chip was detected. Inform the world. 1510 */ 1511 printf("tlc%d: Ethernet address: %6D\n", unit, eaddr, ":"); 1512 1513 /* 1514 * Now attach the ThunderLAN's PHYs. There will always 1515 * be at least one PHY; if the PHY address is 0x1F, then 1516 * it's the internal one. If we encounter a lower numbered 1517 * PHY, we ignore the internal once since enabling the 1518 * internal PHY disables the external one. 1519 */ 1520 1521 bzero((char *)&frame, sizeof(frame)); 1522 1523 for (i = TL_PHYADDR_MIN; i < TL_PHYADDR_MAX + 1; i++) { 1524 frame.mii_phyaddr = i; 1525 frame.mii_regaddr = TL_PHY_GENCTL; 1526 frame.mii_data = PHY_BMCR_RESET; 1527 tl_mii_writereg(csr, &frame); 1528 DELAY(500); 1529 while(frame.mii_data & PHY_BMCR_RESET) 1530 tl_mii_readreg(csr, &frame); 1531 frame.mii_regaddr = TL_PHY_VENID; 1532 frame.mii_data = 0; 1533 tl_mii_readreg(csr, &frame); 1534 if (!frame.mii_data) 1535 continue; 1536 if (tl_attach_phy(csr, phys, eaddr, i, ilist)) { 1537 printf("tlc%d: failed to attach interface %d\n", 1538 unit, i); 1539 goto fail; 1540 } 1541 phys++; 1542 if (phys && i != TL_PHYADDR_MAX) 1543 break; 1544 } 1545 1546 if (!phys) { 1547 printf("tlc%d: no physical interfaces attached!\n", unit); 1548 goto fail; 1549 } 1550 1551 at_shutdown(tl_shutdown, ilist, SHUTDOWN_POST_SYNC); 1552 1553fail: 1554 splx(s); 1555 return; 1556} 1557 1558/* 1559 * Initialize the transmit lists. 
1560 */ 1561static int tl_list_tx_init(sc) 1562 struct tl_softc *sc; 1563{ 1564 struct tl_chain_data *cd; 1565 struct tl_list_data *ld; 1566 int i; 1567 1568 cd = &sc->tl_cdata; 1569 ld = sc->tl_ldata; 1570 for (i = 0; i < TL_TX_LIST_CNT; i++) { 1571 cd->tl_tx_chain[i].tl_ptr = &ld->tl_tx_list[i]; 1572 if (i == (TL_TX_LIST_CNT - 1)) 1573 cd->tl_tx_chain[i].tl_next = NULL; 1574 else 1575 cd->tl_tx_chain[i].tl_next = &cd->tl_tx_chain[i + 1]; 1576 } 1577 1578 cd->tl_tx_free = &cd->tl_tx_chain[0]; 1579 cd->tl_tx_tail = cd->tl_tx_head = NULL; 1580 sc->tl_txeoc = 1; 1581 1582 return(0); 1583} 1584 1585/* 1586 * Initialize the RX lists and allocate mbufs for them. 1587 */ 1588static int tl_list_rx_init(sc) 1589 struct tl_softc *sc; 1590{ 1591 struct tl_chain_data *cd; 1592 struct tl_list_data *ld; 1593 int i; 1594 1595 cd = &sc->tl_cdata; 1596 ld = sc->tl_ldata; 1597 1598 for (i = 0; i < TL_TX_LIST_CNT; i++) { 1599 cd->tl_rx_chain[i].tl_ptr = 1600 (struct tl_list *)&ld->tl_rx_list[i]; 1601 tl_newbuf(sc, &cd->tl_rx_chain[i]); 1602 if (i == (TL_TX_LIST_CNT - 1)) { 1603 cd->tl_rx_chain[i].tl_next = NULL; 1604 ld->tl_rx_list[i].tlist_fptr = 0; 1605 } else { 1606 cd->tl_rx_chain[i].tl_next = &cd->tl_rx_chain[i + 1]; 1607 ld->tl_rx_list[i].tlist_fptr = 1608 vtophys(&ld->tl_rx_list[i + 1]); 1609 } 1610 } 1611 1612 cd->tl_rx_head = &cd->tl_rx_chain[0]; 1613 cd->tl_rx_tail = &cd->tl_rx_chain[TL_RX_LIST_CNT - 1]; 1614 1615 return(0); 1616} 1617 1618static int tl_newbuf(sc, c) 1619 struct tl_softc *sc; 1620 struct tl_chain *c; 1621{ 1622 struct mbuf *m_new = NULL; 1623 1624 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 1625 if (m_new == NULL) { 1626 printf("tl%d: no memory for rx list", 1627 sc->tl_unit); 1628 return(ENOBUFS); 1629 } 1630 1631 MCLGET(m_new, M_DONTWAIT); 1632 if (!(m_new->m_flags & M_EXT)) { 1633 printf("tl%d: no memory for rx list", sc->tl_unit); 1634 m_freem(m_new); 1635 return(ENOBUFS); 1636 } 1637 1638 c->tl_mbuf = m_new; 1639 c->tl_next = NULL; 1640 
c->tl_ptr->tlist_frsize = MCLBYTES; 1641 c->tl_ptr->tlist_cstat = TL_CSTAT_READY; 1642 c->tl_ptr->tlist_fptr = 0; 1643 c->tl_ptr->tl_frag[0].tlist_dadr = vtophys(mtod(m_new, caddr_t)); 1644 c->tl_ptr->tl_frag[0].tlist_dcnt = MCLBYTES; 1645 1646 return(0); 1647} 1648/* 1649 * Interrupt handler for RX 'end of frame' condition (EOF). This 1650 * tells us that a full ethernet frame has been captured and we need 1651 * to handle it. 1652 * 1653 * Reception is done using 'lists' which consist of a header and a 1654 * series of 10 data count/data address pairs that point to buffers. 1655 * Initially you're supposed to create a list, populate it with pointers 1656 * to buffers, then load the physical address of the list into the 1657 * ch_parm register. The adapter is then supposed to DMA the received 1658 * frame into the buffers for you. 1659 * 1660 * To make things as fast as possible, we have the chip DMA directly 1661 * into mbufs. This saves us from having to do a buffer copy: we can 1662 * just hand the mbufs directly to ether_input(). Once the frame has 1663 * been sent on its way, the 'list' structure is assigned a new buffer 1664 * and moved to the end of the RX chain. As long we we stay ahead of 1665 * the chip, it will always think it has an endless receive channel. 1666 * 1667 * If we happen to fall behind and the chip manages to fill up all of 1668 * the buffers, it will generate an end of channel interrupt and wait 1669 * for us to empty the chain and restart the receiver. 
 */
static int tl_intvec_rxeof(xsc, type)
	void			*xsc;
	u_int32_t		type;
{
	struct tl_softc		*sc;
	int			r = 0, total_len = 0;
	struct ether_header	*eh;
	struct mbuf		*m;
	struct ifnet		*ifp;
	struct tl_chain		*cur_rx;

	sc = xsc;
	ifp = &sc->arpcom.ac_if;

	/*
	 * Walk the RX chain for every completed frame. 'r' counts the
	 * frames handled so the caller can ack them all in one go.
	 */
	while(sc->tl_cdata.tl_rx_head->tl_ptr->tlist_cstat & TL_CSTAT_FRAMECMP){
		r++;
		cur_rx = sc->tl_cdata.tl_rx_head;
		sc->tl_cdata.tl_rx_head = cur_rx->tl_next;
		m = cur_rx->tl_mbuf;
		total_len = cur_rx->tl_ptr->tlist_frsize;

		/*
		 * Give the descriptor a fresh cluster.
		 * NOTE(review): a tl_newbuf() failure is ignored here,
		 * leaving the recycled descriptor without a buffer --
		 * worth confirming how that case behaves.
		 */
		tl_newbuf(sc, cur_rx);

		/* Recycle the descriptor onto the end of the RX chain
		 * so the chip sees an endless receive channel. */
		sc->tl_cdata.tl_rx_tail->tl_ptr->tlist_fptr =
						vtophys(cur_rx->tl_ptr);
		sc->tl_cdata.tl_rx_tail->tl_next = cur_rx;
		sc->tl_cdata.tl_rx_tail = cur_rx;

		eh = mtod(m, struct ether_header *);
		m->m_pkthdr.rcvif = ifp;

#if NBPFILTER > 0
		/*
		 * Handle BPF listeners. Let the BPF user see the packet, but
		 * don't pass it up to the ether_input() layer unless it's
		 * a broadcast packet, multicast packet, matches our ethernet
		 * address or the interface is in promiscuous mode. If we don't
		 * want the packet, just forget it. We leave the mbuf in place
		 * since it can be used again later.
		 */
		if (ifp->if_bpf) {
			m->m_pkthdr.len = m->m_len = total_len;
			bpf_mtap(ifp, m);
			if (ifp->if_flags & IFF_PROMISC &&
				(bcmp(eh->ether_dhost, sc->arpcom.ac_enaddr,
						ETHER_ADDR_LEN) &&
					(eh->ether_dhost[0] & 1) == 0)) {
				m_freem(m);
				continue;
			}
		}
#endif
		/* Remove header from mbuf and pass it on. */
		m->m_pkthdr.len = m->m_len =
				total_len - sizeof(struct ether_header);
		m->m_data += sizeof(struct ether_header);
		ether_input(ifp, eh, m);
	}

	return(r);
}

/*
 * The RX-EOC condition hits when the ch_parm address hasn't been
 * initialized or the adapter reached a list with a forward pointer
 * of 0 (which indicates the end of the chain). In our case, this means
 * the card has hit the end of the receive buffer chain and we need to
 * empty out the buffers and shift the pointer back to the beginning again.
 */
static int tl_intvec_rxeoc(xsc, type)
	void			*xsc;
	u_int32_t		type;
{
	struct tl_softc		*sc;
	int			r;

	sc = xsc;

	/* Flush out the receive queue and ack RXEOF interrupts. */
	r = tl_intvec_rxeof(xsc, type);
	/* Ack the drained RXEOF frames directly; the RXEOC itself is
	 * acked by the caller using the value returned below. */
	sc->csr->tl_host_cmd = TL_CMD_ACK | r | (type & ~(0x00100000));
	r = 1;
	/* Point the chip back at the head of the chain and have the
	 * caller restart the receive channel. */
	sc->csr->tl_ch_parm = vtophys(sc->tl_cdata.tl_rx_head->tl_ptr);
	r |= (TL_CMD_GO|TL_CMD_RT);
	return(r);
}

/*
 * Invalid interrupt handler. The manual says invalid interrupts
 * are caused by a hardware error in other hardware and that they
 * should just be ignored.
 */
static int tl_intvec_invalid(xsc, type)
	void			*xsc;
	u_int32_t		type;
{
	struct tl_softc		*sc;

	sc = xsc;

#ifdef DIAGNOSTIC
	printf("tl%d: got an invalid interrupt!\n", sc->tl_unit);
#endif
	/* Re-enable interrupts but don't ack this one. */
	sc->csr->tl_host_cmd |= type;

	return(0);
}

/*
 * Dummy interrupt handler. Dummy interrupts are generated by setting
 * the ReqInt bit in the host command register. They should only occur
 * if we ask for them, and we never do, so if one magically appears,
 * we should make some noise about it.
 */
static int tl_intvec_dummy(xsc, type)
	void			*xsc;
	u_int32_t		type;
{
	struct tl_softc		*sc;

	sc = xsc;
	printf("tl%d: got a dummy interrupt\n", sc->tl_unit);

	return(1);
}

/*
 * Stats counter overflow interrupt. The chip delivers one of these
 * if we don't poll the stats counters often enough.
 */
static int tl_intvec_statoflow(xsc, type)
	void			*xsc;
	u_int32_t		type;
{
	struct tl_softc		*sc;

	sc = xsc;

	/* Harvest the counters before they wrap. */
	tl_stats_update(sc);

	return(1);
}

/* TX end-of-frame: reclaim descriptors for completed transmissions. */
static int tl_intvec_txeof(xsc, type)
	void			*xsc;
	u_int32_t		type;
{
	struct tl_softc		*sc;
	int			r = 0;
	struct tl_chain		*cur_tx;

	sc = xsc;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been sent.
	 */
	while (sc->tl_cdata.tl_tx_head != NULL) {
		cur_tx = sc->tl_cdata.tl_tx_head;
		/* Stop at the first frame the chip hasn't finished. */
		if (!(cur_tx->tl_ptr->tlist_cstat & TL_CSTAT_FRAMECMP))
			break;
		sc->tl_cdata.tl_tx_head = cur_tx->tl_next;

		r++;
		m_freem(cur_tx->tl_mbuf);
		cur_tx->tl_mbuf = NULL;

		/* Return the descriptor to the free list. */
		cur_tx->tl_next = sc->tl_cdata.tl_tx_free;
		sc->tl_cdata.tl_tx_free = cur_tx;
	}

	return(r);
}

/*
 * The transmit end of channel interrupt. The adapter triggers this
 * interrupt to tell us it hit the end of the current transmit list.
 *
 * A note about this: it's possible for a condition to arise where
 * tl_start() may try to send frames between TXEOF and TXEOC interrupts.
 * You have to avoid this since the chip expects things to go in a
 * particular order: transmit, acknowledge TXEOF, acknowledge TXEOC.
 * When the TXEOF handler is called, it will free all of the transmitted
 * frames and reset the tx_head pointer to NULL. However, a TXEOC
 * interrupt should be received and acknowledged before any more frames
 * are queued for transmission. If tl_start() is called after TXEOF
 * resets the tx_head pointer but _before_ the TXEOC interrupt arrives,
 * it could attempt to issue a transmit command prematurely.
 *
 * To guard against this, tl_start() will only issue transmit commands
 * if the tl_txeoc flag is set, and only the TXEOC interrupt handler
 * can set this flag once tl_start() has cleared it.
 */
static int tl_intvec_txeoc(xsc, type)
	void			*xsc;
	u_int32_t		type;
{
	struct tl_softc		*sc;
	struct ifnet		*ifp;
	u_int32_t		cmd;

	sc = xsc;
	ifp = &sc->arpcom.ac_if;

	/* Clear the timeout timer. */
	ifp->if_timer = 0;

	if (sc->tl_cdata.tl_tx_head == NULL) {
		/* Nothing left queued: allow tl_start() to transmit
		 * again and mark the interface as not busy. */
		ifp->if_flags &= ~IFF_OACTIVE;
		sc->tl_cdata.tl_tx_tail = NULL;
		sc->tl_txeoc = 1;
	} else {
		sc->tl_txeoc = 0;
		/* First we have to ack the EOC interrupt. */
		sc->csr->tl_host_cmd = TL_CMD_ACK | 0x00000001 | type;
		/* Then load the address of the next TX list. */
		sc->csr->tl_ch_parm = vtophys(sc->tl_cdata.tl_tx_head->tl_ptr);
		/* Restart TX channel. */
		cmd = sc->csr->tl_host_cmd;
		cmd &= ~TL_CMD_RT;
		cmd |= TL_CMD_GO|TL_CMD_INTSON;
		sc->csr->tl_host_cmd = cmd;
		return(0);
	}

	return(1);
}

/* Adapter check: a DMA/list error occurred; reset and reinitialize. */
static int tl_intvec_adchk(xsc, type)
	void			*xsc;
	u_int32_t		type;
{
	struct tl_softc		*sc;

	sc = xsc;

	printf("tl%d: adapter check: %x\n", sc->tl_unit, sc->csr->tl_ch_parm);

	/* Re-enable the internal PHY only if this softc drives it. */
	tl_softreset(sc->csr, sc->tl_phy_addr == TL_PHYADDR_MAX ? 1 : 0);
	tl_init(sc);
	sc->csr->tl_host_cmd |= TL_CMD_INTSON;

	return(0);
}

/* Network status interrupt: report and clear the status bits. */
static int tl_intvec_netsts(xsc, type)
	void			*xsc;
	u_int32_t		type;
{
	struct tl_softc		*sc;
	u_int16_t		netsts;
	struct tl_csr		*csr;

	sc = xsc;
	csr = sc->csr;

	/* Read the status bits and write them back to clear them. */
	DIO_SEL(TL_NETSTS);
	netsts = DIO_BYTE2_GET(0xFF);
	DIO_BYTE2_SET(netsts);

	printf("tl%d: network status: %x\n", sc->tl_unit, netsts);

	return(1);
}

/* Main interrupt dispatcher, registered per controller. */
static void tl_intr(xilist)
	void			*xilist;
{
	struct tl_iflist	*ilist;
	struct tl_softc		*sc;
	struct tl_csr		*csr;
	struct ifnet		*ifp;
	int			r = 0;
	u_int32_t		type = 0;
	u_int16_t		ints = 0;
	u_int8_t		ivec = 0;

	ilist = xilist;
	csr = ilist->csr;

	/* Disable interrupts */
	ints = csr->tl_host_int;
	csr->tl_host_int = ints;
	/* Split host_int into the ack word, vector and interrupt type. */
	type = (ints << 16) & 0xFFFF0000;
	ivec = (ints & TL_VEC_MASK) >> 5;
	ints = (ints & TL_INT_MASK) >> 2;
	/*
	 * An interrupt has been posted by the ThunderLAN, but we
	 * have to figure out which PHY generated it before we can
	 * do anything with it. If we receive an interrupt when we
	 * know none of the PHYs are turned on, then either there's
	 * a bug in the driver or we were handed an interrupt that
	 * doesn't actually belong to us.
	 */
	if (ilist->tl_active_phy == TL_PHYS_IDLE) {
		/*
		 * Exception: if this is an invalid interrupt,
		 * just re-enable interrupts and ignore it. Probably
		 * what's happened is that we got an interrupt meant
		 * for another PCI device that's sharing our IRQ.
	 */
		if (ints == TL_INTR_INVALID) {
			/* Harmless spurious vector: just ACK it and bail. */
			csr->tl_host_cmd |= type;
			return;
		}
		/* Any other vector with no active PHY is unexpected. */
		printf("tlc%d: interrupt type %x with all phys idle\n",
			ilist->tlc_unit, ints);
		return;
	}

	/* An active PHY owns the controller; service on its behalf. */
	sc = ilist->tl_sc[ilist->tl_active_phy];
	csr = sc->csr;
	ifp = &sc->arpcom.ac_if;

	/*
	 * Dispatch to the per-vector service routine.  Each returns a
	 * value that is folded into the ACK command below; a zero return
	 * suppresses the ACK (the handler re-arms on its own).
	 */
	switch(ints) {
	case (TL_INTR_INVALID):
		r = tl_intvec_invalid((void *)sc, type);
		break;
	case (TL_INTR_TXEOF):
		r = tl_intvec_txeof((void *)sc, type);
		break;
	case (TL_INTR_TXEOC):
		r = tl_intvec_txeoc((void *)sc, type);
		break;
	case (TL_INTR_STATOFLOW):
		r = tl_intvec_statoflow((void *)sc, type);
		break;
	case (TL_INTR_RXEOF):
		r = tl_intvec_rxeof((void *)sc, type);
		break;
	case (TL_INTR_DUMMY):
		r = tl_intvec_dummy((void *)sc, type);
		break;
	case (TL_INTR_ADCHK):
		/*
		 * This vector is shared: 'ivec' set means adapter check,
		 * clear means network status interrupt.
		 */
		if (ivec)
			r = tl_intvec_adchk((void *)sc, type);
		else
			r = tl_intvec_netsts((void *)sc, type);
		break;
	case (TL_INTR_RXEOC):
		r = tl_intvec_rxeoc((void *)sc, type);
		break;
	default:
		printf("tl%d: bogus interrupt type\n", ifp->if_unit);
		break;
	}

	/* Re-enable interrupts */
	if (r)
		csr->tl_host_cmd = TL_CMD_ACK | r | type;

	return;
}

/*
 * Periodic statistics harvester, driven off timeout().  Reads the
 * chip's packed statistics registers into a local tl_stats structure,
 * folds them into the interface counters, then reschedules itself to
 * run again in one second (hz ticks).
 */
static void tl_stats_update(xsc)
	void			*xsc;
{
	struct tl_softc		*sc;
	struct ifnet		*ifp;
	struct tl_csr		*csr;
	struct tl_stats		tl_stats;
	u_int32_t		*p;

	bzero((char *)&tl_stats, sizeof(struct tl_stats));

	sc = xsc;
	csr = sc->csr;	/* used implicitly by the DIO_* macros below */
	ifp = &sc->arpcom.ac_if;

	/*
	 * Read the five 32-bit statistics words back to back;
	 * TL_DIO_ADDR_INC makes the DIO address auto-increment after
	 * each access, so the words land sequentially in tl_stats.
	 */
	p = (u_int32_t *)&tl_stats;

	DIO_SEL(TL_TXGOODFRAMES|TL_DIO_ADDR_INC);
	DIO_LONG_GET(*p++);
	DIO_LONG_GET(*p++);
	DIO_LONG_GET(*p++);
	DIO_LONG_GET(*p++);
	DIO_LONG_GET(*p++);

	ifp->if_opackets += tl_tx_goodframes(tl_stats);
	ifp->if_collisions += tl_stats.tl_tx_single_collision +
				tl_stats.tl_tx_multi_collision;
	ifp->if_ipackets += tl_rx_goodframes(tl_stats);
	ifp->if_ierrors += tl_stats.tl_crc_errors + tl_stats.tl_code_errors +
			    tl_rx_overrun(tl_stats);
	ifp->if_oerrors += tl_tx_underrun(tl_stats);

	/* Reschedule for one second from now. */
	sc->tl_stat_ch = timeout(tl_stats_update, sc, hz);

	return;
}

/*
 * Encapsulate an mbuf chain in a list by coupling the mbuf data
 * pointers to the fragment pointers.
 *
 * Returns 0 on success, 1 if no mbuf (cluster) could be allocated for
 * the coalesce path.  On success the chain's list entry is marked
 * TL_CSTAT_READY and its forward pointer cleared.
 */
static int tl_encap(sc, c, m_head)
	struct tl_softc		*sc;
	struct tl_chain		*c;
	struct mbuf		*m_head;
{
	int			frag = 0;
	struct tl_frag		*f = NULL;
	int			total_len;
	struct mbuf		*m;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	m = m_head;
	total_len = 0;

	for (m = m_head, frag = 0; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			if (frag == TL_MAXFRAGS)
				break;
			total_len+= m->m_len;
			/* Fragment pointers are physical addresses. */
			c->tl_ptr->tl_frag[frag].tlist_dadr =
				vtophys(mtod(m, vm_offset_t));
			c->tl_ptr->tl_frag[frag].tlist_dcnt = m->m_len;
			frag++;
		}
	}

	/*
	 * Handle special cases.
	 * Special case #1: we used up all 10 fragments, but
	 * we have more mbufs left in the chain. Copy the
	 * data into an mbuf cluster. Note that we don't
	 * bother clearing the values in the other fragment
	 * pointers/counters; it wouldn't gain us anything,
	 * and would waste cycles.
	 */
	if (m != NULL) {
		struct mbuf		*m_new = NULL;

		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			/* NOTE(review): message lacks a trailing '\n'. */
			printf("tl%d: no memory for tx list", sc->tl_unit);
			return(1);
		}
		if (m_head->m_pkthdr.len > MHLEN) {
			MCLGET(m_new, M_DONTWAIT);
			if (!(m_new->m_flags & M_EXT)) {
				m_freem(m_new);
				/* NOTE(review): message lacks a trailing '\n'. */
				printf("tl%d: no memory for tx list",
					sc->tl_unit);
				return(1);
			}
		}
		/* Coalesce the whole chain into the single new mbuf. */
		m_copydata(m_head, 0, m_head->m_pkthdr.len,	
					mtod(m_new, caddr_t));
		m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
		m_freem(m_head);
		m_head = m_new;
		f = &c->tl_ptr->tl_frag[0];
		f->tlist_dadr = vtophys(mtod(m_new, caddr_t));
		f->tlist_dcnt = total_len = m_new->m_len;
		frag = 1;
	}

	/*
	 * Special case #2: the frame is smaller than the minimum
	 * frame size. We have to pad it to make the chip happy.
	 */
	if (total_len < TL_MIN_FRAMELEN) {
		if (frag == TL_MAXFRAGS)
			/* NOTE(review): "to small" should read "too small". */
			printf("all frags filled but frame still to small!\n");
		f = &c->tl_ptr->tl_frag[frag];
		f->tlist_dcnt = TL_MIN_FRAMELEN - total_len;
		/* Pad from a dedicated scratch area in the list data. */
		f->tlist_dadr = vtophys(&sc->tl_ldata->tl_pad);
		total_len += f->tlist_dcnt;
		frag++;
	}

	c->tl_mbuf = m_head;
	/* Mark the final fragment and arm the list entry. */
	c->tl_ptr->tl_frag[frag - 1].tlist_dcnt |= TL_LAST_FRAG;
	c->tl_ptr->tlist_frsize = total_len;
	c->tl_ptr->tlist_cstat = TL_CSTAT_READY;
	c->tl_ptr->tlist_fptr = 0;

	return(0);
}

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
2162 */ 2163static void tl_start(ifp) 2164 struct ifnet *ifp; 2165{ 2166 struct tl_softc *sc; 2167 struct tl_csr *csr; 2168 struct mbuf *m_head = NULL; 2169 u_int32_t cmd; 2170 struct tl_chain *prev = NULL, *cur_tx = NULL, *start_tx; 2171 2172 sc = ifp->if_softc; 2173 csr = sc->csr; 2174 2175 /* 2176 * Check for an available queue slot. If there are none, 2177 * punt. 2178 */ 2179 if (sc->tl_cdata.tl_tx_free == NULL) { 2180 ifp->if_flags |= IFF_OACTIVE; 2181 return; 2182 } 2183 2184 start_tx = sc->tl_cdata.tl_tx_free; 2185 2186 while(sc->tl_cdata.tl_tx_free != NULL) { 2187 IF_DEQUEUE(&ifp->if_snd, m_head); 2188 if (m_head == NULL) 2189 break; 2190 2191 /* Pick a chain member off the free list. */ 2192 cur_tx = sc->tl_cdata.tl_tx_free; 2193 sc->tl_cdata.tl_tx_free = cur_tx->tl_next; 2194 2195 cur_tx->tl_next = NULL; 2196 2197 /* Pack the data into the list. */ 2198 tl_encap(sc, cur_tx, m_head); 2199 2200 /* Chain it together */ 2201 if (prev != NULL) { 2202 prev->tl_next = cur_tx; 2203 prev->tl_ptr->tlist_fptr = vtophys(cur_tx->tl_ptr); 2204 } 2205 prev = cur_tx; 2206 2207 /* 2208 * If there's a BPF listener, bounce a copy of this frame 2209 * to him. 2210 */ 2211#if NBPFILTER > 0 2212 if (ifp->if_bpf) 2213 bpf_mtap(ifp, cur_tx->tl_mbuf); 2214#endif 2215 } 2216 2217 /* 2218 * That's all we can stands, we can't stands no more. 2219 * If there are no other transfers pending, then issue the 2220 * TX GO command to the adapter to start things moving. 2221 * Otherwise, just leave the data in the queue and let 2222 * the EOF/EOC interrupt handler send. 
2223 */ 2224 if (sc->tl_cdata.tl_tx_head == NULL) { 2225 sc->tl_cdata.tl_tx_head = start_tx; 2226 sc->tl_cdata.tl_tx_tail = cur_tx; 2227 if (sc->tl_txeoc) { 2228 sc->tl_txeoc = 0; 2229 sc->csr->tl_ch_parm = vtophys(start_tx->tl_ptr); 2230 cmd = sc->csr->tl_host_cmd; 2231 cmd &= ~TL_CMD_RT; 2232 cmd |= TL_CMD_GO|TL_CMD_INTSON; 2233 sc->csr->tl_host_cmd = cmd; 2234 } 2235 } else { 2236 sc->tl_cdata.tl_tx_tail->tl_next = start_tx; 2237 sc->tl_cdata.tl_tx_tail->tl_ptr->tlist_fptr = 2238 vtophys(start_tx->tl_ptr); 2239 sc->tl_cdata.tl_tx_tail = start_tx; 2240 } 2241 2242 /* 2243 * Set a timeout in case the chip goes out to lunch. 2244 */ 2245 ifp->if_timer = 5; 2246 2247 return; 2248} 2249 2250static void tl_init(xsc) 2251 void *xsc; 2252{ 2253 struct tl_softc *sc = xsc; 2254 struct ifnet *ifp = &sc->arpcom.ac_if; 2255 struct tl_csr *csr = sc->csr; 2256 int s; 2257 u_int16_t phy_sts; 2258 2259 s = splimp(); 2260 2261 ifp = &sc->arpcom.ac_if; 2262 2263 /* 2264 * Cancel pending I/O. 2265 */ 2266 tl_stop(sc); 2267 2268 /* 2269 * Set 'capture all frames' bit for promiscuous mode. 2270 */ 2271 if (ifp->if_flags & IFF_PROMISC) { 2272 DIO_SEL(TL_NETCMD); 2273 DIO_BYTE0_SET(TL_CMD_CAF); 2274 } else { 2275 DIO_SEL(TL_NETCMD); 2276 DIO_BYTE0_CLR(TL_CMD_CAF); 2277 } 2278 2279 /* 2280 * Set capture broadcast bit to capture broadcast frames. 
2281 */ 2282 if (ifp->if_flags & IFF_BROADCAST) { 2283 DIO_SEL(TL_NETCMD); 2284 DIO_BYTE0_CLR(TL_CMD_NOBRX); 2285 } else { 2286 DIO_SEL(TL_NETCMD); 2287 DIO_BYTE0_SET(TL_CMD_NOBRX); 2288 } 2289 2290 /* Init our MAC address */ 2291 DIO_SEL(TL_AREG0_B5); 2292 csr->u.tl_dio_bytes.byte0 = sc->arpcom.ac_enaddr[0]; 2293 csr->u.tl_dio_bytes.byte1 = sc->arpcom.ac_enaddr[1]; 2294 csr->u.tl_dio_bytes.byte2 = sc->arpcom.ac_enaddr[2]; 2295 csr->u.tl_dio_bytes.byte3 = sc->arpcom.ac_enaddr[3]; 2296 DIO_SEL(TL_AREG0_B1); 2297 csr->u.tl_dio_bytes.byte0 = sc->arpcom.ac_enaddr[4]; 2298 csr->u.tl_dio_bytes.byte1 = sc->arpcom.ac_enaddr[5]; 2299 2300 /* Init circular RX list. */ 2301 if (tl_list_rx_init(sc)) { 2302 printf("tl%d: failed to set up rx lists\n", sc->tl_unit); 2303 return; 2304 } 2305 2306 /* Init TX pointers. */ 2307 tl_list_tx_init(sc); 2308 2309 /* 2310 * Enable PHY interrupts. 2311 */ 2312 phy_sts = tl_phy_readreg(sc, TL_PHY_CTL); 2313 phy_sts |= PHY_CTL_INTEN; 2314 tl_phy_writereg(sc, TL_PHY_CTL, phy_sts); 2315 2316 /* Enable MII interrupts. */ 2317 DIO_SEL(TL_NETSIO); 2318 DIO_BYTE1_SET(TL_SIO_MINTEN); 2319 2320 /* Enable PCI interrupts. */ 2321 csr->tl_host_cmd |= TL_CMD_INTSON; 2322 2323 /* Load the address of the rx list */ 2324 sc->csr->tl_host_cmd |= TL_CMD_RT; 2325 sc->csr->tl_ch_parm = vtophys(&sc->tl_ldata->tl_rx_list[0]); 2326 2327 /* Send the RX go command */ 2328 sc->csr->tl_host_cmd |= (TL_CMD_GO|TL_CMD_RT); 2329 sc->tl_iflist->tl_active_phy = sc->tl_phy_addr; 2330 2331 ifp->if_flags |= IFF_RUNNING; 2332 ifp->if_flags &= ~IFF_OACTIVE; 2333 2334 (void)splx(s); 2335 2336 /* Start the stats update counter */ 2337 sc->tl_stat_ch = timeout(tl_stats_update, sc, hz); 2338 2339 return; 2340} 2341 2342/* 2343 * Set media options. 
2344 */ 2345static int tl_ifmedia_upd(ifp) 2346 struct ifnet *ifp; 2347{ 2348 struct tl_softc *sc; 2349 struct tl_csr *csr; 2350 struct ifmedia *ifm; 2351 2352 sc = ifp->if_softc; 2353 csr = sc->csr; 2354 ifm = &sc->ifmedia; 2355 2356 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 2357 return(EINVAL); 2358 2359 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) 2360 tl_autoneg(sc, TL_FLAG_SCHEDDELAY, 1); 2361 else 2362 tl_setmode(sc, ifm->ifm_media); 2363 2364 return(0); 2365} 2366 2367/* 2368 * Report current media status. 2369 */ 2370static void tl_ifmedia_sts(ifp, ifmr) 2371 struct ifnet *ifp; 2372 struct ifmediareq *ifmr; 2373{ 2374 u_int16_t phy_ctl; 2375 u_int16_t phy_sts; 2376 struct tl_softc *sc; 2377 struct tl_csr *csr; 2378 2379 sc = ifp->if_softc; 2380 csr = sc->csr; 2381 2382 ifmr->ifm_active = IFM_ETHER; 2383 2384 phy_ctl = tl_phy_readreg(sc, PHY_BMCR); 2385 phy_sts = tl_phy_readreg(sc, TL_PHY_CTL); 2386 2387 if (phy_sts & PHY_CTL_AUISEL) 2388 ifmr->ifm_active |= IFM_10_5; 2389 2390 if (phy_ctl & PHY_BMCR_LOOPBK) 2391 ifmr->ifm_active |= IFM_LOOP; 2392 2393 if (phy_ctl & PHY_BMCR_SPEEDSEL) 2394 ifmr->ifm_active |= IFM_100_TX; 2395 else 2396 ifmr->ifm_active |= IFM_10_T; 2397 2398 if (phy_ctl & PHY_BMCR_DUPLEX) { 2399 ifmr->ifm_active |= IFM_FDX; 2400 ifmr->ifm_active &= ~IFM_HDX; 2401 } else { 2402 ifmr->ifm_active &= ~IFM_FDX; 2403 ifmr->ifm_active |= IFM_HDX; 2404 } 2405 2406 if (phy_ctl & PHY_BMCR_AUTONEGENBL) 2407 ifmr->ifm_active |= IFM_AUTO; 2408 2409 return; 2410} 2411 2412static int tl_ioctl(ifp, command, data) 2413 struct ifnet *ifp; 2414 int command; 2415 caddr_t data; 2416{ 2417 struct tl_softc *sc = ifp->if_softc; 2418 struct ifreq *ifr = (struct ifreq *) data; 2419 int s, error = 0; 2420 2421 s = splimp(); 2422 2423 switch(command) { 2424 case SIOCSIFADDR: 2425 case SIOCGIFADDR: 2426 case SIOCSIFMTU: 2427 error = ether_ioctl(ifp, command, data); 2428 break; 2429 case SIOCSIFFLAGS: 2430 /* 2431 * Make sure no more than one PHY is active 2432 * at any 
one time. 2433 */ 2434 if (ifp->if_flags & IFF_UP) { 2435 if (sc->tl_iflist->tl_active_phy != TL_PHYS_IDLE && 2436 sc->tl_iflist->tl_active_phy != sc->tl_phy_addr) { 2437 error = EINVAL; 2438 break; 2439 } 2440 sc->tl_iflist->tl_active_phy = sc->tl_phy_addr; 2441 tl_init(sc); 2442 } else { 2443 if (ifp->if_flags & IFF_RUNNING) { 2444 sc->tl_iflist->tl_active_phy = TL_PHYS_IDLE; 2445 tl_stop(sc); 2446 } 2447 } 2448 error = 0; 2449 break; 2450 case SIOCADDMULTI: 2451 case SIOCDELMULTI: 2452 tl_setmulti(sc); 2453 error = 0; 2454 break; 2455 case SIOCSIFMEDIA: 2456 case SIOCGIFMEDIA: 2457 error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command); 2458 break; 2459 default: 2460 error = EINVAL; 2461 break; 2462 } 2463 2464 (void)splx(s); 2465 2466 return(error); 2467} 2468 2469static void tl_watchdog(ifp) 2470 struct ifnet *ifp; 2471{ 2472 struct tl_softc *sc; 2473 u_int16_t bmsr; 2474 2475 sc = ifp->if_softc; 2476 2477 if (sc->tl_autoneg) { 2478 tl_autoneg(sc, TL_FLAG_DELAYTIMEO, 1); 2479 return; 2480 } 2481 2482 /* Check that we're still connected. */ 2483 tl_phy_readreg(sc, PHY_BMSR); 2484 bmsr = tl_phy_readreg(sc, PHY_BMSR); 2485 if (!(bmsr & PHY_BMSR_LINKSTAT)) { 2486 printf("tl%d: no carrier\n", sc->tl_unit); 2487 tl_autoneg(sc, TL_FLAG_SCHEDDELAY, 1); 2488 } else 2489 printf("tl%d: device timeout\n", sc->tl_unit); 2490 2491 ifp->if_oerrors++; 2492 2493 tl_init(sc); 2494 2495 return; 2496} 2497 2498/* 2499 * Stop the adapter and free any mbufs allocated to the 2500 * RX and TX lists. 2501 */ 2502static void tl_stop(sc) 2503 struct tl_softc *sc; 2504{ 2505 register int i; 2506 struct ifnet *ifp; 2507 struct tl_csr *csr; 2508 struct tl_mii_frame frame; 2509 2510 csr = sc->csr; 2511 ifp = &sc->arpcom.ac_if; 2512 2513 /* Stop the stats updater. 
*/ 2514 untimeout(tl_stats_update, sc, sc->tl_stat_ch); 2515 2516 /* Stop the transmitter */ 2517 sc->csr->tl_host_cmd &= TL_CMD_RT; 2518 sc->csr->tl_host_cmd |= TL_CMD_STOP; 2519 2520 /* Stop the receiver */ 2521 sc->csr->tl_host_cmd |= TL_CMD_RT; 2522 sc->csr->tl_host_cmd |= TL_CMD_STOP; 2523 2524 /* 2525 * Disable host interrupts. 2526 */ 2527 sc->csr->tl_host_cmd |= TL_CMD_INTSOFF; 2528 2529 /* 2530 * Disable PHY interrupts. 2531 */ 2532 bzero((char *)&frame, sizeof(frame)); 2533 2534 frame.mii_phyaddr = sc->tl_phy_addr; 2535 frame.mii_regaddr = TL_PHY_CTL; 2536 tl_mii_readreg(csr, &frame); 2537 frame.mii_data |= PHY_CTL_INTEN; 2538 tl_mii_writereg(csr, &frame); 2539 2540 /* 2541 * Disable MII interrupts. 2542 */ 2543 DIO_SEL(TL_NETSIO); 2544 DIO_BYTE1_CLR(TL_SIO_MINTEN); 2545 2546 /* 2547 * Clear list pointer. 2548 */ 2549 sc->csr->tl_ch_parm = 0; 2550 2551 /* 2552 * Free the RX lists. 2553 */ 2554 for (i = 0; i < TL_RX_LIST_CNT; i++) { 2555 if (sc->tl_cdata.tl_rx_chain[i].tl_mbuf != NULL) { 2556 m_freem(sc->tl_cdata.tl_rx_chain[i].tl_mbuf); 2557 sc->tl_cdata.tl_rx_chain[i].tl_mbuf = NULL; 2558 } 2559 } 2560 bzero((char *)&sc->tl_ldata->tl_rx_list, 2561 sizeof(sc->tl_ldata->tl_rx_list)); 2562 2563 /* 2564 * Free the TX list buffers. 2565 */ 2566 for (i = 0; i < TL_TX_LIST_CNT; i++) { 2567 if (sc->tl_cdata.tl_tx_chain[i].tl_mbuf != NULL) { 2568 m_freem(sc->tl_cdata.tl_tx_chain[i].tl_mbuf); 2569 sc->tl_cdata.tl_tx_chain[i].tl_mbuf = NULL; 2570 } 2571 } 2572 bzero((char *)&sc->tl_ldata->tl_tx_list, 2573 sizeof(sc->tl_ldata->tl_tx_list)); 2574 2575 sc->tl_iflist->tl_active_phy = TL_PHYS_IDLE; 2576 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 2577 2578 return; 2579} 2580 2581/* 2582 * Stop all chip I/O so that the kernel's probe routines don't 2583 * get confused by errant DMAs when rebooting. 
2584 */ 2585static void tl_shutdown(howto, xilist) 2586 int howto; 2587 void *xilist; 2588{ 2589 struct tl_iflist *ilist = (struct tl_iflist *)xilist; 2590 struct tl_csr *csr = ilist->csr; 2591 struct tl_mii_frame frame; 2592 int i; 2593 2594 /* Stop the transmitter */ 2595 csr->tl_host_cmd &= TL_CMD_RT; 2596 csr->tl_host_cmd |= TL_CMD_STOP; 2597 2598 /* Stop the receiver */ 2599 csr->tl_host_cmd |= TL_CMD_RT; 2600 csr->tl_host_cmd |= TL_CMD_STOP; 2601 2602 /* 2603 * Disable host interrupts. 2604 */ 2605 csr->tl_host_cmd |= TL_CMD_INTSOFF; 2606 2607 /* 2608 * Disable PHY interrupts. 2609 */ 2610 bzero((char *)&frame, sizeof(frame)); 2611 2612 for (i = TL_PHYADDR_MIN; i < TL_PHYADDR_MAX + 1; i++) { 2613 frame.mii_phyaddr = i; 2614 frame.mii_regaddr = TL_PHY_CTL; 2615 tl_mii_readreg(csr, &frame); 2616 frame.mii_data |= PHY_CTL_INTEN; 2617 tl_mii_writereg(csr, &frame); 2618 }; 2619 2620 /* 2621 * Disable MII interrupts. 2622 */ 2623 DIO_SEL(TL_NETSIO); 2624 DIO_BYTE1_CLR(TL_SIO_MINTEN); 2625 2626 return; 2627} 2628 2629 2630static struct pci_device tlc_device = { 2631 "tlc", 2632 tl_probe, 2633 tl_attach_ctlr, 2634 &tl_count, 2635 NULL 2636}; 2637DATA_SET(pcidevice_set, tlc_device); 2638